-rw-r--r-- Documentation/ABI/testing/sysfs-ibft | 2
-rw-r--r-- Documentation/DocBook/media/Makefile | 2
-rw-r--r-- Documentation/DocBook/media/v4l/compat.xml | 4
-rw-r--r-- Documentation/HOWTO | 1
-rw-r--r-- Documentation/SubmittingPatches | 10
-rw-r--r-- Documentation/arm64/memory.txt | 2
-rw-r--r-- Documentation/development-process/2.Process | 4
-rw-r--r-- Documentation/development-process/8.Conclusion | 4
-rw-r--r-- Documentation/devicetree/bindings/ata/sata_rcar.txt | 6
-rw-r--r-- Documentation/devicetree/bindings/interrupt-controller/interrupts.txt | 4
-rw-r--r-- Documentation/devicetree/bindings/mailbox/mailbox.txt | 38
-rw-r--r-- Documentation/devicetree/bindings/net/smsc-lan91c111.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/pci/pci.txt | 11
-rw-r--r-- Documentation/devicetree/bindings/pinctrl/img,tz1090-pdc-pinctrl.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/pinctrl/img,tz1090-pinctrl.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/pinctrl/lantiq,falcon-pinumx.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/pinctrl/lantiq,xway-pinumx.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/pinctrl/nvidia,tegra20-pinmux.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/pinctrl/pinctrl-sirf.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/pinctrl/pinctrl_spear.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/pinctrl/qcom,apq8084-pinctrl.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/pinctrl/qcom,ipq8064-pinctrl.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/pinctrl/qcom,msm8960-pinctrl.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/pinctrl/qcom,msm8974-pinctrl.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/pwm/pwm-fsl-ftm.txt | 19
-rw-r--r-- Documentation/devicetree/bindings/pwm/pwm-rockchip.txt | 4
-rw-r--r-- Documentation/devicetree/bindings/sound/davinci-mcasp-audio.txt | 2
-rw-r--r-- Documentation/devicetree/bindings/sound/sgtl5000.txt | 10
-rw-r--r-- Documentation/devicetree/bindings/submitting-patches.txt | 3
-rw-r--r-- Documentation/devicetree/bindings/thermal/imx-thermal.txt | 5
-rw-r--r-- Documentation/devicetree/bindings/thermal/rcar-thermal.txt | 5
-rw-r--r-- Documentation/devicetree/bindings/vendor-prefixes.txt | 5
-rw-r--r-- Documentation/devicetree/bindings/watchdog/cadence-wdt.txt | 24
-rw-r--r-- Documentation/devicetree/bindings/watchdog/fsl-imx-wdt.txt | 3
-rw-r--r-- Documentation/devicetree/bindings/watchdog/meson6-wdt.txt | 13
-rw-r--r-- Documentation/devicetree/bindings/watchdog/qcom-wdt.txt | 24
-rw-r--r-- Documentation/devicetree/bindings/watchdog/samsung-wdt.txt | 1
-rw-r--r-- Documentation/filesystems/Locking | 2
-rw-r--r-- Documentation/filesystems/overlayfs.txt | 198
-rw-r--r-- Documentation/filesystems/vfs.txt | 7
-rw-r--r-- Documentation/input/elantech.txt | 81
-rw-r--r-- Documentation/kernel-parameters.txt | 32
-rw-r--r-- Documentation/kmemleak.txt | 4
-rw-r--r-- Documentation/mailbox.txt | 122
-rw-r--r-- Documentation/networking/ip-sysctl.txt | 14
-rw-r--r-- Documentation/networking/timestamping.txt | 2
-rw-r--r-- Documentation/power/pm_qos_interface.txt | 4
-rw-r--r-- Documentation/prctl/Makefile | 2
-rw-r--r-- Documentation/ptp/testptp.mk | 33
-rw-r--r-- Documentation/scsi/osd.txt | 3
-rw-r--r-- Documentation/target/tcmu-design.txt | 378
-rw-r--r-- Documentation/vDSO/Makefile | 3
-rw-r--r-- Documentation/vDSO/vdso_standalone_test_x86.c | 2
-rw-r--r-- Documentation/video4linux/vivid.txt | 12
-rw-r--r-- Documentation/vm/hugetlbpage.txt | 2
-rw-r--r-- MAINTAINERS | 121
-rw-r--r-- Makefile | 9
-rw-r--r-- arch/arc/Kconfig | 6
-rw-r--r-- arch/arc/Makefile | 17
-rw-r--r-- arch/arc/boot/dts/angel4.dts | 5
-rw-r--r-- arch/arc/boot/dts/nsimosci.dts | 7
-rw-r--r-- arch/arc/configs/fpga_defconfig | 1
-rw-r--r-- arch/arc/configs/fpga_noramfs_defconfig | 1
-rw-r--r-- arch/arc/configs/nsimosci_defconfig | 1
-rw-r--r-- arch/arc/include/asm/arcregs.h | 89
-rw-r--r-- arch/arc/include/asm/atomic.h | 4
-rw-r--r-- arch/arc/include/asm/bitops.h | 4
-rw-r--r-- arch/arc/include/asm/bug.h | 7
-rw-r--r-- arch/arc/include/asm/cache.h | 2
-rw-r--r-- arch/arc/include/asm/current.h | 4
-rw-r--r-- arch/arc/include/asm/irqflags.h | 4
-rw-r--r-- arch/arc/include/asm/kgdb.h | 32
-rw-r--r-- arch/arc/include/asm/processor.h | 13
-rw-r--r-- arch/arc/include/asm/setup.h | 1
-rw-r--r-- arch/arc/include/asm/smp.h | 10
-rw-r--r-- arch/arc/include/asm/string.h | 3
-rw-r--r-- arch/arc/include/asm/syscalls.h | 4
-rw-r--r-- arch/arc/include/asm/thread_info.h | 4
-rw-r--r-- arch/arc/include/asm/unaligned.h | 2
-rw-r--r-- arch/arc/kernel/Makefile | 2
-rw-r--r-- arch/arc/kernel/disasm.c | 4
-rw-r--r-- arch/arc/kernel/head.S | 10
-rw-r--r-- arch/arc/kernel/kgdb.c | 5
-rw-r--r-- arch/arc/kernel/perf_event.c | 22
-rw-r--r-- arch/arc/kernel/setup.c | 272
-rw-r--r-- arch/arc/kernel/smp.c | 2
-rw-r--r-- arch/arc/mm/cache_arc700.c | 14
-rw-r--r-- arch/arc/mm/tlb.c | 8
-rw-r--r-- arch/arc/plat-arcfpga/Kconfig | 13
-rw-r--r-- arch/arc/plat-arcfpga/include/plat/irq.h | 27
-rw-r--r-- arch/arc/plat-arcfpga/include/plat/memmap.h | 29
-rw-r--r-- arch/arc/plat-arcfpga/platform.c | 61
-rw-r--r-- arch/arc/plat-arcfpga/smp.c | 3
-rw-r--r-- arch/arc/plat-tb10x/Kconfig | 1
-rw-r--r-- arch/arc/plat-tb10x/tb10x.c | 13
-rw-r--r-- arch/arm/Kconfig.debug | 2
-rw-r--r-- arch/arm/boot/compressed/head.S | 20
-rw-r--r-- arch/arm/boot/dts/am335x-evm.dts | 2
-rw-r--r-- arch/arm/boot/dts/am437x-gp-evm.dts | 4
-rw-r--r-- arch/arm/boot/dts/am437x-sk-evm.dts | 4
-rw-r--r-- arch/arm/boot/dts/am43x-epos-evm.dts | 4
-rw-r--r-- arch/arm/boot/dts/at91sam9263.dtsi | 5
-rw-r--r-- arch/arm/boot/dts/exynos5250-snow.dts | 4
-rw-r--r-- arch/arm/boot/dts/exynos5250.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/imx28-evk.dts | 1
-rw-r--r-- arch/arm/boot/dts/omap3-n900.dts | 2
-rw-r--r-- arch/arm/boot/dts/r8a7740.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/r8a7790.dtsi | 4
-rw-r--r-- arch/arm/boot/dts/sama5d31.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/sama5d33.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/sama5d34.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/sama5d35.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/sama5d36.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/sama5d3xcm.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/socfpga.dtsi | 12
-rw-r--r-- arch/arm/boot/dts/socfpga_arria5.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/socfpga_arria5_socdk.dts | 12
-rw-r--r-- arch/arm/boot/dts/socfpga_cyclone5_socdk.dts | 15
-rw-r--r-- arch/arm/boot/dts/socfpga_cyclone5_sockit.dts | 12
-rw-r--r-- arch/arm/boot/dts/sun6i-a31.dtsi | 4
-rw-r--r-- arch/arm/boot/dts/tegra114-dalmore.dts | 1
-rw-r--r-- arch/arm/boot/dts/tegra114-roth.dts | 9
-rw-r--r-- arch/arm/boot/dts/tegra114-tn7.dts | 5
-rw-r--r-- arch/arm/boot/dts/tegra114.dtsi | 7
-rw-r--r-- arch/arm/boot/dts/tegra124-jetson-tk1.dts | 1
-rw-r--r-- arch/arm/boot/dts/tegra124-nyan-big.dts | 1
-rw-r--r-- arch/arm/boot/dts/tegra124-venice2.dts | 1
-rw-r--r-- arch/arm/boot/dts/tegra124.dtsi | 8
-rw-r--r-- arch/arm/boot/dts/tegra20-harmony.dts | 1
-rw-r--r-- arch/arm/boot/dts/tegra20-iris-512.dts | 5
-rw-r--r-- arch/arm/boot/dts/tegra20-medcom-wide.dts | 4
-rw-r--r-- arch/arm/boot/dts/tegra20-paz00.dts | 2
-rw-r--r-- arch/arm/boot/dts/tegra20-seaboard.dts | 1
-rw-r--r-- arch/arm/boot/dts/tegra20-tamonten.dtsi | 1
-rw-r--r-- arch/arm/boot/dts/tegra20-trimslice.dts | 1
-rw-r--r-- arch/arm/boot/dts/tegra20-ventana.dts | 1
-rw-r--r-- arch/arm/boot/dts/tegra20-whistler.dts | 1
-rw-r--r-- arch/arm/boot/dts/tegra20.dtsi | 8
-rw-r--r-- arch/arm/boot/dts/tegra30-apalis-eval.dts | 4
-rw-r--r-- arch/arm/boot/dts/tegra30-beaver.dts | 1
-rw-r--r-- arch/arm/boot/dts/tegra30-cardhu.dtsi | 2
-rw-r--r-- arch/arm/boot/dts/tegra30-colibri-eval-v3.dts | 3
-rw-r--r-- arch/arm/boot/dts/tegra30.dtsi | 8
-rw-r--r-- arch/arm/boot/dts/vf610-cosmic.dts | 19
-rw-r--r-- arch/arm/boot/dts/zynq-7000.dtsi | 24
-rw-r--r-- arch/arm/boot/dts/zynq-parallella.dts | 4
-rw-r--r-- arch/arm/common/edma.c | 9
-rw-r--r-- arch/arm/configs/exynos_defconfig | 2
-rw-r--r-- arch/arm/configs/imx_v4_v5_defconfig | 1
-rw-r--r-- arch/arm/configs/imx_v6_v7_defconfig | 1
-rw-r--r-- arch/arm/configs/multi_v7_defconfig | 5
-rw-r--r-- arch/arm/configs/omap2plus_defconfig | 4
-rw-r--r-- arch/arm/configs/socfpga_defconfig | 71
-rw-r--r-- arch/arm/configs/sunxi_defconfig | 1
-rw-r--r-- arch/arm/include/asm/thread_info.h | 11
-rw-r--r-- arch/arm/include/uapi/asm/unistd.h | 1
-rw-r--r-- arch/arm/kernel/asm-offsets.c | 12
-rw-r--r-- arch/arm/kernel/calls.S | 1
-rw-r--r-- arch/arm/kernel/traps.c | 31
-rw-r--r-- arch/arm/kvm/mmu.c | 10
-rw-r--r-- arch/arm/mach-highbank/highbank.c | 2
-rw-r--r-- arch/arm/mach-imx/clk-imx6q.c | 14
-rw-r--r-- arch/arm/mach-imx/clk-vf610.c | 134
-rw-r--r-- arch/arm/mach-ixp4xx/include/mach/io.h | 4
-rw-r--r-- arch/arm/mach-mvebu/board-v7.c | 2
-rw-r--r-- arch/arm/mach-mvebu/coherency.c | 2
-rw-r--r-- arch/arm/mach-omap2/omap_device.c | 4
-rw-r--r-- arch/arm/mach-omap2/pdata-quirks.c | 3
-rw-r--r-- arch/arm/mach-pxa/include/mach/addr-map.h | 5
-rw-r--r-- arch/arm/mach-shmobile/clock-r8a7740.c | 9
-rw-r--r-- arch/arm/mach-shmobile/clock-r8a7790.c | 2
-rw-r--r-- arch/arm/mach-shmobile/setup-sh73a0.c | 20
-rw-r--r-- arch/arm/mach-socfpga/core.h | 2
-rw-r--r-- arch/arm/mach-socfpga/headsmp.S | 25
-rw-r--r-- arch/arm/mach-socfpga/platsmp.c | 4
-rw-r--r-- arch/arm/mach-socfpga/socfpga.c | 4
-rw-r--r-- arch/arm/mach-tegra/irq.c | 22
-rw-r--r-- arch/arm/mm/Kconfig | 1
-rw-r--r-- arch/arm/mm/cache-l2x0.c | 26
-rw-r--r-- arch/arm/mm/dma-mapping.c | 1
-rw-r--r-- arch/arm/mm/highmem.c | 3
-rw-r--r-- arch/arm/mm/init.c | 8
-rw-r--r-- arch/arm/mm/proc-v7.S | 2
-rw-r--r-- arch/arm/mm/proc-xscale.S | 4
-rw-r--r-- arch/arm/plat-orion/gpio.c | 36
-rw-r--r-- arch/arm64/Kconfig | 3
-rw-r--r-- arch/arm64/boot/dts/apm-storm.dtsi | 10
-rw-r--r-- arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi | 35
-rw-r--r-- arch/arm64/configs/defconfig | 28
-rw-r--r-- arch/arm64/include/asm/compat.h | 4
-rw-r--r-- arch/arm64/include/asm/elf.h | 4
-rw-r--r-- arch/arm64/include/asm/irq_work.h | 11
-rw-r--r-- arch/arm64/include/asm/memory.h | 2
-rw-r--r-- arch/arm64/include/asm/unistd32.h | 2
-rw-r--r-- arch/arm64/kernel/efi-entry.S | 27
-rw-r--r-- arch/arm64/kernel/efi.c | 44
-rw-r--r-- arch/arm64/kernel/insn.c | 5
-rw-r--r-- arch/arm64/kernel/process.c | 5
-rw-r--r-- arch/arm64/kernel/psci.c | 2
-rw-r--r-- arch/arm64/kvm/sys_regs.c | 9
-rw-r--r-- arch/arm64/lib/clear_user.S | 2
-rw-r--r-- arch/arm64/mm/ioremap.c | 4
-rw-r--r-- arch/arm64/mm/mmu.c | 14
-rw-r--r-- arch/arm64/mm/pgd.c | 18
-rw-r--r-- arch/arm64/net/bpf_jit.h | 8
-rw-r--r-- arch/arm64/net/bpf_jit_comp.c | 84
-rw-r--r-- arch/ia64/kernel/efi.c | 6
-rw-r--r-- arch/ia64/kvm/kvm-ia64.c | 2
-rw-r--r-- arch/m68k/include/asm/unistd.h | 2
-rw-r--r-- arch/m68k/include/uapi/asm/unistd.h | 1
-rw-r--r-- arch/m68k/kernel/syscalltable.S | 1
-rw-r--r-- arch/microblaze/Kconfig | 4
-rw-r--r-- arch/microblaze/include/asm/unistd.h | 2
-rw-r--r-- arch/microblaze/include/uapi/asm/unistd.h | 1
-rw-r--r-- arch/microblaze/kernel/syscall_table.S | 1
-rw-r--r-- arch/microblaze/pci/pci-common.c | 9
-rw-r--r-- arch/mips/Kconfig | 15
-rw-r--r-- arch/mips/Makefile | 9
-rw-r--r-- arch/mips/ath79/mach-db120.c | 2
-rw-r--r-- arch/mips/cavium-octeon/octeon-irq.c | 2
-rw-r--r-- arch/mips/cavium-octeon/setup.c | 9
-rw-r--r-- arch/mips/include/asm/asmmacro-32.h | 6
-rw-r--r-- arch/mips/include/asm/asmmacro.h | 18
-rw-r--r-- arch/mips/include/asm/cop2.h | 8
-rw-r--r-- arch/mips/include/asm/fpregdef.h | 14
-rw-r--r-- arch/mips/include/asm/fpu.h | 4
-rw-r--r-- arch/mips/include/asm/ftrace.h | 4
-rw-r--r-- arch/mips/include/asm/idle.h | 7
-rw-r--r-- arch/mips/include/asm/jump_label.h | 8
-rw-r--r-- arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h | 2
-rw-r--r-- arch/mips/include/asm/mipsregs.h | 13
-rw-r--r-- arch/mips/include/asm/r4kcache.h | 4
-rw-r--r-- arch/mips/include/asm/uaccess.h | 18
-rw-r--r-- arch/mips/include/uapi/asm/ptrace.h | 2
-rw-r--r-- arch/mips/include/uapi/asm/unistd.h | 15
-rw-r--r-- arch/mips/kernel/bmips_vec.S | 3
-rw-r--r-- arch/mips/kernel/branch.c | 8
-rw-r--r-- arch/mips/kernel/cps-vec.S | 2
-rw-r--r-- arch/mips/kernel/cpu-probe.c | 40
-rw-r--r-- arch/mips/kernel/genex.S | 1
-rw-r--r-- arch/mips/kernel/idle.c | 3
-rw-r--r-- arch/mips/kernel/jump_label.c | 42
-rw-r--r-- arch/mips/kernel/r2300_fpu.S | 6
-rw-r--r-- arch/mips/kernel/r2300_switch.S | 5
-rw-r--r-- arch/mips/kernel/r4k_fpu.S | 27
-rw-r--r-- arch/mips/kernel/r4k_switch.S | 15
-rw-r--r-- arch/mips/kernel/r6000_fpu.S | 5
-rw-r--r-- arch/mips/kernel/rtlx.c | 4
-rw-r--r-- arch/mips/kernel/scall32-o32.S | 1
-rw-r--r-- arch/mips/kernel/scall64-64.S | 1
-rw-r--r-- arch/mips/kernel/scall64-n32.S | 1
-rw-r--r-- arch/mips/kernel/scall64-o32.S | 1
-rw-r--r-- arch/mips/kernel/setup.c | 7
-rw-r--r-- arch/mips/kernel/signal.c | 8
-rw-r--r-- arch/mips/lasat/Kconfig | 2
-rw-r--r-- arch/mips/lib/memcpy.S | 1
-rw-r--r-- arch/mips/lib/r3k_dump_tlb.c | 4
-rw-r--r-- arch/mips/lib/strnlen_user.S | 6
-rw-r--r-- arch/mips/loongson/common/Makefile | 3
-rw-r--r-- arch/mips/loongson/lemote-2f/clock.c | 5
-rw-r--r-- arch/mips/loongson/loongson-3/numa.c | 1
-rw-r--r-- arch/mips/math-emu/cp1emu.c | 10
-rw-r--r-- arch/mips/mm/tlb-r4k.c | 4
-rw-r--r-- arch/mips/mm/tlbex.c | 16
-rw-r--r-- arch/mips/mti-malta/Makefile | 3
-rw-r--r-- arch/mips/mti-sead3/Makefile | 1
-rw-r--r-- arch/mips/mti-sead3/sead3-i2c.c | 8
-rw-r--r-- arch/mips/mti-sead3/sead3-leds.c | 8
-rw-r--r-- arch/mips/mti-sead3/sead3-pic32-bus.c | 102
-rw-r--r-- arch/mips/mti-sead3/sead3-pic32-i2c-drv.c | 423
-rw-r--r-- arch/mips/netlogic/xlp/Makefile | 12
-rw-r--r-- arch/mips/oprofile/backtrace.c | 2
-rw-r--r-- arch/mips/pci/msi-xlp.c | 4
-rw-r--r-- arch/mips/pci/pci-lantiq.c | 7
-rw-r--r-- arch/mips/pmcs-msp71xx/msp_irq.c | 1
-rw-r--r-- arch/mips/pmcs-msp71xx/msp_irq_cic.c | 4
-rw-r--r-- arch/mips/sgi-ip27/ip27-memory.c | 1
-rw-r--r-- arch/mips/sibyte/Makefile | 1
-rw-r--r-- arch/parisc/include/asm/uaccess.h | 19
-rw-r--r-- arch/parisc/include/uapi/asm/bitsperlong.h | 8
-rw-r--r-- arch/parisc/include/uapi/asm/msgbuf.h | 8
-rw-r--r-- arch/parisc/include/uapi/asm/sembuf.h | 6
-rw-r--r-- arch/parisc/include/uapi/asm/shmbuf.h | 35
-rw-r--r-- arch/parisc/include/uapi/asm/signal.h | 2
-rw-r--r-- arch/parisc/include/uapi/asm/unistd.h | 3
-rw-r--r-- arch/parisc/kernel/syscall_table.S | 9
-rw-r--r-- arch/powerpc/configs/pseries_le_defconfig | 7
-rw-r--r-- arch/powerpc/include/asm/eeh.h | 3
-rw-r--r-- arch/powerpc/include/asm/fadump.h | 52
-rw-r--r-- arch/powerpc/include/asm/hugetlb.h | 2
-rw-r--r-- arch/powerpc/include/asm/pci-bridge.h | 2
-rw-r--r-- arch/powerpc/include/asm/perf_event.h | 2
-rw-r--r-- arch/powerpc/include/asm/reg.h | 3
-rw-r--r-- arch/powerpc/include/asm/syscall.h | 2
-rw-r--r-- arch/powerpc/include/asm/systbl.h | 1
-rw-r--r-- arch/powerpc/include/asm/unistd.h | 2
-rw-r--r-- arch/powerpc/include/uapi/asm/unistd.h | 1
-rw-r--r-- arch/powerpc/kernel/dma.c | 8
-rw-r--r-- arch/powerpc/kernel/eeh.c | 19
-rw-r--r-- arch/powerpc/kernel/eeh_driver.c | 12
-rw-r--r-- arch/powerpc/kernel/eeh_pe.c | 10
-rw-r--r-- arch/powerpc/kernel/eeh_sysfs.c | 2
-rw-r--r-- arch/powerpc/kernel/entry_64.S | 6
-rw-r--r-- arch/powerpc/kernel/exceptions-64s.S | 5
-rw-r--r-- arch/powerpc/kernel/fadump.c | 114
-rw-r--r-- arch/powerpc/kernel/irq.c | 2
-rw-r--r-- arch/powerpc/kernel/misc.S | 4
-rw-r--r-- arch/powerpc/kernel/pci_64.c | 10
-rw-r--r-- arch/powerpc/kernel/ppc_ksyms.c | 2
-rw-r--r-- arch/powerpc/kernel/process.c | 2
-rw-r--r-- arch/powerpc/kernel/rtas_pci.c | 30
-rw-r--r-- arch/powerpc/kernel/setup_64.c | 32
-rw-r--r-- arch/powerpc/kernel/stacktrace.c | 2
-rw-r--r-- arch/powerpc/kernel/vdso32/getcpu.S | 4
-rw-r--r-- arch/powerpc/mm/copro_fault.c | 3
-rw-r--r-- arch/powerpc/mm/init_32.c | 2
-rw-r--r-- arch/powerpc/mm/numa.c | 48
-rw-r--r-- arch/powerpc/mm/slice.c | 3
-rw-r--r-- arch/powerpc/perf/hv-24x7.c | 6
-rw-r--r-- arch/powerpc/perf/hv-gpci.c | 6
-rw-r--r-- arch/powerpc/platforms/powernv/eeh-ioda.c | 2
-rw-r--r-- arch/powerpc/platforms/powernv/eeh-powernv.c | 57
-rw-r--r-- arch/powerpc/platforms/powernv/opal-hmi.c | 2
-rw-r--r-- arch/powerpc/platforms/powernv/opal-lpc.c | 63
-rw-r--r-- arch/powerpc/platforms/powernv/opal-wrappers.S | 2
-rw-r--r-- arch/powerpc/platforms/powernv/opal.c | 21
-rw-r--r-- arch/powerpc/platforms/powernv/pci-ioda.c | 5
-rw-r--r-- arch/powerpc/platforms/powernv/pci.c | 5
-rw-r--r-- arch/powerpc/platforms/pseries/dlpar.c | 26
-rw-r--r-- arch/powerpc/platforms/pseries/hotplug-cpu.c | 4
-rw-r--r-- arch/powerpc/platforms/pseries/iommu.c | 11
-rw-r--r-- arch/powerpc/platforms/pseries/lpar.c | 14
-rw-r--r-- arch/powerpc/platforms/pseries/msi.c | 2
-rw-r--r-- arch/powerpc/platforms/pseries/pseries.h | 3
-rw-r--r-- arch/powerpc/sysdev/fsl_msi.c | 2
-rw-r--r-- arch/powerpc/sysdev/msi_bitmap.c | 66
-rw-r--r-- arch/powerpc/xmon/xmon.c | 6
-rw-r--r-- arch/s390/configs/default_defconfig | 36
-rw-r--r-- arch/s390/configs/gcov_defconfig | 25
-rw-r--r-- arch/s390/configs/performance_defconfig | 30
-rw-r--r-- arch/s390/configs/zfcpdump_defconfig | 10
-rw-r--r-- arch/s390/defconfig | 5
-rw-r--r-- arch/s390/include/uapi/asm/unistd.h | 3
-rw-r--r-- arch/s390/kernel/compat_wrapper.c | 1
-rw-r--r-- arch/s390/kernel/ftrace.c | 2
-rw-r--r-- arch/s390/kernel/nmi.c | 8
-rw-r--r-- arch/s390/kernel/perf_cpum_sf.c | 6
-rw-r--r-- arch/s390/kernel/syscalls.S | 1
-rw-r--r-- arch/s390/kernel/uprobes.c | 2
-rw-r--r-- arch/s390/kernel/vdso32/clock_gettime.S | 12
-rw-r--r-- arch/s390/kernel/vdso32/gettimeofday.S | 14
-rw-r--r-- arch/s390/kernel/vdso64/clock_gettime.S | 13
-rw-r--r-- arch/s390/kernel/vdso64/gettimeofday.S | 6
-rw-r--r-- arch/s390/kernel/vtime.c | 4
-rw-r--r-- arch/s390/lib/probes.c | 2
-rw-r--r-- arch/s390/mm/pgtable.c | 6
-rw-r--r-- arch/sh/kernel/cpu/sh3/setup-sh770x.c | 6
-rw-r--r-- arch/sparc/include/asm/atomic_32.h | 2
-rw-r--r-- arch/sparc/include/asm/cmpxchg_32.h | 12
-rw-r--r-- arch/sparc/include/asm/dma-mapping.h | 8
-rw-r--r-- arch/sparc/include/asm/oplib_64.h | 3
-rw-r--r-- arch/sparc/include/asm/setup.h | 2
-rw-r--r-- arch/sparc/include/uapi/asm/swab.h | 12
-rw-r--r-- arch/sparc/include/uapi/asm/unistd.h | 3
-rw-r--r-- arch/sparc/kernel/entry.h | 3
-rw-r--r-- arch/sparc/kernel/head_64.S | 40
-rw-r--r-- arch/sparc/kernel/hvtramp.S | 1
-rw-r--r-- arch/sparc/kernel/pci_schizo.c | 6
-rw-r--r-- arch/sparc/kernel/setup_64.c | 28
-rw-r--r-- arch/sparc/kernel/smp_64.c | 4
-rw-r--r-- arch/sparc/kernel/systbls_32.S | 2
-rw-r--r-- arch/sparc/kernel/systbls_64.S | 4
-rw-r--r-- arch/sparc/kernel/trampoline_64.S | 12
-rw-r--r-- arch/sparc/lib/atomic32.c | 27
-rw-r--r-- arch/sparc/mm/gup.c | 30
-rw-r--r-- arch/sparc/prom/cif.S | 5
-rw-r--r-- arch/sparc/prom/init_64.c | 6
-rw-r--r-- arch/sparc/prom/p1275.c | 2
-rw-r--r-- arch/x86/Kconfig | 4
-rw-r--r-- arch/x86/boot/compressed/Makefile | 4
-rw-r--r-- arch/x86/boot/compressed/eboot.c | 32
-rw-r--r-- arch/x86/boot/compressed/head_32.S | 5
-rw-r--r-- arch/x86/boot/compressed/head_64.S | 5
-rw-r--r-- arch/x86/boot/compressed/misc.c | 13
-rw-r--r-- arch/x86/boot/compressed/mkpiggy.c | 9
-rw-r--r-- arch/x86/ia32/ia32entry.S | 2
-rw-r--r-- arch/x86/include/asm/efi.h | 31
-rw-r--r-- arch/x86/include/asm/kvm_host.h | 16
-rw-r--r-- arch/x86/include/asm/page_32_types.h | 1
-rw-r--r-- arch/x86/include/asm/page_64_types.h | 11
-rw-r--r-- arch/x86/include/asm/preempt.h | 1
-rw-r--r-- arch/x86/include/asm/smp.h | 1
-rw-r--r-- arch/x86/include/asm/thread_info.h | 2
-rw-r--r-- arch/x86/include/asm/traps.h | 1
-rw-r--r-- arch/x86/include/uapi/asm/vmx.h | 2
-rw-r--r-- arch/x86/kernel/acpi/boot.c | 16
-rw-r--r-- arch/x86/kernel/apb_timer.c | 2
-rw-r--r-- arch/x86/kernel/apic/apic.c | 4
-rw-r--r-- arch/x86/kernel/cpu/Makefile | 7
-rw-r--r-- arch/x86/kernel/cpu/common.c | 2
-rw-r--r-- arch/x86/kernel/cpu/intel.c | 5
-rw-r--r-- arch/x86/kernel/cpu/microcode/amd_early.c | 33
-rw-r--r-- arch/x86/kernel/cpu/microcode/core.c | 10
-rw-r--r-- arch/x86/kernel/cpu/microcode/core_early.c | 2
-rw-r--r-- arch/x86/kernel/cpu/perf_event.c | 14
-rw-r--r-- arch/x86/kernel/cpu/perf_event.h | 1
-rw-r--r-- arch/x86/kernel/cpu/perf_event_intel.c | 173
-rw-r--r-- arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c | 49
-rw-r--r-- arch/x86/kernel/dumpstack_64.c | 1
-rw-r--r-- arch/x86/kernel/entry_32.S | 15
-rw-r--r-- arch/x86/kernel/entry_64.S | 81
-rw-r--r-- arch/x86/kernel/i8259.c | 3
-rw-r--r-- arch/x86/kernel/irqinit.c | 3
-rw-r--r-- arch/x86/kernel/ptrace.c | 2
-rw-r--r-- arch/x86/kernel/setup.c | 2
-rw-r--r-- arch/x86/kernel/smpboot.c | 15
-rw-r--r-- arch/x86/kernel/traps.c | 71
-rw-r--r-- arch/x86/kernel/tsc.c | 5
-rw-r--r-- arch/x86/kvm/emulate.c | 307
-rw-r--r-- arch/x86/kvm/i8254.c | 2
-rw-r--r-- arch/x86/kvm/mmu.c | 6
-rw-r--r-- arch/x86/kvm/paging_tmpl.h | 2
-rw-r--r-- arch/x86/kvm/svm.c | 8
-rw-r--r-- arch/x86/kvm/vmx.c | 30
-rw-r--r-- arch/x86/kvm/x86.c | 38
-rw-r--r-- arch/x86/lib/csum-wrappers_64.c | 5
-rw-r--r-- arch/x86/mm/init_64.c | 11
-rw-r--r-- arch/x86/mm/pageattr.c | 2
-rw-r--r-- arch/x86/platform/efi/efi-bgrt.c | 36
-rw-r--r-- arch/x86/platform/efi/efi.c | 52
-rw-r--r-- arch/x86/platform/efi/efi_32.c | 12
-rw-r--r-- arch/x86/platform/efi/efi_64.c | 6
-rw-r--r-- arch/x86/platform/efi/efi_stub_32.S | 4
-rw-r--r-- arch/x86/platform/intel-mid/intel_mid_weak_decls.h | 7
-rw-r--r-- arch/x86/platform/intel-mid/sfi.c | 2
-rw-r--r-- arch/x86/tools/calc_run_size.pl | 39
-rw-r--r-- arch/x86/xen/enlighten.c | 3
-rw-r--r-- arch/x86/xen/mmu.c | 5
-rw-r--r-- arch/x86/xen/p2m.c | 83
-rw-r--r-- arch/x86/xen/setup.c | 1
-rw-r--r-- arch/x86/xen/smp.c | 3
-rw-r--r-- arch/x86/xen/time.c | 2
-rw-r--r-- arch/xtensa/Kconfig | 4
-rw-r--r-- arch/xtensa/boot/dts/lx200mx.dts | 16
-rw-r--r-- arch/xtensa/configs/generic_kc705_defconfig | 131
-rw-r--r-- arch/xtensa/configs/smp_lx200_defconfig | 135
-rw-r--r-- arch/xtensa/include/asm/pgtable.h | 2
-rw-r--r-- arch/xtensa/include/uapi/asm/unistd.h | 12
-rw-r--r-- block/bio-integrity.c | 13
-rw-r--r-- block/blk-merge.c | 18
-rw-r--r-- block/blk-mq.c | 41
-rw-r--r-- block/elevator.c | 4
-rw-r--r-- block/ioprio.c | 14
-rw-r--r-- block/scsi_ioctl.c | 11
-rw-r--r-- crypto/cts.c | 3
-rw-r--r-- crypto/sha1_generic.c | 2
-rw-r--r-- crypto/sha256_generic.c | 5
-rw-r--r-- crypto/sha512_generic.c | 2
-rw-r--r-- crypto/tgr192.c | 4
-rw-r--r-- crypto/vmac.c | 2
-rw-r--r-- crypto/wp512.c | 8
-rw-r--r-- drivers/acpi/Kconfig | 2
-rw-r--r-- drivers/acpi/Makefile | 1
-rw-r--r-- drivers/acpi/acpi_platform.c | 3
-rw-r--r-- drivers/acpi/acpica/achware.h | 2
-rw-r--r-- drivers/acpi/acpica/aclocal.h | 4
-rw-r--r-- drivers/acpi/acpica/actables.h | 2
-rw-r--r-- drivers/acpi/acpica/amlresrc.h | 34
-rw-r--r-- drivers/acpi/acpica/evgpe.c | 23
-rw-r--r-- drivers/acpi/acpica/evgpeinit.c | 1
-rw-r--r-- drivers/acpi/acpica/evxface.c | 27
-rw-r--r-- drivers/acpi/acpica/evxfevnt.c | 40
-rw-r--r-- drivers/acpi/acpica/evxfgpe.c | 12
-rw-r--r-- drivers/acpi/acpica/hwgpe.c | 9
-rw-r--r-- drivers/acpi/acpica/tbxfroot.c | 33
-rw-r--r-- drivers/acpi/blacklist.c | 8
-rw-r--r-- drivers/acpi/device_pm.c | 5
-rw-r--r-- drivers/acpi/ec.c | 134
-rw-r--r-- drivers/acpi/fan.c | 338
-rw-r--r-- drivers/acpi/int340x_thermal.c | 51
-rw-r--r-- drivers/acpi/internal.h | 10
-rw-r--r-- drivers/acpi/scan.c | 73
-rw-r--r-- drivers/acpi/sysfs.c | 4
-rw-r--r-- drivers/acpi/thermal.c | 18
-rw-r--r-- drivers/acpi/utils.c | 28
-rw-r--r-- drivers/acpi/video.c | 3
-rw-r--r-- drivers/ata/ahci.c | 32
-rw-r--r-- drivers/ata/libahci.c | 78
-rw-r--r-- drivers/ata/sata_fsl.c | 2
-rw-r--r-- drivers/ata/sata_rcar.c | 15
-rw-r--r-- drivers/atm/solos-pci.c | 2
-rw-r--r-- drivers/base/Kconfig | 19
-rw-r--r-- drivers/base/core.c | 4
-rw-r--r-- drivers/base/dma-contiguous.c | 3
-rw-r--r-- drivers/base/power/domain.c | 42
-rw-r--r-- drivers/base/power/main.c | 2
-rw-r--r-- drivers/base/regmap/Kconfig | 5
-rw-r--r-- drivers/base/regmap/Makefile | 1
-rw-r--r-- drivers/base/regmap/regmap-ac97.c | 114
-rw-r--r-- drivers/bcma/host_pci.c | 5
-rw-r--r-- drivers/bcma/main.c | 2
-rw-r--r-- drivers/block/null_blk.c | 14
-rw-r--r-- drivers/block/rbd.c | 35
-rw-r--r-- drivers/block/sunvdc.c | 9
-rw-r--r-- drivers/block/zram/zram_drv.c | 13
-rw-r--r-- drivers/char/hw_random/pseries-rng.c | 11
-rw-r--r-- drivers/char/random.c | 8
-rw-r--r-- drivers/char/raw.c | 2
-rw-r--r-- drivers/char/virtio_console.c | 4
-rw-r--r-- drivers/clk/at91/clk-usb.c | 35
-rw-r--r-- drivers/clk/clk-divider.c | 18
-rw-r--r-- drivers/clk/pxa/clk-pxa27x.c | 4
-rw-r--r-- drivers/clk/qcom/mmcc-apq8084.c | 2
-rw-r--r-- drivers/clk/rockchip/clk.c | 4
-rw-r--r-- drivers/clocksource/arm_arch_timer.c | 6
-rw-r--r-- drivers/clocksource/sun4i_timer.c | 12
-rw-r--r-- drivers/cpufreq/cpufreq-dt.c | 91
-rw-r--r-- drivers/cpufreq/cpufreq.c | 41
-rw-r--r-- drivers/cpufreq/highbank-cpufreq.c | 2
-rw-r--r-- drivers/cpufreq/intel_pstate.c | 110
-rw-r--r-- drivers/cpuidle/Kconfig.mips | 2
-rw-r--r-- drivers/cpuidle/cpuidle-powernv.c | 27
-rw-r--r-- drivers/crypto/caam/key_gen.c | 29
-rw-r--r-- drivers/crypto/qat/qat_common/adf_accel_devices.h | 3
-rw-r--r-- drivers/crypto/qat/qat_common/adf_transport.c | 12
-rw-r--r-- drivers/crypto/qat/qat_common/qat_algs.c | 7
-rw-r--r-- drivers/crypto/qat/qat_common/qat_crypto.c | 8
-rw-r--r-- drivers/crypto/qat/qat_dh895xcc/adf_admin.c | 2
-rw-r--r-- drivers/crypto/qat/qat_dh895xcc/adf_drv.c | 32
-rw-r--r-- drivers/crypto/qat/qat_dh895xcc/adf_isr.c | 2
-rw-r--r-- drivers/dma/edma.c | 40
-rw-r--r-- drivers/dma/pl330.c | 23
-rw-r--r-- drivers/dma/sun6i-dma.c | 61
-rw-r--r-- drivers/edac/cpc925_edac.c | 2
-rw-r--r-- drivers/edac/e7xxx_edac.c | 2
-rw-r--r-- drivers/edac/i3200_edac.c | 4
-rw-r--r-- drivers/edac/i82860_edac.c | 2
-rw-r--r-- drivers/firewire/core-cdev.c | 3
-rw-r--r-- drivers/firmware/efi/efi.c | 79
-rw-r--r-- drivers/firmware/efi/libstub/arm-stub.c | 4
-rw-r--r-- drivers/firmware/efi/libstub/efi-stub-helper.c | 62
-rw-r--r-- drivers/firmware/efi/runtime-wrappers.c | 164
-rw-r--r-- drivers/firmware/efi/vars.c | 61
-rw-r--r-- drivers/gpu/drm/armada/armada_crtc.c | 21
-rw-r--r-- drivers/gpu/drm/armada/armada_drv.c | 3
-rw-r--r-- drivers/gpu/drm/cirrus/cirrus_drv.c | 2
-rw-r--r-- drivers/gpu/drm/exynos/exynos_dp_core.c | 5
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_crtc.c | 5
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_dpi.c | 4
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_drv.c | 92
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_dsi.c | 4
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_g2d.c | 9
-rw-r--r-- drivers/gpu/drm/exynos/exynos_drm_vidi.c | 4
-rw-r--r-- drivers/gpu/drm/exynos/exynos_hdmi.c | 6
-rw-r--r-- drivers/gpu/drm/i915/i915_dma.c | 10
-rw-r--r-- drivers/gpu/drm/i915/i915_drv.c | 10
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_gtt.c | 16
-rw-r--r-- drivers/gpu/drm/i915/i915_gem_tiling.c | 19
-rw-r--r-- drivers/gpu/drm/i915/i915_irq.c | 19
-rw-r--r-- drivers/gpu/drm/i915/intel_display.c | 47
-rw-r--r-- drivers/gpu/drm/i915/intel_dp.c | 25
-rw-r--r-- drivers/gpu/drm/i915/intel_drv.h | 3
-rw-r--r-- drivers/gpu/drm/i915/intel_lvds.c | 22
-rw-r--r-- drivers/gpu/drm/i915/intel_panel.c | 22
-rw-r--r-- drivers/gpu/drm/i915/intel_pm.c | 5
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/device/nvc0.c | 1
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c | 4
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/core/engine/graph/ctxnv50.c | 10
-rw-r--r-- drivers/gpu/drm/nouveau/core/subdev/fb/gk20a.c | 16
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_chan.c | 12
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_drm.c | 2
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_fence.c | 92
-rw-r--r-- drivers/gpu/drm/nouveau/nouveau_fence.h | 4
-rw-r--r-- drivers/gpu/drm/nouveau/nv50_display.c | 25
-rw-r--r-- drivers/gpu/drm/qxl/qxl_display.c | 16
-rw-r--r-- drivers/gpu/drm/radeon/atom.c | 11
-rw-r--r-- drivers/gpu/drm/radeon/atom.h | 2
-rw-r--r-- drivers/gpu/drm/radeon/atombios_dp.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/atombios_i2c.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/btc_dpm.c | 18
-rw-r--r-- drivers/gpu/drm/radeon/btc_dpm.h | 2
-rw-r--r-- drivers/gpu/drm/radeon/ci_dpm.c | 1
-rw-r--r-- drivers/gpu/drm/radeon/cik.c | 7
-rw-r--r-- drivers/gpu/drm/radeon/cik_sdma.c | 42
-rw-r--r-- drivers/gpu/drm/radeon/cypress_dpm.c | 1
-rw-r--r-- drivers/gpu/drm/radeon/dce3_1_afmt.c | 6
-rw-r--r-- drivers/gpu/drm/radeon/dce6_afmt.c | 8
-rw-r--r-- drivers/gpu/drm/radeon/evergreen.c | 8
-rw-r--r-- drivers/gpu/drm/radeon/evergreen_hdmi.c | 8
-rw-r--r-- drivers/gpu/drm/radeon/kv_dpm.c | 19
-rw-r--r-- drivers/gpu/drm/radeon/ni_dpm.c | 1
-rw-r--r-- drivers/gpu/drm/radeon/r100.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/r600_dma.c | 41
-rw-r--r-- drivers/gpu/drm/radeon/r600_dpm.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/radeon.h | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_bios.c | 6
-rw-r--r-- drivers/gpu/drm/radeon/radeon_connectors.c | 19
-rw-r--r-- drivers/gpu/drm/radeon/radeon_cs.c | 19
-rw-r--r-- drivers/gpu/drm/radeon/radeon_device.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/radeon_encoders.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/radeon_irq_kms.c | 10
-rw-r--r-- drivers/gpu/drm/radeon/radeon_kms.c | 2
-rw-r--r-- drivers/gpu/drm/radeon/radeon_object.c | 7
-rw-r--r-- drivers/gpu/drm/radeon/radeon_ring.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/radeon_vm.c | 4
-rw-r--r-- drivers/gpu/drm/radeon/rs600.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/rs690.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/rs780_dpm.c | 1
-rw-r--r-- drivers/gpu/drm/radeon/rv515.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/rv6xx_dpm.c | 1
-rw-r--r-- drivers/gpu/drm/radeon/rv770_dpm.c | 1
-rw-r--r-- drivers/gpu/drm/radeon/si.c | 3
-rw-r--r-- drivers/gpu/drm/radeon/si_dpm.c | 27
-rw-r--r-- drivers/gpu/drm/radeon/sumo_dpm.c | 1
-rw-r--r-- drivers/gpu/drm/radeon/trinity_dpm.c | 1
-rw-r--r-- drivers/gpu/drm/tegra/dc.c | 9
-rw-r--r-- drivers/gpu/drm/ttm/ttm_bo.c | 28
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c | 3
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 6
-rw-r--r-- drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 24
-rw-r--r-- drivers/hid/hid-core.c | 1
-rw-r--r-- drivers/hid/hid-debug.c | 6
-rw-r--r-- drivers/hid/hid-ids.h | 3
-rw-r--r-- drivers/hid/hid-input.c | 12
-rw-r--r-- drivers/hid/usbhid/hid-quirks.c | 3
-rw-r--r-- drivers/hwmon/fam15h_power.c | 2
-rw-r--r-- drivers/hwmon/g762.c | 6
-rw-r--r-- drivers/hwmon/ibmpowernv.c | 6
-rw-r--r-- drivers/hwmon/menf21bmc_hwmon.c | 1
-rw-r--r-- drivers/hwmon/pwm-fan.c | 13
-rw-r--r-- drivers/i2c/algos/i2c-algo-bit.c | 5
-rw-r--r-- drivers/i2c/algos/i2c-algo-pca.c | 5
-rw-r--r-- drivers/i2c/algos/i2c-algo-pcf.c | 5
-rw-r--r-- drivers/i2c/algos/i2c-algo-pcf.h | 7
-rw-r--r-- drivers/i2c/busses/i2c-ali1535.c | 4
-rw-r--r-- drivers/i2c/busses/i2c-ali15x3.c | 4
-rw-r--r-- drivers/i2c/busses/i2c-amd756-s4882.c | 4
-rw-r--r-- drivers/i2c/busses/i2c-amd756.c | 4
-rw-r--r-- drivers/i2c/busses/i2c-at91.c | 2
-rw-r--r-- drivers/i2c/busses/i2c-au1550.c | 4
-rw-r--r-- drivers/i2c/busses/i2c-cadence.c | 11
-rw-r--r-- drivers/i2c/busses/i2c-cpm.c | 4
-rw-r--r-- drivers/i2c/busses/i2c-davinci.c | 12
-rw-r--r-- drivers/i2c/busses/i2c-designware-core.c | 6
-rw-r--r-- drivers/i2c/busses/i2c-designware-core.h | 4
-rw-r--r-- drivers/i2c/busses/i2c-designware-pcidrv.c | 4
-rw-r--r-- drivers/i2c/busses/i2c-designware-platdrv.c | 4
-rw-r--r-- drivers/i2c/busses/i2c-eg20t.c | 4
-rw-r--r-- drivers/i2c/busses/i2c-elektor.c | 6
-rw-r--r-- drivers/i2c/busses/i2c-hydra.c | 4
-rw-r--r-- drivers/i2c/busses/i2c-i801.c | 4
-rw-r--r-- drivers/i2c/busses/i2c-imx.c | 5
-rw-r--r-- drivers/i2c/busses/i2c-iop3xx.h | 6
-rw-r--r-- drivers/i2c/busses/i2c-isch.c | 4
-rw-r--r-- drivers/i2c/busses/i2c-ismt.c | 4
-rw-r--r-- drivers/i2c/busses/i2c-nforce2-s4985.c | 4
-rw-r--r-- drivers/i2c/busses/i2c-nforce2.c | 4
-rw-r--r-- drivers/i2c/busses/i2c-omap.c | 14
-rw-r--r-- drivers/i2c/busses/i2c-parport-light.c | 4
-rw-r--r-- drivers/i2c/busses/i2c-parport.c | 4
-rw-r--r-- drivers/i2c/busses/i2c-parport.h | 4
-rw-r--r-- drivers/i2c/busses/i2c-pasemi.c | 4
-rw-r--r-- drivers/i2c/busses/i2c-pca-isa.c | 4
-rw-r--r-- drivers/i2c/busses/i2c-piix4.c | 4
-rw-r--r-- drivers/i2c/busses/i2c-pmcmsp.c | 4
-rw-r--r-- drivers/i2c/busses/i2c-powermac.c | 4
-rw-r--r-- drivers/i2c/busses/i2c-s3c2410.c | 4
-rw-r--r-- drivers/i2c/busses/i2c-sh_mobile.c | 4
-rw-r--r-- drivers/i2c/busses/i2c-sibyte.c | 4
-rw-r--r-- drivers/i2c/busses/i2c-simtec.c | 4
-rw-r--r-- drivers/i2c/busses/i2c-sis5595.c | 4
-rw-r--r-- drivers/i2c/busses/i2c-sis630.c | 4
-rw-r--r-- drivers/i2c/busses/i2c-sis96x.c | 4
-rw-r--r-- drivers/i2c/busses/i2c-taos-evm.c | 4
-rw-r--r-- drivers/i2c/busses/i2c-via.c | 4
-rw-r--r-- drivers/i2c/busses/i2c-viapro.c | 4
-rw-r--r-- drivers/i2c/busses/i2c-xiic.c | 4
-rw-r--r-- drivers/i2c/busses/scx200_acb.c | 4
-rw-r--r-- drivers/i2c/i2c-boardinfo.c | 5
-rw-r--r-- drivers/i2c/i2c-core.c | 10
-rw-r--r-- drivers/i2c/i2c-core.h | 5
-rw-r--r-- drivers/i2c/i2c-dev.c | 5
-rw-r--r-- drivers/i2c/i2c-smbus.c | 5
-rw-r--r-- drivers/i2c/i2c-stub.c | 4
-rw-r--r-- drivers/iio/accel/bmc150-accel.c | 40
-rw-r--r-- drivers/iio/accel/kxcjk-1013.c | 4
-rw-r--r-- drivers/iio/adc/men_z188_adc.c | 1
-rw-r--r-- drivers/iio/common/st_sensors/st_sensors_buffer.c | 2
-rw-r--r-- drivers/iio/gyro/bmg160.c | 53
-rw-r--r-- drivers/iio/light/tsl4531.c | 7
-rw-r--r-- drivers/iio/proximity/as3935.c | 2
-rw-r--r-- drivers/infiniband/hw/mlx4/main.c | 10
-rw-r--r-- drivers/infiniband/ulp/isert/ib_isert.c | 50
-rw-r--r-- drivers/infiniband/ulp/srpt/ib_srpt.c | 8
-rw-r--r-- drivers/input/evdev.c | 2
-rw-r--r-- drivers/input/joystick/xpad.c | 16
-rw-r--r-- drivers/input/keyboard/opencores-kbd.c | 2
-rw-r--r-- drivers/input/keyboard/stmpe-keypad.c | 2
-rw-r--r-- drivers/input/misc/ims-pcu.c | 2
-rw-r--r-- drivers/input/misc/max77693-haptic.c | 5
-rw-r--r-- drivers/input/misc/soc_button_array.c | 2
-rw-r--r-- drivers/input/misc/twl4030-pwrbutton.c | 1
-rw-r--r-- drivers/input/mouse/alps.c | 28
-rw-r--r-- drivers/input/mouse/elantech.c | 64
-rw-r--r-- drivers/input/mouse/psmouse-base.c | 7
-rw-r--r-- drivers/input/mouse/synaptics.c | 9
-rw-r--r-- drivers/input/mouse/vsxxxaa.c | 2
-rw-r--r-- drivers/input/serio/altera_ps2.c | 4
-rw-r--r-- drivers/input/serio/i8042-x86ia64io.h | 297
-rw-r--r-- drivers/input/serio/i8042.c | 2
-rw-r--r-- drivers/input/touchscreen/wm97xx-core.c | 4
-rw-r--r-- drivers/irqchip/irq-armada-370-xp.c | 23
-rw-r--r-- drivers/irqchip/irq-atmel-aic-common.c | 6
-rw-r--r-- drivers/irqchip/irq-bcm7120-l2.c | 4
-rw-r--r-- drivers/irqchip/irq-brcmstb-l2.c | 4
-rw-r--r-- drivers/leds/led-class.c | 23
-rw-r--r-- drivers/leds/led-core.c | 19
-rw-r--r-- drivers/leds/leds-gpio-register.c | 5
-rw-r--r-- drivers/leds/leds-gpio.c | 14
-rw-r--r-- drivers/leds/leds-lp3944.c | 3
-rw-r--r-- drivers/leds/trigger/ledtrig-gpio.c | 2
-rw-r--r-- drivers/mailbox/Makefile | 4
-rw-r--r-- drivers/mailbox/mailbox.c | 465
-rw-r--r-- drivers/mailbox/pl320-ipc.c | 2
-rw-r--r-- drivers/md/dm-bufio.c | 12
-rw-r--r-- drivers/md/dm-raid.c | 17
-rw-r--r-- drivers/md/dm-stripe.c | 4
-rw-r--r-- drivers/md/dm-thin.c | 16
-rw-r--r-- drivers/md/md.c | 4
-rw-r--r-- drivers/md/persistent-data/dm-btree-internal.h | 6
-rw-r--r-- drivers/md/persistent-data/dm-btree-spine.c | 2
-rw-r--r-- drivers/md/persistent-data/dm-btree.c | 24
-rw-r--r-- drivers/media/common/saa7146/saa7146_core.c | 6
-rw-r--r-- drivers/media/dvb-core/dvb_frontend.c | 6
-rw-r--r-- drivers/media/dvb-frontends/ds3000.c | 7
-rw-r--r-- drivers/media/dvb-frontends/sp2.c | 4
-rw-r--r-- drivers/media/dvb-frontends/tc90522.c | 18
-rw-r--r-- drivers/media/i2c/smiapp/smiapp-core.c | 2
-rw-r--r-- drivers/media/pci/cx23885/cx23885-core.c | 6
-rw-r--r-- drivers/media/pci/cx23885/cx23885-dvb.c | 3
-rw-r--r-- drivers/media/pci/solo6x10/solo6x10-core.c | 10
-rw-r--r-- drivers/media/pci/tw68/Kconfig | 1
-rw-r--r-- drivers/media/pci/tw68/tw68-core.c | 2
-rw-r--r-- drivers/media/platform/Kconfig | 6
-rw-r--r-- drivers/media/platform/exynos4-is/Kconfig | 2
-rw-r--r-- drivers/media/platform/exynos4-is/fimc-core.c | 2
-rw-r--r-- drivers/media/platform/s5p-jpeg/jpeg-core.c | 6
-rw-r--r-- drivers/media/platform/s5p-tv/Kconfig | 2
-rw-r--r-- drivers/media/platform/vivid/Kconfig | 5
-rw-r--r-- drivers/media/platform/vivid/vivid-core.c | 11
-rw-r--r-- drivers/media/platform/vivid/vivid-tpg.c | 2
-rw-r--r-- drivers/media/radio/wl128x/fmdrv_common.c | 2
-rw-r--r-- drivers/media/rc/imon.c | 3
-rw-r--r-- drivers/media/rc/ir-hix5hd2.c | 2
-rw-r--r-- drivers/media/rc/ir-rc5-decoder.c | 2
-rw-r--r-- drivers/media/rc/ir-rc6-decoder.c | 2
-rw-r--r-- drivers/media/rc/rc-ir-raw.c | 1
-rw-r--r-- drivers/media/rc/rc-main.c | 2
-rw-r--r-- drivers/media/tuners/xc5000.c | 2
-rw-r--r-- drivers/media/usb/dvb-usb-v2/af9035.c | 6
-rw-r--r-- drivers/media/usb/dvb-usb-v2/anysee.c | 6
-rw-r--r-- drivers/media/usb/em28xx/em28xx-core.c | 2
-rw-r--r-- drivers/media/usb/em28xx/em28xx-input.c | 4
-rw-r--r-- drivers/media/usb/hackrf/hackrf.c | 2
-rw-r--r-- drivers/media/usb/s2255/s2255drv.c | 2
-rw-r--r-- drivers/media/usb/usbvision/usbvision-video.c | 2
-rw-r--r-- drivers/media/usb/uvc/uvc_v4l2.c | 1
-rw-r--r-- drivers/media/usb/uvc/uvc_video.c | 2
-rw-r--r-- drivers/media/usb/uvc/uvcvideo.h | 1
-rw-r--r-- drivers/media/v4l2-core/videobuf-dma-contig.c | 9
-rw-r--r-- drivers/mfd/max77693.c | 14
-rw-r--r-- drivers/mfd/rtsx_pcr.c | 2
-rw-r--r-- drivers/mfd/stmpe.h | 2
-rw-r--r-- drivers/mfd/twl4030-power.c | 52
-rw-r--r-- drivers/mfd/viperboard.c | 5
-rw-r--r-- drivers/misc/cxl/fault.c | 74
-rw-r--r-- drivers/misc/cxl/native.c | 4
-rw-r--r-- drivers/mmc/core/host.c | 21
-rw-r--r-- drivers/mtd/chips/cfi_cmdset_0001.c | 2
-rw-r--r-- drivers/mtd/devices/m25p80.c | 64
-rw-r--r-- drivers/mtd/nand/omap_elm.c | 2
-rw-r--r-- drivers/mtd/spi-nor/fsl-quadspi.c | 7
-rw-r--r-- drivers/mtd/spi-nor/spi-nor.c | 16
-rw-r--r-- drivers/net/Kconfig | 2
-rw-r--r-- drivers/net/bonding/bond_main.c | 3
-rw-r--r-- drivers/net/bonding/bond_netlink.c | 7
-rw-r--r-- drivers/net/can/dev.c | 4
-rw-r--r-- drivers/net/can/m_can/Kconfig | 1
-rw-r--r-- drivers/net/can/m_can/m_can.c | 219
-rw-r--r-- drivers/net/can/rcar_can.c | 1
-rw-r--r-- drivers/net/can/sja1000/kvaser_pci.c | 5
-rw-r--r-- drivers/net/can/usb/ems_usb.c | 3
-rw-r--r-- drivers/net/can/usb/esd_usb2.c | 3
-rw-r--r-- drivers/net/can/usb/gs_usb.c | 1
-rw-r--r-- drivers/net/can/xilinx_can.c | 4
-rw-r--r-- drivers/net/dsa/bcm_sf2.c | 58
-rw-r--r-- drivers/net/dsa/mv88e6171.c | 2
-rw-r--r-- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 22
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_hw.c | 18
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_hw.h | 4
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_main.c | 11
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_main.h | 5
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c | 23
-rw-r--r-- drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c | 7
-rw-r--r-- drivers/net/ethernet/broadcom/bcmsysport.c | 24
-rw-r--r-- drivers/net/ethernet/broadcom/cnic.c | 5
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet.c | 11
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmgenet.h | 3
-rw-r--r-- drivers/net/ethernet/broadcom/genet/bcmmii.c | 9
-rw-r--r-- drivers/net/ethernet/broadcom/tg3.c | 3
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c | 86
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c | 15
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/sge.c | 30
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_hw.c | 51
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4/t4_regs.h | 10
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/adapter.h | 8
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c | 16
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/sge.c | 136
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h | 2
-rw-r--r-- drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c | 28
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_clsf.c | 12
-rw-r--r-- drivers/net/ethernet/cisco/enic/enic_main.c | 24
-rw-r--r-- drivers/net/ethernet/emulex/benet/be_main.c | 11
-rw-r--r-- drivers/net/ethernet/freescale/fec_main.c | 42
-rw-r--r-- drivers/net/ethernet/freescale/fs_enet/mac-fec.c | 3
-rw-r--r-- drivers/net/ethernet/freescale/fs_enet/mac-scc.c | 3
-rw-r--r-- drivers/net/ethernet/intel/e1000/e1000_main.c | 5
-rw-r--r-- drivers/net/ethernet/intel/i40e/i40e_main.c | 4
-rw-r--r-- drivers/net/ethernet/intel/igb/igb_main.c | 29
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 4
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 19
-rw-r--r-- drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 4
-rw-r--r-- drivers/net/ethernet/marvell/mv643xx_eth.c | 18
-rw-r--r-- drivers/net/ethernet/marvell/mvpp2.c | 27
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_netdev.c | 35
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/en_tx.c | 7
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/eq.c | 1
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/mcg.c | 4
-rw-r--r-- drivers/net/ethernet/mellanox/mlx4/resource_tracker.c | 2
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/eq.c | 8
-rw-r--r-- drivers/net/ethernet/mellanox/mlx5/core/main.c | 4
-rw-r--r-- drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c | 3
-rw-r--r-- drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c | 6
-rw-r--r-- drivers/net/ethernet/qualcomm/Kconfig | 3
-rw-r--r-- drivers/net/ethernet/renesas/sh_eth.c | 96
-rw-r--r-- drivers/net/ethernet/renesas/sh_eth.h | 5
-rw-r--r-- drivers/net/ethernet/sfc/ef10.c | 3
-rw-r--r-- drivers/net/ethernet/sfc/tx.c | 4
-rw-r--r-- drivers/net/ethernet/smsc/smc91x.c | 78
-rw-r--r-- drivers/net/ethernet/smsc/smc91x.h | 3
-rw-r--r-- drivers/net/ethernet/smsc/smsc911x.c | 61
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 52
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c | 7
-rw-r--r-- drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c | 27
-rw-r--r-- drivers/net/ethernet/sun/sunhme.c | 62
-rw-r--r-- drivers/net/ethernet/ti/cpsw.c | 54
-rw-r--r-- drivers/net/ethernet/ti/cpsw_ale.c | 30
-rw-r--r-- drivers/net/ethernet/ti/cpsw_ale.h | 2
-rw-r--r-- drivers/net/ethernet/ti/cpts.c | 2
-rw-r--r-- drivers/net/hyperv/netvsc_drv.c | 1
-rw-r--r-- drivers/net/ieee802154/fakehard.c | 13
-rw-r--r-- drivers/net/macvlan.c | 10
-rw-r--r-- drivers/net/macvtap.c | 18
-rw-r--r-- drivers/net/phy/dp83640.c | 4
-rw-r--r-- drivers/net/phy/marvell.c | 19
-rw-r--r-- drivers/net/phy/phy.c | 36
-rw-r--r-- drivers/net/ppp/ppp_generic.c | 40
-rw-r--r-- drivers/net/ppp/pptp.c | 4
-rw-r--r-- drivers/net/tun.c | 53
-rw-r--r-- drivers/net/usb/asix_devices.c | 14
-rw-r--r-- drivers/net/usb/ax88179_178a.c | 7
-rw-r--r-- drivers/net/usb/cdc_ether.c | 47
-rw-r--r-- drivers/net/usb/qmi_wwan.c | 1
-rw-r--r-- drivers/net/usb/r8152.c | 17
-rw-r--r-- drivers/net/usb/usbnet.c | 20
-rw-r--r-- drivers/net/virtio_net.c | 61
-rw-r--r-- drivers/net/vxlan.c | 41
-rw-r--r-- drivers/net/wireless/ath/ath.h | 1
-rw-r--r-- drivers/net/wireless/ath/ath9k/ar9003_phy.c | 13
-rw-r--r-- drivers/net/wireless/ath/ath9k/common.c | 8
-rw-r--r-- drivers/net/wireless/ath/ath9k/debug.c | 2
-rw-r--r-- drivers/net/wireless/ath/ath9k/hw.c | 13
-rw-r--r-- drivers/net/wireless/ath/ath9k/init.c | 55
-rw-r--r-- drivers/net/wireless/ath/ath9k/main.c | 12
-rw-r--r-- drivers/net/wireless/ath/ath9k/xmit.c | 10
-rw-r--r-- drivers/net/wireless/ath/regd.c | 14
-rw-r--r-- drivers/net/wireless/b43/phy_common.c | 4
-rw-r--r-- drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c | 25
-rw-r--r-- drivers/net/wireless/brcm80211/brcmfmac/of.c | 4
-rw-r--r-- drivers/net/wireless/brcm80211/brcmfmac/pcie.c | 2
-rw-r--r-- drivers/net/wireless/brcm80211/brcmfmac/usb.c | 6
-rw-r--r-- drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c | 6
-rw-r--r-- drivers/net/wireless/iwlwifi/dvm/mac80211.c | 24
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-8000.c | 3
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-fw.h | 2
-rw-r--r-- drivers/net/wireless/iwlwifi/iwl-trans.h | 2
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/coex.c | 4
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/coex_legacy.c | 4
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/fw-api-power.h | 35
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/fw-api.h | 1
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/fw.c | 19
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/mac80211.c | 45
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/mvm.h | 1
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/ops.c | 13
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/scan.c | 23
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/time-event.c | 2
-rw-r--r-- drivers/net/wireless/iwlwifi/mvm/tx.c | 8
-rw-r--r-- drivers/net/wireless/iwlwifi/pcie/trans.c | 29
-rw-r--r-- drivers/net/wireless/mac80211_hwsim.c | 4
-rw-r--r-- drivers/net/wireless/mwifiex/11n_rxreorder.c | 52
-rw-r--r-- drivers/net/wireless/mwifiex/11n_rxreorder.h | 2
-rw-r--r-- drivers/net/wireless/mwifiex/main.h | 1
-rw-r--r-- drivers/net/wireless/rt2x00/rt2800usb.c | 1
-rw-r--r-- drivers/net/wireless/rt2x00/rt2x00queue.c | 50
-rw-r--r-- drivers/net/wireless/rtlwifi/base.c | 2
-rw-r--r-- drivers/net/wireless/rtlwifi/core.c | 6
-rw-r--r-- drivers/net/wireless/rtlwifi/core.h | 1
-rw-r--r-- drivers/net/wireless/rtlwifi/pci.c | 42
-rw-r--r-- drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c | 8
-rw-r--r-- drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h | 4
-rw-r--r-- drivers/net/wireless/rtlwifi/rtl8192ce/def.h | 2
-rw-r--r-- drivers/net/wireless/rtlwifi/rtl8192ce/hw.c | 2
-rw-r--r-- drivers/net/wireless/rtlwifi/rtl8192ce/sw.c | 1
-rw-r--r-- drivers/net/wireless/rtlwifi/rtl8192ce/trx.c | 3
-rw-r--r-- drivers/net/wireless/rtlwifi/rtl8192cu/hw.c | 17
-rw-r--r-- drivers/net/wireless/rtlwifi/rtl8192cu/hw.h | 1
-rw-r--r-- drivers/net/wireless/rtlwifi/rtl8192cu/sw.c | 7
-rw-r--r-- drivers/net/wireless/rtlwifi/rtl8192de/sw.c | 1
-rw-r--r-- drivers/net/wireless/rtlwifi/rtl8192ee/hw.c | 8
-rw-r--r-- drivers/net/wireless/rtlwifi/rtl8192se/def.h | 2
-rw-r--r-- drivers/net/wireless/rtlwifi/rtl8192se/hw.c | 7
-rw-r--r-- drivers/net/wireless/rtlwifi/rtl8192se/phy.c | 2
-rw-r--r-- drivers/net/wireless/rtlwifi/rtl8192se/sw.c | 38
-rw-r--r-- drivers/net/wireless/rtlwifi/rtl8192se/trx.c | 3
-rw-r--r-- drivers/net/wireless/rtlwifi/rtl8821ae/hw.c | 5
-rw-r--r-- drivers/net/wireless/rtlwifi/rtl8821ae/phy.c | 15
-rw-r--r-- drivers/net/wireless/rtlwifi/usb.c | 11
-rw-r--r-- drivers/net/xen-netback/common.h | 39
-rw-r--r-- drivers/net/xen-netback/interface.c | 74
-rw-r--r-- drivers/net/xen-netback/netback.c | 319
-rw-r--r-- drivers/net/xen-netback/xenbus.c | 37
-rw-r--r-- drivers/net/xen-netfront.c | 5
-rw-r--r-- drivers/of/address.c | 19
-rw-r--r-- drivers/of/base.c | 88
-rw-r--r-- drivers/of/dynamic.c | 2
-rw-r--r-- drivers/of/fdt.c | 4
-rw-r--r-- drivers/of/of_reserved_mem.c | 14
-rw-r--r-- drivers/of/selftest.c | 77
-rw-r--r-- drivers/of/testcase-data/tests-phandle.dtsi | 2
-rw-r--r-- drivers/pci/access.c | 2
-rw-r--r-- drivers/pci/host/pci-imx6.c | 13
-rw-r--r-- drivers/pci/host/pci-tegra.c | 28
-rw-r--r-- drivers/pci/host/pci-xgene.c | 7
-rw-r--r-- drivers/pci/hotplug/pciehp_core.c | 7
-rw-r--r-- drivers/pci/msi.c | 26
-rw-r--r-- drivers/pci/pci-sysfs.c | 8
-rw-r--r-- drivers/pci/pci.h | 2
-rw-r--r-- drivers/pci/pcie/pme.c | 6
-rw-r--r-- drivers/pci/probe.c | 30
-rw-r--r-- drivers/phy/phy-omap-usb2.c | 6
-rw-r--r-- drivers/pinctrl/pinctrl-baytrail.c | 8
-rw-r--r-- drivers/platform/x86/Kconfig | 1
-rw-r--r-- drivers/platform/x86/acer-wmi.c | 11
-rw-r--r-- drivers/platform/x86/asus-nb-wmi.c | 9
-rw-r--r-- drivers/platform/x86/hp_accel.c | 44
-rw-r--r-- drivers/platform/x86/ideapad-laptop.c | 7
-rw-r--r-- drivers/platform/x86/samsung-laptop.c | 10
-rw-r--r-- drivers/platform/x86/toshiba_acpi.c | 6
-rw-r--r-- drivers/power/ab8500_fg.c | 17
-rw-r--r-- drivers/power/bq2415x_charger.c | 23
-rw-r--r-- drivers/power/charger-manager.c | 164
-rw-r--r-- drivers/power/power_supply_core.c | 3
-rw-r--r-- drivers/power/reset/at91-reset.c | 4
-rw-r--r-- drivers/pwm/Kconfig | 22
-rw-r--r-- drivers/pwm/Makefile | 2
-rw-r--r-- drivers/pwm/core.c | 31
-rw-r--r-- drivers/pwm/pwm-atmel.c | 24
-rw-r--r-- drivers/pwm/pwm-fsl-ftm.c | 90
-rw-r--r-- drivers/pwm/pwm-imx.c | 71
-rw-r--r-- drivers/pwm/pwm-lpss-pci.c | 64
-rw-r--r-- drivers/pwm/pwm-lpss-platform.c | 68
-rw-r--r-- drivers/pwm/pwm-lpss.c | 137
-rw-r--r-- drivers/pwm/pwm-lpss.h | 32
-rw-r--r-- drivers/pwm/pwm-rockchip.c | 57
-rw-r--r-- drivers/regulator/max1586.c | 2
-rw-r--r-- drivers/regulator/max77686.c | 2
-rw-r--r-- drivers/regulator/max77693.c | 2
-rw-r--r-- drivers/regulator/max77802.c | 2
-rw-r--r-- drivers/regulator/max8660.c | 2
-rw-r--r-- drivers/regulator/of_regulator.c | 3
-rw-r--r-- drivers/regulator/rk808-regulator.c | 2
-rw-r--r-- drivers/regulator/s2mpa01.c | 2
-rw-r--r-- drivers/rtc/Kconfig | 4
-rw-r--r-- drivers/rtc/rtc-bq32k.c | 2
-rw-r--r-- drivers/rtc/rtc-efi.c | 1
-rw-r--r-- drivers/rtc/rtc-pm8xxx.c | 222
-rw-r--r-- drivers/rtc/rtc-s3c.c | 14
-rw-r--r-- drivers/s390/char/Kconfig | 2
-rw-r--r-- drivers/s390/kvm/virtio_ccw.c | 1
-rw-r--r-- drivers/scsi/bnx2fc/bnx2fc_els.c | 2
-rw-r--r-- drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 7
-rw-r--r-- drivers/scsi/bnx2fc/bnx2fc_io.c | 19
-rw-r--r-- drivers/scsi/cxgbi/cxgb4i/cxgb4i.c | 17
-rw-r--r-- drivers/scsi/cxgbi/libcxgbi.c | 62
-rw-r--r-- drivers/scsi/cxgbi/libcxgbi.h | 5
-rw-r--r-- drivers/scsi/device_handler/scsi_dh_alua.c | 7
-rw-r--r-- drivers/scsi/megaraid/megaraid_sas_base.c | 2
-rw-r--r-- drivers/scsi/osd/Kbuild | 2
-rw-r--r-- drivers/scsi/osd/Kconfig | 2
-rw-r--r-- drivers/scsi/osd/osd_debug.h | 2
-rw-r--r-- drivers/scsi/osd/osd_initiator.c | 4
-rw-r--r-- drivers/scsi/osd/osd_uld.c | 4
-rw-r--r-- drivers/scsi/qla2xxx/qla_target.c | 35
-rw-r--r-- drivers/scsi/qla2xxx/qla_target.h | 12
-rw-r--r-- drivers/scsi/qla2xxx/tcm_qla2xxx.c | 11
-rw-r--r-- drivers/scsi/scsi_devinfo.c | 1
-rw-r--r-- drivers/scsi/scsi_error.c | 20
-rw-r--r-- drivers/scsi/scsi_lib.c | 5
-rw-r--r-- drivers/scsi/ufs/ufshcd-pltfrm.c | 15
-rw-r--r-- drivers/scsi/ufs/ufshcd.c | 104
-rw-r--r-- drivers/scsi/ufs/ufshcd.h | 2
-rw-r--r-- drivers/soc/versatile/soc-realview.c | 1
-rw-r--r-- drivers/spi/spi-dw.c | 7
-rw-r--r-- drivers/spi/spi-fsl-dspi.c | 4
-rw-r--r-- drivers/spi/spi-orion.c | 2
-rw-r--r-- drivers/spi/spi-pl022.c | 2
-rw-r--r-- drivers/spi/spi-pxa2xx.c | 7
-rw-r--r-- drivers/spi/spi-rockchip.c | 50
-rw-r--r-- drivers/spi/spi-sirf.c | 4
-rw-r--r-- drivers/spi/spi.c | 6
-rw-r--r-- drivers/spi/spidev.c | 79
-rw-r--r-- drivers/staging/android/logger.c | 13
-rw-r--r-- drivers/staging/comedi/Kconfig | 2
-rw-r--r-- drivers/staging/comedi/comedi_fops.c | 26
-rw-r--r-- drivers/staging/iio/adc/mxs-lradc.c | 12
-rw-r--r-- drivers/staging/iio/impedance-analyzer/ad5933.c | 15
-rw-r--r-- drivers/staging/iio/meter/ade7758.h | 1
-rw-r--r-- drivers/staging/iio/meter/ade7758_core.c | 57
-rw-r--r-- drivers/staging/iio/meter/ade7758_ring.c | 5
-rw-r--r-- drivers/staging/rtl8188eu/core/rtw_cmd.c | 22
-rw-r--r-- drivers/staging/rtl8188eu/core/rtw_mlme_ext.c | 12
-rw-r--r-- drivers/staging/rtl8188eu/core/rtw_wlan_util.c | 2
-rw-r--r-- drivers/staging/rtl8188eu/os_dep/usb_intf.c | 1
-rw-r--r-- drivers/staging/rtl8723au/include/rtw_eeprom.h | 2
-rw-r--r-- drivers/target/Kconfig | 7
-rw-r--r-- drivers/target/Makefile | 1
-rw-r--r-- drivers/target/iscsi/iscsi_target.c | 8
-rw-r--r-- drivers/target/iscsi/iscsi_target_configfs.c | 10
-rw-r--r-- drivers/target/iscsi/iscsi_target_erl0.c | 6
-rw-r--r-- drivers/target/iscsi/iscsi_target_login.c | 8
-rw-r--r-- drivers/target/iscsi/iscsi_target_util.c | 5
-rw-r--r-- drivers/target/loopback/tcm_loop.c | 29
-rw-r--r-- drivers/target/target_core_alua.c | 33
-rw-r--r-- drivers/target/target_core_configfs.c | 26
-rw-r--r-- drivers/target/target_core_device.c | 48
-rw-r--r-- drivers/target/target_core_fabric_configfs.c | 13
-rw-r--r-- drivers/target/target_core_fabric_lib.c | 6
-rw-r--r-- drivers/target/target_core_file.c | 13
-rw-r--r-- drivers/target/target_core_internal.h | 6
-rw-r--r-- drivers/target/target_core_pr.c | 116
-rw-r--r-- drivers/target/target_core_pr.h | 2
-rw-r--r-- drivers/target/target_core_pscsi.c | 16
-rw-r--r-- drivers/target/target_core_sbc.c | 2
-rw-r--r-- drivers/target/target_core_tmr.c | 24
-rw-r--r-- drivers/target/target_core_tpg.c | 53
-rw-r--r-- drivers/target/target_core_transport.c | 29
-rw-r--r-- drivers/target/target_core_ua.c | 15
-rw-r--r-- drivers/target/target_core_ua.h | 1
-rw-r--r-- drivers/target/target_core_user.c | 1167
-rw-r--r-- drivers/target/tcm_fc/tfc_sess.c | 2
-rw-r--r-- drivers/thermal/Kconfig | 49
-rw-r--r-- drivers/thermal/Makefile | 3
-rw-r--r-- drivers/thermal/cpu_cooling.c | 37
-rw-r--r-- drivers/thermal/fair_share.c | 12
-rw-r--r-- drivers/thermal/gov_bang_bang.c | 131
-rw-r--r-- drivers/thermal/imx_thermal.c | 136
-rw-r--r-- drivers/thermal/int3403_thermal.c | 296
-rw-r--r-- drivers/thermal/int340x_thermal/Makefile | 4
-rw-r--r-- drivers/thermal/int340x_thermal/acpi_thermal_rel.c | 400
-rw-r--r-- drivers/thermal/int340x_thermal/acpi_thermal_rel.h | 84
-rw-r--r-- drivers/thermal/int340x_thermal/int3400_thermal.c | 271
-rw-r--r-- drivers/thermal/int340x_thermal/int3402_thermal.c | 242
-rw-r--r-- drivers/thermal/int340x_thermal/int3403_thermal.c | 483
-rw-r--r-- drivers/thermal/of-thermal.c | 52
-rw-r--r-- drivers/thermal/samsung/exynos_thermal_common.c | 9
-rw-r--r-- drivers/thermal/samsung/exynos_thermal_common.h | 2
-rw-r--r-- drivers/thermal/samsung/exynos_tmu.c | 170
-rw-r--r-- drivers/thermal/samsung/exynos_tmu.h | 89
-rw-r--r-- drivers/thermal/samsung/exynos_tmu_data.c | 105
-rw-r--r-- drivers/thermal/samsung/exynos_tmu_data.h | 55
-rw-r--r-- drivers/thermal/st/st_thermal.c | 3
-rw-r--r-- drivers/thermal/step_wise.c | 7
-rw-r--r-- drivers/thermal/thermal_core.c | 15
-rw-r--r-- drivers/thermal/thermal_core.h | 8
-rw-r--r-- drivers/tty/n_tty.c | 9
-rw-r--r-- drivers/tty/serial/8250/8250_mtk.c | 2
-rw-r--r-- drivers/tty/serial/of_serial.c | 29
-rw-r--r-- drivers/tty/serial/serial_core.c | 2
-rw-r--r-- drivers/tty/tty_io.c | 15
-rw-r--r-- drivers/tty/vt/consolemap.c | 7
-rw-r--r-- drivers/uio/uio.c | 12
-rw-r--r-- drivers/usb/chipidea/core.c | 1
-rw-r--r-- drivers/usb/class/cdc-acm.c | 25
-rw-r--r-- drivers/usb/class/cdc-acm.h | 2
-rw-r--r-- drivers/usb/core/hcd.c | 2
-rw-r--r-- drivers/usb/core/hub.c | 10
-rw-r--r-- drivers/usb/core/quirks.c | 9
-rw-r--r-- drivers/usb/dwc2/core.h | 2
-rw-r--r-- drivers/usb/dwc2/gadget.c | 16
-rw-r--r-- drivers/usb/dwc3/dwc3-omap.c | 15
-rw-r--r-- drivers/usb/dwc3/dwc3-pci.c | 2
-rw-r--r-- drivers/usb/dwc3/ep0.c | 48
-rw-r--r-- drivers/usb/dwc3/gadget.c | 39
-rw-r--r-- drivers/usb/dwc3/gadget.h | 3
-rw-r--r-- drivers/usb/dwc3/trace.h | 53
-rw-r--r-- drivers/usb/gadget/composite.c | 2
-rw-r--r-- drivers/usb/gadget/function/f_acm.c | 8
-rw-r--r-- drivers/usb/gadget/function/f_eem.c | 1
-rw-r--r-- drivers/usb/gadget/function/f_fs.c | 42
-rw-r--r-- drivers/usb/gadget/function/f_hid.c | 5
-rw-r--r-- drivers/usb/gadget/function/f_loopback.c | 87
-rw-r--r-- drivers/usb/gadget/function/f_ncm.c | 1
-rw-r--r-- drivers/usb/gadget/function/f_obex.c | 9
-rw-r--r-- drivers/usb/gadget/function/f_phonet.c | 2
-rw-r--r-- drivers/usb/gadget/function/f_rndis.c | 9
-rw-r--r-- drivers/usb/gadget/function/f_subset.c | 1
-rw-r--r-- drivers/usb/gadget/function/f_uac2.c | 23
-rw-r--r-- drivers/usb/gadget/function/f_uvc.c | 54
-rw-r--r-- drivers/usb/gadget/function/uvc_video.c | 3
-rw-r--r-- drivers/usb/gadget/udc/Kconfig | 1
-rw-r--r-- drivers/usb/gadget/udc/udc-core.c | 5
-rw-r--r-- drivers/usb/host/Kconfig | 4
-rw-r--r-- drivers/usb/host/hwa-hc.c | 2
-rw-r--r-- drivers/usb/host/xhci-hub.c | 5
-rw-r--r-- drivers/usb/host/xhci-pci.c | 20
-rw-r--r-- drivers/usb/host/xhci-plat.c | 10
-rw-r--r-- drivers/usb/host/xhci-ring.c | 43
-rw-r--r-- drivers/usb/host/xhci.c | 107
-rw-r--r-- drivers/usb/host/xhci.h | 2
-rw-r--r-- drivers/usb/musb/musb_cppi41.c | 3
-rw-r--r-- drivers/usb/musb/musb_dsps.c | 18
-rw-r--r-- drivers/usb/serial/cp210x.c | 2
-rw-r--r-- drivers/usb/serial/ftdi_sio.c | 36
-rw-r--r-- drivers/usb/serial/ftdi_sio_ids.h | 51
-rw-r--r-- drivers/usb/serial/keyspan.c | 97
-rw-r--r-- drivers/usb/serial/kobil_sct.c | 20
-rw-r--r-- drivers/usb/serial/opticon.c | 2
-rw-r--r-- drivers/usb/serial/option.c | 10
-rw-r--r-- drivers/usb/serial/ssu100.c | 11
-rw-r--r-- drivers/usb/storage/initializers.c | 4
-rw-r--r-- drivers/usb/storage/realtek_cr.c | 2
-rw-r--r-- drivers/usb/storage/transport.c | 26
-rw-r--r-- drivers/usb/storage/unusual_uas.h | 35
-rw-r--r-- drivers/vhost/scsi.c | 24
-rw-r--r-- drivers/video/console/fbcon.c | 19
-rw-r--r-- drivers/video/console/vgacon.c | 24
-rw-r--r-- drivers/video/fbdev/atmel_lcdfb.c | 1
-rw-r--r-- drivers/video/fbdev/omap2/displays-new/connector-analog-tv.c | 3
-rw-r--r-- drivers/video/fbdev/omap2/displays-new/connector-dvi.c | 1
-rw-r--r-- drivers/video/fbdev/omap2/displays-new/connector-hdmi.c | 1
-rw-r--r-- drivers/video/fbdev/omap2/displays-new/encoder-tfp410.c | 1
-rw-r--r-- drivers/video/fbdev/omap2/displays-new/encoder-tpd12s015.c | 1
-rw-r--r-- drivers/video/fbdev/omap2/displays-new/panel-dpi.c | 1
-rw-r--r-- drivers/video/fbdev/omap2/displays-new/panel-dsi-cm.c | 1
-rw-r--r-- drivers/video/fbdev/omap2/displays-new/panel-lgphilips-lb035q02.c | 1
-rw-r--r-- drivers/video/fbdev/omap2/displays-new/panel-nec-nl8048hl11.c | 1
-rw-r--r-- drivers/video/fbdev/omap2/displays-new/panel-sharp-ls037v7dw01.c | 1
-rw-r--r-- drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c | 1
-rw-r--r-- drivers/video/fbdev/omap2/displays-new/panel-tpo-td028ttec1.c | 1
-rw-r--r-- drivers/video/fbdev/omap2/displays-new/panel-tpo-td043mtea1.c | 1
-rw-r--r-- drivers/video/fbdev/omap2/dss/apply.c | 2
-rw-r--r-- drivers/video/fbdev/omap2/dss/dispc.c | 8
-rw-r--r-- drivers/video/fbdev/omap2/dss/dispc.h | 3
-rw-r--r-- drivers/video/fbdev/omap2/dss/dpi.c | 1
-rw-r--r-- drivers/video/fbdev/omap2/dss/dsi.c | 3
-rw-r--r-- drivers/video/fbdev/omap2/dss/dss.c | 1
-rw-r--r-- drivers/video/fbdev/omap2/dss/hdmi4.c | 1
-rw-r--r-- drivers/video/fbdev/omap2/dss/hdmi5.c | 1
-rw-r--r-- drivers/video/fbdev/omap2/dss/hdmi_pll.c | 13
-rw-r--r-- drivers/video/fbdev/omap2/dss/rfbi.c | 1
-rw-r--r-- drivers/video/fbdev/omap2/dss/sdi.c | 1
-rw-r--r-- drivers/video/fbdev/omap2/dss/venc.c | 1
-rw-r--r-- drivers/video/fbdev/omap2/omapfb/omapfb-main.c | 18
-rw-r--r-- drivers/watchdog/Kconfig | 54
-rw-r--r-- drivers/watchdog/Makefile | 5
-rw-r--r-- drivers/watchdog/booke_wdt.c | 28
-rw-r--r-- drivers/watchdog/cadence_wdt.c | 516
-rw-r--r-- drivers/watchdog/da9063_wdt.c | 191
-rw-r--r-- drivers/watchdog/dw_wdt.c | 36
-rw-r--r-- drivers/watchdog/imx2_wdt.c | 43
-rw-r--r-- drivers/watchdog/meson_wdt.c | 236
-rw-r--r-- drivers/watchdog/of_xilinx_wdt.c | 1
-rw-r--r-- drivers/watchdog/qcom-wdt.c | 224
-rw-r--r-- drivers/watchdog/rn5t618_wdt.c | 198
-rw-r--r-- drivers/watchdog/s3c2410_wdt.c | 47
-rw-r--r-- drivers/watchdog/stmp3xxx_rtc_wdt.c | 24
-rw-r--r-- drivers/watchdog/sunxi_wdt.c | 111
-rw-r--r-- drivers/watchdog/ts72xx_wdt.c | 6
-rw-r--r-- drivers/xen/balloon.c | 3
-rw-r--r-- drivers/xen/pci.c | 27
-rw-r--r-- fs/Kconfig | 1
-rw-r--r-- fs/Makefile | 1
-rw-r--r-- fs/aio.c | 21
-rw-r--r-- fs/block_dev.c | 3
-rw-r--r-- fs/btrfs/compression.c | 33
-rw-r--r-- fs/btrfs/compression.h | 4
-rw-r--r-- fs/btrfs/ctree.c | 14
-rw-r--r-- fs/btrfs/ctree.h | 2
-rw-r--r-- fs/btrfs/disk-io.c | 43
-rw-r--r-- fs/btrfs/extent-tree.c | 18
-rw-r--r-- fs/btrfs/file-item.c | 2
-rw-r--r-- fs/btrfs/ioctl.c | 20
-rw-r--r-- fs/btrfs/locking.c | 24
-rw-r--r-- fs/btrfs/locking.h | 2
-rw-r--r-- fs/btrfs/lzo.c | 15
-rw-r--r-- fs/btrfs/super.c | 1
-rw-r--r-- fs/btrfs/tree-log.c | 2
-rw-r--r-- fs/btrfs/zlib.c | 20
-rw-r--r-- fs/buffer.c | 86
-rw-r--r-- fs/ceph/caps.c | 2
-rw-r--r-- fs/dcache.c | 3
-rw-r--r-- fs/ecryptfs/main.c | 7
-rw-r--r-- fs/exofs/Kbuild | 2
-rw-r--r-- fs/exofs/common.h | 2
-rw-r--r-- fs/exofs/dir.c | 2
-rw-r--r-- fs/exofs/exofs.h | 2
-rw-r--r-- fs/exofs/file.c | 2
-rw-r--r-- fs/exofs/inode.c | 2
-rw-r--r-- fs/exofs/namei.c | 2
-rw-r--r-- fs/exofs/ore.c | 4
-rw-r--r-- fs/exofs/ore_raid.c | 2
-rw-r--r-- fs/exofs/ore_raid.h | 2
-rw-r--r-- fs/exofs/super.c | 2
-rw-r--r-- fs/exofs/symlink.c | 2
-rw-r--r-- fs/exofs/sys.c | 2
-rw-r--r-- fs/ext3/super.c | 7
-rw-r--r-- fs/ext4/balloc.c | 15
-rw-r--r-- fs/ext4/bitmap.c | 12
-rw-r--r-- fs/ext4/dir.c | 8
-rw-r--r-- fs/ext4/ext4.h | 50
-rw-r--r-- fs/ext4/ext4_extents.h | 1
-rw-r--r-- fs/ext4/ext4_jbd2.c | 4
-rw-r--r-- fs/ext4/ext4_jbd2.h | 6
-rw-r--r-- fs/ext4/extents.c | 626
-rw-r--r-- fs/ext4/extents_status.c | 200
-rw-r--r-- fs/ext4/extents_status.h | 13
-rw-r--r-- fs/ext4/file.c | 2
-rw-r--r-- fs/ext4/ialloc.c | 7
-rw-r--r-- fs/ext4/indirect.c | 86
-rw-r--r-- fs/ext4/inline.c | 7
-rw-r--r-- fs/ext4/inode.c | 140
-rw-r--r-- fs/ext4/ioctl.c | 13
-rw-r--r-- fs/ext4/mballoc.c | 15
-rw-r--r-- fs/ext4/migrate.c | 11
-rw-r--r-- fs/ext4/mmp.c | 6
-rw-r--r-- fs/ext4/move_extent.c | 1068
-rw-r--r-- fs/ext4/namei.c | 361
-rw-r--r-- fs/ext4/resize.c | 5
-rw-r--r-- fs/ext4/super.c | 262
-rw-r--r-- fs/ext4/xattr.c | 44
-rw-r--r-- fs/fat/namei_vfat.c | 20
-rw-r--r-- fs/internal.h | 7
-rw-r--r-- fs/isofs/inode.c | 64
-rw-r--r-- fs/isofs/namei.c | 22
-rw-r--r-- fs/jbd/journal.c | 2
-rw-r--r-- fs/jbd/revoke.c | 7
-rw-r--r-- fs/jbd2/checkpoint.c | 334
-rw-r--r-- fs/jbd2/journal.c | 23
-rw-r--r-- fs/jbd2/recovery.c | 1
-rw-r--r-- fs/jbd2/revoke.c | 10
-rw-r--r-- fs/namei.c | 46
-rw-r--r-- fs/namespace.c | 27
-rw-r--r-- fs/nfs/blocklayout/blocklayout.c | 2
-rw-r--r-- fs/nfs/blocklayout/rpc_pipefs.c | 14
-rw-r--r-- fs/nfs/delegation.c | 25
-rw-r--r-- fs/nfs/delegation.h | 1
-rw-r--r-- fs/nfs/dir.c | 1
-rw-r--r-- fs/nfs/direct.c | 1
-rw-r--r-- fs/nfs/filelayout/filelayout.c | 3
-rw-r--r-- fs/nfs/inode.c | 2
-rw-r--r-- fs/nfs/netns.h | 1
-rw-r--r-- fs/nfs/nfs4proc.c | 95
-rw-r--r-- fs/nfs/objlayout/objio_osd.c | 2
-rw-r--r-- fs/nfs/objlayout/objlayout.c | 2
-rw-r--r-- fs/nfs/objlayout/objlayout.h | 2
-rw-r--r-- fs/nfs/objlayout/pnfs_osd_xdr_cli.c | 2
-rw-r--r-- fs/nfs/write.c | 2
-rw-r--r-- fs/nfsd/nfs4callback.c | 8
-rw-r--r-- fs/nfsd/nfs4proc.c | 7
-rw-r--r-- fs/nfsd/nfsd.h | 9
-rw-r--r-- fs/notify/fsnotify.c | 36
-rw-r--r-- fs/notify/fsnotify.h | 4
-rw-r--r-- fs/notify/inode_mark.c | 25
-rw-r--r-- fs/notify/mark.c | 36
-rw-r--r-- fs/notify/vfsmount_mark.c | 8
-rw-r--r-- fs/ocfs2/cluster/tcp.c | 2
-rw-r--r-- fs/ocfs2/namei.c | 2
-rw-r--r-- fs/open.c | 23
-rw-r--r-- fs/overlayfs/Kconfig | 10
-rw-r--r-- fs/overlayfs/Makefile | 7
-rw-r--r-- fs/overlayfs/copy_up.c | 414
-rw-r--r-- fs/overlayfs/dir.c | 928
-rw-r--r-- fs/overlayfs/inode.c | 434
-rw-r--r-- fs/overlayfs/overlayfs.h | 191
-rw-r--r-- fs/overlayfs/readdir.c | 586
-rw-r--r-- fs/overlayfs/super.c | 833
-rw-r--r-- fs/quota/dquot.c | 2
-rw-r--r-- fs/splice.c | 1
-rw-r--r-- fs/xfs/xfs_bmap_util.c | 72
-rw-r--r-- fs/xfs/xfs_itable.c | 250
-rw-r--r-- fs/xfs/xfs_itable.h | 16
-rw-r--r--include/acpi/acnames.h1
-rw-r--r--include/acpi/acpi_bus.h1
-rw-r--r--include/acpi/acpixf.h2
-rw-r--r--include/acpi/actypes.h4
-rw-r--r--include/drm/drm_pciids.h1
-rw-r--r--include/dt-bindings/clock/imx6qdl-clock.h8
-rw-r--r--include/dt-bindings/clock/qcom,mmcc-apq8084.h2
-rw-r--r--include/dt-bindings/clock/vf610-clock.h39
-rw-r--r--include/dt-bindings/pinctrl/dra.h4
-rw-r--r--include/linux/acpi.h1
-rw-r--r--include/linux/audit.h2
-rw-r--r--include/linux/bitops.h7
-rw-r--r--include/linux/blkdev.h7
-rw-r--r--include/linux/bootmem.h1
-rw-r--r--include/linux/buffer_head.h47
-rw-r--r--include/linux/can/dev.h6
-rw-r--r--include/linux/clk-provider.h1
-rw-r--r--include/linux/clocksource.h2
-rw-r--r--include/linux/cma.h8
-rw-r--r--include/linux/compiler-gcc4.h1
-rw-r--r--include/linux/compiler-gcc5.h1
-rw-r--r--include/linux/cpufreq-dt.h22
-rw-r--r--include/linux/cpufreq.h2
-rw-r--r--include/linux/crash_dump.h15
-rw-r--r--include/linux/efi.h17
-rw-r--r--include/linux/fs.h49
-rw-r--r--include/linux/iio/events.h2
-rw-r--r--include/linux/inetdevice.h2
-rw-r--r--include/linux/jbd2.h2
-rw-r--r--include/linux/kernel.h1
-rw-r--r--include/linux/kernel_stat.h5
-rw-r--r--include/linux/kgdb.h2
-rw-r--r--include/linux/khugepaged.h17
-rw-r--r--include/linux/kvm_host.h3
-rw-r--r--include/linux/leds.h16
-rw-r--r--include/linux/mailbox_client.h46
-rw-r--r--include/linux/mailbox_controller.h133
-rw-r--r--include/linux/memcontrol.h58
-rw-r--r--include/linux/memory.h2
-rw-r--r--include/linux/mfd/arizona/core.h1
-rw-r--r--include/linux/mfd/davinci_voicecodec.h7
-rw-r--r--include/linux/mfd/max77693-private.h7
-rw-r--r--include/linux/mm.h2
-rw-r--r--include/linux/mmzone.h9
-rw-r--r--include/linux/mount.h3
-rw-r--r--include/linux/mtd/spi-nor.h21
-rw-r--r--include/linux/nfs_xdr.h11
-rw-r--r--include/linux/of.h84
-rw-r--r--include/linux/of_reserved_mem.h9
-rw-r--r--include/linux/oom.h3
-rw-r--r--include/linux/page-isolation.h8
-rw-r--r--include/linux/pci-acpi.h7
-rw-r--r--include/linux/pci.h1
-rw-r--r--include/linux/percpu-refcount.h8
-rw-r--r--include/linux/pl320-ipc.h (renamed from include/linux/mailbox.h)0
-rw-r--r--include/linux/pm_domain.h8
-rw-r--r--include/linux/pm_qos.h5
-rw-r--r--include/linux/pnfs_osd_xdr.h2
-rw-r--r--include/linux/power/charger-manager.h3
-rw-r--r--include/linux/power_supply.h6
-rw-r--r--include/linux/rcupdate.h15
-rw-r--r--include/linux/regmap.h7
-rw-r--r--include/linux/regulator/consumer.h2
-rw-r--r--include/linux/ring_buffer.h2
-rw-r--r--include/linux/skbuff.h12
-rw-r--r--include/linux/socket.h2
-rw-r--r--include/linux/string.h5
-rw-r--r--include/linux/thermal.h4
-rw-r--r--include/linux/uio_driver.h12
-rw-r--r--include/linux/uprobes.h14
-rw-r--r--include/linux/usb/usbnet.h4
-rw-r--r--include/linux/watchdog.h9
-rw-r--r--include/net/9p/transport.h1
-rw-r--r--include/net/inet_common.h2
-rw-r--r--include/net/ipv6.h2
-rw-r--r--include/net/netfilter/ipv4/nf_reject.h10
-rw-r--r--include/net/netfilter/ipv6/nf_reject.h10
-rw-r--r--include/net/netfilter/nf_tables.h5
-rw-r--r--include/net/netfilter/nft_masq.h3
-rw-r--r--include/net/udp_tunnel.h9
-rw-r--r--include/net/vxlan.h18
-rw-r--r--include/scsi/osd_initiator.h2
-rw-r--r--include/scsi/osd_ore.h2
-rw-r--r--include/scsi/osd_protocol.h4
-rw-r--r--include/scsi/osd_sec.h2
-rw-r--r--include/scsi/osd_sense.h2
-rw-r--r--include/scsi/osd_types.h2
-rw-r--r--include/scsi/scsi_tcq.h8
-rw-r--r--include/sound/pcm.h2
-rw-r--r--include/sound/soc-dai.h7
-rw-r--r--include/sound/soc-dapm.h9
-rw-r--r--include/sound/soc-dpcm.h2
-rw-r--r--include/sound/soc.h113
-rw-r--r--include/target/target_core_base.h17
-rw-r--r--include/trace/events/asoc.h25
-rw-r--r--include/trace/events/ext4.h59
-rw-r--r--include/trace/events/rcu.h18
-rw-r--r--include/trace/events/thermal.h83
-rw-r--r--include/uapi/linux/Kbuild5
-rw-r--r--include/uapi/linux/fs.h1
-rw-r--r--include/uapi/linux/if_bridge.h1
-rw-r--r--include/uapi/linux/input.h7
-rw-r--r--include/uapi/linux/perf_event.h14
-rw-r--r--include/uapi/linux/sched.h2
-rw-r--r--include/uapi/linux/target_core_user.h142
-rw-r--r--include/uapi/linux/v4l2-dv-timings.h9
-rw-r--r--include/uapi/sound/asound.h4
-rw-r--r--init/Kconfig14
-rw-r--r--init/main.c2
-rw-r--r--ipc/sem.c15
-rw-r--r--kernel/Makefile2
-rw-r--r--kernel/audit.c2
-rw-r--r--kernel/audit_tree.c1
-rw-r--r--kernel/bpf/Makefile6
-rw-r--r--kernel/bpf/core.c9
-rw-r--r--kernel/bpf/verifier.c3
-rw-r--r--kernel/context_tracking.c40
-rw-r--r--kernel/cpu.c14
-rw-r--r--kernel/events/core.c23
-rw-r--r--kernel/events/hw_breakpoint.c7
-rw-r--r--kernel/events/uprobes.c1
-rw-r--r--kernel/freezer.c9
-rw-r--r--kernel/futex.c36
-rw-r--r--kernel/gcov/Kconfig2
-rw-r--r--kernel/kmod.c76
-rw-r--r--kernel/panic.c1
-rw-r--r--kernel/power/hibernate.c8
-rw-r--r--kernel/power/process.c57
-rw-r--r--kernel/power/qos.c27
-rw-r--r--kernel/power/suspend.c4
-rw-r--r--kernel/rcu/tree.c15
-rw-r--r--kernel/rcu/tree.h1
-rw-r--r--kernel/rcu/tree_plugin.h33
-rw-r--r--kernel/sched/core.c118
-rw-r--r--kernel/sched/deadline.c43
-rw-r--r--kernel/sched/fair.c35
-rw-r--r--kernel/sched/idle_task.c5
-rw-r--r--kernel/sched/rt.c2
-rw-r--r--kernel/sched/sched.h2
-rw-r--r--kernel/sched/stop_task.c5
-rw-r--r--kernel/sysctl.c3
-rw-r--r--kernel/time/clockevents.c2
-rw-r--r--kernel/time/posix-cpu-timers.c2
-rw-r--r--kernel/time/posix-timers.c1
-rw-r--r--kernel/trace/ftrace.c54
-rw-r--r--kernel/trace/ring_buffer.c81
-rw-r--r--kernel/trace/trace.c33
-rw-r--r--kernel/trace/trace_syscalls.c8
-rw-r--r--lib/Makefile4
-rw-r--r--lib/bitmap.c8
-rw-r--r--lib/cmdline.c29
-rw-r--r--lib/genalloc.c1
-rw-r--r--lib/rhashtable.c10
-rw-r--r--lib/scatterlist.c6
-rw-r--r--lib/show_mem.c2
-rw-r--r--lib/string.c16
-rw-r--r--mm/balloon_compaction.c2
-rw-r--r--mm/bootmem.c9
-rw-r--r--mm/cma.c68
-rw-r--r--mm/compaction.c21
-rw-r--r--mm/frontswap.c4
-rw-r--r--mm/huge_memory.c15
-rw-r--r--mm/internal.h25
-rw-r--r--mm/iov_iter.c4
-rw-r--r--mm/memcontrol.c105
-rw-r--r--mm/memory.c27
-rw-r--r--mm/memory_hotplug.c31
-rw-r--r--mm/mmap.c18
-rw-r--r--mm/nobootmem.c8
-rw-r--r--mm/oom_kill.c17
-rw-r--r--mm/page-writeback.c43
-rw-r--r--mm/page_alloc.c76
-rw-r--r--mm/page_cgroup.c1
-rw-r--r--mm/page_isolation.c43
-rw-r--r--mm/rmap.c94
-rw-r--r--mm/shmem.c36
-rw-r--r--mm/slab.c2
-rw-r--r--mm/slab_common.c14
-rw-r--r--mm/truncate.c61
-rw-r--r--mm/vmpressure.c8
-rw-r--r--net/Kconfig2
-rw-r--r--net/bridge/br_forward.c1
-rw-r--r--net/bridge/br_multicast.c3
-rw-r--r--net/bridge/br_netfilter.c24
-rw-r--r--net/bridge/br_netlink.c1
-rw-r--r--net/bridge/netfilter/nf_tables_bridge.c6
-rw-r--r--net/bridge/netfilter/nft_reject_bridge.c297
-rw-r--r--net/ceph/auth_x.c25
-rw-r--r--net/ceph/crypto.c169
-rw-r--r--net/ceph/messenger.c10
-rw-r--r--net/ceph/osd_client.c7
-rw-r--r--net/core/dev.c4
-rw-r--r--net/core/ethtool.c6
-rw-r--r--net/core/rtnetlink.c24
-rw-r--r--net/core/skbuff.c36
-rw-r--r--net/core/tso.c3
-rw-r--r--net/dcb/dcbnl.c36
-rw-r--r--net/dsa/dsa.c5
-rw-r--r--net/dsa/slave.c7
-rw-r--r--net/ipv4/af_inet.c13
-rw-r--r--net/ipv4/fib_rules.c4
-rw-r--r--net/ipv4/fou.c2
-rw-r--r--net/ipv4/geneve.c3
-rw-r--r--net/ipv4/gre_offload.c4
-rw-r--r--net/ipv4/igmp.c11
-rw-r--r--net/ipv4/inet_fragment.c4
-rw-r--r--net/ipv4/ip_output.c2
-rw-r--r--net/ipv4/ip_sockglue.c2
-rw-r--r--net/ipv4/ip_vti.c1
-rw-r--r--net/ipv4/netfilter/nf_reject_ipv4.c91
-rw-r--r--net/ipv4/netfilter/nft_masq_ipv4.c2
-rw-r--r--net/ipv4/ping.c14
-rw-r--r--net/ipv4/route.c1
-rw-r--r--net/ipv4/tcp.c61
-rw-r--r--net/ipv4/tcp_input.c64
-rw-r--r--net/ipv4/tcp_ipv4.c9
-rw-r--r--net/ipv4/tcp_output.c2
-rw-r--r--net/ipv4/udp_offload.c2
-rw-r--r--net/ipv6/addrconf.c1
-rw-r--r--net/ipv6/ip6_gre.c9
-rw-r--r--net/ipv6/ip6_offload.c5
-rw-r--r--net/ipv6/ip6_tunnel.c10
-rw-r--r--net/ipv6/ip6_udp_tunnel.c4
-rw-r--r--net/ipv6/ip6_vti.c22
-rw-r--r--net/ipv6/ip6mr.c4
-rw-r--r--net/ipv6/mcast.c9
-rw-r--r--net/ipv6/netfilter/nf_reject_ipv6.c179
-rw-r--r--net/ipv6/netfilter/nft_masq_ipv6.c2
-rw-r--r--net/ipv6/output_core.c34
-rw-r--r--net/ipv6/sit.c15
-rw-r--r--net/ipv6/tcp_ipv6.c9
-rw-r--r--net/ipv6/xfrm6_policy.c11
-rw-r--r--net/ipx/af_ipx.c6
-rw-r--r--net/irda/af_irda.c2
-rw-r--r--net/mac80211/aes_ccm.c3
-rw-r--r--net/mac80211/cfg.c2
-rw-r--r--net/mac80211/ibss.c2
-rw-r--r--net/mac80211/ieee80211_i.h3
-rw-r--r--net/mac80211/iface.c18
-rw-r--r--net/mac80211/mesh.c2
-rw-r--r--net/mac80211/mlme.c5
-rw-r--r--net/mac80211/rate.c2
-rw-r--r--net/mac80211/rc80211_minstrel_debugfs.c12
-rw-r--r--net/mac80211/rc80211_minstrel_ht.c15
-rw-r--r--net/mac80211/rc80211_minstrel_ht_debugfs.c13
-rw-r--r--net/mac80211/rx.c14
-rw-r--r--net/mac80211/spectmgmt.c18
-rw-r--r--net/mac80211/sta_info.h1
-rw-r--r--net/mpls/Makefile2
-rw-r--r--net/mpls/mpls_gso.c5
-rw-r--r--net/netfilter/ipset/ip_set_core.c8
-rw-r--r--net/netfilter/ipvs/ip_vs_xmit.c6
-rw-r--r--net/netfilter/nf_conntrack_proto_tcp.c4
-rw-r--r--net/netfilter/nf_tables_api.c42
-rw-r--r--net/netfilter/nfnetlink.c12
-rw-r--r--net/netfilter/nfnetlink_log.c31
-rw-r--r--net/netfilter/nfnetlink_queue_core.c2
-rw-r--r--net/netfilter/nft_compat.c57
-rw-r--r--net/netfilter/nft_masq.c12
-rw-r--r--net/netfilter/nft_nat.c86
-rw-r--r--net/netlink/af_netlink.c42
-rw-r--r--net/openvswitch/actions.c10
-rw-r--r--net/openvswitch/datapath.c16
-rw-r--r--net/openvswitch/flow_netlink.c9
-rw-r--r--net/packet/af_packet.c2
-rw-r--r--net/sched/sch_api.c2
-rw-r--r--net/sched/sch_pie.c2
-rw-r--r--net/sctp/auth.c2
-rw-r--r--net/sctp/sm_make_chunk.c3
-rw-r--r--net/sunrpc/auth_gss/auth_gss.c35
-rw-r--r--net/sunrpc/svcsock.c27
-rw-r--r--net/tipc/node.c46
-rw-r--r--net/tipc/node.h7
-rw-r--r--net/tipc/socket.c6
-rw-r--r--net/wireless/nl80211.c10
-rw-r--r--net/xfrm/xfrm_output.c2
-rw-r--r--net/xfrm/xfrm_policy.c2
-rw-r--r--samples/bpf/test_verifier.c11
-rw-r--r--security/integrity/evm/evm_main.c9
-rw-r--r--security/integrity/ima/ima_appraise.c2
-rw-r--r--security/integrity/integrity.h1
-rw-r--r--security/keys/internal.h1
-rw-r--r--security/keys/keyctl.c56
-rw-r--r--security/keys/keyring.c10
-rw-r--r--security/keys/request_key.c2
-rw-r--r--security/keys/request_key_auth.c1
-rw-r--r--security/selinux/hooks.c7
-rw-r--r--sound/core/pcm.c2
-rw-r--r--sound/core/pcm_compat.c2
-rw-r--r--sound/core/pcm_misc.c8
-rw-r--r--sound/core/pcm_native.c14
-rw-r--r--sound/firewire/bebob/bebob_focusrite.c62
-rw-r--r--sound/firewire/bebob/bebob_stream.c18
-rw-r--r--sound/firewire/bebob/bebob_terratec.c7
-rw-r--r--sound/pci/ad1889.c8
-rw-r--r--sound/pci/hda/hda_intel.c32
-rw-r--r--sound/pci/hda/hda_local.h4
-rw-r--r--sound/pci/hda/hda_priv.h1
-rw-r--r--sound/pci/hda/patch_conexant.c31
-rw-r--r--sound/pci/hda/patch_hdmi.c15
-rw-r--r--sound/pci/hda/patch_realtek.c241
-rw-r--r--sound/soc/Kconfig1
-rw-r--r--sound/soc/Makefile7
-rw-r--r--sound/soc/atmel/Kconfig9
-rw-r--r--sound/soc/atmel/Makefile1
-rw-r--r--sound/soc/atmel/atmel_ssc_dai.c5
-rw-r--r--sound/soc/atmel/snd-soc-afeb9260.c151
-rw-r--r--sound/soc/au1x/ac97c.c2
-rw-r--r--sound/soc/au1x/psc-ac97.c2
-rw-r--r--sound/soc/blackfin/bf5xx-ac97.c2
-rw-r--r--sound/soc/blackfin/bf5xx-ad1980.c2
-rw-r--r--sound/soc/cirrus/Kconfig3
-rw-r--r--sound/soc/cirrus/ep93xx-ac97.c2
-rw-r--r--sound/soc/codecs/Kconfig22
-rw-r--r--sound/soc/codecs/Makefile4
-rw-r--r--sound/soc/codecs/ab8500-codec.c32
-rw-r--r--sound/soc/codecs/ac97.c18
-rw-r--r--sound/soc/codecs/ad193x.c14
-rw-r--r--sound/soc/codecs/ad1980.c212
-rw-r--r--sound/soc/codecs/ad1980.h26
-rw-r--r--sound/soc/codecs/adau1373.c6
-rw-r--r--sound/soc/codecs/adau1761.c7
-rw-r--r--sound/soc/codecs/adau1781.c2
-rw-r--r--sound/soc/codecs/adau17x1.c3
-rw-r--r--sound/soc/codecs/adav80x.c4
-rw-r--r--sound/soc/codecs/ak4535.c31
-rw-r--r--sound/soc/codecs/ak4641.c33
-rw-r--r--sound/soc/codecs/ak4642.c16
-rw-r--r--sound/soc/codecs/ak4671.c13
-rw-r--r--sound/soc/codecs/alc5623.c22
-rw-r--r--sound/soc/codecs/alc5632.c22
-rw-r--r--sound/soc/codecs/arizona.c34
-rw-r--r--sound/soc/codecs/cq93vc.c33
-rw-r--r--sound/soc/codecs/cs4265.c2
-rw-r--r--sound/soc/codecs/cs4271-i2c.c62
-rw-r--r--sound/soc/codecs/cs4271-spi.c55
-rw-r--r--sound/soc/codecs/cs4271.c155
-rw-r--r--sound/soc/codecs/cs4271.h11
-rw-r--r--sound/soc/codecs/cs42l51-i2c.c1
-rw-r--r--sound/soc/codecs/cs42l51.c10
-rw-r--r--sound/soc/codecs/cs42l51.h1
-rw-r--r--sound/soc/codecs/cs42l73.c6
-rw-r--r--sound/soc/codecs/es8328-i2c.c2
-rw-r--r--sound/soc/codecs/max98090.c16
-rw-r--r--sound/soc/codecs/max98095.c12
-rw-r--r--sound/soc/codecs/rt5645.c2
-rw-r--r--sound/soc/codecs/rt5670.c36
-rw-r--r--sound/soc/codecs/sgtl5000.c3
-rw-r--r--sound/soc/codecs/sgtl5000.h2
-rw-r--r--sound/soc/codecs/sigmadsp.c7
-rw-r--r--sound/soc/codecs/stac9766.c40
-rw-r--r--sound/soc/codecs/tlv320aic31xx.c13
-rw-r--r--sound/soc/codecs/wm5102.c18
-rw-r--r--sound/soc/codecs/wm8731.c9
-rw-r--r--sound/soc/codecs/wm8903.c8
-rw-r--r--sound/soc/codecs/wm8958-dsp2.c12
-rw-r--r--sound/soc/codecs/wm8962.c8
-rw-r--r--sound/soc/codecs/wm8994.c2
-rw-r--r--sound/soc/codecs/wm8994.h2
-rw-r--r--sound/soc/codecs/wm9705.c46
-rw-r--r--sound/soc/codecs/wm9712.c209
-rw-r--r--sound/soc/codecs/wm9713.c228
-rw-r--r--sound/soc/codecs/wm_adsp.c98
-rw-r--r--sound/soc/davinci/davinci-mcasp.c339
-rw-r--r--sound/soc/davinci/davinci-mcasp.h17
-rw-r--r--sound/soc/dwc/designware_i2s.c46
-rw-r--r--sound/soc/fsl/eukrea-tlv320.c5
-rw-r--r--sound/soc/fsl/fsl-asoc-card.c19
-rw-r--r--sound/soc/fsl/fsl_asrc.c28
-rw-r--r--sound/soc/fsl/fsl_esai.c14
-rw-r--r--sound/soc/fsl/fsl_ssi.c17
-rw-r--r--sound/soc/fsl/imx-sgtl5000.c6
-rw-r--r--sound/soc/fsl/imx-spdif.c3
-rw-r--r--sound/soc/fsl/imx-ssi.c2
-rw-r--r--sound/soc/fsl/imx-wm8962.c6
-rw-r--r--sound/soc/fsl/mpc5200_dma.c3
-rw-r--r--sound/soc/fsl/mpc5200_psc_ac97.c6
-rw-r--r--sound/soc/intel/sst-haswell-pcm.c4
-rw-r--r--sound/soc/nuc900/nuc900-ac97.c2
-rw-r--r--sound/soc/pxa/pxa2xx-ac97.c6
-rw-r--r--sound/soc/rockchip/rockchip_i2s.c4
-rw-r--r--sound/soc/s6000/Kconfig26
-rw-r--r--sound/soc/s6000/Makefile11
-rw-r--r--sound/soc/s6000/s6000-i2s.c617
-rw-r--r--sound/soc/s6000/s6000-i2s.h23
-rw-r--r--sound/soc/s6000/s6000-pcm.c521
-rw-r--r--sound/soc/s6000/s6000-pcm.h33
-rw-r--r--sound/soc/s6000/s6105-ipcam.c221
-rw-r--r--sound/soc/samsung/ac97.c4
-rw-r--r--sound/soc/samsung/snow.c1
-rw-r--r--sound/soc/sh/fsi.c12
-rw-r--r--sound/soc/sh/hac.c2
-rw-r--r--sound/soc/sh/rcar/core.c3
-rw-r--r--sound/soc/soc-ac97.c256
-rw-r--r--sound/soc/soc-cache.c149
-rw-r--r--sound/soc/soc-compress.c11
-rw-r--r--sound/soc/soc-core.c1534
-rw-r--r--sound/soc/soc-dapm.c755
-rw-r--r--sound/soc/soc-jack.c2
-rw-r--r--sound/soc/soc-ops.c952
-rw-r--r--sound/soc/soc-pcm.c95
-rw-r--r--sound/soc/tegra/tegra20_ac97.c2
-rw-r--r--sound/soc/txx9/txx9aclc-ac97.c2
-rw-r--r--sound/soc/txx9/txx9aclc.c2
-rw-r--r--sound/usb/card.c9
-rw-r--r--sound/usb/mixer.c7
-rw-r--r--sound/usb/mixer_quirks.c10
-rw-r--r--sound/usb/quirks-table.h30
-rw-r--r--sound/usb/quirks.c18
-rw-r--r--tools/perf/builtin-diff.c5
-rw-r--r--tools/perf/builtin-probe.c2
-rw-r--r--tools/perf/perf-sys.h30
-rw-r--r--tools/perf/util/header.c27
-rw-r--r--tools/perf/util/sort.c102
-rw-r--r--tools/perf/util/thread.c3
-rw-r--r--tools/perf/util/unwind-libunwind.c12
-rw-r--r--tools/perf/util/unwind.h3
-rw-r--r--tools/power/acpi/os_specific/service_layers/osunixxf.c8
-rw-r--r--tools/power/acpi/tools/acpidump/apdump.c2
-rwxr-xr-xtools/testing/selftests/ftrace/ftracetest2
-rw-r--r--tools/testing/selftests/net/psock_fanout.c2
-rw-r--r--virt/kvm/arm/vgic.c8
-rw-r--r--virt/kvm/iommu.c8
-rw-r--r--virt/kvm/kvm_main.c23
-rw-r--r--virt/kvm/vfio.c5
-rw-r--r--virt/kvm/vfio.h4
1740 files changed, 29975 insertions, 16113 deletions
diff --git a/Documentation/ABI/testing/sysfs-ibft b/Documentation/ABI/testing/sysfs-ibft
index c2b7d1154bec..cac3930bdb04 100644
--- a/Documentation/ABI/testing/sysfs-ibft
+++ b/Documentation/ABI/testing/sysfs-ibft
@@ -20,4 +20,4 @@ Date: November 2007
 Contact:	Konrad Rzeszutek <ketuzsezr@darnok.org>
 Description:	The /sys/firmware/ibft/ethernetX directory will contain
 		files that expose the iSCSI Boot Firmware Table NIC data.
-		This can this can the IP address, MAC, and gateway of the NIC.
+		Usually this contains the IP address, MAC, and gateway of the NIC.
diff --git a/Documentation/DocBook/media/Makefile b/Documentation/DocBook/media/Makefile
index df2962d9e11e..8bf7c6191296 100644
--- a/Documentation/DocBook/media/Makefile
+++ b/Documentation/DocBook/media/Makefile
@@ -25,7 +25,7 @@ GENFILES := $(addprefix $(MEDIA_OBJ_DIR)/, $(MEDIA_TEMP))
 PHONY += cleanmediadocs
 
 cleanmediadocs:
-	-@rm `find $(MEDIA_OBJ_DIR) -type l` $(GENFILES) $(OBJIMGFILES) 2>/dev/null
+	-@rm -f `find $(MEDIA_OBJ_DIR) -type l` $(GENFILES) $(OBJIMGFILES) 2>/dev/null
 
 $(obj)/media_api.xml: $(GENFILES) FORCE
 
diff --git a/Documentation/DocBook/media/v4l/compat.xml b/Documentation/DocBook/media/v4l/compat.xml
index 07ffc76553ba..0a2debfa68f6 100644
--- a/Documentation/DocBook/media/v4l/compat.xml
+++ b/Documentation/DocBook/media/v4l/compat.xml
@@ -2566,6 +2566,10 @@ fields changed from _s32 to _u32.
 	  <para>Added compound control types and &VIDIOC-QUERY-EXT-CTRL;.
 	</para>
       </listitem>
+    </orderedlist>
+  </section>
+
+  <section>
     <title>V4L2 in Linux 3.18</title>
     <orderedlist>
       <listitem>
diff --git a/Documentation/HOWTO b/Documentation/HOWTO
index 57cf5efb044d..93aa8604630e 100644
--- a/Documentation/HOWTO
+++ b/Documentation/HOWTO
@@ -324,7 +324,6 @@ tree, they need to be integration-tested. For this purpose, a special
 testing repository exists into which virtually all subsystem trees are
 pulled on an almost daily basis:
 	http://git.kernel.org/?p=linux/kernel/git/next/linux-next.git
-	http://linux.f-seidel.de/linux-next/pmwiki/
 
 This way, the -next kernel gives a summary outlook onto what will be
 expected to go into the mainline kernel at the next merge period.
diff --git a/Documentation/SubmittingPatches b/Documentation/SubmittingPatches
index 482c74947de0..1fa1caa198eb 100644
--- a/Documentation/SubmittingPatches
+++ b/Documentation/SubmittingPatches
@@ -483,12 +483,10 @@ have been included in the discussion
 
 14) Using Reported-by:, Tested-by:, Reviewed-by:, Suggested-by: and Fixes:
 
-If this patch fixes a problem reported by somebody else, consider adding a
-Reported-by: tag to credit the reporter for their contribution. Please
-note that this tag should not be added without the reporter's permission,
-especially if the problem was not reported in a public forum. That said,
-if we diligently credit our bug reporters, they will, hopefully, be
-inspired to help us again in the future.
+The Reported-by tag gives credit to people who find bugs and report them and it
+hopefully inspires them to help us again in the future. Please note that if
+the bug was reported in private, then ask for permission first before using the
+Reported-by tag.
 
 A Tested-by: tag indicates that the patch has been successfully tested (in
 some environment) by the person named. This tag informs maintainers that
diff --git a/Documentation/arm64/memory.txt b/Documentation/arm64/memory.txt
index 344e85cc7323..d7273a5f6456 100644
--- a/Documentation/arm64/memory.txt
+++ b/Documentation/arm64/memory.txt
@@ -17,7 +17,7 @@ User addresses have bits 63:48 set to 0 while the kernel addresses have
 the same bits set to 1. TTBRx selection is given by bit 63 of the
 virtual address. The swapper_pg_dir contains only kernel (global)
 mappings while the user pgd contains only user (non-global) mappings.
-The swapper_pgd_dir address is written to TTBR1 and never written to
+The swapper_pg_dir address is written to TTBR1 and never written to
 TTBR0.
 
 
diff --git a/Documentation/development-process/2.Process b/Documentation/development-process/2.Process
index 2e0617936e8f..c24e156a6118 100644
--- a/Documentation/development-process/2.Process
+++ b/Documentation/development-process/2.Process
@@ -289,10 +289,6 @@ lists when they are assembled; they can be downloaded from:
 
 	http://www.kernel.org/pub/linux/kernel/next/
 
-Some information about linux-next has been gathered at:
-
-	http://linux.f-seidel.de/linux-next/pmwiki/
-
 Linux-next has become an integral part of the kernel development process;
 all patches merged during a given merge window should really have found
 their way into linux-next some time before the merge window opens.
diff --git a/Documentation/development-process/8.Conclusion b/Documentation/development-process/8.Conclusion
index 1990ab4b4949..caef69022e9c 100644
--- a/Documentation/development-process/8.Conclusion
+++ b/Documentation/development-process/8.Conclusion
@@ -22,10 +22,6 @@ Beyond that, a valuable resource for kernel developers is:
 
 	http://kernelnewbies.org/
 
-Information about the linux-next tree gathers at:
-
-	http://linux.f-seidel.de/linux-next/pmwiki/
-
 And, of course, one should not forget http://kernel.org/, the definitive
 location for kernel release information.
 
diff --git a/Documentation/devicetree/bindings/ata/sata_rcar.txt b/Documentation/devicetree/bindings/ata/sata_rcar.txt
index 1e6111333fa8..80ae87a0784b 100644
--- a/Documentation/devicetree/bindings/ata/sata_rcar.txt
+++ b/Documentation/devicetree/bindings/ata/sata_rcar.txt
@@ -3,8 +3,10 @@
 Required properties:
 - compatible		: should contain one of the following:
 			  - "renesas,sata-r8a7779" for R-Car H1
-			  - "renesas,sata-r8a7790" for R-Car H2
-			  - "renesas,sata-r8a7791" for R-Car M2
+			  - "renesas,sata-r8a7790-es1" for R-Car H2 ES1
+			  - "renesas,sata-r8a7790" for R-Car H2 other than ES1
+			  - "renesas,sata-r8a7791" for R-Car M2-W
+			  - "renesas,sata-r8a7793" for R-Car M2-N
 - reg			: address and length of the SATA registers;
 - interrupts		: must consist of one interrupt specifier.
 
diff --git a/Documentation/devicetree/bindings/interrupt-controller/interrupts.txt b/Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
index ce6a1a072028..8a3c40829899 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/interrupts.txt
@@ -30,10 +30,6 @@ should only be used when a device has multiple interrupt parents.
   Example:
 	interrupts-extended = <&intc1 5 1>, <&intc2 1 0>;
 
-A device node may contain either "interrupts" or "interrupts-extended", but not
-both. If both properties are present, then the operating system should log an
-error and use only the data in "interrupts".
-
 2) Interrupt controller nodes
 -----------------------------
 
diff --git a/Documentation/devicetree/bindings/mailbox/mailbox.txt b/Documentation/devicetree/bindings/mailbox/mailbox.txt
new file mode 100644
index 000000000000..1a2cd3d266db
--- /dev/null
+++ b/Documentation/devicetree/bindings/mailbox/mailbox.txt
@@ -0,0 +1,38 @@
+* Generic Mailbox Controller and client driver bindings
+
+Generic binding to provide a way for Mailbox controller drivers to
+assign appropriate mailbox channel to client drivers.
+
+* Mailbox Controller
+
+Required property:
+- #mbox-cells: Must be at least 1. Number of cells in a mailbox
+		specifier.
+
+Example:
+	mailbox: mailbox {
+		...
+		#mbox-cells = <1>;
+	};
+
+
+* Mailbox Client
+
+Required property:
+- mboxes: List of phandle and mailbox channel specifiers.
+
+Optional property:
+- mbox-names: List of identifier strings for each mailbox channel
+		required by the client. The use of this property
+		is discouraged in favor of using index in list of
+		'mboxes' while requesting a mailbox. Instead the
+		platforms may define channel indices, in DT headers,
+		to something legible.
+
+Example:
+	pwr_cntrl: power {
+		...
+		mbox-names = "pwr-ctrl", "rpc";
+		mboxes = <&mailbox 0
+			&mailbox 1>;
+	};
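
(For context: a client driver consumes the channels described above through the mailbox client API merged alongside this binding. The sketch below is a minimal illustration only; the message payload, the reply handling and the channel index are assumptions, not something the binding defines.)

#include <linux/device.h>
#include <linux/err.h>
#include <linux/mailbox_client.h>

/* Reply handler; the payload format here is an illustrative assumption. */
static void pwr_rx_callback(struct mbox_client *cl, void *msg)
{
        dev_info(cl->dev, "power controller replied: 0x%x\n", *(u32 *)msg);
}

static struct mbox_client pwr_cl = {
        .tx_block       = true,         /* block until the controller signals tx done */
        .tx_tout        = 500,          /* timeout in ms */
        .rx_callback    = pwr_rx_callback,
};

static int pwr_send_request(struct device *dev, u32 cmd)
{
        struct mbox_chan *chan;
        int ret;

        pwr_cl.dev = dev;
        /* Index 0 refers to the first entry in this node's 'mboxes' list. */
        chan = mbox_request_channel(&pwr_cl, 0);
        if (IS_ERR(chan))
                return PTR_ERR(chan);

        ret = mbox_send_message(chan, &cmd);
        mbox_free_channel(chan);
        return ret < 0 ? ret : 0;
}
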
diff --git a/Documentation/devicetree/bindings/net/smsc-lan91c111.txt b/Documentation/devicetree/bindings/net/smsc-lan91c111.txt
index 0f8487b88822..e77e167593db 100644
--- a/Documentation/devicetree/bindings/net/smsc-lan91c111.txt
+++ b/Documentation/devicetree/bindings/net/smsc-lan91c111.txt
@@ -11,3 +11,5 @@ Optional properties:
   are supported on the device. Valid value for SMSC LAN91c111 are
   1, 2 or 4. If it's omitted or invalid, the size would be 2 meaning
   16-bit access only.
+- power-gpios: GPIO to control the PWRDWN pin
+- reset-gpios: GPIO to control the RESET pin
diff --git a/Documentation/devicetree/bindings/pci/pci.txt b/Documentation/devicetree/bindings/pci/pci.txt
index 41aeed38926d..f8fbe9af7b2f 100644
--- a/Documentation/devicetree/bindings/pci/pci.txt
+++ b/Documentation/devicetree/bindings/pci/pci.txt
@@ -7,3 +7,14 @@ And for the interrupt mapping part:
 
 Open Firmware Recommended Practice: Interrupt Mapping
 http://www.openfirmware.org/1275/practice/imap/imap0_9d.pdf
+
+Additionally to the properties specified in the above standards a host bridge
+driver implementation may support the following properties:
+
+- linux,pci-domain:
+   If present this property assigns a fixed PCI domain number to a host bridge,
+   otherwise an unstable (across boots) unique number will be assigned.
+   It is required to either not set this property at all or set it for all
+   host bridges in the system, otherwise potentially conflicting domain numbers
+   may be assigned to root buses behind different host bridges. The domain
+   number for each host bridge in the system must be unique.
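
(A host bridge driver that honours this property can read it with a plain of_property_read_u32() call; the function name and fallback below are illustrative assumptions, not part of the binding.)

#include <linux/errno.h>
#include <linux/of.h>

static int hypothetical_bridge_domain(struct device_node *np)
{
        u32 domain;

        /* Fixed, boot-stable domain number if the property is present... */
        if (!of_property_read_u32(np, "linux,pci-domain", &domain))
                return domain;

        /* ...otherwise fall back to whatever dynamic allocation the host uses. */
        return -EINVAL;
}
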
diff --git a/Documentation/devicetree/bindings/pinctrl/img,tz1090-pdc-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/img,tz1090-pdc-pinctrl.txt
index a186181c402b..51b943cc9770 100644
--- a/Documentation/devicetree/bindings/pinctrl/img,tz1090-pdc-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/img,tz1090-pdc-pinctrl.txt
@@ -9,7 +9,7 @@ Please refer to pinctrl-bindings.txt in this directory for details of the
 common pinctrl bindings used by client devices, including the meaning of the
 phrase "pin configuration node".
 
-TZ1090-PDC's pin configuration nodes act as a container for an abitrary number
+TZ1090-PDC's pin configuration nodes act as a container for an arbitrary number
 of subnodes. Each of these subnodes represents some desired configuration for a
 pin, a group, or a list of pins or groups. This configuration can include the
 mux function to select on those pin(s)/group(s), and various pin configuration
diff --git a/Documentation/devicetree/bindings/pinctrl/img,tz1090-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/img,tz1090-pinctrl.txt
index 4b27c99f7f9d..49d0e6050940 100644
--- a/Documentation/devicetree/bindings/pinctrl/img,tz1090-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/img,tz1090-pinctrl.txt
@@ -9,7 +9,7 @@ Please refer to pinctrl-bindings.txt in this directory for details of the
 common pinctrl bindings used by client devices, including the meaning of the
 phrase "pin configuration node".
 
-TZ1090's pin configuration nodes act as a container for an abitrary number of
+TZ1090's pin configuration nodes act as a container for an arbitrary number of
 subnodes. Each of these subnodes represents some desired configuration for a
 pin, a group, or a list of pins or groups. This configuration can include the
 mux function to select on those pin(s)/group(s), and various pin configuration
diff --git a/Documentation/devicetree/bindings/pinctrl/lantiq,falcon-pinumx.txt b/Documentation/devicetree/bindings/pinctrl/lantiq,falcon-pinumx.txt
index daa768956069..ac4da9fe07bd 100644
--- a/Documentation/devicetree/bindings/pinctrl/lantiq,falcon-pinumx.txt
+++ b/Documentation/devicetree/bindings/pinctrl/lantiq,falcon-pinumx.txt
@@ -9,7 +9,7 @@ Please refer to pinctrl-bindings.txt in this directory for details of the
 common pinctrl bindings used by client devices, including the meaning of the
 phrase "pin configuration node".
 
-Lantiq's pin configuration nodes act as a container for an abitrary number of
+Lantiq's pin configuration nodes act as a container for an arbitrary number of
 subnodes. Each of these subnodes represents some desired configuration for a
 pin, a group, or a list of pins or groups. This configuration can include the
 mux function to select on those group(s), and two pin configuration parameters:
diff --git a/Documentation/devicetree/bindings/pinctrl/lantiq,xway-pinumx.txt b/Documentation/devicetree/bindings/pinctrl/lantiq,xway-pinumx.txt
index b5469db1d7ad..e89b4677567d 100644
--- a/Documentation/devicetree/bindings/pinctrl/lantiq,xway-pinumx.txt
+++ b/Documentation/devicetree/bindings/pinctrl/lantiq,xway-pinumx.txt
@@ -9,7 +9,7 @@ Please refer to pinctrl-bindings.txt in this directory for details of the
 common pinctrl bindings used by client devices, including the meaning of the
 phrase "pin configuration node".
 
-Lantiq's pin configuration nodes act as a container for an abitrary number of
+Lantiq's pin configuration nodes act as a container for an arbitrary number of
 subnodes. Each of these subnodes represents some desired configuration for a
 pin, a group, or a list of pins or groups. This configuration can include the
 mux function to select on those group(s), and two pin configuration parameters:
diff --git a/Documentation/devicetree/bindings/pinctrl/nvidia,tegra20-pinmux.txt b/Documentation/devicetree/bindings/pinctrl/nvidia,tegra20-pinmux.txt
index 61e73cde9ae9..3c8ce28baad6 100644
--- a/Documentation/devicetree/bindings/pinctrl/nvidia,tegra20-pinmux.txt
+++ b/Documentation/devicetree/bindings/pinctrl/nvidia,tegra20-pinmux.txt
@@ -9,7 +9,7 @@ Please refer to pinctrl-bindings.txt in this directory for details of the
 common pinctrl bindings used by client devices, including the meaning of the
 phrase "pin configuration node".
 
-Tegra's pin configuration nodes act as a container for an abitrary number of
+Tegra's pin configuration nodes act as a container for an arbitrary number of
 subnodes. Each of these subnodes represents some desired configuration for a
 pin, a group, or a list of pins or groups. This configuration can include the
 mux function to select on those pin(s)/group(s), and various pin configuration
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl-sirf.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl-sirf.txt
index c596a6ad3285..5f55be59d914 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl-sirf.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl-sirf.txt
@@ -13,7 +13,7 @@ Optional properties:
 Please refer to pinctrl-bindings.txt in this directory for details of the common
 pinctrl bindings used by client devices.
 
-SiRFprimaII's pinmux nodes act as a container for an abitrary number of subnodes.
+SiRFprimaII's pinmux nodes act as a container for an arbitrary number of subnodes.
 Each of these subnodes represents some desired configuration for a group of pins.
 
 Required subnode-properties:
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl_spear.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl_spear.txt
index b4480d5c3aca..458615596946 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl_spear.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl_spear.txt
@@ -32,7 +32,7 @@ Required properties:
 Please refer to pinctrl-bindings.txt in this directory for details of the common
 pinctrl bindings used by client devices.
 
-SPEAr's pinmux nodes act as a container for an abitrary number of subnodes. Each
+SPEAr's pinmux nodes act as a container for an arbitrary number of subnodes. Each
 of these subnodes represents muxing for a pin, a group, or a list of pins or
 groups.
 
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.txt
index 2fb90b37aa09..a7bde64798c7 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,apq8064-pinctrl.txt
@@ -18,7 +18,7 @@ Please refer to pinctrl-bindings.txt in this directory for details of the
 common pinctrl bindings used by client devices, including the meaning of the
 phrase "pin configuration node".
 
-Qualcomm's pin configuration nodes act as a container for an abitrary number of
+Qualcomm's pin configuration nodes act as a container for an arbitrary number of
 subnodes. Each of these subnodes represents some desired configuration for a
 pin, a group, or a list of pins or groups. This configuration can include the
 mux function to select on those pin(s)/group(s), and various pin configuration
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,apq8084-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,apq8084-pinctrl.txt
index ffafa1990a30..c4ea61ac56f2 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,apq8084-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,apq8084-pinctrl.txt
@@ -47,7 +47,7 @@ Please refer to pinctrl-bindings.txt in this directory for details of the
 common pinctrl bindings used by client devices, including the meaning of the
 phrase "pin configuration node".
 
-The pin configuration nodes act as a container for an abitrary number of
+The pin configuration nodes act as a container for an arbitrary number of
 subnodes. Each of these subnodes represents some desired configuration for a
 pin, a group, or a list of pins or groups. This configuration can include the
 mux function to select on those pin(s)/group(s), and various pin configuration
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,ipq8064-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,ipq8064-pinctrl.txt
index e33e4dcdce79..6e88e91feb11 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,ipq8064-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,ipq8064-pinctrl.txt
@@ -18,7 +18,7 @@ Please refer to pinctrl-bindings.txt in this directory for details of the
 common pinctrl bindings used by client devices, including the meaning of the
 phrase "pin configuration node".
 
-Qualcomm's pin configuration nodes act as a container for an abitrary number of
+Qualcomm's pin configuration nodes act as a container for an arbitrary number of
 subnodes. Each of these subnodes represents some desired configuration for a
 pin, a group, or a list of pins or groups. This configuration can include the
 mux function to select on those pin(s)/group(s), and various pin configuration
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,msm8960-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,msm8960-pinctrl.txt
index 93b7de91b9f6..eb8d8aa41f20 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,msm8960-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,msm8960-pinctrl.txt
@@ -47,7 +47,7 @@ Please refer to pinctrl-bindings.txt in this directory for details of the
 common pinctrl bindings used by client devices, including the meaning of the
 phrase "pin configuration node".
 
-The pin configuration nodes act as a container for an abitrary number of
+The pin configuration nodes act as a container for an arbitrary number of
 subnodes. Each of these subnodes represents some desired configuration for a
 pin, a group, or a list of pins or groups. This configuration can include the
 mux function to select on those pin(s)/group(s), and various pin configuration
diff --git a/Documentation/devicetree/bindings/pinctrl/qcom,msm8974-pinctrl.txt b/Documentation/devicetree/bindings/pinctrl/qcom,msm8974-pinctrl.txt
index d2ea80dc43eb..e4d6a9d20f7d 100644
--- a/Documentation/devicetree/bindings/pinctrl/qcom,msm8974-pinctrl.txt
+++ b/Documentation/devicetree/bindings/pinctrl/qcom,msm8974-pinctrl.txt
@@ -18,7 +18,7 @@ Please refer to pinctrl-bindings.txt in this directory for details of the
 common pinctrl bindings used by client devices, including the meaning of the
 phrase "pin configuration node".
 
-Qualcomm's pin configuration nodes act as a container for an abitrary number of
+Qualcomm's pin configuration nodes act as a container for an arbitrary number of
 subnodes. Each of these subnodes represents some desired configuration for a
 pin, a group, or a list of pins or groups. This configuration can include the
 mux function to select on those pin(s)/group(s), and various pin configuration
diff --git a/Documentation/devicetree/bindings/pwm/pwm-fsl-ftm.txt b/Documentation/devicetree/bindings/pwm/pwm-fsl-ftm.txt
index 0bda229a6171..3899d6a557c1 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-fsl-ftm.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm-fsl-ftm.txt
@@ -1,5 +1,20 @@
 Freescale FlexTimer Module (FTM) PWM controller
 
+The same FTM PWM device can have a different endianness on different SoCs. The
+device tree provides a property to describing this so that an operating system
+device driver can handle all variants of the device. Refer to the table below
+for the endianness of the FTM PWM block as integrated into the existing SoCs:
+
+	SoC     | FTM-PWM endianness
+	--------+-------------------
+	Vybrid  | LE
+	LS1     | BE
+	LS2     | LE
+
+Please see ../regmap/regmap.txt for more detail about how to specify endian
+modes in device tree.
+
+
 Required properties:
 - compatible: Should be "fsl,vf610-ftm-pwm".
 - reg: Physical base address and length of the controller's registers
@@ -16,7 +31,8 @@ Required properties:
 - pinctrl-names: Must contain a "default" entry.
 - pinctrl-NNN: One property must exist for each entry in pinctrl-names.
   See pinctrl/pinctrl-bindings.txt for details of the property values.
-
+- big-endian: Boolean property, required if the FTM PWM registers use a big-
+  endian rather than little-endian layout.
 
 Example:
 
@@ -32,4 +48,5 @@ pwm0: pwm@40038000 {
 		 <&clks VF610_CLK_FTM0_EXT_FIX_EN>;
 	pinctrl-names = "default";
 	pinctrl-0 = <&pinctrl_pwm0_1>;
+	big-endian;
 };
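
(One plausible way for a driver to act on a "big-endian" property like the one above is to route it into regmap when mapping the registers. The sketch below is a generic illustration under assumed 32-bit register width; it is not taken from the actual FTM PWM driver.)

#include <linux/device.h>
#include <linux/of.h>
#include <linux/regmap.h>

static struct regmap *hypothetical_ftm_regmap(struct device *dev,
					      void __iomem *base)
{
        struct regmap_config cfg = {
                .reg_bits       = 32,
                .val_bits       = 32,
                .reg_stride     = 4,
        };

        /* Select register byte order from the DT property described above. */
        if (of_property_read_bool(dev->of_node, "big-endian"))
                cfg.val_format_endian = REGMAP_ENDIAN_BIG;
        else
                cfg.val_format_endian = REGMAP_ENDIAN_LITTLE;

        return devm_regmap_init_mmio(dev, base, &cfg);
}
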
diff --git a/Documentation/devicetree/bindings/pwm/pwm-rockchip.txt b/Documentation/devicetree/bindings/pwm/pwm-rockchip.txt
index d47d15a6a298..b8be3d09ee26 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-rockchip.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm-rockchip.txt
@@ -7,8 +7,8 @@ Required properties:
    "rockchip,vop-pwm": found integrated in VOP on RK3288 SoC
  - reg: physical base address and length of the controller's registers
  - clocks: phandle and clock specifier of the PWM reference clock
- - #pwm-cells: should be 2. See pwm.txt in this directory for a
-   description of the cell format.
+ - #pwm-cells: must be 2 (rk2928) or 3 (rk3288). See pwm.txt in this directory
+   for a description of the cell format.
 
 Example:
 
diff --git a/Documentation/devicetree/bindings/sound/davinci-mcasp-audio.txt b/Documentation/devicetree/bindings/sound/davinci-mcasp-audio.txt
index 60ca07996458..46bc9829c71a 100644
--- a/Documentation/devicetree/bindings/sound/davinci-mcasp-audio.txt
+++ b/Documentation/devicetree/bindings/sound/davinci-mcasp-audio.txt
@@ -32,7 +32,7 @@ Optional properties:
 - rx-num-evt : FIFO levels.
 - sram-size-playback : size of sram to be allocated during playback
 - sram-size-capture : size of sram to be allocated during capture
-- interrupts : Interrupt numbers for McASP, currently not used by the driver
+- interrupts : Interrupt numbers for McASP
 - interrupt-names : Known interrupt names are "tx" and "rx"
 - pinctrl-0: Should specify pin control group used for this controller.
 - pinctrl-names: Should contain only one value - "default", for more details
diff --git a/Documentation/devicetree/bindings/sound/sgtl5000.txt b/Documentation/devicetree/bindings/sound/sgtl5000.txt
index 955df60a118c..d556dcb8816b 100644
--- a/Documentation/devicetree/bindings/sound/sgtl5000.txt
+++ b/Documentation/devicetree/bindings/sound/sgtl5000.txt
@@ -7,10 +7,20 @@ Required properties:
 
 - clocks : the clock provider of SYS_MCLK
 
+- VDDA-supply : the regulator provider of VDDA
+
+- VDDIO-supply: the regulator provider of VDDIO
+
+Optional properties:
+
+- VDDD-supply : the regulator provider of VDDD
+
 Example:
 
 codec: sgtl5000@0a {
 	compatible = "fsl,sgtl5000";
 	reg = <0x0a>;
 	clocks = <&clks 150>;
+	VDDA-supply = <&reg_3p3v>;
+	VDDIO-supply = <&reg_3p3v>;
 };
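
(The "VDDx-supply" properties above are resolved by the codec driver through the regulator consumer API, using only the "VDDx" part as the supply name. A rough sketch of that pattern follows; the function name and the single-supply handling are illustrative assumptions.)

#include <linux/device.h>
#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int hypothetical_codec_power_up(struct device *dev)
{
        struct regulator *vdda;
        int ret;

        /* Resolves the 'VDDA-supply' phandle from the codec's DT node. */
        vdda = devm_regulator_get(dev, "VDDA");
        if (IS_ERR(vdda))
                return PTR_ERR(vdda);

        ret = regulator_enable(vdda);
        if (ret)
                dev_err(dev, "failed to enable VDDA: %d\n", ret);

        return ret;
}
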
diff --git a/Documentation/devicetree/bindings/submitting-patches.txt b/Documentation/devicetree/bindings/submitting-patches.txt
index 042a0273b8ba..b7ba01ad1426 100644
--- a/Documentation/devicetree/bindings/submitting-patches.txt
+++ b/Documentation/devicetree/bindings/submitting-patches.txt
@@ -12,6 +12,9 @@ I. For patch submitters
 
        devicetree@vger.kernel.org
 
+  3) The Documentation/ portion of the patch should come in the series before
+     the code implementing the binding.
+
 II. For kernel maintainers
 
   1) If you aren't comfortable reviewing a given binding, reply to it and ask
diff --git a/Documentation/devicetree/bindings/thermal/imx-thermal.txt b/Documentation/devicetree/bindings/thermal/imx-thermal.txt
index 1f0f67234a91..3c67bd50aa10 100644
--- a/Documentation/devicetree/bindings/thermal/imx-thermal.txt
+++ b/Documentation/devicetree/bindings/thermal/imx-thermal.txt
@@ -1,7 +1,10 @@
 * Temperature Monitor (TEMPMON) on Freescale i.MX SoCs
 
 Required properties:
-- compatible : "fsl,imx6q-thermal"
+- compatible : "fsl,imx6q-tempmon" for i.MX6Q, "fsl,imx6sx-tempmon" for i.MX6SX.
+  i.MX6SX has two more IRQs than i.MX6Q, one is IRQ_LOW and the other is IRQ_PANIC,
+  when temperature is below than low threshold, IRQ_LOW will be triggered, when temperature
+  is higher than panic threshold, system will auto reboot by SRC module.
 - fsl,tempmon : phandle pointer to system controller that contains TEMPMON
   control registers, e.g. ANATOP on imx6q.
 - fsl,tempmon-data : phandle pointer to fuse controller that contains TEMPMON
diff --git a/Documentation/devicetree/bindings/thermal/rcar-thermal.txt b/Documentation/devicetree/bindings/thermal/rcar-thermal.txt
index 0ef00be44b01..43404b197933 100644
--- a/Documentation/devicetree/bindings/thermal/rcar-thermal.txt
+++ b/Documentation/devicetree/bindings/thermal/rcar-thermal.txt
@@ -7,7 +7,10 @@ Required properties:
 			    - "renesas,thermal-r8a73a4" (R-Mobile AP6)
 			    - "renesas,thermal-r8a7779" (R-Car H1)
 			    - "renesas,thermal-r8a7790" (R-Car H2)
-			    - "renesas,thermal-r8a7791" (R-Car M2)
+			    - "renesas,thermal-r8a7791" (R-Car M2-W)
+			    - "renesas,thermal-r8a7792" (R-Car V2H)
+			    - "renesas,thermal-r8a7793" (R-Car M2-N)
+			    - "renesas,thermal-r8a7794" (R-Car E2)
 - reg			: Address range of the thermal registers.
 			  The 1st reg will be recognized as common register
 			  if it has "interrupts".
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 723999d73744..a344ec2713a5 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -34,6 +34,7 @@ chipidea Chipidea, Inc
 chrp	Common Hardware Reference Platform
 chunghwa	Chunghwa Picture Tubes Ltd.
 cirrus	Cirrus Logic, Inc.
+cnm	Chips&Media, Inc.
 cortina	Cortina Systems, Inc.
 crystalfontz	Crystalfontz America, Inc.
 dallas	Maxim Integrated Products (formerly Dallas Semiconductor)
@@ -92,6 +93,7 @@ maxim Maxim Integrated Products
 mediatek	MediaTek Inc.
 micrel	Micrel Inc.
 microchip	Microchip Technology Inc.
+micron	Micron Technology Inc.
 mitsubishi	Mitsubishi Electric Corporation
 mosaixtech	Mosaix Technologies, Inc.
 moxa	Moxa
@@ -127,6 +129,7 @@ renesas Renesas Electronics Corporation
 ricoh	Ricoh Co. Ltd.
 rockchip	Fuzhou Rockchip Electronics Co., Ltd
 samsung	Samsung Semiconductor
+sandisk	Sandisk Corporation
 sbs	Smart Battery System
 schindler	Schindler
 seagate	Seagate Technology PLC
@@ -138,7 +141,7 @@ silergy Silergy Corp.
 sirf	SiRF Technology, Inc.
 sitronix	Sitronix Technology Corporation
 smsc	Standard Microsystems Corporation
-snps	Synopsys, Inc.
+snps	Synopsys, Inc.
 solidrun	SolidRun
 sony	Sony Corporation
 spansion	Spansion Inc.
diff --git a/Documentation/devicetree/bindings/watchdog/cadence-wdt.txt b/Documentation/devicetree/bindings/watchdog/cadence-wdt.txt
new file mode 100644
index 000000000000..c3a36ee45552
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/cadence-wdt.txt
@@ -0,0 +1,24 @@
+Zynq Watchdog Device Tree Bindings
+-------------------------------------------
+
+Required properties:
+- compatible		: Should be "cdns,wdt-r1p2".
+- clocks		: This is pclk (APB clock).
+- interrupts		: This is wd_irq - watchdog timeout interrupt.
+- interrupt-parent	: Must be core interrupt controller.
+
+Optional properties
+- reset-on-timeout	: If this property exists, then a reset is done
+			  when watchdog times out.
+- timeout-sec		: Watchdog timeout value (in seconds).
+
+Example:
+	watchdog@f8005000 {
+		compatible = "cdns,wdt-r1p2";
+		clocks = <&clkc 45>;
+		interrupt-parent = <&intc>;
+		interrupts = <0 9 1>;
+		reg = <0xf8005000 0x1000>;
+		reset-on-timeout;
+		timeout-sec = <10>;
+	};
diff --git a/Documentation/devicetree/bindings/watchdog/fsl-imx-wdt.txt b/Documentation/devicetree/bindings/watchdog/fsl-imx-wdt.txt
index e52ba2da868c..8dab6fd024aa 100644
--- a/Documentation/devicetree/bindings/watchdog/fsl-imx-wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/fsl-imx-wdt.txt
@@ -7,7 +7,8 @@ Required properties:
 
 Optional property:
 - big-endian: If present the watchdog device's registers are implemented
-  in big endian mode, otherwise in little mode.
+  in big endian mode, otherwise in native mode(same with CPU), for more
+  detail please see: Documentation/devicetree/bindings/regmap/regmap.txt.
 
 Examples:
 
diff --git a/Documentation/devicetree/bindings/watchdog/meson6-wdt.txt b/Documentation/devicetree/bindings/watchdog/meson6-wdt.txt
new file mode 100644
index 000000000000..9200fc2d508c
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/meson6-wdt.txt
@@ -0,0 +1,13 @@
+Meson SoCs Watchdog timer
+
+Required properties:
+
+- compatible : should be "amlogic,meson6-wdt"
+- reg : Specifies base physical address and size of the registers.
+
+Example:
+
+wdt: watchdog@c1109900 {
+	compatible = "amlogic,meson6-wdt";
+	reg = <0xc1109900 0x8>;
+};
diff --git a/Documentation/devicetree/bindings/watchdog/qcom-wdt.txt b/Documentation/devicetree/bindings/watchdog/qcom-wdt.txt
new file mode 100644
index 000000000000..4726924d034e
--- /dev/null
+++ b/Documentation/devicetree/bindings/watchdog/qcom-wdt.txt
@@ -0,0 +1,24 @@
+Qualcomm Krait Processor Sub-system (KPSS) Watchdog
+---------------------------------------------------
+
+Required properties :
+- compatible : shall contain only one of the following:
+
+			"qcom,kpss-wdt-msm8960"
+			"qcom,kpss-wdt-apq8064"
+			"qcom,kpss-wdt-ipq8064"
+
+- reg : shall contain base register location and length
+- clocks : shall contain the input clock
+
+Optional properties :
+- timeout-sec : shall contain the default watchdog timeout in seconds,
+                if unset, the default timeout is 30 seconds
+
+Example:
+	watchdog@208a038 {
+		compatible = "qcom,kpss-wdt-ipq8064";
+		reg = <0x0208a038 0x40>;
+		clocks = <&sleep_clk>;
+		timeout-sec = <10>;
+	};
diff --git a/Documentation/devicetree/bindings/watchdog/samsung-wdt.txt b/Documentation/devicetree/bindings/watchdog/samsung-wdt.txt
index cfff37511aac..8f3d96af81d7 100644
--- a/Documentation/devicetree/bindings/watchdog/samsung-wdt.txt
+++ b/Documentation/devicetree/bindings/watchdog/samsung-wdt.txt
@@ -9,6 +9,7 @@ Required properties:
9 (a) "samsung,s3c2410-wdt" for Exynos4 and previous SoCs 9 (a) "samsung,s3c2410-wdt" for Exynos4 and previous SoCs
10 (b) "samsung,exynos5250-wdt" for Exynos5250 10 (b) "samsung,exynos5250-wdt" for Exynos5250
11 (c) "samsung,exynos5420-wdt" for Exynos5420 11 (c) "samsung,exynos5420-wdt" for Exynos5420
12 (d) "samsung,exynos7-wdt" for Exynos7
12 13
13- reg : base physical address of the controller and length of memory mapped 14- reg : base physical address of the controller and length of memory mapped
14 region. 15 region.
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 94d93b1f8b53..b30753cbf431 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -67,6 +67,7 @@ prototypes:
67 struct file *, unsigned open_flag, 67 struct file *, unsigned open_flag,
68 umode_t create_mode, int *opened); 68 umode_t create_mode, int *opened);
69 int (*tmpfile) (struct inode *, struct dentry *, umode_t); 69 int (*tmpfile) (struct inode *, struct dentry *, umode_t);
70 int (*dentry_open)(struct dentry *, struct file *, const struct cred *);
70 71
71locking rules: 72locking rules:
72 all may block 73 all may block
@@ -96,6 +97,7 @@ fiemap: no
96update_time: no 97update_time: no
97atomic_open: yes 98atomic_open: yes
98tmpfile: no 99tmpfile: no
100dentry_open: no
99 101
100 Additionally, ->rmdir(), ->unlink() and ->rename() have ->i_mutex on 102 Additionally, ->rmdir(), ->unlink() and ->rename() have ->i_mutex on
101victim. 103victim.
diff --git a/Documentation/filesystems/overlayfs.txt b/Documentation/filesystems/overlayfs.txt
new file mode 100644
index 000000000000..a27c950ece61
--- /dev/null
+++ b/Documentation/filesystems/overlayfs.txt
@@ -0,0 +1,198 @@
1Written by: Neil Brown <neilb@suse.de>
2
3Overlay Filesystem
4==================
5
6This document describes a prototype for a new approach to providing
7overlay-filesystem functionality in Linux (sometimes referred to as
8union-filesystems). An overlay-filesystem tries to present a
9filesystem which is the result of overlaying one filesystem on top
10of the other.
11
12The result will inevitably fail to look exactly like a normal
13filesystem for various technical reasons. The expectation is that
14many use cases will be able to ignore these differences.
15
16This approach is 'hybrid' because the objects that appear in the
17filesystem do not all appear to belong to that filesystem. In many
18cases an object accessed in the union will be indistinguishable
19from accessing the corresponding object from the original filesystem.
20This is most obvious from the 'st_dev' field returned by stat(2).
21
22While directories will report an st_dev from the overlay-filesystem,
23all non-directory objects will report an st_dev from the lower or
24upper filesystem that is providing the object. Similarly st_ino will
25only be unique when combined with st_dev, and both of these can change
26over the lifetime of a non-directory object. Many applications and
27tools ignore these values and will not be affected.
28
29Upper and Lower
30---------------
31
32An overlay filesystem combines two filesystems - an 'upper' filesystem
33and a 'lower' filesystem. When a name exists in both filesystems, the
34object in the 'upper' filesystem is visible while the object in the
35'lower' filesystem is either hidden or, in the case of directories,
36merged with the 'upper' object.
37
38It would be more correct to refer to an upper and lower 'directory
39tree' rather than 'filesystem' as it is quite possible for both
40directory trees to be in the same filesystem and there is no
41requirement that the root of a filesystem be given for either upper or
42lower.
43
44The lower filesystem can be any filesystem supported by Linux and does
45not need to be writable. The lower filesystem can even be another
46overlayfs. The upper filesystem will normally be writable; if it is,
47it must support the creation of trusted.* extended attributes, and
48must provide valid d_type in readdir responses, so NFS is not suitable.
49
50A read-only overlay of two read-only filesystems may use any
51filesystem type.
52
53Directories
54-----------
55
56Overlaying mainly involves directories. If a given name appears in both
57upper and lower filesystems and refers to a non-directory in either,
58then the lower object is hidden - the name refers only to the upper
59object.
60
61Where both upper and lower objects are directories, a merged directory
62is formed.
63
64At mount time, the two directories given as mount options "lowerdir" and
65"upperdir" are combined into a merged directory:
66
67 mount -t overlay overlay -olowerdir=/lower,upperdir=/upper,\
68workdir=/work /merged
69
70The "workdir" needs to be an empty directory on the same filesystem
71as upperdir.
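
The same mount can also be issued from C with the mount(2) system call.
This is only a minimal sketch using the example paths above; error
checking is omitted:

#include <sys/mount.h>

int main(void)
{
	/* overlay mount, same options as the mount(8) example above */
	return mount("overlay", "/merged", "overlay", 0,
		     "lowerdir=/lower,upperdir=/upper,workdir=/work");
}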
72
73Then whenever a lookup is requested in such a merged directory, the
74lookup is performed in each actual directory and the combined result
75is cached in the dentry belonging to the overlay filesystem. If both
76actual lookups find directories, both are stored and a merged
77directory is created, otherwise only one is stored: the upper if it
78exists, else the lower.
79
80Only the lists of names from directories are merged. Other content
81such as metadata and extended attributes is reported for the upper
82directory only. These attributes of the lower directory are hidden.
83
84whiteouts and opaque directories
85--------------------------------
86
87In order to support rm and rmdir without changing the lower
88filesystem, an overlay filesystem needs to record in the upper filesystem
89that files have been removed. This is done using whiteouts and opaque
90directories (non-directories are always opaque).
91
92A whiteout is created as a character device with 0/0 device number.
93When a whiteout is found in the upper level of a merged directory, any
94matching name in the lower level is ignored, and the whiteout itself
95is also hidden.
96
97A directory is made opaque by setting the xattr "trusted.overlay.opaque"
98to "y". Where the upper filesystem contains an opaque directory, any
99directory in the lower filesystem with the same name is ignored.
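
As an illustration of this on-disk format, the sketch below creates the
two kinds of marker by hand with ordinary syscalls. Overlayfs normally
creates them itself; the paths are examples and writing trusted.* xattrs
requires CAP_SYS_ADMIN.

#include <sys/types.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>
#include <sys/xattr.h>

int main(void)
{
	/* whiteout: a 0/0 character device hides any lower "dir/file" */
	mknod("/upper/dir/file", S_IFCHR | 0000, makedev(0, 0));

	/* opaque directory: a lower directory of the same name is ignored */
	mkdir("/upper/dir/opaquedir", 0755);
	setxattr("/upper/dir/opaquedir", "trusted.overlay.opaque", "y", 1, 0);

	return 0;
}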
100
101readdir
102-------
103
104When a 'readdir' request is made on a merged directory, the upper and
105lower directories are each read and the name lists merged in the
106obvious way (upper is read first, then lower - entries that already
107exist are not re-added). This merged name list is cached in the
108'struct file' and so remains as long as the file is kept open. If the
109directory is opened and read by two processes at the same time, they
110will each have separate caches. A seekdir to the start of the
111directory (offset 0) followed by a readdir will cause the cache to be
112discarded and rebuilt.
113
114This means that changes to the merged directory do not appear while a
115directory is being read. This is unlikely to be noticed by many
116programs.
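
The cache-rebuild behaviour described above can be seen with a short
sketch ("/merged/dir" is an example path; rewinddir() performs the seek
back to offset 0):

#include <dirent.h>
#include <stdio.h>

int main(void)
{
	DIR *d = opendir("/merged/dir");
	struct dirent *de;

	while ((de = readdir(d)))	/* name list is cached in the struct file */
		printf("%s\n", de->d_name);

	rewinddir(d);			/* back to offset 0: cache is discarded */

	while ((de = readdir(d)))	/* re-read with a freshly merged list */
		printf("%s\n", de->d_name);

	closedir(d);
	return 0;
}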
117
118seek offsets are assigned sequentially when the directories are read.
119Thus if
120 - read part of a directory
121 - remember an offset, and close the directory
122 - re-open the directory some time later
123 - seek to the remembered offset
124
125there may be little correlation between the old and new locations in
126the list of filenames, particularly if anything has changed in the
127directory.
128
129Readdir on directories that are not merged is simply handled by the
130underlying directory (upper or lower).
131
132
133Non-directories
134---------------
135
136Objects that are not directories (files, symlinks, device-special
137files etc.) are presented either from the upper or lower filesystem as
138appropriate. When a file in the lower filesystem is accessed in a way
139that requires write access, such as opening for writing, changing
140some metadata etc., the file is first copied from the lower filesystem
141to the upper filesystem (copy_up). Note that creating a hard-link
142also requires copy_up, though of course creation of a symlink does
143not.
144
145The copy_up may turn out to be unnecessary, for example if the file is
146opened for read-write but the data is not modified.
147
148The copy_up process first makes sure that the containing directory
149exists in the upper filesystem - creating it and any parents as
150necessary. It then creates the object with the same metadata (owner,
151mode, mtime, symlink-target etc.) and then if the object is a file, the
152data is copied from the lower to the upper filesystem. Finally any
153extended attributes are copied up.
154
155Once the copy_up is complete, the overlay filesystem simply
156provides direct access to the newly created file in the upper
157filesystem - future operations on the file are barely noticed by the
158overlay filesystem (though an operation on the name of the file such as
159rename or unlink will of course be noticed and handled).
160
161
162Non-standard behavior
163---------------------
164
165The copy_up operation essentially creates a new, identical file and
166moves it over to the old name. The new file may be on a different
167filesystem, so both st_dev and st_ino of the file may change.
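
A sketch of how this can be observed from userspace, assuming
"/merged/file" exists only in the lower layer (error checking omitted):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/stat.h>
#include <unistd.h>

int main(void)
{
	struct stat before, after;

	stat("/merged/file", &before);

	/* opening for write triggers copy_up into the upper filesystem */
	close(open("/merged/file", O_WRONLY));

	stat("/merged/file", &after);
	printf("dev %ju -> %ju, ino %ju -> %ju\n",
	       (uintmax_t)before.st_dev, (uintmax_t)after.st_dev,
	       (uintmax_t)before.st_ino, (uintmax_t)after.st_ino);
	return 0;
}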
168
169Any open files referring to this inode will access the old data and
170metadata. Similarly any file locks obtained before copy_up will not
171apply to the copied up file.
172
173On a file opened with O_RDONLY, fchmod(2), fchown(2), futimesat(2) and
174fsetxattr(2) will fail with EROFS.
175
176If a file with multiple hard links is copied up, then this will
177"break" the link. Changes will not be propagated to other names
178referring to the same inode.
179
180Symlinks in /proc/PID/ and /proc/PID/fd which point to a non-directory
181object in overlayfs will not contain valid absolute paths, only
182relative paths leading up to the filesystem's root. This will be
183fixed in the future.
184
185Some operations are not atomic, for example a crash during copy_up or
186rename will leave the filesystem in an inconsistent state. This will
187be addressed in the future.
188
189Changes to underlying filesystems
190---------------------------------
191
192Offline changes, when the overlay is not mounted, are allowed to either
193the upper or the lower trees.
194
195Changes to the underlying filesystems while part of a mounted overlay
196filesystem are not allowed. If the underlying filesystem is changed,
197the behavior of the overlay is undefined, though it will not result in
198a crash or deadlock.
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index fceff7c00a3c..20bf204426ca 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -364,6 +364,7 @@ struct inode_operations {
364 int (*atomic_open)(struct inode *, struct dentry *, struct file *, 364 int (*atomic_open)(struct inode *, struct dentry *, struct file *,
365 unsigned open_flag, umode_t create_mode, int *opened); 365 unsigned open_flag, umode_t create_mode, int *opened);
366 int (*tmpfile) (struct inode *, struct dentry *, umode_t); 366 int (*tmpfile) (struct inode *, struct dentry *, umode_t);
367 int (*dentry_open)(struct dentry *, struct file *, const struct cred *);
367}; 368};
368 369
369Again, all methods are called without any locks being held, unless 370Again, all methods are called without any locks being held, unless
@@ -696,6 +697,12 @@ struct address_space_operations {
696 but instead uses bmap to find out where the blocks in the file 697 but instead uses bmap to find out where the blocks in the file
697 are and uses those addresses directly. 698 are and uses those addresses directly.
698 699
700 dentry_open: *WARNING: probably going away soon, do not use!* This is an
701 alternative to f_op->open(); the difference is that this method may open
702 a file not necessarily originating from the same filesystem as the one
703 i_op->open() was called on. It may be useful for stacking filesystems
704 which want to allow native I/O directly on underlying files.
705
699 706
700 invalidatepage: If a page has PagePrivate set, then invalidatepage 707 invalidatepage: If a page has PagePrivate set, then invalidatepage
701 will be called when part or all of the page is to be removed 708 will be called when part or all of the page is to be removed
diff --git a/Documentation/input/elantech.txt b/Documentation/input/elantech.txt
index e1ae127ed099..1ec0db7879d3 100644
--- a/Documentation/input/elantech.txt
+++ b/Documentation/input/elantech.txt
@@ -38,22 +38,38 @@ Contents
38 7.2.1 Status packet 38 7.2.1 Status packet
39 7.2.2 Head packet 39 7.2.2 Head packet
40 7.2.3 Motion packet 40 7.2.3 Motion packet
41 8. Trackpoint (for Hardware version 3 and 4)
42 8.1 Registers
43 8.2 Native relative mode 6 byte packet format
44 8.2.1 Status Packet
41 45
42 46
43 47
441. Introduction 481. Introduction
45 ~~~~~~~~~~~~ 49 ~~~~~~~~~~~~
46 50
47Currently the Linux Elantech touchpad driver is aware of two different 51Currently the Linux Elantech touchpad driver is aware of four different
52hardware versions unimaginatively called version 1, version 2, version 3
49is found in "older" laptops and uses 4 bytes per packet. Version 2 seems to 53and version 4. Version 1 is found in "older" laptops and uses 4 bytes per
50be introduced with the EeePC and uses 6 bytes per packet, and provides 54packet. Version 2 seems to be introduced with the EeePC and uses 6 bytes
51additional features such as position of two fingers, and width of the touch. 55per packet, and provides additional features such as position of two fingers,
56and width of the touch. Hardware version 3 uses 6 bytes per packet (and
57for 2 fingers the concatenation of two 6 bytes packets) and allows tracking
58of up to 3 fingers. Hardware version 4 uses 6 bytes per packet, and can
59combine a status packet with multiple head or motion packets. Hardware version
604 allows tracking up to 5 fingers.
61
62Some Hardware version 3 and version 4 also have a trackpoint which uses a
63separate packet format. It is also 6 bytes per packet.
52 64
53The driver tries to support both hardware versions and should be compatible 65The driver tries to support both hardware versions and should be compatible
54with the Xorg Synaptics touchpad driver and its graphical configuration 66with the Xorg Synaptics touchpad driver and its graphical configuration
55utilities. 67utilities.
56 68
69Note that a mouse button is also associated with either the touchpad or the
70trackpoint when a trackpoint is available. Disabling the Touchpad in xorg
71(TouchPadOff=0) will also disable the buttons associated with the touchpad.
72
57Additionally the operation of the touchpad can be altered by adjusting the 73Additionally the operation of the touchpad can be altered by adjusting the
58contents of some of its internal registers. These registers are represented 74contents of some of its internal registers. These registers are represented
59by the driver as sysfs entries under /sys/bus/serio/drivers/psmouse/serio? 75by the driver as sysfs entries under /sys/bus/serio/drivers/psmouse/serio?
@@ -78,7 +94,7 @@ completeness sake.
782. Extra knobs 942. Extra knobs
79 ~~~~~~~~~~~ 95 ~~~~~~~~~~~
80 96
81Currently the Linux Elantech touchpad driver provides two extra knobs under 97Currently the Linux Elantech touchpad driver provides three extra knobs under
82/sys/bus/serio/drivers/psmouse/serio? for the user. 98/sys/bus/serio/drivers/psmouse/serio? for the user.
83 99
84* debug 100* debug
@@ -112,6 +128,20 @@ Currently the Linux Elantech touchpad driver provides two extra knobs under
112 data consistency checking can be done. For now checking is disabled by 128 data consistency checking can be done. For now checking is disabled by
113 default. Currently even turning it on will do nothing. 129 default. Currently even turning it on will do nothing.
114 130
131* crc_enabled
132
133 Sets crc_enabled to 0/1. The name "crc_enabled" is the official name of
134 this integrity check, even though it is not an actual cyclic redundancy
135 check.
136
137 Depending on the state of crc_enabled, certain basic data integrity
138 verification is done by the driver on hardware version 3 and 4. The
139 driver will reject any packet that appears corrupted. The state of
140 crc_enabled can be altered with this knob.
141
142 Reading the crc_enabled value will show the active value. Echoing
143 "0" or "1" to this file will set the state to "0" or "1".
144
115///////////////////////////////////////////////////////////////////////////// 145/////////////////////////////////////////////////////////////////////////////
116 146
1173. Differentiating hardware versions 1473. Differentiating hardware versions
@@ -746,3 +776,42 @@ byte 5:
746 776
747 byte 0 ~ 2 for one finger 777 byte 0 ~ 2 for one finger
748 byte 3 ~ 5 for another 778 byte 3 ~ 5 for another
779
780
7818. Trackpoint (for Hardware version 3 and 4)
782 =========================================
7838.1 Registers
784 ~~~~~~~~~
785No special registers have been identified.
786
7878.2 Native relative mode 6 byte packet format
788 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
7898.2.1 Status Packet
790 ~~~~~~~~~~~~~
791
792byte 0:
793 bit 7 6 5 4 3 2 1 0
794 0 0 sx sy 0 M R L
795byte 1:
796 bit 7 6 5 4 3 2 1 0
797 ~sx 0 0 0 0 0 0 0
798byte 2:
799 bit 7 6 5 4 3 2 1 0
800 ~sy 0 0 0 0 0 0 0
801byte 3:
802 bit 7 6 5 4 3 2 1 0
803 0 0 ~sy ~sx 0 1 1 0
804byte 4:
805 bit 7 6 5 4 3 2 1 0
806 x7 x6 x5 x4 x3 x2 x1 x0
807byte 5:
808 bit 7 6 5 4 3 2 1 0
809 y7 y6 y5 y4 y3 y2 y1 y0
810
811
812 x and y are written in two's complement spread
813 over 9 bits with sx/sy the relative top bit and
814 x7..x0 and y7..y0 the lower bits.
815 ~sx is the inverse of sx, ~sy is the inverse of sy.
816 The sign of y is opposite to what the input driver
817 expects for a relative movement.
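
A decoding sketch for the packet layout above (an illustrative helper,
not driver code; packet[] holds bytes 0 through 5):

static void trackpoint_decode(const unsigned char packet[6],
			      int *x, int *y,
			      int *left, int *middle, int *right)
{
	int sx = (packet[0] >> 5) & 1;	/* sign bit of x (byte 0, bit 5) */
	int sy = (packet[0] >> 4) & 1;	/* sign bit of y (byte 0, bit 4) */

	*left   = packet[0] & 0x01;
	*right  = (packet[0] >> 1) & 0x01;
	*middle = (packet[0] >> 2) & 0x01;

	/* 9-bit two's complement: sign bit on top of the 8 low bits */
	*x = sx ? (int)packet[4] - 256 : packet[4];
	*y = sy ? (int)packet[5] - 256 : packet[5];

	/* the hardware's y sign is opposite to what the input layer expects */
	*y = -*y;
}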
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 7dbe5ec9d9cd..479f33204a37 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1015,10 +1015,14 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
1015 Format: {"off" | "on" | "skip[mbr]"} 1015 Format: {"off" | "on" | "skip[mbr]"}
1016 1016
1017 efi= [EFI] 1017 efi= [EFI]
1018 Format: { "old_map" } 1018 Format: { "old_map", "nochunk", "noruntime" }
1019 old_map [X86-64]: switch to the old ioremap-based EFI 1019 old_map [X86-64]: switch to the old ioremap-based EFI
1020 runtime services mapping. 32-bit still uses this one by 1020 runtime services mapping. 32-bit still uses this one by
1021 default. 1021 default.
1022 nochunk: disable reading files in "chunks" in the EFI
1023 boot stub, as chunking can cause problems with some
1024 firmware implementations.
1025 noruntime: disable EFI runtime services support
1022 1026
1023 efi_no_storage_paranoia [EFI; X86] 1027 efi_no_storage_paranoia [EFI; X86]
1024 Using this parameter you can use more than 50% of 1028 Using this parameter you can use more than 50% of
@@ -1260,7 +1264,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
1260 i8042.noloop [HW] Disable the AUX Loopback command while probing 1264 i8042.noloop [HW] Disable the AUX Loopback command while probing
1261 for the AUX port 1265 for the AUX port
1262 i8042.nomux [HW] Don't check presence of an active multiplexing 1266 i8042.nomux [HW] Don't check presence of an active multiplexing
1263 controller. Default: true. 1267 controller
1264 i8042.nopnp [HW] Don't use ACPIPnP / PnPBIOS to discover KBD/AUX 1268 i8042.nopnp [HW] Don't use ACPIPnP / PnPBIOS to discover KBD/AUX
1265 controllers 1269 controllers
1266 i8042.notimeout [HW] Ignore timeout condition signalled by controller 1270 i8042.notimeout [HW] Ignore timeout condition signalled by controller
@@ -1303,6 +1307,18 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
1303 .cdrom .chs .ignore_cable are additional options 1307 .cdrom .chs .ignore_cable are additional options
1304 See Documentation/ide/ide.txt. 1308 See Documentation/ide/ide.txt.
1305 1309
1310 ide-generic.probe-mask= [HW] (E)IDE subsystem
1311 Format: <int>
1312 Probe mask for legacy ISA IDE ports. Depending on
1313 platform up to 6 ports are supported, enabled by
1314 setting corresponding bits in the mask to 1. The
1315 default value is 0x0, which has a special meaning.
1316 On systems that have PCI, it triggers scanning the
1317 PCI bus for the first and the second port, which
1318 are then probed. On systems without PCI the value
1319 of 0x0 enables probing the first two ports as if it
1320 were 0x3.
1321
1306 ide-pci-generic.all-generic-ide [HW] (E)IDE subsystem 1322 ide-pci-generic.all-generic-ide [HW] (E)IDE subsystem
1307 Claim all unknown PCI IDE storage controllers. 1323 Claim all unknown PCI IDE storage controllers.
1308 1324
@@ -1583,6 +1599,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
1583 kmemleak= [KNL] Boot-time kmemleak enable/disable 1599 kmemleak= [KNL] Boot-time kmemleak enable/disable
1584 Valid arguments: on, off 1600 Valid arguments: on, off
1585 Default: on 1601 Default: on
1602 If built with CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y,
1603 the default is off.
1586 1604
1587 kmemcheck= [X86] Boot-time kmemcheck enable/disable/one-shot mode 1605 kmemcheck= [X86] Boot-time kmemcheck enable/disable/one-shot mode
1588 Valid arguments: 0, 1, 2 1606 Valid arguments: 0, 1, 2
@@ -2232,7 +2250,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
2232 2250
2233 nodsp [SH] Disable hardware DSP at boot time. 2251 nodsp [SH] Disable hardware DSP at boot time.
2234 2252
2235 noefi [X86] Disable EFI runtime services support. 2253 noefi Disable EFI runtime services support.
2236 2254
2237 noexec [IA-64] 2255 noexec [IA-64]
2238 2256
@@ -3465,6 +3483,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
3465 e.g. base its process migration decisions on it. 3483 e.g. base its process migration decisions on it.
3466 Default is on. 3484 Default is on.
3467 3485
3486 topology_updates= [KNL, PPC, NUMA]
3487 Format: {off}
3488 Specify if the kernel should ignore (off)
3489 topology updates sent by the hypervisor to this
3490 LPAR.
3491
3468 tp720= [HW,PS2] 3492 tp720= [HW,PS2]
3469 3493
3470 tpm_suspend_pcr=[HW,TPM] 3494 tpm_suspend_pcr=[HW,TPM]
@@ -3597,7 +3621,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
3597 3621
3598 usb-storage.delay_use= 3622 usb-storage.delay_use=
3599 [UMS] The delay in seconds before a new device is 3623 [UMS] The delay in seconds before a new device is
3600 scanned for Logical Units (default 5). 3624 scanned for Logical Units (default 1).
3601 3625
3602 usb-storage.quirks= 3626 usb-storage.quirks=
3603 [UMS] A list of quirks entries to supplement or 3627 [UMS] A list of quirks entries to supplement or
diff --git a/Documentation/kmemleak.txt b/Documentation/kmemleak.txt
index f4f033c8d856..45e777f4e41d 100644
--- a/Documentation/kmemleak.txt
+++ b/Documentation/kmemleak.txt
@@ -62,6 +62,10 @@ Memory may be allocated or freed before kmemleak is initialised and
62these actions are stored in an early log buffer. The size of this buffer 62these actions are stored in an early log buffer. The size of this buffer
63is configured via the CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE option. 63is configured via the CONFIG_DEBUG_KMEMLEAK_EARLY_LOG_SIZE option.
64 64
65If CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF is enabled, kmemleak is
66disabled by default. Passing "kmemleak=on" on the kernel command
67line enables it.
68
65Basic Algorithm 69Basic Algorithm
66--------------- 70---------------
67 71
diff --git a/Documentation/mailbox.txt b/Documentation/mailbox.txt
new file mode 100644
index 000000000000..60f43ff629aa
--- /dev/null
+++ b/Documentation/mailbox.txt
@@ -0,0 +1,122 @@
1 The Common Mailbox Framework
2 Jassi Brar <jaswinder.singh@linaro.org>
3
4 This document aims to help developers write client and controller
5drivers for the API. But before we start, let us note that the
6client (especially) and controller drivers are likely going to be
7very platform specific because the remote firmware is likely to be
8proprietary and implement a non-standard protocol. So even if two
9platforms employ, say, PL320 controller, the client drivers can't
10be shared across them. Even the PL320 driver might need to accommodate
11some platform specific quirks. So the API is meant mainly to avoid
12similar copies of code written for each platform. Having said that,
13nothing prevents the remote f/w from also being Linux based and using
14the same API there. However, none of that helps us locally because we
15only ever deal at the client's protocol level.
16 Some of the choices made during implementation are the result of this
17peculiarity of this "common" framework.
18
19
20
21 Part 1 - Controller Driver (See include/linux/mailbox_controller.h)
22
23 Allocate mbox_controller and the array of mbox_chan.
24Populate mbox_chan_ops; all methods are mandatory except peek_data().
25The controller driver might know a message has been consumed
26by the remote by getting an IRQ or polling some hardware flag
27or it can never know (the client knows by way of the protocol).
28The preferred method, in that order, is IRQ -> Poll -> None; the
29controller driver indicates which it uses by setting 'txdone_irq',
30'txdone_poll' or neither.
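
 Below is a minimal controller sketch along those lines. All 'my_*'
names and MY_NUM_CHANS are invented for this example, the hardware
access is stubbed out, and error handling plus platform_driver
registration are omitted:

#include <linux/interrupt.h>
#include <linux/mailbox_controller.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#define MY_NUM_CHANS	4

static int my_send_data(struct mbox_chan *chan, void *data)
{
	/* write 'data' to this channel's hardware registers */
	return 0;
}

static int my_startup(struct mbox_chan *chan)
{
	/* enable clocks/IRQs for this channel */
	return 0;
}

static void my_shutdown(struct mbox_chan *chan)
{
	/* quiesce the channel */
}

static bool my_last_tx_done(struct mbox_chan *chan)
{
	/* only consulted when the 'txdone_poll' method is used */
	return true;
}

static const struct mbox_chan_ops my_chan_ops = {
	.send_data	= my_send_data,
	.startup	= my_startup,
	.shutdown	= my_shutdown,
	.last_tx_done	= my_last_tx_done,
	/* peek_data is optional and omitted */
};

static irqreturn_t my_txdone_isr(int irq, void *p)
{
	/* the remote has consumed the last message on this channel */
	mbox_chan_txdone(p, 0);
	return IRQ_HANDLED;
}

static int my_probe(struct platform_device *pdev)
{
	struct mbox_controller *mbox;

	mbox = devm_kzalloc(&pdev->dev, sizeof(*mbox), GFP_KERNEL);
	mbox->chans = devm_kcalloc(&pdev->dev, MY_NUM_CHANS,
				   sizeof(struct mbox_chan), GFP_KERNEL);
	mbox->num_chans = MY_NUM_CHANS;
	mbox->ops = &my_chan_ops;
	mbox->dev = &pdev->dev;
	mbox->txdone_irq = true;	/* TX-done is signalled by an IRQ */

	/* a real driver would also request its IRQ(s) here and pass the
	   corresponding mbox_chan as dev_id so my_txdone_isr() can run */

	return mbox_controller_register(mbox);
}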
31
32
33 Part 2 - Client Driver (See include/linux/mailbox_client.h)
34
35 The client might want to operate in blocking mode (synchronously
36send a message through before returning) or non-blocking/async mode (submit
37a message and a callback function to the API and return immediately).
38
39
40struct demo_client {
41 struct mbox_client cl;
42 struct mbox_chan *mbox;
43 struct completion c;
44 bool async;
45 /* ... */
46};
47
48/*
49 * This is the handler for data received from remote. The behaviour is purely
50 * dependent upon the protocol. This is just an example.
51 */
52static void message_from_remote(struct mbox_client *cl, void *mssg)
53{
54	struct demo_client *dc = container_of(cl,
55						struct demo_client, cl);
56	if (dc->async) {
57 if (is_an_ack(mssg)) {
58 /* An ACK to our last sample sent */
59 return; /* Or do something else here */
60 } else { /* A new message from remote */
61 queue_req(mssg);
62 }
63 } else {
64 /* Remote f/w sends only ACK packets on this channel */
65 return;
66 }
67}
68
69static void sample_sent(struct mbox_client *cl, void *mssg, int r)
70{
71	struct demo_client *dc = container_of(cl,
72						struct demo_client, cl);
73 complete(&dc->c);
74}
75
76static void client_demo(struct platform_device *pdev)
77{
78 struct demo_client *dc_sync, *dc_async;
79 /* The controller already knows async_pkt and sync_pkt */
80 struct async_pkt ap;
81 struct sync_pkt sp;
82
83 dc_sync = kzalloc(sizeof(*dc_sync), GFP_KERNEL);
84 dc_async = kzalloc(sizeof(*dc_async), GFP_KERNEL);
85
86 /* Populate non-blocking mode client */
87 dc_async->cl.dev = &pdev->dev;
88 dc_async->cl.rx_callback = message_from_remote;
89 dc_async->cl.tx_done = sample_sent;
90 dc_async->cl.tx_block = false;
91 dc_async->cl.tx_tout = 0; /* doesn't matter here */
92 dc_async->cl.knows_txdone = false; /* depending upon protocol */
93 dc_async->async = true;
94 init_completion(&dc_async->c);
95
96 /* Populate blocking mode client */
97 dc_sync->cl.dev = &pdev->dev;
98 dc_sync->cl.rx_callback = message_from_remote;
99 dc_sync->cl.tx_done = NULL; /* operate in blocking mode */
100 dc_sync->cl.tx_block = true;
101	dc_sync->cl.tx_tout = 500; /* give up after half a second */
102 dc_sync->cl.knows_txdone = false; /* depending upon protocol */
103 dc_sync->async = false;
104
105 /* ASync mailbox is listed second in 'mboxes' property */
106 dc_async->mbox = mbox_request_channel(&dc_async->cl, 1);
107 /* Populate data packet */
108 /* ap.xxx = 123; etc */
109 /* Send async message to remote */
110 mbox_send_message(dc_async->mbox, &ap);
111
112 /* Sync mailbox is listed first in 'mboxes' property */
113 dc_sync->mbox = mbox_request_channel(&dc_sync->cl, 0);
114 /* Populate data packet */
115 /* sp.abc = 123; etc */
116 /* Send message to remote in blocking mode */
117 mbox_send_message(dc_sync->mbox, &sp);
118 /* At this point 'sp' has been sent */
119
120 /* Now wait for async chan to be done */
121 wait_for_completion(&dc_async->c);
122}
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index 0307e2875f21..a476b08a43e0 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -56,6 +56,13 @@ ip_forward_use_pmtu - BOOLEAN
56 0 - disabled 56 0 - disabled
57 1 - enabled 57 1 - enabled
58 58
59fwmark_reflect - BOOLEAN
60 Controls the fwmark of kernel-generated IPv4 reply packets that are not
61	associated with a socket (for example, TCP RSTs or ICMP echo replies).
62 If unset, these packets have a fwmark of zero. If set, they have the
63 fwmark of the packet they are replying to.
64 Default: 0
65
59route/max_size - INTEGER 66route/max_size - INTEGER
60 Maximum number of routes allowed in the kernel. Increase 67 Maximum number of routes allowed in the kernel. Increase
61 this when using large numbers of interfaces and/or routes. 68 this when using large numbers of interfaces and/or routes.
@@ -1201,6 +1208,13 @@ conf/all/forwarding - BOOLEAN
1201proxy_ndp - BOOLEAN 1208proxy_ndp - BOOLEAN
1202 Do proxy ndp. 1209 Do proxy ndp.
1203 1210
1211fwmark_reflect - BOOLEAN
1212 Controls the fwmark of kernel-generated IPv6 reply packets that are not
1213	associated with a socket (for example, TCP RSTs or ICMPv6 echo replies).
1214 If unset, these packets have a fwmark of zero. If set, they have the
1215 fwmark of the packet they are replying to.
1216 Default: 0
1217
1204conf/interface/*: 1218conf/interface/*:
1205 Change special settings per interface. 1219 Change special settings per interface.
1206 1220
diff --git a/Documentation/networking/timestamping.txt b/Documentation/networking/timestamping.txt
index 412f45ca2d73..1d6d02d6ba52 100644
--- a/Documentation/networking/timestamping.txt
+++ b/Documentation/networking/timestamping.txt
@@ -136,7 +136,7 @@ SOF_TIMESTAMPING_OPT_ID:
136 136
137 This option is implemented only for transmit timestamps. There, the 137 This option is implemented only for transmit timestamps. There, the
138 timestamp is always looped along with a struct sock_extended_err. 138 timestamp is always looped along with a struct sock_extended_err.
139 The option modifies field ee_info to pass an id that is unique 139 The option modifies field ee_data to pass an id that is unique
140 among all possibly concurrently outstanding timestamp requests for 140 among all possibly concurrently outstanding timestamp requests for
141 that socket. In practice, it is a monotonically increasing u32 141 that socket. In practice, it is a monotonically increasing u32
142 (that wraps). 142 (that wraps).
diff --git a/Documentation/power/pm_qos_interface.txt b/Documentation/power/pm_qos_interface.txt
index a5da5c7e7128..129f7c0e1483 100644
--- a/Documentation/power/pm_qos_interface.txt
+++ b/Documentation/power/pm_qos_interface.txt
@@ -5,7 +5,8 @@ performance expectations by drivers, subsystems and user space applications on
5one of the parameters. 5one of the parameters.
6 6
7Two different PM QoS frameworks are available: 7Two different PM QoS frameworks are available:
81. PM QoS classes for cpu_dma_latency, network_latency, network_throughput. 81. PM QoS classes for cpu_dma_latency, network_latency, network_throughput,
9memory_bandwidth.
92. the per-device PM QoS framework provides the API to manage the per-device latency 102. the per-device PM QoS framework provides the API to manage the per-device latency
10constraints and PM QoS flags. 11constraints and PM QoS flags.
11 12
@@ -13,6 +14,7 @@ Each parameters have defined units:
13 * latency: usec 14 * latency: usec
14 * timeout: usec 15 * timeout: usec
15 * throughput: kbs (kilo bit / sec) 16 * throughput: kbs (kilo bit / sec)
17 * memory bandwidth: mbs (mega bit / sec)
16 18
17 19
181. PM QoS framework 201. PM QoS framework
diff --git a/Documentation/prctl/Makefile b/Documentation/prctl/Makefile
index 3e3232dcb2b8..2948b7b124b9 100644
--- a/Documentation/prctl/Makefile
+++ b/Documentation/prctl/Makefile
@@ -1,5 +1,5 @@
1# List of programs to build 1# List of programs to build
2hostprogs-y := disable-tsc-ctxt-sw-stress-test disable-tsc-on-off-stress-test disable-tsc-test 2hostprogs-$(CONFIG_X86) := disable-tsc-ctxt-sw-stress-test disable-tsc-on-off-stress-test disable-tsc-test
3# Tell kbuild to always build the programs 3# Tell kbuild to always build the programs
4always := $(hostprogs-y) 4always := $(hostprogs-y)
5 5
diff --git a/Documentation/ptp/testptp.mk b/Documentation/ptp/testptp.mk
new file mode 100644
index 000000000000..4ef2d9755421
--- /dev/null
+++ b/Documentation/ptp/testptp.mk
@@ -0,0 +1,33 @@
1# PTP 1588 clock support - User space test program
2#
3# Copyright (C) 2010 OMICRON electronics GmbH
4#
5# This program is free software; you can redistribute it and/or modify
6# it under the terms of the GNU General Public License as published by
7# the Free Software Foundation; either version 2 of the License, or
8# (at your option) any later version.
9#
10# This program is distributed in the hope that it will be useful,
11# but WITHOUT ANY WARRANTY; without even the implied warranty of
12# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13# GNU General Public License for more details.
14#
15# You should have received a copy of the GNU General Public License
16# along with this program; if not, write to the Free Software
17# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18
19CC = $(CROSS_COMPILE)gcc
20INC = -I$(KBUILD_OUTPUT)/usr/include
21CFLAGS = -Wall $(INC)
22LDLIBS = -lrt
23PROGS = testptp
24
25all: $(PROGS)
26
27testptp: testptp.o
28
29clean:
30 rm -f testptp.o
31
32distclean: clean
33 rm -f $(PROGS)
diff --git a/Documentation/scsi/osd.txt b/Documentation/scsi/osd.txt
index da162f7fd5f5..5a9879bad073 100644
--- a/Documentation/scsi/osd.txt
+++ b/Documentation/scsi/osd.txt
@@ -184,8 +184,7 @@ Any problems, questions, bug reports, lonely OSD nights, please email:
184More up-to-date information can be found on: 184More up-to-date information can be found on:
185http://open-osd.org 185http://open-osd.org
186 186
187Boaz Harrosh <bharrosh@panasas.com> 187Boaz Harrosh <ooo@electrozaur.com>
188Benny Halevy <bhalevy@panasas.com>
189 188
190References 189References
191========== 190==========
diff --git a/Documentation/target/tcmu-design.txt b/Documentation/target/tcmu-design.txt
new file mode 100644
index 000000000000..5518465290bf
--- /dev/null
+++ b/Documentation/target/tcmu-design.txt
@@ -0,0 +1,378 @@
1Contents:
2
31) TCM Userspace Design
4 a) Background
5 b) Benefits
6 c) Design constraints
7 d) Implementation overview
8 i. Mailbox
9 ii. Command ring
10 iii. Data Area
11 e) Device discovery
12 f) Device events
13 g) Other contingencies
142) Writing a user pass-through handler
15 a) Discovering and configuring TCMU uio devices
16 b) Waiting for events on the device(s)
17 c) Managing the command ring
183) Command filtering and pass_level
194) A final note
20
21
22TCM Userspace Design
23--------------------
24
25TCM is another name for LIO, an in-kernel iSCSI target (server).
26Existing TCM targets run in the kernel. TCMU (TCM in Userspace)
27allows userspace programs to be written which act as iSCSI targets.
28This document describes the design.
29
30The existing kernel provides modules for different SCSI transport
31protocols. TCM also modularizes the data storage. There are existing
32modules for file, block device, RAM or using another SCSI device as
33storage. These are called "backstores" or "storage engines". These
34built-in modules are implemented entirely as kernel code.
35
36Background:
37
38In addition to modularizing the transport protocol used for carrying
39SCSI commands ("fabrics"), the Linux kernel target, LIO, also modularizes
40the actual data storage as well. These are referred to as "backstores"
41or "storage engines". The target comes with backstores that allow a
42file, a block device, RAM, or another SCSI device to be used for the
43local storage needed for the exported SCSI LUN. Like the rest of LIO,
44these are implemented entirely as kernel code.
45
46These backstores cover the most common use cases, but not all. One new
47use case that other non-kernel target solutions, such as tgt, are able
48to support is using Gluster's GLFS or Ceph's RBD as a backstore. The
49target then serves as a translator, allowing initiators to store data
50in these non-traditional networked storage systems, while still only
51using standard protocols themselves.
52
53If the target is a userspace process, supporting these is easy. tgt,
54for example, needs only a small adapter module for each, because the
55modules just use the available userspace libraries for RBD and GLFS.
56
57Adding support for these backstores in LIO is considerably more
58difficult, because LIO is entirely kernel code. Instead of undertaking
59the significant work to port the GLFS or RBD APIs and protocols to the
60kernel, another approach is to create a userspace pass-through
61backstore for LIO, "TCMU".
62
63
64Benefits:
65
66In addition to allowing relatively easy support for RBD and GLFS, TCMU
67will also allow easier development of new backstores. TCMU combines
68with the LIO loopback fabric to become something similar to FUSE
69(Filesystem in Userspace), but at the SCSI layer instead of the
70filesystem layer. A SUSE, if you will.
71
72The disadvantage is there are more distinct components to configure, and
73potentially to malfunction. This is unavoidable, but hopefully not
74fatal if we're careful to keep things as simple as possible.
75
76Design constraints:
77
78- Good performance: high throughput, low latency
79- Cleanly handle if userspace:
80 1) never attaches
81 2) hangs
82 3) dies
83 4) misbehaves
84- Allow future flexibility in user & kernel implementations
85- Be reasonably memory-efficient
86- Simple to configure & run
87- Simple to write a userspace backend
88
89
90Implementation overview:
91
92The core of the TCMU interface is a memory region that is shared
93between kernel and userspace. Within this region is: a control area
94(mailbox); a lockless producer/consumer circular buffer for commands
95to be passed up, and status returned; and an in/out data buffer area.
96
97TCMU uses the pre-existing UIO subsystem. UIO allows device driver
98development in userspace, and this is conceptually very close to the
99TCMU use case, except instead of a physical device, TCMU implements a
100memory-mapped layout designed for SCSI commands. Using UIO also
101benefits TCMU by handling device introspection (e.g. a way for
102userspace to determine how large the shared region is) and signaling
103mechanisms in both directions.
104
105There are no embedded pointers in the memory region. Everything is
106expressed as an offset from the region's starting address. This allows
107the ring to still work if the user process dies and is restarted with
108the region mapped at a different virtual address.
109
110See target_core_user.h for the struct definitions.
111
112The Mailbox:
113
114The mailbox is always at the start of the shared memory region, and
115contains a version, details about the starting offset and size of the
116command ring, and head and tail pointers to be used by the kernel and
117userspace (respectively) to put commands on the ring, and indicate
118when the commands are completed.
119
120version - 1 (userspace should abort if otherwise)
121flags - none yet defined.
122cmdr_off - The offset of the start of the command ring from the start
123of the memory region, to account for the mailbox size.
124cmdr_size - The size of the command ring. This does *not* need to be a
125power of two.
126cmd_head - Modified by the kernel to indicate when a command has been
127placed on the ring.
128cmd_tail - Modified by userspace to indicate when it has completed
129processing of a command.
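
A simplified sketch of the layout these fields imply is shown below; the
authoritative definition (exact types, alignment and padding) is the one
in target_core_user.h:

#include <linux/types.h>

struct tcmu_mailbox_sketch {		/* illustrative only */
	__u16 version;			/* 1 for this interface version */
	__u16 flags;			/* none defined yet */
	__u32 cmdr_off;			/* start of the command ring */
	__u32 cmdr_size;		/* size of the command ring */
	__u32 cmd_head;			/* written by the kernel */
	__u32 cmd_tail;			/* written by userspace */
};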
130
131The Command Ring:
132
133Commands are placed on the ring by the kernel incrementing
134mailbox.cmd_head by the size of the command, modulo cmdr_size, and
135then signaling userspace via uio_event_notify(). Once the command is
136completed, userspace updates mailbox.cmd_tail in the same way and
137signals the kernel via a 4-byte write(). When cmd_head equals
138cmd_tail, the ring is empty -- no commands are currently waiting to be
139processed by userspace.
140
141TCMU commands start with a common header containing "len_op", a 32-bit
142value that stores the length, as well as the opcode in the lowest
143unused bits. Currently only two opcodes are defined, TCMU_OP_PAD and
144TCMU_OP_CMD. When userspace encounters a command with PAD opcode, it
145should skip ahead by the bytes in "length". (The kernel inserts PAD
146entries to ensure each CMD entry fits contiguously into the circular
147buffer.)
148
149When userspace handles a CMD, it finds the SCSI CDB (Command Data
150Block) via tcmu_cmd_entry.req.cdb_off. This is an offset from the
151start of the overall shared memory region, not the entry. The data
152in/out buffers are accessible via the req.iov[] array. Note that
153each iov.iov_base is also an offset from the start of the region.
154
155TCMU currently does not support BIDI operations.
156
157When completing a command, userspace sets rsp.scsi_status, and
158rsp.sense_buffer if necessary. Userspace then increments
159mailbox.cmd_tail by entry.hdr.length (mod cmdr_size) and signals the
160kernel via the UIO method, a 4-byte write to the file descriptor.
161
162The Data Area:
163
164This is shared-memory space after the command ring. The organization
165of this area is not defined in the TCMU interface, and userspace
166should access only the parts referenced by pending iovs.
167
168
169Device Discovery:
170
171Other devices may be using UIO besides TCMU. Unrelated user processes
172may also be handling different sets of TCMU devices. TCMU userspace
173processes must find their devices by scanning sysfs
174class/uio/uio*/name. For TCMU devices, these names will be of the
175format:
176
177tcm-user/<hba_num>/<device_name>/<subtype>/<path>
178
179where "tcm-user" is common for all TCMU-backed UIO devices. <hba_num>
180and <device_name> allow userspace to find the device's path in the
181kernel target's configfs tree. Assuming the usual mount point, it is
182found at:
183
184/sys/kernel/config/target/core/user_<hba_num>/<device_name>
185
186This location contains attributes such as "hw_block_size", that
187userspace needs to know for correct operation.
188
189<subtype> will be a userspace-process-unique string to identify the
190TCMU device as expecting to be backed by a certain handler, and <path>
191will be an additional handler-specific string for the user process to
192configure the device, if needed. The name cannot contain ':', due to
193LIO limitations.
194
195For all devices so discovered, the user handler opens /dev/uioX and
196calls mmap():
197
198mmap(NULL, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0)
199
200where size must be equal to the value read from
201/sys/class/uio/uioX/maps/map0/size.
202
203
204Device Events:
205
206If a new device is added or removed, a notification will be broadcast
207over netlink, using a generic netlink family name of "TCM-USER" and a
208multicast group named "config". This will include the UIO name as
209described in the previous section, as well as the UIO minor
210number. This should allow userspace to identify both the UIO device and
211the LIO device, so that after determining the device is supported
212(based on subtype) it can take the appropriate action.
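
A sketch of listening for these notifications with libnl-3's generic
netlink helpers (assumes libnl-genl-3 is available; attribute parsing
and error handling are omitted):

#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>

static int handle_event(struct nl_msg *msg, void *arg)
{
	/* parse attributes here to get the UIO name and minor number */
	return NL_OK;
}

static void watch_tcmu_events(void)
{
	struct nl_sock *sk = nl_socket_alloc();
	int grp;

	genl_connect(sk);
	grp = genl_ctrl_resolve_grp(sk, "TCM-USER", "config");
	nl_socket_add_membership(sk, grp);

	nl_socket_disable_seq_check(sk);	/* events are unsolicited */
	nl_socket_modify_cb(sk, NL_CB_VALID, NL_CB_CUSTOM, handle_event, NULL);

	for (;;)
		nl_recvmsgs_default(sk);	/* blocks; calls handle_event() */
}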
213
214
215Other contingencies:
216
217Userspace handler process never attaches:
218
219- TCMU will post commands, and then abort them after a timeout period
220 (30 seconds.)
221
222Userspace handler process is killed:
223
224- It is still possible to restart and re-connect to TCMU
225 devices. Command ring is preserved. However, after the timeout period,
226 the kernel will abort pending tasks.
227
228Userspace handler process hangs:
229
230- The kernel will abort pending tasks after a timeout period.
231
232Userspace handler process is malicious:
233
234- The process can trivially break the handling of devices it controls,
235 but should not be able to access kernel memory outside its shared
236 memory areas.
237
238
239Writing a user pass-through handler (with example code)
240-------------------------------------------------------
241
242A user process handling a TCMU device must support the following:
243
244a) Discovering and configuring TCMU uio devices
245b) Waiting for events on the device(s)
246c) Managing the command ring: Parsing operations and commands,
247 performing work as needed, setting response fields (scsi_status and
248 possibly sense_buffer), updating cmd_tail, and notifying the kernel
249 that work has been finished
250
251First, consider instead writing a plugin for tcmu-runner. tcmu-runner
252implements all of this, and provides a higher-level API for plugin
253authors.
254
255TCMU is designed so that multiple unrelated processes can manage TCMU
256devices separately. All handlers should make sure to only open their
257devices, based upon a known subtype string.
258
259a) Discovering and configuring TCMU UIO devices:
260
261(error checking omitted for brevity)
262
263int fd, dev_fd;
264char buf[256];
265unsigned long long map_len;
266void *map;
267
268fd = open("/sys/class/uio/uio0/name", O_RDONLY);
269ret = read(fd, buf, sizeof(buf));
270close(fd);
271buf[ret-1] = '\0'; /* null-terminate and chop off the \n */
272
273/* we only want uio devices whose name is a format we expect */
274if (strncmp(buf, "tcm-user", 8))
275 exit(-1);
276
277/* Further checking for subtype also needed here */
278
279fd = open("/sys/class/uio/uio0/maps/map0/size", O_RDONLY); /* same device as above */
280ret = read(fd, buf, sizeof(buf));
281close(fd);
282buf[ret-1] = '\0'; /* null-terminate and chop off the \n */
283
284map_len = strtoull(buf, NULL, 0);
285
286dev_fd = open("/dev/uio0", O_RDWR);
287map = mmap(NULL, map_len, PROT_READ|PROT_WRITE, MAP_SHARED, dev_fd, 0);
288
289
290b) Waiting for events on the device(s)
291
292while (1) {
293 char buf[4];
294
295 int ret = read(dev_fd, buf, 4); /* will block */
296
297 handle_device_events(dev_fd, map);
298}
299
300
301c) Managing the command ring
302
303#include <linux/target_core_user.h>
304
305int handle_device_events(int fd, void *map)
306{
307 struct tcmu_mailbox *mb = map;
308 struct tcmu_cmd_entry *ent = (void *) mb + mb->cmdr_off + mb->cmd_tail;
309 int did_some_work = 0;
310
311 /* Process events from cmd ring until we catch up with cmd_head */
312 while (ent != (void *)mb + mb->cmdr_off + mb->cmd_head) {
313
314 if (tcmu_hdr_get_op(&ent->hdr) == TCMU_OP_CMD) {
315 uint8_t *cdb = (void *)mb + ent->req.cdb_off;
316 bool success = true;
317
318 /* Handle command here. */
319 printf("SCSI opcode: 0x%x\n", cdb[0]);
320
321 /* Set response fields */
322 if (success)
323 ent->rsp.scsi_status = SCSI_NO_SENSE;
324 else {
325 /* Also fill in rsp->sense_buffer here */
326 ent->rsp.scsi_status = SCSI_CHECK_CONDITION;
327 }
328 }
329 else {
330 /* Do nothing for PAD entries */
331 }
332
333 /* update cmd_tail */
334 mb->cmd_tail = (mb->cmd_tail + tcmu_hdr_get_len(&ent->hdr)) % mb->cmdr_size;
335 ent = (void *) mb + mb->cmdr_off + mb->cmd_tail;
336 did_some_work = 1;
337 }
338
339 /* Notify the kernel that work has been finished */
340 if (did_some_work) {
341 uint32_t buf = 0;
342
343 write(fd, &buf, 4);
344 }
345
346 return 0;
347}
348
349
350Command filtering and pass_level
351--------------------------------
352
353TCMU supports a "pass_level" option with valid values of 0 or 1. When
354the value is 0 (the default), nearly all SCSI commands received for
355the device are passed through to the handler. This allows maximum
356flexibility but increases the amount of code required by the handler,
357to support all mandatory SCSI commands. If pass_level is set to 1,
358then only IO-related commands are presented, and the rest are handled
359by LIO's in-kernel command emulation. The commands presented at level
3601 include all versions of:
361
362READ
363WRITE
364WRITE_VERIFY
365XDWRITEREAD
366WRITE_SAME
367COMPARE_AND_WRITE
368SYNCHRONIZE_CACHE
369UNMAP
370
371
372A final note
373------------
374
375Please be careful to return status codes as defined by the SCSI
376specifications. These are different from some values defined in the
377scsi/scsi.h include file. For example, CHECK CONDITION's status code
378is 2, not 1.
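
As an illustration (the constant names below are made up for this
document), these are the raw SAM status byte values to place in
rsp.scsi_status:

/* raw SAM status byte values, not the right-shifted scsi/scsi.h style */
#define EXAMPLE_SAM_STAT_GOOD            0x00
#define EXAMPLE_SAM_STAT_CHECK_CONDITION 0x02
#define EXAMPLE_SAM_STAT_BUSY            0x08
#define EXAMPLE_SAM_STAT_TASK_SET_FULL   0x28

/* e.g.: ent->rsp.scsi_status = EXAMPLE_SAM_STAT_CHECK_CONDITION; */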
diff --git a/Documentation/vDSO/Makefile b/Documentation/vDSO/Makefile
index 2b99e57207c1..ee075c3d2124 100644
--- a/Documentation/vDSO/Makefile
+++ b/Documentation/vDSO/Makefile
@@ -10,3 +10,6 @@ always := $(hostprogs-y)
10HOSTCFLAGS := -I$(objtree)/usr/include -std=gnu99 10HOSTCFLAGS := -I$(objtree)/usr/include -std=gnu99
11HOSTCFLAGS_vdso_standalone_test_x86.o := -fno-asynchronous-unwind-tables -fno-stack-protector 11HOSTCFLAGS_vdso_standalone_test_x86.o := -fno-asynchronous-unwind-tables -fno-stack-protector
12HOSTLOADLIBES_vdso_standalone_test_x86 := -nostdlib 12HOSTLOADLIBES_vdso_standalone_test_x86 := -nostdlib
13ifeq ($(CONFIG_X86_32),y)
14HOSTLOADLIBES_vdso_standalone_test_x86 += -lgcc_s
15endif
diff --git a/Documentation/vDSO/vdso_standalone_test_x86.c b/Documentation/vDSO/vdso_standalone_test_x86.c
index d46240265c50..93b0ebf8cc38 100644
--- a/Documentation/vDSO/vdso_standalone_test_x86.c
+++ b/Documentation/vDSO/vdso_standalone_test_x86.c
@@ -63,7 +63,7 @@ static inline void linux_exit(int code)
63 x86_syscall3(__NR_exit, code, 0, 0); 63 x86_syscall3(__NR_exit, code, 0, 0);
64} 64}
65 65
66void to_base10(char *lastdig, uint64_t n) 66void to_base10(char *lastdig, time_t n)
67{ 67{
68 while (n) { 68 while (n) {
69 *lastdig = (n % 10) + '0'; 69 *lastdig = (n % 10) + '0';
diff --git a/Documentation/video4linux/vivid.txt b/Documentation/video4linux/vivid.txt
index eeb11a28e4fc..e5a940e3d304 100644
--- a/Documentation/video4linux/vivid.txt
+++ b/Documentation/video4linux/vivid.txt
@@ -221,12 +221,11 @@ ccs_out_mode: specify the allowed video output crop/compose/scaling combination
221 key, not quality. 221 key, not quality.
222 222
223multiplanar: select whether each device instance supports multi-planar formats, 223multiplanar: select whether each device instance supports multi-planar formats,
224 and thus the V4L2 multi-planar API. By default the first device instance 224 and thus the V4L2 multi-planar API. By default device instances are
225 is single-planar, the second multi-planar, and it keeps alternating. 225 single-planar.
226 226
227 This module option can override that for each instance. Values are: 227 This module option can override that for each instance. Values are:
228 228
229 0: use alternating single and multi-planar devices.
230 1: this is a single-planar instance. 229 1: this is a single-planar instance.
231 2: this is a multi-planar instance. 230 2: this is a multi-planar instance.
232 231
@@ -975,9 +974,8 @@ is set, then the alpha component is only used for the color red and set to
9750 otherwise. 9740 otherwise.
976 975
977The driver has to be configured to support the multiplanar formats. By default 976The driver has to be configured to support the multiplanar formats. By default
978the first driver instance is single-planar, the second is multi-planar, and it 977the driver instances are single-planar. This can be changed by setting the
979keeps alternating. This can be changed by setting the multiplanar module option, 978multiplanar module option, see section 1 for more details on that option.
980see section 1 for more details on that option.
981 979
982If the driver instance is using the multiplanar formats/API, then the first 980If the driver instance is using the multiplanar formats/API, then the first
983single planar format (YUYV) and the multiplanar NV16M and NV61M formats the 981single planar format (YUYV) and the multiplanar NV16M and NV61M formats the
@@ -1021,7 +1019,7 @@ the output overlay for the video output, turn on video looping and capture
1021to see the blended framebuffer overlay that's being written to by the second 1019to see the blended framebuffer overlay that's being written to by the second
1022instance. This setup would require the following commands: 1020instance. This setup would require the following commands:
1023 1021
1024 $ sudo modprobe vivid n_devs=2 node_types=0x10101,0x1 multiplanar=1,1 1022 $ sudo modprobe vivid n_devs=2 node_types=0x10101,0x1
1025 $ v4l2-ctl -d1 --find-fb 1023 $ v4l2-ctl -d1 --find-fb
1026 /dev/fb1 is the framebuffer associated with base address 0x12800000 1024 /dev/fb1 is the framebuffer associated with base address 0x12800000
1027 $ sudo v4l2-ctl -d2 --set-fbuf fb=1 1025 $ sudo v4l2-ctl -d2 --set-fbuf fb=1
diff --git a/Documentation/vm/hugetlbpage.txt b/Documentation/vm/hugetlbpage.txt
index bdd4bb97fff7..b64e0af9cc56 100644
--- a/Documentation/vm/hugetlbpage.txt
+++ b/Documentation/vm/hugetlbpage.txt
@@ -274,7 +274,7 @@ This command mounts a (pseudo) filesystem of type hugetlbfs on the directory
274/mnt/huge. Any files created on /mnt/huge uses huge pages. The uid and gid 274/mnt/huge. Any files created on /mnt/huge uses huge pages. The uid and gid
275options sets the owner and group of the root of the file system. By default 275options sets the owner and group of the root of the file system. By default
276the uid and gid of the current process are taken. The mode option sets the 276the uid and gid of the current process are taken. The mode option sets the
277mode of root of file system to value & 0777. This value is given in octal. 277mode of root of file system to value & 01777. This value is given in octal.
278By default the value 0755 is picked. The size option sets the maximum value of 278By default the value 0755 is picked. The size option sets the maximum value of
279memory (huge pages) allowed for that filesystem (/mnt/huge). The size is 279memory (huge pages) allowed for that filesystem (/mnt/huge). The size is
280rounded down to HPAGE_SIZE. The option nr_inodes sets the maximum number of 280rounded down to HPAGE_SIZE. The option nr_inodes sets the maximum number of
diff --git a/MAINTAINERS b/MAINTAINERS
index a20df9bf8ab0..c721042e7e45 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1543,6 +1543,7 @@ F: arch/arm/mach-pxa/include/mach/z2.h
1543 1543
1544ARM/ZYNQ ARCHITECTURE 1544ARM/ZYNQ ARCHITECTURE
1545M: Michal Simek <michal.simek@xilinx.com> 1545M: Michal Simek <michal.simek@xilinx.com>
1546R: Sören Brinkmann <soren.brinkmann@xilinx.com>
1546L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1547L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
1547W: http://wiki.xilinx.com 1548W: http://wiki.xilinx.com
1548T: git git://git.xilinx.com/linux-xlnx.git 1549T: git git://git.xilinx.com/linux-xlnx.git
@@ -1749,6 +1750,13 @@ M: Nicolas Ferre <nicolas.ferre@atmel.com>
1749S: Supported 1750S: Supported
1750F: drivers/spi/spi-atmel.* 1751F: drivers/spi/spi-atmel.*
1751 1752
1753ATMEL SSC DRIVER
1754M: Bo Shen <voice.shen@atmel.com>
1755L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
1756S: Supported
1757F: drivers/misc/atmel-ssc.c
1758F: include/linux/atmel-ssc.h
1759
1752ATMEL Timer Counter (TC) AND CLOCKSOURCE DRIVERS 1760ATMEL Timer Counter (TC) AND CLOCKSOURCE DRIVERS
1753M: Nicolas Ferre <nicolas.ferre@atmel.com> 1761M: Nicolas Ferre <nicolas.ferre@atmel.com>
1754L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1762L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1820,7 +1828,7 @@ F: include/net/ax25.h
1820F: net/ax25/ 1828F: net/ax25/
1821 1829
1822AZ6007 DVB DRIVER 1830AZ6007 DVB DRIVER
1823M: Mauro Carvalho Chehab <m.chehab@samsung.com> 1831M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
1824L: linux-media@vger.kernel.org 1832L: linux-media@vger.kernel.org
1825W: http://linuxtv.org 1833W: http://linuxtv.org
1826T: git git://linuxtv.org/media_tree.git 1834T: git git://linuxtv.org/media_tree.git
@@ -2064,8 +2072,9 @@ F: drivers/clocksource/bcm_kona_timer.c
2064 2072
2065BROADCOM BCM2835 ARM ARCHITECTURE 2073BROADCOM BCM2835 ARM ARCHITECTURE
2066M: Stephen Warren <swarren@wwwdotorg.org> 2074M: Stephen Warren <swarren@wwwdotorg.org>
2075M: Lee Jones <lee@kernel.org>
2067L: linux-rpi-kernel@lists.infradead.org (moderated for non-subscribers) 2076L: linux-rpi-kernel@lists.infradead.org (moderated for non-subscribers)
2068T: git git://git.kernel.org/pub/scm/linux/kernel/git/swarren/linux-rpi.git 2077T: git git://git.kernel.org/pub/scm/linux/kernel/git/rpi/linux-rpi.git
2069S: Maintained 2078S: Maintained
2070N: bcm2835 2079N: bcm2835
2071 2080
@@ -2189,7 +2198,7 @@ F: Documentation/filesystems/btrfs.txt
2189F: fs/btrfs/ 2198F: fs/btrfs/
2190 2199
2191BTTV VIDEO4LINUX DRIVER 2200BTTV VIDEO4LINUX DRIVER
2192M: Mauro Carvalho Chehab <m.chehab@samsung.com> 2201M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
2193L: linux-media@vger.kernel.org 2202L: linux-media@vger.kernel.org
2194W: http://linuxtv.org 2203W: http://linuxtv.org
2195T: git git://linuxtv.org/media_tree.git 2204T: git git://linuxtv.org/media_tree.git
@@ -2710,7 +2719,7 @@ F: drivers/media/common/cx2341x*
2710F: include/media/cx2341x* 2719F: include/media/cx2341x*
2711 2720
2712CX88 VIDEO4LINUX DRIVER 2721CX88 VIDEO4LINUX DRIVER
2713M: Mauro Carvalho Chehab <m.chehab@samsung.com> 2722M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
2714L: linux-media@vger.kernel.org 2723L: linux-media@vger.kernel.org
2715W: http://linuxtv.org 2724W: http://linuxtv.org
2716T: git git://linuxtv.org/media_tree.git 2725T: git git://linuxtv.org/media_tree.git
@@ -2735,6 +2744,13 @@ W: http://www.chelsio.com
2735S: Supported 2744S: Supported
2736F: drivers/net/ethernet/chelsio/cxgb3/ 2745F: drivers/net/ethernet/chelsio/cxgb3/
2737 2746
2747CXGB3 ISCSI DRIVER (CXGB3I)
2748M: Karen Xie <kxie@chelsio.com>
2749L: linux-scsi@vger.kernel.org
2750W: http://www.chelsio.com
2751S: Supported
2752F: drivers/scsi/cxgbi/cxgb3i
2753
2738CXGB3 IWARP RNIC DRIVER (IW_CXGB3) 2754CXGB3 IWARP RNIC DRIVER (IW_CXGB3)
2739M: Steve Wise <swise@chelsio.com> 2755M: Steve Wise <swise@chelsio.com>
2740L: linux-rdma@vger.kernel.org 2756L: linux-rdma@vger.kernel.org
@@ -2749,6 +2765,13 @@ W: http://www.chelsio.com
2749S: Supported 2765S: Supported
2750F: drivers/net/ethernet/chelsio/cxgb4/ 2766F: drivers/net/ethernet/chelsio/cxgb4/
2751 2767
2768CXGB4 ISCSI DRIVER (CXGB4I)
2769M: Karen Xie <kxie@chelsio.com>
2770L: linux-scsi@vger.kernel.org
2771W: http://www.chelsio.com
2772S: Supported
2773F: drivers/scsi/cxgbi/cxgb4i
2774
2752CXGB4 IWARP RNIC DRIVER (IW_CXGB4) 2775CXGB4 IWARP RNIC DRIVER (IW_CXGB4)
2753M: Steve Wise <swise@chelsio.com> 2776M: Steve Wise <swise@chelsio.com>
2754L: linux-rdma@vger.kernel.org 2777L: linux-rdma@vger.kernel.org
@@ -3379,7 +3402,7 @@ F: fs/ecryptfs/
3379EDAC-CORE 3402EDAC-CORE
3380M: Doug Thompson <dougthompson@xmission.com> 3403M: Doug Thompson <dougthompson@xmission.com>
3381M: Borislav Petkov <bp@alien8.de> 3404M: Borislav Petkov <bp@alien8.de>
3382M: Mauro Carvalho Chehab <m.chehab@samsung.com> 3405M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
3383L: linux-edac@vger.kernel.org 3406L: linux-edac@vger.kernel.org
3384W: bluesmoke.sourceforge.net 3407W: bluesmoke.sourceforge.net
3385S: Supported 3408S: Supported
@@ -3428,7 +3451,7 @@ S: Maintained
3428F: drivers/edac/e7xxx_edac.c 3451F: drivers/edac/e7xxx_edac.c
3429 3452
3430EDAC-GHES 3453EDAC-GHES
3431M: Mauro Carvalho Chehab <m.chehab@samsung.com> 3454M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
3432L: linux-edac@vger.kernel.org 3455L: linux-edac@vger.kernel.org
3433W: bluesmoke.sourceforge.net 3456W: bluesmoke.sourceforge.net
3434S: Maintained 3457S: Maintained
@@ -3456,21 +3479,21 @@ S: Maintained
3456F: drivers/edac/i5000_edac.c 3479F: drivers/edac/i5000_edac.c
3457 3480
3458EDAC-I5400 3481EDAC-I5400
3459M: Mauro Carvalho Chehab <m.chehab@samsung.com> 3482M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
3460L: linux-edac@vger.kernel.org 3483L: linux-edac@vger.kernel.org
3461W: bluesmoke.sourceforge.net 3484W: bluesmoke.sourceforge.net
3462S: Maintained 3485S: Maintained
3463F: drivers/edac/i5400_edac.c 3486F: drivers/edac/i5400_edac.c
3464 3487
3465EDAC-I7300 3488EDAC-I7300
3466M: Mauro Carvalho Chehab <m.chehab@samsung.com> 3489M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
3467L: linux-edac@vger.kernel.org 3490L: linux-edac@vger.kernel.org
3468W: bluesmoke.sourceforge.net 3491W: bluesmoke.sourceforge.net
3469S: Maintained 3492S: Maintained
3470F: drivers/edac/i7300_edac.c 3493F: drivers/edac/i7300_edac.c
3471 3494
3472EDAC-I7CORE 3495EDAC-I7CORE
3473M: Mauro Carvalho Chehab <m.chehab@samsung.com> 3496M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
3474L: linux-edac@vger.kernel.org 3497L: linux-edac@vger.kernel.org
3475W: bluesmoke.sourceforge.net 3498W: bluesmoke.sourceforge.net
3476S: Maintained 3499S: Maintained
@@ -3513,7 +3536,7 @@ S: Maintained
3513F: drivers/edac/r82600_edac.c 3536F: drivers/edac/r82600_edac.c
3514 3537
3515EDAC-SBRIDGE 3538EDAC-SBRIDGE
3516M: Mauro Carvalho Chehab <m.chehab@samsung.com> 3539M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
3517L: linux-edac@vger.kernel.org 3540L: linux-edac@vger.kernel.org
3518W: bluesmoke.sourceforge.net 3541W: bluesmoke.sourceforge.net
3519S: Maintained 3542S: Maintained
@@ -3573,7 +3596,7 @@ S: Maintained
3573F: drivers/net/ethernet/ibm/ehea/ 3596F: drivers/net/ethernet/ibm/ehea/
3574 3597
3575EM28XX VIDEO4LINUX DRIVER 3598EM28XX VIDEO4LINUX DRIVER
3576M: Mauro Carvalho Chehab <m.chehab@samsung.com> 3599M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
3577L: linux-media@vger.kernel.org 3600L: linux-media@vger.kernel.org
3578W: http://linuxtv.org 3601W: http://linuxtv.org
3579T: git git://linuxtv.org/media_tree.git 3602T: git git://linuxtv.org/media_tree.git
@@ -4305,8 +4328,10 @@ F: Documentation/blockdev/cpqarray.txt
4305F: drivers/block/cpqarray.* 4328F: drivers/block/cpqarray.*
4306 4329
4307HEWLETT-PACKARD SMART ARRAY RAID DRIVER (hpsa) 4330HEWLETT-PACKARD SMART ARRAY RAID DRIVER (hpsa)
4308M: "Stephen M. Cameron" <scameron@beardog.cce.hp.com> 4331M: Don Brace <don.brace@pmcs.com>
4309L: iss_storagedev@hp.com 4332L: iss_storagedev@hp.com
4333L: storagedev@pmcs.com
4334L: linux-scsi@vger.kernel.org
4310S: Supported 4335S: Supported
4311F: Documentation/scsi/hpsa.txt 4336F: Documentation/scsi/hpsa.txt
4312F: drivers/scsi/hpsa*.[ch] 4337F: drivers/scsi/hpsa*.[ch]
@@ -4314,8 +4339,10 @@ F: include/linux/cciss*.h
4314F: include/uapi/linux/cciss*.h 4339F: include/uapi/linux/cciss*.h
4315 4340
4316HEWLETT-PACKARD SMART CISS RAID DRIVER (cciss) 4341HEWLETT-PACKARD SMART CISS RAID DRIVER (cciss)
4317M: Mike Miller <mike.miller@hp.com> 4342M: Don Brace <don.brace@pmcs.com>
4318L: iss_storagedev@hp.com 4343L: iss_storagedev@hp.com
4344L: storagedev@pmcs.com
4345L: linux-scsi@vger.kernel.org
4319S: Supported 4346S: Supported
4320F: Documentation/blockdev/cciss.txt 4347F: Documentation/blockdev/cciss.txt
4321F: drivers/block/cciss* 4348F: drivers/block/cciss*
@@ -4601,7 +4628,7 @@ S: Supported
4601F: drivers/crypto/nx/ 4628F: drivers/crypto/nx/
4602 4629
4603IBM Power 842 compression accelerator 4630IBM Power 842 compression accelerator
4604M: Nathan Fontenot <nfont@linux.vnet.ibm.com> 4631M: Dan Streetman <ddstreet@us.ibm.com>
4605S: Supported 4632S: Supported
4606F: drivers/crypto/nx/nx-842.c 4633F: drivers/crypto/nx/nx-842.c
4607F: include/linux/nx842.h 4634F: include/linux/nx842.h
@@ -4703,6 +4730,7 @@ L: linux-iio@vger.kernel.org
4703S: Maintained 4730S: Maintained
4704F: drivers/iio/ 4731F: drivers/iio/
4705F: drivers/staging/iio/ 4732F: drivers/staging/iio/
4733F: include/linux/iio/
4706 4734
4707IKANOS/ADI EAGLE ADSL USB DRIVER 4735IKANOS/ADI EAGLE ADSL USB DRIVER
4708M: Matthieu Castet <castet.matthieu@free.fr> 4736M: Matthieu Castet <castet.matthieu@free.fr>
@@ -5834,6 +5862,14 @@ S: Maintained
5834F: drivers/net/macvlan.c 5862F: drivers/net/macvlan.c
5835F: include/linux/if_macvlan.h 5863F: include/linux/if_macvlan.h
5836 5864
5865MAILBOX API
5866M: Jassi Brar <jassisinghbrar@gmail.com>
5867L: linux-kernel@vger.kernel.org
5868S: Maintained
5869F: drivers/mailbox/
5870F: include/linux/mailbox_client.h
5871F: include/linux/mailbox_controller.h
5872
5837MAN-PAGES: MANUAL PAGES FOR LINUX -- Sections 2, 3, 4, 5, and 7 5873MAN-PAGES: MANUAL PAGES FOR LINUX -- Sections 2, 3, 4, 5, and 7
5838M: Michael Kerrisk <mtk.manpages@gmail.com> 5874M: Michael Kerrisk <mtk.manpages@gmail.com>
5839W: http://www.kernel.org/doc/man-pages 5875W: http://www.kernel.org/doc/man-pages
@@ -5926,7 +5962,7 @@ S: Maintained
5926F: drivers/media/radio/radio-maxiradio* 5962F: drivers/media/radio/radio-maxiradio*
5927 5963
5928MEDIA INPUT INFRASTRUCTURE (V4L/DVB) 5964MEDIA INPUT INFRASTRUCTURE (V4L/DVB)
5929M: Mauro Carvalho Chehab <m.chehab@samsung.com> 5965M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
5930P: LinuxTV.org Project 5966P: LinuxTV.org Project
5931L: linux-media@vger.kernel.org 5967L: linux-media@vger.kernel.org
5932W: http://linuxtv.org 5968W: http://linuxtv.org
@@ -6575,6 +6611,23 @@ T: git git://git.kernel.org/pub/scm/linux/kernel/git/tmlind/linux-omap.git
6575S: Maintained 6611S: Maintained
6576F: arch/arm/*omap*/ 6612F: arch/arm/*omap*/
6577F: drivers/i2c/busses/i2c-omap.c 6613F: drivers/i2c/busses/i2c-omap.c
6614F: drivers/irqchip/irq-omap-intc.c
6615F: drivers/mfd/*omap*.c
6616F: drivers/mfd/menelaus.c
6617F: drivers/mfd/palmas.c
6618F: drivers/mfd/tps65217.c
6619F: drivers/mfd/tps65218.c
6620F: drivers/mfd/tps65910.c
6621F: drivers/mfd/twl-core.[ch]
6622F: drivers/mfd/twl4030*.c
6623F: drivers/mfd/twl6030*.c
6624F: drivers/mfd/twl6040*.c
6625F: drivers/regulator/palmas-regulator*.c
6626F: drivers/regulator/pbias-regulator.c
6627F: drivers/regulator/tps65217-regulator.c
6628F: drivers/regulator/tps65218-regulator.c
6629F: drivers/regulator/tps65910-regulator.c
6630F: drivers/regulator/twl-regulator.c
6578F: include/linux/i2c-omap.h 6631F: include/linux/i2c-omap.h
6579 6632
6580OMAP DEVICE TREE SUPPORT 6633OMAP DEVICE TREE SUPPORT
@@ -6585,6 +6638,9 @@ L: devicetree@vger.kernel.org
6585S: Maintained 6638S: Maintained
6586F: arch/arm/boot/dts/*omap* 6639F: arch/arm/boot/dts/*omap*
6587F: arch/arm/boot/dts/*am3* 6640F: arch/arm/boot/dts/*am3*
6641F: arch/arm/boot/dts/*am4*
6642F: arch/arm/boot/dts/*am5*
6643F: arch/arm/boot/dts/*dra7*
6588 6644
6589OMAP CLOCK FRAMEWORK SUPPORT 6645OMAP CLOCK FRAMEWORK SUPPORT
6590M: Paul Walmsley <paul@pwsan.com> 6646M: Paul Walmsley <paul@pwsan.com>
@@ -6822,7 +6878,7 @@ S: Orphan
6822F: drivers/net/wireless/orinoco/ 6878F: drivers/net/wireless/orinoco/
6823 6879
6824OSD LIBRARY and FILESYSTEM 6880OSD LIBRARY and FILESYSTEM
6825M: Boaz Harrosh <bharrosh@panasas.com> 6881M: Boaz Harrosh <ooo@electrozaur.com>
6826M: Benny Halevy <bhalevy@primarydata.com> 6882M: Benny Halevy <bhalevy@primarydata.com>
6827L: osd-dev@open-osd.org 6883L: osd-dev@open-osd.org
6828W: http://open-osd.org 6884W: http://open-osd.org
@@ -6832,6 +6888,14 @@ F: drivers/scsi/osd/
6832F: include/scsi/osd_* 6888F: include/scsi/osd_*
6833F: fs/exofs/ 6889F: fs/exofs/
6834 6890
6891OVERLAY FILESYSTEM
6892M: Miklos Szeredi <miklos@szeredi.hu>
6893L: linux-unionfs@vger.kernel.org
6894T: git git://git.kernel.org/pub/scm/linux/kernel/git/mszeredi/vfs.git
6895S: Supported
6896F: fs/overlayfs/
6897F: Documentation/filesystems/overlayfs.txt
6898
6835P54 WIRELESS DRIVER 6899P54 WIRELESS DRIVER
6836M: Christian Lamparter <chunkeey@googlemail.com> 6900M: Christian Lamparter <chunkeey@googlemail.com>
6837L: linux-wireless@vger.kernel.org 6901L: linux-wireless@vger.kernel.org
@@ -7153,6 +7217,7 @@ F: drivers/crypto/picoxcell*
7153 7217
7154PIN CONTROL SUBSYSTEM 7218PIN CONTROL SUBSYSTEM
7155M: Linus Walleij <linus.walleij@linaro.org> 7219M: Linus Walleij <linus.walleij@linaro.org>
7220L: linux-gpio@vger.kernel.org
7156S: Maintained 7221S: Maintained
7157F: drivers/pinctrl/ 7222F: drivers/pinctrl/
7158F: include/linux/pinctrl/ 7223F: include/linux/pinctrl/
@@ -7948,7 +8013,7 @@ S: Odd Fixes
7948F: drivers/media/i2c/saa6588* 8013F: drivers/media/i2c/saa6588*
7949 8014
7950SAA7134 VIDEO4LINUX DRIVER 8015SAA7134 VIDEO4LINUX DRIVER
7951M: Mauro Carvalho Chehab <m.chehab@samsung.com> 8016M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
7952L: linux-media@vger.kernel.org 8017L: linux-media@vger.kernel.org
7953W: http://linuxtv.org 8018W: http://linuxtv.org
7954T: git git://linuxtv.org/media_tree.git 8019T: git git://linuxtv.org/media_tree.git
@@ -8406,7 +8471,7 @@ S: Maintained
8406F: drivers/media/radio/si4713/radio-usb-si4713.c 8471F: drivers/media/radio/si4713/radio-usb-si4713.c
8407 8472
8408SIANO DVB DRIVER 8473SIANO DVB DRIVER
8409M: Mauro Carvalho Chehab <m.chehab@samsung.com> 8474M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
8410L: linux-media@vger.kernel.org 8475L: linux-media@vger.kernel.org
8411W: http://linuxtv.org 8476W: http://linuxtv.org
8412T: git git://linuxtv.org/media_tree.git 8477T: git git://linuxtv.org/media_tree.git
@@ -8457,7 +8522,6 @@ F: arch/arm/mach-s3c24xx/bast-irq.c
8457TI DAVINCI MACHINE SUPPORT 8522TI DAVINCI MACHINE SUPPORT
8458M: Sekhar Nori <nsekhar@ti.com> 8523M: Sekhar Nori <nsekhar@ti.com>
8459M: Kevin Hilman <khilman@deeprootsystems.com> 8524M: Kevin Hilman <khilman@deeprootsystems.com>
8460L: davinci-linux-open-source@linux.davincidsp.com (moderated for non-subscribers)
8461T: git git://gitorious.org/linux-davinci/linux-davinci.git 8525T: git git://gitorious.org/linux-davinci/linux-davinci.git
8462Q: http://patchwork.kernel.org/project/linux-davinci/list/ 8526Q: http://patchwork.kernel.org/project/linux-davinci/list/
8463S: Supported 8527S: Supported
@@ -8467,7 +8531,6 @@ F: drivers/i2c/busses/i2c-davinci.c
8467TI DAVINCI SERIES MEDIA DRIVER 8531TI DAVINCI SERIES MEDIA DRIVER
8468M: Lad, Prabhakar <prabhakar.csengg@gmail.com> 8532M: Lad, Prabhakar <prabhakar.csengg@gmail.com>
8469L: linux-media@vger.kernel.org 8533L: linux-media@vger.kernel.org
8470L: davinci-linux-open-source@linux.davincidsp.com (moderated for non-subscribers)
8471W: http://linuxtv.org/ 8534W: http://linuxtv.org/
8472Q: http://patchwork.linuxtv.org/project/linux-media/list/ 8535Q: http://patchwork.linuxtv.org/project/linux-media/list/
8473T: git git://linuxtv.org/mhadli/v4l-dvb-davinci_devices.git 8536T: git git://linuxtv.org/mhadli/v4l-dvb-davinci_devices.git
@@ -8619,7 +8682,9 @@ S: Maintained
8619F: drivers/leds/leds-net48xx.c 8682F: drivers/leds/leds-net48xx.c
8620 8683
8621SOFTLOGIC 6x10 MPEG CODEC 8684SOFTLOGIC 6x10 MPEG CODEC
8622M: Ismael Luceno <ismael.luceno@corp.bluecherry.net> 8685M: Bluecherry Maintainers <maintainers@bluecherrydvr.com>
8686M: Andrey Utkin <andrey.utkin@corp.bluecherry.net>
8687M: Andrey Utkin <andrey.krieger.utkin@gmail.com>
8623L: linux-media@vger.kernel.org 8688L: linux-media@vger.kernel.org
8624S: Supported 8689S: Supported
8625F: drivers/media/pci/solo6x10/ 8690F: drivers/media/pci/solo6x10/
@@ -9093,7 +9158,7 @@ S: Maintained
9093F: drivers/media/i2c/tda9840* 9158F: drivers/media/i2c/tda9840*
9094 9159
9095TEA5761 TUNER DRIVER 9160TEA5761 TUNER DRIVER
9096M: Mauro Carvalho Chehab <m.chehab@samsung.com> 9161M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
9097L: linux-media@vger.kernel.org 9162L: linux-media@vger.kernel.org
9098W: http://linuxtv.org 9163W: http://linuxtv.org
9099T: git git://linuxtv.org/media_tree.git 9164T: git git://linuxtv.org/media_tree.git
@@ -9101,7 +9166,7 @@ S: Odd fixes
9101F: drivers/media/tuners/tea5761.* 9166F: drivers/media/tuners/tea5761.*
9102 9167
9103TEA5767 TUNER DRIVER 9168TEA5767 TUNER DRIVER
9104M: Mauro Carvalho Chehab <m.chehab@samsung.com> 9169M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
9105L: linux-media@vger.kernel.org 9170L: linux-media@vger.kernel.org
9106W: http://linuxtv.org 9171W: http://linuxtv.org
9107T: git git://linuxtv.org/media_tree.git 9172T: git git://linuxtv.org/media_tree.git
@@ -9413,7 +9478,7 @@ F: include/linux/shmem_fs.h
9413F: mm/shmem.c 9478F: mm/shmem.c
9414 9479
9415TM6000 VIDEO4LINUX DRIVER 9480TM6000 VIDEO4LINUX DRIVER
9416M: Mauro Carvalho Chehab <m.chehab@samsung.com> 9481M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
9417L: linux-media@vger.kernel.org 9482L: linux-media@vger.kernel.org
9418W: http://linuxtv.org 9483W: http://linuxtv.org
9419T: git git://linuxtv.org/media_tree.git 9484T: git git://linuxtv.org/media_tree.git
@@ -9584,7 +9649,6 @@ F: drivers/staging/unisys/
9584 9649
9585UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER 9650UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER
9586M: Vinayak Holikatti <vinholikatti@gmail.com> 9651M: Vinayak Holikatti <vinholikatti@gmail.com>
9587M: Santosh Y <santoshsy@gmail.com>
9588L: linux-scsi@vger.kernel.org 9652L: linux-scsi@vger.kernel.org
9589S: Supported 9653S: Supported
9590F: Documentation/scsi/ufs.txt 9654F: Documentation/scsi/ufs.txt
@@ -9678,11 +9742,6 @@ S: Maintained
9678F: Documentation/hid/hiddev.txt 9742F: Documentation/hid/hiddev.txt
9679F: drivers/hid/usbhid/ 9743F: drivers/hid/usbhid/
9680 9744
9681USB/IP DRIVERS
9682L: linux-usb@vger.kernel.org
9683S: Orphan
9684F: drivers/staging/usbip/
9685
9686USB ISP116X DRIVER 9745USB ISP116X DRIVER
9687M: Olav Kongas <ok@artecdesign.ee> 9746M: Olav Kongas <ok@artecdesign.ee>
9688L: linux-usb@vger.kernel.org 9747L: linux-usb@vger.kernel.org
@@ -10240,7 +10299,7 @@ S: Maintained
10240F: arch/x86/kernel/cpu/mcheck/* 10299F: arch/x86/kernel/cpu/mcheck/*
10241 10300
10242XC2028/3028 TUNER DRIVER 10301XC2028/3028 TUNER DRIVER
10243M: Mauro Carvalho Chehab <m.chehab@samsung.com> 10302M: Mauro Carvalho Chehab <mchehab@osg.samsung.com>
10244L: linux-media@vger.kernel.org 10303L: linux-media@vger.kernel.org
10245W: http://linuxtv.org 10304W: http://linuxtv.org
10246T: git git://linuxtv.org/media_tree.git 10305T: git git://linuxtv.org/media_tree.git
diff --git a/Makefile b/Makefile
index 05d67af376c5..fd80c6e9bc23 100644
--- a/Makefile
+++ b/Makefile
@@ -1,8 +1,8 @@
1VERSION = 3 1VERSION = 3
2PATCHLEVEL = 18 2PATCHLEVEL = 18
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc1 4EXTRAVERSION =
5NAME = Shuffling Zombie Juror 5NAME = Diseased Newt
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
8# To see a list of typical targets execute "make help" 8# To see a list of typical targets execute "make help"
@@ -297,7 +297,7 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
297 297
298HOSTCC = gcc 298HOSTCC = gcc
299HOSTCXX = g++ 299HOSTCXX = g++
300HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer 300HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -std=gnu89
301HOSTCXXFLAGS = -O2 301HOSTCXXFLAGS = -O2
302 302
303ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1) 303ifeq ($(shell $(HOSTCC) -v 2>&1 | grep -c "clang version"), 1)
@@ -401,7 +401,8 @@ KBUILD_CPPFLAGS := -D__KERNEL__
401KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \ 401KBUILD_CFLAGS := -Wall -Wundef -Wstrict-prototypes -Wno-trigraphs \
402 -fno-strict-aliasing -fno-common \ 402 -fno-strict-aliasing -fno-common \
403 -Werror-implicit-function-declaration \ 403 -Werror-implicit-function-declaration \
404 -Wno-format-security 404 -Wno-format-security \
405 -std=gnu89
405 406
406KBUILD_AFLAGS_KERNEL := 407KBUILD_AFLAGS_KERNEL :=
407KBUILD_CFLAGS_KERNEL := 408KBUILD_CFLAGS_KERNEL :=
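
The Makefile hunk above pins both the host tools (HOSTCFLAGS) and the kernel build (KBUILD_CFLAGS) to the gnu89 dialect, so a compiler whose default dialect later moved on (to gnu11 in newer GCC releases) cannot silently change the meaning of existing code. As a small, purely illustrative userspace example of what that pin implies, the C99-style declaration inside a for-initializer below is rejected under -std=gnu89:

/* Compile with "gcc -std=gnu89": the commented-out C99 form is a
 * compile error in this dialect, so the index is declared up front. */
#include <stdio.h>

int main(void)
{
    int i;                           /* gnu89: declarations open the block */

    /* for (int i = 0; i < 3; i++)      C99/gnu11 form, rejected by gnu89 */
    for (i = 0; i < 3; i++)
        printf("iteration %d\n", i);

    return 0;
}
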
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 9596b0ab108d..fe44b2494609 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -9,6 +9,7 @@
9config ARC 9config ARC
10 def_bool y 10 def_bool y
11 select BUILDTIME_EXTABLE_SORT 11 select BUILDTIME_EXTABLE_SORT
12 select COMMON_CLK
12 select CLONE_BACKWARDS 13 select CLONE_BACKWARDS
13 # ARC Busybox based initramfs absolutely relies on DEVTMPFS for /dev 14 # ARC Busybox based initramfs absolutely relies on DEVTMPFS for /dev
14 select DEVTMPFS if !INITRAMFS_SOURCE="" 15 select DEVTMPFS if !INITRAMFS_SOURCE=""
@@ -73,9 +74,6 @@ config STACKTRACE_SUPPORT
73config HAVE_LATENCYTOP_SUPPORT 74config HAVE_LATENCYTOP_SUPPORT
74 def_bool y 75 def_bool y
75 76
76config NO_DMA
77 def_bool n
78
79source "init/Kconfig" 77source "init/Kconfig"
80source "kernel/Kconfig.freezer" 78source "kernel/Kconfig.freezer"
81 79
@@ -354,7 +352,7 @@ config ARC_CURR_IN_REG
354 kernel mode. This saves memory access for each such access 352 kernel mode. This saves memory access for each such access
355 353
356 354
357config ARC_MISALIGN_ACCESS 355config ARC_EMUL_UNALIGNED
358 bool "Emulate unaligned memory access (userspace only)" 356 bool "Emulate unaligned memory access (userspace only)"
359 select SYSCTL_ARCH_UNALIGN_NO_WARN 357 select SYSCTL_ARCH_UNALIGN_NO_WARN
360 select SYSCTL_ARCH_UNALIGN_ALLOW 358 select SYSCTL_ARCH_UNALIGN_ALLOW
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index 8c0b1aa56f7e..10bc3d4e8a44 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -25,7 +25,6 @@ ifdef CONFIG_ARC_CURR_IN_REG
25LINUXINCLUDE += -include ${src}/arch/arc/include/asm/current.h 25LINUXINCLUDE += -include ${src}/arch/arc/include/asm/current.h
26endif 26endif
27 27
28upto_gcc42 := $(call cc-ifversion, -le, 0402, y)
29upto_gcc44 := $(call cc-ifversion, -le, 0404, y) 28upto_gcc44 := $(call cc-ifversion, -le, 0404, y)
30atleast_gcc44 := $(call cc-ifversion, -ge, 0404, y) 29atleast_gcc44 := $(call cc-ifversion, -ge, 0404, y)
31atleast_gcc48 := $(call cc-ifversion, -ge, 0408, y) 30atleast_gcc48 := $(call cc-ifversion, -ge, 0408, y)
@@ -60,25 +59,11 @@ ldflags-$(CONFIG_CPU_BIG_ENDIAN) += -EB
60# --build-id w/o "-marclinux". Default arc-elf32-ld is OK 59# --build-id w/o "-marclinux". Default arc-elf32-ld is OK
61ldflags-$(upto_gcc44) += -marclinux 60ldflags-$(upto_gcc44) += -marclinux
62 61
63ARC_LIBGCC := -mA7
64cflags-$(CONFIG_ARC_HAS_HW_MPY) += -multcost=16
65
66ifndef CONFIG_ARC_HAS_HW_MPY 62ifndef CONFIG_ARC_HAS_HW_MPY
67 cflags-y += -mno-mpy 63 cflags-y += -mno-mpy
68
69# newlib for ARC700 assumes MPY to be always present, which is generally true
70# However, if someone really doesn't want MPY, we need to use the 600 ver
71# which coupled with -mno-mpy will use mpy emulation
72# With gcc 4.4.7, -mno-mpy is enough to make any other related adjustments,
73# e.g. increased cost of MPY. With gcc 4.2.1 this had to be explicitly hinted
74
75 ifeq ($(upto_gcc42),y)
76 ARC_LIBGCC := -marc600
77 cflags-y += -multcost=30
78 endif
79endif 64endif
80 65
81LIBGCC := $(shell $(CC) $(ARC_LIBGCC) $(cflags-y) --print-libgcc-file-name) 66LIBGCC := $(shell $(CC) $(cflags-y) --print-libgcc-file-name)
82 67
83# Modules with short calls might break for calls into builtin-kernel 68# Modules with short calls might break for calls into builtin-kernel
84KBUILD_CFLAGS_MODULE += -mlong-calls 69KBUILD_CFLAGS_MODULE += -mlong-calls
diff --git a/arch/arc/boot/dts/angel4.dts b/arch/arc/boot/dts/angel4.dts
index 6b57475967a6..757e0c62c4f9 100644
--- a/arch/arc/boot/dts/angel4.dts
+++ b/arch/arc/boot/dts/angel4.dts
@@ -24,11 +24,6 @@
24 serial0 = &arcuart0; 24 serial0 = &arcuart0;
25 }; 25 };
26 26
27 memory {
28 device_type = "memory";
29 reg = <0x00000000 0x10000000>; /* 256M */
30 };
31
32 fpga { 27 fpga {
33 compatible = "simple-bus"; 28 compatible = "simple-bus";
34 #address-cells = <1>; 29 #address-cells = <1>;
diff --git a/arch/arc/boot/dts/nsimosci.dts b/arch/arc/boot/dts/nsimosci.dts
index 4f31b2eb5cdf..cfaedd9c61c9 100644
--- a/arch/arc/boot/dts/nsimosci.dts
+++ b/arch/arc/boot/dts/nsimosci.dts
@@ -20,18 +20,13 @@
20 /* this is for console on PGU */ 20 /* this is for console on PGU */
21 /* bootargs = "console=tty0 consoleblank=0"; */ 21 /* bootargs = "console=tty0 consoleblank=0"; */
22 /* this is for console on serial */ 22 /* this is for console on serial */
23 bootargs = "earlycon=uart8250,mmio32,0xc0000000,115200n8 console=ttyS0,115200n8 consoleblank=0 debug"; 23 bootargs = "earlycon=uart8250,mmio32,0xc0000000,115200n8 console=tty0 console=ttyS0,115200n8 consoleblank=0 debug";
24 }; 24 };
25 25
26 aliases { 26 aliases {
27 serial0 = &uart0; 27 serial0 = &uart0;
28 }; 28 };
29 29
30 memory {
31 device_type = "memory";
32 reg = <0x80000000 0x10000000>; /* 256M */
33 };
34
35 fpga { 30 fpga {
36 compatible = "simple-bus"; 31 compatible = "simple-bus";
37 #address-cells = <1>; 32 #address-cells = <1>;
diff --git a/arch/arc/configs/fpga_defconfig b/arch/arc/configs/fpga_defconfig
index e283aa586934..ef4d3bc7b6c0 100644
--- a/arch/arc/configs/fpga_defconfig
+++ b/arch/arc/configs/fpga_defconfig
@@ -23,7 +23,6 @@ CONFIG_MODULES=y
23# CONFIG_IOSCHED_DEADLINE is not set 23# CONFIG_IOSCHED_DEADLINE is not set
24# CONFIG_IOSCHED_CFQ is not set 24# CONFIG_IOSCHED_CFQ is not set
25CONFIG_ARC_PLAT_FPGA_LEGACY=y 25CONFIG_ARC_PLAT_FPGA_LEGACY=y
26CONFIG_ARC_BOARD_ML509=y
27# CONFIG_ARC_HAS_RTSC is not set 26# CONFIG_ARC_HAS_RTSC is not set
28CONFIG_ARC_BUILTIN_DTB_NAME="angel4" 27CONFIG_ARC_BUILTIN_DTB_NAME="angel4"
29CONFIG_PREEMPT=y 28CONFIG_PREEMPT=y
diff --git a/arch/arc/configs/fpga_noramfs_defconfig b/arch/arc/configs/fpga_noramfs_defconfig
index 5276a52f6a2f..49c93011ab96 100644
--- a/arch/arc/configs/fpga_noramfs_defconfig
+++ b/arch/arc/configs/fpga_noramfs_defconfig
@@ -20,7 +20,6 @@ CONFIG_MODULES=y
20# CONFIG_IOSCHED_DEADLINE is not set 20# CONFIG_IOSCHED_DEADLINE is not set
21# CONFIG_IOSCHED_CFQ is not set 21# CONFIG_IOSCHED_CFQ is not set
22CONFIG_ARC_PLAT_FPGA_LEGACY=y 22CONFIG_ARC_PLAT_FPGA_LEGACY=y
23CONFIG_ARC_BOARD_ML509=y
24# CONFIG_ARC_HAS_RTSC is not set 23# CONFIG_ARC_HAS_RTSC is not set
25CONFIG_ARC_BUILTIN_DTB_NAME="angel4" 24CONFIG_ARC_BUILTIN_DTB_NAME="angel4"
26CONFIG_PREEMPT=y 25CONFIG_PREEMPT=y
diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig
index c01ba35a4eff..278dacf2a3f9 100644
--- a/arch/arc/configs/nsimosci_defconfig
+++ b/arch/arc/configs/nsimosci_defconfig
@@ -21,7 +21,6 @@ CONFIG_MODULES=y
21# CONFIG_IOSCHED_DEADLINE is not set 21# CONFIG_IOSCHED_DEADLINE is not set
22# CONFIG_IOSCHED_CFQ is not set 22# CONFIG_IOSCHED_CFQ is not set
23CONFIG_ARC_PLAT_FPGA_LEGACY=y 23CONFIG_ARC_PLAT_FPGA_LEGACY=y
24CONFIG_ARC_BOARD_ML509=y
25# CONFIG_ARC_IDE is not set 24# CONFIG_ARC_IDE is not set
26# CONFIG_ARCTANGENT_EMAC is not set 25# CONFIG_ARCTANGENT_EMAC is not set
27# CONFIG_ARC_HAS_RTSC is not set 26# CONFIG_ARC_HAS_RTSC is not set
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index 372466b371bf..be33db8a2ee3 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -9,19 +9,16 @@
9#ifndef _ASM_ARC_ARCREGS_H 9#ifndef _ASM_ARC_ARCREGS_H
10#define _ASM_ARC_ARCREGS_H 10#define _ASM_ARC_ARCREGS_H
11 11
12#ifdef __KERNEL__
13
14/* Build Configuration Registers */ 12/* Build Configuration Registers */
15#define ARC_REG_DCCMBASE_BCR 0x61 /* DCCM Base Addr */ 13#define ARC_REG_DCCMBASE_BCR 0x61 /* DCCM Base Addr */
16#define ARC_REG_CRC_BCR 0x62 14#define ARC_REG_CRC_BCR 0x62
17#define ARC_REG_DVFB_BCR 0x64
18#define ARC_REG_EXTARITH_BCR 0x65
19#define ARC_REG_VECBASE_BCR 0x68 15#define ARC_REG_VECBASE_BCR 0x68
20#define ARC_REG_PERIBASE_BCR 0x69 16#define ARC_REG_PERIBASE_BCR 0x69
21#define ARC_REG_FP_BCR 0x6B /* Single-Precision FPU */ 17#define ARC_REG_FP_BCR 0x6B /* ARCompact: Single-Precision FPU */
22#define ARC_REG_DPFP_BCR 0x6C /* Dbl Precision FPU */ 18#define ARC_REG_DPFP_BCR 0x6C /* ARCompact: Dbl Precision FPU */
23#define ARC_REG_DCCM_BCR 0x74 /* DCCM Present + SZ */ 19#define ARC_REG_DCCM_BCR 0x74 /* DCCM Present + SZ */
24#define ARC_REG_TIMERS_BCR 0x75 20#define ARC_REG_TIMERS_BCR 0x75
21#define ARC_REG_AP_BCR 0x76
25#define ARC_REG_ICCM_BCR 0x78 22#define ARC_REG_ICCM_BCR 0x78
26#define ARC_REG_XY_MEM_BCR 0x79 23#define ARC_REG_XY_MEM_BCR 0x79
27#define ARC_REG_MAC_BCR 0x7a 24#define ARC_REG_MAC_BCR 0x7a
@@ -31,6 +28,9 @@
31#define ARC_REG_MIXMAX_BCR 0x7e 28#define ARC_REG_MIXMAX_BCR 0x7e
32#define ARC_REG_BARREL_BCR 0x7f 29#define ARC_REG_BARREL_BCR 0x7f
33#define ARC_REG_D_UNCACH_BCR 0x6A 30#define ARC_REG_D_UNCACH_BCR 0x6A
31#define ARC_REG_BPU_BCR 0xc0
32#define ARC_REG_ISA_CFG_BCR 0xc1
33#define ARC_REG_SMART_BCR 0xFF
34 34
35/* status32 Bits Positions */ 35/* status32 Bits Positions */
36#define STATUS_AE_BIT 5 /* Exception active */ 36#define STATUS_AE_BIT 5 /* Exception active */
@@ -191,14 +191,6 @@
191#define PAGES_TO_KB(n_pages) ((n_pages) << (PAGE_SHIFT - 10)) 191#define PAGES_TO_KB(n_pages) ((n_pages) << (PAGE_SHIFT - 10))
192#define PAGES_TO_MB(n_pages) (PAGES_TO_KB(n_pages) >> 10) 192#define PAGES_TO_MB(n_pages) (PAGES_TO_KB(n_pages) >> 10)
193 193
194#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
195/* These DPFP regs need to be saved/restored across ctx-sw */
196struct arc_fpu {
197 struct {
198 unsigned int l, h;
199 } aux_dpfp[2];
200};
201#endif
202 194
203/* 195/*
204 *************************************************************** 196 ***************************************************************
@@ -212,27 +204,19 @@ struct bcr_identity {
212#endif 204#endif
213}; 205};
214 206
215#define EXTN_SWAP_VALID 0x1 207struct bcr_isa {
216#define EXTN_NORM_VALID 0x2
217#define EXTN_MINMAX_VALID 0x2
218#define EXTN_BARREL_VALID 0x2
219
220struct bcr_extn {
221#ifdef CONFIG_CPU_BIG_ENDIAN 208#ifdef CONFIG_CPU_BIG_ENDIAN
222 unsigned int pad:20, crc:1, ext_arith:2, mul:2, barrel:2, minmax:2, 209 unsigned int pad1:23, atomic1:1, ver:8;
223 norm:2, swap:1;
224#else 210#else
225 unsigned int swap:1, norm:2, minmax:2, barrel:2, mul:2, ext_arith:2, 211 unsigned int ver:8, atomic1:1, pad1:23;
226 crc:1, pad:20;
227#endif 212#endif
228}; 213};
229 214
230/* DSP Options Ref Manual */ 215struct bcr_mpy {
231struct bcr_extn_mac_mul {
232#ifdef CONFIG_CPU_BIG_ENDIAN 216#ifdef CONFIG_CPU_BIG_ENDIAN
233 unsigned int pad:16, type:8, ver:8; 217 unsigned int pad:8, x1616:8, dsp:4, cycles:2, type:2, ver:8;
234#else 218#else
235 unsigned int ver:8, type:8, pad:16; 219 unsigned int ver:8, type:2, cycles:2, dsp:4, x1616:8, pad:8;
236#endif 220#endif
237}; 221};
238 222
@@ -251,6 +235,7 @@ struct bcr_perip {
251 unsigned int pad:8, sz:8, pad2:8, start:8; 235 unsigned int pad:8, sz:8, pad2:8, start:8;
252#endif 236#endif
253}; 237};
238
254struct bcr_iccm { 239struct bcr_iccm {
255#ifdef CONFIG_CPU_BIG_ENDIAN 240#ifdef CONFIG_CPU_BIG_ENDIAN
256 unsigned int base:16, pad:5, sz:3, ver:8; 241 unsigned int base:16, pad:5, sz:3, ver:8;
@@ -277,8 +262,8 @@ struct bcr_dccm {
277#endif 262#endif
278}; 263};
279 264
280/* Both SP and DP FPU BCRs have same format */ 265/* ARCompact: Both SP and DP FPU BCRs have same format */
281struct bcr_fp { 266struct bcr_fp_arcompact {
282#ifdef CONFIG_CPU_BIG_ENDIAN 267#ifdef CONFIG_CPU_BIG_ENDIAN
283 unsigned int fast:1, ver:8; 268 unsigned int fast:1, ver:8;
284#else 269#else
@@ -286,6 +271,30 @@ struct bcr_fp {
286#endif 271#endif
287}; 272};
288 273
274struct bcr_timer {
275#ifdef CONFIG_CPU_BIG_ENDIAN
276 unsigned int pad2:15, rtsc:1, pad1:6, t1:1, t0:1, ver:8;
277#else
278 unsigned int ver:8, t0:1, t1:1, pad1:6, rtsc:1, pad2:15;
279#endif
280};
281
282struct bcr_bpu_arcompact {
283#ifdef CONFIG_CPU_BIG_ENDIAN
284 unsigned int pad2:19, fam:1, pad:2, ent:2, ver:8;
285#else
286 unsigned int ver:8, ent:2, pad:2, fam:1, pad2:19;
287#endif
288};
289
290struct bcr_generic {
291#ifdef CONFIG_CPU_BIG_ENDIAN
292 unsigned int pad:24, ver:8;
293#else
294 unsigned int ver:8, pad:24;
295#endif
296};
297
289/* 298/*
290 ******************************************************************* 299 *******************************************************************
291 * Generic structures to hold build configuration used at runtime 300 * Generic structures to hold build configuration used at runtime
@@ -299,6 +308,10 @@ struct cpuinfo_arc_cache {
299 unsigned int sz_k:8, line_len:8, assoc:4, ver:4, alias:1, vipt:1, pad:6; 308 unsigned int sz_k:8, line_len:8, assoc:4, ver:4, alias:1, vipt:1, pad:6;
300}; 309};
301 310
311struct cpuinfo_arc_bpu {
312 unsigned int ver, full, num_cache, num_pred;
313};
314
302struct cpuinfo_arc_ccm { 315struct cpuinfo_arc_ccm {
303 unsigned int base_addr, sz; 316 unsigned int base_addr, sz;
304}; 317};
@@ -306,21 +319,25 @@ struct cpuinfo_arc_ccm {
306struct cpuinfo_arc { 319struct cpuinfo_arc {
307 struct cpuinfo_arc_cache icache, dcache; 320 struct cpuinfo_arc_cache icache, dcache;
308 struct cpuinfo_arc_mmu mmu; 321 struct cpuinfo_arc_mmu mmu;
322 struct cpuinfo_arc_bpu bpu;
309 struct bcr_identity core; 323 struct bcr_identity core;
310 unsigned int timers; 324 struct bcr_isa isa;
325 struct bcr_timer timers;
311 unsigned int vec_base; 326 unsigned int vec_base;
312 unsigned int uncached_base; 327 unsigned int uncached_base;
313 struct cpuinfo_arc_ccm iccm, dccm; 328 struct cpuinfo_arc_ccm iccm, dccm;
314 struct bcr_extn extn; 329 struct {
330 unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, pad1:3,
331 fpu_sp:1, fpu_dp:1, pad2:6,
332 debug:1, ap:1, smart:1, rtt:1, pad3:4,
333 pad4:8;
334 } extn;
335 struct bcr_mpy extn_mpy;
315 struct bcr_extn_xymem extn_xymem; 336 struct bcr_extn_xymem extn_xymem;
316 struct bcr_extn_mac_mul extn_mac_mul;
317 struct bcr_fp fp, dpfp;
318}; 337};
319 338
320extern struct cpuinfo_arc cpuinfo_arc700[]; 339extern struct cpuinfo_arc cpuinfo_arc700[];
321 340
322#endif /* __ASEMBLY__ */ 341#endif /* __ASEMBLY__ */
323 342
324#endif /* __KERNEL__ */
325
326#endif /* _ASM_ARC_ARCREGS_H */ 343#endif /* _ASM_ARC_ARCREGS_H */
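
The arcregs.h changes above add Build Configuration Register (BCR) layouts such as bcr_isa, bcr_mpy, bcr_timer and bcr_bpu_arcompact as bitfield structs whose member order is reversed for big-endian builds, so the version field always lands in bits [7:0] of the register word. Below is a small userspace sketch of that decoding pattern; the struct, the raw value and the memcpy stand-in for the kernel's READ_BCR() are all illustrative.

/* Decode a 32-bit configuration word through an endian-aware bitfield
 * struct, mirroring the BCR pattern used in arcregs.h. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

struct bcr_example {
#if defined(__BYTE_ORDER__) && (__BYTE_ORDER__ == __ORDER_BIG_ENDIAN__)
    uint32_t pad:23, fast:1, ver:8;
#else
    uint32_t ver:8, fast:1, pad:23;
#endif
};

int main(void)
{
    uint32_t raw = 0x00000103;           /* made-up register value */
    struct bcr_example bcr;

    memcpy(&bcr, &raw, sizeof(bcr));     /* stands in for READ_BCR() */
    printf("ver=%u fast=%u\n", bcr.ver, bcr.fast);   /* ver=3 fast=1 */
    return 0;
}
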
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 173f303a868f..067551b6920a 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -9,8 +9,6 @@
9#ifndef _ASM_ARC_ATOMIC_H 9#ifndef _ASM_ARC_ATOMIC_H
10#define _ASM_ARC_ATOMIC_H 10#define _ASM_ARC_ATOMIC_H
11 11
12#ifdef __KERNEL__
13
14#ifndef __ASSEMBLY__ 12#ifndef __ASSEMBLY__
15 13
16#include <linux/types.h> 14#include <linux/types.h>
@@ -170,5 +168,3 @@ ATOMIC_OP(and, &=, and)
170#endif 168#endif
171 169
172#endif 170#endif
173
174#endif
diff --git a/arch/arc/include/asm/bitops.h b/arch/arc/include/asm/bitops.h
index ebc0cf3164dc..1a5bf07eefe2 100644
--- a/arch/arc/include/asm/bitops.h
+++ b/arch/arc/include/asm/bitops.h
@@ -13,8 +13,6 @@
13#error only <linux/bitops.h> can be included directly 13#error only <linux/bitops.h> can be included directly
14#endif 14#endif
15 15
16#ifdef __KERNEL__
17
18#ifndef __ASSEMBLY__ 16#ifndef __ASSEMBLY__
19 17
20#include <linux/types.h> 18#include <linux/types.h>
@@ -508,6 +506,4 @@ static inline __attribute__ ((const)) int __ffs(unsigned long word)
508 506
509#endif /* !__ASSEMBLY__ */ 507#endif /* !__ASSEMBLY__ */
510 508
511#endif /* __KERNEL__ */
512
513#endif 509#endif
diff --git a/arch/arc/include/asm/bug.h b/arch/arc/include/asm/bug.h
index 5b18e94c6678..ea022d47896c 100644
--- a/arch/arc/include/asm/bug.h
+++ b/arch/arc/include/asm/bug.h
@@ -21,10 +21,9 @@ void show_kernel_fault_diag(const char *str, struct pt_regs *regs,
21 unsigned long address); 21 unsigned long address);
22void die(const char *str, struct pt_regs *regs, unsigned long address); 22void die(const char *str, struct pt_regs *regs, unsigned long address);
23 23
24#define BUG() do { \ 24#define BUG() do { \
25 dump_stack(); \ 25 pr_warn("BUG: failure at %s:%d/%s()!\n", __FILE__, __LINE__, __func__); \
26 pr_warn("Kernel BUG in %s: %s: %d!\n", \ 26 dump_stack(); \
27 __FILE__, __func__, __LINE__); \
28} while (0) 27} while (0)
29 28
30#define HAVE_ARCH_BUG 29#define HAVE_ARCH_BUG
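
The BUG() rewrite above keeps its two statements inside the usual do { ... } while (0) wrapper. The short, generic C sketch below shows why that wrapper matters for any multi-statement macro: it lets the expansion behave as a single statement under an un-braced if/else.

/* Without the do/while(0) wrapper, only the first printf would be
 * governed by the "if", and the trailing semicolon would break the
 * "else".  With it, the macro acts as one statement. */
#include <stdio.h>

#define REPORT(msg) do {                              \
        printf("warn: %s\n", (msg));                  \
        printf("  (details follow)\n");               \
    } while (0)

int main(void)
{
    int broken = 0;

    if (broken)
        REPORT("something failed");      /* both lines guarded together */
    else
        printf("all good\n");

    return 0;
}
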
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index b3c750979aa1..7861255da32d 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -20,7 +20,7 @@
20#define CACHE_LINE_MASK (~(L1_CACHE_BYTES - 1)) 20#define CACHE_LINE_MASK (~(L1_CACHE_BYTES - 1))
21 21
22/* 22/*
23 * ARC700 doesn't cache any access in top 256M. 23 * ARC700 doesn't cache any access in top 1G (0xc000_0000 to 0xFFFF_FFFF)
24 * Ideal for wiring memory mapped peripherals as we don't need to do 24 * Ideal for wiring memory mapped peripherals as we don't need to do
25 * explicit uncached accesses (LD.di/ST.di) hence more portable drivers 25 * explicit uncached accesses (LD.di/ST.di) hence more portable drivers
26 */ 26 */
diff --git a/arch/arc/include/asm/current.h b/arch/arc/include/asm/current.h
index 87b918585c4a..c2453ee62801 100644
--- a/arch/arc/include/asm/current.h
+++ b/arch/arc/include/asm/current.h
@@ -12,8 +12,6 @@
12#ifndef _ASM_ARC_CURRENT_H 12#ifndef _ASM_ARC_CURRENT_H
13#define _ASM_ARC_CURRENT_H 13#define _ASM_ARC_CURRENT_H
14 14
15#ifdef __KERNEL__
16
17#ifndef __ASSEMBLY__ 15#ifndef __ASSEMBLY__
18 16
19#ifdef CONFIG_ARC_CURR_IN_REG 17#ifdef CONFIG_ARC_CURR_IN_REG
@@ -27,6 +25,4 @@ register struct task_struct *curr_arc asm("r25");
27 25
28#endif /* ! __ASSEMBLY__ */ 26#endif /* ! __ASSEMBLY__ */
29 27
30#endif /* __KERNEL__ */
31
32#endif /* _ASM_ARC_CURRENT_H */ 28#endif /* _ASM_ARC_CURRENT_H */
diff --git a/arch/arc/include/asm/irqflags.h b/arch/arc/include/asm/irqflags.h
index 587df8236e8b..742816f1b210 100644
--- a/arch/arc/include/asm/irqflags.h
+++ b/arch/arc/include/asm/irqflags.h
@@ -15,8 +15,6 @@
15 * -Conditionally disable interrupts (if they are not enabled, don't disable) 15 * -Conditionally disable interrupts (if they are not enabled, don't disable)
16*/ 16*/
17 17
18#ifdef __KERNEL__
19
20#include <asm/arcregs.h> 18#include <asm/arcregs.h>
21 19
22/* status32 Reg bits related to Interrupt Handling */ 20/* status32 Reg bits related to Interrupt Handling */
@@ -169,6 +167,4 @@ static inline int arch_irqs_disabled(void)
169 167
170#endif /* __ASSEMBLY__ */ 168#endif /* __ASSEMBLY__ */
171 169
172#endif /* KERNEL */
173
174#endif 170#endif
diff --git a/arch/arc/include/asm/kgdb.h b/arch/arc/include/asm/kgdb.h
index b65fca7ffeb5..fea931634136 100644
--- a/arch/arc/include/asm/kgdb.h
+++ b/arch/arc/include/asm/kgdb.h
@@ -19,7 +19,7 @@
19 * register API yet */ 19 * register API yet */
20#undef DBG_MAX_REG_NUM 20#undef DBG_MAX_REG_NUM
21 21
22#define GDB_MAX_REGS 39 22#define GDB_MAX_REGS 87
23 23
24#define BREAK_INSTR_SIZE 2 24#define BREAK_INSTR_SIZE 2
25#define CACHE_FLUSH_IS_SAFE 1 25#define CACHE_FLUSH_IS_SAFE 1
@@ -33,23 +33,27 @@ static inline void arch_kgdb_breakpoint(void)
33 33
34extern void kgdb_trap(struct pt_regs *regs); 34extern void kgdb_trap(struct pt_regs *regs);
35 35
36enum arc700_linux_regnums { 36/* This is the numbering of registers according to the GDB. See GDB's
37 * arc-tdep.h for details.
38 *
39 * Registers are ordered for GDB 7.5. It is incompatible with GDB 6.8. */
40enum arc_linux_regnums {
37 _R0 = 0, 41 _R0 = 0,
38 _R1, _R2, _R3, _R4, _R5, _R6, _R7, _R8, _R9, _R10, _R11, _R12, _R13, 42 _R1, _R2, _R3, _R4, _R5, _R6, _R7, _R8, _R9, _R10, _R11, _R12, _R13,
39 _R14, _R15, _R16, _R17, _R18, _R19, _R20, _R21, _R22, _R23, _R24, 43 _R14, _R15, _R16, _R17, _R18, _R19, _R20, _R21, _R22, _R23, _R24,
40 _R25, _R26, 44 _R25, _R26,
41 _BTA = 27, 45 _FP = 27,
42 _LP_START = 28, 46 __SP = 28,
43 _LP_END = 29, 47 _R30 = 30,
44 _LP_COUNT = 30, 48 _BLINK = 31,
45 _STATUS32 = 31, 49 _LP_COUNT = 60,
46 _BLINK = 32, 50 _STOP_PC = 64,
47 _FP = 33, 51 _RET = 64,
48 __SP = 34, 52 _LP_START = 65,
49 _EFA = 35, 53 _LP_END = 66,
50 _RET = 36, 54 _STATUS32 = 67,
51 _ORIG_R8 = 37, 55 _ECR = 76,
52 _STOP_PC = 38 56 _BTA = 82,
53}; 57};
54 58
55#else 59#else
diff --git a/arch/arc/include/asm/processor.h b/arch/arc/include/asm/processor.h
index 82588f3ba77f..210fe97464c3 100644
--- a/arch/arc/include/asm/processor.h
+++ b/arch/arc/include/asm/processor.h
@@ -14,12 +14,19 @@
14#ifndef __ASM_ARC_PROCESSOR_H 14#ifndef __ASM_ARC_PROCESSOR_H
15#define __ASM_ARC_PROCESSOR_H 15#define __ASM_ARC_PROCESSOR_H
16 16
17#ifdef __KERNEL__
18
19#ifndef __ASSEMBLY__ 17#ifndef __ASSEMBLY__
20 18
21#include <asm/ptrace.h> 19#include <asm/ptrace.h>
22 20
21#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
22/* These DPFP regs need to be saved/restored across ctx-sw */
23struct arc_fpu {
24 struct {
25 unsigned int l, h;
26 } aux_dpfp[2];
27};
28#endif
29
23/* Arch specific stuff which needs to be saved per task. 30/* Arch specific stuff which needs to be saved per task.
24 * However these items are not so important so as to earn a place in 31 * However these items are not so important so as to earn a place in
25 * struct thread_info 32 * struct thread_info
@@ -128,6 +135,4 @@ extern unsigned int get_wchan(struct task_struct *p);
128 */ 135 */
129#define TASK_UNMAPPED_BASE (TASK_SIZE / 3) 136#define TASK_UNMAPPED_BASE (TASK_SIZE / 3)
130 137
131#endif /* __KERNEL__ */
132
133#endif /* __ASM_ARC_PROCESSOR_H */ 138#endif /* __ASM_ARC_PROCESSOR_H */
diff --git a/arch/arc/include/asm/setup.h b/arch/arc/include/asm/setup.h
index e10f8cef56a8..6e3ef5ba4f74 100644
--- a/arch/arc/include/asm/setup.h
+++ b/arch/arc/include/asm/setup.h
@@ -29,7 +29,6 @@ struct cpuinfo_data {
29}; 29};
30 30
31extern int root_mountflags, end_mem; 31extern int root_mountflags, end_mem;
32extern int running_on_hw;
33 32
34void setup_processor(void); 33void setup_processor(void);
35void __init setup_arch_memory(void); 34void __init setup_arch_memory(void);
diff --git a/arch/arc/include/asm/smp.h b/arch/arc/include/asm/smp.h
index 5d06eee43ea9..3845b9e94f69 100644
--- a/arch/arc/include/asm/smp.h
+++ b/arch/arc/include/asm/smp.h
@@ -59,7 +59,15 @@ struct plat_smp_ops {
59/* TBD: stop exporting it for direct population by platform */ 59/* TBD: stop exporting it for direct population by platform */
60extern struct plat_smp_ops plat_smp_ops; 60extern struct plat_smp_ops plat_smp_ops;
61 61
62#endif /* CONFIG_SMP */ 62#else /* CONFIG_SMP */
63
64static inline void smp_init_cpus(void) {}
65static inline const char *arc_platform_smp_cpuinfo(void)
66{
67 return "";
68}
69
70#endif /* !CONFIG_SMP */
63 71
64/* 72/*
65 * ARC700 doesn't support atomic Read-Modify-Write ops. 73 * ARC700 doesn't support atomic Read-Modify-Write ops.
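
The smp.h hunk above turns the bare #endif into an #else branch that supplies empty static inline stand-ins for uniprocessor builds, so callers of smp_init_cpus() and arc_platform_smp_cpuinfo() need no #ifdef of their own. A minimal sketch of the same stub pattern, using a made-up CONFIG_FEATURE_X placeholder rather than a real kernel option:

/* When the feature is configured out, empty static inline stubs keep
 * every call site free of #ifdefs.  CONFIG_FEATURE_X is a placeholder. */
#include <stdio.h>

#ifdef CONFIG_FEATURE_X
void feature_init(void);                 /* real implementation elsewhere */
const char *feature_info(void);
#else
static inline void feature_init(void) { }
static inline const char *feature_info(void) { return ""; }
#endif

int main(void)
{
    feature_init();                      /* compiles and links either way */
    printf("feature info: \"%s\"\n", feature_info());
    return 0;
}
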
diff --git a/arch/arc/include/asm/string.h b/arch/arc/include/asm/string.h
index 87676c8f1412..95822b550a18 100644
--- a/arch/arc/include/asm/string.h
+++ b/arch/arc/include/asm/string.h
@@ -17,8 +17,6 @@
17 17
18#include <linux/types.h> 18#include <linux/types.h>
19 19
20#ifdef __KERNEL__
21
22#define __HAVE_ARCH_MEMSET 20#define __HAVE_ARCH_MEMSET
23#define __HAVE_ARCH_MEMCPY 21#define __HAVE_ARCH_MEMCPY
24#define __HAVE_ARCH_MEMCMP 22#define __HAVE_ARCH_MEMCMP
@@ -36,5 +34,4 @@ extern char *strcpy(char *dest, const char *src);
36extern int strcmp(const char *cs, const char *ct); 34extern int strcmp(const char *cs, const char *ct);
37extern __kernel_size_t strlen(const char *); 35extern __kernel_size_t strlen(const char *);
38 36
39#endif /* __KERNEL__ */
40#endif /* _ASM_ARC_STRING_H */ 37#endif /* _ASM_ARC_STRING_H */
diff --git a/arch/arc/include/asm/syscalls.h b/arch/arc/include/asm/syscalls.h
index dd785befe7fd..e56f9fcc5581 100644
--- a/arch/arc/include/asm/syscalls.h
+++ b/arch/arc/include/asm/syscalls.h
@@ -9,8 +9,6 @@
9#ifndef _ASM_ARC_SYSCALLS_H 9#ifndef _ASM_ARC_SYSCALLS_H
10#define _ASM_ARC_SYSCALLS_H 1 10#define _ASM_ARC_SYSCALLS_H 1
11 11
12#ifdef __KERNEL__
13
14#include <linux/compiler.h> 12#include <linux/compiler.h>
15#include <linux/linkage.h> 13#include <linux/linkage.h>
16#include <linux/types.h> 14#include <linux/types.h>
@@ -22,6 +20,4 @@ int sys_arc_gettls(void);
22 20
23#include <asm-generic/syscalls.h> 21#include <asm-generic/syscalls.h>
24 22
25#endif /* __KERNEL__ */
26
27#endif 23#endif
diff --git a/arch/arc/include/asm/thread_info.h b/arch/arc/include/asm/thread_info.h
index 45be21672011..02bc5ec0fb2e 100644
--- a/arch/arc/include/asm/thread_info.h
+++ b/arch/arc/include/asm/thread_info.h
@@ -16,8 +16,6 @@
16#ifndef _ASM_THREAD_INFO_H 16#ifndef _ASM_THREAD_INFO_H
17#define _ASM_THREAD_INFO_H 17#define _ASM_THREAD_INFO_H
18 18
19#ifdef __KERNEL__
20
21#include <asm/page.h> 19#include <asm/page.h>
22 20
23#ifdef CONFIG_16KSTACKS 21#ifdef CONFIG_16KSTACKS
@@ -114,6 +112,4 @@ static inline __attribute_const__ struct thread_info *current_thread_info(void)
114 * syscall, so all that reamins to be tested is _TIF_WORK_MASK 112 * syscall, so all that reamins to be tested is _TIF_WORK_MASK
115 */ 113 */
116 114
117#endif /* __KERNEL__ */
118
119#endif /* _ASM_THREAD_INFO_H */ 115#endif /* _ASM_THREAD_INFO_H */
diff --git a/arch/arc/include/asm/unaligned.h b/arch/arc/include/asm/unaligned.h
index 3e5f071bc00c..6da6b4edaeda 100644
--- a/arch/arc/include/asm/unaligned.h
+++ b/arch/arc/include/asm/unaligned.h
@@ -14,7 +14,7 @@
14#include <asm-generic/unaligned.h> 14#include <asm-generic/unaligned.h>
15#include <asm/ptrace.h> 15#include <asm/ptrace.h>
16 16
17#ifdef CONFIG_ARC_MISALIGN_ACCESS 17#ifdef CONFIG_ARC_EMUL_UNALIGNED
18int misaligned_fixup(unsigned long address, struct pt_regs *regs, 18int misaligned_fixup(unsigned long address, struct pt_regs *regs,
19 struct callee_regs *cregs); 19 struct callee_regs *cregs);
20#else 20#else
diff --git a/arch/arc/kernel/Makefile b/arch/arc/kernel/Makefile
index 8004b4fa6461..113f2033da9f 100644
--- a/arch/arc/kernel/Makefile
+++ b/arch/arc/kernel/Makefile
@@ -16,7 +16,7 @@ obj-$(CONFIG_MODULES) += arcksyms.o module.o
16obj-$(CONFIG_SMP) += smp.o 16obj-$(CONFIG_SMP) += smp.o
17obj-$(CONFIG_ARC_DW2_UNWIND) += unwind.o 17obj-$(CONFIG_ARC_DW2_UNWIND) += unwind.o
18obj-$(CONFIG_KPROBES) += kprobes.o 18obj-$(CONFIG_KPROBES) += kprobes.o
19obj-$(CONFIG_ARC_MISALIGN_ACCESS) += unaligned.o 19obj-$(CONFIG_ARC_EMUL_UNALIGNED) += unaligned.o
20obj-$(CONFIG_KGDB) += kgdb.o 20obj-$(CONFIG_KGDB) += kgdb.o
21obj-$(CONFIG_ARC_METAWARE_HLINK) += arc_hostlink.o 21obj-$(CONFIG_ARC_METAWARE_HLINK) += arc_hostlink.o
22obj-$(CONFIG_PERF_EVENTS) += perf_event.o 22obj-$(CONFIG_PERF_EVENTS) += perf_event.o
diff --git a/arch/arc/kernel/disasm.c b/arch/arc/kernel/disasm.c
index b8a549c4f540..3b7cd4864ba2 100644
--- a/arch/arc/kernel/disasm.c
+++ b/arch/arc/kernel/disasm.c
@@ -15,7 +15,7 @@
15#include <linux/uaccess.h> 15#include <linux/uaccess.h>
16#include <asm/disasm.h> 16#include <asm/disasm.h>
17 17
18#if defined(CONFIG_KGDB) || defined(CONFIG_ARC_MISALIGN_ACCESS) || \ 18#if defined(CONFIG_KGDB) || defined(CONFIG_ARC_EMUL_UNALIGNED) || \
19 defined(CONFIG_KPROBES) 19 defined(CONFIG_KPROBES)
20 20
21/* disasm_instr: Analyses instruction at addr, stores 21/* disasm_instr: Analyses instruction at addr, stores
@@ -535,4 +535,4 @@ int __kprobes disasm_next_pc(unsigned long pc, struct pt_regs *regs,
535 return instr.is_branch; 535 return instr.is_branch;
536} 536}
537 537
538#endif /* CONFIG_KGDB || CONFIG_ARC_MISALIGN_ACCESS || CONFIG_KPROBES */ 538#endif /* CONFIG_KGDB || CONFIG_ARC_EMUL_UNALIGNED || CONFIG_KPROBES */
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
index 4d2481bd8b98..b0e8666fdccc 100644
--- a/arch/arc/kernel/head.S
+++ b/arch/arc/kernel/head.S
@@ -91,16 +91,6 @@ stext:
91 st r0, [@uboot_tag] 91 st r0, [@uboot_tag]
92 st r2, [@uboot_arg] 92 st r2, [@uboot_arg]
93 93
94 ; Identify if running on ISS vs Silicon
95 ; IDENTITY Reg [ 3 2 1 0 ]
96 ; (chip-id) ^^^^^ ==> 0xffff for ISS
97 lr r0, [identity]
98 lsr r3, r0, 16
99 cmp r3, 0xffff
100 mov.z r4, 0
101 mov.nz r4, 1
102 st r4, [@running_on_hw]
103
104 ; setup "current" tsk and optionally cache it in dedicated r25 94 ; setup "current" tsk and optionally cache it in dedicated r25
105 mov r9, @init_task 95 mov r9, @init_task
106 SET_CURR_TASK_ON_CPU r9, r0 ; r9 = tsk, r0 = scratch 96 SET_CURR_TASK_ON_CPU r9, r0 ; r9 = tsk, r0 = scratch
diff --git a/arch/arc/kernel/kgdb.c b/arch/arc/kernel/kgdb.c
index a2ff5c5d1450..ecf6a7869375 100644
--- a/arch/arc/kernel/kgdb.c
+++ b/arch/arc/kernel/kgdb.c
@@ -158,11 +158,6 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
158 return -1; 158 return -1;
159} 159}
160 160
161unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs)
162{
163 return instruction_pointer(regs);
164}
165
166int kgdb_arch_init(void) 161int kgdb_arch_init(void)
167{ 162{
168 single_step_data.armed = 0; 163 single_step_data.armed = 0;
diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c
index b9a5685a990e..ae1c485cbc68 100644
--- a/arch/arc/kernel/perf_event.c
+++ b/arch/arc/kernel/perf_event.c
@@ -244,25 +244,23 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
244 pr_err("This core does not have performance counters!\n"); 244 pr_err("This core does not have performance counters!\n");
245 return -ENODEV; 245 return -ENODEV;
246 } 246 }
247 BUG_ON(pct_bcr.c > ARC_PMU_MAX_HWEVENTS);
247 248
248 arc_pmu = devm_kzalloc(&pdev->dev, sizeof(struct arc_pmu), 249 READ_BCR(ARC_REG_CC_BUILD, cc_bcr);
249 GFP_KERNEL); 250 if (!cc_bcr.v) {
251 pr_err("Performance counters exist, but no countable conditions?\n");
252 return -ENODEV;
253 }
254
255 arc_pmu = devm_kzalloc(&pdev->dev, sizeof(struct arc_pmu), GFP_KERNEL);
250 if (!arc_pmu) 256 if (!arc_pmu)
251 return -ENOMEM; 257 return -ENOMEM;
252 258
253 arc_pmu->n_counters = pct_bcr.c; 259 arc_pmu->n_counters = pct_bcr.c;
254 BUG_ON(arc_pmu->n_counters > ARC_PMU_MAX_HWEVENTS);
255
256 arc_pmu->counter_size = 32 + (pct_bcr.s << 4); 260 arc_pmu->counter_size = 32 + (pct_bcr.s << 4);
257 pr_info("ARC PMU found with %d counters of size %d bits\n",
258 arc_pmu->n_counters, arc_pmu->counter_size);
259
260 READ_BCR(ARC_REG_CC_BUILD, cc_bcr);
261
262 if (!cc_bcr.v)
263 pr_err("Strange! Performance counters exist, but no countable conditions?\n");
264 261
265 pr_info("ARC PMU has %d countable conditions\n", cc_bcr.c); 262 pr_info("ARC perf\t: %d counters (%d bits), %d countable conditions\n",
263 arc_pmu->n_counters, arc_pmu->counter_size, cc_bcr.c);
266 264
267 cc_name.str[8] = 0; 265 cc_name.str[8] = 0;
268 for (i = 0; i < PERF_COUNT_HW_MAX; i++) 266 for (i = 0; i < PERF_COUNT_HW_MAX; i++)
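
The perf_event.c hunk above reorders the probe: the countable-conditions BCR is now read and validated (and treated as a hard -ENODEV failure) before the pmu structure is allocated, so every early exit happens while there is still nothing to unwind. A rough userspace sketch of that check-then-allocate ordering; the names are illustrative and not the driver's API.

/* Validate every required capability first, allocate only afterwards,
 * so the rejection paths need no cleanup.  Purely illustrative. */
#include <stdio.h>
#include <stdlib.h>

struct pmu_state { int n_counters; };

static struct pmu_state *pmu_probe(int n_counters, int n_conditions)
{
    struct pmu_state *p;

    if (!n_counters || !n_conditions)    /* reject before any allocation */
        return NULL;

    p = calloc(1, sizeof(*p));
    if (p)
        p->n_counters = n_counters;
    return p;
}

int main(void)
{
    struct pmu_state *p = pmu_probe(4, 12);

    printf("probe %s\n", p ? "succeeded" : "failed");
    free(p);
    return 0;
}
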
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index 119dddb752b2..252bf603db9c 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -13,7 +13,9 @@
13#include <linux/console.h> 13#include <linux/console.h>
14#include <linux/module.h> 14#include <linux/module.h>
15#include <linux/cpu.h> 15#include <linux/cpu.h>
16#include <linux/clk-provider.h>
16#include <linux/of_fdt.h> 17#include <linux/of_fdt.h>
18#include <linux/of_platform.h>
17#include <linux/cache.h> 19#include <linux/cache.h>
18#include <asm/sections.h> 20#include <asm/sections.h>
19#include <asm/arcregs.h> 21#include <asm/arcregs.h>
@@ -24,11 +26,10 @@
24#include <asm/unwind.h> 26#include <asm/unwind.h>
25#include <asm/clk.h> 27#include <asm/clk.h>
26#include <asm/mach_desc.h> 28#include <asm/mach_desc.h>
29#include <asm/smp.h>
27 30
28#define FIX_PTR(x) __asm__ __volatile__(";" : "+r"(x)) 31#define FIX_PTR(x) __asm__ __volatile__(";" : "+r"(x))
29 32
30int running_on_hw = 1; /* vs. on ISS */
31
32/* Part of U-boot ABI: see head.S */ 33/* Part of U-boot ABI: see head.S */
33int __initdata uboot_tag; 34int __initdata uboot_tag;
34char __initdata *uboot_arg; 35char __initdata *uboot_arg;
@@ -42,26 +43,26 @@ struct cpuinfo_arc cpuinfo_arc700[NR_CPUS];
42static void read_arc_build_cfg_regs(void) 43static void read_arc_build_cfg_regs(void)
43{ 44{
44 struct bcr_perip uncached_space; 45 struct bcr_perip uncached_space;
46 struct bcr_generic bcr;
45 struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()]; 47 struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
46 FIX_PTR(cpu); 48 FIX_PTR(cpu);
47 49
48 READ_BCR(AUX_IDENTITY, cpu->core); 50 READ_BCR(AUX_IDENTITY, cpu->core);
51 READ_BCR(ARC_REG_ISA_CFG_BCR, cpu->isa);
49 52
50 cpu->timers = read_aux_reg(ARC_REG_TIMERS_BCR); 53 READ_BCR(ARC_REG_TIMERS_BCR, cpu->timers);
51 cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE); 54 cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE);
52 55
53 READ_BCR(ARC_REG_D_UNCACH_BCR, uncached_space); 56 READ_BCR(ARC_REG_D_UNCACH_BCR, uncached_space);
54 cpu->uncached_base = uncached_space.start << 24; 57 cpu->uncached_base = uncached_space.start << 24;
55 58
56 cpu->extn.mul = read_aux_reg(ARC_REG_MUL_BCR); 59 READ_BCR(ARC_REG_MUL_BCR, cpu->extn_mpy);
57 cpu->extn.swap = read_aux_reg(ARC_REG_SWAP_BCR);
58 cpu->extn.norm = read_aux_reg(ARC_REG_NORM_BCR);
59 cpu->extn.minmax = read_aux_reg(ARC_REG_MIXMAX_BCR);
60 cpu->extn.barrel = read_aux_reg(ARC_REG_BARREL_BCR);
61 READ_BCR(ARC_REG_MAC_BCR, cpu->extn_mac_mul);
62 60
63 cpu->extn.ext_arith = read_aux_reg(ARC_REG_EXTARITH_BCR); 61 cpu->extn.norm = read_aux_reg(ARC_REG_NORM_BCR) > 1 ? 1 : 0; /* 2,3 */
64 cpu->extn.crc = read_aux_reg(ARC_REG_CRC_BCR); 62 cpu->extn.barrel = read_aux_reg(ARC_REG_BARREL_BCR) > 1 ? 1 : 0; /* 2,3 */
63 cpu->extn.swap = read_aux_reg(ARC_REG_SWAP_BCR) ? 1 : 0; /* 1,3 */
64 cpu->extn.crc = read_aux_reg(ARC_REG_CRC_BCR) ? 1 : 0;
65 cpu->extn.minmax = read_aux_reg(ARC_REG_MIXMAX_BCR) > 1 ? 1 : 0; /* 2 */
65 66
66 /* Note that we read the CCM BCRs independent of kernel config 67 /* Note that we read the CCM BCRs independent of kernel config
67 * This is to catch the cases where user doesn't know that 68 * This is to catch the cases where user doesn't know that
@@ -95,43 +96,76 @@ static void read_arc_build_cfg_regs(void)
95 read_decode_mmu_bcr(); 96 read_decode_mmu_bcr();
96 read_decode_cache_bcr(); 97 read_decode_cache_bcr();
97 98
98 READ_BCR(ARC_REG_FP_BCR, cpu->fp); 99 {
99 READ_BCR(ARC_REG_DPFP_BCR, cpu->dpfp); 100 struct bcr_fp_arcompact sp, dp;
101 struct bcr_bpu_arcompact bpu;
102
103 READ_BCR(ARC_REG_FP_BCR, sp);
104 READ_BCR(ARC_REG_DPFP_BCR, dp);
105 cpu->extn.fpu_sp = sp.ver ? 1 : 0;
106 cpu->extn.fpu_dp = dp.ver ? 1 : 0;
107
108 READ_BCR(ARC_REG_BPU_BCR, bpu);
109 cpu->bpu.ver = bpu.ver;
110 cpu->bpu.full = bpu.fam ? 1 : 0;
111 if (bpu.ent) {
112 cpu->bpu.num_cache = 256 << (bpu.ent - 1);
113 cpu->bpu.num_pred = 256 << (bpu.ent - 1);
114 }
115 }
116
117 READ_BCR(ARC_REG_AP_BCR, bcr);
118 cpu->extn.ap = bcr.ver ? 1 : 0;
119
120 READ_BCR(ARC_REG_SMART_BCR, bcr);
121 cpu->extn.smart = bcr.ver ? 1 : 0;
122
123 cpu->extn.debug = cpu->extn.ap | cpu->extn.smart;
100} 124}
101 125
102static const struct cpuinfo_data arc_cpu_tbl[] = { 126static const struct cpuinfo_data arc_cpu_tbl[] = {
103 { {0x10, "ARCTangent A5"}, 0x1F},
104 { {0x20, "ARC 600" }, 0x2F}, 127 { {0x20, "ARC 600" }, 0x2F},
105 { {0x30, "ARC 700" }, 0x33}, 128 { {0x30, "ARC 700" }, 0x33},
106 { {0x34, "ARC 700 R4.10"}, 0x34}, 129 { {0x34, "ARC 700 R4.10"}, 0x34},
130 { {0x35, "ARC 700 R4.11"}, 0x35},
107 { {0x00, NULL } } 131 { {0x00, NULL } }
108}; 132};
109 133
134#define IS_AVAIL1(v, str) ((v) ? str : "")
135#define IS_USED(cfg) (IS_ENABLED(cfg) ? "" : "(not used) ")
136#define IS_AVAIL2(v, str, cfg) IS_AVAIL1(v, str), IS_AVAIL1(v, IS_USED(cfg))
137
110static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len) 138static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
111{ 139{
112 int n = 0;
113 struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id]; 140 struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];
114 struct bcr_identity *core = &cpu->core; 141 struct bcr_identity *core = &cpu->core;
115 const struct cpuinfo_data *tbl; 142 const struct cpuinfo_data *tbl;
116 int be = 0; 143 char *isa_nm;
117#ifdef CONFIG_CPU_BIG_ENDIAN 144 int i, be, atomic;
118 be = 1; 145 int n = 0;
119#endif 146
120 FIX_PTR(cpu); 147 FIX_PTR(cpu);
121 148
149 {
150 isa_nm = "ARCompact";
151 be = IS_ENABLED(CONFIG_CPU_BIG_ENDIAN);
152
153 atomic = cpu->isa.atomic1;
154 if (!cpu->isa.ver) /* ISA BCR absent, use Kconfig info */
155 atomic = IS_ENABLED(CONFIG_ARC_HAS_LLSC);
156 }
157
122 n += scnprintf(buf + n, len - n, 158 n += scnprintf(buf + n, len - n,
123 "\nARC IDENTITY\t: Family [%#02x]" 159 "\nIDENTITY\t: ARCVER [%#02x] ARCNUM [%#02x] CHIPID [%#4x]\n",
124 " Cpu-id [%#02x] Chip-id [%#4x]\n", 160 core->family, core->cpu_id, core->chip_id);
125 core->family, core->cpu_id,
126 core->chip_id);
127 161
128 for (tbl = &arc_cpu_tbl[0]; tbl->info.id != 0; tbl++) { 162 for (tbl = &arc_cpu_tbl[0]; tbl->info.id != 0; tbl++) {
129 if ((core->family >= tbl->info.id) && 163 if ((core->family >= tbl->info.id) &&
130 (core->family <= tbl->up_range)) { 164 (core->family <= tbl->up_range)) {
131 n += scnprintf(buf + n, len - n, 165 n += scnprintf(buf + n, len - n,
132 "processor\t: %s %s\n", 166 "processor [%d]\t: %s (%s ISA) %s\n",
133 tbl->info.str, 167 cpu_id, tbl->info.str, isa_nm,
134 be ? "[Big Endian]" : ""); 168 IS_AVAIL1(be, "[Big-Endian]"));
135 break; 169 break;
136 } 170 }
137 } 171 }
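
The hunk above also introduces the IS_AVAIL1/IS_AVAIL2/IS_USED helpers used by the rewritten arc_cpu_mumbojumbo(): the trick is that IS_AVAIL2 expands to two comma-separated arguments, so a single "%s%s" pair in the format string prints both the feature name and its "(not used)" tag. A userspace sketch of that expansion, with IS_ENABLED() replaced by a plain flag for illustration:

/* IS_AVAIL2 expands to two printf arguments: the feature name and,
 * when the feature is present but not configured in, "(not used) ". */
#include <stdio.h>

#define IS_AVAIL1(v, str)      ((v) ? (str) : "")
#define IS_USED(on)            ((on) ? "" : "(not used) ")
#define IS_AVAIL2(v, str, on)  IS_AVAIL1(v, str), IS_AVAIL1(v, IS_USED(on))

int main(void)
{
    int have_atomic = 1;     /* hardware reports the extension */
    int config_llsc = 0;     /* ...but the kernel config leaves it unused */

    printf("ISA Extn\t: %s%s\n",
           IS_AVAIL2(have_atomic, "atomic ", config_llsc));
    /* prints: "ISA Extn      : atomic (not used) " */
    return 0;
}
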
@@ -143,34 +177,35 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
143 (unsigned int)(arc_get_core_freq() / 1000000), 177 (unsigned int)(arc_get_core_freq() / 1000000),
144 (unsigned int)(arc_get_core_freq() / 10000) % 100); 178 (unsigned int)(arc_get_core_freq() / 10000) % 100);
145 179
146 n += scnprintf(buf + n, len - n, "Timers\t\t: %s %s\n", 180 n += scnprintf(buf + n, len - n, "Timers\t\t: %s%s%s%s\nISA Extn\t: ",
147 (cpu->timers & 0x200) ? "TIMER1" : "", 181 IS_AVAIL1(cpu->timers.t0, "Timer0 "),
148 (cpu->timers & 0x100) ? "TIMER0" : ""); 182 IS_AVAIL1(cpu->timers.t1, "Timer1 "),
183 IS_AVAIL2(cpu->timers.rtsc, "64-bit RTSC ", CONFIG_ARC_HAS_RTSC));
149 184
150 n += scnprintf(buf + n, len - n, "Vect Tbl Base\t: %#x\n", 185 n += i = scnprintf(buf + n, len - n, "%s%s",
151 cpu->vec_base); 186 IS_AVAIL2(atomic, "atomic ", CONFIG_ARC_HAS_LLSC));
152 187
153 n += scnprintf(buf + n, len - n, "UNCACHED Base\t: %#x\n", 188 if (i)
154 cpu->uncached_base); 189 n += scnprintf(buf + n, len - n, "\n\t\t: ");
155 190
156 return buf; 191 n += scnprintf(buf + n, len - n, "%s%s%s%s%s%s%s%s\n",
157} 192 IS_AVAIL1(cpu->extn_mpy.ver, "mpy "),
193 IS_AVAIL1(cpu->extn.norm, "norm "),
194 IS_AVAIL1(cpu->extn.barrel, "barrel-shift "),
195 IS_AVAIL1(cpu->extn.swap, "swap "),
196 IS_AVAIL1(cpu->extn.minmax, "minmax "),
197 IS_AVAIL1(cpu->extn.crc, "crc "),
198 IS_AVAIL2(1, "swape", CONFIG_ARC_HAS_SWAPE));
158 199
159static const struct id_to_str mul_type_nm[] = { 200 if (cpu->bpu.ver)
160 { 0x0, "N/A"}, 201 n += scnprintf(buf + n, len - n,
161 { 0x1, "32x32 (spl Result Reg)" }, 202 "BPU\t\t: %s%s match, cache:%d, Predict Table:%d\n",
162 { 0x2, "32x32 (ANY Result Reg)" } 203 IS_AVAIL1(cpu->bpu.full, "full"),
163}; 204 IS_AVAIL1(!cpu->bpu.full, "partial"),
205 cpu->bpu.num_cache, cpu->bpu.num_pred);
164 206
165static const struct id_to_str mac_mul_nm[] = { 207 return buf;
166 {0x0, "N/A"}, 208}
167 {0x1, "N/A"},
168 {0x2, "Dual 16 x 16"},
169 {0x3, "N/A"},
170 {0x4, "32x16"},
171 {0x5, "N/A"},
172 {0x6, "Dual 16x16 and 32x16"}
173};
174 209
175static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len) 210static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
176{ 211{
@@ -178,67 +213,46 @@ static char *arc_extn_mumbojumbo(int cpu_id, char *buf, int len)
178 struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id]; 213 struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];
179 214
180 FIX_PTR(cpu); 215 FIX_PTR(cpu);
181#define IS_AVAIL1(var, str) ((var) ? str : "")
182#define IS_AVAIL2(var, str) ((var == 0x2) ? str : "")
183#define IS_USED(cfg) (IS_ENABLED(cfg) ? "(in-use)" : "(not used)")
184 216
185 n += scnprintf(buf + n, len - n, 217 n += scnprintf(buf + n, len - n,
186 "Extn [700-Base]\t: %s %s %s %s %s %s\n", 218 "Vector Table\t: %#x\nUncached Base\t: %#x\n",
187 IS_AVAIL2(cpu->extn.norm, "norm,"), 219 cpu->vec_base, cpu->uncached_base);
188 IS_AVAIL2(cpu->extn.barrel, "barrel-shift,"), 220
189 IS_AVAIL1(cpu->extn.swap, "swap,"), 221 if (cpu->extn.fpu_sp || cpu->extn.fpu_dp)
190 IS_AVAIL2(cpu->extn.minmax, "minmax,"), 222 n += scnprintf(buf + n, len - n, "FPU\t\t: %s%s\n",
191 IS_AVAIL1(cpu->extn.crc, "crc,"), 223 IS_AVAIL1(cpu->extn.fpu_sp, "SP "),
192 IS_AVAIL2(cpu->extn.ext_arith, "ext-arith")); 224 IS_AVAIL1(cpu->extn.fpu_dp, "DP "));
193 225
194 n += scnprintf(buf + n, len - n, "Extn [700-MPY]\t: %s", 226 if (cpu->extn.debug)
195 mul_type_nm[cpu->extn.mul].str); 227 n += scnprintf(buf + n, len - n, "DEBUG\t\t: %s%s%s\n",
196 228 IS_AVAIL1(cpu->extn.ap, "ActionPoint "),
197 n += scnprintf(buf + n, len - n, " MAC MPY: %s\n", 229 IS_AVAIL1(cpu->extn.smart, "smaRT "),
198 mac_mul_nm[cpu->extn_mac_mul.type].str); 230 IS_AVAIL1(cpu->extn.rtt, "RTT "));
199 231
200 if (cpu->core.family == 0x34) { 232 if (cpu->dccm.sz || cpu->iccm.sz)
201 n += scnprintf(buf + n, len - n, 233 n += scnprintf(buf + n, len - n, "Extn [CCM]\t: DCCM @ %x, %d KB / ICCM: @ %x, %d KB\n",
202 "Extn [700-4.10]\t: LLOCK/SCOND %s, SWAPE %s, RTSC %s\n", 234 cpu->dccm.base_addr, TO_KB(cpu->dccm.sz),
203 IS_USED(CONFIG_ARC_HAS_LLSC),
204 IS_USED(CONFIG_ARC_HAS_SWAPE),
205 IS_USED(CONFIG_ARC_HAS_RTSC));
206 }
207
208 n += scnprintf(buf + n, len - n, "Extn [CCM]\t: %s",
209 !(cpu->dccm.sz || cpu->iccm.sz) ? "N/A" : "");
210
211 if (cpu->dccm.sz)
212 n += scnprintf(buf + n, len - n, "DCCM: @ %x, %d KB ",
213 cpu->dccm.base_addr, TO_KB(cpu->dccm.sz));
214
215 if (cpu->iccm.sz)
216 n += scnprintf(buf + n, len - n, "ICCM: @ %x, %d KB",
217 cpu->iccm.base_addr, TO_KB(cpu->iccm.sz)); 235 cpu->iccm.base_addr, TO_KB(cpu->iccm.sz));
218 236
219 n += scnprintf(buf + n, len - n, "\nExtn [FPU]\t: %s",
220 !(cpu->fp.ver || cpu->dpfp.ver) ? "N/A" : "");
221
222 if (cpu->fp.ver)
223 n += scnprintf(buf + n, len - n, "SP [v%d] %s",
224 cpu->fp.ver, cpu->fp.fast ? "(fast)" : "");
225
226 if (cpu->dpfp.ver)
227 n += scnprintf(buf + n, len - n, "DP [v%d] %s",
228 cpu->dpfp.ver, cpu->dpfp.fast ? "(fast)" : "");
229
230 n += scnprintf(buf + n, len - n, "\n");
231
232 n += scnprintf(buf + n, len - n, 237 n += scnprintf(buf + n, len - n,
233 "OS ABI [v3]\t: no-legacy-syscalls\n"); 238 "OS ABI [v3]\t: no-legacy-syscalls\n");
234 239
235 return buf; 240 return buf;
236} 241}
237 242
238static void arc_chk_ccms(void) 243static void arc_chk_core_config(void)
239{ 244{
240#if defined(CONFIG_ARC_HAS_DCCM) || defined(CONFIG_ARC_HAS_ICCM)
241 struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()]; 245 struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
246 int fpu_enabled;
247
248 if (!cpu->timers.t0)
249 panic("Timer0 is not present!\n");
250
251 if (!cpu->timers.t1)
252 panic("Timer1 is not present!\n");
253
254 if (IS_ENABLED(CONFIG_ARC_HAS_RTSC) && !cpu->timers.rtsc)
255 panic("RTSC is not present\n");
242 256
243#ifdef CONFIG_ARC_HAS_DCCM 257#ifdef CONFIG_ARC_HAS_DCCM
244 /* 258 /*
@@ -256,33 +270,20 @@ static void arc_chk_ccms(void)
256 if (CONFIG_ARC_ICCM_SZ != cpu->iccm.sz) 270 if (CONFIG_ARC_ICCM_SZ != cpu->iccm.sz)
257 panic("Linux built with incorrect ICCM Size\n"); 271 panic("Linux built with incorrect ICCM Size\n");
258#endif 272#endif
259#endif
260}
261 273
262/* 274 /*
263 * Ensure that FP hardware and kernel config match 275 * FP hardware/software config sanity
264 * -If hardware contains DPFP, kernel needs to save/restore FPU state 276 * -If hardware contains DPFP, kernel needs to save/restore FPU state
265 across context switches 277 -If not, it will crash trying to save/restore the non-existent regs
266 * -If hardware lacks DPFP, but kernel configured to save FPU state then 278 *
267 * kernel trying to access non-existant DPFP regs will crash 279 * (only DPDP checked since SP has no arch visible regs)
268 * 280 */
269 * We only check for Dbl precision Floating Point, because only DPFP 281 fpu_enabled = IS_ENABLED(CONFIG_ARC_FPU_SAVE_RESTORE);
270 * hardware has dedicated regs which need to be saved/restored on ctx-sw
271 * (Single Precision uses core regs), thus kernel is kind of oblivious to it
272 */
273static void arc_chk_fpu(void)
274{
275 struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
276 282
277 if (cpu->dpfp.ver) { 283 if (cpu->extn.fpu_dp && !fpu_enabled)
278#ifndef CONFIG_ARC_FPU_SAVE_RESTORE 284 pr_warn("CONFIG_ARC_FPU_SAVE_RESTORE needed for working apps\n");
279 pr_warn("DPFP support broken in this kernel...\n"); 285 else if (!cpu->extn.fpu_dp && fpu_enabled)
280#endif 286 panic("FPU non-existent, disable CONFIG_ARC_FPU_SAVE_RESTORE\n");
281 } else {
282#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
283 panic("H/w lacks DPFP support, apps won't work\n");
284#endif
285 }
286} 287}
287 288
288/* 289/*
@@ -303,15 +304,11 @@ void setup_processor(void)
303 304
304 arc_mmu_init(); 305 arc_mmu_init();
305 arc_cache_init(); 306 arc_cache_init();
306 arc_chk_ccms();
307 307
308 printk(arc_extn_mumbojumbo(cpu_id, str, sizeof(str))); 308 printk(arc_extn_mumbojumbo(cpu_id, str, sizeof(str)));
309
310#ifdef CONFIG_SMP
311 printk(arc_platform_smp_cpuinfo()); 309 printk(arc_platform_smp_cpuinfo());
312#endif
313 310
314 arc_chk_fpu(); 311 arc_chk_core_config();
315} 312}
316 313
317static inline int is_kernel(unsigned long addr) 314static inline int is_kernel(unsigned long addr)
@@ -360,11 +357,7 @@ void __init setup_arch(char **cmdline_p)
360 machine_desc->init_early(); 357 machine_desc->init_early();
361 358
362 setup_processor(); 359 setup_processor();
363
364#ifdef CONFIG_SMP
365 smp_init_cpus(); 360 smp_init_cpus();
366#endif
367
368 setup_arch_memory(); 361 setup_arch_memory();
369 362
370 /* copy flat DT out of .init and then unflatten it */ 363 /* copy flat DT out of .init and then unflatten it */
@@ -385,7 +378,13 @@ void __init setup_arch(char **cmdline_p)
385 378
386static int __init customize_machine(void) 379static int __init customize_machine(void)
387{ 380{
388 /* Add platform devices */ 381 of_clk_init(NULL);
382 /*
383 * Traverses flattened DeviceTree - registering platform devices
384 * (if any) complete with their resources
385 */
386 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
387
389 if (machine_desc->init_machine) 388 if (machine_desc->init_machine)
390 machine_desc->init_machine(); 389 machine_desc->init_machine();
391 390
@@ -419,19 +418,14 @@ static int show_cpuinfo(struct seq_file *m, void *v)
419 418
420 seq_printf(m, arc_cpu_mumbojumbo(cpu_id, str, PAGE_SIZE)); 419 seq_printf(m, arc_cpu_mumbojumbo(cpu_id, str, PAGE_SIZE));
421 420
422 seq_printf(m, "Bogo MIPS : \t%lu.%02lu\n", 421 seq_printf(m, "Bogo MIPS\t: %lu.%02lu\n",
423 loops_per_jiffy / (500000 / HZ), 422 loops_per_jiffy / (500000 / HZ),
424 (loops_per_jiffy / (5000 / HZ)) % 100); 423 (loops_per_jiffy / (5000 / HZ)) % 100);
425 424
426 seq_printf(m, arc_mmu_mumbojumbo(cpu_id, str, PAGE_SIZE)); 425 seq_printf(m, arc_mmu_mumbojumbo(cpu_id, str, PAGE_SIZE));
427
428 seq_printf(m, arc_cache_mumbojumbo(cpu_id, str, PAGE_SIZE)); 426 seq_printf(m, arc_cache_mumbojumbo(cpu_id, str, PAGE_SIZE));
429
430 seq_printf(m, arc_extn_mumbojumbo(cpu_id, str, PAGE_SIZE)); 427 seq_printf(m, arc_extn_mumbojumbo(cpu_id, str, PAGE_SIZE));
431
432#ifdef CONFIG_SMP
433 seq_printf(m, arc_platform_smp_cpuinfo()); 428 seq_printf(m, arc_platform_smp_cpuinfo());
434#endif
435 429
436 free_page((unsigned long)str); 430 free_page((unsigned long)str);
437done: 431done:
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index dcd317c47d09..d01df0c517a2 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -101,7 +101,7 @@ void __weak arc_platform_smp_wait_to_boot(int cpu)
101 101
102const char *arc_platform_smp_cpuinfo(void) 102const char *arc_platform_smp_cpuinfo(void)
103{ 103{
104 return plat_smp_ops.info; 104 return plat_smp_ops.info ? : "";
105} 105}
106 106
107/* 107/*
diff --git a/arch/arc/mm/cache_arc700.c b/arch/arc/mm/cache_arc700.c
index 9e1142729fd1..8c3a3e02ba92 100644
--- a/arch/arc/mm/cache_arc700.c
+++ b/arch/arc/mm/cache_arc700.c
@@ -530,16 +530,9 @@ EXPORT_SYMBOL(dma_cache_wback);
530 */ 530 */
531void flush_icache_range(unsigned long kstart, unsigned long kend) 531void flush_icache_range(unsigned long kstart, unsigned long kend)
532{ 532{
533 unsigned int tot_sz, off, sz; 533 unsigned int tot_sz;
534 unsigned long phy, pfn;
535 534
536 /* printk("Kernel Cache Cohenercy: %lx to %lx\n",kstart, kend); */ 535 WARN(kstart < TASK_SIZE, "%s() can't handle user vaddr", __func__);
537
538 /* This is not the right API for user virtual address */
539 if (kstart < TASK_SIZE) {
540 BUG_ON("Flush icache range for user virtual addr space");
541 return;
542 }
543 536
544 /* Shortcut for bigger flush ranges. 537 /* Shortcut for bigger flush ranges.
545 * Here we don't care if this was kernel virtual or phy addr 538 * Here we don't care if this was kernel virtual or phy addr
@@ -572,6 +565,9 @@ void flush_icache_range(unsigned long kstart, unsigned long kend)
572 * straddles across 2 virtual pages and hence need for loop 565 * straddles across 2 virtual pages and hence need for loop
573 */ 566 */
574 while (tot_sz > 0) { 567 while (tot_sz > 0) {
568 unsigned int off, sz;
569 unsigned long phy, pfn;
570
575 off = kstart % PAGE_SIZE; 571 off = kstart % PAGE_SIZE;
576 pfn = vmalloc_to_pfn((void *)kstart); 572 pfn = vmalloc_to_pfn((void *)kstart);
577 phy = (pfn << PAGE_SHIFT) + off; 573 phy = (pfn << PAGE_SHIFT) + off;
diff --git a/arch/arc/mm/tlb.c b/arch/arc/mm/tlb.c
index e1acf0ce5647..7f47d2a56f44 100644
--- a/arch/arc/mm/tlb.c
+++ b/arch/arc/mm/tlb.c
@@ -609,14 +609,12 @@ char *arc_mmu_mumbojumbo(int cpu_id, char *buf, int len)
609 int n = 0; 609 int n = 0;
610 struct cpuinfo_arc_mmu *p_mmu = &cpuinfo_arc700[cpu_id].mmu; 610 struct cpuinfo_arc_mmu *p_mmu = &cpuinfo_arc700[cpu_id].mmu;
611 611
612 n += scnprintf(buf + n, len - n, "ARC700 MMU [v%x]\t: %dk PAGE, ",
613 p_mmu->ver, TO_KB(p_mmu->pg_sz));
614
615 n += scnprintf(buf + n, len - n, 612 n += scnprintf(buf + n, len - n,
616 "J-TLB %d (%dx%d), uDTLB %d, uITLB %d, %s\n", 613 "MMU [v%x]\t: %dk PAGE, JTLB %d (%dx%d), uDTLB %d, uITLB %d %s\n",
614 p_mmu->ver, TO_KB(p_mmu->pg_sz),
617 p_mmu->num_tlb, p_mmu->sets, p_mmu->ways, 615 p_mmu->num_tlb, p_mmu->sets, p_mmu->ways,
618 p_mmu->u_dtlb, p_mmu->u_itlb, 616 p_mmu->u_dtlb, p_mmu->u_itlb,
619 IS_ENABLED(CONFIG_ARC_MMU_SASID) ? "SASID" : ""); 617 IS_ENABLED(CONFIG_ARC_MMU_SASID) ? ",SASID" : "");
620 618
621 return buf; 619 return buf;
622} 620}
diff --git a/arch/arc/plat-arcfpga/Kconfig b/arch/arc/plat-arcfpga/Kconfig
index b9f34cf55acf..217593a70751 100644
--- a/arch/arc/plat-arcfpga/Kconfig
+++ b/arch/arc/plat-arcfpga/Kconfig
@@ -8,7 +8,7 @@
8 8
9menuconfig ARC_PLAT_FPGA_LEGACY 9menuconfig ARC_PLAT_FPGA_LEGACY
10 bool "\"Legacy\" ARC FPGA dev Boards" 10 bool "\"Legacy\" ARC FPGA dev Boards"
11 select ISS_SMP_EXTN if SMP 11 select ARC_HAS_COH_CACHES if SMP
12 help 12 help
13 Support for ARC development boards, provided by Synopsys. 13 Support for ARC development boards, provided by Synopsys.
14 These are based on FPGA or ISS. e.g. 14 These are based on FPGA or ISS. e.g.
@@ -18,17 +18,6 @@ menuconfig ARC_PLAT_FPGA_LEGACY
18 18
19if ARC_PLAT_FPGA_LEGACY 19if ARC_PLAT_FPGA_LEGACY
20 20
21config ARC_BOARD_ANGEL4
22 bool "ARC Angel4"
23 default y
24 help
25 ARC Angel4 FPGA Ref Platform (Xilinx Virtex Based)
26
27config ARC_BOARD_ML509
28 bool "ML509"
29 help
30 ARC ML509 FPGA Ref Platform (Xilinx Virtex-5 Based)
31
32config ISS_SMP_EXTN 21config ISS_SMP_EXTN
33 bool "ARC SMP Extensions (ISS Models only)" 22 bool "ARC SMP Extensions (ISS Models only)"
34 default n 23 default n
diff --git a/arch/arc/plat-arcfpga/include/plat/irq.h b/arch/arc/plat-arcfpga/include/plat/irq.h
deleted file mode 100644
index 2c9dea690ac4..000000000000
--- a/arch/arc/plat-arcfpga/include/plat/irq.h
+++ /dev/null
@@ -1,27 +0,0 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * vineetg: Feb 2009
9 * -For AA4 board, IRQ assignments to peripherals
10 */
11
12#ifndef __PLAT_IRQ_H
13#define __PLAT_IRQ_H
14
15#define UART0_IRQ 5
16#define UART1_IRQ 10
17#define UART2_IRQ 11
18
19#define IDE_IRQ 13
20#define PCI_IRQ 14
21#define PS2_IRQ 15
22
23#ifdef CONFIG_SMP
24#define IDU_INTERRUPT_0 16
25#endif
26
27#endif
diff --git a/arch/arc/plat-arcfpga/include/plat/memmap.h b/arch/arc/plat-arcfpga/include/plat/memmap.h
deleted file mode 100644
index 5c78e6135a1f..000000000000
--- a/arch/arc/plat-arcfpga/include/plat/memmap.h
+++ /dev/null
@@ -1,29 +0,0 @@
1/*
2 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * vineetg: Feb 2009
9 * -For AA4 board, System Memory Map for Peripherals etc
10 */
11
12#ifndef __PLAT_MEMMAP_H
13#define __PLAT_MEMMAP_H
14
15#define UART0_BASE 0xC0FC1000
16#define UART1_BASE 0xC0FC1100
17
18#define IDE_CONTROLLER_BASE 0xC0FC9000
19
20#define AHB_PCI_HOST_BRG_BASE 0xC0FD0000
21
22#define PGU_BASEADDR 0xC0FC8000
23#define VLCK_ADDR 0xC0FCF028
24
25#define BVCI_LAT_UNIT_BASE 0xC0FED000
26
27#define PS2_BASE_ADDR 0xC0FCC000
28
29#endif
diff --git a/arch/arc/plat-arcfpga/platform.c b/arch/arc/plat-arcfpga/platform.c
index 1038949a99a1..afc88254acc1 100644
--- a/arch/arc/plat-arcfpga/platform.c
+++ b/arch/arc/plat-arcfpga/platform.c
@@ -8,37 +8,9 @@
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10 10
11#include <linux/types.h>
12#include <linux/init.h> 11#include <linux/init.h>
13#include <linux/device.h>
14#include <linux/platform_device.h>
15#include <linux/io.h>
16#include <linux/console.h>
17#include <linux/of_platform.h>
18#include <asm/setup.h>
19#include <asm/clk.h>
20#include <asm/mach_desc.h> 12#include <asm/mach_desc.h>
21#include <plat/memmap.h>
22#include <plat/smp.h> 13#include <plat/smp.h>
23#include <plat/irq.h>
24
25static void __init plat_fpga_early_init(void)
26{
27 pr_info("[plat-arcfpga]: registering early dev resources\n");
28
29#ifdef CONFIG_ISS_SMP_EXTN
30 iss_model_init_early_smp();
31#endif
32}
33
34static void __init plat_fpga_populate_dev(void)
35{
36 /*
37 * Traverses flattened DeviceTree - registering platform devices
38 * (if any) complete with their resources
39 */
40 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
41}
42 14
43/*----------------------- Machine Descriptions ------------------------------ 15/*----------------------- Machine Descriptions ------------------------------
44 * 16 *
@@ -48,41 +20,26 @@ static void __init plat_fpga_populate_dev(void)
48 * callback set, by matching the DT compatible name. 20 * callback set, by matching the DT compatible name.
49 */ 21 */
50 22
51static const char *aa4_compat[] __initconst = { 23static const char *legacy_fpga_compat[] __initconst = {
52 "snps,arc-angel4", 24 "snps,arc-angel4",
53 NULL,
54};
55
56MACHINE_START(ANGEL4, "angel4")
57 .dt_compat = aa4_compat,
58 .init_early = plat_fpga_early_init,
59 .init_machine = plat_fpga_populate_dev,
60#ifdef CONFIG_ISS_SMP_EXTN
61 .init_smp = iss_model_init_smp,
62#endif
63MACHINE_END
64
65static const char *ml509_compat[] __initconst = {
66 "snps,arc-ml509", 25 "snps,arc-ml509",
67 NULL, 26 NULL,
68}; 27};
69 28
70MACHINE_START(ML509, "ml509") 29MACHINE_START(LEGACY_FPGA, "legacy_fpga")
71 .dt_compat = ml509_compat, 30 .dt_compat = legacy_fpga_compat,
72 .init_early = plat_fpga_early_init, 31#ifdef CONFIG_ISS_SMP_EXTN
73 .init_machine = plat_fpga_populate_dev, 32 .init_early = iss_model_init_early_smp,
74#ifdef CONFIG_SMP
75 .init_smp = iss_model_init_smp, 33 .init_smp = iss_model_init_smp,
76#endif 34#endif
77MACHINE_END 35MACHINE_END
78 36
79static const char *nsimosci_compat[] __initconst = { 37static const char *simulation_compat[] __initconst = {
38 "snps,nsim",
80 "snps,nsimosci", 39 "snps,nsimosci",
81 NULL, 40 NULL,
82}; 41};
83 42
84MACHINE_START(NSIMOSCI, "nsimosci") 43MACHINE_START(SIMULATION, "simulation")
85 .dt_compat = nsimosci_compat, 44 .dt_compat = simulation_compat,
86 .init_early = NULL,
87 .init_machine = plat_fpga_populate_dev,
88MACHINE_END 45MACHINE_END
diff --git a/arch/arc/plat-arcfpga/smp.c b/arch/arc/plat-arcfpga/smp.c
index 92bad9122077..64797ba3bbe3 100644
--- a/arch/arc/plat-arcfpga/smp.c
+++ b/arch/arc/plat-arcfpga/smp.c
@@ -13,9 +13,10 @@
13 13
14#include <linux/smp.h> 14#include <linux/smp.h>
15#include <linux/irq.h> 15#include <linux/irq.h>
16#include <plat/irq.h>
17#include <plat/smp.h> 16#include <plat/smp.h>
18 17
18#define IDU_INTERRUPT_0 16
19
19static char smp_cpuinfo_buf[128]; 20static char smp_cpuinfo_buf[128];
20 21
21/* 22/*
diff --git a/arch/arc/plat-tb10x/Kconfig b/arch/arc/plat-tb10x/Kconfig
index 6994c188dc88..d14b3d3c5dfd 100644
--- a/arch/arc/plat-tb10x/Kconfig
+++ b/arch/arc/plat-tb10x/Kconfig
@@ -18,7 +18,6 @@
18 18
19menuconfig ARC_PLAT_TB10X 19menuconfig ARC_PLAT_TB10X
20 bool "Abilis TB10x" 20 bool "Abilis TB10x"
21 select COMMON_CLK
22 select PINCTRL 21 select PINCTRL
23 select PINCTRL_TB10X 22 select PINCTRL_TB10X
24 select PINMUX 23 select PINMUX
diff --git a/arch/arc/plat-tb10x/tb10x.c b/arch/arc/plat-tb10x/tb10x.c
index 06cb30929460..da0ac0960a4b 100644
--- a/arch/arc/plat-tb10x/tb10x.c
+++ b/arch/arc/plat-tb10x/tb10x.c
@@ -19,21 +19,9 @@
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */ 20 */
21 21
22
23#include <linux/init.h> 22#include <linux/init.h>
24#include <linux/of_platform.h>
25#include <linux/clk-provider.h>
26#include <linux/pinctrl/consumer.h>
27
28#include <asm/mach_desc.h> 23#include <asm/mach_desc.h>
29 24
30
31static void __init tb10x_platform_init(void)
32{
33 of_clk_init(NULL);
34 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
35}
36
37static const char *tb10x_compat[] __initdata = { 25static const char *tb10x_compat[] __initdata = {
38 "abilis,arc-tb10x", 26 "abilis,arc-tb10x",
39 NULL, 27 NULL,
@@ -41,5 +29,4 @@ static const char *tb10x_compat[] __initdata = {
41 29
42MACHINE_START(TB10x, "tb10x") 30MACHINE_START(TB10x, "tb10x")
43 .dt_compat = tb10x_compat, 31 .dt_compat = tb10x_compat,
44 .init_machine = tb10x_platform_init,
45MACHINE_END 32MACHINE_END
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 03dc4c1a8736..d8f6a2ec3d4e 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -1187,7 +1187,7 @@ config DEBUG_UART_VIRT
1187 default 0xf1c28000 if DEBUG_SUNXI_UART0 1187 default 0xf1c28000 if DEBUG_SUNXI_UART0
1188 default 0xf1c28400 if DEBUG_SUNXI_UART1 1188 default 0xf1c28400 if DEBUG_SUNXI_UART1
1189 default 0xf1f02800 if DEBUG_SUNXI_R_UART 1189 default 0xf1f02800 if DEBUG_SUNXI_R_UART
1190 default 0xf2100000 if DEBUG_PXA_UART1 1190 default 0xf6200000 if DEBUG_PXA_UART1
1191 default 0xf4090000 if ARCH_LPC32XX 1191 default 0xf4090000 if ARCH_LPC32XX
1192 default 0xf4200000 if ARCH_GEMINI 1192 default 0xf4200000 if ARCH_GEMINI
1193 default 0xf7000000 if DEBUG_S3C24XX_UART && (DEBUG_S3C_UART0 || \ 1193 default 0xf7000000 if DEBUG_S3C24XX_UART && (DEBUG_S3C_UART0 || \
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index 413fd94b5301..68be9017593d 100644
--- a/arch/arm/boot/compressed/head.S
+++ b/arch/arm/boot/compressed/head.S
@@ -397,8 +397,7 @@ dtb_check_done:
397 add sp, sp, r6 397 add sp, sp, r6
398#endif 398#endif
399 399
400 tst r4, #1 400 bl cache_clean_flush
401 bleq cache_clean_flush
402 401
403 adr r0, BSYM(restart) 402 adr r0, BSYM(restart)
404 add r0, r0, r6 403 add r0, r0, r6
@@ -1047,6 +1046,8 @@ cache_clean_flush:
1047 b call_cache_fn 1046 b call_cache_fn
1048 1047
1049__armv4_mpu_cache_flush: 1048__armv4_mpu_cache_flush:
1049 tst r4, #1
1050 movne pc, lr
1050 mov r2, #1 1051 mov r2, #1
1051 mov r3, #0 1052 mov r3, #0
1052 mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache 1053 mcr p15, 0, ip, c7, c6, 0 @ invalidate D cache
@@ -1064,6 +1065,8 @@ __armv4_mpu_cache_flush:
1064 mov pc, lr 1065 mov pc, lr
1065 1066
1066__fa526_cache_flush: 1067__fa526_cache_flush:
1068 tst r4, #1
1069 movne pc, lr
1067 mov r1, #0 1070 mov r1, #0
1068 mcr p15, 0, r1, c7, c14, 0 @ clean and invalidate D cache 1071 mcr p15, 0, r1, c7, c14, 0 @ clean and invalidate D cache
1069 mcr p15, 0, r1, c7, c5, 0 @ flush I cache 1072 mcr p15, 0, r1, c7, c5, 0 @ flush I cache
@@ -1072,13 +1075,16 @@ __fa526_cache_flush:
1072 1075
1073__armv6_mmu_cache_flush: 1076__armv6_mmu_cache_flush:
1074 mov r1, #0 1077 mov r1, #0
1075 mcr p15, 0, r1, c7, c14, 0 @ clean+invalidate D 1078 tst r4, #1
1079 mcreq p15, 0, r1, c7, c14, 0 @ clean+invalidate D
1076 mcr p15, 0, r1, c7, c5, 0 @ invalidate I+BTB 1080 mcr p15, 0, r1, c7, c5, 0 @ invalidate I+BTB
1077 mcr p15, 0, r1, c7, c15, 0 @ clean+invalidate unified 1081 mcreq p15, 0, r1, c7, c15, 0 @ clean+invalidate unified
1078 mcr p15, 0, r1, c7, c10, 4 @ drain WB 1082 mcr p15, 0, r1, c7, c10, 4 @ drain WB
1079 mov pc, lr 1083 mov pc, lr
1080 1084
1081__armv7_mmu_cache_flush: 1085__armv7_mmu_cache_flush:
1086 tst r4, #1
1087 bne iflush
1082 mrc p15, 0, r10, c0, c1, 5 @ read ID_MMFR1 1088 mrc p15, 0, r10, c0, c1, 5 @ read ID_MMFR1
1083 tst r10, #0xf << 16 @ hierarchical cache (ARMv7) 1089 tst r10, #0xf << 16 @ hierarchical cache (ARMv7)
1084 mov r10, #0 1090 mov r10, #0
@@ -1139,6 +1145,8 @@ iflush:
1139 mov pc, lr 1145 mov pc, lr
1140 1146
1141__armv5tej_mmu_cache_flush: 1147__armv5tej_mmu_cache_flush:
1148 tst r4, #1
1149 movne pc, lr
11421: mrc p15, 0, r15, c7, c14, 3 @ test,clean,invalidate D cache 11501: mrc p15, 0, r15, c7, c14, 3 @ test,clean,invalidate D cache
1143 bne 1b 1151 bne 1b
1144 mcr p15, 0, r0, c7, c5, 0 @ flush I cache 1152 mcr p15, 0, r0, c7, c5, 0 @ flush I cache
@@ -1146,6 +1154,8 @@ __armv5tej_mmu_cache_flush:
1146 mov pc, lr 1154 mov pc, lr
1147 1155
1148__armv4_mmu_cache_flush: 1156__armv4_mmu_cache_flush:
1157 tst r4, #1
1158 movne pc, lr
1149 mov r2, #64*1024 @ default: 32K dcache size (*2) 1159 mov r2, #64*1024 @ default: 32K dcache size (*2)
1150 mov r11, #32 @ default: 32 byte line size 1160 mov r11, #32 @ default: 32 byte line size
1151 mrc p15, 0, r3, c0, c0, 1 @ read cache type 1161 mrc p15, 0, r3, c0, c0, 1 @ read cache type
@@ -1179,6 +1189,8 @@ no_cache_id:
1179 1189
1180__armv3_mmu_cache_flush: 1190__armv3_mmu_cache_flush:
1181__armv3_mpu_cache_flush: 1191__armv3_mpu_cache_flush:
1192 tst r4, #1
1193 movne pc, lr
1182 mov r1, #0 1194 mov r1, #0
1183 mcr p15, 0, r1, c7, c0, 0 @ invalidate whole cache v3 1195 mcr p15, 0, r1, c7, c0, 0 @ invalidate whole cache v3
1184 mov pc, lr 1196 mov pc, lr
diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts
index e2156a583de7..c4b968f0feb5 100644
--- a/arch/arm/boot/dts/am335x-evm.dts
+++ b/arch/arm/boot/dts/am335x-evm.dts
@@ -489,7 +489,7 @@
489 reg = <0x00060000 0x00020000>; 489 reg = <0x00060000 0x00020000>;
490 }; 490 };
491 partition@4 { 491 partition@4 {
492 label = "NAND.u-boot-spl"; 492 label = "NAND.u-boot-spl-os";
493 reg = <0x00080000 0x00040000>; 493 reg = <0x00080000 0x00040000>;
494 }; 494 };
495 partition@5 { 495 partition@5 {
diff --git a/arch/arm/boot/dts/am437x-gp-evm.dts b/arch/arm/boot/dts/am437x-gp-evm.dts
index e7ac47fa6615..a521ac0a7d5a 100644
--- a/arch/arm/boot/dts/am437x-gp-evm.dts
+++ b/arch/arm/boot/dts/am437x-gp-evm.dts
@@ -291,8 +291,8 @@
291 dcdc3: regulator-dcdc3 { 291 dcdc3: regulator-dcdc3 {
292 compatible = "ti,tps65218-dcdc3"; 292 compatible = "ti,tps65218-dcdc3";
293 regulator-name = "vdcdc3"; 293 regulator-name = "vdcdc3";
294 regulator-min-microvolt = <1350000>; 294 regulator-min-microvolt = <1500000>;
295 regulator-max-microvolt = <1350000>; 295 regulator-max-microvolt = <1500000>;
296 regulator-boot-on; 296 regulator-boot-on;
297 regulator-always-on; 297 regulator-always-on;
298 }; 298 };
diff --git a/arch/arm/boot/dts/am437x-sk-evm.dts b/arch/arm/boot/dts/am437x-sk-evm.dts
index 859ff3d620ee..87aa4f3b8b3d 100644
--- a/arch/arm/boot/dts/am437x-sk-evm.dts
+++ b/arch/arm/boot/dts/am437x-sk-evm.dts
@@ -363,8 +363,8 @@
363 dcdc3: regulator-dcdc3 { 363 dcdc3: regulator-dcdc3 {
364 compatible = "ti,tps65218-dcdc3"; 364 compatible = "ti,tps65218-dcdc3";
365 regulator-name = "vdds_ddr"; 365 regulator-name = "vdds_ddr";
366 regulator-min-microvolt = <1350000>; 366 regulator-min-microvolt = <1500000>;
367 regulator-max-microvolt = <1350000>; 367 regulator-max-microvolt = <1500000>;
368 regulator-boot-on; 368 regulator-boot-on;
369 regulator-always-on; 369 regulator-always-on;
370 }; 370 };
diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts
index ac3e4859935f..f7e9bba10bd6 100644
--- a/arch/arm/boot/dts/am43x-epos-evm.dts
+++ b/arch/arm/boot/dts/am43x-epos-evm.dts
@@ -358,8 +358,8 @@
358 dcdc3: regulator-dcdc3 { 358 dcdc3: regulator-dcdc3 {
359 compatible = "ti,tps65218-dcdc3"; 359 compatible = "ti,tps65218-dcdc3";
360 regulator-name = "vdcdc3"; 360 regulator-name = "vdcdc3";
361 regulator-min-microvolt = <1350000>; 361 regulator-min-microvolt = <1500000>;
362 regulator-max-microvolt = <1350000>; 362 regulator-max-microvolt = <1500000>;
363 regulator-boot-on; 363 regulator-boot-on;
364 regulator-always-on; 364 regulator-always-on;
365 }; 365 };
diff --git a/arch/arm/boot/dts/at91sam9263.dtsi b/arch/arm/boot/dts/at91sam9263.dtsi
index d68b3c4862bc..51416c7d0625 100644
--- a/arch/arm/boot/dts/at91sam9263.dtsi
+++ b/arch/arm/boot/dts/at91sam9263.dtsi
@@ -122,9 +122,10 @@
122 interrupts-extended = <&pmc AT91_PMC_LOCKB>; 122 interrupts-extended = <&pmc AT91_PMC_LOCKB>;
123 clocks = <&main>; 123 clocks = <&main>;
124 reg = <1>; 124 reg = <1>;
125 atmel,clk-input-range = <1000000 5000000>; 125 atmel,clk-input-range = <1000000 32000000>;
126 #atmel,pll-clk-output-range-cells = <4>; 126 #atmel,pll-clk-output-range-cells = <4>;
127 atmel,pll-clk-output-ranges = <70000000 130000000 1 1>; 127 atmel,pll-clk-output-ranges = <80000000 200000000 0 1>,
128 <190000000 240000000 2 1>;
128 }; 129 };
129 130
130 mck: masterck { 131 mck: masterck {
diff --git a/arch/arm/boot/dts/exynos5250-snow.dts b/arch/arm/boot/dts/exynos5250-snow.dts
index e51fcef884a4..60429ad1c5d8 100644
--- a/arch/arm/boot/dts/exynos5250-snow.dts
+++ b/arch/arm/boot/dts/exynos5250-snow.dts
@@ -624,4 +624,8 @@
624 num-cs = <1>; 624 num-cs = <1>;
625}; 625};
626 626
627&usbdrd_dwc3 {
628 dr_mode = "host";
629};
630
627#include "cros-ec-keyboard.dtsi" 631#include "cros-ec-keyboard.dtsi"
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
index f21b9aa00fbb..d55c1a2eb798 100644
--- a/arch/arm/boot/dts/exynos5250.dtsi
+++ b/arch/arm/boot/dts/exynos5250.dtsi
@@ -555,7 +555,7 @@
555 #size-cells = <1>; 555 #size-cells = <1>;
556 ranges; 556 ranges;
557 557
558 dwc3 { 558 usbdrd_dwc3: dwc3 {
559 compatible = "synopsys,dwc3"; 559 compatible = "synopsys,dwc3";
560 reg = <0x12000000 0x10000>; 560 reg = <0x12000000 0x10000>;
561 interrupts = <0 72 0>; 561 interrupts = <0 72 0>;
diff --git a/arch/arm/boot/dts/imx28-evk.dts b/arch/arm/boot/dts/imx28-evk.dts
index 09664fcf5afb..0e13b4b10a92 100644
--- a/arch/arm/boot/dts/imx28-evk.dts
+++ b/arch/arm/boot/dts/imx28-evk.dts
@@ -193,7 +193,6 @@
193 i2c0: i2c@80058000 { 193 i2c0: i2c@80058000 {
194 pinctrl-names = "default"; 194 pinctrl-names = "default";
195 pinctrl-0 = <&i2c0_pins_a>; 195 pinctrl-0 = <&i2c0_pins_a>;
196 clock-frequency = <400000>;
197 status = "okay"; 196 status = "okay";
198 197
199 sgtl5000: codec@0a { 198 sgtl5000: codec@0a {
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index 739fcf29c643..bc82a12d4c2c 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -668,6 +668,8 @@
668 bank-width = <2>; 668 bank-width = <2>;
669 pinctrl-names = "default"; 669 pinctrl-names = "default";
670 pinctrl-0 = <&ethernet_pins>; 670 pinctrl-0 = <&ethernet_pins>;
671 power-gpios = <&gpio3 22 GPIO_ACTIVE_HIGH>; /* gpio86 */
672 reset-gpios = <&gpio6 4 GPIO_ACTIVE_HIGH>; /* gpio164 */
671 gpmc,device-width = <2>; 673 gpmc,device-width = <2>;
672 gpmc,sync-clk-ps = <0>; 674 gpmc,sync-clk-ps = <0>;
673 gpmc,cs-on-ns = <0>; 675 gpmc,cs-on-ns = <0>;
diff --git a/arch/arm/boot/dts/r8a7740.dtsi b/arch/arm/boot/dts/r8a7740.dtsi
index d46c213a17ad..eed697a6bd6b 100644
--- a/arch/arm/boot/dts/r8a7740.dtsi
+++ b/arch/arm/boot/dts/r8a7740.dtsi
@@ -433,7 +433,7 @@
433 clocks = <&cpg_clocks R8A7740_CLK_S>, 433 clocks = <&cpg_clocks R8A7740_CLK_S>,
434 <&cpg_clocks R8A7740_CLK_S>, <&sub_clk>, 434 <&cpg_clocks R8A7740_CLK_S>, <&sub_clk>,
435 <&cpg_clocks R8A7740_CLK_B>, 435 <&cpg_clocks R8A7740_CLK_B>,
436 <&sub_clk>, <&sub_clk>, 436 <&cpg_clocks R8A7740_CLK_HPP>, <&sub_clk>,
437 <&cpg_clocks R8A7740_CLK_B>; 437 <&cpg_clocks R8A7740_CLK_B>;
438 #clock-cells = <1>; 438 #clock-cells = <1>;
439 renesas,clock-indices = < 439 renesas,clock-indices = <
diff --git a/arch/arm/boot/dts/r8a7790.dtsi b/arch/arm/boot/dts/r8a7790.dtsi
index d0e17733dc1a..e20affe156c1 100644
--- a/arch/arm/boot/dts/r8a7790.dtsi
+++ b/arch/arm/boot/dts/r8a7790.dtsi
@@ -666,9 +666,9 @@
666 #clock-cells = <0>; 666 #clock-cells = <0>;
667 clock-output-names = "sd2"; 667 clock-output-names = "sd2";
668 }; 668 };
669 sd3_clk: sd3_clk@e615007c { 669 sd3_clk: sd3_clk@e615026c {
670 compatible = "renesas,r8a7790-div6-clock", "renesas,cpg-div6-clock"; 670 compatible = "renesas,r8a7790-div6-clock", "renesas,cpg-div6-clock";
671 reg = <0 0xe615007c 0 4>; 671 reg = <0 0xe615026c 0 4>;
672 clocks = <&pll1_div2_clk>; 672 clocks = <&pll1_div2_clk>;
673 #clock-cells = <0>; 673 #clock-cells = <0>;
674 clock-output-names = "sd3"; 674 clock-output-names = "sd3";
diff --git a/arch/arm/boot/dts/sama5d31.dtsi b/arch/arm/boot/dts/sama5d31.dtsi
index 7997dc9863ed..883878b32971 100644
--- a/arch/arm/boot/dts/sama5d31.dtsi
+++ b/arch/arm/boot/dts/sama5d31.dtsi
@@ -12,5 +12,5 @@
12#include "sama5d3_uart.dtsi" 12#include "sama5d3_uart.dtsi"
13 13
14/ { 14/ {
15 compatible = "atmel,samad31", "atmel,sama5d3", "atmel,sama5"; 15 compatible = "atmel,sama5d31", "atmel,sama5d3", "atmel,sama5";
16}; 16};
diff --git a/arch/arm/boot/dts/sama5d33.dtsi b/arch/arm/boot/dts/sama5d33.dtsi
index 39f832253caf..4b4434aca351 100644
--- a/arch/arm/boot/dts/sama5d33.dtsi
+++ b/arch/arm/boot/dts/sama5d33.dtsi
@@ -10,5 +10,5 @@
10#include "sama5d3_gmac.dtsi" 10#include "sama5d3_gmac.dtsi"
11 11
12/ { 12/ {
13 compatible = "atmel,samad33", "atmel,sama5d3", "atmel,sama5"; 13 compatible = "atmel,sama5d33", "atmel,sama5d3", "atmel,sama5";
14}; 14};
diff --git a/arch/arm/boot/dts/sama5d34.dtsi b/arch/arm/boot/dts/sama5d34.dtsi
index 89cda2c0da39..aa01573fdee9 100644
--- a/arch/arm/boot/dts/sama5d34.dtsi
+++ b/arch/arm/boot/dts/sama5d34.dtsi
@@ -12,5 +12,5 @@
12#include "sama5d3_mci2.dtsi" 12#include "sama5d3_mci2.dtsi"
13 13
14/ { 14/ {
15 compatible = "atmel,samad34", "atmel,sama5d3", "atmel,sama5"; 15 compatible = "atmel,sama5d34", "atmel,sama5d3", "atmel,sama5";
16}; 16};
diff --git a/arch/arm/boot/dts/sama5d35.dtsi b/arch/arm/boot/dts/sama5d35.dtsi
index d20cd71b5f0e..16c39f4c96a4 100644
--- a/arch/arm/boot/dts/sama5d35.dtsi
+++ b/arch/arm/boot/dts/sama5d35.dtsi
@@ -14,5 +14,5 @@
14#include "sama5d3_tcb1.dtsi" 14#include "sama5d3_tcb1.dtsi"
15 15
16/ { 16/ {
17 compatible = "atmel,samad35", "atmel,sama5d3", "atmel,sama5"; 17 compatible = "atmel,sama5d35", "atmel,sama5d3", "atmel,sama5";
18}; 18};
diff --git a/arch/arm/boot/dts/sama5d36.dtsi b/arch/arm/boot/dts/sama5d36.dtsi
index db58cad6acd3..e85139ef40af 100644
--- a/arch/arm/boot/dts/sama5d36.dtsi
+++ b/arch/arm/boot/dts/sama5d36.dtsi
@@ -16,5 +16,5 @@
16#include "sama5d3_uart.dtsi" 16#include "sama5d3_uart.dtsi"
17 17
18/ { 18/ {
19 compatible = "atmel,samad36", "atmel,sama5d3", "atmel,sama5"; 19 compatible = "atmel,sama5d36", "atmel,sama5d3", "atmel,sama5";
20}; 20};
diff --git a/arch/arm/boot/dts/sama5d3xcm.dtsi b/arch/arm/boot/dts/sama5d3xcm.dtsi
index 962dc28dc37b..cfcd200b0c17 100644
--- a/arch/arm/boot/dts/sama5d3xcm.dtsi
+++ b/arch/arm/boot/dts/sama5d3xcm.dtsi
@@ -8,7 +8,7 @@
8 */ 8 */
9 9
10/ { 10/ {
11 compatible = "atmel,samad3xcm", "atmel,sama5d3", "atmel,sama5"; 11 compatible = "atmel,sama5d3xcm", "atmel,sama5d3", "atmel,sama5";
12 12
13 chosen { 13 chosen {
14 bootargs = "console=ttyS0,115200 rootfstype=ubifs ubi.mtd=5 root=ubi0:rootfs"; 14 bootargs = "console=ttyS0,115200 rootfstype=ubifs ubi.mtd=5 root=ubi0:rootfs";
diff --git a/arch/arm/boot/dts/socfpga.dtsi b/arch/arm/boot/dts/socfpga.dtsi
index 45fce2cf6fed..4472fd92685c 100644
--- a/arch/arm/boot/dts/socfpga.dtsi
+++ b/arch/arm/boot/dts/socfpga.dtsi
@@ -547,7 +547,7 @@
547 status = "disabled"; 547 status = "disabled";
548 }; 548 };
549 549
550 gpio@ff708000 { 550 gpio0: gpio@ff708000 {
551 #address-cells = <1>; 551 #address-cells = <1>;
552 #size-cells = <0>; 552 #size-cells = <0>;
553 compatible = "snps,dw-apb-gpio"; 553 compatible = "snps,dw-apb-gpio";
@@ -555,7 +555,7 @@
555 clocks = <&per_base_clk>; 555 clocks = <&per_base_clk>;
556 status = "disabled"; 556 status = "disabled";
557 557
558 gpio0: gpio-controller@0 { 558 porta: gpio-controller@0 {
559 compatible = "snps,dw-apb-gpio-port"; 559 compatible = "snps,dw-apb-gpio-port";
560 gpio-controller; 560 gpio-controller;
561 #gpio-cells = <2>; 561 #gpio-cells = <2>;
@@ -567,7 +567,7 @@
567 }; 567 };
568 }; 568 };
569 569
570 gpio@ff709000 { 570 gpio1: gpio@ff709000 {
571 #address-cells = <1>; 571 #address-cells = <1>;
572 #size-cells = <0>; 572 #size-cells = <0>;
573 compatible = "snps,dw-apb-gpio"; 573 compatible = "snps,dw-apb-gpio";
@@ -575,7 +575,7 @@
575 clocks = <&per_base_clk>; 575 clocks = <&per_base_clk>;
576 status = "disabled"; 576 status = "disabled";
577 577
578 gpio1: gpio-controller@0 { 578 portb: gpio-controller@0 {
579 compatible = "snps,dw-apb-gpio-port"; 579 compatible = "snps,dw-apb-gpio-port";
580 gpio-controller; 580 gpio-controller;
581 #gpio-cells = <2>; 581 #gpio-cells = <2>;
@@ -587,7 +587,7 @@
587 }; 587 };
588 }; 588 };
589 589
590 gpio@ff70a000 { 590 gpio2: gpio@ff70a000 {
591 #address-cells = <1>; 591 #address-cells = <1>;
592 #size-cells = <0>; 592 #size-cells = <0>;
593 compatible = "snps,dw-apb-gpio"; 593 compatible = "snps,dw-apb-gpio";
@@ -595,7 +595,7 @@
595 clocks = <&per_base_clk>; 595 clocks = <&per_base_clk>;
596 status = "disabled"; 596 status = "disabled";
597 597
598 gpio2: gpio-controller@0 { 598 portc: gpio-controller@0 {
599 compatible = "snps,dw-apb-gpio-port"; 599 compatible = "snps,dw-apb-gpio-port";
600 gpio-controller; 600 gpio-controller;
601 #gpio-cells = <2>; 601 #gpio-cells = <2>;
diff --git a/arch/arm/boot/dts/socfpga_arria5.dtsi b/arch/arm/boot/dts/socfpga_arria5.dtsi
index 03e8268ae219..1907cc600452 100644
--- a/arch/arm/boot/dts/socfpga_arria5.dtsi
+++ b/arch/arm/boot/dts/socfpga_arria5.dtsi
@@ -29,7 +29,7 @@
29 }; 29 };
30 }; 30 };
31 31
32 dwmmc0@ff704000 { 32 mmc0: dwmmc0@ff704000 {
33 num-slots = <1>; 33 num-slots = <1>;
34 broken-cd; 34 broken-cd;
35 bus-width = <4>; 35 bus-width = <4>;
diff --git a/arch/arm/boot/dts/socfpga_arria5_socdk.dts b/arch/arm/boot/dts/socfpga_arria5_socdk.dts
index 27d551c384d0..ccaf41742fc3 100644
--- a/arch/arm/boot/dts/socfpga_arria5_socdk.dts
+++ b/arch/arm/boot/dts/socfpga_arria5_socdk.dts
@@ -37,6 +37,13 @@
37 */ 37 */
38 ethernet0 = &gmac1; 38 ethernet0 = &gmac1;
39 }; 39 };
40
41 regulator_3_3v: 3-3-v-regulator {
42 compatible = "regulator-fixed";
43 regulator-name = "3.3V";
44 regulator-min-microvolt = <3300000>;
45 regulator-max-microvolt = <3300000>;
46 };
40}; 47};
41 48
42&gmac1 { 49&gmac1 {
@@ -68,6 +75,11 @@
68 }; 75 };
69}; 76};
70 77
78&mmc0 {
79 vmmc-supply = <&regulator_3_3v>;
80 vqmmc-supply = <&regulator_3_3v>;
81};
82
71&usb1 { 83&usb1 {
72 status = "okay"; 84 status = "okay";
73}; 85};
diff --git a/arch/arm/boot/dts/socfpga_cyclone5_socdk.dts b/arch/arm/boot/dts/socfpga_cyclone5_socdk.dts
index d7296a5f750c..258865da8f6a 100644
--- a/arch/arm/boot/dts/socfpga_cyclone5_socdk.dts
+++ b/arch/arm/boot/dts/socfpga_cyclone5_socdk.dts
@@ -37,6 +37,13 @@
37 */ 37 */
38 ethernet0 = &gmac1; 38 ethernet0 = &gmac1;
39 }; 39 };
40
41 regulator_3_3v: 3-3-v-regulator {
42 compatible = "regulator-fixed";
43 regulator-name = "3.3V";
44 regulator-min-microvolt = <3300000>;
45 regulator-max-microvolt = <3300000>;
46 };
40}; 47};
41 48
42&gmac1 { 49&gmac1 {
@@ -53,6 +60,10 @@
53 rxc-skew-ps = <2000>; 60 rxc-skew-ps = <2000>;
54}; 61};
55 62
63&gpio1 {
64 status = "okay";
65};
66
56&i2c0 { 67&i2c0 {
57 status = "okay"; 68 status = "okay";
58 69
@@ -69,7 +80,9 @@
69}; 80};
70 81
71&mmc0 { 82&mmc0 {
72 cd-gpios = <&gpio1 18 0>; 83 cd-gpios = <&portb 18 0>;
84 vmmc-supply = <&regulator_3_3v>;
85 vqmmc-supply = <&regulator_3_3v>;
73}; 86};
74 87
75&usb1 { 88&usb1 {
diff --git a/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts b/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts
index d26f155f5fd9..16ea6f5f2ab8 100644
--- a/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts
+++ b/arch/arm/boot/dts/socfpga_cyclone5_sockit.dts
@@ -37,6 +37,13 @@
37 */ 37 */
38 ethernet0 = &gmac1; 38 ethernet0 = &gmac1;
39 }; 39 };
40
41 regulator_3_3v: vcc3p3-regulator {
42 compatible = "regulator-fixed";
43 regulator-name = "VCC3P3";
44 regulator-min-microvolt = <3300000>;
45 regulator-max-microvolt = <3300000>;
46 };
40}; 47};
41 48
42&gmac1 { 49&gmac1 {
@@ -53,6 +60,11 @@
53 rxc-skew-ps = <2000>; 60 rxc-skew-ps = <2000>;
54}; 61};
55 62
63&mmc0 {
64 vmmc-supply = <&regulator_3_3v>;
65 vqmmc-supply = <&regulator_3_3v>;
66};
67
56&usb1 { 68&usb1 {
57 status = "okay"; 69 status = "okay";
58}; 70};
diff --git a/arch/arm/boot/dts/sun6i-a31.dtsi b/arch/arm/boot/dts/sun6i-a31.dtsi
index 543f895d18d3..2e652e2339e9 100644
--- a/arch/arm/boot/dts/sun6i-a31.dtsi
+++ b/arch/arm/boot/dts/sun6i-a31.dtsi
@@ -361,6 +361,10 @@
361 clocks = <&ahb1_gates 6>; 361 clocks = <&ahb1_gates 6>;
362 resets = <&ahb1_rst 6>; 362 resets = <&ahb1_rst 6>;
363 #dma-cells = <1>; 363 #dma-cells = <1>;
364
365 /* DMA controller requires AHB1 clocked from PLL6 */
366 assigned-clocks = <&ahb1_mux>;
367 assigned-clock-parents = <&pll6>;
364 }; 368 };
365 369
366 mmc0: mmc@01c0f000 { 370 mmc0: mmc@01c0f000 {
diff --git a/arch/arm/boot/dts/tegra114-dalmore.dts b/arch/arm/boot/dts/tegra114-dalmore.dts
index 5c21d216515a..8b7aa0dcdc6e 100644
--- a/arch/arm/boot/dts/tegra114-dalmore.dts
+++ b/arch/arm/boot/dts/tegra114-dalmore.dts
@@ -15,6 +15,7 @@
15 aliases { 15 aliases {
16 rtc0 = "/i2c@7000d000/tps65913@58"; 16 rtc0 = "/i2c@7000d000/tps65913@58";
17 rtc1 = "/rtc@7000e000"; 17 rtc1 = "/rtc@7000e000";
18 serial0 = &uartd;
18 }; 19 };
19 20
20 memory { 21 memory {
diff --git a/arch/arm/boot/dts/tegra114-roth.dts b/arch/arm/boot/dts/tegra114-roth.dts
index c7c6825f11fb..38acf78d7815 100644
--- a/arch/arm/boot/dts/tegra114-roth.dts
+++ b/arch/arm/boot/dts/tegra114-roth.dts
@@ -15,6 +15,10 @@
15 linux,initrd-end = <0x82800000>; 15 linux,initrd-end = <0x82800000>;
16 }; 16 };
17 17
18 aliases {
19 serial0 = &uartd;
20 };
21
18 firmware { 22 firmware {
19 trusted-foundations { 23 trusted-foundations {
20 compatible = "tlm,trusted-foundations"; 24 compatible = "tlm,trusted-foundations";
@@ -916,8 +920,6 @@
916 regulator-name = "vddio-sdmmc3"; 920 regulator-name = "vddio-sdmmc3";
917 regulator-min-microvolt = <1800000>; 921 regulator-min-microvolt = <1800000>;
918 regulator-max-microvolt = <3300000>; 922 regulator-max-microvolt = <3300000>;
919 regulator-always-on;
920 regulator-boot-on;
921 }; 923 };
922 924
923 ldousb { 925 ldousb {
@@ -962,7 +964,7 @@
962 sdhci@78000400 { 964 sdhci@78000400 {
963 status = "okay"; 965 status = "okay";
964 bus-width = <4>; 966 bus-width = <4>;
965 vmmc-supply = <&vddio_sdmmc3>; 967 vqmmc-supply = <&vddio_sdmmc3>;
966 cd-gpios = <&gpio TEGRA_GPIO(V, 2) GPIO_ACTIVE_LOW>; 968 cd-gpios = <&gpio TEGRA_GPIO(V, 2) GPIO_ACTIVE_LOW>;
967 power-gpios = <&gpio TEGRA_GPIO(H, 0) GPIO_ACTIVE_HIGH>; 969 power-gpios = <&gpio TEGRA_GPIO(H, 0) GPIO_ACTIVE_HIGH>;
968 }; 970 };
@@ -971,7 +973,6 @@
971 sdhci@78000600 { 973 sdhci@78000600 {
972 status = "okay"; 974 status = "okay";
973 bus-width = <8>; 975 bus-width = <8>;
974 vmmc-supply = <&vdd_1v8>;
975 non-removable; 976 non-removable;
976 }; 977 };
977 978
diff --git a/arch/arm/boot/dts/tegra114-tn7.dts b/arch/arm/boot/dts/tegra114-tn7.dts
index 963662145635..f91c2c9b2f94 100644
--- a/arch/arm/boot/dts/tegra114-tn7.dts
+++ b/arch/arm/boot/dts/tegra114-tn7.dts
@@ -15,6 +15,10 @@
15 linux,initrd-end = <0x82800000>; 15 linux,initrd-end = <0x82800000>;
16 }; 16 };
17 17
18 aliases {
19 serial0 = &uartd;
20 };
21
18 firmware { 22 firmware {
19 trusted-foundations { 23 trusted-foundations {
20 compatible = "tlm,trusted-foundations"; 24 compatible = "tlm,trusted-foundations";
@@ -240,7 +244,6 @@
240 sdhci@78000600 { 244 sdhci@78000600 {
241 status = "okay"; 245 status = "okay";
242 bus-width = <8>; 246 bus-width = <8>;
243 vmmc-supply = <&vdd_1v8>;
244 non-removable; 247 non-removable;
245 }; 248 };
246 249
diff --git a/arch/arm/boot/dts/tegra114.dtsi b/arch/arm/boot/dts/tegra114.dtsi
index 2ca9c1807f72..222f3b3f4dd5 100644
--- a/arch/arm/boot/dts/tegra114.dtsi
+++ b/arch/arm/boot/dts/tegra114.dtsi
@@ -9,13 +9,6 @@
9 compatible = "nvidia,tegra114"; 9 compatible = "nvidia,tegra114";
10 interrupt-parent = <&gic>; 10 interrupt-parent = <&gic>;
11 11
12 aliases {
13 serial0 = &uarta;
14 serial1 = &uartb;
15 serial2 = &uartc;
16 serial3 = &uartd;
17 };
18
19 host1x@50000000 { 12 host1x@50000000 {
20 compatible = "nvidia,tegra114-host1x", "simple-bus"; 13 compatible = "nvidia,tegra114-host1x", "simple-bus";
21 reg = <0x50000000 0x00028000>; 14 reg = <0x50000000 0x00028000>;
diff --git a/arch/arm/boot/dts/tegra124-jetson-tk1.dts b/arch/arm/boot/dts/tegra124-jetson-tk1.dts
index 029c9a021541..51b373ff1065 100644
--- a/arch/arm/boot/dts/tegra124-jetson-tk1.dts
+++ b/arch/arm/boot/dts/tegra124-jetson-tk1.dts
@@ -10,6 +10,7 @@
10 aliases { 10 aliases {
11 rtc0 = "/i2c@0,7000d000/pmic@40"; 11 rtc0 = "/i2c@0,7000d000/pmic@40";
12 rtc1 = "/rtc@0,7000e000"; 12 rtc1 = "/rtc@0,7000e000";
13 serial0 = &uartd;
13 }; 14 };
14 15
15 memory { 16 memory {
diff --git a/arch/arm/boot/dts/tegra124-nyan-big.dts b/arch/arm/boot/dts/tegra124-nyan-big.dts
index 7d0784ce4c74..53181d310247 100644
--- a/arch/arm/boot/dts/tegra124-nyan-big.dts
+++ b/arch/arm/boot/dts/tegra124-nyan-big.dts
@@ -10,6 +10,7 @@
10 aliases { 10 aliases {
11 rtc0 = "/i2c@0,7000d000/pmic@40"; 11 rtc0 = "/i2c@0,7000d000/pmic@40";
12 rtc1 = "/rtc@0,7000e000"; 12 rtc1 = "/rtc@0,7000e000";
13 serial0 = &uarta;
13 }; 14 };
14 15
15 memory { 16 memory {
diff --git a/arch/arm/boot/dts/tegra124-venice2.dts b/arch/arm/boot/dts/tegra124-venice2.dts
index 13008858e967..5c3f7813360d 100644
--- a/arch/arm/boot/dts/tegra124-venice2.dts
+++ b/arch/arm/boot/dts/tegra124-venice2.dts
@@ -10,6 +10,7 @@
10 aliases { 10 aliases {
11 rtc0 = "/i2c@0,7000d000/pmic@40"; 11 rtc0 = "/i2c@0,7000d000/pmic@40";
12 rtc1 = "/rtc@0,7000e000"; 12 rtc1 = "/rtc@0,7000e000";
13 serial0 = &uarta;
13 }; 14 };
14 15
15 memory { 16 memory {
diff --git a/arch/arm/boot/dts/tegra124.dtsi b/arch/arm/boot/dts/tegra124.dtsi
index 478c555ebd96..df2b06b29985 100644
--- a/arch/arm/boot/dts/tegra124.dtsi
+++ b/arch/arm/boot/dts/tegra124.dtsi
@@ -286,7 +286,7 @@
286 * the APB DMA based serial driver, the compatible is 286 * the APB DMA based serial driver, the compatible is
287 * "nvidia,tegra124-hsuart", "nvidia,tegra30-hsuart". 287 * "nvidia,tegra124-hsuart", "nvidia,tegra30-hsuart".
288 */ 288 */
289 serial@0,70006000 { 289 uarta: serial@0,70006000 {
290 compatible = "nvidia,tegra124-uart", "nvidia,tegra20-uart"; 290 compatible = "nvidia,tegra124-uart", "nvidia,tegra20-uart";
291 reg = <0x0 0x70006000 0x0 0x40>; 291 reg = <0x0 0x70006000 0x0 0x40>;
292 reg-shift = <2>; 292 reg-shift = <2>;
@@ -299,7 +299,7 @@
299 status = "disabled"; 299 status = "disabled";
300 }; 300 };
301 301
302 serial@0,70006040 { 302 uartb: serial@0,70006040 {
303 compatible = "nvidia,tegra124-uart", "nvidia,tegra20-uart"; 303 compatible = "nvidia,tegra124-uart", "nvidia,tegra20-uart";
304 reg = <0x0 0x70006040 0x0 0x40>; 304 reg = <0x0 0x70006040 0x0 0x40>;
305 reg-shift = <2>; 305 reg-shift = <2>;
@@ -312,7 +312,7 @@
312 status = "disabled"; 312 status = "disabled";
313 }; 313 };
314 314
315 serial@0,70006200 { 315 uartc: serial@0,70006200 {
316 compatible = "nvidia,tegra124-uart", "nvidia,tegra20-uart"; 316 compatible = "nvidia,tegra124-uart", "nvidia,tegra20-uart";
317 reg = <0x0 0x70006200 0x0 0x40>; 317 reg = <0x0 0x70006200 0x0 0x40>;
318 reg-shift = <2>; 318 reg-shift = <2>;
@@ -325,7 +325,7 @@
325 status = "disabled"; 325 status = "disabled";
326 }; 326 };
327 327
328 serial@0,70006300 { 328 uartd: serial@0,70006300 {
329 compatible = "nvidia,tegra124-uart", "nvidia,tegra20-uart"; 329 compatible = "nvidia,tegra124-uart", "nvidia,tegra20-uart";
330 reg = <0x0 0x70006300 0x0 0x40>; 330 reg = <0x0 0x70006300 0x0 0x40>;
331 reg-shift = <2>; 331 reg-shift = <2>;
diff --git a/arch/arm/boot/dts/tegra20-harmony.dts b/arch/arm/boot/dts/tegra20-harmony.dts
index a37279af687c..b926a07b9443 100644
--- a/arch/arm/boot/dts/tegra20-harmony.dts
+++ b/arch/arm/boot/dts/tegra20-harmony.dts
@@ -10,6 +10,7 @@
10 aliases { 10 aliases {
11 rtc0 = "/i2c@7000d000/tps6586x@34"; 11 rtc0 = "/i2c@7000d000/tps6586x@34";
12 rtc1 = "/rtc@7000e000"; 12 rtc1 = "/rtc@7000e000";
13 serial0 = &uartd;
13 }; 14 };
14 15
15 memory { 16 memory {
diff --git a/arch/arm/boot/dts/tegra20-iris-512.dts b/arch/arm/boot/dts/tegra20-iris-512.dts
index 8cfb83f42e1f..1dd7d7bfdfcc 100644
--- a/arch/arm/boot/dts/tegra20-iris-512.dts
+++ b/arch/arm/boot/dts/tegra20-iris-512.dts
@@ -6,6 +6,11 @@
6 model = "Toradex Colibri T20 512MB on Iris"; 6 model = "Toradex Colibri T20 512MB on Iris";
7 compatible = "toradex,iris", "toradex,colibri_t20-512", "nvidia,tegra20"; 7 compatible = "toradex,iris", "toradex,colibri_t20-512", "nvidia,tegra20";
8 8
9 aliases {
10 serial0 = &uarta;
11 serial1 = &uartd;
12 };
13
9 host1x@50000000 { 14 host1x@50000000 {
10 hdmi@54280000 { 15 hdmi@54280000 {
11 status = "okay"; 16 status = "okay";
diff --git a/arch/arm/boot/dts/tegra20-medcom-wide.dts b/arch/arm/boot/dts/tegra20-medcom-wide.dts
index 1b7c56b33aca..9b87526ab0b7 100644
--- a/arch/arm/boot/dts/tegra20-medcom-wide.dts
+++ b/arch/arm/boot/dts/tegra20-medcom-wide.dts
@@ -6,6 +6,10 @@
6 model = "Avionic Design Medcom-Wide board"; 6 model = "Avionic Design Medcom-Wide board";
7 compatible = "ad,medcom-wide", "ad,tamonten", "nvidia,tegra20"; 7 compatible = "ad,medcom-wide", "ad,tamonten", "nvidia,tegra20";
8 8
9 aliases {
10 serial0 = &uartd;
11 };
12
9 pwm@7000a000 { 13 pwm@7000a000 {
10 status = "okay"; 14 status = "okay";
11 }; 15 };
diff --git a/arch/arm/boot/dts/tegra20-paz00.dts b/arch/arm/boot/dts/tegra20-paz00.dts
index d4438e30de45..ed7e1009326c 100644
--- a/arch/arm/boot/dts/tegra20-paz00.dts
+++ b/arch/arm/boot/dts/tegra20-paz00.dts
@@ -10,6 +10,8 @@
10 aliases { 10 aliases {
11 rtc0 = "/i2c@7000d000/tps6586x@34"; 11 rtc0 = "/i2c@7000d000/tps6586x@34";
12 rtc1 = "/rtc@7000e000"; 12 rtc1 = "/rtc@7000e000";
13 serial0 = &uarta;
14 serial1 = &uartc;
13 }; 15 };
14 16
15 memory { 17 memory {
diff --git a/arch/arm/boot/dts/tegra20-seaboard.dts b/arch/arm/boot/dts/tegra20-seaboard.dts
index a1d4bf9895d7..ea282c7c0ca5 100644
--- a/arch/arm/boot/dts/tegra20-seaboard.dts
+++ b/arch/arm/boot/dts/tegra20-seaboard.dts
@@ -10,6 +10,7 @@
10 aliases { 10 aliases {
11 rtc0 = "/i2c@7000d000/tps6586x@34"; 11 rtc0 = "/i2c@7000d000/tps6586x@34";
12 rtc1 = "/rtc@7000e000"; 12 rtc1 = "/rtc@7000e000";
13 serial0 = &uartd;
13 }; 14 };
14 15
15 memory { 16 memory {
diff --git a/arch/arm/boot/dts/tegra20-tamonten.dtsi b/arch/arm/boot/dts/tegra20-tamonten.dtsi
index 80e7d386ce34..13d4e6185275 100644
--- a/arch/arm/boot/dts/tegra20-tamonten.dtsi
+++ b/arch/arm/boot/dts/tegra20-tamonten.dtsi
@@ -7,6 +7,7 @@
7 aliases { 7 aliases {
8 rtc0 = "/i2c@7000d000/tps6586x@34"; 8 rtc0 = "/i2c@7000d000/tps6586x@34";
9 rtc1 = "/rtc@7000e000"; 9 rtc1 = "/rtc@7000e000";
10 serial0 = &uartd;
10 }; 11 };
11 12
12 memory { 13 memory {
diff --git a/arch/arm/boot/dts/tegra20-trimslice.dts b/arch/arm/boot/dts/tegra20-trimslice.dts
index 5ad87979ab13..d99af4ef9c64 100644
--- a/arch/arm/boot/dts/tegra20-trimslice.dts
+++ b/arch/arm/boot/dts/tegra20-trimslice.dts
@@ -10,6 +10,7 @@
10 aliases { 10 aliases {
11 rtc0 = "/i2c@7000c500/rtc@56"; 11 rtc0 = "/i2c@7000c500/rtc@56";
12 rtc1 = "/rtc@7000e000"; 12 rtc1 = "/rtc@7000e000";
13 serial0 = &uarta;
13 }; 14 };
14 15
15 memory { 16 memory {
diff --git a/arch/arm/boot/dts/tegra20-ventana.dts b/arch/arm/boot/dts/tegra20-ventana.dts
index ca8484cccddc..04c58e9ca490 100644
--- a/arch/arm/boot/dts/tegra20-ventana.dts
+++ b/arch/arm/boot/dts/tegra20-ventana.dts
@@ -10,6 +10,7 @@
10 aliases { 10 aliases {
11 rtc0 = "/i2c@7000d000/tps6586x@34"; 11 rtc0 = "/i2c@7000d000/tps6586x@34";
12 rtc1 = "/rtc@7000e000"; 12 rtc1 = "/rtc@7000e000";
13 serial0 = &uartd;
13 }; 14 };
14 15
15 memory { 16 memory {
diff --git a/arch/arm/boot/dts/tegra20-whistler.dts b/arch/arm/boot/dts/tegra20-whistler.dts
index 1843725785c9..340d81108df1 100644
--- a/arch/arm/boot/dts/tegra20-whistler.dts
+++ b/arch/arm/boot/dts/tegra20-whistler.dts
@@ -10,6 +10,7 @@
10 aliases { 10 aliases {
11 rtc0 = "/i2c@7000d000/max8907@3c"; 11 rtc0 = "/i2c@7000d000/max8907@3c";
12 rtc1 = "/rtc@7000e000"; 12 rtc1 = "/rtc@7000e000";
13 serial0 = &uarta;
13 }; 14 };
14 15
15 memory { 16 memory {
diff --git a/arch/arm/boot/dts/tegra20.dtsi b/arch/arm/boot/dts/tegra20.dtsi
index 3b374c49d04d..8acf5d85c99d 100644
--- a/arch/arm/boot/dts/tegra20.dtsi
+++ b/arch/arm/boot/dts/tegra20.dtsi
@@ -9,14 +9,6 @@
9 compatible = "nvidia,tegra20"; 9 compatible = "nvidia,tegra20";
10 interrupt-parent = <&intc>; 10 interrupt-parent = <&intc>;
11 11
12 aliases {
13 serial0 = &uarta;
14 serial1 = &uartb;
15 serial2 = &uartc;
16 serial3 = &uartd;
17 serial4 = &uarte;
18 };
19
20 host1x@50000000 { 12 host1x@50000000 {
21 compatible = "nvidia,tegra20-host1x", "simple-bus"; 13 compatible = "nvidia,tegra20-host1x", "simple-bus";
22 reg = <0x50000000 0x00024000>; 14 reg = <0x50000000 0x00024000>;
diff --git a/arch/arm/boot/dts/tegra30-apalis-eval.dts b/arch/arm/boot/dts/tegra30-apalis-eval.dts
index 45d40f024585..6236bdecb48b 100644
--- a/arch/arm/boot/dts/tegra30-apalis-eval.dts
+++ b/arch/arm/boot/dts/tegra30-apalis-eval.dts
@@ -11,6 +11,10 @@
11 rtc0 = "/i2c@7000c000/rtc@68"; 11 rtc0 = "/i2c@7000c000/rtc@68";
12 rtc1 = "/i2c@7000d000/tps65911@2d"; 12 rtc1 = "/i2c@7000d000/tps65911@2d";
13 rtc2 = "/rtc@7000e000"; 13 rtc2 = "/rtc@7000e000";
14 serial0 = &uarta;
15 serial1 = &uartb;
16 serial2 = &uartc;
17 serial3 = &uartd;
14 }; 18 };
15 19
16 pcie-controller@00003000 { 20 pcie-controller@00003000 {
diff --git a/arch/arm/boot/dts/tegra30-beaver.dts b/arch/arm/boot/dts/tegra30-beaver.dts
index cee8f2246fdb..6b157eeabcc5 100644
--- a/arch/arm/boot/dts/tegra30-beaver.dts
+++ b/arch/arm/boot/dts/tegra30-beaver.dts
@@ -9,6 +9,7 @@
9 aliases { 9 aliases {
10 rtc0 = "/i2c@7000d000/tps65911@2d"; 10 rtc0 = "/i2c@7000d000/tps65911@2d";
11 rtc1 = "/rtc@7000e000"; 11 rtc1 = "/rtc@7000e000";
12 serial0 = &uarta;
12 }; 13 };
13 14
14 memory { 15 memory {
diff --git a/arch/arm/boot/dts/tegra30-cardhu.dtsi b/arch/arm/boot/dts/tegra30-cardhu.dtsi
index 206379546244..a1b682ea01bd 100644
--- a/arch/arm/boot/dts/tegra30-cardhu.dtsi
+++ b/arch/arm/boot/dts/tegra30-cardhu.dtsi
@@ -30,6 +30,8 @@
30 aliases { 30 aliases {
31 rtc0 = "/i2c@7000d000/tps65911@2d"; 31 rtc0 = "/i2c@7000d000/tps65911@2d";
32 rtc1 = "/rtc@7000e000"; 32 rtc1 = "/rtc@7000e000";
33 serial0 = &uarta;
34 serial1 = &uartc;
33 }; 35 };
34 36
35 memory { 37 memory {
diff --git a/arch/arm/boot/dts/tegra30-colibri-eval-v3.dts b/arch/arm/boot/dts/tegra30-colibri-eval-v3.dts
index 7793abd5bef1..4d3ddc585641 100644
--- a/arch/arm/boot/dts/tegra30-colibri-eval-v3.dts
+++ b/arch/arm/boot/dts/tegra30-colibri-eval-v3.dts
@@ -10,6 +10,9 @@
10 rtc0 = "/i2c@7000c000/rtc@68"; 10 rtc0 = "/i2c@7000c000/rtc@68";
11 rtc1 = "/i2c@7000d000/tps65911@2d"; 11 rtc1 = "/i2c@7000d000/tps65911@2d";
12 rtc2 = "/rtc@7000e000"; 12 rtc2 = "/rtc@7000e000";
13 serial0 = &uarta;
14 serial1 = &uartb;
15 serial2 = &uartd;
13 }; 16 };
14 17
15 host1x@50000000 { 18 host1x@50000000 {
diff --git a/arch/arm/boot/dts/tegra30.dtsi b/arch/arm/boot/dts/tegra30.dtsi
index aa6ccea13d30..b270b9e3d455 100644
--- a/arch/arm/boot/dts/tegra30.dtsi
+++ b/arch/arm/boot/dts/tegra30.dtsi
@@ -9,14 +9,6 @@
9 compatible = "nvidia,tegra30"; 9 compatible = "nvidia,tegra30";
10 interrupt-parent = <&intc>; 10 interrupt-parent = <&intc>;
11 11
12 aliases {
13 serial0 = &uarta;
14 serial1 = &uartb;
15 serial2 = &uartc;
16 serial3 = &uartd;
17 serial4 = &uarte;
18 };
19
20 pcie-controller@00003000 { 12 pcie-controller@00003000 {
21 compatible = "nvidia,tegra30-pcie"; 13 compatible = "nvidia,tegra30-pcie";
22 device_type = "pci"; 14 device_type = "pci";
diff --git a/arch/arm/boot/dts/vf610-cosmic.dts b/arch/arm/boot/dts/vf610-cosmic.dts
index 3fd1b74e1216..de1b453c2932 100644
--- a/arch/arm/boot/dts/vf610-cosmic.dts
+++ b/arch/arm/boot/dts/vf610-cosmic.dts
@@ -33,6 +33,13 @@
33 33
34}; 34};
35 35
36&esdhc1 {
37 pinctrl-names = "default";
38 pinctrl-0 = <&pinctrl_esdhc1>;
39 bus-width = <4>;
40 status = "okay";
41};
42
36&fec1 { 43&fec1 {
37 phy-mode = "rmii"; 44 phy-mode = "rmii";
38 pinctrl-names = "default"; 45 pinctrl-names = "default";
@@ -42,6 +49,18 @@
42 49
43&iomuxc { 50&iomuxc {
44 vf610-cosmic { 51 vf610-cosmic {
52 pinctrl_esdhc1: esdhc1grp {
53 fsl,pins = <
54 VF610_PAD_PTA24__ESDHC1_CLK 0x31ef
55 VF610_PAD_PTA25__ESDHC1_CMD 0x31ef
56 VF610_PAD_PTA26__ESDHC1_DAT0 0x31ef
57 VF610_PAD_PTA27__ESDHC1_DAT1 0x31ef
58 VF610_PAD_PTA28__ESDHC1_DATA2 0x31ef
59 VF610_PAD_PTA29__ESDHC1_DAT3 0x31ef
60 VF610_PAD_PTB28__GPIO_98 0x219d
61 >;
62 };
63
45 pinctrl_fec1: fec1grp { 64 pinctrl_fec1: fec1grp {
46 fsl,pins = < 65 fsl,pins = <
47 VF610_PAD_PTC9__ENET_RMII1_MDC 0x30d2 66 VF610_PAD_PTC9__ENET_RMII1_MDC 0x30d2
diff --git a/arch/arm/boot/dts/zynq-7000.dtsi b/arch/arm/boot/dts/zynq-7000.dtsi
index 24036c440440..ce2ef5bec4f2 100644
--- a/arch/arm/boot/dts/zynq-7000.dtsi
+++ b/arch/arm/boot/dts/zynq-7000.dtsi
@@ -30,7 +30,6 @@
30 /* kHz uV */ 30 /* kHz uV */
31 666667 1000000 31 666667 1000000
32 333334 1000000 32 333334 1000000
33 222223 1000000
34 >; 33 >;
35 }; 34 };
36 35
@@ -65,7 +64,7 @@
65 interrupt-parent = <&intc>; 64 interrupt-parent = <&intc>;
66 ranges; 65 ranges;
67 66
68 adc@f8007100 { 67 adc: adc@f8007100 {
69 compatible = "xlnx,zynq-xadc-1.00.a"; 68 compatible = "xlnx,zynq-xadc-1.00.a";
70 reg = <0xf8007100 0x20>; 69 reg = <0xf8007100 0x20>;
71 interrupts = <0 7 4>; 70 interrupts = <0 7 4>;
@@ -137,7 +136,7 @@
137 <0xF8F00100 0x100>; 136 <0xF8F00100 0x100>;
138 }; 137 };
139 138
140 L2: cache-controller { 139 L2: cache-controller@f8f02000 {
141 compatible = "arm,pl310-cache"; 140 compatible = "arm,pl310-cache";
142 reg = <0xF8F02000 0x1000>; 141 reg = <0xF8F02000 0x1000>;
143 arm,data-latency = <3 2 2>; 142 arm,data-latency = <3 2 2>;
@@ -146,10 +145,10 @@
146 cache-level = <2>; 145 cache-level = <2>;
147 }; 146 };
148 147
149 memory-controller@f8006000 { 148 mc: memory-controller@f8006000 {
150 compatible = "xlnx,zynq-ddrc-a05"; 149 compatible = "xlnx,zynq-ddrc-a05";
151 reg = <0xf8006000 0x1000>; 150 reg = <0xf8006000 0x1000>;
152 } ; 151 };
153 152
154 uart0: serial@e0000000 { 153 uart0: serial@e0000000 {
155 compatible = "xlnx,xuartps", "cdns,uart-r1p8"; 154 compatible = "xlnx,xuartps", "cdns,uart-r1p8";
@@ -195,7 +194,7 @@
195 194
196 gem0: ethernet@e000b000 { 195 gem0: ethernet@e000b000 {
197 compatible = "cdns,gem"; 196 compatible = "cdns,gem";
198 reg = <0xe000b000 0x4000>; 197 reg = <0xe000b000 0x1000>;
199 status = "disabled"; 198 status = "disabled";
200 interrupts = <0 22 4>; 199 interrupts = <0 22 4>;
201 clocks = <&clkc 30>, <&clkc 30>, <&clkc 13>; 200 clocks = <&clkc 30>, <&clkc 30>, <&clkc 13>;
@@ -206,7 +205,7 @@
206 205
207 gem1: ethernet@e000c000 { 206 gem1: ethernet@e000c000 {
208 compatible = "cdns,gem"; 207 compatible = "cdns,gem";
209 reg = <0xe000c000 0x4000>; 208 reg = <0xe000c000 0x1000>;
210 status = "disabled"; 209 status = "disabled";
211 interrupts = <0 45 4>; 210 interrupts = <0 45 4>;
212 clocks = <&clkc 31>, <&clkc 31>, <&clkc 14>; 211 clocks = <&clkc 31>, <&clkc 31>, <&clkc 14>;
@@ -315,5 +314,16 @@
315 reg = <0xf8f00600 0x20>; 314 reg = <0xf8f00600 0x20>;
316 clocks = <&clkc 4>; 315 clocks = <&clkc 4>;
317 }; 316 };
317
318 watchdog0: watchdog@f8005000 {
319 clocks = <&clkc 45>;
320 compatible = "xlnx,zynq-wdt-r1p2";
321 device_type = "watchdog";
322 interrupt-parent = <&intc>;
323 interrupts = <0 9 1>;
324 reg = <0xf8005000 0x1000>;
325 reset = <0>;
326 timeout-sec = <10>;
327 };
318 }; 328 };
319}; 329};
diff --git a/arch/arm/boot/dts/zynq-parallella.dts b/arch/arm/boot/dts/zynq-parallella.dts
index e1f51ca127fe..0429bbd89fba 100644
--- a/arch/arm/boot/dts/zynq-parallella.dts
+++ b/arch/arm/boot/dts/zynq-parallella.dts
@@ -34,6 +34,10 @@
34 }; 34 };
35}; 35};
36 36
37&clkc {
38 fclk-enable = <0xf>;
39};
40
37&gem0 { 41&gem0 {
38 status = "okay"; 42 status = "okay";
39 phy-mode = "rgmii-id"; 43 phy-mode = "rgmii-id";
diff --git a/arch/arm/common/edma.c b/arch/arm/common/edma.c
index d86771abbf57..72041f002b7e 100644
--- a/arch/arm/common/edma.c
+++ b/arch/arm/common/edma.c
@@ -26,6 +26,7 @@
26#include <linux/io.h> 26#include <linux/io.h>
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/edma.h> 28#include <linux/edma.h>
29#include <linux/dma-mapping.h>
29#include <linux/of_address.h> 30#include <linux/of_address.h>
30#include <linux/of_device.h> 31#include <linux/of_device.h>
31#include <linux/of_dma.h> 32#include <linux/of_dma.h>
@@ -1623,6 +1624,11 @@ static int edma_probe(struct platform_device *pdev)
1623 struct device_node *node = pdev->dev.of_node; 1624 struct device_node *node = pdev->dev.of_node;
1624 struct device *dev = &pdev->dev; 1625 struct device *dev = &pdev->dev;
1625 int ret; 1626 int ret;
1627 struct platform_device_info edma_dev_info = {
1628 .name = "edma-dma-engine",
1629 .dma_mask = DMA_BIT_MASK(32),
1630 .parent = &pdev->dev,
1631 };
1626 1632
1627 if (node) { 1633 if (node) {
1628 /* Check if this is a second instance registered */ 1634 /* Check if this is a second instance registered */
@@ -1793,6 +1799,9 @@ static int edma_probe(struct platform_device *pdev)
1793 edma_write_array(j, EDMA_QRAE, i, 0x0); 1799 edma_write_array(j, EDMA_QRAE, i, 0x0);
1794 } 1800 }
1795 arch_num_cc++; 1801 arch_num_cc++;
1802
1803 edma_dev_info.id = j;
1804 platform_device_register_full(&edma_dev_info);
1796 } 1805 }
1797 1806
1798 return 0; 1807 return 0;
diff --git a/arch/arm/configs/exynos_defconfig b/arch/arm/configs/exynos_defconfig
index 72058b8a6f4d..e21ef830a483 100644
--- a/arch/arm/configs/exynos_defconfig
+++ b/arch/arm/configs/exynos_defconfig
@@ -142,11 +142,13 @@ CONFIG_MMC_DW_IDMAC=y
142CONFIG_MMC_DW_EXYNOS=y 142CONFIG_MMC_DW_EXYNOS=y
143CONFIG_RTC_CLASS=y 143CONFIG_RTC_CLASS=y
144CONFIG_RTC_DRV_MAX77686=y 144CONFIG_RTC_DRV_MAX77686=y
145CONFIG_RTC_DRV_MAX77802=y
145CONFIG_RTC_DRV_S5M=y 146CONFIG_RTC_DRV_S5M=y
146CONFIG_RTC_DRV_S3C=y 147CONFIG_RTC_DRV_S3C=y
147CONFIG_DMADEVICES=y 148CONFIG_DMADEVICES=y
148CONFIG_PL330_DMA=y 149CONFIG_PL330_DMA=y
149CONFIG_COMMON_CLK_MAX77686=y 150CONFIG_COMMON_CLK_MAX77686=y
151CONFIG_COMMON_CLK_MAX77802=y
150CONFIG_COMMON_CLK_S2MPS11=y 152CONFIG_COMMON_CLK_S2MPS11=y
151CONFIG_EXYNOS_IOMMU=y 153CONFIG_EXYNOS_IOMMU=y
152CONFIG_IIO=y 154CONFIG_IIO=y
diff --git a/arch/arm/configs/imx_v4_v5_defconfig b/arch/arm/configs/imx_v4_v5_defconfig
index e688741c89aa..e6b0007355f8 100644
--- a/arch/arm/configs/imx_v4_v5_defconfig
+++ b/arch/arm/configs/imx_v4_v5_defconfig
@@ -97,6 +97,7 @@ CONFIG_SERIAL_IMX_CONSOLE=y
97# CONFIG_HW_RANDOM is not set 97# CONFIG_HW_RANDOM is not set
98CONFIG_I2C_CHARDEV=y 98CONFIG_I2C_CHARDEV=y
99CONFIG_I2C_IMX=y 99CONFIG_I2C_IMX=y
100CONFIG_SPI=y
100CONFIG_SPI_IMX=y 101CONFIG_SPI_IMX=y
101CONFIG_SPI_SPIDEV=y 102CONFIG_SPI_SPIDEV=y
102CONFIG_GPIO_SYSFS=y 103CONFIG_GPIO_SYSFS=y
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index 8fca6e276b69..6790f1b3f3a1 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -158,6 +158,7 @@ CONFIG_I2C_CHARDEV=y
158CONFIG_I2C_ALGOPCF=m 158CONFIG_I2C_ALGOPCF=m
159CONFIG_I2C_ALGOPCA=m 159CONFIG_I2C_ALGOPCA=m
160CONFIG_I2C_IMX=y 160CONFIG_I2C_IMX=y
161CONFIG_SPI=y
161CONFIG_SPI_IMX=y 162CONFIG_SPI_IMX=y
162CONFIG_GPIO_SYSFS=y 163CONFIG_GPIO_SYSFS=y
163CONFIG_GPIO_MC9S08DZ60=y 164CONFIG_GPIO_MC9S08DZ60=y
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 491b7d5523bf..9d7a32f93fcf 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -217,6 +217,7 @@ CONFIG_I2C_CADENCE=y
217CONFIG_I2C_DESIGNWARE_PLATFORM=y 217CONFIG_I2C_DESIGNWARE_PLATFORM=y
218CONFIG_I2C_EXYNOS5=y 218CONFIG_I2C_EXYNOS5=y
219CONFIG_I2C_MV64XXX=y 219CONFIG_I2C_MV64XXX=y
220CONFIG_I2C_S3C2410=y
220CONFIG_I2C_SIRF=y 221CONFIG_I2C_SIRF=y
221CONFIG_I2C_TEGRA=y 222CONFIG_I2C_TEGRA=y
222CONFIG_I2C_ST=y 223CONFIG_I2C_ST=y
@@ -235,6 +236,7 @@ CONFIG_SPI_TEGRA20_SLINK=y
235CONFIG_SPI_XILINX=y 236CONFIG_SPI_XILINX=y
236CONFIG_PINCTRL_AS3722=y 237CONFIG_PINCTRL_AS3722=y
237CONFIG_PINCTRL_PALMAS=y 238CONFIG_PINCTRL_PALMAS=y
239CONFIG_PINCTRL_APQ8084=y
238CONFIG_GPIO_SYSFS=y 240CONFIG_GPIO_SYSFS=y
239CONFIG_GPIO_GENERIC_PLATFORM=y 241CONFIG_GPIO_GENERIC_PLATFORM=y
240CONFIG_GPIO_DWAPB=y 242CONFIG_GPIO_DWAPB=y
@@ -261,6 +263,7 @@ CONFIG_WATCHDOG=y
261CONFIG_XILINX_WATCHDOG=y 263CONFIG_XILINX_WATCHDOG=y
262CONFIG_ORION_WATCHDOG=y 264CONFIG_ORION_WATCHDOG=y
263CONFIG_SUNXI_WATCHDOG=y 265CONFIG_SUNXI_WATCHDOG=y
266CONFIG_MESON_WATCHDOG=y
264CONFIG_MFD_AS3722=y 267CONFIG_MFD_AS3722=y
265CONFIG_MFD_BCM590XX=y 268CONFIG_MFD_BCM590XX=y
266CONFIG_MFD_CROS_EC=y 269CONFIG_MFD_CROS_EC=y
@@ -353,6 +356,7 @@ CONFIG_MMC_MVSDIO=y
353CONFIG_MMC_SUNXI=y 356CONFIG_MMC_SUNXI=y
354CONFIG_MMC_DW=y 357CONFIG_MMC_DW=y
355CONFIG_MMC_DW_EXYNOS=y 358CONFIG_MMC_DW_EXYNOS=y
359CONFIG_MMC_DW_ROCKCHIP=y
356CONFIG_NEW_LEDS=y 360CONFIG_NEW_LEDS=y
357CONFIG_LEDS_CLASS=y 361CONFIG_LEDS_CLASS=y
358CONFIG_LEDS_GPIO=y 362CONFIG_LEDS_GPIO=y
@@ -409,6 +413,7 @@ CONFIG_NVEC_POWER=y
409CONFIG_NVEC_PAZ00=y 413CONFIG_NVEC_PAZ00=y
410CONFIG_QCOM_GSBI=y 414CONFIG_QCOM_GSBI=y
411CONFIG_COMMON_CLK_QCOM=y 415CONFIG_COMMON_CLK_QCOM=y
416CONFIG_APQ_MMCC_8084=y
412CONFIG_MSM_GCC_8660=y 417CONFIG_MSM_GCC_8660=y
413CONFIG_MSM_MMCC_8960=y 418CONFIG_MSM_MMCC_8960=y
414CONFIG_MSM_MMCC_8974=y 419CONFIG_MSM_MMCC_8974=y
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index 16e719c268dd..b3f86670d2eb 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -86,7 +86,6 @@ CONFIG_IP_PNP_DHCP=y
86CONFIG_IP_PNP_BOOTP=y 86CONFIG_IP_PNP_BOOTP=y
87CONFIG_IP_PNP_RARP=y 87CONFIG_IP_PNP_RARP=y
88# CONFIG_INET_LRO is not set 88# CONFIG_INET_LRO is not set
89CONFIG_IPV6=y
90CONFIG_NETFILTER=y 89CONFIG_NETFILTER=y
91CONFIG_CAN=m 90CONFIG_CAN=m
92CONFIG_CAN_C_CAN=m 91CONFIG_CAN_C_CAN=m
@@ -112,6 +111,7 @@ CONFIG_MTD_OOPS=y
112CONFIG_MTD_CFI=y 111CONFIG_MTD_CFI=y
113CONFIG_MTD_CFI_INTELEXT=y 112CONFIG_MTD_CFI_INTELEXT=y
114CONFIG_MTD_NAND=y 113CONFIG_MTD_NAND=y
114CONFIG_MTD_NAND_ECC_BCH=y
115CONFIG_MTD_NAND_OMAP2=y 115CONFIG_MTD_NAND_OMAP2=y
116CONFIG_MTD_ONENAND=y 116CONFIG_MTD_ONENAND=y
117CONFIG_MTD_ONENAND_VERIFY_WRITE=y 117CONFIG_MTD_ONENAND_VERIFY_WRITE=y
@@ -317,7 +317,7 @@ CONFIG_EXT4_FS=y
317CONFIG_FANOTIFY=y 317CONFIG_FANOTIFY=y
318CONFIG_QUOTA=y 318CONFIG_QUOTA=y
319CONFIG_QFMT_V2=y 319CONFIG_QFMT_V2=y
320CONFIG_AUTOFS4_FS=y 320CONFIG_AUTOFS4_FS=m
321CONFIG_MSDOS_FS=y 321CONFIG_MSDOS_FS=y
322CONFIG_VFAT_FS=y 322CONFIG_VFAT_FS=y
323CONFIG_TMPFS=y 323CONFIG_TMPFS=y
diff --git a/arch/arm/configs/socfpga_defconfig b/arch/arm/configs/socfpga_defconfig
index d7a5855a5db8..a2956c3112f1 100644
--- a/arch/arm/configs/socfpga_defconfig
+++ b/arch/arm/configs/socfpga_defconfig
@@ -1,5 +1,6 @@
1CONFIG_EXPERIMENTAL=y
2CONFIG_SYSVIPC=y 1CONFIG_SYSVIPC=y
2CONFIG_FHANDLE=y
3CONFIG_HIGH_RES_TIMERS=y
3CONFIG_IKCONFIG=y 4CONFIG_IKCONFIG=y
4CONFIG_IKCONFIG_PROC=y 5CONFIG_IKCONFIG_PROC=y
5CONFIG_LOG_BUF_SHIFT=14 6CONFIG_LOG_BUF_SHIFT=14
@@ -11,23 +12,17 @@ CONFIG_PROFILING=y
11CONFIG_OPROFILE=y 12CONFIG_OPROFILE=y
12CONFIG_MODULES=y 13CONFIG_MODULES=y
13CONFIG_MODULE_UNLOAD=y 14CONFIG_MODULE_UNLOAD=y
14CONFIG_HOTPLUG=y
15# CONFIG_LBDAF is not set 15# CONFIG_LBDAF is not set
16# CONFIG_BLK_DEV_BSG is not set 16# CONFIG_BLK_DEV_BSG is not set
17# CONFIG_IOSCHED_DEADLINE is not set 17# CONFIG_IOSCHED_DEADLINE is not set
18# CONFIG_IOSCHED_CFQ is not set 18# CONFIG_IOSCHED_CFQ is not set
19CONFIG_ARCH_SOCFPGA=y 19CONFIG_ARCH_SOCFPGA=y
20CONFIG_MACH_SOCFPGA_CYCLONE5=y
21CONFIG_ARM_THUMBEE=y 20CONFIG_ARM_THUMBEE=y
22# CONFIG_ARCH_VEXPRESS_CORTEX_A5_A9_ERRATA is not set
23# CONFIG_CACHE_L2X0 is not set
24CONFIG_HIGH_RES_TIMERS=y
25CONFIG_SMP=y 21CONFIG_SMP=y
26CONFIG_NR_CPUS=2 22CONFIG_NR_CPUS=2
27CONFIG_AEABI=y 23CONFIG_AEABI=y
28CONFIG_ZBOOT_ROM_TEXT=0x0 24CONFIG_ZBOOT_ROM_TEXT=0x0
29CONFIG_ZBOOT_ROM_BSS=0x0 25CONFIG_ZBOOT_ROM_BSS=0x0
30CONFIG_CMDLINE=""
31CONFIG_VFP=y 26CONFIG_VFP=y
32CONFIG_NEON=y 27CONFIG_NEON=y
33CONFIG_NET=y 28CONFIG_NET=y
@@ -41,38 +36,30 @@ CONFIG_IP_PNP=y
41CONFIG_IP_PNP_DHCP=y 36CONFIG_IP_PNP_DHCP=y
42CONFIG_IP_PNP_BOOTP=y 37CONFIG_IP_PNP_BOOTP=y
43CONFIG_IP_PNP_RARP=y 38CONFIG_IP_PNP_RARP=y
39CONFIG_IPV6=y
40CONFIG_NETWORK_PHY_TIMESTAMPING=y
41CONFIG_VLAN_8021Q=y
42CONFIG_VLAN_8021Q_GVRP=y
44CONFIG_CAN=y 43CONFIG_CAN=y
45CONFIG_CAN_RAW=y
46CONFIG_CAN_BCM=y
47CONFIG_CAN_GW=y
48CONFIG_CAN_DEV=y
49CONFIG_CAN_CALC_BITTIMING=y
50CONFIG_CAN_C_CAN=y 44CONFIG_CAN_C_CAN=y
51CONFIG_CAN_C_CAN_PLATFORM=y 45CONFIG_CAN_C_CAN_PLATFORM=y
52CONFIG_CAN_DEBUG_DEVICES=y 46CONFIG_CAN_DEBUG_DEVICES=y
53CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 47CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
54CONFIG_DEVTMPFS=y 48CONFIG_DEVTMPFS=y
55CONFIG_PROC_DEVICETREE=y 49CONFIG_DEVTMPFS_MOUNT=y
56CONFIG_BLK_DEV_RAM=y 50CONFIG_BLK_DEV_RAM=y
57CONFIG_BLK_DEV_RAM_COUNT=2 51CONFIG_BLK_DEV_RAM_COUNT=2
58CONFIG_BLK_DEV_RAM_SIZE=8192 52CONFIG_BLK_DEV_RAM_SIZE=8192
53CONFIG_SRAM=y
59CONFIG_SCSI=y 54CONFIG_SCSI=y
60# CONFIG_SCSI_PROC_FS is not set 55# CONFIG_SCSI_PROC_FS is not set
61CONFIG_BLK_DEV_SD=y 56CONFIG_BLK_DEV_SD=y
62# CONFIG_SCSI_LOWLEVEL is not set 57# CONFIG_SCSI_LOWLEVEL is not set
63CONFIG_NETDEVICES=y 58CONFIG_NETDEVICES=y
64CONFIG_STMMAC_ETH=y 59CONFIG_STMMAC_ETH=y
60CONFIG_DWMAC_SOCFPGA=y
65CONFIG_MICREL_PHY=y 61CONFIG_MICREL_PHY=y
66# CONFIG_STMMAC_PHY_ID_ZERO_WORKAROUND is not set
67CONFIG_INPUT_EVDEV=y 62CONFIG_INPUT_EVDEV=y
68CONFIG_DWMAC_SOCFPGA=y
69CONFIG_PPS=y
70CONFIG_NETWORK_PHY_TIMESTAMPING=y
71CONFIG_PTP_1588_CLOCK=y
72CONFIG_VLAN_8021Q=y
73CONFIG_VLAN_8021Q_GVRP=y
74CONFIG_GARP=y
75CONFIG_IPV6=y
76# CONFIG_SERIO_SERPORT is not set 63# CONFIG_SERIO_SERPORT is not set
77CONFIG_SERIO_AMBAKMI=y 64CONFIG_SERIO_AMBAKMI=y
78CONFIG_LEGACY_PTY_COUNT=16 65CONFIG_LEGACY_PTY_COUNT=16
@@ -81,45 +68,43 @@ CONFIG_SERIAL_8250_CONSOLE=y
81CONFIG_SERIAL_8250_NR_UARTS=2 68CONFIG_SERIAL_8250_NR_UARTS=2
82CONFIG_SERIAL_8250_RUNTIME_UARTS=2 69CONFIG_SERIAL_8250_RUNTIME_UARTS=2
83CONFIG_SERIAL_8250_DW=y 70CONFIG_SERIAL_8250_DW=y
71CONFIG_I2C=y
72CONFIG_I2C_CHARDEV=y
73CONFIG_I2C_DESIGNWARE_PLATFORM=y
84CONFIG_GPIOLIB=y 74CONFIG_GPIOLIB=y
85CONFIG_GPIO_SYSFS=y 75CONFIG_GPIO_SYSFS=y
86CONFIG_GPIO_DWAPB=y 76CONFIG_GPIO_DWAPB=y
87# CONFIG_RTC_HCTOSYS is not set 77CONFIG_PMBUS=y
78CONFIG_SENSORS_LTC2978=y
79CONFIG_SENSORS_LTC2978_REGULATOR=y
88CONFIG_WATCHDOG=y 80CONFIG_WATCHDOG=y
89CONFIG_DW_WATCHDOG=y 81CONFIG_DW_WATCHDOG=y
82CONFIG_REGULATOR=y
83CONFIG_REGULATOR_FIXED_VOLTAGE=y
84CONFIG_USB=y
85CONFIG_USB_DWC2=y
86CONFIG_USB_DWC2_HOST=y
87CONFIG_MMC=y
88CONFIG_MMC_DW=y
90CONFIG_EXT2_FS=y 89CONFIG_EXT2_FS=y
91CONFIG_EXT2_FS_XATTR=y 90CONFIG_EXT2_FS_XATTR=y
92CONFIG_EXT2_FS_POSIX_ACL=y 91CONFIG_EXT2_FS_POSIX_ACL=y
93CONFIG_EXT3_FS=y 92CONFIG_EXT3_FS=y
94CONFIG_NFS_FS=y 93CONFIG_EXT4_FS=y
95CONFIG_ROOT_NFS=y
96# CONFIG_DNOTIFY is not set
97# CONFIG_INOTIFY_USER is not set
98CONFIG_FHANDLE=y
99CONFIG_VFAT_FS=y 94CONFIG_VFAT_FS=y
100CONFIG_NTFS_FS=y 95CONFIG_NTFS_FS=y
101CONFIG_NTFS_RW=y 96CONFIG_NTFS_RW=y
102CONFIG_TMPFS=y 97CONFIG_TMPFS=y
103CONFIG_JFFS2_FS=y 98CONFIG_CONFIGFS_FS=y
99CONFIG_NFS_FS=y
100CONFIG_ROOT_NFS=y
104CONFIG_NLS_CODEPAGE_437=y 101CONFIG_NLS_CODEPAGE_437=y
105CONFIG_NLS_ISO8859_1=y 102CONFIG_NLS_ISO8859_1=y
103CONFIG_PRINTK_TIME=y
104CONFIG_DEBUG_INFO=y
106CONFIG_MAGIC_SYSRQ=y 105CONFIG_MAGIC_SYSRQ=y
107CONFIG_DETECT_HUNG_TASK=y 106CONFIG_DETECT_HUNG_TASK=y
108# CONFIG_SCHED_DEBUG is not set 107# CONFIG_SCHED_DEBUG is not set
109CONFIG_DEBUG_INFO=y
110CONFIG_ENABLE_DEFAULT_TRACERS=y 108CONFIG_ENABLE_DEFAULT_TRACERS=y
111CONFIG_DEBUG_USER=y 109CONFIG_DEBUG_USER=y
112CONFIG_XZ_DEC=y 110CONFIG_XZ_DEC=y
113CONFIG_I2C=y
114CONFIG_I2C_DESIGNWARE_CORE=y
115CONFIG_I2C_DESIGNWARE_PLATFORM=y
116CONFIG_I2C_CHARDEV=y
117CONFIG_MMC=y
118CONFIG_MMC_DW=y
119CONFIG_PM=y
120CONFIG_SUSPEND=y
121CONFIG_MMC_UNSAFE_RESUME=y
122CONFIG_USB=y
123CONFIG_USB_DWC2=y
124CONFIG_USB_DWC2_HOST=y
125CONFIG_USB_DWC2_PLATFORM=y
diff --git a/arch/arm/configs/sunxi_defconfig b/arch/arm/configs/sunxi_defconfig
index 847045313101..f7ac0379850f 100644
--- a/arch/arm/configs/sunxi_defconfig
+++ b/arch/arm/configs/sunxi_defconfig
@@ -76,6 +76,7 @@ CONFIG_WATCHDOG=y
76CONFIG_SUNXI_WATCHDOG=y 76CONFIG_SUNXI_WATCHDOG=y
77CONFIG_MFD_AXP20X=y 77CONFIG_MFD_AXP20X=y
78CONFIG_REGULATOR=y 78CONFIG_REGULATOR=y
79CONFIG_REGULATOR_FIXED_VOLTAGE=y
79CONFIG_REGULATOR_GPIO=y 80CONFIG_REGULATOR_GPIO=y
80CONFIG_USB=y 81CONFIG_USB=y
81CONFIG_USB_EHCI_HCD=y 82CONFIG_USB_EHCI_HCD=y
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index fc44d3761f9e..ce73ab635414 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -44,16 +44,6 @@ struct cpu_context_save {
44 __u32 extra[2]; /* Xscale 'acc' register, etc */ 44 __u32 extra[2]; /* Xscale 'acc' register, etc */
45}; 45};
46 46
47struct arm_restart_block {
48 union {
49 /* For user cache flushing */
50 struct {
51 unsigned long start;
52 unsigned long end;
53 } cache;
54 };
55};
56
57/* 47/*
58 * low level task data that entry.S needs immediate access to. 48 * low level task data that entry.S needs immediate access to.
59 * __switch_to() assumes cpu_context follows immediately after cpu_domain. 49 * __switch_to() assumes cpu_context follows immediately after cpu_domain.
@@ -79,7 +69,6 @@ struct thread_info {
79 unsigned long thumbee_state; /* ThumbEE Handler Base register */ 69 unsigned long thumbee_state; /* ThumbEE Handler Base register */
80#endif 70#endif
81 struct restart_block restart_block; 71 struct restart_block restart_block;
82 struct arm_restart_block arm_restart_block;
83}; 72};
84 73
85#define INIT_THREAD_INFO(tsk) \ 74#define INIT_THREAD_INFO(tsk) \
diff --git a/arch/arm/include/uapi/asm/unistd.h b/arch/arm/include/uapi/asm/unistd.h
index 3aaa75cae90c..705bb7620673 100644
--- a/arch/arm/include/uapi/asm/unistd.h
+++ b/arch/arm/include/uapi/asm/unistd.h
@@ -412,6 +412,7 @@
412#define __NR_seccomp (__NR_SYSCALL_BASE+383) 412#define __NR_seccomp (__NR_SYSCALL_BASE+383)
413#define __NR_getrandom (__NR_SYSCALL_BASE+384) 413#define __NR_getrandom (__NR_SYSCALL_BASE+384)
414#define __NR_memfd_create (__NR_SYSCALL_BASE+385) 414#define __NR_memfd_create (__NR_SYSCALL_BASE+385)
415#define __NR_bpf (__NR_SYSCALL_BASE+386)
415 416
416/* 417/*
417 * The following SWIs are ARM private. 418 * The following SWIs are ARM private.
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 713e807621d2..2d2d6087b9b1 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -10,6 +10,7 @@
10 * it under the terms of the GNU General Public License version 2 as 10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation. 11 * published by the Free Software Foundation.
12 */ 12 */
13#include <linux/compiler.h>
13#include <linux/sched.h> 14#include <linux/sched.h>
14#include <linux/mm.h> 15#include <linux/mm.h>
15#include <linux/dma-mapping.h> 16#include <linux/dma-mapping.h>
@@ -39,10 +40,19 @@
39 * GCC 3.2.x: miscompiles NEW_AUX_ENT in fs/binfmt_elf.c 40 * GCC 3.2.x: miscompiles NEW_AUX_ENT in fs/binfmt_elf.c
40 * (http://gcc.gnu.org/PR8896) and incorrect structure 41 * (http://gcc.gnu.org/PR8896) and incorrect structure
41 * initialisation in fs/jffs2/erase.c 42 * initialisation in fs/jffs2/erase.c
43 * GCC 4.8.0-4.8.2: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=58854
44 * miscompiles find_get_entry(), and can result in EXT3 and EXT4
45 * filesystem corruption (possibly other FS too).
42 */ 46 */
47#ifdef __GNUC__
43#if (__GNUC__ == 3 && __GNUC_MINOR__ < 3) 48#if (__GNUC__ == 3 && __GNUC_MINOR__ < 3)
44#error Your compiler is too buggy; it is known to miscompile kernels. 49#error Your compiler is too buggy; it is known to miscompile kernels.
45#error Known good compilers: 3.3 50#error Known good compilers: 3.3, 4.x
51#endif
52#if GCC_VERSION >= 40800 && GCC_VERSION < 40803
53#error Your compiler is too buggy; it is known to miscompile kernels
54#error and result in filesystem corruption and oopses.
55#endif
46#endif 56#endif
47 57
48int main(void) 58int main(void)
diff --git a/arch/arm/kernel/calls.S b/arch/arm/kernel/calls.S
index 9f899d8fdcca..e51833f8cc38 100644
--- a/arch/arm/kernel/calls.S
+++ b/arch/arm/kernel/calls.S
@@ -395,6 +395,7 @@
395 CALL(sys_seccomp) 395 CALL(sys_seccomp)
396 CALL(sys_getrandom) 396 CALL(sys_getrandom)
397/* 385 */ CALL(sys_memfd_create) 397/* 385 */ CALL(sys_memfd_create)
398 CALL(sys_bpf)
398#ifndef syscalls_counted 399#ifndef syscalls_counted
399.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls 400.equ syscalls_padding, ((NR_syscalls + 3) & ~3) - NR_syscalls
400#define syscalls_counted 401#define syscalls_counted
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 0c8b10801d36..9f5d81881eb6 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -533,8 +533,6 @@ static int bad_syscall(int n, struct pt_regs *regs)
533 return regs->ARM_r0; 533 return regs->ARM_r0;
534} 534}
535 535
536static long do_cache_op_restart(struct restart_block *);
537
538static inline int 536static inline int
539__do_cache_op(unsigned long start, unsigned long end) 537__do_cache_op(unsigned long start, unsigned long end)
540{ 538{
@@ -543,24 +541,8 @@ __do_cache_op(unsigned long start, unsigned long end)
543 do { 541 do {
544 unsigned long chunk = min(PAGE_SIZE, end - start); 542 unsigned long chunk = min(PAGE_SIZE, end - start);
545 543
546 if (signal_pending(current)) { 544 if (fatal_signal_pending(current))
547 struct thread_info *ti = current_thread_info(); 545 return 0;
548
549 ti->restart_block = (struct restart_block) {
550 .fn = do_cache_op_restart,
551 };
552
553 ti->arm_restart_block = (struct arm_restart_block) {
554 {
555 .cache = {
556 .start = start,
557 .end = end,
558 },
559 },
560 };
561
562 return -ERESTART_RESTARTBLOCK;
563 }
564 546
565 ret = flush_cache_user_range(start, start + chunk); 547 ret = flush_cache_user_range(start, start + chunk);
566 if (ret) 548 if (ret)
@@ -573,15 +555,6 @@ __do_cache_op(unsigned long start, unsigned long end)
573 return 0; 555 return 0;
574} 556}
575 557
576static long do_cache_op_restart(struct restart_block *unused)
577{
578 struct arm_restart_block *restart_block;
579
580 restart_block = &current_thread_info()->arm_restart_block;
581 return __do_cache_op(restart_block->cache.start,
582 restart_block->cache.end);
583}
584
585static inline int 558static inline int
586do_cache_op(unsigned long start, unsigned long end, int flags) 559do_cache_op(unsigned long start, unsigned long end, int flags)
587{ 560{
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 57a403a5c22b..8664ff17cbbe 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -197,7 +197,8 @@ static void unmap_range(struct kvm *kvm, pgd_t *pgdp,
197 pgd = pgdp + pgd_index(addr); 197 pgd = pgdp + pgd_index(addr);
198 do { 198 do {
199 next = kvm_pgd_addr_end(addr, end); 199 next = kvm_pgd_addr_end(addr, end);
200 unmap_puds(kvm, pgd, addr, next); 200 if (!pgd_none(*pgd))
201 unmap_puds(kvm, pgd, addr, next);
201 } while (pgd++, addr = next, addr != end); 202 } while (pgd++, addr = next, addr != end);
202} 203}
203 204
@@ -834,6 +835,11 @@ static bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
834 return kvm_vcpu_dabt_iswrite(vcpu); 835 return kvm_vcpu_dabt_iswrite(vcpu);
835} 836}
836 837
838static bool kvm_is_device_pfn(unsigned long pfn)
839{
840 return !pfn_valid(pfn);
841}
842
837static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, 843static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
838 struct kvm_memory_slot *memslot, unsigned long hva, 844 struct kvm_memory_slot *memslot, unsigned long hva,
839 unsigned long fault_status) 845 unsigned long fault_status)
@@ -904,7 +910,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
904 if (is_error_pfn(pfn)) 910 if (is_error_pfn(pfn))
905 return -EFAULT; 911 return -EFAULT;
906 912
907 if (kvm_is_mmio_pfn(pfn)) 913 if (kvm_is_device_pfn(pfn))
908 mem_type = PAGE_S2_DEVICE; 914 mem_type = PAGE_S2_DEVICE;
909 915
910 spin_lock(&kvm->mmu_lock); 916 spin_lock(&kvm->mmu_lock);
diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c
index 8c35ae4ff176..07a09570175d 100644
--- a/arch/arm/mach-highbank/highbank.c
+++ b/arch/arm/mach-highbank/highbank.c
@@ -20,7 +20,7 @@
20#include <linux/input.h> 20#include <linux/input.h>
21#include <linux/io.h> 21#include <linux/io.h>
22#include <linux/irqchip.h> 22#include <linux/irqchip.h>
23#include <linux/mailbox.h> 23#include <linux/pl320-ipc.h>
24#include <linux/of.h> 24#include <linux/of.h>
25#include <linux/of_irq.h> 25#include <linux/of_irq.h>
26#include <linux/of_platform.h> 26#include <linux/of_platform.h>
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
index 1412daf4a714..4e79da7c5e30 100644
--- a/arch/arm/mach-imx/clk-imx6q.c
+++ b/arch/arm/mach-imx/clk-imx6q.c
@@ -50,8 +50,8 @@ static const char *pcie_axi_sels[] = { "axi", "ahb", };
50static const char *ssi_sels[] = { "pll3_pfd2_508m", "pll3_pfd3_454m", "pll4_audio_div", }; 50static const char *ssi_sels[] = { "pll3_pfd2_508m", "pll3_pfd3_454m", "pll4_audio_div", };
51static const char *usdhc_sels[] = { "pll2_pfd2_396m", "pll2_pfd0_352m", }; 51static const char *usdhc_sels[] = { "pll2_pfd2_396m", "pll2_pfd0_352m", };
52static const char *enfc_sels[] = { "pll2_pfd0_352m", "pll2_bus", "pll3_usb_otg", "pll2_pfd2_396m", }; 52static const char *enfc_sels[] = { "pll2_pfd0_352m", "pll2_bus", "pll3_usb_otg", "pll2_pfd2_396m", };
53static const char *emi_sels[] = { "pll2_pfd2_396m", "pll3_usb_otg", "axi", "pll2_pfd0_352m", }; 53static const char *eim_sels[] = { "pll2_pfd2_396m", "pll3_usb_otg", "axi", "pll2_pfd0_352m", };
54static const char *emi_slow_sels[] = { "axi", "pll3_usb_otg", "pll2_pfd2_396m", "pll2_pfd0_352m", }; 54static const char *eim_slow_sels[] = { "axi", "pll3_usb_otg", "pll2_pfd2_396m", "pll2_pfd0_352m", };
55static const char *vdo_axi_sels[] = { "axi", "ahb", }; 55static const char *vdo_axi_sels[] = { "axi", "ahb", };
56static const char *vpu_axi_sels[] = { "axi", "pll2_pfd2_396m", "pll2_pfd0_352m", }; 56static const char *vpu_axi_sels[] = { "axi", "pll2_pfd2_396m", "pll2_pfd0_352m", };
57static const char *cko1_sels[] = { "pll3_usb_otg", "pll2_bus", "pll1_sys", "pll5_video_div", 57static const char *cko1_sels[] = { "pll3_usb_otg", "pll2_bus", "pll1_sys", "pll5_video_div",
@@ -302,8 +302,8 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
302 clk[IMX6QDL_CLK_USDHC3_SEL] = imx_clk_fixup_mux("usdhc3_sel", base + 0x1c, 18, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels), imx_cscmr1_fixup); 302 clk[IMX6QDL_CLK_USDHC3_SEL] = imx_clk_fixup_mux("usdhc3_sel", base + 0x1c, 18, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels), imx_cscmr1_fixup);
303 clk[IMX6QDL_CLK_USDHC4_SEL] = imx_clk_fixup_mux("usdhc4_sel", base + 0x1c, 19, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels), imx_cscmr1_fixup); 303 clk[IMX6QDL_CLK_USDHC4_SEL] = imx_clk_fixup_mux("usdhc4_sel", base + 0x1c, 19, 1, usdhc_sels, ARRAY_SIZE(usdhc_sels), imx_cscmr1_fixup);
304 clk[IMX6QDL_CLK_ENFC_SEL] = imx_clk_mux("enfc_sel", base + 0x2c, 16, 2, enfc_sels, ARRAY_SIZE(enfc_sels)); 304 clk[IMX6QDL_CLK_ENFC_SEL] = imx_clk_mux("enfc_sel", base + 0x2c, 16, 2, enfc_sels, ARRAY_SIZE(enfc_sels));
305 clk[IMX6QDL_CLK_EMI_SEL] = imx_clk_fixup_mux("emi_sel", base + 0x1c, 27, 2, emi_sels, ARRAY_SIZE(emi_sels), imx_cscmr1_fixup); 305 clk[IMX6QDL_CLK_EIM_SEL] = imx_clk_fixup_mux("eim_sel", base + 0x1c, 27, 2, eim_sels, ARRAY_SIZE(eim_sels), imx_cscmr1_fixup);
306 clk[IMX6QDL_CLK_EMI_SLOW_SEL] = imx_clk_fixup_mux("emi_slow_sel", base + 0x1c, 29, 2, emi_slow_sels, ARRAY_SIZE(emi_slow_sels), imx_cscmr1_fixup); 306 clk[IMX6QDL_CLK_EIM_SLOW_SEL] = imx_clk_fixup_mux("eim_slow_sel", base + 0x1c, 29, 2, eim_slow_sels, ARRAY_SIZE(eim_slow_sels), imx_cscmr1_fixup);
307 clk[IMX6QDL_CLK_VDO_AXI_SEL] = imx_clk_mux("vdo_axi_sel", base + 0x18, 11, 1, vdo_axi_sels, ARRAY_SIZE(vdo_axi_sels)); 307 clk[IMX6QDL_CLK_VDO_AXI_SEL] = imx_clk_mux("vdo_axi_sel", base + 0x18, 11, 1, vdo_axi_sels, ARRAY_SIZE(vdo_axi_sels));
308 clk[IMX6QDL_CLK_VPU_AXI_SEL] = imx_clk_mux("vpu_axi_sel", base + 0x18, 14, 2, vpu_axi_sels, ARRAY_SIZE(vpu_axi_sels)); 308 clk[IMX6QDL_CLK_VPU_AXI_SEL] = imx_clk_mux("vpu_axi_sel", base + 0x18, 14, 2, vpu_axi_sels, ARRAY_SIZE(vpu_axi_sels));
309 clk[IMX6QDL_CLK_CKO1_SEL] = imx_clk_mux("cko1_sel", base + 0x60, 0, 4, cko1_sels, ARRAY_SIZE(cko1_sels)); 309 clk[IMX6QDL_CLK_CKO1_SEL] = imx_clk_mux("cko1_sel", base + 0x60, 0, 4, cko1_sels, ARRAY_SIZE(cko1_sels));
@@ -354,8 +354,8 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
354 clk[IMX6QDL_CLK_USDHC4_PODF] = imx_clk_divider("usdhc4_podf", "usdhc4_sel", base + 0x24, 22, 3); 354 clk[IMX6QDL_CLK_USDHC4_PODF] = imx_clk_divider("usdhc4_podf", "usdhc4_sel", base + 0x24, 22, 3);
355 clk[IMX6QDL_CLK_ENFC_PRED] = imx_clk_divider("enfc_pred", "enfc_sel", base + 0x2c, 18, 3); 355 clk[IMX6QDL_CLK_ENFC_PRED] = imx_clk_divider("enfc_pred", "enfc_sel", base + 0x2c, 18, 3);
356 clk[IMX6QDL_CLK_ENFC_PODF] = imx_clk_divider("enfc_podf", "enfc_pred", base + 0x2c, 21, 6); 356 clk[IMX6QDL_CLK_ENFC_PODF] = imx_clk_divider("enfc_podf", "enfc_pred", base + 0x2c, 21, 6);
357 clk[IMX6QDL_CLK_EMI_PODF] = imx_clk_fixup_divider("emi_podf", "emi_sel", base + 0x1c, 20, 3, imx_cscmr1_fixup); 357 clk[IMX6QDL_CLK_EIM_PODF] = imx_clk_fixup_divider("eim_podf", "eim_sel", base + 0x1c, 20, 3, imx_cscmr1_fixup);
358 clk[IMX6QDL_CLK_EMI_SLOW_PODF] = imx_clk_fixup_divider("emi_slow_podf", "emi_slow_sel", base + 0x1c, 23, 3, imx_cscmr1_fixup); 358 clk[IMX6QDL_CLK_EIM_SLOW_PODF] = imx_clk_fixup_divider("eim_slow_podf", "eim_slow_sel", base + 0x1c, 23, 3, imx_cscmr1_fixup);
359 clk[IMX6QDL_CLK_VPU_AXI_PODF] = imx_clk_divider("vpu_axi_podf", "vpu_axi_sel", base + 0x24, 25, 3); 359 clk[IMX6QDL_CLK_VPU_AXI_PODF] = imx_clk_divider("vpu_axi_podf", "vpu_axi_sel", base + 0x24, 25, 3);
360 clk[IMX6QDL_CLK_CKO1_PODF] = imx_clk_divider("cko1_podf", "cko1_sel", base + 0x60, 4, 3); 360 clk[IMX6QDL_CLK_CKO1_PODF] = imx_clk_divider("cko1_podf", "cko1_sel", base + 0x60, 4, 3);
361 clk[IMX6QDL_CLK_CKO2_PODF] = imx_clk_divider("cko2_podf", "cko2_sel", base + 0x60, 21, 3); 361 clk[IMX6QDL_CLK_CKO2_PODF] = imx_clk_divider("cko2_podf", "cko2_sel", base + 0x60, 21, 3);
@@ -456,7 +456,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
456 clk[IMX6QDL_CLK_USDHC2] = imx_clk_gate2("usdhc2", "usdhc2_podf", base + 0x80, 4); 456 clk[IMX6QDL_CLK_USDHC2] = imx_clk_gate2("usdhc2", "usdhc2_podf", base + 0x80, 4);
457 clk[IMX6QDL_CLK_USDHC3] = imx_clk_gate2("usdhc3", "usdhc3_podf", base + 0x80, 6); 457 clk[IMX6QDL_CLK_USDHC3] = imx_clk_gate2("usdhc3", "usdhc3_podf", base + 0x80, 6);
458 clk[IMX6QDL_CLK_USDHC4] = imx_clk_gate2("usdhc4", "usdhc4_podf", base + 0x80, 8); 458 clk[IMX6QDL_CLK_USDHC4] = imx_clk_gate2("usdhc4", "usdhc4_podf", base + 0x80, 8);
459 clk[IMX6QDL_CLK_EIM_SLOW] = imx_clk_gate2("eim_slow", "emi_slow_podf", base + 0x80, 10); 459 clk[IMX6QDL_CLK_EIM_SLOW] = imx_clk_gate2("eim_slow", "eim_slow_podf", base + 0x80, 10);
460 clk[IMX6QDL_CLK_VDO_AXI] = imx_clk_gate2("vdo_axi", "vdo_axi_sel", base + 0x80, 12); 460 clk[IMX6QDL_CLK_VDO_AXI] = imx_clk_gate2("vdo_axi", "vdo_axi_sel", base + 0x80, 12);
461 clk[IMX6QDL_CLK_VPU_AXI] = imx_clk_gate2("vpu_axi", "vpu_axi_podf", base + 0x80, 14); 461 clk[IMX6QDL_CLK_VPU_AXI] = imx_clk_gate2("vpu_axi", "vpu_axi_podf", base + 0x80, 14);
462 clk[IMX6QDL_CLK_CKO1] = imx_clk_gate("cko1", "cko1_podf", base + 0x60, 7); 462 clk[IMX6QDL_CLK_CKO1] = imx_clk_gate("cko1", "cko1_podf", base + 0x60, 7);
diff --git a/arch/arm/mach-imx/clk-vf610.c b/arch/arm/mach-imx/clk-vf610.c
index a17818475050..409637254594 100644
--- a/arch/arm/mach-imx/clk-vf610.c
+++ b/arch/arm/mach-imx/clk-vf610.c
@@ -58,8 +58,14 @@
58#define PFD_PLL1_BASE (anatop_base + 0x2b0) 58#define PFD_PLL1_BASE (anatop_base + 0x2b0)
59#define PFD_PLL2_BASE (anatop_base + 0x100) 59#define PFD_PLL2_BASE (anatop_base + 0x100)
60#define PFD_PLL3_BASE (anatop_base + 0xf0) 60#define PFD_PLL3_BASE (anatop_base + 0xf0)
61#define PLL1_CTRL (anatop_base + 0x270)
62#define PLL2_CTRL (anatop_base + 0x30)
61#define PLL3_CTRL (anatop_base + 0x10) 63#define PLL3_CTRL (anatop_base + 0x10)
64#define PLL4_CTRL (anatop_base + 0x70)
65#define PLL5_CTRL (anatop_base + 0xe0)
66#define PLL6_CTRL (anatop_base + 0xa0)
62#define PLL7_CTRL (anatop_base + 0x20) 67#define PLL7_CTRL (anatop_base + 0x20)
68#define ANA_MISC1 (anatop_base + 0x160)
63 69
64static void __iomem *anatop_base; 70static void __iomem *anatop_base;
65static void __iomem *ccm_base; 71static void __iomem *ccm_base;
@@ -67,25 +73,34 @@ static void __iomem *ccm_base;
67/* sources for multiplexer clocks, this is used multiple times */ 73/* sources for multiplexer clocks, this is used multiple times */
68static const char *fast_sels[] = { "firc", "fxosc", }; 74static const char *fast_sels[] = { "firc", "fxosc", };
69static const char *slow_sels[] = { "sirc_32k", "sxosc", }; 75static const char *slow_sels[] = { "sirc_32k", "sxosc", };
70static const char *pll1_sels[] = { "pll1_main", "pll1_pfd1", "pll1_pfd2", "pll1_pfd3", "pll1_pfd4", }; 76static const char *pll1_sels[] = { "pll1_sys", "pll1_pfd1", "pll1_pfd2", "pll1_pfd3", "pll1_pfd4", };
71static const char *pll2_sels[] = { "pll2_main", "pll2_pfd1", "pll2_pfd2", "pll2_pfd3", "pll2_pfd4", }; 77static const char *pll2_sels[] = { "pll2_bus", "pll2_pfd1", "pll2_pfd2", "pll2_pfd3", "pll2_pfd4", };
72static const char *sys_sels[] = { "fast_clk_sel", "slow_clk_sel", "pll2_pfd_sel", "pll2_main", "pll1_pfd_sel", "pll3_main", }; 78static const char *pll_bypass_src_sels[] = { "fast_clk_sel", "lvds1_in", };
79static const char *pll1_bypass_sels[] = { "pll1", "pll1_bypass_src", };
80static const char *pll2_bypass_sels[] = { "pll2", "pll2_bypass_src", };
81static const char *pll3_bypass_sels[] = { "pll3", "pll3_bypass_src", };
82static const char *pll4_bypass_sels[] = { "pll4", "pll4_bypass_src", };
83static const char *pll5_bypass_sels[] = { "pll5", "pll5_bypass_src", };
84static const char *pll6_bypass_sels[] = { "pll6", "pll6_bypass_src", };
85static const char *pll7_bypass_sels[] = { "pll7", "pll7_bypass_src", };
86static const char *sys_sels[] = { "fast_clk_sel", "slow_clk_sel", "pll2_pfd_sel", "pll2_bus", "pll1_pfd_sel", "pll3_usb_otg", };
73static const char *ddr_sels[] = { "pll2_pfd2", "sys_sel", }; 87static const char *ddr_sels[] = { "pll2_pfd2", "sys_sel", };
74static const char *rmii_sels[] = { "enet_ext", "audio_ext", "enet_50m", "enet_25m", }; 88static const char *rmii_sels[] = { "enet_ext", "audio_ext", "enet_50m", "enet_25m", };
75static const char *enet_ts_sels[] = { "enet_ext", "fxosc", "audio_ext", "usb", "enet_ts", "enet_25m", "enet_50m", }; 89static const char *enet_ts_sels[] = { "enet_ext", "fxosc", "audio_ext", "usb", "enet_ts", "enet_25m", "enet_50m", };
76static const char *esai_sels[] = { "audio_ext", "mlb", "spdif_rx", "pll4_main_div", }; 90static const char *esai_sels[] = { "audio_ext", "mlb", "spdif_rx", "pll4_audio_div", };
77static const char *sai_sels[] = { "audio_ext", "mlb", "spdif_rx", "pll4_main_div", }; 91static const char *sai_sels[] = { "audio_ext", "mlb", "spdif_rx", "pll4_audio_div", };
78static const char *nfc_sels[] = { "platform_bus", "pll1_pfd1", "pll3_pfd1", "pll3_pfd3", }; 92static const char *nfc_sels[] = { "platform_bus", "pll1_pfd1", "pll3_pfd1", "pll3_pfd3", };
79static const char *qspi_sels[] = { "pll3_main", "pll3_pfd4", "pll2_pfd4", "pll1_pfd4", }; 93static const char *qspi_sels[] = { "pll3_usb_otg", "pll3_pfd4", "pll2_pfd4", "pll1_pfd4", };
80static const char *esdhc_sels[] = { "pll3_main", "pll3_pfd3", "pll1_pfd3", "platform_bus", }; 94static const char *esdhc_sels[] = { "pll3_usb_otg", "pll3_pfd3", "pll1_pfd3", "platform_bus", };
81static const char *dcu_sels[] = { "pll1_pfd2", "pll3_main", }; 95static const char *dcu_sels[] = { "pll1_pfd2", "pll3_usb_otg", };
82static const char *gpu_sels[] = { "pll2_pfd2", "pll3_pfd2", }; 96static const char *gpu_sels[] = { "pll2_pfd2", "pll3_pfd2", };
83static const char *vadc_sels[] = { "pll6_main_div", "pll3_main_div", "pll3_main", }; 97static const char *vadc_sels[] = { "pll6_video_div", "pll3_usb_otg_div", "pll3_usb_otg", };
84/* FTM counter clock source, not module clock */ 98/* FTM counter clock source, not module clock */
85static const char *ftm_ext_sels[] = {"sirc_128k", "sxosc", "fxosc_half", "audio_ext", }; 99static const char *ftm_ext_sels[] = {"sirc_128k", "sxosc", "fxosc_half", "audio_ext", };
86static const char *ftm_fix_sels[] = { "sxosc", "ipg_bus", }; 100static const char *ftm_fix_sels[] = { "sxosc", "ipg_bus", };
87 101
88static struct clk_div_table pll4_main_div_table[] = { 102
103static struct clk_div_table pll4_audio_div_table[] = {
89 { .val = 0, .div = 1 }, 104 { .val = 0, .div = 1 },
90 { .val = 1, .div = 2 }, 105 { .val = 1, .div = 2 },
91 { .val = 2, .div = 6 }, 106 { .val = 2, .div = 6 },
@@ -120,6 +135,9 @@ static void __init vf610_clocks_init(struct device_node *ccm_node)
120 clk[VF610_CLK_AUDIO_EXT] = imx_obtain_fixed_clock("audio_ext", 0); 135 clk[VF610_CLK_AUDIO_EXT] = imx_obtain_fixed_clock("audio_ext", 0);
121 clk[VF610_CLK_ENET_EXT] = imx_obtain_fixed_clock("enet_ext", 0); 136 clk[VF610_CLK_ENET_EXT] = imx_obtain_fixed_clock("enet_ext", 0);
122 137
138 /* Clock source from external clock via LVDs PAD */
139 clk[VF610_CLK_ANACLK1] = imx_obtain_fixed_clock("anaclk1", 0);
140
123 clk[VF610_CLK_FXOSC_HALF] = imx_clk_fixed_factor("fxosc_half", "fxosc", 1, 2); 141 clk[VF610_CLK_FXOSC_HALF] = imx_clk_fixed_factor("fxosc_half", "fxosc", 1, 2);
124 142
125 np = of_find_compatible_node(NULL, NULL, "fsl,vf610-anatop"); 143 np = of_find_compatible_node(NULL, NULL, "fsl,vf610-anatop");
@@ -133,31 +151,63 @@ static void __init vf610_clocks_init(struct device_node *ccm_node)
133 clk[VF610_CLK_SLOW_CLK_SEL] = imx_clk_mux("slow_clk_sel", CCM_CCSR, 4, 1, slow_sels, ARRAY_SIZE(slow_sels)); 151 clk[VF610_CLK_SLOW_CLK_SEL] = imx_clk_mux("slow_clk_sel", CCM_CCSR, 4, 1, slow_sels, ARRAY_SIZE(slow_sels));
134 clk[VF610_CLK_FASK_CLK_SEL] = imx_clk_mux("fast_clk_sel", CCM_CCSR, 5, 1, fast_sels, ARRAY_SIZE(fast_sels)); 152 clk[VF610_CLK_FASK_CLK_SEL] = imx_clk_mux("fast_clk_sel", CCM_CCSR, 5, 1, fast_sels, ARRAY_SIZE(fast_sels));
135 153
136 clk[VF610_CLK_PLL1_MAIN] = imx_clk_fixed_factor("pll1_main", "fast_clk_sel", 22, 1); 154 clk[VF610_CLK_PLL1_BYPASS_SRC] = imx_clk_mux("pll1_bypass_src", PLL1_CTRL, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
137 clk[VF610_CLK_PLL1_PFD1] = imx_clk_pfd("pll1_pfd1", "pll1_main", PFD_PLL1_BASE, 0); 155 clk[VF610_CLK_PLL2_BYPASS_SRC] = imx_clk_mux("pll2_bypass_src", PLL2_CTRL, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
138 clk[VF610_CLK_PLL1_PFD2] = imx_clk_pfd("pll1_pfd2", "pll1_main", PFD_PLL1_BASE, 1); 156 clk[VF610_CLK_PLL3_BYPASS_SRC] = imx_clk_mux("pll3_bypass_src", PLL3_CTRL, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
139 clk[VF610_CLK_PLL1_PFD3] = imx_clk_pfd("pll1_pfd3", "pll1_main", PFD_PLL1_BASE, 2); 157 clk[VF610_CLK_PLL4_BYPASS_SRC] = imx_clk_mux("pll4_bypass_src", PLL4_CTRL, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
140 clk[VF610_CLK_PLL1_PFD4] = imx_clk_pfd("pll1_pfd4", "pll1_main", PFD_PLL1_BASE, 3); 158 clk[VF610_CLK_PLL5_BYPASS_SRC] = imx_clk_mux("pll5_bypass_src", PLL5_CTRL, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
141 159 clk[VF610_CLK_PLL6_BYPASS_SRC] = imx_clk_mux("pll6_bypass_src", PLL6_CTRL, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
142 clk[VF610_CLK_PLL2_MAIN] = imx_clk_fixed_factor("pll2_main", "fast_clk_sel", 22, 1); 160 clk[VF610_CLK_PLL7_BYPASS_SRC] = imx_clk_mux("pll7_bypass_src", PLL7_CTRL, 14, 1, pll_bypass_src_sels, ARRAY_SIZE(pll_bypass_src_sels));
143 clk[VF610_CLK_PLL2_PFD1] = imx_clk_pfd("pll2_pfd1", "pll2_main", PFD_PLL2_BASE, 0); 161
144 clk[VF610_CLK_PLL2_PFD2] = imx_clk_pfd("pll2_pfd2", "pll2_main", PFD_PLL2_BASE, 1); 162 clk[VF610_CLK_PLL1] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll1", "pll1_bypass_src", PLL1_CTRL, 0x1);
145 clk[VF610_CLK_PLL2_PFD3] = imx_clk_pfd("pll2_pfd3", "pll2_main", PFD_PLL2_BASE, 2); 163 clk[VF610_CLK_PLL2] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll2", "pll2_bypass_src", PLL2_CTRL, 0x1);
146 clk[VF610_CLK_PLL2_PFD4] = imx_clk_pfd("pll2_pfd4", "pll2_main", PFD_PLL2_BASE, 3); 164 clk[VF610_CLK_PLL3] = imx_clk_pllv3(IMX_PLLV3_USB, "pll3", "pll3_bypass_src", PLL3_CTRL, 0x1);
147 165 clk[VF610_CLK_PLL4] = imx_clk_pllv3(IMX_PLLV3_AV, "pll4", "pll4_bypass_src", PLL4_CTRL, 0x7f);
148 clk[VF610_CLK_PLL3_MAIN] = imx_clk_fixed_factor("pll3_main", "fast_clk_sel", 20, 1); 166 clk[VF610_CLK_PLL5] = imx_clk_pllv3(IMX_PLLV3_ENET, "pll5", "pll5_bypass_src", PLL5_CTRL, 0x3);
149 clk[VF610_CLK_PLL3_PFD1] = imx_clk_pfd("pll3_pfd1", "pll3_main", PFD_PLL3_BASE, 0); 167 clk[VF610_CLK_PLL6] = imx_clk_pllv3(IMX_PLLV3_AV, "pll6", "pll6_bypass_src", PLL6_CTRL, 0x7f);
150 clk[VF610_CLK_PLL3_PFD2] = imx_clk_pfd("pll3_pfd2", "pll3_main", PFD_PLL3_BASE, 1); 168 clk[VF610_CLK_PLL7] = imx_clk_pllv3(IMX_PLLV3_USB, "pll7", "pll7_bypass_src", PLL7_CTRL, 0x1);
151 clk[VF610_CLK_PLL3_PFD3] = imx_clk_pfd("pll3_pfd3", "pll3_main", PFD_PLL3_BASE, 2); 169
152 clk[VF610_CLK_PLL3_PFD4] = imx_clk_pfd("pll3_pfd4", "pll3_main", PFD_PLL3_BASE, 3); 170 clk[VF610_PLL1_BYPASS] = imx_clk_mux_flags("pll1_bypass", PLL1_CTRL, 16, 1, pll1_bypass_sels, ARRAY_SIZE(pll1_bypass_sels), CLK_SET_RATE_PARENT);
153 171 clk[VF610_PLL2_BYPASS] = imx_clk_mux_flags("pll2_bypass", PLL2_CTRL, 16, 1, pll2_bypass_sels, ARRAY_SIZE(pll2_bypass_sels), CLK_SET_RATE_PARENT);
154 clk[VF610_CLK_PLL4_MAIN] = imx_clk_fixed_factor("pll4_main", "fast_clk_sel", 25, 1); 172 clk[VF610_PLL3_BYPASS] = imx_clk_mux_flags("pll3_bypass", PLL3_CTRL, 16, 1, pll3_bypass_sels, ARRAY_SIZE(pll3_bypass_sels), CLK_SET_RATE_PARENT);
155 /* Enet pll: fixed 50Mhz */ 173 clk[VF610_PLL4_BYPASS] = imx_clk_mux_flags("pll4_bypass", PLL4_CTRL, 16, 1, pll4_bypass_sels, ARRAY_SIZE(pll4_bypass_sels), CLK_SET_RATE_PARENT);
156 clk[VF610_CLK_PLL5_MAIN] = imx_clk_fixed_factor("pll5_main", "fast_clk_sel", 125, 6); 174 clk[VF610_PLL5_BYPASS] = imx_clk_mux_flags("pll5_bypass", PLL5_CTRL, 16, 1, pll5_bypass_sels, ARRAY_SIZE(pll5_bypass_sels), CLK_SET_RATE_PARENT);
157 /* pll6: default 960Mhz */ 175 clk[VF610_PLL6_BYPASS] = imx_clk_mux_flags("pll6_bypass", PLL6_CTRL, 16, 1, pll6_bypass_sels, ARRAY_SIZE(pll6_bypass_sels), CLK_SET_RATE_PARENT);
158 clk[VF610_CLK_PLL6_MAIN] = imx_clk_fixed_factor("pll6_main", "fast_clk_sel", 40, 1); 176 clk[VF610_PLL7_BYPASS] = imx_clk_mux_flags("pll7_bypass", PLL7_CTRL, 16, 1, pll7_bypass_sels, ARRAY_SIZE(pll7_bypass_sels), CLK_SET_RATE_PARENT);
159 /* pll7: USB1 PLL at 480MHz */ 177
160 clk[VF610_CLK_PLL7_MAIN] = imx_clk_pllv3(IMX_PLLV3_USB, "pll7_main", "fast_clk_sel", PLL7_CTRL, 0x2); 178 /* Do not bypass PLLs initially */
179 clk_set_parent(clk[VF610_PLL1_BYPASS], clk[VF610_CLK_PLL1]);
180 clk_set_parent(clk[VF610_PLL2_BYPASS], clk[VF610_CLK_PLL2]);
181 clk_set_parent(clk[VF610_PLL3_BYPASS], clk[VF610_CLK_PLL3]);
182 clk_set_parent(clk[VF610_PLL4_BYPASS], clk[VF610_CLK_PLL4]);
183 clk_set_parent(clk[VF610_PLL5_BYPASS], clk[VF610_CLK_PLL5]);
184 clk_set_parent(clk[VF610_PLL6_BYPASS], clk[VF610_CLK_PLL6]);
185 clk_set_parent(clk[VF610_PLL7_BYPASS], clk[VF610_CLK_PLL7]);
186
187 clk[VF610_CLK_PLL1_SYS] = imx_clk_gate("pll1_sys", "pll1_bypass", PLL1_CTRL, 13);
188 clk[VF610_CLK_PLL2_BUS] = imx_clk_gate("pll2_bus", "pll2_bypass", PLL2_CTRL, 13);
189 clk[VF610_CLK_PLL3_USB_OTG] = imx_clk_gate("pll3_usb_otg", "pll3_bypass", PLL3_CTRL, 13);
190 clk[VF610_CLK_PLL4_AUDIO] = imx_clk_gate("pll4_audio", "pll4_bypass", PLL4_CTRL, 13);
191 clk[VF610_CLK_PLL5_ENET] = imx_clk_gate("pll5_enet", "pll5_bypass", PLL5_CTRL, 13);
192 clk[VF610_CLK_PLL6_VIDEO] = imx_clk_gate("pll6_video", "pll6_bypass", PLL6_CTRL, 13);
193 clk[VF610_CLK_PLL7_USB_HOST] = imx_clk_gate("pll7_usb_host", "pll7_bypass", PLL7_CTRL, 13);
194
195 clk[VF610_CLK_LVDS1_IN] = imx_clk_gate_exclusive("lvds1_in", "anaclk1", ANA_MISC1, 12, BIT(10));
196
197 clk[VF610_CLK_PLL1_PFD1] = imx_clk_pfd("pll1_pfd1", "pll1_sys", PFD_PLL1_BASE, 0);
198 clk[VF610_CLK_PLL1_PFD2] = imx_clk_pfd("pll1_pfd2", "pll1_sys", PFD_PLL1_BASE, 1);
199 clk[VF610_CLK_PLL1_PFD3] = imx_clk_pfd("pll1_pfd3", "pll1_sys", PFD_PLL1_BASE, 2);
200 clk[VF610_CLK_PLL1_PFD4] = imx_clk_pfd("pll1_pfd4", "pll1_sys", PFD_PLL1_BASE, 3);
201
202 clk[VF610_CLK_PLL2_PFD1] = imx_clk_pfd("pll2_pfd1", "pll2_bus", PFD_PLL2_BASE, 0);
203 clk[VF610_CLK_PLL2_PFD2] = imx_clk_pfd("pll2_pfd2", "pll2_bus", PFD_PLL2_BASE, 1);
204 clk[VF610_CLK_PLL2_PFD3] = imx_clk_pfd("pll2_pfd3", "pll2_bus", PFD_PLL2_BASE, 2);
205 clk[VF610_CLK_PLL2_PFD4] = imx_clk_pfd("pll2_pfd4", "pll2_bus", PFD_PLL2_BASE, 3);
206
207 clk[VF610_CLK_PLL3_PFD1] = imx_clk_pfd("pll3_pfd1", "pll3_usb_otg", PFD_PLL3_BASE, 0);
208 clk[VF610_CLK_PLL3_PFD2] = imx_clk_pfd("pll3_pfd2", "pll3_usb_otg", PFD_PLL3_BASE, 1);
209 clk[VF610_CLK_PLL3_PFD3] = imx_clk_pfd("pll3_pfd3", "pll3_usb_otg", PFD_PLL3_BASE, 2);
210 clk[VF610_CLK_PLL3_PFD4] = imx_clk_pfd("pll3_pfd4", "pll3_usb_otg", PFD_PLL3_BASE, 3);
161 211
162 clk[VF610_CLK_PLL1_PFD_SEL] = imx_clk_mux("pll1_pfd_sel", CCM_CCSR, 16, 3, pll1_sels, 5); 212 clk[VF610_CLK_PLL1_PFD_SEL] = imx_clk_mux("pll1_pfd_sel", CCM_CCSR, 16, 3, pll1_sels, 5);
163 clk[VF610_CLK_PLL2_PFD_SEL] = imx_clk_mux("pll2_pfd_sel", CCM_CCSR, 19, 3, pll2_sels, 5); 213 clk[VF610_CLK_PLL2_PFD_SEL] = imx_clk_mux("pll2_pfd_sel", CCM_CCSR, 19, 3, pll2_sels, 5);
@@ -167,12 +217,12 @@ static void __init vf610_clocks_init(struct device_node *ccm_node)
167 clk[VF610_CLK_PLATFORM_BUS] = imx_clk_divider("platform_bus", "sys_bus", CCM_CACRR, 3, 3); 217 clk[VF610_CLK_PLATFORM_BUS] = imx_clk_divider("platform_bus", "sys_bus", CCM_CACRR, 3, 3);
168 clk[VF610_CLK_IPG_BUS] = imx_clk_divider("ipg_bus", "platform_bus", CCM_CACRR, 11, 2); 218 clk[VF610_CLK_IPG_BUS] = imx_clk_divider("ipg_bus", "platform_bus", CCM_CACRR, 11, 2);
169 219
170 clk[VF610_CLK_PLL3_MAIN_DIV] = imx_clk_divider("pll3_main_div", "pll3_main", CCM_CACRR, 20, 1); 220 clk[VF610_CLK_PLL3_MAIN_DIV] = imx_clk_divider("pll3_usb_otg_div", "pll3_usb_otg", CCM_CACRR, 20, 1);
171 clk[VF610_CLK_PLL4_MAIN_DIV] = clk_register_divider_table(NULL, "pll4_main_div", "pll4_main", 0, CCM_CACRR, 6, 3, 0, pll4_main_div_table, &imx_ccm_lock); 221 clk[VF610_CLK_PLL4_MAIN_DIV] = clk_register_divider_table(NULL, "pll4_audio_div", "pll4_audio", 0, CCM_CACRR, 6, 3, 0, pll4_audio_div_table, &imx_ccm_lock);
172 clk[VF610_CLK_PLL6_MAIN_DIV] = imx_clk_divider("pll6_main_div", "pll6_main", CCM_CACRR, 21, 1); 222 clk[VF610_CLK_PLL6_MAIN_DIV] = imx_clk_divider("pll6_video_div", "pll6_video", CCM_CACRR, 21, 1);
173 223
174 clk[VF610_CLK_USBPHY0] = imx_clk_gate("usbphy0", "pll3_main", PLL3_CTRL, 6); 224 clk[VF610_CLK_USBPHY0] = imx_clk_gate("usbphy0", "pll3_usb_otg", PLL3_CTRL, 6);
175 clk[VF610_CLK_USBPHY1] = imx_clk_gate("usbphy1", "pll7_main", PLL7_CTRL, 6); 225 clk[VF610_CLK_USBPHY1] = imx_clk_gate("usbphy1", "pll7_usb_host", PLL7_CTRL, 6);
176 226
177 clk[VF610_CLK_USBC0] = imx_clk_gate2("usbc0", "ipg_bus", CCM_CCGR1, CCM_CCGRx_CGn(4)); 227 clk[VF610_CLK_USBC0] = imx_clk_gate2("usbc0", "ipg_bus", CCM_CCGR1, CCM_CCGRx_CGn(4));
178 clk[VF610_CLK_USBC1] = imx_clk_gate2("usbc1", "ipg_bus", CCM_CCGR7, CCM_CCGRx_CGn(4)); 228 clk[VF610_CLK_USBC1] = imx_clk_gate2("usbc1", "ipg_bus", CCM_CCGR7, CCM_CCGRx_CGn(4));
@@ -191,8 +241,8 @@ static void __init vf610_clocks_init(struct device_node *ccm_node)
191 clk[VF610_CLK_QSPI1_X1_DIV] = imx_clk_divider("qspi1_x1", "qspi1_x2", CCM_CSCDR3, 11, 1); 241 clk[VF610_CLK_QSPI1_X1_DIV] = imx_clk_divider("qspi1_x1", "qspi1_x2", CCM_CSCDR3, 11, 1);
192 clk[VF610_CLK_QSPI1] = imx_clk_gate2("qspi1", "qspi1_x1", CCM_CCGR8, CCM_CCGRx_CGn(4)); 242 clk[VF610_CLK_QSPI1] = imx_clk_gate2("qspi1", "qspi1_x1", CCM_CCGR8, CCM_CCGRx_CGn(4));
193 243
194 clk[VF610_CLK_ENET_50M] = imx_clk_fixed_factor("enet_50m", "pll5_main", 1, 10); 244 clk[VF610_CLK_ENET_50M] = imx_clk_fixed_factor("enet_50m", "pll5_enet", 1, 10);
195 clk[VF610_CLK_ENET_25M] = imx_clk_fixed_factor("enet_25m", "pll5_main", 1, 20); 245 clk[VF610_CLK_ENET_25M] = imx_clk_fixed_factor("enet_25m", "pll5_enet", 1, 20);
196 clk[VF610_CLK_ENET_SEL] = imx_clk_mux("enet_sel", CCM_CSCMR2, 4, 2, rmii_sels, 4); 246 clk[VF610_CLK_ENET_SEL] = imx_clk_mux("enet_sel", CCM_CSCMR2, 4, 2, rmii_sels, 4);
197 clk[VF610_CLK_ENET_TS_SEL] = imx_clk_mux("enet_ts_sel", CCM_CSCMR2, 0, 3, enet_ts_sels, 7); 247 clk[VF610_CLK_ENET_TS_SEL] = imx_clk_mux("enet_ts_sel", CCM_CSCMR2, 0, 3, enet_ts_sels, 7);
198 clk[VF610_CLK_ENET] = imx_clk_gate("enet", "enet_sel", CCM_CSCDR1, 24); 248 clk[VF610_CLK_ENET] = imx_clk_gate("enet", "enet_sel", CCM_CSCDR1, 24);
diff --git a/arch/arm/mach-ixp4xx/include/mach/io.h b/arch/arm/mach-ixp4xx/include/mach/io.h
index 559c69a47731..7d11979da030 100644
--- a/arch/arm/mach-ixp4xx/include/mach/io.h
+++ b/arch/arm/mach-ixp4xx/include/mach/io.h
@@ -76,7 +76,7 @@ static inline void __indirect_writeb(u8 value, volatile void __iomem *p)
76 u32 n, byte_enables, data; 76 u32 n, byte_enables, data;
77 77
78 if (!is_pci_memory(addr)) { 78 if (!is_pci_memory(addr)) {
79 __raw_writeb(value, addr); 79 __raw_writeb(value, p);
80 return; 80 return;
81 } 81 }
82 82
@@ -141,7 +141,7 @@ static inline unsigned char __indirect_readb(const volatile void __iomem *p)
141 u32 n, byte_enables, data; 141 u32 n, byte_enables, data;
142 142
143 if (!is_pci_memory(addr)) 143 if (!is_pci_memory(addr))
144 return __raw_readb(addr); 144 return __raw_readb(p);
145 145
146 n = addr % 4; 146 n = addr % 4;
147 byte_enables = (0xf & ~BIT(n)) << IXP4XX_PCI_NP_CBE_BESL; 147 byte_enables = (0xf & ~BIT(n)) << IXP4XX_PCI_NP_CBE_BESL;
diff --git a/arch/arm/mach-mvebu/board-v7.c b/arch/arm/mach-mvebu/board-v7.c
index 6478626e3ff6..d0d39f150fab 100644
--- a/arch/arm/mach-mvebu/board-v7.c
+++ b/arch/arm/mach-mvebu/board-v7.c
@@ -188,7 +188,7 @@ static void __init thermal_quirk(void)
188 188
189static void __init mvebu_dt_init(void) 189static void __init mvebu_dt_init(void)
190{ 190{
191 if (of_machine_is_compatible("plathome,openblocks-ax3-4")) 191 if (of_machine_is_compatible("marvell,armadaxp"))
192 i2c_quirk(); 192 i2c_quirk();
193 if (of_machine_is_compatible("marvell,a375-db")) { 193 if (of_machine_is_compatible("marvell,a375-db")) {
194 external_abort_quirk(); 194 external_abort_quirk();
diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
index 2bdc3233abe2..044b51185fcc 100644
--- a/arch/arm/mach-mvebu/coherency.c
+++ b/arch/arm/mach-mvebu/coherency.c
@@ -400,6 +400,8 @@ int __init coherency_init(void)
400 type == COHERENCY_FABRIC_TYPE_ARMADA_380) 400 type == COHERENCY_FABRIC_TYPE_ARMADA_380)
401 armada_375_380_coherency_init(np); 401 armada_375_380_coherency_init(np);
402 402
403 of_node_put(np);
404
403 return 0; 405 return 0;
404} 406}
405 407
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
index d22c30d3ccfa..8c58b71c2727 100644
--- a/arch/arm/mach-omap2/omap_device.c
+++ b/arch/arm/mach-omap2/omap_device.c
@@ -917,6 +917,10 @@ static int __init omap_device_late_idle(struct device *dev, void *data)
917static int __init omap_device_late_init(void) 917static int __init omap_device_late_init(void)
918{ 918{
919 bus_for_each_dev(&platform_bus_type, NULL, NULL, omap_device_late_idle); 919 bus_for_each_dev(&platform_bus_type, NULL, NULL, omap_device_late_idle);
920
921 WARN(!of_have_populated_dt(),
922 "legacy booting deprecated, please update to boot with .dts\n");
923
920 return 0; 924 return 0;
921} 925}
922omap_late_initcall_sync(omap_device_late_init); 926omap_late_initcall_sync(omap_device_late_init);
diff --git a/arch/arm/mach-omap2/pdata-quirks.c b/arch/arm/mach-omap2/pdata-quirks.c
index c95346c94829..cec9d6c6442c 100644
--- a/arch/arm/mach-omap2/pdata-quirks.c
+++ b/arch/arm/mach-omap2/pdata-quirks.c
@@ -252,9 +252,6 @@ static void __init nokia_n900_legacy_init(void)
252 platform_device_register(&omap3_rom_rng_device); 252 platform_device_register(&omap3_rom_rng_device);
253 253
254 } 254 }
255
256 /* Only on some development boards */
257 gpio_request_one(164, GPIOF_OUT_INIT_LOW, "smc91x reset");
258} 255}
259 256
260static void __init omap3_tao3530_legacy_init(void) 257static void __init omap3_tao3530_legacy_init(void)
diff --git a/arch/arm/mach-pxa/include/mach/addr-map.h b/arch/arm/mach-pxa/include/mach/addr-map.h
index bbf9df37ad4b..d28fe291233a 100644
--- a/arch/arm/mach-pxa/include/mach/addr-map.h
+++ b/arch/arm/mach-pxa/include/mach/addr-map.h
@@ -39,6 +39,11 @@
39#define DMEMC_SIZE 0x00100000 39#define DMEMC_SIZE 0x00100000
40 40
41/* 41/*
42 * Reserved space for low level debug virtual addresses within
43 * 0xf6200000..0xf6201000
44 */
45
46/*
42 * Internal Memory Controller (PXA27x and later) 47 * Internal Memory Controller (PXA27x and later)
43 */ 48 */
44#define IMEMC_PHYS 0x58000000 49#define IMEMC_PHYS 0x58000000
diff --git a/arch/arm/mach-shmobile/clock-r8a7740.c b/arch/arm/mach-shmobile/clock-r8a7740.c
index 0794f0426e70..19df9cb30495 100644
--- a/arch/arm/mach-shmobile/clock-r8a7740.c
+++ b/arch/arm/mach-shmobile/clock-r8a7740.c
@@ -455,7 +455,7 @@ enum {
455 MSTP128, MSTP127, MSTP125, 455 MSTP128, MSTP127, MSTP125,
456 MSTP116, MSTP111, MSTP100, MSTP117, 456 MSTP116, MSTP111, MSTP100, MSTP117,
457 457
458 MSTP230, 458 MSTP230, MSTP229,
459 MSTP222, 459 MSTP222,
460 MSTP218, MSTP217, MSTP216, MSTP214, 460 MSTP218, MSTP217, MSTP216, MSTP214,
461 MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200, 461 MSTP207, MSTP206, MSTP204, MSTP203, MSTP202, MSTP201, MSTP200,
@@ -474,11 +474,12 @@ static struct clk mstp_clks[MSTP_NR] = {
474 [MSTP127] = SH_CLK_MSTP32(&div4_clks[DIV4_S], SMSTPCR1, 27, 0), /* CEU20 */ 474 [MSTP127] = SH_CLK_MSTP32(&div4_clks[DIV4_S], SMSTPCR1, 27, 0), /* CEU20 */
475 [MSTP125] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR1, 25, 0), /* TMU0 */ 475 [MSTP125] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR1, 25, 0), /* TMU0 */
476 [MSTP117] = SH_CLK_MSTP32(&div4_clks[DIV4_B], SMSTPCR1, 17, 0), /* LCDC1 */ 476 [MSTP117] = SH_CLK_MSTP32(&div4_clks[DIV4_B], SMSTPCR1, 17, 0), /* LCDC1 */
477 [MSTP116] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR1, 16, 0), /* IIC0 */ 477 [MSTP116] = SH_CLK_MSTP32(&div4_clks[DIV4_HPP], SMSTPCR1, 16, 0), /* IIC0 */
478 [MSTP111] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR1, 11, 0), /* TMU1 */ 478 [MSTP111] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR1, 11, 0), /* TMU1 */
479 [MSTP100] = SH_CLK_MSTP32(&div4_clks[DIV4_B], SMSTPCR1, 0, 0), /* LCDC0 */ 479 [MSTP100] = SH_CLK_MSTP32(&div4_clks[DIV4_B], SMSTPCR1, 0, 0), /* LCDC0 */
480 480
481 [MSTP230] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR2, 30, 0), /* SCIFA6 */ 481 [MSTP230] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR2, 30, 0), /* SCIFA6 */
482 [MSTP229] = SH_CLK_MSTP32(&div4_clks[DIV4_HP], SMSTPCR2, 29, 0), /* INTCA */
482 [MSTP222] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR2, 22, 0), /* SCIFA7 */ 483 [MSTP222] = SH_CLK_MSTP32(&div6_clks[DIV6_SUB], SMSTPCR2, 22, 0), /* SCIFA7 */
483 [MSTP218] = SH_CLK_MSTP32(&div4_clks[DIV4_HP], SMSTPCR2, 18, 0), /* DMAC1 */ 484 [MSTP218] = SH_CLK_MSTP32(&div4_clks[DIV4_HP], SMSTPCR2, 18, 0), /* DMAC1 */
484 [MSTP217] = SH_CLK_MSTP32(&div4_clks[DIV4_HP], SMSTPCR2, 17, 0), /* DMAC2 */ 485 [MSTP217] = SH_CLK_MSTP32(&div4_clks[DIV4_HP], SMSTPCR2, 17, 0), /* DMAC2 */
@@ -575,6 +576,10 @@ static struct clk_lookup lookups[] = {
575 CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[MSTP218]), 576 CLKDEV_DEV_ID("sh-dma-engine.0", &mstp_clks[MSTP218]),
576 CLKDEV_DEV_ID("sh-sci.7", &mstp_clks[MSTP222]), 577 CLKDEV_DEV_ID("sh-sci.7", &mstp_clks[MSTP222]),
577 CLKDEV_DEV_ID("e6cd0000.serial", &mstp_clks[MSTP222]), 578 CLKDEV_DEV_ID("e6cd0000.serial", &mstp_clks[MSTP222]),
579 CLKDEV_DEV_ID("renesas_intc_irqpin.0", &mstp_clks[MSTP229]),
580 CLKDEV_DEV_ID("renesas_intc_irqpin.1", &mstp_clks[MSTP229]),
581 CLKDEV_DEV_ID("renesas_intc_irqpin.2", &mstp_clks[MSTP229]),
582 CLKDEV_DEV_ID("renesas_intc_irqpin.3", &mstp_clks[MSTP229]),
578 CLKDEV_DEV_ID("sh-sci.6", &mstp_clks[MSTP230]), 583 CLKDEV_DEV_ID("sh-sci.6", &mstp_clks[MSTP230]),
579 CLKDEV_DEV_ID("e6cc0000.serial", &mstp_clks[MSTP230]), 584 CLKDEV_DEV_ID("e6cc0000.serial", &mstp_clks[MSTP230]),
580 585
diff --git a/arch/arm/mach-shmobile/clock-r8a7790.c b/arch/arm/mach-shmobile/clock-r8a7790.c
index 126ddafad526..f62265200592 100644
--- a/arch/arm/mach-shmobile/clock-r8a7790.c
+++ b/arch/arm/mach-shmobile/clock-r8a7790.c
@@ -68,7 +68,7 @@
68 68
69#define SDCKCR 0xE6150074 69#define SDCKCR 0xE6150074
70#define SD2CKCR 0xE6150078 70#define SD2CKCR 0xE6150078
71#define SD3CKCR 0xE615007C 71#define SD3CKCR 0xE615026C
72#define MMC0CKCR 0xE6150240 72#define MMC0CKCR 0xE6150240
73#define MMC1CKCR 0xE6150244 73#define MMC1CKCR 0xE6150244
74#define SSPCKCR 0xE6150248 74#define SSPCKCR 0xE6150248
diff --git a/arch/arm/mach-shmobile/setup-sh73a0.c b/arch/arm/mach-shmobile/setup-sh73a0.c
index b7bd8e509668..328657d011d5 100644
--- a/arch/arm/mach-shmobile/setup-sh73a0.c
+++ b/arch/arm/mach-shmobile/setup-sh73a0.c
@@ -26,6 +26,7 @@
26#include <linux/of_platform.h> 26#include <linux/of_platform.h>
27#include <linux/delay.h> 27#include <linux/delay.h>
28#include <linux/input.h> 28#include <linux/input.h>
29#include <linux/i2c/i2c-sh_mobile.h>
29#include <linux/io.h> 30#include <linux/io.h>
30#include <linux/serial_sci.h> 31#include <linux/serial_sci.h>
31#include <linux/sh_dma.h> 32#include <linux/sh_dma.h>
@@ -192,11 +193,18 @@ static struct resource i2c4_resources[] = {
192 }, 193 },
193}; 194};
194 195
196static struct i2c_sh_mobile_platform_data i2c_platform_data = {
197 .clks_per_count = 2,
198};
199
195static struct platform_device i2c0_device = { 200static struct platform_device i2c0_device = {
196 .name = "i2c-sh_mobile", 201 .name = "i2c-sh_mobile",
197 .id = 0, 202 .id = 0,
198 .resource = i2c0_resources, 203 .resource = i2c0_resources,
199 .num_resources = ARRAY_SIZE(i2c0_resources), 204 .num_resources = ARRAY_SIZE(i2c0_resources),
205 .dev = {
206 .platform_data = &i2c_platform_data,
207 },
200}; 208};
201 209
202static struct platform_device i2c1_device = { 210static struct platform_device i2c1_device = {
@@ -204,6 +212,9 @@ static struct platform_device i2c1_device = {
204 .id = 1, 212 .id = 1,
205 .resource = i2c1_resources, 213 .resource = i2c1_resources,
206 .num_resources = ARRAY_SIZE(i2c1_resources), 214 .num_resources = ARRAY_SIZE(i2c1_resources),
215 .dev = {
216 .platform_data = &i2c_platform_data,
217 },
207}; 218};
208 219
209static struct platform_device i2c2_device = { 220static struct platform_device i2c2_device = {
@@ -211,6 +222,9 @@ static struct platform_device i2c2_device = {
211 .id = 2, 222 .id = 2,
212 .resource = i2c2_resources, 223 .resource = i2c2_resources,
213 .num_resources = ARRAY_SIZE(i2c2_resources), 224 .num_resources = ARRAY_SIZE(i2c2_resources),
225 .dev = {
226 .platform_data = &i2c_platform_data,
227 },
214}; 228};
215 229
216static struct platform_device i2c3_device = { 230static struct platform_device i2c3_device = {
@@ -218,6 +232,9 @@ static struct platform_device i2c3_device = {
218 .id = 3, 232 .id = 3,
219 .resource = i2c3_resources, 233 .resource = i2c3_resources,
220 .num_resources = ARRAY_SIZE(i2c3_resources), 234 .num_resources = ARRAY_SIZE(i2c3_resources),
235 .dev = {
236 .platform_data = &i2c_platform_data,
237 },
221}; 238};
222 239
223static struct platform_device i2c4_device = { 240static struct platform_device i2c4_device = {
@@ -225,6 +242,9 @@ static struct platform_device i2c4_device = {
225 .id = 4, 242 .id = 4,
226 .resource = i2c4_resources, 243 .resource = i2c4_resources,
227 .num_resources = ARRAY_SIZE(i2c4_resources), 244 .num_resources = ARRAY_SIZE(i2c4_resources),
245 .dev = {
246 .platform_data = &i2c_platform_data,
247 },
228}; 248};
229 249
230static const struct sh_dmae_slave_config sh73a0_dmae_slaves[] = { 250static const struct sh_dmae_slave_config sh73a0_dmae_slaves[] = {
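A minimal user-space sketch of the platform_data hand-off the setup-sh73a0.c hunks above perform: board code stores a pointer on each i2c platform device and the driver casts it back (in the kernel this read-back is dev_get_platdata(); all other names below are illustrative, not the kernel API).

    /* User-space model of attaching and reading back platform_data. */
    #include <stdio.h>

    struct i2c_platform_data { int clks_per_count; };
    struct device            { void *platform_data; };
    struct platform_device   { const char *name; int id; struct device dev; };

    static void probe(struct platform_device *pdev)
    {
        /* kernel equivalent: dev_get_platdata(&pdev->dev) */
        struct i2c_platform_data *pd = pdev->dev.platform_data;

        printf("%s.%d: clks_per_count=%d\n", pdev->name, pdev->id,
               pd ? pd->clks_per_count : 1);
    }

    int main(void)
    {
        static struct i2c_platform_data pd = { .clks_per_count = 2 };
        struct platform_device i2c0 = {
            .name = "i2c-sh_mobile", .id = 0,
            .dev = { .platform_data = &pd },
        };

        probe(&i2c0);
        return 0;
    }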
diff --git a/arch/arm/mach-socfpga/core.h b/arch/arm/mach-socfpga/core.h
index 572b8f719ffb..60c443dadb58 100644
--- a/arch/arm/mach-socfpga/core.h
+++ b/arch/arm/mach-socfpga/core.h
@@ -40,7 +40,7 @@ extern void __iomem *rst_manager_base_addr;
40extern struct smp_operations socfpga_smp_ops; 40extern struct smp_operations socfpga_smp_ops;
41extern char secondary_trampoline, secondary_trampoline_end; 41extern char secondary_trampoline, secondary_trampoline_end;
42 42
43extern unsigned long cpu1start_addr; 43extern unsigned long socfpga_cpu1start_addr;
44 44
45#define SOCFPGA_SCU_VIRT_BASE 0xfffec000 45#define SOCFPGA_SCU_VIRT_BASE 0xfffec000
46 46
diff --git a/arch/arm/mach-socfpga/headsmp.S b/arch/arm/mach-socfpga/headsmp.S
index 95c115d8b5ee..f65ea0af4af3 100644
--- a/arch/arm/mach-socfpga/headsmp.S
+++ b/arch/arm/mach-socfpga/headsmp.S
@@ -9,21 +9,26 @@
9 */ 9 */
10#include <linux/linkage.h> 10#include <linux/linkage.h>
11#include <linux/init.h> 11#include <linux/init.h>
12#include <asm/memory.h>
12 13
13 .arch armv7-a 14 .arch armv7-a
14 15
15ENTRY(secondary_trampoline) 16ENTRY(secondary_trampoline)
16 movw r2, #:lower16:cpu1start_addr 17 /* CPU1 will always fetch from 0x0 when it is brought out of reset.
17 movt r2, #:upper16:cpu1start_addr 18 * Thus, we can just subtract the PAGE_OFFSET to get the physical
18 19 * address of &cpu1start_addr. This would not work for platforms
19 /* The socfpga VT cannot handle a 0xC0000000 page offset when loading 20 * where the physical memory does not start at 0x0.
20 the cpu1start_addr, we bit clear it. Tested on HW and VT. */ 21 */
21 bic r2, r2, #0x40000000 22 adr r0, 1f
22 23 ldmia r0, {r1, r2}
23 ldr r0, [r2] 24 sub r2, r2, #PAGE_OFFSET
24 ldr r1, [r0] 25 ldr r3, [r2]
25 bx r1 26 ldr r4, [r3]
27 bx r4
26 28
29 .align
301: .long .
31 .long socfpga_cpu1start_addr
27ENTRY(secondary_trampoline_end) 32ENTRY(secondary_trampoline_end)
28 33
29ENTRY(socfpga_secondary_startup) 34ENTRY(socfpga_secondary_startup)
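The new trampoline comment above relies on SOCFPGA RAM starting at physical address 0x0, so a kernel virtual address converts to physical by subtracting PAGE_OFFSET. A tiny C sketch of that assumption (the constants and the example address are made up for illustration):

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_OFFSET 0xC0000000u   /* typical 3G/1G split; assumption */

    /* Only valid when physical RAM starts at 0 (PHYS_OFFSET == 0). */
    static uint32_t virt_to_phys_ram_at_zero(uint32_t virt)
    {
        return virt - PAGE_OFFSET;
    }

    int main(void)
    {
        uint32_t cpu1start_addr_virt = 0xC0A1B2C4u;  /* made-up example */

        printf("phys = 0x%08x\n",
               virt_to_phys_ram_at_zero(cpu1start_addr_virt));
        return 0;
    }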
diff --git a/arch/arm/mach-socfpga/platsmp.c b/arch/arm/mach-socfpga/platsmp.c
index 5356a72bc8ce..16ca97b039f9 100644
--- a/arch/arm/mach-socfpga/platsmp.c
+++ b/arch/arm/mach-socfpga/platsmp.c
@@ -33,11 +33,11 @@ static int socfpga_boot_secondary(unsigned int cpu, struct task_struct *idle)
33{ 33{
34 int trampoline_size = &secondary_trampoline_end - &secondary_trampoline; 34 int trampoline_size = &secondary_trampoline_end - &secondary_trampoline;
35 35
36 if (cpu1start_addr) { 36 if (socfpga_cpu1start_addr) {
37 memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size); 37 memcpy(phys_to_virt(0), &secondary_trampoline, trampoline_size);
38 38
39 __raw_writel(virt_to_phys(socfpga_secondary_startup), 39 __raw_writel(virt_to_phys(socfpga_secondary_startup),
40 (sys_manager_base_addr + (cpu1start_addr & 0x000000ff))); 40 (sys_manager_base_addr + (socfpga_cpu1start_addr & 0x000000ff)));
41 41
42 flush_cache_all(); 42 flush_cache_all();
43 smp_wmb(); 43 smp_wmb();
diff --git a/arch/arm/mach-socfpga/socfpga.c b/arch/arm/mach-socfpga/socfpga.c
index adbf38314ca8..383d61e138af 100644
--- a/arch/arm/mach-socfpga/socfpga.c
+++ b/arch/arm/mach-socfpga/socfpga.c
@@ -29,7 +29,7 @@
29void __iomem *socfpga_scu_base_addr = ((void __iomem *)(SOCFPGA_SCU_VIRT_BASE)); 29void __iomem *socfpga_scu_base_addr = ((void __iomem *)(SOCFPGA_SCU_VIRT_BASE));
30void __iomem *sys_manager_base_addr; 30void __iomem *sys_manager_base_addr;
31void __iomem *rst_manager_base_addr; 31void __iomem *rst_manager_base_addr;
32unsigned long cpu1start_addr; 32unsigned long socfpga_cpu1start_addr;
33 33
34static struct map_desc scu_io_desc __initdata = { 34static struct map_desc scu_io_desc __initdata = {
35 .virtual = SOCFPGA_SCU_VIRT_BASE, 35 .virtual = SOCFPGA_SCU_VIRT_BASE,
@@ -70,7 +70,7 @@ void __init socfpga_sysmgr_init(void)
70 np = of_find_compatible_node(NULL, NULL, "altr,sys-mgr"); 70 np = of_find_compatible_node(NULL, NULL, "altr,sys-mgr");
71 71
72 if (of_property_read_u32(np, "cpu1-start-addr", 72 if (of_property_read_u32(np, "cpu1-start-addr",
73 (u32 *) &cpu1start_addr)) 73 (u32 *) &socfpga_cpu1start_addr))
74 pr_err("SMP: Need cpu1-start-addr in device tree.\n"); 74 pr_err("SMP: Need cpu1-start-addr in device tree.\n");
75 75
76 sys_manager_base_addr = of_iomap(np, 0); 76 sys_manager_base_addr = of_iomap(np, 0);
diff --git a/arch/arm/mach-tegra/irq.c b/arch/arm/mach-tegra/irq.c
index da7be13aecce..ab95f5391a2b 100644
--- a/arch/arm/mach-tegra/irq.c
+++ b/arch/arm/mach-tegra/irq.c
@@ -99,42 +99,42 @@ static inline void tegra_irq_write_mask(unsigned int irq, unsigned long reg)
99 99
100static void tegra_mask(struct irq_data *d) 100static void tegra_mask(struct irq_data *d)
101{ 101{
102 if (d->irq < FIRST_LEGACY_IRQ) 102 if (d->hwirq < FIRST_LEGACY_IRQ)
103 return; 103 return;
104 104
105 tegra_irq_write_mask(d->irq, ICTLR_CPU_IER_CLR); 105 tegra_irq_write_mask(d->hwirq, ICTLR_CPU_IER_CLR);
106} 106}
107 107
108static void tegra_unmask(struct irq_data *d) 108static void tegra_unmask(struct irq_data *d)
109{ 109{
110 if (d->irq < FIRST_LEGACY_IRQ) 110 if (d->hwirq < FIRST_LEGACY_IRQ)
111 return; 111 return;
112 112
113 tegra_irq_write_mask(d->irq, ICTLR_CPU_IER_SET); 113 tegra_irq_write_mask(d->hwirq, ICTLR_CPU_IER_SET);
114} 114}
115 115
116static void tegra_ack(struct irq_data *d) 116static void tegra_ack(struct irq_data *d)
117{ 117{
118 if (d->irq < FIRST_LEGACY_IRQ) 118 if (d->hwirq < FIRST_LEGACY_IRQ)
119 return; 119 return;
120 120
121 tegra_irq_write_mask(d->irq, ICTLR_CPU_IEP_FIR_CLR); 121 tegra_irq_write_mask(d->hwirq, ICTLR_CPU_IEP_FIR_CLR);
122} 122}
123 123
124static void tegra_eoi(struct irq_data *d) 124static void tegra_eoi(struct irq_data *d)
125{ 125{
126 if (d->irq < FIRST_LEGACY_IRQ) 126 if (d->hwirq < FIRST_LEGACY_IRQ)
127 return; 127 return;
128 128
129 tegra_irq_write_mask(d->irq, ICTLR_CPU_IEP_FIR_CLR); 129 tegra_irq_write_mask(d->hwirq, ICTLR_CPU_IEP_FIR_CLR);
130} 130}
131 131
132static int tegra_retrigger(struct irq_data *d) 132static int tegra_retrigger(struct irq_data *d)
133{ 133{
134 if (d->irq < FIRST_LEGACY_IRQ) 134 if (d->hwirq < FIRST_LEGACY_IRQ)
135 return 0; 135 return 0;
136 136
137 tegra_irq_write_mask(d->irq, ICTLR_CPU_IEP_FIR_SET); 137 tegra_irq_write_mask(d->hwirq, ICTLR_CPU_IEP_FIR_SET);
138 138
139 return 1; 139 return 1;
140} 140}
@@ -142,7 +142,7 @@ static int tegra_retrigger(struct irq_data *d)
142#ifdef CONFIG_PM_SLEEP 142#ifdef CONFIG_PM_SLEEP
143static int tegra_set_wake(struct irq_data *d, unsigned int enable) 143static int tegra_set_wake(struct irq_data *d, unsigned int enable)
144{ 144{
145 u32 irq = d->irq; 145 u32 irq = d->hwirq;
146 u32 index, mask; 146 u32 index, mask;
147 147
148 if (irq < FIRST_LEGACY_IRQ || 148 if (irq < FIRST_LEGACY_IRQ ||
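The irq.c hunks above switch every callback from d->irq (the Linux virtual interrupt number) to d->hwirq (the controller-local number), which is what the ICTLR registers are indexed by. A minimal user-space sketch of why the distinction matters; the 32-interrupts-per-register layout below is an assumption for illustration, not taken from the Tegra code:

    #include <stdio.h>

    #define IRQS_PER_ICTLR 32

    static void write_mask_bit(unsigned hwirq)
    {
        unsigned reg = hwirq / IRQS_PER_ICTLR;   /* which controller */
        unsigned bit = hwirq % IRQS_PER_ICTLR;   /* which bit in it  */

        printf("ICTLR%u <- bit %u\n", reg, bit);
    }

    int main(void)
    {
        unsigned linux_irq = 160;   /* hypothetical virtual number */
        unsigned hwirq = 64;        /* the same line, controller-local */

        write_mask_bit(hwirq);      /* correct: matches the hardware */
        write_mask_bit(linux_irq);  /* what indexing by d->irq would do */
        return 0;
    }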
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index ae69809a9e47..7eb94e6fc376 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -798,6 +798,7 @@ config NEED_KUSER_HELPERS
798 798
799config KUSER_HELPERS 799config KUSER_HELPERS
800 bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS 800 bool "Enable kuser helpers in vector page" if !NEED_KUSER_HELPERS
801 depends on MMU
801 default y 802 default y
802 help 803 help
803 Warning: disabling this option may break user programs. 804 Warning: disabling this option may break user programs.
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index 55f9d6e0cc88..5e65ca8dea62 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -956,7 +956,7 @@ static u32 cache_id_part_number_from_dt;
956 * @associativity: variable to return the calculated associativity in 956 * @associativity: variable to return the calculated associativity in
957 * @max_way_size: the maximum size in bytes for the cache ways 957 * @max_way_size: the maximum size in bytes for the cache ways
958 */ 958 */
959static void __init l2x0_cache_size_of_parse(const struct device_node *np, 959static int __init l2x0_cache_size_of_parse(const struct device_node *np,
960 u32 *aux_val, u32 *aux_mask, 960 u32 *aux_val, u32 *aux_mask,
961 u32 *associativity, 961 u32 *associativity,
962 u32 max_way_size) 962 u32 max_way_size)
@@ -974,7 +974,7 @@ static void __init l2x0_cache_size_of_parse(const struct device_node *np,
974 of_property_read_u32(np, "cache-line-size", &line_size); 974 of_property_read_u32(np, "cache-line-size", &line_size);
975 975
976 if (!cache_size || !sets) 976 if (!cache_size || !sets)
977 return; 977 return -ENODEV;
978 978
979 /* All these l2 caches have the same line = block size actually */ 979 /* All these l2 caches have the same line = block size actually */
980 if (!line_size) { 980 if (!line_size) {
@@ -1009,7 +1009,7 @@ static void __init l2x0_cache_size_of_parse(const struct device_node *np,
1009 1009
1010 if (way_size > max_way_size) { 1010 if (way_size > max_way_size) {
1011 pr_err("L2C OF: set size %dKB is too large\n", way_size); 1011 pr_err("L2C OF: set size %dKB is too large\n", way_size);
1012 return; 1012 return -EINVAL;
1013 } 1013 }
1014 1014
1015 pr_info("L2C OF: override cache size: %d bytes (%dKB)\n", 1015 pr_info("L2C OF: override cache size: %d bytes (%dKB)\n",
@@ -1027,7 +1027,7 @@ static void __init l2x0_cache_size_of_parse(const struct device_node *np,
1027 if (way_size_bits < 1 || way_size_bits > 6) { 1027 if (way_size_bits < 1 || way_size_bits > 6) {
1028 pr_err("L2C OF: cache way size illegal: %dKB is not mapped\n", 1028 pr_err("L2C OF: cache way size illegal: %dKB is not mapped\n",
1029 way_size); 1029 way_size);
1030 return; 1030 return -EINVAL;
1031 } 1031 }
1032 1032
1033 mask |= L2C_AUX_CTRL_WAY_SIZE_MASK; 1033 mask |= L2C_AUX_CTRL_WAY_SIZE_MASK;
@@ -1036,6 +1036,8 @@ static void __init l2x0_cache_size_of_parse(const struct device_node *np,
1036 *aux_val &= ~mask; 1036 *aux_val &= ~mask;
1037 *aux_val |= val; 1037 *aux_val |= val;
1038 *aux_mask &= ~mask; 1038 *aux_mask &= ~mask;
1039
1040 return 0;
1039} 1041}
1040 1042
1041static void __init l2x0_of_parse(const struct device_node *np, 1043static void __init l2x0_of_parse(const struct device_node *np,
@@ -1046,6 +1048,7 @@ static void __init l2x0_of_parse(const struct device_node *np,
1046 u32 dirty = 0; 1048 u32 dirty = 0;
1047 u32 val = 0, mask = 0; 1049 u32 val = 0, mask = 0;
1048 u32 assoc; 1050 u32 assoc;
1051 int ret;
1049 1052
1050 of_property_read_u32(np, "arm,tag-latency", &tag); 1053 of_property_read_u32(np, "arm,tag-latency", &tag);
1051 if (tag) { 1054 if (tag) {
@@ -1068,7 +1071,10 @@ static void __init l2x0_of_parse(const struct device_node *np,
1068 val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT; 1071 val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
1069 } 1072 }
1070 1073
1071 l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_256K); 1074 ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_256K);
1075 if (ret)
1076 return;
1077
1072 if (assoc > 8) { 1078 if (assoc > 8) {
1073 pr_err("l2x0 of: cache setting yield too high associativity\n"); 1079 pr_err("l2x0 of: cache setting yield too high associativity\n");
1074 pr_err("l2x0 of: %d calculated, max 8\n", assoc); 1080 pr_err("l2x0 of: %d calculated, max 8\n", assoc);
@@ -1125,6 +1131,7 @@ static void __init l2c310_of_parse(const struct device_node *np,
1125 u32 tag[3] = { 0, 0, 0 }; 1131 u32 tag[3] = { 0, 0, 0 };
1126 u32 filter[2] = { 0, 0 }; 1132 u32 filter[2] = { 0, 0 };
1127 u32 assoc; 1133 u32 assoc;
1134 int ret;
1128 1135
1129 of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag)); 1136 of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
1130 if (tag[0] && tag[1] && tag[2]) 1137 if (tag[0] && tag[1] && tag[2])
@@ -1152,7 +1159,10 @@ static void __init l2c310_of_parse(const struct device_node *np,
1152 l2x0_base + L310_ADDR_FILTER_START); 1159 l2x0_base + L310_ADDR_FILTER_START);
1153 } 1160 }
1154 1161
1155 l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K); 1162 ret = l2x0_cache_size_of_parse(np, aux_val, aux_mask, &assoc, SZ_512K);
1163 if (ret)
1164 return;
1165
1156 switch (assoc) { 1166 switch (assoc) {
1157 case 16: 1167 case 16:
1158 *aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK; 1168 *aux_val &= ~L2X0_AUX_CTRL_ASSOC_MASK;
@@ -1164,8 +1174,8 @@ static void __init l2c310_of_parse(const struct device_node *np,
1164 *aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK; 1174 *aux_mask &= ~L2X0_AUX_CTRL_ASSOC_MASK;
1165 break; 1175 break;
1166 default: 1176 default:
1167 pr_err("PL310 OF: cache setting yield illegal associativity\n"); 1177 pr_err("L2C-310 OF cache associativity %d invalid, only 8 or 16 permitted\n",
1168 pr_err("PL310 OF: %d calculated, only 8 and 16 legal\n", assoc); 1178 assoc);
1169 break; 1179 break;
1170 } 1180 }
1171} 1181}
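The cache-l2x0.c hunks above turn l2x0_cache_size_of_parse() from a silent void return into an int so that callers can skip the associativity handling when the DT properties are missing or invalid. A stripped-down model of the pattern (the formula and values are placeholders, not the kernel's):

    #include <stdio.h>
    #include <errno.h>

    static int parse_cache_size(unsigned cache_size, unsigned sets,
                                unsigned *assoc)
    {
        if (!cache_size || !sets)
            return -ENODEV;      /* previously: silent void return */
        *assoc = cache_size / (sets * 32);   /* placeholder formula */
        return 0;
    }

    int main(void)
    {
        unsigned assoc;
        int ret = parse_cache_size(0, 0, &assoc);  /* missing DT data */

        if (ret) {
            printf("skipping associativity setup (%d)\n", ret);
            return 0;            /* caller bails out, as in the hunks */
        }
        printf("assoc=%u\n", assoc);
        return 0;
    }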
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index c245d903927f..e8907117861e 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -1198,7 +1198,6 @@ __iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot,
1198{ 1198{
1199 return dma_common_pages_remap(pages, size, 1199 return dma_common_pages_remap(pages, size,
1200 VM_ARM_DMA_CONSISTENT | VM_USERMAP, prot, caller); 1200 VM_ARM_DMA_CONSISTENT | VM_USERMAP, prot, caller);
1201 return NULL;
1202} 1201}
1203 1202
1204/* 1203/*
diff --git a/arch/arm/mm/highmem.c b/arch/arm/mm/highmem.c
index 45aeaaca9052..e17ed00828d7 100644
--- a/arch/arm/mm/highmem.c
+++ b/arch/arm/mm/highmem.c
@@ -127,8 +127,11 @@ void *kmap_atomic_pfn(unsigned long pfn)
127{ 127{
128 unsigned long vaddr; 128 unsigned long vaddr;
129 int idx, type; 129 int idx, type;
130 struct page *page = pfn_to_page(pfn);
130 131
131 pagefault_disable(); 132 pagefault_disable();
133 if (!PageHighMem(page))
134 return page_address(page);
132 135
133 type = kmap_atomic_idx_push(); 136 type = kmap_atomic_idx_push();
134 idx = type + KM_TYPE_NR * smp_processor_id(); 137 idx = type + KM_TYPE_NR * smp_processor_id();
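The highmem.c hunk above makes kmap_atomic_pfn() return the existing linear-map address for lowmem pages instead of consuming a fixmap slot; only genuine highmem pages need a temporary atomic mapping. A user-space model of that short-circuit (types and helpers are stand-ins, not the kernel ones, and the pagefault_disable() step is omitted):

    #include <stdio.h>
    #include <stdbool.h>
    #include <stdint.h>

    struct page { bool highmem; void *linear_addr; };

    static void *map_atomic_slot(struct page *p)
    {
        printf("using a fixmap slot for a highmem page\n");
        return (void *)(uintptr_t)0x1000;    /* pretend temporary mapping */
    }

    static void *kmap_atomic_page(struct page *p)
    {
        if (!p->highmem)
            return p->linear_addr;           /* the new early return */
        return map_atomic_slot(p);
    }

    int main(void)
    {
        struct page low  = { .highmem = false,
                             .linear_addr = (void *)(uintptr_t)0xC0000000u };
        struct page high = { .highmem = true };

        printf("low  -> %p\n", kmap_atomic_page(&low));
        printf("high -> %p\n", kmap_atomic_page(&high));
        return 0;
    }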
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 92bba32d9230..9481f85c56e6 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -559,10 +559,10 @@ void __init mem_init(void)
559#ifdef CONFIG_MODULES 559#ifdef CONFIG_MODULES
560 " modules : 0x%08lx - 0x%08lx (%4ld MB)\n" 560 " modules : 0x%08lx - 0x%08lx (%4ld MB)\n"
561#endif 561#endif
562 " .text : 0x%p" " - 0x%p" " (%4d kB)\n" 562 " .text : 0x%p" " - 0x%p" " (%4td kB)\n"
563 " .init : 0x%p" " - 0x%p" " (%4d kB)\n" 563 " .init : 0x%p" " - 0x%p" " (%4td kB)\n"
564 " .data : 0x%p" " - 0x%p" " (%4d kB)\n" 564 " .data : 0x%p" " - 0x%p" " (%4td kB)\n"
565 " .bss : 0x%p" " - 0x%p" " (%4d kB)\n", 565 " .bss : 0x%p" " - 0x%p" " (%4td kB)\n",
566 566
567 MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) + 567 MLK(UL(CONFIG_VECTORS_BASE), UL(CONFIG_VECTORS_BASE) +
568 (PAGE_SIZE)), 568 (PAGE_SIZE)),
diff --git a/arch/arm/mm/proc-v7.S b/arch/arm/mm/proc-v7.S
index b3a947863ac7..22ac2a6fbfe3 100644
--- a/arch/arm/mm/proc-v7.S
+++ b/arch/arm/mm/proc-v7.S
@@ -270,7 +270,6 @@ __v7_pj4b_setup:
270/* Auxiliary Debug Modes Control 1 Register */ 270/* Auxiliary Debug Modes Control 1 Register */
271#define PJ4B_STATIC_BP (1 << 2) /* Enable Static BP */ 271#define PJ4B_STATIC_BP (1 << 2) /* Enable Static BP */
272#define PJ4B_INTER_PARITY (1 << 8) /* Disable Internal Parity Handling */ 272#define PJ4B_INTER_PARITY (1 << 8) /* Disable Internal Parity Handling */
273#define PJ4B_BCK_OFF_STREX (1 << 5) /* Enable the back off of STREX instr */
274#define PJ4B_CLEAN_LINE (1 << 16) /* Disable data transfer for clean line */ 273#define PJ4B_CLEAN_LINE (1 << 16) /* Disable data transfer for clean line */
275 274
276/* Auxiliary Debug Modes Control 2 Register */ 275/* Auxiliary Debug Modes Control 2 Register */
@@ -293,7 +292,6 @@ __v7_pj4b_setup:
293 /* Auxiliary Debug Modes Control 1 Register */ 292 /* Auxiliary Debug Modes Control 1 Register */
294 mrc p15, 1, r0, c15, c1, 1 293 mrc p15, 1, r0, c15, c1, 1
295 orr r0, r0, #PJ4B_CLEAN_LINE 294 orr r0, r0, #PJ4B_CLEAN_LINE
296 orr r0, r0, #PJ4B_BCK_OFF_STREX
297 orr r0, r0, #PJ4B_INTER_PARITY 295 orr r0, r0, #PJ4B_INTER_PARITY
298 bic r0, r0, #PJ4B_STATIC_BP 296 bic r0, r0, #PJ4B_STATIC_BP
299 mcr p15, 1, r0, c15, c1, 1 297 mcr p15, 1, r0, c15, c1, 1
diff --git a/arch/arm/mm/proc-xscale.S b/arch/arm/mm/proc-xscale.S
index 23259f104c66..afa2b3c4df4a 100644
--- a/arch/arm/mm/proc-xscale.S
+++ b/arch/arm/mm/proc-xscale.S
@@ -535,7 +535,7 @@ ENTRY(cpu_xscale_do_suspend)
535 mrc p15, 0, r5, c15, c1, 0 @ CP access reg 535 mrc p15, 0, r5, c15, c1, 0 @ CP access reg
536 mrc p15, 0, r6, c13, c0, 0 @ PID 536 mrc p15, 0, r6, c13, c0, 0 @ PID
537 mrc p15, 0, r7, c3, c0, 0 @ domain ID 537 mrc p15, 0, r7, c3, c0, 0 @ domain ID
538 mrc p15, 0, r8, c1, c1, 0 @ auxiliary control reg 538 mrc p15, 0, r8, c1, c0, 1 @ auxiliary control reg
539 mrc p15, 0, r9, c1, c0, 0 @ control reg 539 mrc p15, 0, r9, c1, c0, 0 @ control reg
540 bic r4, r4, #2 @ clear frequency change bit 540 bic r4, r4, #2 @ clear frequency change bit
541 stmia r0, {r4 - r9} @ store cp regs 541 stmia r0, {r4 - r9} @ store cp regs
@@ -552,7 +552,7 @@ ENTRY(cpu_xscale_do_resume)
552 mcr p15, 0, r6, c13, c0, 0 @ PID 552 mcr p15, 0, r6, c13, c0, 0 @ PID
553 mcr p15, 0, r7, c3, c0, 0 @ domain ID 553 mcr p15, 0, r7, c3, c0, 0 @ domain ID
554 mcr p15, 0, r1, c2, c0, 0 @ translation table base addr 554 mcr p15, 0, r1, c2, c0, 0 @ translation table base addr
555 mcr p15, 0, r8, c1, c1, 0 @ auxiliary control reg 555 mcr p15, 0, r8, c1, c0, 1 @ auxiliary control reg
556 mov r0, r9 @ control register 556 mov r0, r9 @ control register
557 b cpu_resume_mmu 557 b cpu_resume_mmu
558ENDPROC(cpu_xscale_do_resume) 558ENDPROC(cpu_xscale_do_resume)
diff --git a/arch/arm/plat-orion/gpio.c b/arch/arm/plat-orion/gpio.c
index b61a3bcc2fa8..e048f6198d68 100644
--- a/arch/arm/plat-orion/gpio.c
+++ b/arch/arm/plat-orion/gpio.c
@@ -497,6 +497,34 @@ static void orion_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
497#define orion_gpio_dbg_show NULL 497#define orion_gpio_dbg_show NULL
498#endif 498#endif
499 499
500static void orion_gpio_unmask_irq(struct irq_data *d)
501{
502 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
503 struct irq_chip_type *ct = irq_data_get_chip_type(d);
504 u32 reg_val;
505 u32 mask = d->mask;
506
507 irq_gc_lock(gc);
508 reg_val = irq_reg_readl(gc->reg_base + ct->regs.mask);
509 reg_val |= mask;
510 irq_reg_writel(reg_val, gc->reg_base + ct->regs.mask);
511 irq_gc_unlock(gc);
512}
513
514static void orion_gpio_mask_irq(struct irq_data *d)
515{
516 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
517 struct irq_chip_type *ct = irq_data_get_chip_type(d);
518 u32 mask = d->mask;
519 u32 reg_val;
520
521 irq_gc_lock(gc);
522 reg_val = irq_reg_readl(gc->reg_base + ct->regs.mask);
523 reg_val &= ~mask;
524 irq_reg_writel(reg_val, gc->reg_base + ct->regs.mask);
525 irq_gc_unlock(gc);
526}
527
500void __init orion_gpio_init(struct device_node *np, 528void __init orion_gpio_init(struct device_node *np,
501 int gpio_base, int ngpio, 529 int gpio_base, int ngpio,
502 void __iomem *base, int mask_offset, 530 void __iomem *base, int mask_offset,
@@ -565,8 +593,8 @@ void __init orion_gpio_init(struct device_node *np,
565 ct = gc->chip_types; 593 ct = gc->chip_types;
566 ct->regs.mask = ochip->mask_offset + GPIO_LEVEL_MASK_OFF; 594 ct->regs.mask = ochip->mask_offset + GPIO_LEVEL_MASK_OFF;
567 ct->type = IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW; 595 ct->type = IRQ_TYPE_LEVEL_HIGH | IRQ_TYPE_LEVEL_LOW;
568 ct->chip.irq_mask = irq_gc_mask_clr_bit; 596 ct->chip.irq_mask = orion_gpio_mask_irq;
569 ct->chip.irq_unmask = irq_gc_mask_set_bit; 597 ct->chip.irq_unmask = orion_gpio_unmask_irq;
570 ct->chip.irq_set_type = gpio_irq_set_type; 598 ct->chip.irq_set_type = gpio_irq_set_type;
571 ct->chip.name = ochip->chip.label; 599 ct->chip.name = ochip->chip.label;
572 600
@@ -575,8 +603,8 @@ void __init orion_gpio_init(struct device_node *np,
575 ct->regs.ack = GPIO_EDGE_CAUSE_OFF; 603 ct->regs.ack = GPIO_EDGE_CAUSE_OFF;
576 ct->type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING; 604 ct->type = IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING;
577 ct->chip.irq_ack = irq_gc_ack_clr_bit; 605 ct->chip.irq_ack = irq_gc_ack_clr_bit;
578 ct->chip.irq_mask = irq_gc_mask_clr_bit; 606 ct->chip.irq_mask = orion_gpio_mask_irq;
579 ct->chip.irq_unmask = irq_gc_mask_set_bit; 607 ct->chip.irq_unmask = orion_gpio_unmask_irq;
580 ct->chip.irq_set_type = gpio_irq_set_type; 608 ct->chip.irq_set_type = gpio_irq_set_type;
581 ct->handler = handle_edge_irq; 609 ct->handler = handle_edge_irq;
582 ct->chip.name = ochip->chip.label; 610 ct->chip.name = ochip->chip.label;
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index ac9afde76dea..9532f8d5857e 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -1,5 +1,6 @@
1config ARM64 1config ARM64
2 def_bool y 2 def_bool y
3 select ARCH_BINFMT_ELF_RANDOMIZE_PIE
3 select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE 4 select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
4 select ARCH_HAS_SG_CHAIN 5 select ARCH_HAS_SG_CHAIN
5 select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST 6 select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
@@ -232,7 +233,7 @@ config ARM64_VA_BITS_42
232 233
233config ARM64_VA_BITS_48 234config ARM64_VA_BITS_48
234 bool "48-bit" 235 bool "48-bit"
235 depends on BROKEN 236 depends on !ARM_SMMU
236 237
237endchoice 238endchoice
238 239
diff --git a/arch/arm64/boot/dts/apm-storm.dtsi b/arch/arm64/boot/dts/apm-storm.dtsi
index 295c72d52a1f..f1ad9c2ab2e9 100644
--- a/arch/arm64/boot/dts/apm-storm.dtsi
+++ b/arch/arm64/boot/dts/apm-storm.dtsi
@@ -599,7 +599,7 @@
599 compatible = "apm,xgene-enet"; 599 compatible = "apm,xgene-enet";
600 status = "disabled"; 600 status = "disabled";
601 reg = <0x0 0x17020000 0x0 0xd100>, 601 reg = <0x0 0x17020000 0x0 0xd100>,
602 <0x0 0X17030000 0x0 0X400>, 602 <0x0 0X17030000 0x0 0Xc300>,
603 <0x0 0X10000000 0x0 0X200>; 603 <0x0 0X10000000 0x0 0X200>;
604 reg-names = "enet_csr", "ring_csr", "ring_cmd"; 604 reg-names = "enet_csr", "ring_csr", "ring_cmd";
605 interrupts = <0x0 0x3c 0x4>; 605 interrupts = <0x0 0x3c 0x4>;
@@ -624,9 +624,9 @@
624 sgenet0: ethernet@1f210000 { 624 sgenet0: ethernet@1f210000 {
625 compatible = "apm,xgene-enet"; 625 compatible = "apm,xgene-enet";
626 status = "disabled"; 626 status = "disabled";
627 reg = <0x0 0x1f210000 0x0 0x10000>, 627 reg = <0x0 0x1f210000 0x0 0xd100>,
628 <0x0 0x1f200000 0x0 0X10000>, 628 <0x0 0x1f200000 0x0 0Xc300>,
629 <0x0 0x1B000000 0x0 0X20000>; 629 <0x0 0x1B000000 0x0 0X200>;
630 reg-names = "enet_csr", "ring_csr", "ring_cmd"; 630 reg-names = "enet_csr", "ring_csr", "ring_cmd";
631 interrupts = <0x0 0xA0 0x4>; 631 interrupts = <0x0 0xA0 0x4>;
632 dma-coherent; 632 dma-coherent;
@@ -639,7 +639,7 @@
639 compatible = "apm,xgene-enet"; 639 compatible = "apm,xgene-enet";
640 status = "disabled"; 640 status = "disabled";
641 reg = <0x0 0x1f610000 0x0 0xd100>, 641 reg = <0x0 0x1f610000 0x0 0xd100>,
642 <0x0 0x1f600000 0x0 0X400>, 642 <0x0 0x1f600000 0x0 0Xc300>,
643 <0x0 0x18000000 0x0 0X200>; 643 <0x0 0x18000000 0x0 0X200>;
644 reg-names = "enet_csr", "ring_csr", "ring_cmd"; 644 reg-names = "enet_csr", "ring_csr", "ring_cmd";
645 interrupts = <0x0 0x60 0x4>; 645 interrupts = <0x0 0x60 0x4>;
diff --git a/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi b/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi
index ac2cb2418025..c46cbb29f3c6 100644
--- a/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi
+++ b/arch/arm64/boot/dts/rtsm_ve-motherboard.dtsi
@@ -22,7 +22,7 @@
22 bank-width = <4>; 22 bank-width = <4>;
23 }; 23 };
24 24
25 vram@2,00000000 { 25 v2m_video_ram: vram@2,00000000 {
26 compatible = "arm,vexpress-vram"; 26 compatible = "arm,vexpress-vram";
27 reg = <2 0x00000000 0x00800000>; 27 reg = <2 0x00000000 0x00800000>;
28 }; 28 };
@@ -179,9 +179,42 @@
179 clcd@1f0000 { 179 clcd@1f0000 {
180 compatible = "arm,pl111", "arm,primecell"; 180 compatible = "arm,pl111", "arm,primecell";
181 reg = <0x1f0000 0x1000>; 181 reg = <0x1f0000 0x1000>;
182 interrupt-names = "combined";
182 interrupts = <14>; 183 interrupts = <14>;
183 clocks = <&v2m_oscclk1>, <&v2m_clk24mhz>; 184 clocks = <&v2m_oscclk1>, <&v2m_clk24mhz>;
184 clock-names = "clcdclk", "apb_pclk"; 185 clock-names = "clcdclk", "apb_pclk";
186 arm,pl11x,framebuffer = <0x18000000 0x00180000>;
187 memory-region = <&v2m_video_ram>;
188 max-memory-bandwidth = <130000000>; /* 16bpp @ 63.5MHz */
189
190 port {
191 v2m_clcd_pads: endpoint {
192 remote-endpoint = <&v2m_clcd_panel>;
193 arm,pl11x,tft-r0g0b0-pads = <0 8 16>;
194 };
195 };
196
197 panel {
198 compatible = "panel-dpi";
199
200 port {
201 v2m_clcd_panel: endpoint {
202 remote-endpoint = <&v2m_clcd_pads>;
203 };
204 };
205
206 panel-timing {
207 clock-frequency = <63500127>;
208 hactive = <1024>;
209 hback-porch = <152>;
210 hfront-porch = <48>;
211 hsync-len = <104>;
212 vactive = <768>;
213 vback-porch = <23>;
214 vfront-porch = <3>;
215 vsync-len = <4>;
216 };
217 };
185 }; 218 };
186 219
187 virtio_block@0130000 { 220 virtio_block@0130000 {
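Quick arithmetic behind the "16bpp @ 63.5MHz" comment in the clcd node above, reading max-memory-bandwidth as bytes per second (that reading is an assumption of this sketch): two bytes per pixel at the panel clock is roughly 127 MB/s, under the 130000000 the property allows.

    #include <stdio.h>

    int main(void)
    {
        unsigned long long pixclk = 63500127;  /* panel clock-frequency, Hz */
        unsigned bytes_per_pixel = 2;          /* 16bpp */

        printf("%llu bytes/s\n", pixclk * bytes_per_pixel);  /* ~127e6 */
        return 0;
    }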
diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig
index 9cd37de9aa8d..dd301be89ecc 100644
--- a/arch/arm64/configs/defconfig
+++ b/arch/arm64/configs/defconfig
@@ -35,6 +35,9 @@ CONFIG_MODULE_UNLOAD=y
35CONFIG_ARCH_THUNDER=y 35CONFIG_ARCH_THUNDER=y
36CONFIG_ARCH_VEXPRESS=y 36CONFIG_ARCH_VEXPRESS=y
37CONFIG_ARCH_XGENE=y 37CONFIG_ARCH_XGENE=y
38CONFIG_PCI=y
39CONFIG_PCI_MSI=y
40CONFIG_PCI_XGENE=y
38CONFIG_SMP=y 41CONFIG_SMP=y
39CONFIG_PREEMPT=y 42CONFIG_PREEMPT=y
40CONFIG_KSM=y 43CONFIG_KSM=y
@@ -52,6 +55,7 @@ CONFIG_IP_PNP_DHCP=y
52CONFIG_IP_PNP_BOOTP=y 55CONFIG_IP_PNP_BOOTP=y
53# CONFIG_INET_LRO is not set 56# CONFIG_INET_LRO is not set
54# CONFIG_IPV6 is not set 57# CONFIG_IPV6 is not set
58CONFIG_BPF_JIT=y
55# CONFIG_WIRELESS is not set 59# CONFIG_WIRELESS is not set
56CONFIG_NET_9P=y 60CONFIG_NET_9P=y
57CONFIG_NET_9P_VIRTIO=y 61CONFIG_NET_9P_VIRTIO=y
@@ -65,19 +69,21 @@ CONFIG_VIRTIO_BLK=y
65CONFIG_BLK_DEV_SD=y 69CONFIG_BLK_DEV_SD=y
66# CONFIG_SCSI_LOWLEVEL is not set 70# CONFIG_SCSI_LOWLEVEL is not set
67CONFIG_ATA=y 71CONFIG_ATA=y
72CONFIG_SATA_AHCI=y
73CONFIG_SATA_AHCI_PLATFORM=y
68CONFIG_AHCI_XGENE=y 74CONFIG_AHCI_XGENE=y
69CONFIG_PHY_XGENE=y
70CONFIG_PATA_PLATFORM=y 75CONFIG_PATA_PLATFORM=y
71CONFIG_PATA_OF_PLATFORM=y 76CONFIG_PATA_OF_PLATFORM=y
72CONFIG_NETDEVICES=y 77CONFIG_NETDEVICES=y
73CONFIG_TUN=y 78CONFIG_TUN=y
74CONFIG_VIRTIO_NET=y 79CONFIG_VIRTIO_NET=y
80CONFIG_NET_XGENE=y
75CONFIG_SMC91X=y 81CONFIG_SMC91X=y
76CONFIG_SMSC911X=y 82CONFIG_SMSC911X=y
77CONFIG_NET_XGENE=y
78# CONFIG_WLAN is not set 83# CONFIG_WLAN is not set
79CONFIG_INPUT_EVDEV=y 84CONFIG_INPUT_EVDEV=y
80# CONFIG_SERIO_SERPORT is not set 85# CONFIG_SERIO_SERPORT is not set
86CONFIG_SERIO_AMBAKMI=y
81CONFIG_LEGACY_PTY_COUNT=16 87CONFIG_LEGACY_PTY_COUNT=16
82CONFIG_SERIAL_8250=y 88CONFIG_SERIAL_8250=y
83CONFIG_SERIAL_8250_CONSOLE=y 89CONFIG_SERIAL_8250_CONSOLE=y
@@ -86,22 +92,40 @@ CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
86CONFIG_SERIAL_OF_PLATFORM=y 92CONFIG_SERIAL_OF_PLATFORM=y
87CONFIG_VIRTIO_CONSOLE=y 93CONFIG_VIRTIO_CONSOLE=y
88# CONFIG_HW_RANDOM is not set 94# CONFIG_HW_RANDOM is not set
95# CONFIG_HMC_DRV is not set
96CONFIG_SPI=y
97CONFIG_SPI_PL022=y
98CONFIG_GPIO_PL061=y
99CONFIG_GPIO_XGENE=y
89# CONFIG_HWMON is not set 100# CONFIG_HWMON is not set
90CONFIG_REGULATOR=y 101CONFIG_REGULATOR=y
91CONFIG_REGULATOR_FIXED_VOLTAGE=y 102CONFIG_REGULATOR_FIXED_VOLTAGE=y
92CONFIG_FB=y 103CONFIG_FB=y
104CONFIG_FB_ARMCLCD=y
93CONFIG_FRAMEBUFFER_CONSOLE=y 105CONFIG_FRAMEBUFFER_CONSOLE=y
94CONFIG_LOGO=y 106CONFIG_LOGO=y
95# CONFIG_LOGO_LINUX_MONO is not set 107# CONFIG_LOGO_LINUX_MONO is not set
96# CONFIG_LOGO_LINUX_VGA16 is not set 108# CONFIG_LOGO_LINUX_VGA16 is not set
97CONFIG_USB=y 109CONFIG_USB=y
110CONFIG_USB_EHCI_HCD=y
111CONFIG_USB_EHCI_HCD_PLATFORM=y
98CONFIG_USB_ISP1760_HCD=y 112CONFIG_USB_ISP1760_HCD=y
113CONFIG_USB_OHCI_HCD=y
114CONFIG_USB_OHCI_HCD_PLATFORM=y
99CONFIG_USB_STORAGE=y 115CONFIG_USB_STORAGE=y
116CONFIG_USB_ULPI=y
100CONFIG_MMC=y 117CONFIG_MMC=y
101CONFIG_MMC_ARMMMCI=y 118CONFIG_MMC_ARMMMCI=y
119CONFIG_MMC_SDHCI=y
120CONFIG_MMC_SDHCI_PLTFM=y
121CONFIG_MMC_SPI=y
122CONFIG_RTC_CLASS=y
123CONFIG_RTC_DRV_EFI=y
124CONFIG_RTC_DRV_XGENE=y
102CONFIG_VIRTIO_BALLOON=y 125CONFIG_VIRTIO_BALLOON=y
103CONFIG_VIRTIO_MMIO=y 126CONFIG_VIRTIO_MMIO=y
104# CONFIG_IOMMU_SUPPORT is not set 127# CONFIG_IOMMU_SUPPORT is not set
128CONFIG_PHY_XGENE=y
105CONFIG_EXT2_FS=y 129CONFIG_EXT2_FS=y
106CONFIG_EXT3_FS=y 130CONFIG_EXT3_FS=y
107# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set 131# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
index 253e33bc94fb..56de5aadede2 100644
--- a/arch/arm64/include/asm/compat.h
+++ b/arch/arm64/include/asm/compat.h
@@ -37,8 +37,8 @@ typedef s32 compat_ssize_t;
37typedef s32 compat_time_t; 37typedef s32 compat_time_t;
38typedef s32 compat_clock_t; 38typedef s32 compat_clock_t;
39typedef s32 compat_pid_t; 39typedef s32 compat_pid_t;
40typedef u32 __compat_uid_t; 40typedef u16 __compat_uid_t;
41typedef u32 __compat_gid_t; 41typedef u16 __compat_gid_t;
42typedef u16 __compat_uid16_t; 42typedef u16 __compat_uid16_t;
43typedef u16 __compat_gid16_t; 43typedef u16 __compat_gid16_t;
44typedef u32 __compat_uid32_t; 44typedef u32 __compat_uid32_t;
diff --git a/arch/arm64/include/asm/elf.h b/arch/arm64/include/asm/elf.h
index 01d3aab64b79..1f65be393139 100644
--- a/arch/arm64/include/asm/elf.h
+++ b/arch/arm64/include/asm/elf.h
@@ -126,7 +126,7 @@ typedef struct user_fpsimd_state elf_fpregset_t;
126 * that it will "exec", and that there is sufficient room for the brk. 126 * that it will "exec", and that there is sufficient room for the brk.
127 */ 127 */
128extern unsigned long randomize_et_dyn(unsigned long base); 128extern unsigned long randomize_et_dyn(unsigned long base);
129#define ELF_ET_DYN_BASE (randomize_et_dyn(2 * TASK_SIZE_64 / 3)) 129#define ELF_ET_DYN_BASE (2 * TASK_SIZE_64 / 3)
130 130
131/* 131/*
132 * When the program starts, a1 contains a pointer to a function to be 132 * When the program starts, a1 contains a pointer to a function to be
@@ -169,7 +169,7 @@ extern unsigned long arch_randomize_brk(struct mm_struct *mm);
169#define COMPAT_ELF_PLATFORM ("v8l") 169#define COMPAT_ELF_PLATFORM ("v8l")
170#endif 170#endif
171 171
172#define COMPAT_ELF_ET_DYN_BASE (randomize_et_dyn(2 * TASK_SIZE_32 / 3)) 172#define COMPAT_ELF_ET_DYN_BASE (2 * TASK_SIZE_32 / 3)
173 173
174/* AArch32 registers. */ 174/* AArch32 registers. */
175#define COMPAT_ELF_NGREG 18 175#define COMPAT_ELF_NGREG 18
diff --git a/arch/arm64/include/asm/irq_work.h b/arch/arm64/include/asm/irq_work.h
index 8e24ef3f7c82..b4f6b19a8a68 100644
--- a/arch/arm64/include/asm/irq_work.h
+++ b/arch/arm64/include/asm/irq_work.h
@@ -1,6 +1,8 @@
1#ifndef __ASM_IRQ_WORK_H 1#ifndef __ASM_IRQ_WORK_H
2#define __ASM_IRQ_WORK_H 2#define __ASM_IRQ_WORK_H
3 3
4#ifdef CONFIG_SMP
5
4#include <asm/smp.h> 6#include <asm/smp.h>
5 7
6static inline bool arch_irq_work_has_interrupt(void) 8static inline bool arch_irq_work_has_interrupt(void)
@@ -8,4 +10,13 @@ static inline bool arch_irq_work_has_interrupt(void)
8 return !!__smp_cross_call; 10 return !!__smp_cross_call;
9} 11}
10 12
13#else
14
15static inline bool arch_irq_work_has_interrupt(void)
16{
17 return false;
18}
19
20#endif
21
11#endif /* __ASM_IRQ_WORK_H */ 22#endif /* __ASM_IRQ_WORK_H */
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index ccc7087d3c4e..a62cd077457b 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -142,7 +142,7 @@ static inline void *phys_to_virt(phys_addr_t x)
142 * virt_to_page(k) convert a _valid_ virtual address to struct page * 142 * virt_to_page(k) convert a _valid_ virtual address to struct page *
143 * virt_addr_valid(k) indicates whether a virtual address is valid 143 * virt_addr_valid(k) indicates whether a virtual address is valid
144 */ 144 */
145#define ARCH_PFN_OFFSET PHYS_PFN_OFFSET 145#define ARCH_PFN_OFFSET ((unsigned long)PHYS_PFN_OFFSET)
146 146
147#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) 147#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
148#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) 148#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
diff --git a/arch/arm64/include/asm/unistd32.h b/arch/arm64/include/asm/unistd32.h
index da1f06b535e3..9dfdac4a74a1 100644
--- a/arch/arm64/include/asm/unistd32.h
+++ b/arch/arm64/include/asm/unistd32.h
@@ -792,3 +792,5 @@ __SYSCALL(__NR_renameat2, sys_renameat2)
792__SYSCALL(__NR_getrandom, sys_getrandom) 792__SYSCALL(__NR_getrandom, sys_getrandom)
793#define __NR_memfd_create 385 793#define __NR_memfd_create 385
794__SYSCALL(__NR_memfd_create, sys_memfd_create) 794__SYSCALL(__NR_memfd_create, sys_memfd_create)
795#define __NR_bpf 386
796__SYSCALL(__NR_bpf, sys_bpf)
diff --git a/arch/arm64/kernel/efi-entry.S b/arch/arm64/kernel/efi-entry.S
index 619b1dd7bcde..d18a44940968 100644
--- a/arch/arm64/kernel/efi-entry.S
+++ b/arch/arm64/kernel/efi-entry.S
@@ -54,18 +54,17 @@ ENTRY(efi_stub_entry)
54 b.eq efi_load_fail 54 b.eq efi_load_fail
55 55
56 /* 56 /*
57 * efi_entry() will have relocated the kernel image if necessary 57 * efi_entry() will have copied the kernel image if necessary and we
58 * and we return here with device tree address in x0 and the kernel 58 * return here with device tree address in x0 and the kernel entry
59 * entry point stored at *image_addr. Save those values in registers 59 * point stored at *image_addr. Save those values in registers which
60 * which are callee preserved. 60 * are callee preserved.
61 */ 61 */
62 mov x20, x0 // DTB address 62 mov x20, x0 // DTB address
63 ldr x0, [sp, #16] // relocated _text address 63 ldr x0, [sp, #16] // relocated _text address
64 mov x21, x0 64 mov x21, x0
65 65
66 /* 66 /*
67 * Flush dcache covering current runtime addresses 67 * Calculate size of the kernel Image (same for original and copy).
68 * of kernel text/data. Then flush all of icache.
69 */ 68 */
70 adrp x1, _text 69 adrp x1, _text
71 add x1, x1, #:lo12:_text 70 add x1, x1, #:lo12:_text
@@ -73,9 +72,24 @@ ENTRY(efi_stub_entry)
73 add x2, x2, #:lo12:_edata 72 add x2, x2, #:lo12:_edata
74 sub x1, x2, x1 73 sub x1, x2, x1
75 74
75 /*
76 * Flush the copied Image to the PoC, and ensure it is not shadowed by
77 * stale icache entries from before relocation.
78 */
76 bl __flush_dcache_area 79 bl __flush_dcache_area
77 ic ialluis 80 ic ialluis
78 81
82 /*
83 * Ensure that the rest of this function (in the original Image) is
84 * visible when the caches are disabled. The I-cache can't have stale
85 * entries for the VA range of the current image, so no maintenance is
86 * necessary.
87 */
88 adr x0, efi_stub_entry
89 adr x1, efi_stub_entry_end
90 sub x1, x1, x0
91 bl __flush_dcache_area
92
79 /* Turn off Dcache and MMU */ 93 /* Turn off Dcache and MMU */
80 mrs x0, CurrentEL 94 mrs x0, CurrentEL
81 cmp x0, #CurrentEL_EL2 95 cmp x0, #CurrentEL_EL2
@@ -105,4 +119,5 @@ efi_load_fail:
105 ldp x29, x30, [sp], #32 119 ldp x29, x30, [sp], #32
106 ret 120 ret
107 121
122efi_stub_entry_end:
108ENDPROC(efi_stub_entry) 123ENDPROC(efi_stub_entry)
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index 03aaa99e1ea0..95c49ebc660d 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -89,7 +89,8 @@ static int __init uefi_init(void)
89 */ 89 */
90 if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) { 90 if (efi.systab->hdr.signature != EFI_SYSTEM_TABLE_SIGNATURE) {
91 pr_err("System table signature incorrect\n"); 91 pr_err("System table signature incorrect\n");
92 return -EINVAL; 92 retval = -EINVAL;
93 goto out;
93 } 94 }
94 if ((efi.systab->hdr.revision >> 16) < 2) 95 if ((efi.systab->hdr.revision >> 16) < 2)
95 pr_warn("Warning: EFI system table version %d.%02d, expected 2.00 or greater\n", 96 pr_warn("Warning: EFI system table version %d.%02d, expected 2.00 or greater\n",
@@ -103,6 +104,7 @@ static int __init uefi_init(void)
103 for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i) 104 for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i)
104 vendor[i] = c16[i]; 105 vendor[i] = c16[i];
105 vendor[i] = '\0'; 106 vendor[i] = '\0';
107 early_memunmap(c16, sizeof(vendor));
106 } 108 }
107 109
108 pr_info("EFI v%u.%.02u by %s\n", 110 pr_info("EFI v%u.%.02u by %s\n",
@@ -113,29 +115,11 @@ static int __init uefi_init(void)
113 if (retval == 0) 115 if (retval == 0)
114 set_bit(EFI_CONFIG_TABLES, &efi.flags); 116 set_bit(EFI_CONFIG_TABLES, &efi.flags);
115 117
116 early_memunmap(c16, sizeof(vendor)); 118out:
117 early_memunmap(efi.systab, sizeof(efi_system_table_t)); 119 early_memunmap(efi.systab, sizeof(efi_system_table_t));
118
119 return retval; 120 return retval;
120} 121}
121 122
122static __initdata char memory_type_name[][32] = {
123 {"Reserved"},
124 {"Loader Code"},
125 {"Loader Data"},
126 {"Boot Code"},
127 {"Boot Data"},
128 {"Runtime Code"},
129 {"Runtime Data"},
130 {"Conventional Memory"},
131 {"Unusable Memory"},
132 {"ACPI Reclaim Memory"},
133 {"ACPI Memory NVS"},
134 {"Memory Mapped I/O"},
135 {"MMIO Port Space"},
136 {"PAL Code"},
137};
138
139/* 123/*
140 * Return true for RAM regions we want to permanently reserve. 124 * Return true for RAM regions we want to permanently reserve.
141 */ 125 */
@@ -166,10 +150,13 @@ static __init void reserve_regions(void)
166 paddr = md->phys_addr; 150 paddr = md->phys_addr;
167 npages = md->num_pages; 151 npages = md->num_pages;
168 152
169 if (uefi_debug) 153 if (uefi_debug) {
170 pr_info(" 0x%012llx-0x%012llx [%s]", 154 char buf[64];
155
156 pr_info(" 0x%012llx-0x%012llx %s",
171 paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1, 157 paddr, paddr + (npages << EFI_PAGE_SHIFT) - 1,
172 memory_type_name[md->type]); 158 efi_md_typeattr_format(buf, sizeof(buf), md));
159 }
173 160
174 memrange_efi_to_native(&paddr, &npages); 161 memrange_efi_to_native(&paddr, &npages);
175 size = npages << PAGE_SHIFT; 162 size = npages << PAGE_SHIFT;
@@ -393,11 +380,16 @@ static int __init arm64_enter_virtual_mode(void)
393 return -1; 380 return -1;
394 } 381 }
395 382
396 pr_info("Remapping and enabling EFI services.\n");
397
398 /* replace early memmap mapping with permanent mapping */
399 mapsize = memmap.map_end - memmap.map; 383 mapsize = memmap.map_end - memmap.map;
400 early_memunmap(memmap.map, mapsize); 384 early_memunmap(memmap.map, mapsize);
385
386 if (efi_runtime_disabled()) {
387 pr_info("EFI runtime services will be disabled.\n");
388 return -1;
389 }
390
391 pr_info("Remapping and enabling EFI services.\n");
392 /* replace early memmap mapping with permanent mapping */
401 memmap.map = (__force void *)ioremap_cache((phys_addr_t)memmap.phys_map, 393 memmap.map = (__force void *)ioremap_cache((phys_addr_t)memmap.phys_map,
402 mapsize); 394 mapsize);
403 memmap.map_end = memmap.map + mapsize; 395 memmap.map_end = memmap.map + mapsize;
diff --git a/arch/arm64/kernel/insn.c b/arch/arm64/kernel/insn.c
index e007714ded04..8cd27fedc8b6 100644
--- a/arch/arm64/kernel/insn.c
+++ b/arch/arm64/kernel/insn.c
@@ -163,9 +163,10 @@ static int __kprobes aarch64_insn_patch_text_cb(void *arg)
163 * which ends with "dsb; isb" pair guaranteeing global 163 * which ends with "dsb; isb" pair guaranteeing global
164 * visibility. 164 * visibility.
165 */ 165 */
166 atomic_set(&pp->cpu_count, -1); 166 /* Notify other processors with an additional increment. */
167 atomic_inc(&pp->cpu_count);
167 } else { 168 } else {
168 while (atomic_read(&pp->cpu_count) != -1) 169 while (atomic_read(&pp->cpu_count) <= num_online_cpus())
169 cpu_relax(); 170 cpu_relax();
170 isb(); 171 isb();
171 } 172 }
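The insn.c hunk above has the patching CPU signal completion with one extra increment instead of writing -1, while the other CPUs spin until the counter exceeds the number of online CPUs. A simplified user-space model of that rendezvous, using C11 threads rather than the kernel's stop_machine context (this is a model, not the kernel code):

    #include <stdio.h>
    #include <stdatomic.h>
    #include <threads.h>

    #define NCPUS 4

    static atomic_int cpu_count;

    static int cpu(void *arg)
    {
        int patcher = (atomic_fetch_add(&cpu_count, 1) == 0);
        (void)arg;

        if (patcher) {
            /* ... patch the text here ... */
            atomic_fetch_add(&cpu_count, 1);    /* the extra increment */
        } else {
            while (atomic_load(&cpu_count) <= NCPUS)
                ;                               /* cpu_relax() in the kernel */
        }
        printf("cpu done (patcher=%d)\n", patcher);
        return 0;
    }

    int main(void)
    {
        thrd_t t[NCPUS];

        for (int i = 0; i < NCPUS; i++)
            thrd_create(&t[i], cpu, NULL);
        for (int i = 0; i < NCPUS; i++)
            thrd_join(t[i], NULL);
        return 0;
    }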
diff --git a/arch/arm64/kernel/process.c b/arch/arm64/kernel/process.c
index c3065dbc4fa2..fde9923af859 100644
--- a/arch/arm64/kernel/process.c
+++ b/arch/arm64/kernel/process.c
@@ -378,8 +378,3 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
378{ 378{
379 return randomize_base(mm->brk); 379 return randomize_base(mm->brk);
380} 380}
381
382unsigned long randomize_et_dyn(unsigned long base)
383{
384 return randomize_base(base);
385}
diff --git a/arch/arm64/kernel/psci.c b/arch/arm64/kernel/psci.c
index 866c1c821860..663da771580a 100644
--- a/arch/arm64/kernel/psci.c
+++ b/arch/arm64/kernel/psci.c
@@ -528,7 +528,7 @@ static int __maybe_unused cpu_psci_cpu_suspend(unsigned long index)
528 if (WARN_ON_ONCE(!index)) 528 if (WARN_ON_ONCE(!index))
529 return -EINVAL; 529 return -EINVAL;
530 530
531 if (state->type == PSCI_POWER_STATE_TYPE_STANDBY) 531 if (state[index - 1].type == PSCI_POWER_STATE_TYPE_STANDBY)
532 ret = psci_ops.cpu_suspend(state[index - 1], 0); 532 ret = psci_ops.cpu_suspend(state[index - 1], 0);
533 else 533 else
534 ret = __cpu_suspend(index, psci_suspend_finisher); 534 ret = __cpu_suspend(index, psci_suspend_finisher);
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 4cc3b719208e..3d7c2df89946 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -424,6 +424,11 @@ static const struct sys_reg_desc sys_reg_descs[] = {
424 /* VBAR_EL1 */ 424 /* VBAR_EL1 */
425 { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000), 425 { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000),
426 NULL, reset_val, VBAR_EL1, 0 }, 426 NULL, reset_val, VBAR_EL1, 0 },
427
428 /* ICC_SRE_EL1 */
429 { Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b1100), Op2(0b101),
430 trap_raz_wi },
431
427 /* CONTEXTIDR_EL1 */ 432 /* CONTEXTIDR_EL1 */
428 { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001), 433 { Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),
429 access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 }, 434 access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
@@ -690,6 +695,10 @@ static const struct sys_reg_desc cp15_regs[] = {
690 { Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR }, 695 { Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
691 { Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 }, 696 { Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
692 { Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 }, 697 { Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },
698
699 /* ICC_SRE */
700 { Op1( 0), CRn(12), CRm(12), Op2( 5), trap_raz_wi },
701
693 { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID }, 702 { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
694}; 703};
695 704
diff --git a/arch/arm64/lib/clear_user.S b/arch/arm64/lib/clear_user.S
index 6e0ed93d51fe..c17967fdf5f6 100644
--- a/arch/arm64/lib/clear_user.S
+++ b/arch/arm64/lib/clear_user.S
@@ -46,7 +46,7 @@ USER(9f, strh wzr, [x0], #2 )
46 sub x1, x1, #2 46 sub x1, x1, #2
474: adds x1, x1, #1 474: adds x1, x1, #1
48 b.mi 5f 48 b.mi 5f
49 strb wzr, [x0] 49USER(9f, strb wzr, [x0] )
505: mov x0, #0 505: mov x0, #0
51 ret 51 ret
52ENDPROC(__clear_user) 52ENDPROC(__clear_user)
diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c
index fa324bd5a5c4..4a07630a6616 100644
--- a/arch/arm64/mm/ioremap.c
+++ b/arch/arm64/mm/ioremap.c
@@ -105,10 +105,10 @@ EXPORT_SYMBOL(ioremap_cache);
105 105
106static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss; 106static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss;
107#if CONFIG_ARM64_PGTABLE_LEVELS > 2 107#if CONFIG_ARM64_PGTABLE_LEVELS > 2
108static pte_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss; 108static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss;
109#endif 109#endif
110#if CONFIG_ARM64_PGTABLE_LEVELS > 3 110#if CONFIG_ARM64_PGTABLE_LEVELS > 3
111static pte_t bm_pud[PTRS_PER_PUD] __page_aligned_bss; 111static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss;
112#endif 112#endif
113 113
114static inline pud_t * __init early_ioremap_pud(unsigned long addr) 114static inline pud_t * __init early_ioremap_pud(unsigned long addr)
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index 6894ef3e6234..f4f8b500f74c 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -202,7 +202,7 @@ static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
202} 202}
203 203
204static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr, 204static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
205 unsigned long end, unsigned long phys, 205 unsigned long end, phys_addr_t phys,
206 int map_io) 206 int map_io)
207{ 207{
208 pud_t *pud; 208 pud_t *pud;
@@ -297,11 +297,15 @@ static void __init map_mem(void)
297 * create_mapping requires puds, pmds and ptes to be allocated from 297 * create_mapping requires puds, pmds and ptes to be allocated from
298 * memory addressable from the initial direct kernel mapping. 298 * memory addressable from the initial direct kernel mapping.
299 * 299 *
300 * The initial direct kernel mapping, located at swapper_pg_dir, 300 * The initial direct kernel mapping, located at swapper_pg_dir, gives
301 * gives us PUD_SIZE memory starting from PHYS_OFFSET (which must be 301 * us PUD_SIZE (4K pages) or PMD_SIZE (64K pages) memory starting from
302 * aligned to 2MB as per Documentation/arm64/booting.txt). 302 * PHYS_OFFSET (which must be aligned to 2MB as per
303 * Documentation/arm64/booting.txt).
303 */ 304 */
304 limit = PHYS_OFFSET + PUD_SIZE; 305 if (IS_ENABLED(CONFIG_ARM64_64K_PAGES))
306 limit = PHYS_OFFSET + PMD_SIZE;
307 else
308 limit = PHYS_OFFSET + PUD_SIZE;
305 memblock_set_current_limit(limit); 309 memblock_set_current_limit(limit);
306 310
307 /* map all the memory banks */ 311 /* map all the memory banks */
diff --git a/arch/arm64/mm/pgd.c b/arch/arm64/mm/pgd.c
index 62c6101df260..6682b361d3ac 100644
--- a/arch/arm64/mm/pgd.c
+++ b/arch/arm64/mm/pgd.c
@@ -30,12 +30,14 @@
30 30
31#define PGD_SIZE (PTRS_PER_PGD * sizeof(pgd_t)) 31#define PGD_SIZE (PTRS_PER_PGD * sizeof(pgd_t))
32 32
33static struct kmem_cache *pgd_cache;
34
33pgd_t *pgd_alloc(struct mm_struct *mm) 35pgd_t *pgd_alloc(struct mm_struct *mm)
34{ 36{
35 if (PGD_SIZE == PAGE_SIZE) 37 if (PGD_SIZE == PAGE_SIZE)
36 return (pgd_t *)get_zeroed_page(GFP_KERNEL); 38 return (pgd_t *)get_zeroed_page(GFP_KERNEL);
37 else 39 else
38 return kzalloc(PGD_SIZE, GFP_KERNEL); 40 return kmem_cache_zalloc(pgd_cache, GFP_KERNEL);
39} 41}
40 42
41void pgd_free(struct mm_struct *mm, pgd_t *pgd) 43void pgd_free(struct mm_struct *mm, pgd_t *pgd)
@@ -43,5 +45,17 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
43 if (PGD_SIZE == PAGE_SIZE) 45 if (PGD_SIZE == PAGE_SIZE)
44 free_page((unsigned long)pgd); 46 free_page((unsigned long)pgd);
45 else 47 else
46 kfree(pgd); 48 kmem_cache_free(pgd_cache, pgd);
49}
50
51static int __init pgd_cache_init(void)
52{
53 /*
54 * Naturally aligned pgds required by the architecture.
55 */
56 if (PGD_SIZE != PAGE_SIZE)
57 pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_SIZE,
58 SLAB_PANIC, NULL);
59 return 0;
47} 60}
61core_initcall(pgd_cache_init);
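The pgd.c hunks above create the slab cache with align == size so every pgd is naturally aligned, a guarantee plain kzalloc() does not make for arbitrary sub-page sizes. A user-space sketch of the same idea using aligned_alloc as a stand-in (the PGD_SIZE value is an assumption for illustration):

    #include <stdio.h>
    #include <stdint.h>
    #include <stdlib.h>

    #define PGD_SIZE 512   /* e.g. 64K pages with a 48-bit VA; assumption */

    int main(void)
    {
        void *pgd = aligned_alloc(PGD_SIZE, PGD_SIZE);  /* natural alignment */

        printf("pgd=%p naturally aligned=%d\n", pgd,
               ((uintptr_t)pgd % PGD_SIZE) == 0);
        free(pgd);
        return 0;
    }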
diff --git a/arch/arm64/net/bpf_jit.h b/arch/arm64/net/bpf_jit.h
index 2134f7e6c288..de0a81a539a0 100644
--- a/arch/arm64/net/bpf_jit.h
+++ b/arch/arm64/net/bpf_jit.h
@@ -144,8 +144,12 @@
144 144
145/* Data-processing (2 source) */ 145/* Data-processing (2 source) */
146/* Rd = Rn OP Rm */ 146/* Rd = Rn OP Rm */
147#define A64_UDIV(sf, Rd, Rn, Rm) aarch64_insn_gen_data2(Rd, Rn, Rm, \ 147#define A64_DATA2(sf, Rd, Rn, Rm, type) aarch64_insn_gen_data2(Rd, Rn, Rm, \
148 A64_VARIANT(sf), AARCH64_INSN_DATA2_UDIV) 148 A64_VARIANT(sf), AARCH64_INSN_DATA2_##type)
149#define A64_UDIV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, UDIV)
150#define A64_LSLV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, LSLV)
151#define A64_LSRV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, LSRV)
152#define A64_ASRV(sf, Rd, Rn, Rm) A64_DATA2(sf, Rd, Rn, Rm, ASRV)
149 153
150/* Data-processing (3 source) */ 154/* Data-processing (3 source) */
151/* Rd = Ra + Rn * Rm */ 155/* Rd = Ra + Rn * Rm */
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index 7ae33545535b..41f1e3e2ea24 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -19,12 +19,13 @@
19#define pr_fmt(fmt) "bpf_jit: " fmt 19#define pr_fmt(fmt) "bpf_jit: " fmt
20 20
21#include <linux/filter.h> 21#include <linux/filter.h>
22#include <linux/moduleloader.h>
23#include <linux/printk.h> 22#include <linux/printk.h>
24#include <linux/skbuff.h> 23#include <linux/skbuff.h>
25#include <linux/slab.h> 24#include <linux/slab.h>
25
26#include <asm/byteorder.h> 26#include <asm/byteorder.h>
27#include <asm/cacheflush.h> 27#include <asm/cacheflush.h>
28#include <asm/debug-monitors.h>
28 29
29#include "bpf_jit.h" 30#include "bpf_jit.h"
30 31
@@ -119,6 +120,14 @@ static inline int bpf2a64_offset(int bpf_to, int bpf_from,
119 return to - from; 120 return to - from;
120} 121}
121 122
123static void jit_fill_hole(void *area, unsigned int size)
124{
125 u32 *ptr;
126 /* We are guaranteed to have aligned memory. */
127 for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
128 *ptr++ = cpu_to_le32(AARCH64_BREAK_FAULT);
129}
130
122static inline int epilogue_offset(const struct jit_ctx *ctx) 131static inline int epilogue_offset(const struct jit_ctx *ctx)
123{ 132{
124 int to = ctx->offset[ctx->prog->len - 1]; 133 int to = ctx->offset[ctx->prog->len - 1];
@@ -196,6 +205,12 @@ static void build_epilogue(struct jit_ctx *ctx)
196 emit(A64_RET(A64_LR), ctx); 205 emit(A64_RET(A64_LR), ctx);
197} 206}
198 207
208/* JITs an eBPF instruction.
209 * Returns:
210 * 0 - successfully JITed an 8-byte eBPF instruction.
211 * >0 - successfully JITed a 16-byte eBPF instruction.
212 * <0 - failed to JIT.
213 */
199static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx) 214static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
200{ 215{
201 const u8 code = insn->code; 216 const u8 code = insn->code;
@@ -252,6 +267,18 @@ static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx)
252 emit(A64_MUL(is64, tmp, tmp, src), ctx); 267 emit(A64_MUL(is64, tmp, tmp, src), ctx);
253 emit(A64_SUB(is64, dst, dst, tmp), ctx); 268 emit(A64_SUB(is64, dst, dst, tmp), ctx);
254 break; 269 break;
270 case BPF_ALU | BPF_LSH | BPF_X:
271 case BPF_ALU64 | BPF_LSH | BPF_X:
272 emit(A64_LSLV(is64, dst, dst, src), ctx);
273 break;
274 case BPF_ALU | BPF_RSH | BPF_X:
275 case BPF_ALU64 | BPF_RSH | BPF_X:
276 emit(A64_LSRV(is64, dst, dst, src), ctx);
277 break;
278 case BPF_ALU | BPF_ARSH | BPF_X:
279 case BPF_ALU64 | BPF_ARSH | BPF_X:
280 emit(A64_ASRV(is64, dst, dst, src), ctx);
281 break;
255 /* dst = -dst */ 282 /* dst = -dst */
256 case BPF_ALU | BPF_NEG: 283 case BPF_ALU | BPF_NEG:
257 case BPF_ALU64 | BPF_NEG: 284 case BPF_ALU64 | BPF_NEG:
@@ -443,6 +470,27 @@ emit_cond_jmp:
443 emit(A64_B(jmp_offset), ctx); 470 emit(A64_B(jmp_offset), ctx);
444 break; 471 break;
445 472
473 /* dst = imm64 */
474 case BPF_LD | BPF_IMM | BPF_DW:
475 {
476 const struct bpf_insn insn1 = insn[1];
477 u64 imm64;
478
479 if (insn1.code != 0 || insn1.src_reg != 0 ||
480 insn1.dst_reg != 0 || insn1.off != 0) {
481 /* Note: verifier in BPF core must catch invalid
482 * instructions.
483 */
484 pr_err_once("Invalid BPF_LD_IMM64 instruction\n");
485 return -EINVAL;
486 }
487
488 imm64 = (u64)insn1.imm << 32 | imm;
489 emit_a64_mov_i64(dst, imm64, ctx);
490
491 return 1;
492 }
493
446 /* LDX: dst = *(size *)(src + off) */ 494 /* LDX: dst = *(size *)(src + off) */
447 case BPF_LDX | BPF_MEM | BPF_W: 495 case BPF_LDX | BPF_MEM | BPF_W:
448 case BPF_LDX | BPF_MEM | BPF_H: 496 case BPF_LDX | BPF_MEM | BPF_H:
@@ -594,6 +642,10 @@ static int build_body(struct jit_ctx *ctx)
594 ctx->offset[i] = ctx->idx; 642 ctx->offset[i] = ctx->idx;
595 643
596 ret = build_insn(insn, ctx); 644 ret = build_insn(insn, ctx);
645 if (ret > 0) {
646 i++;
647 continue;
648 }
597 if (ret) 649 if (ret)
598 return ret; 650 return ret;
599 } 651 }
@@ -613,8 +665,10 @@ void bpf_jit_compile(struct bpf_prog *prog)
613 665
614void bpf_int_jit_compile(struct bpf_prog *prog) 666void bpf_int_jit_compile(struct bpf_prog *prog)
615{ 667{
668 struct bpf_binary_header *header;
616 struct jit_ctx ctx; 669 struct jit_ctx ctx;
617 int image_size; 670 int image_size;
671 u8 *image_ptr;
618 672
619 if (!bpf_jit_enable) 673 if (!bpf_jit_enable)
620 return; 674 return;
@@ -636,23 +690,25 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
636 goto out; 690 goto out;
637 691
638 build_prologue(&ctx); 692 build_prologue(&ctx);
639
640 build_epilogue(&ctx); 693 build_epilogue(&ctx);
641 694
642 /* Now we know the actual image size. */ 695 /* Now we know the actual image size. */
643 image_size = sizeof(u32) * ctx.idx; 696 image_size = sizeof(u32) * ctx.idx;
644 ctx.image = module_alloc(image_size); 697 header = bpf_jit_binary_alloc(image_size, &image_ptr,
645 if (unlikely(ctx.image == NULL)) 698 sizeof(u32), jit_fill_hole);
699 if (header == NULL)
646 goto out; 700 goto out;
647 701
648 /* 2. Now, the actual pass. */ 702 /* 2. Now, the actual pass. */
649 703
704 ctx.image = (u32 *)image_ptr;
650 ctx.idx = 0; 705 ctx.idx = 0;
706
651 build_prologue(&ctx); 707 build_prologue(&ctx);
652 708
653 ctx.body_offset = ctx.idx; 709 ctx.body_offset = ctx.idx;
654 if (build_body(&ctx)) { 710 if (build_body(&ctx)) {
655 module_free(NULL, ctx.image); 711 bpf_jit_binary_free(header);
656 goto out; 712 goto out;
657 } 713 }
658 714
@@ -663,17 +719,25 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
663 bpf_jit_dump(prog->len, image_size, 2, ctx.image); 719 bpf_jit_dump(prog->len, image_size, 2, ctx.image);
664 720
665 bpf_flush_icache(ctx.image, ctx.image + ctx.idx); 721 bpf_flush_icache(ctx.image, ctx.image + ctx.idx);
666 prog->bpf_func = (void *)ctx.image;
667 prog->jited = 1;
668 722
723 set_memory_ro((unsigned long)header, header->pages);
724 prog->bpf_func = (void *)ctx.image;
725 prog->jited = true;
669out: 726out:
670 kfree(ctx.offset); 727 kfree(ctx.offset);
671} 728}
672 729
673void bpf_jit_free(struct bpf_prog *prog) 730void bpf_jit_free(struct bpf_prog *prog)
674{ 731{
675 if (prog->jited) 732 unsigned long addr = (unsigned long)prog->bpf_func & PAGE_MASK;
676 module_free(NULL, prog->bpf_func); 733 struct bpf_binary_header *header = (void *)addr;
734
735 if (!prog->jited)
736 goto free_filter;
737
738 set_memory_rw(addr, header->pages);
739 bpf_jit_binary_free(header);
677 740
678 kfree(prog); 741free_filter:
742 bpf_prog_unlock_free(prog);
679} 743}
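BPF_LD | BPF_IMM | BPF_DW is the one 16-byte eBPF instruction: the 64-bit immediate is split across two 8-byte slots, which is why build_insn() above returns 1 and build_body() advances past the second slot. A user-space sketch of the reassembly (struct layout simplified; only the fields used here are shown):

    #include <stdio.h>
    #include <stdint.h>

    struct bpf_insn_sketch { uint8_t code; int32_t imm; };

    int main(void)
    {
        struct bpf_insn_sketch insn[2] = {
            { .code = 0x18, .imm = 0x44332211 },  /* BPF_LD|BPF_IMM|BPF_DW */
            { .code = 0,    .imm = 0x08765432 },  /* continuation slot */
        };
        uint64_t imm64 = ((uint64_t)(uint32_t)insn[1].imm << 32) |
                         (uint32_t)insn[0].imm;

        /* prints 0x0876543244332211; the JIT consumes both slots,
         * hence the "return 1" / "i++" in the hunks above */
        printf("imm64 = 0x%016llx\n", (unsigned long long)imm64);
        return 0;
    }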
diff --git a/arch/ia64/kernel/efi.c b/arch/ia64/kernel/efi.c
index 741b99c1a0b1..c52d7540dc05 100644
--- a/arch/ia64/kernel/efi.c
+++ b/arch/ia64/kernel/efi.c
@@ -568,6 +568,7 @@ efi_init (void)
568 { 568 {
569 const char *unit; 569 const char *unit;
570 unsigned long size; 570 unsigned long size;
571 char buf[64];
571 572
572 md = p; 573 md = p;
573 size = md->num_pages << EFI_PAGE_SHIFT; 574 size = md->num_pages << EFI_PAGE_SHIFT;
@@ -586,9 +587,10 @@ efi_init (void)
586 unit = "KB"; 587 unit = "KB";
587 } 588 }
588 589
589 printk("mem%02d: type=%2u, attr=0x%016lx, " 590 printk("mem%02d: %s "
590 "range=[0x%016lx-0x%016lx) (%4lu%s)\n", 591 "range=[0x%016lx-0x%016lx) (%4lu%s)\n",
591 i, md->type, md->attribute, md->phys_addr, 592 i, efi_md_typeattr_format(buf, sizeof(buf), md),
593 md->phys_addr,
592 md->phys_addr + efi_md_size(md), size, unit); 594 md->phys_addr + efi_md_size(md), size, unit);
593 } 595 }
594 } 596 }
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index ec6b9acb6bea..dbe46f43884d 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -1563,7 +1563,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
1563 1563
1564 for (i = 0; i < npages; i++) { 1564 for (i = 0; i < npages; i++) {
1565 pfn = gfn_to_pfn(kvm, base_gfn + i); 1565 pfn = gfn_to_pfn(kvm, base_gfn + i);
1566 if (!kvm_is_mmio_pfn(pfn)) { 1566 if (!kvm_is_reserved_pfn(pfn)) {
1567 kvm_set_pmt_entry(kvm, base_gfn + i, 1567 kvm_set_pmt_entry(kvm, base_gfn + i,
1568 pfn << PAGE_SHIFT, 1568 pfn << PAGE_SHIFT,
1569 _PAGE_AR_RWX | _PAGE_MA_WB); 1569 _PAGE_AR_RWX | _PAGE_MA_WB);
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index 4ef7a54813e6..75e75d7b1702 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -4,7 +4,7 @@
4#include <uapi/asm/unistd.h> 4#include <uapi/asm/unistd.h>
5 5
6 6
7#define NR_syscalls 354 7#define NR_syscalls 355
8 8
9#define __ARCH_WANT_OLD_READDIR 9#define __ARCH_WANT_OLD_READDIR
10#define __ARCH_WANT_OLD_STAT 10#define __ARCH_WANT_OLD_STAT
diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h
index b419c6b7ac37..2c1bec9a14b6 100644
--- a/arch/m68k/include/uapi/asm/unistd.h
+++ b/arch/m68k/include/uapi/asm/unistd.h
@@ -359,5 +359,6 @@
359#define __NR_renameat2 351 359#define __NR_renameat2 351
360#define __NR_getrandom 352 360#define __NR_getrandom 352
361#define __NR_memfd_create 353 361#define __NR_memfd_create 353
362#define __NR_bpf 354
362 363
363#endif /* _UAPI_ASM_M68K_UNISTD_H_ */ 364#endif /* _UAPI_ASM_M68K_UNISTD_H_ */
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
index 05b46c2b08b8..2ca219e184cd 100644
--- a/arch/m68k/kernel/syscalltable.S
+++ b/arch/m68k/kernel/syscalltable.S
@@ -374,4 +374,5 @@ ENTRY(sys_call_table)
374 .long sys_renameat2 374 .long sys_renameat2
375 .long sys_getrandom 375 .long sys_getrandom
376 .long sys_memfd_create 376 .long sys_memfd_create
377 .long sys_bpf
377 378
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 6feded3b0c4c..a7736fa0580c 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -129,6 +129,10 @@ endmenu
129 129
130menu "Kernel features" 130menu "Kernel features"
131 131
132config NR_CPUS
133 int
134 default "1"
135
132config ADVANCED_OPTIONS 136config ADVANCED_OPTIONS
133 bool "Prompt for advanced kernel configuration options" 137 bool "Prompt for advanced kernel configuration options"
134 help 138 help
diff --git a/arch/microblaze/include/asm/unistd.h b/arch/microblaze/include/asm/unistd.h
index ea4b233647c1..0a53362d5548 100644
--- a/arch/microblaze/include/asm/unistd.h
+++ b/arch/microblaze/include/asm/unistd.h
@@ -38,6 +38,6 @@
38 38
39#endif /* __ASSEMBLY__ */ 39#endif /* __ASSEMBLY__ */
40 40
41#define __NR_syscalls 387 41#define __NR_syscalls 388
42 42
43#endif /* _ASM_MICROBLAZE_UNISTD_H */ 43#endif /* _ASM_MICROBLAZE_UNISTD_H */
diff --git a/arch/microblaze/include/uapi/asm/unistd.h b/arch/microblaze/include/uapi/asm/unistd.h
index 1c2380bf8fe6..c712677f8a2a 100644
--- a/arch/microblaze/include/uapi/asm/unistd.h
+++ b/arch/microblaze/include/uapi/asm/unistd.h
@@ -402,5 +402,6 @@
402#define __NR_seccomp 384 402#define __NR_seccomp 384
403#define __NR_getrandom 385 403#define __NR_getrandom 385
404#define __NR_memfd_create 386 404#define __NR_memfd_create 386
405#define __NR_bpf 387
405 406
406#endif /* _UAPI_ASM_MICROBLAZE_UNISTD_H */ 407#endif /* _UAPI_ASM_MICROBLAZE_UNISTD_H */
diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S
index de59ee1d7010..0166e890486c 100644
--- a/arch/microblaze/kernel/syscall_table.S
+++ b/arch/microblaze/kernel/syscall_table.S
@@ -387,3 +387,4 @@ ENTRY(sys_call_table)
387 .long sys_seccomp 387 .long sys_seccomp
388 .long sys_getrandom /* 385 */ 388 .long sys_getrandom /* 385 */
389 .long sys_memfd_create 389 .long sys_memfd_create
390 .long sys_bpf
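
The m68k and microblaze hunks above, like the MIPS unistd.h and scall*.S hunks later in this diff, only wire the new bpf(2) system call number into the per-architecture tables; the implementation itself lives in generic code. A hedged user-space sketch of reaching the new entry, assuming a header set that already exports __NR_bpf and union bpf_attr, and a map type (BPF_MAP_TYPE_HASH) that the running kernel may or may not support yet:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

int main(void)
{
	union bpf_attr attr;
	long ret;

	memset(&attr, 0, sizeof(attr));
	attr.map_type    = BPF_MAP_TYPE_HASH;	/* assumption: map type exists */
	attr.key_size    = 4;
	attr.value_size  = 8;
	attr.max_entries = 16;

	ret = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
	if (ret < 0 && errno == ENOSYS)
		puts("bpf(2) not wired up on this architecture");
	else if (ret < 0)
		perror("bpf(BPF_MAP_CREATE)");	/* entry exists, request rejected */
	else
		printf("created map fd %ld\n", ret);
	return 0;
}

An ENOSYS return would mean the table entry is missing; any other outcome, success or error, already went through the freshly wired syscall.
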
diff --git a/arch/microblaze/pci/pci-common.c b/arch/microblaze/pci/pci-common.c
index 9037914f6985..b30e41c0c033 100644
--- a/arch/microblaze/pci/pci-common.c
+++ b/arch/microblaze/pci/pci-common.c
@@ -660,8 +660,13 @@ void pci_process_bridge_OF_ranges(struct pci_controller *hose,
660 res = &hose->mem_resources[memno++]; 660 res = &hose->mem_resources[memno++];
661 break; 661 break;
662 } 662 }
663 if (res != NULL) 663 if (res != NULL) {
664 of_pci_range_to_resource(&range, dev, res); 664 res->name = dev->full_name;
665 res->flags = range.flags;
666 res->start = range.cpu_addr;
667 res->end = range.cpu_addr + range.size - 1;
668 res->parent = res->child = res->sibling = NULL;
669 }
665 } 670 }
666 671
667 /* If there's an ISA hole and the pci_mem_offset is -not- matching 672 /* If there's an ISA hole and the pci_mem_offset is -not- matching
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index ad6badb6be71..9536ef912f59 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -2066,6 +2066,7 @@ config MIPS_CPS
2066 support is unavailable. 2066 support is unavailable.
2067 2067
2068config MIPS_CPS_PM 2068config MIPS_CPS_PM
2069 depends on MIPS_CPS
2069 select MIPS_CPC 2070 select MIPS_CPC
2070 bool 2071 bool
2071 2072
@@ -2100,9 +2101,17 @@ config 64BIT_PHYS_ADDR
2100config ARCH_PHYS_ADDR_T_64BIT 2101config ARCH_PHYS_ADDR_T_64BIT
2101 def_bool 64BIT_PHYS_ADDR 2102 def_bool 64BIT_PHYS_ADDR
2102 2103
2104choice
2105 prompt "SmartMIPS or microMIPS ASE support"
2106
2107config CPU_NEEDS_NO_SMARTMIPS_OR_MICROMIPS
2108 bool "None"
2109 help
2110 Select this if you want neither microMIPS nor SmartMIPS support
2111
2103config CPU_HAS_SMARTMIPS 2112config CPU_HAS_SMARTMIPS
2104 depends on SYS_SUPPORTS_SMARTMIPS 2113 depends on SYS_SUPPORTS_SMARTMIPS
2105 bool "Support for the SmartMIPS ASE" 2114 bool "SmartMIPS"
2106 help 2115 help
2107 SmartMIPS is a extension of the MIPS32 architecture aimed at 2116 SmartMIPS is a extension of the MIPS32 architecture aimed at
2108 increased security at both hardware and software level for 2117 increased security at both hardware and software level for
@@ -2114,11 +2123,13 @@ config CPU_HAS_SMARTMIPS
2114 2123
2115config CPU_MICROMIPS 2124config CPU_MICROMIPS
2116 depends on SYS_SUPPORTS_MICROMIPS 2125 depends on SYS_SUPPORTS_MICROMIPS
2117 bool "Build kernel using microMIPS ISA" 2126 bool "microMIPS"
2118 help 2127 help
2119 When this option is enabled the kernel will be built using the 2128 When this option is enabled the kernel will be built using the
2120 microMIPS ISA 2129 microMIPS ISA
2121 2130
2131endchoice
2132
2122config CPU_HAS_MSA 2133config CPU_HAS_MSA
2123 bool "Support for the MIPS SIMD Architecture (EXPERIMENTAL)" 2134 bool "Support for the MIPS SIMD Architecture (EXPERIMENTAL)"
2124 depends on CPU_SUPPORTS_MSA 2135 depends on CPU_SUPPORTS_MSA
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index 23cb94806fbc..58076472bdd8 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -93,6 +93,15 @@ LDFLAGS_vmlinux += -G 0 -static -n -nostdlib
93KBUILD_AFLAGS_MODULE += -mlong-calls 93KBUILD_AFLAGS_MODULE += -mlong-calls
94KBUILD_CFLAGS_MODULE += -mlong-calls 94KBUILD_CFLAGS_MODULE += -mlong-calls
95 95
96#
97# pass -msoft-float to GAS if it supports it. However on newer binutils
98# (specifically newer than 2.24.51.20140728) we then also need to explicitly
99# set ".set hardfloat" in all files which manipulate floating point registers.
100#
101ifneq ($(call as-option,-Wa$(comma)-msoft-float,),)
102 cflags-y += -DGAS_HAS_SET_HARDFLOAT -Wa,-msoft-float
103endif
104
96cflags-y += -ffreestanding 105cflags-y += -ffreestanding
97 106
98# 107#
diff --git a/arch/mips/ath79/mach-db120.c b/arch/mips/ath79/mach-db120.c
index 4d661a1d2dae..9423f5aed287 100644
--- a/arch/mips/ath79/mach-db120.c
+++ b/arch/mips/ath79/mach-db120.c
@@ -113,7 +113,7 @@ static void __init db120_pci_init(u8 *eeprom)
113 ath79_register_pci(); 113 ath79_register_pci();
114} 114}
115#else 115#else
116static inline void db120_pci_init(void) {} 116static inline void db120_pci_init(u8 *eeprom) {}
117#endif /* CONFIG_PCI */ 117#endif /* CONFIG_PCI */
118 118
119static void __init db120_setup(void) 119static void __init db120_setup(void)
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index 741734049675..2bc4aa95944e 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -809,6 +809,7 @@ static struct irq_chip octeon_irq_chip_ciu_gpio_v2 = {
809 .irq_set_type = octeon_irq_ciu_gpio_set_type, 809 .irq_set_type = octeon_irq_ciu_gpio_set_type,
810#ifdef CONFIG_SMP 810#ifdef CONFIG_SMP
811 .irq_set_affinity = octeon_irq_ciu_set_affinity_v2, 811 .irq_set_affinity = octeon_irq_ciu_set_affinity_v2,
812 .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
812#endif 813#endif
813 .flags = IRQCHIP_SET_TYPE_MASKED, 814 .flags = IRQCHIP_SET_TYPE_MASKED,
814}; 815};
@@ -823,6 +824,7 @@ static struct irq_chip octeon_irq_chip_ciu_gpio = {
823 .irq_set_type = octeon_irq_ciu_gpio_set_type, 824 .irq_set_type = octeon_irq_ciu_gpio_set_type,
824#ifdef CONFIG_SMP 825#ifdef CONFIG_SMP
825 .irq_set_affinity = octeon_irq_ciu_set_affinity, 826 .irq_set_affinity = octeon_irq_ciu_set_affinity,
827 .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
826#endif 828#endif
827 .flags = IRQCHIP_SET_TYPE_MASKED, 829 .flags = IRQCHIP_SET_TYPE_MASKED,
828}; 830};
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index 38f4c32e2816..5ebdb32d9a2b 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -806,15 +806,6 @@ void __init prom_init(void)
806#endif 806#endif
807 } 807 }
808 808
809 if (octeon_is_simulation()) {
810 /*
811 * The simulator uses a mtdram device pre filled with
812 * the filesystem. Also specify the calibration delay
813 * to avoid calculating it every time.
814 */
815 strcat(arcs_cmdline, " rw root=1f00 slram=root,0x40000000,+1073741824");
816 }
817
818 mips_hpt_frequency = octeon_get_clock_rate(); 809 mips_hpt_frequency = octeon_get_clock_rate();
819 810
820 octeon_init_cvmcount(); 811 octeon_init_cvmcount();
diff --git a/arch/mips/include/asm/asmmacro-32.h b/arch/mips/include/asm/asmmacro-32.h
index e38c2811d4e2..cdac7b3eeaf7 100644
--- a/arch/mips/include/asm/asmmacro-32.h
+++ b/arch/mips/include/asm/asmmacro-32.h
@@ -13,6 +13,8 @@
13#include <asm/mipsregs.h> 13#include <asm/mipsregs.h>
14 14
15 .macro fpu_save_single thread tmp=t0 15 .macro fpu_save_single thread tmp=t0
16 .set push
17 SET_HARDFLOAT
16 cfc1 \tmp, fcr31 18 cfc1 \tmp, fcr31
17 swc1 $f0, THREAD_FPR0_LS64(\thread) 19 swc1 $f0, THREAD_FPR0_LS64(\thread)
18 swc1 $f1, THREAD_FPR1_LS64(\thread) 20 swc1 $f1, THREAD_FPR1_LS64(\thread)
@@ -47,9 +49,12 @@
47 swc1 $f30, THREAD_FPR30_LS64(\thread) 49 swc1 $f30, THREAD_FPR30_LS64(\thread)
48 swc1 $f31, THREAD_FPR31_LS64(\thread) 50 swc1 $f31, THREAD_FPR31_LS64(\thread)
49 sw \tmp, THREAD_FCR31(\thread) 51 sw \tmp, THREAD_FCR31(\thread)
52 .set pop
50 .endm 53 .endm
51 54
52 .macro fpu_restore_single thread tmp=t0 55 .macro fpu_restore_single thread tmp=t0
56 .set push
57 SET_HARDFLOAT
53 lw \tmp, THREAD_FCR31(\thread) 58 lw \tmp, THREAD_FCR31(\thread)
54 lwc1 $f0, THREAD_FPR0_LS64(\thread) 59 lwc1 $f0, THREAD_FPR0_LS64(\thread)
55 lwc1 $f1, THREAD_FPR1_LS64(\thread) 60 lwc1 $f1, THREAD_FPR1_LS64(\thread)
@@ -84,6 +89,7 @@
84 lwc1 $f30, THREAD_FPR30_LS64(\thread) 89 lwc1 $f30, THREAD_FPR30_LS64(\thread)
85 lwc1 $f31, THREAD_FPR31_LS64(\thread) 90 lwc1 $f31, THREAD_FPR31_LS64(\thread)
86 ctc1 \tmp, fcr31 91 ctc1 \tmp, fcr31
92 .set pop
87 .endm 93 .endm
88 94
89 .macro cpu_save_nonscratch thread 95 .macro cpu_save_nonscratch thread
diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
index cd9a98bc8f60..6caf8766b80f 100644
--- a/arch/mips/include/asm/asmmacro.h
+++ b/arch/mips/include/asm/asmmacro.h
@@ -57,6 +57,8 @@
57#endif /* CONFIG_CPU_MIPSR2 */ 57#endif /* CONFIG_CPU_MIPSR2 */
58 58
59 .macro fpu_save_16even thread tmp=t0 59 .macro fpu_save_16even thread tmp=t0
60 .set push
61 SET_HARDFLOAT
60 cfc1 \tmp, fcr31 62 cfc1 \tmp, fcr31
61 sdc1 $f0, THREAD_FPR0_LS64(\thread) 63 sdc1 $f0, THREAD_FPR0_LS64(\thread)
62 sdc1 $f2, THREAD_FPR2_LS64(\thread) 64 sdc1 $f2, THREAD_FPR2_LS64(\thread)
@@ -75,11 +77,13 @@
75 sdc1 $f28, THREAD_FPR28_LS64(\thread) 77 sdc1 $f28, THREAD_FPR28_LS64(\thread)
76 sdc1 $f30, THREAD_FPR30_LS64(\thread) 78 sdc1 $f30, THREAD_FPR30_LS64(\thread)
77 sw \tmp, THREAD_FCR31(\thread) 79 sw \tmp, THREAD_FCR31(\thread)
80 .set pop
78 .endm 81 .endm
79 82
80 .macro fpu_save_16odd thread 83 .macro fpu_save_16odd thread
81 .set push 84 .set push
82 .set mips64r2 85 .set mips64r2
86 SET_HARDFLOAT
83 sdc1 $f1, THREAD_FPR1_LS64(\thread) 87 sdc1 $f1, THREAD_FPR1_LS64(\thread)
84 sdc1 $f3, THREAD_FPR3_LS64(\thread) 88 sdc1 $f3, THREAD_FPR3_LS64(\thread)
85 sdc1 $f5, THREAD_FPR5_LS64(\thread) 89 sdc1 $f5, THREAD_FPR5_LS64(\thread)
@@ -110,6 +114,8 @@
110 .endm 114 .endm
111 115
112 .macro fpu_restore_16even thread tmp=t0 116 .macro fpu_restore_16even thread tmp=t0
117 .set push
118 SET_HARDFLOAT
113 lw \tmp, THREAD_FCR31(\thread) 119 lw \tmp, THREAD_FCR31(\thread)
114 ldc1 $f0, THREAD_FPR0_LS64(\thread) 120 ldc1 $f0, THREAD_FPR0_LS64(\thread)
115 ldc1 $f2, THREAD_FPR2_LS64(\thread) 121 ldc1 $f2, THREAD_FPR2_LS64(\thread)
@@ -133,6 +139,7 @@
133 .macro fpu_restore_16odd thread 139 .macro fpu_restore_16odd thread
134 .set push 140 .set push
135 .set mips64r2 141 .set mips64r2
142 SET_HARDFLOAT
136 ldc1 $f1, THREAD_FPR1_LS64(\thread) 143 ldc1 $f1, THREAD_FPR1_LS64(\thread)
137 ldc1 $f3, THREAD_FPR3_LS64(\thread) 144 ldc1 $f3, THREAD_FPR3_LS64(\thread)
138 ldc1 $f5, THREAD_FPR5_LS64(\thread) 145 ldc1 $f5, THREAD_FPR5_LS64(\thread)
@@ -277,6 +284,7 @@
277 .macro cfcmsa rd, cs 284 .macro cfcmsa rd, cs
278 .set push 285 .set push
279 .set noat 286 .set noat
287 SET_HARDFLOAT
280 .insn 288 .insn
281 .word CFC_MSA_INSN | (\cs << 11) 289 .word CFC_MSA_INSN | (\cs << 11)
282 move \rd, $1 290 move \rd, $1
@@ -286,6 +294,7 @@
286 .macro ctcmsa cd, rs 294 .macro ctcmsa cd, rs
287 .set push 295 .set push
288 .set noat 296 .set noat
297 SET_HARDFLOAT
289 move $1, \rs 298 move $1, \rs
290 .word CTC_MSA_INSN | (\cd << 6) 299 .word CTC_MSA_INSN | (\cd << 6)
291 .set pop 300 .set pop
@@ -294,6 +303,7 @@
294 .macro ld_d wd, off, base 303 .macro ld_d wd, off, base
295 .set push 304 .set push
296 .set noat 305 .set noat
306 SET_HARDFLOAT
297 add $1, \base, \off 307 add $1, \base, \off
298 .word LDD_MSA_INSN | (\wd << 6) 308 .word LDD_MSA_INSN | (\wd << 6)
299 .set pop 309 .set pop
@@ -302,6 +312,7 @@
302 .macro st_d wd, off, base 312 .macro st_d wd, off, base
303 .set push 313 .set push
304 .set noat 314 .set noat
315 SET_HARDFLOAT
305 add $1, \base, \off 316 add $1, \base, \off
306 .word STD_MSA_INSN | (\wd << 6) 317 .word STD_MSA_INSN | (\wd << 6)
307 .set pop 318 .set pop
@@ -310,6 +321,7 @@
310 .macro copy_u_w rd, ws, n 321 .macro copy_u_w rd, ws, n
311 .set push 322 .set push
312 .set noat 323 .set noat
324 SET_HARDFLOAT
313 .insn 325 .insn
314 .word COPY_UW_MSA_INSN | (\n << 16) | (\ws << 11) 326 .word COPY_UW_MSA_INSN | (\n << 16) | (\ws << 11)
315 /* move triggers an assembler bug... */ 327 /* move triggers an assembler bug... */
@@ -320,6 +332,7 @@
320 .macro copy_u_d rd, ws, n 332 .macro copy_u_d rd, ws, n
321 .set push 333 .set push
322 .set noat 334 .set noat
335 SET_HARDFLOAT
323 .insn 336 .insn
324 .word COPY_UD_MSA_INSN | (\n << 16) | (\ws << 11) 337 .word COPY_UD_MSA_INSN | (\n << 16) | (\ws << 11)
325 /* move triggers an assembler bug... */ 338 /* move triggers an assembler bug... */
@@ -330,6 +343,7 @@
330 .macro insert_w wd, n, rs 343 .macro insert_w wd, n, rs
331 .set push 344 .set push
332 .set noat 345 .set noat
346 SET_HARDFLOAT
333 /* move triggers an assembler bug... */ 347 /* move triggers an assembler bug... */
334 or $1, \rs, zero 348 or $1, \rs, zero
335 .word INSERT_W_MSA_INSN | (\n << 16) | (\wd << 6) 349 .word INSERT_W_MSA_INSN | (\n << 16) | (\wd << 6)
@@ -339,6 +353,7 @@
339 .macro insert_d wd, n, rs 353 .macro insert_d wd, n, rs
340 .set push 354 .set push
341 .set noat 355 .set noat
356 SET_HARDFLOAT
342 /* move triggers an assembler bug... */ 357 /* move triggers an assembler bug... */
343 or $1, \rs, zero 358 or $1, \rs, zero
344 .word INSERT_D_MSA_INSN | (\n << 16) | (\wd << 6) 359 .word INSERT_D_MSA_INSN | (\n << 16) | (\wd << 6)
@@ -381,6 +396,7 @@
381 st_d 31, THREAD_FPR31, \thread 396 st_d 31, THREAD_FPR31, \thread
382 .set push 397 .set push
383 .set noat 398 .set noat
399 SET_HARDFLOAT
384 cfcmsa $1, MSA_CSR 400 cfcmsa $1, MSA_CSR
385 sw $1, THREAD_MSA_CSR(\thread) 401 sw $1, THREAD_MSA_CSR(\thread)
386 .set pop 402 .set pop
@@ -389,6 +405,7 @@
389 .macro msa_restore_all thread 405 .macro msa_restore_all thread
390 .set push 406 .set push
391 .set noat 407 .set noat
408 SET_HARDFLOAT
392 lw $1, THREAD_MSA_CSR(\thread) 409 lw $1, THREAD_MSA_CSR(\thread)
393 ctcmsa MSA_CSR, $1 410 ctcmsa MSA_CSR, $1
394 .set pop 411 .set pop
@@ -441,6 +458,7 @@
441 .macro msa_init_all_upper 458 .macro msa_init_all_upper
442 .set push 459 .set push
443 .set noat 460 .set noat
461 SET_HARDFLOAT
444 not $1, zero 462 not $1, zero
445 msa_init_upper 0 463 msa_init_upper 0
446 .set pop 464 .set pop
diff --git a/arch/mips/include/asm/cop2.h b/arch/mips/include/asm/cop2.h
index 51f80bd36fcc..63b3468ede4c 100644
--- a/arch/mips/include/asm/cop2.h
+++ b/arch/mips/include/asm/cop2.h
@@ -37,15 +37,15 @@ extern void nlm_cop2_restore(struct nlm_cop2_state *);
37 37
38#define cop2_present 1 38#define cop2_present 1
39#define cop2_lazy_restore 1 39#define cop2_lazy_restore 1
40#define cop2_save(r) do { (r); } while (0) 40#define cop2_save(r) do { (void)(r); } while (0)
41#define cop2_restore(r) do { (r); } while (0) 41#define cop2_restore(r) do { (void)(r); } while (0)
42 42
43#else 43#else
44 44
45#define cop2_present 0 45#define cop2_present 0
46#define cop2_lazy_restore 0 46#define cop2_lazy_restore 0
47#define cop2_save(r) do { (r); } while (0) 47#define cop2_save(r) do { (void)(r); } while (0)
48#define cop2_restore(r) do { (r); } while (0) 48#define cop2_restore(r) do { (void)(r); } while (0)
49#endif 49#endif
50 50
51enum cu2_ops { 51enum cu2_ops {
diff --git a/arch/mips/include/asm/fpregdef.h b/arch/mips/include/asm/fpregdef.h
index 429481f9028d..f184ba088532 100644
--- a/arch/mips/include/asm/fpregdef.h
+++ b/arch/mips/include/asm/fpregdef.h
@@ -14,6 +14,20 @@
14 14
15#include <asm/sgidefs.h> 15#include <asm/sgidefs.h>
16 16
17/*
18 * starting with binutils 2.24.51.20140729, MIPS binutils warn about mixing
19 * hardfloat and softfloat object files. The kernel build uses soft-float by
20 * default, so we also need to pass -msoft-float along to GAS if it supports it.
21 * But this in turn causes assembler errors in files which access hardfloat
22 * registers. We detect if GAS supports "-msoft-float" in the Makefile and
23 * explicitly put ".set hardfloat" where floating point registers are touched.
24 */
25#ifdef GAS_HAS_SET_HARDFLOAT
26#define SET_HARDFLOAT .set hardfloat
27#else
28#define SET_HARDFLOAT
29#endif
30
17#if _MIPS_SIM == _MIPS_SIM_ABI32 31#if _MIPS_SIM == _MIPS_SIM_ABI32
18 32
19/* 33/*
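
This fpregdef.h hunk is the definition the rest of the MIPS changes lean on: when the arch/mips/Makefile probe above finds a GAS that accepts -msoft-float, GAS_HAS_SET_HARDFLOAT is defined and SET_HARDFLOAT expands to ".set hardfloat"; on older binutils it expands to nothing. Every spot that touches FP or MSA registers is then bracketed with ".set push / SET_HARDFLOAT ... .set pop", as the asmmacro and *_fpu.S hunks elsewhere in this patch do. A kernel-context sketch of the same pattern from C inline assembly (the function name is made up; cfc1 $31 reads the FP control/status register):

static inline unsigned int sketch_read_fcsr(void)
{
	unsigned int val;

	__asm__ __volatile__(
	"	.set	push			\n"
#ifdef GAS_HAS_SET_HARDFLOAT
	"	.set	hardfloat		\n"
#endif
	"	cfc1	%0, $31			\n"
	"	.set	pop			\n"
	: "=r" (val));

	return val;
}

The fpu.h and mipsregs.h hunks that follow apply exactly this guard inside _read_32bit_cp1_register(), which is why callers such as lose_fpu() and branch.c can drop their open-coded cfc1 sequences.
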
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index 4d0aeda68397..dd562414cd5e 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -145,8 +145,8 @@ static inline void lose_fpu(int save)
145 if (is_msa_enabled()) { 145 if (is_msa_enabled()) {
146 if (save) { 146 if (save) {
147 save_msa(current); 147 save_msa(current);
148 asm volatile("cfc1 %0, $31" 148 current->thread.fpu.fcr31 =
149 : "=r"(current->thread.fpu.fcr31)); 149 read_32bit_cp1_register(CP1_STATUS);
150 } 150 }
151 disable_msa(); 151 disable_msa();
152 clear_thread_flag(TIF_USEDMSA); 152 clear_thread_flag(TIF_USEDMSA);
diff --git a/arch/mips/include/asm/ftrace.h b/arch/mips/include/asm/ftrace.h
index 992aaba603b5..b463f2aa5a61 100644
--- a/arch/mips/include/asm/ftrace.h
+++ b/arch/mips/include/asm/ftrace.h
@@ -24,7 +24,7 @@ do { \
24 asm volatile ( \ 24 asm volatile ( \
25 "1: " load " %[tmp_dst], 0(%[tmp_src])\n" \ 25 "1: " load " %[tmp_dst], 0(%[tmp_src])\n" \
26 " li %[tmp_err], 0\n" \ 26 " li %[tmp_err], 0\n" \
27 "2:\n" \ 27 "2: .insn\n" \
28 \ 28 \
29 ".section .fixup, \"ax\"\n" \ 29 ".section .fixup, \"ax\"\n" \
30 "3: li %[tmp_err], 1\n" \ 30 "3: li %[tmp_err], 1\n" \
@@ -46,7 +46,7 @@ do { \
46 asm volatile ( \ 46 asm volatile ( \
47 "1: " store " %[tmp_src], 0(%[tmp_dst])\n"\ 47 "1: " store " %[tmp_src], 0(%[tmp_dst])\n"\
48 " li %[tmp_err], 0\n" \ 48 " li %[tmp_err], 0\n" \
49 "2:\n" \ 49 "2: .insn\n" \
50 \ 50 \
51 ".section .fixup, \"ax\"\n" \ 51 ".section .fixup, \"ax\"\n" \
52 "3: li %[tmp_err], 1\n" \ 52 "3: li %[tmp_err], 1\n" \
diff --git a/arch/mips/include/asm/idle.h b/arch/mips/include/asm/idle.h
index d9f932de80e9..1c967abd545c 100644
--- a/arch/mips/include/asm/idle.h
+++ b/arch/mips/include/asm/idle.h
@@ -8,19 +8,12 @@ extern void (*cpu_wait)(void);
8extern void r4k_wait(void); 8extern void r4k_wait(void);
9extern asmlinkage void __r4k_wait(void); 9extern asmlinkage void __r4k_wait(void);
10extern void r4k_wait_irqoff(void); 10extern void r4k_wait_irqoff(void);
11extern void __pastwait(void);
12 11
13static inline int using_rollback_handler(void) 12static inline int using_rollback_handler(void)
14{ 13{
15 return cpu_wait == r4k_wait; 14 return cpu_wait == r4k_wait;
16} 15}
17 16
18static inline int address_is_in_r4k_wait_irqoff(unsigned long addr)
19{
20 return addr >= (unsigned long)r4k_wait_irqoff &&
21 addr < (unsigned long)__pastwait;
22}
23
24extern int mips_cpuidle_wait_enter(struct cpuidle_device *dev, 17extern int mips_cpuidle_wait_enter(struct cpuidle_device *dev,
25 struct cpuidle_driver *drv, int index); 18 struct cpuidle_driver *drv, int index);
26 19
diff --git a/arch/mips/include/asm/jump_label.h b/arch/mips/include/asm/jump_label.h
index e194f957ca8c..fdbff44e5482 100644
--- a/arch/mips/include/asm/jump_label.h
+++ b/arch/mips/include/asm/jump_label.h
@@ -20,9 +20,15 @@
20#define WORD_INSN ".word" 20#define WORD_INSN ".word"
21#endif 21#endif
22 22
23#ifdef CONFIG_CPU_MICROMIPS
24#define NOP_INSN "nop32"
25#else
26#define NOP_INSN "nop"
27#endif
28
23static __always_inline bool arch_static_branch(struct static_key *key) 29static __always_inline bool arch_static_branch(struct static_key *key)
24{ 30{
25 asm_volatile_goto("1:\tnop\n\t" 31 asm_volatile_goto("1:\t" NOP_INSN "\n\t"
26 "nop\n\t" 32 "nop\n\t"
27 ".pushsection __jump_table, \"aw\"\n\t" 33 ".pushsection __jump_table, \"aw\"\n\t"
28 WORD_INSN " 1b, %l[l_yes], %0\n\t" 34 WORD_INSN " 1b, %l[l_yes], %0\n\t"
diff --git a/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h b/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h
index 7d28f95b0512..6d69332f21ec 100644
--- a/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-loongson/cpu-feature-overrides.h
@@ -41,10 +41,8 @@
41#define cpu_has_mcheck 0 41#define cpu_has_mcheck 0
42#define cpu_has_mdmx 0 42#define cpu_has_mdmx 0
43#define cpu_has_mips16 0 43#define cpu_has_mips16 0
44#define cpu_has_mips32r1 0
45#define cpu_has_mips32r2 0 44#define cpu_has_mips32r2 0
46#define cpu_has_mips3d 0 45#define cpu_has_mips3d 0
47#define cpu_has_mips64r1 0
48#define cpu_has_mips64r2 0 46#define cpu_has_mips64r2 0
49#define cpu_has_mipsmt 0 47#define cpu_has_mipsmt 0
50#define cpu_has_prefetch 0 48#define cpu_has_prefetch 0
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index cf3b580c3df6..22a135ac91de 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -661,6 +661,8 @@
661#define MIPS_CONF6_SYND (_ULCAST_(1) << 13) 661#define MIPS_CONF6_SYND (_ULCAST_(1) << 13)
662/* proAptiv FTLB on/off bit */ 662/* proAptiv FTLB on/off bit */
663#define MIPS_CONF6_FTLBEN (_ULCAST_(1) << 15) 663#define MIPS_CONF6_FTLBEN (_ULCAST_(1) << 15)
664/* FTLB probability bits */
665#define MIPS_CONF6_FTLBP_SHIFT (16)
664 666
665#define MIPS_CONF7_WII (_ULCAST_(1) << 31) 667#define MIPS_CONF7_WII (_ULCAST_(1) << 31)
666 668
@@ -1324,7 +1326,7 @@ do { \
1324/* 1326/*
1325 * Macros to access the floating point coprocessor control registers 1327 * Macros to access the floating point coprocessor control registers
1326 */ 1328 */
1327#define read_32bit_cp1_register(source) \ 1329#define _read_32bit_cp1_register(source, gas_hardfloat) \
1328({ \ 1330({ \
1329 int __res; \ 1331 int __res; \
1330 \ 1332 \
@@ -1334,12 +1336,21 @@ do { \
1334 " # gas fails to assemble cfc1 for some archs, \n" \ 1336 " # gas fails to assemble cfc1 for some archs, \n" \
1335 " # like Octeon. \n" \ 1337 " # like Octeon. \n" \
1336 " .set mips1 \n" \ 1338 " .set mips1 \n" \
1339 " "STR(gas_hardfloat)" \n" \
1337 " cfc1 %0,"STR(source)" \n" \ 1340 " cfc1 %0,"STR(source)" \n" \
1338 " .set pop \n" \ 1341 " .set pop \n" \
1339 : "=r" (__res)); \ 1342 : "=r" (__res)); \
1340 __res; \ 1343 __res; \
1341}) 1344})
1342 1345
1346#ifdef GAS_HAS_SET_HARDFLOAT
1347#define read_32bit_cp1_register(source) \
1348 _read_32bit_cp1_register(source, .set hardfloat)
1349#else
1350#define read_32bit_cp1_register(source) \
1351 _read_32bit_cp1_register(source, )
1352#endif
1353
1343#ifdef HAVE_AS_DSP 1354#ifdef HAVE_AS_DSP
1344#define rddsp(mask) \ 1355#define rddsp(mask) \
1345({ \ 1356({ \
diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
index 4520adc8699b..cd6e0afc6833 100644
--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -257,7 +257,11 @@ static inline void protected_flush_icache_line(unsigned long addr)
257 */ 257 */
258static inline void protected_writeback_dcache_line(unsigned long addr) 258static inline void protected_writeback_dcache_line(unsigned long addr)
259{ 259{
260#ifdef CONFIG_EVA
261 protected_cachee_op(Hit_Writeback_Inv_D, addr);
262#else
260 protected_cache_op(Hit_Writeback_Inv_D, addr); 263 protected_cache_op(Hit_Writeback_Inv_D, addr);
264#endif
261} 265}
262 266
263static inline void protected_writeback_scache_line(unsigned long addr) 267static inline void protected_writeback_scache_line(unsigned long addr)
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index a10951090234..22a5624e2fd2 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -301,7 +301,8 @@ do { \
301 __get_kernel_common((x), size, __gu_ptr); \ 301 __get_kernel_common((x), size, __gu_ptr); \
302 else \ 302 else \
303 __get_user_common((x), size, __gu_ptr); \ 303 __get_user_common((x), size, __gu_ptr); \
304 } \ 304 } else \
305 (x) = 0; \
305 \ 306 \
306 __gu_err; \ 307 __gu_err; \
307}) 308})
@@ -316,6 +317,7 @@ do { \
316 " .insn \n" \ 317 " .insn \n" \
317 " .section .fixup,\"ax\" \n" \ 318 " .section .fixup,\"ax\" \n" \
318 "3: li %0, %4 \n" \ 319 "3: li %0, %4 \n" \
320 " move %1, $0 \n" \
319 " j 2b \n" \ 321 " j 2b \n" \
320 " .previous \n" \ 322 " .previous \n" \
321 " .section __ex_table,\"a\" \n" \ 323 " .section __ex_table,\"a\" \n" \
@@ -630,6 +632,7 @@ do { \
630 " .insn \n" \ 632 " .insn \n" \
631 " .section .fixup,\"ax\" \n" \ 633 " .section .fixup,\"ax\" \n" \
632 "3: li %0, %4 \n" \ 634 "3: li %0, %4 \n" \
635 " move %1, $0 \n" \
633 " j 2b \n" \ 636 " j 2b \n" \
634 " .previous \n" \ 637 " .previous \n" \
635 " .section __ex_table,\"a\" \n" \ 638 " .section __ex_table,\"a\" \n" \
@@ -773,10 +776,11 @@ extern void __put_user_unaligned_unknown(void);
773 "jal\t" #destination "\n\t" 776 "jal\t" #destination "\n\t"
774#endif 777#endif
775 778
776#ifndef CONFIG_CPU_DADDI_WORKAROUNDS 779#if defined(CONFIG_CPU_DADDI_WORKAROUNDS) || (defined(CONFIG_EVA) && \
777#define DADDI_SCRATCH "$0" 780 defined(CONFIG_CPU_HAS_PREFETCH))
778#else
779#define DADDI_SCRATCH "$3" 781#define DADDI_SCRATCH "$3"
782#else
783#define DADDI_SCRATCH "$0"
780#endif 784#endif
781 785
782extern size_t __copy_user(void *__to, const void *__from, size_t __n); 786extern size_t __copy_user(void *__to, const void *__from, size_t __n);
@@ -1418,7 +1422,7 @@ static inline long __strnlen_user(const char __user *s, long n)
1418} 1422}
1419 1423
1420/* 1424/*
1421 * strlen_user: - Get the size of a string in user space. 1425 * strnlen_user: - Get the size of a string in user space.
1422 * @str: The string to measure. 1426 * @str: The string to measure.
1423 * 1427 *
1424 * Context: User context only. This function may sleep. 1428 * Context: User context only. This function may sleep.
@@ -1427,9 +1431,7 @@ static inline long __strnlen_user(const char __user *s, long n)
1427 * 1431 *
1428 * Returns the size of the string INCLUDING the terminating NUL. 1432 * Returns the size of the string INCLUDING the terminating NUL.
1429 * On exception, returns 0. 1433 * On exception, returns 0.
1430 * 1434 * If the string is too long, returns a value greater than @n.
1431 * If there is a limit on the length of a valid string, you may wish to
1432 * consider using strnlen_user() instead.
1433 */ 1435 */
1434static inline long strnlen_user(const char __user *s, long n) 1436static inline long strnlen_user(const char __user *s, long n)
1435{ 1437{
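
Both uaccess.h hunks harden __get_user()/get_user(): the C path now writes 0 into the destination when access_ok() fails, and the added "move %1, $0" in the exception fixup zeroes it when the load itself faults, so a failed get_user() can no longer leave stale register contents in the caller's variable. A kernel-context illustration with a hypothetical caller (assumes <linux/uaccess.h>):

static long sketch_peek_word(const int __user *uptr, int *out)
{
	int val;
	long err = get_user(val, uptr);

	/* even when err != 0, val is now a well-defined 0 rather than
	 * whatever happened to be in the register before the fault, so
	 * copying it onward cannot leak kernel data.
	 */
	*out = val;
	return err;
}
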
diff --git a/arch/mips/include/uapi/asm/ptrace.h b/arch/mips/include/uapi/asm/ptrace.h
index bbcfb8ba8106..91a3d197ede3 100644
--- a/arch/mips/include/uapi/asm/ptrace.h
+++ b/arch/mips/include/uapi/asm/ptrace.h
@@ -9,6 +9,8 @@
9#ifndef _UAPI_ASM_PTRACE_H 9#ifndef _UAPI_ASM_PTRACE_H
10#define _UAPI_ASM_PTRACE_H 10#define _UAPI_ASM_PTRACE_H
11 11
12#include <linux/types.h>
13
12/* 0 - 31 are integer registers, 32 - 63 are fp registers. */ 14/* 0 - 31 are integer registers, 32 - 63 are fp registers. */
13#define FPR_BASE 32 15#define FPR_BASE 32
14#define PC 64 16#define PC 64
diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h
index fdb4923777d1..d001bb1ad177 100644
--- a/arch/mips/include/uapi/asm/unistd.h
+++ b/arch/mips/include/uapi/asm/unistd.h
@@ -375,16 +375,17 @@
375#define __NR_seccomp (__NR_Linux + 352) 375#define __NR_seccomp (__NR_Linux + 352)
376#define __NR_getrandom (__NR_Linux + 353) 376#define __NR_getrandom (__NR_Linux + 353)
377#define __NR_memfd_create (__NR_Linux + 354) 377#define __NR_memfd_create (__NR_Linux + 354)
378#define __NR_bpf (__NR_Linux + 355)
378 379
379/* 380/*
380 * Offset of the last Linux o32 flavoured syscall 381 * Offset of the last Linux o32 flavoured syscall
381 */ 382 */
382#define __NR_Linux_syscalls 354 383#define __NR_Linux_syscalls 355
383 384
384#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ 385#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
385 386
386#define __NR_O32_Linux 4000 387#define __NR_O32_Linux 4000
387#define __NR_O32_Linux_syscalls 354 388#define __NR_O32_Linux_syscalls 355
388 389
389#if _MIPS_SIM == _MIPS_SIM_ABI64 390#if _MIPS_SIM == _MIPS_SIM_ABI64
390 391
@@ -707,16 +708,17 @@
707#define __NR_seccomp (__NR_Linux + 312) 708#define __NR_seccomp (__NR_Linux + 312)
708#define __NR_getrandom (__NR_Linux + 313) 709#define __NR_getrandom (__NR_Linux + 313)
709#define __NR_memfd_create (__NR_Linux + 314) 710#define __NR_memfd_create (__NR_Linux + 314)
711#define __NR_bpf (__NR_Linux + 315)
710 712
711/* 713/*
712 * Offset of the last Linux 64-bit flavoured syscall 714 * Offset of the last Linux 64-bit flavoured syscall
713 */ 715 */
714#define __NR_Linux_syscalls 314 716#define __NR_Linux_syscalls 315
715 717
716#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */ 718#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
717 719
718#define __NR_64_Linux 5000 720#define __NR_64_Linux 5000
719#define __NR_64_Linux_syscalls 314 721#define __NR_64_Linux_syscalls 315
720 722
721#if _MIPS_SIM == _MIPS_SIM_NABI32 723#if _MIPS_SIM == _MIPS_SIM_NABI32
722 724
@@ -1043,15 +1045,16 @@
1043#define __NR_seccomp (__NR_Linux + 316) 1045#define __NR_seccomp (__NR_Linux + 316)
1044#define __NR_getrandom (__NR_Linux + 317) 1046#define __NR_getrandom (__NR_Linux + 317)
1045#define __NR_memfd_create (__NR_Linux + 318) 1047#define __NR_memfd_create (__NR_Linux + 318)
1048#define __NR_bpf (__NR_Linux + 319)
1046 1049
1047/* 1050/*
1048 * Offset of the last N32 flavoured syscall 1051 * Offset of the last N32 flavoured syscall
1049 */ 1052 */
1050#define __NR_Linux_syscalls 318 1053#define __NR_Linux_syscalls 319
1051 1054
1052#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */ 1055#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
1053 1056
1054#define __NR_N32_Linux 6000 1057#define __NR_N32_Linux 6000
1055#define __NR_N32_Linux_syscalls 318 1058#define __NR_N32_Linux_syscalls 319
1056 1059
1057#endif /* _UAPI_ASM_UNISTD_H */ 1060#endif /* _UAPI_ASM_UNISTD_H */
diff --git a/arch/mips/kernel/bmips_vec.S b/arch/mips/kernel/bmips_vec.S
index 290c23b51678..86495072a922 100644
--- a/arch/mips/kernel/bmips_vec.S
+++ b/arch/mips/kernel/bmips_vec.S
@@ -208,7 +208,6 @@ bmips_reset_nmi_vec_end:
208END(bmips_reset_nmi_vec) 208END(bmips_reset_nmi_vec)
209 209
210 .set pop 210 .set pop
211 .previous
212 211
213/*********************************************************************** 212/***********************************************************************
214 * CPU1 warm restart vector (used for second and subsequent boots). 213 * CPU1 warm restart vector (used for second and subsequent boots).
@@ -281,5 +280,3 @@ LEAF(bmips_enable_xks01)
281 jr ra 280 jr ra
282 281
283END(bmips_enable_xks01) 282END(bmips_enable_xks01)
284
285 .previous
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index 7b2df224f041..4d7d99d601cc 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -144,7 +144,7 @@ int __mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
144 case mm_bc1t_op: 144 case mm_bc1t_op:
145 preempt_disable(); 145 preempt_disable();
146 if (is_fpu_owner()) 146 if (is_fpu_owner())
147 asm volatile("cfc1\t%0,$31" : "=r" (fcr31)); 147 fcr31 = read_32bit_cp1_register(CP1_STATUS);
148 else 148 else
149 fcr31 = current->thread.fpu.fcr31; 149 fcr31 = current->thread.fpu.fcr31;
150 preempt_enable(); 150 preempt_enable();
@@ -562,11 +562,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
562 case cop1_op: 562 case cop1_op:
563 preempt_disable(); 563 preempt_disable();
564 if (is_fpu_owner()) 564 if (is_fpu_owner())
565 asm volatile( 565 fcr31 = read_32bit_cp1_register(CP1_STATUS);
566 ".set push\n"
567 "\t.set mips1\n"
568 "\tcfc1\t%0,$31\n"
569 "\t.set pop" : "=r" (fcr31));
570 else 566 else
571 fcr31 = current->thread.fpu.fcr31; 567 fcr31 = current->thread.fpu.fcr31;
572 preempt_enable(); 568 preempt_enable();
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
index e6e97d2a5c9e..0384b05ab5a0 100644
--- a/arch/mips/kernel/cps-vec.S
+++ b/arch/mips/kernel/cps-vec.S
@@ -229,6 +229,7 @@ LEAF(mips_cps_core_init)
229 nop 229 nop
230 230
231 .set push 231 .set push
232 .set mips32r2
232 .set mt 233 .set mt
233 234
234 /* Only allow 1 TC per VPE to execute... */ 235 /* Only allow 1 TC per VPE to execute... */
@@ -345,6 +346,7 @@ LEAF(mips_cps_boot_vpes)
345 nop 346 nop
346 347
347 .set push 348 .set push
349 .set mips32r2
348 .set mt 350 .set mt
349 351
3501: /* Enter VPE configuration state */ 3521: /* Enter VPE configuration state */
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 94c4a0c0a577..dc49cf30c2db 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -193,6 +193,32 @@ static void set_isa(struct cpuinfo_mips *c, unsigned int isa)
193static char unknown_isa[] = KERN_ERR \ 193static char unknown_isa[] = KERN_ERR \
194 "Unsupported ISA type, c0.config0: %d."; 194 "Unsupported ISA type, c0.config0: %d.";
195 195
196static unsigned int calculate_ftlb_probability(struct cpuinfo_mips *c)
197{
198
199 unsigned int probability = c->tlbsize / c->tlbsizevtlb;
200
201 /*
202 * 0 = All TLBWR instructions go to FTLB
203 * 1 = 15:1: For every 16 TBLWR instructions, 15 go to the
204 * FTLB and 1 goes to the VTLB.
205 * 2 = 7:1: As above with 7:1 ratio.
206 * 3 = 3:1: As above with 3:1 ratio.
207 *
208 * Use the linear midpoint as the probability threshold.
209 */
210 if (probability >= 12)
211 return 1;
212 else if (probability >= 6)
213 return 2;
214 else
215 /*
216 * So FTLB is less than 4 times bigger than VTLB.
217 * A 3:1 ratio can still be useful though.
218 */
219 return 3;
220}
221
196static void set_ftlb_enable(struct cpuinfo_mips *c, int enable) 222static void set_ftlb_enable(struct cpuinfo_mips *c, int enable)
197{ 223{
198 unsigned int config6; 224 unsigned int config6;
@@ -203,9 +229,14 @@ static void set_ftlb_enable(struct cpuinfo_mips *c, int enable)
203 case CPU_P5600: 229 case CPU_P5600:
204 /* proAptiv & related cores use Config6 to enable the FTLB */ 230 /* proAptiv & related cores use Config6 to enable the FTLB */
205 config6 = read_c0_config6(); 231 config6 = read_c0_config6();
232 /* Clear the old probability value */
233 config6 &= ~(3 << MIPS_CONF6_FTLBP_SHIFT);
206 if (enable) 234 if (enable)
207 /* Enable FTLB */ 235 /* Enable FTLB */
208 write_c0_config6(config6 | MIPS_CONF6_FTLBEN); 236 write_c0_config6(config6 |
237 (calculate_ftlb_probability(c)
238 << MIPS_CONF6_FTLBP_SHIFT)
239 | MIPS_CONF6_FTLBEN);
209 else 240 else
210 /* Disable FTLB */ 241 /* Disable FTLB */
211 write_c0_config6(config6 & ~MIPS_CONF6_FTLBEN); 242 write_c0_config6(config6 & ~MIPS_CONF6_FTLBEN);
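
calculate_ftlb_probability() above picks the Config6 FTLBP steering ratio from how much larger the combined TLB is than the VTLB alone. A worked example with hypothetical sizes (a 64-entry VTLB plus a 512-entry FTLB):

	probability = c->tlbsize / c->tlbsizevtlb
	            = (64 + 512) / 64
	            = 9

	6 <= 9 < 12, so the function returns 2, i.e. a 7:1 split: seven of
	every eight TLBWR instructions are steered to the FTLB. That value is
	shifted by MIPS_CONF6_FTLBP_SHIFT into Config6 before FTLBEN is set,
	after the old probability field has been cleared.
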
@@ -757,31 +788,34 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
757 c->cputype = CPU_LOONGSON2; 788 c->cputype = CPU_LOONGSON2;
758 __cpu_name[cpu] = "ICT Loongson-2"; 789 __cpu_name[cpu] = "ICT Loongson-2";
759 set_elf_platform(cpu, "loongson2e"); 790 set_elf_platform(cpu, "loongson2e");
791 set_isa(c, MIPS_CPU_ISA_III);
760 break; 792 break;
761 case PRID_REV_LOONGSON2F: 793 case PRID_REV_LOONGSON2F:
762 c->cputype = CPU_LOONGSON2; 794 c->cputype = CPU_LOONGSON2;
763 __cpu_name[cpu] = "ICT Loongson-2"; 795 __cpu_name[cpu] = "ICT Loongson-2";
764 set_elf_platform(cpu, "loongson2f"); 796 set_elf_platform(cpu, "loongson2f");
797 set_isa(c, MIPS_CPU_ISA_III);
765 break; 798 break;
766 case PRID_REV_LOONGSON3A: 799 case PRID_REV_LOONGSON3A:
767 c->cputype = CPU_LOONGSON3; 800 c->cputype = CPU_LOONGSON3;
768 c->writecombine = _CACHE_UNCACHED_ACCELERATED;
769 __cpu_name[cpu] = "ICT Loongson-3"; 801 __cpu_name[cpu] = "ICT Loongson-3";
770 set_elf_platform(cpu, "loongson3a"); 802 set_elf_platform(cpu, "loongson3a");
803 set_isa(c, MIPS_CPU_ISA_M64R1);
771 break; 804 break;
772 case PRID_REV_LOONGSON3B_R1: 805 case PRID_REV_LOONGSON3B_R1:
773 case PRID_REV_LOONGSON3B_R2: 806 case PRID_REV_LOONGSON3B_R2:
774 c->cputype = CPU_LOONGSON3; 807 c->cputype = CPU_LOONGSON3;
775 __cpu_name[cpu] = "ICT Loongson-3"; 808 __cpu_name[cpu] = "ICT Loongson-3";
776 set_elf_platform(cpu, "loongson3b"); 809 set_elf_platform(cpu, "loongson3b");
810 set_isa(c, MIPS_CPU_ISA_M64R1);
777 break; 811 break;
778 } 812 }
779 813
780 set_isa(c, MIPS_CPU_ISA_III);
781 c->options = R4K_OPTS | 814 c->options = R4K_OPTS |
782 MIPS_CPU_FPU | MIPS_CPU_LLSC | 815 MIPS_CPU_FPU | MIPS_CPU_LLSC |
783 MIPS_CPU_32FPR; 816 MIPS_CPU_32FPR;
784 c->tlbsize = 64; 817 c->tlbsize = 64;
818 c->writecombine = _CACHE_UNCACHED_ACCELERATED;
785 break; 819 break;
786 case PRID_IMP_LOONGSON_32: /* Loongson-1 */ 820 case PRID_IMP_LOONGSON_32: /* Loongson-1 */
787 decode_configs(c); 821 decode_configs(c);
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index ac35e12cb1f3..a5e26dd90592 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -358,6 +358,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
358 .set push 358 .set push
359 /* gas fails to assemble cfc1 for some archs (octeon).*/ \ 359 /* gas fails to assemble cfc1 for some archs (octeon).*/ \
360 .set mips1 360 .set mips1
361 SET_HARDFLOAT
361 cfc1 a1, fcr31 362 cfc1 a1, fcr31
362 li a2, ~(0x3f << 12) 363 li a2, ~(0x3f << 12)
363 and a2, a1 364 and a2, a1
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
index 09ce45980758..0b9082b6b683 100644
--- a/arch/mips/kernel/idle.c
+++ b/arch/mips/kernel/idle.c
@@ -68,9 +68,6 @@ void r4k_wait_irqoff(void)
68 " wait \n" 68 " wait \n"
69 " .set pop \n"); 69 " .set pop \n");
70 local_irq_enable(); 70 local_irq_enable();
71 __asm__(
72 " .globl __pastwait \n"
73 "__pastwait: \n");
74} 71}
75 72
76/* 73/*
diff --git a/arch/mips/kernel/jump_label.c b/arch/mips/kernel/jump_label.c
index 6001610cfe55..dda800e9e731 100644
--- a/arch/mips/kernel/jump_label.c
+++ b/arch/mips/kernel/jump_label.c
@@ -18,31 +18,53 @@
18 18
19#ifdef HAVE_JUMP_LABEL 19#ifdef HAVE_JUMP_LABEL
20 20
21#define J_RANGE_MASK ((1ul << 28) - 1) 21/*
22 * Define parameters for the standard MIPS and the microMIPS jump
23 * instruction encoding respectively:
24 *
25 * - the ISA bit of the target, either 0 or 1 respectively,
26 *
27 * - the amount the jump target address is shifted right to fit in the
28 * immediate field of the machine instruction, either 2 or 1,
29 *
30 * - the mask determining the size of the jump region relative to the
31 * delay-slot instruction, either 256MB or 128MB,
32 *
33 * - the jump target alignment, either 4 or 2 bytes.
34 */
35#define J_ISA_BIT IS_ENABLED(CONFIG_CPU_MICROMIPS)
36#define J_RANGE_SHIFT (2 - J_ISA_BIT)
37#define J_RANGE_MASK ((1ul << (26 + J_RANGE_SHIFT)) - 1)
38#define J_ALIGN_MASK ((1ul << J_RANGE_SHIFT) - 1)
22 39
23void arch_jump_label_transform(struct jump_entry *e, 40void arch_jump_label_transform(struct jump_entry *e,
24 enum jump_label_type type) 41 enum jump_label_type type)
25{ 42{
43 union mips_instruction *insn_p;
26 union mips_instruction insn; 44 union mips_instruction insn;
27 union mips_instruction *insn_p =
28 (union mips_instruction *)(unsigned long)e->code;
29 45
30 /* Jump only works within a 256MB aligned region. */ 46 insn_p = (union mips_instruction *)msk_isa16_mode(e->code);
31 BUG_ON((e->target & ~J_RANGE_MASK) != (e->code & ~J_RANGE_MASK)); 47
48 /* Jump only works within an aligned region its delay slot is in. */
49 BUG_ON((e->target & ~J_RANGE_MASK) != ((e->code + 4) & ~J_RANGE_MASK));
32 50
33 /* Target must have 4 byte alignment. */ 51 /* Target must have the right alignment and ISA must be preserved. */
34 BUG_ON((e->target & 3) != 0); 52 BUG_ON((e->target & J_ALIGN_MASK) != J_ISA_BIT);
35 53
36 if (type == JUMP_LABEL_ENABLE) { 54 if (type == JUMP_LABEL_ENABLE) {
37 insn.j_format.opcode = j_op; 55 insn.j_format.opcode = J_ISA_BIT ? mm_j32_op : j_op;
38 insn.j_format.target = (e->target & J_RANGE_MASK) >> 2; 56 insn.j_format.target = e->target >> J_RANGE_SHIFT;
39 } else { 57 } else {
40 insn.word = 0; /* nop */ 58 insn.word = 0; /* nop */
41 } 59 }
42 60
43 get_online_cpus(); 61 get_online_cpus();
44 mutex_lock(&text_mutex); 62 mutex_lock(&text_mutex);
45 *insn_p = insn; 63 if (IS_ENABLED(CONFIG_CPU_MICROMIPS)) {
64 insn_p->halfword[0] = insn.word >> 16;
65 insn_p->halfword[1] = insn.word;
66 } else
67 *insn_p = insn;
46 68
47 flush_icache_range((unsigned long)insn_p, 69 flush_icache_range((unsigned long)insn_p,
48 (unsigned long)insn_p + sizeof(*insn_p)); 70 (unsigned long)insn_p + sizeof(*insn_p));
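
The rewritten patcher derives everything from the target ISA: a MIPS or microMIPS j instruction can only reach targets within the 256 MB (or 128 MB) region containing its delay-slot instruction, and on microMIPS the target's low address bit must be 1 so the jump stays in microMIPS mode. A throwaway user-space sketch that simply recomputes the values the comment block above describes:

#include <stdio.h>

int main(void)
{
	/* recompute J_RANGE_SHIFT / J_RANGE_MASK / J_ALIGN_MASK for both
	 * encodings; isa_bit mirrors IS_ENABLED(CONFIG_CPU_MICROMIPS)
	 */
	for (int isa_bit = 0; isa_bit <= 1; isa_bit++) {
		int shift = 2 - isa_bit;
		unsigned long range = 1ul << (26 + shift);
		unsigned long align_mask = (1ul << shift) - 1;

		printf("%-9s range=%luMB align_mask=%#lx target-low-bit=%d\n",
		       isa_bit ? "microMIPS" : "MIPS", range >> 20,
		       align_mask, isa_bit);
	}
	return 0;
}

It prints a 256 MB region with 4-byte alignment for standard MIPS and a 128 MB region with 2-byte alignment (ISA bit set) for microMIPS, matching the BUG_ON checks in arch_jump_label_transform().
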
diff --git a/arch/mips/kernel/r2300_fpu.S b/arch/mips/kernel/r2300_fpu.S
index f31063dbdaeb..5ce3b746cedc 100644
--- a/arch/mips/kernel/r2300_fpu.S
+++ b/arch/mips/kernel/r2300_fpu.S
@@ -28,6 +28,8 @@
28 .set mips1 28 .set mips1
29 /* Save floating point context */ 29 /* Save floating point context */
30LEAF(_save_fp_context) 30LEAF(_save_fp_context)
31 .set push
32 SET_HARDFLOAT
31 li v0, 0 # assume success 33 li v0, 0 # assume success
32 cfc1 t1,fcr31 34 cfc1 t1,fcr31
33 EX(swc1 $f0,(SC_FPREGS+0)(a0)) 35 EX(swc1 $f0,(SC_FPREGS+0)(a0))
@@ -65,6 +67,7 @@ LEAF(_save_fp_context)
65 EX(sw t1,(SC_FPC_CSR)(a0)) 67 EX(sw t1,(SC_FPC_CSR)(a0))
66 cfc1 t0,$0 # implementation/version 68 cfc1 t0,$0 # implementation/version
67 jr ra 69 jr ra
70 .set pop
68 .set nomacro 71 .set nomacro
69 EX(sw t0,(SC_FPC_EIR)(a0)) 72 EX(sw t0,(SC_FPC_EIR)(a0))
70 .set macro 73 .set macro
@@ -80,6 +83,8 @@ LEAF(_save_fp_context)
80 * stack frame which might have been changed by the user. 83 * stack frame which might have been changed by the user.
81 */ 84 */
82LEAF(_restore_fp_context) 85LEAF(_restore_fp_context)
86 .set push
87 SET_HARDFLOAT
83 li v0, 0 # assume success 88 li v0, 0 # assume success
84 EX(lw t0,(SC_FPC_CSR)(a0)) 89 EX(lw t0,(SC_FPC_CSR)(a0))
85 EX(lwc1 $f0,(SC_FPREGS+0)(a0)) 90 EX(lwc1 $f0,(SC_FPREGS+0)(a0))
@@ -116,6 +121,7 @@ LEAF(_restore_fp_context)
116 EX(lwc1 $f31,(SC_FPREGS+248)(a0)) 121 EX(lwc1 $f31,(SC_FPREGS+248)(a0))
117 jr ra 122 jr ra
118 ctc1 t0,fcr31 123 ctc1 t0,fcr31
124 .set pop
119 END(_restore_fp_context) 125 END(_restore_fp_context)
120 .set reorder 126 .set reorder
121 127
diff --git a/arch/mips/kernel/r2300_switch.S b/arch/mips/kernel/r2300_switch.S
index 20b7b040e76f..435ea652f5fa 100644
--- a/arch/mips/kernel/r2300_switch.S
+++ b/arch/mips/kernel/r2300_switch.S
@@ -120,6 +120,9 @@ LEAF(_restore_fp)
120 120
121#define FPU_DEFAULT 0x00000000 121#define FPU_DEFAULT 0x00000000
122 122
123 .set push
124 SET_HARDFLOAT
125
123LEAF(_init_fpu) 126LEAF(_init_fpu)
124 mfc0 t0, CP0_STATUS 127 mfc0 t0, CP0_STATUS
125 li t1, ST0_CU1 128 li t1, ST0_CU1
@@ -165,3 +168,5 @@ LEAF(_init_fpu)
165 mtc1 t0, $f31 168 mtc1 t0, $f31
166 jr ra 169 jr ra
167 END(_init_fpu) 170 END(_init_fpu)
171
172 .set pop
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
index 8352523568e6..6c160c67984c 100644
--- a/arch/mips/kernel/r4k_fpu.S
+++ b/arch/mips/kernel/r4k_fpu.S
@@ -19,8 +19,12 @@
19#include <asm/asm-offsets.h> 19#include <asm/asm-offsets.h>
20#include <asm/regdef.h> 20#include <asm/regdef.h>
21 21
22/* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */
23#undef fp
24
22 .macro EX insn, reg, src 25 .macro EX insn, reg, src
23 .set push 26 .set push
27 SET_HARDFLOAT
24 .set nomacro 28 .set nomacro
25.ex\@: \insn \reg, \src 29.ex\@: \insn \reg, \src
26 .set pop 30 .set pop
@@ -33,12 +37,17 @@
33 .set arch=r4000 37 .set arch=r4000
34 38
35LEAF(_save_fp_context) 39LEAF(_save_fp_context)
40 .set push
41 SET_HARDFLOAT
36 cfc1 t1, fcr31 42 cfc1 t1, fcr31
43 .set pop
37 44
38#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) 45#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
39 .set push 46 .set push
47 SET_HARDFLOAT
40#ifdef CONFIG_CPU_MIPS32_R2 48#ifdef CONFIG_CPU_MIPS32_R2
41 .set mips64r2 49 .set mips32r2
50 .set fp=64
42 mfc0 t0, CP0_STATUS 51 mfc0 t0, CP0_STATUS
43 sll t0, t0, 5 52 sll t0, t0, 5
44 bgez t0, 1f # skip storing odd if FR=0 53 bgez t0, 1f # skip storing odd if FR=0
@@ -64,6 +73,8 @@ LEAF(_save_fp_context)
641: .set pop 731: .set pop
65#endif 74#endif
66 75
76 .set push
77 SET_HARDFLOAT
67 /* Store the 16 even double precision registers */ 78 /* Store the 16 even double precision registers */
68 EX sdc1 $f0, SC_FPREGS+0(a0) 79 EX sdc1 $f0, SC_FPREGS+0(a0)
69 EX sdc1 $f2, SC_FPREGS+16(a0) 80 EX sdc1 $f2, SC_FPREGS+16(a0)
@@ -84,11 +95,14 @@ LEAF(_save_fp_context)
84 EX sw t1, SC_FPC_CSR(a0) 95 EX sw t1, SC_FPC_CSR(a0)
85 jr ra 96 jr ra
86 li v0, 0 # success 97 li v0, 0 # success
98 .set pop
87 END(_save_fp_context) 99 END(_save_fp_context)
88 100
89#ifdef CONFIG_MIPS32_COMPAT 101#ifdef CONFIG_MIPS32_COMPAT
90 /* Save 32-bit process floating point context */ 102 /* Save 32-bit process floating point context */
91LEAF(_save_fp_context32) 103LEAF(_save_fp_context32)
104 .set push
105 SET_HARDFLOAT
92 cfc1 t1, fcr31 106 cfc1 t1, fcr31
93 107
94 mfc0 t0, CP0_STATUS 108 mfc0 t0, CP0_STATUS
@@ -134,6 +148,7 @@ LEAF(_save_fp_context32)
134 EX sw t1, SC32_FPC_CSR(a0) 148 EX sw t1, SC32_FPC_CSR(a0)
135 cfc1 t0, $0 # implementation/version 149 cfc1 t0, $0 # implementation/version
136 EX sw t0, SC32_FPC_EIR(a0) 150 EX sw t0, SC32_FPC_EIR(a0)
151 .set pop
137 152
138 jr ra 153 jr ra
139 li v0, 0 # success 154 li v0, 0 # success
@@ -150,8 +165,10 @@ LEAF(_restore_fp_context)
150 165
151#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2) 166#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPS32_R2)
152 .set push 167 .set push
168 SET_HARDFLOAT
153#ifdef CONFIG_CPU_MIPS32_R2 169#ifdef CONFIG_CPU_MIPS32_R2
154 .set mips64r2 170 .set mips32r2
171 .set fp=64
155 mfc0 t0, CP0_STATUS 172 mfc0 t0, CP0_STATUS
156 sll t0, t0, 5 173 sll t0, t0, 5
157 bgez t0, 1f # skip loading odd if FR=0 174 bgez t0, 1f # skip loading odd if FR=0
@@ -175,6 +192,8 @@ LEAF(_restore_fp_context)
175 EX ldc1 $f31, SC_FPREGS+248(a0) 192 EX ldc1 $f31, SC_FPREGS+248(a0)
1761: .set pop 1931: .set pop
177#endif 194#endif
195 .set push
196 SET_HARDFLOAT
178 EX ldc1 $f0, SC_FPREGS+0(a0) 197 EX ldc1 $f0, SC_FPREGS+0(a0)
179 EX ldc1 $f2, SC_FPREGS+16(a0) 198 EX ldc1 $f2, SC_FPREGS+16(a0)
180 EX ldc1 $f4, SC_FPREGS+32(a0) 199 EX ldc1 $f4, SC_FPREGS+32(a0)
@@ -192,6 +211,7 @@ LEAF(_restore_fp_context)
192 EX ldc1 $f28, SC_FPREGS+224(a0) 211 EX ldc1 $f28, SC_FPREGS+224(a0)
193 EX ldc1 $f30, SC_FPREGS+240(a0) 212 EX ldc1 $f30, SC_FPREGS+240(a0)
194 ctc1 t1, fcr31 213 ctc1 t1, fcr31
214 .set pop
195 jr ra 215 jr ra
196 li v0, 0 # success 216 li v0, 0 # success
197 END(_restore_fp_context) 217 END(_restore_fp_context)
@@ -199,6 +219,8 @@ LEAF(_restore_fp_context)
199#ifdef CONFIG_MIPS32_COMPAT 219#ifdef CONFIG_MIPS32_COMPAT
200LEAF(_restore_fp_context32) 220LEAF(_restore_fp_context32)
201 /* Restore an o32 sigcontext. */ 221 /* Restore an o32 sigcontext. */
222 .set push
223 SET_HARDFLOAT
202 EX lw t1, SC32_FPC_CSR(a0) 224 EX lw t1, SC32_FPC_CSR(a0)
203 225
204 mfc0 t0, CP0_STATUS 226 mfc0 t0, CP0_STATUS
@@ -242,6 +264,7 @@ LEAF(_restore_fp_context32)
242 ctc1 t1, fcr31 264 ctc1 t1, fcr31
243 jr ra 265 jr ra
244 li v0, 0 # success 266 li v0, 0 # success
267 .set pop
245 END(_restore_fp_context32) 268 END(_restore_fp_context32)
246#endif 269#endif
247 270
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index 4c4ec1812420..64591e671878 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -22,6 +22,9 @@
22 22
23#include <asm/asmmacro.h> 23#include <asm/asmmacro.h>
24 24
25/* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */
26#undef fp
27
25/* 28/*
26 * Offset to the current process status flags, the first 32 bytes of the 29 * Offset to the current process status flags, the first 32 bytes of the
27 * stack are not used. 30 * stack are not used.
@@ -65,8 +68,12 @@
65 bgtz a3, 1f 68 bgtz a3, 1f
66 69
67 /* Save 128b MSA vector context + scalar FP control & status. */ 70 /* Save 128b MSA vector context + scalar FP control & status. */
71 .set push
72 SET_HARDFLOAT
68 cfc1 t1, fcr31 73 cfc1 t1, fcr31
69 msa_save_all a0 74 msa_save_all a0
75 .set pop /* SET_HARDFLOAT */
76
70 sw t1, THREAD_FCR31(a0) 77 sw t1, THREAD_FCR31(a0)
71 b 2f 78 b 2f
72 79
@@ -161,6 +168,9 @@ LEAF(_init_msa_upper)
161 168
162#define FPU_DEFAULT 0x00000000 169#define FPU_DEFAULT 0x00000000
163 170
171 .set push
172 SET_HARDFLOAT
173
164LEAF(_init_fpu) 174LEAF(_init_fpu)
165 mfc0 t0, CP0_STATUS 175 mfc0 t0, CP0_STATUS
166 li t1, ST0_CU1 176 li t1, ST0_CU1
@@ -232,7 +242,8 @@ LEAF(_init_fpu)
232 242
233#ifdef CONFIG_CPU_MIPS32_R2 243#ifdef CONFIG_CPU_MIPS32_R2
234 .set push 244 .set push
235 .set mips64r2 245 .set mips32r2
246 .set fp=64
236 sll t0, t0, 5 # is Status.FR set? 247 sll t0, t0, 5 # is Status.FR set?
237 bgez t0, 1f # no: skip setting upper 32b 248 bgez t0, 1f # no: skip setting upper 32b
238 249
@@ -291,3 +302,5 @@ LEAF(_init_fpu)
291#endif 302#endif
292 jr ra 303 jr ra
293 END(_init_fpu) 304 END(_init_fpu)
305
306 .set pop /* SET_HARDFLOAT */
diff --git a/arch/mips/kernel/r6000_fpu.S b/arch/mips/kernel/r6000_fpu.S
index da0fbe46d83b..47077380c15c 100644
--- a/arch/mips/kernel/r6000_fpu.S
+++ b/arch/mips/kernel/r6000_fpu.S
@@ -18,6 +18,9 @@
18 18
19 .set noreorder 19 .set noreorder
20 .set mips2 20 .set mips2
21 .set push
22 SET_HARDFLOAT
23
21 /* Save floating point context */ 24 /* Save floating point context */
22 LEAF(_save_fp_context) 25 LEAF(_save_fp_context)
23 mfc0 t0,CP0_STATUS 26 mfc0 t0,CP0_STATUS
@@ -85,3 +88,5 @@
851: jr ra 881: jr ra
86 nop 89 nop
87 END(_restore_fp_context) 90 END(_restore_fp_context)
91
92 .set pop /* SET_HARDFLOAT */
diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c
index 31b1b763cb29..c5c4fd54d797 100644
--- a/arch/mips/kernel/rtlx.c
+++ b/arch/mips/kernel/rtlx.c
@@ -94,12 +94,12 @@ int rtlx_open(int index, int can_sleep)
94 int ret = 0; 94 int ret = 0;
95 95
96 if (index >= RTLX_CHANNELS) { 96 if (index >= RTLX_CHANNELS) {
97 pr_debug(KERN_DEBUG "rtlx_open index out of range\n"); 97 pr_debug("rtlx_open index out of range\n");
98 return -ENOSYS; 98 return -ENOSYS;
99 } 99 }
100 100
101 if (atomic_inc_return(&channel_wqs[index].in_open) > 1) { 101 if (atomic_inc_return(&channel_wqs[index].in_open) > 1) {
102 pr_debug(KERN_DEBUG "rtlx_open channel %d already opened\n", index); 102 pr_debug("rtlx_open channel %d already opened\n", index);
103 ret = -EBUSY; 103 ret = -EBUSY;
104 goto out_fail; 104 goto out_fail;
105 } 105 }
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 744cd10ba599..00cad1005a16 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -579,3 +579,4 @@ EXPORT(sys_call_table)
579 PTR sys_seccomp 579 PTR sys_seccomp
580 PTR sys_getrandom 580 PTR sys_getrandom
581 PTR sys_memfd_create 581 PTR sys_memfd_create
582 PTR sys_bpf /* 4355 */
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 002b1bc09c38..5251565e344b 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -434,4 +434,5 @@ EXPORT(sys_call_table)
434 PTR sys_seccomp 434 PTR sys_seccomp
435 PTR sys_getrandom 435 PTR sys_getrandom
436 PTR sys_memfd_create 436 PTR sys_memfd_create
437 PTR sys_bpf /* 5315 */
437 .size sys_call_table,.-sys_call_table 438 .size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index ca6cbbe9805b..77e74398b828 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -427,4 +427,5 @@ EXPORT(sysn32_call_table)
427 PTR sys_seccomp 427 PTR sys_seccomp
428 PTR sys_getrandom 428 PTR sys_getrandom
429 PTR sys_memfd_create 429 PTR sys_memfd_create
430 PTR sys_bpf
430 .size sysn32_call_table,.-sysn32_call_table 431 .size sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 9e10d11fbb84..6f8db9f728e8 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -564,4 +564,5 @@ EXPORT(sys32_call_table)
564 PTR sys_seccomp 564 PTR sys_seccomp
565 PTR sys_getrandom 565 PTR sys_getrandom
566 PTR sys_memfd_create 566 PTR sys_memfd_create
567 PTR sys_bpf /* 4355 */
567 .size sys32_call_table,.-sys32_call_table 568 .size sys32_call_table,.-sys32_call_table
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index b3b8f0d9d4a7..f3b635f86c39 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -485,7 +485,7 @@ static void __init bootmem_init(void)
485 * NOTE: historically plat_mem_setup did the entire platform initialization. 485 * NOTE: historically plat_mem_setup did the entire platform initialization.
486 * This was rather impractical because it meant plat_mem_setup had to 486 * This was rather impractical because it meant plat_mem_setup had to
487 * get away without any kind of memory allocator. To keep old code from 487 * get away without any kind of memory allocator. To keep old code from
488 * breaking plat_setup was just renamed to plat_setup and a second platform 488 * breaking plat_setup was just renamed to plat_mem_setup and a second platform
489 * initialization hook for anything else was introduced. 489 * initialization hook for anything else was introduced.
490 */ 490 */
491 491
@@ -493,7 +493,7 @@ static int usermem __initdata;
493 493
494static int __init early_parse_mem(char *p) 494static int __init early_parse_mem(char *p)
495{ 495{
496 unsigned long start, size; 496 phys_t start, size;
497 497
498 /* 498 /*
499 * If a user specifies memory size, we 499 * If a user specifies memory size, we
@@ -683,7 +683,8 @@ static void __init arch_mem_init(char **cmdline_p)
683 dma_contiguous_reserve(PFN_PHYS(max_low_pfn)); 683 dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
684 /* Tell bootmem about cma reserved memblock section */ 684 /* Tell bootmem about cma reserved memblock section */
685 for_each_memblock(reserved, reg) 685 for_each_memblock(reserved, reg)
686 reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT); 686 if (reg->size != 0)
687 reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
687} 688}
688 689
689static void __init resource_init(void) 690static void __init resource_init(void)
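
The arch_mem_init() change above registers only reserved memblock regions with a non-zero size, since reserve_bootmem() is not expected to cope with empty ranges. A simplified userspace sketch of the same guard, with reserve_region() standing in for reserve_bootmem() (names and values here are illustrative, not kernel API):

    #include <stdio.h>

    struct region { unsigned long base, size; };

    static void reserve_region(unsigned long base, unsigned long size)
    {
            printf("reserving [%#lx, %#lx)\n", base, base + size);
    }

    int main(void)
    {
            struct region reserved[] = {
                    { 0x01000000, 0x00200000 },
                    { 0x02000000, 0x00000000 },     /* empty entry: skipped */
                    { 0x03000000, 0x00100000 },
            };

            for (unsigned i = 0; i < sizeof(reserved) / sizeof(reserved[0]); i++)
                    if (reserved[i].size != 0)      /* mirrors the reg->size != 0 check */
                            reserve_region(reserved[i].base, reserved[i].size);
            return 0;
    }
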
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 1d57605e4615..16f1e4f2bf3c 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -658,13 +658,13 @@ static int signal_setup(void)
658 save_fp_context = _save_fp_context; 658 save_fp_context = _save_fp_context;
659 restore_fp_context = _restore_fp_context; 659 restore_fp_context = _restore_fp_context;
660 } else { 660 } else {
661 save_fp_context = copy_fp_from_sigcontext; 661 save_fp_context = copy_fp_to_sigcontext;
662 restore_fp_context = copy_fp_to_sigcontext; 662 restore_fp_context = copy_fp_from_sigcontext;
663 } 663 }
664#endif /* CONFIG_SMP */ 664#endif /* CONFIG_SMP */
665#else 665#else
666 save_fp_context = copy_fp_from_sigcontext;; 666 save_fp_context = copy_fp_to_sigcontext;
667 restore_fp_context = copy_fp_to_sigcontext; 667 restore_fp_context = copy_fp_from_sigcontext;
668#endif 668#endif
669 669
670 return 0; 670 return 0;
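
The signal.c hunk swaps two helpers that had been wired up backwards: saving FP state for a signal frame must copy *to* the sigcontext, and restoring must copy *from* it (the change also drops a stray double semicolon). A tiny standalone sketch of the corrected wiring, with the copy helpers reduced to stubs:

    #include <stdio.h>

    struct sigcontext { double fpregs[32]; };

    static int copy_fp_to_sigcontext(struct sigcontext *sc)
    {
            (void)sc;                       /* stub: real code writes FP registers out */
            puts("FP registers -> sigcontext");
            return 0;
    }

    static int copy_fp_from_sigcontext(struct sigcontext *sc)
    {
            (void)sc;                       /* stub: real code reads FP registers back */
            puts("sigcontext -> FP registers");
            return 0;
    }

    /* Direction of the copy follows the name of the hook, as in the fixed code. */
    static int (*save_fp_context)(struct sigcontext *)    = copy_fp_to_sigcontext;
    static int (*restore_fp_context)(struct sigcontext *) = copy_fp_from_sigcontext;

    int main(void)
    {
            struct sigcontext sc = { { 0 } };
            save_fp_context(&sc);     /* when delivering a signal */
            restore_fp_context(&sc);  /* when returning via sigreturn */
            return 0;
    }
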
diff --git a/arch/mips/lasat/Kconfig b/arch/mips/lasat/Kconfig
index 1d2ee8a9be13..8776d0a34274 100644
--- a/arch/mips/lasat/Kconfig
+++ b/arch/mips/lasat/Kconfig
@@ -4,7 +4,7 @@ config PICVUE
4 4
5config PICVUE_PROC 5config PICVUE_PROC
6 tristate "PICVUE LCD display driver /proc interface" 6 tristate "PICVUE LCD display driver /proc interface"
7 depends on PICVUE 7 depends on PICVUE && PROC_FS
8 8
9config DS1603 9config DS1603
10 bool "DS1603 RTC driver" 10 bool "DS1603 RTC driver"
diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S
index c17ef80cf65a..5d3238af9b5c 100644
--- a/arch/mips/lib/memcpy.S
+++ b/arch/mips/lib/memcpy.S
@@ -503,6 +503,7 @@
503 STOREB(t0, NBYTES-2(dst), .Ls_exc_p1\@) 503 STOREB(t0, NBYTES-2(dst), .Ls_exc_p1\@)
504.Ldone\@: 504.Ldone\@:
505 jr ra 505 jr ra
506 nop
506 .if __memcpy == 1 507 .if __memcpy == 1
507 END(memcpy) 508 END(memcpy)
508 .set __memcpy, 0 509 .set __memcpy, 0
diff --git a/arch/mips/lib/r3k_dump_tlb.c b/arch/mips/lib/r3k_dump_tlb.c
index 91615c2ef0cf..1ef365ab3cd3 100644
--- a/arch/mips/lib/r3k_dump_tlb.c
+++ b/arch/mips/lib/r3k_dump_tlb.c
@@ -34,7 +34,7 @@ static void dump_tlb(int first, int last)
34 entrylo0 = read_c0_entrylo0(); 34 entrylo0 = read_c0_entrylo0();
35 35
36 /* Unused entries have a virtual address of KSEG0. */ 36 /* Unused entries have a virtual address of KSEG0. */
37 if ((entryhi & 0xffffe000) != 0x80000000 37 if ((entryhi & 0xfffff000) != 0x80000000
38 && (entryhi & 0xfc0) == asid) { 38 && (entryhi & 0xfc0) == asid) {
39 /* 39 /*
40 * Only print entries in use 40 * Only print entries in use
@@ -43,7 +43,7 @@ static void dump_tlb(int first, int last)
43 43
44 printk("va=%08lx asid=%08lx" 44 printk("va=%08lx asid=%08lx"
45 " [pa=%06lx n=%d d=%d v=%d g=%d]", 45 " [pa=%06lx n=%d d=%d v=%d g=%d]",
46 (entryhi & 0xffffe000), 46 (entryhi & 0xfffff000),
47 entryhi & 0xfc0, 47 entryhi & 0xfc0,
48 entrylo0 & PAGE_MASK, 48 entrylo0 & PAGE_MASK,
49 (entrylo0 & (1 << 11)) ? 1 : 0, 49 (entrylo0 & (1 << 11)) ? 1 : 0,
diff --git a/arch/mips/lib/strnlen_user.S b/arch/mips/lib/strnlen_user.S
index f3af6995e2a6..7d12c0dded3d 100644
--- a/arch/mips/lib/strnlen_user.S
+++ b/arch/mips/lib/strnlen_user.S
@@ -40,9 +40,11 @@ FEXPORT(__strnlen_\func\()_nocheck_asm)
40.else 40.else
41 EX(lbe, t0, (v0), .Lfault\@) 41 EX(lbe, t0, (v0), .Lfault\@)
42.endif 42.endif
43 PTR_ADDIU v0, 1 43 .set noreorder
44 bnez t0, 1b 44 bnez t0, 1b
451: PTR_SUBU v0, a0 451: PTR_ADDIU v0, 1
46 .set reorder
47 PTR_SUBU v0, a0
46 jr ra 48 jr ra
47 END(__strnlen_\func\()_asm) 49 END(__strnlen_\func\()_asm)
48 50
diff --git a/arch/mips/loongson/common/Makefile b/arch/mips/loongson/common/Makefile
index 0bb9cc9dc621..d87e03330b29 100644
--- a/arch/mips/loongson/common/Makefile
+++ b/arch/mips/loongson/common/Makefile
@@ -11,7 +11,8 @@ obj-$(CONFIG_PCI) += pci.o
11# Serial port support 11# Serial port support
12# 12#
13obj-$(CONFIG_EARLY_PRINTK) += early_printk.o 13obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
14obj-$(CONFIG_SERIAL_8250) += serial.o 14loongson-serial-$(CONFIG_SERIAL_8250) := serial.o
15obj-y += $(loongson-serial-m) $(loongson-serial-y)
15obj-$(CONFIG_LOONGSON_UART_BASE) += uart_base.o 16obj-$(CONFIG_LOONGSON_UART_BASE) += uart_base.o
16obj-$(CONFIG_LOONGSON_MC146818) += rtc.o 17obj-$(CONFIG_LOONGSON_MC146818) += rtc.o
17 18
diff --git a/arch/mips/loongson/lemote-2f/clock.c b/arch/mips/loongson/lemote-2f/clock.c
index a217061beee3..462e34d46b4a 100644
--- a/arch/mips/loongson/lemote-2f/clock.c
+++ b/arch/mips/loongson/lemote-2f/clock.c
@@ -91,6 +91,7 @@ EXPORT_SYMBOL(clk_put);
91 91
92int clk_set_rate(struct clk *clk, unsigned long rate) 92int clk_set_rate(struct clk *clk, unsigned long rate)
93{ 93{
94 unsigned int rate_khz = rate / 1000;
94 struct cpufreq_frequency_table *pos; 95 struct cpufreq_frequency_table *pos;
95 int ret = 0; 96 int ret = 0;
96 int regval; 97 int regval;
@@ -107,9 +108,9 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
107 propagate_rate(clk); 108 propagate_rate(clk);
108 109
109 cpufreq_for_each_valid_entry(pos, loongson2_clockmod_table) 110 cpufreq_for_each_valid_entry(pos, loongson2_clockmod_table)
110 if (rate == pos->frequency) 111 if (rate_khz == pos->frequency)
111 break; 112 break;
112 if (rate != pos->frequency) 113 if (rate_khz != pos->frequency)
113 return -ENOTSUPP; 114 return -ENOTSUPP;
114 115
115 clk->rate = rate; 116 clk->rate = rate;
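
The lemote-2f clock change converts the requested rate from Hz to kHz before comparing it against the cpufreq frequency table, whose entries are in kHz. A compact, self-contained illustration of that unit fix (table values are made up; the kernel code returns -ENOTSUPP where this sketch returns -1):

    #include <stdio.h>

    struct freq_entry { unsigned int frequency; };  /* kHz, like cpufreq tables */

    static const struct freq_entry table[] = {
            { 200000 }, { 400000 }, { 800000 }, { 0 }   /* 0 terminates the table */
    };

    static int set_rate(unsigned long rate_hz)
    {
            unsigned int rate_khz = rate_hz / 1000;     /* the conversion the patch adds */

            for (const struct freq_entry *pos = table; pos->frequency; pos++)
                    if (rate_khz == pos->frequency)
                            return 0;
            return -1;      /* no matching operating point */
    }

    int main(void)
    {
            printf("800 MHz: %d\n", set_rate(800000000UL));
            printf("123 MHz: %d\n", set_rate(123000000UL));
            return 0;
    }
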
diff --git a/arch/mips/loongson/loongson-3/numa.c b/arch/mips/loongson/loongson-3/numa.c
index 37ed184398c6..42323bcc5d28 100644
--- a/arch/mips/loongson/loongson-3/numa.c
+++ b/arch/mips/loongson/loongson-3/numa.c
@@ -33,6 +33,7 @@
33 33
34static struct node_data prealloc__node_data[MAX_NUMNODES]; 34static struct node_data prealloc__node_data[MAX_NUMNODES];
35unsigned char __node_distances[MAX_NUMNODES][MAX_NUMNODES]; 35unsigned char __node_distances[MAX_NUMNODES][MAX_NUMNODES];
36EXPORT_SYMBOL(__node_distances);
36struct node_data *__node_data[MAX_NUMNODES]; 37struct node_data *__node_data[MAX_NUMNODES];
37EXPORT_SYMBOL(__node_data); 38EXPORT_SYMBOL(__node_data);
38 39
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index 7a4727795a70..cac529a405b8 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -584,11 +584,7 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
584 if (insn.i_format.rs == bc_op) { 584 if (insn.i_format.rs == bc_op) {
585 preempt_disable(); 585 preempt_disable();
586 if (is_fpu_owner()) 586 if (is_fpu_owner())
587 asm volatile( 587 fcr31 = read_32bit_cp1_register(CP1_STATUS);
588 ".set push\n"
589 "\t.set mips1\n"
590 "\tcfc1\t%0,$31\n"
591 "\t.set pop" : "=r" (fcr31));
592 else 588 else
593 fcr31 = current->thread.fpu.fcr31; 589 fcr31 = current->thread.fpu.fcr31;
594 preempt_enable(); 590 preempt_enable();
@@ -1023,7 +1019,7 @@ emul:
1023 goto emul; 1019 goto emul;
1024 1020
1025 case cop1x_op: 1021 case cop1x_op:
1026 if (cpu_has_mips_4_5 || cpu_has_mips64) 1022 if (cpu_has_mips_4_5 || cpu_has_mips64 || cpu_has_mips32r2)
1027 /* its one of ours */ 1023 /* its one of ours */
1028 goto emul; 1024 goto emul;
1029 1025
@@ -1068,7 +1064,7 @@ emul:
1068 break; 1064 break;
1069 1065
1070 case cop1x_op: 1066 case cop1x_op:
1071 if (!cpu_has_mips_4_5 && !cpu_has_mips64) 1067 if (!cpu_has_mips_4_5 && !cpu_has_mips64 && !cpu_has_mips32r2)
1072 return SIGILL; 1068 return SIGILL;
1073 1069
1074 sig = fpux_emu(xcp, ctx, ir, fault_addr); 1070 sig = fpux_emu(xcp, ctx, ir, fault_addr);
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index fa6ebd4bc9e9..c3917e251f59 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -299,6 +299,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
299 299
300 local_irq_save(flags); 300 local_irq_save(flags);
301 301
302 htw_stop();
302 pid = read_c0_entryhi() & ASID_MASK; 303 pid = read_c0_entryhi() & ASID_MASK;
303 address &= (PAGE_MASK << 1); 304 address &= (PAGE_MASK << 1);
304 write_c0_entryhi(address | pid); 305 write_c0_entryhi(address | pid);
@@ -346,6 +347,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
346 tlb_write_indexed(); 347 tlb_write_indexed();
347 } 348 }
348 tlbw_use_hazard(); 349 tlbw_use_hazard();
350 htw_start();
349 flush_itlb_vm(vma); 351 flush_itlb_vm(vma);
350 local_irq_restore(flags); 352 local_irq_restore(flags);
351} 353}
@@ -422,6 +424,7 @@ __init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
422 424
423 local_irq_save(flags); 425 local_irq_save(flags);
424 /* Save old context and create impossible VPN2 value */ 426 /* Save old context and create impossible VPN2 value */
427 htw_stop();
425 old_ctx = read_c0_entryhi(); 428 old_ctx = read_c0_entryhi();
426 old_pagemask = read_c0_pagemask(); 429 old_pagemask = read_c0_pagemask();
427 wired = read_c0_wired(); 430 wired = read_c0_wired();
@@ -443,6 +446,7 @@ __init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
443 446
444 write_c0_entryhi(old_ctx); 447 write_c0_entryhi(old_ctx);
445 write_c0_pagemask(old_pagemask); 448 write_c0_pagemask(old_pagemask);
449 htw_start();
446out: 450out:
447 local_irq_restore(flags); 451 local_irq_restore(flags);
448 return ret; 452 return ret;
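
The tlb-r4k.c hunks bracket manual TLB updates with htw_stop()/htw_start() so the hardware page-table walker is paused while EntryHi and friends are rewritten by hand. The pattern is "disable the agent that races with you, do the update, re-enable it"; a generic sketch with stand-in functions (htw_stop()/htw_start() below are stubs, not the MIPS implementations):

    #include <stdio.h>

    static void htw_stop(void)  { puts("hardware walker paused");  }
    static void htw_start(void) { puts("hardware walker resumed"); }

    static void update_tlb_entry(unsigned long va, unsigned long pte)
    {
            /* The real code also runs with interrupts disabled in this window. */
            htw_stop();
            printf("rewriting TLB entry for va=%#lx with pte=%#lx\n", va, pte);
            htw_start();
    }

    int main(void)
    {
            update_tlb_entry(0x12340000UL, 0x5678UL);
            return 0;
    }
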
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index a08dd53a1cc5..e3328a96e809 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -1062,6 +1062,7 @@ static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
1062struct mips_huge_tlb_info { 1062struct mips_huge_tlb_info {
1063 int huge_pte; 1063 int huge_pte;
1064 int restore_scratch; 1064 int restore_scratch;
1065 bool need_reload_pte;
1065}; 1066};
1066 1067
1067static struct mips_huge_tlb_info 1068static struct mips_huge_tlb_info
@@ -1076,6 +1077,7 @@ build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
1076 1077
1077 rv.huge_pte = scratch; 1078 rv.huge_pte = scratch;
1078 rv.restore_scratch = 0; 1079 rv.restore_scratch = 0;
1080 rv.need_reload_pte = false;
1079 1081
1080 if (check_for_high_segbits) { 1082 if (check_for_high_segbits) {
1081 UASM_i_MFC0(p, tmp, C0_BADVADDR); 1083 UASM_i_MFC0(p, tmp, C0_BADVADDR);
@@ -1264,6 +1266,7 @@ static void build_r4000_tlb_refill_handler(void)
1264 } else { 1266 } else {
1265 htlb_info.huge_pte = K0; 1267 htlb_info.huge_pte = K0;
1266 htlb_info.restore_scratch = 0; 1268 htlb_info.restore_scratch = 0;
1269 htlb_info.need_reload_pte = true;
1267 vmalloc_mode = refill_noscratch; 1270 vmalloc_mode = refill_noscratch;
1268 /* 1271 /*
1269 * create the plain linear handler 1272 * create the plain linear handler
@@ -1300,7 +1303,8 @@ static void build_r4000_tlb_refill_handler(void)
1300 } 1303 }
1301#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT 1304#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
1302 uasm_l_tlb_huge_update(&l, p); 1305 uasm_l_tlb_huge_update(&l, p);
1303 UASM_i_LW(&p, K0, 0, K1); 1306 if (htlb_info.need_reload_pte)
1307 UASM_i_LW(&p, htlb_info.huge_pte, 0, K1);
1304 build_huge_update_entries(&p, htlb_info.huge_pte, K1); 1308 build_huge_update_entries(&p, htlb_info.huge_pte, K1);
1305 build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random, 1309 build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random,
1306 htlb_info.restore_scratch); 1310 htlb_info.restore_scratch);
@@ -1868,8 +1872,16 @@ build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
1868 uasm_l_smp_pgtable_change(l, *p); 1872 uasm_l_smp_pgtable_change(l, *p);
1869#endif 1873#endif
1870 iPTE_LW(p, wr.r1, wr.r2); /* get even pte */ 1874 iPTE_LW(p, wr.r1, wr.r2); /* get even pte */
1871 if (!m4kc_tlbp_war()) 1875 if (!m4kc_tlbp_war()) {
1872 build_tlb_probe_entry(p); 1876 build_tlb_probe_entry(p);
1877 if (cpu_has_htw) {
1878 /* race condition happens, leaving */
1879 uasm_i_ehb(p);
1880 uasm_i_mfc0(p, wr.r3, C0_INDEX);
1881 uasm_il_bltz(p, r, wr.r3, label_leave);
1882 uasm_i_nop(p);
1883 }
1884 }
1873 return wr; 1885 return wr;
1874} 1886}
1875 1887
diff --git a/arch/mips/mti-malta/Makefile b/arch/mips/mti-malta/Makefile
index b9510ea8db56..6510ace272d4 100644
--- a/arch/mips/mti-malta/Makefile
+++ b/arch/mips/mti-malta/Makefile
@@ -5,8 +5,9 @@
5# Copyright (C) 2008 Wind River Systems, Inc. 5# Copyright (C) 2008 Wind River Systems, Inc.
6# written by Ralf Baechle <ralf@linux-mips.org> 6# written by Ralf Baechle <ralf@linux-mips.org>
7# 7#
8obj-y := malta-amon.o malta-display.o malta-init.o \ 8obj-y := malta-display.o malta-init.o \
9 malta-int.o malta-memory.o malta-platform.o \ 9 malta-int.o malta-memory.o malta-platform.o \
10 malta-reset.o malta-setup.o malta-time.o 10 malta-reset.o malta-setup.o malta-time.o
11 11
12obj-$(CONFIG_MIPS_CMP) += malta-amon.o
12obj-$(CONFIG_MIPS_MALTA_PM) += malta-pm.o 13obj-$(CONFIG_MIPS_MALTA_PM) += malta-pm.o
diff --git a/arch/mips/mti-sead3/Makefile b/arch/mips/mti-sead3/Makefile
index febf4334545e..2ae49e99eb67 100644
--- a/arch/mips/mti-sead3/Makefile
+++ b/arch/mips/mti-sead3/Makefile
@@ -14,7 +14,6 @@ obj-y := sead3-lcd.o sead3-display.o sead3-init.o \
14 sead3-setup.o sead3-time.o 14 sead3-setup.o sead3-time.o
15 15
16obj-y += sead3-i2c-dev.o sead3-i2c.o \ 16obj-y += sead3-i2c-dev.o sead3-i2c.o \
17 sead3-pic32-i2c-drv.o sead3-pic32-bus.o \
18 leds-sead3.o sead3-leds.o 17 leds-sead3.o sead3-leds.o
19 18
20obj-$(CONFIG_EARLY_PRINTK) += sead3-console.o 19obj-$(CONFIG_EARLY_PRINTK) += sead3-console.o
diff --git a/arch/mips/mti-sead3/sead3-i2c.c b/arch/mips/mti-sead3/sead3-i2c.c
index f70d5fc58ef5..795ae83894e0 100644
--- a/arch/mips/mti-sead3/sead3-i2c.c
+++ b/arch/mips/mti-sead3/sead3-i2c.c
@@ -5,10 +5,8 @@
5 * 5 *
6 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 6 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
7 */ 7 */
8#include <linux/module.h>
9#include <linux/init.h> 8#include <linux/init.h>
10#include <linux/platform_device.h> 9#include <linux/platform_device.h>
11#include <irq.h>
12 10
13struct resource sead3_i2c_resources[] = { 11struct resource sead3_i2c_resources[] = {
14 { 12 {
@@ -30,8 +28,4 @@ static int __init sead3_i2c_init(void)
30 return platform_device_register(&sead3_i2c_device); 28 return platform_device_register(&sead3_i2c_device);
31} 29}
32 30
33module_init(sead3_i2c_init); 31device_initcall(sead3_i2c_init);
34
35MODULE_AUTHOR("Chris Dearman <chris@mips.com>");
36MODULE_LICENSE("GPL");
37MODULE_DESCRIPTION("I2C probe driver for SEAD3");
diff --git a/arch/mips/mti-sead3/sead3-leds.c b/arch/mips/mti-sead3/sead3-leds.c
index 20102a6d4141..c427c5778186 100644
--- a/arch/mips/mti-sead3/sead3-leds.c
+++ b/arch/mips/mti-sead3/sead3-leds.c
@@ -5,7 +5,7 @@
5 * 5 *
6 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. 6 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
7 */ 7 */
8#include <linux/module.h> 8#include <linux/init.h>
9#include <linux/leds.h> 9#include <linux/leds.h>
10#include <linux/platform_device.h> 10#include <linux/platform_device.h>
11 11
@@ -76,8 +76,4 @@ static int __init led_init(void)
76 return platform_device_register(&fled_device); 76 return platform_device_register(&fled_device);
77} 77}
78 78
79module_init(led_init); 79device_initcall(led_init);
80
81MODULE_AUTHOR("Chris Dearman <chris@mips.com>");
82MODULE_LICENSE("GPL");
83MODULE_DESCRIPTION("LED probe driver for SEAD-3");
diff --git a/arch/mips/mti-sead3/sead3-pic32-bus.c b/arch/mips/mti-sead3/sead3-pic32-bus.c
deleted file mode 100644
index 3b12aa5a7c88..000000000000
--- a/arch/mips/mti-sead3/sead3-pic32-bus.c
+++ /dev/null
@@ -1,102 +0,0 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
7 */
8#include <linux/delay.h>
9#include <linux/kernel.h>
10#include <linux/spinlock.h>
11#include <linux/io.h>
12#include <linux/errno.h>
13
14#define PIC32_NULL 0x00
15#define PIC32_RD 0x01
16#define PIC32_SYSRD 0x02
17#define PIC32_WR 0x10
18#define PIC32_SYSWR 0x20
19#define PIC32_IRQ_CLR 0x40
20#define PIC32_STATUS 0x80
21
22#define DELAY() udelay(100) /* FIXME: needed? */
23
24/* spinlock to ensure atomic access to PIC32 */
25static DEFINE_SPINLOCK(pic32_bus_lock);
26
27/* FIXME: io_remap these */
28static void __iomem *bus_xfer = (void __iomem *)0xbf000600;
29static void __iomem *bus_status = (void __iomem *)0xbf000060;
30
31static inline unsigned int ioready(void)
32{
33 return readl(bus_status) & 1;
34}
35
36static inline void wait_ioready(void)
37{
38 do { } while (!ioready());
39}
40
41static inline void wait_ioclear(void)
42{
43 do { } while (ioready());
44}
45
46static inline void check_ioclear(void)
47{
48 if (ioready()) {
49 pr_debug("ioclear: initially busy\n");
50 do {
51 (void) readl(bus_xfer);
52 DELAY();
53 } while (ioready());
54 pr_debug("ioclear: cleared busy\n");
55 }
56}
57
58u32 pic32_bus_readl(u32 reg)
59{
60 unsigned long flags;
61 u32 status, val;
62
63 spin_lock_irqsave(&pic32_bus_lock, flags);
64
65 check_ioclear();
66
67 writel((PIC32_RD << 24) | (reg & 0x00ffffff), bus_xfer);
68 DELAY();
69 wait_ioready();
70 status = readl(bus_xfer);
71 DELAY();
72 val = readl(bus_xfer);
73 wait_ioclear();
74
75 pr_debug("pic32_bus_readl: *%x -> %x (status=%x)\n", reg, val, status);
76
77 spin_unlock_irqrestore(&pic32_bus_lock, flags);
78
79 return val;
80}
81
82void pic32_bus_writel(u32 val, u32 reg)
83{
84 unsigned long flags;
85 u32 status;
86
87 spin_lock_irqsave(&pic32_bus_lock, flags);
88
89 check_ioclear();
90
91 writel((PIC32_WR << 24) | (reg & 0x00ffffff), bus_xfer);
92 DELAY();
93 writel(val, bus_xfer);
94 DELAY();
95 wait_ioready();
96 status = readl(bus_xfer);
97 wait_ioclear();
98
99 pr_debug("pic32_bus_writel: *%x <- %x (status=%x)\n", reg, val, status);
100
101 spin_unlock_irqrestore(&pic32_bus_lock, flags);
102}
diff --git a/arch/mips/mti-sead3/sead3-pic32-i2c-drv.c b/arch/mips/mti-sead3/sead3-pic32-i2c-drv.c
deleted file mode 100644
index 80fe194cfa53..000000000000
--- a/arch/mips/mti-sead3/sead3-pic32-i2c-drv.c
+++ /dev/null
@@ -1,423 +0,0 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
7 */
8#include <linux/delay.h>
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/spinlock.h>
12#include <linux/platform_device.h>
13#include <linux/init.h>
14#include <linux/errno.h>
15#include <linux/i2c.h>
16#include <linux/slab.h>
17
18#define PIC32_I2CxCON 0x0000
19#define PIC32_I2CxCONCLR 0x0004
20#define PIC32_I2CxCONSET 0x0008
21#define PIC32_I2CxCONINV 0x000C
22#define I2CCON_ON (1<<15)
23#define I2CCON_FRZ (1<<14)
24#define I2CCON_SIDL (1<<13)
25#define I2CCON_SCLREL (1<<12)
26#define I2CCON_STRICT (1<<11)
27#define I2CCON_A10M (1<<10)
28#define I2CCON_DISSLW (1<<9)
29#define I2CCON_SMEN (1<<8)
30#define I2CCON_GCEN (1<<7)
31#define I2CCON_STREN (1<<6)
32#define I2CCON_ACKDT (1<<5)
33#define I2CCON_ACKEN (1<<4)
34#define I2CCON_RCEN (1<<3)
35#define I2CCON_PEN (1<<2)
36#define I2CCON_RSEN (1<<1)
37#define I2CCON_SEN (1<<0)
38
39#define PIC32_I2CxSTAT 0x0010
40#define PIC32_I2CxSTATCLR 0x0014
41#define PIC32_I2CxSTATSET 0x0018
42#define PIC32_I2CxSTATINV 0x001C
43#define I2CSTAT_ACKSTAT (1<<15)
44#define I2CSTAT_TRSTAT (1<<14)
45#define I2CSTAT_BCL (1<<10)
46#define I2CSTAT_GCSTAT (1<<9)
47#define I2CSTAT_ADD10 (1<<8)
48#define I2CSTAT_IWCOL (1<<7)
49#define I2CSTAT_I2COV (1<<6)
50#define I2CSTAT_DA (1<<5)
51#define I2CSTAT_P (1<<4)
52#define I2CSTAT_S (1<<3)
53#define I2CSTAT_RW (1<<2)
54#define I2CSTAT_RBF (1<<1)
55#define I2CSTAT_TBF (1<<0)
56
57#define PIC32_I2CxADD 0x0020
58#define PIC32_I2CxADDCLR 0x0024
59#define PIC32_I2CxADDSET 0x0028
60#define PIC32_I2CxADDINV 0x002C
61#define PIC32_I2CxMSK 0x0030
62#define PIC32_I2CxMSKCLR 0x0034
63#define PIC32_I2CxMSKSET 0x0038
64#define PIC32_I2CxMSKINV 0x003C
65#define PIC32_I2CxBRG 0x0040
66#define PIC32_I2CxBRGCLR 0x0044
67#define PIC32_I2CxBRGSET 0x0048
68#define PIC32_I2CxBRGINV 0x004C
69#define PIC32_I2CxTRN 0x0050
70#define PIC32_I2CxTRNCLR 0x0054
71#define PIC32_I2CxTRNSET 0x0058
72#define PIC32_I2CxTRNINV 0x005C
73#define PIC32_I2CxRCV 0x0060
74
75struct i2c_platform_data {
76 u32 base;
77 struct i2c_adapter adap;
78 u32 xfer_timeout;
79 u32 ack_timeout;
80 u32 ctl_timeout;
81};
82
83extern u32 pic32_bus_readl(u32 reg);
84extern void pic32_bus_writel(u32 val, u32 reg);
85
86static inline void
87StartI2C(struct i2c_platform_data *adap)
88{
89 pr_debug("StartI2C\n");
90 pic32_bus_writel(I2CCON_SEN, adap->base + PIC32_I2CxCONSET);
91}
92
93static inline void
94StopI2C(struct i2c_platform_data *adap)
95{
96 pr_debug("StopI2C\n");
97 pic32_bus_writel(I2CCON_PEN, adap->base + PIC32_I2CxCONSET);
98}
99
100static inline void
101AckI2C(struct i2c_platform_data *adap)
102{
103 pr_debug("AckI2C\n");
104 pic32_bus_writel(I2CCON_ACKDT, adap->base + PIC32_I2CxCONCLR);
105 pic32_bus_writel(I2CCON_ACKEN, adap->base + PIC32_I2CxCONSET);
106}
107
108static inline void
109NotAckI2C(struct i2c_platform_data *adap)
110{
111 pr_debug("NakI2C\n");
112 pic32_bus_writel(I2CCON_ACKDT, adap->base + PIC32_I2CxCONSET);
113 pic32_bus_writel(I2CCON_ACKEN, adap->base + PIC32_I2CxCONSET);
114}
115
116static inline int
117IdleI2C(struct i2c_platform_data *adap)
118{
119 int i;
120
121 pr_debug("IdleI2C\n");
122 for (i = 0; i < adap->ctl_timeout; i++) {
123 if (((pic32_bus_readl(adap->base + PIC32_I2CxCON) &
124 (I2CCON_ACKEN | I2CCON_RCEN | I2CCON_PEN | I2CCON_RSEN |
125 I2CCON_SEN)) == 0) &&
126 ((pic32_bus_readl(adap->base + PIC32_I2CxSTAT) &
127 (I2CSTAT_TRSTAT)) == 0))
128 return 0;
129 udelay(1);
130 }
131 return -ETIMEDOUT;
132}
133
134static inline u32
135MasterWriteI2C(struct i2c_platform_data *adap, u32 byte)
136{
137 pr_debug("MasterWriteI2C\n");
138
139 pic32_bus_writel(byte, adap->base + PIC32_I2CxTRN);
140
141 return pic32_bus_readl(adap->base + PIC32_I2CxSTAT) & I2CSTAT_IWCOL;
142}
143
144static inline u32
145MasterReadI2C(struct i2c_platform_data *adap)
146{
147 pr_debug("MasterReadI2C\n");
148
149 pic32_bus_writel(I2CCON_RCEN, adap->base + PIC32_I2CxCONSET);
150
151 while (pic32_bus_readl(adap->base + PIC32_I2CxCON) & I2CCON_RCEN)
152 ;
153
154 pic32_bus_writel(I2CSTAT_I2COV, adap->base + PIC32_I2CxSTATCLR);
155
156 return pic32_bus_readl(adap->base + PIC32_I2CxRCV);
157}
158
159static int
160do_address(struct i2c_platform_data *adap, unsigned int addr, int rd)
161{
162 pr_debug("doaddress\n");
163
164 IdleI2C(adap);
165 StartI2C(adap);
166 IdleI2C(adap);
167
168 addr <<= 1;
169 if (rd)
170 addr |= 1;
171
172 if (MasterWriteI2C(adap, addr))
173 return -EIO;
174 IdleI2C(adap);
175 if (pic32_bus_readl(adap->base + PIC32_I2CxSTAT) & I2CSTAT_ACKSTAT)
176 return -EIO;
177 return 0;
178}
179
180static int
181i2c_read(struct i2c_platform_data *adap, unsigned char *buf,
182 unsigned int len)
183{
184 int i;
185 u32 data;
186
187 pr_debug("i2c_read\n");
188
189 i = 0;
190 while (i < len) {
191 data = MasterReadI2C(adap);
192 buf[i++] = data;
193 if (i < len)
194 AckI2C(adap);
195 else
196 NotAckI2C(adap);
197 }
198
199 StopI2C(adap);
200 IdleI2C(adap);
201 return 0;
202}
203
204static int
205i2c_write(struct i2c_platform_data *adap, unsigned char *buf,
206 unsigned int len)
207{
208 int i;
209 u32 data;
210
211 pr_debug("i2c_write\n");
212
213 i = 0;
214 while (i < len) {
215 data = buf[i];
216 if (MasterWriteI2C(adap, data))
217 return -EIO;
218 IdleI2C(adap);
219 if (pic32_bus_readl(adap->base + PIC32_I2CxSTAT) &
220 I2CSTAT_ACKSTAT)
221 return -EIO;
222 i++;
223 }
224
225 StopI2C(adap);
226 IdleI2C(adap);
227 return 0;
228}
229
230static int
231platform_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs, int num)
232{
233 struct i2c_platform_data *adap = i2c_adap->algo_data;
234 struct i2c_msg *p;
235 int i, err = 0;
236
237 pr_debug("platform_xfer\n");
238 for (i = 0; i < num; i++) {
239#define __BUFSIZE 80
240 int ii;
241 static char buf[__BUFSIZE];
242 char *b = buf;
243
244 p = &msgs[i];
245 b += sprintf(buf, " [%d bytes]", p->len);
246 if ((p->flags & I2C_M_RD) == 0) {
247 for (ii = 0; ii < p->len; ii++) {
248 if (b < &buf[__BUFSIZE-4]) {
249 b += sprintf(b, " %02x", p->buf[ii]);
250 } else {
251 strcat(b, "...");
252 break;
253 }
254 }
255 }
256 pr_debug("xfer%d: DevAddr: %04x Op:%s Data:%s\n", i, p->addr,
257 (p->flags & I2C_M_RD) ? "Rd" : "Wr", buf);
258 }
259
260
261 for (i = 0; !err && i < num; i++) {
262 p = &msgs[i];
263 err = do_address(adap, p->addr, p->flags & I2C_M_RD);
264 if (err || !p->len)
265 continue;
266 if (p->flags & I2C_M_RD)
267 err = i2c_read(adap, p->buf, p->len);
268 else
269 err = i2c_write(adap, p->buf, p->len);
270 }
271
272 /* Return the number of messages processed, or the error code. */
273 if (err == 0)
274 err = num;
275
276 return err;
277}
278
279static u32
280platform_func(struct i2c_adapter *adap)
281{
282 pr_debug("platform_algo\n");
283 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
284}
285
286static const struct i2c_algorithm platform_algo = {
287 .master_xfer = platform_xfer,
288 .functionality = platform_func,
289};
290
291static void i2c_platform_setup(struct i2c_platform_data *priv)
292{
293 pr_debug("i2c_platform_setup\n");
294
295 pic32_bus_writel(500, priv->base + PIC32_I2CxBRG);
296 pic32_bus_writel(I2CCON_ON, priv->base + PIC32_I2CxCONCLR);
297 pic32_bus_writel(I2CCON_ON, priv->base + PIC32_I2CxCONSET);
298 pic32_bus_writel((I2CSTAT_BCL | I2CSTAT_IWCOL),
299 (priv->base + PIC32_I2CxSTATCLR));
300}
301
302static void i2c_platform_disable(struct i2c_platform_data *priv)
303{
304 pr_debug("i2c_platform_disable\n");
305}
306
307static int i2c_platform_probe(struct platform_device *pdev)
308{
309 struct i2c_platform_data *priv;
310 struct resource *r;
311 int ret;
312
313 pr_debug("i2c_platform_probe\n");
314 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
315 if (!r)
316 return -ENODEV;
317
318 priv = devm_kzalloc(&pdev->dev, sizeof(struct i2c_platform_data),
319 GFP_KERNEL);
320 if (!priv)
321 return -ENOMEM;
322
323 /* FIXME: need to allocate resource in PIC32 space */
324#if 0
325 priv->base = bus_request_region(r->start, resource_size(r),
326 pdev->name);
327#else
328 priv->base = r->start;
329#endif
330 if (!priv->base)
331 return -EBUSY;
332
333 priv->xfer_timeout = 200;
334 priv->ack_timeout = 200;
335 priv->ctl_timeout = 200;
336
337 priv->adap.nr = pdev->id;
338 priv->adap.algo = &platform_algo;
339 priv->adap.algo_data = priv;
340 priv->adap.dev.parent = &pdev->dev;
341 strlcpy(priv->adap.name, "PIC32 I2C", sizeof(priv->adap.name));
342
343 i2c_platform_setup(priv);
344
345 ret = i2c_add_numbered_adapter(&priv->adap);
346 if (ret) {
347 i2c_platform_disable(priv);
348 return ret;
349 }
350
351 platform_set_drvdata(pdev, priv);
352 return 0;
353}
354
355static int i2c_platform_remove(struct platform_device *pdev)
356{
357 struct i2c_platform_data *priv = platform_get_drvdata(pdev);
358
359 pr_debug("i2c_platform_remove\n");
360 platform_set_drvdata(pdev, NULL);
361 i2c_del_adapter(&priv->adap);
362 i2c_platform_disable(priv);
363 return 0;
364}
365
366#ifdef CONFIG_PM
367static int
368i2c_platform_suspend(struct platform_device *pdev, pm_message_t state)
369{
370 struct i2c_platform_data *priv = platform_get_drvdata(pdev);
371
372 dev_dbg(&pdev->dev, "i2c_platform_disable\n");
373 i2c_platform_disable(priv);
374
375 return 0;
376}
377
378static int
379i2c_platform_resume(struct platform_device *pdev)
380{
381 struct i2c_platform_data *priv = platform_get_drvdata(pdev);
382
383 dev_dbg(&pdev->dev, "i2c_platform_setup\n");
384 i2c_platform_setup(priv);
385
386 return 0;
387}
388#else
389#define i2c_platform_suspend NULL
390#define i2c_platform_resume NULL
391#endif
392
393static struct platform_driver i2c_platform_driver = {
394 .driver = {
395 .name = "i2c_pic32",
396 .owner = THIS_MODULE,
397 },
398 .probe = i2c_platform_probe,
399 .remove = i2c_platform_remove,
400 .suspend = i2c_platform_suspend,
401 .resume = i2c_platform_resume,
402};
403
404static int __init
405i2c_platform_init(void)
406{
407 pr_debug("i2c_platform_init\n");
408 return platform_driver_register(&i2c_platform_driver);
409}
410
411static void __exit
412i2c_platform_exit(void)
413{
414 pr_debug("i2c_platform_exit\n");
415 platform_driver_unregister(&i2c_platform_driver);
416}
417
418MODULE_AUTHOR("Chris Dearman, MIPS Technologies INC.");
419MODULE_DESCRIPTION("PIC32 I2C driver");
420MODULE_LICENSE("GPL");
421
422module_init(i2c_platform_init);
423module_exit(i2c_platform_exit);
diff --git a/arch/mips/netlogic/xlp/Makefile b/arch/mips/netlogic/xlp/Makefile
index be358a8050c5..6b43af0a34d9 100644
--- a/arch/mips/netlogic/xlp/Makefile
+++ b/arch/mips/netlogic/xlp/Makefile
@@ -1,6 +1,10 @@
1obj-y += setup.o nlm_hal.o cop2-ex.o dt.o 1obj-y += setup.o nlm_hal.o cop2-ex.o dt.o
2obj-$(CONFIG_SMP) += wakeup.o 2obj-$(CONFIG_SMP) += wakeup.o
3obj-$(CONFIG_USB) += usb-init.o 3ifdef CONFIG_USB
4obj-$(CONFIG_USB) += usb-init-xlp2.o 4obj-y += usb-init.o
5obj-$(CONFIG_SATA_AHCI) += ahci-init.o 5obj-y += usb-init-xlp2.o
6obj-$(CONFIG_SATA_AHCI) += ahci-init-xlp2.o 6endif
7ifdef CONFIG_SATA_AHCI
8obj-y += ahci-init.o
9obj-y += ahci-init-xlp2.o
10endif
diff --git a/arch/mips/oprofile/backtrace.c b/arch/mips/oprofile/backtrace.c
index 6854ed5097d2..83a1dfd8f0e3 100644
--- a/arch/mips/oprofile/backtrace.c
+++ b/arch/mips/oprofile/backtrace.c
@@ -92,7 +92,7 @@ static inline int unwind_user_frame(struct stackframe *old_frame,
92 /* This marks the end of the previous function, 92 /* This marks the end of the previous function,
93 which means we overran. */ 93 which means we overran. */
94 break; 94 break;
95 stack_size = (unsigned) stack_adjustment; 95 stack_size = (unsigned long) stack_adjustment;
96 } else if (is_ra_save_ins(&ip)) { 96 } else if (is_ra_save_ins(&ip)) {
97 int ra_slot = ip.i_format.simmediate; 97 int ra_slot = ip.i_format.simmediate;
98 if (ra_slot < 0) 98 if (ra_slot < 0)
diff --git a/arch/mips/pci/msi-xlp.c b/arch/mips/pci/msi-xlp.c
index fa374fe3746b..f7ac3edda1b2 100644
--- a/arch/mips/pci/msi-xlp.c
+++ b/arch/mips/pci/msi-xlp.c
@@ -443,10 +443,8 @@ static int xlp_setup_msix(uint64_t lnkbase, int node, int link,
443 msg.data = 0xc00 | msixvec; 443 msg.data = 0xc00 | msixvec;
444 444
445 ret = irq_set_msi_desc(xirq, desc); 445 ret = irq_set_msi_desc(xirq, desc);
446 if (ret < 0) { 446 if (ret < 0)
447 destroy_irq(xirq);
448 return ret; 447 return ret;
449 }
450 448
451 write_msi_msg(xirq, &msg); 449 write_msi_msg(xirq, &msg);
452 return 0; 450 return 0;
diff --git a/arch/mips/pci/pci-lantiq.c b/arch/mips/pci/pci-lantiq.c
index 37fe8e7887e2..d3ed15b2b2d1 100644
--- a/arch/mips/pci/pci-lantiq.c
+++ b/arch/mips/pci/pci-lantiq.c
@@ -215,17 +215,12 @@ static int ltq_pci_probe(struct platform_device *pdev)
215 215
216 pci_clear_flags(PCI_PROBE_ONLY); 216 pci_clear_flags(PCI_PROBE_ONLY);
217 217
218 res_cfg = platform_get_resource(pdev, IORESOURCE_MEM, 0);
219 res_bridge = platform_get_resource(pdev, IORESOURCE_MEM, 1); 218 res_bridge = platform_get_resource(pdev, IORESOURCE_MEM, 1);
220 if (!res_cfg || !res_bridge) {
221 dev_err(&pdev->dev, "missing memory resources\n");
222 return -EINVAL;
223 }
224
225 ltq_pci_membase = devm_ioremap_resource(&pdev->dev, res_bridge); 219 ltq_pci_membase = devm_ioremap_resource(&pdev->dev, res_bridge);
226 if (IS_ERR(ltq_pci_membase)) 220 if (IS_ERR(ltq_pci_membase))
227 return PTR_ERR(ltq_pci_membase); 221 return PTR_ERR(ltq_pci_membase);
228 222
223 res_cfg = platform_get_resource(pdev, IORESOURCE_MEM, 0);
229 ltq_pci_mapped_cfg = devm_ioremap_resource(&pdev->dev, res_cfg); 224 ltq_pci_mapped_cfg = devm_ioremap_resource(&pdev->dev, res_cfg);
230 if (IS_ERR(ltq_pci_mapped_cfg)) 225 if (IS_ERR(ltq_pci_mapped_cfg))
231 return PTR_ERR(ltq_pci_mapped_cfg); 226 return PTR_ERR(ltq_pci_mapped_cfg);
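
The pci-lantiq.c change drops the open-coded NULL check on the platform resources because devm_ioremap_resource() already validates the resource and returns an ERR_PTR with a diagnostic on failure. A hedged kernel-style sketch of the resulting probe pattern (not buildable outside a kernel tree; "example_probe" is a placeholder name):

    #include <linux/platform_device.h>
    #include <linux/io.h>
    #include <linux/err.h>

    static int example_probe(struct platform_device *pdev)
    {
            struct resource *res;
            void __iomem *base;

            res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
            /* No explicit !res check needed: devm_ioremap_resource() rejects a
             * NULL resource and prints the error message for us. */
            base = devm_ioremap_resource(&pdev->dev, res);
            if (IS_ERR(base))
                    return PTR_ERR(base);

            return 0;
    }
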
diff --git a/arch/mips/pmcs-msp71xx/msp_irq.c b/arch/mips/pmcs-msp71xx/msp_irq.c
index f914c753de21..8d53d7a2ed45 100644
--- a/arch/mips/pmcs-msp71xx/msp_irq.c
+++ b/arch/mips/pmcs-msp71xx/msp_irq.c
@@ -16,6 +16,7 @@
16#include <linux/time.h> 16#include <linux/time.h>
17 17
18#include <asm/irq_cpu.h> 18#include <asm/irq_cpu.h>
19#include <asm/setup.h>
19 20
20#include <msp_int.h> 21#include <msp_int.h>
21 22
diff --git a/arch/mips/pmcs-msp71xx/msp_irq_cic.c b/arch/mips/pmcs-msp71xx/msp_irq_cic.c
index b8df2f7b3328..1207ec4dfb77 100644
--- a/arch/mips/pmcs-msp71xx/msp_irq_cic.c
+++ b/arch/mips/pmcs-msp71xx/msp_irq_cic.c
@@ -131,11 +131,11 @@ static int msp_cic_irq_set_affinity(struct irq_data *d,
131 int cpu; 131 int cpu;
132 unsigned long flags; 132 unsigned long flags;
133 unsigned int mtflags; 133 unsigned int mtflags;
134 unsigned long imask = (1 << (irq - MSP_CIC_INTBASE)); 134 unsigned long imask = (1 << (d->irq - MSP_CIC_INTBASE));
135 volatile u32 *cic_mask = (volatile u32 *)CIC_VPE0_MSK_REG; 135 volatile u32 *cic_mask = (volatile u32 *)CIC_VPE0_MSK_REG;
136 136
137 /* timer balancing should be disabled in kernel code */ 137 /* timer balancing should be disabled in kernel code */
138 BUG_ON(irq == MSP_INT_VPE0_TIMER || irq == MSP_INT_VPE1_TIMER); 138 BUG_ON(d->irq == MSP_INT_VPE0_TIMER || d->irq == MSP_INT_VPE1_TIMER);
139 139
140 LOCK_CORE(flags, mtflags); 140 LOCK_CORE(flags, mtflags);
141 /* enable if any of each VPE's TCs require this IRQ */ 141 /* enable if any of each VPE's TCs require this IRQ */
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index a95c00f5fb96..a304bcc37e4f 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -107,6 +107,7 @@ static void router_recurse(klrou_t *router_a, klrou_t *router_b, int depth)
107} 107}
108 108
109unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES]; 109unsigned char __node_distances[MAX_COMPACT_NODES][MAX_COMPACT_NODES];
110EXPORT_SYMBOL(__node_distances);
110 111
111static int __init compute_node_distance(nasid_t nasid_a, nasid_t nasid_b) 112static int __init compute_node_distance(nasid_t nasid_a, nasid_t nasid_b)
112{ 113{
diff --git a/arch/mips/sibyte/Makefile b/arch/mips/sibyte/Makefile
index c8ed2c807e69..455c40d6d625 100644
--- a/arch/mips/sibyte/Makefile
+++ b/arch/mips/sibyte/Makefile
@@ -25,3 +25,4 @@ obj-$(CONFIG_SIBYTE_RHONE) += swarm/
25obj-$(CONFIG_SIBYTE_SENTOSA) += swarm/ 25obj-$(CONFIG_SIBYTE_SENTOSA) += swarm/
26obj-$(CONFIG_SIBYTE_SWARM) += swarm/ 26obj-$(CONFIG_SIBYTE_SWARM) += swarm/
27obj-$(CONFIG_SIBYTE_BIGSUR) += swarm/ 27obj-$(CONFIG_SIBYTE_BIGSUR) += swarm/
28obj-$(CONFIG_SIBYTE_LITTLESUR) += swarm/
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index 4006964d8e12..a5cb070b54bf 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -9,6 +9,8 @@
9#include <asm/errno.h> 9#include <asm/errno.h>
10#include <asm-generic/uaccess-unaligned.h> 10#include <asm-generic/uaccess-unaligned.h>
11 11
12#include <linux/bug.h>
13
12#define VERIFY_READ 0 14#define VERIFY_READ 0
13#define VERIFY_WRITE 1 15#define VERIFY_WRITE 1
14 16
@@ -28,11 +30,6 @@
28 * that put_user is the same as __put_user, etc. 30 * that put_user is the same as __put_user, etc.
29 */ 31 */
30 32
31extern int __get_kernel_bad(void);
32extern int __get_user_bad(void);
33extern int __put_kernel_bad(void);
34extern int __put_user_bad(void);
35
36static inline long access_ok(int type, const void __user * addr, 33static inline long access_ok(int type, const void __user * addr,
37 unsigned long size) 34 unsigned long size)
38{ 35{
@@ -43,8 +40,8 @@ static inline long access_ok(int type, const void __user * addr,
43#define get_user __get_user 40#define get_user __get_user
44 41
45#if !defined(CONFIG_64BIT) 42#if !defined(CONFIG_64BIT)
46#define LDD_KERNEL(ptr) __get_kernel_bad(); 43#define LDD_KERNEL(ptr) BUILD_BUG()
47#define LDD_USER(ptr) __get_user_bad(); 44#define LDD_USER(ptr) BUILD_BUG()
48#define STD_KERNEL(x, ptr) __put_kernel_asm64(x,ptr) 45#define STD_KERNEL(x, ptr) __put_kernel_asm64(x,ptr)
49#define STD_USER(x, ptr) __put_user_asm64(x,ptr) 46#define STD_USER(x, ptr) __put_user_asm64(x,ptr)
50#define ASM_WORD_INSN ".word\t" 47#define ASM_WORD_INSN ".word\t"
@@ -94,7 +91,7 @@ struct exception_data {
94 case 2: __get_kernel_asm("ldh",ptr); break; \ 91 case 2: __get_kernel_asm("ldh",ptr); break; \
95 case 4: __get_kernel_asm("ldw",ptr); break; \ 92 case 4: __get_kernel_asm("ldw",ptr); break; \
96 case 8: LDD_KERNEL(ptr); break; \ 93 case 8: LDD_KERNEL(ptr); break; \
97 default: __get_kernel_bad(); break; \ 94 default: BUILD_BUG(); break; \
98 } \ 95 } \
99 } \ 96 } \
100 else { \ 97 else { \
@@ -103,7 +100,7 @@ struct exception_data {
103 case 2: __get_user_asm("ldh",ptr); break; \ 100 case 2: __get_user_asm("ldh",ptr); break; \
104 case 4: __get_user_asm("ldw",ptr); break; \ 101 case 4: __get_user_asm("ldw",ptr); break; \
105 case 8: LDD_USER(ptr); break; \ 102 case 8: LDD_USER(ptr); break; \
106 default: __get_user_bad(); break; \ 103 default: BUILD_BUG(); break; \
107 } \ 104 } \
108 } \ 105 } \
109 \ 106 \
@@ -136,7 +133,7 @@ struct exception_data {
136 case 2: __put_kernel_asm("sth",__x,ptr); break; \ 133 case 2: __put_kernel_asm("sth",__x,ptr); break; \
137 case 4: __put_kernel_asm("stw",__x,ptr); break; \ 134 case 4: __put_kernel_asm("stw",__x,ptr); break; \
138 case 8: STD_KERNEL(__x,ptr); break; \ 135 case 8: STD_KERNEL(__x,ptr); break; \
139 default: __put_kernel_bad(); break; \ 136 default: BUILD_BUG(); break; \
140 } \ 137 } \
141 } \ 138 } \
142 else { \ 139 else { \
@@ -145,7 +142,7 @@ struct exception_data {
145 case 2: __put_user_asm("sth",__x,ptr); break; \ 142 case 2: __put_user_asm("sth",__x,ptr); break; \
146 case 4: __put_user_asm("stw",__x,ptr); break; \ 143 case 4: __put_user_asm("stw",__x,ptr); break; \
147 case 8: STD_USER(__x,ptr); break; \ 144 case 8: STD_USER(__x,ptr); break; \
148 default: __put_user_bad(); break; \ 145 default: BUILD_BUG(); break; \
149 } \ 146 } \
150 } \ 147 } \
151 \ 148 \
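
The parisc uaccess.h change replaces calls to never-defined externs (which only failed at link time) with BUILD_BUG(), which fails at compile time when an unsupported access size survives in a code path. A standalone approximation of that technique using GCC's error attribute (also accepted by recent Clang); like the kernel build, it relies on optimization removing the dead default branch, so compile with e.g. gcc -O2:

    #include <stdio.h>
    #include <string.h>

    /* Rough stand-in for the kernel's BUILD_BUG(): if a call to this survives
     * optimization, the compiler emits an error instead of an object file. */
    extern void __unsupported_size(void)
            __attribute__((error("unsupported access size")));

    #define GET_VAL(x, ptr) do {                                            \
            switch (sizeof(*(ptr))) {                                       \
            case 1: case 2: case 4: case 8:                                 \
                    memcpy(&(x), (ptr), sizeof(*(ptr)));                    \
                    break;                                                  \
            default:                                                        \
                    __unsupported_size();   /* BUILD_BUG() in the kernel */ \
                    break;                                                  \
            }                                                               \
    } while (0)

    int main(void)
    {
            int src = 42, dst = 0;

            GET_VAL(dst, &src);             /* size 4: compiles and copies */
            printf("%d\n", dst);
            return 0;
    }
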
diff --git a/arch/parisc/include/uapi/asm/bitsperlong.h b/arch/parisc/include/uapi/asm/bitsperlong.h
index 75196b415d3f..e0a23c7bdd43 100644
--- a/arch/parisc/include/uapi/asm/bitsperlong.h
+++ b/arch/parisc/include/uapi/asm/bitsperlong.h
@@ -1,13 +1,7 @@
1#ifndef __ASM_PARISC_BITSPERLONG_H 1#ifndef __ASM_PARISC_BITSPERLONG_H
2#define __ASM_PARISC_BITSPERLONG_H 2#define __ASM_PARISC_BITSPERLONG_H
3 3
4/* 4#if defined(__LP64__)
5 * using CONFIG_* outside of __KERNEL__ is wrong,
6 * __LP64__ was also removed from headers, so what
7 * is the right approach on parisc?
8 * -arnd
9 */
10#if (defined(__KERNEL__) && defined(CONFIG_64BIT)) || defined (__LP64__)
11#define __BITS_PER_LONG 64 5#define __BITS_PER_LONG 64
12#define SHIFT_PER_LONG 6 6#define SHIFT_PER_LONG 6
13#else 7#else
diff --git a/arch/parisc/include/uapi/asm/msgbuf.h b/arch/parisc/include/uapi/asm/msgbuf.h
index fe88f2649418..342138983914 100644
--- a/arch/parisc/include/uapi/asm/msgbuf.h
+++ b/arch/parisc/include/uapi/asm/msgbuf.h
@@ -1,6 +1,8 @@
1#ifndef _PARISC_MSGBUF_H 1#ifndef _PARISC_MSGBUF_H
2#define _PARISC_MSGBUF_H 2#define _PARISC_MSGBUF_H
3 3
4#include <asm/bitsperlong.h>
5
4/* 6/*
5 * The msqid64_ds structure for parisc architecture, copied from sparc. 7 * The msqid64_ds structure for parisc architecture, copied from sparc.
6 * Note extra padding because this structure is passed back and forth 8 * Note extra padding because this structure is passed back and forth
@@ -13,15 +15,15 @@
13 15
14struct msqid64_ds { 16struct msqid64_ds {
15 struct ipc64_perm msg_perm; 17 struct ipc64_perm msg_perm;
16#ifndef CONFIG_64BIT 18#if __BITS_PER_LONG != 64
17 unsigned int __pad1; 19 unsigned int __pad1;
18#endif 20#endif
19 __kernel_time_t msg_stime; /* last msgsnd time */ 21 __kernel_time_t msg_stime; /* last msgsnd time */
20#ifndef CONFIG_64BIT 22#if __BITS_PER_LONG != 64
21 unsigned int __pad2; 23 unsigned int __pad2;
22#endif 24#endif
23 __kernel_time_t msg_rtime; /* last msgrcv time */ 25 __kernel_time_t msg_rtime; /* last msgrcv time */
24#ifndef CONFIG_64BIT 26#if __BITS_PER_LONG != 64
25 unsigned int __pad3; 27 unsigned int __pad3;
26#endif 28#endif
27 __kernel_time_t msg_ctime; /* last change time */ 29 __kernel_time_t msg_ctime; /* last change time */
diff --git a/arch/parisc/include/uapi/asm/sembuf.h b/arch/parisc/include/uapi/asm/sembuf.h
index 1e59ffd3bd1e..f01d89e30d73 100644
--- a/arch/parisc/include/uapi/asm/sembuf.h
+++ b/arch/parisc/include/uapi/asm/sembuf.h
@@ -1,6 +1,8 @@
1#ifndef _PARISC_SEMBUF_H 1#ifndef _PARISC_SEMBUF_H
2#define _PARISC_SEMBUF_H 2#define _PARISC_SEMBUF_H
3 3
4#include <asm/bitsperlong.h>
5
4/* 6/*
5 * The semid64_ds structure for parisc architecture. 7 * The semid64_ds structure for parisc architecture.
6 * Note extra padding because this structure is passed back and forth 8 * Note extra padding because this structure is passed back and forth
@@ -13,11 +15,11 @@
13 15
14struct semid64_ds { 16struct semid64_ds {
15 struct ipc64_perm sem_perm; /* permissions .. see ipc.h */ 17 struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
16#ifndef CONFIG_64BIT 18#if __BITS_PER_LONG != 64
17 unsigned int __pad1; 19 unsigned int __pad1;
18#endif 20#endif
19 __kernel_time_t sem_otime; /* last semop time */ 21 __kernel_time_t sem_otime; /* last semop time */
20#ifndef CONFIG_64BIT 22#if __BITS_PER_LONG != 64
21 unsigned int __pad2; 23 unsigned int __pad2;
22#endif 24#endif
23 __kernel_time_t sem_ctime; /* last change time */ 25 __kernel_time_t sem_ctime; /* last change time */
diff --git a/arch/parisc/include/uapi/asm/shmbuf.h b/arch/parisc/include/uapi/asm/shmbuf.h
index 0a3eada1863b..8496c38560c6 100644
--- a/arch/parisc/include/uapi/asm/shmbuf.h
+++ b/arch/parisc/include/uapi/asm/shmbuf.h
@@ -1,6 +1,8 @@
1#ifndef _PARISC_SHMBUF_H 1#ifndef _PARISC_SHMBUF_H
2#define _PARISC_SHMBUF_H 2#define _PARISC_SHMBUF_H
3 3
4#include <asm/bitsperlong.h>
5
4/* 6/*
5 * The shmid64_ds structure for parisc architecture. 7 * The shmid64_ds structure for parisc architecture.
6 * Note extra padding because this structure is passed back and forth 8 * Note extra padding because this structure is passed back and forth
@@ -13,19 +15,19 @@
13 15
14struct shmid64_ds { 16struct shmid64_ds {
15 struct ipc64_perm shm_perm; /* operation perms */ 17 struct ipc64_perm shm_perm; /* operation perms */
16#ifndef CONFIG_64BIT 18#if __BITS_PER_LONG != 64
17 unsigned int __pad1; 19 unsigned int __pad1;
18#endif 20#endif
19 __kernel_time_t shm_atime; /* last attach time */ 21 __kernel_time_t shm_atime; /* last attach time */
20#ifndef CONFIG_64BIT 22#if __BITS_PER_LONG != 64
21 unsigned int __pad2; 23 unsigned int __pad2;
22#endif 24#endif
23 __kernel_time_t shm_dtime; /* last detach time */ 25 __kernel_time_t shm_dtime; /* last detach time */
24#ifndef CONFIG_64BIT 26#if __BITS_PER_LONG != 64
25 unsigned int __pad3; 27 unsigned int __pad3;
26#endif 28#endif
27 __kernel_time_t shm_ctime; /* last change time */ 29 __kernel_time_t shm_ctime; /* last change time */
28#ifndef CONFIG_64BIT 30#if __BITS_PER_LONG != 64
29 unsigned int __pad4; 31 unsigned int __pad4;
30#endif 32#endif
31 size_t shm_segsz; /* size of segment (bytes) */ 33 size_t shm_segsz; /* size of segment (bytes) */
@@ -36,23 +38,16 @@ struct shmid64_ds {
36 unsigned int __unused2; 38 unsigned int __unused2;
37}; 39};
38 40
39#ifdef CONFIG_64BIT
40/* The 'unsigned int' (formerly 'unsigned long') data types below will
41 * ensure that a 32-bit app calling shmctl(*,IPC_INFO,*) will work on
42 * a wide kernel, but if some of these values are meant to contain pointers
43 * they may need to be 'long long' instead. -PB XXX FIXME
44 */
45#endif
46struct shminfo64 { 41struct shminfo64 {
47 unsigned int shmmax; 42 unsigned long shmmax;
48 unsigned int shmmin; 43 unsigned long shmmin;
49 unsigned int shmmni; 44 unsigned long shmmni;
50 unsigned int shmseg; 45 unsigned long shmseg;
51 unsigned int shmall; 46 unsigned long shmall;
52 unsigned int __unused1; 47 unsigned long __unused1;
53 unsigned int __unused2; 48 unsigned long __unused2;
54 unsigned int __unused3; 49 unsigned long __unused3;
55 unsigned int __unused4; 50 unsigned long __unused4;
56}; 51};
57 52
58#endif /* _PARISC_SHMBUF_H */ 53#endif /* _PARISC_SHMBUF_H */
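
The parisc IPC header changes key the explicit padding off __BITS_PER_LONG (from the uapi <asm/bitsperlong.h>) instead of the kernel-only CONFIG_64BIT, which is never defined when userspace includes these headers, so the padding would otherwise silently vanish and break the ABI. A small standalone illustration of the guard, assuming a Linux system with the sanitized kernel headers installed (the struct itself is made up):

    #include <stdio.h>
    #include <asm/bitsperlong.h>    /* uapi header: defines __BITS_PER_LONG for userspace too */

    struct sample {
            long long perm;
    #if __BITS_PER_LONG != 64       /* CONFIG_64BIT would never be set here */
            unsigned int __pad1;    /* keeps the next field in the 64-bit kernel's slot */
    #endif
            long stime;             /* stand-in for __kernel_time_t */
    };

    int main(void)
    {
            printf("__BITS_PER_LONG = %d, sizeof(struct sample) = %zu\n",
                   __BITS_PER_LONG, sizeof(struct sample));
            return 0;
    }
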
diff --git a/arch/parisc/include/uapi/asm/signal.h b/arch/parisc/include/uapi/asm/signal.h
index 10df7079f4cd..e26043b73f5d 100644
--- a/arch/parisc/include/uapi/asm/signal.h
+++ b/arch/parisc/include/uapi/asm/signal.h
@@ -85,7 +85,7 @@
85struct siginfo; 85struct siginfo;
86 86
87/* Type of a signal handler. */ 87/* Type of a signal handler. */
88#ifdef CONFIG_64BIT 88#if defined(__LP64__)
89/* function pointers on 64-bit parisc are pointers to little structs and the 89/* function pointers on 64-bit parisc are pointers to little structs and the
90 * compiler doesn't support code which changes or tests the address of 90 * compiler doesn't support code which changes or tests the address of
91 * the function in the little struct. This is really ugly -PB 91 * the function in the little struct. This is really ugly -PB
diff --git a/arch/parisc/include/uapi/asm/unistd.h b/arch/parisc/include/uapi/asm/unistd.h
index 8667f18be238..5f5c0373de63 100644
--- a/arch/parisc/include/uapi/asm/unistd.h
+++ b/arch/parisc/include/uapi/asm/unistd.h
@@ -833,8 +833,9 @@
833#define __NR_seccomp (__NR_Linux + 338) 833#define __NR_seccomp (__NR_Linux + 338)
834#define __NR_getrandom (__NR_Linux + 339) 834#define __NR_getrandom (__NR_Linux + 339)
835#define __NR_memfd_create (__NR_Linux + 340) 835#define __NR_memfd_create (__NR_Linux + 340)
836#define __NR_bpf (__NR_Linux + 341)
836 837
837#define __NR_Linux_syscalls (__NR_memfd_create + 1) 838#define __NR_Linux_syscalls (__NR_bpf + 1)
838 839
839 840
840#define __IGNORE_select /* newselect */ 841#define __IGNORE_select /* newselect */
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index b563d9c8268b..fe4f0b89bf8f 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -286,11 +286,11 @@
286 ENTRY_COMP(msgsnd) 286 ENTRY_COMP(msgsnd)
287 ENTRY_COMP(msgrcv) 287 ENTRY_COMP(msgrcv)
288 ENTRY_SAME(msgget) /* 190 */ 288 ENTRY_SAME(msgget) /* 190 */
289 ENTRY_SAME(msgctl) 289 ENTRY_COMP(msgctl)
290 ENTRY_SAME(shmat) 290 ENTRY_COMP(shmat)
291 ENTRY_SAME(shmdt) 291 ENTRY_SAME(shmdt)
292 ENTRY_SAME(shmget) 292 ENTRY_SAME(shmget)
293 ENTRY_SAME(shmctl) /* 195 */ 293 ENTRY_COMP(shmctl) /* 195 */
294 ENTRY_SAME(ni_syscall) /* streams1 */ 294 ENTRY_SAME(ni_syscall) /* streams1 */
295 ENTRY_SAME(ni_syscall) /* streams2 */ 295 ENTRY_SAME(ni_syscall) /* streams2 */
296 ENTRY_SAME(lstat64) 296 ENTRY_SAME(lstat64)
@@ -323,7 +323,7 @@
323 ENTRY_SAME(epoll_ctl) /* 225 */ 323 ENTRY_SAME(epoll_ctl) /* 225 */
324 ENTRY_SAME(epoll_wait) 324 ENTRY_SAME(epoll_wait)
325 ENTRY_SAME(remap_file_pages) 325 ENTRY_SAME(remap_file_pages)
326 ENTRY_SAME(semtimedop) 326 ENTRY_COMP(semtimedop)
327 ENTRY_COMP(mq_open) 327 ENTRY_COMP(mq_open)
328 ENTRY_SAME(mq_unlink) /* 230 */ 328 ENTRY_SAME(mq_unlink) /* 230 */
329 ENTRY_COMP(mq_timedsend) 329 ENTRY_COMP(mq_timedsend)
@@ -436,6 +436,7 @@
436 ENTRY_SAME(seccomp) 436 ENTRY_SAME(seccomp)
437 ENTRY_SAME(getrandom) 437 ENTRY_SAME(getrandom)
438 ENTRY_SAME(memfd_create) /* 340 */ 438 ENTRY_SAME(memfd_create) /* 340 */
439 ENTRY_SAME(bpf)
439 440
440 /* Nothing yet */ 441 /* Nothing yet */
441 442
diff --git a/arch/powerpc/configs/pseries_le_defconfig b/arch/powerpc/configs/pseries_le_defconfig
index 63392f4b29a4..d2008887eb8c 100644
--- a/arch/powerpc/configs/pseries_le_defconfig
+++ b/arch/powerpc/configs/pseries_le_defconfig
@@ -48,7 +48,6 @@ CONFIG_KEXEC=y
48CONFIG_IRQ_ALL_CPUS=y 48CONFIG_IRQ_ALL_CPUS=y
49CONFIG_MEMORY_HOTPLUG=y 49CONFIG_MEMORY_HOTPLUG=y
50CONFIG_MEMORY_HOTREMOVE=y 50CONFIG_MEMORY_HOTREMOVE=y
51CONFIG_CMA=y
52CONFIG_PPC_64K_PAGES=y 51CONFIG_PPC_64K_PAGES=y
53CONFIG_PPC_SUBPAGE_PROT=y 52CONFIG_PPC_SUBPAGE_PROT=y
54CONFIG_SCHED_SMT=y 53CONFIG_SCHED_SMT=y
@@ -138,6 +137,7 @@ CONFIG_NETCONSOLE=y
138CONFIG_NETPOLL_TRAP=y 137CONFIG_NETPOLL_TRAP=y
139CONFIG_TUN=m 138CONFIG_TUN=m
140CONFIG_VIRTIO_NET=m 139CONFIG_VIRTIO_NET=m
140CONFIG_VHOST_NET=m
141CONFIG_VORTEX=y 141CONFIG_VORTEX=y
142CONFIG_ACENIC=m 142CONFIG_ACENIC=m
143CONFIG_ACENIC_OMIT_TIGON_I=y 143CONFIG_ACENIC_OMIT_TIGON_I=y
@@ -303,4 +303,9 @@ CONFIG_CRYPTO_LZO=m
303# CONFIG_CRYPTO_ANSI_CPRNG is not set 303# CONFIG_CRYPTO_ANSI_CPRNG is not set
304CONFIG_CRYPTO_DEV_NX=y 304CONFIG_CRYPTO_DEV_NX=y
305CONFIG_CRYPTO_DEV_NX_ENCRYPT=m 305CONFIG_CRYPTO_DEV_NX_ENCRYPT=m
306CONFIG_VIRTUALIZATION=y
307CONFIG_KVM_BOOK3S_64=m
308CONFIG_KVM_BOOK3S_64_HV=y
309CONFIG_TRANSPARENT_HUGEPAGE=y
310CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y
306CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y 311CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index 3b260efbfbf9..ca07f9c27335 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -71,9 +71,10 @@ struct device_node;
71 71
72#define EEH_PE_ISOLATED (1 << 0) /* Isolated PE */ 72#define EEH_PE_ISOLATED (1 << 0) /* Isolated PE */
73#define EEH_PE_RECOVERING (1 << 1) /* Recovering PE */ 73#define EEH_PE_RECOVERING (1 << 1) /* Recovering PE */
74#define EEH_PE_RESET (1 << 2) /* PE reset in progress */ 74#define EEH_PE_CFG_BLOCKED (1 << 2) /* Block config access */
75 75
76#define EEH_PE_KEEP (1 << 8) /* Keep PE on hotplug */ 76#define EEH_PE_KEEP (1 << 8) /* Keep PE on hotplug */
77#define EEH_PE_CFG_RESTRICTED (1 << 9) /* Block config on error */
77 78
78struct eeh_pe { 79struct eeh_pe {
79 int type; /* PE type: PHB/Bus/Device */ 80 int type; /* PE type: PHB/Bus/Device */
diff --git a/arch/powerpc/include/asm/fadump.h b/arch/powerpc/include/asm/fadump.h
index a6774560afe3..493e72f64b35 100644
--- a/arch/powerpc/include/asm/fadump.h
+++ b/arch/powerpc/include/asm/fadump.h
@@ -70,39 +70,39 @@
70#define CPU_UNKNOWN (~((u32)0)) 70#define CPU_UNKNOWN (~((u32)0))
71 71
72/* Utility macros */ 72/* Utility macros */
73#define SKIP_TO_NEXT_CPU(reg_entry) \ 73#define SKIP_TO_NEXT_CPU(reg_entry) \
74({ \ 74({ \
75 while (reg_entry->reg_id != REG_ID("CPUEND")) \ 75 while (be64_to_cpu(reg_entry->reg_id) != REG_ID("CPUEND")) \
76 reg_entry++; \ 76 reg_entry++; \
77 reg_entry++; \ 77 reg_entry++; \
78}) 78})
79 79
80/* Kernel Dump section info */ 80/* Kernel Dump section info */
81struct fadump_section { 81struct fadump_section {
82 u32 request_flag; 82 __be32 request_flag;
83 u16 source_data_type; 83 __be16 source_data_type;
84 u16 error_flags; 84 __be16 error_flags;
85 u64 source_address; 85 __be64 source_address;
86 u64 source_len; 86 __be64 source_len;
87 u64 bytes_dumped; 87 __be64 bytes_dumped;
88 u64 destination_address; 88 __be64 destination_address;
89}; 89};
90 90
91/* ibm,configure-kernel-dump header. */ 91/* ibm,configure-kernel-dump header. */
92struct fadump_section_header { 92struct fadump_section_header {
93 u32 dump_format_version; 93 __be32 dump_format_version;
94 u16 dump_num_sections; 94 __be16 dump_num_sections;
95 u16 dump_status_flag; 95 __be16 dump_status_flag;
96 u32 offset_first_dump_section; 96 __be32 offset_first_dump_section;
97 97
98 /* Fields for disk dump option. */ 98 /* Fields for disk dump option. */
99 u32 dd_block_size; 99 __be32 dd_block_size;
100 u64 dd_block_offset; 100 __be64 dd_block_offset;
101 u64 dd_num_blocks; 101 __be64 dd_num_blocks;
102 u32 dd_offset_disk_path; 102 __be32 dd_offset_disk_path;
103 103
104 /* Maximum time allowed to prevent an automatic dump-reboot. */ 104 /* Maximum time allowed to prevent an automatic dump-reboot. */
105 u32 max_time_auto; 105 __be32 max_time_auto;
106}; 106};
107 107
108/* 108/*
@@ -174,15 +174,15 @@ static inline u64 str_to_u64(const char *str)
174 174
175/* Register save area header. */ 175/* Register save area header. */
176struct fadump_reg_save_area_header { 176struct fadump_reg_save_area_header {
177 u64 magic_number; 177 __be64 magic_number;
178 u32 version; 178 __be32 version;
179 u32 num_cpu_offset; 179 __be32 num_cpu_offset;
180}; 180};
181 181
182/* Register entry. */ 182/* Register entry. */
183struct fadump_reg_entry { 183struct fadump_reg_entry {
184 u64 reg_id; 184 __be64 reg_id;
185 u64 reg_value; 185 __be64 reg_value;
186}; 186};
187 187
188/* fadump crash info structure */ 188/* fadump crash info structure */
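
The fadump.h changes mark the firmware-provided fields as __be32/__be64 and convert them with be64_to_cpu() before use, because the registered structures stay big-endian even on a little-endian kernel. A self-contained userspace sketch of the same idea using glibc's <endian.h> converters (the register-entry layout below is simplified, not the real fadump format):

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    struct reg_entry {
            uint64_t reg_id;        /* stored big-endian by firmware */
            uint64_t reg_value;     /* stored big-endian by firmware */
    };

    int main(void)
    {
            /* Pretend this came from firmware: id 0x1 -> value 0xdeadbeef, big-endian. */
            struct reg_entry raw;
            raw.reg_id = htobe64(0x1);
            raw.reg_value = htobe64(0xdeadbeefULL);

            /* Always convert on access, as the patched SKIP_TO_NEXT_CPU() does
             * with be64_to_cpu(reg_entry->reg_id). */
            printf("id=%#llx value=%#llx\n",
                   (unsigned long long)be64toh(raw.reg_id),
                   (unsigned long long)be64toh(raw.reg_value));
            return 0;
    }
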
diff --git a/arch/powerpc/include/asm/hugetlb.h b/arch/powerpc/include/asm/hugetlb.h
index 623f2971ce0e..766b77d527ac 100644
--- a/arch/powerpc/include/asm/hugetlb.h
+++ b/arch/powerpc/include/asm/hugetlb.h
@@ -71,7 +71,7 @@ pte_t *huge_pte_offset_and_shift(struct mm_struct *mm,
71 71
72void flush_dcache_icache_hugepage(struct page *page); 72void flush_dcache_icache_hugepage(struct page *page);
73 73
74#if defined(CONFIG_PPC_MM_SLICES) || defined(CONFIG_PPC_SUBPAGE_PROT) 74#if defined(CONFIG_PPC_MM_SLICES)
75int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr, 75int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
76 unsigned long len); 76 unsigned long len);
77#else 77#else
diff --git a/arch/powerpc/include/asm/pci-bridge.h b/arch/powerpc/include/asm/pci-bridge.h
index 4ca90a39d6d0..725247beebec 100644
--- a/arch/powerpc/include/asm/pci-bridge.h
+++ b/arch/powerpc/include/asm/pci-bridge.h
@@ -159,8 +159,6 @@ struct pci_dn {
159 159
160 int pci_ext_config_space; /* for pci devices */ 160 int pci_ext_config_space; /* for pci devices */
161 161
162 bool force_32bit_msi;
163
164 struct pci_dev *pcidev; /* back-pointer to the pci device */ 162 struct pci_dev *pcidev; /* back-pointer to the pci device */
165#ifdef CONFIG_EEH 163#ifdef CONFIG_EEH
166 struct eeh_dev *edev; /* eeh device */ 164 struct eeh_dev *edev; /* eeh device */
diff --git a/arch/powerpc/include/asm/perf_event.h b/arch/powerpc/include/asm/perf_event.h
index 0bb23725b1e7..8bf1b6351716 100644
--- a/arch/powerpc/include/asm/perf_event.h
+++ b/arch/powerpc/include/asm/perf_event.h
@@ -34,7 +34,7 @@
34 do { \ 34 do { \
35 (regs)->result = 0; \ 35 (regs)->result = 0; \
36 (regs)->nip = __ip; \ 36 (regs)->nip = __ip; \
37 (regs)->gpr[1] = *(unsigned long *)__get_SP(); \ 37 (regs)->gpr[1] = current_stack_pointer(); \
38 asm volatile("mfmsr %0" : "=r" ((regs)->msr)); \ 38 asm volatile("mfmsr %0" : "=r" ((regs)->msr)); \
39 } while (0) 39 } while (0)
40#endif 40#endif
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index fe3f9488f321..c998279bd85b 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -1265,8 +1265,7 @@ static inline unsigned long mfvtb (void)
1265 1265
1266#define proc_trap() asm volatile("trap") 1266#define proc_trap() asm volatile("trap")
1267 1267
1268#define __get_SP() ({unsigned long sp; \ 1268extern unsigned long current_stack_pointer(void);
1269 asm volatile("mr %0,1": "=r" (sp)); sp;})
1270 1269
1271extern unsigned long scom970_read(unsigned int address); 1270extern unsigned long scom970_read(unsigned int address);
1272extern void scom970_write(unsigned int address, unsigned long value); 1271extern void scom970_write(unsigned int address, unsigned long value);
diff --git a/arch/powerpc/include/asm/syscall.h b/arch/powerpc/include/asm/syscall.h
index 6fa2708da153..6240698fee9a 100644
--- a/arch/powerpc/include/asm/syscall.h
+++ b/arch/powerpc/include/asm/syscall.h
@@ -19,7 +19,7 @@
19 19
20/* ftrace syscalls requires exporting the sys_call_table */ 20/* ftrace syscalls requires exporting the sys_call_table */
21#ifdef CONFIG_FTRACE_SYSCALLS 21#ifdef CONFIG_FTRACE_SYSCALLS
22extern const unsigned long *sys_call_table; 22extern const unsigned long sys_call_table[];
23#endif /* CONFIG_FTRACE_SYSCALLS */ 23#endif /* CONFIG_FTRACE_SYSCALLS */
24 24
25static inline long syscall_get_nr(struct task_struct *task, 25static inline long syscall_get_nr(struct task_struct *task,
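
The powerpc syscall.h fix declares sys_call_table as an array of unspecified size rather than a pointer: the symbol then names the table's storage directly, whereas an extern pointer declaration makes the compiler load the first table entry and treat it as an address. A minimal single-file illustration of the correct form (the table contents are made up):

    #include <stdio.h>

    /* Correct extern form: an incomplete array type that a definition later
     * completes. Declaring "extern const unsigned long *sys_call_table;"
     * instead would not refer to the same object and typically crashes when
     * dereferenced. */
    extern const unsigned long sys_call_table[];

    const unsigned long sys_call_table[] = { 0x1000, 0x1004, 0x1008 };

    int main(void)
    {
            printf("first entry: %#lx (table has %zu slots)\n",
                   sys_call_table[0],
                   sizeof(sys_call_table) / sizeof(sys_call_table[0]));
            return 0;
    }
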
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index 7d8a60068805..ce9577d693be 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -365,3 +365,4 @@ SYSCALL_SPU(renameat2)
365SYSCALL_SPU(seccomp) 365SYSCALL_SPU(seccomp)
366SYSCALL_SPU(getrandom) 366SYSCALL_SPU(getrandom)
367SYSCALL_SPU(memfd_create) 367SYSCALL_SPU(memfd_create)
368SYSCALL_SPU(bpf)
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index 4e9af3fd43e7..e0da021caa00 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -12,7 +12,7 @@
12#include <uapi/asm/unistd.h> 12#include <uapi/asm/unistd.h>
13 13
14 14
15#define __NR_syscalls 361 15#define __NR_syscalls 362
16 16
17#define __NR__exit __NR_exit 17#define __NR__exit __NR_exit
18#define NR_syscalls __NR_syscalls 18#define NR_syscalls __NR_syscalls
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
index 0688fc06e183..f55351f2e66e 100644
--- a/arch/powerpc/include/uapi/asm/unistd.h
+++ b/arch/powerpc/include/uapi/asm/unistd.h
@@ -383,5 +383,6 @@
383#define __NR_seccomp 358 383#define __NR_seccomp 358
384#define __NR_getrandom 359 384#define __NR_getrandom 359
385#define __NR_memfd_create 360 385#define __NR_memfd_create 360
386#define __NR_bpf 361
386 387
387#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */ 388#endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
diff --git a/arch/powerpc/kernel/dma.c b/arch/powerpc/kernel/dma.c
index adac9dc54aee..484b2d4462c1 100644
--- a/arch/powerpc/kernel/dma.c
+++ b/arch/powerpc/kernel/dma.c
@@ -53,9 +53,16 @@ void *dma_direct_alloc_coherent(struct device *dev, size_t size,
53#else 53#else
54 struct page *page; 54 struct page *page;
55 int node = dev_to_node(dev); 55 int node = dev_to_node(dev);
56#ifdef CONFIG_FSL_SOC
56 u64 pfn = get_pfn_limit(dev); 57 u64 pfn = get_pfn_limit(dev);
57 int zone; 58 int zone;
58 59
60 /*
61 * This code should be OK on other platforms, but we have drivers that
62 * don't set coherent_dma_mask. As a workaround we just ifdef it. This
63 * whole routine needs some serious cleanup.
64 */
65
59 zone = dma_pfn_limit_to_zone(pfn); 66 zone = dma_pfn_limit_to_zone(pfn);
60 if (zone < 0) { 67 if (zone < 0) {
61 dev_err(dev, "%s: No suitable zone for pfn %#llx\n", 68 dev_err(dev, "%s: No suitable zone for pfn %#llx\n",
@@ -73,6 +80,7 @@ void *dma_direct_alloc_coherent(struct device *dev, size_t size,
73 break; 80 break;
74#endif 81#endif
75 }; 82 };
83#endif /* CONFIG_FSL_SOC */
76 84
77 /* ignore region specifiers */ 85 /* ignore region specifiers */
78 flag &= ~(__GFP_HIGHMEM); 86 flag &= ~(__GFP_HIGHMEM);
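Note on the dma.c hunk above: the added comment explains that the zone selection is fenced behind CONFIG_FSL_SOC only because some drivers never declare a coherent DMA mask. For illustration only (not part of this patch; pdev, size, buf and dma_handle are hypothetical), a driver that can only address 32 bits would normally state that limit before allocating coherent memory, which is what makes the pfn/zone lookup above meaningful:

	/* Illustrative sketch: declare a 32-bit coherent DMA limit before
	 * allocating, so dma_direct_alloc_coherent() has a real limit to
	 * translate into a zone. */
	if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)))
		return -EIO;
	buf = dma_alloc_coherent(&pdev->dev, size, &dma_handle, GFP_KERNEL);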
diff --git a/arch/powerpc/kernel/eeh.c b/arch/powerpc/kernel/eeh.c
index d543e4179c18..2248a1999c64 100644
--- a/arch/powerpc/kernel/eeh.c
+++ b/arch/powerpc/kernel/eeh.c
@@ -257,6 +257,13 @@ static void *eeh_dump_pe_log(void *data, void *flag)
257 struct eeh_dev *edev, *tmp; 257 struct eeh_dev *edev, *tmp;
258 size_t *plen = flag; 258 size_t *plen = flag;
259 259
260 /* If the PE's config space is blocked, 0xFF's will be
261 * returned. It's pointless to collect the log in this
262 * case.
263 */
264 if (pe->state & EEH_PE_CFG_BLOCKED)
265 return NULL;
266
260 eeh_pe_for_each_dev(pe, edev, tmp) 267 eeh_pe_for_each_dev(pe, edev, tmp)
261 *plen += eeh_dump_dev_log(edev, pci_regs_buf + *plen, 268 *plen += eeh_dump_dev_log(edev, pci_regs_buf + *plen,
262 EEH_PCI_REGS_LOG_LEN - *plen); 269 EEH_PCI_REGS_LOG_LEN - *plen);
@@ -673,18 +680,18 @@ int pcibios_set_pcie_reset_state(struct pci_dev *dev, enum pcie_reset_state stat
673 switch (state) { 680 switch (state) {
674 case pcie_deassert_reset: 681 case pcie_deassert_reset:
675 eeh_ops->reset(pe, EEH_RESET_DEACTIVATE); 682 eeh_ops->reset(pe, EEH_RESET_DEACTIVATE);
676 eeh_pe_state_clear(pe, EEH_PE_RESET); 683 eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED);
677 break; 684 break;
678 case pcie_hot_reset: 685 case pcie_hot_reset:
679 eeh_pe_state_mark(pe, EEH_PE_RESET); 686 eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
680 eeh_ops->reset(pe, EEH_RESET_HOT); 687 eeh_ops->reset(pe, EEH_RESET_HOT);
681 break; 688 break;
682 case pcie_warm_reset: 689 case pcie_warm_reset:
683 eeh_pe_state_mark(pe, EEH_PE_RESET); 690 eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
684 eeh_ops->reset(pe, EEH_RESET_FUNDAMENTAL); 691 eeh_ops->reset(pe, EEH_RESET_FUNDAMENTAL);
685 break; 692 break;
686 default: 693 default:
687 eeh_pe_state_clear(pe, EEH_PE_RESET); 694 eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED);
688 return -EINVAL; 695 return -EINVAL;
689 }; 696 };
690 697
@@ -1523,7 +1530,7 @@ int eeh_pe_reset(struct eeh_pe *pe, int option)
1523 switch (option) { 1530 switch (option) {
1524 case EEH_RESET_DEACTIVATE: 1531 case EEH_RESET_DEACTIVATE:
1525 ret = eeh_ops->reset(pe, option); 1532 ret = eeh_ops->reset(pe, option);
1526 eeh_pe_state_clear(pe, EEH_PE_RESET); 1533 eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED);
1527 if (ret) 1534 if (ret)
1528 break; 1535 break;
1529 1536
@@ -1538,7 +1545,7 @@ int eeh_pe_reset(struct eeh_pe *pe, int option)
1538 */ 1545 */
1539 eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE); 1546 eeh_ops->set_option(pe, EEH_OPT_FREEZE_PE);
1540 1547
1541 eeh_pe_state_mark(pe, EEH_PE_RESET); 1548 eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
1542 ret = eeh_ops->reset(pe, option); 1549 ret = eeh_ops->reset(pe, option);
1543 break; 1550 break;
1544 default: 1551 default:
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 3fd514f8e4b2..6535936bdf27 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -528,13 +528,13 @@ int eeh_pe_reset_and_recover(struct eeh_pe *pe)
528 eeh_pe_dev_traverse(pe, eeh_report_error, &result); 528 eeh_pe_dev_traverse(pe, eeh_report_error, &result);
529 529
530 /* Issue reset */ 530 /* Issue reset */
531 eeh_pe_state_mark(pe, EEH_PE_RESET); 531 eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
532 ret = eeh_reset_pe(pe); 532 ret = eeh_reset_pe(pe);
533 if (ret) { 533 if (ret) {
534 eeh_pe_state_clear(pe, EEH_PE_RECOVERING | EEH_PE_RESET); 534 eeh_pe_state_clear(pe, EEH_PE_RECOVERING | EEH_PE_CFG_BLOCKED);
535 return ret; 535 return ret;
536 } 536 }
537 eeh_pe_state_clear(pe, EEH_PE_RESET); 537 eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED);
538 538
539 /* Unfreeze the PE */ 539 /* Unfreeze the PE */
540 ret = eeh_clear_pe_frozen_state(pe, true); 540 ret = eeh_clear_pe_frozen_state(pe, true);
@@ -601,10 +601,10 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
601 * config accesses. So we prefer to block them. However, controlled 601 * config accesses. So we prefer to block them. However, controlled
602 * PCI config accesses initiated from EEH itself are allowed. 602 * PCI config accesses initiated from EEH itself are allowed.
603 */ 603 */
604 eeh_pe_state_mark(pe, EEH_PE_RESET); 604 eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
605 rc = eeh_reset_pe(pe); 605 rc = eeh_reset_pe(pe);
606 if (rc) { 606 if (rc) {
607 eeh_pe_state_clear(pe, EEH_PE_RESET); 607 eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED);
608 return rc; 608 return rc;
609 } 609 }
610 610
@@ -613,7 +613,7 @@ static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
613 /* Restore PE */ 613 /* Restore PE */
614 eeh_ops->configure_bridge(pe); 614 eeh_ops->configure_bridge(pe);
615 eeh_pe_restore_bars(pe); 615 eeh_pe_restore_bars(pe);
616 eeh_pe_state_clear(pe, EEH_PE_RESET); 616 eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED);
617 617
618 /* Clear frozen state */ 618 /* Clear frozen state */
619 rc = eeh_clear_pe_frozen_state(pe, false); 619 rc = eeh_clear_pe_frozen_state(pe, false);
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
index 53dd0915e690..5a63e2b0f65b 100644
--- a/arch/powerpc/kernel/eeh_pe.c
+++ b/arch/powerpc/kernel/eeh_pe.c
@@ -525,7 +525,7 @@ static void *__eeh_pe_state_mark(void *data, void *flag)
525 pe->state |= state; 525 pe->state |= state;
526 526
527 /* Offline PCI devices if applicable */ 527 /* Offline PCI devices if applicable */
528 if (state != EEH_PE_ISOLATED) 528 if (!(state & EEH_PE_ISOLATED))
529 return NULL; 529 return NULL;
530 530
531 eeh_pe_for_each_dev(pe, edev, tmp) { 531 eeh_pe_for_each_dev(pe, edev, tmp) {
@@ -534,6 +534,10 @@ static void *__eeh_pe_state_mark(void *data, void *flag)
534 pdev->error_state = pci_channel_io_frozen; 534 pdev->error_state = pci_channel_io_frozen;
535 } 535 }
536 536
537 /* Block PCI config access if required */
538 if (pe->state & EEH_PE_CFG_RESTRICTED)
539 pe->state |= EEH_PE_CFG_BLOCKED;
540
537 return NULL; 541 return NULL;
538} 542}
539 543
@@ -611,6 +615,10 @@ static void *__eeh_pe_state_clear(void *data, void *flag)
611 pdev->error_state = pci_channel_io_normal; 615 pdev->error_state = pci_channel_io_normal;
612 } 616 }
613 617
618 /* Unblock PCI config access if required */
619 if (pe->state & EEH_PE_CFG_RESTRICTED)
620 pe->state &= ~EEH_PE_CFG_BLOCKED;
621
614 return NULL; 622 return NULL;
615} 623}
616 624
diff --git a/arch/powerpc/kernel/eeh_sysfs.c b/arch/powerpc/kernel/eeh_sysfs.c
index f19b1e5cb060..1ceecdda810b 100644
--- a/arch/powerpc/kernel/eeh_sysfs.c
+++ b/arch/powerpc/kernel/eeh_sysfs.c
@@ -65,7 +65,7 @@ static ssize_t eeh_pe_state_show(struct device *dev,
65 return -ENODEV; 65 return -ENODEV;
66 66
67 state = eeh_ops->get_state(edev->pe, NULL); 67 state = eeh_ops->get_state(edev->pe, NULL);
68 return sprintf(buf, "%0x08x %0x08x\n", 68 return sprintf(buf, "0x%08x 0x%08x\n",
69 state, edev->pe->state); 69 state, edev->pe->state);
70} 70}
71 71
diff --git a/arch/powerpc/kernel/entry_64.S b/arch/powerpc/kernel/entry_64.S
index 5bbd1bc8c3b0..0905c8da90f1 100644
--- a/arch/powerpc/kernel/entry_64.S
+++ b/arch/powerpc/kernel/entry_64.S
@@ -659,7 +659,13 @@ _GLOBAL(ret_from_except_lite)
6593: 6593:
660#endif 660#endif
661 bl save_nvgprs 661 bl save_nvgprs
662 /*
663 * Use a non volatile GPR to save and restore our thread_info flags
664 * across the call to restore_interrupts.
665 */
666 mr r30,r4
662 bl restore_interrupts 667 bl restore_interrupts
668 mr r4,r30
663 addi r3,r1,STACK_FRAME_OVERHEAD 669 addi r3,r1,STACK_FRAME_OVERHEAD
664 bl do_notify_resume 670 bl do_notify_resume
665 b ret_from_except 671 b ret_from_except
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index 050f79a4a168..72e783ea0681 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -1270,11 +1270,6 @@ hmi_exception_early:
1270 addi r3,r1,STACK_FRAME_OVERHEAD 1270 addi r3,r1,STACK_FRAME_OVERHEAD
1271 bl hmi_exception_realmode 1271 bl hmi_exception_realmode
1272 /* Windup the stack. */ 1272 /* Windup the stack. */
1273 /* Clear MSR_RI before setting SRR0 and SRR1. */
1274 li r0,MSR_RI
1275 mfmsr r9 /* get MSR value */
1276 andc r9,r9,r0
1277 mtmsrd r9,1 /* Clear MSR_RI */
1278 /* Move original HSRR0 and HSRR1 into the respective regs */ 1273 /* Move original HSRR0 and HSRR1 into the respective regs */
1279 ld r9,_MSR(r1) 1274 ld r9,_MSR(r1)
1280 mtspr SPRN_HSRR1,r9 1275 mtspr SPRN_HSRR1,r9
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index 742694c1d852..26d091a1a54c 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -58,7 +58,7 @@ int __init early_init_dt_scan_fw_dump(unsigned long node,
58 const __be32 *sections; 58 const __be32 *sections;
59 int i, num_sections; 59 int i, num_sections;
60 int size; 60 int size;
61 const int *token; 61 const __be32 *token;
62 62
63 if (depth != 1 || strcmp(uname, "rtas") != 0) 63 if (depth != 1 || strcmp(uname, "rtas") != 0)
64 return 0; 64 return 0;
@@ -72,7 +72,7 @@ int __init early_init_dt_scan_fw_dump(unsigned long node,
72 return 1; 72 return 1;
73 73
74 fw_dump.fadump_supported = 1; 74 fw_dump.fadump_supported = 1;
75 fw_dump.ibm_configure_kernel_dump = *token; 75 fw_dump.ibm_configure_kernel_dump = be32_to_cpu(*token);
76 76
77 /* 77 /*
78 * The 'ibm,kernel-dump' rtas node is present only if there is 78 * The 'ibm,kernel-dump' rtas node is present only if there is
@@ -147,11 +147,11 @@ static unsigned long init_fadump_mem_struct(struct fadump_mem_struct *fdm,
147 memset(fdm, 0, sizeof(struct fadump_mem_struct)); 147 memset(fdm, 0, sizeof(struct fadump_mem_struct));
148 addr = addr & PAGE_MASK; 148 addr = addr & PAGE_MASK;
149 149
150 fdm->header.dump_format_version = 0x00000001; 150 fdm->header.dump_format_version = cpu_to_be32(0x00000001);
151 fdm->header.dump_num_sections = 3; 151 fdm->header.dump_num_sections = cpu_to_be16(3);
152 fdm->header.dump_status_flag = 0; 152 fdm->header.dump_status_flag = 0;
153 fdm->header.offset_first_dump_section = 153 fdm->header.offset_first_dump_section =
154 (u32)offsetof(struct fadump_mem_struct, cpu_state_data); 154 cpu_to_be32((u32)offsetof(struct fadump_mem_struct, cpu_state_data));
155 155
156 /* 156 /*
157 * Fields for disk dump option. 157 * Fields for disk dump option.
@@ -167,27 +167,27 @@ static unsigned long init_fadump_mem_struct(struct fadump_mem_struct *fdm,
167 167
168 /* Kernel dump sections */ 168 /* Kernel dump sections */
169 /* cpu state data section. */ 169 /* cpu state data section. */
170 fdm->cpu_state_data.request_flag = FADUMP_REQUEST_FLAG; 170 fdm->cpu_state_data.request_flag = cpu_to_be32(FADUMP_REQUEST_FLAG);
171 fdm->cpu_state_data.source_data_type = FADUMP_CPU_STATE_DATA; 171 fdm->cpu_state_data.source_data_type = cpu_to_be16(FADUMP_CPU_STATE_DATA);
172 fdm->cpu_state_data.source_address = 0; 172 fdm->cpu_state_data.source_address = 0;
173 fdm->cpu_state_data.source_len = fw_dump.cpu_state_data_size; 173 fdm->cpu_state_data.source_len = cpu_to_be64(fw_dump.cpu_state_data_size);
174 fdm->cpu_state_data.destination_address = addr; 174 fdm->cpu_state_data.destination_address = cpu_to_be64(addr);
175 addr += fw_dump.cpu_state_data_size; 175 addr += fw_dump.cpu_state_data_size;
176 176
177 /* hpte region section */ 177 /* hpte region section */
178 fdm->hpte_region.request_flag = FADUMP_REQUEST_FLAG; 178 fdm->hpte_region.request_flag = cpu_to_be32(FADUMP_REQUEST_FLAG);
179 fdm->hpte_region.source_data_type = FADUMP_HPTE_REGION; 179 fdm->hpte_region.source_data_type = cpu_to_be16(FADUMP_HPTE_REGION);
180 fdm->hpte_region.source_address = 0; 180 fdm->hpte_region.source_address = 0;
181 fdm->hpte_region.source_len = fw_dump.hpte_region_size; 181 fdm->hpte_region.source_len = cpu_to_be64(fw_dump.hpte_region_size);
182 fdm->hpte_region.destination_address = addr; 182 fdm->hpte_region.destination_address = cpu_to_be64(addr);
183 addr += fw_dump.hpte_region_size; 183 addr += fw_dump.hpte_region_size;
184 184
185 /* RMA region section */ 185 /* RMA region section */
186 fdm->rmr_region.request_flag = FADUMP_REQUEST_FLAG; 186 fdm->rmr_region.request_flag = cpu_to_be32(FADUMP_REQUEST_FLAG);
187 fdm->rmr_region.source_data_type = FADUMP_REAL_MODE_REGION; 187 fdm->rmr_region.source_data_type = cpu_to_be16(FADUMP_REAL_MODE_REGION);
188 fdm->rmr_region.source_address = RMA_START; 188 fdm->rmr_region.source_address = cpu_to_be64(RMA_START);
189 fdm->rmr_region.source_len = fw_dump.boot_memory_size; 189 fdm->rmr_region.source_len = cpu_to_be64(fw_dump.boot_memory_size);
190 fdm->rmr_region.destination_address = addr; 190 fdm->rmr_region.destination_address = cpu_to_be64(addr);
191 addr += fw_dump.boot_memory_size; 191 addr += fw_dump.boot_memory_size;
192 192
193 return addr; 193 return addr;
@@ -272,7 +272,7 @@ int __init fadump_reserve_mem(void)
272 * first kernel. 272 * first kernel.
273 */ 273 */
274 if (fdm_active) 274 if (fdm_active)
275 fw_dump.boot_memory_size = fdm_active->rmr_region.source_len; 275 fw_dump.boot_memory_size = be64_to_cpu(fdm_active->rmr_region.source_len);
276 else 276 else
277 fw_dump.boot_memory_size = fadump_calculate_reserve_size(); 277 fw_dump.boot_memory_size = fadump_calculate_reserve_size();
278 278
@@ -314,8 +314,8 @@ int __init fadump_reserve_mem(void)
314 (unsigned long)(base >> 20)); 314 (unsigned long)(base >> 20));
315 315
316 fw_dump.fadumphdr_addr = 316 fw_dump.fadumphdr_addr =
317 fdm_active->rmr_region.destination_address + 317 be64_to_cpu(fdm_active->rmr_region.destination_address) +
318 fdm_active->rmr_region.source_len; 318 be64_to_cpu(fdm_active->rmr_region.source_len);
319 pr_debug("fadumphdr_addr = %p\n", 319 pr_debug("fadumphdr_addr = %p\n",
320 (void *) fw_dump.fadumphdr_addr); 320 (void *) fw_dump.fadumphdr_addr);
321 } else { 321 } else {
@@ -472,9 +472,9 @@ fadump_read_registers(struct fadump_reg_entry *reg_entry, struct pt_regs *regs)
472{ 472{
473 memset(regs, 0, sizeof(struct pt_regs)); 473 memset(regs, 0, sizeof(struct pt_regs));
474 474
475 while (reg_entry->reg_id != REG_ID("CPUEND")) { 475 while (be64_to_cpu(reg_entry->reg_id) != REG_ID("CPUEND")) {
476 fadump_set_regval(regs, reg_entry->reg_id, 476 fadump_set_regval(regs, be64_to_cpu(reg_entry->reg_id),
477 reg_entry->reg_value); 477 be64_to_cpu(reg_entry->reg_value));
478 reg_entry++; 478 reg_entry++;
479 } 479 }
480 reg_entry++; 480 reg_entry++;
@@ -603,20 +603,20 @@ static int __init fadump_build_cpu_notes(const struct fadump_mem_struct *fdm)
603 if (!fdm->cpu_state_data.bytes_dumped) 603 if (!fdm->cpu_state_data.bytes_dumped)
604 return -EINVAL; 604 return -EINVAL;
605 605
606 addr = fdm->cpu_state_data.destination_address; 606 addr = be64_to_cpu(fdm->cpu_state_data.destination_address);
607 vaddr = __va(addr); 607 vaddr = __va(addr);
608 608
609 reg_header = vaddr; 609 reg_header = vaddr;
610 if (reg_header->magic_number != REGSAVE_AREA_MAGIC) { 610 if (be64_to_cpu(reg_header->magic_number) != REGSAVE_AREA_MAGIC) {
611 printk(KERN_ERR "Unable to read register save area.\n"); 611 printk(KERN_ERR "Unable to read register save area.\n");
612 return -ENOENT; 612 return -ENOENT;
613 } 613 }
614 pr_debug("--------CPU State Data------------\n"); 614 pr_debug("--------CPU State Data------------\n");
615 pr_debug("Magic Number: %llx\n", reg_header->magic_number); 615 pr_debug("Magic Number: %llx\n", be64_to_cpu(reg_header->magic_number));
616 pr_debug("NumCpuOffset: %x\n", reg_header->num_cpu_offset); 616 pr_debug("NumCpuOffset: %x\n", be32_to_cpu(reg_header->num_cpu_offset));
617 617
618 vaddr += reg_header->num_cpu_offset; 618 vaddr += be32_to_cpu(reg_header->num_cpu_offset);
619 num_cpus = *((u32 *)(vaddr)); 619 num_cpus = be32_to_cpu(*((__be32 *)(vaddr)));
620 pr_debug("NumCpus : %u\n", num_cpus); 620 pr_debug("NumCpus : %u\n", num_cpus);
621 vaddr += sizeof(u32); 621 vaddr += sizeof(u32);
622 reg_entry = (struct fadump_reg_entry *)vaddr; 622 reg_entry = (struct fadump_reg_entry *)vaddr;
@@ -639,13 +639,13 @@ static int __init fadump_build_cpu_notes(const struct fadump_mem_struct *fdm)
639 fdh = __va(fw_dump.fadumphdr_addr); 639 fdh = __va(fw_dump.fadumphdr_addr);
640 640
641 for (i = 0; i < num_cpus; i++) { 641 for (i = 0; i < num_cpus; i++) {
642 if (reg_entry->reg_id != REG_ID("CPUSTRT")) { 642 if (be64_to_cpu(reg_entry->reg_id) != REG_ID("CPUSTRT")) {
643 printk(KERN_ERR "Unable to read CPU state data\n"); 643 printk(KERN_ERR "Unable to read CPU state data\n");
644 rc = -ENOENT; 644 rc = -ENOENT;
645 goto error_out; 645 goto error_out;
646 } 646 }
647 /* Lower 4 bytes of reg_value contains logical cpu id */ 647 /* Lower 4 bytes of reg_value contains logical cpu id */
648 cpu = reg_entry->reg_value & FADUMP_CPU_ID_MASK; 648 cpu = be64_to_cpu(reg_entry->reg_value) & FADUMP_CPU_ID_MASK;
649 if (fdh && !cpumask_test_cpu(cpu, &fdh->cpu_online_mask)) { 649 if (fdh && !cpumask_test_cpu(cpu, &fdh->cpu_online_mask)) {
650 SKIP_TO_NEXT_CPU(reg_entry); 650 SKIP_TO_NEXT_CPU(reg_entry);
651 continue; 651 continue;
@@ -692,7 +692,7 @@ static int __init process_fadump(const struct fadump_mem_struct *fdm_active)
692 return -EINVAL; 692 return -EINVAL;
693 693
694 /* Check if the dump data is valid. */ 694 /* Check if the dump data is valid. */
695 if ((fdm_active->header.dump_status_flag == FADUMP_ERROR_FLAG) || 695 if ((be16_to_cpu(fdm_active->header.dump_status_flag) == FADUMP_ERROR_FLAG) ||
696 (fdm_active->cpu_state_data.error_flags != 0) || 696 (fdm_active->cpu_state_data.error_flags != 0) ||
697 (fdm_active->rmr_region.error_flags != 0)) { 697 (fdm_active->rmr_region.error_flags != 0)) {
698 printk(KERN_ERR "Dump taken by platform is not valid\n"); 698 printk(KERN_ERR "Dump taken by platform is not valid\n");
@@ -828,7 +828,7 @@ static void fadump_setup_crash_memory_ranges(void)
828static inline unsigned long fadump_relocate(unsigned long paddr) 828static inline unsigned long fadump_relocate(unsigned long paddr)
829{ 829{
830 if (paddr > RMA_START && paddr < fw_dump.boot_memory_size) 830 if (paddr > RMA_START && paddr < fw_dump.boot_memory_size)
831 return fdm.rmr_region.destination_address + paddr; 831 return be64_to_cpu(fdm.rmr_region.destination_address) + paddr;
832 else 832 else
833 return paddr; 833 return paddr;
834} 834}
@@ -902,7 +902,7 @@ static int fadump_create_elfcore_headers(char *bufp)
902 * to the specified destination_address. Hence set 902 * to the specified destination_address. Hence set
903 * the correct offset. 903 * the correct offset.
904 */ 904 */
905 phdr->p_offset = fdm.rmr_region.destination_address; 905 phdr->p_offset = be64_to_cpu(fdm.rmr_region.destination_address);
906 } 906 }
907 907
908 phdr->p_paddr = mbase; 908 phdr->p_paddr = mbase;
@@ -951,7 +951,7 @@ static void register_fadump(void)
951 951
952 fadump_setup_crash_memory_ranges(); 952 fadump_setup_crash_memory_ranges();
953 953
954 addr = fdm.rmr_region.destination_address + fdm.rmr_region.source_len; 954 addr = be64_to_cpu(fdm.rmr_region.destination_address) + be64_to_cpu(fdm.rmr_region.source_len);
955 /* Initialize fadump crash info header. */ 955 /* Initialize fadump crash info header. */
956 addr = init_fadump_header(addr); 956 addr = init_fadump_header(addr);
957 vaddr = __va(addr); 957 vaddr = __va(addr);
@@ -1023,7 +1023,7 @@ void fadump_cleanup(void)
1023 /* Invalidate the registration only if dump is active. */ 1023 /* Invalidate the registration only if dump is active. */
1024 if (fw_dump.dump_active) { 1024 if (fw_dump.dump_active) {
1025 init_fadump_mem_struct(&fdm, 1025 init_fadump_mem_struct(&fdm,
1026 fdm_active->cpu_state_data.destination_address); 1026 be64_to_cpu(fdm_active->cpu_state_data.destination_address));
1027 fadump_invalidate_dump(&fdm); 1027 fadump_invalidate_dump(&fdm);
1028 } 1028 }
1029} 1029}
@@ -1063,7 +1063,7 @@ static void fadump_invalidate_release_mem(void)
1063 return; 1063 return;
1064 } 1064 }
1065 1065
1066 destination_address = fdm_active->cpu_state_data.destination_address; 1066 destination_address = be64_to_cpu(fdm_active->cpu_state_data.destination_address);
1067 fadump_cleanup(); 1067 fadump_cleanup();
1068 mutex_unlock(&fadump_mutex); 1068 mutex_unlock(&fadump_mutex);
1069 1069
@@ -1183,31 +1183,31 @@ static int fadump_region_show(struct seq_file *m, void *private)
1183 seq_printf(m, 1183 seq_printf(m,
1184 "CPU : [%#016llx-%#016llx] %#llx bytes, " 1184 "CPU : [%#016llx-%#016llx] %#llx bytes, "
1185 "Dumped: %#llx\n", 1185 "Dumped: %#llx\n",
1186 fdm_ptr->cpu_state_data.destination_address, 1186 be64_to_cpu(fdm_ptr->cpu_state_data.destination_address),
1187 fdm_ptr->cpu_state_data.destination_address + 1187 be64_to_cpu(fdm_ptr->cpu_state_data.destination_address) +
1188 fdm_ptr->cpu_state_data.source_len - 1, 1188 be64_to_cpu(fdm_ptr->cpu_state_data.source_len) - 1,
1189 fdm_ptr->cpu_state_data.source_len, 1189 be64_to_cpu(fdm_ptr->cpu_state_data.source_len),
1190 fdm_ptr->cpu_state_data.bytes_dumped); 1190 be64_to_cpu(fdm_ptr->cpu_state_data.bytes_dumped));
1191 seq_printf(m, 1191 seq_printf(m,
1192 "HPTE: [%#016llx-%#016llx] %#llx bytes, " 1192 "HPTE: [%#016llx-%#016llx] %#llx bytes, "
1193 "Dumped: %#llx\n", 1193 "Dumped: %#llx\n",
1194 fdm_ptr->hpte_region.destination_address, 1194 be64_to_cpu(fdm_ptr->hpte_region.destination_address),
1195 fdm_ptr->hpte_region.destination_address + 1195 be64_to_cpu(fdm_ptr->hpte_region.destination_address) +
1196 fdm_ptr->hpte_region.source_len - 1, 1196 be64_to_cpu(fdm_ptr->hpte_region.source_len) - 1,
1197 fdm_ptr->hpte_region.source_len, 1197 be64_to_cpu(fdm_ptr->hpte_region.source_len),
1198 fdm_ptr->hpte_region.bytes_dumped); 1198 be64_to_cpu(fdm_ptr->hpte_region.bytes_dumped));
1199 seq_printf(m, 1199 seq_printf(m,
1200 "DUMP: [%#016llx-%#016llx] %#llx bytes, " 1200 "DUMP: [%#016llx-%#016llx] %#llx bytes, "
1201 "Dumped: %#llx\n", 1201 "Dumped: %#llx\n",
1202 fdm_ptr->rmr_region.destination_address, 1202 be64_to_cpu(fdm_ptr->rmr_region.destination_address),
1203 fdm_ptr->rmr_region.destination_address + 1203 be64_to_cpu(fdm_ptr->rmr_region.destination_address) +
1204 fdm_ptr->rmr_region.source_len - 1, 1204 be64_to_cpu(fdm_ptr->rmr_region.source_len) - 1,
1205 fdm_ptr->rmr_region.source_len, 1205 be64_to_cpu(fdm_ptr->rmr_region.source_len),
1206 fdm_ptr->rmr_region.bytes_dumped); 1206 be64_to_cpu(fdm_ptr->rmr_region.bytes_dumped));
1207 1207
1208 if (!fdm_active || 1208 if (!fdm_active ||
1209 (fw_dump.reserve_dump_area_start == 1209 (fw_dump.reserve_dump_area_start ==
1210 fdm_ptr->cpu_state_data.destination_address)) 1210 be64_to_cpu(fdm_ptr->cpu_state_data.destination_address)))
1211 goto out; 1211 goto out;
1212 1212
1213 /* Dump is active. Show reserved memory region. */ 1213 /* Dump is active. Show reserved memory region. */
@@ -1215,10 +1215,10 @@ static int fadump_region_show(struct seq_file *m, void *private)
1215 " : [%#016llx-%#016llx] %#llx bytes, " 1215 " : [%#016llx-%#016llx] %#llx bytes, "
1216 "Dumped: %#llx\n", 1216 "Dumped: %#llx\n",
1217 (unsigned long long)fw_dump.reserve_dump_area_start, 1217 (unsigned long long)fw_dump.reserve_dump_area_start,
1218 fdm_ptr->cpu_state_data.destination_address - 1, 1218 be64_to_cpu(fdm_ptr->cpu_state_data.destination_address) - 1,
1219 fdm_ptr->cpu_state_data.destination_address - 1219 be64_to_cpu(fdm_ptr->cpu_state_data.destination_address) -
1220 fw_dump.reserve_dump_area_start, 1220 fw_dump.reserve_dump_area_start,
1221 fdm_ptr->cpu_state_data.destination_address - 1221 be64_to_cpu(fdm_ptr->cpu_state_data.destination_address) -
1222 fw_dump.reserve_dump_area_start); 1222 fw_dump.reserve_dump_area_start);
1223out: 1223out:
1224 if (fdm_active) 1224 if (fdm_active)
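Note on the fadump.c hunks above: every firmware-visible field is stored big-endian, so each read is wrapped in be16/be32/be64_to_cpu() and each write in the matching cpu_to_be*() helper. A minimal sketch of that idiom with hypothetical structure and function names (not taken from this patch):

	/* Sketch: on-wire members are declared __be32/__be64, so all access
	 * goes through explicit endian accessors. */
	struct example_region {
		__be32 request_flag;
		__be64 source_len;
	};

	static u64 example_region_len(const struct example_region *r)
	{
		return be64_to_cpu(r->source_len);
	}

	static void example_region_request(struct example_region *r, u32 flag)
	{
		r->request_flag = cpu_to_be32(flag);
	}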
diff --git a/arch/powerpc/kernel/irq.c b/arch/powerpc/kernel/irq.c
index 8eb857f216c1..c14383575fe8 100644
--- a/arch/powerpc/kernel/irq.c
+++ b/arch/powerpc/kernel/irq.c
@@ -466,7 +466,7 @@ static inline void check_stack_overflow(void)
466#ifdef CONFIG_DEBUG_STACKOVERFLOW 466#ifdef CONFIG_DEBUG_STACKOVERFLOW
467 long sp; 467 long sp;
468 468
469 sp = __get_SP() & (THREAD_SIZE-1); 469 sp = current_stack_pointer() & (THREAD_SIZE-1);
470 470
471 /* check for stack overflow: is there less than 2KB free? */ 471 /* check for stack overflow: is there less than 2KB free? */
472 if (unlikely(sp < (sizeof(struct thread_info) + 2048))) { 472 if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
diff --git a/arch/powerpc/kernel/misc.S b/arch/powerpc/kernel/misc.S
index 7ce26d45777e..0d432194c018 100644
--- a/arch/powerpc/kernel/misc.S
+++ b/arch/powerpc/kernel/misc.S
@@ -114,3 +114,7 @@ _GLOBAL(longjmp)
114 mtlr r0 114 mtlr r0
115 mr r3,r4 115 mr r3,r4
116 blr 116 blr
117
118_GLOBAL(current_stack_pointer)
119 PPC_LL r3,0(r1)
120 blr
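The current_stack_pointer() helper added above gives callers a single routine instead of open-coding the stack-pointer read in inline assembly; the irq.c, process.c and stacktrace.c hunks below switch over to it. A before/after sketch of a caller (illustrative only):

	unsigned long sp;

	/* before: read r1 with open-coded inline assembly */
	asm("mr %0,1" : "=r" (sp));

	/* after: call the helper defined and exported above */
	sp = current_stack_pointer();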
diff --git a/arch/powerpc/kernel/pci_64.c b/arch/powerpc/kernel/pci_64.c
index 155013da27e0..b15194e2c5fc 100644
--- a/arch/powerpc/kernel/pci_64.c
+++ b/arch/powerpc/kernel/pci_64.c
@@ -266,13 +266,3 @@ int pcibus_to_node(struct pci_bus *bus)
266} 266}
267EXPORT_SYMBOL(pcibus_to_node); 267EXPORT_SYMBOL(pcibus_to_node);
268#endif 268#endif
269
270static void quirk_radeon_32bit_msi(struct pci_dev *dev)
271{
272 struct pci_dn *pdn = pci_get_pdn(dev);
273
274 if (pdn)
275 pdn->force_32bit_msi = true;
276}
277DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x68f2, quirk_radeon_32bit_msi);
278DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0xaa68, quirk_radeon_32bit_msi);
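The Radeon quirk is dropped here along with the pci_dn force_32bit_msi flag removed from pci-bridge.h above; the powernv MSI paths later in this diff test the generic pci_dev->no_64bit_msi flag instead. As a hedged sketch only (the replacement quirk itself is outside the hunks shown here), an equivalent quirk expressed with the generic flag could look like:

	/* Sketch: same quirk, but setting the generic per-device flag that
	 * pnv_pci_ioda_msi_setup()/pnv_setup_msi_irqs() now check. */
	static void quirk_radeon_32bit_msi(struct pci_dev *dev)
	{
		dev->no_64bit_msi = 1;
	}
	DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0x68f2, quirk_radeon_32bit_msi);
	DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_ATI, 0xaa68, quirk_radeon_32bit_msi);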
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index c4dfff6c2719..202963ee013a 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -41,3 +41,5 @@ EXPORT_SYMBOL(giveup_spe);
41#ifdef CONFIG_EPAPR_PARAVIRT 41#ifdef CONFIG_EPAPR_PARAVIRT
42EXPORT_SYMBOL(epapr_hypercall_start); 42EXPORT_SYMBOL(epapr_hypercall_start);
43#endif 43#endif
44
45EXPORT_SYMBOL(current_stack_pointer);
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index aa1df89c8b2a..923cd2daba89 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1545,7 +1545,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
1545 tsk = current; 1545 tsk = current;
1546 if (sp == 0) { 1546 if (sp == 0) {
1547 if (tsk == current) 1547 if (tsk == current)
1548 asm("mr %0,1" : "=r" (sp)); 1548 sp = current_stack_pointer();
1549 else 1549 else
1550 sp = tsk->thread.ksp; 1550 sp = tsk->thread.ksp;
1551 } 1551 }
diff --git a/arch/powerpc/kernel/rtas_pci.c b/arch/powerpc/kernel/rtas_pci.c
index c168337aef9d..7c55b86206b3 100644
--- a/arch/powerpc/kernel/rtas_pci.c
+++ b/arch/powerpc/kernel/rtas_pci.c
@@ -66,6 +66,11 @@ int rtas_read_config(struct pci_dn *pdn, int where, int size, u32 *val)
66 return PCIBIOS_DEVICE_NOT_FOUND; 66 return PCIBIOS_DEVICE_NOT_FOUND;
67 if (!config_access_valid(pdn, where)) 67 if (!config_access_valid(pdn, where))
68 return PCIBIOS_BAD_REGISTER_NUMBER; 68 return PCIBIOS_BAD_REGISTER_NUMBER;
69#ifdef CONFIG_EEH
70 if (pdn->edev && pdn->edev->pe &&
71 (pdn->edev->pe->state & EEH_PE_CFG_BLOCKED))
72 return PCIBIOS_SET_FAILED;
73#endif
69 74
70 addr = rtas_config_addr(pdn->busno, pdn->devfn, where); 75 addr = rtas_config_addr(pdn->busno, pdn->devfn, where);
71 buid = pdn->phb->buid; 76 buid = pdn->phb->buid;
@@ -90,9 +95,6 @@ static int rtas_pci_read_config(struct pci_bus *bus,
90 struct device_node *busdn, *dn; 95 struct device_node *busdn, *dn;
91 struct pci_dn *pdn; 96 struct pci_dn *pdn;
92 bool found = false; 97 bool found = false;
93#ifdef CONFIG_EEH
94 struct eeh_dev *edev;
95#endif
96 int ret; 98 int ret;
97 99
98 /* Search only direct children of the bus */ 100 /* Search only direct children of the bus */
@@ -109,11 +111,6 @@ static int rtas_pci_read_config(struct pci_bus *bus,
109 111
110 if (!found) 112 if (!found)
111 return PCIBIOS_DEVICE_NOT_FOUND; 113 return PCIBIOS_DEVICE_NOT_FOUND;
112#ifdef CONFIG_EEH
113 edev = of_node_to_eeh_dev(dn);
114 if (edev && edev->pe && edev->pe->state & EEH_PE_RESET)
115 return PCIBIOS_DEVICE_NOT_FOUND;
116#endif
117 114
118 ret = rtas_read_config(pdn, where, size, val); 115 ret = rtas_read_config(pdn, where, size, val);
119 if (*val == EEH_IO_ERROR_VALUE(size) && 116 if (*val == EEH_IO_ERROR_VALUE(size) &&
@@ -132,6 +129,11 @@ int rtas_write_config(struct pci_dn *pdn, int where, int size, u32 val)
132 return PCIBIOS_DEVICE_NOT_FOUND; 129 return PCIBIOS_DEVICE_NOT_FOUND;
133 if (!config_access_valid(pdn, where)) 130 if (!config_access_valid(pdn, where))
134 return PCIBIOS_BAD_REGISTER_NUMBER; 131 return PCIBIOS_BAD_REGISTER_NUMBER;
132#ifdef CONFIG_EEH
133 if (pdn->edev && pdn->edev->pe &&
134 (pdn->edev->pe->state & EEH_PE_CFG_BLOCKED))
135 return PCIBIOS_SET_FAILED;
136#endif
135 137
136 addr = rtas_config_addr(pdn->busno, pdn->devfn, where); 138 addr = rtas_config_addr(pdn->busno, pdn->devfn, where);
137 buid = pdn->phb->buid; 139 buid = pdn->phb->buid;
@@ -155,10 +157,6 @@ static int rtas_pci_write_config(struct pci_bus *bus,
155 struct device_node *busdn, *dn; 157 struct device_node *busdn, *dn;
156 struct pci_dn *pdn; 158 struct pci_dn *pdn;
157 bool found = false; 159 bool found = false;
158#ifdef CONFIG_EEH
159 struct eeh_dev *edev;
160#endif
161 int ret;
162 160
163 /* Search only direct children of the bus */ 161 /* Search only direct children of the bus */
164 busdn = pci_bus_to_OF_node(bus); 162 busdn = pci_bus_to_OF_node(bus);
@@ -173,14 +171,8 @@ static int rtas_pci_write_config(struct pci_bus *bus,
173 171
174 if (!found) 172 if (!found)
175 return PCIBIOS_DEVICE_NOT_FOUND; 173 return PCIBIOS_DEVICE_NOT_FOUND;
176#ifdef CONFIG_EEH
177 edev = of_node_to_eeh_dev(dn);
178 if (edev && edev->pe && (edev->pe->state & EEH_PE_RESET))
179 return PCIBIOS_DEVICE_NOT_FOUND;
180#endif
181 ret = rtas_write_config(pdn, where, size, val);
182 174
183 return ret; 175 return rtas_write_config(pdn, where, size, val);
184} 176}
185 177
186static struct pci_ops rtas_pci_ops = { 178static struct pci_ops rtas_pci_ops = {
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index cd07d79ad21c..4f3cfe1b6a33 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -522,36 +522,36 @@ void __init setup_system(void)
522 smp_release_cpus(); 522 smp_release_cpus();
523#endif 523#endif
524 524
525 printk("Starting Linux PPC64 %s\n", init_utsname()->version); 525 pr_info("Starting Linux PPC64 %s\n", init_utsname()->version);
526 526
527 printk("-----------------------------------------------------\n"); 527 pr_info("-----------------------------------------------------\n");
528 printk("ppc64_pft_size = 0x%llx\n", ppc64_pft_size); 528 pr_info("ppc64_pft_size = 0x%llx\n", ppc64_pft_size);
529 printk("phys_mem_size = 0x%llx\n", memblock_phys_mem_size()); 529 pr_info("phys_mem_size = 0x%llx\n", memblock_phys_mem_size());
530 530
531 if (ppc64_caches.dline_size != 0x80) 531 if (ppc64_caches.dline_size != 0x80)
532 printk("dcache_line_size = 0x%x\n", ppc64_caches.dline_size); 532 pr_info("dcache_line_size = 0x%x\n", ppc64_caches.dline_size);
533 if (ppc64_caches.iline_size != 0x80) 533 if (ppc64_caches.iline_size != 0x80)
534 printk("icache_line_size = 0x%x\n", ppc64_caches.iline_size); 534 pr_info("icache_line_size = 0x%x\n", ppc64_caches.iline_size);
535 535
536 printk("cpu_features = 0x%016lx\n", cur_cpu_spec->cpu_features); 536 pr_info("cpu_features = 0x%016lx\n", cur_cpu_spec->cpu_features);
537 printk(" possible = 0x%016lx\n", CPU_FTRS_POSSIBLE); 537 pr_info(" possible = 0x%016lx\n", CPU_FTRS_POSSIBLE);
538 printk(" always = 0x%016lx\n", CPU_FTRS_ALWAYS); 538 pr_info(" always = 0x%016lx\n", CPU_FTRS_ALWAYS);
539 printk("cpu_user_features = 0x%08x 0x%08x\n", cur_cpu_spec->cpu_user_features, 539 pr_info("cpu_user_features = 0x%08x 0x%08x\n", cur_cpu_spec->cpu_user_features,
540 cur_cpu_spec->cpu_user_features2); 540 cur_cpu_spec->cpu_user_features2);
541 printk("mmu_features = 0x%08x\n", cur_cpu_spec->mmu_features); 541 pr_info("mmu_features = 0x%08x\n", cur_cpu_spec->mmu_features);
542 printk("firmware_features = 0x%016lx\n", powerpc_firmware_features); 542 pr_info("firmware_features = 0x%016lx\n", powerpc_firmware_features);
543 543
544#ifdef CONFIG_PPC_STD_MMU_64 544#ifdef CONFIG_PPC_STD_MMU_64
545 if (htab_address) 545 if (htab_address)
546 printk("htab_address = 0x%p\n", htab_address); 546 pr_info("htab_address = 0x%p\n", htab_address);
547 547
548 printk("htab_hash_mask = 0x%lx\n", htab_hash_mask); 548 pr_info("htab_hash_mask = 0x%lx\n", htab_hash_mask);
549#endif 549#endif
550 550
551 if (PHYSICAL_START > 0) 551 if (PHYSICAL_START > 0)
552 printk("physical_start = 0x%llx\n", 552 pr_info("physical_start = 0x%llx\n",
553 (unsigned long long)PHYSICAL_START); 553 (unsigned long long)PHYSICAL_START);
554 printk("-----------------------------------------------------\n"); 554 pr_info("-----------------------------------------------------\n");
555 555
556 DBG(" <- setup_system()\n"); 556 DBG(" <- setup_system()\n");
557} 557}
diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c
index 3d30ef1038e5..ea43a347a104 100644
--- a/arch/powerpc/kernel/stacktrace.c
+++ b/arch/powerpc/kernel/stacktrace.c
@@ -50,7 +50,7 @@ void save_stack_trace(struct stack_trace *trace)
50{ 50{
51 unsigned long sp; 51 unsigned long sp;
52 52
53 asm("mr %0,1" : "=r" (sp)); 53 sp = current_stack_pointer();
54 54
55 save_context_stack(trace, sp, current, 1); 55 save_context_stack(trace, sp, current, 1);
56} 56}
diff --git a/arch/powerpc/kernel/vdso32/getcpu.S b/arch/powerpc/kernel/vdso32/getcpu.S
index 23eb9a9441bd..c62be60c7274 100644
--- a/arch/powerpc/kernel/vdso32/getcpu.S
+++ b/arch/powerpc/kernel/vdso32/getcpu.S
@@ -30,8 +30,8 @@
30V_FUNCTION_BEGIN(__kernel_getcpu) 30V_FUNCTION_BEGIN(__kernel_getcpu)
31 .cfi_startproc 31 .cfi_startproc
32 mfspr r5,SPRN_SPRG_VDSO_READ 32 mfspr r5,SPRN_SPRG_VDSO_READ
33 cmpdi cr0,r3,0 33 cmpwi cr0,r3,0
34 cmpdi cr1,r4,0 34 cmpwi cr1,r4,0
35 clrlwi r6,r5,16 35 clrlwi r6,r5,16
36 rlwinm r7,r5,16,31-15,31-0 36 rlwinm r7,r5,16,31-15,31-0
37 beq cr0,1f 37 beq cr0,1f
diff --git a/arch/powerpc/mm/copro_fault.c b/arch/powerpc/mm/copro_fault.c
index 0f9939e693df..5a236f082c78 100644
--- a/arch/powerpc/mm/copro_fault.c
+++ b/arch/powerpc/mm/copro_fault.c
@@ -99,8 +99,6 @@ int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
99 u64 vsid; 99 u64 vsid;
100 int psize, ssize; 100 int psize, ssize;
101 101
102 slb->esid = (ea & ESID_MASK) | SLB_ESID_V;
103
104 switch (REGION_ID(ea)) { 102 switch (REGION_ID(ea)) {
105 case USER_REGION_ID: 103 case USER_REGION_ID:
106 pr_devel("%s: 0x%llx -- USER_REGION_ID\n", __func__, ea); 104 pr_devel("%s: 0x%llx -- USER_REGION_ID\n", __func__, ea);
@@ -133,6 +131,7 @@ int copro_calculate_slb(struct mm_struct *mm, u64 ea, struct copro_slb *slb)
133 vsid |= mmu_psize_defs[psize].sllp | 131 vsid |= mmu_psize_defs[psize].sllp |
134 ((ssize == MMU_SEGSIZE_1T) ? SLB_VSID_B_1T : 0); 132 ((ssize == MMU_SEGSIZE_1T) ? SLB_VSID_B_1T : 0);
135 133
134 slb->esid = (ea & (ssize == MMU_SEGSIZE_1T ? ESID_MASK_1T : ESID_MASK)) | SLB_ESID_V;
136 slb->vsid = vsid; 135 slb->vsid = vsid;
137 136
138 return 0; 137 return 0;
diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c
index cad68ff8eca5..415a51b028b9 100644
--- a/arch/powerpc/mm/init_32.c
+++ b/arch/powerpc/mm/init_32.c
@@ -103,7 +103,7 @@ unsigned long __max_low_memory = MAX_LOW_MEM;
103/* 103/*
104 * Check for command-line options that affect what MMU_init will do. 104 * Check for command-line options that affect what MMU_init will do.
105 */ 105 */
106void MMU_setup(void) 106void __init MMU_setup(void)
107{ 107{
108 /* Check for nobats option (used in mapin_ram). */ 108 /* Check for nobats option (used in mapin_ram). */
109 if (strstr(boot_command_line, "nobats")) { 109 if (strstr(boot_command_line, "nobats")) {
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 649666d5d1c2..b9d1dfdbe5bb 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -8,6 +8,8 @@
8 * as published by the Free Software Foundation; either version 8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version. 9 * 2 of the License, or (at your option) any later version.
10 */ 10 */
11#define pr_fmt(fmt) "numa: " fmt
12
11#include <linux/threads.h> 13#include <linux/threads.h>
12#include <linux/bootmem.h> 14#include <linux/bootmem.h>
13#include <linux/init.h> 15#include <linux/init.h>
@@ -1153,6 +1155,22 @@ static int __init early_numa(char *p)
1153} 1155}
1154early_param("numa", early_numa); 1156early_param("numa", early_numa);
1155 1157
1158static bool topology_updates_enabled = true;
1159
1160static int __init early_topology_updates(char *p)
1161{
1162 if (!p)
1163 return 0;
1164
1165 if (!strcmp(p, "off")) {
1166 pr_info("Disabling topology updates\n");
1167 topology_updates_enabled = false;
1168 }
1169
1170 return 0;
1171}
1172early_param("topology_updates", early_topology_updates);
1173
1156#ifdef CONFIG_MEMORY_HOTPLUG 1174#ifdef CONFIG_MEMORY_HOTPLUG
1157/* 1175/*
1158 * Find the node associated with a hot added memory section for 1176 * Find the node associated with a hot added memory section for
@@ -1442,8 +1460,11 @@ static long hcall_vphn(unsigned long cpu, __be32 *associativity)
1442 long retbuf[PLPAR_HCALL9_BUFSIZE] = {0}; 1460 long retbuf[PLPAR_HCALL9_BUFSIZE] = {0};
1443 u64 flags = 1; 1461 u64 flags = 1;
1444 int hwcpu = get_hard_smp_processor_id(cpu); 1462 int hwcpu = get_hard_smp_processor_id(cpu);
1463 int i;
1445 1464
1446 rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu); 1465 rc = plpar_hcall9(H_HOME_NODE_ASSOCIATIVITY, retbuf, flags, hwcpu);
1466 for (i = 0; i < 6; i++)
1467 retbuf[i] = cpu_to_be64(retbuf[i]);
1447 vphn_unpack_associativity(retbuf, associativity); 1468 vphn_unpack_associativity(retbuf, associativity);
1448 1469
1449 return rc; 1470 return rc;
@@ -1488,11 +1509,14 @@ static int update_cpu_topology(void *data)
1488 cpu = smp_processor_id(); 1509 cpu = smp_processor_id();
1489 1510
1490 for (update = data; update; update = update->next) { 1511 for (update = data; update; update = update->next) {
1512 int new_nid = update->new_nid;
1491 if (cpu != update->cpu) 1513 if (cpu != update->cpu)
1492 continue; 1514 continue;
1493 1515
1494 unmap_cpu_from_node(update->cpu); 1516 unmap_cpu_from_node(cpu);
1495 map_cpu_to_node(update->cpu, update->new_nid); 1517 map_cpu_to_node(cpu, new_nid);
1518 set_cpu_numa_node(cpu, new_nid);
1519 set_cpu_numa_mem(cpu, local_memory_node(new_nid));
1496 vdso_getcpu_init(); 1520 vdso_getcpu_init();
1497 } 1521 }
1498 1522
@@ -1539,6 +1563,9 @@ int arch_update_cpu_topology(void)
1539 struct device *dev; 1563 struct device *dev;
1540 int weight, new_nid, i = 0; 1564 int weight, new_nid, i = 0;
1541 1565
1566 if (!prrn_enabled && !vphn_enabled)
1567 return 0;
1568
1542 weight = cpumask_weight(&cpu_associativity_changes_mask); 1569 weight = cpumask_weight(&cpu_associativity_changes_mask);
1543 if (!weight) 1570 if (!weight)
1544 return 0; 1571 return 0;
@@ -1592,6 +1619,15 @@ int arch_update_cpu_topology(void)
1592 cpu = cpu_last_thread_sibling(cpu); 1619 cpu = cpu_last_thread_sibling(cpu);
1593 } 1620 }
1594 1621
1622 pr_debug("Topology update for the following CPUs:\n");
1623 if (cpumask_weight(&updated_cpus)) {
1624 for (ud = &updates[0]; ud; ud = ud->next) {
1625 pr_debug("cpu %d moving from node %d "
1626 "to %d\n", ud->cpu,
1627 ud->old_nid, ud->new_nid);
1628 }
1629 }
1630
1595 /* 1631 /*
1596 * In cases where we have nothing to update (because the updates list 1632 * In cases where we have nothing to update (because the updates list
1597 * is too short or because the new topology is same as the old one), 1633 * is too short or because the new topology is same as the old one),
@@ -1800,8 +1836,12 @@ static const struct file_operations topology_ops = {
1800 1836
1801static int topology_update_init(void) 1837static int topology_update_init(void)
1802{ 1838{
1803 start_topology_update(); 1839 /* Do not poll for changes if disabled at boot */
1804 proc_create("powerpc/topology_updates", 0644, NULL, &topology_ops); 1840 if (topology_updates_enabled)
1841 start_topology_update();
1842
1843 if (!proc_create("powerpc/topology_updates", 0644, NULL, &topology_ops))
1844 return -ENOMEM;
1805 1845
1806 return 0; 1846 return 0;
1807} 1847}
diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
index 8d7bda94d196..ded0ea1afde4 100644
--- a/arch/powerpc/mm/slice.c
+++ b/arch/powerpc/mm/slice.c
@@ -682,6 +682,7 @@ void slice_set_range_psize(struct mm_struct *mm, unsigned long start,
682 slice_convert(mm, mask, psize); 682 slice_convert(mm, mask, psize);
683} 683}
684 684
685#ifdef CONFIG_HUGETLB_PAGE
685/* 686/*
686 * is_hugepage_only_range() is used by generic code to verify whether 687 * is_hugepage_only_range() is used by generic code to verify whether
687 * a normal mmap mapping (non hugetlbfs) is valid on a given area. 688 * a normal mmap mapping (non hugetlbfs) is valid on a given area.
@@ -726,4 +727,4 @@ int is_hugepage_only_range(struct mm_struct *mm, unsigned long addr,
726#endif 727#endif
727 return !slice_check_fit(mask, available); 728 return !slice_check_fit(mask, available);
728} 729}
729 730#endif
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
index 6c8710dd90c9..dba34088da28 100644
--- a/arch/powerpc/perf/hv-24x7.c
+++ b/arch/powerpc/perf/hv-24x7.c
@@ -417,11 +417,6 @@ static int h_24x7_event_add(struct perf_event *event, int flags)
417 return 0; 417 return 0;
418} 418}
419 419
420static int h_24x7_event_idx(struct perf_event *event)
421{
422 return 0;
423}
424
425static struct pmu h_24x7_pmu = { 420static struct pmu h_24x7_pmu = {
426 .task_ctx_nr = perf_invalid_context, 421 .task_ctx_nr = perf_invalid_context,
427 422
@@ -433,7 +428,6 @@ static struct pmu h_24x7_pmu = {
433 .start = h_24x7_event_start, 428 .start = h_24x7_event_start,
434 .stop = h_24x7_event_stop, 429 .stop = h_24x7_event_stop,
435 .read = h_24x7_event_update, 430 .read = h_24x7_event_update,
436 .event_idx = h_24x7_event_idx,
437}; 431};
438 432
439static int hv_24x7_init(void) 433static int hv_24x7_init(void)
diff --git a/arch/powerpc/perf/hv-gpci.c b/arch/powerpc/perf/hv-gpci.c
index 15fc76c93022..a051fe946c63 100644
--- a/arch/powerpc/perf/hv-gpci.c
+++ b/arch/powerpc/perf/hv-gpci.c
@@ -246,11 +246,6 @@ static int h_gpci_event_init(struct perf_event *event)
246 return 0; 246 return 0;
247} 247}
248 248
249static int h_gpci_event_idx(struct perf_event *event)
250{
251 return 0;
252}
253
254static struct pmu h_gpci_pmu = { 249static struct pmu h_gpci_pmu = {
255 .task_ctx_nr = perf_invalid_context, 250 .task_ctx_nr = perf_invalid_context,
256 251
@@ -262,7 +257,6 @@ static struct pmu h_gpci_pmu = {
262 .start = h_gpci_event_start, 257 .start = h_gpci_event_start,
263 .stop = h_gpci_event_stop, 258 .stop = h_gpci_event_stop,
264 .read = h_gpci_event_update, 259 .read = h_gpci_event_update,
265 .event_idx = h_gpci_event_idx,
266}; 260};
267 261
268static int hv_gpci_init(void) 262static int hv_gpci_init(void)
diff --git a/arch/powerpc/platforms/powernv/eeh-ioda.c b/arch/powerpc/platforms/powernv/eeh-ioda.c
index 426814a2ede3..eba9cb10619c 100644
--- a/arch/powerpc/platforms/powernv/eeh-ioda.c
+++ b/arch/powerpc/platforms/powernv/eeh-ioda.c
@@ -373,7 +373,7 @@ static int ioda_eeh_get_pe_state(struct eeh_pe *pe)
373 * moving forward, we have to return operational 373 * moving forward, we have to return operational
374 * state during PE reset. 374 * state during PE reset.
375 */ 375 */
376 if (pe->state & EEH_PE_RESET) { 376 if (pe->state & EEH_PE_CFG_BLOCKED) {
377 result = (EEH_STATE_MMIO_ACTIVE | 377 result = (EEH_STATE_MMIO_ACTIVE |
378 EEH_STATE_DMA_ACTIVE | 378 EEH_STATE_DMA_ACTIVE |
379 EEH_STATE_MMIO_ENABLED | 379 EEH_STATE_MMIO_ENABLED |
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index 3e89cbf55885..1d19e7917d7f 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -169,6 +169,26 @@ static int powernv_eeh_dev_probe(struct pci_dev *dev, void *flag)
169 } 169 }
170 170
171 /* 171 /*
172 * If the PE contains any one of following adapters, the
173 * PCI config space can't be accessed when dumping EEH log.
174 * Otherwise, we will run into fenced PHB caused by shortage
175 * of outbound credits in the adapter. The PCI config access
176 * should be blocked until PE reset. MMIO access is dropped
177 * by hardware certainly. In order to drop PCI config requests,
178 * one more flag (EEH_PE_CFG_RESTRICTED) is introduced, which
 179	 * will be checked in the backend for PE state retrieval. If
180 * the PE becomes frozen for the first time and the flag has
181 * been set for the PE, we will set EEH_PE_CFG_BLOCKED for
182 * that PE to block its config space.
183 *
 184	 * Broadcom Austin 4-port NICs (14e4:1657)
 185	 * Broadcom Shiner 2-port 10G NICs (14e4:168e)
186 */
187 if ((dev->vendor == PCI_VENDOR_ID_BROADCOM && dev->device == 0x1657) ||
188 (dev->vendor == PCI_VENDOR_ID_BROADCOM && dev->device == 0x168e))
189 edev->pe->state |= EEH_PE_CFG_RESTRICTED;
190
191 /*
172 * Cache the PE primary bus, which can't be fetched when 192 * Cache the PE primary bus, which can't be fetched when
173 * full hotplug is in progress. In that case, all child 193 * full hotplug is in progress. In that case, all child
174 * PCI devices of the PE are expected to be removed prior 194 * PCI devices of the PE are expected to be removed prior
@@ -383,6 +403,39 @@ static int powernv_eeh_err_inject(struct eeh_pe *pe, int type, int func,
383 return ret; 403 return ret;
384} 404}
385 405
406static inline bool powernv_eeh_cfg_blocked(struct device_node *dn)
407{
408 struct eeh_dev *edev = of_node_to_eeh_dev(dn);
409
410 if (!edev || !edev->pe)
411 return false;
412
413 if (edev->pe->state & EEH_PE_CFG_BLOCKED)
414 return true;
415
416 return false;
417}
418
419static int powernv_eeh_read_config(struct device_node *dn,
420 int where, int size, u32 *val)
421{
422 if (powernv_eeh_cfg_blocked(dn)) {
423 *val = 0xFFFFFFFF;
424 return PCIBIOS_SET_FAILED;
425 }
426
427 return pnv_pci_cfg_read(dn, where, size, val);
428}
429
430static int powernv_eeh_write_config(struct device_node *dn,
431 int where, int size, u32 val)
432{
433 if (powernv_eeh_cfg_blocked(dn))
434 return PCIBIOS_SET_FAILED;
435
436 return pnv_pci_cfg_write(dn, where, size, val);
437}
438
386/** 439/**
387 * powernv_eeh_next_error - Retrieve next EEH error to handle 440 * powernv_eeh_next_error - Retrieve next EEH error to handle
388 * @pe: Affected PE 441 * @pe: Affected PE
@@ -440,8 +493,8 @@ static struct eeh_ops powernv_eeh_ops = {
440 .get_log = powernv_eeh_get_log, 493 .get_log = powernv_eeh_get_log,
441 .configure_bridge = powernv_eeh_configure_bridge, 494 .configure_bridge = powernv_eeh_configure_bridge,
442 .err_inject = powernv_eeh_err_inject, 495 .err_inject = powernv_eeh_err_inject,
443 .read_config = pnv_pci_cfg_read, 496 .read_config = powernv_eeh_read_config,
444 .write_config = pnv_pci_cfg_write, 497 .write_config = powernv_eeh_write_config,
445 .next_error = powernv_eeh_next_error, 498 .next_error = powernv_eeh_next_error,
446 .restore_config = powernv_eeh_restore_config 499 .restore_config = powernv_eeh_restore_config
447}; 500};
diff --git a/arch/powerpc/platforms/powernv/opal-hmi.c b/arch/powerpc/platforms/powernv/opal-hmi.c
index 5e1ed1575aab..b322bfb51343 100644
--- a/arch/powerpc/platforms/powernv/opal-hmi.c
+++ b/arch/powerpc/platforms/powernv/opal-hmi.c
@@ -57,7 +57,7 @@ static void print_hmi_event_info(struct OpalHMIEvent *hmi_evt)
57 }; 57 };
58 58
59 /* Print things out */ 59 /* Print things out */
60 if (hmi_evt->version != OpalHMIEvt_V1) { 60 if (hmi_evt->version < OpalHMIEvt_V1) {
61 pr_err("HMI Interrupt, Unknown event version %d !\n", 61 pr_err("HMI Interrupt, Unknown event version %d !\n",
62 hmi_evt->version); 62 hmi_evt->version);
63 return; 63 return;
diff --git a/arch/powerpc/platforms/powernv/opal-lpc.c b/arch/powerpc/platforms/powernv/opal-lpc.c
index dd2c285ad170..e4169d68cb32 100644
--- a/arch/powerpc/platforms/powernv/opal-lpc.c
+++ b/arch/powerpc/platforms/powernv/opal-lpc.c
@@ -191,7 +191,6 @@ static ssize_t lpc_debug_read(struct file *filp, char __user *ubuf,
191{ 191{
192 struct lpc_debugfs_entry *lpc = filp->private_data; 192 struct lpc_debugfs_entry *lpc = filp->private_data;
193 u32 data, pos, len, todo; 193 u32 data, pos, len, todo;
194 __be32 bedata;
195 int rc; 194 int rc;
196 195
197 if (!access_ok(VERIFY_WRITE, ubuf, count)) 196 if (!access_ok(VERIFY_WRITE, ubuf, count))
@@ -214,18 +213,57 @@ static ssize_t lpc_debug_read(struct file *filp, char __user *ubuf,
214 len = 2; 213 len = 2;
215 } 214 }
216 rc = opal_lpc_read(opal_lpc_chip_id, lpc->lpc_type, pos, 215 rc = opal_lpc_read(opal_lpc_chip_id, lpc->lpc_type, pos,
217 &bedata, len); 216 &data, len);
218 if (rc) 217 if (rc)
219 return -ENXIO; 218 return -ENXIO;
220 data = be32_to_cpu(bedata); 219
220 /*
221 * Now there is some trickery with the data returned by OPAL
222 * as it's the desired data right justified in a 32-bit BE
223 * word.
224 *
225 * This is a very bad interface and I'm to blame for it :-(
226 *
227 * So we can't just apply a 32-bit swap to what comes from OPAL,
228 * because user space expects the *bytes* to be in their proper
229 * respective positions (ie, LPC position).
230 *
231 * So what we really want to do here is to shift data right
232 * appropriately on a LE kernel.
233 *
234 * IE. If the LPC transaction has bytes B0, B1, B2 and B3 in that
235 * order, we have in memory written to by OPAL at the "data"
236 * pointer:
237 *
238 * Bytes: OPAL "data" LE "data"
239 * 32-bit: B0 B1 B2 B3 B0B1B2B3 B3B2B1B0
240 * 16-bit: B0 B1 0000B0B1 B1B00000
241 * 8-bit: B0 000000B0 B0000000
242 *
243 * So a BE kernel will have the leftmost of the above in the MSB
244 * and rightmost in the LSB and can just then "cast" the u32 "data"
245 * down to the appropriate quantity and write it.
246 *
247 * However, an LE kernel can't. It doesn't need to swap because a
248 * load from data followed by a store to user are going to preserve
249 * the byte ordering which is the wire byte order which is what the
250 * user wants, but in order to "crop" to the right size, we need to
251 * shift right first.
252 */
221 switch(len) { 253 switch(len) {
222 case 4: 254 case 4:
223 rc = __put_user((u32)data, (u32 __user *)ubuf); 255 rc = __put_user((u32)data, (u32 __user *)ubuf);
224 break; 256 break;
225 case 2: 257 case 2:
258#ifdef __LITTLE_ENDIAN__
259 data >>= 16;
260#endif
226 rc = __put_user((u16)data, (u16 __user *)ubuf); 261 rc = __put_user((u16)data, (u16 __user *)ubuf);
227 break; 262 break;
228 default: 263 default:
264#ifdef __LITTLE_ENDIAN__
265 data >>= 24;
266#endif
229 rc = __put_user((u8)data, (u8 __user *)ubuf); 267 rc = __put_user((u8)data, (u8 __user *)ubuf);
230 break; 268 break;
231 } 269 }
@@ -265,12 +303,31 @@ static ssize_t lpc_debug_write(struct file *filp, const char __user *ubuf,
265 else if (todo > 1 && (pos & 1) == 0) 303 else if (todo > 1 && (pos & 1) == 0)
266 len = 2; 304 len = 2;
267 } 305 }
306
307 /*
308 * Similarly to the read case, we have some trickery here but
309 * it's different to handle. We need to pass the value to OPAL in
310 * a register whose layout depends on the access size. We want
311 * to reproduce the memory layout of the user, however we aren't
312 * doing a load from user and a store to another memory location
313 * which would achieve that. Here we pass the value to OPAL via
314 * a register which is expected to contain the "BE" interpretation
315 * of the byte sequence. IE: for a 32-bit access, byte 0 should be
316 * in the MSB. So here we *do* need to byteswap on LE.
317 *
318 * User bytes: LE "data" OPAL "data"
319 * 32-bit: B0 B1 B2 B3 B3B2B1B0 B0B1B2B3
320 * 16-bit: B0 B1 0000B1B0 0000B0B1
321 * 8-bit: B0 000000B0 000000B0
322 */
268 switch(len) { 323 switch(len) {
269 case 4: 324 case 4:
270 rc = __get_user(data, (u32 __user *)ubuf); 325 rc = __get_user(data, (u32 __user *)ubuf);
326 data = cpu_to_be32(data);
271 break; 327 break;
272 case 2: 328 case 2:
273 rc = __get_user(data, (u16 __user *)ubuf); 329 rc = __get_user(data, (u16 __user *)ubuf);
330 data = cpu_to_be16(data);
274 break; 331 break;
275 default: 332 default:
276 rc = __get_user(data, (u8 __user *)ubuf); 333 rc = __get_user(data, (u8 __user *)ubuf);
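The comments added to lpc_debug_read()/lpc_debug_write() above describe the layout OPAL uses: the LPC bytes arrive right-justified in a 32-bit big-endian quantity, so a little-endian kernel must shift before narrowing on reads and byteswap into that layout on writes. A small sketch of the read-side cropping under exactly those assumptions (helper names are made up, not part of this patch):

	/* Sketch: "data" is the 32-bit word OPAL filled in.  On LE the wanted
	 * bytes sit in the high part of the value, so shift them down before
	 * the narrowing conversion; on BE plain truncation already keeps the
	 * right bytes. */
	static u16 lpc_crop16(u32 data)
	{
	#ifdef __LITTLE_ENDIAN__
		return data >> 16;
	#else
		return data;
	#endif
	}

	static u8 lpc_crop8(u32 data)
	{
	#ifdef __LITTLE_ENDIAN__
		return data >> 24;
	#else
		return data;
	#endif
	}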
diff --git a/arch/powerpc/platforms/powernv/opal-wrappers.S b/arch/powerpc/platforms/powernv/opal-wrappers.S
index e9e2450c1fdd..feb549aa3eea 100644
--- a/arch/powerpc/platforms/powernv/opal-wrappers.S
+++ b/arch/powerpc/platforms/powernv/opal-wrappers.S
@@ -58,7 +58,7 @@ END_FTR_SECTION(0, 1); \
58 */ 58 */
59 59
60#define OPAL_CALL(name, token) \ 60#define OPAL_CALL(name, token) \
61 _GLOBAL(name); \ 61 _GLOBAL_TOC(name); \
62 mflr r0; \ 62 mflr r0; \
63 std r0,16(r1); \ 63 std r0,16(r1); \
64 li r0,token; \ 64 li r0,token; \
diff --git a/arch/powerpc/platforms/powernv/opal.c b/arch/powerpc/platforms/powernv/opal.c
index b642b0562f5a..d019b081df9d 100644
--- a/arch/powerpc/platforms/powernv/opal.c
+++ b/arch/powerpc/platforms/powernv/opal.c
@@ -194,6 +194,27 @@ static int __init opal_register_exception_handlers(void)
194 * fwnmi area at 0x7000 to provide the glue space to OPAL 194 * fwnmi area at 0x7000 to provide the glue space to OPAL
195 */ 195 */
196 glue = 0x7000; 196 glue = 0x7000;
197
198 /*
199 * Check if we are running on newer firmware that exports
200 * OPAL_HANDLE_HMI token. If yes, then don't ask OPAL to patch
201 * the HMI interrupt and we catch it directly in Linux.
202 *
 203	 * For older firmware (i.e. currently released POWER8 System Firmware
 204	 * as of today <= SV810_087), we fall back to the old behavior and let OPAL
205 * patch the HMI vector and handle it inside OPAL firmware.
206 *
207 * For newer firmware (in development/yet to be released) we will
208 * start catching/handling HMI directly in Linux.
209 */
210 if (!opal_check_token(OPAL_HANDLE_HMI)) {
211 pr_info("opal: Old firmware detected, OPAL handles HMIs.\n");
212 opal_register_exception_handler(
213 OPAL_HYPERVISOR_MAINTENANCE_HANDLER,
214 0, glue);
215 glue += 128;
216 }
217
197 opal_register_exception_handler(OPAL_SOFTPATCH_HANDLER, 0, glue); 218 opal_register_exception_handler(OPAL_SOFTPATCH_HANDLER, 0, glue);
198#endif 219#endif
199 220
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 468a0f23c7f2..3ba435ec3dcd 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -1509,7 +1509,6 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
1509 unsigned int is_64, struct msi_msg *msg) 1509 unsigned int is_64, struct msi_msg *msg)
1510{ 1510{
1511 struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev); 1511 struct pnv_ioda_pe *pe = pnv_ioda_get_pe(dev);
1512 struct pci_dn *pdn = pci_get_pdn(dev);
1513 unsigned int xive_num = hwirq - phb->msi_base; 1512 unsigned int xive_num = hwirq - phb->msi_base;
1514 __be32 data; 1513 __be32 data;
1515 int rc; 1514 int rc;
@@ -1523,7 +1522,7 @@ static int pnv_pci_ioda_msi_setup(struct pnv_phb *phb, struct pci_dev *dev,
1523 return -ENXIO; 1522 return -ENXIO;
1524 1523
1525 /* Force 32-bit MSI on some broken devices */ 1524 /* Force 32-bit MSI on some broken devices */
1526 if (pdn && pdn->force_32bit_msi) 1525 if (dev->no_64bit_msi)
1527 is_64 = 0; 1526 is_64 = 0;
1528 1527
1529 /* Assign XIVE to PE */ 1528 /* Assign XIVE to PE */
@@ -1997,7 +1996,7 @@ static void __init pnv_pci_init_ioda_phb(struct device_node *np,
1997 if (is_kdump_kernel()) { 1996 if (is_kdump_kernel()) {
1998 pr_info(" Issue PHB reset ...\n"); 1997 pr_info(" Issue PHB reset ...\n");
1999 ioda_eeh_phb_reset(hose, EEH_RESET_FUNDAMENTAL); 1998 ioda_eeh_phb_reset(hose, EEH_RESET_FUNDAMENTAL);
2000 ioda_eeh_phb_reset(hose, OPAL_DEASSERT_RESET); 1999 ioda_eeh_phb_reset(hose, EEH_RESET_DEACTIVATE);
2001 } 2000 }
2002 2001
2003 /* Configure M64 window */ 2002 /* Configure M64 window */
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index b3ca77ddf36d..4b20f2c6b3b2 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -50,7 +50,6 @@ static int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
50{ 50{
51 struct pci_controller *hose = pci_bus_to_host(pdev->bus); 51 struct pci_controller *hose = pci_bus_to_host(pdev->bus);
52 struct pnv_phb *phb = hose->private_data; 52 struct pnv_phb *phb = hose->private_data;
53 struct pci_dn *pdn = pci_get_pdn(pdev);
54 struct msi_desc *entry; 53 struct msi_desc *entry;
55 struct msi_msg msg; 54 struct msi_msg msg;
56 int hwirq; 55 int hwirq;
@@ -60,7 +59,7 @@ static int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
60 if (WARN_ON(!phb) || !phb->msi_bmp.bitmap) 59 if (WARN_ON(!phb) || !phb->msi_bmp.bitmap)
61 return -ENODEV; 60 return -ENODEV;
62 61
63 if (pdn && pdn->force_32bit_msi && !phb->msi32_support) 62 if (pdev->no_64bit_msi && !phb->msi32_support)
64 return -ENODEV; 63 return -ENODEV;
65 64
66 list_for_each_entry(entry, &pdev->msi_list, list) { 65 list_for_each_entry(entry, &pdev->msi_list, list) {
@@ -505,7 +504,7 @@ static bool pnv_pci_cfg_check(struct pci_controller *hose,
505 edev = of_node_to_eeh_dev(dn); 504 edev = of_node_to_eeh_dev(dn);
506 if (edev) { 505 if (edev) {
507 if (edev->pe && 506 if (edev->pe &&
508 (edev->pe->state & EEH_PE_RESET)) 507 (edev->pe->state & EEH_PE_CFG_BLOCKED))
509 return false; 508 return false;
510 509
511 if (edev->mode & EEH_DEV_REMOVED) 510 if (edev->mode & EEH_DEV_REMOVED)
diff --git a/arch/powerpc/platforms/pseries/dlpar.c b/arch/powerpc/platforms/pseries/dlpar.c
index fdf01b660d59..c22bb1b4beb8 100644
--- a/arch/powerpc/platforms/pseries/dlpar.c
+++ b/arch/powerpc/platforms/pseries/dlpar.c
@@ -25,11 +25,11 @@
25#include <asm/rtas.h> 25#include <asm/rtas.h>
26 26
27struct cc_workarea { 27struct cc_workarea {
28 u32 drc_index; 28 __be32 drc_index;
29 u32 zero; 29 __be32 zero;
30 u32 name_offset; 30 __be32 name_offset;
31 u32 prop_length; 31 __be32 prop_length;
32 u32 prop_offset; 32 __be32 prop_offset;
33}; 33};
34 34
35void dlpar_free_cc_property(struct property *prop) 35void dlpar_free_cc_property(struct property *prop)
@@ -49,11 +49,11 @@ static struct property *dlpar_parse_cc_property(struct cc_workarea *ccwa)
49 if (!prop) 49 if (!prop)
50 return NULL; 50 return NULL;
51 51
52 name = (char *)ccwa + ccwa->name_offset; 52 name = (char *)ccwa + be32_to_cpu(ccwa->name_offset);
53 prop->name = kstrdup(name, GFP_KERNEL); 53 prop->name = kstrdup(name, GFP_KERNEL);
54 54
55 prop->length = ccwa->prop_length; 55 prop->length = be32_to_cpu(ccwa->prop_length);
56 value = (char *)ccwa + ccwa->prop_offset; 56 value = (char *)ccwa + be32_to_cpu(ccwa->prop_offset);
57 prop->value = kmemdup(value, prop->length, GFP_KERNEL); 57 prop->value = kmemdup(value, prop->length, GFP_KERNEL);
58 if (!prop->value) { 58 if (!prop->value) {
59 dlpar_free_cc_property(prop); 59 dlpar_free_cc_property(prop);
@@ -79,7 +79,7 @@ static struct device_node *dlpar_parse_cc_node(struct cc_workarea *ccwa,
79 if (!dn) 79 if (!dn)
80 return NULL; 80 return NULL;
81 81
82 name = (char *)ccwa + ccwa->name_offset; 82 name = (char *)ccwa + be32_to_cpu(ccwa->name_offset);
83 dn->full_name = kasprintf(GFP_KERNEL, "%s/%s", path, name); 83 dn->full_name = kasprintf(GFP_KERNEL, "%s/%s", path, name);
84 if (!dn->full_name) { 84 if (!dn->full_name) {
85 kfree(dn); 85 kfree(dn);
@@ -126,7 +126,7 @@ void dlpar_free_cc_nodes(struct device_node *dn)
126#define CALL_AGAIN -2 126#define CALL_AGAIN -2
127#define ERR_CFG_USE -9003 127#define ERR_CFG_USE -9003
128 128
129struct device_node *dlpar_configure_connector(u32 drc_index, 129struct device_node *dlpar_configure_connector(__be32 drc_index,
130 struct device_node *parent) 130 struct device_node *parent)
131{ 131{
132 struct device_node *dn; 132 struct device_node *dn;
@@ -382,7 +382,7 @@ static int dlpar_online_cpu(struct device_node *dn)
382 BUG_ON(get_cpu_current_state(cpu) 382 BUG_ON(get_cpu_current_state(cpu)
383 != CPU_STATE_OFFLINE); 383 != CPU_STATE_OFFLINE);
384 cpu_maps_update_done(); 384 cpu_maps_update_done();
385 rc = cpu_up(cpu); 385 rc = device_online(get_cpu_device(cpu));
386 if (rc) 386 if (rc)
387 goto out; 387 goto out;
388 cpu_maps_update_begin(); 388 cpu_maps_update_begin();
@@ -414,7 +414,7 @@ static ssize_t dlpar_cpu_probe(const char *buf, size_t count)
414 if (!parent) 414 if (!parent)
415 return -ENODEV; 415 return -ENODEV;
416 416
417 dn = dlpar_configure_connector(drc_index, parent); 417 dn = dlpar_configure_connector(cpu_to_be32(drc_index), parent);
418 if (!dn) 418 if (!dn)
419 return -EINVAL; 419 return -EINVAL;
420 420
@@ -467,7 +467,7 @@ static int dlpar_offline_cpu(struct device_node *dn)
467 if (get_cpu_current_state(cpu) == CPU_STATE_ONLINE) { 467 if (get_cpu_current_state(cpu) == CPU_STATE_ONLINE) {
468 set_preferred_offline_state(cpu, CPU_STATE_OFFLINE); 468 set_preferred_offline_state(cpu, CPU_STATE_OFFLINE);
469 cpu_maps_update_done(); 469 cpu_maps_update_done();
470 rc = cpu_down(cpu); 470 rc = device_offline(get_cpu_device(cpu));
471 if (rc) 471 if (rc)
472 goto out; 472 goto out;
473 cpu_maps_update_begin(); 473 cpu_maps_update_begin();
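
The cc_workarea conversion above follows the usual sparse-endian pattern: the firmware work area holds big-endian 32-bit fields, so each field is converted with be32_to_cpu() before it is used as an offset or length. Below is a small userspace sketch of the same idea, with an invented buffer layout and be32toh() from glibc's <endian.h> standing in for be32_to_cpu().

    #include <stdio.h>
    #include <stdint.h>
    #include <string.h>
    #include <endian.h>

    /* Two big-endian fields, as a firmware work area would store them. */
    struct cc_workarea_sketch {
            uint32_t name_offset;
            uint32_t prop_length;
    };

    int main(void)
    {
            /* Pretend firmware wrote name_offset = 8 and prop_length = 4. */
            const uint8_t buf[16] = { 0, 0, 0, 8,  0, 0, 0, 4,
                                      'c', 'p', 'u', 0 };
            struct cc_workarea_sketch ccwa;

            memcpy(&ccwa, buf, sizeof(ccwa));
            /* Using the raw big-endian values directly would index garbage. */
            printf("name   = %s\n", (const char *)buf + be32toh(ccwa.name_offset));
            printf("length = %u\n", (unsigned)be32toh(ccwa.prop_length));
            return 0;
    }
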
diff --git a/arch/powerpc/platforms/pseries/hotplug-cpu.c b/arch/powerpc/platforms/pseries/hotplug-cpu.c
index b174fa751d26..5c375f93c669 100644
--- a/arch/powerpc/platforms/pseries/hotplug-cpu.c
+++ b/arch/powerpc/platforms/pseries/hotplug-cpu.c
@@ -247,7 +247,7 @@ static int pseries_add_processor(struct device_node *np)
247 unsigned int cpu; 247 unsigned int cpu;
248 cpumask_var_t candidate_mask, tmp; 248 cpumask_var_t candidate_mask, tmp;
249 int err = -ENOSPC, len, nthreads, i; 249 int err = -ENOSPC, len, nthreads, i;
250 const u32 *intserv; 250 const __be32 *intserv;
251 251
252 intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", &len); 252 intserv = of_get_property(np, "ibm,ppc-interrupt-server#s", &len);
253 if (!intserv) 253 if (!intserv)
@@ -293,7 +293,7 @@ static int pseries_add_processor(struct device_node *np)
293 for_each_cpu(cpu, tmp) { 293 for_each_cpu(cpu, tmp) {
294 BUG_ON(cpu_present(cpu)); 294 BUG_ON(cpu_present(cpu));
295 set_cpu_present(cpu, true); 295 set_cpu_present(cpu, true);
296 set_hard_smp_processor_id(cpu, *intserv++); 296 set_hard_smp_processor_id(cpu, be32_to_cpu(*intserv++));
297 } 297 }
298 err = 0; 298 err = 0;
299out_unlock: 299out_unlock:
diff --git a/arch/powerpc/platforms/pseries/iommu.c b/arch/powerpc/platforms/pseries/iommu.c
index de1ec54a2a57..e32e00976a94 100644
--- a/arch/powerpc/platforms/pseries/iommu.c
+++ b/arch/powerpc/platforms/pseries/iommu.c
@@ -30,7 +30,6 @@
30#include <linux/mm.h> 30#include <linux/mm.h>
31#include <linux/memblock.h> 31#include <linux/memblock.h>
32#include <linux/spinlock.h> 32#include <linux/spinlock.h>
33#include <linux/sched.h> /* for show_stack */
34#include <linux/string.h> 33#include <linux/string.h>
35#include <linux/pci.h> 34#include <linux/pci.h>
36#include <linux/dma-mapping.h> 35#include <linux/dma-mapping.h>
@@ -168,7 +167,7 @@ static int tce_build_pSeriesLP(struct iommu_table *tbl, long tcenum,
168 printk("\tindex = 0x%llx\n", (u64)tbl->it_index); 167 printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
169 printk("\ttcenum = 0x%llx\n", (u64)tcenum); 168 printk("\ttcenum = 0x%llx\n", (u64)tcenum);
170 printk("\ttce val = 0x%llx\n", tce ); 169 printk("\ttce val = 0x%llx\n", tce );
171 show_stack(current, (unsigned long *)__get_SP()); 170 dump_stack();
172 } 171 }
173 172
174 tcenum++; 173 tcenum++;
@@ -257,7 +256,7 @@ static int tce_buildmulti_pSeriesLP(struct iommu_table *tbl, long tcenum,
257 printk("\tindex = 0x%llx\n", (u64)tbl->it_index); 256 printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
258 printk("\tnpages = 0x%llx\n", (u64)npages); 257 printk("\tnpages = 0x%llx\n", (u64)npages);
259 printk("\ttce[0] val = 0x%llx\n", tcep[0]); 258 printk("\ttce[0] val = 0x%llx\n", tcep[0]);
260 show_stack(current, (unsigned long *)__get_SP()); 259 dump_stack();
261 } 260 }
262 return ret; 261 return ret;
263} 262}
@@ -273,7 +272,7 @@ static void tce_free_pSeriesLP(struct iommu_table *tbl, long tcenum, long npages
273 printk("tce_free_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc); 272 printk("tce_free_pSeriesLP: plpar_tce_put failed. rc=%lld\n", rc);
274 printk("\tindex = 0x%llx\n", (u64)tbl->it_index); 273 printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
275 printk("\ttcenum = 0x%llx\n", (u64)tcenum); 274 printk("\ttcenum = 0x%llx\n", (u64)tcenum);
276 show_stack(current, (unsigned long *)__get_SP()); 275 dump_stack();
277 } 276 }
278 277
279 tcenum++; 278 tcenum++;
@@ -292,7 +291,7 @@ static void tce_freemulti_pSeriesLP(struct iommu_table *tbl, long tcenum, long n
292 printk("\trc = %lld\n", rc); 291 printk("\trc = %lld\n", rc);
293 printk("\tindex = 0x%llx\n", (u64)tbl->it_index); 292 printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
294 printk("\tnpages = 0x%llx\n", (u64)npages); 293 printk("\tnpages = 0x%llx\n", (u64)npages);
295 show_stack(current, (unsigned long *)__get_SP()); 294 dump_stack();
296 } 295 }
297} 296}
298 297
@@ -307,7 +306,7 @@ static unsigned long tce_get_pSeriesLP(struct iommu_table *tbl, long tcenum)
307 printk("tce_get_pSeriesLP: plpar_tce_get failed. rc=%lld\n", rc); 306 printk("tce_get_pSeriesLP: plpar_tce_get failed. rc=%lld\n", rc);
308 printk("\tindex = 0x%llx\n", (u64)tbl->it_index); 307 printk("\tindex = 0x%llx\n", (u64)tbl->it_index);
309 printk("\ttcenum = 0x%llx\n", (u64)tcenum); 308 printk("\ttcenum = 0x%llx\n", (u64)tcenum);
310 show_stack(current, (unsigned long *)__get_SP()); 309 dump_stack();
311 } 310 }
312 311
313 return tce_ret; 312 return tce_ret;
diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c
index 8c509d5397c6..f6880d2a40fb 100644
--- a/arch/powerpc/platforms/pseries/lpar.c
+++ b/arch/powerpc/platforms/pseries/lpar.c
@@ -43,6 +43,7 @@
43#include <asm/trace.h> 43#include <asm/trace.h>
44#include <asm/firmware.h> 44#include <asm/firmware.h>
45#include <asm/plpar_wrappers.h> 45#include <asm/plpar_wrappers.h>
46#include <asm/fadump.h>
46 47
47#include "pseries.h" 48#include "pseries.h"
48 49
@@ -247,8 +248,17 @@ static void pSeries_lpar_hptab_clear(void)
247 } 248 }
248 249
249#ifdef __LITTLE_ENDIAN__ 250#ifdef __LITTLE_ENDIAN__
250 /* Reset exceptions to big endian */ 251 /*
251 if (firmware_has_feature(FW_FEATURE_SET_MODE)) { 252 * Reset exceptions to big endian.
253 *
 254	 * FIXME: this is a hack for kexec; we need to reset the exception
 255	 * endianness before starting the new kernel, and this is a convenient
 256	 * place to do it.
257 *
258 * This is also called on boot when a fadump happens. In that case we
259 * must not change the exception endian mode.
260 */
261 if (firmware_has_feature(FW_FEATURE_SET_MODE) && !is_fadump_active()) {
252 long rc; 262 long rc;
253 263
254 rc = pseries_big_endian_exceptions(); 264 rc = pseries_big_endian_exceptions();
diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c
index 8ab5add4ac82..8b909e94fd9a 100644
--- a/arch/powerpc/platforms/pseries/msi.c
+++ b/arch/powerpc/platforms/pseries/msi.c
@@ -420,7 +420,7 @@ static int rtas_setup_msi_irqs(struct pci_dev *pdev, int nvec_in, int type)
420 */ 420 */
421again: 421again:
422 if (type == PCI_CAP_ID_MSI) { 422 if (type == PCI_CAP_ID_MSI) {
423 if (pdn->force_32bit_msi) { 423 if (pdev->no_64bit_msi) {
424 rc = rtas_change_msi(pdn, RTAS_CHANGE_32MSI_FN, nvec); 424 rc = rtas_change_msi(pdn, RTAS_CHANGE_32MSI_FN, nvec);
425 if (rc < 0) { 425 if (rc < 0) {
426 /* 426 /*
diff --git a/arch/powerpc/platforms/pseries/pseries.h b/arch/powerpc/platforms/pseries/pseries.h
index 361add62abf1..1796c5438cc6 100644
--- a/arch/powerpc/platforms/pseries/pseries.h
+++ b/arch/powerpc/platforms/pseries/pseries.h
@@ -56,7 +56,8 @@ extern void hvc_vio_init_early(void);
56/* Dynamic logical Partitioning/Mobility */ 56/* Dynamic logical Partitioning/Mobility */
57extern void dlpar_free_cc_nodes(struct device_node *); 57extern void dlpar_free_cc_nodes(struct device_node *);
58extern void dlpar_free_cc_property(struct property *); 58extern void dlpar_free_cc_property(struct property *);
59extern struct device_node *dlpar_configure_connector(u32, struct device_node *); 59extern struct device_node *dlpar_configure_connector(__be32,
60 struct device_node *);
60extern int dlpar_attach_node(struct device_node *); 61extern int dlpar_attach_node(struct device_node *);
61extern int dlpar_detach_node(struct device_node *); 62extern int dlpar_detach_node(struct device_node *);
62 63
diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c
index de40b48b460e..da08ed088157 100644
--- a/arch/powerpc/sysdev/fsl_msi.c
+++ b/arch/powerpc/sysdev/fsl_msi.c
@@ -361,7 +361,7 @@ static int fsl_msi_setup_hwirq(struct fsl_msi *msi, struct platform_device *dev,
361 cascade_data->virq = virt_msir; 361 cascade_data->virq = virt_msir;
362 msi->cascade_array[irq_index] = cascade_data; 362 msi->cascade_array[irq_index] = cascade_data;
363 363
364 ret = request_irq(virt_msir, fsl_msi_cascade, 0, 364 ret = request_irq(virt_msir, fsl_msi_cascade, IRQF_NO_THREAD,
365 "fsl-msi-cascade", cascade_data); 365 "fsl-msi-cascade", cascade_data);
366 if (ret) { 366 if (ret) {
367 dev_err(&dev->dev, "failed to request_irq(%d), ret = %d\n", 367 dev_err(&dev->dev, "failed to request_irq(%d), ret = %d\n",
diff --git a/arch/powerpc/sysdev/msi_bitmap.c b/arch/powerpc/sysdev/msi_bitmap.c
index 0c75214b6f92..73b64c73505b 100644
--- a/arch/powerpc/sysdev/msi_bitmap.c
+++ b/arch/powerpc/sysdev/msi_bitmap.c
@@ -145,59 +145,64 @@ void msi_bitmap_free(struct msi_bitmap *bmp)
145 145
146#ifdef CONFIG_MSI_BITMAP_SELFTEST 146#ifdef CONFIG_MSI_BITMAP_SELFTEST
147 147
148#define check(x) \
149 if (!(x)) printk("msi_bitmap: test failed at line %d\n", __LINE__);
150
151static void __init test_basics(void) 148static void __init test_basics(void)
152{ 149{
153 struct msi_bitmap bmp; 150 struct msi_bitmap bmp;
154 int i, size = 512; 151 int rc, i, size = 512;
155 152
156 /* Can't allocate a bitmap of 0 irqs */ 153 /* Can't allocate a bitmap of 0 irqs */
157 check(msi_bitmap_alloc(&bmp, 0, NULL) != 0); 154 WARN_ON(msi_bitmap_alloc(&bmp, 0, NULL) == 0);
158 155
159 /* of_node may be NULL */ 156 /* of_node may be NULL */
160 check(0 == msi_bitmap_alloc(&bmp, size, NULL)); 157 WARN_ON(msi_bitmap_alloc(&bmp, size, NULL));
161 158
162 /* Should all be free by default */ 159 /* Should all be free by default */
163 check(0 == bitmap_find_free_region(bmp.bitmap, size, 160 WARN_ON(bitmap_find_free_region(bmp.bitmap, size, get_count_order(size)));
164 get_count_order(size)));
165 bitmap_release_region(bmp.bitmap, 0, get_count_order(size)); 161 bitmap_release_region(bmp.bitmap, 0, get_count_order(size));
166 162
167 /* With no node, there's no msi-available-ranges, so expect > 0 */ 163 /* With no node, there's no msi-available-ranges, so expect > 0 */
168 check(msi_bitmap_reserve_dt_hwirqs(&bmp) > 0); 164 WARN_ON(msi_bitmap_reserve_dt_hwirqs(&bmp) <= 0);
169 165
170 /* Should all still be free */ 166 /* Should all still be free */
171 check(0 == bitmap_find_free_region(bmp.bitmap, size, 167 WARN_ON(bitmap_find_free_region(bmp.bitmap, size, get_count_order(size)));
172 get_count_order(size)));
173 bitmap_release_region(bmp.bitmap, 0, get_count_order(size)); 168 bitmap_release_region(bmp.bitmap, 0, get_count_order(size));
174 169
175 /* Check we can fill it up and then no more */ 170 /* Check we can fill it up and then no more */
176 for (i = 0; i < size; i++) 171 for (i = 0; i < size; i++)
177 check(msi_bitmap_alloc_hwirqs(&bmp, 1) >= 0); 172 WARN_ON(msi_bitmap_alloc_hwirqs(&bmp, 1) < 0);
178 173
179 check(msi_bitmap_alloc_hwirqs(&bmp, 1) < 0); 174 WARN_ON(msi_bitmap_alloc_hwirqs(&bmp, 1) >= 0);
180 175
181 /* Should all be allocated */ 176 /* Should all be allocated */
182 check(bitmap_find_free_region(bmp.bitmap, size, 0) < 0); 177 WARN_ON(bitmap_find_free_region(bmp.bitmap, size, 0) >= 0);
183 178
184 /* And if we free one we can then allocate another */ 179 /* And if we free one we can then allocate another */
185 msi_bitmap_free_hwirqs(&bmp, size / 2, 1); 180 msi_bitmap_free_hwirqs(&bmp, size / 2, 1);
186 check(msi_bitmap_alloc_hwirqs(&bmp, 1) == size / 2); 181 WARN_ON(msi_bitmap_alloc_hwirqs(&bmp, 1) != size / 2);
182
183 /* Free most of them for the alignment tests */
184 msi_bitmap_free_hwirqs(&bmp, 3, size - 3);
187 185
188 /* Check we get a naturally aligned offset */ 186 /* Check we get a naturally aligned offset */
189 check(msi_bitmap_alloc_hwirqs(&bmp, 2) % 2 == 0); 187 rc = msi_bitmap_alloc_hwirqs(&bmp, 2);
190 check(msi_bitmap_alloc_hwirqs(&bmp, 4) % 4 == 0); 188 WARN_ON(rc < 0 && rc % 2 != 0);
191 check(msi_bitmap_alloc_hwirqs(&bmp, 8) % 8 == 0); 189 rc = msi_bitmap_alloc_hwirqs(&bmp, 4);
192 check(msi_bitmap_alloc_hwirqs(&bmp, 9) % 16 == 0); 190 WARN_ON(rc < 0 && rc % 4 != 0);
193 check(msi_bitmap_alloc_hwirqs(&bmp, 3) % 4 == 0); 191 rc = msi_bitmap_alloc_hwirqs(&bmp, 8);
194 check(msi_bitmap_alloc_hwirqs(&bmp, 7) % 8 == 0); 192 WARN_ON(rc < 0 && rc % 8 != 0);
195 check(msi_bitmap_alloc_hwirqs(&bmp, 121) % 128 == 0); 193 rc = msi_bitmap_alloc_hwirqs(&bmp, 9);
194 WARN_ON(rc < 0 && rc % 16 != 0);
195 rc = msi_bitmap_alloc_hwirqs(&bmp, 3);
196 WARN_ON(rc < 0 && rc % 4 != 0);
197 rc = msi_bitmap_alloc_hwirqs(&bmp, 7);
198 WARN_ON(rc < 0 && rc % 8 != 0);
199 rc = msi_bitmap_alloc_hwirqs(&bmp, 121);
200 WARN_ON(rc < 0 && rc % 128 != 0);
196 201
197 msi_bitmap_free(&bmp); 202 msi_bitmap_free(&bmp);
198 203
199 /* Clients may check bitmap == NULL for "not-allocated" */ 204 /* Clients may WARN_ON bitmap == NULL for "not-allocated" */
200 check(bmp.bitmap == NULL); 205 WARN_ON(bmp.bitmap != NULL);
201 206
202 kfree(bmp.bitmap); 207 kfree(bmp.bitmap);
203} 208}
@@ -219,14 +224,13 @@ static void __init test_of_node(void)
219 of_node_init(&of_node); 224 of_node_init(&of_node);
220 of_node.full_name = node_name; 225 of_node.full_name = node_name;
221 226
222 check(0 == msi_bitmap_alloc(&bmp, size, &of_node)); 227 WARN_ON(msi_bitmap_alloc(&bmp, size, &of_node));
223 228
224 /* No msi-available-ranges, so expect > 0 */ 229 /* No msi-available-ranges, so expect > 0 */
225 check(msi_bitmap_reserve_dt_hwirqs(&bmp) > 0); 230 WARN_ON(msi_bitmap_reserve_dt_hwirqs(&bmp) <= 0);
226 231
227 /* Should all still be free */ 232 /* Should all still be free */
228 check(0 == bitmap_find_free_region(bmp.bitmap, size, 233 WARN_ON(bitmap_find_free_region(bmp.bitmap, size, get_count_order(size)));
229 get_count_order(size)));
230 bitmap_release_region(bmp.bitmap, 0, get_count_order(size)); 234 bitmap_release_region(bmp.bitmap, 0, get_count_order(size));
231 235
232 /* Now create a fake msi-available-ranges property */ 236 /* Now create a fake msi-available-ranges property */
@@ -240,11 +244,11 @@ static void __init test_of_node(void)
240 of_node.properties = &prop; 244 of_node.properties = &prop;
241 245
242 /* msi-available-ranges, so expect == 0 */ 246 /* msi-available-ranges, so expect == 0 */
243 check(msi_bitmap_reserve_dt_hwirqs(&bmp) == 0); 247 WARN_ON(msi_bitmap_reserve_dt_hwirqs(&bmp));
244 248
245 /* Check we got the expected result */ 249 /* Check we got the expected result */
246 check(0 == bitmap_parselist(expected_str, expected, size)); 250 WARN_ON(bitmap_parselist(expected_str, expected, size));
247 check(bitmap_equal(expected, bmp.bitmap, size)); 251 WARN_ON(!bitmap_equal(expected, bmp.bitmap, size));
248 252
249 msi_bitmap_free(&bmp); 253 msi_bitmap_free(&bmp);
250 kfree(bmp.bitmap); 254 kfree(bmp.bitmap);
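
One detail worth calling out in the conversion above: check(x) complained when x was false, while WARN_ON(cond) fires when cond is true, so every test condition is negated (e.g. "check(rc >= 0)" becomes "WARN_ON(rc < 0)"). A standalone sketch of the two styles follows; the real WARN_ON() is an expression that also dumps a backtrace, which is omitted here.

    #include <stdio.h>

    #define check(x) \
            do { if (!(x)) printf("test failed at line %d\n", __LINE__); } while (0)
    #define WARN_ON(cond) \
            do { if (cond) printf("WARNING at line %d\n", __LINE__); } while (0)

    int main(void)
    {
            int rc = -1;            /* pretend an allocation failed */

            check(rc >= 0);         /* old style: complain unless rc >= 0 */
            WARN_ON(rc < 0);        /* new style: complain when rc < 0 */
            return 0;
    }
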
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index b988b5addf86..c8efbb37d6e0 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -293,10 +293,10 @@ static inline void disable_surveillance(void)
293 args.token = rtas_token("set-indicator"); 293 args.token = rtas_token("set-indicator");
294 if (args.token == RTAS_UNKNOWN_SERVICE) 294 if (args.token == RTAS_UNKNOWN_SERVICE)
295 return; 295 return;
296 args.nargs = 3; 296 args.nargs = cpu_to_be32(3);
297 args.nret = 1; 297 args.nret = cpu_to_be32(1);
298 args.rets = &args.args[3]; 298 args.rets = &args.args[3];
299 args.args[0] = SURVEILLANCE_TOKEN; 299 args.args[0] = cpu_to_be32(SURVEILLANCE_TOKEN);
300 args.args[1] = 0; 300 args.args[1] = 0;
301 args.args[2] = 0; 301 args.args[2] = 0;
302 enter_rtas(__pa(&args)); 302 enter_rtas(__pa(&args));
diff --git a/arch/s390/configs/default_defconfig b/arch/s390/configs/default_defconfig
index 9d94fdd9f525..9432d0f202ef 100644
--- a/arch/s390/configs/default_defconfig
+++ b/arch/s390/configs/default_defconfig
@@ -35,7 +35,6 @@ CONFIG_MODULE_UNLOAD=y
35CONFIG_MODULE_FORCE_UNLOAD=y 35CONFIG_MODULE_FORCE_UNLOAD=y
36CONFIG_MODVERSIONS=y 36CONFIG_MODVERSIONS=y
37CONFIG_MODULE_SRCVERSION_ALL=y 37CONFIG_MODULE_SRCVERSION_ALL=y
38CONFIG_BLK_DEV_INTEGRITY=y
39CONFIG_BLK_DEV_THROTTLING=y 38CONFIG_BLK_DEV_THROTTLING=y
40CONFIG_PARTITION_ADVANCED=y 39CONFIG_PARTITION_ADVANCED=y
41CONFIG_IBM_PARTITION=y 40CONFIG_IBM_PARTITION=y
@@ -245,6 +244,7 @@ CONFIG_NF_TABLES_IPV4=m
245CONFIG_NFT_CHAIN_ROUTE_IPV4=m 244CONFIG_NFT_CHAIN_ROUTE_IPV4=m
246CONFIG_NFT_CHAIN_NAT_IPV4=m 245CONFIG_NFT_CHAIN_NAT_IPV4=m
247CONFIG_NF_TABLES_ARP=m 246CONFIG_NF_TABLES_ARP=m
247CONFIG_NF_NAT_IPV4=m
248CONFIG_IP_NF_IPTABLES=m 248CONFIG_IP_NF_IPTABLES=m
249CONFIG_IP_NF_MATCH_AH=m 249CONFIG_IP_NF_MATCH_AH=m
250CONFIG_IP_NF_MATCH_ECN=m 250CONFIG_IP_NF_MATCH_ECN=m
@@ -252,11 +252,6 @@ CONFIG_IP_NF_MATCH_RPFILTER=m
252CONFIG_IP_NF_MATCH_TTL=m 252CONFIG_IP_NF_MATCH_TTL=m
253CONFIG_IP_NF_FILTER=m 253CONFIG_IP_NF_FILTER=m
254CONFIG_IP_NF_TARGET_REJECT=m 254CONFIG_IP_NF_TARGET_REJECT=m
255CONFIG_IP_NF_TARGET_ULOG=m
256CONFIG_NF_NAT_IPV4=m
257CONFIG_IP_NF_TARGET_MASQUERADE=m
258CONFIG_IP_NF_TARGET_NETMAP=m
259CONFIG_IP_NF_TARGET_REDIRECT=m
260CONFIG_IP_NF_MANGLE=m 255CONFIG_IP_NF_MANGLE=m
261CONFIG_IP_NF_TARGET_CLUSTERIP=m 256CONFIG_IP_NF_TARGET_CLUSTERIP=m
262CONFIG_IP_NF_TARGET_ECN=m 257CONFIG_IP_NF_TARGET_ECN=m
@@ -270,6 +265,7 @@ CONFIG_NF_CONNTRACK_IPV6=m
270CONFIG_NF_TABLES_IPV6=m 265CONFIG_NF_TABLES_IPV6=m
271CONFIG_NFT_CHAIN_ROUTE_IPV6=m 266CONFIG_NFT_CHAIN_ROUTE_IPV6=m
272CONFIG_NFT_CHAIN_NAT_IPV6=m 267CONFIG_NFT_CHAIN_NAT_IPV6=m
268CONFIG_NF_NAT_IPV6=m
273CONFIG_IP6_NF_IPTABLES=m 269CONFIG_IP6_NF_IPTABLES=m
274CONFIG_IP6_NF_MATCH_AH=m 270CONFIG_IP6_NF_MATCH_AH=m
275CONFIG_IP6_NF_MATCH_EUI64=m 271CONFIG_IP6_NF_MATCH_EUI64=m
@@ -286,9 +282,6 @@ CONFIG_IP6_NF_TARGET_REJECT=m
286CONFIG_IP6_NF_MANGLE=m 282CONFIG_IP6_NF_MANGLE=m
287CONFIG_IP6_NF_RAW=m 283CONFIG_IP6_NF_RAW=m
288CONFIG_IP6_NF_SECURITY=m 284CONFIG_IP6_NF_SECURITY=m
289CONFIG_NF_NAT_IPV6=m
290CONFIG_IP6_NF_TARGET_MASQUERADE=m
291CONFIG_IP6_NF_TARGET_NPT=m
292CONFIG_NF_TABLES_BRIDGE=m 285CONFIG_NF_TABLES_BRIDGE=m
293CONFIG_NET_SCTPPROBE=m 286CONFIG_NET_SCTPPROBE=m
294CONFIG_RDS=m 287CONFIG_RDS=m
@@ -374,14 +367,13 @@ CONFIG_BLK_DEV_SR=m
374CONFIG_CHR_DEV_SG=y 367CONFIG_CHR_DEV_SG=y
375CONFIG_CHR_DEV_SCH=m 368CONFIG_CHR_DEV_SCH=m
376CONFIG_SCSI_ENCLOSURE=m 369CONFIG_SCSI_ENCLOSURE=m
377CONFIG_SCSI_MULTI_LUN=y
378CONFIG_SCSI_CONSTANTS=y 370CONFIG_SCSI_CONSTANTS=y
379CONFIG_SCSI_LOGGING=y 371CONFIG_SCSI_LOGGING=y
380CONFIG_SCSI_SPI_ATTRS=m 372CONFIG_SCSI_SPI_ATTRS=m
373CONFIG_SCSI_FC_ATTRS=y
381CONFIG_SCSI_SAS_LIBSAS=m 374CONFIG_SCSI_SAS_LIBSAS=m
382CONFIG_SCSI_SRP_ATTRS=m 375CONFIG_SCSI_SRP_ATTRS=m
383CONFIG_ISCSI_TCP=m 376CONFIG_ISCSI_TCP=m
384CONFIG_LIBFCOE=m
385CONFIG_SCSI_DEBUG=m 377CONFIG_SCSI_DEBUG=m
386CONFIG_ZFCP=y 378CONFIG_ZFCP=y
387CONFIG_SCSI_VIRTIO=m 379CONFIG_SCSI_VIRTIO=m
@@ -427,7 +419,6 @@ CONFIG_VIRTIO_NET=m
427CONFIG_NLMON=m 419CONFIG_NLMON=m
428CONFIG_VHOST_NET=m 420CONFIG_VHOST_NET=m
429# CONFIG_NET_VENDOR_ARC is not set 421# CONFIG_NET_VENDOR_ARC is not set
430# CONFIG_NET_CADENCE is not set
431# CONFIG_NET_VENDOR_CHELSIO is not set 422# CONFIG_NET_VENDOR_CHELSIO is not set
432# CONFIG_NET_VENDOR_INTEL is not set 423# CONFIG_NET_VENDOR_INTEL is not set
433# CONFIG_NET_VENDOR_MARVELL is not set 424# CONFIG_NET_VENDOR_MARVELL is not set
@@ -481,14 +472,14 @@ CONFIG_JFS_FS=m
481CONFIG_JFS_POSIX_ACL=y 472CONFIG_JFS_POSIX_ACL=y
482CONFIG_JFS_SECURITY=y 473CONFIG_JFS_SECURITY=y
483CONFIG_JFS_STATISTICS=y 474CONFIG_JFS_STATISTICS=y
484CONFIG_XFS_FS=m 475CONFIG_XFS_FS=y
485CONFIG_XFS_QUOTA=y 476CONFIG_XFS_QUOTA=y
486CONFIG_XFS_POSIX_ACL=y 477CONFIG_XFS_POSIX_ACL=y
487CONFIG_XFS_RT=y 478CONFIG_XFS_RT=y
488CONFIG_XFS_DEBUG=y 479CONFIG_XFS_DEBUG=y
489CONFIG_GFS2_FS=m 480CONFIG_GFS2_FS=m
490CONFIG_OCFS2_FS=m 481CONFIG_OCFS2_FS=m
491CONFIG_BTRFS_FS=m 482CONFIG_BTRFS_FS=y
492CONFIG_BTRFS_FS_POSIX_ACL=y 483CONFIG_BTRFS_FS_POSIX_ACL=y
493CONFIG_NILFS2_FS=m 484CONFIG_NILFS2_FS=m
494CONFIG_FANOTIFY=y 485CONFIG_FANOTIFY=y
@@ -574,7 +565,6 @@ CONFIG_DEBUG_SHIRQ=y
574CONFIG_DETECT_HUNG_TASK=y 565CONFIG_DETECT_HUNG_TASK=y
575CONFIG_TIMER_STATS=y 566CONFIG_TIMER_STATS=y
576CONFIG_DEBUG_RT_MUTEXES=y 567CONFIG_DEBUG_RT_MUTEXES=y
577CONFIG_RT_MUTEX_TESTER=y
578CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y 568CONFIG_DEBUG_WW_MUTEX_SLOWPATH=y
579CONFIG_PROVE_LOCKING=y 569CONFIG_PROVE_LOCKING=y
580CONFIG_LOCK_STAT=y 570CONFIG_LOCK_STAT=y
@@ -600,8 +590,13 @@ CONFIG_FAULT_INJECTION_DEBUG_FS=y
600CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y 590CONFIG_FAULT_INJECTION_STACKTRACE_FILTER=y
601CONFIG_LATENCYTOP=y 591CONFIG_LATENCYTOP=y
602CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y 592CONFIG_DEBUG_STRICT_USER_COPY_CHECKS=y
593CONFIG_IRQSOFF_TRACER=y
594CONFIG_PREEMPT_TRACER=y
595CONFIG_SCHED_TRACER=y
596CONFIG_FTRACE_SYSCALLS=y
597CONFIG_STACK_TRACER=y
603CONFIG_BLK_DEV_IO_TRACE=y 598CONFIG_BLK_DEV_IO_TRACE=y
604# CONFIG_KPROBE_EVENT is not set 599CONFIG_UPROBE_EVENT=y
605CONFIG_LKDTM=m 600CONFIG_LKDTM=m
606CONFIG_TEST_LIST_SORT=y 601CONFIG_TEST_LIST_SORT=y
607CONFIG_KPROBES_SANITY_TEST=y 602CONFIG_KPROBES_SANITY_TEST=y
@@ -609,7 +604,10 @@ CONFIG_RBTREE_TEST=y
609CONFIG_INTERVAL_TREE_TEST=m 604CONFIG_INTERVAL_TREE_TEST=m
610CONFIG_PERCPU_TEST=m 605CONFIG_PERCPU_TEST=m
611CONFIG_ATOMIC64_SELFTEST=y 606CONFIG_ATOMIC64_SELFTEST=y
607CONFIG_TEST_STRING_HELPERS=y
608CONFIG_TEST_KSTRTOX=y
612CONFIG_DMA_API_DEBUG=y 609CONFIG_DMA_API_DEBUG=y
610CONFIG_TEST_BPF=m
613# CONFIG_STRICT_DEVMEM is not set 611# CONFIG_STRICT_DEVMEM is not set
614CONFIG_S390_PTDUMP=y 612CONFIG_S390_PTDUMP=y
615CONFIG_ENCRYPTED_KEYS=m 613CONFIG_ENCRYPTED_KEYS=m
@@ -673,12 +671,6 @@ CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
673CONFIG_X509_CERTIFICATE_PARSER=m 671CONFIG_X509_CERTIFICATE_PARSER=m
674CONFIG_CRC7=m 672CONFIG_CRC7=m
675CONFIG_CRC8=m 673CONFIG_CRC8=m
676CONFIG_XZ_DEC_X86=y
677CONFIG_XZ_DEC_POWERPC=y
678CONFIG_XZ_DEC_IA64=y
679CONFIG_XZ_DEC_ARM=y
680CONFIG_XZ_DEC_ARMTHUMB=y
681CONFIG_XZ_DEC_SPARC=y
682CONFIG_CORDIC=m 674CONFIG_CORDIC=m
683CONFIG_CMM=m 675CONFIG_CMM=m
684CONFIG_APPLDATA_BASE=y 676CONFIG_APPLDATA_BASE=y
diff --git a/arch/s390/configs/gcov_defconfig b/arch/s390/configs/gcov_defconfig
index 90f514baa37d..219dca6ea926 100644
--- a/arch/s390/configs/gcov_defconfig
+++ b/arch/s390/configs/gcov_defconfig
@@ -35,7 +35,6 @@ CONFIG_MODULE_UNLOAD=y
35CONFIG_MODULE_FORCE_UNLOAD=y 35CONFIG_MODULE_FORCE_UNLOAD=y
36CONFIG_MODVERSIONS=y 36CONFIG_MODVERSIONS=y
37CONFIG_MODULE_SRCVERSION_ALL=y 37CONFIG_MODULE_SRCVERSION_ALL=y
38CONFIG_BLK_DEV_INTEGRITY=y
39CONFIG_BLK_DEV_THROTTLING=y 38CONFIG_BLK_DEV_THROTTLING=y
40CONFIG_PARTITION_ADVANCED=y 39CONFIG_PARTITION_ADVANCED=y
41CONFIG_IBM_PARTITION=y 40CONFIG_IBM_PARTITION=y
@@ -243,6 +242,7 @@ CONFIG_NF_TABLES_IPV4=m
243CONFIG_NFT_CHAIN_ROUTE_IPV4=m 242CONFIG_NFT_CHAIN_ROUTE_IPV4=m
244CONFIG_NFT_CHAIN_NAT_IPV4=m 243CONFIG_NFT_CHAIN_NAT_IPV4=m
245CONFIG_NF_TABLES_ARP=m 244CONFIG_NF_TABLES_ARP=m
245CONFIG_NF_NAT_IPV4=m
246CONFIG_IP_NF_IPTABLES=m 246CONFIG_IP_NF_IPTABLES=m
247CONFIG_IP_NF_MATCH_AH=m 247CONFIG_IP_NF_MATCH_AH=m
248CONFIG_IP_NF_MATCH_ECN=m 248CONFIG_IP_NF_MATCH_ECN=m
@@ -250,11 +250,6 @@ CONFIG_IP_NF_MATCH_RPFILTER=m
250CONFIG_IP_NF_MATCH_TTL=m 250CONFIG_IP_NF_MATCH_TTL=m
251CONFIG_IP_NF_FILTER=m 251CONFIG_IP_NF_FILTER=m
252CONFIG_IP_NF_TARGET_REJECT=m 252CONFIG_IP_NF_TARGET_REJECT=m
253CONFIG_IP_NF_TARGET_ULOG=m
254CONFIG_NF_NAT_IPV4=m
255CONFIG_IP_NF_TARGET_MASQUERADE=m
256CONFIG_IP_NF_TARGET_NETMAP=m
257CONFIG_IP_NF_TARGET_REDIRECT=m
258CONFIG_IP_NF_MANGLE=m 253CONFIG_IP_NF_MANGLE=m
259CONFIG_IP_NF_TARGET_CLUSTERIP=m 254CONFIG_IP_NF_TARGET_CLUSTERIP=m
260CONFIG_IP_NF_TARGET_ECN=m 255CONFIG_IP_NF_TARGET_ECN=m
@@ -268,6 +263,7 @@ CONFIG_NF_CONNTRACK_IPV6=m
268CONFIG_NF_TABLES_IPV6=m 263CONFIG_NF_TABLES_IPV6=m
269CONFIG_NFT_CHAIN_ROUTE_IPV6=m 264CONFIG_NFT_CHAIN_ROUTE_IPV6=m
270CONFIG_NFT_CHAIN_NAT_IPV6=m 265CONFIG_NFT_CHAIN_NAT_IPV6=m
266CONFIG_NF_NAT_IPV6=m
271CONFIG_IP6_NF_IPTABLES=m 267CONFIG_IP6_NF_IPTABLES=m
272CONFIG_IP6_NF_MATCH_AH=m 268CONFIG_IP6_NF_MATCH_AH=m
273CONFIG_IP6_NF_MATCH_EUI64=m 269CONFIG_IP6_NF_MATCH_EUI64=m
@@ -284,9 +280,6 @@ CONFIG_IP6_NF_TARGET_REJECT=m
284CONFIG_IP6_NF_MANGLE=m 280CONFIG_IP6_NF_MANGLE=m
285CONFIG_IP6_NF_RAW=m 281CONFIG_IP6_NF_RAW=m
286CONFIG_IP6_NF_SECURITY=m 282CONFIG_IP6_NF_SECURITY=m
287CONFIG_NF_NAT_IPV6=m
288CONFIG_IP6_NF_TARGET_MASQUERADE=m
289CONFIG_IP6_NF_TARGET_NPT=m
290CONFIG_NF_TABLES_BRIDGE=m 283CONFIG_NF_TABLES_BRIDGE=m
291CONFIG_NET_SCTPPROBE=m 284CONFIG_NET_SCTPPROBE=m
292CONFIG_RDS=m 285CONFIG_RDS=m
@@ -371,14 +364,13 @@ CONFIG_BLK_DEV_SR=m
371CONFIG_CHR_DEV_SG=y 364CONFIG_CHR_DEV_SG=y
372CONFIG_CHR_DEV_SCH=m 365CONFIG_CHR_DEV_SCH=m
373CONFIG_SCSI_ENCLOSURE=m 366CONFIG_SCSI_ENCLOSURE=m
374CONFIG_SCSI_MULTI_LUN=y
375CONFIG_SCSI_CONSTANTS=y 367CONFIG_SCSI_CONSTANTS=y
376CONFIG_SCSI_LOGGING=y 368CONFIG_SCSI_LOGGING=y
377CONFIG_SCSI_SPI_ATTRS=m 369CONFIG_SCSI_SPI_ATTRS=m
370CONFIG_SCSI_FC_ATTRS=y
378CONFIG_SCSI_SAS_LIBSAS=m 371CONFIG_SCSI_SAS_LIBSAS=m
379CONFIG_SCSI_SRP_ATTRS=m 372CONFIG_SCSI_SRP_ATTRS=m
380CONFIG_ISCSI_TCP=m 373CONFIG_ISCSI_TCP=m
381CONFIG_LIBFCOE=m
382CONFIG_SCSI_DEBUG=m 374CONFIG_SCSI_DEBUG=m
383CONFIG_ZFCP=y 375CONFIG_ZFCP=y
384CONFIG_SCSI_VIRTIO=m 376CONFIG_SCSI_VIRTIO=m
@@ -424,7 +416,6 @@ CONFIG_VIRTIO_NET=m
424CONFIG_NLMON=m 416CONFIG_NLMON=m
425CONFIG_VHOST_NET=m 417CONFIG_VHOST_NET=m
426# CONFIG_NET_VENDOR_ARC is not set 418# CONFIG_NET_VENDOR_ARC is not set
427# CONFIG_NET_CADENCE is not set
428# CONFIG_NET_VENDOR_CHELSIO is not set 419# CONFIG_NET_VENDOR_CHELSIO is not set
429# CONFIG_NET_VENDOR_INTEL is not set 420# CONFIG_NET_VENDOR_INTEL is not set
430# CONFIG_NET_VENDOR_MARVELL is not set 421# CONFIG_NET_VENDOR_MARVELL is not set
@@ -478,13 +469,13 @@ CONFIG_JFS_FS=m
478CONFIG_JFS_POSIX_ACL=y 469CONFIG_JFS_POSIX_ACL=y
479CONFIG_JFS_SECURITY=y 470CONFIG_JFS_SECURITY=y
480CONFIG_JFS_STATISTICS=y 471CONFIG_JFS_STATISTICS=y
481CONFIG_XFS_FS=m 472CONFIG_XFS_FS=y
482CONFIG_XFS_QUOTA=y 473CONFIG_XFS_QUOTA=y
483CONFIG_XFS_POSIX_ACL=y 474CONFIG_XFS_POSIX_ACL=y
484CONFIG_XFS_RT=y 475CONFIG_XFS_RT=y
485CONFIG_GFS2_FS=m 476CONFIG_GFS2_FS=m
486CONFIG_OCFS2_FS=m 477CONFIG_OCFS2_FS=m
487CONFIG_BTRFS_FS=m 478CONFIG_BTRFS_FS=y
488CONFIG_BTRFS_FS_POSIX_ACL=y 479CONFIG_BTRFS_FS_POSIX_ACL=y
489CONFIG_NILFS2_FS=m 480CONFIG_NILFS2_FS=m
490CONFIG_FANOTIFY=y 481CONFIG_FANOTIFY=y
@@ -626,12 +617,6 @@ CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
626CONFIG_X509_CERTIFICATE_PARSER=m 617CONFIG_X509_CERTIFICATE_PARSER=m
627CONFIG_CRC7=m 618CONFIG_CRC7=m
628CONFIG_CRC8=m 619CONFIG_CRC8=m
629CONFIG_XZ_DEC_X86=y
630CONFIG_XZ_DEC_POWERPC=y
631CONFIG_XZ_DEC_IA64=y
632CONFIG_XZ_DEC_ARM=y
633CONFIG_XZ_DEC_ARMTHUMB=y
634CONFIG_XZ_DEC_SPARC=y
635CONFIG_CORDIC=m 620CONFIG_CORDIC=m
636CONFIG_CMM=m 621CONFIG_CMM=m
637CONFIG_APPLDATA_BASE=y 622CONFIG_APPLDATA_BASE=y
diff --git a/arch/s390/configs/performance_defconfig b/arch/s390/configs/performance_defconfig
index 13559d32af69..822c2f2e0c25 100644
--- a/arch/s390/configs/performance_defconfig
+++ b/arch/s390/configs/performance_defconfig
@@ -33,7 +33,6 @@ CONFIG_MODULE_UNLOAD=y
33CONFIG_MODULE_FORCE_UNLOAD=y 33CONFIG_MODULE_FORCE_UNLOAD=y
34CONFIG_MODVERSIONS=y 34CONFIG_MODVERSIONS=y
35CONFIG_MODULE_SRCVERSION_ALL=y 35CONFIG_MODULE_SRCVERSION_ALL=y
36CONFIG_BLK_DEV_INTEGRITY=y
37CONFIG_BLK_DEV_THROTTLING=y 36CONFIG_BLK_DEV_THROTTLING=y
38CONFIG_PARTITION_ADVANCED=y 37CONFIG_PARTITION_ADVANCED=y
39CONFIG_IBM_PARTITION=y 38CONFIG_IBM_PARTITION=y
@@ -241,6 +240,7 @@ CONFIG_NF_TABLES_IPV4=m
241CONFIG_NFT_CHAIN_ROUTE_IPV4=m 240CONFIG_NFT_CHAIN_ROUTE_IPV4=m
242CONFIG_NFT_CHAIN_NAT_IPV4=m 241CONFIG_NFT_CHAIN_NAT_IPV4=m
243CONFIG_NF_TABLES_ARP=m 242CONFIG_NF_TABLES_ARP=m
243CONFIG_NF_NAT_IPV4=m
244CONFIG_IP_NF_IPTABLES=m 244CONFIG_IP_NF_IPTABLES=m
245CONFIG_IP_NF_MATCH_AH=m 245CONFIG_IP_NF_MATCH_AH=m
246CONFIG_IP_NF_MATCH_ECN=m 246CONFIG_IP_NF_MATCH_ECN=m
@@ -248,11 +248,6 @@ CONFIG_IP_NF_MATCH_RPFILTER=m
248CONFIG_IP_NF_MATCH_TTL=m 248CONFIG_IP_NF_MATCH_TTL=m
249CONFIG_IP_NF_FILTER=m 249CONFIG_IP_NF_FILTER=m
250CONFIG_IP_NF_TARGET_REJECT=m 250CONFIG_IP_NF_TARGET_REJECT=m
251CONFIG_IP_NF_TARGET_ULOG=m
252CONFIG_NF_NAT_IPV4=m
253CONFIG_IP_NF_TARGET_MASQUERADE=m
254CONFIG_IP_NF_TARGET_NETMAP=m
255CONFIG_IP_NF_TARGET_REDIRECT=m
256CONFIG_IP_NF_MANGLE=m 251CONFIG_IP_NF_MANGLE=m
257CONFIG_IP_NF_TARGET_CLUSTERIP=m 252CONFIG_IP_NF_TARGET_CLUSTERIP=m
258CONFIG_IP_NF_TARGET_ECN=m 253CONFIG_IP_NF_TARGET_ECN=m
@@ -266,6 +261,7 @@ CONFIG_NF_CONNTRACK_IPV6=m
266CONFIG_NF_TABLES_IPV6=m 261CONFIG_NF_TABLES_IPV6=m
267CONFIG_NFT_CHAIN_ROUTE_IPV6=m 262CONFIG_NFT_CHAIN_ROUTE_IPV6=m
268CONFIG_NFT_CHAIN_NAT_IPV6=m 263CONFIG_NFT_CHAIN_NAT_IPV6=m
264CONFIG_NF_NAT_IPV6=m
269CONFIG_IP6_NF_IPTABLES=m 265CONFIG_IP6_NF_IPTABLES=m
270CONFIG_IP6_NF_MATCH_AH=m 266CONFIG_IP6_NF_MATCH_AH=m
271CONFIG_IP6_NF_MATCH_EUI64=m 267CONFIG_IP6_NF_MATCH_EUI64=m
@@ -282,9 +278,6 @@ CONFIG_IP6_NF_TARGET_REJECT=m
282CONFIG_IP6_NF_MANGLE=m 278CONFIG_IP6_NF_MANGLE=m
283CONFIG_IP6_NF_RAW=m 279CONFIG_IP6_NF_RAW=m
284CONFIG_IP6_NF_SECURITY=m 280CONFIG_IP6_NF_SECURITY=m
285CONFIG_NF_NAT_IPV6=m
286CONFIG_IP6_NF_TARGET_MASQUERADE=m
287CONFIG_IP6_NF_TARGET_NPT=m
288CONFIG_NF_TABLES_BRIDGE=m 281CONFIG_NF_TABLES_BRIDGE=m
289CONFIG_NET_SCTPPROBE=m 282CONFIG_NET_SCTPPROBE=m
290CONFIG_RDS=m 283CONFIG_RDS=m
@@ -369,14 +362,13 @@ CONFIG_BLK_DEV_SR=m
369CONFIG_CHR_DEV_SG=y 362CONFIG_CHR_DEV_SG=y
370CONFIG_CHR_DEV_SCH=m 363CONFIG_CHR_DEV_SCH=m
371CONFIG_SCSI_ENCLOSURE=m 364CONFIG_SCSI_ENCLOSURE=m
372CONFIG_SCSI_MULTI_LUN=y
373CONFIG_SCSI_CONSTANTS=y 365CONFIG_SCSI_CONSTANTS=y
374CONFIG_SCSI_LOGGING=y 366CONFIG_SCSI_LOGGING=y
375CONFIG_SCSI_SPI_ATTRS=m 367CONFIG_SCSI_SPI_ATTRS=m
368CONFIG_SCSI_FC_ATTRS=y
376CONFIG_SCSI_SAS_LIBSAS=m 369CONFIG_SCSI_SAS_LIBSAS=m
377CONFIG_SCSI_SRP_ATTRS=m 370CONFIG_SCSI_SRP_ATTRS=m
378CONFIG_ISCSI_TCP=m 371CONFIG_ISCSI_TCP=m
379CONFIG_LIBFCOE=m
380CONFIG_SCSI_DEBUG=m 372CONFIG_SCSI_DEBUG=m
381CONFIG_ZFCP=y 373CONFIG_ZFCP=y
382CONFIG_SCSI_VIRTIO=m 374CONFIG_SCSI_VIRTIO=m
@@ -422,7 +414,6 @@ CONFIG_VIRTIO_NET=m
422CONFIG_NLMON=m 414CONFIG_NLMON=m
423CONFIG_VHOST_NET=m 415CONFIG_VHOST_NET=m
424# CONFIG_NET_VENDOR_ARC is not set 416# CONFIG_NET_VENDOR_ARC is not set
425# CONFIG_NET_CADENCE is not set
426# CONFIG_NET_VENDOR_CHELSIO is not set 417# CONFIG_NET_VENDOR_CHELSIO is not set
427# CONFIG_NET_VENDOR_INTEL is not set 418# CONFIG_NET_VENDOR_INTEL is not set
428# CONFIG_NET_VENDOR_MARVELL is not set 419# CONFIG_NET_VENDOR_MARVELL is not set
@@ -476,13 +467,13 @@ CONFIG_JFS_FS=m
476CONFIG_JFS_POSIX_ACL=y 467CONFIG_JFS_POSIX_ACL=y
477CONFIG_JFS_SECURITY=y 468CONFIG_JFS_SECURITY=y
478CONFIG_JFS_STATISTICS=y 469CONFIG_JFS_STATISTICS=y
479CONFIG_XFS_FS=m 470CONFIG_XFS_FS=y
480CONFIG_XFS_QUOTA=y 471CONFIG_XFS_QUOTA=y
481CONFIG_XFS_POSIX_ACL=y 472CONFIG_XFS_POSIX_ACL=y
482CONFIG_XFS_RT=y 473CONFIG_XFS_RT=y
483CONFIG_GFS2_FS=m 474CONFIG_GFS2_FS=m
484CONFIG_OCFS2_FS=m 475CONFIG_OCFS2_FS=m
485CONFIG_BTRFS_FS=m 476CONFIG_BTRFS_FS=y
486CONFIG_BTRFS_FS_POSIX_ACL=y 477CONFIG_BTRFS_FS_POSIX_ACL=y
487CONFIG_NILFS2_FS=m 478CONFIG_NILFS2_FS=m
488CONFIG_FANOTIFY=y 479CONFIG_FANOTIFY=y
@@ -550,8 +541,11 @@ CONFIG_TIMER_STATS=y
550CONFIG_RCU_TORTURE_TEST=m 541CONFIG_RCU_TORTURE_TEST=m
551CONFIG_RCU_CPU_STALL_TIMEOUT=60 542CONFIG_RCU_CPU_STALL_TIMEOUT=60
552CONFIG_LATENCYTOP=y 543CONFIG_LATENCYTOP=y
544CONFIG_SCHED_TRACER=y
545CONFIG_FTRACE_SYSCALLS=y
546CONFIG_STACK_TRACER=y
553CONFIG_BLK_DEV_IO_TRACE=y 547CONFIG_BLK_DEV_IO_TRACE=y
554# CONFIG_KPROBE_EVENT is not set 548CONFIG_UPROBE_EVENT=y
555CONFIG_LKDTM=m 549CONFIG_LKDTM=m
556CONFIG_PERCPU_TEST=m 550CONFIG_PERCPU_TEST=m
557CONFIG_ATOMIC64_SELFTEST=y 551CONFIG_ATOMIC64_SELFTEST=y
@@ -618,12 +612,6 @@ CONFIG_ASYMMETRIC_PUBLIC_KEY_SUBTYPE=m
618CONFIG_X509_CERTIFICATE_PARSER=m 612CONFIG_X509_CERTIFICATE_PARSER=m
619CONFIG_CRC7=m 613CONFIG_CRC7=m
620CONFIG_CRC8=m 614CONFIG_CRC8=m
621CONFIG_XZ_DEC_X86=y
622CONFIG_XZ_DEC_POWERPC=y
623CONFIG_XZ_DEC_IA64=y
624CONFIG_XZ_DEC_ARM=y
625CONFIG_XZ_DEC_ARMTHUMB=y
626CONFIG_XZ_DEC_SPARC=y
627CONFIG_CORDIC=m 615CONFIG_CORDIC=m
628CONFIG_CMM=m 616CONFIG_CMM=m
629CONFIG_APPLDATA_BASE=y 617CONFIG_APPLDATA_BASE=y
diff --git a/arch/s390/configs/zfcpdump_defconfig b/arch/s390/configs/zfcpdump_defconfig
index e376789f2d8d..9d63051ebec4 100644
--- a/arch/s390/configs/zfcpdump_defconfig
+++ b/arch/s390/configs/zfcpdump_defconfig
@@ -22,8 +22,8 @@ CONFIG_HZ_100=y
22CONFIG_CRASH_DUMP=y 22CONFIG_CRASH_DUMP=y
23# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set 23# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
24# CONFIG_SECCOMP is not set 24# CONFIG_SECCOMP is not set
25# CONFIG_IUCV is not set
26CONFIG_NET=y 25CONFIG_NET=y
26# CONFIG_IUCV is not set
27CONFIG_ATM=y 27CONFIG_ATM=y
28CONFIG_ATM_LANE=y 28CONFIG_ATM_LANE=y
29CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 29CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
@@ -36,9 +36,9 @@ CONFIG_ENCLOSURE_SERVICES=y
36CONFIG_SCSI=y 36CONFIG_SCSI=y
37CONFIG_BLK_DEV_SD=y 37CONFIG_BLK_DEV_SD=y
38CONFIG_SCSI_ENCLOSURE=y 38CONFIG_SCSI_ENCLOSURE=y
39CONFIG_SCSI_MULTI_LUN=y
40CONFIG_SCSI_CONSTANTS=y 39CONFIG_SCSI_CONSTANTS=y
41CONFIG_SCSI_LOGGING=y 40CONFIG_SCSI_LOGGING=y
41CONFIG_SCSI_FC_ATTRS=y
42CONFIG_SCSI_SRP_ATTRS=y 42CONFIG_SCSI_SRP_ATTRS=y
43CONFIG_ZFCP=y 43CONFIG_ZFCP=y
44# CONFIG_INPUT_MOUSEDEV_PSAUX is not set 44# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
@@ -75,12 +75,6 @@ CONFIG_DEBUG_KERNEL=y
75CONFIG_RCU_CPU_STALL_TIMEOUT=60 75CONFIG_RCU_CPU_STALL_TIMEOUT=60
76# CONFIG_FTRACE is not set 76# CONFIG_FTRACE is not set
77# CONFIG_STRICT_DEVMEM is not set 77# CONFIG_STRICT_DEVMEM is not set
78CONFIG_XZ_DEC_X86=y
79CONFIG_XZ_DEC_POWERPC=y
80CONFIG_XZ_DEC_IA64=y
81CONFIG_XZ_DEC_ARM=y
82CONFIG_XZ_DEC_ARMTHUMB=y
83CONFIG_XZ_DEC_SPARC=y
84# CONFIG_PFAULT is not set 78# CONFIG_PFAULT is not set
85# CONFIG_S390_HYPFS_FS is not set 79# CONFIG_S390_HYPFS_FS is not set
86# CONFIG_VIRTUALIZATION is not set 80# CONFIG_VIRTUALIZATION is not set
diff --git a/arch/s390/defconfig b/arch/s390/defconfig
index fab35a8efa4f..785c5f24d6f9 100644
--- a/arch/s390/defconfig
+++ b/arch/s390/defconfig
@@ -92,10 +92,10 @@ CONFIG_CHR_DEV_ST=y
92CONFIG_BLK_DEV_SR=y 92CONFIG_BLK_DEV_SR=y
93CONFIG_BLK_DEV_SR_VENDOR=y 93CONFIG_BLK_DEV_SR_VENDOR=y
94CONFIG_CHR_DEV_SG=y 94CONFIG_CHR_DEV_SG=y
95CONFIG_SCSI_MULTI_LUN=y
96CONFIG_SCSI_CONSTANTS=y 95CONFIG_SCSI_CONSTANTS=y
97CONFIG_SCSI_LOGGING=y 96CONFIG_SCSI_LOGGING=y
98CONFIG_SCSI_SCAN_ASYNC=y 97CONFIG_SCSI_SCAN_ASYNC=y
98CONFIG_SCSI_FC_ATTRS=y
99CONFIG_ZFCP=y 99CONFIG_ZFCP=y
100CONFIG_SCSI_VIRTIO=y 100CONFIG_SCSI_VIRTIO=y
101CONFIG_NETDEVICES=y 101CONFIG_NETDEVICES=y
@@ -164,14 +164,13 @@ CONFIG_CRYPTO_CMAC=m
164CONFIG_CRYPTO_XCBC=m 164CONFIG_CRYPTO_XCBC=m
165CONFIG_CRYPTO_VMAC=m 165CONFIG_CRYPTO_VMAC=m
166CONFIG_CRYPTO_CRC32=m 166CONFIG_CRYPTO_CRC32=m
167CONFIG_CRYPTO_CRCT10DIF=m
168CONFIG_CRYPTO_MD4=m 167CONFIG_CRYPTO_MD4=m
169CONFIG_CRYPTO_MICHAEL_MIC=m 168CONFIG_CRYPTO_MICHAEL_MIC=m
170CONFIG_CRYPTO_RMD128=m 169CONFIG_CRYPTO_RMD128=m
171CONFIG_CRYPTO_RMD160=m 170CONFIG_CRYPTO_RMD160=m
172CONFIG_CRYPTO_RMD256=m 171CONFIG_CRYPTO_RMD256=m
173CONFIG_CRYPTO_RMD320=m 172CONFIG_CRYPTO_RMD320=m
174CONFIG_CRYPTO_SHA256=m 173CONFIG_CRYPTO_SHA256=y
175CONFIG_CRYPTO_SHA512=m 174CONFIG_CRYPTO_SHA512=m
176CONFIG_CRYPTO_TGR192=m 175CONFIG_CRYPTO_TGR192=m
177CONFIG_CRYPTO_WP512=m 176CONFIG_CRYPTO_WP512=m
diff --git a/arch/s390/include/uapi/asm/unistd.h b/arch/s390/include/uapi/asm/unistd.h
index 940ac49198db..4197c89c52d4 100644
--- a/arch/s390/include/uapi/asm/unistd.h
+++ b/arch/s390/include/uapi/asm/unistd.h
@@ -286,7 +286,8 @@
286#define __NR_seccomp 348 286#define __NR_seccomp 348
287#define __NR_getrandom 349 287#define __NR_getrandom 349
288#define __NR_memfd_create 350 288#define __NR_memfd_create 350
289#define NR_syscalls 351 289#define __NR_bpf 351
290#define NR_syscalls 352
290 291
291/* 292/*
292 * There are some system calls that are not present on 64 bit, some 293 * There are some system calls that are not present on 64 bit, some
diff --git a/arch/s390/kernel/compat_wrapper.c b/arch/s390/kernel/compat_wrapper.c
index faf6caa510dc..c4f7a3d655b8 100644
--- a/arch/s390/kernel/compat_wrapper.c
+++ b/arch/s390/kernel/compat_wrapper.c
@@ -217,3 +217,4 @@ COMPAT_SYSCALL_WRAP5(renameat2, int, olddfd, const char __user *, oldname, int,
217COMPAT_SYSCALL_WRAP3(seccomp, unsigned int, op, unsigned int, flags, const char __user *, uargs) 217COMPAT_SYSCALL_WRAP3(seccomp, unsigned int, op, unsigned int, flags, const char __user *, uargs)
218COMPAT_SYSCALL_WRAP3(getrandom, char __user *, buf, size_t, count, unsigned int, flags) 218COMPAT_SYSCALL_WRAP3(getrandom, char __user *, buf, size_t, count, unsigned int, flags)
219COMPAT_SYSCALL_WRAP2(memfd_create, const char __user *, uname, unsigned int, flags) 219COMPAT_SYSCALL_WRAP2(memfd_create, const char __user *, uname, unsigned int, flags)
220COMPAT_SYSCALL_WRAP3(bpf, int, cmd, union bpf_attr *, attr, unsigned int, size);
diff --git a/arch/s390/kernel/ftrace.c b/arch/s390/kernel/ftrace.c
index 51d14fe5eb9a..ca1cabb3a96c 100644
--- a/arch/s390/kernel/ftrace.c
+++ b/arch/s390/kernel/ftrace.c
@@ -121,6 +121,8 @@ unsigned long __kprobes prepare_ftrace_return(unsigned long parent,
121{ 121{
122 struct ftrace_graph_ent trace; 122 struct ftrace_graph_ent trace;
123 123
124 if (unlikely(ftrace_graph_is_dead()))
125 goto out;
124 if (unlikely(atomic_read(&current->tracing_graph_pause))) 126 if (unlikely(atomic_read(&current->tracing_graph_pause)))
125 goto out; 127 goto out;
126 ip = (ip & PSW_ADDR_INSN) - MCOUNT_INSN_SIZE; 128 ip = (ip & PSW_ADDR_INSN) - MCOUNT_INSN_SIZE;
diff --git a/arch/s390/kernel/nmi.c b/arch/s390/kernel/nmi.c
index dd1c24ceda50..3f51cf4e8f02 100644
--- a/arch/s390/kernel/nmi.c
+++ b/arch/s390/kernel/nmi.c
@@ -54,12 +54,8 @@ void s390_handle_mcck(void)
54 */ 54 */
55 local_irq_save(flags); 55 local_irq_save(flags);
56 local_mcck_disable(); 56 local_mcck_disable();
57 /* 57 mcck = *this_cpu_ptr(&cpu_mcck);
58 * Ummm... Does this make sense at all? Copying the percpu struct 58 memset(this_cpu_ptr(&cpu_mcck), 0, sizeof(mcck));
59 * and then zapping it one statement later?
60 */
61 memcpy(&mcck, this_cpu_ptr(&cpu_mcck), sizeof(mcck));
62 memset(&mcck, 0, sizeof(struct mcck_struct));
63 clear_cpu_flag(CIF_MCCK_PENDING); 59 clear_cpu_flag(CIF_MCCK_PENDING);
64 local_mcck_enable(); 60 local_mcck_enable();
65 local_irq_restore(flags); 61 local_irq_restore(flags);
diff --git a/arch/s390/kernel/perf_cpum_sf.c b/arch/s390/kernel/perf_cpum_sf.c
index 08e761318c17..b878f12a9597 100644
--- a/arch/s390/kernel/perf_cpum_sf.c
+++ b/arch/s390/kernel/perf_cpum_sf.c
@@ -1411,11 +1411,6 @@ static void cpumsf_pmu_del(struct perf_event *event, int flags)
1411 perf_pmu_enable(event->pmu); 1411 perf_pmu_enable(event->pmu);
1412} 1412}
1413 1413
1414static int cpumsf_pmu_event_idx(struct perf_event *event)
1415{
1416 return event->hw.idx;
1417}
1418
1419CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC, PERF_EVENT_CPUM_SF); 1414CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC, PERF_EVENT_CPUM_SF);
1420CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC_DIAG, PERF_EVENT_CPUM_SF_DIAG); 1415CPUMF_EVENT_ATTR(SF, SF_CYCLES_BASIC_DIAG, PERF_EVENT_CPUM_SF_DIAG);
1421 1416
@@ -1458,7 +1453,6 @@ static struct pmu cpumf_sampling = {
1458 .stop = cpumsf_pmu_stop, 1453 .stop = cpumsf_pmu_stop,
1459 .read = cpumsf_pmu_read, 1454 .read = cpumsf_pmu_read,
1460 1455
1461 .event_idx = cpumsf_pmu_event_idx,
1462 .attr_groups = cpumsf_pmu_attr_groups, 1456 .attr_groups = cpumsf_pmu_attr_groups,
1463}; 1457};
1464 1458
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 6fe886ac2db5..9f7087fd58de 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -359,3 +359,4 @@ SYSCALL(sys_renameat2,sys_renameat2,compat_sys_renameat2)
359SYSCALL(sys_seccomp,sys_seccomp,compat_sys_seccomp) 359SYSCALL(sys_seccomp,sys_seccomp,compat_sys_seccomp)
360SYSCALL(sys_getrandom,sys_getrandom,compat_sys_getrandom) 360SYSCALL(sys_getrandom,sys_getrandom,compat_sys_getrandom)
361SYSCALL(sys_memfd_create,sys_memfd_create,compat_sys_memfd_create) /* 350 */ 361SYSCALL(sys_memfd_create,sys_memfd_create,compat_sys_memfd_create) /* 350 */
362SYSCALL(sys_bpf,sys_bpf,compat_sys_bpf)
diff --git a/arch/s390/kernel/uprobes.c b/arch/s390/kernel/uprobes.c
index 956f4f7a591c..f6b3cd056ec2 100644
--- a/arch/s390/kernel/uprobes.c
+++ b/arch/s390/kernel/uprobes.c
@@ -5,13 +5,13 @@
5 * Author(s): Jan Willeke, 5 * Author(s): Jan Willeke,
6 */ 6 */
7 7
8#include <linux/kprobes.h>
9#include <linux/uaccess.h> 8#include <linux/uaccess.h>
10#include <linux/uprobes.h> 9#include <linux/uprobes.h>
11#include <linux/compat.h> 10#include <linux/compat.h>
12#include <linux/kdebug.h> 11#include <linux/kdebug.h>
13#include <asm/switch_to.h> 12#include <asm/switch_to.h>
14#include <asm/facility.h> 13#include <asm/facility.h>
14#include <asm/kprobes.h>
15#include <asm/dis.h> 15#include <asm/dis.h>
16#include "entry.h" 16#include "entry.h"
17 17
diff --git a/arch/s390/kernel/vdso32/clock_gettime.S b/arch/s390/kernel/vdso32/clock_gettime.S
index 48c2206a3956..5eec9afbb5b5 100644
--- a/arch/s390/kernel/vdso32/clock_gettime.S
+++ b/arch/s390/kernel/vdso32/clock_gettime.S
@@ -19,6 +19,7 @@
19 .type __kernel_clock_gettime,@function 19 .type __kernel_clock_gettime,@function
20__kernel_clock_gettime: 20__kernel_clock_gettime:
21 .cfi_startproc 21 .cfi_startproc
22 ahi %r15,-16
22 basr %r5,0 23 basr %r5,0
230: al %r5,21f-0b(%r5) /* get &_vdso_data */ 240: al %r5,21f-0b(%r5) /* get &_vdso_data */
24 chi %r2,__CLOCK_REALTIME_COARSE 25 chi %r2,__CLOCK_REALTIME_COARSE
@@ -34,8 +35,8 @@ __kernel_clock_gettime:
341: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */ 351: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */
35 tml %r4,0x0001 /* pending update ? loop */ 36 tml %r4,0x0001 /* pending update ? loop */
36 jnz 1b 37 jnz 1b
37 stcke 24(%r15) /* Store TOD clock */ 38 stcke 0(%r15) /* Store TOD clock */
38 lm %r0,%r1,25(%r15) 39 lm %r0,%r1,1(%r15)
39 s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ 40 s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
40 sl %r1,__VDSO_XTIME_STAMP+4(%r5) 41 sl %r1,__VDSO_XTIME_STAMP+4(%r5)
41 brc 3,2f 42 brc 3,2f
@@ -70,6 +71,7 @@ __kernel_clock_gettime:
708: st %r2,0(%r3) /* store tp->tv_sec */ 718: st %r2,0(%r3) /* store tp->tv_sec */
71 st %r1,4(%r3) /* store tp->tv_nsec */ 72 st %r1,4(%r3) /* store tp->tv_nsec */
72 lhi %r2,0 73 lhi %r2,0
74 ahi %r15,16
73 br %r14 75 br %r14
74 76
75 /* CLOCK_MONOTONIC_COARSE */ 77 /* CLOCK_MONOTONIC_COARSE */
@@ -96,8 +98,8 @@ __kernel_clock_gettime:
9611: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */ 9811: l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */
97 tml %r4,0x0001 /* pending update ? loop */ 99 tml %r4,0x0001 /* pending update ? loop */
98 jnz 11b 100 jnz 11b
99 stcke 24(%r15) /* Store TOD clock */ 101 stcke 0(%r15) /* Store TOD clock */
100 lm %r0,%r1,25(%r15) 102 lm %r0,%r1,1(%r15)
101 s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ 103 s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
102 sl %r1,__VDSO_XTIME_STAMP+4(%r5) 104 sl %r1,__VDSO_XTIME_STAMP+4(%r5)
103 brc 3,12f 105 brc 3,12f
@@ -132,11 +134,13 @@ __kernel_clock_gettime:
13217: st %r2,0(%r3) /* store tp->tv_sec */ 13417: st %r2,0(%r3) /* store tp->tv_sec */
133 st %r1,4(%r3) /* store tp->tv_nsec */ 135 st %r1,4(%r3) /* store tp->tv_nsec */
134 lhi %r2,0 136 lhi %r2,0
137 ahi %r15,16
135 br %r14 138 br %r14
136 139
137 /* Fallback to system call */ 140 /* Fallback to system call */
13819: lhi %r1,__NR_clock_gettime 14119: lhi %r1,__NR_clock_gettime
139 svc 0 142 svc 0
143 ahi %r15,16
140 br %r14 144 br %r14
141 145
14220: .long 1000000000 14620: .long 1000000000
diff --git a/arch/s390/kernel/vdso32/gettimeofday.S b/arch/s390/kernel/vdso32/gettimeofday.S
index 60def5f562db..719de6186b20 100644
--- a/arch/s390/kernel/vdso32/gettimeofday.S
+++ b/arch/s390/kernel/vdso32/gettimeofday.S
@@ -19,6 +19,7 @@
19 .type __kernel_gettimeofday,@function 19 .type __kernel_gettimeofday,@function
20__kernel_gettimeofday: 20__kernel_gettimeofday:
21 .cfi_startproc 21 .cfi_startproc
22 ahi %r15,-16
22 basr %r5,0 23 basr %r5,0
230: al %r5,13f-0b(%r5) /* get &_vdso_data */ 240: al %r5,13f-0b(%r5) /* get &_vdso_data */
241: ltr %r3,%r3 /* check if tz is NULL */ 251: ltr %r3,%r3 /* check if tz is NULL */
@@ -29,30 +30,30 @@ __kernel_gettimeofday:
29 l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */ 30 l %r4,__VDSO_UPD_COUNT+4(%r5) /* load update counter */
30 tml %r4,0x0001 /* pending update ? loop */ 31 tml %r4,0x0001 /* pending update ? loop */
31 jnz 1b 32 jnz 1b
32 stcke 24(%r15) /* Store TOD clock */ 33 stcke 0(%r15) /* Store TOD clock */
33 lm %r0,%r1,25(%r15) 34 lm %r0,%r1,1(%r15)
34 s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ 35 s %r0,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
35 sl %r1,__VDSO_XTIME_STAMP+4(%r5) 36 sl %r1,__VDSO_XTIME_STAMP+4(%r5)
36 brc 3,3f 37 brc 3,3f
37 ahi %r0,-1 38 ahi %r0,-1
383: ms %r0,__VDSO_TK_MULT(%r5) /* * tk->mult */ 393: ms %r0,__VDSO_TK_MULT(%r5) /* * tk->mult */
39 st %r0,24(%r15) 40 st %r0,0(%r15)
40 l %r0,__VDSO_TK_MULT(%r5) 41 l %r0,__VDSO_TK_MULT(%r5)
41 ltr %r1,%r1 42 ltr %r1,%r1
42 mr %r0,%r0 43 mr %r0,%r0
43 jnm 4f 44 jnm 4f
44 a %r0,__VDSO_TK_MULT(%r5) 45 a %r0,__VDSO_TK_MULT(%r5)
454: al %r0,24(%r15) 464: al %r0,0(%r15)
46 al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */ 47 al %r0,__VDSO_XTIME_NSEC(%r5) /* + xtime */
47 al %r1,__VDSO_XTIME_NSEC+4(%r5) 48 al %r1,__VDSO_XTIME_NSEC+4(%r5)
48 brc 12,5f 49 brc 12,5f
49 ahi %r0,1 50 ahi %r0,1
505: mvc 24(4,%r15),__VDSO_XTIME_SEC+4(%r5) 515: mvc 0(4,%r15),__VDSO_XTIME_SEC+4(%r5)
51 cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */ 52 cl %r4,__VDSO_UPD_COUNT+4(%r5) /* check update counter */
52 jne 1b 53 jne 1b
53 l %r4,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */ 54 l %r4,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
54 srdl %r0,0(%r4) /* >> tk->shift */ 55 srdl %r0,0(%r4) /* >> tk->shift */
55 l %r4,24(%r15) /* get tv_sec from stack */ 56 l %r4,0(%r15) /* get tv_sec from stack */
56 basr %r5,0 57 basr %r5,0
576: ltr %r0,%r0 586: ltr %r0,%r0
58 jnz 7f 59 jnz 7f
@@ -71,6 +72,7 @@ __kernel_gettimeofday:
719: srl %r0,6 729: srl %r0,6
72 st %r0,4(%r2) /* store tv->tv_usec */ 73 st %r0,4(%r2) /* store tv->tv_usec */
7310: slr %r2,%r2 7410: slr %r2,%r2
75 ahi %r15,16
74 br %r14 76 br %r14
7511: .long 1000000000 7711: .long 1000000000
7612: .long 274877907 7812: .long 274877907
diff --git a/arch/s390/kernel/vdso64/clock_gettime.S b/arch/s390/kernel/vdso64/clock_gettime.S
index 9d9761f8e110..7699e735ae28 100644
--- a/arch/s390/kernel/vdso64/clock_gettime.S
+++ b/arch/s390/kernel/vdso64/clock_gettime.S
@@ -19,6 +19,7 @@
19 .type __kernel_clock_gettime,@function 19 .type __kernel_clock_gettime,@function
20__kernel_clock_gettime: 20__kernel_clock_gettime:
21 .cfi_startproc 21 .cfi_startproc
22 aghi %r15,-16
22 larl %r5,_vdso_data 23 larl %r5,_vdso_data
23 cghi %r2,__CLOCK_REALTIME_COARSE 24 cghi %r2,__CLOCK_REALTIME_COARSE
24 je 4f 25 je 4f
@@ -37,10 +38,10 @@ __kernel_clock_gettime:
370: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */ 380: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
38 tmll %r4,0x0001 /* pending update ? loop */ 39 tmll %r4,0x0001 /* pending update ? loop */
39 jnz 0b 40 jnz 0b
40 stcke 48(%r15) /* Store TOD clock */ 41 stcke 0(%r15) /* Store TOD clock */
41 lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */ 42 lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
42 lg %r0,__VDSO_WTOM_SEC(%r5) 43 lg %r0,__VDSO_WTOM_SEC(%r5)
43 lg %r1,49(%r15) 44 lg %r1,1(%r15)
44 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ 45 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
45 msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */ 46 msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
46 alg %r1,__VDSO_WTOM_NSEC(%r5) 47 alg %r1,__VDSO_WTOM_NSEC(%r5)
@@ -56,6 +57,7 @@ __kernel_clock_gettime:
562: stg %r0,0(%r3) /* store tp->tv_sec */ 572: stg %r0,0(%r3) /* store tp->tv_sec */
57 stg %r1,8(%r3) /* store tp->tv_nsec */ 58 stg %r1,8(%r3) /* store tp->tv_nsec */
58 lghi %r2,0 59 lghi %r2,0
60 aghi %r15,16
59 br %r14 61 br %r14
60 62
61 /* CLOCK_MONOTONIC_COARSE */ 63 /* CLOCK_MONOTONIC_COARSE */
@@ -82,9 +84,9 @@ __kernel_clock_gettime:
825: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */ 845: lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
83 tmll %r4,0x0001 /* pending update ? loop */ 85 tmll %r4,0x0001 /* pending update ? loop */
84 jnz 5b 86 jnz 5b
85 stcke 48(%r15) /* Store TOD clock */ 87 stcke 0(%r15) /* Store TOD clock */
86 lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */ 88 lgf %r2,__VDSO_TK_SHIFT(%r5) /* Timekeeper shift */
87 lg %r1,49(%r15) 89 lg %r1,1(%r15)
88 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ 90 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
89 msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */ 91 msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
90 alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */ 92 alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
@@ -101,6 +103,7 @@ __kernel_clock_gettime:
1017: stg %r0,0(%r3) /* store tp->tv_sec */ 1037: stg %r0,0(%r3) /* store tp->tv_sec */
102 stg %r1,8(%r3) /* store tp->tv_nsec */ 104 stg %r1,8(%r3) /* store tp->tv_nsec */
103 lghi %r2,0 105 lghi %r2,0
106 aghi %r15,16
104 br %r14 107 br %r14
105 108
106 /* CLOCK_THREAD_CPUTIME_ID for this thread */ 109 /* CLOCK_THREAD_CPUTIME_ID for this thread */
@@ -134,11 +137,13 @@ __kernel_clock_gettime:
134 slgr %r4,%r0 /* r4 = tv_nsec */ 137 slgr %r4,%r0 /* r4 = tv_nsec */
135 stg %r4,8(%r3) 138 stg %r4,8(%r3)
136 lghi %r2,0 139 lghi %r2,0
140 aghi %r15,16
137 br %r14 141 br %r14
138 142
139 /* Fallback to system call */ 143 /* Fallback to system call */
14012: lghi %r1,__NR_clock_gettime 14412: lghi %r1,__NR_clock_gettime
141 svc 0 145 svc 0
146 aghi %r15,16
142 br %r14 147 br %r14
143 148
14413: .quad 1000000000 14913: .quad 1000000000
diff --git a/arch/s390/kernel/vdso64/gettimeofday.S b/arch/s390/kernel/vdso64/gettimeofday.S
index 7a344995a97f..6ce46707663c 100644
--- a/arch/s390/kernel/vdso64/gettimeofday.S
+++ b/arch/s390/kernel/vdso64/gettimeofday.S
@@ -19,6 +19,7 @@
19 .type __kernel_gettimeofday,@function 19 .type __kernel_gettimeofday,@function
20__kernel_gettimeofday: 20__kernel_gettimeofday:
21 .cfi_startproc 21 .cfi_startproc
22 aghi %r15,-16
22 larl %r5,_vdso_data 23 larl %r5,_vdso_data
230: ltgr %r3,%r3 /* check if tz is NULL */ 240: ltgr %r3,%r3 /* check if tz is NULL */
24 je 1f 25 je 1f
@@ -28,8 +29,8 @@ __kernel_gettimeofday:
28 lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */ 29 lg %r4,__VDSO_UPD_COUNT(%r5) /* load update counter */
29 tmll %r4,0x0001 /* pending update ? loop */ 30 tmll %r4,0x0001 /* pending update ? loop */
30 jnz 0b 31 jnz 0b
31 stcke 48(%r15) /* Store TOD clock */ 32 stcke 0(%r15) /* Store TOD clock */
32 lg %r1,49(%r15) 33 lg %r1,1(%r15)
33 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */ 34 sg %r1,__VDSO_XTIME_STAMP(%r5) /* TOD - cycle_last */
34 msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */ 35 msgf %r1,__VDSO_TK_MULT(%r5) /* * tk->mult */
35 alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */ 36 alg %r1,__VDSO_XTIME_NSEC(%r5) /* + tk->xtime_nsec */
@@ -50,6 +51,7 @@ __kernel_gettimeofday:
50 srlg %r0,%r0,6 51 srlg %r0,%r0,6
51 stg %r0,8(%r2) /* store tv->tv_usec */ 52 stg %r0,8(%r2) /* store tv->tv_usec */
524: lghi %r2,0 534: lghi %r2,0
54 aghi %r15,16
53 br %r14 55 br %r14
545: .quad 1000000000 565: .quad 1000000000
55 .long 274877907 57 .long 274877907
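
The vdso hunks above give __kernel_clock_gettime and __kernel_gettimeofday a private 16-byte stack area (the aghi %r15,-16 on entry, undone by aghi %r15,16 on every return path), so STCKE now stores the TOD clock at 0(%r15) instead of reusing the caller-owned save area at 48(%r15). The arithmetic the assembly performs is the usual timekeeper conversion; a rough C sketch with illustrative field names, not the real _vdso_data layout:

        struct vdso_sample {                            /* illustrative names only */
                unsigned long long cycle_last;          /* __VDSO_XTIME_STAMP */
                unsigned long long mult;                /* __VDSO_TK_MULT */
                unsigned int shift;                     /* __VDSO_TK_SHIFT */
                unsigned long long base_sec;            /* __VDSO_WTOM_SEC / xtime seconds */
                unsigned long long base_nsec;           /* stored pre-shifted, like tk->xtime_nsec */
        };

        static void vdso_sample_to_timespec(const struct vdso_sample *s,
                                            unsigned long long tod,
                                            unsigned long long *sec,
                                            unsigned long long *nsec)
        {
                unsigned long long ns;

                ns = (tod - s->cycle_last) * s->mult + s->base_nsec;
                ns >>= s->shift;
                *sec = s->base_sec + ns / 1000000000ULL;
                *nsec = ns % 1000000000ULL;
        }

The retry loop on __VDSO_UPD_COUNT in the assembly corresponds to the seqcount-style retry a C implementation would wrap around this read.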
diff --git a/arch/s390/kernel/vtime.c b/arch/s390/kernel/vtime.c
index 416f2a323ba5..7f0089d9a4aa 100644
--- a/arch/s390/kernel/vtime.c
+++ b/arch/s390/kernel/vtime.c
@@ -66,7 +66,11 @@ static int do_account_vtime(struct task_struct *tsk, int hardirq_offset)
66 clock = S390_lowcore.last_update_clock; 66 clock = S390_lowcore.last_update_clock;
67 asm volatile( 67 asm volatile(
68 " stpt %0\n" /* Store current cpu timer value */ 68 " stpt %0\n" /* Store current cpu timer value */
69#ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
70 " stckf %1" /* Store current tod clock value */
71#else
69 " stck %1" /* Store current tod clock value */ 72 " stck %1" /* Store current tod clock value */
73#endif
70 : "=m" (S390_lowcore.last_update_timer), 74 : "=m" (S390_lowcore.last_update_timer),
71 "=m" (S390_lowcore.last_update_clock)); 75 "=m" (S390_lowcore.last_update_clock));
72 S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer; 76 S390_lowcore.system_timer += timer - S390_lowcore.last_update_timer;
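
The vtime.c hunk switches the accounting path to STCKF (store clock fast) when the kernel is built for z9-109 or newer; unlike STCK it does not imply the serialization points, which matters because do_account_vtime() runs on every context switch. A minimal sketch of how such a selection is typically wrapped (hypothetical helper, not code from this patch):

        static inline unsigned long long sample_tod_clock(void)
        {
                unsigned long long clk;

        #ifdef CONFIG_HAVE_MARCH_Z9_109_FEATURES
                asm volatile("stckf %0" : "=Q" (clk) : : "cc");  /* no serialization */
        #else
                asm volatile("stck %0" : "=Q" (clk) : : "cc");   /* pre-z9 fallback */
        #endif
                return clk;
        }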
diff --git a/arch/s390/lib/probes.c b/arch/s390/lib/probes.c
index c5d64a099719..ae90e1ae3607 100644
--- a/arch/s390/lib/probes.c
+++ b/arch/s390/lib/probes.c
@@ -4,7 +4,7 @@
4 * Copyright IBM Corp. 2014 4 * Copyright IBM Corp. 2014
5 */ 5 */
6 6
7#include <linux/kprobes.h> 7#include <asm/kprobes.h>
8#include <asm/dis.h> 8#include <asm/dis.h>
9 9
10int probe_is_prohibited_opcode(u16 *insn) 10int probe_is_prohibited_opcode(u16 *insn)
diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
index 296b61a4af59..1b79ca67392f 100644
--- a/arch/s390/mm/pgtable.c
+++ b/arch/s390/mm/pgtable.c
@@ -656,7 +656,7 @@ void __gmap_zap(struct gmap *gmap, unsigned long gaddr)
656 } 656 }
657 pgste_set_unlock(ptep, pgste); 657 pgste_set_unlock(ptep, pgste);
658out_pte: 658out_pte:
659 pte_unmap_unlock(*ptep, ptl); 659 pte_unmap_unlock(ptep, ptl);
660} 660}
661EXPORT_SYMBOL_GPL(__gmap_zap); 661EXPORT_SYMBOL_GPL(__gmap_zap);
662 662
@@ -943,7 +943,7 @@ retry:
943 } 943 }
944 if (!(pte_val(*ptep) & _PAGE_INVALID) && 944 if (!(pte_val(*ptep) & _PAGE_INVALID) &&
945 (pte_val(*ptep) & _PAGE_PROTECT)) { 945 (pte_val(*ptep) & _PAGE_PROTECT)) {
946 pte_unmap_unlock(*ptep, ptl); 946 pte_unmap_unlock(ptep, ptl);
947 if (fixup_user_fault(current, mm, addr, FAULT_FLAG_WRITE)) { 947 if (fixup_user_fault(current, mm, addr, FAULT_FLAG_WRITE)) {
948 up_read(&mm->mmap_sem); 948 up_read(&mm->mmap_sem);
949 return -EFAULT; 949 return -EFAULT;
@@ -974,7 +974,7 @@ retry:
974 pgste_val(new) |= PGSTE_UC_BIT; 974 pgste_val(new) |= PGSTE_UC_BIT;
975 975
976 pgste_set_unlock(ptep, new); 976 pgste_set_unlock(ptep, new);
977 pte_unmap_unlock(*ptep, ptl); 977 pte_unmap_unlock(ptep, ptl);
978 up_read(&mm->mmap_sem); 978 up_read(&mm->mmap_sem);
979 return 0; 979 return 0;
980} 980}
diff --git a/arch/sh/kernel/cpu/sh3/setup-sh770x.c b/arch/sh/kernel/cpu/sh3/setup-sh770x.c
index 9139d14b9c53..538c10db3537 100644
--- a/arch/sh/kernel/cpu/sh3/setup-sh770x.c
+++ b/arch/sh/kernel/cpu/sh3/setup-sh770x.c
@@ -118,7 +118,7 @@ static struct plat_sci_port scif0_platform_data = {
118}; 118};
119 119
120static struct resource scif0_resources[] = { 120static struct resource scif0_resources[] = {
121 DEFINE_RES_MEM(0xfffffe80, 0x100), 121 DEFINE_RES_MEM(0xfffffe80, 0x10),
122 DEFINE_RES_IRQ(evt2irq(0x4e0)), 122 DEFINE_RES_IRQ(evt2irq(0x4e0)),
123}; 123};
124 124
@@ -143,7 +143,7 @@ static struct plat_sci_port scif1_platform_data = {
143}; 143};
144 144
145static struct resource scif1_resources[] = { 145static struct resource scif1_resources[] = {
146 DEFINE_RES_MEM(0xa4000150, 0x100), 146 DEFINE_RES_MEM(0xa4000150, 0x10),
147 DEFINE_RES_IRQ(evt2irq(0x900)), 147 DEFINE_RES_IRQ(evt2irq(0x900)),
148}; 148};
149 149
@@ -169,7 +169,7 @@ static struct plat_sci_port scif2_platform_data = {
169}; 169};
170 170
171static struct resource scif2_resources[] = { 171static struct resource scif2_resources[] = {
172 DEFINE_RES_MEM(0xa4000140, 0x100), 172 DEFINE_RES_MEM(0xa4000140, 0x10),
173 DEFINE_RES_IRQ(evt2irq(0x880)), 173 DEFINE_RES_IRQ(evt2irq(0x880)),
174}; 174};
175 175
diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h
index 765c1776ec9f..0e69b7e7a439 100644
--- a/arch/sparc/include/asm/atomic_32.h
+++ b/arch/sparc/include/asm/atomic_32.h
@@ -22,7 +22,7 @@
22 22
23int atomic_add_return(int, atomic_t *); 23int atomic_add_return(int, atomic_t *);
24int atomic_cmpxchg(atomic_t *, int, int); 24int atomic_cmpxchg(atomic_t *, int, int);
25#define atomic_xchg(v, new) (xchg(&((v)->counter), new)) 25int atomic_xchg(atomic_t *, int);
26int __atomic_add_unless(atomic_t *, int, int); 26int __atomic_add_unless(atomic_t *, int, int);
27void atomic_set(atomic_t *, int); 27void atomic_set(atomic_t *, int);
28 28
diff --git a/arch/sparc/include/asm/cmpxchg_32.h b/arch/sparc/include/asm/cmpxchg_32.h
index 32c29a133f9d..d38b52dca216 100644
--- a/arch/sparc/include/asm/cmpxchg_32.h
+++ b/arch/sparc/include/asm/cmpxchg_32.h
@@ -11,22 +11,14 @@
11#ifndef __ARCH_SPARC_CMPXCHG__ 11#ifndef __ARCH_SPARC_CMPXCHG__
12#define __ARCH_SPARC_CMPXCHG__ 12#define __ARCH_SPARC_CMPXCHG__
13 13
14static inline unsigned long xchg_u32(__volatile__ unsigned long *m, unsigned long val) 14unsigned long __xchg_u32(volatile u32 *m, u32 new);
15{
16 __asm__ __volatile__("swap [%2], %0"
17 : "=&r" (val)
18 : "0" (val), "r" (m)
19 : "memory");
20 return val;
21}
22
23void __xchg_called_with_bad_pointer(void); 15void __xchg_called_with_bad_pointer(void);
24 16
25static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int size) 17static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int size)
26{ 18{
27 switch (size) { 19 switch (size) {
28 case 4: 20 case 4:
29 return xchg_u32(ptr, x); 21 return __xchg_u32(ptr, x);
30 } 22 }
31 __xchg_called_with_bad_pointer(); 23 __xchg_called_with_bad_pointer();
32 return x; 24 return x;
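
Taken together with the atomic32.c hunk further down, these two headers stop sparc32 from doing the exchange with an inline swap instruction: atomic_xchg() and __xchg_u32() become out-of-line helpers that serialize on the same ATOMIC_HASH spin locks as the other emulated atomics. A hypothetical illustration of the interleaving the inline version allowed (assuming the hash-locked atomic_cmpxchg() that already existed):

        static atomic_t refcnt = ATOMIC_INIT(1);

        void cpu_a(void)
        {
                /* read-test-write done entirely under ATOMIC_HASH(&refcnt) */
                atomic_cmpxchg(&refcnt, 1, 2);
        }

        void cpu_b(void)
        {
                /*
                 * The old macro expanded to a bare "swap" on refcnt.counter and
                 * took no lock, so its store could land between cpu_a's read and
                 * write.  Routing it through atomic_xchg()/__xchg_u32() makes
                 * both sides contend for the same spinlock instead.
                 */
                atomic_xchg(&refcnt, 5);
        }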
diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
index 5b1b52a04ad6..7e064c68c5ec 100644
--- a/arch/sparc/include/asm/dma-mapping.h
+++ b/arch/sparc/include/asm/dma-mapping.h
@@ -12,6 +12,14 @@ int dma_supported(struct device *dev, u64 mask);
12#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) 12#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
13#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) 13#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
14 14
15static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
16 enum dma_data_direction dir)
17{
18 /* Since dma_{alloc,free}_noncoherent() allocated coherent memory, this
19 * routine can be a nop.
20 */
21}
22
15extern struct dma_map_ops *dma_ops; 23extern struct dma_map_ops *dma_ops;
16extern struct dma_map_ops *leon_dma_ops; 24extern struct dma_map_ops *leon_dma_ops;
17extern struct dma_map_ops pci32_dma_ops; 25extern struct dma_map_ops pci32_dma_ops;
diff --git a/arch/sparc/include/asm/oplib_64.h b/arch/sparc/include/asm/oplib_64.h
index f34682430fcf..2e3a4add8591 100644
--- a/arch/sparc/include/asm/oplib_64.h
+++ b/arch/sparc/include/asm/oplib_64.h
@@ -62,7 +62,8 @@ struct linux_mem_p1275 {
62/* You must call prom_init() before using any of the library services, 62/* You must call prom_init() before using any of the library services,
63 * preferably as early as possible. Pass it the romvec pointer. 63 * preferably as early as possible. Pass it the romvec pointer.
64 */ 64 */
65void prom_init(void *cif_handler, void *cif_stack); 65void prom_init(void *cif_handler);
66void prom_init_report(void);
66 67
67/* Boot argument acquisition, returns the boot command line string. */ 68/* Boot argument acquisition, returns the boot command line string. */
68char *prom_getbootargs(void); 69char *prom_getbootargs(void);
diff --git a/arch/sparc/include/asm/setup.h b/arch/sparc/include/asm/setup.h
index f5fffd84d0dd..29d64b1758ed 100644
--- a/arch/sparc/include/asm/setup.h
+++ b/arch/sparc/include/asm/setup.h
@@ -48,6 +48,8 @@ unsigned long safe_compute_effective_address(struct pt_regs *, unsigned int);
48#endif 48#endif
49 49
50#ifdef CONFIG_SPARC64 50#ifdef CONFIG_SPARC64
51void __init start_early_boot(void);
52
51/* unaligned_64.c */ 53/* unaligned_64.c */
52int handle_ldf_stq(u32 insn, struct pt_regs *regs); 54int handle_ldf_stq(u32 insn, struct pt_regs *regs);
53void handle_ld_nf(u32 insn, struct pt_regs *regs); 55void handle_ld_nf(u32 insn, struct pt_regs *regs);
diff --git a/arch/sparc/include/uapi/asm/swab.h b/arch/sparc/include/uapi/asm/swab.h
index a34ad079487e..4c7c12d69bea 100644
--- a/arch/sparc/include/uapi/asm/swab.h
+++ b/arch/sparc/include/uapi/asm/swab.h
@@ -9,9 +9,9 @@ static inline __u16 __arch_swab16p(const __u16 *addr)
9{ 9{
10 __u16 ret; 10 __u16 ret;
11 11
12 __asm__ __volatile__ ("lduha [%1] %2, %0" 12 __asm__ __volatile__ ("lduha [%2] %3, %0"
13 : "=r" (ret) 13 : "=r" (ret)
14 : "r" (addr), "i" (ASI_PL)); 14 : "m" (*addr), "r" (addr), "i" (ASI_PL));
15 return ret; 15 return ret;
16} 16}
17#define __arch_swab16p __arch_swab16p 17#define __arch_swab16p __arch_swab16p
@@ -20,9 +20,9 @@ static inline __u32 __arch_swab32p(const __u32 *addr)
20{ 20{
21 __u32 ret; 21 __u32 ret;
22 22
23 __asm__ __volatile__ ("lduwa [%1] %2, %0" 23 __asm__ __volatile__ ("lduwa [%2] %3, %0"
24 : "=r" (ret) 24 : "=r" (ret)
25 : "r" (addr), "i" (ASI_PL)); 25 : "m" (*addr), "r" (addr), "i" (ASI_PL));
26 return ret; 26 return ret;
27} 27}
28#define __arch_swab32p __arch_swab32p 28#define __arch_swab32p __arch_swab32p
@@ -31,9 +31,9 @@ static inline __u64 __arch_swab64p(const __u64 *addr)
31{ 31{
32 __u64 ret; 32 __u64 ret;
33 33
34 __asm__ __volatile__ ("ldxa [%1] %2, %0" 34 __asm__ __volatile__ ("ldxa [%2] %3, %0"
35 : "=r" (ret) 35 : "=r" (ret)
36 : "r" (addr), "i" (ASI_PL)); 36 : "m" (*addr), "r" (addr), "i" (ASI_PL));
37 return ret; 37 return ret;
38} 38}
39#define __arch_swab64p __arch_swab64p 39#define __arch_swab64p __arch_swab64p
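
Each swab helper above gains an "m" (*addr) input (and renumbers the remaining operands from %1/%2 to %2/%3), which tells the compiler that the asm actually reads the pointed-to memory rather than just consuming the pointer value, so stores to *addr cannot be reordered past or optimized away around the load. For comparison, a heavier-handed alternative sketched on the 16-bit variant (hypothetical, not part of this patch): a blanket "memory" clobber, which is also correct but forces the compiler to discard all cached memory contents around the asm.

        static inline __u16 swab16p_with_clobber(const __u16 *addr)
        {
                __u16 ret;

                __asm__ __volatile__("lduha [%1] %2, %0"
                                     : "=r" (ret)
                                     : "r" (addr), "i" (ASI_PL)
                                     : "memory");       /* pessimizes everything nearby */
                return ret;
        }

The per-operand constraint used in the patch conveys the same dependency at much lower cost.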
diff --git a/arch/sparc/include/uapi/asm/unistd.h b/arch/sparc/include/uapi/asm/unistd.h
index c842a89b1190..46d83842eddc 100644
--- a/arch/sparc/include/uapi/asm/unistd.h
+++ b/arch/sparc/include/uapi/asm/unistd.h
@@ -414,8 +414,9 @@
414#define __NR_seccomp 346 414#define __NR_seccomp 346
415#define __NR_getrandom 347 415#define __NR_getrandom 347
416#define __NR_memfd_create 348 416#define __NR_memfd_create 348
417#define __NR_bpf 349
417 418
418#define NR_syscalls 349 419#define NR_syscalls 350
419 420
420/* Bitmask values returned from kern_features system call. */ 421/* Bitmask values returned from kern_features system call. */
421#define KERN_FEATURE_MIXED_MODE_STACK 0x00000001 422#define KERN_FEATURE_MIXED_MODE_STACK 0x00000001
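
__NR_bpf (349) is reserved here and wired into the sparc syscall tables later in this diff (systbls_32.S and both tables in systbls_64.S), with NR_syscalls bumped to 350. A hedged user-space sketch for exercising the new entry through syscall(2); the zeroed request is expected to fail with EINVAL, the point is only that ENOSYS would indicate the table entry is missing:

        #include <errno.h>
        #include <stdio.h>
        #include <string.h>
        #include <unistd.h>
        #include <sys/syscall.h>
        #include <linux/bpf.h>

        static long sys_bpf(int cmd, union bpf_attr *attr, unsigned int size)
        {
                return syscall(__NR_bpf, cmd, attr, size);
        }

        int main(void)
        {
                union bpf_attr attr;

                memset(&attr, 0, sizeof(attr));
                if (sys_bpf(BPF_MAP_CREATE, &attr, sizeof(attr)) < 0)
                        printf("bpf: %s\n", strerror(errno));
                return 0;
        }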
diff --git a/arch/sparc/kernel/entry.h b/arch/sparc/kernel/entry.h
index ebaba6167dd4..88d322b67fac 100644
--- a/arch/sparc/kernel/entry.h
+++ b/arch/sparc/kernel/entry.h
@@ -65,13 +65,10 @@ struct pause_patch_entry {
65extern struct pause_patch_entry __pause_3insn_patch, 65extern struct pause_patch_entry __pause_3insn_patch,
66 __pause_3insn_patch_end; 66 __pause_3insn_patch_end;
67 67
68void __init per_cpu_patch(void);
69void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *, 68void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *,
70 struct sun4v_1insn_patch_entry *); 69 struct sun4v_1insn_patch_entry *);
71void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *, 70void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *,
72 struct sun4v_2insn_patch_entry *); 71 struct sun4v_2insn_patch_entry *);
73void __init sun4v_patch(void);
74void __init boot_cpu_id_too_large(int cpu);
75extern unsigned int dcache_parity_tl1_occurred; 72extern unsigned int dcache_parity_tl1_occurred;
76extern unsigned int icache_parity_tl1_occurred; 73extern unsigned int icache_parity_tl1_occurred;
77 74
diff --git a/arch/sparc/kernel/head_64.S b/arch/sparc/kernel/head_64.S
index 4fdeb8040d4d..3d61fcae7ee3 100644
--- a/arch/sparc/kernel/head_64.S
+++ b/arch/sparc/kernel/head_64.S
@@ -672,14 +672,12 @@ tlb_fixup_done:
672 sethi %hi(init_thread_union), %g6 672 sethi %hi(init_thread_union), %g6
673 or %g6, %lo(init_thread_union), %g6 673 or %g6, %lo(init_thread_union), %g6
674 ldx [%g6 + TI_TASK], %g4 674 ldx [%g6 + TI_TASK], %g4
675 mov %sp, %l6
676 675
677 wr %g0, ASI_P, %asi 676 wr %g0, ASI_P, %asi
678 mov 1, %g1 677 mov 1, %g1
679 sllx %g1, THREAD_SHIFT, %g1 678 sllx %g1, THREAD_SHIFT, %g1
680 sub %g1, (STACKFRAME_SZ + STACK_BIAS), %g1 679 sub %g1, (STACKFRAME_SZ + STACK_BIAS), %g1
681 add %g6, %g1, %sp 680 add %g6, %g1, %sp
682 mov 0, %fp
683 681
684 /* Set per-cpu pointer initially to zero, this makes 682 /* Set per-cpu pointer initially to zero, this makes
685 * the boot-cpu use the in-kernel-image per-cpu areas 683 * the boot-cpu use the in-kernel-image per-cpu areas
@@ -706,44 +704,14 @@ tlb_fixup_done:
706 nop 704 nop
707#endif 705#endif
708 706
709 mov %l6, %o1 ! OpenPROM stack
710 call prom_init 707 call prom_init
711 mov %l7, %o0 ! OpenPROM cif handler 708 mov %l7, %o0 ! OpenPROM cif handler
712 709
713 /* Initialize current_thread_info()->cpu as early as possible. 710 /* To create a one-register-window buffer between the kernel's
714 * In order to do that accurately we have to patch up the get_cpuid() 711 * initial stack and the last stack frame we use from the firmware,
715 * assembler sequences. And that, in turn, requires that we know 712 * do the rest of the boot from a C helper function.
716 * if we are on a Starfire box or not. While we're here, patch up
717 * the sun4v sequences as well.
718 */ 713 */
719 call check_if_starfire 714 call start_early_boot
720 nop
721 call per_cpu_patch
722 nop
723 call sun4v_patch
724 nop
725
726#ifdef CONFIG_SMP
727 call hard_smp_processor_id
728 nop
729 cmp %o0, NR_CPUS
730 blu,pt %xcc, 1f
731 nop
732 call boot_cpu_id_too_large
733 nop
734 /* Not reached... */
735
7361:
737#else
738 mov 0, %o0
739#endif
740 sth %o0, [%g6 + TI_CPU]
741
742 call prom_init_report
743 nop
744
745 /* Off we go.... */
746 call start_kernel
747 nop 715 nop
748 /* Not reached... */ 716 /* Not reached... */
749 717
diff --git a/arch/sparc/kernel/hvtramp.S b/arch/sparc/kernel/hvtramp.S
index b7ddcdd1dea9..cdbfec299f2f 100644
--- a/arch/sparc/kernel/hvtramp.S
+++ b/arch/sparc/kernel/hvtramp.S
@@ -109,7 +109,6 @@ hv_cpu_startup:
109 sllx %g5, THREAD_SHIFT, %g5 109 sllx %g5, THREAD_SHIFT, %g5
110 sub %g5, (STACKFRAME_SZ + STACK_BIAS), %g5 110 sub %g5, (STACKFRAME_SZ + STACK_BIAS), %g5
111 add %g6, %g5, %sp 111 add %g6, %g5, %sp
112 mov 0, %fp
113 112
114 call init_irqwork_curcpu 113 call init_irqwork_curcpu
115 nop 114 nop
diff --git a/arch/sparc/kernel/pci_schizo.c b/arch/sparc/kernel/pci_schizo.c
index 8f76f23dac38..f9c6813c132d 100644
--- a/arch/sparc/kernel/pci_schizo.c
+++ b/arch/sparc/kernel/pci_schizo.c
@@ -581,7 +581,7 @@ static irqreturn_t schizo_pcierr_intr_other(struct pci_pbm_info *pbm)
581{ 581{
582 unsigned long csr_reg, csr, csr_error_bits; 582 unsigned long csr_reg, csr, csr_error_bits;
583 irqreturn_t ret = IRQ_NONE; 583 irqreturn_t ret = IRQ_NONE;
584 u16 stat; 584 u32 stat;
585 585
586 csr_reg = pbm->pbm_regs + SCHIZO_PCI_CTRL; 586 csr_reg = pbm->pbm_regs + SCHIZO_PCI_CTRL;
587 csr = upa_readq(csr_reg); 587 csr = upa_readq(csr_reg);
@@ -617,7 +617,7 @@ static irqreturn_t schizo_pcierr_intr_other(struct pci_pbm_info *pbm)
617 pbm->name); 617 pbm->name);
618 ret = IRQ_HANDLED; 618 ret = IRQ_HANDLED;
619 } 619 }
620 pci_read_config_word(pbm->pci_bus->self, PCI_STATUS, &stat); 620 pbm->pci_ops->read(pbm->pci_bus, 0, PCI_STATUS, 2, &stat);
621 if (stat & (PCI_STATUS_PARITY | 621 if (stat & (PCI_STATUS_PARITY |
622 PCI_STATUS_SIG_TARGET_ABORT | 622 PCI_STATUS_SIG_TARGET_ABORT |
623 PCI_STATUS_REC_TARGET_ABORT | 623 PCI_STATUS_REC_TARGET_ABORT |
@@ -625,7 +625,7 @@ static irqreturn_t schizo_pcierr_intr_other(struct pci_pbm_info *pbm)
625 PCI_STATUS_SIG_SYSTEM_ERROR)) { 625 PCI_STATUS_SIG_SYSTEM_ERROR)) {
626 printk("%s: PCI bus error, PCI_STATUS[%04x]\n", 626 printk("%s: PCI bus error, PCI_STATUS[%04x]\n",
627 pbm->name, stat); 627 pbm->name, stat);
628 pci_write_config_word(pbm->pci_bus->self, PCI_STATUS, 0xffff); 628 pbm->pci_ops->write(pbm->pci_bus, 0, PCI_STATUS, 2, 0xffff);
629 ret = IRQ_HANDLED; 629 ret = IRQ_HANDLED;
630 } 630 }
631 return ret; 631 return ret;
diff --git a/arch/sparc/kernel/setup_64.c b/arch/sparc/kernel/setup_64.c
index e629b8377587..c38d19fc27ba 100644
--- a/arch/sparc/kernel/setup_64.c
+++ b/arch/sparc/kernel/setup_64.c
@@ -30,6 +30,7 @@
30#include <linux/cpu.h> 30#include <linux/cpu.h>
31#include <linux/initrd.h> 31#include <linux/initrd.h>
32#include <linux/module.h> 32#include <linux/module.h>
33#include <linux/start_kernel.h>
33 34
34#include <asm/io.h> 35#include <asm/io.h>
35#include <asm/processor.h> 36#include <asm/processor.h>
@@ -162,7 +163,7 @@ char reboot_command[COMMAND_LINE_SIZE];
162 163
163static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 }; 164static struct pt_regs fake_swapper_regs = { { 0, }, 0, 0, 0, 0 };
164 165
165void __init per_cpu_patch(void) 166static void __init per_cpu_patch(void)
166{ 167{
167 struct cpuid_patch_entry *p; 168 struct cpuid_patch_entry *p;
168 unsigned long ver; 169 unsigned long ver;
@@ -254,7 +255,7 @@ void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *start,
254 } 255 }
255} 256}
256 257
257void __init sun4v_patch(void) 258static void __init sun4v_patch(void)
258{ 259{
259 extern void sun4v_hvapi_init(void); 260 extern void sun4v_hvapi_init(void);
260 261
@@ -323,14 +324,25 @@ static void __init pause_patch(void)
323 } 324 }
324} 325}
325 326
326#ifdef CONFIG_SMP 327void __init start_early_boot(void)
327void __init boot_cpu_id_too_large(int cpu)
328{ 328{
329 prom_printf("Serious problem, boot cpu id (%d) >= NR_CPUS (%d)\n", 329 int cpu;
330 cpu, NR_CPUS); 330
331 prom_halt(); 331 check_if_starfire();
332 per_cpu_patch();
333 sun4v_patch();
334
335 cpu = hard_smp_processor_id();
336 if (cpu >= NR_CPUS) {
337 prom_printf("Serious problem, boot cpu id (%d) >= NR_CPUS (%d)\n",
338 cpu, NR_CPUS);
339 prom_halt();
340 }
341 current_thread_info()->cpu = cpu;
342
343 prom_init_report();
344 start_kernel();
332} 345}
333#endif
334 346
335/* On Ultra, we support all of the v8 capabilities. */ 347/* On Ultra, we support all of the v8 capabilities. */
336unsigned long sparc64_elf_hwcap = (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR | 348unsigned long sparc64_elf_hwcap = (HWCAP_SPARC_FLUSH | HWCAP_SPARC_STBAR |
diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c
index 302c476413d5..da6f1a7fc4db 100644
--- a/arch/sparc/kernel/smp_64.c
+++ b/arch/sparc/kernel/smp_64.c
@@ -816,13 +816,17 @@ void arch_send_call_function_single_ipi(int cpu)
816void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs) 816void __irq_entry smp_call_function_client(int irq, struct pt_regs *regs)
817{ 817{
818 clear_softint(1 << irq); 818 clear_softint(1 << irq);
819 irq_enter();
819 generic_smp_call_function_interrupt(); 820 generic_smp_call_function_interrupt();
821 irq_exit();
820} 822}
821 823
822void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs) 824void __irq_entry smp_call_function_single_client(int irq, struct pt_regs *regs)
823{ 825{
824 clear_softint(1 << irq); 826 clear_softint(1 << irq);
827 irq_enter();
825 generic_smp_call_function_single_interrupt(); 828 generic_smp_call_function_single_interrupt();
829 irq_exit();
826} 830}
827 831
828static void tsb_sync(void *info) 832static void tsb_sync(void *info)
diff --git a/arch/sparc/kernel/systbls_32.S b/arch/sparc/kernel/systbls_32.S
index 6a873c344bc0..ad0cdf497b78 100644
--- a/arch/sparc/kernel/systbls_32.S
+++ b/arch/sparc/kernel/systbls_32.S
@@ -86,4 +86,4 @@ sys_call_table:
86/*330*/ .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime 86/*330*/ .long sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
87/*335*/ .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev 87/*335*/ .long sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
88/*340*/ .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr 88/*340*/ .long sys_ni_syscall, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
89/*345*/ .long sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create 89/*345*/ .long sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
diff --git a/arch/sparc/kernel/systbls_64.S b/arch/sparc/kernel/systbls_64.S
index d9151b6490d8..580cde9370c9 100644
--- a/arch/sparc/kernel/systbls_64.S
+++ b/arch/sparc/kernel/systbls_64.S
@@ -87,7 +87,7 @@ sys_call_table32:
87/*330*/ .word compat_sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime 87/*330*/ .word compat_sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, compat_sys_open_by_handle_at, compat_sys_clock_adjtime
88 .word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev 88 .word sys_syncfs, compat_sys_sendmmsg, sys_setns, compat_sys_process_vm_readv, compat_sys_process_vm_writev
89/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr 89/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
90 .word sys32_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create 90 .word sys32_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
91 91
92#endif /* CONFIG_COMPAT */ 92#endif /* CONFIG_COMPAT */
93 93
@@ -166,4 +166,4 @@ sys_call_table:
166/*330*/ .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime 166/*330*/ .word sys_fanotify_mark, sys_prlimit64, sys_name_to_handle_at, sys_open_by_handle_at, sys_clock_adjtime
167 .word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev 167 .word sys_syncfs, sys_sendmmsg, sys_setns, sys_process_vm_readv, sys_process_vm_writev
168/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr 168/*340*/ .word sys_kern_features, sys_kcmp, sys_finit_module, sys_sched_setattr, sys_sched_getattr
169 .word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create 169 .word sys_renameat2, sys_seccomp, sys_getrandom, sys_memfd_create, sys_bpf
diff --git a/arch/sparc/kernel/trampoline_64.S b/arch/sparc/kernel/trampoline_64.S
index 737f8cbc7d56..88ede1d53b4c 100644
--- a/arch/sparc/kernel/trampoline_64.S
+++ b/arch/sparc/kernel/trampoline_64.S
@@ -109,10 +109,13 @@ startup_continue:
109 brnz,pn %g1, 1b 109 brnz,pn %g1, 1b
110 nop 110 nop
111 111
112 sethi %hi(p1275buf), %g2 112 /* Get onto temporary stack which will be in the locked
113 or %g2, %lo(p1275buf), %g2 113 * kernel image.
114 ldx [%g2 + 0x10], %l2 114 */
115 add %l2, -(192 + 128), %sp 115 sethi %hi(tramp_stack), %g1
116 or %g1, %lo(tramp_stack), %g1
117 add %g1, TRAMP_STACK_SIZE, %g1
118 sub %g1, STACKFRAME_SZ + STACK_BIAS + 256, %sp
116 flushw 119 flushw
117 120
118 /* Setup the loop variables: 121 /* Setup the loop variables:
@@ -394,7 +397,6 @@ after_lock_tlb:
394 sllx %g5, THREAD_SHIFT, %g5 397 sllx %g5, THREAD_SHIFT, %g5
395 sub %g5, (STACKFRAME_SZ + STACK_BIAS), %g5 398 sub %g5, (STACKFRAME_SZ + STACK_BIAS), %g5
396 add %g6, %g5, %sp 399 add %g6, %g5, %sp
397 mov 0, %fp
398 400
399 rdpr %pstate, %o1 401 rdpr %pstate, %o1
400 or %o1, PSTATE_IE, %o1 402 or %o1, PSTATE_IE, %o1
diff --git a/arch/sparc/lib/atomic32.c b/arch/sparc/lib/atomic32.c
index a7c418ac26af..71cd65ab200c 100644
--- a/arch/sparc/lib/atomic32.c
+++ b/arch/sparc/lib/atomic32.c
@@ -45,6 +45,19 @@ ATOMIC_OP(add, +=)
45 45
46#undef ATOMIC_OP 46#undef ATOMIC_OP
47 47
48int atomic_xchg(atomic_t *v, int new)
49{
50 int ret;
51 unsigned long flags;
52
53 spin_lock_irqsave(ATOMIC_HASH(v), flags);
54 ret = v->counter;
55 v->counter = new;
56 spin_unlock_irqrestore(ATOMIC_HASH(v), flags);
57 return ret;
58}
59EXPORT_SYMBOL(atomic_xchg);
60
48int atomic_cmpxchg(atomic_t *v, int old, int new) 61int atomic_cmpxchg(atomic_t *v, int old, int new)
49{ 62{
50 int ret; 63 int ret;
@@ -137,3 +150,17 @@ unsigned long __cmpxchg_u32(volatile u32 *ptr, u32 old, u32 new)
137 return (unsigned long)prev; 150 return (unsigned long)prev;
138} 151}
139EXPORT_SYMBOL(__cmpxchg_u32); 152EXPORT_SYMBOL(__cmpxchg_u32);
153
154unsigned long __xchg_u32(volatile u32 *ptr, u32 new)
155{
156 unsigned long flags;
157 u32 prev;
158
159 spin_lock_irqsave(ATOMIC_HASH(ptr), flags);
160 prev = *ptr;
161 *ptr = new;
162 spin_unlock_irqrestore(ATOMIC_HASH(ptr), flags);
163
164 return (unsigned long)prev;
165}
166EXPORT_SYMBOL(__xchg_u32);
diff --git a/arch/sparc/mm/gup.c b/arch/sparc/mm/gup.c
index 1aed0432c64b..ae6ce383d4df 100644
--- a/arch/sparc/mm/gup.c
+++ b/arch/sparc/mm/gup.c
@@ -160,6 +160,36 @@ static int gup_pud_range(pgd_t pgd, unsigned long addr, unsigned long end,
160 return 1; 160 return 1;
161} 161}
162 162
163int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
164 struct page **pages)
165{
166 struct mm_struct *mm = current->mm;
167 unsigned long addr, len, end;
168 unsigned long next, flags;
169 pgd_t *pgdp;
170 int nr = 0;
171
172 start &= PAGE_MASK;
173 addr = start;
174 len = (unsigned long) nr_pages << PAGE_SHIFT;
175 end = start + len;
176
177 local_irq_save(flags);
178 pgdp = pgd_offset(mm, addr);
179 do {
180 pgd_t pgd = *pgdp;
181
182 next = pgd_addr_end(addr, end);
183 if (pgd_none(pgd))
184 break;
185 if (!gup_pud_range(pgd, addr, next, write, pages, &nr))
186 break;
187 } while (pgdp++, addr = next, addr != end);
188 local_irq_restore(flags);
189
190 return nr;
191}
192
163int get_user_pages_fast(unsigned long start, int nr_pages, int write, 193int get_user_pages_fast(unsigned long start, int nr_pages, int write,
164 struct page **pages) 194 struct page **pages)
165{ 195{
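
The new __get_user_pages_fast() walks the page tables with only local_irq_save() held, the usual fast-GUP approach for a lockless, non-sleeping pin; unlike get_user_pages_fast() below it, it never falls back to taking mmap_sem, so callers in atomic context get a partial count and must handle the remainder themselves. A hedged usage sketch (hypothetical caller, not from this patch):

        static int pin_user_buffer(unsigned long uaddr, int nr_pages,
                                   struct page **pages)
        {
                int got = __get_user_pages_fast(uaddr, nr_pages, 1, pages);

                if (got == nr_pages)
                        return 0;

                /* Caller must put_page() the 'got' pages it did receive and
                 * retry the rest via get_user_pages() from sleepable context. */
                return -EAGAIN;
        }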
diff --git a/arch/sparc/prom/cif.S b/arch/sparc/prom/cif.S
index 9c86b4b7d429..8050f381f518 100644
--- a/arch/sparc/prom/cif.S
+++ b/arch/sparc/prom/cif.S
@@ -11,11 +11,10 @@
11 .text 11 .text
12 .globl prom_cif_direct 12 .globl prom_cif_direct
13prom_cif_direct: 13prom_cif_direct:
14 save %sp, -192, %sp
14 sethi %hi(p1275buf), %o1 15 sethi %hi(p1275buf), %o1
15 or %o1, %lo(p1275buf), %o1 16 or %o1, %lo(p1275buf), %o1
16 ldx [%o1 + 0x0010], %o2 ! prom_cif_stack 17 ldx [%o1 + 0x0008], %l2 ! prom_cif_handler
17 save %o2, -192, %sp
18 ldx [%i1 + 0x0008], %l2 ! prom_cif_handler
19 mov %g4, %l0 18 mov %g4, %l0
20 mov %g5, %l1 19 mov %g5, %l1
21 mov %g6, %l3 20 mov %g6, %l3
diff --git a/arch/sparc/prom/init_64.c b/arch/sparc/prom/init_64.c
index d95db755828f..110b0d78b864 100644
--- a/arch/sparc/prom/init_64.c
+++ b/arch/sparc/prom/init_64.c
@@ -26,13 +26,13 @@ phandle prom_chosen_node;
26 * It gets passed the pointer to the PROM vector. 26 * It gets passed the pointer to the PROM vector.
27 */ 27 */
28 28
29extern void prom_cif_init(void *, void *); 29extern void prom_cif_init(void *);
30 30
31void __init prom_init(void *cif_handler, void *cif_stack) 31void __init prom_init(void *cif_handler)
32{ 32{
33 phandle node; 33 phandle node;
34 34
35 prom_cif_init(cif_handler, cif_stack); 35 prom_cif_init(cif_handler);
36 36
37 prom_chosen_node = prom_finddevice(prom_chosen_path); 37 prom_chosen_node = prom_finddevice(prom_chosen_path);
38 if (!prom_chosen_node || (s32)prom_chosen_node == -1) 38 if (!prom_chosen_node || (s32)prom_chosen_node == -1)
diff --git a/arch/sparc/prom/p1275.c b/arch/sparc/prom/p1275.c
index b2340f008ae0..545d8bb79b65 100644
--- a/arch/sparc/prom/p1275.c
+++ b/arch/sparc/prom/p1275.c
@@ -20,7 +20,6 @@
20struct { 20struct {
21 long prom_callback; /* 0x00 */ 21 long prom_callback; /* 0x00 */
22 void (*prom_cif_handler)(long *); /* 0x08 */ 22 void (*prom_cif_handler)(long *); /* 0x08 */
23 unsigned long prom_cif_stack; /* 0x10 */
24} p1275buf; 23} p1275buf;
25 24
26extern void prom_world(int); 25extern void prom_world(int);
@@ -52,5 +51,4 @@ void p1275_cmd_direct(unsigned long *args)
52void prom_cif_init(void *cif_handler, void *cif_stack) 51void prom_cif_init(void *cif_handler, void *cif_stack)
53{ 52{
54 p1275buf.prom_cif_handler = (void (*)(long *))cif_handler; 53 p1275buf.prom_cif_handler = (void (*)(long *))cif_handler;
55 p1275buf.prom_cif_stack = (unsigned long)cif_stack;
56} 54}
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index f2327e88e07c..41a503c15862 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -142,6 +142,10 @@ config INSTRUCTION_DECODER
142 def_bool y 142 def_bool y
143 depends on KPROBES || PERF_EVENTS || UPROBES 143 depends on KPROBES || PERF_EVENTS || UPROBES
144 144
145config PERF_EVENTS_INTEL_UNCORE
146 def_bool y
147 depends on PERF_EVENTS && CPU_SUP_INTEL && PCI
148
145config OUTPUT_FORMAT 149config OUTPUT_FORMAT
146 string 150 string
147 default "elf32-i386" if X86_32 151 default "elf32-i386" if X86_32
diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
index 704f58aa79cd..45abc363dd3e 100644
--- a/arch/x86/boot/compressed/Makefile
+++ b/arch/x86/boot/compressed/Makefile
@@ -76,8 +76,10 @@ suffix-$(CONFIG_KERNEL_XZ) := xz
76suffix-$(CONFIG_KERNEL_LZO) := lzo 76suffix-$(CONFIG_KERNEL_LZO) := lzo
77suffix-$(CONFIG_KERNEL_LZ4) := lz4 77suffix-$(CONFIG_KERNEL_LZ4) := lz4
78 78
79RUN_SIZE = $(shell $(OBJDUMP) -h vmlinux | \
80 perl $(srctree)/arch/x86/tools/calc_run_size.pl)
79quiet_cmd_mkpiggy = MKPIGGY $@ 81quiet_cmd_mkpiggy = MKPIGGY $@
80 cmd_mkpiggy = $(obj)/mkpiggy $< > $@ || ( rm -f $@ ; false ) 82 cmd_mkpiggy = $(obj)/mkpiggy $< $(RUN_SIZE) > $@ || ( rm -f $@ ; false )
81 83
82targets += piggy.S 84targets += piggy.S
83$(obj)/piggy.S: $(obj)/vmlinux.bin.$(suffix-y) $(obj)/mkpiggy FORCE 85$(obj)/piggy.S: $(obj)/vmlinux.bin.$(suffix-y) $(obj)/mkpiggy FORCE
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index de8eebd6f67c..1acf605a646d 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -330,8 +330,10 @@ __setup_efi_pci32(efi_pci_io_protocol_32 *pci, struct pci_setup_rom **__rom)
330 size = pci->romsize + sizeof(*rom); 330 size = pci->romsize + sizeof(*rom);
331 331
332 status = efi_call_early(allocate_pool, EFI_LOADER_DATA, size, &rom); 332 status = efi_call_early(allocate_pool, EFI_LOADER_DATA, size, &rom);
333 if (status != EFI_SUCCESS) 333 if (status != EFI_SUCCESS) {
334 efi_printk(sys_table, "Failed to alloc mem for rom\n");
334 return status; 335 return status;
336 }
335 337
336 memset(rom, 0, sizeof(*rom)); 338 memset(rom, 0, sizeof(*rom));
337 339
@@ -344,14 +346,18 @@ __setup_efi_pci32(efi_pci_io_protocol_32 *pci, struct pci_setup_rom **__rom)
344 status = efi_early->call(pci->pci.read, pci, EfiPciIoWidthUint16, 346 status = efi_early->call(pci->pci.read, pci, EfiPciIoWidthUint16,
345 PCI_VENDOR_ID, 1, &(rom->vendor)); 347 PCI_VENDOR_ID, 1, &(rom->vendor));
346 348
347 if (status != EFI_SUCCESS) 349 if (status != EFI_SUCCESS) {
350 efi_printk(sys_table, "Failed to read rom->vendor\n");
348 goto free_struct; 351 goto free_struct;
352 }
349 353
350 status = efi_early->call(pci->pci.read, pci, EfiPciIoWidthUint16, 354 status = efi_early->call(pci->pci.read, pci, EfiPciIoWidthUint16,
351 PCI_DEVICE_ID, 1, &(rom->devid)); 355 PCI_DEVICE_ID, 1, &(rom->devid));
352 356
353 if (status != EFI_SUCCESS) 357 if (status != EFI_SUCCESS) {
358 efi_printk(sys_table, "Failed to read rom->devid\n");
354 goto free_struct; 359 goto free_struct;
360 }
355 361
356 status = efi_early->call(pci->get_location, pci, &(rom->segment), 362 status = efi_early->call(pci->get_location, pci, &(rom->segment),
357 &(rom->bus), &(rom->device), &(rom->function)); 363 &(rom->bus), &(rom->device), &(rom->function));
@@ -432,8 +438,10 @@ __setup_efi_pci64(efi_pci_io_protocol_64 *pci, struct pci_setup_rom **__rom)
432 size = pci->romsize + sizeof(*rom); 438 size = pci->romsize + sizeof(*rom);
433 439
434 status = efi_call_early(allocate_pool, EFI_LOADER_DATA, size, &rom); 440 status = efi_call_early(allocate_pool, EFI_LOADER_DATA, size, &rom);
435 if (status != EFI_SUCCESS) 441 if (status != EFI_SUCCESS) {
442 efi_printk(sys_table, "Failed to alloc mem for rom\n");
436 return status; 443 return status;
444 }
437 445
438 rom->data.type = SETUP_PCI; 446 rom->data.type = SETUP_PCI;
439 rom->data.len = size - sizeof(struct setup_data); 447 rom->data.len = size - sizeof(struct setup_data);
@@ -444,14 +452,18 @@ __setup_efi_pci64(efi_pci_io_protocol_64 *pci, struct pci_setup_rom **__rom)
444 status = efi_early->call(pci->pci.read, pci, EfiPciIoWidthUint16, 452 status = efi_early->call(pci->pci.read, pci, EfiPciIoWidthUint16,
445 PCI_VENDOR_ID, 1, &(rom->vendor)); 453 PCI_VENDOR_ID, 1, &(rom->vendor));
446 454
447 if (status != EFI_SUCCESS) 455 if (status != EFI_SUCCESS) {
456 efi_printk(sys_table, "Failed to read rom->vendor\n");
448 goto free_struct; 457 goto free_struct;
458 }
449 459
450 status = efi_early->call(pci->pci.read, pci, EfiPciIoWidthUint16, 460 status = efi_early->call(pci->pci.read, pci, EfiPciIoWidthUint16,
451 PCI_DEVICE_ID, 1, &(rom->devid)); 461 PCI_DEVICE_ID, 1, &(rom->devid));
452 462
453 if (status != EFI_SUCCESS) 463 if (status != EFI_SUCCESS) {
464 efi_printk(sys_table, "Failed to read rom->devid\n");
454 goto free_struct; 465 goto free_struct;
466 }
455 467
456 status = efi_early->call(pci->get_location, pci, &(rom->segment), 468 status = efi_early->call(pci->get_location, pci, &(rom->segment),
457 &(rom->bus), &(rom->device), &(rom->function)); 469 &(rom->bus), &(rom->device), &(rom->function));
@@ -538,8 +550,10 @@ static void setup_efi_pci(struct boot_params *params)
538 EFI_LOADER_DATA, 550 EFI_LOADER_DATA,
539 size, (void **)&pci_handle); 551 size, (void **)&pci_handle);
540 552
541 if (status != EFI_SUCCESS) 553 if (status != EFI_SUCCESS) {
554 efi_printk(sys_table, "Failed to alloc mem for pci_handle\n");
542 return; 555 return;
556 }
543 557
544 status = efi_call_early(locate_handle, 558 status = efi_call_early(locate_handle,
545 EFI_LOCATE_BY_PROTOCOL, &pci_proto, 559 EFI_LOCATE_BY_PROTOCOL, &pci_proto,
@@ -1105,6 +1119,10 @@ struct boot_params *make_boot_params(struct efi_config *c)
1105 1119
1106 memset(sdt, 0, sizeof(*sdt)); 1120 memset(sdt, 0, sizeof(*sdt));
1107 1121
1122 status = efi_parse_options(cmdline_ptr);
1123 if (status != EFI_SUCCESS)
1124 goto fail2;
1125
1108 status = handle_cmdline_files(sys_table, image, 1126 status = handle_cmdline_files(sys_table, image,
1109 (char *)(unsigned long)hdr->cmd_line_ptr, 1127 (char *)(unsigned long)hdr->cmd_line_ptr,
1110 "initrd=", hdr->initrd_addr_max, 1128 "initrd=", hdr->initrd_addr_max,
diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
index cbed1407a5cd..1d7fbbcc196d 100644
--- a/arch/x86/boot/compressed/head_32.S
+++ b/arch/x86/boot/compressed/head_32.S
@@ -207,7 +207,8 @@ relocated:
207 * Do the decompression, and jump to the new kernel.. 207 * Do the decompression, and jump to the new kernel..
208 */ 208 */
209 /* push arguments for decompress_kernel: */ 209 /* push arguments for decompress_kernel: */
210 pushl $z_output_len /* decompressed length */ 210 pushl $z_run_size /* size of kernel with .bss and .brk */
211 pushl $z_output_len /* decompressed length, end of relocs */
211 leal z_extract_offset_negative(%ebx), %ebp 212 leal z_extract_offset_negative(%ebx), %ebp
212 pushl %ebp /* output address */ 213 pushl %ebp /* output address */
213 pushl $z_input_len /* input_len */ 214 pushl $z_input_len /* input_len */
@@ -217,7 +218,7 @@ relocated:
217 pushl %eax /* heap area */ 218 pushl %eax /* heap area */
218 pushl %esi /* real mode pointer */ 219 pushl %esi /* real mode pointer */
219 call decompress_kernel /* returns kernel location in %eax */ 220 call decompress_kernel /* returns kernel location in %eax */
220 addl $24, %esp 221 addl $28, %esp
221 222
222/* 223/*
223 * Jump to the decompressed kernel. 224 * Jump to the decompressed kernel.
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 2884e0c3e8a5..6b1766c6c082 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -402,13 +402,16 @@ relocated:
402 * Do the decompression, and jump to the new kernel.. 402 * Do the decompression, and jump to the new kernel..
403 */ 403 */
404 pushq %rsi /* Save the real mode argument */ 404 pushq %rsi /* Save the real mode argument */
405 movq $z_run_size, %r9 /* size of kernel with .bss and .brk */
406 pushq %r9
405 movq %rsi, %rdi /* real mode address */ 407 movq %rsi, %rdi /* real mode address */
406 leaq boot_heap(%rip), %rsi /* malloc area for uncompression */ 408 leaq boot_heap(%rip), %rsi /* malloc area for uncompression */
407 leaq input_data(%rip), %rdx /* input_data */ 409 leaq input_data(%rip), %rdx /* input_data */
408 movl $z_input_len, %ecx /* input_len */ 410 movl $z_input_len, %ecx /* input_len */
409 movq %rbp, %r8 /* output target address */ 411 movq %rbp, %r8 /* output target address */
410 movq $z_output_len, %r9 /* decompressed length */ 412 movq $z_output_len, %r9 /* decompressed length, end of relocs */
411 call decompress_kernel /* returns kernel location in %rax */ 413 call decompress_kernel /* returns kernel location in %rax */
414 popq %r9
412 popq %rsi 415 popq %rsi
413 416
414/* 417/*
diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
index 57ab74df7eea..30dd59a9f0b4 100644
--- a/arch/x86/boot/compressed/misc.c
+++ b/arch/x86/boot/compressed/misc.c
@@ -358,7 +358,8 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
358 unsigned char *input_data, 358 unsigned char *input_data,
359 unsigned long input_len, 359 unsigned long input_len,
360 unsigned char *output, 360 unsigned char *output,
361 unsigned long output_len) 361 unsigned long output_len,
362 unsigned long run_size)
362{ 363{
363 real_mode = rmode; 364 real_mode = rmode;
364 365
@@ -381,8 +382,14 @@ asmlinkage __visible void *decompress_kernel(void *rmode, memptr heap,
381 free_mem_ptr = heap; /* Heap */ 382 free_mem_ptr = heap; /* Heap */
382 free_mem_end_ptr = heap + BOOT_HEAP_SIZE; 383 free_mem_end_ptr = heap + BOOT_HEAP_SIZE;
383 384
384 output = choose_kernel_location(input_data, input_len, 385 /*
385 output, output_len); 386 * The memory hole needed for the kernel is the larger of either
387 * the entire decompressed kernel plus relocation table, or the
388 * entire decompressed kernel plus .bss and .brk sections.
389 */
390 output = choose_kernel_location(input_data, input_len, output,
391 output_len > run_size ? output_len
392 : run_size);
386 393
387 /* Validate memory location choices. */ 394 /* Validate memory location choices. */
388 if ((unsigned long)output & (MIN_KERNEL_ALIGN - 1)) 395 if ((unsigned long)output & (MIN_KERNEL_ALIGN - 1))
diff --git a/arch/x86/boot/compressed/mkpiggy.c b/arch/x86/boot/compressed/mkpiggy.c
index b669ab65bf6c..d8222f213182 100644
--- a/arch/x86/boot/compressed/mkpiggy.c
+++ b/arch/x86/boot/compressed/mkpiggy.c
@@ -36,11 +36,13 @@ int main(int argc, char *argv[])
36 uint32_t olen; 36 uint32_t olen;
37 long ilen; 37 long ilen;
38 unsigned long offs; 38 unsigned long offs;
39 unsigned long run_size;
39 FILE *f = NULL; 40 FILE *f = NULL;
40 int retval = 1; 41 int retval = 1;
41 42
42 if (argc < 2) { 43 if (argc < 3) {
43 fprintf(stderr, "Usage: %s compressed_file\n", argv[0]); 44 fprintf(stderr, "Usage: %s compressed_file run_size\n",
45 argv[0]);
44 goto bail; 46 goto bail;
45 } 47 }
46 48
@@ -74,6 +76,7 @@ int main(int argc, char *argv[])
74 offs += olen >> 12; /* Add 8 bytes for each 32K block */ 76 offs += olen >> 12; /* Add 8 bytes for each 32K block */
75 offs += 64*1024 + 128; /* Add 64K + 128 bytes slack */ 77 offs += 64*1024 + 128; /* Add 64K + 128 bytes slack */
76 offs = (offs+4095) & ~4095; /* Round to a 4K boundary */ 78 offs = (offs+4095) & ~4095; /* Round to a 4K boundary */
79 run_size = atoi(argv[2]);
77 80
78 printf(".section \".rodata..compressed\",\"a\",@progbits\n"); 81 printf(".section \".rodata..compressed\",\"a\",@progbits\n");
79 printf(".globl z_input_len\n"); 82 printf(".globl z_input_len\n");
@@ -85,6 +88,8 @@ int main(int argc, char *argv[])
85 /* z_extract_offset_negative allows simplification of head_32.S */ 88 /* z_extract_offset_negative allows simplification of head_32.S */
86 printf(".globl z_extract_offset_negative\n"); 89 printf(".globl z_extract_offset_negative\n");
87 printf("z_extract_offset_negative = -0x%lx\n", offs); 90 printf("z_extract_offset_negative = -0x%lx\n", offs);
91 printf(".globl z_run_size\n");
92 printf("z_run_size = %lu\n", run_size);
88 93
89 printf(".globl input_data, input_data_end\n"); 94 printf(".globl input_data, input_data_end\n");
90 printf("input_data:\n"); 95 printf("input_data:\n");
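
The boot/compressed changes above thread a new z_run_size value from the build (objdump -h piped through calc_run_size.pl) into mkpiggy, through head_32.S/head_64.S, and into decompress_kernel(), which now sizes the kernel's memory hole as the larger of the decompressed image plus relocations and the image plus .bss/.brk. Roughly, run_size is the in-memory footprint including NOBITS sections that decompression alone does not cover; a hedged sketch of the idea (hypothetical types, not the perl script):

        struct section {                /* stand-in for one line of objdump -h output */
                unsigned long addr;     /* VMA relative to the load address */
                unsigned long size;
                int alloc;              /* SHF_ALLOC: occupies memory at run time */
        };

        static unsigned long kernel_run_size(const struct section *secs, int n)
        {
                unsigned long end = 0;
                int i;

                for (i = 0; i < n; i++)
                        if (secs[i].alloc && secs[i].addr + secs[i].size > end)
                                end = secs[i].addr + secs[i].size;

                return end;     /* covers .bss and .brk, which output_len does not */
        }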
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
index 8ffba18395c8..ffe71228fc10 100644
--- a/arch/x86/ia32/ia32entry.S
+++ b/arch/x86/ia32/ia32entry.S
@@ -157,7 +157,7 @@ ENTRY(ia32_sysenter_target)
157 * ourselves. To save a few cycles, we can check whether 157 * ourselves. To save a few cycles, we can check whether
158 * NT was set instead of doing an unconditional popfq. 158 * NT was set instead of doing an unconditional popfq.
159 */ 159 */
160 testl $X86_EFLAGS_NT,EFLAGS(%rsp) /* saved EFLAGS match cpu */ 160 testl $X86_EFLAGS_NT,EFLAGS-ARGOFFSET(%rsp)
161 jnz sysenter_fix_flags 161 jnz sysenter_fix_flags
162sysenter_flags_fixed: 162sysenter_flags_fixed:
163 163
diff --git a/arch/x86/include/asm/efi.h b/arch/x86/include/asm/efi.h
index 0ec241ede5a2..9b11757975d0 100644
--- a/arch/x86/include/asm/efi.h
+++ b/arch/x86/include/asm/efi.h
@@ -81,24 +81,23 @@ extern u64 asmlinkage efi_call(void *fp, ...);
81 */ 81 */
82#define __efi_call_virt(f, args...) efi_call_virt(f, args) 82#define __efi_call_virt(f, args...) efi_call_virt(f, args)
83 83
84extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size, 84extern void __iomem *__init efi_ioremap(unsigned long addr, unsigned long size,
85 u32 type, u64 attribute); 85 u32 type, u64 attribute);
86 86
87#endif /* CONFIG_X86_32 */ 87#endif /* CONFIG_X86_32 */
88 88
89extern int add_efi_memmap;
90extern struct efi_scratch efi_scratch; 89extern struct efi_scratch efi_scratch;
91extern void efi_set_executable(efi_memory_desc_t *md, bool executable); 90extern void __init efi_set_executable(efi_memory_desc_t *md, bool executable);
92extern int efi_memblock_x86_reserve_range(void); 91extern int __init efi_memblock_x86_reserve_range(void);
93extern void efi_call_phys_prelog(void); 92extern void __init efi_call_phys_prolog(void);
94extern void efi_call_phys_epilog(void); 93extern void __init efi_call_phys_epilog(void);
95extern void efi_unmap_memmap(void); 94extern void __init efi_unmap_memmap(void);
96extern void efi_memory_uc(u64 addr, unsigned long size); 95extern void __init efi_memory_uc(u64 addr, unsigned long size);
97extern void __init efi_map_region(efi_memory_desc_t *md); 96extern void __init efi_map_region(efi_memory_desc_t *md);
98extern void __init efi_map_region_fixed(efi_memory_desc_t *md); 97extern void __init efi_map_region_fixed(efi_memory_desc_t *md);
99extern void efi_sync_low_kernel_mappings(void); 98extern void efi_sync_low_kernel_mappings(void);
100extern int efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages); 99extern int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages);
101extern void efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages); 100extern void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages);
102extern void __init old_map_region(efi_memory_desc_t *md); 101extern void __init old_map_region(efi_memory_desc_t *md);
103extern void __init runtime_code_page_mkexec(void); 102extern void __init runtime_code_page_mkexec(void);
104extern void __init efi_runtime_mkexec(void); 103extern void __init efi_runtime_mkexec(void);
@@ -162,16 +161,6 @@ static inline efi_status_t efi_thunk_set_virtual_address_map(
162extern bool efi_reboot_required(void); 161extern bool efi_reboot_required(void);
163 162
164#else 163#else
165/*
166 * IF EFI is not configured, have the EFI calls return -ENOSYS.
167 */
168#define efi_call0(_f) (-ENOSYS)
169#define efi_call1(_f, _a1) (-ENOSYS)
170#define efi_call2(_f, _a1, _a2) (-ENOSYS)
171#define efi_call3(_f, _a1, _a2, _a3) (-ENOSYS)
172#define efi_call4(_f, _a1, _a2, _a3, _a4) (-ENOSYS)
173#define efi_call5(_f, _a1, _a2, _a3, _a4, _a5) (-ENOSYS)
174#define efi_call6(_f, _a1, _a2, _a3, _a4, _a5, _a6) (-ENOSYS)
175static inline void parse_efi_setup(u64 phys_addr, u32 data_len) {} 164static inline void parse_efi_setup(u64 phys_addr, u32 data_len) {}
176static inline bool efi_reboot_required(void) 165static inline bool efi_reboot_required(void)
177{ 166{
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 7d603a71ab3a..6ed0c30d6a0c 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -989,6 +989,20 @@ static inline void kvm_inject_gp(struct kvm_vcpu *vcpu, u32 error_code)
989 kvm_queue_exception_e(vcpu, GP_VECTOR, error_code); 989 kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
990} 990}
991 991
992static inline u64 get_canonical(u64 la)
993{
994 return ((int64_t)la << 16) >> 16;
995}
996
997static inline bool is_noncanonical_address(u64 la)
998{
999#ifdef CONFIG_X86_64
1000 return get_canonical(la) != la;
1001#else
1002 return false;
1003#endif
1004}
1005
992#define TSS_IOPB_BASE_OFFSET 0x66 1006#define TSS_IOPB_BASE_OFFSET 0x66
993#define TSS_BASE_SIZE 0x68 1007#define TSS_BASE_SIZE 0x68
994#define TSS_IOPB_SIZE (65536 / 8) 1008#define TSS_IOPB_SIZE (65536 / 8)
@@ -1050,7 +1064,7 @@ void kvm_arch_mmu_notifier_invalidate_page(struct kvm *kvm,
1050 unsigned long address); 1064 unsigned long address);
1051 1065
1052void kvm_define_shared_msr(unsigned index, u32 msr); 1066void kvm_define_shared_msr(unsigned index, u32 msr);
1053void kvm_set_shared_msr(unsigned index, u64 val, u64 mask); 1067int kvm_set_shared_msr(unsigned index, u64 val, u64 mask);
1054 1068
1055bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip); 1069bool kvm_is_linear_rip(struct kvm_vcpu *vcpu, unsigned long linear_rip);
1056 1070
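
get_canonical() and is_noncanonical_address() implement the standard 48-bit canonicality test: shifting left by 16 and arithmetically back replicates bit 47 into bits 48..63, so only properly sign-extended addresses survive the round trip (on 32-bit builds the check is compiled out). A small stand-alone illustration, relying on GCC's well-defined behaviour for signed shifts just as the kernel helper does:

        #include <stdint.h>
        #include <assert.h>

        static uint64_t get_canonical(uint64_t la)
        {
                return ((int64_t)la << 16) >> 16;
        }

        int main(void)
        {
                /* canonical: bits 63..48 copy bit 47 */
                assert(get_canonical(0x00007fffffffffffULL) == 0x00007fffffffffffULL);
                assert(get_canonical(0xffff800000000000ULL) == 0xffff800000000000ULL);
                /* non-canonical: bit 47 set but the upper bits are clear */
                assert(get_canonical(0x0000800000000000ULL) != 0x0000800000000000ULL);
                return 0;
        }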
diff --git a/arch/x86/include/asm/page_32_types.h b/arch/x86/include/asm/page_32_types.h
index f48b17df4224..3a52ee0e726d 100644
--- a/arch/x86/include/asm/page_32_types.h
+++ b/arch/x86/include/asm/page_32_types.h
@@ -20,7 +20,6 @@
20#define THREAD_SIZE_ORDER 1 20#define THREAD_SIZE_ORDER 1
21#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) 21#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
22 22
23#define STACKFAULT_STACK 0
24#define DOUBLEFAULT_STACK 1 23#define DOUBLEFAULT_STACK 1
25#define NMI_STACK 0 24#define NMI_STACK 0
26#define DEBUG_STACK 0 25#define DEBUG_STACK 0
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index 678205195ae1..75450b2c7be4 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -14,12 +14,11 @@
14#define IRQ_STACK_ORDER 2 14#define IRQ_STACK_ORDER 2
15#define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER) 15#define IRQ_STACK_SIZE (PAGE_SIZE << IRQ_STACK_ORDER)
16 16
17#define STACKFAULT_STACK 1 17#define DOUBLEFAULT_STACK 1
18#define DOUBLEFAULT_STACK 2 18#define NMI_STACK 2
19#define NMI_STACK 3 19#define DEBUG_STACK 3
20#define DEBUG_STACK 4 20#define MCE_STACK 4
21#define MCE_STACK 5 21#define N_EXCEPTION_STACKS 4 /* hw limit: 7 */
22#define N_EXCEPTION_STACKS 5 /* hw limit: 7 */
23 22
24#define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT) 23#define PUD_PAGE_SIZE (_AC(1, UL) << PUD_SHIFT)
25#define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1)) 24#define PUD_PAGE_MASK (~(PUD_PAGE_SIZE-1))
diff --git a/arch/x86/include/asm/preempt.h b/arch/x86/include/asm/preempt.h
index 7024c12f7bfe..400873450e33 100644
--- a/arch/x86/include/asm/preempt.h
+++ b/arch/x86/include/asm/preempt.h
@@ -105,6 +105,7 @@ static __always_inline bool should_resched(void)
105# ifdef CONFIG_CONTEXT_TRACKING 105# ifdef CONFIG_CONTEXT_TRACKING
106 extern asmlinkage void ___preempt_schedule_context(void); 106 extern asmlinkage void ___preempt_schedule_context(void);
107# define __preempt_schedule_context() asm ("call ___preempt_schedule_context") 107# define __preempt_schedule_context() asm ("call ___preempt_schedule_context")
108 extern asmlinkage void preempt_schedule_context(void);
108# endif 109# endif
109#endif 110#endif
110 111
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 8cd27e08e23c..8cd1cc3bc835 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -150,6 +150,7 @@ static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
150} 150}
151 151
152void cpu_disable_common(void); 152void cpu_disable_common(void);
153void cpu_die_common(unsigned int cpu);
153void native_smp_prepare_boot_cpu(void); 154void native_smp_prepare_boot_cpu(void);
154void native_smp_prepare_cpus(unsigned int max_cpus); 155void native_smp_prepare_cpus(unsigned int max_cpus);
155void native_smp_cpus_done(unsigned int max_cpus); 156void native_smp_cpus_done(unsigned int max_cpus);
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 854053889d4d..547e344a6dc6 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -141,7 +141,7 @@ struct thread_info {
141/* Only used for 64 bit */ 141/* Only used for 64 bit */
142#define _TIF_DO_NOTIFY_MASK \ 142#define _TIF_DO_NOTIFY_MASK \
143 (_TIF_SIGPENDING | _TIF_MCE_NOTIFY | _TIF_NOTIFY_RESUME | \ 143 (_TIF_SIGPENDING | _TIF_MCE_NOTIFY | _TIF_NOTIFY_RESUME | \
144 _TIF_USER_RETURN_NOTIFY) 144 _TIF_USER_RETURN_NOTIFY | _TIF_UPROBE)
145 145
146/* flags to check in __switch_to() */ 146/* flags to check in __switch_to() */
147#define _TIF_WORK_CTXSW \ 147#define _TIF_WORK_CTXSW \
diff --git a/arch/x86/include/asm/traps.h b/arch/x86/include/asm/traps.h
index bc8352e7010a..707adc6549d8 100644
--- a/arch/x86/include/asm/traps.h
+++ b/arch/x86/include/asm/traps.h
@@ -39,6 +39,7 @@ asmlinkage void simd_coprocessor_error(void);
39 39
40#ifdef CONFIG_TRACING 40#ifdef CONFIG_TRACING
41asmlinkage void trace_page_fault(void); 41asmlinkage void trace_page_fault(void);
42#define trace_stack_segment stack_segment
42#define trace_divide_error divide_error 43#define trace_divide_error divide_error
43#define trace_bounds bounds 44#define trace_bounds bounds
44#define trace_invalid_op invalid_op 45#define trace_invalid_op invalid_op
diff --git a/arch/x86/include/uapi/asm/vmx.h b/arch/x86/include/uapi/asm/vmx.h
index 0e79420376eb..990a2fe1588d 100644
--- a/arch/x86/include/uapi/asm/vmx.h
+++ b/arch/x86/include/uapi/asm/vmx.h
@@ -67,6 +67,7 @@
67#define EXIT_REASON_EPT_MISCONFIG 49 67#define EXIT_REASON_EPT_MISCONFIG 49
68#define EXIT_REASON_INVEPT 50 68#define EXIT_REASON_INVEPT 50
69#define EXIT_REASON_PREEMPTION_TIMER 52 69#define EXIT_REASON_PREEMPTION_TIMER 52
70#define EXIT_REASON_INVVPID 53
70#define EXIT_REASON_WBINVD 54 71#define EXIT_REASON_WBINVD 54
71#define EXIT_REASON_XSETBV 55 72#define EXIT_REASON_XSETBV 55
72#define EXIT_REASON_APIC_WRITE 56 73#define EXIT_REASON_APIC_WRITE 56
@@ -114,6 +115,7 @@
114 { EXIT_REASON_EOI_INDUCED, "EOI_INDUCED" }, \ 115 { EXIT_REASON_EOI_INDUCED, "EOI_INDUCED" }, \
115 { EXIT_REASON_INVALID_STATE, "INVALID_STATE" }, \ 116 { EXIT_REASON_INVALID_STATE, "INVALID_STATE" }, \
116 { EXIT_REASON_INVD, "INVD" }, \ 117 { EXIT_REASON_INVD, "INVD" }, \
118 { EXIT_REASON_INVVPID, "INVVPID" }, \
117 { EXIT_REASON_INVPCID, "INVPCID" } 119 { EXIT_REASON_INVPCID, "INVPCID" }
118 120
119#endif /* _UAPIVMX_H */ 121#endif /* _UAPIVMX_H */
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index b436fc735aa4..a142e77693e1 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -397,7 +397,7 @@ static int mp_register_gsi(struct device *dev, u32 gsi, int trigger,
397 397
398 /* Don't set up the ACPI SCI because it's already set up */ 398 /* Don't set up the ACPI SCI because it's already set up */
399 if (acpi_gbl_FADT.sci_interrupt == gsi) 399 if (acpi_gbl_FADT.sci_interrupt == gsi)
400 return gsi; 400 return mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC);
401 401
402 trigger = trigger == ACPI_EDGE_SENSITIVE ? 0 : 1; 402 trigger = trigger == ACPI_EDGE_SENSITIVE ? 0 : 1;
403 polarity = polarity == ACPI_ACTIVE_HIGH ? 0 : 1; 403 polarity = polarity == ACPI_ACTIVE_HIGH ? 0 : 1;
@@ -604,14 +604,18 @@ void __init acpi_pic_sci_set_trigger(unsigned int irq, u16 trigger)
604 604
605int acpi_gsi_to_irq(u32 gsi, unsigned int *irqp) 605int acpi_gsi_to_irq(u32 gsi, unsigned int *irqp)
606{ 606{
607 int irq = mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC | IOAPIC_MAP_CHECK); 607 int irq;
608 608
609 if (irq >= 0) { 609 if (acpi_irq_model == ACPI_IRQ_MODEL_PIC) {
610 *irqp = gsi;
611 } else {
612 irq = mp_map_gsi_to_irq(gsi,
613 IOAPIC_MAP_ALLOC | IOAPIC_MAP_CHECK);
614 if (irq < 0)
615 return -1;
610 *irqp = irq; 616 *irqp = irq;
611 return 0;
612 } 617 }
613 618 return 0;
614 return -1;
615} 619}
616EXPORT_SYMBOL_GPL(acpi_gsi_to_irq); 620EXPORT_SYMBOL_GPL(acpi_gsi_to_irq);
617 621
diff --git a/arch/x86/kernel/apb_timer.c b/arch/x86/kernel/apb_timer.c
index 5972b108f15a..b708738d016e 100644
--- a/arch/x86/kernel/apb_timer.c
+++ b/arch/x86/kernel/apb_timer.c
@@ -185,8 +185,6 @@ static void apbt_setup_irq(struct apbt_dev *adev)
185 185
186 irq_modify_status(adev->irq, 0, IRQ_MOVE_PCNTXT); 186 irq_modify_status(adev->irq, 0, IRQ_MOVE_PCNTXT);
187 irq_set_affinity(adev->irq, cpumask_of(adev->cpu)); 187 irq_set_affinity(adev->irq, cpumask_of(adev->cpu));
188 /* APB timer irqs are set up as mp_irqs, timer is edge type */
189 __irq_set_handler(adev->irq, handle_edge_irq, 0, "edge");
190} 188}
191 189
192/* Should be called with per cpu */ 190/* Should be called with per cpu */
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 00853b254ab0..ba6cc041edb1 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -1297,7 +1297,7 @@ void setup_local_APIC(void)
1297 unsigned int value, queued; 1297 unsigned int value, queued;
1298 int i, j, acked = 0; 1298 int i, j, acked = 0;
1299 unsigned long long tsc = 0, ntsc; 1299 unsigned long long tsc = 0, ntsc;
1300 long long max_loops = cpu_khz; 1300 long long max_loops = cpu_khz ? cpu_khz : 1000000;
1301 1301
1302 if (cpu_has_tsc) 1302 if (cpu_has_tsc)
1303 rdtscll(tsc); 1303 rdtscll(tsc);
@@ -1383,7 +1383,7 @@ void setup_local_APIC(void)
1383 break; 1383 break;
1384 } 1384 }
1385 if (queued) { 1385 if (queued) {
1386 if (cpu_has_tsc) { 1386 if (cpu_has_tsc && cpu_khz) {
1387 rdtscll(ntsc); 1387 rdtscll(ntsc);
1388 max_loops = (cpu_khz << 10) - (ntsc - tsc); 1388 max_loops = (cpu_khz << 10) - (ntsc - tsc);
1389 } else 1389 } else
diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
index 01d5453b5502..e27b49d7c922 100644
--- a/arch/x86/kernel/cpu/Makefile
+++ b/arch/x86/kernel/cpu/Makefile
@@ -39,9 +39,12 @@ obj-$(CONFIG_CPU_SUP_AMD) += perf_event_amd_iommu.o
39endif 39endif
40obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_p6.o perf_event_knc.o perf_event_p4.o 40obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_p6.o perf_event_knc.o perf_event_p4.o
41obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o 41obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_lbr.o perf_event_intel_ds.o perf_event_intel.o
42obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_uncore.o perf_event_intel_uncore_snb.o
43obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_uncore_snbep.o perf_event_intel_uncore_nhmex.o
44obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_rapl.o 42obj-$(CONFIG_CPU_SUP_INTEL) += perf_event_intel_rapl.o
43
44obj-$(CONFIG_PERF_EVENTS_INTEL_UNCORE) += perf_event_intel_uncore.o \
45 perf_event_intel_uncore_snb.o \
46 perf_event_intel_uncore_snbep.o \
47 perf_event_intel_uncore_nhmex.o
45endif 48endif
46 49
47 50
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 4b4f78c9ba19..cfa9b5b2c27a 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -146,6 +146,8 @@ EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
146 146
147static int __init x86_xsave_setup(char *s) 147static int __init x86_xsave_setup(char *s)
148{ 148{
149 if (strlen(s))
150 return 0;
149 setup_clear_cpu_cap(X86_FEATURE_XSAVE); 151 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
150 setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT); 152 setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
151 setup_clear_cpu_cap(X86_FEATURE_XSAVES); 153 setup_clear_cpu_cap(X86_FEATURE_XSAVES);
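
The added strlen() check makes the handler decline the parameter when anything follows the matched prefix, so a longer option such as "noxsaves" is no longer consumed by the "noxsave" handler. A minimal userspace sketch of prefix matching with and without that guard (the handler name and option strings are only examples of how __setup()-style matching hands the remainder to the handler):

    #include <stdio.h>
    #include <string.h>

    /* Illustrative only: trailing characters mean "not our option". */
    static int handle_noxsave(const char *rest)
    {
            if (strlen(rest))
                    return 0;       /* e.g. "noxsaves": leave it for another handler */
            /* here the real handler would clear the XSAVE feature bits */
            return 1;
    }

    int main(void)
    {
            const char *tails[] = { "", "s", "opt" };

            for (int i = 0; i < 3; i++)
                    printf("\"noxsave%s\" handled: %d\n",
                           tails[i], handle_noxsave(tails[i]));
            return 0;
    }
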
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 1ef456273172..9cc6b6f25f42 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -213,12 +213,13 @@ static void intel_workarounds(struct cpuinfo_x86 *c)
213{ 213{
214#ifdef CONFIG_X86_F00F_BUG 214#ifdef CONFIG_X86_F00F_BUG
215 /* 215 /*
216 * All current models of Pentium and Pentium with MMX technology CPUs 216 * All models of Pentium and Pentium with MMX technology CPUs
217 * have the F0 0F bug, which lets nonprivileged users lock up the 217 * have the F0 0F bug, which lets nonprivileged users lock up the
218 * system. Announce that the fault handler will be checking for it. 218 * system. Announce that the fault handler will be checking for it.
219 * The Quark is also family 5, but does not have the same bug.
219 */ 220 */
220 clear_cpu_bug(c, X86_BUG_F00F); 221 clear_cpu_bug(c, X86_BUG_F00F);
221 if (!paravirt_enabled() && c->x86 == 5) { 222 if (!paravirt_enabled() && c->x86 == 5 && c->x86_model < 9) {
222 static int f00f_workaround_enabled; 223 static int f00f_workaround_enabled;
223 224
224 set_cpu_bug(c, X86_BUG_F00F); 225 set_cpu_bug(c, X86_BUG_F00F);
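
The updated check narrows the F0 0F workaround to family 5 parts with model below 9, which excludes Quark (commonly reported as family 5, model 9) while keeping all Pentium and Pentium MMX models. A standalone sketch of the predicate, using the cutoff straight from the hunk:

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative predicate mirroring the quirk above: family 5 parts have
     * the F0 0F bug except Quark (family 5, model 9 and later). The cutoff
     * comes from the hunk, not from a general rule. */
    static bool has_f00f_bug(unsigned family, unsigned model)
    {
            return family == 5 && model < 9;
    }

    int main(void)
    {
            printf("Pentium MMX (5,4): %d\n", has_f00f_bug(5, 4));  /* 1 */
            printf("Quark      (5,9): %d\n", has_f00f_bug(5, 9));   /* 0 */
            return 0;
    }
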
diff --git a/arch/x86/kernel/cpu/microcode/amd_early.c b/arch/x86/kernel/cpu/microcode/amd_early.c
index 7aa1acc79789..06674473b0e6 100644
--- a/arch/x86/kernel/cpu/microcode/amd_early.c
+++ b/arch/x86/kernel/cpu/microcode/amd_early.c
@@ -108,12 +108,13 @@ static size_t compute_container_size(u8 *data, u32 total_size)
108 * load_microcode_amd() to save equivalent cpu table and microcode patches in 108 * load_microcode_amd() to save equivalent cpu table and microcode patches in
109 * kernel heap memory. 109 * kernel heap memory.
110 */ 110 */
111static void apply_ucode_in_initrd(void *ucode, size_t size) 111static void apply_ucode_in_initrd(void *ucode, size_t size, bool save_patch)
112{ 112{
113 struct equiv_cpu_entry *eq; 113 struct equiv_cpu_entry *eq;
114 size_t *cont_sz; 114 size_t *cont_sz;
115 u32 *header; 115 u32 *header;
116 u8 *data, **cont; 116 u8 *data, **cont;
117 u8 (*patch)[PATCH_MAX_SIZE];
117 u16 eq_id = 0; 118 u16 eq_id = 0;
118 int offset, left; 119 int offset, left;
119 u32 rev, eax, ebx, ecx, edx; 120 u32 rev, eax, ebx, ecx, edx;
@@ -123,10 +124,12 @@ static void apply_ucode_in_initrd(void *ucode, size_t size)
123 new_rev = (u32 *)__pa_nodebug(&ucode_new_rev); 124 new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
124 cont_sz = (size_t *)__pa_nodebug(&container_size); 125 cont_sz = (size_t *)__pa_nodebug(&container_size);
125 cont = (u8 **)__pa_nodebug(&container); 126 cont = (u8 **)__pa_nodebug(&container);
127 patch = (u8 (*)[PATCH_MAX_SIZE])__pa_nodebug(&amd_ucode_patch);
126#else 128#else
127 new_rev = &ucode_new_rev; 129 new_rev = &ucode_new_rev;
128 cont_sz = &container_size; 130 cont_sz = &container_size;
129 cont = &container; 131 cont = &container;
132 patch = &amd_ucode_patch;
130#endif 133#endif
131 134
132 data = ucode; 135 data = ucode;
@@ -213,9 +216,9 @@ static void apply_ucode_in_initrd(void *ucode, size_t size)
213 rev = mc->hdr.patch_id; 216 rev = mc->hdr.patch_id;
214 *new_rev = rev; 217 *new_rev = rev;
215 218
216 /* save ucode patch */ 219 if (save_patch)
217 memcpy(amd_ucode_patch, mc, 220 memcpy(patch, mc,
218 min_t(u32, header[1], PATCH_MAX_SIZE)); 221 min_t(u32, header[1], PATCH_MAX_SIZE));
219 } 222 }
220 } 223 }
221 224
@@ -246,7 +249,7 @@ void __init load_ucode_amd_bsp(void)
246 *data = cp.data; 249 *data = cp.data;
247 *size = cp.size; 250 *size = cp.size;
248 251
249 apply_ucode_in_initrd(cp.data, cp.size); 252 apply_ucode_in_initrd(cp.data, cp.size, true);
250} 253}
251 254
252#ifdef CONFIG_X86_32 255#ifdef CONFIG_X86_32
@@ -263,7 +266,7 @@ void load_ucode_amd_ap(void)
263 size_t *usize; 266 size_t *usize;
264 void **ucode; 267 void **ucode;
265 268
266 mc = (struct microcode_amd *)__pa(amd_ucode_patch); 269 mc = (struct microcode_amd *)__pa_nodebug(amd_ucode_patch);
267 if (mc->hdr.patch_id && mc->hdr.processor_rev_id) { 270 if (mc->hdr.patch_id && mc->hdr.processor_rev_id) {
268 __apply_microcode_amd(mc); 271 __apply_microcode_amd(mc);
269 return; 272 return;
@@ -275,7 +278,7 @@ void load_ucode_amd_ap(void)
275 if (!*ucode || !*usize) 278 if (!*ucode || !*usize)
276 return; 279 return;
277 280
278 apply_ucode_in_initrd(*ucode, *usize); 281 apply_ucode_in_initrd(*ucode, *usize, false);
279} 282}
280 283
281static void __init collect_cpu_sig_on_bsp(void *arg) 284static void __init collect_cpu_sig_on_bsp(void *arg)
@@ -339,7 +342,7 @@ void load_ucode_amd_ap(void)
339 * AP has a different equivalence ID than BSP, looks like 342 * AP has a different equivalence ID than BSP, looks like
340 * mixed-steppings silicon so go through the ucode blob anew. 343 * mixed-steppings silicon so go through the ucode blob anew.
341 */ 344 */
342 apply_ucode_in_initrd(ucode_cpio.data, ucode_cpio.size); 345 apply_ucode_in_initrd(ucode_cpio.data, ucode_cpio.size, false);
343 } 346 }
344} 347}
345#endif 348#endif
@@ -347,7 +350,9 @@ void load_ucode_amd_ap(void)
347int __init save_microcode_in_initrd_amd(void) 350int __init save_microcode_in_initrd_amd(void)
348{ 351{
349 unsigned long cont; 352 unsigned long cont;
353 int retval = 0;
350 enum ucode_state ret; 354 enum ucode_state ret;
355 u8 *cont_va;
351 u32 eax; 356 u32 eax;
352 357
353 if (!container) 358 if (!container)
@@ -355,13 +360,15 @@ int __init save_microcode_in_initrd_amd(void)
355 360
356#ifdef CONFIG_X86_32 361#ifdef CONFIG_X86_32
357 get_bsp_sig(); 362 get_bsp_sig();
358 cont = (unsigned long)container; 363 cont = (unsigned long)container;
364 cont_va = __va(container);
359#else 365#else
360 /* 366 /*
361 * We need the physical address of the container for both bitness since 367 * We need the physical address of the container for both bitness since
362 * boot_params.hdr.ramdisk_image is a physical address. 368 * boot_params.hdr.ramdisk_image is a physical address.
363 */ 369 */
364 cont = __pa(container); 370 cont = __pa(container);
371 cont_va = container;
365#endif 372#endif
366 373
367 /* 374 /*
@@ -372,6 +379,8 @@ int __init save_microcode_in_initrd_amd(void)
372 if (relocated_ramdisk) 379 if (relocated_ramdisk)
373 container = (u8 *)(__va(relocated_ramdisk) + 380 container = (u8 *)(__va(relocated_ramdisk) +
374 (cont - boot_params.hdr.ramdisk_image)); 381 (cont - boot_params.hdr.ramdisk_image));
382 else
383 container = cont_va;
375 384
376 if (ucode_new_rev) 385 if (ucode_new_rev)
377 pr_info("microcode: updated early to new patch_level=0x%08x\n", 386 pr_info("microcode: updated early to new patch_level=0x%08x\n",
@@ -382,7 +391,7 @@ int __init save_microcode_in_initrd_amd(void)
382 391
383 ret = load_microcode_amd(eax, container, container_size); 392 ret = load_microcode_amd(eax, container, container_size);
384 if (ret != UCODE_OK) 393 if (ret != UCODE_OK)
385 return -EINVAL; 394 retval = -EINVAL;
386 395
387 /* 396 /*
388 * This will be freed any msec now, stash patches for the current 397 * This will be freed any msec now, stash patches for the current
@@ -391,5 +400,5 @@ int __init save_microcode_in_initrd_amd(void)
391 container = NULL; 400 container = NULL;
392 container_size = 0; 401 container_size = 0;
393 402
394 return 0; 403 return retval;
395} 404}
diff --git a/arch/x86/kernel/cpu/microcode/core.c b/arch/x86/kernel/cpu/microcode/core.c
index dd9d6190b08d..08fe6e8a726e 100644
--- a/arch/x86/kernel/cpu/microcode/core.c
+++ b/arch/x86/kernel/cpu/microcode/core.c
@@ -465,6 +465,16 @@ static void mc_bp_resume(void)
465 465
466 if (uci->valid && uci->mc) 466 if (uci->valid && uci->mc)
467 microcode_ops->apply_microcode(cpu); 467 microcode_ops->apply_microcode(cpu);
468#ifdef CONFIG_X86_64
469 else if (!uci->mc)
470 /*
471 * We might resume and not have applied late microcode but still
472 * have a newer patch stashed from the early loader. We don't
473 * have it in uci->mc so we have to load it the same way we're
474 * applying patches early on the APs.
475 */
476 load_ucode_ap();
477#endif
468} 478}
469 479
470static struct syscore_ops mc_syscore_ops = { 480static struct syscore_ops mc_syscore_ops = {
diff --git a/arch/x86/kernel/cpu/microcode/core_early.c b/arch/x86/kernel/cpu/microcode/core_early.c
index 5f28a64e71ea..2c017f242a78 100644
--- a/arch/x86/kernel/cpu/microcode/core_early.c
+++ b/arch/x86/kernel/cpu/microcode/core_early.c
@@ -124,7 +124,7 @@ void __init load_ucode_bsp(void)
124static bool check_loader_disabled_ap(void) 124static bool check_loader_disabled_ap(void)
125{ 125{
126#ifdef CONFIG_X86_32 126#ifdef CONFIG_X86_32
127 return __pa_nodebug(dis_ucode_ldr); 127 return *((bool *)__pa_nodebug(&dis_ucode_ldr));
128#else 128#else
129 return dis_ucode_ldr; 129 return dis_ucode_ldr;
130#endif 130#endif
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 1b8299dd3d91..143e5f5dc855 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -243,8 +243,9 @@ static bool check_hw_exists(void)
243 243
244msr_fail: 244msr_fail:
245 printk(KERN_CONT "Broken PMU hardware detected, using software events only.\n"); 245 printk(KERN_CONT "Broken PMU hardware detected, using software events only.\n");
246 printk(boot_cpu_has(X86_FEATURE_HYPERVISOR) ? KERN_INFO : KERN_ERR 246 printk("%sFailed to access perfctr msr (MSR %x is %Lx)\n",
247 "Failed to access perfctr msr (MSR %x is %Lx)\n", reg, val_new); 247 boot_cpu_has(X86_FEATURE_HYPERVISOR) ? KERN_INFO : KERN_ERR,
248 reg, val_new);
248 249
249 return false; 250 return false;
250} 251}
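
The old call relied on ?: to pick the log level, but adjacent string literals are concatenated before the conditional is evaluated, so the format string was glued onto KERN_ERR only and the hypervisor branch printed an empty KERN_INFO message. A userspace sketch of the two patterns (the level prefixes are simplified; the real kernel ones use a SOH byte plus a digit):

    #include <stdio.h>

    #define KERN_ERR   "<3>"
    #define KERN_INFO  "<6>"

    /* Compilers may warn about the non-literal format below; that is part
     * of the point of showing the old pattern. */
    int main(int argc, char **argv)
    {
            int hypervisor = argc > 1;

            /* old pattern: the message text only exists in the KERN_ERR arm */
            printf(hypervisor ? KERN_INFO : KERN_ERR "Failed to access perfctr msr\n");

            /* new pattern: one format string, level selected as an argument */
            printf("%sFailed to access perfctr msr\n",
                   hypervisor ? KERN_INFO : KERN_ERR);
            return 0;
    }
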
@@ -444,12 +445,6 @@ int x86_pmu_hw_config(struct perf_event *event)
444 if (event->attr.type == PERF_TYPE_RAW) 445 if (event->attr.type == PERF_TYPE_RAW)
445 event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK; 446 event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;
446 447
447 if (event->attr.sample_period && x86_pmu.limit_period) {
448 if (x86_pmu.limit_period(event, event->attr.sample_period) >
449 event->attr.sample_period)
450 return -EINVAL;
451 }
452
453 return x86_setup_perfctr(event); 448 return x86_setup_perfctr(event);
454} 449}
455 450
@@ -987,9 +982,6 @@ int x86_perf_event_set_period(struct perf_event *event)
987 if (left > x86_pmu.max_period) 982 if (left > x86_pmu.max_period)
988 left = x86_pmu.max_period; 983 left = x86_pmu.max_period;
989 984
990 if (x86_pmu.limit_period)
991 left = x86_pmu.limit_period(event, left);
992
993 per_cpu(pmc_prev_left[idx], smp_processor_id()) = left; 985 per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
994 986
995 /* 987 /*
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index d98a34d435d7..fc5eb390b368 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -445,7 +445,6 @@ struct x86_pmu {
445 struct x86_pmu_quirk *quirks; 445 struct x86_pmu_quirk *quirks;
446 int perfctr_second_write; 446 int perfctr_second_write;
447 bool late_ack; 447 bool late_ack;
448 unsigned (*limit_period)(struct perf_event *event, unsigned l);
449 448
450 /* 449 /*
451 * sysfs attrs 450 * sysfs attrs
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index a73947c53b65..944bf019b74f 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -220,15 +220,6 @@ static struct event_constraint intel_hsw_event_constraints[] = {
220 EVENT_CONSTRAINT_END 220 EVENT_CONSTRAINT_END
221}; 221};
222 222
223static struct event_constraint intel_bdw_event_constraints[] = {
224 FIXED_EVENT_CONSTRAINT(0x00c0, 0), /* INST_RETIRED.ANY */
225 FIXED_EVENT_CONSTRAINT(0x003c, 1), /* CPU_CLK_UNHALTED.CORE */
226 FIXED_EVENT_CONSTRAINT(0x0300, 2), /* CPU_CLK_UNHALTED.REF */
227 INTEL_UEVENT_CONSTRAINT(0x148, 0x4), /* L1D_PEND_MISS.PENDING */
228 INTEL_EVENT_CONSTRAINT(0xa3, 0x4), /* CYCLE_ACTIVITY.* */
229 EVENT_CONSTRAINT_END
230};
231
232static u64 intel_pmu_event_map(int hw_event) 223static u64 intel_pmu_event_map(int hw_event)
233{ 224{
234 return intel_perfmon_event_map[hw_event]; 225 return intel_perfmon_event_map[hw_event];
@@ -424,126 +415,6 @@ static __initconst const u64 snb_hw_cache_event_ids
424 415
425}; 416};
426 417
427static __initconst const u64 hsw_hw_cache_event_ids
428 [PERF_COUNT_HW_CACHE_MAX]
429 [PERF_COUNT_HW_CACHE_OP_MAX]
430 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
431{
432 [ C(L1D ) ] = {
433 [ C(OP_READ) ] = {
434 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
435 [ C(RESULT_MISS) ] = 0x151, /* L1D.REPLACEMENT */
436 },
437 [ C(OP_WRITE) ] = {
438 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
439 [ C(RESULT_MISS) ] = 0x0,
440 },
441 [ C(OP_PREFETCH) ] = {
442 [ C(RESULT_ACCESS) ] = 0x0,
443 [ C(RESULT_MISS) ] = 0x0,
444 },
445 },
446 [ C(L1I ) ] = {
447 [ C(OP_READ) ] = {
448 [ C(RESULT_ACCESS) ] = 0x0,
449 [ C(RESULT_MISS) ] = 0x280, /* ICACHE.MISSES */
450 },
451 [ C(OP_WRITE) ] = {
452 [ C(RESULT_ACCESS) ] = -1,
453 [ C(RESULT_MISS) ] = -1,
454 },
455 [ C(OP_PREFETCH) ] = {
456 [ C(RESULT_ACCESS) ] = 0x0,
457 [ C(RESULT_MISS) ] = 0x0,
458 },
459 },
460 [ C(LL ) ] = {
461 [ C(OP_READ) ] = {
462 /* OFFCORE_RESPONSE:ALL_DATA_RD|ALL_CODE_RD */
463 [ C(RESULT_ACCESS) ] = 0x1b7,
464 /* OFFCORE_RESPONSE:ALL_DATA_RD|ALL_CODE_RD|SUPPLIER_NONE|
465 L3_MISS|ANY_SNOOP */
466 [ C(RESULT_MISS) ] = 0x1b7,
467 },
468 [ C(OP_WRITE) ] = {
469 [ C(RESULT_ACCESS) ] = 0x1b7, /* OFFCORE_RESPONSE:ALL_RFO */
470 /* OFFCORE_RESPONSE:ALL_RFO|SUPPLIER_NONE|L3_MISS|ANY_SNOOP */
471 [ C(RESULT_MISS) ] = 0x1b7,
472 },
473 [ C(OP_PREFETCH) ] = {
474 [ C(RESULT_ACCESS) ] = 0x0,
475 [ C(RESULT_MISS) ] = 0x0,
476 },
477 },
478 [ C(DTLB) ] = {
479 [ C(OP_READ) ] = {
480 [ C(RESULT_ACCESS) ] = 0x81d0, /* MEM_UOPS_RETIRED.ALL_LOADS */
481 [ C(RESULT_MISS) ] = 0x108, /* DTLB_LOAD_MISSES.MISS_CAUSES_A_WALK */
482 },
483 [ C(OP_WRITE) ] = {
484 [ C(RESULT_ACCESS) ] = 0x82d0, /* MEM_UOPS_RETIRED.ALL_STORES */
485 [ C(RESULT_MISS) ] = 0x149, /* DTLB_STORE_MISSES.MISS_CAUSES_A_WALK */
486 },
487 [ C(OP_PREFETCH) ] = {
488 [ C(RESULT_ACCESS) ] = 0x0,
489 [ C(RESULT_MISS) ] = 0x0,
490 },
491 },
492 [ C(ITLB) ] = {
493 [ C(OP_READ) ] = {
494 [ C(RESULT_ACCESS) ] = 0x6085, /* ITLB_MISSES.STLB_HIT */
495 [ C(RESULT_MISS) ] = 0x185, /* ITLB_MISSES.MISS_CAUSES_A_WALK */
496 },
497 [ C(OP_WRITE) ] = {
498 [ C(RESULT_ACCESS) ] = -1,
499 [ C(RESULT_MISS) ] = -1,
500 },
501 [ C(OP_PREFETCH) ] = {
502 [ C(RESULT_ACCESS) ] = -1,
503 [ C(RESULT_MISS) ] = -1,
504 },
505 },
506 [ C(BPU ) ] = {
507 [ C(OP_READ) ] = {
508 [ C(RESULT_ACCESS) ] = 0xc4, /* BR_INST_RETIRED.ALL_BRANCHES */
509 [ C(RESULT_MISS) ] = 0xc5, /* BR_MISP_RETIRED.ALL_BRANCHES */
510 },
511 [ C(OP_WRITE) ] = {
512 [ C(RESULT_ACCESS) ] = -1,
513 [ C(RESULT_MISS) ] = -1,
514 },
515 [ C(OP_PREFETCH) ] = {
516 [ C(RESULT_ACCESS) ] = -1,
517 [ C(RESULT_MISS) ] = -1,
518 },
519 },
520};
521
522static __initconst const u64 hsw_hw_cache_extra_regs
523 [PERF_COUNT_HW_CACHE_MAX]
524 [PERF_COUNT_HW_CACHE_OP_MAX]
525 [PERF_COUNT_HW_CACHE_RESULT_MAX] =
526{
527 [ C(LL ) ] = {
528 [ C(OP_READ) ] = {
529 /* OFFCORE_RESPONSE:ALL_DATA_RD|ALL_CODE_RD */
530 [ C(RESULT_ACCESS) ] = 0x2d5,
531 /* OFFCORE_RESPONSE:ALL_DATA_RD|ALL_CODE_RD|SUPPLIER_NONE|
532 L3_MISS|ANY_SNOOP */
533 [ C(RESULT_MISS) ] = 0x3fbc0202d5ull,
534 },
535 [ C(OP_WRITE) ] = {
536 [ C(RESULT_ACCESS) ] = 0x122, /* OFFCORE_RESPONSE:ALL_RFO */
537 /* OFFCORE_RESPONSE:ALL_RFO|SUPPLIER_NONE|L3_MISS|ANY_SNOOP */
538 [ C(RESULT_MISS) ] = 0x3fbc020122ull,
539 },
540 [ C(OP_PREFETCH) ] = {
541 [ C(RESULT_ACCESS) ] = 0x0,
542 [ C(RESULT_MISS) ] = 0x0,
543 },
544 },
545};
546
547static __initconst const u64 westmere_hw_cache_event_ids 418static __initconst const u64 westmere_hw_cache_event_ids
548 [PERF_COUNT_HW_CACHE_MAX] 419 [PERF_COUNT_HW_CACHE_MAX]
549 [PERF_COUNT_HW_CACHE_OP_MAX] 420 [PERF_COUNT_HW_CACHE_OP_MAX]
@@ -2034,24 +1905,6 @@ hsw_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
2034 return c; 1905 return c;
2035} 1906}
2036 1907
2037/*
2038 * Broadwell:
2039 * The INST_RETIRED.ALL period always needs to have lowest
2040 * 6bits cleared (BDM57). It shall not use a period smaller
2041 * than 100 (BDM11). We combine the two to enforce
2042 * a min-period of 128.
2043 */
2044static unsigned bdw_limit_period(struct perf_event *event, unsigned left)
2045{
2046 if ((event->hw.config & INTEL_ARCH_EVENT_MASK) ==
2047 X86_CONFIG(.event=0xc0, .umask=0x01)) {
2048 if (left < 128)
2049 left = 128;
2050 left &= ~0x3fu;
2051 }
2052 return left;
2053}
2054
2055PMU_FORMAT_ATTR(event, "config:0-7" ); 1908PMU_FORMAT_ATTR(event, "config:0-7" );
2056PMU_FORMAT_ATTR(umask, "config:8-15" ); 1909PMU_FORMAT_ATTR(umask, "config:8-15" );
2057PMU_FORMAT_ATTR(edge, "config:18" ); 1910PMU_FORMAT_ATTR(edge, "config:18" );
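
The removed bdw_limit_period() comment spells out the arithmetic: combine the minimum period of 100 (BDM11) with the low-6-bits-cleared requirement (BDM57) by clamping to 128 and masking. A standalone sketch of that clamp:

    #include <stdio.h>

    /* Sketch of the period clamp the removed bdw_limit_period() applied for
     * INST_RETIRED.ALL: at least 128 and the low 6 bits cleared, which
     * satisfies both the >=100 and 64-aligned constraints at once. */
    static unsigned limit_period(unsigned left)
    {
            if (left < 128)
                    left = 128;
            left &= ~0x3fu;
            return left;
    }

    int main(void)
    {
            printf("%u %u %u\n",
                   limit_period(1), limit_period(130), limit_period(200));
            /* prints: 128 128 192 */
            return 0;
    }
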
@@ -2692,8 +2545,8 @@ __init int intel_pmu_init(void)
2692 case 69: /* 22nm Haswell ULT */ 2545 case 69: /* 22nm Haswell ULT */
2693 case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */ 2546 case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */
2694 x86_pmu.late_ack = true; 2547 x86_pmu.late_ack = true;
2695 memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids)); 2548 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, sizeof(hw_cache_event_ids));
2696 memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs)); 2549 memcpy(hw_cache_extra_regs, snb_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
2697 2550
2698 intel_pmu_lbr_init_snb(); 2551 intel_pmu_lbr_init_snb();
2699 2552
@@ -2712,28 +2565,6 @@ __init int intel_pmu_init(void)
2712 pr_cont("Haswell events, "); 2565 pr_cont("Haswell events, ");
2713 break; 2566 break;
2714 2567
2715 case 61: /* 14nm Broadwell Core-M */
2716 x86_pmu.late_ack = true;
2717 memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
2718 memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
2719
2720 intel_pmu_lbr_init_snb();
2721
2722 x86_pmu.event_constraints = intel_bdw_event_constraints;
2723 x86_pmu.pebs_constraints = intel_hsw_pebs_event_constraints;
2724 x86_pmu.extra_regs = intel_snbep_extra_regs;
2725 x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
2726 /* all extra regs are per-cpu when HT is on */
2727 x86_pmu.er_flags |= ERF_HAS_RSP_1;
2728 x86_pmu.er_flags |= ERF_NO_HT_SHARING;
2729
2730 x86_pmu.hw_config = hsw_hw_config;
2731 x86_pmu.get_event_constraints = hsw_get_event_constraints;
2732 x86_pmu.cpu_events = hsw_events_attrs;
2733 x86_pmu.limit_period = bdw_limit_period;
2734 pr_cont("Broadwell events, ");
2735 break;
2736
2737 default: 2568 default:
2738 switch (x86_pmu.version) { 2569 switch (x86_pmu.version) {
2739 case 1: 2570 case 1:
diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
index adf138eac85c..f9ed429d6e4f 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
@@ -486,14 +486,17 @@ static struct attribute_group snbep_uncore_qpi_format_group = {
486 .attrs = snbep_uncore_qpi_formats_attr, 486 .attrs = snbep_uncore_qpi_formats_attr,
487}; 487};
488 488
489#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT() \ 489#define __SNBEP_UNCORE_MSR_OPS_COMMON_INIT() \
490 .init_box = snbep_uncore_msr_init_box, \
491 .disable_box = snbep_uncore_msr_disable_box, \ 490 .disable_box = snbep_uncore_msr_disable_box, \
492 .enable_box = snbep_uncore_msr_enable_box, \ 491 .enable_box = snbep_uncore_msr_enable_box, \
493 .disable_event = snbep_uncore_msr_disable_event, \ 492 .disable_event = snbep_uncore_msr_disable_event, \
494 .enable_event = snbep_uncore_msr_enable_event, \ 493 .enable_event = snbep_uncore_msr_enable_event, \
495 .read_counter = uncore_msr_read_counter 494 .read_counter = uncore_msr_read_counter
496 495
496#define SNBEP_UNCORE_MSR_OPS_COMMON_INIT() \
497 __SNBEP_UNCORE_MSR_OPS_COMMON_INIT(), \
498 .init_box = snbep_uncore_msr_init_box \
499
497static struct intel_uncore_ops snbep_uncore_msr_ops = { 500static struct intel_uncore_ops snbep_uncore_msr_ops = {
498 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(), 501 SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
499}; 502};
@@ -1919,6 +1922,30 @@ static struct intel_uncore_type hswep_uncore_cbox = {
1919 .format_group = &hswep_uncore_cbox_format_group, 1922 .format_group = &hswep_uncore_cbox_format_group,
1920}; 1923};
1921 1924
1925/*
1926 * Write SBOX Initialization register bit by bit to avoid spurious #GPs
1927 */
1928static void hswep_uncore_sbox_msr_init_box(struct intel_uncore_box *box)
1929{
1930 unsigned msr = uncore_msr_box_ctl(box);
1931
1932 if (msr) {
1933 u64 init = SNBEP_PMON_BOX_CTL_INT;
1934 u64 flags = 0;
1935 int i;
1936
1937 for_each_set_bit(i, (unsigned long *)&init, 64) {
1938 flags |= (1ULL << i);
1939 wrmsrl(msr, flags);
1940 }
1941 }
1942}
1943
1944static struct intel_uncore_ops hswep_uncore_sbox_msr_ops = {
1945 __SNBEP_UNCORE_MSR_OPS_COMMON_INIT(),
1946 .init_box = hswep_uncore_sbox_msr_init_box
1947};
1948
1922static struct attribute *hswep_uncore_sbox_formats_attr[] = { 1949static struct attribute *hswep_uncore_sbox_formats_attr[] = {
1923 &format_attr_event.attr, 1950 &format_attr_event.attr,
1924 &format_attr_umask.attr, 1951 &format_attr_umask.attr,
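
The new init_box callback avoids writing the full initialization value in one go; it ORs the set bits in one at a time and writes each partial value, so an individual unsupported bit cannot turn the whole write into a spurious #GP. An illustrative userspace version with the MSR write stubbed out:

    #include <stdint.h>
    #include <stdio.h>

    /* wrmsr() is stubbed; the point is the bit-by-bit accumulation. */
    static void wrmsr_stub(uint64_t val)
    {
            printf("wrmsr <- 0x%llx\n", (unsigned long long)val);
    }

    static void init_box(uint64_t init)
    {
            uint64_t flags = 0;

            for (int i = 0; i < 64; i++) {
                    if (!(init & (1ULL << i)))
                            continue;
                    flags |= 1ULL << i;
                    wrmsr_stub(flags);
            }
    }

    int main(void)
    {
            init_box(0x30103);      /* arbitrary example, not the real BOX_CTL_INT value */
            return 0;
    }
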
@@ -1944,7 +1971,7 @@ static struct intel_uncore_type hswep_uncore_sbox = {
1944 .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK, 1971 .event_mask = HSWEP_S_MSR_PMON_RAW_EVENT_MASK,
1945 .box_ctl = HSWEP_S0_MSR_PMON_BOX_CTL, 1972 .box_ctl = HSWEP_S0_MSR_PMON_BOX_CTL,
1946 .msr_offset = HSWEP_SBOX_MSR_OFFSET, 1973 .msr_offset = HSWEP_SBOX_MSR_OFFSET,
1947 .ops = &snbep_uncore_msr_ops, 1974 .ops = &hswep_uncore_sbox_msr_ops,
1948 .format_group = &hswep_uncore_sbox_format_group, 1975 .format_group = &hswep_uncore_sbox_format_group,
1949}; 1976};
1950 1977
@@ -2025,13 +2052,27 @@ static struct intel_uncore_type hswep_uncore_imc = {
2025 SNBEP_UNCORE_PCI_COMMON_INIT(), 2052 SNBEP_UNCORE_PCI_COMMON_INIT(),
2026}; 2053};
2027 2054
2055static unsigned hswep_uncore_irp_ctrs[] = {0xa0, 0xa8, 0xb0, 0xb8};
2056
2057static u64 hswep_uncore_irp_read_counter(struct intel_uncore_box *box, struct perf_event *event)
2058{
2059 struct pci_dev *pdev = box->pci_dev;
2060 struct hw_perf_event *hwc = &event->hw;
2061 u64 count = 0;
2062
2063 pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx], (u32 *)&count);
2064 pci_read_config_dword(pdev, hswep_uncore_irp_ctrs[hwc->idx] + 4, (u32 *)&count + 1);
2065
2066 return count;
2067}
2068
2028static struct intel_uncore_ops hswep_uncore_irp_ops = { 2069static struct intel_uncore_ops hswep_uncore_irp_ops = {
2029 .init_box = snbep_uncore_pci_init_box, 2070 .init_box = snbep_uncore_pci_init_box,
2030 .disable_box = snbep_uncore_pci_disable_box, 2071 .disable_box = snbep_uncore_pci_disable_box,
2031 .enable_box = snbep_uncore_pci_enable_box, 2072 .enable_box = snbep_uncore_pci_enable_box,
2032 .disable_event = ivbep_uncore_irp_disable_event, 2073 .disable_event = ivbep_uncore_irp_disable_event,
2033 .enable_event = ivbep_uncore_irp_enable_event, 2074 .enable_event = ivbep_uncore_irp_enable_event,
2034 .read_counter = ivbep_uncore_irp_read_counter, 2075 .read_counter = hswep_uncore_irp_read_counter,
2035}; 2076};
2036 2077
2037static struct intel_uncore_type hswep_uncore_irp = { 2078static struct intel_uncore_type hswep_uncore_irp = {
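
The replacement read_counter callback assembles the 64-bit value from two 32-bit config-space reads at consecutive offsets, writing the halves directly into the low and high words of the result. A userspace sketch of that assembly, with pci_read_config_dword() replaced by reads from a fake register file (the pointer cast is only valid as written on a little-endian machine, which x86 is):

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t fake_cfg[2] = { 0xdeadbeef, 0x12 };     /* low word, high word */

    static void read_cfg_dword(int off, uint32_t *val)
    {
            *val = fake_cfg[off / 4];
    }

    int main(void)
    {
            uint64_t count = 0;

            read_cfg_dword(0, (uint32_t *)&count);          /* bits 31:0  */
            read_cfg_dword(4, (uint32_t *)&count + 1);      /* bits 63:32 */
            printf("count = 0x%llx\n", (unsigned long long)count);  /* 0x12deadbeef */
            return 0;
    }
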
diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
index 1abcb50b48ae..ff86f19b5758 100644
--- a/arch/x86/kernel/dumpstack_64.c
+++ b/arch/x86/kernel/dumpstack_64.c
@@ -24,7 +24,6 @@ static char x86_stack_ids[][8] = {
24 [ DEBUG_STACK-1 ] = "#DB", 24 [ DEBUG_STACK-1 ] = "#DB",
25 [ NMI_STACK-1 ] = "NMI", 25 [ NMI_STACK-1 ] = "NMI",
26 [ DOUBLEFAULT_STACK-1 ] = "#DF", 26 [ DOUBLEFAULT_STACK-1 ] = "#DF",
27 [ STACKFAULT_STACK-1 ] = "#SS",
28 [ MCE_STACK-1 ] = "#MC", 27 [ MCE_STACK-1 ] = "#MC",
29#if DEBUG_STKSZ > EXCEPTION_STKSZ 28#if DEBUG_STKSZ > EXCEPTION_STKSZ
30 [ N_EXCEPTION_STACKS ... 29 [ N_EXCEPTION_STACKS ...
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index b553ed89e5f5..344b63f18d14 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -447,15 +447,14 @@ sysenter_exit:
447sysenter_audit: 447sysenter_audit:
448 testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%ebp) 448 testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
449 jnz syscall_trace_entry 449 jnz syscall_trace_entry
450 addl $4,%esp 450 /* movl PT_EAX(%esp), %eax already set, syscall number: 1st arg to audit */
451 CFI_ADJUST_CFA_OFFSET -4 451 movl PT_EBX(%esp), %edx /* ebx/a0: 2nd arg to audit */
452 movl %esi,4(%esp) /* 5th arg: 4th syscall arg */ 452 /* movl PT_ECX(%esp), %ecx already set, a1: 3nd arg to audit */
453 movl %edx,(%esp) /* 4th arg: 3rd syscall arg */ 453 pushl_cfi PT_ESI(%esp) /* a3: 5th arg */
454 /* %ecx already in %ecx 3rd arg: 2nd syscall arg */ 454 pushl_cfi PT_EDX+4(%esp) /* a2: 4th arg */
455 movl %ebx,%edx /* 2nd arg: 1st syscall arg */
456 /* %eax already in %eax 1st arg: syscall number */
457 call __audit_syscall_entry 455 call __audit_syscall_entry
458 pushl_cfi %ebx 456 popl_cfi %ecx /* get that remapped edx off the stack */
457 popl_cfi %ecx /* get that remapped esi off the stack */
459 movl PT_EAX(%esp),%eax /* reload syscall number */ 458 movl PT_EAX(%esp),%eax /* reload syscall number */
460 jmp sysenter_do_call 459 jmp sysenter_do_call
461 460
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index df088bb03fb3..c0226ab54106 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -828,9 +828,15 @@ ENTRY(native_iret)
828 jnz native_irq_return_ldt 828 jnz native_irq_return_ldt
829#endif 829#endif
830 830
831.global native_irq_return_iret
831native_irq_return_iret: 832native_irq_return_iret:
833 /*
834 * This may fault. Non-paranoid faults on return to userspace are
835 * handled by fixup_bad_iret. These include #SS, #GP, and #NP.
836 * Double-faults due to espfix64 are handled in do_double_fault.
837 * Other faults here are fatal.
838 */
832 iretq 839 iretq
833 _ASM_EXTABLE(native_irq_return_iret, bad_iret)
834 840
835#ifdef CONFIG_X86_ESPFIX64 841#ifdef CONFIG_X86_ESPFIX64
836native_irq_return_ldt: 842native_irq_return_ldt:
@@ -858,25 +864,6 @@ native_irq_return_ldt:
858 jmp native_irq_return_iret 864 jmp native_irq_return_iret
859#endif 865#endif
860 866
861 .section .fixup,"ax"
862bad_iret:
863 /*
864 * The iret traps when the %cs or %ss being restored is bogus.
865 * We've lost the original trap vector and error code.
866 * #GPF is the most likely one to get for an invalid selector.
867 * So pretend we completed the iret and took the #GPF in user mode.
868 *
869 * We are now running with the kernel GS after exception recovery.
870 * But error_entry expects us to have user GS to match the user %cs,
871 * so swap back.
872 */
873 pushq $0
874
875 SWAPGS
876 jmp general_protection
877
878 .previous
879
880 /* edi: workmask, edx: work */ 867 /* edi: workmask, edx: work */
881retint_careful: 868retint_careful:
882 CFI_RESTORE_STATE 869 CFI_RESTORE_STATE
@@ -922,37 +909,6 @@ ENTRY(retint_kernel)
922 CFI_ENDPROC 909 CFI_ENDPROC
923END(common_interrupt) 910END(common_interrupt)
924 911
925 /*
926 * If IRET takes a fault on the espfix stack, then we
927 * end up promoting it to a doublefault. In that case,
928 * modify the stack to make it look like we just entered
929 * the #GP handler from user space, similar to bad_iret.
930 */
931#ifdef CONFIG_X86_ESPFIX64
932 ALIGN
933__do_double_fault:
934 XCPT_FRAME 1 RDI+8
935 movq RSP(%rdi),%rax /* Trap on the espfix stack? */
936 sarq $PGDIR_SHIFT,%rax
937 cmpl $ESPFIX_PGD_ENTRY,%eax
938 jne do_double_fault /* No, just deliver the fault */
939 cmpl $__KERNEL_CS,CS(%rdi)
940 jne do_double_fault
941 movq RIP(%rdi),%rax
942 cmpq $native_irq_return_iret,%rax
943 jne do_double_fault /* This shouldn't happen... */
944 movq PER_CPU_VAR(kernel_stack),%rax
945 subq $(6*8-KERNEL_STACK_OFFSET),%rax /* Reset to original stack */
946 movq %rax,RSP(%rdi)
947 movq $0,(%rax) /* Missing (lost) #GP error code */
948 movq $general_protection,RIP(%rdi)
949 retq
950 CFI_ENDPROC
951END(__do_double_fault)
952#else
953# define __do_double_fault do_double_fault
954#endif
955
956/* 912/*
957 * APIC interrupts. 913 * APIC interrupts.
958 */ 914 */
@@ -1124,7 +1080,7 @@ idtentry overflow do_overflow has_error_code=0
1124idtentry bounds do_bounds has_error_code=0 1080idtentry bounds do_bounds has_error_code=0
1125idtentry invalid_op do_invalid_op has_error_code=0 1081idtentry invalid_op do_invalid_op has_error_code=0
1126idtentry device_not_available do_device_not_available has_error_code=0 1082idtentry device_not_available do_device_not_available has_error_code=0
1127idtentry double_fault __do_double_fault has_error_code=1 paranoid=1 1083idtentry double_fault do_double_fault has_error_code=1 paranoid=1
1128idtentry coprocessor_segment_overrun do_coprocessor_segment_overrun has_error_code=0 1084idtentry coprocessor_segment_overrun do_coprocessor_segment_overrun has_error_code=0
1129idtentry invalid_TSS do_invalid_TSS has_error_code=1 1085idtentry invalid_TSS do_invalid_TSS has_error_code=1
1130idtentry segment_not_present do_segment_not_present has_error_code=1 1086idtentry segment_not_present do_segment_not_present has_error_code=1
@@ -1289,7 +1245,7 @@ apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
1289 1245
1290idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK 1246idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK
1291idtentry int3 do_int3 has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK 1247idtentry int3 do_int3 has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK
1292idtentry stack_segment do_stack_segment has_error_code=1 paranoid=1 1248idtentry stack_segment do_stack_segment has_error_code=1
1293#ifdef CONFIG_XEN 1249#ifdef CONFIG_XEN
1294idtentry xen_debug do_debug has_error_code=0 1250idtentry xen_debug do_debug has_error_code=0
1295idtentry xen_int3 do_int3 has_error_code=0 1251idtentry xen_int3 do_int3 has_error_code=0
@@ -1399,17 +1355,16 @@ error_sti:
1399 1355
1400/* 1356/*
1401 * There are two places in the kernel that can potentially fault with 1357 * There are two places in the kernel that can potentially fault with
1402 * usergs. Handle them here. The exception handlers after iret run with 1358 * usergs. Handle them here. B stepping K8s sometimes report a
1403 * kernel gs again, so don't set the user space flag. B stepping K8s 1359 * truncated RIP for IRET exceptions returning to compat mode. Check
1404 * sometimes report an truncated RIP for IRET exceptions returning to 1360 * for these here too.
1405 * compat mode. Check for these here too.
1406 */ 1361 */
1407error_kernelspace: 1362error_kernelspace:
1408 CFI_REL_OFFSET rcx, RCX+8 1363 CFI_REL_OFFSET rcx, RCX+8
1409 incl %ebx 1364 incl %ebx
1410 leaq native_irq_return_iret(%rip),%rcx 1365 leaq native_irq_return_iret(%rip),%rcx
1411 cmpq %rcx,RIP+8(%rsp) 1366 cmpq %rcx,RIP+8(%rsp)
1412 je error_swapgs 1367 je error_bad_iret
1413 movl %ecx,%eax /* zero extend */ 1368 movl %ecx,%eax /* zero extend */
1414 cmpq %rax,RIP+8(%rsp) 1369 cmpq %rax,RIP+8(%rsp)
1415 je bstep_iret 1370 je bstep_iret
@@ -1420,7 +1375,15 @@ error_kernelspace:
1420bstep_iret: 1375bstep_iret:
1421 /* Fix truncated RIP */ 1376 /* Fix truncated RIP */
1422 movq %rcx,RIP+8(%rsp) 1377 movq %rcx,RIP+8(%rsp)
1423 jmp error_swapgs 1378 /* fall through */
1379
1380error_bad_iret:
1381 SWAPGS
1382 mov %rsp,%rdi
1383 call fixup_bad_iret
1384 mov %rax,%rsp
1385 decl %ebx /* Return to usergs */
1386 jmp error_sti
1424 CFI_ENDPROC 1387 CFI_ENDPROC
1425END(error_entry) 1388END(error_entry)
1426 1389
diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
index 8af817105e29..e7cc5370cd2f 100644
--- a/arch/x86/kernel/i8259.c
+++ b/arch/x86/kernel/i8259.c
@@ -111,8 +111,7 @@ static void make_8259A_irq(unsigned int irq)
111{ 111{
112 disable_irq_nosync(irq); 112 disable_irq_nosync(irq);
113 io_apic_irqs &= ~(1<<irq); 113 io_apic_irqs &= ~(1<<irq);
114 irq_set_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq, 114 irq_set_chip_and_handler(irq, &i8259A_chip, handle_level_irq);
115 i8259A_chip.name);
116 enable_irq(irq); 115 enable_irq(irq);
117} 116}
118 117
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index 44f1ed42fdf2..4de73ee78361 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -70,7 +70,6 @@ int vector_used_by_percpu_irq(unsigned int vector)
70void __init init_ISA_irqs(void) 70void __init init_ISA_irqs(void)
71{ 71{
72 struct irq_chip *chip = legacy_pic->chip; 72 struct irq_chip *chip = legacy_pic->chip;
73 const char *name = chip->name;
74 int i; 73 int i;
75 74
76#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC) 75#if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
@@ -79,7 +78,7 @@ void __init init_ISA_irqs(void)
79 legacy_pic->init(0); 78 legacy_pic->init(0);
80 79
81 for (i = 0; i < nr_legacy_irqs(); i++) 80 for (i = 0; i < nr_legacy_irqs(); i++)
82 irq_set_chip_and_handler_name(i, chip, handle_level_irq, name); 81 irq_set_chip_and_handler(i, chip, handle_level_irq);
83} 82}
84 83
85void __init init_IRQ(void) 84void __init init_IRQ(void)
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 749b0e423419..e510618b2e91 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -1484,7 +1484,7 @@ unsigned long syscall_trace_enter_phase1(struct pt_regs *regs, u32 arch)
1484 */ 1484 */
1485 if (work & _TIF_NOHZ) { 1485 if (work & _TIF_NOHZ) {
1486 user_exit(); 1486 user_exit();
1487 work &= ~TIF_NOHZ; 1487 work &= ~_TIF_NOHZ;
1488 } 1488 }
1489 1489
1490#ifdef CONFIG_SECCOMP 1490#ifdef CONFIG_SECCOMP
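
The one-character fix above is the classic bit-number versus bit-mask mix-up: TIF_NOHZ is a bit position, _TIF_NOHZ is the corresponding mask, and clearing with ~TIF_NOHZ wipes unrelated low flags while leaving the intended one set. A small sketch with made-up values (the real TIF_NOHZ position differs):

    #include <stdio.h>

    #define TIF_NOHZ        7
    #define _TIF_NOHZ       (1u << TIF_NOHZ)

    int main(void)
    {
            unsigned work = _TIF_NOHZ | 0x3;

            unsigned buggy = work & ~TIF_NOHZ;      /* clears bits 0-2, NOHZ bit stays set */
            unsigned fixed = work & ~_TIF_NOHZ;     /* clears only the NOHZ bit */

            printf("work=0x%x buggy=0x%x fixed=0x%x\n", work, buggy, fixed);
            return 0;
    }
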
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 235cfd39e0d7..ab08aa2276fb 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1128,7 +1128,6 @@ void __init setup_arch(char **cmdline_p)
1128 setup_real_mode(); 1128 setup_real_mode();
1129 1129
1130 memblock_set_current_limit(get_max_mapped()); 1130 memblock_set_current_limit(get_max_mapped());
1131 dma_contiguous_reserve(max_pfn_mapped << PAGE_SHIFT);
1132 1131
1133 /* 1132 /*
1134 * NOTE: On x86-32, only from this point on, fixmaps are ready for use. 1133 * NOTE: On x86-32, only from this point on, fixmaps are ready for use.
@@ -1159,6 +1158,7 @@ void __init setup_arch(char **cmdline_p)
1159 early_acpi_boot_init(); 1158 early_acpi_boot_init();
1160 1159
1161 initmem_init(); 1160 initmem_init();
1161 dma_contiguous_reserve(max_pfn_mapped << PAGE_SHIFT);
1162 1162
1163 /* 1163 /*
1164 * Reserve memory for crash kernel after SRAT is parsed so that it 1164 * Reserve memory for crash kernel after SRAT is parsed so that it
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 2d5200e56357..668d8f2a8781 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -102,8 +102,6 @@ DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
102DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info); 102DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
103EXPORT_PER_CPU_SYMBOL(cpu_info); 103EXPORT_PER_CPU_SYMBOL(cpu_info);
104 104
105static DEFINE_PER_CPU(struct completion, die_complete);
106
107atomic_t init_deasserted; 105atomic_t init_deasserted;
108 106
109/* 107/*
@@ -1305,10 +1303,14 @@ static void __ref remove_cpu_from_maps(int cpu)
1305 numa_remove_cpu(cpu); 1303 numa_remove_cpu(cpu);
1306} 1304}
1307 1305
1306static DEFINE_PER_CPU(struct completion, die_complete);
1307
1308void cpu_disable_common(void) 1308void cpu_disable_common(void)
1309{ 1309{
1310 int cpu = smp_processor_id(); 1310 int cpu = smp_processor_id();
1311 1311
1312 init_completion(&per_cpu(die_complete, smp_processor_id()));
1313
1312 remove_siblinginfo(cpu); 1314 remove_siblinginfo(cpu);
1313 1315
1314 /* It's now safe to remove this processor from the online map */ 1316 /* It's now safe to remove this processor from the online map */
@@ -1327,16 +1329,21 @@ int native_cpu_disable(void)
1327 return ret; 1329 return ret;
1328 1330
1329 clear_local_APIC(); 1331 clear_local_APIC();
1330 init_completion(&per_cpu(die_complete, smp_processor_id()));
1331 cpu_disable_common(); 1332 cpu_disable_common();
1332 1333
1333 return 0; 1334 return 0;
1334} 1335}
1335 1336
1337void cpu_die_common(unsigned int cpu)
1338{
1339 wait_for_completion_timeout(&per_cpu(die_complete, cpu), HZ);
1340}
1341
1336void native_cpu_die(unsigned int cpu) 1342void native_cpu_die(unsigned int cpu)
1337{ 1343{
1338 /* We don't do anything here: idle task is faking death itself. */ 1344 /* We don't do anything here: idle task is faking death itself. */
1339 wait_for_completion_timeout(&per_cpu(die_complete, cpu), HZ); 1345
1346 cpu_die_common(cpu);
1340 1347
1341 /* They ack this in play_dead() by setting CPU_DEAD */ 1348 /* They ack this in play_dead() by setting CPU_DEAD */
1342 if (per_cpu(cpu_state, cpu) == CPU_DEAD) { 1349 if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index 0d0e922fafc1..de801f22128a 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -233,32 +233,40 @@ DO_ERROR(X86_TRAP_UD, SIGILL, "invalid opcode", invalid_op)
233DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun",coprocessor_segment_overrun) 233DO_ERROR(X86_TRAP_OLD_MF, SIGFPE, "coprocessor segment overrun",coprocessor_segment_overrun)
234DO_ERROR(X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS) 234DO_ERROR(X86_TRAP_TS, SIGSEGV, "invalid TSS", invalid_TSS)
235DO_ERROR(X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present) 235DO_ERROR(X86_TRAP_NP, SIGBUS, "segment not present", segment_not_present)
236#ifdef CONFIG_X86_32
237DO_ERROR(X86_TRAP_SS, SIGBUS, "stack segment", stack_segment) 236DO_ERROR(X86_TRAP_SS, SIGBUS, "stack segment", stack_segment)
238#endif
239DO_ERROR(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check) 237DO_ERROR(X86_TRAP_AC, SIGBUS, "alignment check", alignment_check)
240 238
241#ifdef CONFIG_X86_64 239#ifdef CONFIG_X86_64
242/* Runs on IST stack */ 240/* Runs on IST stack */
243dotraplinkage void do_stack_segment(struct pt_regs *regs, long error_code)
244{
245 enum ctx_state prev_state;
246
247 prev_state = exception_enter();
248 if (notify_die(DIE_TRAP, "stack segment", regs, error_code,
249 X86_TRAP_SS, SIGBUS) != NOTIFY_STOP) {
250 preempt_conditional_sti(regs);
251 do_trap(X86_TRAP_SS, SIGBUS, "stack segment", regs, error_code, NULL);
252 preempt_conditional_cli(regs);
253 }
254 exception_exit(prev_state);
255}
256
257dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code) 241dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
258{ 242{
259 static const char str[] = "double fault"; 243 static const char str[] = "double fault";
260 struct task_struct *tsk = current; 244 struct task_struct *tsk = current;
261 245
246#ifdef CONFIG_X86_ESPFIX64
247 extern unsigned char native_irq_return_iret[];
248
249 /*
250 * If IRET takes a non-IST fault on the espfix64 stack, then we
251 * end up promoting it to a doublefault. In that case, modify
252 * the stack to make it look like we just entered the #GP
253 * handler from user space, similar to bad_iret.
254 */
255 if (((long)regs->sp >> PGDIR_SHIFT) == ESPFIX_PGD_ENTRY &&
256 regs->cs == __KERNEL_CS &&
257 regs->ip == (unsigned long)native_irq_return_iret)
258 {
259 struct pt_regs *normal_regs = task_pt_regs(current);
260
261 /* Fake a #GP(0) from userspace. */
262 memmove(&normal_regs->ip, (void *)regs->sp, 5*8);
263 normal_regs->orig_ax = 0; /* Missing (lost) #GP error code */
264 regs->ip = (unsigned long)general_protection;
265 regs->sp = (unsigned long)&normal_regs->orig_ax;
266 return;
267 }
268#endif
269
262 exception_enter(); 270 exception_enter();
263 /* Return not checked because double check cannot be ignored */ 271 /* Return not checked because double check cannot be ignored */
264 notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV); 272 notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_DF, SIGSEGV);
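
The added espfix64 branch decides whether the double fault was really an IRET fault on the espfix stack by checking that the saved stack pointer falls in the PGD slot reserved for the espfix area (together with the kernel CS and the known faulting IP); only then is the frame rewritten to look like a #GP from user space. A sketch of the PGD-slot test with illustrative constants:

    #include <stdint.h>
    #include <stdio.h>

    /* PGDIR_SHIFT matches 4-level x86-64 paging; the ESPFIX_PGD_ENTRY value
     * here is made up for the example, not taken from a real config. */
    #define PGDIR_SHIFT       39
    #define ESPFIX_PGD_ENTRY  0x1f4

    static int on_espfix_stack(uint64_t sp)
    {
            return ((int64_t)sp >> PGDIR_SHIFT) == ESPFIX_PGD_ENTRY;
    }

    int main(void)
    {
            uint64_t espfix_sp = ((uint64_t)ESPFIX_PGD_ENTRY << PGDIR_SHIFT) | 0xffc0;

            printf("%d %d\n",
                   on_espfix_stack(espfix_sp),
                   on_espfix_stack(0x7fffffffe000ULL));
            /* prints: 1 0 */
            return 0;
    }
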
@@ -399,6 +407,35 @@ asmlinkage __visible struct pt_regs *sync_regs(struct pt_regs *eregs)
399 return regs; 407 return regs;
400} 408}
401NOKPROBE_SYMBOL(sync_regs); 409NOKPROBE_SYMBOL(sync_regs);
410
411struct bad_iret_stack {
412 void *error_entry_ret;
413 struct pt_regs regs;
414};
415
416asmlinkage __visible
417struct bad_iret_stack *fixup_bad_iret(struct bad_iret_stack *s)
418{
419 /*
420 * This is called from entry_64.S early in handling a fault
421 * caused by a bad iret to user mode. To handle the fault
422 * correctly, we want move our stack frame to task_pt_regs
423 * and we want to pretend that the exception came from the
424 * iret target.
425 */
426 struct bad_iret_stack *new_stack =
427 container_of(task_pt_regs(current),
428 struct bad_iret_stack, regs);
429
430 /* Copy the IRET target to the new stack. */
431 memmove(&new_stack->regs.ip, (void *)s->regs.sp, 5*8);
432
433 /* Copy the remainder of the stack from the current stack. */
434 memmove(new_stack, s, offsetof(struct bad_iret_stack, regs.ip));
435
436 BUG_ON(!user_mode_vm(&new_stack->regs));
437 return new_stack;
438}
402#endif 439#endif
403 440
404/* 441/*
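
fixup_bad_iret() relocates the exception frame: the five words the CPU pushed for IRET (ip, cs, flags, sp, ss) are copied from the faulting stack onto the task's pt_regs, so later code sees a fault that apparently happened at the IRET target. A simplified userspace sketch of that 5*8-byte copy (the struct layout is reduced to just the hardware frame):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct iret_frame {
            uint64_t ip, cs, flags, sp, ss;
    };

    int main(void)
    {
            /* what the IRET target frame held on the old stack */
            struct iret_frame user = { 0x400080, 0x33, 0x202, 0x7ffdf000, 0x2b };
            struct iret_frame regs = { 0 };

            memmove(&regs, &user, 5 * 8);   /* same 5*8 bytes as in the hunk */
            printf("ip=%#llx ss=%#llx\n",
                   (unsigned long long)regs.ip, (unsigned long long)regs.ss);
            return 0;
    }
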
@@ -778,7 +815,7 @@ void __init trap_init(void)
778 set_intr_gate(X86_TRAP_OLD_MF, coprocessor_segment_overrun); 815 set_intr_gate(X86_TRAP_OLD_MF, coprocessor_segment_overrun);
779 set_intr_gate(X86_TRAP_TS, invalid_TSS); 816 set_intr_gate(X86_TRAP_TS, invalid_TSS);
780 set_intr_gate(X86_TRAP_NP, segment_not_present); 817 set_intr_gate(X86_TRAP_NP, segment_not_present);
781 set_intr_gate_ist(X86_TRAP_SS, &stack_segment, STACKFAULT_STACK); 818 set_intr_gate(X86_TRAP_SS, stack_segment);
782 set_intr_gate(X86_TRAP_GP, general_protection); 819 set_intr_gate(X86_TRAP_GP, general_protection);
783 set_intr_gate(X86_TRAP_SPURIOUS, spurious_interrupt_bug); 820 set_intr_gate(X86_TRAP_SPURIOUS, spurious_interrupt_bug);
784 set_intr_gate(X86_TRAP_MF, coprocessor_error); 821 set_intr_gate(X86_TRAP_MF, coprocessor_error);
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index b6025f9e36c6..b7e50bba3bbb 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -1166,14 +1166,17 @@ void __init tsc_init(void)
1166 1166
1167 x86_init.timers.tsc_pre_init(); 1167 x86_init.timers.tsc_pre_init();
1168 1168
1169 if (!cpu_has_tsc) 1169 if (!cpu_has_tsc) {
1170 setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
1170 return; 1171 return;
1172 }
1171 1173
1172 tsc_khz = x86_platform.calibrate_tsc(); 1174 tsc_khz = x86_platform.calibrate_tsc();
1173 cpu_khz = tsc_khz; 1175 cpu_khz = tsc_khz;
1174 1176
1175 if (!tsc_khz) { 1177 if (!tsc_khz) {
1176 mark_tsc_unstable("could not calculate TSC khz"); 1178 mark_tsc_unstable("could not calculate TSC khz");
1179 setup_clear_cpu_cap(X86_FEATURE_TSC_DEADLINE_TIMER);
1177 return; 1180 return;
1178 } 1181 }
1179 1182
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index a46207a05835..9f8a2faf5040 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -504,11 +504,6 @@ static void rsp_increment(struct x86_emulate_ctxt *ctxt, int inc)
504 masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc); 504 masked_increment(reg_rmw(ctxt, VCPU_REGS_RSP), stack_mask(ctxt), inc);
505} 505}
506 506
507static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
508{
509 register_address_increment(ctxt, &ctxt->_eip, rel);
510}
511
512static u32 desc_limit_scaled(struct desc_struct *desc) 507static u32 desc_limit_scaled(struct desc_struct *desc)
513{ 508{
514 u32 limit = get_desc_limit(desc); 509 u32 limit = get_desc_limit(desc);
@@ -569,6 +564,40 @@ static int emulate_nm(struct x86_emulate_ctxt *ctxt)
569 return emulate_exception(ctxt, NM_VECTOR, 0, false); 564 return emulate_exception(ctxt, NM_VECTOR, 0, false);
570} 565}
571 566
567static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
568 int cs_l)
569{
570 switch (ctxt->op_bytes) {
571 case 2:
572 ctxt->_eip = (u16)dst;
573 break;
574 case 4:
575 ctxt->_eip = (u32)dst;
576 break;
577#ifdef CONFIG_X86_64
578 case 8:
579 if ((cs_l && is_noncanonical_address(dst)) ||
580 (!cs_l && (dst >> 32) != 0))
581 return emulate_gp(ctxt, 0);
582 ctxt->_eip = dst;
583 break;
584#endif
585 default:
586 WARN(1, "unsupported eip assignment size\n");
587 }
588 return X86EMUL_CONTINUE;
589}
590
591static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
592{
593 return assign_eip_far(ctxt, dst, ctxt->mode == X86EMUL_MODE_PROT64);
594}
595
596static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
597{
598 return assign_eip_near(ctxt, ctxt->_eip + rel);
599}
600
572static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg) 601static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
573{ 602{
574 u16 selector; 603 u16 selector;
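
assign_eip_far() rejects a 64-bit branch target whose address is not canonical, i.e. whose bits 63:47 are not a sign extension of bit 47. A standalone sketch of that test, using the same shift trick as is_noncanonical_address() for 48-bit virtual addresses:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool noncanonical(uint64_t addr)
    {
            return (uint64_t)((int64_t)(addr << 16) >> 16) != addr;
    }

    int main(void)
    {
            printf("%d %d %d\n",
                   noncanonical(0x00007fffffffffffULL),   /* 0: canonical user   */
                   noncanonical(0xffff800000000000ULL),   /* 0: canonical kernel */
                   noncanonical(0x0000800000000000ULL));  /* 1: non-canonical    */
            return 0;
    }
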
@@ -614,7 +643,8 @@ static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
614 643
615static int __linearize(struct x86_emulate_ctxt *ctxt, 644static int __linearize(struct x86_emulate_ctxt *ctxt,
616 struct segmented_address addr, 645 struct segmented_address addr,
617 unsigned size, bool write, bool fetch, 646 unsigned *max_size, unsigned size,
647 bool write, bool fetch,
618 ulong *linear) 648 ulong *linear)
619{ 649{
620 struct desc_struct desc; 650 struct desc_struct desc;
@@ -625,10 +655,15 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
625 unsigned cpl; 655 unsigned cpl;
626 656
627 la = seg_base(ctxt, addr.seg) + addr.ea; 657 la = seg_base(ctxt, addr.seg) + addr.ea;
658 *max_size = 0;
628 switch (ctxt->mode) { 659 switch (ctxt->mode) {
629 case X86EMUL_MODE_PROT64: 660 case X86EMUL_MODE_PROT64:
630 if (((signed long)la << 16) >> 16 != la) 661 if (((signed long)la << 16) >> 16 != la)
631 return emulate_gp(ctxt, 0); 662 return emulate_gp(ctxt, 0);
663
664 *max_size = min_t(u64, ~0u, (1ull << 48) - la);
665 if (size > *max_size)
666 goto bad;
632 break; 667 break;
633 default: 668 default:
634 usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL, 669 usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
@@ -646,20 +681,25 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
646 if ((ctxt->mode == X86EMUL_MODE_REAL) && !fetch && 681 if ((ctxt->mode == X86EMUL_MODE_REAL) && !fetch &&
647 (ctxt->d & NoBigReal)) { 682 (ctxt->d & NoBigReal)) {
648 /* la is between zero and 0xffff */ 683 /* la is between zero and 0xffff */
649 if (la > 0xffff || (u32)(la + size - 1) > 0xffff) 684 if (la > 0xffff)
650 goto bad; 685 goto bad;
686 *max_size = 0x10000 - la;
651 } else if ((desc.type & 8) || !(desc.type & 4)) { 687 } else if ((desc.type & 8) || !(desc.type & 4)) {
652 /* expand-up segment */ 688 /* expand-up segment */
653 if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim) 689 if (addr.ea > lim)
654 goto bad; 690 goto bad;
691 *max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
655 } else { 692 } else {
656 /* expand-down segment */ 693 /* expand-down segment */
657 if (addr.ea <= lim || (u32)(addr.ea + size - 1) <= lim) 694 if (addr.ea <= lim)
658 goto bad; 695 goto bad;
659 lim = desc.d ? 0xffffffff : 0xffff; 696 lim = desc.d ? 0xffffffff : 0xffff;
660 if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim) 697 if (addr.ea > lim)
661 goto bad; 698 goto bad;
699 *max_size = min_t(u64, ~0u, (u64)lim + 1 - addr.ea);
662 } 700 }
701 if (size > *max_size)
702 goto bad;
663 cpl = ctxt->ops->cpl(ctxt); 703 cpl = ctxt->ops->cpl(ctxt);
664 if (!(desc.type & 8)) { 704 if (!(desc.type & 8)) {
665 /* data segment */ 705 /* data segment */
@@ -684,9 +724,9 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
684 return X86EMUL_CONTINUE; 724 return X86EMUL_CONTINUE;
685bad: 725bad:
686 if (addr.seg == VCPU_SREG_SS) 726 if (addr.seg == VCPU_SREG_SS)
687 return emulate_ss(ctxt, sel); 727 return emulate_ss(ctxt, 0);
688 else 728 else
689 return emulate_gp(ctxt, sel); 729 return emulate_gp(ctxt, 0);
690} 730}
691 731
692static int linearize(struct x86_emulate_ctxt *ctxt, 732static int linearize(struct x86_emulate_ctxt *ctxt,
@@ -694,7 +734,8 @@ static int linearize(struct x86_emulate_ctxt *ctxt,
694 unsigned size, bool write, 734 unsigned size, bool write,
695 ulong *linear) 735 ulong *linear)
696{ 736{
697 return __linearize(ctxt, addr, size, write, false, linear); 737 unsigned max_size;
738 return __linearize(ctxt, addr, &max_size, size, write, false, linear);
698} 739}
699 740
700 741
@@ -719,17 +760,27 @@ static int segmented_read_std(struct x86_emulate_ctxt *ctxt,
719static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size) 760static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
720{ 761{
721 int rc; 762 int rc;
722 unsigned size; 763 unsigned size, max_size;
723 unsigned long linear; 764 unsigned long linear;
724 int cur_size = ctxt->fetch.end - ctxt->fetch.data; 765 int cur_size = ctxt->fetch.end - ctxt->fetch.data;
725 struct segmented_address addr = { .seg = VCPU_SREG_CS, 766 struct segmented_address addr = { .seg = VCPU_SREG_CS,
726 .ea = ctxt->eip + cur_size }; 767 .ea = ctxt->eip + cur_size };
727 768
728 size = 15UL ^ cur_size; 769 /*
729 rc = __linearize(ctxt, addr, size, false, true, &linear); 770 * We do not know exactly how many bytes will be needed, and
771 * __linearize is expensive, so fetch as much as possible. We
772 * just have to avoid going beyond the 15 byte limit, the end
773 * of the segment, or the end of the page.
774 *
775 * __linearize is called with size 0 so that it does not do any
776 * boundary check itself. Instead, we use max_size to check
777 * against op_size.
778 */
779 rc = __linearize(ctxt, addr, &max_size, 0, false, true, &linear);
730 if (unlikely(rc != X86EMUL_CONTINUE)) 780 if (unlikely(rc != X86EMUL_CONTINUE))
731 return rc; 781 return rc;
732 782
783 size = min_t(unsigned, 15UL ^ cur_size, max_size);
733 size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear)); 784 size = min_t(unsigned, size, PAGE_SIZE - offset_in_page(linear));
734 785
735 /* 786 /*
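
The rewritten prefetch asks __linearize for max_size and then takes the smallest of three bounds: the 15-byte x86 instruction-length limit, the segment limit, and the end of the current page. (For cur_size in 0..15, 15 ^ cur_size equals 15 - cur_size, which is why the XOR works as a subtraction there.) A userspace sketch of the computation:

    #include <stdio.h>

    #define PAGE_SIZE 4096u

    static unsigned min_u(unsigned a, unsigned b) { return a < b ? a : b; }

    static unsigned fetch_size(unsigned cur_size, unsigned max_size,
                               unsigned long linear)
    {
            unsigned size = min_u(15u ^ cur_size, max_size);

            return min_u(size, PAGE_SIZE - (linear & (PAGE_SIZE - 1)));
    }

    int main(void)
    {
            printf("%u\n", fetch_size(3, 100, 0x401ff8));   /* min(12, 100, 8) = 8 */
            return 0;
    }
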
@@ -739,7 +790,8 @@ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
739 * still, we must have hit the 15-byte boundary. 790 * still, we must have hit the 15-byte boundary.
740 */ 791 */
741 if (unlikely(size < op_size)) 792 if (unlikely(size < op_size))
742 return X86EMUL_UNHANDLEABLE; 793 return emulate_gp(ctxt, 0);
794
743 rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end, 795 rc = ctxt->ops->fetch(ctxt, linear, ctxt->fetch.end,
744 size, &ctxt->exception); 796 size, &ctxt->exception);
745 if (unlikely(rc != X86EMUL_CONTINUE)) 797 if (unlikely(rc != X86EMUL_CONTINUE))
@@ -751,8 +803,10 @@ static int __do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, int op_size)
751static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt, 803static __always_inline int do_insn_fetch_bytes(struct x86_emulate_ctxt *ctxt,
752 unsigned size) 804 unsigned size)
753{ 805{
754 if (unlikely(ctxt->fetch.end - ctxt->fetch.ptr < size)) 806 unsigned done_size = ctxt->fetch.end - ctxt->fetch.ptr;
755 return __do_insn_fetch_bytes(ctxt, size); 807
808 if (unlikely(done_size < size))
809 return __do_insn_fetch_bytes(ctxt, size - done_size);
756 else 810 else
757 return X86EMUL_CONTINUE; 811 return X86EMUL_CONTINUE;
758} 812}
@@ -1416,7 +1470,9 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1416 1470
1417/* Does not support long mode */ 1471/* Does not support long mode */
1418static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt, 1472static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1419 u16 selector, int seg, u8 cpl, bool in_task_switch) 1473 u16 selector, int seg, u8 cpl,
1474 bool in_task_switch,
1475 struct desc_struct *desc)
1420{ 1476{
1421 struct desc_struct seg_desc, old_desc; 1477 struct desc_struct seg_desc, old_desc;
1422 u8 dpl, rpl; 1478 u8 dpl, rpl;
@@ -1557,6 +1613,8 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1557 } 1613 }
1558load: 1614load:
1559 ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg); 1615 ctxt->ops->set_segment(ctxt, selector, &seg_desc, base3, seg);
1616 if (desc)
1617 *desc = seg_desc;
1560 return X86EMUL_CONTINUE; 1618 return X86EMUL_CONTINUE;
1561exception: 1619exception:
1562 return emulate_exception(ctxt, err_vec, err_code, true); 1620 return emulate_exception(ctxt, err_vec, err_code, true);
@@ -1566,7 +1624,7 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
1566 u16 selector, int seg) 1624 u16 selector, int seg)
1567{ 1625{
1568 u8 cpl = ctxt->ops->cpl(ctxt); 1626 u8 cpl = ctxt->ops->cpl(ctxt);
1569 return __load_segment_descriptor(ctxt, selector, seg, cpl, false); 1627 return __load_segment_descriptor(ctxt, selector, seg, cpl, false, NULL);
1570} 1628}
1571 1629
1572static void write_register_operand(struct operand *op) 1630static void write_register_operand(struct operand *op)
@@ -1960,17 +2018,31 @@ static int em_iret(struct x86_emulate_ctxt *ctxt)
1960static int em_jmp_far(struct x86_emulate_ctxt *ctxt) 2018static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
1961{ 2019{
1962 int rc; 2020 int rc;
1963 unsigned short sel; 2021 unsigned short sel, old_sel;
2022 struct desc_struct old_desc, new_desc;
2023 const struct x86_emulate_ops *ops = ctxt->ops;
2024 u8 cpl = ctxt->ops->cpl(ctxt);
2025
2026 /* Assignment of RIP may only fail in 64-bit mode */
2027 if (ctxt->mode == X86EMUL_MODE_PROT64)
2028 ops->get_segment(ctxt, &old_sel, &old_desc, NULL,
2029 VCPU_SREG_CS);
1964 2030
1965 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); 2031 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
1966 2032
1967 rc = load_segment_descriptor(ctxt, sel, VCPU_SREG_CS); 2033 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
2034 &new_desc);
1968 if (rc != X86EMUL_CONTINUE) 2035 if (rc != X86EMUL_CONTINUE)
1969 return rc; 2036 return rc;
1970 2037
1971 ctxt->_eip = 0; 2038 rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
1972 memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes); 2039 if (rc != X86EMUL_CONTINUE) {
1973 return X86EMUL_CONTINUE; 2040 WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
2041 /* assigning eip failed; restore the old cs */
2042 ops->set_segment(ctxt, old_sel, &old_desc, 0, VCPU_SREG_CS);
2043 return rc;
2044 }
2045 return rc;
1974} 2046}
1975 2047
1976static int em_grp45(struct x86_emulate_ctxt *ctxt) 2048static int em_grp45(struct x86_emulate_ctxt *ctxt)
@@ -1981,13 +2053,15 @@ static int em_grp45(struct x86_emulate_ctxt *ctxt)
1981 case 2: /* call near abs */ { 2053 case 2: /* call near abs */ {
1982 long int old_eip; 2054 long int old_eip;
1983 old_eip = ctxt->_eip; 2055 old_eip = ctxt->_eip;
1984 ctxt->_eip = ctxt->src.val; 2056 rc = assign_eip_near(ctxt, ctxt->src.val);
2057 if (rc != X86EMUL_CONTINUE)
2058 break;
1985 ctxt->src.val = old_eip; 2059 ctxt->src.val = old_eip;
1986 rc = em_push(ctxt); 2060 rc = em_push(ctxt);
1987 break; 2061 break;
1988 } 2062 }
1989 case 4: /* jmp abs */ 2063 case 4: /* jmp abs */
1990 ctxt->_eip = ctxt->src.val; 2064 rc = assign_eip_near(ctxt, ctxt->src.val);
1991 break; 2065 break;
1992 case 5: /* jmp far */ 2066 case 5: /* jmp far */
1993 rc = em_jmp_far(ctxt); 2067 rc = em_jmp_far(ctxt);
@@ -2022,30 +2096,47 @@ static int em_cmpxchg8b(struct x86_emulate_ctxt *ctxt)
2022 2096
2023static int em_ret(struct x86_emulate_ctxt *ctxt) 2097static int em_ret(struct x86_emulate_ctxt *ctxt)
2024{ 2098{
2025 ctxt->dst.type = OP_REG; 2099 int rc;
2026 ctxt->dst.addr.reg = &ctxt->_eip; 2100 unsigned long eip;
2027 ctxt->dst.bytes = ctxt->op_bytes; 2101
2028 return em_pop(ctxt); 2102 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2103 if (rc != X86EMUL_CONTINUE)
2104 return rc;
2105
2106 return assign_eip_near(ctxt, eip);
2029} 2107}
2030 2108
2031static int em_ret_far(struct x86_emulate_ctxt *ctxt) 2109static int em_ret_far(struct x86_emulate_ctxt *ctxt)
2032{ 2110{
2033 int rc; 2111 int rc;
2034 unsigned long cs; 2112 unsigned long eip, cs;
2113 u16 old_cs;
2035 int cpl = ctxt->ops->cpl(ctxt); 2114 int cpl = ctxt->ops->cpl(ctxt);
2115 struct desc_struct old_desc, new_desc;
2116 const struct x86_emulate_ops *ops = ctxt->ops;
2036 2117
2037 rc = emulate_pop(ctxt, &ctxt->_eip, ctxt->op_bytes); 2118 if (ctxt->mode == X86EMUL_MODE_PROT64)
2119 ops->get_segment(ctxt, &old_cs, &old_desc, NULL,
2120 VCPU_SREG_CS);
2121
2122 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2038 if (rc != X86EMUL_CONTINUE) 2123 if (rc != X86EMUL_CONTINUE)
2039 return rc; 2124 return rc;
2040 if (ctxt->op_bytes == 4)
2041 ctxt->_eip = (u32)ctxt->_eip;
2042 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes); 2125 rc = emulate_pop(ctxt, &cs, ctxt->op_bytes);
2043 if (rc != X86EMUL_CONTINUE) 2126 if (rc != X86EMUL_CONTINUE)
2044 return rc; 2127 return rc;
2045 /* Outer-privilege level return is not implemented */ 2128 /* Outer-privilege level return is not implemented */
2046 if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl) 2129 if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
2047 return X86EMUL_UNHANDLEABLE; 2130 return X86EMUL_UNHANDLEABLE;
2048 rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS); 2131 rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, 0, false,
2132 &new_desc);
2133 if (rc != X86EMUL_CONTINUE)
2134 return rc;
2135 rc = assign_eip_far(ctxt, eip, new_desc.l);
2136 if (rc != X86EMUL_CONTINUE) {
2137 WARN_ON(ctxt->mode != X86EMUL_MODE_PROT64);
2138 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
2139 }
2049 return rc; 2140 return rc;
2050} 2141}
2051 2142
@@ -2306,7 +2397,7 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2306{ 2397{
2307 const struct x86_emulate_ops *ops = ctxt->ops; 2398 const struct x86_emulate_ops *ops = ctxt->ops;
2308 struct desc_struct cs, ss; 2399 struct desc_struct cs, ss;
2309 u64 msr_data; 2400 u64 msr_data, rcx, rdx;
2310 int usermode; 2401 int usermode;
2311 u16 cs_sel = 0, ss_sel = 0; 2402 u16 cs_sel = 0, ss_sel = 0;
2312 2403
@@ -2322,6 +2413,9 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2322 else 2413 else
2323 usermode = X86EMUL_MODE_PROT32; 2414 usermode = X86EMUL_MODE_PROT32;
2324 2415
2416 rcx = reg_read(ctxt, VCPU_REGS_RCX);
2417 rdx = reg_read(ctxt, VCPU_REGS_RDX);
2418
2325 cs.dpl = 3; 2419 cs.dpl = 3;
2326 ss.dpl = 3; 2420 ss.dpl = 3;
2327 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data); 2421 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
@@ -2339,6 +2433,9 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2339 ss_sel = cs_sel + 8; 2433 ss_sel = cs_sel + 8;
2340 cs.d = 0; 2434 cs.d = 0;
2341 cs.l = 1; 2435 cs.l = 1;
2436 if (is_noncanonical_address(rcx) ||
2437 is_noncanonical_address(rdx))
2438 return emulate_gp(ctxt, 0);
2342 break; 2439 break;
2343 } 2440 }
2344 cs_sel |= SELECTOR_RPL_MASK; 2441 cs_sel |= SELECTOR_RPL_MASK;
@@ -2347,8 +2444,8 @@ static int em_sysexit(struct x86_emulate_ctxt *ctxt)
2347 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS); 2444 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2348 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS); 2445 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2349 2446
2350 ctxt->_eip = reg_read(ctxt, VCPU_REGS_RDX); 2447 ctxt->_eip = rdx;
2351 *reg_write(ctxt, VCPU_REGS_RSP) = reg_read(ctxt, VCPU_REGS_RCX); 2448 *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2352 2449
2353 return X86EMUL_CONTINUE; 2450 return X86EMUL_CONTINUE;
2354} 2451}
@@ -2466,19 +2563,24 @@ static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
2466 * Now load segment descriptors. If fault happens at this stage 2563 * Now load segment descriptors. If fault happens at this stage
2467 * it is handled in a context of new task 2564 * it is handled in a context of new task
2468 */ 2565 */
2469 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl, true); 2566 ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
2567 true, NULL);
2470 if (ret != X86EMUL_CONTINUE) 2568 if (ret != X86EMUL_CONTINUE)
2471 return ret; 2569 return ret;
2472 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, true); 2570 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2571 true, NULL);
2473 if (ret != X86EMUL_CONTINUE) 2572 if (ret != X86EMUL_CONTINUE)
2474 return ret; 2573 return ret;
2475 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, true); 2574 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2575 true, NULL);
2476 if (ret != X86EMUL_CONTINUE) 2576 if (ret != X86EMUL_CONTINUE)
2477 return ret; 2577 return ret;
2478 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, true); 2578 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2579 true, NULL);
2479 if (ret != X86EMUL_CONTINUE) 2580 if (ret != X86EMUL_CONTINUE)
2480 return ret; 2581 return ret;
2481 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, true); 2582 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2583 true, NULL);
2482 if (ret != X86EMUL_CONTINUE) 2584 if (ret != X86EMUL_CONTINUE)
2483 return ret; 2585 return ret;
2484 2586
@@ -2603,25 +2705,32 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
2603 * Now load segment descriptors. If fault happenes at this stage 2705 * Now load segment descriptors. If fault happenes at this stage
2604 * it is handled in a context of new task 2706 * it is handled in a context of new task
2605 */ 2707 */
2606 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR, cpl, true); 2708 ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
2709 cpl, true, NULL);
2607 if (ret != X86EMUL_CONTINUE) 2710 if (ret != X86EMUL_CONTINUE)
2608 return ret; 2711 return ret;
2609 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl, true); 2712 ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
2713 true, NULL);
2610 if (ret != X86EMUL_CONTINUE) 2714 if (ret != X86EMUL_CONTINUE)
2611 return ret; 2715 return ret;
2612 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl, true); 2716 ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
2717 true, NULL);
2613 if (ret != X86EMUL_CONTINUE) 2718 if (ret != X86EMUL_CONTINUE)
2614 return ret; 2719 return ret;
2615 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl, true); 2720 ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
2721 true, NULL);
2616 if (ret != X86EMUL_CONTINUE) 2722 if (ret != X86EMUL_CONTINUE)
2617 return ret; 2723 return ret;
2618 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl, true); 2724 ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
2725 true, NULL);
2619 if (ret != X86EMUL_CONTINUE) 2726 if (ret != X86EMUL_CONTINUE)
2620 return ret; 2727 return ret;
2621 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl, true); 2728 ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
2729 true, NULL);
2622 if (ret != X86EMUL_CONTINUE) 2730 if (ret != X86EMUL_CONTINUE)
2623 return ret; 2731 return ret;
2624 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl, true); 2732 ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
2733 true, NULL);
2625 if (ret != X86EMUL_CONTINUE) 2734 if (ret != X86EMUL_CONTINUE)
2626 return ret; 2735 return ret;
2627 2736
@@ -2888,10 +2997,13 @@ static int em_aad(struct x86_emulate_ctxt *ctxt)
2888 2997
2889static int em_call(struct x86_emulate_ctxt *ctxt) 2998static int em_call(struct x86_emulate_ctxt *ctxt)
2890{ 2999{
3000 int rc;
2891 long rel = ctxt->src.val; 3001 long rel = ctxt->src.val;
2892 3002
2893 ctxt->src.val = (unsigned long)ctxt->_eip; 3003 ctxt->src.val = (unsigned long)ctxt->_eip;
2894 jmp_rel(ctxt, rel); 3004 rc = jmp_rel(ctxt, rel);
3005 if (rc != X86EMUL_CONTINUE)
3006 return rc;
2895 return em_push(ctxt); 3007 return em_push(ctxt);
2896} 3008}
2897 3009
@@ -2900,34 +3012,50 @@ static int em_call_far(struct x86_emulate_ctxt *ctxt)
2900 u16 sel, old_cs; 3012 u16 sel, old_cs;
2901 ulong old_eip; 3013 ulong old_eip;
2902 int rc; 3014 int rc;
3015 struct desc_struct old_desc, new_desc;
3016 const struct x86_emulate_ops *ops = ctxt->ops;
3017 int cpl = ctxt->ops->cpl(ctxt);
2903 3018
2904 old_cs = get_segment_selector(ctxt, VCPU_SREG_CS);
2905 old_eip = ctxt->_eip; 3019 old_eip = ctxt->_eip;
3020 ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
2906 3021
2907 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2); 3022 memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
2908 if (load_segment_descriptor(ctxt, sel, VCPU_SREG_CS)) 3023 rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
3024 &new_desc);
3025 if (rc != X86EMUL_CONTINUE)
2909 return X86EMUL_CONTINUE; 3026 return X86EMUL_CONTINUE;
2910 3027
2911 ctxt->_eip = 0; 3028 rc = assign_eip_far(ctxt, ctxt->src.val, new_desc.l);
2912 memcpy(&ctxt->_eip, ctxt->src.valptr, ctxt->op_bytes); 3029 if (rc != X86EMUL_CONTINUE)
3030 goto fail;
2913 3031
2914 ctxt->src.val = old_cs; 3032 ctxt->src.val = old_cs;
2915 rc = em_push(ctxt); 3033 rc = em_push(ctxt);
2916 if (rc != X86EMUL_CONTINUE) 3034 if (rc != X86EMUL_CONTINUE)
2917 return rc; 3035 goto fail;
2918 3036
2919 ctxt->src.val = old_eip; 3037 ctxt->src.val = old_eip;
2920 return em_push(ctxt); 3038 rc = em_push(ctxt);
 3039 /* If we failed, we tainted the memory, but at the very least we should
 3040 restore cs */
3041 if (rc != X86EMUL_CONTINUE)
3042 goto fail;
3043 return rc;
3044fail:
3045 ops->set_segment(ctxt, old_cs, &old_desc, 0, VCPU_SREG_CS);
3046 return rc;
3047
2921} 3048}
2922 3049
2923static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt) 3050static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
2924{ 3051{
2925 int rc; 3052 int rc;
3053 unsigned long eip;
2926 3054
2927 ctxt->dst.type = OP_REG; 3055 rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2928 ctxt->dst.addr.reg = &ctxt->_eip; 3056 if (rc != X86EMUL_CONTINUE)
2929 ctxt->dst.bytes = ctxt->op_bytes; 3057 return rc;
2930 rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes); 3058 rc = assign_eip_near(ctxt, eip);
2931 if (rc != X86EMUL_CONTINUE) 3059 if (rc != X86EMUL_CONTINUE)
2932 return rc; 3060 return rc;
2933 rsp_increment(ctxt, ctxt->src.val); 3061 rsp_increment(ctxt, ctxt->src.val);
@@ -3254,20 +3382,24 @@ static int em_lmsw(struct x86_emulate_ctxt *ctxt)
3254 3382
3255static int em_loop(struct x86_emulate_ctxt *ctxt) 3383static int em_loop(struct x86_emulate_ctxt *ctxt)
3256{ 3384{
3385 int rc = X86EMUL_CONTINUE;
3386
3257 register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1); 3387 register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1);
3258 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) && 3388 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3259 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags))) 3389 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3260 jmp_rel(ctxt, ctxt->src.val); 3390 rc = jmp_rel(ctxt, ctxt->src.val);
3261 3391
3262 return X86EMUL_CONTINUE; 3392 return rc;
3263} 3393}
3264 3394
3265static int em_jcxz(struct x86_emulate_ctxt *ctxt) 3395static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3266{ 3396{
3397 int rc = X86EMUL_CONTINUE;
3398
3267 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0) 3399 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3268 jmp_rel(ctxt, ctxt->src.val); 3400 rc = jmp_rel(ctxt, ctxt->src.val);
3269 3401
3270 return X86EMUL_CONTINUE; 3402 return rc;
3271} 3403}
3272 3404
3273static int em_in(struct x86_emulate_ctxt *ctxt) 3405static int em_in(struct x86_emulate_ctxt *ctxt)
@@ -3355,6 +3487,12 @@ static int em_bswap(struct x86_emulate_ctxt *ctxt)
3355 return X86EMUL_CONTINUE; 3487 return X86EMUL_CONTINUE;
3356} 3488}
3357 3489
3490static int em_clflush(struct x86_emulate_ctxt *ctxt)
3491{
3492 /* emulating clflush regardless of cpuid */
3493 return X86EMUL_CONTINUE;
3494}
3495
3358static bool valid_cr(int nr) 3496static bool valid_cr(int nr)
3359{ 3497{
3360 switch (nr) { 3498 switch (nr) {
@@ -3693,6 +3831,16 @@ static const struct opcode group11[] = {
3693 X7(D(Undefined)), 3831 X7(D(Undefined)),
3694}; 3832};
3695 3833
3834static const struct gprefix pfx_0f_ae_7 = {
3835 I(SrcMem | ByteOp, em_clflush), N, N, N,
3836};
3837
3838static const struct group_dual group15 = { {
3839 N, N, N, N, N, N, N, GP(0, &pfx_0f_ae_7),
3840}, {
3841 N, N, N, N, N, N, N, N,
3842} };
3843
3696static const struct gprefix pfx_0f_6f_0f_7f = { 3844static const struct gprefix pfx_0f_6f_0f_7f = {
3697 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov), 3845 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
3698}; 3846};
@@ -3901,10 +4049,11 @@ static const struct opcode twobyte_table[256] = {
3901 N, I(ImplicitOps | EmulateOnUD, em_syscall), 4049 N, I(ImplicitOps | EmulateOnUD, em_syscall),
3902 II(ImplicitOps | Priv, em_clts, clts), N, 4050 II(ImplicitOps | Priv, em_clts, clts), N,
3903 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N, 4051 DI(ImplicitOps | Priv, invd), DI(ImplicitOps | Priv, wbinvd), N, N,
3904 N, D(ImplicitOps | ModRM), N, N, 4052 N, D(ImplicitOps | ModRM | SrcMem | NoAccess), N, N,
3905 /* 0x10 - 0x1F */ 4053 /* 0x10 - 0x1F */
3906 N, N, N, N, N, N, N, N, 4054 N, N, N, N, N, N, N, N,
3907 D(ImplicitOps | ModRM), N, N, N, N, N, N, D(ImplicitOps | ModRM), 4055 D(ImplicitOps | ModRM | SrcMem | NoAccess),
4056 N, N, N, N, N, N, D(ImplicitOps | ModRM | SrcMem | NoAccess),
3908 /* 0x20 - 0x2F */ 4057 /* 0x20 - 0x2F */
3909 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read), 4058 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, cr_read, check_cr_read),
3910 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read), 4059 DIP(ModRM | DstMem | Priv | Op3264 | NoMod, dr_read, check_dr_read),
@@ -3956,7 +4105,7 @@ static const struct opcode twobyte_table[256] = {
3956 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts), 4105 F(DstMem | SrcReg | ModRM | BitOp | Lock | PageTable, em_bts),
3957 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd), 4106 F(DstMem | SrcReg | Src2ImmByte | ModRM, em_shrd),
3958 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd), 4107 F(DstMem | SrcReg | Src2CL | ModRM, em_shrd),
3959 D(ModRM), F(DstReg | SrcMem | ModRM, em_imul), 4108 GD(0, &group15), F(DstReg | SrcMem | ModRM, em_imul),
3960 /* 0xB0 - 0xB7 */ 4109 /* 0xB0 - 0xB7 */
3961 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg), 4110 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_cmpxchg),
3962 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg), 4111 I(DstReg | SrcMemFAddr | ModRM | Src2SS, em_lseg),
@@ -4138,6 +4287,7 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4138 fetch_register_operand(op); 4287 fetch_register_operand(op);
4139 break; 4288 break;
4140 case OpCL: 4289 case OpCL:
4290 op->type = OP_IMM;
4141 op->bytes = 1; 4291 op->bytes = 1;
4142 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff; 4292 op->val = reg_read(ctxt, VCPU_REGS_RCX) & 0xff;
4143 break; 4293 break;
@@ -4145,6 +4295,7 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4145 rc = decode_imm(ctxt, op, 1, true); 4295 rc = decode_imm(ctxt, op, 1, true);
4146 break; 4296 break;
4147 case OpOne: 4297 case OpOne:
4298 op->type = OP_IMM;
4148 op->bytes = 1; 4299 op->bytes = 1;
4149 op->val = 1; 4300 op->val = 1;
4150 break; 4301 break;
@@ -4203,21 +4354,27 @@ static int decode_operand(struct x86_emulate_ctxt *ctxt, struct operand *op,
4203 ctxt->memop.bytes = ctxt->op_bytes + 2; 4354 ctxt->memop.bytes = ctxt->op_bytes + 2;
4204 goto mem_common; 4355 goto mem_common;
4205 case OpES: 4356 case OpES:
4357 op->type = OP_IMM;
4206 op->val = VCPU_SREG_ES; 4358 op->val = VCPU_SREG_ES;
4207 break; 4359 break;
4208 case OpCS: 4360 case OpCS:
4361 op->type = OP_IMM;
4209 op->val = VCPU_SREG_CS; 4362 op->val = VCPU_SREG_CS;
4210 break; 4363 break;
4211 case OpSS: 4364 case OpSS:
4365 op->type = OP_IMM;
4212 op->val = VCPU_SREG_SS; 4366 op->val = VCPU_SREG_SS;
4213 break; 4367 break;
4214 case OpDS: 4368 case OpDS:
4369 op->type = OP_IMM;
4215 op->val = VCPU_SREG_DS; 4370 op->val = VCPU_SREG_DS;
4216 break; 4371 break;
4217 case OpFS: 4372 case OpFS:
4373 op->type = OP_IMM;
4218 op->val = VCPU_SREG_FS; 4374 op->val = VCPU_SREG_FS;
4219 break; 4375 break;
4220 case OpGS: 4376 case OpGS:
4377 op->type = OP_IMM;
4221 op->val = VCPU_SREG_GS; 4378 op->val = VCPU_SREG_GS;
4222 break; 4379 break;
4223 case OpImplicit: 4380 case OpImplicit:
@@ -4473,10 +4630,10 @@ done_prefixes:
4473 /* Decode and fetch the destination operand: register or memory. */ 4630 /* Decode and fetch the destination operand: register or memory. */
4474 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask); 4631 rc = decode_operand(ctxt, &ctxt->dst, (ctxt->d >> DstShift) & OpMask);
4475 4632
4476done:
4477 if (ctxt->rip_relative) 4633 if (ctxt->rip_relative)
4478 ctxt->memopp->addr.mem.ea += ctxt->_eip; 4634 ctxt->memopp->addr.mem.ea += ctxt->_eip;
4479 4635
4636done:
4480 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK; 4637 return (rc != X86EMUL_CONTINUE) ? EMULATION_FAILED : EMULATION_OK;
4481} 4638}
4482 4639
@@ -4726,7 +4883,7 @@ special_insn:
4726 break; 4883 break;
4727 case 0x70 ... 0x7f: /* jcc (short) */ 4884 case 0x70 ... 0x7f: /* jcc (short) */
4728 if (test_cc(ctxt->b, ctxt->eflags)) 4885 if (test_cc(ctxt->b, ctxt->eflags))
4729 jmp_rel(ctxt, ctxt->src.val); 4886 rc = jmp_rel(ctxt, ctxt->src.val);
4730 break; 4887 break;
4731 case 0x8d: /* lea r16/r32, m */ 4888 case 0x8d: /* lea r16/r32, m */
4732 ctxt->dst.val = ctxt->src.addr.mem.ea; 4889 ctxt->dst.val = ctxt->src.addr.mem.ea;
@@ -4756,7 +4913,7 @@ special_insn:
4756 break; 4913 break;
4757 case 0xe9: /* jmp rel */ 4914 case 0xe9: /* jmp rel */
4758 case 0xeb: /* jmp rel short */ 4915 case 0xeb: /* jmp rel short */
4759 jmp_rel(ctxt, ctxt->src.val); 4916 rc = jmp_rel(ctxt, ctxt->src.val);
4760 ctxt->dst.type = OP_NONE; /* Disable writeback. */ 4917 ctxt->dst.type = OP_NONE; /* Disable writeback. */
4761 break; 4918 break;
4762 case 0xf4: /* hlt */ 4919 case 0xf4: /* hlt */
@@ -4881,13 +5038,11 @@ twobyte_insn:
4881 break; 5038 break;
4882 case 0x80 ... 0x8f: /* jnz rel, etc*/ 5039 case 0x80 ... 0x8f: /* jnz rel, etc*/
4883 if (test_cc(ctxt->b, ctxt->eflags)) 5040 if (test_cc(ctxt->b, ctxt->eflags))
4884 jmp_rel(ctxt, ctxt->src.val); 5041 rc = jmp_rel(ctxt, ctxt->src.val);
4885 break; 5042 break;
4886 case 0x90 ... 0x9f: /* setcc r/m8 */ 5043 case 0x90 ... 0x9f: /* setcc r/m8 */
4887 ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags); 5044 ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);
4888 break; 5045 break;
4889 case 0xae: /* clflush */
4890 break;
4891 case 0xb6 ... 0xb7: /* movzx */ 5046 case 0xb6 ... 0xb7: /* movzx */
4892 ctxt->dst.bytes = ctxt->op_bytes; 5047 ctxt->dst.bytes = ctxt->op_bytes;
4893 ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val 5048 ctxt->dst.val = (ctxt->src.bytes == 1) ? (u8) ctxt->src.val
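
The emulate.c hunks above follow one pattern for far control transfers (em_jmp_far, em_call_far, em_ret_far): the new CS descriptor is loaded first, the RIP assignment is allowed to fail (for example on a non-canonical target in 64-bit mode), and on failure the CS value saved beforehand is written back so the vCPU keeps its old state. A minimal stand-alone sketch of that ordering, using invented helpers (seg_state, load_cs, assign_ip) that merely stand in for the kernel functions:

    #include <stdio.h>
    #include <stdint.h>

    struct seg_state { uint16_t sel; uint64_t base; };

    /* stand-in for __load_segment_descriptor(): always succeeds here */
    static void load_cs(struct seg_state *cs, uint16_t sel)
    {
            cs->sel = sel;
            cs->base = 0;
    }

    /* stand-in for assign_eip_far(): may refuse the new target */
    static int assign_ip(uint64_t *rip, uint64_t dst, uint64_t limit)
    {
            if (dst > limit)
                    return -1;              /* the emulator raises #GP here */
            *rip = dst;
            return 0;
    }

    static int jmp_far(uint64_t *rip, struct seg_state *cs,
                       uint16_t new_sel, uint64_t dst, uint64_t limit)
    {
            struct seg_state old_cs = *cs;  /* snapshot before loading new CS */

            load_cs(cs, new_sel);
            if (assign_ip(rip, dst, limit)) {
                    *cs = old_cs;           /* roll back CS; RIP is untouched */
                    return -1;
            }
            return 0;
    }

    int main(void)
    {
            uint64_t rip = 0x1000;
            struct seg_state cs = { 0x08, 0 };

            /* target beyond the limit: the jump is refused, state survives */
            if (jmp_far(&rip, &cs, 0x10, 0xffffffff, 0xffff))
                    printf("rejected: cs=%#x rip=%#llx\n",
                           cs.sel, (unsigned long long)rip);
            return 0;
    }
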
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index 518d86471b76..298781d4cfb4 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -262,8 +262,10 @@ void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
262 return; 262 return;
263 263
264 timer = &pit->pit_state.timer; 264 timer = &pit->pit_state.timer;
265 mutex_lock(&pit->pit_state.lock);
265 if (hrtimer_cancel(timer)) 266 if (hrtimer_cancel(timer))
266 hrtimer_start_expires(timer, HRTIMER_MODE_ABS); 267 hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
268 mutex_unlock(&pit->pit_state.lock);
267} 269}
268 270
269static void destroy_pit_timer(struct kvm_pit *pit) 271static void destroy_pit_timer(struct kvm_pit *pit)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index ac1c4de3a484..978f402006ee 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -630,7 +630,7 @@ static int mmu_spte_clear_track_bits(u64 *sptep)
630 * kvm mmu, before reclaiming the page, we should 630 * kvm mmu, before reclaiming the page, we should
631 * unmap it from mmu first. 631 * unmap it from mmu first.
632 */ 632 */
633 WARN_ON(!kvm_is_mmio_pfn(pfn) && !page_count(pfn_to_page(pfn))); 633 WARN_ON(!kvm_is_reserved_pfn(pfn) && !page_count(pfn_to_page(pfn)));
634 634
635 if (!shadow_accessed_mask || old_spte & shadow_accessed_mask) 635 if (!shadow_accessed_mask || old_spte & shadow_accessed_mask)
636 kvm_set_pfn_accessed(pfn); 636 kvm_set_pfn_accessed(pfn);
@@ -2461,7 +2461,7 @@ static int set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
2461 spte |= PT_PAGE_SIZE_MASK; 2461 spte |= PT_PAGE_SIZE_MASK;
2462 if (tdp_enabled) 2462 if (tdp_enabled)
2463 spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn, 2463 spte |= kvm_x86_ops->get_mt_mask(vcpu, gfn,
2464 kvm_is_mmio_pfn(pfn)); 2464 kvm_is_reserved_pfn(pfn));
2465 2465
2466 if (host_writable) 2466 if (host_writable)
2467 spte |= SPTE_HOST_WRITEABLE; 2467 spte |= SPTE_HOST_WRITEABLE;
@@ -2737,7 +2737,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
2737 * PT_PAGE_TABLE_LEVEL and there would be no adjustment done 2737 * PT_PAGE_TABLE_LEVEL and there would be no adjustment done
2738 * here. 2738 * here.
2739 */ 2739 */
2740 if (!is_error_noslot_pfn(pfn) && !kvm_is_mmio_pfn(pfn) && 2740 if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
2741 level == PT_PAGE_TABLE_LEVEL && 2741 level == PT_PAGE_TABLE_LEVEL &&
2742 PageTransCompound(pfn_to_page(pfn)) && 2742 PageTransCompound(pfn_to_page(pfn)) &&
2743 !has_wrprotected_page(vcpu->kvm, gfn, PT_DIRECTORY_LEVEL)) { 2743 !has_wrprotected_page(vcpu->kvm, gfn, PT_DIRECTORY_LEVEL)) {
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 806d58e3c320..fd49c867b25a 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -298,7 +298,7 @@ retry_walk:
298 } 298 }
299#endif 299#endif
300 walker->max_level = walker->level; 300 walker->max_level = walker->level;
301 ASSERT(!is_long_mode(vcpu) && is_pae(vcpu)); 301 ASSERT(!(is_long_mode(vcpu) && !is_pae(vcpu)));
302 302
303 accessed_dirty = PT_GUEST_ACCESSED_MASK; 303 accessed_dirty = PT_GUEST_ACCESSED_MASK;
304 pt_access = pte_access = ACC_ALL; 304 pt_access = pte_access = ACC_ALL;
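
The one-character paging_tmpl.h change above is easy to misread: the invariant the walker wants is "long mode implies PAE", so the only forbidden combination is long mode without PAE. A short stand-alone check of the old and fixed expressions (long_mode and pae are plain flags here, not the kvm helpers):

    #include <stdio.h>

    int main(void)
    {
            for (int long_mode = 0; long_mode <= 1; long_mode++)
                    for (int pae = 0; pae <= 1; pae++)
                            printf("long=%d pae=%d  old=%d  fixed=%d\n",
                                   long_mode, pae,
                                   !long_mode && pae,      /* old ASSERT condition */
                                   !(long_mode && !pae));  /* fixed condition */
            return 0;
    }

The old form is true only for the single state (long mode off, PAE on), so the assertion could fire on perfectly valid guests; the fixed form is false only for the impossible long-mode-without-PAE state.
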
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 65510f624dfe..7527cefc5a43 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3251,7 +3251,7 @@ static int wrmsr_interception(struct vcpu_svm *svm)
3251 msr.host_initiated = false; 3251 msr.host_initiated = false;
3252 3252
3253 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2; 3253 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
3254 if (svm_set_msr(&svm->vcpu, &msr)) { 3254 if (kvm_set_msr(&svm->vcpu, &msr)) {
3255 trace_kvm_msr_write_ex(ecx, data); 3255 trace_kvm_msr_write_ex(ecx, data);
3256 kvm_inject_gp(&svm->vcpu, 0); 3256 kvm_inject_gp(&svm->vcpu, 0);
3257 } else { 3257 } else {
@@ -3551,9 +3551,9 @@ static int handle_exit(struct kvm_vcpu *vcpu)
3551 3551
3552 if (exit_code >= ARRAY_SIZE(svm_exit_handlers) 3552 if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
3553 || !svm_exit_handlers[exit_code]) { 3553 || !svm_exit_handlers[exit_code]) {
3554 kvm_run->exit_reason = KVM_EXIT_UNKNOWN; 3554 WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_code);
3555 kvm_run->hw.hardware_exit_reason = exit_code; 3555 kvm_queue_exception(vcpu, UD_VECTOR);
3556 return 0; 3556 return 1;
3557 } 3557 }
3558 3558
3559 return svm_exit_handlers[exit_code](svm); 3559 return svm_exit_handlers[exit_code](svm);
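
Both this svm.c hunk and the matching vmx.c hunk further down change how an exit code with no registered handler is treated: instead of returning KVM_EXIT_UNKNOWN to userspace, the unexpected exit is warned about once and a #UD is injected so the guest keeps running. The shape of that bounds-checked table dispatch, in a toy stand-alone form (handler names and table contents are made up for illustration):

    #include <stdio.h>
    #include <stddef.h>

    typedef int (*exit_handler_t)(void);

    static int handle_hlt(void)   { puts("hlt");   return 1; }
    static int handle_cpuid(void) { puts("cpuid"); return 1; }
    static int inject_ud(void)    { puts("#UD");   return 1; }

    static exit_handler_t handlers[] = {
            [0] = handle_hlt,
            [2] = handle_cpuid,     /* index 1 intentionally left NULL */
    };

    static int handle_exit(unsigned int code)
    {
            /* unknown or unimplemented exit: keep the guest running */
            if (code >= sizeof(handlers) / sizeof(handlers[0]) || !handlers[code])
                    return inject_ud();
            return handlers[code]();
    }

    int main(void)
    {
            handle_exit(0);         /* known handler */
            handle_exit(1);         /* NULL slot */
            handle_exit(42);        /* out of range */
            return 0;
    }
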
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 0acac81f198b..3e556c68351b 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2659,12 +2659,15 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2659 default: 2659 default:
2660 msr = find_msr_entry(vmx, msr_index); 2660 msr = find_msr_entry(vmx, msr_index);
2661 if (msr) { 2661 if (msr) {
2662 u64 old_msr_data = msr->data;
2662 msr->data = data; 2663 msr->data = data;
2663 if (msr - vmx->guest_msrs < vmx->save_nmsrs) { 2664 if (msr - vmx->guest_msrs < vmx->save_nmsrs) {
2664 preempt_disable(); 2665 preempt_disable();
2665 kvm_set_shared_msr(msr->index, msr->data, 2666 ret = kvm_set_shared_msr(msr->index, msr->data,
2666 msr->mask); 2667 msr->mask);
2667 preempt_enable(); 2668 preempt_enable();
2669 if (ret)
2670 msr->data = old_msr_data;
2668 } 2671 }
2669 break; 2672 break;
2670 } 2673 }
@@ -4576,7 +4579,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu)
4576 vmcs_write32(TPR_THRESHOLD, 0); 4579 vmcs_write32(TPR_THRESHOLD, 0);
4577 } 4580 }
4578 4581
4579 kvm_vcpu_reload_apic_access_page(vcpu); 4582 kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
4580 4583
4581 if (vmx_vm_has_apicv(vcpu->kvm)) 4584 if (vmx_vm_has_apicv(vcpu->kvm))
4582 memset(&vmx->pi_desc, 0, sizeof(struct pi_desc)); 4585 memset(&vmx->pi_desc, 0, sizeof(struct pi_desc));
@@ -5291,7 +5294,7 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu)
5291 msr.data = data; 5294 msr.data = data;
5292 msr.index = ecx; 5295 msr.index = ecx;
5293 msr.host_initiated = false; 5296 msr.host_initiated = false;
5294 if (vmx_set_msr(vcpu, &msr) != 0) { 5297 if (kvm_set_msr(vcpu, &msr) != 0) {
5295 trace_kvm_msr_write_ex(ecx, data); 5298 trace_kvm_msr_write_ex(ecx, data);
5296 kvm_inject_gp(vcpu, 0); 5299 kvm_inject_gp(vcpu, 0);
5297 return 1; 5300 return 1;
@@ -6423,6 +6426,8 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
6423 const unsigned long *fields = shadow_read_write_fields; 6426 const unsigned long *fields = shadow_read_write_fields;
6424 const int num_fields = max_shadow_read_write_fields; 6427 const int num_fields = max_shadow_read_write_fields;
6425 6428
6429 preempt_disable();
6430
6426 vmcs_load(shadow_vmcs); 6431 vmcs_load(shadow_vmcs);
6427 6432
6428 for (i = 0; i < num_fields; i++) { 6433 for (i = 0; i < num_fields; i++) {
@@ -6446,6 +6451,8 @@ static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx)
6446 6451
6447 vmcs_clear(shadow_vmcs); 6452 vmcs_clear(shadow_vmcs);
6448 vmcs_load(vmx->loaded_vmcs->vmcs); 6453 vmcs_load(vmx->loaded_vmcs->vmcs);
6454
6455 preempt_enable();
6449} 6456}
6450 6457
6451static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx) 6458static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx)
@@ -6743,6 +6750,12 @@ static int handle_invept(struct kvm_vcpu *vcpu)
6743 return 1; 6750 return 1;
6744} 6751}
6745 6752
6753static int handle_invvpid(struct kvm_vcpu *vcpu)
6754{
6755 kvm_queue_exception(vcpu, UD_VECTOR);
6756 return 1;
6757}
6758
6746/* 6759/*
6747 * The exit handlers return 1 if the exit was handled fully and guest execution 6760 * The exit handlers return 1 if the exit was handled fully and guest execution
6748 * may resume. Otherwise they set the kvm_run parameter to indicate what needs 6761 * may resume. Otherwise they set the kvm_run parameter to indicate what needs
@@ -6788,6 +6801,7 @@ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
6788 [EXIT_REASON_MWAIT_INSTRUCTION] = handle_mwait, 6801 [EXIT_REASON_MWAIT_INSTRUCTION] = handle_mwait,
6789 [EXIT_REASON_MONITOR_INSTRUCTION] = handle_monitor, 6802 [EXIT_REASON_MONITOR_INSTRUCTION] = handle_monitor,
6790 [EXIT_REASON_INVEPT] = handle_invept, 6803 [EXIT_REASON_INVEPT] = handle_invept,
6804 [EXIT_REASON_INVVPID] = handle_invvpid,
6791}; 6805};
6792 6806
6793static const int kvm_vmx_max_exit_handlers = 6807static const int kvm_vmx_max_exit_handlers =
@@ -7023,7 +7037,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
7023 case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD: 7037 case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD:
7024 case EXIT_REASON_VMRESUME: case EXIT_REASON_VMWRITE: 7038 case EXIT_REASON_VMRESUME: case EXIT_REASON_VMWRITE:
7025 case EXIT_REASON_VMOFF: case EXIT_REASON_VMON: 7039 case EXIT_REASON_VMOFF: case EXIT_REASON_VMON:
7026 case EXIT_REASON_INVEPT: 7040 case EXIT_REASON_INVEPT: case EXIT_REASON_INVVPID:
7027 /* 7041 /*
7028 * VMX instructions trap unconditionally. This allows L1 to 7042 * VMX instructions trap unconditionally. This allows L1 to
7029 * emulate them for its L2 guest, i.e., allows 3-level nesting! 7043 * emulate them for its L2 guest, i.e., allows 3-level nesting!
@@ -7164,10 +7178,10 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
7164 && kvm_vmx_exit_handlers[exit_reason]) 7178 && kvm_vmx_exit_handlers[exit_reason])
7165 return kvm_vmx_exit_handlers[exit_reason](vcpu); 7179 return kvm_vmx_exit_handlers[exit_reason](vcpu);
7166 else { 7180 else {
7167 vcpu->run->exit_reason = KVM_EXIT_UNKNOWN; 7181 WARN_ONCE(1, "vmx: unexpected exit reason 0x%x\n", exit_reason);
7168 vcpu->run->hw.hardware_exit_reason = exit_reason; 7182 kvm_queue_exception(vcpu, UD_VECTOR);
7183 return 1;
7169 } 7184 }
7170 return 0;
7171} 7185}
7172 7186
7173static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) 7187static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
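
The vmx_set_msr() hunk above keeps the cached MSR value and the hardware MSR consistent now that the shared-MSR write can fail: the old cached data is remembered and restored when kvm_set_shared_msr() reports an error. A compact sketch of that write-with-rollback pattern, with write_hw_msr() standing in for the fallible wrmsrl_safe()-based path (it is not a real API):

    #include <stdio.h>
    #include <stdint.h>

    struct cached_msr { uint32_t index; uint64_t data; };

    /* pretend the hardware rejects one particular value */
    static int write_hw_msr(uint32_t index, uint64_t data)
    {
            (void)index;
            return data == 0xbadULL ? -1 : 0;
    }

    static int set_msr(struct cached_msr *msr, uint64_t data)
    {
            uint64_t old_data = msr->data;

            msr->data = data;
            if (write_hw_msr(msr->index, data)) {
                    msr->data = old_data;   /* keep cache and hardware in sync */
                    return -1;
            }
            return 0;
    }

    int main(void)
    {
            struct cached_msr m = { .index = 0xc0000080, .data = 0x500 };

            set_msr(&m, 0xbad);
            printf("after failed write: %#llx\n", (unsigned long long)m.data);
            return 0;
    }
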
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 34c8f94331f8..0033df32a745 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -229,20 +229,25 @@ static void kvm_shared_msr_cpu_online(void)
229 shared_msr_update(i, shared_msrs_global.msrs[i]); 229 shared_msr_update(i, shared_msrs_global.msrs[i]);
230} 230}
231 231
232void kvm_set_shared_msr(unsigned slot, u64 value, u64 mask) 232int kvm_set_shared_msr(unsigned slot, u64 value, u64 mask)
233{ 233{
234 unsigned int cpu = smp_processor_id(); 234 unsigned int cpu = smp_processor_id();
235 struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu); 235 struct kvm_shared_msrs *smsr = per_cpu_ptr(shared_msrs, cpu);
236 int err;
236 237
237 if (((value ^ smsr->values[slot].curr) & mask) == 0) 238 if (((value ^ smsr->values[slot].curr) & mask) == 0)
238 return; 239 return 0;
239 smsr->values[slot].curr = value; 240 smsr->values[slot].curr = value;
240 wrmsrl(shared_msrs_global.msrs[slot], value); 241 err = wrmsrl_safe(shared_msrs_global.msrs[slot], value);
242 if (err)
243 return 1;
244
241 if (!smsr->registered) { 245 if (!smsr->registered) {
242 smsr->urn.on_user_return = kvm_on_user_return; 246 smsr->urn.on_user_return = kvm_on_user_return;
243 user_return_notifier_register(&smsr->urn); 247 user_return_notifier_register(&smsr->urn);
244 smsr->registered = true; 248 smsr->registered = true;
245 } 249 }
250 return 0;
246} 251}
247EXPORT_SYMBOL_GPL(kvm_set_shared_msr); 252EXPORT_SYMBOL_GPL(kvm_set_shared_msr);
248 253
@@ -987,7 +992,6 @@ void kvm_enable_efer_bits(u64 mask)
987} 992}
988EXPORT_SYMBOL_GPL(kvm_enable_efer_bits); 993EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
989 994
990
991/* 995/*
992 * Writes msr value into into the appropriate "register". 996 * Writes msr value into into the appropriate "register".
993 * Returns 0 on success, non-0 otherwise. 997 * Returns 0 on success, non-0 otherwise.
@@ -995,8 +999,34 @@ EXPORT_SYMBOL_GPL(kvm_enable_efer_bits);
995 */ 999 */
996int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr) 1000int kvm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
997{ 1001{
1002 switch (msr->index) {
1003 case MSR_FS_BASE:
1004 case MSR_GS_BASE:
1005 case MSR_KERNEL_GS_BASE:
1006 case MSR_CSTAR:
1007 case MSR_LSTAR:
1008 if (is_noncanonical_address(msr->data))
1009 return 1;
1010 break;
1011 case MSR_IA32_SYSENTER_EIP:
1012 case MSR_IA32_SYSENTER_ESP:
1013 /*
1014 * IA32_SYSENTER_ESP and IA32_SYSENTER_EIP cause #GP if
1015 * non-canonical address is written on Intel but not on
1016 * AMD (which ignores the top 32-bits, because it does
1017 * not implement 64-bit SYSENTER).
1018 *
1019 * 64-bit code should hence be able to write a non-canonical
1020 * value on AMD. Making the address canonical ensures that
1021 * vmentry does not fail on Intel after writing a non-canonical
1022 * value, and that something deterministic happens if the guest
1023 * invokes 64-bit SYSENTER.
1024 */
1025 msr->data = get_canonical(msr->data);
1026 }
998 return kvm_x86_ops->set_msr(vcpu, msr); 1027 return kvm_x86_ops->set_msr(vcpu, msr);
999} 1028}
1029EXPORT_SYMBOL_GPL(kvm_set_msr);
1000 1030
1001/* 1031/*
1002 * Adapt set_msr() to msr_io()'s calling convention 1032 * Adapt set_msr() to msr_io()'s calling convention
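
The kvm_set_msr() hunk above rejects non-canonical values for the segment-base MSRs and silently canonicalises IA32_SYSENTER_EIP/ESP, as its comment explains. The canonicalisation itself is a sign extension from bit 47 (assuming 48-bit virtual addresses, as that code does); a small stand-alone illustration with arbitrary values:

    #include <stdio.h>
    #include <stdint.h>

    /* replace bits 63..48 with a sign extension of bit 47; relies on
     * arithmetic right shift of signed values, as the kernel code does */
    static uint64_t get_canonical(uint64_t la)
    {
            return (uint64_t)((int64_t)(la << 16) >> 16);
    }

    int main(void)
    {
            uint64_t vals[] = {
                    0x00007fffffffe000ULL,  /* already canonical */
                    0x0000800000000000ULL,  /* first non-canonical value */
                    0x1234fff000000000ULL,  /* garbage in the top bits */
            };

            for (unsigned int i = 0; i < sizeof(vals) / sizeof(vals[0]); i++)
                    printf("%#018llx -> %#018llx\n",
                           (unsigned long long)vals[i],
                           (unsigned long long)get_canonical(vals[i]));
            return 0;
    }
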
diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
index 7609e0e421ec..1318f75d56e4 100644
--- a/arch/x86/lib/csum-wrappers_64.c
+++ b/arch/x86/lib/csum-wrappers_64.c
@@ -41,9 +41,8 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
41 while (((unsigned long)src & 6) && len >= 2) { 41 while (((unsigned long)src & 6) && len >= 2) {
42 __u16 val16; 42 __u16 val16;
43 43
44 *errp = __get_user(val16, (const __u16 __user *)src); 44 if (__get_user(val16, (const __u16 __user *)src))
45 if (*errp) 45 goto out_err;
46 return isum;
47 46
48 *(__u16 *)dst = val16; 47 *(__u16 *)dst = val16;
49 isum = (__force __wsum)add32_with_carry( 48 isum = (__force __wsum)add32_with_carry(
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 4cb8763868fc..4e5dfec750fc 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1123,7 +1123,7 @@ void mark_rodata_ro(void)
1123 unsigned long end = (unsigned long) &__end_rodata_hpage_align; 1123 unsigned long end = (unsigned long) &__end_rodata_hpage_align;
1124 unsigned long text_end = PFN_ALIGN(&__stop___ex_table); 1124 unsigned long text_end = PFN_ALIGN(&__stop___ex_table);
1125 unsigned long rodata_end = PFN_ALIGN(&__end_rodata); 1125 unsigned long rodata_end = PFN_ALIGN(&__end_rodata);
1126 unsigned long all_end = PFN_ALIGN(&_end); 1126 unsigned long all_end;
1127 1127
1128 printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n", 1128 printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
1129 (end - start) >> 10); 1129 (end - start) >> 10);
@@ -1134,7 +1134,16 @@ void mark_rodata_ro(void)
1134 /* 1134 /*
1135 * The rodata/data/bss/brk section (but not the kernel text!) 1135 * The rodata/data/bss/brk section (but not the kernel text!)
1136 * should also be not-executable. 1136 * should also be not-executable.
1137 *
1138 * We align all_end to PMD_SIZE because the existing mapping
1139 * is a full PMD. If we would align _brk_end to PAGE_SIZE we
 1140 * split the PMD and the remainder between _brk_end and the end
 1141 * of the PMD will remain mapped executable.
 1142 *
 1143 * Any PMD which was setup after the one which covers _brk_end
 1144 * has been zapped already via cleanup_highmem().
1137 */ 1145 */
1146 all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
1138 set_memory_nx(rodata_start, (all_end - rodata_start) >> PAGE_SHIFT); 1147 set_memory_nx(rodata_start, (all_end - rodata_start) >> PAGE_SHIFT);
1139 1148
1140 rodata_test(); 1149 rodata_test();
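
The mark_rodata_ro() change above stops deriving all_end from _end and instead rounds _brk_end up to PMD_SIZE, so set_memory_nx() covers the whole existing 2 MiB mapping rather than splitting it and leaving the tail of that PMD executable. A quick numeric illustration (the _brk_end value below is made up):

    #include <stdio.h>

    #define PAGE_SIZE       4096UL
    #define PMD_SIZE        (2UL * 1024 * 1024)
    #define ROUNDUP(x, a)   ((((x) + (a) - 1) / (a)) * (a))

    int main(void)
    {
            unsigned long brk_end = 0x01a03c40UL;   /* hypothetical _brk_end */

            printf("page-aligned end: %#lx\n", ROUNDUP(brk_end, PAGE_SIZE));
            printf("pmd-aligned end:  %#lx\n", ROUNDUP(brk_end, PMD_SIZE));
            printf("left executable with page alignment: %lu KiB\n",
                   (ROUNDUP(brk_end, PMD_SIZE) - ROUNDUP(brk_end, PAGE_SIZE)) / 1024);
            return 0;
    }
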
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index ae242a7c11c7..36de293caf25 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -409,7 +409,7 @@ phys_addr_t slow_virt_to_phys(void *__virt_addr)
409 psize = page_level_size(level); 409 psize = page_level_size(level);
410 pmask = page_level_mask(level); 410 pmask = page_level_mask(level);
411 offset = virt_addr & ~pmask; 411 offset = virt_addr & ~pmask;
412 phys_addr = pte_pfn(*pte) << PAGE_SHIFT; 412 phys_addr = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
413 return (phys_addr | offset); 413 return (phys_addr | offset);
414} 414}
415EXPORT_SYMBOL_GPL(slow_virt_to_phys); 415EXPORT_SYMBOL_GPL(slow_virt_to_phys);
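
The one-line pageattr.c fix above widens the page frame number to phys_addr_t before shifting; otherwise the shift is done in the narrower type and physical addresses above 4 GiB are silently truncated. A stand-alone demonstration, with unsigned int standing in for the narrower pfn type on the affected configurations:

    #include <stdio.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
            unsigned int pfn = 0x480000;            /* frame at 18 GiB */
            uint64_t wrong = pfn << PAGE_SHIFT;     /* shift done in 32 bits */
            uint64_t right = (uint64_t)pfn << PAGE_SHIFT;

            printf("truncated: %#llx\n", (unsigned long long)wrong);
            printf("widened:   %#llx\n", (unsigned long long)right);
            return 0;
    }
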
diff --git a/arch/x86/platform/efi/efi-bgrt.c b/arch/x86/platform/efi/efi-bgrt.c
index f15103dff4b4..d143d216d52b 100644
--- a/arch/x86/platform/efi/efi-bgrt.c
+++ b/arch/x86/platform/efi/efi-bgrt.c
@@ -40,20 +40,40 @@ void __init efi_bgrt_init(void)
40 if (ACPI_FAILURE(status)) 40 if (ACPI_FAILURE(status))
41 return; 41 return;
42 42
43 if (bgrt_tab->header.length < sizeof(*bgrt_tab)) 43 if (bgrt_tab->header.length < sizeof(*bgrt_tab)) {
44 pr_err("Ignoring BGRT: invalid length %u (expected %zu)\n",
45 bgrt_tab->header.length, sizeof(*bgrt_tab));
44 return; 46 return;
45 if (bgrt_tab->version != 1 || bgrt_tab->status != 1) 47 }
48 if (bgrt_tab->version != 1) {
49 pr_err("Ignoring BGRT: invalid version %u (expected 1)\n",
50 bgrt_tab->version);
51 return;
52 }
53 if (bgrt_tab->status != 1) {
54 pr_err("Ignoring BGRT: invalid status %u (expected 1)\n",
55 bgrt_tab->status);
56 return;
57 }
58 if (bgrt_tab->image_type != 0) {
59 pr_err("Ignoring BGRT: invalid image type %u (expected 0)\n",
60 bgrt_tab->image_type);
46 return; 61 return;
47 if (bgrt_tab->image_type != 0 || !bgrt_tab->image_address) 62 }
63 if (!bgrt_tab->image_address) {
64 pr_err("Ignoring BGRT: null image address\n");
48 return; 65 return;
66 }
49 67
50 image = efi_lookup_mapped_addr(bgrt_tab->image_address); 68 image = efi_lookup_mapped_addr(bgrt_tab->image_address);
51 if (!image) { 69 if (!image) {
52 image = early_memremap(bgrt_tab->image_address, 70 image = early_memremap(bgrt_tab->image_address,
53 sizeof(bmp_header)); 71 sizeof(bmp_header));
54 ioremapped = true; 72 ioremapped = true;
55 if (!image) 73 if (!image) {
74 pr_err("Ignoring BGRT: failed to map image header memory\n");
56 return; 75 return;
76 }
57 } 77 }
58 78
59 memcpy_fromio(&bmp_header, image, sizeof(bmp_header)); 79 memcpy_fromio(&bmp_header, image, sizeof(bmp_header));
@@ -61,14 +81,18 @@ void __init efi_bgrt_init(void)
61 early_iounmap(image, sizeof(bmp_header)); 81 early_iounmap(image, sizeof(bmp_header));
62 bgrt_image_size = bmp_header.size; 82 bgrt_image_size = bmp_header.size;
63 83
64 bgrt_image = kmalloc(bgrt_image_size, GFP_KERNEL); 84 bgrt_image = kmalloc(bgrt_image_size, GFP_KERNEL | __GFP_NOWARN);
65 if (!bgrt_image) 85 if (!bgrt_image) {
86 pr_err("Ignoring BGRT: failed to allocate memory for image (wanted %zu bytes)\n",
87 bgrt_image_size);
66 return; 88 return;
89 }
67 90
68 if (ioremapped) { 91 if (ioremapped) {
69 image = early_memremap(bgrt_tab->image_address, 92 image = early_memremap(bgrt_tab->image_address,
70 bmp_header.size); 93 bmp_header.size);
71 if (!image) { 94 if (!image) {
95 pr_err("Ignoring BGRT: failed to map image memory\n");
72 kfree(bgrt_image); 96 kfree(bgrt_image);
73 bgrt_image = NULL; 97 bgrt_image = NULL;
74 return; 98 return;
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index 850da94fef30..dbc8627a5cdf 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -70,17 +70,7 @@ static efi_config_table_type_t arch_tables[] __initdata = {
70 70
71u64 efi_setup; /* efi setup_data physical address */ 71u64 efi_setup; /* efi setup_data physical address */
72 72
73static bool disable_runtime __initdata = false; 73static int add_efi_memmap __initdata;
74static int __init setup_noefi(char *arg)
75{
76 disable_runtime = true;
77 return 0;
78}
79early_param("noefi", setup_noefi);
80
81int add_efi_memmap;
82EXPORT_SYMBOL(add_efi_memmap);
83
84static int __init setup_add_efi_memmap(char *arg) 74static int __init setup_add_efi_memmap(char *arg)
85{ 75{
86 add_efi_memmap = 1; 76 add_efi_memmap = 1;
@@ -96,7 +86,7 @@ static efi_status_t __init phys_efi_set_virtual_address_map(
96{ 86{
97 efi_status_t status; 87 efi_status_t status;
98 88
99 efi_call_phys_prelog(); 89 efi_call_phys_prolog();
100 status = efi_call_phys(efi_phys.set_virtual_address_map, 90 status = efi_call_phys(efi_phys.set_virtual_address_map,
101 memory_map_size, descriptor_size, 91 memory_map_size, descriptor_size,
102 descriptor_version, virtual_map); 92 descriptor_version, virtual_map);
@@ -210,9 +200,12 @@ static void __init print_efi_memmap(void)
210 for (p = memmap.map, i = 0; 200 for (p = memmap.map, i = 0;
211 p < memmap.map_end; 201 p < memmap.map_end;
212 p += memmap.desc_size, i++) { 202 p += memmap.desc_size, i++) {
203 char buf[64];
204
213 md = p; 205 md = p;
214 pr_info("mem%02u: type=%u, attr=0x%llx, range=[0x%016llx-0x%016llx) (%lluMB)\n", 206 pr_info("mem%02u: %s range=[0x%016llx-0x%016llx) (%lluMB)\n",
215 i, md->type, md->attribute, md->phys_addr, 207 i, efi_md_typeattr_format(buf, sizeof(buf), md),
208 md->phys_addr,
216 md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT), 209 md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT),
217 (md->num_pages >> (20 - EFI_PAGE_SHIFT))); 210 (md->num_pages >> (20 - EFI_PAGE_SHIFT)));
218 } 211 }
@@ -344,9 +337,9 @@ static int __init efi_runtime_init32(void)
344 } 337 }
345 338
346 /* 339 /*
347 * We will only need *early* access to the following two 340 * We will only need *early* access to the SetVirtualAddressMap
348 * EFI runtime services before set_virtual_address_map 341 * EFI runtime service. All other runtime services will be called
349 * is invoked. 342 * via the virtual mapping.
350 */ 343 */
351 efi_phys.set_virtual_address_map = 344 efi_phys.set_virtual_address_map =
352 (efi_set_virtual_address_map_t *) 345 (efi_set_virtual_address_map_t *)
@@ -368,9 +361,9 @@ static int __init efi_runtime_init64(void)
368 } 361 }
369 362
370 /* 363 /*
371 * We will only need *early* access to the following two 364 * We will only need *early* access to the SetVirtualAddressMap
372 * EFI runtime services before set_virtual_address_map 365 * EFI runtime service. All other runtime services will be called
373 * is invoked. 366 * via the virtual mapping.
374 */ 367 */
375 efi_phys.set_virtual_address_map = 368 efi_phys.set_virtual_address_map =
376 (efi_set_virtual_address_map_t *) 369 (efi_set_virtual_address_map_t *)
@@ -492,7 +485,7 @@ void __init efi_init(void)
492 if (!efi_runtime_supported()) 485 if (!efi_runtime_supported())
493 pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n"); 486 pr_info("No EFI runtime due to 32/64-bit mismatch with kernel\n");
494 else { 487 else {
495 if (disable_runtime || efi_runtime_init()) 488 if (efi_runtime_disabled() || efi_runtime_init())
496 return; 489 return;
497 } 490 }
498 if (efi_memmap_init()) 491 if (efi_memmap_init())
@@ -537,7 +530,7 @@ void __init runtime_code_page_mkexec(void)
537 } 530 }
538} 531}
539 532
540void efi_memory_uc(u64 addr, unsigned long size) 533void __init efi_memory_uc(u64 addr, unsigned long size)
541{ 534{
542 unsigned long page_shift = 1UL << EFI_PAGE_SHIFT; 535 unsigned long page_shift = 1UL << EFI_PAGE_SHIFT;
543 u64 npages; 536 u64 npages;
@@ -732,6 +725,7 @@ static void __init kexec_enter_virtual_mode(void)
732 */ 725 */
733 if (!efi_is_native()) { 726 if (!efi_is_native()) {
734 efi_unmap_memmap(); 727 efi_unmap_memmap();
728 clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
735 return; 729 return;
736 } 730 }
737 731
@@ -805,6 +799,7 @@ static void __init __efi_enter_virtual_mode(void)
805 new_memmap = efi_map_regions(&count, &pg_shift); 799 new_memmap = efi_map_regions(&count, &pg_shift);
806 if (!new_memmap) { 800 if (!new_memmap) {
807 pr_err("Error reallocating memory, EFI runtime non-functional!\n"); 801 pr_err("Error reallocating memory, EFI runtime non-functional!\n");
802 clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
808 return; 803 return;
809 } 804 }
810 805
@@ -812,8 +807,10 @@ static void __init __efi_enter_virtual_mode(void)
812 807
813 BUG_ON(!efi.systab); 808 BUG_ON(!efi.systab);
814 809
815 if (efi_setup_page_tables(__pa(new_memmap), 1 << pg_shift)) 810 if (efi_setup_page_tables(__pa(new_memmap), 1 << pg_shift)) {
811 clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
816 return; 812 return;
813 }
817 814
818 efi_sync_low_kernel_mappings(); 815 efi_sync_low_kernel_mappings();
819 efi_dump_pagetable(); 816 efi_dump_pagetable();
@@ -938,14 +935,11 @@ u64 efi_mem_attributes(unsigned long phys_addr)
938 return 0; 935 return 0;
939} 936}
940 937
941static int __init parse_efi_cmdline(char *str) 938static int __init arch_parse_efi_cmdline(char *str)
942{ 939{
943 if (*str == '=') 940 if (parse_option_str(str, "old_map"))
944 str++;
945
946 if (!strncmp(str, "old_map", 7))
947 set_bit(EFI_OLD_MEMMAP, &efi.flags); 941 set_bit(EFI_OLD_MEMMAP, &efi.flags);
948 942
949 return 0; 943 return 0;
950} 944}
951early_param("efi", parse_efi_cmdline); 945early_param("efi", arch_parse_efi_cmdline);
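
The rewritten efi= handler above leans on the generic parse_option_str() helper instead of an open-coded strncmp(), so "efi=old_map" is matched as a whole comma-separated token. A toy matcher with the same rule, written only to show the idea and not the kernel's actual implementation:

    #include <stdio.h>
    #include <string.h>

    static int option_present(const char *str, const char *option)
    {
            size_t optlen = strlen(option);

            while (*str) {
                    const char *end = strchr(str, ',');
                    size_t len = end ? (size_t)(end - str) : strlen(str);

                    if (len == optlen && !strncmp(str, option, len))
                            return 1;       /* whole token matches */
                    if (!end)
                            break;
                    str = end + 1;
            }
            return 0;
    }

    int main(void)
    {
            printf("%d\n", option_present("old_map,debug", "old_map"));    /* 1 */
            printf("%d\n", option_present("old_mapper", "old_map"));       /* 0 */
            return 0;
    }

Note that "old_mapper" no longer matches, whereas the previous strncmp(str, "old_map", 7) test in the removed code would have accepted it.
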
diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
index 9ee3491e31fb..40e7cda52936 100644
--- a/arch/x86/platform/efi/efi_32.c
+++ b/arch/x86/platform/efi/efi_32.c
@@ -33,7 +33,7 @@
33 33
34/* 34/*
35 * To make EFI call EFI runtime service in physical addressing mode we need 35 * To make EFI call EFI runtime service in physical addressing mode we need
36 * prelog/epilog before/after the invocation to disable interrupt, to 36 * prolog/epilog before/after the invocation to disable interrupt, to
37 * claim EFI runtime service handler exclusively and to duplicate a memory in 37 * claim EFI runtime service handler exclusively and to duplicate a memory in
38 * low memory space say 0 - 3G. 38 * low memory space say 0 - 3G.
39 */ 39 */
@@ -41,11 +41,13 @@ static unsigned long efi_rt_eflags;
41 41
42void efi_sync_low_kernel_mappings(void) {} 42void efi_sync_low_kernel_mappings(void) {}
43void __init efi_dump_pagetable(void) {} 43void __init efi_dump_pagetable(void) {}
44int efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages) 44int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
45{ 45{
46 return 0; 46 return 0;
47} 47}
48void efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages) {} 48void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages)
49{
50}
49 51
50void __init efi_map_region(efi_memory_desc_t *md) 52void __init efi_map_region(efi_memory_desc_t *md)
51{ 53{
@@ -55,7 +57,7 @@ void __init efi_map_region(efi_memory_desc_t *md)
55void __init efi_map_region_fixed(efi_memory_desc_t *md) {} 57void __init efi_map_region_fixed(efi_memory_desc_t *md) {}
56void __init parse_efi_setup(u64 phys_addr, u32 data_len) {} 58void __init parse_efi_setup(u64 phys_addr, u32 data_len) {}
57 59
58void efi_call_phys_prelog(void) 60void __init efi_call_phys_prolog(void)
59{ 61{
60 struct desc_ptr gdt_descr; 62 struct desc_ptr gdt_descr;
61 63
@@ -69,7 +71,7 @@ void efi_call_phys_prelog(void)
69 load_gdt(&gdt_descr); 71 load_gdt(&gdt_descr);
70} 72}
71 73
72void efi_call_phys_epilog(void) 74void __init efi_call_phys_epilog(void)
73{ 75{
74 struct desc_ptr gdt_descr; 76 struct desc_ptr gdt_descr;
75 77
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
index 290d397e1dd9..35aecb6042fb 100644
--- a/arch/x86/platform/efi/efi_64.c
+++ b/arch/x86/platform/efi/efi_64.c
@@ -79,7 +79,7 @@ static void __init early_code_mapping_set_exec(int executable)
79 } 79 }
80} 80}
81 81
82void __init efi_call_phys_prelog(void) 82void __init efi_call_phys_prolog(void)
83{ 83{
84 unsigned long vaddress; 84 unsigned long vaddress;
85 int pgd; 85 int pgd;
@@ -139,7 +139,7 @@ void efi_sync_low_kernel_mappings(void)
139 sizeof(pgd_t) * num_pgds); 139 sizeof(pgd_t) * num_pgds);
140} 140}
141 141
142int efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages) 142int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
143{ 143{
144 unsigned long text; 144 unsigned long text;
145 struct page *page; 145 struct page *page;
@@ -192,7 +192,7 @@ int efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
192 return 0; 192 return 0;
193} 193}
194 194
195void efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages) 195void __init efi_cleanup_page_tables(unsigned long pa_memmap, unsigned num_pages)
196{ 196{
197 pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd); 197 pgd_t *pgd = (pgd_t *)__va(real_mode_header->trampoline_pgd);
198 198
diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
index fbe66e626c09..040192b50d02 100644
--- a/arch/x86/platform/efi/efi_stub_32.S
+++ b/arch/x86/platform/efi/efi_stub_32.S
@@ -27,13 +27,13 @@ ENTRY(efi_call_phys)
27 * set to 0x0010, DS and SS have been set to 0x0018. In EFI, I found 27 * set to 0x0010, DS and SS have been set to 0x0018. In EFI, I found
28 * the values of these registers are the same. And, the corresponding 28 * the values of these registers are the same. And, the corresponding
29 * GDT entries are identical. So I will do nothing about segment reg 29 * GDT entries are identical. So I will do nothing about segment reg
30 * and GDT, but change GDT base register in prelog and epilog. 30 * and GDT, but change GDT base register in prolog and epilog.
31 */ 31 */
32 32
33 /* 33 /*
34 * 1. Now I am running with EIP = <physical address> + PAGE_OFFSET. 34 * 1. Now I am running with EIP = <physical address> + PAGE_OFFSET.
35 * But to make it smoothly switch from virtual mode to flat mode. 35 * But to make it smoothly switch from virtual mode to flat mode.
36 * The mapping of lower virtual memory has been created in prelog and 36 * The mapping of lower virtual memory has been created in prolog and
37 * epilog. 37 * epilog.
38 */ 38 */
39 movl $1f, %edx 39 movl $1f, %edx
diff --git a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
index 46aa25c8ce06..3c1c3866d82b 100644
--- a/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
+++ b/arch/x86/platform/intel-mid/intel_mid_weak_decls.h
@@ -10,10 +10,9 @@
10 */ 10 */
11 11
12 12
13/* __attribute__((weak)) makes these declarations overridable */
14/* For every CPU addition a new get_<cpuname>_ops interface needs 13/* For every CPU addition a new get_<cpuname>_ops interface needs
15 * to be added. 14 * to be added.
16 */ 15 */
17extern void *get_penwell_ops(void) __attribute__((weak)); 16extern void *get_penwell_ops(void);
18extern void *get_cloverview_ops(void) __attribute__((weak)); 17extern void *get_cloverview_ops(void);
19extern void *get_tangier_ops(void) __attribute__((weak)); 18extern void *get_tangier_ops(void);
diff --git a/arch/x86/platform/intel-mid/sfi.c b/arch/x86/platform/intel-mid/sfi.c
index 3c53a90fdb18..c14ad34776c4 100644
--- a/arch/x86/platform/intel-mid/sfi.c
+++ b/arch/x86/platform/intel-mid/sfi.c
@@ -106,6 +106,7 @@ int __init sfi_parse_mtmr(struct sfi_table_header *table)
106 mp_irq.dstapic = MP_APIC_ALL; 106 mp_irq.dstapic = MP_APIC_ALL;
107 mp_irq.dstirq = pentry->irq; 107 mp_irq.dstirq = pentry->irq;
108 mp_save_irq(&mp_irq); 108 mp_save_irq(&mp_irq);
109 mp_map_gsi_to_irq(pentry->irq, IOAPIC_MAP_ALLOC);
109 } 110 }
110 111
111 return 0; 112 return 0;
@@ -176,6 +177,7 @@ int __init sfi_parse_mrtc(struct sfi_table_header *table)
176 mp_irq.dstapic = MP_APIC_ALL; 177 mp_irq.dstapic = MP_APIC_ALL;
177 mp_irq.dstirq = pentry->irq; 178 mp_irq.dstirq = pentry->irq;
178 mp_save_irq(&mp_irq); 179 mp_save_irq(&mp_irq);
180 mp_map_gsi_to_irq(pentry->irq, IOAPIC_MAP_ALLOC);
179 } 181 }
180 return 0; 182 return 0;
181} 183}
diff --git a/arch/x86/tools/calc_run_size.pl b/arch/x86/tools/calc_run_size.pl
new file mode 100644
index 000000000000..23210baade2d
--- /dev/null
+++ b/arch/x86/tools/calc_run_size.pl
@@ -0,0 +1,39 @@
1#!/usr/bin/perl
2#
3# Calculate the amount of space needed to run the kernel, including room for
4# the .bss and .brk sections.
5#
6# Usage:
7# objdump -h a.out | perl calc_run_size.pl
8use strict;
9
10my $mem_size = 0;
11my $file_offset = 0;
12
13my $sections=" *[0-9]+ \.(?:bss|brk) +";
14while (<>) {
15 if (/^$sections([0-9a-f]+) +(?:[0-9a-f]+ +){2}([0-9a-f]+)/) {
16 my $size = hex($1);
17 my $offset = hex($2);
18 $mem_size += $size;
19 if ($file_offset == 0) {
20 $file_offset = $offset;
21 } elsif ($file_offset != $offset) {
22 # BFD linker shows the same file offset in ELF.
23 # Gold linker shows them as consecutive.
24 next if ($file_offset + $mem_size == $offset + $size);
25
26 printf STDERR "file_offset: 0x%lx\n", $file_offset;
27 printf STDERR "mem_size: 0x%lx\n", $mem_size;
28 printf STDERR "offset: 0x%lx\n", $offset;
29 printf STDERR "size: 0x%lx\n", $size;
30
31 die ".bss and .brk are non-contiguous\n";
32 }
33 }
34}
35
36if ($file_offset == 0) {
37 die "Never found .bss or .brk file offset\n";
38}
39printf("%d\n", $mem_size + $file_offset);
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 1a3f0445432a..fac5e4f9607c 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -1636,9 +1636,6 @@ asmlinkage __visible void __init xen_start_kernel(void)
1636 xen_raw_console_write("mapping kernel into physical memory\n"); 1636 xen_raw_console_write("mapping kernel into physical memory\n");
1637 xen_setup_kernel_pagetable((pgd_t *)xen_start_info->pt_base, xen_start_info->nr_pages); 1637 xen_setup_kernel_pagetable((pgd_t *)xen_start_info->pt_base, xen_start_info->nr_pages);
1638 1638
1639 /* Allocate and initialize top and mid mfn levels for p2m structure */
1640 xen_build_mfn_list_list();
1641
1642 /* keep using Xen gdt for now; no urgent need to change it */ 1639 /* keep using Xen gdt for now; no urgent need to change it */
1643 1640
1644#ifdef CONFIG_X86_32 1641#ifdef CONFIG_X86_32
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index f62af7647ec9..a8a1a3d08d4d 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1217,10 +1217,13 @@ static void __init xen_pagetable_p2m_copy(void)
1217static void __init xen_pagetable_init(void) 1217static void __init xen_pagetable_init(void)
1218{ 1218{
1219 paging_init(); 1219 paging_init();
1220 xen_setup_shared_info();
1221#ifdef CONFIG_X86_64 1220#ifdef CONFIG_X86_64
1222 xen_pagetable_p2m_copy(); 1221 xen_pagetable_p2m_copy();
1223#endif 1222#endif
1223 /* Allocate and initialize top and mid mfn levels for p2m structure */
1224 xen_build_mfn_list_list();
1225
1226 xen_setup_shared_info();
1224 xen_post_allocator_init(); 1227 xen_post_allocator_init();
1225} 1228}
1226static void xen_write_cr2(unsigned long cr2) 1229static void xen_write_cr2(unsigned long cr2)
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 9f5983b01ed9..b456b048eca9 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -163,6 +163,7 @@
163#include <linux/hash.h> 163#include <linux/hash.h>
164#include <linux/sched.h> 164#include <linux/sched.h>
165#include <linux/seq_file.h> 165#include <linux/seq_file.h>
166#include <linux/bootmem.h>
166 167
167#include <asm/cache.h> 168#include <asm/cache.h>
168#include <asm/setup.h> 169#include <asm/setup.h>
@@ -181,21 +182,20 @@ static void __init m2p_override_init(void);
181 182
182unsigned long xen_max_p2m_pfn __read_mostly; 183unsigned long xen_max_p2m_pfn __read_mostly;
183 184
185static unsigned long *p2m_mid_missing_mfn;
186static unsigned long *p2m_top_mfn;
187static unsigned long **p2m_top_mfn_p;
188
184/* Placeholders for holes in the address space */ 189/* Placeholders for holes in the address space */
185static RESERVE_BRK_ARRAY(unsigned long, p2m_missing, P2M_PER_PAGE); 190static RESERVE_BRK_ARRAY(unsigned long, p2m_missing, P2M_PER_PAGE);
186static RESERVE_BRK_ARRAY(unsigned long *, p2m_mid_missing, P2M_MID_PER_PAGE); 191static RESERVE_BRK_ARRAY(unsigned long *, p2m_mid_missing, P2M_MID_PER_PAGE);
187static RESERVE_BRK_ARRAY(unsigned long, p2m_mid_missing_mfn, P2M_MID_PER_PAGE);
188 192
189static RESERVE_BRK_ARRAY(unsigned long **, p2m_top, P2M_TOP_PER_PAGE); 193static RESERVE_BRK_ARRAY(unsigned long **, p2m_top, P2M_TOP_PER_PAGE);
190static RESERVE_BRK_ARRAY(unsigned long, p2m_top_mfn, P2M_TOP_PER_PAGE);
191static RESERVE_BRK_ARRAY(unsigned long *, p2m_top_mfn_p, P2M_TOP_PER_PAGE);
192 194
193static RESERVE_BRK_ARRAY(unsigned long, p2m_identity, P2M_PER_PAGE); 195static RESERVE_BRK_ARRAY(unsigned long, p2m_identity, P2M_PER_PAGE);
194static RESERVE_BRK_ARRAY(unsigned long *, p2m_mid_identity, P2M_MID_PER_PAGE); 196static RESERVE_BRK_ARRAY(unsigned long *, p2m_mid_identity, P2M_MID_PER_PAGE);
195static RESERVE_BRK_ARRAY(unsigned long, p2m_mid_identity_mfn, P2M_MID_PER_PAGE);
196 197
197RESERVE_BRK(p2m_mid, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE))); 198RESERVE_BRK(p2m_mid, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));
198RESERVE_BRK(p2m_mid_mfn, PAGE_SIZE * (MAX_DOMAIN_PAGES / (P2M_PER_PAGE * P2M_MID_PER_PAGE)));
199 199
200/* For each I/O range remapped we may lose up to two leaf pages for the boundary 200/* For each I/O range remapped we may lose up to two leaf pages for the boundary
201 * violations and three mid pages to cover up to 3GB. With 201 * violations and three mid pages to cover up to 3GB. With
@@ -272,11 +272,11 @@ static void p2m_init(unsigned long *p2m)
272 * Build the parallel p2m_top_mfn and p2m_mid_mfn structures 272 * Build the parallel p2m_top_mfn and p2m_mid_mfn structures
273 * 273 *
274 * This is called both at boot time, and after resuming from suspend: 274 * This is called both at boot time, and after resuming from suspend:
275 * - At boot time we're called very early, and must use extend_brk() 275 * - At boot time we're called rather early, and must use alloc_bootmem*()
276 * to allocate memory. 276 * to allocate memory.
277 * 277 *
278 * - After resume we're called from within stop_machine, but the mfn 278 * - After resume we're called from within stop_machine, but the mfn
279 * tree should alreay be completely allocated. 279 * tree should already be completely allocated.
280 */ 280 */
281void __ref xen_build_mfn_list_list(void) 281void __ref xen_build_mfn_list_list(void)
282{ 282{
@@ -287,20 +287,17 @@ void __ref xen_build_mfn_list_list(void)
287 287
288 /* Pre-initialize p2m_top_mfn to be completely missing */ 288 /* Pre-initialize p2m_top_mfn to be completely missing */
289 if (p2m_top_mfn == NULL) { 289 if (p2m_top_mfn == NULL) {
290 p2m_mid_missing_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE); 290 p2m_mid_missing_mfn = alloc_bootmem_align(PAGE_SIZE, PAGE_SIZE);
291 p2m_mid_mfn_init(p2m_mid_missing_mfn, p2m_missing); 291 p2m_mid_mfn_init(p2m_mid_missing_mfn, p2m_missing);
292 p2m_mid_identity_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE);
293 p2m_mid_mfn_init(p2m_mid_identity_mfn, p2m_identity);
294 292
295 p2m_top_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE); 293 p2m_top_mfn_p = alloc_bootmem_align(PAGE_SIZE, PAGE_SIZE);
296 p2m_top_mfn_p_init(p2m_top_mfn_p); 294 p2m_top_mfn_p_init(p2m_top_mfn_p);
297 295
298 p2m_top_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE); 296 p2m_top_mfn = alloc_bootmem_align(PAGE_SIZE, PAGE_SIZE);
299 p2m_top_mfn_init(p2m_top_mfn); 297 p2m_top_mfn_init(p2m_top_mfn);
300 } else { 298 } else {
301 /* Reinitialise, mfn's all change after migration */ 299 /* Reinitialise, mfn's all change after migration */
302 p2m_mid_mfn_init(p2m_mid_missing_mfn, p2m_missing); 300 p2m_mid_mfn_init(p2m_mid_missing_mfn, p2m_missing);
303 p2m_mid_mfn_init(p2m_mid_identity_mfn, p2m_identity);
304 } 301 }
305 302
306 for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += P2M_PER_PAGE) { 303 for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += P2M_PER_PAGE) {
@@ -328,10 +325,9 @@ void __ref xen_build_mfn_list_list(void)
328 /* 325 /*
329 * XXX boot-time only! We should never find 326 * XXX boot-time only! We should never find
330 * missing parts of the mfn tree after 327 * missing parts of the mfn tree after
331 * runtime. extend_brk() will BUG if we call 328 * runtime.
332 * it too late.
333 */ 329 */
334 mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE); 330 mid_mfn_p = alloc_bootmem_align(PAGE_SIZE, PAGE_SIZE);
335 p2m_mid_mfn_init(mid_mfn_p, p2m_missing); 331 p2m_mid_mfn_init(mid_mfn_p, p2m_missing);
336 332
337 p2m_top_mfn_p[topidx] = mid_mfn_p; 333 p2m_top_mfn_p[topidx] = mid_mfn_p;
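
These p2m.c hunks move the mfn-list-list tables from extend_brk() to alloc_bootmem_align() while keeping the "allocate once at boot, only rebuild the contents after resume" split described in the comment above. A minimal userspace sketch of that pattern (illustrative only; the allocator, sizes and table names are invented, not the kernel APIs):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define ENTRIES 512

    static unsigned long *top_table;     /* allocated once, early        */
    static unsigned long *mid_missing;   /* shared "missing" placeholder */

    /* Called both at boot and after resume, like xen_build_mfn_list_list(). */
    static void build_tables(const char *when)
    {
        if (!top_table) {
            /* Boot path only: one-time allocation from the early
             * allocator (stand-in for alloc_bootmem_align()). */
            mid_missing = calloc(ENTRIES, sizeof(*mid_missing));
            top_table = calloc(ENTRIES, sizeof(*top_table));
            if (!mid_missing || !top_table) {
                perror("calloc");
                exit(1);
            }
        }

        /* Boot and resume: contents are rebuilt every time, because the
         * machine frame numbers all change across save/restore. */
        for (int i = 0; i < ENTRIES; i++) {
            mid_missing[i] = ~0UL;                                /* INVALID entry */
            top_table[i] = (unsigned long)(uintptr_t)mid_missing; /* -> missing    */
        }
        printf("%s: tables rebuilt\n", when);
    }

    int main(void)
    {
        build_tables("boot");
        build_tables("resume");   /* reuses the memory, refreshes contents */
        return 0;
    }
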
@@ -415,7 +411,6 @@ void __init xen_build_dynamic_phys_to_machine(void)
415 m2p_override_init(); 411 m2p_override_init();
416} 412}
417#ifdef CONFIG_X86_64 413#ifdef CONFIG_X86_64
418#include <linux/bootmem.h>
419unsigned long __init xen_revector_p2m_tree(void) 414unsigned long __init xen_revector_p2m_tree(void)
420{ 415{
421 unsigned long va_start; 416 unsigned long va_start;
@@ -477,7 +472,6 @@ unsigned long __init xen_revector_p2m_tree(void)
477 472
478 copy_page(new, mid_p); 473 copy_page(new, mid_p);
479 p2m_top[topidx][mididx] = &mfn_list[pfn_free]; 474 p2m_top[topidx][mididx] = &mfn_list[pfn_free];
480 p2m_top_mfn_p[topidx][mididx] = virt_to_mfn(&mfn_list[pfn_free]);
481 475
482 pfn_free += P2M_PER_PAGE; 476 pfn_free += P2M_PER_PAGE;
483 477
@@ -538,12 +532,13 @@ static bool alloc_p2m(unsigned long pfn)
538 unsigned topidx, mididx; 532 unsigned topidx, mididx;
539 unsigned long ***top_p, **mid; 533 unsigned long ***top_p, **mid;
540 unsigned long *top_mfn_p, *mid_mfn; 534 unsigned long *top_mfn_p, *mid_mfn;
535 unsigned long *p2m_orig;
541 536
542 topidx = p2m_top_index(pfn); 537 topidx = p2m_top_index(pfn);
543 mididx = p2m_mid_index(pfn); 538 mididx = p2m_mid_index(pfn);
544 539
545 top_p = &p2m_top[topidx]; 540 top_p = &p2m_top[topidx];
546 mid = *top_p; 541 mid = ACCESS_ONCE(*top_p);
547 542
548 if (mid == p2m_mid_missing) { 543 if (mid == p2m_mid_missing) {
549 /* Mid level is missing, allocate a new one */ 544 /* Mid level is missing, allocate a new one */
@@ -558,7 +553,7 @@ static bool alloc_p2m(unsigned long pfn)
558 } 553 }
559 554
560 top_mfn_p = &p2m_top_mfn[topidx]; 555 top_mfn_p = &p2m_top_mfn[topidx];
561 mid_mfn = p2m_top_mfn_p[topidx]; 556 mid_mfn = ACCESS_ONCE(p2m_top_mfn_p[topidx]);
562 557
563 BUG_ON(virt_to_mfn(mid_mfn) != *top_mfn_p); 558 BUG_ON(virt_to_mfn(mid_mfn) != *top_mfn_p);
564 559
@@ -566,6 +561,7 @@ static bool alloc_p2m(unsigned long pfn)
566 /* Separately check the mid mfn level */ 561 /* Separately check the mid mfn level */
567 unsigned long missing_mfn; 562 unsigned long missing_mfn;
568 unsigned long mid_mfn_mfn; 563 unsigned long mid_mfn_mfn;
564 unsigned long old_mfn;
569 565
570 mid_mfn = alloc_p2m_page(); 566 mid_mfn = alloc_p2m_page();
571 if (!mid_mfn) 567 if (!mid_mfn)
@@ -575,17 +571,19 @@ static bool alloc_p2m(unsigned long pfn)
575 571
576 missing_mfn = virt_to_mfn(p2m_mid_missing_mfn); 572 missing_mfn = virt_to_mfn(p2m_mid_missing_mfn);
577 mid_mfn_mfn = virt_to_mfn(mid_mfn); 573 mid_mfn_mfn = virt_to_mfn(mid_mfn);
578 if (cmpxchg(top_mfn_p, missing_mfn, mid_mfn_mfn) != missing_mfn) 574 old_mfn = cmpxchg(top_mfn_p, missing_mfn, mid_mfn_mfn);
575 if (old_mfn != missing_mfn) {
579 free_p2m_page(mid_mfn); 576 free_p2m_page(mid_mfn);
580 else 577 mid_mfn = mfn_to_virt(old_mfn);
578 } else {
581 p2m_top_mfn_p[topidx] = mid_mfn; 579 p2m_top_mfn_p[topidx] = mid_mfn;
580 }
582 } 581 }
583 582
584 if (p2m_top[topidx][mididx] == p2m_identity || 583 p2m_orig = ACCESS_ONCE(p2m_top[topidx][mididx]);
585 p2m_top[topidx][mididx] == p2m_missing) { 584 if (p2m_orig == p2m_identity || p2m_orig == p2m_missing) {
586 /* p2m leaf page is missing */ 585 /* p2m leaf page is missing */
587 unsigned long *p2m; 586 unsigned long *p2m;
588 unsigned long *p2m_orig = p2m_top[topidx][mididx];
589 587
590 p2m = alloc_p2m_page(); 588 p2m = alloc_p2m_page();
591 if (!p2m) 589 if (!p2m)
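
The reworked cmpxchg path keeps the value cmpxchg() returns: if another CPU already installed a mid_mfn page, the loser frees its own copy and adopts the winner's instead of continuing with a stale pointer. A compact userspace sketch of that "install or adopt" pattern with GCC atomics (illustrative; the names are invented):

    #include <stdio.h>
    #include <stdlib.h>

    static long *slot;                 /* shared slot, NULL == missing */

    /* Try to publish a freshly allocated page; if we lose the race,
     * free ours and adopt whatever the winner installed. */
    static long *install_or_adopt(void)
    {
        long *mine = malloc(sizeof(*mine));
        long *expected = NULL;

        if (!mine)
            return NULL;
        *mine = 42;

        if (__atomic_compare_exchange_n(&slot, &expected, mine,
                                        0, __ATOMIC_SEQ_CST,
                                        __ATOMIC_SEQ_CST)) {
            return mine;            /* we won: ours is now visible   */
        }

        free(mine);                 /* lost: discard our copy ...    */
        return expected;            /* ... and use the winner's page */
    }

    int main(void)
    {
        long *p = install_or_adopt();
        printf("slot value: %ld\n", p ? *p : -1);
        return 0;
    }
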
@@ -606,7 +604,6 @@ static bool __init early_alloc_p2m(unsigned long pfn, bool check_boundary)
606{ 604{
607 unsigned topidx, mididx, idx; 605 unsigned topidx, mididx, idx;
608 unsigned long *p2m; 606 unsigned long *p2m;
609 unsigned long *mid_mfn_p;
610 607
611 topidx = p2m_top_index(pfn); 608 topidx = p2m_top_index(pfn);
612 mididx = p2m_mid_index(pfn); 609 mididx = p2m_mid_index(pfn);
@@ -633,43 +630,21 @@ static bool __init early_alloc_p2m(unsigned long pfn, bool check_boundary)
633 630
634 p2m_top[topidx][mididx] = p2m; 631 p2m_top[topidx][mididx] = p2m;
635 632
636 /* For save/restore we need to MFN of the P2M saved */
637
638 mid_mfn_p = p2m_top_mfn_p[topidx];
639 WARN(mid_mfn_p[mididx] != virt_to_mfn(p2m_missing),
640 "P2M_TOP_P[%d][%d] != MFN of p2m_missing!\n",
641 topidx, mididx);
642 mid_mfn_p[mididx] = virt_to_mfn(p2m);
643
644 return true; 633 return true;
645} 634}
646 635
647static bool __init early_alloc_p2m_middle(unsigned long pfn) 636static bool __init early_alloc_p2m_middle(unsigned long pfn)
648{ 637{
649 unsigned topidx = p2m_top_index(pfn); 638 unsigned topidx = p2m_top_index(pfn);
650 unsigned long *mid_mfn_p;
651 unsigned long **mid; 639 unsigned long **mid;
652 640
653 mid = p2m_top[topidx]; 641 mid = p2m_top[topidx];
654 mid_mfn_p = p2m_top_mfn_p[topidx];
655 if (mid == p2m_mid_missing) { 642 if (mid == p2m_mid_missing) {
656 mid = extend_brk(PAGE_SIZE, PAGE_SIZE); 643 mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
657 644
658 p2m_mid_init(mid, p2m_missing); 645 p2m_mid_init(mid, p2m_missing);
659 646
660 p2m_top[topidx] = mid; 647 p2m_top[topidx] = mid;
661
662 BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
663 }
664 /* And the save/restore P2M tables.. */
665 if (mid_mfn_p == p2m_mid_missing_mfn) {
666 mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
667 p2m_mid_mfn_init(mid_mfn_p, p2m_missing);
668
669 p2m_top_mfn_p[topidx] = mid_mfn_p;
670 p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
671 /* Note: we don't set mid_mfn_p[midix] here,
672 * look in early_alloc_p2m() */
673 } 648 }
674 return true; 649 return true;
675} 650}
@@ -680,14 +655,13 @@ static bool __init early_alloc_p2m_middle(unsigned long pfn)
680 * replace the P2M leaf with a p2m_missing or p2m_identity. 655 * replace the P2M leaf with a p2m_missing or p2m_identity.
681 * Stick the old page in the new P2M tree location. 656 * Stick the old page in the new P2M tree location.
682 */ 657 */
683bool __init early_can_reuse_p2m_middle(unsigned long set_pfn, unsigned long set_mfn) 658static bool __init early_can_reuse_p2m_middle(unsigned long set_pfn)
684{ 659{
685 unsigned topidx; 660 unsigned topidx;
686 unsigned mididx; 661 unsigned mididx;
687 unsigned ident_pfns; 662 unsigned ident_pfns;
688 unsigned inv_pfns; 663 unsigned inv_pfns;
689 unsigned long *p2m; 664 unsigned long *p2m;
690 unsigned long *mid_mfn_p;
691 unsigned idx; 665 unsigned idx;
692 unsigned long pfn; 666 unsigned long pfn;
693 667
@@ -733,11 +707,6 @@ bool __init early_can_reuse_p2m_middle(unsigned long set_pfn, unsigned long set_
733found: 707found:
734 /* Found one, replace old with p2m_identity or p2m_missing */ 708 /* Found one, replace old with p2m_identity or p2m_missing */
735 p2m_top[topidx][mididx] = (ident_pfns ? p2m_identity : p2m_missing); 709 p2m_top[topidx][mididx] = (ident_pfns ? p2m_identity : p2m_missing);
736 /* And the other for save/restore.. */
737 mid_mfn_p = p2m_top_mfn_p[topidx];
738 /* NOTE: Even if it is a p2m_identity it should still be point to
739 * a page filled with INVALID_P2M_ENTRY entries. */
740 mid_mfn_p[mididx] = virt_to_mfn(p2m_missing);
741 710
742 /* Reset where we want to stick the old page in. */ 711 /* Reset where we want to stick the old page in. */
743 topidx = p2m_top_index(set_pfn); 712 topidx = p2m_top_index(set_pfn);
@@ -752,8 +721,6 @@ found:
752 721
753 p2m_init(p2m); 722 p2m_init(p2m);
754 p2m_top[topidx][mididx] = p2m; 723 p2m_top[topidx][mididx] = p2m;
755 mid_mfn_p = p2m_top_mfn_p[topidx];
756 mid_mfn_p[mididx] = virt_to_mfn(p2m);
757 724
758 return true; 725 return true;
759} 726}
@@ -763,7 +730,7 @@ bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn)
763 if (!early_alloc_p2m_middle(pfn)) 730 if (!early_alloc_p2m_middle(pfn))
764 return false; 731 return false;
765 732
766 if (early_can_reuse_p2m_middle(pfn, mfn)) 733 if (early_can_reuse_p2m_middle(pfn))
767 return __set_phys_to_machine(pfn, mfn); 734 return __set_phys_to_machine(pfn, mfn);
768 735
769 if (!early_alloc_p2m(pfn, false /* boundary crossover OK!*/)) 736 if (!early_alloc_p2m(pfn, false /* boundary crossover OK!*/))
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index af7216128d93..29834b3fd87f 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -595,6 +595,7 @@ char * __init xen_memory_setup(void)
595 rc = 0; 595 rc = 0;
596 } 596 }
597 BUG_ON(rc); 597 BUG_ON(rc);
598 BUG_ON(memmap.nr_entries == 0);
598 599
599 /* 600 /*
600 * Xen won't allow a 1:1 mapping to be created to UNUSABLE 601 * Xen won't allow a 1:1 mapping to be created to UNUSABLE
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 8650cdb53209..4c071aeb8417 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -510,6 +510,9 @@ static void xen_cpu_die(unsigned int cpu)
510 current->state = TASK_UNINTERRUPTIBLE; 510 current->state = TASK_UNINTERRUPTIBLE;
511 schedule_timeout(HZ/10); 511 schedule_timeout(HZ/10);
512 } 512 }
513
514 cpu_die_common(cpu);
515
513 xen_smp_intr_free(cpu); 516 xen_smp_intr_free(cpu);
514 xen_uninit_lock_cpu(cpu); 517 xen_uninit_lock_cpu(cpu);
515 xen_teardown_timer(cpu); 518 xen_teardown_timer(cpu);
diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c
index a1d430b112b3..f473d268d387 100644
--- a/arch/x86/xen/time.c
+++ b/arch/x86/xen/time.c
@@ -158,7 +158,7 @@ cycle_t xen_clocksource_read(void)
158 cycle_t ret; 158 cycle_t ret;
159 159
160 preempt_disable_notrace(); 160 preempt_disable_notrace();
161 src = this_cpu_ptr(&xen_vcpu->time); 161 src = &__this_cpu_read(xen_vcpu)->time;
162 ret = pvclock_clocksource_read(src); 162 ret = pvclock_clocksource_read(src);
163 preempt_enable_notrace(); 163 preempt_enable_notrace();
164 return ret; 164 return ret;
diff --git a/arch/xtensa/Kconfig b/arch/xtensa/Kconfig
index 49c6c3d94449..81f57e8c8f1b 100644
--- a/arch/xtensa/Kconfig
+++ b/arch/xtensa/Kconfig
@@ -319,8 +319,8 @@ config XTENSA_PLATFORM_S6105
319 319
320config XTENSA_PLATFORM_XTFPGA 320config XTENSA_PLATFORM_XTFPGA
321 bool "XTFPGA" 321 bool "XTFPGA"
322 select ETHOC if ETHERNET
322 select SERIAL_CONSOLE 323 select SERIAL_CONSOLE
323 select ETHOC
324 select XTENSA_CALIBRATE_CCOUNT 324 select XTENSA_CALIBRATE_CCOUNT
325 help 325 help
326 XTFPGA is the name of Tensilica board family (LX60, LX110, LX200, ML605). 326 XTFPGA is the name of Tensilica board family (LX60, LX110, LX200, ML605).
@@ -367,7 +367,7 @@ config BUILTIN_DTB
367config BLK_DEV_SIMDISK 367config BLK_DEV_SIMDISK
368 tristate "Host file-based simulated block device support" 368 tristate "Host file-based simulated block device support"
369 default n 369 default n
370 depends on XTENSA_PLATFORM_ISS 370 depends on XTENSA_PLATFORM_ISS && BLOCK
371 help 371 help
372 Create block devices that map to files in the host file system. 372 Create block devices that map to files in the host file system.
373 Device binding to host file may be changed at runtime via proc 373 Device binding to host file may be changed at runtime via proc
diff --git a/arch/xtensa/boot/dts/lx200mx.dts b/arch/xtensa/boot/dts/lx200mx.dts
new file mode 100644
index 000000000000..249822b99bd6
--- /dev/null
+++ b/arch/xtensa/boot/dts/lx200mx.dts
@@ -0,0 +1,16 @@
1/dts-v1/;
2/include/ "xtfpga.dtsi"
3/include/ "xtfpga-flash-16m.dtsi"
4
5/ {
6 compatible = "cdns,xtensa-lx200";
7 memory@0 {
8 device_type = "memory";
9 reg = <0x00000000 0x06000000>;
10 };
11 pic: pic {
12 compatible = "cdns,xtensa-mx";
13 #interrupt-cells = <2>;
14 interrupt-controller;
15 };
16};
diff --git a/arch/xtensa/configs/generic_kc705_defconfig b/arch/xtensa/configs/generic_kc705_defconfig
new file mode 100644
index 000000000000..f4b7b3888da8
--- /dev/null
+++ b/arch/xtensa/configs/generic_kc705_defconfig
@@ -0,0 +1,131 @@
1CONFIG_SYSVIPC=y
2CONFIG_POSIX_MQUEUE=y
3CONFIG_FHANDLE=y
4CONFIG_IRQ_DOMAIN_DEBUG=y
5CONFIG_NO_HZ_IDLE=y
6CONFIG_HIGH_RES_TIMERS=y
7CONFIG_IRQ_TIME_ACCOUNTING=y
8CONFIG_BSD_PROCESS_ACCT=y
9CONFIG_CGROUP_DEBUG=y
10CONFIG_CGROUP_FREEZER=y
11CONFIG_CGROUP_DEVICE=y
12CONFIG_CPUSETS=y
13CONFIG_CGROUP_CPUACCT=y
14CONFIG_RESOURCE_COUNTERS=y
15CONFIG_MEMCG=y
16CONFIG_NAMESPACES=y
17CONFIG_SCHED_AUTOGROUP=y
18CONFIG_RELAY=y
19CONFIG_BLK_DEV_INITRD=y
20CONFIG_EXPERT=y
21CONFIG_SYSCTL_SYSCALL=y
22CONFIG_KALLSYMS_ALL=y
23CONFIG_PROFILING=y
24CONFIG_OPROFILE=y
25CONFIG_MODULES=y
26CONFIG_MODULE_UNLOAD=y
27# CONFIG_IOSCHED_DEADLINE is not set
28# CONFIG_IOSCHED_CFQ is not set
29CONFIG_XTENSA_VARIANT_DC233C=y
30CONFIG_XTENSA_UNALIGNED_USER=y
31CONFIG_PREEMPT=y
32CONFIG_HIGHMEM=y
33# CONFIG_PCI is not set
34CONFIG_XTENSA_PLATFORM_XTFPGA=y
35CONFIG_CMDLINE_BOOL=y
36CONFIG_CMDLINE="earlycon=uart8250,mmio32,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug"
37CONFIG_USE_OF=y
38CONFIG_BUILTIN_DTB="kc705"
39# CONFIG_COMPACTION is not set
40# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
41CONFIG_NET=y
42CONFIG_PACKET=y
43CONFIG_UNIX=y
44CONFIG_INET=y
45CONFIG_IP_MULTICAST=y
46CONFIG_IP_PNP=y
47CONFIG_IP_PNP_DHCP=y
48CONFIG_IP_PNP_BOOTP=y
49CONFIG_IP_PNP_RARP=y
50# CONFIG_IPV6 is not set
51CONFIG_NETFILTER=y
52# CONFIG_WIRELESS is not set
53CONFIG_DEVTMPFS=y
54CONFIG_DEVTMPFS_MOUNT=y
55# CONFIG_STANDALONE is not set
56CONFIG_MTD=y
57CONFIG_MTD_CFI=y
58CONFIG_MTD_JEDECPROBE=y
59CONFIG_MTD_CFI_INTELEXT=y
60CONFIG_MTD_CFI_AMDSTD=y
61CONFIG_MTD_CFI_STAA=y
62CONFIG_MTD_PHYSMAP_OF=y
63CONFIG_MTD_UBI=y
64CONFIG_BLK_DEV_LOOP=y
65CONFIG_BLK_DEV_RAM=y
66CONFIG_SCSI=y
67CONFIG_BLK_DEV_SD=y
68CONFIG_NETDEVICES=y
69# CONFIG_NET_VENDOR_ARC is not set
70# CONFIG_NET_VENDOR_BROADCOM is not set
71# CONFIG_NET_VENDOR_INTEL is not set
72# CONFIG_NET_VENDOR_MARVELL is not set
73# CONFIG_NET_VENDOR_MICREL is not set
74# CONFIG_NET_VENDOR_NATSEMI is not set
75# CONFIG_NET_VENDOR_SAMSUNG is not set
76# CONFIG_NET_VENDOR_SEEQ is not set
77# CONFIG_NET_VENDOR_SMSC is not set
78# CONFIG_NET_VENDOR_STMICRO is not set
79# CONFIG_NET_VENDOR_VIA is not set
80# CONFIG_NET_VENDOR_WIZNET is not set
81CONFIG_MARVELL_PHY=y
82# CONFIG_WLAN is not set
83# CONFIG_INPUT_MOUSEDEV is not set
84# CONFIG_INPUT_KEYBOARD is not set
85# CONFIG_INPUT_MOUSE is not set
86# CONFIG_SERIO is not set
87CONFIG_SERIAL_8250=y
88# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
89CONFIG_SERIAL_8250_CONSOLE=y
90CONFIG_SERIAL_OF_PLATFORM=y
91CONFIG_HW_RANDOM=y
92# CONFIG_HWMON is not set
93CONFIG_WATCHDOG=y
94CONFIG_WATCHDOG_NOWAYOUT=y
95CONFIG_SOFT_WATCHDOG=y
96# CONFIG_VGA_CONSOLE is not set
97# CONFIG_USB_SUPPORT is not set
98# CONFIG_IOMMU_SUPPORT is not set
99CONFIG_EXT3_FS=y
100CONFIG_EXT4_FS=y
101CONFIG_FANOTIFY=y
102CONFIG_VFAT_FS=y
103CONFIG_PROC_KCORE=y
104CONFIG_TMPFS=y
105CONFIG_TMPFS_POSIX_ACL=y
106CONFIG_UBIFS_FS=y
107CONFIG_NFS_FS=y
108CONFIG_NFS_V4=y
109CONFIG_NFS_SWAP=y
110CONFIG_ROOT_NFS=y
111CONFIG_SUNRPC_DEBUG=y
112CONFIG_NLS_CODEPAGE_437=y
113CONFIG_NLS_ISO8859_1=y
114CONFIG_PRINTK_TIME=y
115CONFIG_DYNAMIC_DEBUG=y
116CONFIG_DEBUG_INFO=y
117CONFIG_MAGIC_SYSRQ=y
118CONFIG_LOCKUP_DETECTOR=y
119# CONFIG_SCHED_DEBUG is not set
120CONFIG_SCHEDSTATS=y
121CONFIG_TIMER_STATS=y
122CONFIG_DEBUG_RT_MUTEXES=y
123CONFIG_DEBUG_SPINLOCK=y
124CONFIG_DEBUG_MUTEXES=y
125CONFIG_DEBUG_ATOMIC_SLEEP=y
126CONFIG_STACKTRACE=y
127CONFIG_RCU_TRACE=y
128# CONFIG_FTRACE is not set
129CONFIG_LD_NO_RELAX=y
130# CONFIG_S32C1I_SELFTEST is not set
131CONFIG_CRYPTO_ANSI_CPRNG=y
diff --git a/arch/xtensa/configs/smp_lx200_defconfig b/arch/xtensa/configs/smp_lx200_defconfig
new file mode 100644
index 000000000000..22eeacba37cc
--- /dev/null
+++ b/arch/xtensa/configs/smp_lx200_defconfig
@@ -0,0 +1,135 @@
1CONFIG_SYSVIPC=y
2CONFIG_POSIX_MQUEUE=y
3CONFIG_FHANDLE=y
4CONFIG_IRQ_DOMAIN_DEBUG=y
5CONFIG_NO_HZ_IDLE=y
6CONFIG_HIGH_RES_TIMERS=y
7CONFIG_IRQ_TIME_ACCOUNTING=y
8CONFIG_BSD_PROCESS_ACCT=y
9CONFIG_CGROUP_DEBUG=y
10CONFIG_CGROUP_FREEZER=y
11CONFIG_CGROUP_DEVICE=y
12CONFIG_CPUSETS=y
13CONFIG_CGROUP_CPUACCT=y
14CONFIG_RESOURCE_COUNTERS=y
15CONFIG_MEMCG=y
16CONFIG_NAMESPACES=y
17CONFIG_SCHED_AUTOGROUP=y
18CONFIG_RELAY=y
19CONFIG_BLK_DEV_INITRD=y
20CONFIG_EXPERT=y
21CONFIG_SYSCTL_SYSCALL=y
22CONFIG_KALLSYMS_ALL=y
23CONFIG_PROFILING=y
24CONFIG_OPROFILE=y
25CONFIG_MODULES=y
26CONFIG_MODULE_UNLOAD=y
27# CONFIG_IOSCHED_DEADLINE is not set
28# CONFIG_IOSCHED_CFQ is not set
29CONFIG_XTENSA_VARIANT_CUSTOM=y
30CONFIG_XTENSA_VARIANT_CUSTOM_NAME="test_mmuhifi_c3"
31CONFIG_XTENSA_UNALIGNED_USER=y
32CONFIG_PREEMPT=y
33CONFIG_HAVE_SMP=y
34CONFIG_SMP=y
35CONFIG_HOTPLUG_CPU=y
36# CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX is not set
37# CONFIG_PCI is not set
38CONFIG_XTENSA_PLATFORM_XTFPGA=y
39CONFIG_CMDLINE_BOOL=y
40CONFIG_CMDLINE="earlycon=uart8250,mmio32,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug"
41CONFIG_USE_OF=y
42CONFIG_BUILTIN_DTB="lx200mx"
43# CONFIG_COMPACTION is not set
44# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
45CONFIG_NET=y
46CONFIG_PACKET=y
47CONFIG_UNIX=y
48CONFIG_INET=y
49CONFIG_IP_MULTICAST=y
50CONFIG_IP_PNP=y
51CONFIG_IP_PNP_DHCP=y
52CONFIG_IP_PNP_BOOTP=y
53CONFIG_IP_PNP_RARP=y
54# CONFIG_IPV6 is not set
55CONFIG_NETFILTER=y
56# CONFIG_WIRELESS is not set
57CONFIG_DEVTMPFS=y
58CONFIG_DEVTMPFS_MOUNT=y
59# CONFIG_STANDALONE is not set
60CONFIG_MTD=y
61CONFIG_MTD_CFI=y
62CONFIG_MTD_JEDECPROBE=y
63CONFIG_MTD_CFI_INTELEXT=y
64CONFIG_MTD_CFI_AMDSTD=y
65CONFIG_MTD_CFI_STAA=y
66CONFIG_MTD_PHYSMAP_OF=y
67CONFIG_MTD_UBI=y
68CONFIG_BLK_DEV_LOOP=y
69CONFIG_BLK_DEV_RAM=y
70CONFIG_SCSI=y
71CONFIG_BLK_DEV_SD=y
72CONFIG_NETDEVICES=y
73# CONFIG_NET_VENDOR_ARC is not set
74# CONFIG_NET_VENDOR_BROADCOM is not set
75# CONFIG_NET_VENDOR_INTEL is not set
76# CONFIG_NET_VENDOR_MARVELL is not set
77# CONFIG_NET_VENDOR_MICREL is not set
78# CONFIG_NET_VENDOR_NATSEMI is not set
79# CONFIG_NET_VENDOR_SAMSUNG is not set
80# CONFIG_NET_VENDOR_SEEQ is not set
81# CONFIG_NET_VENDOR_SMSC is not set
82# CONFIG_NET_VENDOR_STMICRO is not set
83# CONFIG_NET_VENDOR_VIA is not set
84# CONFIG_NET_VENDOR_WIZNET is not set
85CONFIG_MARVELL_PHY=y
86# CONFIG_WLAN is not set
87# CONFIG_INPUT_MOUSEDEV is not set
88# CONFIG_INPUT_KEYBOARD is not set
89# CONFIG_INPUT_MOUSE is not set
90# CONFIG_SERIO is not set
91CONFIG_SERIAL_8250=y
92# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
93CONFIG_SERIAL_8250_CONSOLE=y
94CONFIG_SERIAL_OF_PLATFORM=y
95CONFIG_HW_RANDOM=y
96# CONFIG_HWMON is not set
97CONFIG_WATCHDOG=y
98CONFIG_WATCHDOG_NOWAYOUT=y
99CONFIG_SOFT_WATCHDOG=y
100# CONFIG_VGA_CONSOLE is not set
101# CONFIG_USB_SUPPORT is not set
102# CONFIG_IOMMU_SUPPORT is not set
103CONFIG_EXT3_FS=y
104CONFIG_EXT4_FS=y
105CONFIG_FANOTIFY=y
106CONFIG_VFAT_FS=y
107CONFIG_PROC_KCORE=y
108CONFIG_TMPFS=y
109CONFIG_TMPFS_POSIX_ACL=y
110CONFIG_UBIFS_FS=y
111CONFIG_NFS_FS=y
112CONFIG_NFS_V4=y
113CONFIG_NFS_SWAP=y
114CONFIG_ROOT_NFS=y
115CONFIG_SUNRPC_DEBUG=y
116CONFIG_NLS_CODEPAGE_437=y
117CONFIG_NLS_ISO8859_1=y
118CONFIG_PRINTK_TIME=y
119CONFIG_DYNAMIC_DEBUG=y
120CONFIG_DEBUG_INFO=y
121CONFIG_MAGIC_SYSRQ=y
122CONFIG_DEBUG_VM=y
123CONFIG_LOCKUP_DETECTOR=y
124CONFIG_SCHEDSTATS=y
125CONFIG_TIMER_STATS=y
126CONFIG_DEBUG_RT_MUTEXES=y
127CONFIG_DEBUG_SPINLOCK=y
128CONFIG_DEBUG_MUTEXES=y
129CONFIG_DEBUG_ATOMIC_SLEEP=y
130CONFIG_STACKTRACE=y
131CONFIG_RCU_TRACE=y
132# CONFIG_FTRACE is not set
133CONFIG_LD_NO_RELAX=y
134# CONFIG_S32C1I_SELFTEST is not set
135CONFIG_CRYPTO_ANSI_CPRNG=y
diff --git a/arch/xtensa/include/asm/pgtable.h b/arch/xtensa/include/asm/pgtable.h
index b2173e5da601..0383aed59121 100644
--- a/arch/xtensa/include/asm/pgtable.h
+++ b/arch/xtensa/include/asm/pgtable.h
@@ -277,6 +277,8 @@ static inline pte_t pte_mkwrite(pte_t pte)
277static inline pte_t pte_mkspecial(pte_t pte) 277static inline pte_t pte_mkspecial(pte_t pte)
278 { return pte; } 278 { return pte; }
279 279
280#define pgprot_noncached(prot) (__pgprot(pgprot_val(prot) & ~_PAGE_CA_MASK))
281
280/* 282/*
281 * Conversion functions: convert a page and protection to a page entry, 283 * Conversion functions: convert a page and protection to a page entry,
282 * and a page entry and page directory to the page they refer to. 284 * and a page entry and page directory to the page they refer to.
diff --git a/arch/xtensa/include/uapi/asm/unistd.h b/arch/xtensa/include/uapi/asm/unistd.h
index 8883fc877c5c..db5bb72e2f4e 100644
--- a/arch/xtensa/include/uapi/asm/unistd.h
+++ b/arch/xtensa/include/uapi/asm/unistd.h
@@ -384,7 +384,8 @@ __SYSCALL(174, sys_chroot, 1)
384#define __NR_pivot_root 175 384#define __NR_pivot_root 175
385__SYSCALL(175, sys_pivot_root, 2) 385__SYSCALL(175, sys_pivot_root, 2)
386#define __NR_umount 176 386#define __NR_umount 176
387__SYSCALL(176, sys_umount, 2) 387__SYSCALL(176, sys_oldumount, 1)
388#define __ARCH_WANT_SYS_OLDUMOUNT
388#define __NR_swapoff 177 389#define __NR_swapoff 177
389__SYSCALL(177, sys_swapoff, 1) 390__SYSCALL(177, sys_swapoff, 1)
390#define __NR_sync 178 391#define __NR_sync 178
@@ -742,7 +743,14 @@ __SYSCALL(335, sys_sched_getattr, 3)
742#define __NR_renameat2 336 743#define __NR_renameat2 336
743__SYSCALL(336, sys_renameat2, 5) 744__SYSCALL(336, sys_renameat2, 5)
744 745
745#define __NR_syscall_count 337 746#define __NR_seccomp 337
747__SYSCALL(337, sys_seccomp, 3)
748#define __NR_getrandom 338
749__SYSCALL(338, sys_getrandom, 3)
750#define __NR_memfd_create 339
751__SYSCALL(339, sys_memfd_create, 2)
752
753#define __NR_syscall_count 340
746 754
747/* 755/*
748 * sysxtensa syscall handler 756 * sysxtensa syscall handler
diff --git a/block/bio-integrity.c b/block/bio-integrity.c
index 0984232e429f..5cbd5d9ea61d 100644
--- a/block/bio-integrity.c
+++ b/block/bio-integrity.c
@@ -216,9 +216,10 @@ static int bio_integrity_process(struct bio *bio,
216{ 216{
217 struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); 217 struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
218 struct blk_integrity_iter iter; 218 struct blk_integrity_iter iter;
219 struct bio_vec *bv; 219 struct bvec_iter bviter;
220 struct bio_vec bv;
220 struct bio_integrity_payload *bip = bio_integrity(bio); 221 struct bio_integrity_payload *bip = bio_integrity(bio);
221 unsigned int i, ret = 0; 222 unsigned int ret = 0;
222 void *prot_buf = page_address(bip->bip_vec->bv_page) + 223 void *prot_buf = page_address(bip->bip_vec->bv_page) +
223 bip->bip_vec->bv_offset; 224 bip->bip_vec->bv_offset;
224 225
@@ -227,11 +228,11 @@ static int bio_integrity_process(struct bio *bio,
227 iter.seed = bip_get_seed(bip); 228 iter.seed = bip_get_seed(bip);
228 iter.prot_buf = prot_buf; 229 iter.prot_buf = prot_buf;
229 230
230 bio_for_each_segment_all(bv, bio, i) { 231 bio_for_each_segment(bv, bio, bviter) {
231 void *kaddr = kmap_atomic(bv->bv_page); 232 void *kaddr = kmap_atomic(bv.bv_page);
232 233
233 iter.data_buf = kaddr + bv->bv_offset; 234 iter.data_buf = kaddr + bv.bv_offset;
234 iter.data_size = bv->bv_len; 235 iter.data_size = bv.bv_len;
235 236
236 ret = proc_fn(&iter); 237 ret = proc_fn(&iter);
237 if (ret) { 238 if (ret) {
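
bio_integrity_process() now walks the bio with bio_for_each_segment(), which honours bi_iter and visits only the data the bio currently covers, instead of bio_for_each_segment_all(), which walks every bvec ever added to the underlying vector, including parts a cloned bio does not own. A toy iterator in plain C showing the difference between "all entries" and "only the active window" (names are made up; this is not the block-layer API):

    #include <stdio.h>

    struct seg { const char *buf; int len; };

    struct seg_iter { int idx; int remaining; };   /* like a tiny bvec_iter */

    /* Visit only the bytes inside the iterator's window. */
    static void for_each_active(const struct seg *v, struct seg_iter it)
    {
        while (it.remaining > 0) {
            int take = v[it.idx].len < it.remaining ? v[it.idx].len
                                                    : it.remaining;
            printf("active: %.*s\n", take, v[it.idx].buf);
            it.remaining -= take;
            it.idx++;
        }
    }

    int main(void)
    {
        struct seg vec[3] = { { "aaaa", 4 }, { "bbbb", 4 }, { "cccc", 4 } };

        /* A "cloned" view that only covers the first 6 bytes: walking
         * all 3 entries would touch data this request does not own. */
        struct seg_iter it = { .idx = 0, .remaining = 6 };
        for_each_active(vec, it);
        return 0;
    }
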
diff --git a/block/blk-merge.c b/block/blk-merge.c
index ba99351c0f58..89b97b5e0881 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -97,18 +97,22 @@ void blk_recalc_rq_segments(struct request *rq)
97 97
98void blk_recount_segments(struct request_queue *q, struct bio *bio) 98void blk_recount_segments(struct request_queue *q, struct bio *bio)
99{ 99{
100 bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE, 100 unsigned short seg_cnt;
101 &q->queue_flags); 101
102 /* estimate segment number by bi_vcnt for non-cloned bio */
103 if (bio_flagged(bio, BIO_CLONED))
104 seg_cnt = bio_segments(bio);
105 else
106 seg_cnt = bio->bi_vcnt;
102 107
103 if (no_sg_merge && !bio_flagged(bio, BIO_CLONED) && 108 if (test_bit(QUEUE_FLAG_NO_SG_MERGE, &q->queue_flags) &&
104 bio->bi_vcnt < queue_max_segments(q)) 109 (seg_cnt < queue_max_segments(q)))
105 bio->bi_phys_segments = bio->bi_vcnt; 110 bio->bi_phys_segments = seg_cnt;
106 else { 111 else {
107 struct bio *nxt = bio->bi_next; 112 struct bio *nxt = bio->bi_next;
108 113
109 bio->bi_next = NULL; 114 bio->bi_next = NULL;
110 bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, 115 bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio, false);
111 no_sg_merge);
112 bio->bi_next = nxt; 116 bio->bi_next = nxt;
113 } 117 }
114 118
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 68929bad9a6a..1d016fc9a8b6 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -107,11 +107,7 @@ static void blk_mq_usage_counter_release(struct percpu_ref *ref)
107 wake_up_all(&q->mq_freeze_wq); 107 wake_up_all(&q->mq_freeze_wq);
108} 108}
109 109
110/* 110static void blk_mq_freeze_queue_start(struct request_queue *q)
111 * Guarantee no request is in use, so we can change any data structure of
112 * the queue afterward.
113 */
114void blk_mq_freeze_queue(struct request_queue *q)
115{ 111{
116 bool freeze; 112 bool freeze;
117 113
@@ -123,9 +119,23 @@ void blk_mq_freeze_queue(struct request_queue *q)
123 percpu_ref_kill(&q->mq_usage_counter); 119 percpu_ref_kill(&q->mq_usage_counter);
124 blk_mq_run_queues(q, false); 120 blk_mq_run_queues(q, false);
125 } 121 }
122}
123
124static void blk_mq_freeze_queue_wait(struct request_queue *q)
125{
126 wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter)); 126 wait_event(q->mq_freeze_wq, percpu_ref_is_zero(&q->mq_usage_counter));
127} 127}
128 128
129/*
130 * Guarantee no request is in use, so we can change any data structure of
131 * the queue afterward.
132 */
133void blk_mq_freeze_queue(struct request_queue *q)
134{
135 blk_mq_freeze_queue_start(q);
136 blk_mq_freeze_queue_wait(q);
137}
138
129static void blk_mq_unfreeze_queue(struct request_queue *q) 139static void blk_mq_unfreeze_queue(struct request_queue *q)
130{ 140{
131 bool wake; 141 bool wake;
@@ -1921,7 +1931,7 @@ void blk_mq_free_queue(struct request_queue *q)
1921/* Basically redo blk_mq_init_queue with queue frozen */ 1931/* Basically redo blk_mq_init_queue with queue frozen */
1922static void blk_mq_queue_reinit(struct request_queue *q) 1932static void blk_mq_queue_reinit(struct request_queue *q)
1923{ 1933{
1924 blk_mq_freeze_queue(q); 1934 WARN_ON_ONCE(!q->mq_freeze_depth);
1925 1935
1926 blk_mq_sysfs_unregister(q); 1936 blk_mq_sysfs_unregister(q);
1927 1937
@@ -1936,8 +1946,6 @@ static void blk_mq_queue_reinit(struct request_queue *q)
1936 blk_mq_map_swqueue(q); 1946 blk_mq_map_swqueue(q);
1937 1947
1938 blk_mq_sysfs_register(q); 1948 blk_mq_sysfs_register(q);
1939
1940 blk_mq_unfreeze_queue(q);
1941} 1949}
1942 1950
1943static int blk_mq_queue_reinit_notify(struct notifier_block *nb, 1951static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
@@ -1956,8 +1964,25 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
1956 return NOTIFY_OK; 1964 return NOTIFY_OK;
1957 1965
1958 mutex_lock(&all_q_mutex); 1966 mutex_lock(&all_q_mutex);
1967
1968 /*
1969 * We need to freeze and reinit all existing queues. Freezing
1970 * involves synchronous wait for an RCU grace period and doing it
1971 * one by one may take a long time. Start freezing all queues in
1972 * one swoop and then wait for the completions so that freezing can
1973 * take place in parallel.
1974 */
1975 list_for_each_entry(q, &all_q_list, all_q_node)
1976 blk_mq_freeze_queue_start(q);
1977 list_for_each_entry(q, &all_q_list, all_q_node)
1978 blk_mq_freeze_queue_wait(q);
1979
1959 list_for_each_entry(q, &all_q_list, all_q_node) 1980 list_for_each_entry(q, &all_q_list, all_q_node)
1960 blk_mq_queue_reinit(q); 1981 blk_mq_queue_reinit(q);
1982
1983 list_for_each_entry(q, &all_q_list, all_q_node)
1984 blk_mq_unfreeze_queue(q);
1985
1961 mutex_unlock(&all_q_mutex); 1986 mutex_unlock(&all_q_mutex);
1962 return NOTIFY_OK; 1987 return NOTIFY_OK;
1963} 1988}
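
As the new comment says, the CPU-notifier path now starts the freeze on every queue first and only then waits, so the synchronous grace-period waits overlap instead of running one queue at a time. A small pthread sketch of why the two-loop form is faster (the "grace period" is just a one-second sleep here; purely illustrative, not blk-mq code):

    #include <pthread.h>
    #include <stdio.h>
    #include <unistd.h>

    #define NQUEUES 4

    struct queue { int id; pthread_t waiter; };

    /* Stand-in for the synchronous part of freezing: each queue has to
     * wait out its own "grace period". */
    static void *grace_period(void *arg)
    {
        struct queue *q = arg;
        sleep(1);
        printf("queue %d frozen\n", q->id);
        return NULL;
    }

    static void freeze_start(struct queue *q)
    {
        pthread_create(&q->waiter, NULL, grace_period, q);
    }

    static void freeze_wait(struct queue *q)
    {
        pthread_join(q->waiter, NULL);
    }

    int main(void)
    {
        struct queue qs[NQUEUES];

        /* Two loops: total wall time ~1s instead of ~NQUEUES seconds. */
        for (int i = 0; i < NQUEUES; i++) {
            qs[i].id = i;
            freeze_start(&qs[i]);
        }
        for (int i = 0; i < NQUEUES; i++)
            freeze_wait(&qs[i]);

        puts("all queues frozen, safe to reinit");
        return 0;
    }
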
diff --git a/block/elevator.c b/block/elevator.c
index 24c28b659bb3..afa3b037a17c 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -229,7 +229,9 @@ int elevator_init(struct request_queue *q, char *name)
229 } 229 }
230 230
231 err = e->ops.elevator_init_fn(q, e); 231 err = e->ops.elevator_init_fn(q, e);
232 return 0; 232 if (err)
233 elevator_put(e);
234 return err;
233} 235}
234EXPORT_SYMBOL(elevator_init); 236EXPORT_SYMBOL(elevator_init);
235 237
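
elevator_init() previously ignored the return value of the elevator's init callback; the fix propagates the error and drops the elevator reference taken earlier in the function. A generic sketch of that "undo what you took before returning the error" shape (illustrative; the types and helpers are invented):

    #include <stdio.h>

    struct elevator { const char *name; int refs; };

    static void elevator_get(struct elevator *e) { e->refs++; }
    static void elevator_put(struct elevator *e) { e->refs--; }

    /* Pretend init callback that can fail. */
    static int elevator_init_fn(struct elevator *e)
    {
        (void)e;
        return -1;      /* simulate failure */
    }

    static int elevator_setup(struct elevator *e)
    {
        int err;

        elevator_get(e);                /* reference taken up front   */
        err = elevator_init_fn(e);
        if (err)
            elevator_put(e);            /* failure: give the ref back */
        return err;                     /* and report it to the caller */
    }

    int main(void)
    {
        struct elevator e = { "noop", 0 };
        printf("err=%d refs=%d\n", elevator_setup(&e), e.refs);
        return 0;
    }
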
diff --git a/block/ioprio.c b/block/ioprio.c
index e50170ca7c33..31666c92b46a 100644
--- a/block/ioprio.c
+++ b/block/ioprio.c
@@ -157,14 +157,16 @@ out:
157 157
158int ioprio_best(unsigned short aprio, unsigned short bprio) 158int ioprio_best(unsigned short aprio, unsigned short bprio)
159{ 159{
160 unsigned short aclass = IOPRIO_PRIO_CLASS(aprio); 160 unsigned short aclass;
161 unsigned short bclass = IOPRIO_PRIO_CLASS(bprio); 161 unsigned short bclass;
162 162
163 if (aclass == IOPRIO_CLASS_NONE) 163 if (!ioprio_valid(aprio))
164 aclass = IOPRIO_CLASS_BE; 164 aprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
165 if (bclass == IOPRIO_CLASS_NONE) 165 if (!ioprio_valid(bprio))
166 bclass = IOPRIO_CLASS_BE; 166 bprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
167 167
168 aclass = IOPRIO_PRIO_CLASS(aprio);
169 bclass = IOPRIO_PRIO_CLASS(bprio);
168 if (aclass == bclass) 170 if (aclass == bclass)
169 return min(aprio, bprio); 171 return min(aprio, bprio);
170 if (aclass > bclass) 172 if (aclass > bclass)
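
ioprio_best() now validates each priority before extracting its class and replaces an invalid one with a complete BE/NORM value rather than patching only the class, so the later min() comparison operates on well-formed values. A self-contained version of the fixed logic with locally defined macros (simplified copies for illustration, not the uapi header):

    #include <stdio.h>

    /* Simplified local copies of the ioprio encoding (class in the top
     * 3 bits of a 16-bit value); not the real uapi header. */
    #define IOPRIO_CLASS_SHIFT      13
    #define IOPRIO_PRIO_CLASS(p)    ((p) >> IOPRIO_CLASS_SHIFT)
    #define IOPRIO_PRIO_VALUE(c, d) (((c) << IOPRIO_CLASS_SHIFT) | (d))
    #define IOPRIO_CLASS_NONE       0
    #define IOPRIO_CLASS_BE         2
    #define IOPRIO_NORM             4
    #define ioprio_valid(p)         (IOPRIO_PRIO_CLASS(p) != IOPRIO_CLASS_NONE)

    static unsigned short min_u16(unsigned short a, unsigned short b)
    {
        return a < b ? a : b;
    }

    int ioprio_best(unsigned short aprio, unsigned short bprio)
    {
        unsigned short aclass, bclass;

        /* Replace an unset priority with a full, well-formed default. */
        if (!ioprio_valid(aprio))
            aprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);
        if (!ioprio_valid(bprio))
            bprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, IOPRIO_NORM);

        aclass = IOPRIO_PRIO_CLASS(aprio);
        bclass = IOPRIO_PRIO_CLASS(bprio);
        if (aclass == bclass)
            return min_u16(aprio, bprio);   /* lower value == higher prio */
        return aclass > bclass ? bprio : aprio;
    }

    int main(void)
    {
        unsigned short none = 0;                        /* class NONE       */
        unsigned short rt   = IOPRIO_PRIO_VALUE(1, 0);  /* RT class, level 0 */
        printf("best=0x%x\n", ioprio_best(none, rt));
        return 0;
    }
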
diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
index abb2e65b24cc..b0c2a616c8f9 100644
--- a/block/scsi_ioctl.c
+++ b/block/scsi_ioctl.c
@@ -458,7 +458,7 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
458 rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT); 458 rq = blk_get_request(q, in_len ? WRITE : READ, __GFP_WAIT);
459 if (IS_ERR(rq)) { 459 if (IS_ERR(rq)) {
460 err = PTR_ERR(rq); 460 err = PTR_ERR(rq);
461 goto error; 461 goto error_free_buffer;
462 } 462 }
463 blk_rq_set_block_pc(rq); 463 blk_rq_set_block_pc(rq);
464 464
@@ -508,7 +508,7 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
508 508
509 if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, __GFP_WAIT)) { 509 if (bytes && blk_rq_map_kern(q, rq, buffer, bytes, __GFP_WAIT)) {
510 err = DRIVER_ERROR << 24; 510 err = DRIVER_ERROR << 24;
511 goto out; 511 goto error;
512 } 512 }
513 513
514 memset(sense, 0, sizeof(sense)); 514 memset(sense, 0, sizeof(sense));
@@ -517,7 +517,6 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
517 517
518 blk_execute_rq(q, disk, rq, 0); 518 blk_execute_rq(q, disk, rq, 0);
519 519
520out:
521 err = rq->errors & 0xff; /* only 8 bit SCSI status */ 520 err = rq->errors & 0xff; /* only 8 bit SCSI status */
522 if (err) { 521 if (err) {
523 if (rq->sense_len && rq->sense) { 522 if (rq->sense_len && rq->sense) {
@@ -532,9 +531,11 @@ out:
532 } 531 }
533 532
534error: 533error:
534 blk_put_request(rq);
535
536error_free_buffer:
535 kfree(buffer); 537 kfree(buffer);
536 if (rq) 538
537 blk_put_request(rq);
538 return err; 539 return err;
539} 540}
540EXPORT_SYMBOL_GPL(sg_scsi_ioctl); 541EXPORT_SYMBOL_GPL(sg_scsi_ioctl);
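
The sg_scsi_ioctl() cleanup is reshuffled into two labels so the request is always released before the buffer, and the if (rq) guard disappears because the only path reaching the buffer-only label is the one where the request was never allocated. A generic sketch of that staged-goto error handling (resource names are invented, not the SCSI code):

    #include <stdio.h>
    #include <stdlib.h>

    struct request { int dummy; };

    static struct request *get_request(void) { return malloc(sizeof(struct request)); }
    static void put_request(struct request *rq) { free(rq); }

    static int do_ioctl(size_t len, int fail_late)
    {
        int err = 0;
        char *buffer = NULL;
        struct request *rq;

        if (len) {
            buffer = calloc(1, len);
            if (!buffer)
                return -12;                 /* -ENOMEM */
        }

        rq = get_request();
        if (!rq) {
            err = -12;
            goto error_free_buffer;         /* no request to put yet */
        }

        if (fail_late) {
            err = -5;                       /* -EIO somewhere later  */
            goto error;
        }

    error:                                  /* everything acquired    */
        put_request(rq);
    error_free_buffer:                      /* only the buffer so far */
        free(buffer);
        return err;
    }

    int main(void)
    {
        printf("late failure -> %d\n", do_ioctl(64, 1));
        printf("success      -> %d\n", do_ioctl(64, 0));
        return 0;
    }
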
diff --git a/crypto/cts.c b/crypto/cts.c
index 042223f8e733..133f0874c95e 100644
--- a/crypto/cts.c
+++ b/crypto/cts.c
@@ -202,7 +202,8 @@ static int cts_cbc_decrypt(struct crypto_cts_ctx *ctx,
202 /* 5. Append the tail (BB - Ln) bytes of Xn (tmp) to Cn to create En */ 202 /* 5. Append the tail (BB - Ln) bytes of Xn (tmp) to Cn to create En */
203 memcpy(s + bsize + lastn, tmp + lastn, bsize - lastn); 203 memcpy(s + bsize + lastn, tmp + lastn, bsize - lastn);
204 /* 6. Decrypt En to create Pn-1 */ 204 /* 6. Decrypt En to create Pn-1 */
205 memset(iv, 0, sizeof(iv)); 205 memzero_explicit(iv, sizeof(iv));
206
206 sg_set_buf(&sgsrc[0], s + bsize, bsize); 207 sg_set_buf(&sgsrc[0], s + bsize, bsize);
207 sg_set_buf(&sgdst[0], d, bsize); 208 sg_set_buf(&sgdst[0], d, bsize);
208 err = crypto_blkcipher_decrypt_iv(&lcldesc, sgdst, sgsrc, bsize); 209 err = crypto_blkcipher_decrypt_iv(&lcldesc, sgdst, sgsrc, bsize);
diff --git a/crypto/sha1_generic.c b/crypto/sha1_generic.c
index 42794803c480..7bb047432782 100644
--- a/crypto/sha1_generic.c
+++ b/crypto/sha1_generic.c
@@ -64,7 +64,7 @@ int crypto_sha1_update(struct shash_desc *desc, const u8 *data,
64 src = data + done; 64 src = data + done;
65 } while (done + SHA1_BLOCK_SIZE <= len); 65 } while (done + SHA1_BLOCK_SIZE <= len);
66 66
67 memset(temp, 0, sizeof(temp)); 67 memzero_explicit(temp, sizeof(temp));
68 partial = 0; 68 partial = 0;
69 } 69 }
70 memcpy(sctx->buffer + partial, src, len - done); 70 memcpy(sctx->buffer + partial, src, len - done);
diff --git a/crypto/sha256_generic.c b/crypto/sha256_generic.c
index 0bb558344699..65e7b76b057f 100644
--- a/crypto/sha256_generic.c
+++ b/crypto/sha256_generic.c
@@ -211,10 +211,9 @@ static void sha256_transform(u32 *state, const u8 *input)
211 211
212 /* clear any sensitive info... */ 212 /* clear any sensitive info... */
213 a = b = c = d = e = f = g = h = t1 = t2 = 0; 213 a = b = c = d = e = f = g = h = t1 = t2 = 0;
214 memset(W, 0, 64 * sizeof(u32)); 214 memzero_explicit(W, 64 * sizeof(u32));
215} 215}
216 216
217
218static int sha224_init(struct shash_desc *desc) 217static int sha224_init(struct shash_desc *desc)
219{ 218{
220 struct sha256_state *sctx = shash_desc_ctx(desc); 219 struct sha256_state *sctx = shash_desc_ctx(desc);
@@ -317,7 +316,7 @@ static int sha224_final(struct shash_desc *desc, u8 *hash)
317 sha256_final(desc, D); 316 sha256_final(desc, D);
318 317
319 memcpy(hash, D, SHA224_DIGEST_SIZE); 318 memcpy(hash, D, SHA224_DIGEST_SIZE);
320 memset(D, 0, SHA256_DIGEST_SIZE); 319 memzero_explicit(D, SHA256_DIGEST_SIZE);
321 320
322 return 0; 321 return 0;
323} 322}
diff --git a/crypto/sha512_generic.c b/crypto/sha512_generic.c
index 6dde57dc511b..95db67197cd9 100644
--- a/crypto/sha512_generic.c
+++ b/crypto/sha512_generic.c
@@ -239,7 +239,7 @@ static int sha384_final(struct shash_desc *desc, u8 *hash)
239 sha512_final(desc, D); 239 sha512_final(desc, D);
240 240
241 memcpy(hash, D, 48); 241 memcpy(hash, D, 48);
242 memset(D, 0, 64); 242 memzero_explicit(D, 64);
243 243
244 return 0; 244 return 0;
245} 245}
diff --git a/crypto/tgr192.c b/crypto/tgr192.c
index 87403556fd0b..3c7af0d1ff7a 100644
--- a/crypto/tgr192.c
+++ b/crypto/tgr192.c
@@ -612,7 +612,7 @@ static int tgr160_final(struct shash_desc *desc, u8 * out)
612 612
613 tgr192_final(desc, D); 613 tgr192_final(desc, D);
614 memcpy(out, D, TGR160_DIGEST_SIZE); 614 memcpy(out, D, TGR160_DIGEST_SIZE);
615 memset(D, 0, TGR192_DIGEST_SIZE); 615 memzero_explicit(D, TGR192_DIGEST_SIZE);
616 616
617 return 0; 617 return 0;
618} 618}
@@ -623,7 +623,7 @@ static int tgr128_final(struct shash_desc *desc, u8 * out)
623 623
624 tgr192_final(desc, D); 624 tgr192_final(desc, D);
625 memcpy(out, D, TGR128_DIGEST_SIZE); 625 memcpy(out, D, TGR128_DIGEST_SIZE);
626 memset(D, 0, TGR192_DIGEST_SIZE); 626 memzero_explicit(D, TGR192_DIGEST_SIZE);
627 627
628 return 0; 628 return 0;
629} 629}
diff --git a/crypto/vmac.c b/crypto/vmac.c
index 2eb11a30c29c..d84c24bd7ff7 100644
--- a/crypto/vmac.c
+++ b/crypto/vmac.c
@@ -613,7 +613,7 @@ static int vmac_final(struct shash_desc *pdesc, u8 *out)
613 } 613 }
614 mac = vmac(ctx->partial, ctx->partial_size, nonce, NULL, ctx); 614 mac = vmac(ctx->partial, ctx->partial_size, nonce, NULL, ctx);
615 memcpy(out, &mac, sizeof(vmac_t)); 615 memcpy(out, &mac, sizeof(vmac_t));
616 memset(&mac, 0, sizeof(vmac_t)); 616 memzero_explicit(&mac, sizeof(vmac_t));
617 memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx)); 617 memset(&ctx->__vmac_ctx, 0, sizeof(struct vmac_ctx));
618 ctx->partial_size = 0; 618 ctx->partial_size = 0;
619 return 0; 619 return 0;
diff --git a/crypto/wp512.c b/crypto/wp512.c
index 180f1d6e03f4..ec64e7762fbb 100644
--- a/crypto/wp512.c
+++ b/crypto/wp512.c
@@ -1102,8 +1102,8 @@ static int wp384_final(struct shash_desc *desc, u8 *out)
1102 u8 D[64]; 1102 u8 D[64];
1103 1103
1104 wp512_final(desc, D); 1104 wp512_final(desc, D);
1105 memcpy (out, D, WP384_DIGEST_SIZE); 1105 memcpy(out, D, WP384_DIGEST_SIZE);
1106 memset (D, 0, WP512_DIGEST_SIZE); 1106 memzero_explicit(D, WP512_DIGEST_SIZE);
1107 1107
1108 return 0; 1108 return 0;
1109} 1109}
@@ -1113,8 +1113,8 @@ static int wp256_final(struct shash_desc *desc, u8 *out)
1113 u8 D[64]; 1113 u8 D[64];
1114 1114
1115 wp512_final(desc, D); 1115 wp512_final(desc, D);
1116 memcpy (out, D, WP256_DIGEST_SIZE); 1116 memcpy(out, D, WP256_DIGEST_SIZE);
1117 memset (D, 0, WP512_DIGEST_SIZE); 1117 memzero_explicit(D, WP512_DIGEST_SIZE);
1118 1118
1119 return 0; 1119 return 0;
1120} 1120}
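
These crypto finalisers switch from memset() to memzero_explicit() because a plain memset() of a local buffer that is never read again is a dead store the compiler may delete, leaving key material or digest state on the stack. A userspace approximation of the helper using a compiler barrier (this is the common trick, not necessarily the kernel's exact implementation):

    #include <stdio.h>
    #include <string.h>

    /* memset() followed by a barrier that tells the compiler the memory
     * is still "used", so the store cannot be optimised away. */
    static void memzero_explicit(void *s, size_t count)
    {
        memset(s, 0, count);
        __asm__ __volatile__("" : : "r"(s) : "memory");
    }

    static void digest_something(unsigned char *out, size_t outlen)
    {
        unsigned char scratch[64];

        /* ... pretend scratch now holds sensitive intermediate state ... */
        memset(scratch, 0xAB, sizeof(scratch));
        memcpy(out, scratch, outlen);

        /* Without the barrier, a plain memset(scratch, 0, ...) here is a
         * dead store the optimiser is allowed to drop. */
        memzero_explicit(scratch, sizeof(scratch));
    }

    int main(void)
    {
        unsigned char d[16];
        digest_something(d, sizeof(d));
        printf("first byte: %02x\n", d[0]);
        return 0;
    }
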
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index d0f3265fb85d..b23fe37f67c0 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -144,7 +144,7 @@ config ACPI_VIDEO
144 144
145config ACPI_FAN 145config ACPI_FAN
146 tristate "Fan" 146 tristate "Fan"
147 select THERMAL 147 depends on THERMAL
148 default y 148 default y
149 help 149 help
150 This driver supports ACPI fan devices, allowing user-mode 150 This driver supports ACPI fan devices, allowing user-mode
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 505d4d79fe3e..c3b2fcb729f3 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -43,6 +43,7 @@ acpi-y += pci_root.o pci_link.o pci_irq.o
43acpi-y += acpi_lpss.o 43acpi-y += acpi_lpss.o
44acpi-y += acpi_platform.o 44acpi-y += acpi_platform.o
45acpi-y += acpi_pnp.o 45acpi-y += acpi_pnp.o
46acpi-y += int340x_thermal.o
46acpi-y += power.o 47acpi-y += power.o
47acpi-y += event.o 48acpi-y += event.o
48acpi-y += sysfs.o 49acpi-y += sysfs.o
diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c
index 2bf9082f7523..6ba8beb6b9d2 100644
--- a/drivers/acpi/acpi_platform.c
+++ b/drivers/acpi/acpi_platform.c
@@ -16,6 +16,7 @@
16#include <linux/err.h> 16#include <linux/err.h>
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/dma-mapping.h>
19#include <linux/platform_device.h> 20#include <linux/platform_device.h>
20 21
21#include "internal.h" 22#include "internal.h"
@@ -102,6 +103,7 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev)
102 pdevinfo.res = resources; 103 pdevinfo.res = resources;
103 pdevinfo.num_res = count; 104 pdevinfo.num_res = count;
104 pdevinfo.acpi_node.companion = adev; 105 pdevinfo.acpi_node.companion = adev;
106 pdevinfo.dma_mask = DMA_BIT_MASK(32);
105 pdev = platform_device_register_full(&pdevinfo); 107 pdev = platform_device_register_full(&pdevinfo);
106 if (IS_ERR(pdev)) 108 if (IS_ERR(pdev))
107 dev_err(&adev->dev, "platform device creation failed: %ld\n", 109 dev_err(&adev->dev, "platform device creation failed: %ld\n",
@@ -113,3 +115,4 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev)
113 kfree(resources); 115 kfree(resources);
114 return pdev; 116 return pdev;
115} 117}
118EXPORT_SYMBOL_GPL(acpi_create_platform_device);
diff --git a/drivers/acpi/acpica/achware.h b/drivers/acpi/acpica/achware.h
index 2ad2351a9833..c318d3e27893 100644
--- a/drivers/acpi/acpica/achware.h
+++ b/drivers/acpi/acpica/achware.h
@@ -127,7 +127,7 @@ acpi_hw_clear_gpe_block(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
127 127
128acpi_status 128acpi_status
129acpi_hw_get_gpe_status(struct acpi_gpe_event_info *gpe_event_info, 129acpi_hw_get_gpe_status(struct acpi_gpe_event_info *gpe_event_info,
130 acpi_event_status * event_status); 130 acpi_event_status *event_status);
131 131
132acpi_status acpi_hw_disable_all_gpes(void); 132acpi_status acpi_hw_disable_all_gpes(void);
133 133
diff --git a/drivers/acpi/acpica/aclocal.h b/drivers/acpi/acpica/aclocal.h
index 2747279fbe3c..c00e7e41ad75 100644
--- a/drivers/acpi/acpica/aclocal.h
+++ b/drivers/acpi/acpica/aclocal.h
@@ -413,8 +413,8 @@ struct acpi_gpe_handler_info {
413 acpi_gpe_handler address; /* Address of handler, if any */ 413 acpi_gpe_handler address; /* Address of handler, if any */
414 void *context; /* Context to be passed to handler */ 414 void *context; /* Context to be passed to handler */
415 struct acpi_namespace_node *method_node; /* Method node for this GPE level (saved) */ 415 struct acpi_namespace_node *method_node; /* Method node for this GPE level (saved) */
416 u8 original_flags; /* Original (pre-handler) GPE info */ 416 u8 original_flags; /* Original (pre-handler) GPE info */
417 u8 originally_enabled; /* True if GPE was originally enabled */ 417 u8 originally_enabled; /* True if GPE was originally enabled */
418}; 418};
419 419
420/* Notify info for implicit notify, multiple device objects */ 420/* Notify info for implicit notify, multiple device objects */
diff --git a/drivers/acpi/acpica/actables.h b/drivers/acpi/acpica/actables.h
index f14882788eee..1afe46e44dac 100644
--- a/drivers/acpi/acpica/actables.h
+++ b/drivers/acpi/acpica/actables.h
@@ -49,6 +49,8 @@ acpi_status acpi_allocate_root_table(u32 initial_table_count);
49/* 49/*
50 * tbxfroot - Root pointer utilities 50 * tbxfroot - Root pointer utilities
51 */ 51 */
52u32 acpi_tb_get_rsdp_length(struct acpi_table_rsdp *rsdp);
53
52acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp); 54acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp);
53 55
54u8 *acpi_tb_scan_memory_for_rsdp(u8 *start_address, u32 length); 56u8 *acpi_tb_scan_memory_for_rsdp(u8 *start_address, u32 length);
diff --git a/drivers/acpi/acpica/amlresrc.h b/drivers/acpi/acpica/amlresrc.h
index f3f834408441..3a0beeb86ba5 100644
--- a/drivers/acpi/acpica/amlresrc.h
+++ b/drivers/acpi/acpica/amlresrc.h
@@ -117,6 +117,12 @@ struct asl_resource_node {
117 struct asl_resource_node *next; 117 struct asl_resource_node *next;
118}; 118};
119 119
120struct asl_resource_info {
121 union acpi_parse_object *descriptor_type_op; /* Resource descriptor parse node */
122 union acpi_parse_object *mapping_op; /* Used for mapfile support */
123 u32 current_byte_offset; /* Offset in resource template */
124};
125
120/* Macros used to generate AML resource length fields */ 126/* Macros used to generate AML resource length fields */
121 127
122#define ACPI_AML_SIZE_LARGE(r) (sizeof (r) - sizeof (struct aml_resource_large_header)) 128#define ACPI_AML_SIZE_LARGE(r) (sizeof (r) - sizeof (struct aml_resource_large_header))
@@ -449,4 +455,32 @@ union aml_resource {
449 u8 byte_item; 455 u8 byte_item;
450}; 456};
451 457
458/* Interfaces used by both the disassembler and compiler */
459
460void
461mp_save_gpio_info(union acpi_parse_object *op,
462 union aml_resource *resource,
463 u32 pin_count, u16 *pin_list, char *device_name);
464
465void
466mp_save_serial_info(union acpi_parse_object *op,
467 union aml_resource *resource, char *device_name);
468
469char *mp_get_hid_from_parse_tree(struct acpi_namespace_node *hid_node);
470
471char *mp_get_hid_via_namestring(char *device_name);
472
473char *mp_get_connection_info(union acpi_parse_object *op,
474 u32 pin_index,
475 struct acpi_namespace_node **target_node,
476 char **target_name);
477
478char *mp_get_parent_device_hid(union acpi_parse_object *op,
479 struct acpi_namespace_node **target_node,
480 char **parent_device_name);
481
482char *mp_get_ddn_value(char *device_name);
483
484char *mp_get_hid_value(struct acpi_namespace_node *device_node);
485
452#endif 486#endif
diff --git a/drivers/acpi/acpica/evgpe.c b/drivers/acpi/acpica/evgpe.c
index e4ba4dec86af..2095dfb72bcb 100644
--- a/drivers/acpi/acpica/evgpe.c
+++ b/drivers/acpi/acpica/evgpe.c
@@ -100,13 +100,14 @@ acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info)
100 * 100 *
101 * FUNCTION: acpi_ev_enable_gpe 101 * FUNCTION: acpi_ev_enable_gpe
102 * 102 *
103 * PARAMETERS: gpe_event_info - GPE to enable 103 * PARAMETERS: gpe_event_info - GPE to enable
104 * 104 *
105 * RETURN: Status 105 * RETURN: Status
106 * 106 *
107 * DESCRIPTION: Clear a GPE of stale events and enable it. 107 * DESCRIPTION: Clear a GPE of stale events and enable it.
108 * 108 *
109 ******************************************************************************/ 109 ******************************************************************************/
110
110acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info) 111acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
111{ 112{
112 acpi_status status; 113 acpi_status status;
@@ -125,6 +126,7 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
125 } 126 }
126 127
127 /* Clear the GPE (of stale events) */ 128 /* Clear the GPE (of stale events) */
129
128 status = acpi_hw_clear_gpe(gpe_event_info); 130 status = acpi_hw_clear_gpe(gpe_event_info);
129 if (ACPI_FAILURE(status)) { 131 if (ACPI_FAILURE(status)) {
130 return_ACPI_STATUS(status); 132 return_ACPI_STATUS(status);
@@ -136,7 +138,6 @@ acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
136 return_ACPI_STATUS(status); 138 return_ACPI_STATUS(status);
137} 139}
138 140
139
140/******************************************************************************* 141/*******************************************************************************
141 * 142 *
142 * FUNCTION: acpi_ev_add_gpe_reference 143 * FUNCTION: acpi_ev_add_gpe_reference
@@ -212,7 +213,7 @@ acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
212 if (ACPI_SUCCESS(status)) { 213 if (ACPI_SUCCESS(status)) {
213 status = 214 status =
214 acpi_hw_low_set_gpe(gpe_event_info, 215 acpi_hw_low_set_gpe(gpe_event_info,
215 ACPI_GPE_DISABLE); 216 ACPI_GPE_DISABLE);
216 } 217 }
217 218
218 if (ACPI_FAILURE(status)) { 219 if (ACPI_FAILURE(status)) {
@@ -334,7 +335,7 @@ struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
334 * 335 *
335 ******************************************************************************/ 336 ******************************************************************************/
336 337
337u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list) 338u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info *gpe_xrupt_list)
338{ 339{
339 acpi_status status; 340 acpi_status status;
340 struct acpi_gpe_block_info *gpe_block; 341 struct acpi_gpe_block_info *gpe_block;
@@ -427,7 +428,7 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
427 428
428 /* Check if there is anything active at all in this register */ 429 /* Check if there is anything active at all in this register */
429 430
430 enabled_status_byte = (u8) (status_reg & enable_reg); 431 enabled_status_byte = (u8)(status_reg & enable_reg);
431 if (!enabled_status_byte) { 432 if (!enabled_status_byte) {
432 433
433 /* No active GPEs in this register, move on */ 434 /* No active GPEs in this register, move on */
@@ -450,7 +451,7 @@ u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
450 acpi_ev_gpe_dispatch(gpe_block-> 451 acpi_ev_gpe_dispatch(gpe_block->
451 node, 452 node,
452 &gpe_block-> 453 &gpe_block->
453 event_info[((acpi_size) i * ACPI_GPE_REGISTER_WIDTH) + j], j + gpe_register_info->base_gpe_number); 454 event_info[((acpi_size) i * ACPI_GPE_REGISTER_WIDTH) + j], j + gpe_register_info->base_gpe_number);
454 } 455 }
455 } 456 }
456 } 457 }
@@ -636,7 +637,7 @@ static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context)
636 * 637 *
637 ******************************************************************************/ 638 ******************************************************************************/
638 639
639acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info) 640acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info * gpe_event_info)
640{ 641{
641 acpi_status status; 642 acpi_status status;
642 643
@@ -666,9 +667,9 @@ acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info)
666 * 667 *
667 * FUNCTION: acpi_ev_gpe_dispatch 668 * FUNCTION: acpi_ev_gpe_dispatch
668 * 669 *
669 * PARAMETERS: gpe_device - Device node. NULL for GPE0/GPE1 670 * PARAMETERS: gpe_device - Device node. NULL for GPE0/GPE1
670 * gpe_event_info - Info for this GPE 671 * gpe_event_info - Info for this GPE
671 * gpe_number - Number relative to the parent GPE block 672 * gpe_number - Number relative to the parent GPE block
672 * 673 *
673 * RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED 674 * RETURN: INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
674 * 675 *
@@ -681,7 +682,7 @@ acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info)
681 682
682u32 683u32
683acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device, 684acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
684 struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number) 685 struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
685{ 686{
686 acpi_status status; 687 acpi_status status;
687 u32 return_value; 688 u32 return_value;
diff --git a/drivers/acpi/acpica/evgpeinit.c b/drivers/acpi/acpica/evgpeinit.c
index 49fc7effd961..7be928379879 100644
--- a/drivers/acpi/acpica/evgpeinit.c
+++ b/drivers/acpi/acpica/evgpeinit.c
@@ -424,6 +424,7 @@ acpi_ev_match_gpe_method(acpi_handle obj_handle,
424 } 424 }
425 425
426 /* Disable the GPE in case it's been enabled already. */ 426 /* Disable the GPE in case it's been enabled already. */
427
427 (void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE); 428 (void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
428 429
429 /* 430 /*
diff --git a/drivers/acpi/acpica/evxface.c b/drivers/acpi/acpica/evxface.c
index 11e5803b8b41..55a58f3ec8df 100644
--- a/drivers/acpi/acpica/evxface.c
+++ b/drivers/acpi/acpica/evxface.c
@@ -786,18 +786,26 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
786 handler->method_node = gpe_event_info->dispatch.method_node; 786 handler->method_node = gpe_event_info->dispatch.method_node;
787 handler->original_flags = (u8)(gpe_event_info->flags & 787 handler->original_flags = (u8)(gpe_event_info->flags &
788 (ACPI_GPE_XRUPT_TYPE_MASK | 788 (ACPI_GPE_XRUPT_TYPE_MASK |
789 ACPI_GPE_DISPATCH_MASK)); 789 ACPI_GPE_DISPATCH_MASK));
790 790
791 /* 791 /*
792 * If the GPE is associated with a method, it may have been enabled 792 * If the GPE is associated with a method, it may have been enabled
793 * automatically during initialization, in which case it has to be 793 * automatically during initialization, in which case it has to be
794 * disabled now to avoid spurious execution of the handler. 794 * disabled now to avoid spurious execution of the handler.
795 */ 795 */
796 796 if (((handler->original_flags & ACPI_GPE_DISPATCH_METHOD) ||
797 if ((handler->original_flags & ACPI_GPE_DISPATCH_METHOD) 797 (handler->original_flags & ACPI_GPE_DISPATCH_NOTIFY)) &&
798 && gpe_event_info->runtime_count) { 798 gpe_event_info->runtime_count) {
799 handler->originally_enabled = 1; 799 handler->originally_enabled = TRUE;
800 (void)acpi_ev_remove_gpe_reference(gpe_event_info); 800 (void)acpi_ev_remove_gpe_reference(gpe_event_info);
801
802 /* Sanity check of original type against new type */
803
804 if (type !=
805 (u32)(gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK)) {
806 ACPI_WARNING((AE_INFO,
807 "GPE type mismatch (level/edge)"));
808 }
801 } 809 }
802 810
803 /* Install the handler */ 811 /* Install the handler */
@@ -808,7 +816,7 @@ acpi_install_gpe_handler(acpi_handle gpe_device,
808 816
809 gpe_event_info->flags &= 817 gpe_event_info->flags &=
810 ~(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK); 818 ~(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK);
811 gpe_event_info->flags |= (u8) (type | ACPI_GPE_DISPATCH_HANDLER); 819 gpe_event_info->flags |= (u8)(type | ACPI_GPE_DISPATCH_HANDLER);
812 820
813 acpi_os_release_lock(acpi_gbl_gpe_lock, flags); 821 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
814 822
@@ -893,7 +901,7 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
893 901
894 gpe_event_info->dispatch.method_node = handler->method_node; 902 gpe_event_info->dispatch.method_node = handler->method_node;
895 gpe_event_info->flags &= 903 gpe_event_info->flags &=
896 ~(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK); 904 ~(ACPI_GPE_XRUPT_TYPE_MASK | ACPI_GPE_DISPATCH_MASK);
897 gpe_event_info->flags |= handler->original_flags; 905 gpe_event_info->flags |= handler->original_flags;
898 906
899 /* 907 /*
@@ -901,7 +909,8 @@ acpi_remove_gpe_handler(acpi_handle gpe_device,
901 * enabled, it should be enabled at this point to restore the 909 * enabled, it should be enabled at this point to restore the
902 * post-initialization configuration. 910 * post-initialization configuration.
903 */ 911 */
904 if ((handler->original_flags & ACPI_GPE_DISPATCH_METHOD) && 912 if (((handler->original_flags & ACPI_GPE_DISPATCH_METHOD) ||
913 (handler->original_flags & ACPI_GPE_DISPATCH_NOTIFY)) &&
905 handler->originally_enabled) { 914 handler->originally_enabled) {
906 (void)acpi_ev_add_gpe_reference(gpe_event_info); 915 (void)acpi_ev_add_gpe_reference(gpe_event_info);
907 } 916 }
@@ -946,7 +955,7 @@ ACPI_EXPORT_SYMBOL(acpi_remove_gpe_handler)
946 * handle is returned. 955 * handle is returned.
947 * 956 *
948 ******************************************************************************/ 957 ******************************************************************************/
949acpi_status acpi_acquire_global_lock(u16 timeout, u32 * handle) 958acpi_status acpi_acquire_global_lock(u16 timeout, u32 *handle)
950{ 959{
951 acpi_status status; 960 acpi_status status;
952 961
diff --git a/drivers/acpi/acpica/evxfevnt.c b/drivers/acpi/acpica/evxfevnt.c
index e286640ad4ff..bb8cbf5961bf 100644
--- a/drivers/acpi/acpica/evxfevnt.c
+++ b/drivers/acpi/acpica/evxfevnt.c
@@ -324,8 +324,9 @@ ACPI_EXPORT_SYMBOL(acpi_clear_event)
324 ******************************************************************************/ 324 ******************************************************************************/
325acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status) 325acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status)
326{ 326{
327 acpi_status status = AE_OK; 327 acpi_status status;
328 u32 value; 328 acpi_event_status local_event_status = 0;
329 u32 in_byte;
329 330
330 ACPI_FUNCTION_TRACE(acpi_get_event_status); 331 ACPI_FUNCTION_TRACE(acpi_get_event_status);
331 332
@@ -339,29 +340,40 @@ acpi_status acpi_get_event_status(u32 event, acpi_event_status * event_status)
339 return_ACPI_STATUS(AE_BAD_PARAMETER); 340 return_ACPI_STATUS(AE_BAD_PARAMETER);
340 } 341 }
341 342
342 /* Get the status of the requested fixed event */ 343 /* Fixed event currently can be dispatched? */
344
345 if (acpi_gbl_fixed_event_handlers[event].handler) {
346 local_event_status |= ACPI_EVENT_FLAG_HAS_HANDLER;
347 }
348
349 /* Fixed event currently enabled? */
343 350
344 status = 351 status =
345 acpi_read_bit_register(acpi_gbl_fixed_event_info[event]. 352 acpi_read_bit_register(acpi_gbl_fixed_event_info[event].
346 enable_register_id, &value); 353 enable_register_id, &in_byte);
347 if (ACPI_FAILURE(status)) 354 if (ACPI_FAILURE(status)) {
348 return_ACPI_STATUS(status); 355 return_ACPI_STATUS(status);
356 }
349 357
350 *event_status = value; 358 if (in_byte) {
359 local_event_status |= ACPI_EVENT_FLAG_ENABLED;
360 }
361
362 /* Fixed event currently active? */
351 363
352 status = 364 status =
353 acpi_read_bit_register(acpi_gbl_fixed_event_info[event]. 365 acpi_read_bit_register(acpi_gbl_fixed_event_info[event].
354 status_register_id, &value); 366 status_register_id, &in_byte);
355 if (ACPI_FAILURE(status)) 367 if (ACPI_FAILURE(status)) {
356 return_ACPI_STATUS(status); 368 return_ACPI_STATUS(status);
369 }
357 370
358 if (value) 371 if (in_byte) {
359 *event_status |= ACPI_EVENT_FLAG_SET; 372 local_event_status |= ACPI_EVENT_FLAG_SET;
360 373 }
361 if (acpi_gbl_fixed_event_handlers[event].handler)
362 *event_status |= ACPI_EVENT_FLAG_HANDLE;
363 374
364 return_ACPI_STATUS(status); 375 (*event_status) = local_event_status;
376 return_ACPI_STATUS(AE_OK);
365} 377}
366 378
367ACPI_EXPORT_SYMBOL(acpi_get_event_status) 379ACPI_EXPORT_SYMBOL(acpi_get_event_status)
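
A minimal caller-side sketch of the reworked status reporting above; only the flag names visible in this hunk are assumed, and the chosen fixed event plus the logging are illustrative:

#include <linux/acpi.h>
#include <linux/printk.h>

/* Query the power-button fixed event and decode the rebuilt flag set. */
static void report_power_button_status(void)
{
        acpi_event_status status = 0;

        if (ACPI_FAILURE(acpi_get_event_status(ACPI_EVENT_POWER_BUTTON, &status)))
                return;

        /* ACPI_EVENT_FLAG_HAS_HANDLER replaces the old ACPI_EVENT_FLAG_HANDLE */
        if (status & ACPI_EVENT_FLAG_HAS_HANDLER)
                pr_info("power button: handler installed\n");
        if (status & ACPI_EVENT_FLAG_ENABLED)
                pr_info("power button: enable bit set\n");
        if (status & ACPI_EVENT_FLAG_SET)
                pr_info("power button: status bit set\n");
}
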
diff --git a/drivers/acpi/acpica/evxfgpe.c b/drivers/acpi/acpica/evxfgpe.c
index 56710a03c9b0..e889a5304abd 100644
--- a/drivers/acpi/acpica/evxfgpe.c
+++ b/drivers/acpi/acpica/evxfgpe.c
@@ -106,8 +106,8 @@ ACPI_EXPORT_SYMBOL(acpi_update_all_gpes)
106 * 106 *
107 * FUNCTION: acpi_enable_gpe 107 * FUNCTION: acpi_enable_gpe
108 * 108 *
109 * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 109 * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
110 * gpe_number - GPE level within the GPE block 110 * gpe_number - GPE level within the GPE block
111 * 111 *
112 * RETURN: Status 112 * RETURN: Status
113 * 113 *
@@ -115,7 +115,6 @@ ACPI_EXPORT_SYMBOL(acpi_update_all_gpes)
115 * hardware-enabled. 115 * hardware-enabled.
116 * 116 *
117 ******************************************************************************/ 117 ******************************************************************************/
118
119acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number) 118acpi_status acpi_enable_gpe(acpi_handle gpe_device, u32 gpe_number)
120{ 119{
121 acpi_status status = AE_BAD_PARAMETER; 120 acpi_status status = AE_BAD_PARAMETER;
@@ -490,8 +489,8 @@ ACPI_EXPORT_SYMBOL(acpi_clear_gpe)
490 * 489 *
491 * FUNCTION: acpi_get_gpe_status 490 * FUNCTION: acpi_get_gpe_status
492 * 491 *
493 * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1 492 * PARAMETERS: gpe_device - Parent GPE Device. NULL for GPE0/GPE1
494 * gpe_number - GPE level within the GPE block 493 * gpe_number - GPE level within the GPE block
495 * event_status - Where the current status of the event 494 * event_status - Where the current status of the event
496 * will be returned 495 * will be returned
497 * 496 *
@@ -524,9 +523,6 @@ acpi_get_gpe_status(acpi_handle gpe_device,
524 523
525 status = acpi_hw_get_gpe_status(gpe_event_info, event_status); 524 status = acpi_hw_get_gpe_status(gpe_event_info, event_status);
526 525
527 if (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK)
528 *event_status |= ACPI_EVENT_FLAG_HANDLE;
529
530unlock_and_exit: 526unlock_and_exit:
531 acpi_os_release_lock(acpi_gbl_gpe_lock, flags); 527 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
532 return_ACPI_STATUS(status); 528 return_ACPI_STATUS(status);
diff --git a/drivers/acpi/acpica/hwgpe.c b/drivers/acpi/acpica/hwgpe.c
index ea62d40fd161..48ac7b7b59cd 100644
--- a/drivers/acpi/acpica/hwgpe.c
+++ b/drivers/acpi/acpica/hwgpe.c
@@ -202,7 +202,7 @@ acpi_status acpi_hw_clear_gpe(struct acpi_gpe_event_info * gpe_event_info)
202 202
203acpi_status 203acpi_status
204acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info, 204acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info,
205 acpi_event_status * event_status) 205 acpi_event_status *event_status)
206{ 206{
207 u32 in_byte; 207 u32 in_byte;
208 u32 register_bit; 208 u32 register_bit;
@@ -216,6 +216,13 @@ acpi_hw_get_gpe_status(struct acpi_gpe_event_info * gpe_event_info,
216 return (AE_BAD_PARAMETER); 216 return (AE_BAD_PARAMETER);
217 } 217 }
218 218
219 /* GPE currently handled? */
220
221 if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) !=
222 ACPI_GPE_DISPATCH_NONE) {
223 local_event_status |= ACPI_EVENT_FLAG_HAS_HANDLER;
224 }
225
219 /* Get the info block for the entire GPE register */ 226 /* Get the info block for the entire GPE register */
220 227
221 gpe_register_info = gpe_event_info->register_info; 228 gpe_register_info = gpe_event_info->register_info;
diff --git a/drivers/acpi/acpica/tbxfroot.c b/drivers/acpi/acpica/tbxfroot.c
index 65ab8fed3d5e..43a54af2b548 100644
--- a/drivers/acpi/acpica/tbxfroot.c
+++ b/drivers/acpi/acpica/tbxfroot.c
@@ -50,6 +50,36 @@ ACPI_MODULE_NAME("tbxfroot")
50 50
51/******************************************************************************* 51/*******************************************************************************
52 * 52 *
53 * FUNCTION: acpi_tb_get_rsdp_length
54 *
55 * PARAMETERS: rsdp - Pointer to RSDP
56 *
57 * RETURN: Table length
58 *
59 * DESCRIPTION: Get the length of the RSDP
60 *
61 ******************************************************************************/
62u32 acpi_tb_get_rsdp_length(struct acpi_table_rsdp *rsdp)
63{
64
65 if (!ACPI_VALIDATE_RSDP_SIG(rsdp->signature)) {
66
67 /* BAD Signature */
68
69 return (0);
70 }
71
72 /* "Length" field is available if table version >= 2 */
73
74 if (rsdp->revision >= 2) {
75 return (rsdp->length);
76 } else {
77 return (ACPI_RSDP_CHECKSUM_LENGTH);
78 }
79}
80
81/*******************************************************************************
82 *
53 * FUNCTION: acpi_tb_validate_rsdp 83 * FUNCTION: acpi_tb_validate_rsdp
54 * 84 *
55 * PARAMETERS: rsdp - Pointer to unvalidated RSDP 85 * PARAMETERS: rsdp - Pointer to unvalidated RSDP
@@ -59,7 +89,8 @@ ACPI_MODULE_NAME("tbxfroot")
59 * DESCRIPTION: Validate the RSDP (ptr) 89 * DESCRIPTION: Validate the RSDP (ptr)
60 * 90 *
61 ******************************************************************************/ 91 ******************************************************************************/
62acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp *rsdp) 92
93acpi_status acpi_tb_validate_rsdp(struct acpi_table_rsdp * rsdp)
63{ 94{
64 95
65 /* 96 /*
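
A hypothetical caller sketch for the new acpi_tb_get_rsdp_length() helper; the copy routine and its names are illustrative, it assumes the helper's declaration is visible to the caller (it lives in ACPICA's internal headers), and it relies only on the return convention shown above (0 on a bad signature, ACPI_RSDP_CHECKSUM_LENGTH for ACPI 1.0 tables):

#include <linux/acpi.h>
#include <linux/string.h>

/* Copy only the bytes that are valid for this RSDP revision; an ACPI 1.0
 * RSDP has no Length field, so only the checksum-covered prefix is read. */
static u32 copy_rsdp(struct acpi_table_rsdp *src, u8 *dst, u32 dst_size)
{
        u32 length = acpi_tb_get_rsdp_length(src);

        if (!length || length > dst_size)
                return 0;       /* bad signature or destination too small */

        memcpy(dst, src, length);
        return length;          /* number of bytes copied */
}
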
diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
index ed122e17636e..7556e7c4a055 100644
--- a/drivers/acpi/blacklist.c
+++ b/drivers/acpi/blacklist.c
@@ -290,6 +290,14 @@ static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
290 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3446"), 290 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3446"),
291 }, 291 },
292 }, 292 },
293 {
294 .callback = dmi_disable_osi_win8,
295 .ident = "Dell Vostro 3546",
296 .matches = {
297 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
298 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3546"),
299 },
300 },
293 301
294 /* 302 /*
295 * BIOS invocation of _OSI(Linux) is almost always a BIOS bug. 303 * BIOS invocation of _OSI(Linux) is almost always a BIOS bug.
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index bea6896be122..7db193160766 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -343,6 +343,7 @@ int acpi_device_update_power(struct acpi_device *device, int *state_p)
343 343
344 return 0; 344 return 0;
345} 345}
346EXPORT_SYMBOL_GPL(acpi_device_update_power);
346 347
347int acpi_bus_update_power(acpi_handle handle, int *state_p) 348int acpi_bus_update_power(acpi_handle handle, int *state_p)
348{ 349{
@@ -710,7 +711,7 @@ int acpi_pm_device_run_wake(struct device *phys_dev, bool enable)
710 return -ENODEV; 711 return -ENODEV;
711 } 712 }
712 713
713 return acpi_device_wakeup(adev, enable, ACPI_STATE_S0); 714 return acpi_device_wakeup(adev, ACPI_STATE_S0, enable);
714} 715}
715EXPORT_SYMBOL(acpi_pm_device_run_wake); 716EXPORT_SYMBOL(acpi_pm_device_run_wake);
716#endif /* CONFIG_PM_RUNTIME */ 717#endif /* CONFIG_PM_RUNTIME */
@@ -877,7 +878,7 @@ int acpi_dev_suspend_late(struct device *dev)
877 return 0; 878 return 0;
878 879
879 target_state = acpi_target_system_state(); 880 target_state = acpi_target_system_state();
880 wakeup = device_may_wakeup(dev); 881 wakeup = device_may_wakeup(dev) && acpi_device_can_wakeup(adev);
881 error = acpi_device_wakeup(adev, target_state, wakeup); 882 error = acpi_device_wakeup(adev, target_state, wakeup);
882 if (wakeup && error) 883 if (wakeup && error)
883 return error; 884 return error;
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index cb6066c809ea..5f9b74b9b71f 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -126,14 +126,16 @@ static int EC_FLAGS_MSI; /* Out-of-spec MSI controller */
126static int EC_FLAGS_VALIDATE_ECDT; /* ASUStec ECDTs need to be validated */ 126static int EC_FLAGS_VALIDATE_ECDT; /* ASUStec ECDTs need to be validated */
127static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */ 127static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */
128static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */ 128static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
129static int EC_FLAGS_QUERY_HANDSHAKE; /* Needs QR_EC issued when SCI_EVT set */
129 130
130/* -------------------------------------------------------------------------- 131/* --------------------------------------------------------------------------
131 Transaction Management 132 * Transaction Management
132 -------------------------------------------------------------------------- */ 133 * -------------------------------------------------------------------------- */
133 134
134static inline u8 acpi_ec_read_status(struct acpi_ec *ec) 135static inline u8 acpi_ec_read_status(struct acpi_ec *ec)
135{ 136{
136 u8 x = inb(ec->command_addr); 137 u8 x = inb(ec->command_addr);
138
137 pr_debug("EC_SC(R) = 0x%2.2x " 139 pr_debug("EC_SC(R) = 0x%2.2x "
138 "SCI_EVT=%d BURST=%d CMD=%d IBF=%d OBF=%d\n", 140 "SCI_EVT=%d BURST=%d CMD=%d IBF=%d OBF=%d\n",
139 x, 141 x,
@@ -148,6 +150,7 @@ static inline u8 acpi_ec_read_status(struct acpi_ec *ec)
148static inline u8 acpi_ec_read_data(struct acpi_ec *ec) 150static inline u8 acpi_ec_read_data(struct acpi_ec *ec)
149{ 151{
150 u8 x = inb(ec->data_addr); 152 u8 x = inb(ec->data_addr);
153
151 pr_debug("EC_DATA(R) = 0x%2.2x\n", x); 154 pr_debug("EC_DATA(R) = 0x%2.2x\n", x);
152 return x; 155 return x;
153} 156}
@@ -164,10 +167,32 @@ static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data)
164 outb(data, ec->data_addr); 167 outb(data, ec->data_addr);
165} 168}
166 169
170#ifdef DEBUG
171static const char *acpi_ec_cmd_string(u8 cmd)
172{
173 switch (cmd) {
174 case 0x80:
175 return "RD_EC";
176 case 0x81:
177 return "WR_EC";
178 case 0x82:
179 return "BE_EC";
180 case 0x83:
181 return "BD_EC";
182 case 0x84:
183 return "QR_EC";
184 }
185 return "UNKNOWN";
186}
187#else
188#define acpi_ec_cmd_string(cmd) "UNDEF"
189#endif
190
167static int ec_transaction_completed(struct acpi_ec *ec) 191static int ec_transaction_completed(struct acpi_ec *ec)
168{ 192{
169 unsigned long flags; 193 unsigned long flags;
170 int ret = 0; 194 int ret = 0;
195
171 spin_lock_irqsave(&ec->lock, flags); 196 spin_lock_irqsave(&ec->lock, flags);
172 if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE)) 197 if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE))
173 ret = 1; 198 ret = 1;
@@ -181,7 +206,8 @@ static bool advance_transaction(struct acpi_ec *ec)
181 u8 status; 206 u8 status;
182 bool wakeup = false; 207 bool wakeup = false;
183 208
184 pr_debug("===== %s =====\n", in_interrupt() ? "IRQ" : "TASK"); 209 pr_debug("===== %s (%d) =====\n",
210 in_interrupt() ? "IRQ" : "TASK", smp_processor_id());
185 status = acpi_ec_read_status(ec); 211 status = acpi_ec_read_status(ec);
186 t = ec->curr; 212 t = ec->curr;
187 if (!t) 213 if (!t)
@@ -198,7 +224,8 @@ static bool advance_transaction(struct acpi_ec *ec)
198 if (t->rlen == t->ri) { 224 if (t->rlen == t->ri) {
199 t->flags |= ACPI_EC_COMMAND_COMPLETE; 225 t->flags |= ACPI_EC_COMMAND_COMPLETE;
200 if (t->command == ACPI_EC_COMMAND_QUERY) 226 if (t->command == ACPI_EC_COMMAND_QUERY)
201 pr_debug("hardware QR_EC completion\n"); 227 pr_debug("***** Command(%s) hardware completion *****\n",
228 acpi_ec_cmd_string(t->command));
202 wakeup = true; 229 wakeup = true;
203 } 230 }
204 } else 231 } else
@@ -210,18 +237,14 @@ static bool advance_transaction(struct acpi_ec *ec)
210 } 237 }
211 return wakeup; 238 return wakeup;
212 } else { 239 } else {
213 /* 240 if (EC_FLAGS_QUERY_HANDSHAKE &&
214 * There is firmware refusing to respond QR_EC when SCI_EVT 241 !(status & ACPI_EC_FLAG_SCI) &&
215 * is not set, for which case, we complete the QR_EC
216 * without issuing it to the firmware.
217 * https://bugzilla.kernel.org/show_bug.cgi?id=86211
218 */
219 if (!(status & ACPI_EC_FLAG_SCI) &&
220 (t->command == ACPI_EC_COMMAND_QUERY)) { 242 (t->command == ACPI_EC_COMMAND_QUERY)) {
221 t->flags |= ACPI_EC_COMMAND_POLL; 243 t->flags |= ACPI_EC_COMMAND_POLL;
222 t->rdata[t->ri++] = 0x00; 244 t->rdata[t->ri++] = 0x00;
223 t->flags |= ACPI_EC_COMMAND_COMPLETE; 245 t->flags |= ACPI_EC_COMMAND_COMPLETE;
224 pr_debug("software QR_EC completion\n"); 246 pr_debug("***** Command(%s) software completion *****\n",
247 acpi_ec_cmd_string(t->command));
225 wakeup = true; 248 wakeup = true;
226 } else if ((status & ACPI_EC_FLAG_IBF) == 0) { 249 } else if ((status & ACPI_EC_FLAG_IBF) == 0) {
227 acpi_ec_write_cmd(ec, t->command); 250 acpi_ec_write_cmd(ec, t->command);
@@ -264,6 +287,7 @@ static int ec_poll(struct acpi_ec *ec)
264{ 287{
265 unsigned long flags; 288 unsigned long flags;
266 int repeat = 5; /* number of command restarts */ 289 int repeat = 5; /* number of command restarts */
290
267 while (repeat--) { 291 while (repeat--) {
268 unsigned long delay = jiffies + 292 unsigned long delay = jiffies +
269 msecs_to_jiffies(ec_delay); 293 msecs_to_jiffies(ec_delay);
@@ -296,18 +320,25 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
296{ 320{
297 unsigned long tmp; 321 unsigned long tmp;
298 int ret = 0; 322 int ret = 0;
323
299 if (EC_FLAGS_MSI) 324 if (EC_FLAGS_MSI)
300 udelay(ACPI_EC_MSI_UDELAY); 325 udelay(ACPI_EC_MSI_UDELAY);
301 /* start transaction */ 326 /* start transaction */
302 spin_lock_irqsave(&ec->lock, tmp); 327 spin_lock_irqsave(&ec->lock, tmp);
303 /* following two actions should be kept atomic */ 328 /* following two actions should be kept atomic */
304 ec->curr = t; 329 ec->curr = t;
330 pr_debug("***** Command(%s) started *****\n",
331 acpi_ec_cmd_string(t->command));
305 start_transaction(ec); 332 start_transaction(ec);
333 if (ec->curr->command == ACPI_EC_COMMAND_QUERY) {
334 clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags);
335 pr_debug("***** Event stopped *****\n");
336 }
306 spin_unlock_irqrestore(&ec->lock, tmp); 337 spin_unlock_irqrestore(&ec->lock, tmp);
307 ret = ec_poll(ec); 338 ret = ec_poll(ec);
308 spin_lock_irqsave(&ec->lock, tmp); 339 spin_lock_irqsave(&ec->lock, tmp);
309 if (ec->curr->command == ACPI_EC_COMMAND_QUERY) 340 pr_debug("***** Command(%s) stopped *****\n",
310 clear_bit(EC_FLAGS_QUERY_PENDING, &ec->flags); 341 acpi_ec_cmd_string(t->command));
311 ec->curr = NULL; 342 ec->curr = NULL;
312 spin_unlock_irqrestore(&ec->lock, tmp); 343 spin_unlock_irqrestore(&ec->lock, tmp);
313 return ret; 344 return ret;
@@ -317,6 +348,7 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
317{ 348{
318 int status; 349 int status;
319 u32 glk; 350 u32 glk;
351
320 if (!ec || (!t) || (t->wlen && !t->wdata) || (t->rlen && !t->rdata)) 352 if (!ec || (!t) || (t->wlen && !t->wdata) || (t->rlen && !t->rdata))
321 return -EINVAL; 353 return -EINVAL;
322 if (t->rdata) 354 if (t->rdata)
@@ -333,8 +365,6 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
333 goto unlock; 365 goto unlock;
334 } 366 }
335 } 367 }
336 pr_debug("transaction start (cmd=0x%02x, addr=0x%02x)\n",
337 t->command, t->wdata ? t->wdata[0] : 0);
338 /* disable GPE during transaction if storm is detected */ 368 /* disable GPE during transaction if storm is detected */
339 if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) { 369 if (test_bit(EC_FLAGS_GPE_STORM, &ec->flags)) {
340 /* It has to be disabled, so that it doesn't trigger. */ 370 /* It has to be disabled, so that it doesn't trigger. */
@@ -355,7 +385,6 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
355 t->irq_count); 385 t->irq_count);
356 set_bit(EC_FLAGS_GPE_STORM, &ec->flags); 386 set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
357 } 387 }
358 pr_debug("transaction end\n");
359 if (ec->global_lock) 388 if (ec->global_lock)
360 acpi_release_global_lock(glk); 389 acpi_release_global_lock(glk);
361unlock: 390unlock:
@@ -383,7 +412,7 @@ static int acpi_ec_burst_disable(struct acpi_ec *ec)
383 acpi_ec_transaction(ec, &t) : 0; 412 acpi_ec_transaction(ec, &t) : 0;
384} 413}
385 414
386static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 * data) 415static int acpi_ec_read(struct acpi_ec *ec, u8 address, u8 *data)
387{ 416{
388 int result; 417 int result;
389 u8 d; 418 u8 d;
@@ -419,10 +448,9 @@ int ec_read(u8 addr, u8 *val)
419 if (!err) { 448 if (!err) {
420 *val = temp_data; 449 *val = temp_data;
421 return 0; 450 return 0;
422 } else 451 }
423 return err; 452 return err;
424} 453}
425
426EXPORT_SYMBOL(ec_read); 454EXPORT_SYMBOL(ec_read);
427 455
428int ec_write(u8 addr, u8 val) 456int ec_write(u8 addr, u8 val)
@@ -436,22 +464,21 @@ int ec_write(u8 addr, u8 val)
436 464
437 return err; 465 return err;
438} 466}
439
440EXPORT_SYMBOL(ec_write); 467EXPORT_SYMBOL(ec_write);
441 468
442int ec_transaction(u8 command, 469int ec_transaction(u8 command,
443 const u8 * wdata, unsigned wdata_len, 470 const u8 *wdata, unsigned wdata_len,
444 u8 * rdata, unsigned rdata_len) 471 u8 *rdata, unsigned rdata_len)
445{ 472{
446 struct transaction t = {.command = command, 473 struct transaction t = {.command = command,
447 .wdata = wdata, .rdata = rdata, 474 .wdata = wdata, .rdata = rdata,
448 .wlen = wdata_len, .rlen = rdata_len}; 475 .wlen = wdata_len, .rlen = rdata_len};
476
449 if (!first_ec) 477 if (!first_ec)
450 return -ENODEV; 478 return -ENODEV;
451 479
452 return acpi_ec_transaction(first_ec, &t); 480 return acpi_ec_transaction(first_ec, &t);
453} 481}
454
455EXPORT_SYMBOL(ec_transaction); 482EXPORT_SYMBOL(ec_transaction);
456 483
457/* Get the handle to the EC device */ 484/* Get the handle to the EC device */
@@ -461,7 +488,6 @@ acpi_handle ec_get_handle(void)
461 return NULL; 488 return NULL;
462 return first_ec->handle; 489 return first_ec->handle;
463} 490}
464
465EXPORT_SYMBOL(ec_get_handle); 491EXPORT_SYMBOL(ec_get_handle);
466 492
467/* 493/*
@@ -525,13 +551,14 @@ void acpi_ec_unblock_transactions_early(void)
525 clear_bit(EC_FLAGS_BLOCKED, &first_ec->flags); 551 clear_bit(EC_FLAGS_BLOCKED, &first_ec->flags);
526} 552}
527 553
528static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 * data) 554static int acpi_ec_query_unlocked(struct acpi_ec *ec, u8 *data)
529{ 555{
530 int result; 556 int result;
531 u8 d; 557 u8 d;
532 struct transaction t = {.command = ACPI_EC_COMMAND_QUERY, 558 struct transaction t = {.command = ACPI_EC_COMMAND_QUERY,
533 .wdata = NULL, .rdata = &d, 559 .wdata = NULL, .rdata = &d,
534 .wlen = 0, .rlen = 1}; 560 .wlen = 0, .rlen = 1};
561
535 if (!ec || !data) 562 if (!ec || !data)
536 return -EINVAL; 563 return -EINVAL;
537 /* 564 /*
@@ -557,6 +584,7 @@ int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
557{ 584{
558 struct acpi_ec_query_handler *handler = 585 struct acpi_ec_query_handler *handler =
559 kzalloc(sizeof(struct acpi_ec_query_handler), GFP_KERNEL); 586 kzalloc(sizeof(struct acpi_ec_query_handler), GFP_KERNEL);
587
560 if (!handler) 588 if (!handler)
561 return -ENOMEM; 589 return -ENOMEM;
562 590
@@ -569,12 +597,12 @@ int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
569 mutex_unlock(&ec->mutex); 597 mutex_unlock(&ec->mutex);
570 return 0; 598 return 0;
571} 599}
572
573EXPORT_SYMBOL_GPL(acpi_ec_add_query_handler); 600EXPORT_SYMBOL_GPL(acpi_ec_add_query_handler);
574 601
575void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit) 602void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
576{ 603{
577 struct acpi_ec_query_handler *handler, *tmp; 604 struct acpi_ec_query_handler *handler, *tmp;
605
578 mutex_lock(&ec->mutex); 606 mutex_lock(&ec->mutex);
579 list_for_each_entry_safe(handler, tmp, &ec->list, node) { 607 list_for_each_entry_safe(handler, tmp, &ec->list, node) {
580 if (query_bit == handler->query_bit) { 608 if (query_bit == handler->query_bit) {
@@ -584,20 +612,20 @@ void acpi_ec_remove_query_handler(struct acpi_ec *ec, u8 query_bit)
584 } 612 }
585 mutex_unlock(&ec->mutex); 613 mutex_unlock(&ec->mutex);
586} 614}
587
588EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler); 615EXPORT_SYMBOL_GPL(acpi_ec_remove_query_handler);
589 616
590static void acpi_ec_run(void *cxt) 617static void acpi_ec_run(void *cxt)
591{ 618{
592 struct acpi_ec_query_handler *handler = cxt; 619 struct acpi_ec_query_handler *handler = cxt;
620
593 if (!handler) 621 if (!handler)
594 return; 622 return;
595 pr_debug("start query execution\n"); 623 pr_debug("##### Query(0x%02x) started #####\n", handler->query_bit);
596 if (handler->func) 624 if (handler->func)
597 handler->func(handler->data); 625 handler->func(handler->data);
598 else if (handler->handle) 626 else if (handler->handle)
599 acpi_evaluate_object(handler->handle, NULL, NULL, NULL); 627 acpi_evaluate_object(handler->handle, NULL, NULL, NULL);
600 pr_debug("stop query execution\n"); 628 pr_debug("##### Query(0x%02x) stopped #####\n", handler->query_bit);
601 kfree(handler); 629 kfree(handler);
602} 630}
603 631
@@ -620,8 +648,8 @@ static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data)
620 if (!copy) 648 if (!copy)
621 return -ENOMEM; 649 return -ENOMEM;
622 memcpy(copy, handler, sizeof(*copy)); 650 memcpy(copy, handler, sizeof(*copy));
623 pr_debug("push query execution (0x%2x) on queue\n", 651 pr_debug("##### Query(0x%02x) scheduled #####\n",
624 value); 652 handler->query_bit);
625 return acpi_os_execute((copy->func) ? 653 return acpi_os_execute((copy->func) ?
626 OSL_NOTIFY_HANDLER : OSL_GPE_HANDLER, 654 OSL_NOTIFY_HANDLER : OSL_GPE_HANDLER,
627 acpi_ec_run, copy); 655 acpi_ec_run, copy);
@@ -633,6 +661,7 @@ static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data)
633static void acpi_ec_gpe_query(void *ec_cxt) 661static void acpi_ec_gpe_query(void *ec_cxt)
634{ 662{
635 struct acpi_ec *ec = ec_cxt; 663 struct acpi_ec *ec = ec_cxt;
664
636 if (!ec) 665 if (!ec)
637 return; 666 return;
638 mutex_lock(&ec->mutex); 667 mutex_lock(&ec->mutex);
@@ -644,7 +673,7 @@ static int ec_check_sci(struct acpi_ec *ec, u8 state)
644{ 673{
645 if (state & ACPI_EC_FLAG_SCI) { 674 if (state & ACPI_EC_FLAG_SCI) {
646 if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) { 675 if (!test_and_set_bit(EC_FLAGS_QUERY_PENDING, &ec->flags)) {
647 pr_debug("push gpe query to the queue\n"); 676 pr_debug("***** Event started *****\n");
648 return acpi_os_execute(OSL_NOTIFY_HANDLER, 677 return acpi_os_execute(OSL_NOTIFY_HANDLER,
649 acpi_ec_gpe_query, ec); 678 acpi_ec_gpe_query, ec);
650 } 679 }
@@ -667,8 +696,8 @@ static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
667} 696}
668 697
669/* -------------------------------------------------------------------------- 698/* --------------------------------------------------------------------------
670 Address Space Management 699 * Address Space Management
671 -------------------------------------------------------------------------- */ 700 * -------------------------------------------------------------------------- */
672 701
673static acpi_status 702static acpi_status
674acpi_ec_space_handler(u32 function, acpi_physical_address address, 703acpi_ec_space_handler(u32 function, acpi_physical_address address,
@@ -699,27 +728,26 @@ acpi_ec_space_handler(u32 function, acpi_physical_address address,
699 switch (result) { 728 switch (result) {
700 case -EINVAL: 729 case -EINVAL:
701 return AE_BAD_PARAMETER; 730 return AE_BAD_PARAMETER;
702 break;
703 case -ENODEV: 731 case -ENODEV:
704 return AE_NOT_FOUND; 732 return AE_NOT_FOUND;
705 break;
706 case -ETIME: 733 case -ETIME:
707 return AE_TIME; 734 return AE_TIME;
708 break;
709 default: 735 default:
710 return AE_OK; 736 return AE_OK;
711 } 737 }
712} 738}
713 739
714/* -------------------------------------------------------------------------- 740/* --------------------------------------------------------------------------
715 Driver Interface 741 * Driver Interface
716 -------------------------------------------------------------------------- */ 742 * -------------------------------------------------------------------------- */
743
717static acpi_status 744static acpi_status
718ec_parse_io_ports(struct acpi_resource *resource, void *context); 745ec_parse_io_ports(struct acpi_resource *resource, void *context);
719 746
720static struct acpi_ec *make_acpi_ec(void) 747static struct acpi_ec *make_acpi_ec(void)
721{ 748{
722 struct acpi_ec *ec = kzalloc(sizeof(struct acpi_ec), GFP_KERNEL); 749 struct acpi_ec *ec = kzalloc(sizeof(struct acpi_ec), GFP_KERNEL);
750
723 if (!ec) 751 if (!ec)
724 return NULL; 752 return NULL;
725 ec->flags = 1 << EC_FLAGS_QUERY_PENDING; 753 ec->flags = 1 << EC_FLAGS_QUERY_PENDING;
@@ -742,9 +770,8 @@ acpi_ec_register_query_methods(acpi_handle handle, u32 level,
742 770
743 status = acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer); 771 status = acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer);
744 772
745 if (ACPI_SUCCESS(status) && sscanf(node_name, "_Q%x", &value) == 1) { 773 if (ACPI_SUCCESS(status) && sscanf(node_name, "_Q%x", &value) == 1)
746 acpi_ec_add_query_handler(ec, value, handle, NULL, NULL); 774 acpi_ec_add_query_handler(ec, value, handle, NULL, NULL);
747 }
748 return AE_OK; 775 return AE_OK;
749} 776}
750 777
@@ -753,7 +780,6 @@ ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval)
753{ 780{
754 acpi_status status; 781 acpi_status status;
755 unsigned long long tmp = 0; 782 unsigned long long tmp = 0;
756
757 struct acpi_ec *ec = context; 783 struct acpi_ec *ec = context;
758 784
759 /* clear addr values, ec_parse_io_ports depend on it */ 785 /* clear addr values, ec_parse_io_ports depend on it */
@@ -781,6 +807,7 @@ ec_parse_device(acpi_handle handle, u32 Level, void *context, void **retval)
781static int ec_install_handlers(struct acpi_ec *ec) 807static int ec_install_handlers(struct acpi_ec *ec)
782{ 808{
783 acpi_status status; 809 acpi_status status;
810
784 if (test_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags)) 811 if (test_bit(EC_FLAGS_HANDLERS_INSTALLED, &ec->flags))
785 return 0; 812 return 0;
786 status = acpi_install_gpe_handler(NULL, ec->gpe, 813 status = acpi_install_gpe_handler(NULL, ec->gpe,
@@ -981,6 +1008,18 @@ static int ec_enlarge_storm_threshold(const struct dmi_system_id *id)
981} 1008}
982 1009
983/* 1010/*
1011 * Acer EC firmware refuses to respond QR_EC when SCI_EVT is not set, for
1012 * which case, we complete the QR_EC without issuing it to the firmware.
1013 * https://bugzilla.kernel.org/show_bug.cgi?id=86211
1014 */
1015static int ec_flag_query_handshake(const struct dmi_system_id *id)
1016{
1017 pr_debug("Detected the EC firmware requiring QR_EC issued when SCI_EVT set\n");
1018 EC_FLAGS_QUERY_HANDSHAKE = 1;
1019 return 0;
1020}
1021
1022/*
984 * On some hardware it is necessary to clear events accumulated by the EC during 1023 * On some hardware it is necessary to clear events accumulated by the EC during
985 * sleep. These ECs stop reporting GPEs until they are manually polled, if too 1024 * sleep. These ECs stop reporting GPEs until they are manually polled, if too
986 * many events are accumulated. (e.g. Samsung Series 5/9 notebooks) 1025 * many events are accumulated. (e.g. Samsung Series 5/9 notebooks)
@@ -1054,6 +1093,9 @@ static struct dmi_system_id ec_dmi_table[] __initdata = {
1054 { 1093 {
1055 ec_clear_on_resume, "Samsung hardware", { 1094 ec_clear_on_resume, "Samsung hardware", {
1056 DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL}, 1095 DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD.")}, NULL},
1096 {
1097 ec_flag_query_handshake, "Acer hardware", {
1098 DMI_MATCH(DMI_SYS_VENDOR, "Acer"), }, NULL},
1057 {}, 1099 {},
1058}; 1100};
1059 1101
@@ -1078,7 +1120,8 @@ int __init acpi_ec_ecdt_probe(void)
1078 boot_ec->data_addr = ecdt_ptr->data.address; 1120 boot_ec->data_addr = ecdt_ptr->data.address;
1079 boot_ec->gpe = ecdt_ptr->gpe; 1121 boot_ec->gpe = ecdt_ptr->gpe;
1080 boot_ec->handle = ACPI_ROOT_OBJECT; 1122 boot_ec->handle = ACPI_ROOT_OBJECT;
1081 acpi_get_handle(ACPI_ROOT_OBJECT, ecdt_ptr->id, &boot_ec->handle); 1123 acpi_get_handle(ACPI_ROOT_OBJECT, ecdt_ptr->id,
1124 &boot_ec->handle);
1082 /* Don't trust ECDT, which comes from ASUSTek */ 1125 /* Don't trust ECDT, which comes from ASUSTek */
1083 if (!EC_FLAGS_VALIDATE_ECDT) 1126 if (!EC_FLAGS_VALIDATE_ECDT)
1084 goto install; 1127 goto install;
@@ -1162,6 +1205,5 @@ static void __exit acpi_ec_exit(void)
1162{ 1205{
1163 1206
1164 acpi_bus_unregister_driver(&acpi_ec_driver); 1207 acpi_bus_unregister_driver(&acpi_ec_driver);
1165 return;
1166} 1208}
1167#endif /* 0 */ 1209#endif /* 0 */
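
A minimal sketch of the QR_EC decision this patch narrows, using plain booleans instead of the driver's status and flag types; software completion of a query command now applies only to firmware tagged by the Acer DMI quirk rather than unconditionally:

#include <linux/types.h>

static bool complete_query_in_software(bool query_handshake_quirk,
                                       bool sci_evt_set, bool is_query_cmd)
{
        /* Mirrors the new condition in advance_transaction(): quirk present,
         * SCI_EVT not set, and the pending command is QR_EC. */
        return query_handshake_quirk && !sci_evt_set && is_query_cmd;
}
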
diff --git a/drivers/acpi/fan.c b/drivers/acpi/fan.c
index 5328b1090e08..caf9b76b7ef8 100644
--- a/drivers/acpi/fan.c
+++ b/drivers/acpi/fan.c
@@ -30,22 +30,19 @@
30#include <linux/uaccess.h> 30#include <linux/uaccess.h>
31#include <linux/thermal.h> 31#include <linux/thermal.h>
32#include <linux/acpi.h> 32#include <linux/acpi.h>
33 33#include <linux/platform_device.h>
34#define ACPI_FAN_CLASS "fan" 34#include <linux/sort.h>
35#define ACPI_FAN_FILE_STATE "state"
36
37#define _COMPONENT ACPI_FAN_COMPONENT
38ACPI_MODULE_NAME("fan");
39 35
40MODULE_AUTHOR("Paul Diefenbaugh"); 36MODULE_AUTHOR("Paul Diefenbaugh");
41MODULE_DESCRIPTION("ACPI Fan Driver"); 37MODULE_DESCRIPTION("ACPI Fan Driver");
42MODULE_LICENSE("GPL"); 38MODULE_LICENSE("GPL");
43 39
44static int acpi_fan_add(struct acpi_device *device); 40static int acpi_fan_probe(struct platform_device *pdev);
45static int acpi_fan_remove(struct acpi_device *device); 41static int acpi_fan_remove(struct platform_device *pdev);
46 42
47static const struct acpi_device_id fan_device_ids[] = { 43static const struct acpi_device_id fan_device_ids[] = {
48 {"PNP0C0B", 0}, 44 {"PNP0C0B", 0},
45 {"INT3404", 0},
49 {"", 0}, 46 {"", 0},
50}; 47};
51MODULE_DEVICE_TABLE(acpi, fan_device_ids); 48MODULE_DEVICE_TABLE(acpi, fan_device_ids);
@@ -64,37 +61,100 @@ static struct dev_pm_ops acpi_fan_pm = {
64#define FAN_PM_OPS_PTR NULL 61#define FAN_PM_OPS_PTR NULL
65#endif 62#endif
66 63
67static struct acpi_driver acpi_fan_driver = { 64struct acpi_fan_fps {
68 .name = "fan", 65 u64 control;
69 .class = ACPI_FAN_CLASS, 66 u64 trip_point;
70 .ids = fan_device_ids, 67 u64 speed;
71 .ops = { 68 u64 noise_level;
72 .add = acpi_fan_add, 69 u64 power;
73 .remove = acpi_fan_remove, 70};
74 }, 71
75 .drv.pm = FAN_PM_OPS_PTR, 72struct acpi_fan_fif {
73 u64 revision;
74 u64 fine_grain_ctrl;
75 u64 step_size;
76 u64 low_speed_notification;
77};
78
79struct acpi_fan {
80 bool acpi4;
81 struct acpi_fan_fif fif;
82 struct acpi_fan_fps *fps;
83 int fps_count;
84 struct thermal_cooling_device *cdev;
85};
86
87static struct platform_driver acpi_fan_driver = {
88 .probe = acpi_fan_probe,
89 .remove = acpi_fan_remove,
90 .driver = {
91 .name = "acpi-fan",
92 .acpi_match_table = fan_device_ids,
93 .pm = FAN_PM_OPS_PTR,
94 },
76}; 95};
77 96
78/* thermal cooling device callbacks */ 97/* thermal cooling device callbacks */
79static int fan_get_max_state(struct thermal_cooling_device *cdev, unsigned long 98static int fan_get_max_state(struct thermal_cooling_device *cdev, unsigned long
80 *state) 99 *state)
81{ 100{
82 /* ACPI fan device only support two states: ON/OFF */ 101 struct acpi_device *device = cdev->devdata;
83 *state = 1; 102 struct acpi_fan *fan = acpi_driver_data(device);
103
104 if (fan->acpi4)
105 *state = fan->fps_count - 1;
106 else
107 *state = 1;
84 return 0; 108 return 0;
85} 109}
86 110
87static int fan_get_cur_state(struct thermal_cooling_device *cdev, unsigned long 111static int fan_get_state_acpi4(struct acpi_device *device, unsigned long *state)
88 *state) 112{
113 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
114 struct acpi_fan *fan = acpi_driver_data(device);
115 union acpi_object *obj;
116 acpi_status status;
117 int control, i;
118
119 status = acpi_evaluate_object(device->handle, "_FST", NULL, &buffer);
120 if (ACPI_FAILURE(status)) {
121 dev_err(&device->dev, "Get fan state failed\n");
122 return status;
123 }
124
125 obj = buffer.pointer;
126 if (!obj || obj->type != ACPI_TYPE_PACKAGE ||
127 obj->package.count != 3 ||
128 obj->package.elements[1].type != ACPI_TYPE_INTEGER) {
129 dev_err(&device->dev, "Invalid _FST data\n");
130 status = -EINVAL;
131 goto err;
132 }
133
134 control = obj->package.elements[1].integer.value;
135 for (i = 0; i < fan->fps_count; i++) {
136 if (control == fan->fps[i].control)
137 break;
138 }
139 if (i == fan->fps_count) {
140 dev_dbg(&device->dev, "Invalid control value returned\n");
141 status = -EINVAL;
142 goto err;
143 }
144
145 *state = i;
146
147err:
148 kfree(obj);
149 return status;
150}
151
152static int fan_get_state(struct acpi_device *device, unsigned long *state)
89{ 153{
90 struct acpi_device *device = cdev->devdata;
91 int result; 154 int result;
92 int acpi_state = ACPI_STATE_D0; 155 int acpi_state = ACPI_STATE_D0;
93 156
94 if (!device) 157 result = acpi_device_update_power(device, &acpi_state);
95 return -EINVAL;
96
97 result = acpi_bus_update_power(device->handle, &acpi_state);
98 if (result) 158 if (result)
99 return result; 159 return result;
100 160
@@ -103,21 +163,57 @@ static int fan_get_cur_state(struct thermal_cooling_device *cdev, unsigned long
103 return 0; 163 return 0;
104} 164}
105 165
106static int 166static int fan_get_cur_state(struct thermal_cooling_device *cdev, unsigned long
107fan_set_cur_state(struct thermal_cooling_device *cdev, unsigned long state) 167 *state)
108{ 168{
109 struct acpi_device *device = cdev->devdata; 169 struct acpi_device *device = cdev->devdata;
110 int result; 170 struct acpi_fan *fan = acpi_driver_data(device);
111 171
112 if (!device || (state != 0 && state != 1)) 172 if (fan->acpi4)
173 return fan_get_state_acpi4(device, state);
174 else
175 return fan_get_state(device, state);
176}
177
178static int fan_set_state(struct acpi_device *device, unsigned long state)
179{
180 if (state != 0 && state != 1)
113 return -EINVAL; 181 return -EINVAL;
114 182
115 result = acpi_bus_set_power(device->handle, 183 return acpi_device_set_power(device,
116 state ? ACPI_STATE_D0 : ACPI_STATE_D3_COLD); 184 state ? ACPI_STATE_D0 : ACPI_STATE_D3_COLD);
185}
117 186
118 return result; 187static int fan_set_state_acpi4(struct acpi_device *device, unsigned long state)
188{
189 struct acpi_fan *fan = acpi_driver_data(device);
190 acpi_status status;
191
192 if (state >= fan->fps_count)
193 return -EINVAL;
194
195 status = acpi_execute_simple_method(device->handle, "_FSL",
196 fan->fps[state].control);
197 if (ACPI_FAILURE(status)) {
198 dev_dbg(&device->dev, "Failed to set state by _FSL\n");
199 return status;
200 }
201
202 return 0;
119} 203}
120 204
205static int
206fan_set_cur_state(struct thermal_cooling_device *cdev, unsigned long state)
207{
208 struct acpi_device *device = cdev->devdata;
209 struct acpi_fan *fan = acpi_driver_data(device);
210
211 if (fan->acpi4)
212 return fan_set_state_acpi4(device, state);
213 else
214 return fan_set_state(device, state);
215 }
216
121static const struct thermal_cooling_device_ops fan_cooling_ops = { 217static const struct thermal_cooling_device_ops fan_cooling_ops = {
122 .get_max_state = fan_get_max_state, 218 .get_max_state = fan_get_max_state,
123 .get_cur_state = fan_get_cur_state, 219 .get_cur_state = fan_get_cur_state,
@@ -129,21 +225,125 @@ static const struct thermal_cooling_device_ops fan_cooling_ops = {
129 * -------------------------------------------------------------------------- 225 * --------------------------------------------------------------------------
130*/ 226*/
131 227
132static int acpi_fan_add(struct acpi_device *device) 228static bool acpi_fan_is_acpi4(struct acpi_device *device)
133{ 229{
134 int result = 0; 230 return acpi_has_method(device->handle, "_FIF") &&
135 struct thermal_cooling_device *cdev; 231 acpi_has_method(device->handle, "_FPS") &&
232 acpi_has_method(device->handle, "_FSL") &&
233 acpi_has_method(device->handle, "_FST");
234}
136 235
137 if (!device) 236static int acpi_fan_get_fif(struct acpi_device *device)
138 return -EINVAL; 237{
238 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
239 struct acpi_fan *fan = acpi_driver_data(device);
240 struct acpi_buffer format = { sizeof("NNNN"), "NNNN" };
241 struct acpi_buffer fif = { sizeof(fan->fif), &fan->fif };
242 union acpi_object *obj;
243 acpi_status status;
244
245 status = acpi_evaluate_object(device->handle, "_FIF", NULL, &buffer);
246 if (ACPI_FAILURE(status))
247 return status;
248
249 obj = buffer.pointer;
250 if (!obj || obj->type != ACPI_TYPE_PACKAGE) {
251 dev_err(&device->dev, "Invalid _FIF data\n");
252 status = -EINVAL;
253 goto err;
254 }
139 255
140 strcpy(acpi_device_name(device), "Fan"); 256 status = acpi_extract_package(obj, &format, &fif);
141 strcpy(acpi_device_class(device), ACPI_FAN_CLASS); 257 if (ACPI_FAILURE(status)) {
258 dev_err(&device->dev, "Invalid _FIF element\n");
259 status = -EINVAL;
260 }
142 261
143 result = acpi_bus_update_power(device->handle, NULL); 262err:
144 if (result) { 263 kfree(obj);
145 dev_err(&device->dev, "Setting initial power state\n"); 264 return status;
146 goto end; 265}
266
267static int acpi_fan_speed_cmp(const void *a, const void *b)
268{
269 const struct acpi_fan_fps *fps1 = a;
270 const struct acpi_fan_fps *fps2 = b;
271 return fps1->speed - fps2->speed;
272}
273
274static int acpi_fan_get_fps(struct acpi_device *device)
275{
276 struct acpi_fan *fan = acpi_driver_data(device);
277 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
278 union acpi_object *obj;
279 acpi_status status;
280 int i;
281
282 status = acpi_evaluate_object(device->handle, "_FPS", NULL, &buffer);
283 if (ACPI_FAILURE(status))
284 return status;
285
286 obj = buffer.pointer;
287 if (!obj || obj->type != ACPI_TYPE_PACKAGE || obj->package.count < 2) {
288 dev_err(&device->dev, "Invalid _FPS data\n");
289 status = -EINVAL;
290 goto err;
291 }
292
293 fan->fps_count = obj->package.count - 1; /* minus revision field */
294 fan->fps = devm_kzalloc(&device->dev,
295 fan->fps_count * sizeof(struct acpi_fan_fps),
296 GFP_KERNEL);
297 if (!fan->fps) {
298 dev_err(&device->dev, "Not enough memory\n");
299 status = -ENOMEM;
300 goto err;
301 }
302 for (i = 0; i < fan->fps_count; i++) {
303 struct acpi_buffer format = { sizeof("NNNNN"), "NNNNN" };
304 struct acpi_buffer fps = { sizeof(fan->fps[i]), &fan->fps[i] };
305 status = acpi_extract_package(&obj->package.elements[i + 1],
306 &format, &fps);
307 if (ACPI_FAILURE(status)) {
308 dev_err(&device->dev, "Invalid _FPS element\n");
309 break;
310 }
311 }
312
313 /* sort the state array according to fan speed in increase order */
314 sort(fan->fps, fan->fps_count, sizeof(*fan->fps),
315 acpi_fan_speed_cmp, NULL);
316
317err:
318 kfree(obj);
319 return status;
320}
321
322static int acpi_fan_probe(struct platform_device *pdev)
323{
324 int result = 0;
325 struct thermal_cooling_device *cdev;
326 struct acpi_fan *fan;
327 struct acpi_device *device = ACPI_COMPANION(&pdev->dev);
328
329 fan = devm_kzalloc(&pdev->dev, sizeof(*fan), GFP_KERNEL);
330 if (!fan) {
331 dev_err(&device->dev, "No memory for fan\n");
332 return -ENOMEM;
333 }
334 device->driver_data = fan;
335 platform_set_drvdata(pdev, fan);
336
337 if (acpi_fan_is_acpi4(device)) {
338 if (acpi_fan_get_fif(device) || acpi_fan_get_fps(device))
339 goto end;
340 fan->acpi4 = true;
341 } else {
342 result = acpi_device_update_power(device, NULL);
343 if (result) {
344 dev_err(&device->dev, "Setting initial power state\n");
345 goto end;
346 }
147 } 347 }
148 348
149 cdev = thermal_cooling_device_register("Fan", device, 349 cdev = thermal_cooling_device_register("Fan", device,
@@ -153,44 +353,32 @@ static int acpi_fan_add(struct acpi_device *device)
153 goto end; 353 goto end;
154 } 354 }
155 355
156 dev_dbg(&device->dev, "registered as cooling_device%d\n", cdev->id); 356 dev_dbg(&pdev->dev, "registered as cooling_device%d\n", cdev->id);
157 357
158 device->driver_data = cdev; 358 fan->cdev = cdev;
159 result = sysfs_create_link(&device->dev.kobj, 359 result = sysfs_create_link(&pdev->dev.kobj,
160 &cdev->device.kobj, 360 &cdev->device.kobj,
161 "thermal_cooling"); 361 "thermal_cooling");
162 if (result) 362 if (result)
163 dev_err(&device->dev, "Failed to create sysfs link " 363 dev_err(&pdev->dev, "Failed to create sysfs link 'thermal_cooling'\n");
164 "'thermal_cooling'\n");
165 364
166 result = sysfs_create_link(&cdev->device.kobj, 365 result = sysfs_create_link(&cdev->device.kobj,
167 &device->dev.kobj, 366 &pdev->dev.kobj,
168 "device"); 367 "device");
169 if (result) 368 if (result)
170 dev_err(&device->dev, "Failed to create sysfs link 'device'\n"); 369 dev_err(&pdev->dev, "Failed to create sysfs link 'device'\n");
171
172 dev_info(&device->dev, "ACPI: %s [%s] (%s)\n",
173 acpi_device_name(device), acpi_device_bid(device),
174 !device->power.state ? "on" : "off");
175 370
176end: 371end:
177 return result; 372 return result;
178} 373}
179 374
180static int acpi_fan_remove(struct acpi_device *device) 375static int acpi_fan_remove(struct platform_device *pdev)
181{ 376{
182 struct thermal_cooling_device *cdev; 377 struct acpi_fan *fan = platform_get_drvdata(pdev);
183
184 if (!device)
185 return -EINVAL;
186
187 cdev = acpi_driver_data(device);
188 if (!cdev)
189 return -EINVAL;
190 378
191 sysfs_remove_link(&device->dev.kobj, "thermal_cooling"); 379 sysfs_remove_link(&pdev->dev.kobj, "thermal_cooling");
192 sysfs_remove_link(&cdev->device.kobj, "device"); 380 sysfs_remove_link(&fan->cdev->device.kobj, "device");
193 thermal_cooling_device_unregister(cdev); 381 thermal_cooling_device_unregister(fan->cdev);
194 382
195 return 0; 383 return 0;
196} 384}
@@ -198,10 +386,11 @@ static int acpi_fan_remove(struct acpi_device *device)
198#ifdef CONFIG_PM_SLEEP 386#ifdef CONFIG_PM_SLEEP
199static int acpi_fan_suspend(struct device *dev) 387static int acpi_fan_suspend(struct device *dev)
200{ 388{
201 if (!dev) 389 struct acpi_fan *fan = dev_get_drvdata(dev);
202 return -EINVAL; 390 if (fan->acpi4)
391 return 0;
203 392
204 acpi_bus_set_power(to_acpi_device(dev)->handle, ACPI_STATE_D0); 393 acpi_device_set_power(ACPI_COMPANION(dev), ACPI_STATE_D0);
205 394
206 return AE_OK; 395 return AE_OK;
207} 396}
@@ -209,11 +398,12 @@ static int acpi_fan_suspend(struct device *dev)
209static int acpi_fan_resume(struct device *dev) 398static int acpi_fan_resume(struct device *dev)
210{ 399{
211 int result; 400 int result;
401 struct acpi_fan *fan = dev_get_drvdata(dev);
212 402
213 if (!dev) 403 if (fan->acpi4)
214 return -EINVAL; 404 return 0;
215 405
216 result = acpi_bus_update_power(to_acpi_device(dev)->handle, NULL); 406 result = acpi_device_update_power(ACPI_COMPANION(dev), NULL);
217 if (result) 407 if (result)
218 dev_err(dev, "Error updating fan power state\n"); 408 dev_err(dev, "Error updating fan power state\n");
219 409
@@ -221,4 +411,4 @@ static int acpi_fan_resume(struct device *dev)
221} 411}
222#endif 412#endif
223 413
224module_acpi_driver(acpi_fan_driver); 414module_platform_driver(acpi_fan_driver);
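
A standalone sketch of the _FST lookup introduced above; the struct mirrors the _FPS package layout used by the driver (Control, TripPoint, Speed, NoiseLevel, Power), everything else is illustrative:

#include <linux/types.h>

struct fps_entry {
        u64 control;
        u64 trip_point;
        u64 speed;
        u64 noise_level;
        u64 power;
};

/* Map the Control value returned by _FST back to a cooling-state index,
 * as fan_get_state_acpi4() does over the speed-sorted state table. */
static int control_to_state(const struct fps_entry *fps, int count, u64 control)
{
        int i;

        for (i = 0; i < count; i++)
                if (fps[i].control == control)
                        return i;
        return -1;      /* firmware returned a value not listed in _FPS */
}
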
diff --git a/drivers/acpi/int340x_thermal.c b/drivers/acpi/int340x_thermal.c
new file mode 100644
index 000000000000..a27d31d1ba24
--- /dev/null
+++ b/drivers/acpi/int340x_thermal.c
@@ -0,0 +1,51 @@
1/*
2 * ACPI support for int340x thermal drivers
3 *
4 * Copyright (C) 2014, Intel Corporation
5 * Authors: Zhang Rui <rui.zhang@intel.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/acpi.h>
13#include <linux/module.h>
14
15#include "internal.h"
16
17#define DO_ENUMERATION 0x01
18static const struct acpi_device_id int340x_thermal_device_ids[] = {
19 {"INT3400", DO_ENUMERATION },
20 {"INT3401"},
21 {"INT3402"},
22 {"INT3403"},
23 {"INT3404"},
24 {"INT3406"},
25 {"INT3407"},
26 {"INT3408"},
27 {"INT3409"},
28 {"INT340A"},
29 {"INT340B"},
30 {""},
31};
32
33static int int340x_thermal_handler_attach(struct acpi_device *adev,
34 const struct acpi_device_id *id)
35{
36#if defined(CONFIG_INT340X_THERMAL) || defined(CONFIG_INT340X_THERMAL_MODULE)
37 if (id->driver_data == DO_ENUMERATION)
38 acpi_create_platform_device(adev);
39#endif
40 return 1;
41}
42
43static struct acpi_scan_handler int340x_thermal_handler = {
44 .ids = int340x_thermal_device_ids,
45 .attach = int340x_thermal_handler_attach,
46};
47
48void __init acpi_int340x_thermal_init(void)
49{
50 acpi_scan_add_handler(&int340x_thermal_handler);
51}
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 4c5cf77e7576..447f6d679b29 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -31,6 +31,7 @@ void acpi_pci_link_init(void);
31void acpi_processor_init(void); 31void acpi_processor_init(void);
32void acpi_platform_init(void); 32void acpi_platform_init(void);
33void acpi_pnp_init(void); 33void acpi_pnp_init(void);
34void acpi_int340x_thermal_init(void);
34int acpi_sysfs_init(void); 35int acpi_sysfs_init(void);
35void acpi_container_init(void); 36void acpi_container_init(void);
36void acpi_memory_hotplug_init(void); 37void acpi_memory_hotplug_init(void);
@@ -103,8 +104,6 @@ int acpi_power_get_inferred_state(struct acpi_device *device, int *state);
103int acpi_power_on_resources(struct acpi_device *device, int state); 104int acpi_power_on_resources(struct acpi_device *device, int state);
104int acpi_power_transition(struct acpi_device *device, int state); 105int acpi_power_transition(struct acpi_device *device, int state);
105 106
106int acpi_device_update_power(struct acpi_device *device, int *state_p);
107
108int acpi_wakeup_device_init(void); 107int acpi_wakeup_device_init(void);
109 108
110#ifdef CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC 109#ifdef CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC
@@ -168,13 +167,6 @@ static inline void suspend_nvs_restore(void) {}
168#endif 167#endif
169 168
170/*-------------------------------------------------------------------------- 169/*--------------------------------------------------------------------------
171 Platform bus support
172 -------------------------------------------------------------------------- */
173struct platform_device;
174
175struct platform_device *acpi_create_platform_device(struct acpi_device *adev);
176
177/*--------------------------------------------------------------------------
178 Video 170 Video
179 -------------------------------------------------------------------------- */ 171 -------------------------------------------------------------------------- */
180#if defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE) 172#if defined(CONFIG_ACPI_VIDEO) || defined(CONFIG_ACPI_VIDEO_MODULE)
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index ae44d8654c82..0476e90b2091 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -142,6 +142,53 @@ static int create_modalias(struct acpi_device *acpi_dev, char *modalias,
142} 142}
143 143
144/* 144/*
145 * acpi_companion_match() - Can we match via ACPI companion device
146 * @dev: Device in question
147 *
148 * Check if the given device has an ACPI companion and if that companion has
149 * a valid list of PNP IDs, and if the device is the first (primary) physical
150 * device associated with it.
151 *
152 * If multiple physical devices are attached to a single ACPI companion, we need
153 * to be careful. The usage scenario for this kind of relationship is that all
154 * of the physical devices in question use resources provided by the ACPI
155 * companion. A typical case is an MFD device where all the sub-devices share
156 * the parent's ACPI companion. In such cases we can only allow the primary
157 * (first) physical device to be matched with the help of the companion's PNP
158 * IDs.
159 *
160 * Additional physical devices sharing the ACPI companion can still use
161 * resources available from it but they will be matched normally using functions
162 * provided by their bus types (and analogously for their modalias).
163 */
164static bool acpi_companion_match(const struct device *dev)
165{
166 struct acpi_device *adev;
167 bool ret;
168
169 adev = ACPI_COMPANION(dev);
170 if (!adev)
171 return false;
172
173 if (list_empty(&adev->pnp.ids))
174 return false;
175
176 mutex_lock(&adev->physical_node_lock);
177 if (list_empty(&adev->physical_node_list)) {
178 ret = false;
179 } else {
180 const struct acpi_device_physical_node *node;
181
182 node = list_first_entry(&adev->physical_node_list,
183 struct acpi_device_physical_node, node);
184 ret = node->dev == dev;
185 }
186 mutex_unlock(&adev->physical_node_lock);
187
188 return ret;
189}
190
191/*
145 * Creates uevent modalias field for ACPI enumerated devices. 192 * Creates uevent modalias field for ACPI enumerated devices.
146 * Because the other buses does not support ACPI HIDs & CIDs. 193 * Because the other buses does not support ACPI HIDs & CIDs.
147 * e.g. for a device with hid:IBM0001 and cid:ACPI0001 you get: 194 * e.g. for a device with hid:IBM0001 and cid:ACPI0001 you get:
@@ -149,20 +196,14 @@ static int create_modalias(struct acpi_device *acpi_dev, char *modalias,
149 */ 196 */
150int acpi_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env) 197int acpi_device_uevent_modalias(struct device *dev, struct kobj_uevent_env *env)
151{ 198{
152 struct acpi_device *acpi_dev;
153 int len; 199 int len;
154 200
155 acpi_dev = ACPI_COMPANION(dev); 201 if (!acpi_companion_match(dev))
156 if (!acpi_dev)
157 return -ENODEV;
158
159 /* Fall back to bus specific way of modalias exporting */
160 if (list_empty(&acpi_dev->pnp.ids))
161 return -ENODEV; 202 return -ENODEV;
162 203
163 if (add_uevent_var(env, "MODALIAS=")) 204 if (add_uevent_var(env, "MODALIAS="))
164 return -ENOMEM; 205 return -ENOMEM;
165 len = create_modalias(acpi_dev, &env->buf[env->buflen - 1], 206 len = create_modalias(ACPI_COMPANION(dev), &env->buf[env->buflen - 1],
166 sizeof(env->buf) - env->buflen); 207 sizeof(env->buf) - env->buflen);
167 if (len <= 0) 208 if (len <= 0)
168 return len; 209 return len;
@@ -179,18 +220,12 @@ EXPORT_SYMBOL_GPL(acpi_device_uevent_modalias);
179 */ 220 */
180int acpi_device_modalias(struct device *dev, char *buf, int size) 221int acpi_device_modalias(struct device *dev, char *buf, int size)
181{ 222{
182 struct acpi_device *acpi_dev;
183 int len; 223 int len;
184 224
185 acpi_dev = ACPI_COMPANION(dev); 225 if (!acpi_companion_match(dev))
186 if (!acpi_dev)
187 return -ENODEV; 226 return -ENODEV;
188 227
189 /* Fall back to bus specific way of modalias exporting */ 228 len = create_modalias(ACPI_COMPANION(dev), buf, size -1);
190 if (list_empty(&acpi_dev->pnp.ids))
191 return -ENODEV;
192
193 len = create_modalias(acpi_dev, buf, size -1);
194 if (len <= 0) 229 if (len <= 0)
195 return len; 230 return len;
196 buf[len++] = '\n'; 231 buf[len++] = '\n';
@@ -853,6 +888,9 @@ const struct acpi_device_id *acpi_match_device(const struct acpi_device_id *ids,
853 if (!ids || !handle || acpi_bus_get_device(handle, &adev)) 888 if (!ids || !handle || acpi_bus_get_device(handle, &adev))
854 return NULL; 889 return NULL;
855 890
891 if (!acpi_companion_match(dev))
892 return NULL;
893
856 return __acpi_match_device(adev, ids); 894 return __acpi_match_device(adev, ids);
857} 895}
858EXPORT_SYMBOL_GPL(acpi_match_device); 896EXPORT_SYMBOL_GPL(acpi_match_device);
@@ -1470,7 +1508,7 @@ static void acpi_wakeup_gpe_init(struct acpi_device *device)
1470 if (ACPI_FAILURE(status)) 1508 if (ACPI_FAILURE(status))
1471 return; 1509 return;
1472 1510
1473 wakeup->flags.run_wake = !!(event_status & ACPI_EVENT_FLAG_HANDLE); 1511 wakeup->flags.run_wake = !!(event_status & ACPI_EVENT_FLAG_HAS_HANDLER);
1474} 1512}
1475 1513
1476static void acpi_bus_get_wakeup_device_flags(struct acpi_device *device) 1514static void acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
@@ -2315,6 +2353,7 @@ int __init acpi_scan_init(void)
2315 acpi_container_init(); 2353 acpi_container_init();
2316 acpi_memory_hotplug_init(); 2354 acpi_memory_hotplug_init();
2317 acpi_pnp_init(); 2355 acpi_pnp_init();
2356 acpi_int340x_thermal_init();
2318 2357
2319 mutex_lock(&acpi_scan_lock); 2358 mutex_lock(&acpi_scan_lock);
2320 /* 2359 /*
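
A simplified restatement of the "primary physical device" rule added in acpi_companion_match(), using a plain pointer list instead of the kernel's list and node types; only the first device attached to the companion is matched through the companion's PNP IDs, while additional devices (for example MFD sub-devices) fall back to their bus's normal matching:

#include <linux/types.h>

struct phys_node {
        const void *dev;
        struct phys_node *next;
};

static bool is_primary_physical_device(const struct phys_node *list_head,
                                       const void *dev)
{
        return list_head && list_head->dev == dev;
}
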
diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
index 38cb9782d4b8..13e577c80201 100644
--- a/drivers/acpi/sysfs.c
+++ b/drivers/acpi/sysfs.c
@@ -537,7 +537,7 @@ static ssize_t counter_show(struct kobject *kobj,
537 if (result) 537 if (result)
538 goto end; 538 goto end;
539 539
540 if (!(status & ACPI_EVENT_FLAG_HANDLE)) 540 if (!(status & ACPI_EVENT_FLAG_HAS_HANDLER))
541 size += sprintf(buf + size, " invalid"); 541 size += sprintf(buf + size, " invalid");
542 else if (status & ACPI_EVENT_FLAG_ENABLED) 542 else if (status & ACPI_EVENT_FLAG_ENABLED)
543 size += sprintf(buf + size, " enabled"); 543 size += sprintf(buf + size, " enabled");
@@ -581,7 +581,7 @@ static ssize_t counter_set(struct kobject *kobj,
581 if (result) 581 if (result)
582 goto end; 582 goto end;
583 583
584 if (!(status & ACPI_EVENT_FLAG_HANDLE)) { 584 if (!(status & ACPI_EVENT_FLAG_HAS_HANDLER)) {
585 printk(KERN_WARNING PREFIX 585 printk(KERN_WARNING PREFIX
586 "Can not change Invalid GPE/Fixed Event status\n"); 586 "Can not change Invalid GPE/Fixed Event status\n");
587 return -EINVAL; 587 return -EINVAL;
diff --git a/drivers/acpi/thermal.c b/drivers/acpi/thermal.c
index 112817e963e0..d24fa1964eb8 100644
--- a/drivers/acpi/thermal.c
+++ b/drivers/acpi/thermal.c
@@ -528,7 +528,6 @@ static void acpi_thermal_check(void *data)
528} 528}
529 529
530/* sys I/F for generic thermal sysfs support */ 530/* sys I/F for generic thermal sysfs support */
531#define KELVIN_TO_MILLICELSIUS(t, off) (((t) - (off)) * 100)
532 531
533static int thermal_get_temp(struct thermal_zone_device *thermal, 532static int thermal_get_temp(struct thermal_zone_device *thermal,
534 unsigned long *temp) 533 unsigned long *temp)
@@ -543,7 +542,8 @@ static int thermal_get_temp(struct thermal_zone_device *thermal,
543 if (result) 542 if (result)
544 return result; 543 return result;
545 544
546 *temp = KELVIN_TO_MILLICELSIUS(tz->temperature, tz->kelvin_offset); 545 *temp = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(tz->temperature,
546 tz->kelvin_offset);
547 return 0; 547 return 0;
548} 548}
549 549
@@ -647,7 +647,7 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
647 647
648 if (tz->trips.critical.flags.valid) { 648 if (tz->trips.critical.flags.valid) {
649 if (!trip) { 649 if (!trip) {
650 *temp = KELVIN_TO_MILLICELSIUS( 650 *temp = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(
651 tz->trips.critical.temperature, 651 tz->trips.critical.temperature,
652 tz->kelvin_offset); 652 tz->kelvin_offset);
653 return 0; 653 return 0;
@@ -657,7 +657,7 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
657 657
658 if (tz->trips.hot.flags.valid) { 658 if (tz->trips.hot.flags.valid) {
659 if (!trip) { 659 if (!trip) {
660 *temp = KELVIN_TO_MILLICELSIUS( 660 *temp = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(
661 tz->trips.hot.temperature, 661 tz->trips.hot.temperature,
662 tz->kelvin_offset); 662 tz->kelvin_offset);
663 return 0; 663 return 0;
@@ -667,7 +667,7 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
667 667
668 if (tz->trips.passive.flags.valid) { 668 if (tz->trips.passive.flags.valid) {
669 if (!trip) { 669 if (!trip) {
670 *temp = KELVIN_TO_MILLICELSIUS( 670 *temp = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(
671 tz->trips.passive.temperature, 671 tz->trips.passive.temperature,
672 tz->kelvin_offset); 672 tz->kelvin_offset);
673 return 0; 673 return 0;
@@ -678,7 +678,7 @@ static int thermal_get_trip_temp(struct thermal_zone_device *thermal,
678 for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE && 678 for (i = 0; i < ACPI_THERMAL_MAX_ACTIVE &&
679 tz->trips.active[i].flags.valid; i++) { 679 tz->trips.active[i].flags.valid; i++) {
680 if (!trip) { 680 if (!trip) {
681 *temp = KELVIN_TO_MILLICELSIUS( 681 *temp = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(
682 tz->trips.active[i].temperature, 682 tz->trips.active[i].temperature,
683 tz->kelvin_offset); 683 tz->kelvin_offset);
684 return 0; 684 return 0;
@@ -694,7 +694,7 @@ static int thermal_get_crit_temp(struct thermal_zone_device *thermal,
694 struct acpi_thermal *tz = thermal->devdata; 694 struct acpi_thermal *tz = thermal->devdata;
695 695
696 if (tz->trips.critical.flags.valid) { 696 if (tz->trips.critical.flags.valid) {
697 *temperature = KELVIN_TO_MILLICELSIUS( 697 *temperature = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(
698 tz->trips.critical.temperature, 698 tz->trips.critical.temperature,
699 tz->kelvin_offset); 699 tz->kelvin_offset);
700 return 0; 700 return 0;
@@ -714,8 +714,8 @@ static int thermal_get_trend(struct thermal_zone_device *thermal,
714 714
715 if (type == THERMAL_TRIP_ACTIVE) { 715 if (type == THERMAL_TRIP_ACTIVE) {
716 unsigned long trip_temp; 716 unsigned long trip_temp;
717 unsigned long temp = KELVIN_TO_MILLICELSIUS(tz->temperature, 717 unsigned long temp = DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(
718 tz->kelvin_offset); 718 tz->temperature, tz->kelvin_offset);
719 if (thermal_get_trip_temp(thermal, trip, &trip_temp)) 719 if (thermal_get_trip_temp(thermal, trip, &trip_temp))
720 return -EINVAL; 720 return -EINVAL;
721 721
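
The thermal.c hunk above swaps the driver-local KELVIN_TO_MILLICELSIUS macro for the shared DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET helper; judging by the removed definition, the arithmetic is unchanged. A minimal sketch of the conversion with an illustrative temperature (the 2732 offset is the usual 273.2 K value, not taken from this hunk):

	/* Sketch only: ACPI reports temperatures in deci-Kelvin, the thermal core
	 * wants millidegrees Celsius.  Both the removed local macro and the shared
	 * helper compute (t - off) * 100, where "off" is the Kelvin offset
	 * expressed in deci-Kelvin.
	 */
	#define DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(t, off) (((t) - (off)) * 100)

	/* Example: _TMP = 3000 (300.0 K), kelvin_offset = 2732 (273.2 K)
	 *   (3000 - 2732) * 100 = 26800 millicelsius = 26.8 degrees Celsius
	 */
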
diff --git a/drivers/acpi/utils.c b/drivers/acpi/utils.c
index 834f35c4bf8d..371ac12d25b1 100644
--- a/drivers/acpi/utils.c
+++ b/drivers/acpi/utils.c
@@ -149,6 +149,21 @@ acpi_extract_package(union acpi_object *package,
149 break; 149 break;
150 } 150 }
151 break; 151 break;
152 case ACPI_TYPE_LOCAL_REFERENCE:
153 switch (format_string[i]) {
154 case 'R':
155 size_required += sizeof(void *);
156 tail_offset += sizeof(void *);
157 break;
158 default:
159 printk(KERN_WARNING PREFIX "Invalid package element"
160 " [%d] got reference,"
161 " expecting [%c]\n",
162 i, format_string[i]);
163 return AE_BAD_DATA;
164 break;
165 }
166 break;
152 167
153 case ACPI_TYPE_PACKAGE: 168 case ACPI_TYPE_PACKAGE:
154 default: 169 default:
@@ -247,7 +262,18 @@ acpi_extract_package(union acpi_object *package,
247 break; 262 break;
248 } 263 }
249 break; 264 break;
250 265 case ACPI_TYPE_LOCAL_REFERENCE:
266 switch (format_string[i]) {
267 case 'R':
268 *(void **)head =
269 (void *)element->reference.handle;
270 head += sizeof(void *);
271 break;
272 default:
273 /* Should never get here */
274 break;
275 }
276 break;
251 case ACPI_TYPE_PACKAGE: 277 case ACPI_TYPE_PACKAGE:
252 /* TBD: handle nested packages... */ 278 /* TBD: handle nested packages... */
253 default: 279 default:
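
The utils.c hunk teaches acpi_extract_package() a new 'R' format character that stores an ACPI_TYPE_LOCAL_REFERENCE element's handle into the output buffer as a pointer-sized value. A hedged caller sketch; the struct, function name and package layout below are illustrative assumptions, only acpi_extract_package() and the "R" character come from the patch:

	#include <linux/acpi.h>

	/* Sketch: pull one integer ('N') and one local reference ('R') out of a
	 * package returned by an ACPI method.  The 'R' element is written as a
	 * pointer-sized handle, so acpi_handle works as the destination field.
	 */
	struct my_pair {
		u64 value;
		acpi_handle handle;
	};

	static acpi_status my_extract_pair(union acpi_object *package,
					   struct my_pair *pair)
	{
		struct acpi_buffer format = { sizeof("NR"), "NR" };
		struct acpi_buffer out = { sizeof(*pair), pair };

		return acpi_extract_package(package, &format, &out);
	}
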
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 807a88a0f394..9d75ead2a1f9 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -1164,7 +1164,8 @@ static bool acpi_video_device_in_dod(struct acpi_video_device *device)
1164 return true; 1164 return true;
1165 1165
1166 for (i = 0; i < video->attached_count; i++) { 1166 for (i = 0; i < video->attached_count; i++) {
1167 if (video->attached_array[i].bind_info == device) 1167 if ((video->attached_array[i].value.int_val & 0xfff) ==
1168 (device->device_id & 0xfff))
1168 return true; 1169 return true;
1169 } 1170 }
1170 1171
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 5f039f191067..49f1e6890587 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -60,6 +60,7 @@ enum board_ids {
60 /* board IDs by feature in alphabetical order */ 60 /* board IDs by feature in alphabetical order */
61 board_ahci, 61 board_ahci,
62 board_ahci_ign_iferr, 62 board_ahci_ign_iferr,
63 board_ahci_nomsi,
63 board_ahci_noncq, 64 board_ahci_noncq,
64 board_ahci_nosntf, 65 board_ahci_nosntf,
65 board_ahci_yes_fbs, 66 board_ahci_yes_fbs,
@@ -121,6 +122,13 @@ static const struct ata_port_info ahci_port_info[] = {
121 .udma_mask = ATA_UDMA6, 122 .udma_mask = ATA_UDMA6,
122 .port_ops = &ahci_ops, 123 .port_ops = &ahci_ops,
123 }, 124 },
125 [board_ahci_nomsi] = {
126 AHCI_HFLAGS (AHCI_HFLAG_NO_MSI),
127 .flags = AHCI_FLAG_COMMON,
128 .pio_mask = ATA_PIO4,
129 .udma_mask = ATA_UDMA6,
130 .port_ops = &ahci_ops,
131 },
124 [board_ahci_noncq] = { 132 [board_ahci_noncq] = {
125 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ), 133 AHCI_HFLAGS (AHCI_HFLAG_NO_NCQ),
126 .flags = AHCI_FLAG_COMMON, 134 .flags = AHCI_FLAG_COMMON,
@@ -313,6 +321,14 @@ static const struct pci_device_id ahci_pci_tbl[] = {
313 { PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series RAID */ 321 { PCI_VDEVICE(INTEL, 0x8c87), board_ahci }, /* 9 Series RAID */
314 { PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */ 322 { PCI_VDEVICE(INTEL, 0x8c8e), board_ahci }, /* 9 Series RAID */
315 { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series RAID */ 323 { PCI_VDEVICE(INTEL, 0x8c8f), board_ahci }, /* 9 Series RAID */
324 { PCI_VDEVICE(INTEL, 0x9d03), board_ahci }, /* Sunrise Point-LP AHCI */
325 { PCI_VDEVICE(INTEL, 0x9d05), board_ahci }, /* Sunrise Point-LP RAID */
326 { PCI_VDEVICE(INTEL, 0x9d07), board_ahci }, /* Sunrise Point-LP RAID */
327 { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H AHCI */
328 { PCI_VDEVICE(INTEL, 0xa103), board_ahci }, /* Sunrise Point-H RAID */
329 { PCI_VDEVICE(INTEL, 0xa105), board_ahci }, /* Sunrise Point-H RAID */
330 { PCI_VDEVICE(INTEL, 0xa107), board_ahci }, /* Sunrise Point-H RAID */
331 { PCI_VDEVICE(INTEL, 0xa10f), board_ahci }, /* Sunrise Point-H RAID */
316 332
317 /* JMicron 360/1/3/5/6, match class to avoid IDE function */ 333 /* JMicron 360/1/3/5/6, match class to avoid IDE function */
318 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, 334 { PCI_VENDOR_ID_JMICRON, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
@@ -475,10 +491,11 @@ static const struct pci_device_id ahci_pci_tbl[] = {
475 { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */ 491 { PCI_VDEVICE(ASMEDIA, 0x0612), board_ahci }, /* ASM1062 */
476 492
477 /* 493 /*
478 * Samsung SSDs found on some macbooks. NCQ times out. 494 * Samsung SSDs found on some macbooks. NCQ times out if MSI is
479 * https://bugzilla.kernel.org/show_bug.cgi?id=60731 495 * enabled. https://bugzilla.kernel.org/show_bug.cgi?id=60731
480 */ 496 */
481 { PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_noncq }, 497 { PCI_VDEVICE(SAMSUNG, 0x1600), board_ahci_nomsi },
498 { PCI_VDEVICE(SAMSUNG, 0xa800), board_ahci_nomsi },
482 499
483 /* Enmotus */ 500 /* Enmotus */
484 { PCI_DEVICE(0x1c44, 0x8000), board_ahci }, 501 { PCI_DEVICE(0x1c44, 0x8000), board_ahci },
@@ -514,12 +531,9 @@ MODULE_PARM_DESC(marvell_enable, "Marvell SATA via AHCI (1 = enabled)");
514static void ahci_pci_save_initial_config(struct pci_dev *pdev, 531static void ahci_pci_save_initial_config(struct pci_dev *pdev,
515 struct ahci_host_priv *hpriv) 532 struct ahci_host_priv *hpriv)
516{ 533{
517 unsigned int force_port_map = 0;
518 unsigned int mask_port_map = 0;
519
520 if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361) { 534 if (pdev->vendor == PCI_VENDOR_ID_JMICRON && pdev->device == 0x2361) {
521 dev_info(&pdev->dev, "JMB361 has only one port\n"); 535 dev_info(&pdev->dev, "JMB361 has only one port\n");
522 force_port_map = 1; 536 hpriv->force_port_map = 1;
523 } 537 }
524 538
525 /* 539 /*
@@ -529,9 +543,9 @@ static void ahci_pci_save_initial_config(struct pci_dev *pdev,
529 */ 543 */
530 if (hpriv->flags & AHCI_HFLAG_MV_PATA) { 544 if (hpriv->flags & AHCI_HFLAG_MV_PATA) {
531 if (pdev->device == 0x6121) 545 if (pdev->device == 0x6121)
532 mask_port_map = 0x3; 546 hpriv->mask_port_map = 0x3;
533 else 547 else
534 mask_port_map = 0xf; 548 hpriv->mask_port_map = 0xf;
535 dev_info(&pdev->dev, 549 dev_info(&pdev->dev,
536 "Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n"); 550 "Disabling your PATA port. Use the boot option 'ahci.marvell_enable=0' to avoid this.\n");
537 } 551 }
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 5eb61c9e63da..97683e45ab04 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -1778,16 +1778,15 @@ static void ahci_handle_port_interrupt(struct ata_port *ap,
1778 } 1778 }
1779} 1779}
1780 1780
1781static void ahci_update_intr_status(struct ata_port *ap) 1781static void ahci_port_intr(struct ata_port *ap)
1782{ 1782{
1783 void __iomem *port_mmio = ahci_port_base(ap); 1783 void __iomem *port_mmio = ahci_port_base(ap);
1784 struct ahci_port_priv *pp = ap->private_data;
1785 u32 status; 1784 u32 status;
1786 1785
1787 status = readl(port_mmio + PORT_IRQ_STAT); 1786 status = readl(port_mmio + PORT_IRQ_STAT);
1788 writel(status, port_mmio + PORT_IRQ_STAT); 1787 writel(status, port_mmio + PORT_IRQ_STAT);
1789 1788
1790 atomic_or(status, &pp->intr_status); 1789 ahci_handle_port_interrupt(ap, port_mmio, status);
1791} 1790}
1792 1791
1793static irqreturn_t ahci_port_thread_fn(int irq, void *dev_instance) 1792static irqreturn_t ahci_port_thread_fn(int irq, void *dev_instance)
@@ -1808,34 +1807,6 @@ static irqreturn_t ahci_port_thread_fn(int irq, void *dev_instance)
1808 return IRQ_HANDLED; 1807 return IRQ_HANDLED;
1809} 1808}
1810 1809
1811irqreturn_t ahci_thread_fn(int irq, void *dev_instance)
1812{
1813 struct ata_host *host = dev_instance;
1814 struct ahci_host_priv *hpriv = host->private_data;
1815 u32 irq_masked = hpriv->port_map;
1816 unsigned int i;
1817
1818 for (i = 0; i < host->n_ports; i++) {
1819 struct ata_port *ap;
1820
1821 if (!(irq_masked & (1 << i)))
1822 continue;
1823
1824 ap = host->ports[i];
1825 if (ap) {
1826 ahci_port_thread_fn(irq, ap);
1827 VPRINTK("port %u\n", i);
1828 } else {
1829 VPRINTK("port %u (no irq)\n", i);
1830 if (ata_ratelimit())
1831 dev_warn(host->dev,
1832 "interrupt on disabled port %u\n", i);
1833 }
1834 }
1835
1836 return IRQ_HANDLED;
1837}
1838
1839static irqreturn_t ahci_multi_irqs_intr(int irq, void *dev_instance) 1810static irqreturn_t ahci_multi_irqs_intr(int irq, void *dev_instance)
1840{ 1811{
1841 struct ata_port *ap = dev_instance; 1812 struct ata_port *ap = dev_instance;
@@ -1875,6 +1846,8 @@ static irqreturn_t ahci_single_irq_intr(int irq, void *dev_instance)
1875 1846
1876 irq_masked = irq_stat & hpriv->port_map; 1847 irq_masked = irq_stat & hpriv->port_map;
1877 1848
1849 spin_lock(&host->lock);
1850
1878 for (i = 0; i < host->n_ports; i++) { 1851 for (i = 0; i < host->n_ports; i++) {
1879 struct ata_port *ap; 1852 struct ata_port *ap;
1880 1853
@@ -1883,7 +1856,7 @@ static irqreturn_t ahci_single_irq_intr(int irq, void *dev_instance)
1883 1856
1884 ap = host->ports[i]; 1857 ap = host->ports[i];
1885 if (ap) { 1858 if (ap) {
1886 ahci_update_intr_status(ap); 1859 ahci_port_intr(ap);
1887 VPRINTK("port %u\n", i); 1860 VPRINTK("port %u\n", i);
1888 } else { 1861 } else {
1889 VPRINTK("port %u (no irq)\n", i); 1862 VPRINTK("port %u (no irq)\n", i);
@@ -1906,9 +1879,11 @@ static irqreturn_t ahci_single_irq_intr(int irq, void *dev_instance)
1906 */ 1879 */
1907 writel(irq_stat, mmio + HOST_IRQ_STAT); 1880 writel(irq_stat, mmio + HOST_IRQ_STAT);
1908 1881
1882 spin_unlock(&host->lock);
1883
1909 VPRINTK("EXIT\n"); 1884 VPRINTK("EXIT\n");
1910 1885
1911 return handled ? IRQ_WAKE_THREAD : IRQ_NONE; 1886 return IRQ_RETVAL(handled);
1912} 1887}
1913 1888
1914unsigned int ahci_qc_issue(struct ata_queued_cmd *qc) 1889unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
@@ -2320,8 +2295,13 @@ static int ahci_port_start(struct ata_port *ap)
2320 */ 2295 */
2321 pp->intr_mask = DEF_PORT_IRQ; 2296 pp->intr_mask = DEF_PORT_IRQ;
2322 2297
2323 spin_lock_init(&pp->lock); 2298 /*
2324 ap->lock = &pp->lock; 2299 * Switch to per-port locking in case each port has its own MSI vector.
2300 */
2301 if ((hpriv->flags & AHCI_HFLAG_MULTI_MSI)) {
2302 spin_lock_init(&pp->lock);
2303 ap->lock = &pp->lock;
2304 }
2325 2305
2326 ap->private_data = pp; 2306 ap->private_data = pp;
2327 2307
@@ -2482,31 +2462,6 @@ out_free_irqs:
2482 return rc; 2462 return rc;
2483} 2463}
2484 2464
2485static int ahci_host_activate_single_irq(struct ata_host *host, int irq,
2486 struct scsi_host_template *sht)
2487{
2488 int i, rc;
2489
2490 rc = ata_host_start(host);
2491 if (rc)
2492 return rc;
2493
2494 rc = devm_request_threaded_irq(host->dev, irq, ahci_single_irq_intr,
2495 ahci_thread_fn, IRQF_SHARED,
2496 dev_driver_string(host->dev), host);
2497 if (rc)
2498 return rc;
2499
2500 for (i = 0; i < host->n_ports; i++)
2501 ata_port_desc(host->ports[i], "irq %d", irq);
2502
2503 rc = ata_host_register(host, sht);
2504 if (rc)
2505 devm_free_irq(host->dev, irq, host);
2506
2507 return rc;
2508}
2509
2510/** 2465/**
2511 * ahci_host_activate - start AHCI host, request IRQs and register it 2466 * ahci_host_activate - start AHCI host, request IRQs and register it
2512 * @host: target ATA host 2467 * @host: target ATA host
@@ -2532,7 +2487,8 @@ int ahci_host_activate(struct ata_host *host, int irq,
2532 if (hpriv->flags & AHCI_HFLAG_MULTI_MSI) 2487 if (hpriv->flags & AHCI_HFLAG_MULTI_MSI)
2533 rc = ahci_host_activate_multi_irqs(host, irq, sht); 2488 rc = ahci_host_activate_multi_irqs(host, irq, sht);
2534 else 2489 else
2535 rc = ahci_host_activate_single_irq(host, irq, sht); 2490 rc = ata_host_activate(host, irq, ahci_single_irq_intr,
2491 IRQF_SHARED, sht);
2536 return rc; 2492 return rc;
2537} 2493}
2538EXPORT_SYMBOL_GPL(ahci_host_activate); 2494EXPORT_SYMBOL_GPL(ahci_host_activate);
diff --git a/drivers/ata/sata_fsl.c b/drivers/ata/sata_fsl.c
index 07bc7e4dbd04..65071591b143 100644
--- a/drivers/ata/sata_fsl.c
+++ b/drivers/ata/sata_fsl.c
@@ -1488,7 +1488,7 @@ static int sata_fsl_probe(struct platform_device *ofdev)
1488 host_priv->csr_base = csr_base; 1488 host_priv->csr_base = csr_base;
1489 1489
1490 irq = irq_of_parse_and_map(ofdev->dev.of_node, 0); 1490 irq = irq_of_parse_and_map(ofdev->dev.of_node, 0);
1491 if (irq < 0) { 1491 if (!irq) {
1492 dev_err(&ofdev->dev, "invalid irq from platform\n"); 1492 dev_err(&ofdev->dev, "invalid irq from platform\n");
1493 goto error_exit_with_cleanup; 1493 goto error_exit_with_cleanup;
1494 } 1494 }
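
The sata_fsl hunk fixes a dead error check: irq_of_parse_and_map() returns an unsigned int and reports failure as 0, so the old "irq < 0" test could never fire. A one-line sketch of the idiom, with illustrative variable names and an arbitrary errno:

	unsigned int irq = irq_of_parse_and_map(np, 0);	/* np: some device_node */
	if (!irq)					/* 0 means "no mapping", never negative */
		return -EINVAL;				/* errno choice is up to the driver */
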
diff --git a/drivers/ata/sata_rcar.c b/drivers/ata/sata_rcar.c
index 61eb6d77dac7..ea1fbc1d4c5f 100644
--- a/drivers/ata/sata_rcar.c
+++ b/drivers/ata/sata_rcar.c
@@ -146,6 +146,7 @@
146enum sata_rcar_type { 146enum sata_rcar_type {
147 RCAR_GEN1_SATA, 147 RCAR_GEN1_SATA,
148 RCAR_GEN2_SATA, 148 RCAR_GEN2_SATA,
149 RCAR_R8A7790_ES1_SATA,
149}; 150};
150 151
151struct sata_rcar_priv { 152struct sata_rcar_priv {
@@ -763,6 +764,9 @@ static void sata_rcar_setup_port(struct ata_host *host)
763 ap->udma_mask = ATA_UDMA6; 764 ap->udma_mask = ATA_UDMA6;
764 ap->flags |= ATA_FLAG_SATA; 765 ap->flags |= ATA_FLAG_SATA;
765 766
767 if (priv->type == RCAR_R8A7790_ES1_SATA)
768 ap->flags |= ATA_FLAG_NO_DIPM;
769
766 ioaddr->cmd_addr = base + SDATA_REG; 770 ioaddr->cmd_addr = base + SDATA_REG;
767 ioaddr->ctl_addr = base + SSDEVCON_REG; 771 ioaddr->ctl_addr = base + SSDEVCON_REG;
768 ioaddr->scr_addr = base + SCRSSTS_REG; 772 ioaddr->scr_addr = base + SCRSSTS_REG;
@@ -792,6 +796,7 @@ static void sata_rcar_init_controller(struct ata_host *host)
792 sata_rcar_gen1_phy_init(priv); 796 sata_rcar_gen1_phy_init(priv);
793 break; 797 break;
794 case RCAR_GEN2_SATA: 798 case RCAR_GEN2_SATA:
799 case RCAR_R8A7790_ES1_SATA:
795 sata_rcar_gen2_phy_init(priv); 800 sata_rcar_gen2_phy_init(priv);
796 break; 801 break;
797 default: 802 default:
@@ -838,9 +843,17 @@ static struct of_device_id sata_rcar_match[] = {
838 .data = (void *)RCAR_GEN2_SATA 843 .data = (void *)RCAR_GEN2_SATA
839 }, 844 },
840 { 845 {
846 .compatible = "renesas,sata-r8a7790-es1",
847 .data = (void *)RCAR_R8A7790_ES1_SATA
848 },
849 {
841 .compatible = "renesas,sata-r8a7791", 850 .compatible = "renesas,sata-r8a7791",
842 .data = (void *)RCAR_GEN2_SATA 851 .data = (void *)RCAR_GEN2_SATA
843 }, 852 },
853 {
854 .compatible = "renesas,sata-r8a7793",
855 .data = (void *)RCAR_GEN2_SATA
856 },
844 { }, 857 { },
845}; 858};
846MODULE_DEVICE_TABLE(of, sata_rcar_match); 859MODULE_DEVICE_TABLE(of, sata_rcar_match);
@@ -849,7 +862,9 @@ static const struct platform_device_id sata_rcar_id_table[] = {
849 { "sata_rcar", RCAR_GEN1_SATA }, /* Deprecated by "sata-r8a7779" */ 862 { "sata_rcar", RCAR_GEN1_SATA }, /* Deprecated by "sata-r8a7779" */
850 { "sata-r8a7779", RCAR_GEN1_SATA }, 863 { "sata-r8a7779", RCAR_GEN1_SATA },
851 { "sata-r8a7790", RCAR_GEN2_SATA }, 864 { "sata-r8a7790", RCAR_GEN2_SATA },
865 { "sata-r8a7790-es1", RCAR_R8A7790_ES1_SATA },
852 { "sata-r8a7791", RCAR_GEN2_SATA }, 866 { "sata-r8a7791", RCAR_GEN2_SATA },
867 { "sata-r8a7793", RCAR_GEN2_SATA },
853 { }, 868 { },
854}; 869};
855MODULE_DEVICE_TABLE(platform, sata_rcar_id_table); 870MODULE_DEVICE_TABLE(platform, sata_rcar_id_table);
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index 7652e8dc188f..21b0bc6a9c96 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -1225,11 +1225,13 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id)
1225 card->config_regs = pci_iomap(dev, 0, CONFIG_RAM_SIZE); 1225 card->config_regs = pci_iomap(dev, 0, CONFIG_RAM_SIZE);
1226 if (!card->config_regs) { 1226 if (!card->config_regs) {
1227 dev_warn(&dev->dev, "Failed to ioremap config registers\n"); 1227 dev_warn(&dev->dev, "Failed to ioremap config registers\n");
1228 err = -ENOMEM;
1228 goto out_release_regions; 1229 goto out_release_regions;
1229 } 1230 }
1230 card->buffers = pci_iomap(dev, 1, DATA_RAM_SIZE); 1231 card->buffers = pci_iomap(dev, 1, DATA_RAM_SIZE);
1231 if (!card->buffers) { 1232 if (!card->buffers) {
1232 dev_warn(&dev->dev, "Failed to ioremap data buffers\n"); 1233 dev_warn(&dev->dev, "Failed to ioremap data buffers\n");
1234 err = -ENOMEM;
1233 goto out_unmap_config; 1235 goto out_unmap_config;
1234 } 1236 }
1235 1237
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 61a33f4ba608..df04227d00cf 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -171,20 +171,23 @@ config WANT_DEV_COREDUMP
171 Drivers should "select" this option if they desire to use the 171 Drivers should "select" this option if they desire to use the
172 device coredump mechanism. 172 device coredump mechanism.
173 173
174config DISABLE_DEV_COREDUMP 174config ALLOW_DEV_COREDUMP
175 bool "Disable device coredump" if EXPERT 175 bool "Allow device coredump" if EXPERT
176 default y
176 help 177 help
177 Disable the device coredump mechanism despite drivers wanting to 178 This option controls if the device coredump mechanism is available or
178 use it; this allows for more sensitive systems or systems that 179 not; if disabled, the mechanism will be omitted even if drivers that
179 don't want to ever access the information to not have the code, 180 can use it are enabled.
180 nor keep any data. 181 Say 'N' for more sensitive systems or systems that don't want
182 to ever access the information to not have the code, nor keep any
183 data.
181 184
182 If unsure, say N. 185 If unsure, say Y.
183 186
184config DEV_COREDUMP 187config DEV_COREDUMP
185 bool 188 bool
186 default y if WANT_DEV_COREDUMP 189 default y if WANT_DEV_COREDUMP
187 depends on !DISABLE_DEV_COREDUMP 190 depends on ALLOW_DEV_COREDUMP
188 191
189config DEBUG_DRIVER 192config DEBUG_DRIVER
190 bool "Driver Core verbose debug messages" 193 bool "Driver Core verbose debug messages"
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 14d162952c3b..842d04707de6 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -724,12 +724,12 @@ class_dir_create_and_add(struct class *class, struct kobject *parent_kobj)
724 return &dir->kobj; 724 return &dir->kobj;
725} 725}
726 726
727static DEFINE_MUTEX(gdp_mutex);
727 728
728static struct kobject *get_device_parent(struct device *dev, 729static struct kobject *get_device_parent(struct device *dev,
729 struct device *parent) 730 struct device *parent)
730{ 731{
731 if (dev->class) { 732 if (dev->class) {
732 static DEFINE_MUTEX(gdp_mutex);
733 struct kobject *kobj = NULL; 733 struct kobject *kobj = NULL;
734 struct kobject *parent_kobj; 734 struct kobject *parent_kobj;
735 struct kobject *k; 735 struct kobject *k;
@@ -793,7 +793,9 @@ static void cleanup_glue_dir(struct device *dev, struct kobject *glue_dir)
793 glue_dir->kset != &dev->class->p->glue_dirs) 793 glue_dir->kset != &dev->class->p->glue_dirs)
794 return; 794 return;
795 795
796 mutex_lock(&gdp_mutex);
796 kobject_put(glue_dir); 797 kobject_put(glue_dir);
798 mutex_unlock(&gdp_mutex);
797} 799}
798 800
799static void cleanup_device_parent(struct device *dev) 801static void cleanup_device_parent(struct device *dev)
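
The core.c hunk moves gdp_mutex to file scope so cleanup_glue_dir() can take it around the final kobject_put(). A comment-only sketch of the race this closes, as I read it; the scenario wording is mine, not from the patch:

	/*
	 * Without the lock: thread A in cleanup_glue_dir() drops what turns out to
	 * be the last reference to a class glue directory, while thread B in
	 * get_device_parent() has just found that same kobject in the glue_dirs
	 * kset and is about to reuse it.  Holding gdp_mutex across the put
	 * serialises removal with the lookup/create path, which already runs
	 * under the same mutex.
	 */
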
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 473ff4892401..950fff9ce453 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -223,9 +223,10 @@ bool dma_release_from_contiguous(struct device *dev, struct page *pages,
223#undef pr_fmt 223#undef pr_fmt
224#define pr_fmt(fmt) fmt 224#define pr_fmt(fmt) fmt
225 225
226static void rmem_cma_device_init(struct reserved_mem *rmem, struct device *dev) 226static int rmem_cma_device_init(struct reserved_mem *rmem, struct device *dev)
227{ 227{
228 dev_set_cma_area(dev, rmem->priv); 228 dev_set_cma_area(dev, rmem->priv);
229 return 0;
229} 230}
230 231
231static void rmem_cma_device_release(struct reserved_mem *rmem, 232static void rmem_cma_device_release(struct reserved_mem *rmem,
diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
index 40bc2f4072cc..fb83d4acd400 100644
--- a/drivers/base/power/domain.c
+++ b/drivers/base/power/domain.c
@@ -361,9 +361,19 @@ static int __pm_genpd_save_device(struct pm_domain_data *pdd,
361 struct device *dev = pdd->dev; 361 struct device *dev = pdd->dev;
362 int ret = 0; 362 int ret = 0;
363 363
364 if (gpd_data->need_restore) 364 if (gpd_data->need_restore > 0)
365 return 0; 365 return 0;
366 366
367 /*
368 * If the value of the need_restore flag is still unknown at this point,
369 * we trust that pm_genpd_poweroff() has verified that the device is
370 * already runtime PM suspended.
371 */
372 if (gpd_data->need_restore < 0) {
373 gpd_data->need_restore = 1;
374 return 0;
375 }
376
367 mutex_unlock(&genpd->lock); 377 mutex_unlock(&genpd->lock);
368 378
369 genpd_start_dev(genpd, dev); 379 genpd_start_dev(genpd, dev);
@@ -373,7 +383,7 @@ static int __pm_genpd_save_device(struct pm_domain_data *pdd,
373 mutex_lock(&genpd->lock); 383 mutex_lock(&genpd->lock);
374 384
375 if (!ret) 385 if (!ret)
376 gpd_data->need_restore = true; 386 gpd_data->need_restore = 1;
377 387
378 return ret; 388 return ret;
379} 389}
@@ -389,12 +399,17 @@ static void __pm_genpd_restore_device(struct pm_domain_data *pdd,
389{ 399{
390 struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd); 400 struct generic_pm_domain_data *gpd_data = to_gpd_data(pdd);
391 struct device *dev = pdd->dev; 401 struct device *dev = pdd->dev;
392 bool need_restore = gpd_data->need_restore; 402 int need_restore = gpd_data->need_restore;
393 403
394 gpd_data->need_restore = false; 404 gpd_data->need_restore = 0;
395 mutex_unlock(&genpd->lock); 405 mutex_unlock(&genpd->lock);
396 406
397 genpd_start_dev(genpd, dev); 407 genpd_start_dev(genpd, dev);
408
409 /*
410 * Call genpd_restore_dev() for recently added devices too (need_restore
411 * is negative then).
412 */
398 if (need_restore) 413 if (need_restore)
399 genpd_restore_dev(genpd, dev); 414 genpd_restore_dev(genpd, dev);
400 415
@@ -603,6 +618,7 @@ static void genpd_power_off_work_fn(struct work_struct *work)
603static int pm_genpd_runtime_suspend(struct device *dev) 618static int pm_genpd_runtime_suspend(struct device *dev)
604{ 619{
605 struct generic_pm_domain *genpd; 620 struct generic_pm_domain *genpd;
621 struct generic_pm_domain_data *gpd_data;
606 bool (*stop_ok)(struct device *__dev); 622 bool (*stop_ok)(struct device *__dev);
607 int ret; 623 int ret;
608 624
@@ -628,6 +644,16 @@ static int pm_genpd_runtime_suspend(struct device *dev)
628 return 0; 644 return 0;
629 645
630 mutex_lock(&genpd->lock); 646 mutex_lock(&genpd->lock);
647
648 /*
649 * If we have an unknown state of the need_restore flag, it means none
650 * of the runtime PM callbacks has been invoked yet. Let's update the
651 * flag to reflect that the current state is active.
652 */
653 gpd_data = to_gpd_data(dev->power.subsys_data->domain_data);
654 if (gpd_data->need_restore < 0)
655 gpd_data->need_restore = 0;
656
631 genpd->in_progress++; 657 genpd->in_progress++;
632 pm_genpd_poweroff(genpd); 658 pm_genpd_poweroff(genpd);
633 genpd->in_progress--; 659 genpd->in_progress--;
@@ -1437,12 +1463,12 @@ int __pm_genpd_add_device(struct generic_pm_domain *genpd, struct device *dev,
1437 spin_unlock_irq(&dev->power.lock); 1463 spin_unlock_irq(&dev->power.lock);
1438 1464
1439 if (genpd->attach_dev) 1465 if (genpd->attach_dev)
1440 genpd->attach_dev(dev); 1466 genpd->attach_dev(genpd, dev);
1441 1467
1442 mutex_lock(&gpd_data->lock); 1468 mutex_lock(&gpd_data->lock);
1443 gpd_data->base.dev = dev; 1469 gpd_data->base.dev = dev;
1444 list_add_tail(&gpd_data->base.list_node, &genpd->dev_list); 1470 list_add_tail(&gpd_data->base.list_node, &genpd->dev_list);
1445 gpd_data->need_restore = genpd->status == GPD_STATE_POWER_OFF; 1471 gpd_data->need_restore = -1;
1446 gpd_data->td.constraint_changed = true; 1472 gpd_data->td.constraint_changed = true;
1447 gpd_data->td.effective_constraint_ns = -1; 1473 gpd_data->td.effective_constraint_ns = -1;
1448 mutex_unlock(&gpd_data->lock); 1474 mutex_unlock(&gpd_data->lock);
@@ -1499,7 +1525,7 @@ int pm_genpd_remove_device(struct generic_pm_domain *genpd,
1499 genpd->max_off_time_changed = true; 1525 genpd->max_off_time_changed = true;
1500 1526
1501 if (genpd->detach_dev) 1527 if (genpd->detach_dev)
1502 genpd->detach_dev(dev); 1528 genpd->detach_dev(genpd, dev);
1503 1529
1504 spin_lock_irq(&dev->power.lock); 1530 spin_lock_irq(&dev->power.lock);
1505 1531
@@ -1546,7 +1572,7 @@ void pm_genpd_dev_need_restore(struct device *dev, bool val)
1546 1572
1547 psd = dev_to_psd(dev); 1573 psd = dev_to_psd(dev);
1548 if (psd && psd->domain_data) 1574 if (psd && psd->domain_data)
1549 to_gpd_data(psd->domain_data)->need_restore = val; 1575 to_gpd_data(psd->domain_data)->need_restore = val ? 1 : 0;
1550 1576
1551 spin_unlock_irqrestore(&dev->power.lock, flags); 1577 spin_unlock_irqrestore(&dev->power.lock, flags);
1552} 1578}
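
The domain.c hunk turns need_restore from a bool into a tri-state int so devices added with an unknown runtime PM state are handled correctly. A comment-only summary of the three values as this hunk uses them (my reading, not text from the patch):

	/*
	 *  -1  unknown: just added by __pm_genpd_add_device(); no runtime PM
	 *      callback has run yet.  The save path marks it saved without
	 *      calling into the device, the restore path still restores it.
	 *   0  active: nothing saved; save runs the callbacks, restore is a no-op.
	 *   1  saved: state captured by __pm_genpd_save_device(); restore it on
	 *      the next power-up and skip further saves until then.
	 */
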
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index 44973196d3fd..9717d5f20139 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1266,6 +1266,8 @@ int dpm_suspend_late(pm_message_t state)
1266 } 1266 }
1267 mutex_unlock(&dpm_list_mtx); 1267 mutex_unlock(&dpm_list_mtx);
1268 async_synchronize_full(); 1268 async_synchronize_full();
1269 if (!error)
1270 error = async_error;
1269 if (error) { 1271 if (error) {
1270 suspend_stats.failed_suspend_late++; 1272 suspend_stats.failed_suspend_late++;
1271 dpm_save_failed_step(SUSPEND_SUSPEND_LATE); 1273 dpm_save_failed_step(SUSPEND_SUSPEND_LATE);
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index 8a3f51f7b1b9..db9d00c36a3e 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -3,12 +3,15 @@
3# subsystems should select the appropriate symbols. 3# subsystems should select the appropriate symbols.
4 4
5config REGMAP 5config REGMAP
6 default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_MMIO || REGMAP_IRQ) 6 default y if (REGMAP_I2C || REGMAP_SPI || REGMAP_SPMI || REGMAP_AC97 || REGMAP_MMIO || REGMAP_IRQ)
7 select LZO_COMPRESS 7 select LZO_COMPRESS
8 select LZO_DECOMPRESS 8 select LZO_DECOMPRESS
9 select IRQ_DOMAIN if REGMAP_IRQ 9 select IRQ_DOMAIN if REGMAP_IRQ
10 bool 10 bool
11 11
12config REGMAP_AC97
13 tristate
14
12config REGMAP_I2C 15config REGMAP_I2C
13 tristate 16 tristate
14 depends on I2C 17 depends on I2C
diff --git a/drivers/base/regmap/Makefile b/drivers/base/regmap/Makefile
index a7c670b4123a..0a533653ef3b 100644
--- a/drivers/base/regmap/Makefile
+++ b/drivers/base/regmap/Makefile
@@ -1,6 +1,7 @@
1obj-$(CONFIG_REGMAP) += regmap.o regcache.o 1obj-$(CONFIG_REGMAP) += regmap.o regcache.o
2obj-$(CONFIG_REGMAP) += regcache-rbtree.o regcache-lzo.o regcache-flat.o 2obj-$(CONFIG_REGMAP) += regcache-rbtree.o regcache-lzo.o regcache-flat.o
3obj-$(CONFIG_DEBUG_FS) += regmap-debugfs.o 3obj-$(CONFIG_DEBUG_FS) += regmap-debugfs.o
4obj-$(CONFIG_REGMAP_AC97) += regmap-ac97.o
4obj-$(CONFIG_REGMAP_I2C) += regmap-i2c.o 5obj-$(CONFIG_REGMAP_I2C) += regmap-i2c.o
5obj-$(CONFIG_REGMAP_SPI) += regmap-spi.o 6obj-$(CONFIG_REGMAP_SPI) += regmap-spi.o
6obj-$(CONFIG_REGMAP_SPMI) += regmap-spmi.o 7obj-$(CONFIG_REGMAP_SPMI) += regmap-spmi.o
diff --git a/drivers/base/regmap/regmap-ac97.c b/drivers/base/regmap/regmap-ac97.c
new file mode 100644
index 000000000000..e4c45d2299c1
--- /dev/null
+++ b/drivers/base/regmap/regmap-ac97.c
@@ -0,0 +1,114 @@
1/*
2 * Register map access API - AC'97 support
3 *
4 * Copyright 2013 Linaro Ltd. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#include <linux/clk.h>
20#include <linux/err.h>
21#include <linux/init.h>
22#include <linux/io.h>
23#include <linux/module.h>
24#include <linux/regmap.h>
25#include <linux/slab.h>
26
27#include <sound/ac97_codec.h>
28
29bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg)
30{
31 switch (reg) {
32 case AC97_RESET:
33 case AC97_POWERDOWN:
34 case AC97_INT_PAGING:
35 case AC97_EXTENDED_ID:
36 case AC97_EXTENDED_STATUS:
37 case AC97_EXTENDED_MID:
38 case AC97_EXTENDED_MSTATUS:
39 case AC97_GPIO_STATUS:
40 case AC97_MISC_AFE:
41 case AC97_VENDOR_ID1:
42 case AC97_VENDOR_ID2:
43 case AC97_CODEC_CLASS_REV:
44 case AC97_PCI_SVID:
45 case AC97_PCI_SID:
46 case AC97_FUNC_SELECT:
47 case AC97_FUNC_INFO:
48 case AC97_SENSE_INFO:
49 return true;
50 default:
51 return false;
52 }
53}
54EXPORT_SYMBOL_GPL(regmap_ac97_default_volatile);
55
56static int regmap_ac97_reg_read(void *context, unsigned int reg,
57 unsigned int *val)
58{
59 struct snd_ac97 *ac97 = context;
60
61 *val = ac97->bus->ops->read(ac97, reg);
62
63 return 0;
64}
65
66static int regmap_ac97_reg_write(void *context, unsigned int reg,
67 unsigned int val)
68{
69 struct snd_ac97 *ac97 = context;
70
71 ac97->bus->ops->write(ac97, reg, val);
72
73 return 0;
74}
75
76static const struct regmap_bus ac97_regmap_bus = {
77 .reg_write = regmap_ac97_reg_write,
78 .reg_read = regmap_ac97_reg_read,
79};
80
81/**
82 * regmap_init_ac97(): Initialise AC'97 register map
83 *
84 * @ac97: Device that will be interacted with
85 * @config: Configuration for register map
86 *
87 * The return value will be an ERR_PTR() on error or a valid pointer to
88 * a struct regmap.
89 */
90struct regmap *regmap_init_ac97(struct snd_ac97 *ac97,
91 const struct regmap_config *config)
92{
93 return regmap_init(&ac97->dev, &ac97_regmap_bus, ac97, config);
94}
95EXPORT_SYMBOL_GPL(regmap_init_ac97);
96
97/**
98 * devm_regmap_init_ac97(): Initialise AC'97 register map
99 *
100 * @ac97: Device that will be interacted with
101 * @config: Configuration for register map
102 *
103 * The return value will be an ERR_PTR() on error or a valid pointer
104 * to a struct regmap. The regmap will be automatically freed by the
105 * device management code.
106 */
107struct regmap *devm_regmap_init_ac97(struct snd_ac97 *ac97,
108 const struct regmap_config *config)
109{
110 return devm_regmap_init(&ac97->dev, &ac97_regmap_bus, ac97, config);
111}
112EXPORT_SYMBOL_GPL(devm_regmap_init_ac97);
113
114MODULE_LICENSE("GPL v2");
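
regmap-ac97.c adds regmap_init_ac97()/devm_regmap_init_ac97() plus a stock volatile-register predicate. A usage sketch for a codec probe path; the configuration values and function names are illustrative choices, not mandated by this file:

	static const struct regmap_config my_ac97_regmap_cfg = {
		.reg_bits = 16,
		.reg_stride = 2,
		.val_bits = 16,
		.max_register = 0x7e,
		.volatile_reg = regmap_ac97_default_volatile,
		.cache_type = REGCACHE_RBTREE,
	};

	static int my_codec_setup(struct snd_ac97 *ac97)
	{
		/* Freed automatically by devres when the codec device goes away. */
		struct regmap *map = devm_regmap_init_ac97(ac97, &my_ac97_regmap_cfg);

		if (IS_ERR(map))
			return PTR_ERR(map);

		/* regmap_read()/regmap_write() on "map" now go through the
		 * ac97_regmap_bus reg_read/reg_write ops defined above. */
		return 0;
	}
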
diff --git a/drivers/bcma/host_pci.c b/drivers/bcma/host_pci.c
index 1e5ac0a79696..cd9161a8b3a1 100644
--- a/drivers/bcma/host_pci.c
+++ b/drivers/bcma/host_pci.c
@@ -275,7 +275,7 @@ static SIMPLE_DEV_PM_OPS(bcma_pm_ops, bcma_host_pci_suspend,
275static const struct pci_device_id bcma_pci_bridge_tbl[] = { 275static const struct pci_device_id bcma_pci_bridge_tbl[] = {
276 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) }, 276 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x0576) },
277 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4313) }, 277 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4313) },
278 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43224) }, 278 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43224) }, /* 0xa8d8 */
279 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) }, 279 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4331) },
280 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) }, 280 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4353) },
281 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) }, 281 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4357) },
@@ -285,7 +285,8 @@ static const struct pci_device_id bcma_pci_bridge_tbl[] = {
285 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a9) }, 285 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43a9) },
286 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43aa) }, 286 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x43aa) },
287 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) }, 287 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 0x4727) },
288 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43227) }, /* 0xA8DB */ 288 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43227) }, /* 0xa8db, BCM43217 (sic!) */
289 { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, 43228) }, /* 0xa8dc */
289 { 0, }, 290 { 0, },
290}; 291};
291MODULE_DEVICE_TABLE(pci, bcma_pci_bridge_tbl); 292MODULE_DEVICE_TABLE(pci, bcma_pci_bridge_tbl);
diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c
index d1656c2f70af..1000955ce09d 100644
--- a/drivers/bcma/main.c
+++ b/drivers/bcma/main.c
@@ -132,7 +132,7 @@ static bool bcma_is_core_needed_early(u16 core_id)
132 return false; 132 return false;
133} 133}
134 134
135#ifdef CONFIG_OF 135#if defined(CONFIG_OF) && defined(CONFIG_OF_ADDRESS)
136static struct device_node *bcma_of_find_child_device(struct platform_device *parent, 136static struct device_node *bcma_of_find_child_device(struct platform_device *parent,
137 struct bcma_device *core) 137 struct bcma_device *core)
138{ 138{
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 2671a3f02f0c..8001e812018b 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -450,14 +450,10 @@ static int init_driver_queues(struct nullb *nullb)
450 450
451 ret = setup_commands(nq); 451 ret = setup_commands(nq);
452 if (ret) 452 if (ret)
453 goto err_queue; 453 return ret;
454 nullb->nr_queues++; 454 nullb->nr_queues++;
455 } 455 }
456
457 return 0; 456 return 0;
458err_queue:
459 cleanup_queues(nullb);
460 return ret;
461} 457}
462 458
463static int null_add_dev(void) 459static int null_add_dev(void)
@@ -507,7 +503,9 @@ static int null_add_dev(void)
507 goto out_cleanup_queues; 503 goto out_cleanup_queues;
508 } 504 }
509 blk_queue_make_request(nullb->q, null_queue_bio); 505 blk_queue_make_request(nullb->q, null_queue_bio);
510 init_driver_queues(nullb); 506 rv = init_driver_queues(nullb);
507 if (rv)
508 goto out_cleanup_blk_queue;
511 } else { 509 } else {
512 nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node); 510 nullb->q = blk_init_queue_node(null_request_fn, &nullb->lock, home_node);
513 if (!nullb->q) { 511 if (!nullb->q) {
@@ -516,7 +514,9 @@ static int null_add_dev(void)
516 } 514 }
517 blk_queue_prep_rq(nullb->q, null_rq_prep_fn); 515 blk_queue_prep_rq(nullb->q, null_rq_prep_fn);
518 blk_queue_softirq_done(nullb->q, null_softirq_done_fn); 516 blk_queue_softirq_done(nullb->q, null_softirq_done_fn);
519 init_driver_queues(nullb); 517 rv = init_driver_queues(nullb);
518 if (rv)
519 goto out_cleanup_blk_queue;
520 } 520 }
521 521
522 nullb->q->queuedata = nullb; 522 nullb->q->queuedata = nullb;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 0a54c588e433..27b71a0b72d0 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -342,7 +342,6 @@ struct rbd_device {
342 342
343 struct list_head rq_queue; /* incoming rq queue */ 343 struct list_head rq_queue; /* incoming rq queue */
344 spinlock_t lock; /* queue, flags, open_count */ 344 spinlock_t lock; /* queue, flags, open_count */
345 struct workqueue_struct *rq_wq;
346 struct work_struct rq_work; 345 struct work_struct rq_work;
347 346
348 struct rbd_image_header header; 347 struct rbd_image_header header;
@@ -402,6 +401,8 @@ static struct kmem_cache *rbd_segment_name_cache;
402static int rbd_major; 401static int rbd_major;
403static DEFINE_IDA(rbd_dev_id_ida); 402static DEFINE_IDA(rbd_dev_id_ida);
404 403
404static struct workqueue_struct *rbd_wq;
405
405/* 406/*
406 * Default to false for now, as single-major requires >= 0.75 version of 407 * Default to false for now, as single-major requires >= 0.75 version of
407 * userspace rbd utility. 408 * userspace rbd utility.
@@ -3452,7 +3453,7 @@ static void rbd_request_fn(struct request_queue *q)
3452 } 3453 }
3453 3454
3454 if (queued) 3455 if (queued)
3455 queue_work(rbd_dev->rq_wq, &rbd_dev->rq_work); 3456 queue_work(rbd_wq, &rbd_dev->rq_work);
3456} 3457}
3457 3458
3458/* 3459/*
@@ -3532,7 +3533,7 @@ static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
3532 page_count = (u32) calc_pages_for(offset, length); 3533 page_count = (u32) calc_pages_for(offset, length);
3533 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL); 3534 pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
3534 if (IS_ERR(pages)) 3535 if (IS_ERR(pages))
3535 ret = PTR_ERR(pages); 3536 return PTR_ERR(pages);
3536 3537
3537 ret = -ENOMEM; 3538 ret = -ENOMEM;
3538 obj_request = rbd_obj_request_create(object_name, offset, length, 3539 obj_request = rbd_obj_request_create(object_name, offset, length,
@@ -5242,16 +5243,9 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
5242 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE); 5243 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
5243 set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only); 5244 set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);
5244 5245
5245 rbd_dev->rq_wq = alloc_workqueue("%s", WQ_MEM_RECLAIM, 0,
5246 rbd_dev->disk->disk_name);
5247 if (!rbd_dev->rq_wq) {
5248 ret = -ENOMEM;
5249 goto err_out_mapping;
5250 }
5251
5252 ret = rbd_bus_add_dev(rbd_dev); 5246 ret = rbd_bus_add_dev(rbd_dev);
5253 if (ret) 5247 if (ret)
5254 goto err_out_workqueue; 5248 goto err_out_mapping;
5255 5249
5256 /* Everything's ready. Announce the disk to the world. */ 5250 /* Everything's ready. Announce the disk to the world. */
5257 5251
@@ -5263,9 +5257,6 @@ static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
5263 5257
5264 return ret; 5258 return ret;
5265 5259
5266err_out_workqueue:
5267 destroy_workqueue(rbd_dev->rq_wq);
5268 rbd_dev->rq_wq = NULL;
5269err_out_mapping: 5260err_out_mapping:
5270 rbd_dev_mapping_clear(rbd_dev); 5261 rbd_dev_mapping_clear(rbd_dev);
5271err_out_disk: 5262err_out_disk:
@@ -5512,7 +5503,6 @@ static void rbd_dev_device_release(struct device *dev)
5512{ 5503{
5513 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev); 5504 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5514 5505
5515 destroy_workqueue(rbd_dev->rq_wq);
5516 rbd_free_disk(rbd_dev); 5506 rbd_free_disk(rbd_dev);
5517 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags); 5507 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
5518 rbd_dev_mapping_clear(rbd_dev); 5508 rbd_dev_mapping_clear(rbd_dev);
@@ -5716,11 +5706,21 @@ static int __init rbd_init(void)
5716 if (rc) 5706 if (rc)
5717 return rc; 5707 return rc;
5718 5708
5709 /*
5710 * The number of active work items is limited by the number of
5711 * rbd devices, so leave @max_active at default.
5712 */
5713 rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
5714 if (!rbd_wq) {
5715 rc = -ENOMEM;
5716 goto err_out_slab;
5717 }
5718
5719 if (single_major) { 5719 if (single_major) {
5720 rbd_major = register_blkdev(0, RBD_DRV_NAME); 5720 rbd_major = register_blkdev(0, RBD_DRV_NAME);
5721 if (rbd_major < 0) { 5721 if (rbd_major < 0) {
5722 rc = rbd_major; 5722 rc = rbd_major;
5723 goto err_out_slab; 5723 goto err_out_wq;
5724 } 5724 }
5725 } 5725 }
5726 5726
@@ -5738,6 +5738,8 @@ static int __init rbd_init(void)
5738err_out_blkdev: 5738err_out_blkdev:
5739 if (single_major) 5739 if (single_major)
5740 unregister_blkdev(rbd_major, RBD_DRV_NAME); 5740 unregister_blkdev(rbd_major, RBD_DRV_NAME);
5741err_out_wq:
5742 destroy_workqueue(rbd_wq);
5741err_out_slab: 5743err_out_slab:
5742 rbd_slab_exit(); 5744 rbd_slab_exit();
5743 return rc; 5745 return rc;
@@ -5749,6 +5751,7 @@ static void __exit rbd_exit(void)
5749 rbd_sysfs_cleanup(); 5751 rbd_sysfs_cleanup();
5750 if (single_major) 5752 if (single_major)
5751 unregister_blkdev(rbd_major, RBD_DRV_NAME); 5753 unregister_blkdev(rbd_major, RBD_DRV_NAME);
5754 destroy_workqueue(rbd_wq);
5752 rbd_slab_exit(); 5755 rbd_slab_exit();
5753} 5756}
5754 5757
diff --git a/drivers/block/sunvdc.c b/drivers/block/sunvdc.c
index 756b8ec00f16..0ebadf93b6c5 100644
--- a/drivers/block/sunvdc.c
+++ b/drivers/block/sunvdc.c
@@ -69,8 +69,6 @@ struct vdc_port {
69 u8 vdisk_mtype; 69 u8 vdisk_mtype;
70 70
71 char disk_name[32]; 71 char disk_name[32];
72
73 struct vio_disk_vtoc label;
74}; 72};
75 73
76static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio) 74static inline struct vdc_port *to_vdc_port(struct vio_driver_state *vio)
@@ -710,13 +708,6 @@ static int probe_disk(struct vdc_port *port)
710 if (comp.err) 708 if (comp.err)
711 return comp.err; 709 return comp.err;
712 710
713 err = generic_request(port, VD_OP_GET_VTOC,
714 &port->label, sizeof(port->label));
715 if (err < 0) {
716 printk(KERN_ERR PFX "VD_OP_GET_VTOC returns error %d\n", err);
717 return err;
718 }
719
720 if (vdc_version_supported(port, 1, 1)) { 711 if (vdc_version_supported(port, 1, 1)) {
721 /* vdisk_size should be set during the handshake, if it wasn't 712 /* vdisk_size should be set during the handshake, if it wasn't
722 * then the underlying disk is reserved by another system 713 * then the underlying disk is reserved by another system
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 0e63e8aa8279..3920ee45aa59 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -99,11 +99,12 @@ static ssize_t mem_used_total_show(struct device *dev,
99{ 99{
100 u64 val = 0; 100 u64 val = 0;
101 struct zram *zram = dev_to_zram(dev); 101 struct zram *zram = dev_to_zram(dev);
102 struct zram_meta *meta = zram->meta;
103 102
104 down_read(&zram->init_lock); 103 down_read(&zram->init_lock);
105 if (init_done(zram)) 104 if (init_done(zram)) {
105 struct zram_meta *meta = zram->meta;
106 val = zs_get_total_pages(meta->mem_pool); 106 val = zs_get_total_pages(meta->mem_pool);
107 }
107 up_read(&zram->init_lock); 108 up_read(&zram->init_lock);
108 109
109 return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT); 110 return scnprintf(buf, PAGE_SIZE, "%llu\n", val << PAGE_SHIFT);
@@ -173,16 +174,17 @@ static ssize_t mem_used_max_store(struct device *dev,
173 int err; 174 int err;
174 unsigned long val; 175 unsigned long val;
175 struct zram *zram = dev_to_zram(dev); 176 struct zram *zram = dev_to_zram(dev);
176 struct zram_meta *meta = zram->meta;
177 177
178 err = kstrtoul(buf, 10, &val); 178 err = kstrtoul(buf, 10, &val);
179 if (err || val != 0) 179 if (err || val != 0)
180 return -EINVAL; 180 return -EINVAL;
181 181
182 down_read(&zram->init_lock); 182 down_read(&zram->init_lock);
183 if (init_done(zram)) 183 if (init_done(zram)) {
184 struct zram_meta *meta = zram->meta;
184 atomic_long_set(&zram->stats.max_used_pages, 185 atomic_long_set(&zram->stats.max_used_pages,
185 zs_get_total_pages(meta->mem_pool)); 186 zs_get_total_pages(meta->mem_pool));
187 }
186 up_read(&zram->init_lock); 188 up_read(&zram->init_lock);
187 189
188 return len; 190 return len;
@@ -558,7 +560,8 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
558 } 560 }
559 561
560 if (page_zero_filled(uncmem)) { 562 if (page_zero_filled(uncmem)) {
561 kunmap_atomic(user_mem); 563 if (user_mem)
564 kunmap_atomic(user_mem);
562 /* Free memory associated with this sector now. */ 565 /* Free memory associated with this sector now. */
563 bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value); 566 bit_spin_lock(ZRAM_ACCESS, &meta->table[index].value);
564 zram_free_page(zram, index); 567 zram_free_page(zram, index);
diff --git a/drivers/char/hw_random/pseries-rng.c b/drivers/char/hw_random/pseries-rng.c
index 6226aa08c36a..bcf86f91800a 100644
--- a/drivers/char/hw_random/pseries-rng.c
+++ b/drivers/char/hw_random/pseries-rng.c
@@ -25,18 +25,21 @@
25#include <asm/vio.h> 25#include <asm/vio.h>
26 26
27 27
28static int pseries_rng_data_read(struct hwrng *rng, u32 *data) 28static int pseries_rng_read(struct hwrng *rng, void *data, size_t max, bool wait)
29{ 29{
30 u64 buffer[PLPAR_HCALL_BUFSIZE];
31 size_t size = max < 8 ? max : 8;
30 int rc; 32 int rc;
31 33
32 rc = plpar_hcall(H_RANDOM, (unsigned long *)data); 34 rc = plpar_hcall(H_RANDOM, (unsigned long *)buffer);
33 if (rc != H_SUCCESS) { 35 if (rc != H_SUCCESS) {
34 pr_err_ratelimited("H_RANDOM call failed %d\n", rc); 36 pr_err_ratelimited("H_RANDOM call failed %d\n", rc);
35 return -EIO; 37 return -EIO;
36 } 38 }
39 memcpy(data, buffer, size);
37 40
38 /* The hypervisor interface returns 64 bits */ 41 /* The hypervisor interface returns 64 bits */
39 return 8; 42 return size;
40} 43}
41 44
42/** 45/**
@@ -55,7 +58,7 @@ static unsigned long pseries_rng_get_desired_dma(struct vio_dev *vdev)
55 58
56static struct hwrng pseries_rng = { 59static struct hwrng pseries_rng = {
57 .name = KBUILD_MODNAME, 60 .name = KBUILD_MODNAME,
58 .data_read = pseries_rng_data_read, 61 .read = pseries_rng_read,
59}; 62};
60 63
61static int __init pseries_rng_probe(struct vio_dev *dev, 64static int __init pseries_rng_probe(struct vio_dev *dev,
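
The pseries-rng hunk moves from the legacy .data_read hook to the current .read(rng, data, max, wait) interface and stops handing the caller's buffer straight to the hypercall. A comment-only sketch of why the local buffer matters (PLPAR_HCALL_BUFSIZE is the PAPR hcall return-buffer size; this is my reading of the fix, not the commit text):

	/*
	 * plpar_hcall() always writes PLPAR_HCALL_BUFSIZE (4) 64-bit return values
	 * into the buffer it is given, i.e. 32 bytes.  The old callback passed the
	 * hwrng core's destination pointer directly, with no guarantee it was that
	 * large; the new one lets the hcall fill a correctly sized local array and
	 * copies at most 8 bytes (one random doubleword) back to the caller.
	 */
	u64 buffer[PLPAR_HCALL_BUFSIZE];
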
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 82759cef9043..04645c09fe5e 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1106,7 +1106,7 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
1106 __mix_pool_bytes(r, hash.w, sizeof(hash.w)); 1106 __mix_pool_bytes(r, hash.w, sizeof(hash.w));
1107 spin_unlock_irqrestore(&r->lock, flags); 1107 spin_unlock_irqrestore(&r->lock, flags);
1108 1108
1109 memset(workspace, 0, sizeof(workspace)); 1109 memzero_explicit(workspace, sizeof(workspace));
1110 1110
1111 /* 1111 /*
1112 * In case the hash function has some recognizable output 1112 * In case the hash function has some recognizable output
@@ -1118,7 +1118,7 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
1118 hash.w[2] ^= rol32(hash.w[2], 16); 1118 hash.w[2] ^= rol32(hash.w[2], 16);
1119 1119
1120 memcpy(out, &hash, EXTRACT_SIZE); 1120 memcpy(out, &hash, EXTRACT_SIZE);
1121 memset(&hash, 0, sizeof(hash)); 1121 memzero_explicit(&hash, sizeof(hash));
1122} 1122}
1123 1123
1124/* 1124/*
@@ -1175,7 +1175,7 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
1175 } 1175 }
1176 1176
1177 /* Wipe data just returned from memory */ 1177 /* Wipe data just returned from memory */
1178 memset(tmp, 0, sizeof(tmp)); 1178 memzero_explicit(tmp, sizeof(tmp));
1179 1179
1180 return ret; 1180 return ret;
1181} 1181}
@@ -1218,7 +1218,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
1218 } 1218 }
1219 1219
1220 /* Wipe data just returned from memory */ 1220 /* Wipe data just returned from memory */
1221 memset(tmp, 0, sizeof(tmp)); 1221 memzero_explicit(tmp, sizeof(tmp));
1222 1222
1223 return ret; 1223 return ret;
1224} 1224}
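
The random.c hunk replaces memset() with memzero_explicit() when scrubbing stack buffers that are about to go out of scope: a compiler may legally drop a plain memset of a dying local as a dead store. A minimal sketch of the pattern; the two helper calls are hypothetical placeholders:

	static void handle_secret(void)
	{
		u8 tmp[32];

		produce_secret(tmp, sizeof(tmp));	/* hypothetical producer */
		consume_secret(tmp, sizeof(tmp));	/* hypothetical consumer */

		/*
		 * memset(tmp, 0, sizeof(tmp)) here is a dead store the optimiser
		 * may elide; memzero_explicit() includes a barrier so the wipe
		 * really happens before tmp leaves scope.
		 */
		memzero_explicit(tmp, sizeof(tmp));
	}
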
diff --git a/drivers/char/raw.c b/drivers/char/raw.c
index 0102dc788608..a24891b97547 100644
--- a/drivers/char/raw.c
+++ b/drivers/char/raw.c
@@ -285,7 +285,7 @@ static long raw_ctl_compat_ioctl(struct file *file, unsigned int cmd,
285 285
286static const struct file_operations raw_fops = { 286static const struct file_operations raw_fops = {
287 .read = new_sync_read, 287 .read = new_sync_read,
288 .read_iter = generic_file_read_iter, 288 .read_iter = blkdev_read_iter,
289 .write = new_sync_write, 289 .write = new_sync_write,
290 .write_iter = blkdev_write_iter, 290 .write_iter = blkdev_write_iter,
291 .fsync = blkdev_fsync, 291 .fsync = blkdev_fsync,
diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
index bfa640023e64..cf7a561fad7c 100644
--- a/drivers/char/virtio_console.c
+++ b/drivers/char/virtio_console.c
@@ -1449,8 +1449,6 @@ static int add_port(struct ports_device *portdev, u32 id)
1449 spin_lock_init(&port->outvq_lock); 1449 spin_lock_init(&port->outvq_lock);
1450 init_waitqueue_head(&port->waitqueue); 1450 init_waitqueue_head(&port->waitqueue);
1451 1451
1452 virtio_device_ready(portdev->vdev);
1453
1454 /* Fill the in_vq with buffers so the host can send us data. */ 1452 /* Fill the in_vq with buffers so the host can send us data. */
1455 nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock); 1453 nr_added_bufs = fill_queue(port->in_vq, &port->inbuf_lock);
1456 if (!nr_added_bufs) { 1454 if (!nr_added_bufs) {
@@ -2026,6 +2024,8 @@ static int virtcons_probe(struct virtio_device *vdev)
2026 spin_lock_init(&portdev->ports_lock); 2024 spin_lock_init(&portdev->ports_lock);
2027 INIT_LIST_HEAD(&portdev->ports); 2025 INIT_LIST_HEAD(&portdev->ports);
2028 2026
2027 virtio_device_ready(portdev->vdev);
2028
2029 if (multiport) { 2029 if (multiport) {
2030 unsigned int nr_added_bufs; 2030 unsigned int nr_added_bufs;
2031 2031
diff --git a/drivers/clk/at91/clk-usb.c b/drivers/clk/at91/clk-usb.c
index 24b5b020753a..a23ac0c724f0 100644
--- a/drivers/clk/at91/clk-usb.c
+++ b/drivers/clk/at91/clk-usb.c
@@ -52,29 +52,26 @@ static unsigned long at91sam9x5_clk_usb_recalc_rate(struct clk_hw *hw,
52 52
53 tmp = pmc_read(pmc, AT91_PMC_USB); 53 tmp = pmc_read(pmc, AT91_PMC_USB);
54 usbdiv = (tmp & AT91_PMC_OHCIUSBDIV) >> SAM9X5_USB_DIV_SHIFT; 54 usbdiv = (tmp & AT91_PMC_OHCIUSBDIV) >> SAM9X5_USB_DIV_SHIFT;
55 return parent_rate / (usbdiv + 1); 55
56 return DIV_ROUND_CLOSEST(parent_rate, (usbdiv + 1));
56} 57}
57 58
58static long at91sam9x5_clk_usb_round_rate(struct clk_hw *hw, unsigned long rate, 59static long at91sam9x5_clk_usb_round_rate(struct clk_hw *hw, unsigned long rate,
59 unsigned long *parent_rate) 60 unsigned long *parent_rate)
60{ 61{
61 unsigned long div; 62 unsigned long div;
62 unsigned long bestrate; 63
63 unsigned long tmp; 64 if (!rate)
65 return -EINVAL;
64 66
65 if (rate >= *parent_rate) 67 if (rate >= *parent_rate)
66 return *parent_rate; 68 return *parent_rate;
67 69
68 div = *parent_rate / rate; 70 div = DIV_ROUND_CLOSEST(*parent_rate, rate);
69 if (div >= SAM9X5_USB_MAX_DIV) 71 if (div > SAM9X5_USB_MAX_DIV + 1)
70 return *parent_rate / (SAM9X5_USB_MAX_DIV + 1); 72 div = SAM9X5_USB_MAX_DIV + 1;
71
72 bestrate = *parent_rate / div;
73 tmp = *parent_rate / (div + 1);
74 if (bestrate - rate > rate - tmp)
75 bestrate = tmp;
76 73
77 return bestrate; 74 return DIV_ROUND_CLOSEST(*parent_rate, div);
78} 75}
79 76
80static int at91sam9x5_clk_usb_set_parent(struct clk_hw *hw, u8 index) 77static int at91sam9x5_clk_usb_set_parent(struct clk_hw *hw, u8 index)
@@ -106,9 +103,13 @@ static int at91sam9x5_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate,
106 u32 tmp; 103 u32 tmp;
107 struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw); 104 struct at91sam9x5_clk_usb *usb = to_at91sam9x5_clk_usb(hw);
108 struct at91_pmc *pmc = usb->pmc; 105 struct at91_pmc *pmc = usb->pmc;
109 unsigned long div = parent_rate / rate; 106 unsigned long div;
107
108 if (!rate)
109 return -EINVAL;
110 110
111 if (parent_rate % rate || div < 1 || div >= SAM9X5_USB_MAX_DIV) 111 div = DIV_ROUND_CLOSEST(parent_rate, rate);
112 if (div > SAM9X5_USB_MAX_DIV + 1 || !div)
112 return -EINVAL; 113 return -EINVAL;
113 114
114 tmp = pmc_read(pmc, AT91_PMC_USB) & ~AT91_PMC_OHCIUSBDIV; 115 tmp = pmc_read(pmc, AT91_PMC_USB) & ~AT91_PMC_OHCIUSBDIV;
@@ -253,7 +254,7 @@ static long at91rm9200_clk_usb_round_rate(struct clk_hw *hw, unsigned long rate,
253 254
254 tmp_parent_rate = rate * usb->divisors[i]; 255 tmp_parent_rate = rate * usb->divisors[i];
255 tmp_parent_rate = __clk_round_rate(parent, tmp_parent_rate); 256 tmp_parent_rate = __clk_round_rate(parent, tmp_parent_rate);
256 tmprate = tmp_parent_rate / usb->divisors[i]; 257 tmprate = DIV_ROUND_CLOSEST(tmp_parent_rate, usb->divisors[i]);
257 if (tmprate < rate) 258 if (tmprate < rate)
258 tmpdiff = rate - tmprate; 259 tmpdiff = rate - tmprate;
259 else 260 else
@@ -281,10 +282,10 @@ static int at91rm9200_clk_usb_set_rate(struct clk_hw *hw, unsigned long rate,
281 struct at91_pmc *pmc = usb->pmc; 282 struct at91_pmc *pmc = usb->pmc;
282 unsigned long div; 283 unsigned long div;
283 284
284 if (!rate || parent_rate % rate) 285 if (!rate)
285 return -EINVAL; 286 return -EINVAL;
286 287
287 div = parent_rate / rate; 288 div = DIV_ROUND_CLOSEST(parent_rate, rate);
288 289
289 for (i = 0; i < RM9200_USB_DIV_TAB_SIZE; i++) { 290 for (i = 0; i < RM9200_USB_DIV_TAB_SIZE; i++) {
290 if (usb->divisors[i] == div) { 291 if (usb->divisors[i] == div) {
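
The at91 clk-usb hunk switches the divider maths to DIV_ROUND_CLOSEST() so round_rate/set_rate pick the divider whose output is nearest the request rather than the first one not above it. A small worked example with illustrative rates:

	/* parent_rate = 48 MHz, requested rate = 13 MHz
	 *
	 *   truncating division:  div = 48000000 / 13000000             = 3 -> 16 MHz (3 MHz high)
	 *   DIV_ROUND_CLOSEST():  div = (48000000 + 6500000) / 13000000 = 4 -> 12 MHz (1 MHz low)
	 */
	unsigned long div = DIV_ROUND_CLOSEST(48000000UL, 13000000UL);	/* == 4 */
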
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index 18a9de29df0e..c0a842b335c5 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -263,6 +263,14 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
263 if (!rate) 263 if (!rate)
264 rate = 1; 264 rate = 1;
265 265
266 /* if read only, just return current value */
267 if (divider->flags & CLK_DIVIDER_READ_ONLY) {
268 bestdiv = readl(divider->reg) >> divider->shift;
269 bestdiv &= div_mask(divider);
270 bestdiv = _get_div(divider, bestdiv);
271 return bestdiv;
272 }
273
266 maxdiv = _get_maxdiv(divider); 274 maxdiv = _get_maxdiv(divider);
267 275
268 if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT)) { 276 if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT)) {
@@ -361,11 +369,6 @@ const struct clk_ops clk_divider_ops = {
361}; 369};
362EXPORT_SYMBOL_GPL(clk_divider_ops); 370EXPORT_SYMBOL_GPL(clk_divider_ops);
363 371
364const struct clk_ops clk_divider_ro_ops = {
365 .recalc_rate = clk_divider_recalc_rate,
366};
367EXPORT_SYMBOL_GPL(clk_divider_ro_ops);
368
369static struct clk *_register_divider(struct device *dev, const char *name, 372static struct clk *_register_divider(struct device *dev, const char *name,
370 const char *parent_name, unsigned long flags, 373 const char *parent_name, unsigned long flags,
371 void __iomem *reg, u8 shift, u8 width, 374 void __iomem *reg, u8 shift, u8 width,
@@ -391,10 +394,7 @@ static struct clk *_register_divider(struct device *dev, const char *name,
391 } 394 }
392 395
393 init.name = name; 396 init.name = name;
394 if (clk_divider_flags & CLK_DIVIDER_READ_ONLY) 397 init.ops = &clk_divider_ops;
395 init.ops = &clk_divider_ro_ops;
396 else
397 init.ops = &clk_divider_ops;
398 init.flags = flags | CLK_IS_BASIC; 398 init.flags = flags | CLK_IS_BASIC;
399 init.parent_names = (parent_name ? &parent_name: NULL); 399 init.parent_names = (parent_name ? &parent_name: NULL);
400 init.num_parents = (parent_name ? 1 : 0); 400 init.num_parents = (parent_name ? 1 : 0);
diff --git a/drivers/clk/pxa/clk-pxa27x.c b/drivers/clk/pxa/clk-pxa27x.c
index b345cc791e5d..88b9fe13fa44 100644
--- a/drivers/clk/pxa/clk-pxa27x.c
+++ b/drivers/clk/pxa/clk-pxa27x.c
@@ -322,7 +322,7 @@ static unsigned long clk_pxa27x_memory_get_rate(struct clk_hw *hw,
322 unsigned long ccsr = CCSR; 322 unsigned long ccsr = CCSR;
323 323
324 osc_forced = ccsr & (1 << CCCR_CPDIS_BIT); 324 osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
325 a = cccr & CCCR_A_BIT; 325 a = cccr & (1 << CCCR_A_BIT);
326 l = ccsr & CCSR_L_MASK; 326 l = ccsr & CCSR_L_MASK;
327 327
328 if (osc_forced || a) 328 if (osc_forced || a)
@@ -341,7 +341,7 @@ static u8 clk_pxa27x_memory_get_parent(struct clk_hw *hw)
341 unsigned long ccsr = CCSR; 341 unsigned long ccsr = CCSR;
342 342
343 osc_forced = ccsr & (1 << CCCR_CPDIS_BIT); 343 osc_forced = ccsr & (1 << CCCR_CPDIS_BIT);
344 a = cccr & CCCR_A_BIT; 344 a = cccr & (1 << CCCR_A_BIT);
345 if (osc_forced) 345 if (osc_forced)
346 return PXA_MEM_13Mhz; 346 return PXA_MEM_13Mhz;
347 if (a) 347 if (a)
diff --git a/drivers/clk/qcom/mmcc-apq8084.c b/drivers/clk/qcom/mmcc-apq8084.c
index dab988ab8cf1..157139a5c1ca 100644
--- a/drivers/clk/qcom/mmcc-apq8084.c
+++ b/drivers/clk/qcom/mmcc-apq8084.c
@@ -3122,7 +3122,7 @@ static struct clk_regmap *mmcc_apq8084_clocks[] = {
3122 [ESC1_CLK_SRC] = &esc1_clk_src.clkr, 3122 [ESC1_CLK_SRC] = &esc1_clk_src.clkr,
3123 [HDMI_CLK_SRC] = &hdmi_clk_src.clkr, 3123 [HDMI_CLK_SRC] = &hdmi_clk_src.clkr,
3124 [VSYNC_CLK_SRC] = &vsync_clk_src.clkr, 3124 [VSYNC_CLK_SRC] = &vsync_clk_src.clkr,
3125 [RBCPR_CLK_SRC] = &rbcpr_clk_src.clkr, 3125 [MMSS_RBCPR_CLK_SRC] = &rbcpr_clk_src.clkr,
3126 [RBBMTIMER_CLK_SRC] = &rbbmtimer_clk_src.clkr, 3126 [RBBMTIMER_CLK_SRC] = &rbbmtimer_clk_src.clkr,
3127 [MAPLE_CLK_SRC] = &maple_clk_src.clkr, 3127 [MAPLE_CLK_SRC] = &maple_clk_src.clkr,
3128 [VDP_CLK_SRC] = &vdp_clk_src.clkr, 3128 [VDP_CLK_SRC] = &vdp_clk_src.clkr,
diff --git a/drivers/clk/rockchip/clk.c b/drivers/clk/rockchip/clk.c
index 1e68bff481b8..880a266f0143 100644
--- a/drivers/clk/rockchip/clk.c
+++ b/drivers/clk/rockchip/clk.c
@@ -90,9 +90,7 @@ static struct clk *rockchip_clk_register_branch(const char *name,
90 div->width = div_width; 90 div->width = div_width;
91 div->lock = lock; 91 div->lock = lock;
92 div->table = div_table; 92 div->table = div_table;
93 div_ops = (div_flags & CLK_DIVIDER_READ_ONLY) 93 div_ops = &clk_divider_ops;
94 ? &clk_divider_ro_ops
95 : &clk_divider_ops;
96 } 94 }
97 95
98 clk = clk_register_composite(NULL, name, parent_names, num_parents, 96 clk = clk_register_composite(NULL, name, parent_names, num_parents,
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index 2133f9d59d06..43005d4d3348 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -660,11 +660,11 @@ static bool __init
660arch_timer_probed(int type, const struct of_device_id *matches) 660arch_timer_probed(int type, const struct of_device_id *matches)
661{ 661{
662 struct device_node *dn; 662 struct device_node *dn;
663 bool probed = false; 663 bool probed = true;
664 664
665 dn = of_find_matching_node(NULL, matches); 665 dn = of_find_matching_node(NULL, matches);
666 if (dn && of_device_is_available(dn) && (arch_timers_present & type)) 666 if (dn && of_device_is_available(dn) && !(arch_timers_present & type))
667 probed = true; 667 probed = false;
668 of_node_put(dn); 668 of_node_put(dn);
669 669
670 return probed; 670 return probed;
diff --git a/drivers/clocksource/sun4i_timer.c b/drivers/clocksource/sun4i_timer.c
index efb17c3ee120..f4a9c0058b4d 100644
--- a/drivers/clocksource/sun4i_timer.c
+++ b/drivers/clocksource/sun4i_timer.c
@@ -182,6 +182,12 @@ static void __init sun4i_timer_init(struct device_node *node)
182 /* Make sure timer is stopped before playing with interrupts */ 182 /* Make sure timer is stopped before playing with interrupts */
183 sun4i_clkevt_time_stop(0); 183 sun4i_clkevt_time_stop(0);
184 184
185 sun4i_clockevent.cpumask = cpu_possible_mask;
186 sun4i_clockevent.irq = irq;
187
188 clockevents_config_and_register(&sun4i_clockevent, rate,
189 TIMER_SYNC_TICKS, 0xffffffff);
190
185 ret = setup_irq(irq, &sun4i_timer_irq); 191 ret = setup_irq(irq, &sun4i_timer_irq);
186 if (ret) 192 if (ret)
187 pr_warn("failed to setup irq %d\n", irq); 193 pr_warn("failed to setup irq %d\n", irq);
@@ -189,12 +195,6 @@ static void __init sun4i_timer_init(struct device_node *node)
189 /* Enable timer0 interrupt */ 195 /* Enable timer0 interrupt */
190 val = readl(timer_base + TIMER_IRQ_EN_REG); 196 val = readl(timer_base + TIMER_IRQ_EN_REG);
191 writel(val | TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_EN_REG); 197 writel(val | TIMER_IRQ_EN(0), timer_base + TIMER_IRQ_EN_REG);
192
193 sun4i_clockevent.cpumask = cpu_possible_mask;
194 sun4i_clockevent.irq = irq;
195
196 clockevents_config_and_register(&sun4i_clockevent, rate,
197 TIMER_SYNC_TICKS, 0xffffffff);
198} 198}
199CLOCKSOURCE_OF_DECLARE(sun4i, "allwinner,sun4i-a10-timer", 199CLOCKSOURCE_OF_DECLARE(sun4i, "allwinner,sun4i-a10-timer",
200 sun4i_timer_init); 200 sun4i_timer_init);
diff --git a/drivers/cpufreq/cpufreq-dt.c b/drivers/cpufreq/cpufreq-dt.c
index 6bbb8b913446..f657c571b18e 100644
--- a/drivers/cpufreq/cpufreq-dt.c
+++ b/drivers/cpufreq/cpufreq-dt.c
@@ -18,6 +18,7 @@
18#include <linux/cpu.h> 18#include <linux/cpu.h>
19#include <linux/cpu_cooling.h> 19#include <linux/cpu_cooling.h>
20#include <linux/cpufreq.h> 20#include <linux/cpufreq.h>
21#include <linux/cpufreq-dt.h>
21#include <linux/cpumask.h> 22#include <linux/cpumask.h>
22#include <linux/err.h> 23#include <linux/err.h>
23#include <linux/module.h> 24#include <linux/module.h>
@@ -146,8 +147,8 @@ try_again:
146 goto try_again; 147 goto try_again;
147 } 148 }
148 149
149 dev_warn(cpu_dev, "failed to get cpu%d regulator: %ld\n", 150 dev_dbg(cpu_dev, "no regulator for cpu%d: %ld\n",
150 cpu, PTR_ERR(cpu_reg)); 151 cpu, PTR_ERR(cpu_reg));
151 } 152 }
152 153
153 cpu_clk = clk_get(cpu_dev, NULL); 154 cpu_clk = clk_get(cpu_dev, NULL);
@@ -165,8 +166,8 @@ try_again:
165 if (ret == -EPROBE_DEFER) 166 if (ret == -EPROBE_DEFER)
166 dev_dbg(cpu_dev, "cpu%d clock not ready, retry\n", cpu); 167 dev_dbg(cpu_dev, "cpu%d clock not ready, retry\n", cpu);
167 else 168 else
168 dev_err(cpu_dev, "failed to get cpu%d clock: %d\n", ret, 169 dev_err(cpu_dev, "failed to get cpu%d clock: %d\n", cpu,
169 cpu); 170 ret);
170 } else { 171 } else {
171 *cdev = cpu_dev; 172 *cdev = cpu_dev;
172 *creg = cpu_reg; 173 *creg = cpu_reg;
@@ -178,6 +179,7 @@ try_again:
178 179
179static int cpufreq_init(struct cpufreq_policy *policy) 180static int cpufreq_init(struct cpufreq_policy *policy)
180{ 181{
182 struct cpufreq_dt_platform_data *pd;
181 struct cpufreq_frequency_table *freq_table; 183 struct cpufreq_frequency_table *freq_table;
182 struct thermal_cooling_device *cdev; 184 struct thermal_cooling_device *cdev;
183 struct device_node *np; 185 struct device_node *np;
@@ -185,6 +187,7 @@ static int cpufreq_init(struct cpufreq_policy *policy)
185 struct device *cpu_dev; 187 struct device *cpu_dev;
186 struct regulator *cpu_reg; 188 struct regulator *cpu_reg;
187 struct clk *cpu_clk; 189 struct clk *cpu_clk;
190 unsigned long min_uV = ~0, max_uV = 0;
188 unsigned int transition_latency; 191 unsigned int transition_latency;
189 int ret; 192 int ret;
190 193
@@ -204,16 +207,10 @@ static int cpufreq_init(struct cpufreq_policy *policy)
204 /* OPPs might be populated at runtime, don't check for error here */ 207 /* OPPs might be populated at runtime, don't check for error here */
205 of_init_opp_table(cpu_dev); 208 of_init_opp_table(cpu_dev);
206 209
207 ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
208 if (ret) {
209 dev_err(cpu_dev, "failed to init cpufreq table: %d\n", ret);
210 goto out_put_node;
211 }
212
213 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 210 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
214 if (!priv) { 211 if (!priv) {
215 ret = -ENOMEM; 212 ret = -ENOMEM;
216 goto out_free_table; 213 goto out_put_node;
217 } 214 }
218 215
219 of_property_read_u32(np, "voltage-tolerance", &priv->voltage_tolerance); 216 of_property_read_u32(np, "voltage-tolerance", &priv->voltage_tolerance);
@@ -222,30 +219,51 @@ static int cpufreq_init(struct cpufreq_policy *policy)
222 transition_latency = CPUFREQ_ETERNAL; 219 transition_latency = CPUFREQ_ETERNAL;
223 220
224 if (!IS_ERR(cpu_reg)) { 221 if (!IS_ERR(cpu_reg)) {
225 struct dev_pm_opp *opp; 222 unsigned long opp_freq = 0;
226 unsigned long min_uV, max_uV;
227 int i;
228 223
229 /* 224 /*
230 * OPP is maintained in order of increasing frequency, and 225 * Disable any OPPs where the connected regulator isn't able to
231 * freq_table initialised from OPP is therefore sorted in the 226 * provide the specified voltage and record minimum and maximum
232 * same order. 227 * voltage levels.
233 */ 228 */
234 for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++) 229 while (1) {
235 ; 230 struct dev_pm_opp *opp;
236 rcu_read_lock(); 231 unsigned long opp_uV, tol_uV;
237 opp = dev_pm_opp_find_freq_exact(cpu_dev, 232
238 freq_table[0].frequency * 1000, true); 233 rcu_read_lock();
239 min_uV = dev_pm_opp_get_voltage(opp); 234 opp = dev_pm_opp_find_freq_ceil(cpu_dev, &opp_freq);
240 opp = dev_pm_opp_find_freq_exact(cpu_dev, 235 if (IS_ERR(opp)) {
241 freq_table[i-1].frequency * 1000, true); 236 rcu_read_unlock();
242 max_uV = dev_pm_opp_get_voltage(opp); 237 break;
243 rcu_read_unlock(); 238 }
239 opp_uV = dev_pm_opp_get_voltage(opp);
240 rcu_read_unlock();
241
242 tol_uV = opp_uV * priv->voltage_tolerance / 100;
243 if (regulator_is_supported_voltage(cpu_reg, opp_uV,
244 opp_uV + tol_uV)) {
245 if (opp_uV < min_uV)
246 min_uV = opp_uV;
247 if (opp_uV > max_uV)
248 max_uV = opp_uV;
249 } else {
250 dev_pm_opp_disable(cpu_dev, opp_freq);
251 }
252
253 opp_freq++;
254 }
255
244 ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV); 256 ret = regulator_set_voltage_time(cpu_reg, min_uV, max_uV);
245 if (ret > 0) 257 if (ret > 0)
246 transition_latency += ret * 1000; 258 transition_latency += ret * 1000;
247 } 259 }
248 260
261 ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
262 if (ret) {
263 pr_err("failed to init cpufreq table: %d\n", ret);
264 goto out_free_priv;
265 }
266
249 /* 267 /*
250 * For now, just loading the cooling device; 268 * For now, just loading the cooling device;
251 * thermal DT code takes care of matching them. 269 * thermal DT code takes care of matching them.
@@ -265,9 +283,18 @@ static int cpufreq_init(struct cpufreq_policy *policy)
265 policy->driver_data = priv; 283 policy->driver_data = priv;
266 284
267 policy->clk = cpu_clk; 285 policy->clk = cpu_clk;
268 ret = cpufreq_generic_init(policy, freq_table, transition_latency); 286 ret = cpufreq_table_validate_and_show(policy, freq_table);
269 if (ret) 287 if (ret) {
288 dev_err(cpu_dev, "%s: invalid frequency table: %d\n", __func__,
289 ret);
270 goto out_cooling_unregister; 290 goto out_cooling_unregister;
291 }
292
293 policy->cpuinfo.transition_latency = transition_latency;
294
295 pd = cpufreq_get_driver_data();
296 if (!pd || !pd->independent_clocks)
297 cpumask_setall(policy->cpus);
271 298
272 of_node_put(np); 299 of_node_put(np);
273 300
@@ -275,9 +302,9 @@ static int cpufreq_init(struct cpufreq_policy *policy)
275 302
276out_cooling_unregister: 303out_cooling_unregister:
277 cpufreq_cooling_unregister(priv->cdev); 304 cpufreq_cooling_unregister(priv->cdev);
278 kfree(priv);
279out_free_table:
280 dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table); 305 dev_pm_opp_free_cpufreq_table(cpu_dev, &freq_table);
306out_free_priv:
307 kfree(priv);
281out_put_node: 308out_put_node:
282 of_node_put(np); 309 of_node_put(np);
283out_put_reg_clk: 310out_put_reg_clk:
@@ -335,6 +362,8 @@ static int dt_cpufreq_probe(struct platform_device *pdev)
335 if (!IS_ERR(cpu_reg)) 362 if (!IS_ERR(cpu_reg))
336 regulator_put(cpu_reg); 363 regulator_put(cpu_reg);
337 364
365 dt_cpufreq_driver.driver_data = dev_get_platdata(&pdev->dev);
366
338 ret = cpufreq_register_driver(&dt_cpufreq_driver); 367 ret = cpufreq_register_driver(&dt_cpufreq_driver);
339 if (ret) 368 if (ret)
340 dev_err(cpu_dev, "failed register driver: %d\n", ret); 369 dev_err(cpu_dev, "failed register driver: %d\n", ret);
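
The new <linux/cpufreq-dt.h> platform data is what a platform hands to the "cpufreq-dt" device so cpufreq_init() can decide whether to set policy->cpus for all CPUs. A minimal registration sketch, assuming the platform device name matches the driver and that an initcall is an acceptable place to register it:

	#include <linux/cpufreq-dt.h>
	#include <linux/err.h>
	#include <linux/init.h>
	#include <linux/platform_device.h>

	static struct cpufreq_dt_platform_data my_cpufreq_pd = {
		.independent_clocks = true,	/* per-CPU clocks */
	};

	static int __init my_register_cpufreq_dt(void)
	{
		struct platform_device *pdev;

		/* cpufreq_init() above recovers this pdata through
		 * cpufreq_get_driver_data(). */
		pdev = platform_device_register_data(NULL, "cpufreq-dt", -1,
						     &my_cpufreq_pd,
						     sizeof(my_cpufreq_pd));
		return PTR_ERR_OR_ZERO(pdev);
	}
	device_initcall(my_register_cpufreq_dt);
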
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 24bf76fba141..4473eba1d6b0 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -512,7 +512,18 @@ show_one(cpuinfo_max_freq, cpuinfo.max_freq);
512show_one(cpuinfo_transition_latency, cpuinfo.transition_latency); 512show_one(cpuinfo_transition_latency, cpuinfo.transition_latency);
513show_one(scaling_min_freq, min); 513show_one(scaling_min_freq, min);
514show_one(scaling_max_freq, max); 514show_one(scaling_max_freq, max);
515show_one(scaling_cur_freq, cur); 515
516static ssize_t show_scaling_cur_freq(
517 struct cpufreq_policy *policy, char *buf)
518{
519 ssize_t ret;
520
521 if (cpufreq_driver && cpufreq_driver->setpolicy && cpufreq_driver->get)
522 ret = sprintf(buf, "%u\n", cpufreq_driver->get(policy->cpu));
523 else
524 ret = sprintf(buf, "%u\n", policy->cur);
525 return ret;
526}
516 527
517static int cpufreq_set_policy(struct cpufreq_policy *policy, 528static int cpufreq_set_policy(struct cpufreq_policy *policy,
518 struct cpufreq_policy *new_policy); 529 struct cpufreq_policy *new_policy);
@@ -906,11 +917,11 @@ static int cpufreq_add_dev_interface(struct cpufreq_policy *policy,
906 if (ret) 917 if (ret)
907 goto err_out_kobj_put; 918 goto err_out_kobj_put;
908 } 919 }
909 if (has_target()) { 920
910 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr); 921 ret = sysfs_create_file(&policy->kobj, &scaling_cur_freq.attr);
911 if (ret) 922 if (ret)
912 goto err_out_kobj_put; 923 goto err_out_kobj_put;
913 } 924
914 if (cpufreq_driver->bios_limit) { 925 if (cpufreq_driver->bios_limit) {
915 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr); 926 ret = sysfs_create_file(&policy->kobj, &bios_limit.attr);
916 if (ret) 927 if (ret)
@@ -1011,7 +1022,8 @@ static struct cpufreq_policy *cpufreq_policy_restore(unsigned int cpu)
1011 1022
1012 read_unlock_irqrestore(&cpufreq_driver_lock, flags); 1023 read_unlock_irqrestore(&cpufreq_driver_lock, flags);
1013 1024
1014 policy->governor = NULL; 1025 if (policy)
1026 policy->governor = NULL;
1015 1027
1016 return policy; 1028 return policy;
1017} 1029}
@@ -1731,6 +1743,21 @@ const char *cpufreq_get_current_driver(void)
1731} 1743}
1732EXPORT_SYMBOL_GPL(cpufreq_get_current_driver); 1744EXPORT_SYMBOL_GPL(cpufreq_get_current_driver);
1733 1745
1746/**
1747 * cpufreq_get_driver_data - return current driver data
1748 *
1749 * Return the private data of the currently loaded cpufreq
1750 * driver, or NULL if no cpufreq driver is loaded.
1751 */
1752void *cpufreq_get_driver_data(void)
1753{
1754 if (cpufreq_driver)
1755 return cpufreq_driver->driver_data;
1756
1757 return NULL;
1758}
1759EXPORT_SYMBOL_GPL(cpufreq_get_driver_data);
1760
1734/********************************************************************* 1761/*********************************************************************
1735 * NOTIFIER LISTS INTERFACE * 1762 * NOTIFIER LISTS INTERFACE *
1736 *********************************************************************/ 1763 *********************************************************************/
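
A condensed sketch of how the new cpufreq_get_driver_data() pairs with a driver's .driver_data field; struct my_pdata, its field and the helper names are hypothetical, and callbacks are omitted for brevity:

	#include <linux/cpufreq.h>
	#include <linux/cpumask.h>

	struct my_pdata {
		bool independent_clocks;	/* hypothetical field */
	};

	static struct cpufreq_driver my_driver;	/* callbacks omitted */

	/* Registration side: stash the platform data on the driver. */
	static int my_register(struct my_pdata *pdata)
	{
		my_driver.driver_data = pdata;
		return cpufreq_register_driver(&my_driver);
	}

	/* ->init() side: recover it, tolerating a NULL return. */
	static void my_apply_pdata(struct cpufreq_policy *policy)
	{
		struct my_pdata *pd = cpufreq_get_driver_data();

		if (!pd || !pd->independent_clocks)
			cpumask_setall(policy->cpus);
	}
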
diff --git a/drivers/cpufreq/highbank-cpufreq.c b/drivers/cpufreq/highbank-cpufreq.c
index ec399ad2f059..1608f7105c9f 100644
--- a/drivers/cpufreq/highbank-cpufreq.c
+++ b/drivers/cpufreq/highbank-cpufreq.c
@@ -19,7 +19,7 @@
19#include <linux/cpu.h> 19#include <linux/cpu.h>
20#include <linux/err.h> 20#include <linux/err.h>
21#include <linux/of.h> 21#include <linux/of.h>
22#include <linux/mailbox.h> 22#include <linux/pl320-ipc.h>
23#include <linux/platform_device.h> 23#include <linux/platform_device.h>
24 24
25#define HB_CPUFREQ_CHANGE_NOTE 0x80000001 25#define HB_CPUFREQ_CHANGE_NOTE 0x80000001
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 0668b389c516..27bb6d3877ed 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -52,6 +52,17 @@ static inline int32_t div_fp(int32_t x, int32_t y)
52 return div_s64((int64_t)x << FRAC_BITS, y); 52 return div_s64((int64_t)x << FRAC_BITS, y);
53} 53}
54 54
55static inline int ceiling_fp(int32_t x)
56{
57 int mask, ret;
58
59 ret = fp_toint(x);
60 mask = (1 << FRAC_BITS) - 1;
61 if (x & mask)
62 ret += 1;
63 return ret;
64}
65
55struct sample { 66struct sample {
56 int32_t core_pct_busy; 67 int32_t core_pct_busy;
57 u64 aperf; 68 u64 aperf;
@@ -64,6 +75,7 @@ struct pstate_data {
64 int current_pstate; 75 int current_pstate;
65 int min_pstate; 76 int min_pstate;
66 int max_pstate; 77 int max_pstate;
78 int scaling;
67 int turbo_pstate; 79 int turbo_pstate;
68}; 80};
69 81
@@ -113,6 +125,7 @@ struct pstate_funcs {
113 int (*get_max)(void); 125 int (*get_max)(void);
114 int (*get_min)(void); 126 int (*get_min)(void);
115 int (*get_turbo)(void); 127 int (*get_turbo)(void);
128 int (*get_scaling)(void);
116 void (*set)(struct cpudata*, int pstate); 129 void (*set)(struct cpudata*, int pstate);
117 void (*get_vid)(struct cpudata *); 130 void (*get_vid)(struct cpudata *);
118}; 131};
@@ -138,6 +151,7 @@ struct perf_limits {
138 151
139static struct perf_limits limits = { 152static struct perf_limits limits = {
140 .no_turbo = 0, 153 .no_turbo = 0,
154 .turbo_disabled = 0,
141 .max_perf_pct = 100, 155 .max_perf_pct = 100,
142 .max_perf = int_tofp(1), 156 .max_perf = int_tofp(1),
143 .min_perf_pct = 0, 157 .min_perf_pct = 0,
@@ -218,6 +232,18 @@ static inline void intel_pstate_reset_all_pid(void)
218 } 232 }
219} 233}
220 234
235static inline void update_turbo_state(void)
236{
237 u64 misc_en;
238 struct cpudata *cpu;
239
240 cpu = all_cpu_data[0];
241 rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
242 limits.turbo_disabled =
243 (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
244 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
245}
246
221/************************** debugfs begin ************************/ 247/************************** debugfs begin ************************/
222static int pid_param_set(void *data, u64 val) 248static int pid_param_set(void *data, u64 val)
223{ 249{
@@ -274,6 +300,20 @@ static void __init intel_pstate_debug_expose_params(void)
274 return sprintf(buf, "%u\n", limits.object); \ 300 return sprintf(buf, "%u\n", limits.object); \
275 } 301 }
276 302
303static ssize_t show_no_turbo(struct kobject *kobj,
304 struct attribute *attr, char *buf)
305{
306 ssize_t ret;
307
308 update_turbo_state();
309 if (limits.turbo_disabled)
310 ret = sprintf(buf, "%u\n", limits.turbo_disabled);
311 else
312 ret = sprintf(buf, "%u\n", limits.no_turbo);
313
314 return ret;
315}
316
277static ssize_t store_no_turbo(struct kobject *a, struct attribute *b, 317static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
278 const char *buf, size_t count) 318 const char *buf, size_t count)
279{ 319{
@@ -283,11 +323,14 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
283 ret = sscanf(buf, "%u", &input); 323 ret = sscanf(buf, "%u", &input);
284 if (ret != 1) 324 if (ret != 1)
285 return -EINVAL; 325 return -EINVAL;
286 limits.no_turbo = clamp_t(int, input, 0 , 1); 326
327 update_turbo_state();
287 if (limits.turbo_disabled) { 328 if (limits.turbo_disabled) {
288 pr_warn("Turbo disabled by BIOS or unavailable on processor\n"); 329 pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
289 limits.no_turbo = limits.turbo_disabled; 330 return -EPERM;
290 } 331 }
332 limits.no_turbo = clamp_t(int, input, 0, 1);
333
291 return count; 334 return count;
292} 335}
293 336
@@ -323,7 +366,6 @@ static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
323 return count; 366 return count;
324} 367}
325 368
326show_one(no_turbo, no_turbo);
327show_one(max_perf_pct, max_perf_pct); 369show_one(max_perf_pct, max_perf_pct);
328show_one(min_perf_pct, min_perf_pct); 370show_one(min_perf_pct, min_perf_pct);
329 371
@@ -394,7 +436,7 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
394 cpudata->vid.ratio); 436 cpudata->vid.ratio);
395 437
396 vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max); 438 vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
397 vid = fp_toint(vid_fp); 439 vid = ceiling_fp(vid_fp);
398 440
399 if (pstate > cpudata->pstate.max_pstate) 441 if (pstate > cpudata->pstate.max_pstate)
400 vid = cpudata->vid.turbo; 442 vid = cpudata->vid.turbo;
@@ -404,6 +446,22 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
404 wrmsrl(MSR_IA32_PERF_CTL, val); 446 wrmsrl(MSR_IA32_PERF_CTL, val);
405} 447}
406 448
449#define BYT_BCLK_FREQS 5
450static int byt_freq_table[BYT_BCLK_FREQS] = { 833, 1000, 1333, 1167, 800};
451
452static int byt_get_scaling(void)
453{
454 u64 value;
455 int i;
456
457 rdmsrl(MSR_FSB_FREQ, value);
458 i = value & 0x3;
459
460 BUG_ON(i > BYT_BCLK_FREQS);
461
462 return byt_freq_table[i] * 100;
463}
464
407static void byt_get_vid(struct cpudata *cpudata) 465static void byt_get_vid(struct cpudata *cpudata)
408{ 466{
409 u64 value; 467 u64 value;
@@ -449,6 +507,11 @@ static int core_get_turbo_pstate(void)
449 return ret; 507 return ret;
450} 508}
451 509
510static inline int core_get_scaling(void)
511{
512 return 100000;
513}
514
452static void core_set_pstate(struct cpudata *cpudata, int pstate) 515static void core_set_pstate(struct cpudata *cpudata, int pstate)
453{ 516{
454 u64 val; 517 u64 val;
@@ -473,6 +536,7 @@ static struct cpu_defaults core_params = {
473 .get_max = core_get_max_pstate, 536 .get_max = core_get_max_pstate,
474 .get_min = core_get_min_pstate, 537 .get_min = core_get_min_pstate,
475 .get_turbo = core_get_turbo_pstate, 538 .get_turbo = core_get_turbo_pstate,
539 .get_scaling = core_get_scaling,
476 .set = core_set_pstate, 540 .set = core_set_pstate,
477 }, 541 },
478}; 542};
@@ -491,6 +555,7 @@ static struct cpu_defaults byt_params = {
491 .get_min = byt_get_min_pstate, 555 .get_min = byt_get_min_pstate,
492 .get_turbo = byt_get_turbo_pstate, 556 .get_turbo = byt_get_turbo_pstate,
493 .set = byt_set_pstate, 557 .set = byt_set_pstate,
558 .get_scaling = byt_get_scaling,
494 .get_vid = byt_get_vid, 559 .get_vid = byt_get_vid,
495 }, 560 },
496}; 561};
@@ -501,7 +566,7 @@ static void intel_pstate_get_min_max(struct cpudata *cpu, int *min, int *max)
501 int max_perf_adj; 566 int max_perf_adj;
502 int min_perf; 567 int min_perf;
503 568
504 if (limits.no_turbo) 569 if (limits.no_turbo || limits.turbo_disabled)
505 max_perf = cpu->pstate.max_pstate; 570 max_perf = cpu->pstate.max_pstate;
506 571
507 max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf)); 572 max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits.max_perf));
@@ -516,6 +581,8 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
516{ 581{
517 int max_perf, min_perf; 582 int max_perf, min_perf;
518 583
584 update_turbo_state();
585
519 intel_pstate_get_min_max(cpu, &min_perf, &max_perf); 586 intel_pstate_get_min_max(cpu, &min_perf, &max_perf);
520 587
521 pstate = clamp_t(int, pstate, min_perf, max_perf); 588 pstate = clamp_t(int, pstate, min_perf, max_perf);
@@ -523,7 +590,7 @@ static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
523 if (pstate == cpu->pstate.current_pstate) 590 if (pstate == cpu->pstate.current_pstate)
524 return; 591 return;
525 592
526 trace_cpu_frequency(pstate * 100000, cpu->cpu); 593 trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
527 594
528 cpu->pstate.current_pstate = pstate; 595 cpu->pstate.current_pstate = pstate;
529 596
@@ -535,6 +602,7 @@ static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
535 cpu->pstate.min_pstate = pstate_funcs.get_min(); 602 cpu->pstate.min_pstate = pstate_funcs.get_min();
536 cpu->pstate.max_pstate = pstate_funcs.get_max(); 603 cpu->pstate.max_pstate = pstate_funcs.get_max();
537 cpu->pstate.turbo_pstate = pstate_funcs.get_turbo(); 604 cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
605 cpu->pstate.scaling = pstate_funcs.get_scaling();
538 606
539 if (pstate_funcs.get_vid) 607 if (pstate_funcs.get_vid)
540 pstate_funcs.get_vid(cpu); 608 pstate_funcs.get_vid(cpu);
@@ -550,7 +618,9 @@ static inline void intel_pstate_calc_busy(struct cpudata *cpu)
550 core_pct = div64_u64(core_pct, int_tofp(sample->mperf)); 618 core_pct = div64_u64(core_pct, int_tofp(sample->mperf));
551 619
552 sample->freq = fp_toint( 620 sample->freq = fp_toint(
553 mul_fp(int_tofp(cpu->pstate.max_pstate * 1000), core_pct)); 621 mul_fp(int_tofp(
622 cpu->pstate.max_pstate * cpu->pstate.scaling / 100),
623 core_pct));
554 624
555 sample->core_pct_busy = (int32_t)core_pct; 625 sample->core_pct_busy = (int32_t)core_pct;
556} 626}
@@ -671,7 +741,9 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
671{ 741{
672 struct cpudata *cpu; 742 struct cpudata *cpu;
673 743
674 all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata), GFP_KERNEL); 744 if (!all_cpu_data[cpunum])
745 all_cpu_data[cpunum] = kzalloc(sizeof(struct cpudata),
746 GFP_KERNEL);
675 if (!all_cpu_data[cpunum]) 747 if (!all_cpu_data[cpunum])
676 return -ENOMEM; 748 return -ENOMEM;
677 749
@@ -714,9 +786,10 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
714 if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) { 786 if (policy->policy == CPUFREQ_POLICY_PERFORMANCE) {
715 limits.min_perf_pct = 100; 787 limits.min_perf_pct = 100;
716 limits.min_perf = int_tofp(1); 788 limits.min_perf = int_tofp(1);
789 limits.max_policy_pct = 100;
717 limits.max_perf_pct = 100; 790 limits.max_perf_pct = 100;
718 limits.max_perf = int_tofp(1); 791 limits.max_perf = int_tofp(1);
719 limits.no_turbo = limits.turbo_disabled; 792 limits.no_turbo = 0;
720 return 0; 793 return 0;
721 } 794 }
722 limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq; 795 limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
@@ -751,15 +824,12 @@ static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
751 824
752 del_timer_sync(&all_cpu_data[cpu_num]->timer); 825 del_timer_sync(&all_cpu_data[cpu_num]->timer);
753 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate); 826 intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
754 kfree(all_cpu_data[cpu_num]);
755 all_cpu_data[cpu_num] = NULL;
756} 827}
757 828
758static int intel_pstate_cpu_init(struct cpufreq_policy *policy) 829static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
759{ 830{
760 struct cpudata *cpu; 831 struct cpudata *cpu;
761 int rc; 832 int rc;
762 u64 misc_en;
763 833
764 rc = intel_pstate_init_cpu(policy->cpu); 834 rc = intel_pstate_init_cpu(policy->cpu);
765 if (rc) 835 if (rc)
@@ -767,23 +837,18 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
767 837
768 cpu = all_cpu_data[policy->cpu]; 838 cpu = all_cpu_data[policy->cpu];
769 839
770 rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
771 if (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
772 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate) {
773 limits.turbo_disabled = 1;
774 limits.no_turbo = 1;
775 }
776 if (limits.min_perf_pct == 100 && limits.max_perf_pct == 100) 840 if (limits.min_perf_pct == 100 && limits.max_perf_pct == 100)
777 policy->policy = CPUFREQ_POLICY_PERFORMANCE; 841 policy->policy = CPUFREQ_POLICY_PERFORMANCE;
778 else 842 else
779 policy->policy = CPUFREQ_POLICY_POWERSAVE; 843 policy->policy = CPUFREQ_POLICY_POWERSAVE;
780 844
781 policy->min = cpu->pstate.min_pstate * 100000; 845 policy->min = cpu->pstate.min_pstate * cpu->pstate.scaling;
782 policy->max = cpu->pstate.turbo_pstate * 100000; 846 policy->max = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
783 847
784 /* cpuinfo and default policy values */ 848 /* cpuinfo and default policy values */
785 policy->cpuinfo.min_freq = cpu->pstate.min_pstate * 100000; 849 policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
786 policy->cpuinfo.max_freq = cpu->pstate.turbo_pstate * 100000; 850 policy->cpuinfo.max_freq =
851 cpu->pstate.turbo_pstate * cpu->pstate.scaling;
787 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL; 852 policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
788 cpumask_set_cpu(policy->cpu, policy->cpus); 853 cpumask_set_cpu(policy->cpu, policy->cpus);
789 854
@@ -841,6 +906,7 @@ static void copy_cpu_funcs(struct pstate_funcs *funcs)
841 pstate_funcs.get_max = funcs->get_max; 906 pstate_funcs.get_max = funcs->get_max;
842 pstate_funcs.get_min = funcs->get_min; 907 pstate_funcs.get_min = funcs->get_min;
843 pstate_funcs.get_turbo = funcs->get_turbo; 908 pstate_funcs.get_turbo = funcs->get_turbo;
909 pstate_funcs.get_scaling = funcs->get_scaling;
844 pstate_funcs.set = funcs->set; 910 pstate_funcs.set = funcs->set;
845 pstate_funcs.get_vid = funcs->get_vid; 911 pstate_funcs.get_vid = funcs->get_vid;
846} 912}
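
As a quick sanity check of the per-CPU scaling factor introduced above: byt_get_scaling() returns byt_freq_table[i] * 100, so an MSR_FSB_FREQ field of 0 gives 833 * 100 = 83300 kHz per P-state step, while core parts keep the fixed core_get_scaling() value of 100000 kHz. With purely illustrative P-states of min = 6 and turbo = 24 on such a Baytrail part, the policy limits become

	policy->min = 6  * 83300 =  499800 kHz
	policy->max = 24 * 83300 = 1999200 kHz

instead of the old hard-coded 100000 kHz multiplier.
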
diff --git a/drivers/cpuidle/Kconfig.mips b/drivers/cpuidle/Kconfig.mips
index 0e70ee28a5ca..4102be01d06a 100644
--- a/drivers/cpuidle/Kconfig.mips
+++ b/drivers/cpuidle/Kconfig.mips
@@ -3,7 +3,7 @@
3# 3#
4config MIPS_CPS_CPUIDLE 4config MIPS_CPS_CPUIDLE
5 bool "CPU Idle driver for MIPS CPS platforms" 5 bool "CPU Idle driver for MIPS CPS platforms"
6 depends on CPU_IDLE 6 depends on CPU_IDLE && MIPS_CPS
7 depends on SYS_SUPPORTS_MIPS_CPS 7 depends on SYS_SUPPORTS_MIPS_CPS
8 select ARCH_NEEDS_CPU_IDLE_COUPLED if MIPS_MT 8 select ARCH_NEEDS_CPU_IDLE_COUPLED if MIPS_MT
9 select GENERIC_CLOCKEVENTS_BROADCAST if SMP 9 select GENERIC_CLOCKEVENTS_BROADCAST if SMP
diff --git a/drivers/cpuidle/cpuidle-powernv.c b/drivers/cpuidle/cpuidle-powernv.c
index a64be578dab2..7d3a3497dd4c 100644
--- a/drivers/cpuidle/cpuidle-powernv.c
+++ b/drivers/cpuidle/cpuidle-powernv.c
@@ -163,7 +163,8 @@ static int powernv_add_idle_states(void)
163 int nr_idle_states = 1; /* Snooze */ 163 int nr_idle_states = 1; /* Snooze */
164 int dt_idle_states; 164 int dt_idle_states;
165 const __be32 *idle_state_flags; 165 const __be32 *idle_state_flags;
166 u32 len_flags, flags; 166 const __be32 *idle_state_latency;
167 u32 len_flags, flags, latency_ns;
167 int i; 168 int i;
168 169
169 /* Currently we have snooze statically defined */ 170 /* Currently we have snooze statically defined */
@@ -180,18 +181,32 @@ static int powernv_add_idle_states(void)
180 return nr_idle_states; 181 return nr_idle_states;
181 } 182 }
182 183
184 idle_state_latency = of_get_property(power_mgt,
185 "ibm,cpu-idle-state-latencies-ns", NULL);
186 if (!idle_state_latency) {
187 pr_warn("DT-PowerMgmt: missing ibm,cpu-idle-state-latencies-ns\n");
188 return nr_idle_states;
189 }
190
183 dt_idle_states = len_flags / sizeof(u32); 191 dt_idle_states = len_flags / sizeof(u32);
184 192
185 for (i = 0; i < dt_idle_states; i++) { 193 for (i = 0; i < dt_idle_states; i++) {
186 194
187 flags = be32_to_cpu(idle_state_flags[i]); 195 flags = be32_to_cpu(idle_state_flags[i]);
196
197 /* Cpuidle accepts exit_latency in us and we estimate
198 * target residency to be 10x exit_latency
199 */
200 latency_ns = be32_to_cpu(idle_state_latency[i]);
188 if (flags & IDLE_USE_INST_NAP) { 201 if (flags & IDLE_USE_INST_NAP) {
189 /* Add NAP state */ 202 /* Add NAP state */
190 strcpy(powernv_states[nr_idle_states].name, "Nap"); 203 strcpy(powernv_states[nr_idle_states].name, "Nap");
191 strcpy(powernv_states[nr_idle_states].desc, "Nap"); 204 strcpy(powernv_states[nr_idle_states].desc, "Nap");
192 powernv_states[nr_idle_states].flags = CPUIDLE_FLAG_TIME_VALID; 205 powernv_states[nr_idle_states].flags = CPUIDLE_FLAG_TIME_VALID;
193 powernv_states[nr_idle_states].exit_latency = 10; 206 powernv_states[nr_idle_states].exit_latency =
194 powernv_states[nr_idle_states].target_residency = 100; 207 ((unsigned int)latency_ns) / 1000;
208 powernv_states[nr_idle_states].target_residency =
209 ((unsigned int)latency_ns / 100);
195 powernv_states[nr_idle_states].enter = &nap_loop; 210 powernv_states[nr_idle_states].enter = &nap_loop;
196 nr_idle_states++; 211 nr_idle_states++;
197 } 212 }
@@ -202,8 +217,10 @@ static int powernv_add_idle_states(void)
202 strcpy(powernv_states[nr_idle_states].desc, "FastSleep"); 217 strcpy(powernv_states[nr_idle_states].desc, "FastSleep");
203 powernv_states[nr_idle_states].flags = 218 powernv_states[nr_idle_states].flags =
204 CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TIMER_STOP; 219 CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TIMER_STOP;
205 powernv_states[nr_idle_states].exit_latency = 300; 220 powernv_states[nr_idle_states].exit_latency =
206 powernv_states[nr_idle_states].target_residency = 1000000; 221 ((unsigned int)latency_ns) / 1000;
222 powernv_states[nr_idle_states].target_residency =
223 ((unsigned int)latency_ns / 100);
207 powernv_states[nr_idle_states].enter = &fastsleep_loop; 224 powernv_states[nr_idle_states].enter = &fastsleep_loop;
208 nr_idle_states++; 225 nr_idle_states++;
209 } 226 }
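
The conversion above keeps cpuidle's units straight: the device-tree property is in nanoseconds, exit_latency wants microseconds (latency_ns / 1000), and target_residency is estimated as ten times that (latency_ns / 100). For an illustrative "ibm,cpu-idle-state-latencies-ns" entry of 4000, that yields exit_latency = 4 us and target_residency = 40 us; an entry of 300000 yields 300 us and 3000 us respectively.
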
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
index 871703c49d2c..e1eaf4ff9762 100644
--- a/drivers/crypto/caam/key_gen.c
+++ b/drivers/crypto/caam/key_gen.c
@@ -48,23 +48,29 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
48 u32 *desc; 48 u32 *desc;
49 struct split_key_result result; 49 struct split_key_result result;
50 dma_addr_t dma_addr_in, dma_addr_out; 50 dma_addr_t dma_addr_in, dma_addr_out;
51 int ret = 0; 51 int ret = -ENOMEM;
52 52
53 desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA); 53 desc = kmalloc(CAAM_CMD_SZ * 6 + CAAM_PTR_SZ * 2, GFP_KERNEL | GFP_DMA);
54 if (!desc) { 54 if (!desc) {
55 dev_err(jrdev, "unable to allocate key input memory\n"); 55 dev_err(jrdev, "unable to allocate key input memory\n");
56 return -ENOMEM; 56 return ret;
57 } 57 }
58 58
59 init_job_desc(desc, 0);
60
61 dma_addr_in = dma_map_single(jrdev, (void *)key_in, keylen, 59 dma_addr_in = dma_map_single(jrdev, (void *)key_in, keylen,
62 DMA_TO_DEVICE); 60 DMA_TO_DEVICE);
63 if (dma_mapping_error(jrdev, dma_addr_in)) { 61 if (dma_mapping_error(jrdev, dma_addr_in)) {
64 dev_err(jrdev, "unable to map key input memory\n"); 62 dev_err(jrdev, "unable to map key input memory\n");
65 kfree(desc); 63 goto out_free;
66 return -ENOMEM;
67 } 64 }
65
66 dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len,
67 DMA_FROM_DEVICE);
68 if (dma_mapping_error(jrdev, dma_addr_out)) {
69 dev_err(jrdev, "unable to map key output memory\n");
70 goto out_unmap_in;
71 }
72
73 init_job_desc(desc, 0);
68 append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG); 74 append_key(desc, dma_addr_in, keylen, CLASS_2 | KEY_DEST_CLASS_REG);
69 75
70 /* Sets MDHA up into an HMAC-INIT */ 76 /* Sets MDHA up into an HMAC-INIT */
@@ -81,13 +87,6 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
81 * FIFO_STORE with the explicit split-key content store 87 * FIFO_STORE with the explicit split-key content store
82 * (0x26 output type) 88 * (0x26 output type)
83 */ 89 */
84 dma_addr_out = dma_map_single(jrdev, key_out, split_key_pad_len,
85 DMA_FROM_DEVICE);
86 if (dma_mapping_error(jrdev, dma_addr_out)) {
87 dev_err(jrdev, "unable to map key output memory\n");
88 kfree(desc);
89 return -ENOMEM;
90 }
91 append_fifo_store(desc, dma_addr_out, split_key_len, 90 append_fifo_store(desc, dma_addr_out, split_key_len,
92 LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK); 91 LDST_CLASS_2_CCB | FIFOST_TYPE_SPLIT_KEK);
93 92
@@ -115,10 +114,10 @@ int gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
115 114
116 dma_unmap_single(jrdev, dma_addr_out, split_key_pad_len, 115 dma_unmap_single(jrdev, dma_addr_out, split_key_pad_len,
117 DMA_FROM_DEVICE); 116 DMA_FROM_DEVICE);
117out_unmap_in:
118 dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE); 118 dma_unmap_single(jrdev, dma_addr_in, keylen, DMA_TO_DEVICE);
119 119out_free:
120 kfree(desc); 120 kfree(desc);
121
122 return ret; 121 return ret;
123} 122}
124EXPORT_SYMBOL(gen_split_key); 123EXPORT_SYMBOL(gen_split_key);
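
The reshuffled error handling in gen_split_key() follows the usual map-then-unwind-in-reverse shape. A stripped-down, generic template of that pattern (names and sizes hypothetical, not CAAM-specific):

	#include <linux/device.h>
	#include <linux/dma-mapping.h>

	static int map_in_and_out(struct device *dev, void *in, size_t in_len,
				  void *out, size_t out_len)
	{
		dma_addr_t din, dout;
		int ret = -ENOMEM;

		din = dma_map_single(dev, in, in_len, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, din))
			goto out;

		dout = dma_map_single(dev, out, out_len, DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, dout))
			goto out_unmap_in;

		ret = 0;	/* descriptor build and job submission go here */

		dma_unmap_single(dev, dout, out_len, DMA_FROM_DEVICE);
	out_unmap_in:
		dma_unmap_single(dev, din, in_len, DMA_TO_DEVICE);
	out:
		return ret;
	}
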
diff --git a/drivers/crypto/qat/qat_common/adf_accel_devices.h b/drivers/crypto/qat/qat_common/adf_accel_devices.h
index 9282381b03ce..fe7b3f06f6e6 100644
--- a/drivers/crypto/qat/qat_common/adf_accel_devices.h
+++ b/drivers/crypto/qat/qat_common/adf_accel_devices.h
@@ -198,8 +198,7 @@ struct adf_accel_dev {
198 struct dentry *debugfs_dir; 198 struct dentry *debugfs_dir;
199 struct list_head list; 199 struct list_head list;
200 struct module *owner; 200 struct module *owner;
201 uint8_t accel_id;
202 uint8_t numa_node;
203 struct adf_accel_pci accel_pci_dev; 201 struct adf_accel_pci accel_pci_dev;
202 uint8_t accel_id;
204} __packed; 203} __packed;
205#endif 204#endif
diff --git a/drivers/crypto/qat/qat_common/adf_transport.c b/drivers/crypto/qat/qat_common/adf_transport.c
index 5f3fa45348b4..9dd2cb72a4e8 100644
--- a/drivers/crypto/qat/qat_common/adf_transport.c
+++ b/drivers/crypto/qat/qat_common/adf_transport.c
@@ -419,9 +419,10 @@ static int adf_init_bank(struct adf_accel_dev *accel_dev,
419 WRITE_CSR_RING_BASE(csr_addr, bank_num, i, 0); 419 WRITE_CSR_RING_BASE(csr_addr, bank_num, i, 0);
420 ring = &bank->rings[i]; 420 ring = &bank->rings[i];
421 if (hw_data->tx_rings_mask & (1 << i)) { 421 if (hw_data->tx_rings_mask & (1 << i)) {
422 ring->inflights = kzalloc_node(sizeof(atomic_t), 422 ring->inflights =
423 GFP_KERNEL, 423 kzalloc_node(sizeof(atomic_t),
424 accel_dev->numa_node); 424 GFP_KERNEL,
425 dev_to_node(&GET_DEV(accel_dev)));
425 if (!ring->inflights) 426 if (!ring->inflights)
426 goto err; 427 goto err;
427 } else { 428 } else {
@@ -469,13 +470,14 @@ int adf_init_etr_data(struct adf_accel_dev *accel_dev)
469 int i, ret; 470 int i, ret;
470 471
471 etr_data = kzalloc_node(sizeof(*etr_data), GFP_KERNEL, 472 etr_data = kzalloc_node(sizeof(*etr_data), GFP_KERNEL,
472 accel_dev->numa_node); 473 dev_to_node(&GET_DEV(accel_dev)));
473 if (!etr_data) 474 if (!etr_data)
474 return -ENOMEM; 475 return -ENOMEM;
475 476
476 num_banks = GET_MAX_BANKS(accel_dev); 477 num_banks = GET_MAX_BANKS(accel_dev);
477 size = num_banks * sizeof(struct adf_etr_bank_data); 478 size = num_banks * sizeof(struct adf_etr_bank_data);
478 etr_data->banks = kzalloc_node(size, GFP_KERNEL, accel_dev->numa_node); 479 etr_data->banks = kzalloc_node(size, GFP_KERNEL,
480 dev_to_node(&GET_DEV(accel_dev)));
479 if (!etr_data->banks) { 481 if (!etr_data->banks) {
480 ret = -ENOMEM; 482 ret = -ENOMEM;
481 goto err_bank; 483 goto err_bank;
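
All of the QAT allocation sites switch from a cached accel_dev->numa_node to deriving the node from the underlying PCI device at call time. A minimal sketch of that pattern (the helper name is made up):

	#include <linux/device.h>
	#include <linux/slab.h>

	static void *alloc_near_dev(struct device *dev, size_t size)
	{
		/* dev_to_node() returns NUMA_NO_NODE (-1) when the device's
		 * node is unknown, which kzalloc_node() treats as "any node". */
		return kzalloc_node(size, GFP_KERNEL, dev_to_node(dev));
	}
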
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index f2e2f158cfbe..9e9619cd4a79 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -596,7 +596,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
596 if (unlikely(!n)) 596 if (unlikely(!n))
597 return -EINVAL; 597 return -EINVAL;
598 598
599 bufl = kmalloc_node(sz, GFP_ATOMIC, inst->accel_dev->numa_node); 599 bufl = kmalloc_node(sz, GFP_ATOMIC,
600 dev_to_node(&GET_DEV(inst->accel_dev)));
600 if (unlikely(!bufl)) 601 if (unlikely(!bufl))
601 return -ENOMEM; 602 return -ENOMEM;
602 603
@@ -605,6 +606,8 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
605 goto err; 606 goto err;
606 607
607 for_each_sg(assoc, sg, assoc_n, i) { 608 for_each_sg(assoc, sg, assoc_n, i) {
609 if (!sg->length)
610 continue;
608 bufl->bufers[bufs].addr = dma_map_single(dev, 611 bufl->bufers[bufs].addr = dma_map_single(dev,
609 sg_virt(sg), 612 sg_virt(sg),
610 sg->length, 613 sg->length,
@@ -640,7 +643,7 @@ static int qat_alg_sgl_to_bufl(struct qat_crypto_instance *inst,
640 struct qat_alg_buf *bufers; 643 struct qat_alg_buf *bufers;
641 644
642 buflout = kmalloc_node(sz, GFP_ATOMIC, 645 buflout = kmalloc_node(sz, GFP_ATOMIC,
643 inst->accel_dev->numa_node); 646 dev_to_node(&GET_DEV(inst->accel_dev)));
644 if (unlikely(!buflout)) 647 if (unlikely(!buflout))
645 goto err; 648 goto err;
646 bloutp = dma_map_single(dev, buflout, sz, DMA_TO_DEVICE); 649 bloutp = dma_map_single(dev, buflout, sz, DMA_TO_DEVICE);
diff --git a/drivers/crypto/qat/qat_common/qat_crypto.c b/drivers/crypto/qat/qat_common/qat_crypto.c
index 0d59bcb50de1..828f2a686aab 100644
--- a/drivers/crypto/qat/qat_common/qat_crypto.c
+++ b/drivers/crypto/qat/qat_common/qat_crypto.c
@@ -109,12 +109,14 @@ struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
109 109
110 list_for_each(itr, adf_devmgr_get_head()) { 110 list_for_each(itr, adf_devmgr_get_head()) {
111 accel_dev = list_entry(itr, struct adf_accel_dev, list); 111 accel_dev = list_entry(itr, struct adf_accel_dev, list);
112 if (accel_dev->numa_node == node && adf_dev_started(accel_dev)) 112 if ((node == dev_to_node(&GET_DEV(accel_dev)) ||
113 dev_to_node(&GET_DEV(accel_dev)) < 0)
114 && adf_dev_started(accel_dev))
113 break; 115 break;
114 accel_dev = NULL; 116 accel_dev = NULL;
115 } 117 }
116 if (!accel_dev) { 118 if (!accel_dev) {
117 pr_err("QAT: Could not find device on give node\n"); 119 pr_err("QAT: Could not find device on node %d\n", node);
118 accel_dev = adf_devmgr_get_first(); 120 accel_dev = adf_devmgr_get_first();
119 } 121 }
120 if (!accel_dev || !adf_dev_started(accel_dev)) 122 if (!accel_dev || !adf_dev_started(accel_dev))
@@ -164,7 +166,7 @@ static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
164 166
165 for (i = 0; i < num_inst; i++) { 167 for (i = 0; i < num_inst; i++) {
166 inst = kzalloc_node(sizeof(*inst), GFP_KERNEL, 168 inst = kzalloc_node(sizeof(*inst), GFP_KERNEL,
167 accel_dev->numa_node); 169 dev_to_node(&GET_DEV(accel_dev)));
168 if (!inst) 170 if (!inst)
169 goto err; 171 goto err;
170 172
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_admin.c b/drivers/crypto/qat/qat_dh895xcc/adf_admin.c
index 978d6c56639d..53c491b59f07 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_admin.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_admin.c
@@ -108,7 +108,7 @@ int adf_init_admin_comms(struct adf_accel_dev *accel_dev)
108 uint64_t reg_val; 108 uint64_t reg_val;
109 109
110 admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL, 110 admin = kzalloc_node(sizeof(*accel_dev->admin), GFP_KERNEL,
111 accel_dev->numa_node); 111 dev_to_node(&GET_DEV(accel_dev)));
112 if (!admin) 112 if (!admin)
113 return -ENOMEM; 113 return -ENOMEM;
114 admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE, 114 admin->virt_addr = dma_zalloc_coherent(&GET_DEV(accel_dev), PAGE_SIZE,
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
index 0d0435a41be9..948f66be262b 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
@@ -119,21 +119,6 @@ static void adf_cleanup_accel(struct adf_accel_dev *accel_dev)
119 kfree(accel_dev); 119 kfree(accel_dev);
120} 120}
121 121
122static uint8_t adf_get_dev_node_id(struct pci_dev *pdev)
123{
124 unsigned int bus_per_cpu = 0;
125 struct cpuinfo_x86 *c = &cpu_data(num_online_cpus() - 1);
126
127 if (!c->phys_proc_id)
128 return 0;
129
130 bus_per_cpu = 256 / (c->phys_proc_id + 1);
131
132 if (bus_per_cpu != 0)
133 return pdev->bus->number / bus_per_cpu;
134 return 0;
135}
136
137static int qat_dev_start(struct adf_accel_dev *accel_dev) 122static int qat_dev_start(struct adf_accel_dev *accel_dev)
138{ 123{
139 int cpus = num_online_cpus(); 124 int cpus = num_online_cpus();
@@ -235,7 +220,6 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
235 void __iomem *pmisc_bar_addr = NULL; 220 void __iomem *pmisc_bar_addr = NULL;
236 char name[ADF_DEVICE_NAME_LENGTH]; 221 char name[ADF_DEVICE_NAME_LENGTH];
237 unsigned int i, bar_nr; 222 unsigned int i, bar_nr;
238 uint8_t node;
239 int ret; 223 int ret;
240 224
241 switch (ent->device) { 225 switch (ent->device) {
@@ -246,12 +230,19 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
246 return -ENODEV; 230 return -ENODEV;
247 } 231 }
248 232
249 node = adf_get_dev_node_id(pdev); 233 if (num_possible_nodes() > 1 && dev_to_node(&pdev->dev) < 0) {
250 accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL, node); 234 /* If the accelerator is connected to a node with no memory
235 * there is no point in using the accelerator since the remote
236 * memory transaction will be very slow. */
237 dev_err(&pdev->dev, "Invalid NUMA configuration.\n");
238 return -EINVAL;
239 }
240
241 accel_dev = kzalloc_node(sizeof(*accel_dev), GFP_KERNEL,
242 dev_to_node(&pdev->dev));
251 if (!accel_dev) 243 if (!accel_dev)
252 return -ENOMEM; 244 return -ENOMEM;
253 245
254 accel_dev->numa_node = node;
255 INIT_LIST_HEAD(&accel_dev->crypto_list); 246 INIT_LIST_HEAD(&accel_dev->crypto_list);
256 247
257 /* Add accel device to accel table. 248 /* Add accel device to accel table.
@@ -264,7 +255,8 @@ static int adf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
264 255
265 accel_dev->owner = THIS_MODULE; 256 accel_dev->owner = THIS_MODULE;
266 /* Allocate and configure device configuration structure */ 257 /* Allocate and configure device configuration structure */
267 hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL, node); 258 hw_data = kzalloc_node(sizeof(*hw_data), GFP_KERNEL,
259 dev_to_node(&pdev->dev));
268 if (!hw_data) { 260 if (!hw_data) {
269 ret = -ENOMEM; 261 ret = -ENOMEM;
270 goto out_err; 262 goto out_err;
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
index 67ec61e51185..d96ee21b9b77 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_isr.c
@@ -168,7 +168,7 @@ static int adf_isr_alloc_msix_entry_table(struct adf_accel_dev *accel_dev)
168 uint32_t msix_num_entries = hw_data->num_banks + 1; 168 uint32_t msix_num_entries = hw_data->num_banks + 1;
169 169
170 entries = kzalloc_node(msix_num_entries * sizeof(*entries), 170 entries = kzalloc_node(msix_num_entries * sizeof(*entries),
171 GFP_KERNEL, accel_dev->numa_node); 171 GFP_KERNEL, dev_to_node(&GET_DEV(accel_dev)));
172 if (!entries) 172 if (!entries)
173 return -ENOMEM; 173 return -ENOMEM;
174 174
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 123f578d6dd3..4cfaaa5a49be 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -1107,52 +1107,14 @@ bool edma_filter_fn(struct dma_chan *chan, void *param)
1107} 1107}
1108EXPORT_SYMBOL(edma_filter_fn); 1108EXPORT_SYMBOL(edma_filter_fn);
1109 1109
1110static struct platform_device *pdev0, *pdev1;
1111
1112static const struct platform_device_info edma_dev_info0 = {
1113 .name = "edma-dma-engine",
1114 .id = 0,
1115 .dma_mask = DMA_BIT_MASK(32),
1116};
1117
1118static const struct platform_device_info edma_dev_info1 = {
1119 .name = "edma-dma-engine",
1120 .id = 1,
1121 .dma_mask = DMA_BIT_MASK(32),
1122};
1123
1124static int edma_init(void) 1110static int edma_init(void)
1125{ 1111{
1126 int ret = platform_driver_register(&edma_driver); 1112 return platform_driver_register(&edma_driver);
1127
1128 if (ret == 0) {
1129 pdev0 = platform_device_register_full(&edma_dev_info0);
1130 if (IS_ERR(pdev0)) {
1131 platform_driver_unregister(&edma_driver);
1132 ret = PTR_ERR(pdev0);
1133 goto out;
1134 }
1135 }
1136
1137 if (!of_have_populated_dt() && EDMA_CTLRS == 2) {
1138 pdev1 = platform_device_register_full(&edma_dev_info1);
1139 if (IS_ERR(pdev1)) {
1140 platform_driver_unregister(&edma_driver);
1141 platform_device_unregister(pdev0);
1142 ret = PTR_ERR(pdev1);
1143 }
1144 }
1145
1146out:
1147 return ret;
1148} 1113}
1149subsys_initcall(edma_init); 1114subsys_initcall(edma_init);
1150 1115
1151static void __exit edma_exit(void) 1116static void __exit edma_exit(void)
1152{ 1117{
1153 platform_device_unregister(pdev0);
1154 if (pdev1)
1155 platform_device_unregister(pdev1);
1156 platform_driver_unregister(&edma_driver); 1118 platform_driver_unregister(&edma_driver);
1157} 1119}
1158module_exit(edma_exit); 1120module_exit(edma_exit);
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index 4839bfa74a10..19a99743cf52 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -271,7 +271,7 @@ struct pl330_config {
271#define DMAC_MODE_NS (1 << 0) 271#define DMAC_MODE_NS (1 << 0)
272 unsigned int mode; 272 unsigned int mode;
273 unsigned int data_bus_width:10; /* In number of bits */ 273 unsigned int data_bus_width:10; /* In number of bits */
274 unsigned int data_buf_dep:10; 274 unsigned int data_buf_dep:11;
275 unsigned int num_chan:4; 275 unsigned int num_chan:4;
276 unsigned int num_peri:6; 276 unsigned int num_peri:6;
277 u32 peri_ns; 277 u32 peri_ns;
@@ -2336,7 +2336,7 @@ static inline int get_burst_len(struct dma_pl330_desc *desc, size_t len)
2336 int burst_len; 2336 int burst_len;
2337 2337
2338 burst_len = pl330->pcfg.data_bus_width / 8; 2338 burst_len = pl330->pcfg.data_bus_width / 8;
2339 burst_len *= pl330->pcfg.data_buf_dep; 2339 burst_len *= pl330->pcfg.data_buf_dep / pl330->pcfg.num_chan;
2340 burst_len >>= desc->rqcfg.brst_size; 2340 burst_len >>= desc->rqcfg.brst_size;
2341 2341
2342 /* src/dst_burst_len can't be more than 16 */ 2342 /* src/dst_burst_len can't be more than 16 */
@@ -2459,16 +2459,25 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
2459 /* Select max possible burst size */ 2459 /* Select max possible burst size */
2460 burst = pl330->pcfg.data_bus_width / 8; 2460 burst = pl330->pcfg.data_bus_width / 8;
2461 2461
2462 while (burst > 1) { 2462 /*
2463 if (!(len % burst)) 2463 * Make sure we use a burst size that aligns with all the memcpy
2464 break; 2464 * parameters because our DMA programming algorithm doesn't cope with
2465 * transfers which straddle an entry in the DMA device's MFIFO.
2466 */
2467 while ((src | dst | len) & (burst - 1))
2465 burst /= 2; 2468 burst /= 2;
2466 }
2467 2469
2468 desc->rqcfg.brst_size = 0; 2470 desc->rqcfg.brst_size = 0;
2469 while (burst != (1 << desc->rqcfg.brst_size)) 2471 while (burst != (1 << desc->rqcfg.brst_size))
2470 desc->rqcfg.brst_size++; 2472 desc->rqcfg.brst_size++;
2471 2473
2474 /*
2475 * If burst size is smaller than bus width then make sure we only
 2476 * transfer one at a time to avoid a burst straddling an MFIFO entry.
2477 */
2478 if (desc->rqcfg.brst_size * 8 < pl330->pcfg.data_bus_width)
2479 desc->rqcfg.brst_len = 1;
2480
2472 desc->rqcfg.brst_len = get_burst_len(desc, len); 2481 desc->rqcfg.brst_len = get_burst_len(desc, len);
2473 2482
2474 desc->txd.flags = flags; 2483 desc->txd.flags = flags;
@@ -2732,7 +2741,7 @@ pl330_probe(struct amba_device *adev, const struct amba_id *id)
2732 2741
2733 2742
2734 dev_info(&adev->dev, 2743 dev_info(&adev->dev,
2735 "Loaded driver for PL330 DMAC-%d\n", adev->periphid); 2744 "Loaded driver for PL330 DMAC-%x\n", adev->periphid);
2736 dev_info(&adev->dev, 2745 dev_info(&adev->dev,
2737 "\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n", 2746 "\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n",
2738 pcfg->data_buf_dep, pcfg->data_bus_width / 8, pcfg->num_chan, 2747 pcfg->data_buf_dep, pcfg->data_bus_width / 8, pcfg->num_chan,
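
The new burst-size selection boils down to: OR the source address, destination address and length together, then halve the candidate burst until it divides all three. A tiny standalone illustration with made-up values:

	#include <stdio.h>

	int main(void)
	{
		unsigned long src = 0x1008, dst = 0x2004, len = 0x34;
		unsigned int burst = 8;			/* data_bus_width / 8 */

		while ((src | dst | len) & (burst - 1))
			burst /= 2;

		/* Prints "burst = 4": dst and len are only 4-byte aligned. */
		printf("burst = %u\n", burst);
		return 0;
	}
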
diff --git a/drivers/dma/sun6i-dma.c b/drivers/dma/sun6i-dma.c
index 3aa10b328254..91292f5513ff 100644
--- a/drivers/dma/sun6i-dma.c
+++ b/drivers/dma/sun6i-dma.c
@@ -230,30 +230,25 @@ static inline void sun6i_dma_dump_chan_regs(struct sun6i_dma_dev *sdev,
230 readl(pchan->base + DMA_CHAN_CUR_PARA)); 230 readl(pchan->base + DMA_CHAN_CUR_PARA));
231} 231}
232 232
233static inline int convert_burst(u32 maxburst, u8 *burst) 233static inline s8 convert_burst(u32 maxburst)
234{ 234{
235 switch (maxburst) { 235 switch (maxburst) {
236 case 1: 236 case 1:
237 *burst = 0; 237 return 0;
238 break;
239 case 8: 238 case 8:
240 *burst = 2; 239 return 2;
241 break;
242 default: 240 default:
243 return -EINVAL; 241 return -EINVAL;
244 } 242 }
245
246 return 0;
247} 243}
248 244
249static inline int convert_buswidth(enum dma_slave_buswidth addr_width, u8 *width) 245static inline s8 convert_buswidth(enum dma_slave_buswidth addr_width)
250{ 246{
251 if ((addr_width < DMA_SLAVE_BUSWIDTH_1_BYTE) || 247 if ((addr_width < DMA_SLAVE_BUSWIDTH_1_BYTE) ||
252 (addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES)) 248 (addr_width > DMA_SLAVE_BUSWIDTH_4_BYTES))
253 return -EINVAL; 249 return -EINVAL;
254 250
255 *width = addr_width >> 1; 251 return addr_width >> 1;
256 return 0;
257} 252}
258 253
259static void *sun6i_dma_lli_add(struct sun6i_dma_lli *prev, 254static void *sun6i_dma_lli_add(struct sun6i_dma_lli *prev,
@@ -284,26 +279,25 @@ static inline int sun6i_dma_cfg_lli(struct sun6i_dma_lli *lli,
284 struct dma_slave_config *config) 279 struct dma_slave_config *config)
285{ 280{
286 u8 src_width, dst_width, src_burst, dst_burst; 281 u8 src_width, dst_width, src_burst, dst_burst;
287 int ret;
288 282
289 if (!config) 283 if (!config)
290 return -EINVAL; 284 return -EINVAL;
291 285
292 ret = convert_burst(config->src_maxburst, &src_burst); 286 src_burst = convert_burst(config->src_maxburst);
293 if (ret) 287 if (src_burst)
294 return ret; 288 return src_burst;
295 289
296 ret = convert_burst(config->dst_maxburst, &dst_burst); 290 dst_burst = convert_burst(config->dst_maxburst);
297 if (ret) 291 if (dst_burst)
298 return ret; 292 return dst_burst;
299 293
300 ret = convert_buswidth(config->src_addr_width, &src_width); 294 src_width = convert_buswidth(config->src_addr_width);
301 if (ret) 295 if (src_width)
302 return ret; 296 return src_width;
303 297
304 ret = convert_buswidth(config->dst_addr_width, &dst_width); 298 dst_width = convert_buswidth(config->dst_addr_width);
305 if (ret) 299 if (dst_width)
306 return ret; 300 return dst_width;
307 301
308 lli->cfg = DMA_CHAN_CFG_SRC_BURST(src_burst) | 302 lli->cfg = DMA_CHAN_CFG_SRC_BURST(src_burst) |
309 DMA_CHAN_CFG_SRC_WIDTH(src_width) | 303 DMA_CHAN_CFG_SRC_WIDTH(src_width) |
@@ -542,11 +536,10 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy(
542{ 536{
543 struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device); 537 struct sun6i_dma_dev *sdev = to_sun6i_dma_dev(chan->device);
544 struct sun6i_vchan *vchan = to_sun6i_vchan(chan); 538 struct sun6i_vchan *vchan = to_sun6i_vchan(chan);
545 struct dma_slave_config *sconfig = &vchan->cfg;
546 struct sun6i_dma_lli *v_lli; 539 struct sun6i_dma_lli *v_lli;
547 struct sun6i_desc *txd; 540 struct sun6i_desc *txd;
548 dma_addr_t p_lli; 541 dma_addr_t p_lli;
549 int ret; 542 s8 burst, width;
550 543
551 dev_dbg(chan2dev(chan), 544 dev_dbg(chan2dev(chan),
552 "%s; chan: %d, dest: %pad, src: %pad, len: %zu. flags: 0x%08lx\n", 545 "%s; chan: %d, dest: %pad, src: %pad, len: %zu. flags: 0x%08lx\n",
@@ -565,14 +558,21 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy(
565 goto err_txd_free; 558 goto err_txd_free;
566 } 559 }
567 560
568 ret = sun6i_dma_cfg_lli(v_lli, src, dest, len, sconfig); 561 v_lli->src = src;
569 if (ret) 562 v_lli->dst = dest;
570 goto err_dma_free; 563 v_lli->len = len;
564 v_lli->para = NORMAL_WAIT;
571 565
566 burst = convert_burst(8);
567 width = convert_buswidth(DMA_SLAVE_BUSWIDTH_4_BYTES);
572 v_lli->cfg |= DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) | 568 v_lli->cfg |= DMA_CHAN_CFG_SRC_DRQ(DRQ_SDRAM) |
573 DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) | 569 DMA_CHAN_CFG_DST_DRQ(DRQ_SDRAM) |
574 DMA_CHAN_CFG_DST_LINEAR_MODE | 570 DMA_CHAN_CFG_DST_LINEAR_MODE |
575 DMA_CHAN_CFG_SRC_LINEAR_MODE; 571 DMA_CHAN_CFG_SRC_LINEAR_MODE |
572 DMA_CHAN_CFG_SRC_BURST(burst) |
573 DMA_CHAN_CFG_SRC_WIDTH(width) |
574 DMA_CHAN_CFG_DST_BURST(burst) |
575 DMA_CHAN_CFG_DST_WIDTH(width);
576 576
577 sun6i_dma_lli_add(NULL, v_lli, p_lli, txd); 577 sun6i_dma_lli_add(NULL, v_lli, p_lli, txd);
578 578
@@ -580,8 +580,6 @@ static struct dma_async_tx_descriptor *sun6i_dma_prep_dma_memcpy(
580 580
581 return vchan_tx_prep(&vchan->vc, &txd->vd, flags); 581 return vchan_tx_prep(&vchan->vc, &txd->vd, flags);
582 582
583err_dma_free:
584 dma_pool_free(sdev->pool, v_lli, p_lli);
585err_txd_free: 583err_txd_free:
586 kfree(txd); 584 kfree(txd);
587 return NULL; 585 return NULL;
@@ -915,6 +913,7 @@ static int sun6i_dma_probe(struct platform_device *pdev)
915 sdc->slave.device_prep_dma_memcpy = sun6i_dma_prep_dma_memcpy; 913 sdc->slave.device_prep_dma_memcpy = sun6i_dma_prep_dma_memcpy;
916 sdc->slave.device_control = sun6i_dma_control; 914 sdc->slave.device_control = sun6i_dma_control;
917 sdc->slave.chancnt = NR_MAX_VCHANS; 915 sdc->slave.chancnt = NR_MAX_VCHANS;
916 sdc->slave.copy_align = 4;
918 917
919 sdc->slave.dev = &pdev->dev; 918 sdc->slave.dev = &pdev->dev;
920 919
diff --git a/drivers/edac/cpc925_edac.c b/drivers/edac/cpc925_edac.c
index df6575f1430d..682288ced4ac 100644
--- a/drivers/edac/cpc925_edac.c
+++ b/drivers/edac/cpc925_edac.c
@@ -562,7 +562,7 @@ static void cpc925_mc_check(struct mem_ctl_info *mci)
562 562
563 if (apiexcp & UECC_EXCP_DETECTED) { 563 if (apiexcp & UECC_EXCP_DETECTED) {
564 cpc925_mc_printk(mci, KERN_INFO, "DRAM UECC Fault\n"); 564 cpc925_mc_printk(mci, KERN_INFO, "DRAM UECC Fault\n");
565 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 565 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1,
566 pfn, offset, 0, 566 pfn, offset, 0,
567 csrow, -1, -1, 567 csrow, -1, -1,
568 mci->ctl_name, ""); 568 mci->ctl_name, "");
diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
index 3cda79bc8b00..ece3aef16bb1 100644
--- a/drivers/edac/e7xxx_edac.c
+++ b/drivers/edac/e7xxx_edac.c
@@ -226,7 +226,7 @@ static void process_ce(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
226static void process_ce_no_info(struct mem_ctl_info *mci) 226static void process_ce_no_info(struct mem_ctl_info *mci)
227{ 227{
228 edac_dbg(3, "\n"); 228 edac_dbg(3, "\n");
229 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 0, 0, 0, -1, -1, -1, 229 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1, 0, 0, 0, -1, -1, -1,
230 "e7xxx CE log register overflow", ""); 230 "e7xxx CE log register overflow", "");
231} 231}
232 232
diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
index 022a70273ada..aa98b136f5d0 100644
--- a/drivers/edac/i3200_edac.c
+++ b/drivers/edac/i3200_edac.c
@@ -242,11 +242,11 @@ static void i3200_process_error_info(struct mem_ctl_info *mci,
242 -1, -1, 242 -1, -1,
243 "i3000 UE", ""); 243 "i3000 UE", "");
244 } else if (log & I3200_ECCERRLOG_CE) { 244 } else if (log & I3200_ECCERRLOG_CE) {
245 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 245 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
246 0, 0, eccerrlog_syndrome(log), 246 0, 0, eccerrlog_syndrome(log),
247 eccerrlog_row(channel, log), 247 eccerrlog_row(channel, log),
248 -1, -1, 248 -1, -1,
249 "i3000 UE", ""); 249 "i3000 CE", "");
250 } 250 }
251 } 251 }
252} 252}
diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
index 3382f6344e42..4382343a7c60 100644
--- a/drivers/edac/i82860_edac.c
+++ b/drivers/edac/i82860_edac.c
@@ -124,7 +124,7 @@ static int i82860_process_error_info(struct mem_ctl_info *mci,
124 dimm->location[0], dimm->location[1], -1, 124 dimm->location[0], dimm->location[1], -1,
125 "i82860 UE", ""); 125 "i82860 UE", "");
126 else 126 else
127 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 1, 127 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 1,
128 info->eap, 0, info->derrsyn, 128 info->eap, 0, info->derrsyn,
129 dimm->location[0], dimm->location[1], -1, 129 dimm->location[0], dimm->location[1], -1,
130 "i82860 CE", ""); 130 "i82860 CE", "");
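
The four EDAC hunks above all correct the same mix-up: errors logged as uncorrectable (UE) were being reported with the corrected severity and vice versa. A minimal user-space sketch of the intended pattern follows; the enum and report() are illustrative stand-ins for hw_event_mc_err_type and edac_mc_handle_error(), not the real EDAC API.

#include <stdio.h>

enum err_type { ERR_CORRECTED, ERR_UNCORRECTED };

/* stand-in for edac_mc_handle_error(): severity must match the error class */
static void report(enum err_type type, const char *msg)
{
	printf("%s: %s\n", type == ERR_CORRECTED ? "CE" : "UE", msg);
}

int main(void)
{
	report(ERR_UNCORRECTED, "DRAM UECC fault");     /* UE path */
	report(ERR_CORRECTED, "single-bit ECC error");  /* CE path */
	return 0;
}
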
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index 5d997a33907e..2a3973a7c441 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -1637,8 +1637,7 @@ static int dispatch_ioctl(struct client *client,
1637 _IOC_SIZE(cmd) > sizeof(buffer)) 1637 _IOC_SIZE(cmd) > sizeof(buffer))
1638 return -ENOTTY; 1638 return -ENOTTY;
1639 1639
1640 if (_IOC_DIR(cmd) == _IOC_READ) 1640 memset(&buffer, 0, sizeof(buffer));
1641 memset(&buffer, 0, _IOC_SIZE(cmd));
1642 1641
1643 if (_IOC_DIR(cmd) & _IOC_WRITE) 1642 if (_IOC_DIR(cmd) & _IOC_WRITE)
1644 if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd))) 1643 if (copy_from_user(&buffer, arg, _IOC_SIZE(cmd)))
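
The firewire change above zeroes the whole ioctl buffer unconditionally instead of only _IOC_SIZE(cmd) bytes on reads, so a handler that fills in just part of the buffer can no longer leave stale stack bytes to be copied back to user space. A small user-space model of the idea; the union and fill_request() are made up for illustration.

#include <stdio.h>
#include <string.h>

union ioctl_buffer { char raw[64]; int small_reply; };

/* stand-in for an ioctl handler that only writes a small reply */
static void fill_request(union ioctl_buffer *b)
{
	b->small_reply = 42;	/* only 4 of the 64 bytes are written */
}

int main(void)
{
	union ioctl_buffer buf;

	memset(&buf, 0, sizeof(buf));	/* zero everything up front */
	fill_request(&buf);
	printf("reply=%d, trailing byte=%d\n", buf.small_reply, buf.raw[63]);
	return 0;
}
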
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 64ecbb501c50..8590099ac148 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -41,6 +41,28 @@ struct efi __read_mostly efi = {
41}; 41};
42EXPORT_SYMBOL(efi); 42EXPORT_SYMBOL(efi);
43 43
44static bool disable_runtime;
45static int __init setup_noefi(char *arg)
46{
47 disable_runtime = true;
48 return 0;
49}
50early_param("noefi", setup_noefi);
51
52bool efi_runtime_disabled(void)
53{
54 return disable_runtime;
55}
56
57static int __init parse_efi_cmdline(char *str)
58{
59 if (parse_option_str(str, "noruntime"))
60 disable_runtime = true;
61
62 return 0;
63}
64early_param("efi", parse_efi_cmdline);
65
44static struct kobject *efi_kobj; 66static struct kobject *efi_kobj;
45static struct kobject *efivars_kobj; 67static struct kobject *efivars_kobj;
46 68
@@ -423,3 +445,60 @@ int __init efi_get_fdt_params(struct efi_fdt_params *params, int verbose)
423 return ret; 445 return ret;
424} 446}
425#endif /* CONFIG_EFI_PARAMS_FROM_FDT */ 447#endif /* CONFIG_EFI_PARAMS_FROM_FDT */
448
449static __initdata char memory_type_name[][20] = {
450 "Reserved",
451 "Loader Code",
452 "Loader Data",
453 "Boot Code",
454 "Boot Data",
455 "Runtime Code",
456 "Runtime Data",
457 "Conventional Memory",
458 "Unusable Memory",
459 "ACPI Reclaim Memory",
460 "ACPI Memory NVS",
461 "Memory Mapped I/O",
462 "MMIO Port Space",
463 "PAL Code"
464};
465
466char * __init efi_md_typeattr_format(char *buf, size_t size,
467 const efi_memory_desc_t *md)
468{
469 char *pos;
470 int type_len;
471 u64 attr;
472
473 pos = buf;
474 if (md->type >= ARRAY_SIZE(memory_type_name))
475 type_len = snprintf(pos, size, "[type=%u", md->type);
476 else
477 type_len = snprintf(pos, size, "[%-*s",
478 (int)(sizeof(memory_type_name[0]) - 1),
479 memory_type_name[md->type]);
480 if (type_len >= size)
481 return buf;
482
483 pos += type_len;
484 size -= type_len;
485
486 attr = md->attribute;
487 if (attr & ~(EFI_MEMORY_UC | EFI_MEMORY_WC | EFI_MEMORY_WT |
488 EFI_MEMORY_WB | EFI_MEMORY_UCE | EFI_MEMORY_WP |
489 EFI_MEMORY_RP | EFI_MEMORY_XP | EFI_MEMORY_RUNTIME))
490 snprintf(pos, size, "|attr=0x%016llx]",
491 (unsigned long long)attr);
492 else
493 snprintf(pos, size, "|%3s|%2s|%2s|%2s|%3s|%2s|%2s|%2s|%2s]",
494 attr & EFI_MEMORY_RUNTIME ? "RUN" : "",
495 attr & EFI_MEMORY_XP ? "XP" : "",
496 attr & EFI_MEMORY_RP ? "RP" : "",
497 attr & EFI_MEMORY_WP ? "WP" : "",
498 attr & EFI_MEMORY_UCE ? "UCE" : "",
499 attr & EFI_MEMORY_WB ? "WB" : "",
500 attr & EFI_MEMORY_WT ? "WT" : "",
501 attr & EFI_MEMORY_WC ? "WC" : "",
502 attr & EFI_MEMORY_UC ? "UC" : "");
503 return buf;
504}
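
A cut-down, user-space model of the attribute formatting added above; the MEM_* flag values below are illustrative stand-ins, not the real EFI_MEMORY_* definitions.

#include <stdio.h>

#define MEM_UC      (1u << 0)
#define MEM_WC      (1u << 1)
#define MEM_WB      (1u << 3)
#define MEM_RUNTIME (1u << 31)

static void format_attrs(char *buf, size_t size, unsigned int attr)
{
	/* same idea as above: fixed-width columns, blank when a bit is clear */
	snprintf(buf, size, "|%3s|%2s|%2s]",
		 attr & MEM_RUNTIME ? "RUN" : "",
		 attr & MEM_WB ? "WB" : "",
		 attr & MEM_UC ? "UC" : "");
}

int main(void)
{
	char buf[32];

	format_attrs(buf, sizeof(buf), MEM_RUNTIME | MEM_WB);
	puts(buf);	/* prints "|RUN|WB|  ]" */
	return 0;
}
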
diff --git a/drivers/firmware/efi/libstub/arm-stub.c b/drivers/firmware/efi/libstub/arm-stub.c
index 480339b6b110..75ee05964cbc 100644
--- a/drivers/firmware/efi/libstub/arm-stub.c
+++ b/drivers/firmware/efi/libstub/arm-stub.c
@@ -226,6 +226,10 @@ unsigned long __init efi_entry(void *handle, efi_system_table_t *sys_table,
226 goto fail_free_image; 226 goto fail_free_image;
227 } 227 }
228 228
229 status = efi_parse_options(cmdline_ptr);
230 if (status != EFI_SUCCESS)
231 pr_efi_err(sys_table, "Failed to parse EFI cmdline options\n");
232
229 /* 233 /*
230 * Unauthenticated device tree data is a security hazard, so 234 * Unauthenticated device tree data is a security hazard, so
231 * ignore 'dtb=' unless UEFI Secure Boot is disabled. 235 * ignore 'dtb=' unless UEFI Secure Boot is disabled.
diff --git a/drivers/firmware/efi/libstub/efi-stub-helper.c b/drivers/firmware/efi/libstub/efi-stub-helper.c
index 32d5cca30f49..a920fec8fe88 100644
--- a/drivers/firmware/efi/libstub/efi-stub-helper.c
+++ b/drivers/firmware/efi/libstub/efi-stub-helper.c
@@ -15,8 +15,23 @@
15 15
16#include "efistub.h" 16#include "efistub.h"
17 17
18/*
19 * Some firmware implementations have problems reading files in one go.
20 * A read chunk size of 1MB seems to work for most platforms.
21 *
22 * Unfortunately, reading files in chunks triggers *other* bugs on some
23 * platforms, so we provide a way to disable this workaround, which can
24 * be done by passing "efi=nochunk" on the EFI boot stub command line.
25 *
26 * If you experience issues with initrd images being corrupt it's worth
27 * trying efi=nochunk, but chunking is enabled by default because there
28 * are far more machines that require the workaround than those that
29 * break with it enabled.
30 */
18#define EFI_READ_CHUNK_SIZE (1024 * 1024) 31#define EFI_READ_CHUNK_SIZE (1024 * 1024)
19 32
33static unsigned long __chunk_size = EFI_READ_CHUNK_SIZE;
34
20struct file_info { 35struct file_info {
21 efi_file_handle_t *handle; 36 efi_file_handle_t *handle;
22 u64 size; 37 u64 size;
@@ -281,6 +296,49 @@ void efi_free(efi_system_table_t *sys_table_arg, unsigned long size,
281 efi_call_early(free_pages, addr, nr_pages); 296 efi_call_early(free_pages, addr, nr_pages);
282} 297}
283 298
299/*
300 * Parse the ASCII string 'cmdline' for EFI options, denoted by the efi=
301 * option, e.g. efi=nochunk.
302 *
303 * It should be noted that efi= is parsed in two very different
304 * environments, first in the early boot environment of the EFI boot
305 * stub, and subsequently during the kernel boot.
306 */
307efi_status_t efi_parse_options(char *cmdline)
308{
309 char *str;
310
311 /*
312 * If no EFI parameters were specified on the cmdline we've got
313 * nothing to do.
314 */
315 str = strstr(cmdline, "efi=");
316 if (!str)
317 return EFI_SUCCESS;
318
319 /* Skip ahead to first argument */
320 str += strlen("efi=");
321
322 /*
323 * Remember, because efi= is also used by the kernel we need to
324 * skip over arguments we don't understand.
325 */
326 while (*str) {
327 if (!strncmp(str, "nochunk", 7)) {
328 str += strlen("nochunk");
329 __chunk_size = -1UL;
330 }
331
332 /* Group words together, delimited by "," */
333 while (*str && *str != ',')
334 str++;
335
336 if (*str == ',')
337 str++;
338 }
339
340 return EFI_SUCCESS;
341}
284 342
285/* 343/*
286 * Check the cmdline for a LILO-style file= arguments. 344 * Check the cmdline for a LILO-style file= arguments.
@@ -423,8 +481,8 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
423 size = files[j].size; 481 size = files[j].size;
424 while (size) { 482 while (size) {
425 unsigned long chunksize; 483 unsigned long chunksize;
426 if (size > EFI_READ_CHUNK_SIZE) 484 if (size > __chunk_size)
427 chunksize = EFI_READ_CHUNK_SIZE; 485 chunksize = __chunk_size;
428 else 486 else
429 chunksize = size; 487 chunksize = size;
430 488
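
A user-space sketch of the "efi=nochunk" handling introduced above: scan the command line for "efi=", walk the comma-separated words, and widen the read chunk to effectively unlimited when "nochunk" is found. The EFI status codes and the real __chunk_size live on the kernel side, so plain C types stand in for them here.

#include <stdio.h>
#include <string.h>

#define EFI_READ_CHUNK_SIZE (1024 * 1024)
static unsigned long chunk_size = EFI_READ_CHUNK_SIZE;

static void parse_efi_options(const char *cmdline)
{
	const char *str = strstr(cmdline, "efi=");

	if (!str)
		return;			/* no efi= option, nothing to do */
	str += strlen("efi=");

	while (*str) {
		if (!strncmp(str, "nochunk", 7))
			chunk_size = (unsigned long)-1;	/* disable chunking */

		/* skip to the next comma-separated word, ignoring unknown ones */
		while (*str && *str != ',')
			str++;
		if (*str == ',')
			str++;
	}
}

int main(void)
{
	parse_efi_options("root=/dev/sda1 efi=debug,nochunk quiet");
	printf("chunk size: %lu\n", chunk_size);
	return 0;
}
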
diff --git a/drivers/firmware/efi/runtime-wrappers.c b/drivers/firmware/efi/runtime-wrappers.c
index 10daa4bbb258..228bbf910461 100644
--- a/drivers/firmware/efi/runtime-wrappers.c
+++ b/drivers/firmware/efi/runtime-wrappers.c
@@ -14,11 +14,80 @@
14 * This file is released under the GPLv2. 14 * This file is released under the GPLv2.
15 */ 15 */
16 16
17#include <linux/bug.h>
17#include <linux/efi.h> 18#include <linux/efi.h>
18#include <linux/spinlock.h> /* spinlock_t */ 19#include <linux/mutex.h>
20#include <linux/spinlock.h>
19#include <asm/efi.h> 21#include <asm/efi.h>
20 22
21/* 23/*
24 * According to section 7.1 of the UEFI spec, Runtime Services are not fully
25 * reentrant, and there are particular combinations of calls that need to be
26 * serialized. (source: UEFI Specification v2.4A)
27 *
28 * Table 31. Rules for Reentry Into Runtime Services
29 * +------------------------------------+-------------------------------+
30 * | If previous call is busy in | Forbidden to call |
31 * +------------------------------------+-------------------------------+
32 * | Any | SetVirtualAddressMap() |
33 * +------------------------------------+-------------------------------+
34 * | ConvertPointer() | ConvertPointer() |
35 * +------------------------------------+-------------------------------+
36 * | SetVariable() | ResetSystem() |
37 * | UpdateCapsule() | |
38 * | SetTime() | |
39 * | SetWakeupTime() | |
40 * | GetNextHighMonotonicCount() | |
41 * +------------------------------------+-------------------------------+
42 * | GetVariable() | GetVariable() |
43 * | GetNextVariableName() | GetNextVariableName() |
44 * | SetVariable() | SetVariable() |
45 * | QueryVariableInfo() | QueryVariableInfo() |
46 * | UpdateCapsule() | UpdateCapsule() |
47 * | QueryCapsuleCapabilities() | QueryCapsuleCapabilities() |
48 * | GetNextHighMonotonicCount() | GetNextHighMonotonicCount() |
49 * +------------------------------------+-------------------------------+
50 * | GetTime() | GetTime() |
51 * | SetTime() | SetTime() |
52 * | GetWakeupTime() | GetWakeupTime() |
53 * | SetWakeupTime() | SetWakeupTime() |
54 * +------------------------------------+-------------------------------+
55 *
56 * Due to the fact that the EFI pstore may write to the variable store in
57 * interrupt context, we need to use a spinlock for at least the groups that
58 * contain SetVariable() and QueryVariableInfo(). That leaves little else, as
59 * none of the remaining functions are actually ever called at runtime.
60 * So let's just use a single spinlock to serialize all Runtime Services calls.
61 */
62static DEFINE_SPINLOCK(efi_runtime_lock);
63
64/*
65 * Some runtime services calls can be reentrant under NMI, even if the table
66 * above says they are not. (source: UEFI Specification v2.4A)
67 *
68 * Table 32. Functions that may be called after Machine Check, INIT and NMI
69 * +----------------------------+------------------------------------------+
70 * | Function | Called after Machine Check, INIT and NMI |
71 * +----------------------------+------------------------------------------+
72 * | GetTime() | Yes, even if previously busy. |
73 * | GetVariable() | Yes, even if previously busy |
74 * | GetNextVariableName() | Yes, even if previously busy |
75 * | QueryVariableInfo() | Yes, even if previously busy |
76 * | SetVariable() | Yes, even if previously busy |
77 * | UpdateCapsule() | Yes, even if previously busy |
78 * | QueryCapsuleCapabilities() | Yes, even if previously busy |
79 * | ResetSystem() | Yes, even if previously busy |
80 * +----------------------------+------------------------------------------+
81 *
82 * In order to prevent deadlocks under NMI, the wrappers for these functions
83 * may only grab the efi_runtime_lock or rtc_lock spinlocks if !efi_in_nmi().
84 * However, not all of the services listed are reachable through NMI code paths,
 85 * so the special handling as suggested by the UEFI spec is only implemented

86 * for QueryVariableInfo() and SetVariable(), as these can be reached in NMI
87 * context through efi_pstore_write().
88 */
89
90/*
22 * As per commit ef68c8f87ed1 ("x86: Serialize EFI time accesses on rtc_lock"), 91 * As per commit ef68c8f87ed1 ("x86: Serialize EFI time accesses on rtc_lock"),
23 * the EFI specification requires that callers of the time related runtime 92 * the EFI specification requires that callers of the time related runtime
24 * functions serialize with other CMOS accesses in the kernel, as the EFI time 93 * functions serialize with other CMOS accesses in the kernel, as the EFI time
@@ -32,7 +101,9 @@ static efi_status_t virt_efi_get_time(efi_time_t *tm, efi_time_cap_t *tc)
32 efi_status_t status; 101 efi_status_t status;
33 102
34 spin_lock_irqsave(&rtc_lock, flags); 103 spin_lock_irqsave(&rtc_lock, flags);
104 spin_lock(&efi_runtime_lock);
35 status = efi_call_virt(get_time, tm, tc); 105 status = efi_call_virt(get_time, tm, tc);
106 spin_unlock(&efi_runtime_lock);
36 spin_unlock_irqrestore(&rtc_lock, flags); 107 spin_unlock_irqrestore(&rtc_lock, flags);
37 return status; 108 return status;
38} 109}
@@ -43,7 +114,9 @@ static efi_status_t virt_efi_set_time(efi_time_t *tm)
43 efi_status_t status; 114 efi_status_t status;
44 115
45 spin_lock_irqsave(&rtc_lock, flags); 116 spin_lock_irqsave(&rtc_lock, flags);
117 spin_lock(&efi_runtime_lock);
46 status = efi_call_virt(set_time, tm); 118 status = efi_call_virt(set_time, tm);
119 spin_unlock(&efi_runtime_lock);
47 spin_unlock_irqrestore(&rtc_lock, flags); 120 spin_unlock_irqrestore(&rtc_lock, flags);
48 return status; 121 return status;
49} 122}
@@ -56,7 +129,9 @@ static efi_status_t virt_efi_get_wakeup_time(efi_bool_t *enabled,
56 efi_status_t status; 129 efi_status_t status;
57 130
58 spin_lock_irqsave(&rtc_lock, flags); 131 spin_lock_irqsave(&rtc_lock, flags);
132 spin_lock(&efi_runtime_lock);
59 status = efi_call_virt(get_wakeup_time, enabled, pending, tm); 133 status = efi_call_virt(get_wakeup_time, enabled, pending, tm);
134 spin_unlock(&efi_runtime_lock);
60 spin_unlock_irqrestore(&rtc_lock, flags); 135 spin_unlock_irqrestore(&rtc_lock, flags);
61 return status; 136 return status;
62} 137}
@@ -67,7 +142,9 @@ static efi_status_t virt_efi_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
67 efi_status_t status; 142 efi_status_t status;
68 143
69 spin_lock_irqsave(&rtc_lock, flags); 144 spin_lock_irqsave(&rtc_lock, flags);
145 spin_lock(&efi_runtime_lock);
70 status = efi_call_virt(set_wakeup_time, enabled, tm); 146 status = efi_call_virt(set_wakeup_time, enabled, tm);
147 spin_unlock(&efi_runtime_lock);
71 spin_unlock_irqrestore(&rtc_lock, flags); 148 spin_unlock_irqrestore(&rtc_lock, flags);
72 return status; 149 return status;
73} 150}
@@ -78,14 +155,27 @@ static efi_status_t virt_efi_get_variable(efi_char16_t *name,
78 unsigned long *data_size, 155 unsigned long *data_size,
79 void *data) 156 void *data)
80{ 157{
81 return efi_call_virt(get_variable, name, vendor, attr, data_size, data); 158 unsigned long flags;
159 efi_status_t status;
160
161 spin_lock_irqsave(&efi_runtime_lock, flags);
162 status = efi_call_virt(get_variable, name, vendor, attr, data_size,
163 data);
164 spin_unlock_irqrestore(&efi_runtime_lock, flags);
165 return status;
82} 166}
83 167
84static efi_status_t virt_efi_get_next_variable(unsigned long *name_size, 168static efi_status_t virt_efi_get_next_variable(unsigned long *name_size,
85 efi_char16_t *name, 169 efi_char16_t *name,
86 efi_guid_t *vendor) 170 efi_guid_t *vendor)
87{ 171{
88 return efi_call_virt(get_next_variable, name_size, name, vendor); 172 unsigned long flags;
173 efi_status_t status;
174
175 spin_lock_irqsave(&efi_runtime_lock, flags);
176 status = efi_call_virt(get_next_variable, name_size, name, vendor);
177 spin_unlock_irqrestore(&efi_runtime_lock, flags);
178 return status;
89} 179}
90 180
91static efi_status_t virt_efi_set_variable(efi_char16_t *name, 181static efi_status_t virt_efi_set_variable(efi_char16_t *name,
@@ -94,24 +184,61 @@ static efi_status_t virt_efi_set_variable(efi_char16_t *name,
94 unsigned long data_size, 184 unsigned long data_size,
95 void *data) 185 void *data)
96{ 186{
97 return efi_call_virt(set_variable, name, vendor, attr, data_size, data); 187 unsigned long flags;
188 efi_status_t status;
189
190 spin_lock_irqsave(&efi_runtime_lock, flags);
191 status = efi_call_virt(set_variable, name, vendor, attr, data_size,
192 data);
193 spin_unlock_irqrestore(&efi_runtime_lock, flags);
194 return status;
98} 195}
99 196
197static efi_status_t
198virt_efi_set_variable_nonblocking(efi_char16_t *name, efi_guid_t *vendor,
199 u32 attr, unsigned long data_size,
200 void *data)
201{
202 unsigned long flags;
203 efi_status_t status;
204
205 if (!spin_trylock_irqsave(&efi_runtime_lock, flags))
206 return EFI_NOT_READY;
207
208 status = efi_call_virt(set_variable, name, vendor, attr, data_size,
209 data);
210 spin_unlock_irqrestore(&efi_runtime_lock, flags);
211 return status;
212}
213
214
100static efi_status_t virt_efi_query_variable_info(u32 attr, 215static efi_status_t virt_efi_query_variable_info(u32 attr,
101 u64 *storage_space, 216 u64 *storage_space,
102 u64 *remaining_space, 217 u64 *remaining_space,
103 u64 *max_variable_size) 218 u64 *max_variable_size)
104{ 219{
220 unsigned long flags;
221 efi_status_t status;
222
105 if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION) 223 if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
106 return EFI_UNSUPPORTED; 224 return EFI_UNSUPPORTED;
107 225
108 return efi_call_virt(query_variable_info, attr, storage_space, 226 spin_lock_irqsave(&efi_runtime_lock, flags);
109 remaining_space, max_variable_size); 227 status = efi_call_virt(query_variable_info, attr, storage_space,
228 remaining_space, max_variable_size);
229 spin_unlock_irqrestore(&efi_runtime_lock, flags);
230 return status;
110} 231}
111 232
112static efi_status_t virt_efi_get_next_high_mono_count(u32 *count) 233static efi_status_t virt_efi_get_next_high_mono_count(u32 *count)
113{ 234{
114 return efi_call_virt(get_next_high_mono_count, count); 235 unsigned long flags;
236 efi_status_t status;
237
238 spin_lock_irqsave(&efi_runtime_lock, flags);
239 status = efi_call_virt(get_next_high_mono_count, count);
240 spin_unlock_irqrestore(&efi_runtime_lock, flags);
241 return status;
115} 242}
116 243
117static void virt_efi_reset_system(int reset_type, 244static void virt_efi_reset_system(int reset_type,
@@ -119,17 +246,27 @@ static void virt_efi_reset_system(int reset_type,
119 unsigned long data_size, 246 unsigned long data_size,
120 efi_char16_t *data) 247 efi_char16_t *data)
121{ 248{
249 unsigned long flags;
250
251 spin_lock_irqsave(&efi_runtime_lock, flags);
122 __efi_call_virt(reset_system, reset_type, status, data_size, data); 252 __efi_call_virt(reset_system, reset_type, status, data_size, data);
253 spin_unlock_irqrestore(&efi_runtime_lock, flags);
123} 254}
124 255
125static efi_status_t virt_efi_update_capsule(efi_capsule_header_t **capsules, 256static efi_status_t virt_efi_update_capsule(efi_capsule_header_t **capsules,
126 unsigned long count, 257 unsigned long count,
127 unsigned long sg_list) 258 unsigned long sg_list)
128{ 259{
260 unsigned long flags;
261 efi_status_t status;
262
129 if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION) 263 if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
130 return EFI_UNSUPPORTED; 264 return EFI_UNSUPPORTED;
131 265
132 return efi_call_virt(update_capsule, capsules, count, sg_list); 266 spin_lock_irqsave(&efi_runtime_lock, flags);
267 status = efi_call_virt(update_capsule, capsules, count, sg_list);
268 spin_unlock_irqrestore(&efi_runtime_lock, flags);
269 return status;
133} 270}
134 271
135static efi_status_t virt_efi_query_capsule_caps(efi_capsule_header_t **capsules, 272static efi_status_t virt_efi_query_capsule_caps(efi_capsule_header_t **capsules,
@@ -137,11 +274,17 @@ static efi_status_t virt_efi_query_capsule_caps(efi_capsule_header_t **capsules,
137 u64 *max_size, 274 u64 *max_size,
138 int *reset_type) 275 int *reset_type)
139{ 276{
277 unsigned long flags;
278 efi_status_t status;
279
140 if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION) 280 if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
141 return EFI_UNSUPPORTED; 281 return EFI_UNSUPPORTED;
142 282
143 return efi_call_virt(query_capsule_caps, capsules, count, max_size, 283 spin_lock_irqsave(&efi_runtime_lock, flags);
144 reset_type); 284 status = efi_call_virt(query_capsule_caps, capsules, count, max_size,
285 reset_type);
286 spin_unlock_irqrestore(&efi_runtime_lock, flags);
287 return status;
145} 288}
146 289
147void efi_native_runtime_setup(void) 290void efi_native_runtime_setup(void)
@@ -153,6 +296,7 @@ void efi_native_runtime_setup(void)
153 efi.get_variable = virt_efi_get_variable; 296 efi.get_variable = virt_efi_get_variable;
154 efi.get_next_variable = virt_efi_get_next_variable; 297 efi.get_next_variable = virt_efi_get_next_variable;
155 efi.set_variable = virt_efi_set_variable; 298 efi.set_variable = virt_efi_set_variable;
299 efi.set_variable_nonblocking = virt_efi_set_variable_nonblocking;
156 efi.get_next_high_mono_count = virt_efi_get_next_high_mono_count; 300 efi.get_next_high_mono_count = virt_efi_get_next_high_mono_count;
157 efi.reset_system = virt_efi_reset_system; 301 efi.reset_system = virt_efi_reset_system;
158 efi.query_variable_info = virt_efi_query_variable_info; 302 efi.query_variable_info = virt_efi_query_variable_info;
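
Every wrapper in the file above follows the same shape: take the single efi_runtime_lock, invoke the firmware service, release the lock, return the status. A user-space sketch of that shape, with a pthread mutex standing in for the kernel spinlock and firmware_get_time() standing in for efi_call_virt().

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t efi_runtime_lock = PTHREAD_MUTEX_INITIALIZER;

/* stand-in for the firmware service invoked via efi_call_virt() */
static int firmware_get_time(int *hours)
{
	*hours = 12;
	return 0;
}

/* every wrapper follows the same shape: lock, call firmware, unlock */
static int wrapped_get_time(int *hours)
{
	int status;

	pthread_mutex_lock(&efi_runtime_lock);
	status = firmware_get_time(hours);
	pthread_mutex_unlock(&efi_runtime_lock);
	return status;
}

int main(void)
{
	int hours;

	if (wrapped_get_time(&hours) == 0)
		printf("hour: %d\n", hours);
	return 0;
}
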
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
index 5abe943e3404..70a0fb10517f 100644
--- a/drivers/firmware/efi/vars.c
+++ b/drivers/firmware/efi/vars.c
@@ -321,11 +321,11 @@ static unsigned long var_name_strnsize(efi_char16_t *variable_name,
321 * Print a warning when duplicate EFI variables are encountered and 321 * Print a warning when duplicate EFI variables are encountered and
322 * disable the sysfs workqueue since the firmware is buggy. 322 * disable the sysfs workqueue since the firmware is buggy.
323 */ 323 */
324static void dup_variable_bug(efi_char16_t *s16, efi_guid_t *vendor_guid, 324static void dup_variable_bug(efi_char16_t *str16, efi_guid_t *vendor_guid,
325 unsigned long len16) 325 unsigned long len16)
326{ 326{
327 size_t i, len8 = len16 / sizeof(efi_char16_t); 327 size_t i, len8 = len16 / sizeof(efi_char16_t);
328 char *s8; 328 char *str8;
329 329
330 /* 330 /*
331 * Disable the workqueue since the algorithm it uses for 331 * Disable the workqueue since the algorithm it uses for
@@ -334,16 +334,16 @@ static void dup_variable_bug(efi_char16_t *s16, efi_guid_t *vendor_guid,
334 */ 334 */
335 efivar_wq_enabled = false; 335 efivar_wq_enabled = false;
336 336
337 s8 = kzalloc(len8, GFP_KERNEL); 337 str8 = kzalloc(len8, GFP_KERNEL);
338 if (!s8) 338 if (!str8)
339 return; 339 return;
340 340
341 for (i = 0; i < len8; i++) 341 for (i = 0; i < len8; i++)
342 s8[i] = s16[i]; 342 str8[i] = str16[i];
343 343
344 printk(KERN_WARNING "efivars: duplicate variable: %s-%pUl\n", 344 printk(KERN_WARNING "efivars: duplicate variable: %s-%pUl\n",
345 s8, vendor_guid); 345 str8, vendor_guid);
346 kfree(s8); 346 kfree(str8);
347} 347}
348 348
349/** 349/**
@@ -595,6 +595,39 @@ int efivar_entry_set(struct efivar_entry *entry, u32 attributes,
595} 595}
596EXPORT_SYMBOL_GPL(efivar_entry_set); 596EXPORT_SYMBOL_GPL(efivar_entry_set);
597 597
598/*
599 * efivar_entry_set_nonblocking - call set_variable_nonblocking()
600 *
601 * This function is guaranteed to not block and is suitable for calling
602 * from crash/panic handlers.
603 *
604 * Crucially, this function will not block if it cannot acquire
605 * __efivars->lock. Instead, it returns -EBUSY.
606 */
607static int
608efivar_entry_set_nonblocking(efi_char16_t *name, efi_guid_t vendor,
609 u32 attributes, unsigned long size, void *data)
610{
611 const struct efivar_operations *ops = __efivars->ops;
612 unsigned long flags;
613 efi_status_t status;
614
615 if (!spin_trylock_irqsave(&__efivars->lock, flags))
616 return -EBUSY;
617
618 status = check_var_size(attributes, size + ucs2_strsize(name, 1024));
619 if (status != EFI_SUCCESS) {
620 spin_unlock_irqrestore(&__efivars->lock, flags);
621 return -ENOSPC;
622 }
623
624 status = ops->set_variable_nonblocking(name, &vendor, attributes,
625 size, data);
626
627 spin_unlock_irqrestore(&__efivars->lock, flags);
628 return efi_status_to_err(status);
629}
630
598/** 631/**
599 * efivar_entry_set_safe - call set_variable() if enough space in firmware 632 * efivar_entry_set_safe - call set_variable() if enough space in firmware
600 * @name: buffer containing the variable name 633 * @name: buffer containing the variable name
@@ -622,6 +655,20 @@ int efivar_entry_set_safe(efi_char16_t *name, efi_guid_t vendor, u32 attributes,
622 if (!ops->query_variable_store) 655 if (!ops->query_variable_store)
623 return -ENOSYS; 656 return -ENOSYS;
624 657
658 /*
659 * If the EFI variable backend provides a non-blocking
660 * ->set_variable() operation and we're in a context where we
661 * cannot block, then we need to use it to avoid live-locks,
662 * since the implication is that the regular ->set_variable()
663 * will block.
664 *
665 * If no ->set_variable_nonblocking() is provided then
666 * ->set_variable() is assumed to be non-blocking.
667 */
668 if (!block && ops->set_variable_nonblocking)
669 return efivar_entry_set_nonblocking(name, vendor, attributes,
670 size, data);
671
625 if (!block) { 672 if (!block) {
626 if (!spin_trylock_irqsave(&__efivars->lock, flags)) 673 if (!spin_trylock_irqsave(&__efivars->lock, flags))
627 return -EBUSY; 674 return -EBUSY;
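
The non-blocking path added above hinges on trylock semantics: if the variable-store lock cannot be taken immediately, the caller gets -EBUSY instead of spinning, which is exactly what a crash or panic handler needs. A user-space sketch, with pthread_mutex_trylock standing in for spin_trylock_irqsave() and firmware_set_variable() for ops->set_variable_nonblocking().

#include <errno.h>
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t efivars_lock = PTHREAD_MUTEX_INITIALIZER;

/* stand-in for ops->set_variable_nonblocking() */
static int firmware_set_variable(const char *name, const void *data,
				 unsigned long size)
{
	(void)data; (void)size;
	printf("wrote %s\n", name);
	return 0;
}

static int set_variable_nonblocking(const char *name, const void *data,
				    unsigned long size)
{
	int status;

	/* never spin: a panic/crash handler cannot afford to block here */
	if (pthread_mutex_trylock(&efivars_lock) != 0)
		return -EBUSY;

	status = firmware_set_variable(name, data, size);
	pthread_mutex_unlock(&efivars_lock);
	return status;
}

int main(void)
{
	char dump[16] = "oops";

	return set_variable_nonblocking("dump-type0-1-0", dump, sizeof(dump));
}
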
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 9a0cc09e6653..e4a1490b42c2 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -260,7 +260,7 @@ static void armada_drm_vblank_off(struct armada_crtc *dcrtc)
260 * Tell the DRM core that vblank IRQs aren't going to happen for 260 * Tell the DRM core that vblank IRQs aren't going to happen for
261 * a while. This cleans up any pending vblank events for us. 261 * a while. This cleans up any pending vblank events for us.
262 */ 262 */
263 drm_vblank_off(dev, dcrtc->num); 263 drm_crtc_vblank_off(&dcrtc->crtc);
264 264
265 /* Handle any pending flip event. */ 265 /* Handle any pending flip event. */
266 spin_lock_irq(&dev->event_lock); 266 spin_lock_irq(&dev->event_lock);
@@ -289,6 +289,8 @@ static void armada_drm_crtc_dpms(struct drm_crtc *crtc, int dpms)
289 armada_drm_crtc_update(dcrtc); 289 armada_drm_crtc_update(dcrtc);
290 if (dpms_blanked(dpms)) 290 if (dpms_blanked(dpms))
291 armada_drm_vblank_off(dcrtc); 291 armada_drm_vblank_off(dcrtc);
292 else
293 drm_crtc_vblank_on(&dcrtc->crtc);
292 } 294 }
293} 295}
294 296
@@ -526,7 +528,7 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
526 /* Wait for pending flips to complete */ 528 /* Wait for pending flips to complete */
527 wait_event(dcrtc->frame_wait, !dcrtc->frame_work); 529 wait_event(dcrtc->frame_wait, !dcrtc->frame_work);
528 530
529 drm_vblank_pre_modeset(crtc->dev, dcrtc->num); 531 drm_crtc_vblank_off(crtc);
530 532
531 crtc->mode = *adj; 533 crtc->mode = *adj;
532 534
@@ -617,7 +619,7 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
617 619
618 armada_drm_crtc_update(dcrtc); 620 armada_drm_crtc_update(dcrtc);
619 621
620 drm_vblank_post_modeset(crtc->dev, dcrtc->num); 622 drm_crtc_vblank_on(crtc);
621 armada_drm_crtc_finish_fb(dcrtc, old_fb, dpms_blanked(dcrtc->dpms)); 623 armada_drm_crtc_finish_fb(dcrtc, old_fb, dpms_blanked(dcrtc->dpms));
622 624
623 return 0; 625 return 0;
@@ -945,18 +947,15 @@ static int armada_drm_crtc_page_flip(struct drm_crtc *crtc,
945 armada_reg_queue_end(work->regs, i); 947 armada_reg_queue_end(work->regs, i);
946 948
947 /* 949 /*
948 * Hold the old framebuffer for the work - DRM appears to drop our 950 * Ensure that we hold a reference on the new framebuffer.
949 * reference to the old framebuffer in drm_mode_page_flip_ioctl(). 951 * This has to match the behaviour in mode_set.
950 */ 952 */
951 drm_framebuffer_reference(work->old_fb); 953 drm_framebuffer_reference(fb);
952 954
953 ret = armada_drm_crtc_queue_frame_work(dcrtc, work); 955 ret = armada_drm_crtc_queue_frame_work(dcrtc, work);
954 if (ret) { 956 if (ret) {
955 /* 957 /* Undo our reference above */
956 * Undo our reference above; DRM does not drop the reference 958 drm_framebuffer_unreference(fb);
957 * to this object on error, so that's okay.
958 */
959 drm_framebuffer_unreference(work->old_fb);
960 kfree(work); 959 kfree(work);
961 return ret; 960 return ret;
962 } 961 }
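
The page-flip hunk above changes which framebuffer reference is held: take a reference on the new framebuffer before queuing the flip and drop it again only if queuing fails, matching the behaviour in mode_set. A generic refcounting sketch of that rule; struct fb, fb_get()/fb_put() and queue_flip() are illustrative, not the DRM API.

#include <stdio.h>
#include <stdlib.h>

struct fb { int refcount; };

static void fb_get(struct fb *fb) { fb->refcount++; }
static void fb_put(struct fb *fb) { if (--fb->refcount == 0) free(fb); }

/* stand-in for armada_drm_crtc_queue_frame_work(); pretend queuing failed */
static int queue_flip(struct fb *fb) { (void)fb; return -1; }

static int page_flip(struct fb *new_fb)
{
	int ret;

	fb_get(new_fb);		/* hold the new fb while the flip is pending */
	ret = queue_flip(new_fb);
	if (ret)
		fb_put(new_fb);	/* queuing failed: drop the reference again */
	return ret;
}

int main(void)
{
	struct fb *fb = calloc(1, sizeof(*fb));
	int ret;

	fb->refcount = 1;	/* caller's own reference */
	ret = page_flip(fb);
	printf("flip: %d, refcount back to %d\n", ret, fb->refcount);
	free(fb);
	return 0;
}
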
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index f672e6ad8afa..908e5316eac4 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -190,6 +190,7 @@ static int armada_drm_load(struct drm_device *dev, unsigned long flags)
190 if (ret) 190 if (ret)
191 goto err_comp; 191 goto err_comp;
192 192
193 dev->irq_enabled = true;
193 dev->vblank_disable_allowed = 1; 194 dev->vblank_disable_allowed = 1;
194 195
195 ret = armada_fbdev_init(dev); 196 ret = armada_fbdev_init(dev);
@@ -331,7 +332,7 @@ static struct drm_driver armada_drm_driver = {
331 .desc = "Armada SoC DRM", 332 .desc = "Armada SoC DRM",
332 .date = "20120730", 333 .date = "20120730",
333 .driver_features = DRIVER_GEM | DRIVER_MODESET | 334 .driver_features = DRIVER_GEM | DRIVER_MODESET |
334 DRIVER_PRIME, 335 DRIVER_HAVE_IRQ | DRIVER_PRIME,
335 .ioctls = armada_ioctls, 336 .ioctls = armada_ioctls,
336 .fops = &armada_drm_fops, 337 .fops = &armada_drm_fops,
337}; 338};
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index e705335101a5..c2a1cba1e984 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -32,6 +32,8 @@ static struct drm_driver driver;
32static const struct pci_device_id pciidlist[] = { 32static const struct pci_device_id pciidlist[] = {
33 { PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5446, 0x1af4, 0x1100, 0, 33 { PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5446, 0x1af4, 0x1100, 0,
34 0, 0 }, 34 0, 0 },
35 { PCI_VENDOR_ID_CIRRUS, PCI_DEVICE_ID_CIRRUS_5446, PCI_VENDOR_ID_XEN,
36 0x0001, 0, 0, 0 },
35 {0,} 37 {0,}
36}; 38};
37 39
diff --git a/drivers/gpu/drm/exynos/exynos_dp_core.c b/drivers/gpu/drm/exynos/exynos_dp_core.c
index cd50ece31601..6adb1e5cfb08 100644
--- a/drivers/gpu/drm/exynos/exynos_dp_core.c
+++ b/drivers/gpu/drm/exynos/exynos_dp_core.c
@@ -1355,13 +1355,8 @@ static void exynos_dp_unbind(struct device *dev, struct device *master,
1355 void *data) 1355 void *data)
1356{ 1356{
1357 struct exynos_drm_display *display = dev_get_drvdata(dev); 1357 struct exynos_drm_display *display = dev_get_drvdata(dev);
1358 struct exynos_dp_device *dp = display->ctx;
1359 struct drm_encoder *encoder = dp->encoder;
1360 1358
1361 exynos_dp_dpms(display, DRM_MODE_DPMS_OFF); 1359 exynos_dp_dpms(display, DRM_MODE_DPMS_OFF);
1362
1363 exynos_dp_connector_destroy(&dp->connector);
1364 encoder->funcs->destroy(encoder);
1365} 1360}
1366 1361
1367static const struct component_ops exynos_dp_ops = { 1362static const struct component_ops exynos_dp_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_crtc.c b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
index 8e38e9f8e542..45026e693225 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_crtc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_crtc.c
@@ -71,13 +71,16 @@ static void exynos_drm_crtc_dpms(struct drm_crtc *crtc, int mode)
71 !atomic_read(&exynos_crtc->pending_flip), 71 !atomic_read(&exynos_crtc->pending_flip),
72 HZ/20)) 72 HZ/20))
73 atomic_set(&exynos_crtc->pending_flip, 0); 73 atomic_set(&exynos_crtc->pending_flip, 0);
74 drm_vblank_off(crtc->dev, exynos_crtc->pipe); 74 drm_crtc_vblank_off(crtc);
75 } 75 }
76 76
77 if (manager->ops->dpms) 77 if (manager->ops->dpms)
78 manager->ops->dpms(manager, mode); 78 manager->ops->dpms(manager, mode);
79 79
80 exynos_crtc->dpms = mode; 80 exynos_crtc->dpms = mode;
81
82 if (mode == DRM_MODE_DPMS_ON)
83 drm_crtc_vblank_on(crtc);
81} 84}
82 85
83static void exynos_drm_crtc_prepare(struct drm_crtc *crtc) 86static void exynos_drm_crtc_prepare(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index 96c87db388fb..3dc678ed9949 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -338,14 +338,10 @@ err_del_component:
338 338
339int exynos_dpi_remove(struct device *dev) 339int exynos_dpi_remove(struct device *dev)
340{ 340{
341 struct drm_encoder *encoder = exynos_dpi_display.encoder;
342 struct exynos_dpi *ctx = exynos_dpi_display.ctx; 341 struct exynos_dpi *ctx = exynos_dpi_display.ctx;
343 342
344 exynos_dpi_dpms(&exynos_dpi_display, DRM_MODE_DPMS_OFF); 343 exynos_dpi_dpms(&exynos_dpi_display, DRM_MODE_DPMS_OFF);
345 344
346 exynos_dpi_connector_destroy(&ctx->connector);
347 encoder->funcs->destroy(encoder);
348
349 if (ctx->panel) 345 if (ctx->panel)
350 drm_panel_detach(ctx->panel); 346 drm_panel_detach(ctx->panel);
351 347
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 443a2069858a..e5c4c6c8c967 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -87,16 +87,12 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
87 87
88 plane = exynos_plane_init(dev, possible_crtcs, 88 plane = exynos_plane_init(dev, possible_crtcs,
89 DRM_PLANE_TYPE_OVERLAY); 89 DRM_PLANE_TYPE_OVERLAY);
90 if (IS_ERR(plane)) 90 if (!IS_ERR(plane))
91 goto err_mode_config_cleanup; 91 continue;
92 }
93
94 /* init kms poll for handling hpd */
95 drm_kms_helper_poll_init(dev);
96 92
97 ret = drm_vblank_init(dev, MAX_CRTC); 93 ret = PTR_ERR(plane);
98 if (ret)
99 goto err_mode_config_cleanup; 94 goto err_mode_config_cleanup;
95 }
100 96
101 /* setup possible_clones. */ 97 /* setup possible_clones. */
102 exynos_drm_encoder_setup(dev); 98 exynos_drm_encoder_setup(dev);
@@ -106,15 +102,16 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
106 /* Try to bind all sub drivers. */ 102 /* Try to bind all sub drivers. */
107 ret = component_bind_all(dev->dev, dev); 103 ret = component_bind_all(dev->dev, dev);
108 if (ret) 104 if (ret)
109 goto err_cleanup_vblank; 105 goto err_mode_config_cleanup;
110 106
111 /* Probe non kms sub drivers and virtual display driver. */ 107 ret = drm_vblank_init(dev, dev->mode_config.num_crtc);
112 ret = exynos_drm_device_subdrv_probe(dev);
113 if (ret) 108 if (ret)
114 goto err_unbind_all; 109 goto err_unbind_all;
115 110
116 /* force connectors detection */ 111 /* Probe non kms sub drivers and virtual display driver. */
117 drm_helper_hpd_irq_event(dev); 112 ret = exynos_drm_device_subdrv_probe(dev);
113 if (ret)
114 goto err_cleanup_vblank;
118 115
119 /* 116 /*
120 * enable drm irq mode. 117 * enable drm irq mode.
@@ -133,12 +130,18 @@ static int exynos_drm_load(struct drm_device *dev, unsigned long flags)
133 */ 130 */
134 dev->vblank_disable_allowed = true; 131 dev->vblank_disable_allowed = true;
135 132
133 /* init kms poll for handling hpd */
134 drm_kms_helper_poll_init(dev);
135
136 /* force connectors detection */
137 drm_helper_hpd_irq_event(dev);
138
136 return 0; 139 return 0;
137 140
138err_unbind_all:
139 component_unbind_all(dev->dev, dev);
140err_cleanup_vblank: 141err_cleanup_vblank:
141 drm_vblank_cleanup(dev); 142 drm_vblank_cleanup(dev);
143err_unbind_all:
144 component_unbind_all(dev->dev, dev);
142err_mode_config_cleanup: 145err_mode_config_cleanup:
143 drm_mode_config_cleanup(dev); 146 drm_mode_config_cleanup(dev);
144 drm_release_iommu_mapping(dev); 147 drm_release_iommu_mapping(dev);
@@ -155,8 +158,8 @@ static int exynos_drm_unload(struct drm_device *dev)
155 exynos_drm_fbdev_fini(dev); 158 exynos_drm_fbdev_fini(dev);
156 drm_kms_helper_poll_fini(dev); 159 drm_kms_helper_poll_fini(dev);
157 160
158 component_unbind_all(dev->dev, dev);
159 drm_vblank_cleanup(dev); 161 drm_vblank_cleanup(dev);
162 component_unbind_all(dev->dev, dev);
160 drm_mode_config_cleanup(dev); 163 drm_mode_config_cleanup(dev);
161 drm_release_iommu_mapping(dev); 164 drm_release_iommu_mapping(dev);
162 165
@@ -191,8 +194,12 @@ static int exynos_drm_resume(struct drm_device *dev)
191 194
192 drm_modeset_lock_all(dev); 195 drm_modeset_lock_all(dev);
193 list_for_each_entry(connector, &dev->mode_config.connector_list, head) { 196 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
194 if (connector->funcs->dpms) 197 if (connector->funcs->dpms) {
195 connector->funcs->dpms(connector, connector->dpms); 198 int dpms = connector->dpms;
199
200 connector->dpms = DRM_MODE_DPMS_OFF;
201 connector->funcs->dpms(connector, dpms);
202 }
196 } 203 }
197 drm_modeset_unlock_all(dev); 204 drm_modeset_unlock_all(dev);
198 205
@@ -488,6 +495,12 @@ static struct component_match *exynos_drm_match_add(struct device *dev)
488 495
489 mutex_lock(&drm_component_lock); 496 mutex_lock(&drm_component_lock);
490 497
 498 /* Do not retry probing if there is no kms driver registered. */
499 if (list_empty(&drm_component_list)) {
500 mutex_unlock(&drm_component_lock);
501 return ERR_PTR(-ENODEV);
502 }
503
491 list_for_each_entry(cdev, &drm_component_list, list) { 504 list_for_each_entry(cdev, &drm_component_list, list) {
492 /* 505 /*
493 * Add components to master only in case that crtc and 506 * Add components to master only in case that crtc and
@@ -578,10 +591,21 @@ static int exynos_drm_platform_probe(struct platform_device *pdev)
578 goto err_unregister_mixer_drv; 591 goto err_unregister_mixer_drv;
579#endif 592#endif
580 593
594 match = exynos_drm_match_add(&pdev->dev);
595 if (IS_ERR(match)) {
596 ret = PTR_ERR(match);
597 goto err_unregister_hdmi_drv;
598 }
599
600 ret = component_master_add_with_match(&pdev->dev, &exynos_drm_ops,
601 match);
602 if (ret < 0)
603 goto err_unregister_hdmi_drv;
604
581#ifdef CONFIG_DRM_EXYNOS_G2D 605#ifdef CONFIG_DRM_EXYNOS_G2D
582 ret = platform_driver_register(&g2d_driver); 606 ret = platform_driver_register(&g2d_driver);
583 if (ret < 0) 607 if (ret < 0)
584 goto err_unregister_hdmi_drv; 608 goto err_del_component_master;
585#endif 609#endif
586 610
587#ifdef CONFIG_DRM_EXYNOS_FIMC 611#ifdef CONFIG_DRM_EXYNOS_FIMC
@@ -612,23 +636,9 @@ static int exynos_drm_platform_probe(struct platform_device *pdev)
612 goto err_unregister_ipp_drv; 636 goto err_unregister_ipp_drv;
613#endif 637#endif
614 638
615 match = exynos_drm_match_add(&pdev->dev);
616 if (IS_ERR(match)) {
617 ret = PTR_ERR(match);
618 goto err_unregister_resources;
619 }
620
621 ret = component_master_add_with_match(&pdev->dev, &exynos_drm_ops,
622 match);
623 if (ret < 0)
624 goto err_unregister_resources;
625
626 return ret; 639 return ret;
627 640
628err_unregister_resources:
629
630#ifdef CONFIG_DRM_EXYNOS_IPP 641#ifdef CONFIG_DRM_EXYNOS_IPP
631 exynos_platform_device_ipp_unregister();
632err_unregister_ipp_drv: 642err_unregister_ipp_drv:
633 platform_driver_unregister(&ipp_driver); 643 platform_driver_unregister(&ipp_driver);
634err_unregister_gsc_drv: 644err_unregister_gsc_drv:
@@ -651,9 +661,11 @@ err_unregister_g2d_drv:
651 661
652#ifdef CONFIG_DRM_EXYNOS_G2D 662#ifdef CONFIG_DRM_EXYNOS_G2D
653 platform_driver_unregister(&g2d_driver); 663 platform_driver_unregister(&g2d_driver);
654err_unregister_hdmi_drv: 664err_del_component_master:
655#endif 665#endif
666 component_master_del(&pdev->dev, &exynos_drm_ops);
656 667
668err_unregister_hdmi_drv:
657#ifdef CONFIG_DRM_EXYNOS_HDMI 669#ifdef CONFIG_DRM_EXYNOS_HDMI
658 platform_driver_unregister(&hdmi_driver); 670 platform_driver_unregister(&hdmi_driver);
659err_unregister_mixer_drv: 671err_unregister_mixer_drv:
@@ -734,6 +746,18 @@ static int exynos_drm_init(void)
734{ 746{
735 int ret; 747 int ret;
736 748
749 /*
750 * Register device object only in case of Exynos SoC.
751 *
 752 * The code below temporarily resolves an infinite loop issue hit by
 753 * the Exynos drm driver when using a multi-platform kernel.
 754 * It will be replaced with a more generic approach later.
755 */
756 if (!of_machine_is_compatible("samsung,exynos3") &&
757 !of_machine_is_compatible("samsung,exynos4") &&
758 !of_machine_is_compatible("samsung,exynos5"))
759 return -ENODEV;
760
737 exynos_drm_pdev = platform_device_register_simple("exynos-drm", -1, 761 exynos_drm_pdev = platform_device_register_simple("exynos-drm", -1,
738 NULL, 0); 762 NULL, 0);
739 if (IS_ERR(exynos_drm_pdev)) 763 if (IS_ERR(exynos_drm_pdev))
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index 24741d8758e8..acf7e9e39dcd 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1660,13 +1660,9 @@ static void exynos_dsi_unbind(struct device *dev, struct device *master,
1660 void *data) 1660 void *data)
1661{ 1661{
1662 struct exynos_dsi *dsi = exynos_dsi_display.ctx; 1662 struct exynos_dsi *dsi = exynos_dsi_display.ctx;
1663 struct drm_encoder *encoder = dsi->encoder;
1664 1663
1665 exynos_dsi_dpms(&exynos_dsi_display, DRM_MODE_DPMS_OFF); 1664 exynos_dsi_dpms(&exynos_dsi_display, DRM_MODE_DPMS_OFF);
1666 1665
1667 exynos_dsi_connector_destroy(&dsi->connector);
1668 encoder->funcs->destroy(encoder);
1669
1670 mipi_dsi_host_unregister(&dsi->dsi_host); 1666 mipi_dsi_host_unregister(&dsi->dsi_host);
1671} 1667}
1672 1668
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index df7a77d3eff8..6ff8599f6cbf 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -302,9 +302,12 @@ static void g2d_fini_cmdlist(struct g2d_data *g2d)
302 struct exynos_drm_subdrv *subdrv = &g2d->subdrv; 302 struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
303 303
304 kfree(g2d->cmdlist_node); 304 kfree(g2d->cmdlist_node);
305 dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE, 305
306 g2d->cmdlist_pool_virt, 306 if (g2d->cmdlist_pool_virt && g2d->cmdlist_pool) {
307 g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs); 307 dma_free_attrs(subdrv->drm_dev->dev, G2D_CMDLIST_POOL_SIZE,
308 g2d->cmdlist_pool_virt,
309 g2d->cmdlist_pool, &g2d->cmdlist_dma_attrs);
310 }
308} 311}
309 312
310static struct g2d_cmdlist_node *g2d_get_cmdlist(struct g2d_data *g2d) 313static struct g2d_cmdlist_node *g2d_get_cmdlist(struct g2d_data *g2d)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index d565207040a2..50faf913e574 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -630,7 +630,6 @@ static int vidi_remove(struct platform_device *pdev)
630{ 630{
631 struct exynos_drm_manager *mgr = platform_get_drvdata(pdev); 631 struct exynos_drm_manager *mgr = platform_get_drvdata(pdev);
632 struct vidi_context *ctx = mgr->ctx; 632 struct vidi_context *ctx = mgr->ctx;
633 struct drm_encoder *encoder = ctx->encoder;
634 633
635 if (ctx->raw_edid != (struct edid *)fake_edid_info) { 634 if (ctx->raw_edid != (struct edid *)fake_edid_info) {
636 kfree(ctx->raw_edid); 635 kfree(ctx->raw_edid);
@@ -639,9 +638,6 @@ static int vidi_remove(struct platform_device *pdev)
639 return -EINVAL; 638 return -EINVAL;
640 } 639 }
641 640
642 encoder->funcs->destroy(encoder);
643 drm_connector_cleanup(&ctx->connector);
644
645 return 0; 641 return 0;
646} 642}
647 643
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 7910fb37d9bb..563a19e62eb2 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -2312,12 +2312,6 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
2312 2312
2313static void hdmi_unbind(struct device *dev, struct device *master, void *data) 2313static void hdmi_unbind(struct device *dev, struct device *master, void *data)
2314{ 2314{
2315 struct exynos_drm_display *display = get_hdmi_display(dev);
2316 struct drm_encoder *encoder = display->encoder;
2317 struct hdmi_context *hdata = display->ctx;
2318
2319 hdmi_connector_destroy(&hdata->connector);
2320 encoder->funcs->destroy(encoder);
2321} 2315}
2322 2316
2323static const struct component_ops hdmi_component_ops = { 2317static const struct component_ops hdmi_component_ops = {
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 1403b01e8216..318ade9bb5af 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1670,15 +1670,17 @@ int i915_driver_load(struct drm_device *dev, unsigned long flags)
1670 goto out_regs; 1670 goto out_regs;
1671 1671
1672 if (drm_core_check_feature(dev, DRIVER_MODESET)) { 1672 if (drm_core_check_feature(dev, DRIVER_MODESET)) {
1673 ret = i915_kick_out_vgacon(dev_priv); 1673 /* WARNING: Apparently we must kick fbdev drivers before vgacon,
1674 * otherwise the vga fbdev driver falls over. */
1675 ret = i915_kick_out_firmware_fb(dev_priv);
1674 if (ret) { 1676 if (ret) {
1675 DRM_ERROR("failed to remove conflicting VGA console\n"); 1677 DRM_ERROR("failed to remove conflicting framebuffer drivers\n");
1676 goto out_gtt; 1678 goto out_gtt;
1677 } 1679 }
1678 1680
1679 ret = i915_kick_out_firmware_fb(dev_priv); 1681 ret = i915_kick_out_vgacon(dev_priv);
1680 if (ret) { 1682 if (ret) {
1681 DRM_ERROR("failed to remove conflicting framebuffer drivers\n"); 1683 DRM_ERROR("failed to remove conflicting VGA console\n");
1682 goto out_gtt; 1684 goto out_gtt;
1683 } 1685 }
1684 } 1686 }
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 055d5e7fbf12..2318b4c7a8f8 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -986,6 +986,15 @@ static int i915_pm_freeze(struct device *dev)
986 return i915_drm_freeze(drm_dev); 986 return i915_drm_freeze(drm_dev);
987} 987}
988 988
989static int i915_pm_freeze_late(struct device *dev)
990{
991 struct pci_dev *pdev = to_pci_dev(dev);
992 struct drm_device *drm_dev = pci_get_drvdata(pdev);
993 struct drm_i915_private *dev_priv = drm_dev->dev_private;
994
995 return intel_suspend_complete(dev_priv);
996}
997
989static int i915_pm_thaw_early(struct device *dev) 998static int i915_pm_thaw_early(struct device *dev)
990{ 999{
991 struct pci_dev *pdev = to_pci_dev(dev); 1000 struct pci_dev *pdev = to_pci_dev(dev);
@@ -1570,6 +1579,7 @@ static const struct dev_pm_ops i915_pm_ops = {
1570 .resume_early = i915_pm_resume_early, 1579 .resume_early = i915_pm_resume_early,
1571 .resume = i915_pm_resume, 1580 .resume = i915_pm_resume,
1572 .freeze = i915_pm_freeze, 1581 .freeze = i915_pm_freeze,
1582 .freeze_late = i915_pm_freeze_late,
1573 .thaw_early = i915_pm_thaw_early, 1583 .thaw_early = i915_pm_thaw_early,
1574 .thaw = i915_pm_thaw, 1584 .thaw = i915_pm_thaw,
1575 .poweroff = i915_pm_poweroff, 1585 .poweroff = i915_pm_poweroff,
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index b672b843fd5e..728938f02341 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -1902,6 +1902,22 @@ static void bdw_setup_private_ppat(struct drm_i915_private *dev_priv)
1902 GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) | 1902 GEN8_PPAT(6, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(2)) |
1903 GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3)); 1903 GEN8_PPAT(7, GEN8_PPAT_WB | GEN8_PPAT_LLCELLC | GEN8_PPAT_AGE(3));
1904 1904
1905 if (!USES_PPGTT(dev_priv->dev))
1906 /* Spec: "For GGTT, there is NO pat_sel[2:0] from the entry,
1907 * so RTL will always use the value corresponding to
1908 * pat_sel = 000".
1909 * So let's disable cache for GGTT to avoid screen corruptions.
1910 * MOCS still can be used though.
1911 * - System agent ggtt writes (i.e. cpu gtt mmaps) already work
1912 * before this patch, i.e. the same uncached + snooping access
1913 * like on gen6/7 seems to be in effect.
1914 * - So this just fixes blitter/render access. Again it looks
1915 * like it's not just uncached access, but uncached + snooping.
1916 * So we can still hold onto all our assumptions wrt cpu
1917 * clflushing on LLC machines.
1918 */
1919 pat = GEN8_PPAT(0, GEN8_PPAT_UC);
1920
1905 /* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b 1921 /* XXX: spec defines this as 2 distinct registers. It's unclear if a 64b
1906 * write would work. */ 1922 * write would work. */
1907 I915_WRITE(GEN8_PRIVATE_PAT, pat); 1923 I915_WRITE(GEN8_PRIVATE_PAT, pat);
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 2cefb597df6d..2b1eaa29ada4 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -364,22 +364,9 @@ i915_gem_set_tiling(struct drm_device *dev, void *data,
364 * has to also include the unfenced register the GPU uses 364 * has to also include the unfenced register the GPU uses
365 * whilst executing a fenced command for an untiled object. 365 * whilst executing a fenced command for an untiled object.
366 */ 366 */
367 367 if (obj->map_and_fenceable &&
368 obj->map_and_fenceable = 368 !i915_gem_object_fence_ok(obj, args->tiling_mode))
369 !i915_gem_obj_ggtt_bound(obj) || 369 ret = i915_gem_object_ggtt_unbind(obj);
370 (i915_gem_obj_ggtt_offset(obj) +
371 obj->base.size <= dev_priv->gtt.mappable_end &&
372 i915_gem_object_fence_ok(obj, args->tiling_mode));
373
374 /* Rebind if we need a change of alignment */
375 if (!obj->map_and_fenceable) {
376 u32 unfenced_align =
377 i915_gem_get_gtt_alignment(dev, obj->base.size,
378 args->tiling_mode,
379 false);
380 if (i915_gem_obj_ggtt_offset(obj) & (unfenced_align - 1))
381 ret = i915_gem_object_ggtt_unbind(obj);
382 }
383 370
384 if (ret == 0) { 371 if (ret == 0) {
385 obj->fence_dirty = 372 obj->fence_dirty =
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 3201986bf25e..f66392b6e287 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1711,7 +1711,7 @@ static irqreturn_t gen8_gt_irq_handler(struct drm_device *dev,
1711#define HPD_STORM_DETECT_PERIOD 1000 1711#define HPD_STORM_DETECT_PERIOD 1000
1712#define HPD_STORM_THRESHOLD 5 1712#define HPD_STORM_THRESHOLD 5
1713 1713
1714static int ilk_port_to_hotplug_shift(enum port port) 1714static int pch_port_to_hotplug_shift(enum port port)
1715{ 1715{
1716 switch (port) { 1716 switch (port) {
1717 case PORT_A: 1717 case PORT_A:
@@ -1727,7 +1727,7 @@ static int ilk_port_to_hotplug_shift(enum port port)
1727 } 1727 }
1728} 1728}
1729 1729
1730static int g4x_port_to_hotplug_shift(enum port port) 1730static int i915_port_to_hotplug_shift(enum port port)
1731{ 1731{
1732 switch (port) { 1732 switch (port) {
1733 case PORT_A: 1733 case PORT_A:
@@ -1785,12 +1785,12 @@ static inline void intel_hpd_irq_handler(struct drm_device *dev,
1785 if (port && dev_priv->hpd_irq_port[port]) { 1785 if (port && dev_priv->hpd_irq_port[port]) {
1786 bool long_hpd; 1786 bool long_hpd;
1787 1787
1788 if (IS_G4X(dev)) { 1788 if (HAS_PCH_SPLIT(dev)) {
1789 dig_shift = g4x_port_to_hotplug_shift(port); 1789 dig_shift = pch_port_to_hotplug_shift(port);
1790 long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
1791 } else {
1792 dig_shift = ilk_port_to_hotplug_shift(port);
1793 long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT; 1790 long_hpd = (dig_hotplug_reg >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
1791 } else {
1792 dig_shift = i915_port_to_hotplug_shift(port);
1793 long_hpd = (hotplug_trigger >> dig_shift) & PORTB_HOTPLUG_LONG_DETECT;
1794 } 1794 }
1795 1795
1796 DRM_DEBUG_DRIVER("digital hpd port %c - %s\n", 1796 DRM_DEBUG_DRIVER("digital hpd port %c - %s\n",
@@ -3458,12 +3458,13 @@ static void gen8_irq_reset(struct drm_device *dev)
3458void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv) 3458void gen8_irq_power_well_post_enable(struct drm_i915_private *dev_priv)
3459{ 3459{
3460 unsigned long irqflags; 3460 unsigned long irqflags;
3461 uint32_t extra_ier = GEN8_PIPE_VBLANK | GEN8_PIPE_FIFO_UNDERRUN;
3461 3462
3462 spin_lock_irqsave(&dev_priv->irq_lock, irqflags); 3463 spin_lock_irqsave(&dev_priv->irq_lock, irqflags);
3463 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B], 3464 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_B, dev_priv->de_irq_mask[PIPE_B],
3464 ~dev_priv->de_irq_mask[PIPE_B]); 3465 ~dev_priv->de_irq_mask[PIPE_B] | extra_ier);
3465 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C], 3466 GEN8_IRQ_INIT_NDX(DE_PIPE, PIPE_C, dev_priv->de_irq_mask[PIPE_C],
3466 ~dev_priv->de_irq_mask[PIPE_C]); 3467 ~dev_priv->de_irq_mask[PIPE_C] | extra_ier);
3467 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags); 3468 spin_unlock_irqrestore(&dev_priv->irq_lock, irqflags);
3468} 3469}
3469 3470
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 507370513f3d..9cb5c95d5898 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -73,9 +73,6 @@ static const uint32_t intel_cursor_formats[] = {
73 DRM_FORMAT_ARGB8888, 73 DRM_FORMAT_ARGB8888,
74}; 74};
75 75
76#define DIV_ROUND_CLOSEST_ULL(ll, d) \
77({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
78
79static void intel_increase_pllclock(struct drm_device *dev, 76static void intel_increase_pllclock(struct drm_device *dev,
80 enum pipe pipe); 77 enum pipe pipe);
81static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on); 78static void intel_crtc_update_cursor(struct drm_crtc *crtc, bool on);
@@ -4328,7 +4325,6 @@ static void ironlake_crtc_disable(struct drm_crtc *crtc)
4328 ironlake_fdi_disable(crtc); 4325 ironlake_fdi_disable(crtc);
4329 4326
4330 ironlake_disable_pch_transcoder(dev_priv, pipe); 4327 ironlake_disable_pch_transcoder(dev_priv, pipe);
4331 intel_set_pch_fifo_underrun_reporting(dev, pipe, true);
4332 4328
4333 if (HAS_PCH_CPT(dev)) { 4329 if (HAS_PCH_CPT(dev)) {
4334 /* disable TRANS_DP_CTL */ 4330 /* disable TRANS_DP_CTL */
@@ -4392,7 +4388,6 @@ static void haswell_crtc_disable(struct drm_crtc *crtc)
4392 4388
4393 if (intel_crtc->config.has_pch_encoder) { 4389 if (intel_crtc->config.has_pch_encoder) {
4394 lpt_disable_pch_transcoder(dev_priv); 4390 lpt_disable_pch_transcoder(dev_priv);
4395 intel_set_pch_fifo_underrun_reporting(dev, TRANSCODER_A, true);
4396 intel_ddi_fdi_disable(crtc); 4391 intel_ddi_fdi_disable(crtc);
4397 } 4392 }
4398 4393
@@ -4588,7 +4583,7 @@ static void vlv_update_cdclk(struct drm_device *dev)
4588 * BSpec erroneously claims we should aim for 4MHz, but 4583 * BSpec erroneously claims we should aim for 4MHz, but
4589 * in fact 1MHz is the correct frequency. 4584 * in fact 1MHz is the correct frequency.
4590 */ 4585 */
4591 I915_WRITE(GMBUSFREQ_VLV, dev_priv->vlv_cdclk_freq); 4586 I915_WRITE(GMBUSFREQ_VLV, DIV_ROUND_UP(dev_priv->vlv_cdclk_freq, 1000));
4592} 4587}
4593 4588
4594/* Adjust CDclk dividers to allow high res or save power if possible */ 4589/* Adjust CDclk dividers to allow high res or save power if possible */
@@ -9411,6 +9406,10 @@ static bool page_flip_finished(struct intel_crtc *crtc)
9411 struct drm_device *dev = crtc->base.dev; 9406 struct drm_device *dev = crtc->base.dev;
9412 struct drm_i915_private *dev_priv = dev->dev_private; 9407 struct drm_i915_private *dev_priv = dev->dev_private;
9413 9408
9409 if (i915_reset_in_progress(&dev_priv->gpu_error) ||
9410 crtc->reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter))
9411 return true;
9412
9414 /* 9413 /*
9415 * The relevant registers doen't exist on pre-ctg. 9414 * The relevant registers doen't exist on pre-ctg.
9416 * As the flip done interrupt doesn't trigger for mmio 9415 * As the flip done interrupt doesn't trigger for mmio
@@ -12357,27 +12356,36 @@ static void intel_setup_outputs(struct drm_device *dev)
12357 if (I915_READ(PCH_DP_D) & DP_DETECTED) 12356 if (I915_READ(PCH_DP_D) & DP_DETECTED)
12358 intel_dp_init(dev, PCH_DP_D, PORT_D); 12357 intel_dp_init(dev, PCH_DP_D, PORT_D);
12359 } else if (IS_VALLEYVIEW(dev)) { 12358 } else if (IS_VALLEYVIEW(dev)) {
12360 if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED) { 12359 /*
12360 * The DP_DETECTED bit is the latched state of the DDC
12361 * SDA pin at boot. However since eDP doesn't require DDC
12362 * (no way to plug in a DP->HDMI dongle) the DDC pins for
12363 * eDP ports may have been muxed to an alternate function.
12364 * Thus we can't rely on the DP_DETECTED bit alone to detect
12365 * eDP ports. Consult the VBT as well as DP_DETECTED to
12366 * detect eDP ports.
12367 */
12368 if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIB) & SDVO_DETECTED)
12361 intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB, 12369 intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIB,
12362 PORT_B); 12370 PORT_B);
12363 if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED) 12371 if (I915_READ(VLV_DISPLAY_BASE + DP_B) & DP_DETECTED ||
12364 intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B); 12372 intel_dp_is_edp(dev, PORT_B))
12365 } 12373 intel_dp_init(dev, VLV_DISPLAY_BASE + DP_B, PORT_B);
12366 12374
12367 if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED) { 12375 if (I915_READ(VLV_DISPLAY_BASE + GEN4_HDMIC) & SDVO_DETECTED)
12368 intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC, 12376 intel_hdmi_init(dev, VLV_DISPLAY_BASE + GEN4_HDMIC,
12369 PORT_C); 12377 PORT_C);
12370 if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED) 12378 if (I915_READ(VLV_DISPLAY_BASE + DP_C) & DP_DETECTED ||
12371 intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C); 12379 intel_dp_is_edp(dev, PORT_C))
12372 } 12380 intel_dp_init(dev, VLV_DISPLAY_BASE + DP_C, PORT_C);
12373 12381
12374 if (IS_CHERRYVIEW(dev)) { 12382 if (IS_CHERRYVIEW(dev)) {
12375 if (I915_READ(VLV_DISPLAY_BASE + CHV_HDMID) & SDVO_DETECTED) { 12383 if (I915_READ(VLV_DISPLAY_BASE + CHV_HDMID) & SDVO_DETECTED)
12376 intel_hdmi_init(dev, VLV_DISPLAY_BASE + CHV_HDMID, 12384 intel_hdmi_init(dev, VLV_DISPLAY_BASE + CHV_HDMID,
12377 PORT_D); 12385 PORT_D);
12378 if (I915_READ(VLV_DISPLAY_BASE + DP_D) & DP_DETECTED) 12386 /* eDP not supported on port D, so don't check VBT */
12379 intel_dp_init(dev, VLV_DISPLAY_BASE + DP_D, PORT_D); 12387 if (I915_READ(VLV_DISPLAY_BASE + DP_D) & DP_DETECTED)
12380 } 12388 intel_dp_init(dev, VLV_DISPLAY_BASE + DP_D, PORT_D);
12381 } 12389 }
12382 12390
12383 intel_dsi_init(dev); 12391 intel_dsi_init(dev);
@@ -12879,6 +12887,9 @@ static struct intel_quirk intel_quirks[] = {
12879 /* Acer C720 Chromebook (Core i3 4005U) */ 12887 /* Acer C720 Chromebook (Core i3 4005U) */
12880 { 0x0a16, 0x1025, 0x0a11, quirk_backlight_present }, 12888 { 0x0a16, 0x1025, 0x0a11, quirk_backlight_present },
12881 12889
12890 /* Apple Macbook 2,1 (Core 2 T7400) */
12891 { 0x27a2, 0x8086, 0x7270, quirk_backlight_present },
12892
12882 /* Toshiba CB35 Chromebook (Celeron 2955U) */ 12893 /* Toshiba CB35 Chromebook (Celeron 2955U) */
12883 { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present }, 12894 { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
12884 12895
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index f6a3fdd5589e..4bcd91757321 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -2806,6 +2806,13 @@ intel_dp_dpcd_read_wake(struct drm_dp_aux *aux, unsigned int offset,
2806 ssize_t ret; 2806 ssize_t ret;
2807 int i; 2807 int i;
2808 2808
2809 /*
2810 * Sometimes we just get the same incorrect byte repeated
2811 * over the entire buffer. Doing just one throw-away read
2812 * initially seems to "solve" it.
2813 */
2814 drm_dp_dpcd_read(aux, DP_DPCD_REV, buffer, 1);
2815
2809 for (i = 0; i < 3; i++) { 2816 for (i = 0; i < 3; i++) {
2810 ret = drm_dp_dpcd_read(aux, offset, buffer, size); 2817 ret = drm_dp_dpcd_read(aux, offset, buffer, size);
2811 if (ret == size) 2818 if (ret == size)
@@ -3724,9 +3731,10 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
3724 } 3731 }
3725 } 3732 }
3726 3733
3727 /* Training Pattern 3 support */ 3734 /* Training Pattern 3 support, both source and sink */
3728 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 && 3735 if (intel_dp->dpcd[DP_DPCD_REV] >= 0x12 &&
3729 intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED) { 3736 intel_dp->dpcd[DP_MAX_LANE_COUNT] & DP_TPS3_SUPPORTED &&
3737 (IS_HASWELL(dev_priv) || INTEL_INFO(dev_priv)->gen >= 8)) {
3730 intel_dp->use_tps3 = true; 3738 intel_dp->use_tps3 = true;
3731 DRM_DEBUG_KMS("Displayport TPS3 supported\n"); 3739 DRM_DEBUG_KMS("Displayport TPS3 supported\n");
3732 } else 3740 } else
@@ -4442,6 +4450,7 @@ static void intel_dp_encoder_suspend(struct intel_encoder *intel_encoder)
4442 * vdd might still be enabled due to the delayed vdd off. 4450
4443 * Make sure vdd is actually turned off here. 4451 * Make sure vdd is actually turned off here.
4444 */ 4452 */
4453 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
4445 pps_lock(intel_dp); 4454 pps_lock(intel_dp);
4446 edp_panel_vdd_off_sync(intel_dp); 4455 edp_panel_vdd_off_sync(intel_dp);
4447 pps_unlock(intel_dp); 4456 pps_unlock(intel_dp);
@@ -4491,6 +4500,18 @@ intel_dp_hpd_pulse(struct intel_digital_port *intel_dig_port, bool long_hpd)
4491 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP) 4500 if (intel_dig_port->base.type != INTEL_OUTPUT_EDP)
4492 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT; 4501 intel_dig_port->base.type = INTEL_OUTPUT_DISPLAYPORT;
4493 4502
4503 if (long_hpd && intel_dig_port->base.type == INTEL_OUTPUT_EDP) {
4504 /*
4505 * vdd off can generate a long pulse on eDP which
4506 * would require vdd on to handle it, and thus we
4507 * would end up in an endless cycle of
4508 * "vdd off -> long hpd -> vdd on -> detect -> vdd off -> ..."
4509 */
4510 DRM_DEBUG_KMS("ignoring long hpd on eDP port %c\n",
4511 port_name(intel_dig_port->port));
4512 return false;
4513 }
4514
4494 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n", 4515 DRM_DEBUG_KMS("got hpd irq on port %c - %s\n",
4495 port_name(intel_dig_port->port), 4516 port_name(intel_dig_port->port),
4496 long_hpd ? "long" : "short"); 4517 long_hpd ? "long" : "short");
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 07ce04683c30..ba715229a540 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -35,6 +35,9 @@
35#include <drm/drm_fb_helper.h> 35#include <drm/drm_fb_helper.h>
36#include <drm/drm_dp_mst_helper.h> 36#include <drm/drm_dp_mst_helper.h>
37 37
38#define DIV_ROUND_CLOSEST_ULL(ll, d) \
39({ unsigned long long _tmp = (ll)+(d)/2; do_div(_tmp, d); _tmp; })
40
38/** 41/**
39 * _wait_for - magic (register) wait macro 42 * _wait_for - magic (register) wait macro
40 * 43 *
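[Editor's note] DIV_ROUND_CLOSEST_ULL(), moved into intel_drv.h above so other i915 files can share it, rounds a 64-bit division to the nearest integer by adding half the divisor before dividing (the actual division is done with do_div() in the kernel). A userspace approximation with plain 64-bit division, shown only to illustrate the rounding behaviour:

    /* Build: cc -o roundclosest roundclosest.c */
    #include <stdio.h>

    /* round-to-nearest division: add half the divisor, then divide */
    static unsigned long long div_round_closest_ull(unsigned long long n, unsigned int d)
    {
        return (n + d / 2) / d;
    }

    int main(void)
    {
        printf("7 / 2:  truncated=%llu  closest=%llu\n",
               7ULL / 2, div_round_closest_ull(7, 2));
        printf("10 / 4: truncated=%llu  closest=%llu\n",
               10ULL / 4, div_round_closest_ull(10, 4));
        return 0;
    }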
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index a6bd1422e38f..c0bbf2172446 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -899,6 +899,17 @@ void intel_lvds_init(struct drm_device *dev)
899 int pipe; 899 int pipe;
900 u8 pin; 900 u8 pin;
901 901
902 /*
903 * Unlock registers and just leave them unlocked. Do this before
904 * checking quirk lists to avoid bogus WARNINGs.
905 */
906 if (HAS_PCH_SPLIT(dev)) {
907 I915_WRITE(PCH_PP_CONTROL,
908 I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
909 } else {
910 I915_WRITE(PP_CONTROL,
911 I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
912 }
902 if (!intel_lvds_supported(dev)) 913 if (!intel_lvds_supported(dev))
903 return; 914 return;
904 915
@@ -1097,17 +1108,6 @@ out:
1097 lvds_encoder->a3_power = I915_READ(lvds_encoder->reg) & 1108 lvds_encoder->a3_power = I915_READ(lvds_encoder->reg) &
1098 LVDS_A3_POWER_MASK; 1109 LVDS_A3_POWER_MASK;
1099 1110
1100 /*
1101 * Unlock registers and just
1102 * leave them unlocked
1103 */
1104 if (HAS_PCH_SPLIT(dev)) {
1105 I915_WRITE(PCH_PP_CONTROL,
1106 I915_READ(PCH_PP_CONTROL) | PANEL_UNLOCK_REGS);
1107 } else {
1108 I915_WRITE(PP_CONTROL,
1109 I915_READ(PP_CONTROL) | PANEL_UNLOCK_REGS);
1110 }
1111 lvds_connector->lid_notifier.notifier_call = intel_lid_notify; 1111 lvds_connector->lid_notifier.notifier_call = intel_lid_notify;
1112 if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) { 1112 if (acpi_lid_notifier_register(&lvds_connector->lid_notifier)) {
1113 DRM_DEBUG_KMS("lid notifier registration failed\n"); 1113 DRM_DEBUG_KMS("lid notifier registration failed\n");
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 18784470a760..41b3be217493 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -419,9 +419,8 @@ static uint32_t scale(uint32_t source_val,
419 source_val = clamp(source_val, source_min, source_max); 419 source_val = clamp(source_val, source_min, source_max);
420 420
421 /* avoid overflows */ 421 /* avoid overflows */
422 target_val = (uint64_t)(source_val - source_min) * 422 target_val = DIV_ROUND_CLOSEST_ULL((uint64_t)(source_val - source_min) *
423 (target_max - target_min); 423 (target_max - target_min), source_max - source_min);
424 do_div(target_val, source_max - source_min);
425 target_val += target_min; 424 target_val += target_min;
426 425
427 return target_val; 426 return target_val;
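[Editor's note] scale() above clamps the input and remaps it linearly from [source_min, source_max] to [target_min, target_max], now using DIV_ROUND_CLOSEST_ULL() so the mapping rounds to the nearest step instead of always rounding down. A small standalone sketch of that remapping, assuming a non-empty source range; the sample PWM range is made up.

    /* Build: cc -o scale scale.c */
    #include <stdint.h>
    #include <stdio.h>

    /* remap val from [smin, smax] to [tmin, tmax]; assumes smax > smin */
    static uint32_t scale_range(uint32_t val, uint32_t smin, uint32_t smax,
                                uint32_t tmin, uint32_t tmax)
    {
        uint64_t num;

        /* clamp into the source range first */
        if (val < smin)
            val = smin;
        if (val > smax)
            val = smax;

        /* 64-bit product avoids overflow; +range/2 rounds to closest */
        num = (uint64_t)(val - smin) * (tmax - tmin);
        return tmin + (uint32_t)((num + (smax - smin) / 2) / (smax - smin));
    }

    int main(void)
    {
        /* e.g. a 0..255 VBT coefficient mapped onto a 0..937 PWM range */
        printf("  0/255 -> %u\n", (unsigned int)scale_range(0, 0, 255, 0, 937));
        printf("128/255 -> %u\n", (unsigned int)scale_range(128, 0, 255, 0, 937));
        printf("255/255 -> %u\n", (unsigned int)scale_range(255, 0, 255, 0, 937));
        return 0;
    }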
@@ -1099,12 +1098,25 @@ static u32 get_backlight_min_vbt(struct intel_connector *connector)
1099 struct drm_device *dev = connector->base.dev; 1098 struct drm_device *dev = connector->base.dev;
1100 struct drm_i915_private *dev_priv = dev->dev_private; 1099 struct drm_i915_private *dev_priv = dev->dev_private;
1101 struct intel_panel *panel = &connector->panel; 1100 struct intel_panel *panel = &connector->panel;
1101 int min;
1102 1102
1103 WARN_ON(panel->backlight.max == 0); 1103 WARN_ON(panel->backlight.max == 0);
1104 1104
1105 /*
1106 * XXX: If the vbt value is 255, it makes min equal to max, which leads
1107 * to problems. There are such machines out there. Either our
1108 * interpretation is wrong or the vbt has bogus data. Or both. Safeguard
1109 * against this by letting the minimum be at most (arbitrarily chosen)
1110 * 25% of the max.
1111 */
1112 min = clamp_t(int, dev_priv->vbt.backlight.min_brightness, 0, 64);
1113 if (min != dev_priv->vbt.backlight.min_brightness) {
1114 DRM_DEBUG_KMS("clamping VBT min backlight %d/255 to %d/255\n",
1115 dev_priv->vbt.backlight.min_brightness, min);
1116 }
1117
1105 /* vbt value is a coefficient in range [0..255] */ 1118 /* vbt value is a coefficient in range [0..255] */
1106 return scale(dev_priv->vbt.backlight.min_brightness, 0, 255, 1119 return scale(min, 0, 255, 0, panel->backlight.max);
1107 0, panel->backlight.max);
1108} 1120}
1109 1121
1110static int bdw_setup_backlight(struct intel_connector *connector) 1122static int bdw_setup_backlight(struct intel_connector *connector)
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index c27b6140bfd1..ad2fd605f76b 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -5469,11 +5469,6 @@ static void gen6_init_clock_gating(struct drm_device *dev)
5469 I915_WRITE(_3D_CHICKEN, 5469 I915_WRITE(_3D_CHICKEN,
5470 _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB)); 5470 _MASKED_BIT_ENABLE(_3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB));
5471 5471
5472 /* WaSetupGtModeTdRowDispatch:snb */
5473 if (IS_SNB_GT1(dev))
5474 I915_WRITE(GEN6_GT_MODE,
5475 _MASKED_BIT_ENABLE(GEN6_TD_FOUR_ROW_DISPATCH_DISABLE));
5476
5477 /* WaDisable_RenderCache_OperationalFlush:snb */ 5472 /* WaDisable_RenderCache_OperationalFlush:snb */
5478 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE)); 5473 I915_WRITE(CACHE_MODE_0, _MASKED_BIT_DISABLE(RC_OP_FLUSH_ENABLE));
5479 5474
diff --git a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
index cd05677ad4b7..72a40f95d048 100644
--- a/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/device/nvc0.c
@@ -218,7 +218,6 @@ nvc0_identify(struct nouveau_device *device)
218 device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass; 218 device->oclass[NVDEV_ENGINE_BSP ] = &nvc0_bsp_oclass;
219 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass; 219 device->oclass[NVDEV_ENGINE_PPP ] = &nvc0_ppp_oclass;
220 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass; 220 device->oclass[NVDEV_ENGINE_COPY0 ] = &nvc0_copy0_oclass;
221 device->oclass[NVDEV_ENGINE_COPY1 ] = &nvc0_copy1_oclass;
222 device->oclass[NVDEV_ENGINE_DISP ] = nva3_disp_oclass; 221 device->oclass[NVDEV_ENGINE_DISP ] = nva3_disp_oclass;
223 device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass; 222 device->oclass[NVDEV_ENGINE_PERFMON] = &nvc0_perfmon_oclass;
224 break; 223 break;
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
index 5ae6a43893b5..1931057f9962 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nv04.c
@@ -551,8 +551,8 @@ nv04_fifo_intr(struct nouveau_subdev *subdev)
551 } 551 }
552 552
553 if (status & 0x40000000) { 553 if (status & 0x40000000) {
554 nouveau_fifo_uevent(&priv->base);
555 nv_wr32(priv, 0x002100, 0x40000000); 554 nv_wr32(priv, 0x002100, 0x40000000);
555 nouveau_fifo_uevent(&priv->base);
556 status &= ~0x40000000; 556 status &= ~0x40000000;
557 } 557 }
558 } 558 }
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
index 1fe1f8fbda0c..074d434c3077 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nvc0.c
@@ -740,6 +740,8 @@ nvc0_fifo_intr_engine_unit(struct nvc0_fifo_priv *priv, int engn)
740 u32 inte = nv_rd32(priv, 0x002628); 740 u32 inte = nv_rd32(priv, 0x002628);
741 u32 unkn; 741 u32 unkn;
742 742
743 nv_wr32(priv, 0x0025a8 + (engn * 0x04), intr);
744
743 for (unkn = 0; unkn < 8; unkn++) { 745 for (unkn = 0; unkn < 8; unkn++) {
744 u32 ints = (intr >> (unkn * 0x04)) & inte; 746 u32 ints = (intr >> (unkn * 0x04)) & inte;
745 if (ints & 0x1) { 747 if (ints & 0x1) {
@@ -751,8 +753,6 @@ nvc0_fifo_intr_engine_unit(struct nvc0_fifo_priv *priv, int engn)
751 nv_mask(priv, 0x002628, ints, 0); 753 nv_mask(priv, 0x002628, ints, 0);
752 } 754 }
753 } 755 }
754
755 nv_wr32(priv, 0x0025a8 + (engn * 0x04), intr);
756} 756}
757 757
758static void 758static void
diff --git a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
index d2f0fd39c145..f8734eb74eaa 100644
--- a/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/fifo/nve0.c
@@ -952,8 +952,8 @@ nve0_fifo_intr(struct nouveau_subdev *subdev)
952 } 952 }
953 953
954 if (stat & 0x80000000) { 954 if (stat & 0x80000000) {
955 nve0_fifo_intr_engine(priv);
956 nv_wr32(priv, 0x002100, 0x80000000); 955 nv_wr32(priv, 0x002100, 0x80000000);
956 nve0_fifo_intr_engine(priv);
957 stat &= ~0x80000000; 957 stat &= ~0x80000000;
958 } 958 }
959 959
diff --git a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv50.c b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv50.c
index 552fdbd45ebe..1d0e33fb5f61 100644
--- a/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/graph/ctxnv50.c
@@ -113,6 +113,8 @@
113#define IS_NVA3F(x) (((x) > 0xa0 && (x) < 0xaa) || (x) == 0xaf) 113#define IS_NVA3F(x) (((x) > 0xa0 && (x) < 0xaa) || (x) == 0xaf)
114#define IS_NVAAF(x) ((x) >= 0xaa && (x) <= 0xac) 114#define IS_NVAAF(x) ((x) >= 0xaa && (x) <= 0xac)
115 115
116#include <subdev/fb.h>
117
116/* 118/*
117 * This code deals with PGRAPH contexts on NV50 family cards. Like NV40, it's 119 * This code deals with PGRAPH contexts on NV50 family cards. Like NV40, it's
118 * the GPU itself that does context-switching, but it needs a special 120 * the GPU itself that does context-switching, but it needs a special
@@ -569,8 +571,12 @@ nv50_graph_construct_mmio(struct nouveau_grctx *ctx)
569 gr_def(ctx, 0x407d08, 0x00010040); 571 gr_def(ctx, 0x407d08, 0x00010040);
570 else if (device->chipset < 0xa0) 572 else if (device->chipset < 0xa0)
571 gr_def(ctx, 0x407d08, 0x00390040); 573 gr_def(ctx, 0x407d08, 0x00390040);
572 else 574 else {
573 gr_def(ctx, 0x407d08, 0x003d0040); 575 if (nouveau_fb(device)->ram->type != NV_MEM_TYPE_GDDR5)
576 gr_def(ctx, 0x407d08, 0x003d0040);
577 else
578 gr_def(ctx, 0x407d08, 0x003c0040);
579 }
574 gr_def(ctx, 0x407d0c, 0x00000022); 580 gr_def(ctx, 0x407d0c, 0x00000022);
575 } 581 }
576 582
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/gk20a.c b/drivers/gpu/drm/nouveau/core/subdev/fb/gk20a.c
index a16024a74771..fde42e4d1b56 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/gk20a.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/gk20a.c
@@ -27,6 +27,20 @@ struct gk20a_fb_priv {
27}; 27};
28 28
29static int 29static int
30gk20a_fb_init(struct nouveau_object *object)
31{
32 struct gk20a_fb_priv *priv = (void *)object;
33 int ret;
34
35 ret = nouveau_fb_init(&priv->base);
36 if (ret)
37 return ret;
38
39 nv_mask(priv, 0x100c80, 0x00000001, 0x00000000); /* 128KiB lpg */
40 return 0;
41}
42
43static int
30gk20a_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine, 44gk20a_fb_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
31 struct nouveau_oclass *oclass, void *data, u32 size, 45 struct nouveau_oclass *oclass, void *data, u32 size,
32 struct nouveau_object **pobject) 46 struct nouveau_object **pobject)
@@ -48,7 +62,7 @@ gk20a_fb_oclass = &(struct nouveau_fb_impl) {
48 .base.ofuncs = &(struct nouveau_ofuncs) { 62 .base.ofuncs = &(struct nouveau_ofuncs) {
49 .ctor = gk20a_fb_ctor, 63 .ctor = gk20a_fb_ctor,
50 .dtor = _nouveau_fb_dtor, 64 .dtor = _nouveau_fb_dtor,
51 .init = _nouveau_fb_init, 65 .init = gk20a_fb_init,
52 .fini = _nouveau_fb_fini, 66 .fini = _nouveau_fb_fini,
53 }, 67 },
54 .memtype = nvc0_fb_memtype_valid, 68 .memtype = nvc0_fb_memtype_valid,
diff --git a/drivers/gpu/drm/nouveau/nouveau_chan.c b/drivers/gpu/drm/nouveau/nouveau_chan.c
index 589dbb582da2..fd3dbd59d73e 100644
--- a/drivers/gpu/drm/nouveau/nouveau_chan.c
+++ b/drivers/gpu/drm/nouveau/nouveau_chan.c
@@ -400,15 +400,20 @@ nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
400 struct nouveau_channel **pchan) 400 struct nouveau_channel **pchan)
401{ 401{
402 struct nouveau_cli *cli = (void *)nvif_client(&device->base); 402 struct nouveau_cli *cli = (void *)nvif_client(&device->base);
403 bool super;
403 int ret; 404 int ret;
404 405
406 /* hack until fencenv50 is fixed, and agp access relaxed */
407 super = cli->base.super;
408 cli->base.super = true;
409
405 ret = nouveau_channel_ind(drm, device, handle, arg0, pchan); 410 ret = nouveau_channel_ind(drm, device, handle, arg0, pchan);
406 if (ret) { 411 if (ret) {
407 NV_PRINTK(debug, cli, "ib channel create, %d\n", ret); 412 NV_PRINTK(debug, cli, "ib channel create, %d\n", ret);
408 ret = nouveau_channel_dma(drm, device, handle, pchan); 413 ret = nouveau_channel_dma(drm, device, handle, pchan);
409 if (ret) { 414 if (ret) {
410 NV_PRINTK(debug, cli, "dma channel create, %d\n", ret); 415 NV_PRINTK(debug, cli, "dma channel create, %d\n", ret);
411 return ret; 416 goto done;
412 } 417 }
413 } 418 }
414 419
@@ -416,8 +421,9 @@ nouveau_channel_new(struct nouveau_drm *drm, struct nvif_device *device,
416 if (ret) { 421 if (ret) {
417 NV_PRINTK(error, cli, "channel failed to initialise, %d\n", ret); 422 NV_PRINTK(error, cli, "channel failed to initialise, %d\n", ret);
418 nouveau_channel_del(pchan); 423 nouveau_channel_del(pchan);
419 return ret;
420 } 424 }
421 425
422 return 0; 426done:
427 cli->base.super = super;
428 return ret;
423} 429}
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 57238076049f..62b97c4eef8d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -629,7 +629,6 @@ int nouveau_pmops_suspend(struct device *dev)
629 629
630 pci_save_state(pdev); 630 pci_save_state(pdev);
631 pci_disable_device(pdev); 631 pci_disable_device(pdev);
632 pci_ignore_hotplug(pdev);
633 pci_set_power_state(pdev, PCI_D3hot); 632 pci_set_power_state(pdev, PCI_D3hot);
634 return 0; 633 return 0;
635} 634}
@@ -933,6 +932,7 @@ static int nouveau_pmops_runtime_suspend(struct device *dev)
933 ret = nouveau_do_suspend(drm_dev, true); 932 ret = nouveau_do_suspend(drm_dev, true);
934 pci_save_state(pdev); 933 pci_save_state(pdev);
935 pci_disable_device(pdev); 934 pci_disable_device(pdev);
935 pci_ignore_hotplug(pdev);
936 pci_set_power_state(pdev, PCI_D3cold); 936 pci_set_power_state(pdev, PCI_D3cold);
937 drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF; 937 drm_dev->switch_power_state = DRM_SWITCH_POWER_DYNAMIC_OFF;
938 return ret; 938 return ret;
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 515cd9aebb99..f32a434724e3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -52,20 +52,24 @@ nouveau_fctx(struct nouveau_fence *fence)
52 return container_of(fence->base.lock, struct nouveau_fence_chan, lock); 52 return container_of(fence->base.lock, struct nouveau_fence_chan, lock);
53} 53}
54 54
55static void 55static int
56nouveau_fence_signal(struct nouveau_fence *fence) 56nouveau_fence_signal(struct nouveau_fence *fence)
57{ 57{
58 int drop = 0;
59
58 fence_signal_locked(&fence->base); 60 fence_signal_locked(&fence->base);
59 list_del(&fence->head); 61 list_del(&fence->head);
62 rcu_assign_pointer(fence->channel, NULL);
60 63
61 if (test_bit(FENCE_FLAG_USER_BITS, &fence->base.flags)) { 64 if (test_bit(FENCE_FLAG_USER_BITS, &fence->base.flags)) {
62 struct nouveau_fence_chan *fctx = nouveau_fctx(fence); 65 struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
63 66
64 if (!--fctx->notify_ref) 67 if (!--fctx->notify_ref)
65 nvif_notify_put(&fctx->notify); 68 drop = 1;
66 } 69 }
67 70
68 fence_put(&fence->base); 71 fence_put(&fence->base);
72 return drop;
69} 73}
70 74
71static struct nouveau_fence * 75static struct nouveau_fence *
@@ -88,16 +92,23 @@ nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
88{ 92{
89 struct nouveau_fence *fence; 93 struct nouveau_fence *fence;
90 94
91 nvif_notify_fini(&fctx->notify);
92
93 spin_lock_irq(&fctx->lock); 95 spin_lock_irq(&fctx->lock);
94 while (!list_empty(&fctx->pending)) { 96 while (!list_empty(&fctx->pending)) {
95 fence = list_entry(fctx->pending.next, typeof(*fence), head); 97 fence = list_entry(fctx->pending.next, typeof(*fence), head);
96 98
97 nouveau_fence_signal(fence); 99 if (nouveau_fence_signal(fence))
98 fence->channel = NULL; 100 nvif_notify_put(&fctx->notify);
99 } 101 }
100 spin_unlock_irq(&fctx->lock); 102 spin_unlock_irq(&fctx->lock);
103
104 nvif_notify_fini(&fctx->notify);
105 fctx->dead = 1;
106
107 /*
108 * Ensure that all accesses to fence->channel complete before freeing
109 * the channel.
110 */
111 synchronize_rcu();
101} 112}
102 113
103static void 114static void
@@ -112,21 +123,23 @@ nouveau_fence_context_free(struct nouveau_fence_chan *fctx)
112 kref_put(&fctx->fence_ref, nouveau_fence_context_put); 123 kref_put(&fctx->fence_ref, nouveau_fence_context_put);
113} 124}
114 125
115static void 126static int
116nouveau_fence_update(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx) 127nouveau_fence_update(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
117{ 128{
118 struct nouveau_fence *fence; 129 struct nouveau_fence *fence;
119 130 int drop = 0;
120 u32 seq = fctx->read(chan); 131 u32 seq = fctx->read(chan);
121 132
122 while (!list_empty(&fctx->pending)) { 133 while (!list_empty(&fctx->pending)) {
123 fence = list_entry(fctx->pending.next, typeof(*fence), head); 134 fence = list_entry(fctx->pending.next, typeof(*fence), head);
124 135
125 if ((int)(seq - fence->base.seqno) < 0) 136 if ((int)(seq - fence->base.seqno) < 0)
126 return; 137 break;
127 138
128 nouveau_fence_signal(fence); 139 drop |= nouveau_fence_signal(fence);
129 } 140 }
141
142 return drop;
130} 143}
131 144
132static int 145static int
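[Editor's note] nouveau_fence_update() decides whether a fence has been reached with (int)(seq - fence->base.seqno) < 0, a wraparound-safe comparison of 32-bit sequence numbers. A standalone sketch of that idiom with made-up values; it relies on the usual two's-complement behaviour the kernel also assumes, and only holds while the two values stay within 2^31 of each other.

    /* Build: cc -o seqcmp seqcmp.c */
    #include <stdint.h>
    #include <stdio.h>

    /* "has the counter reached this target yet?" -- wraparound-safe */
    static int seq_passed(uint32_t current, uint32_t target)
    {
        return (int32_t)(current - target) >= 0;
    }

    int main(void)
    {
        printf("100 vs 90:       %d\n", seq_passed(100, 90));
        printf("100 vs 200:      %d\n", seq_passed(100, 200));
        /* counter wrapped from 0xfffffff0 past zero to 5 */
        printf("5 vs 0xfffffff0: %d\n", seq_passed(5, 0xfffffff0u));
        return 0;
    }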
@@ -135,18 +148,21 @@ nouveau_fence_wait_uevent_handler(struct nvif_notify *notify)
135 struct nouveau_fence_chan *fctx = 148 struct nouveau_fence_chan *fctx =
136 container_of(notify, typeof(*fctx), notify); 149 container_of(notify, typeof(*fctx), notify);
137 unsigned long flags; 150 unsigned long flags;
151 int ret = NVIF_NOTIFY_KEEP;
138 152
139 spin_lock_irqsave(&fctx->lock, flags); 153 spin_lock_irqsave(&fctx->lock, flags);
140 if (!list_empty(&fctx->pending)) { 154 if (!list_empty(&fctx->pending)) {
141 struct nouveau_fence *fence; 155 struct nouveau_fence *fence;
156 struct nouveau_channel *chan;
142 157
143 fence = list_entry(fctx->pending.next, typeof(*fence), head); 158 fence = list_entry(fctx->pending.next, typeof(*fence), head);
144 nouveau_fence_update(fence->channel, fctx); 159 chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
160 if (nouveau_fence_update(fence->channel, fctx))
161 ret = NVIF_NOTIFY_DROP;
145 } 162 }
146 spin_unlock_irqrestore(&fctx->lock, flags); 163 spin_unlock_irqrestore(&fctx->lock, flags);
147 164
148 /* Always return keep here. NVIF refcount is handled with nouveau_fence_update */ 165 return ret;
149 return NVIF_NOTIFY_KEEP;
150} 166}
151 167
152void 168void
@@ -262,7 +278,10 @@ nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
262 if (!ret) { 278 if (!ret) {
263 fence_get(&fence->base); 279 fence_get(&fence->base);
264 spin_lock_irq(&fctx->lock); 280 spin_lock_irq(&fctx->lock);
265 nouveau_fence_update(chan, fctx); 281
282 if (nouveau_fence_update(chan, fctx))
283 nvif_notify_put(&fctx->notify);
284
266 list_add_tail(&fence->head, &fctx->pending); 285 list_add_tail(&fence->head, &fctx->pending);
267 spin_unlock_irq(&fctx->lock); 286 spin_unlock_irq(&fctx->lock);
268 } 287 }
@@ -276,13 +295,16 @@ nouveau_fence_done(struct nouveau_fence *fence)
276 if (fence->base.ops == &nouveau_fence_ops_legacy || 295 if (fence->base.ops == &nouveau_fence_ops_legacy ||
277 fence->base.ops == &nouveau_fence_ops_uevent) { 296 fence->base.ops == &nouveau_fence_ops_uevent) {
278 struct nouveau_fence_chan *fctx = nouveau_fctx(fence); 297 struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
298 struct nouveau_channel *chan;
279 unsigned long flags; 299 unsigned long flags;
280 300
281 if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags)) 301 if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
282 return true; 302 return true;
283 303
284 spin_lock_irqsave(&fctx->lock, flags); 304 spin_lock_irqsave(&fctx->lock, flags);
285 nouveau_fence_update(fence->channel, fctx); 305 chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
306 if (chan && nouveau_fence_update(chan, fctx))
307 nvif_notify_put(&fctx->notify);
286 spin_unlock_irqrestore(&fctx->lock, flags); 308 spin_unlock_irqrestore(&fctx->lock, flags);
287 } 309 }
288 return fence_is_signaled(&fence->base); 310 return fence_is_signaled(&fence->base);
@@ -387,12 +409,18 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
387 409
388 if (fence && (!exclusive || !fobj || !fobj->shared_count)) { 410 if (fence && (!exclusive || !fobj || !fobj->shared_count)) {
389 struct nouveau_channel *prev = NULL; 411 struct nouveau_channel *prev = NULL;
412 bool must_wait = true;
390 413
391 f = nouveau_local_fence(fence, chan->drm); 414 f = nouveau_local_fence(fence, chan->drm);
392 if (f) 415 if (f) {
393 prev = f->channel; 416 rcu_read_lock();
417 prev = rcu_dereference(f->channel);
418 if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0))
419 must_wait = false;
420 rcu_read_unlock();
421 }
394 422
395 if (!prev || (prev != chan && (ret = fctx->sync(f, prev, chan)))) 423 if (must_wait)
396 ret = fence_wait(fence, intr); 424 ret = fence_wait(fence, intr);
397 425
398 return ret; 426 return ret;
@@ -403,19 +431,22 @@ nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool e
403 431
404 for (i = 0; i < fobj->shared_count && !ret; ++i) { 432 for (i = 0; i < fobj->shared_count && !ret; ++i) {
405 struct nouveau_channel *prev = NULL; 433 struct nouveau_channel *prev = NULL;
434 bool must_wait = true;
406 435
407 fence = rcu_dereference_protected(fobj->shared[i], 436 fence = rcu_dereference_protected(fobj->shared[i],
408 reservation_object_held(resv)); 437 reservation_object_held(resv));
409 438
410 f = nouveau_local_fence(fence, chan->drm); 439 f = nouveau_local_fence(fence, chan->drm);
411 if (f) 440 if (f) {
412 prev = f->channel; 441 rcu_read_lock();
442 prev = rcu_dereference(f->channel);
443 if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0))
444 must_wait = false;
445 rcu_read_unlock();
446 }
413 447
414 if (!prev || (prev != chan && (ret = fctx->sync(f, prev, chan)))) 448 if (must_wait)
415 ret = fence_wait(fence, intr); 449 ret = fence_wait(fence, intr);
416
417 if (ret)
418 break;
419 } 450 }
420 451
421 return ret; 452 return ret;
@@ -463,7 +494,7 @@ static const char *nouveau_fence_get_timeline_name(struct fence *f)
463 struct nouveau_fence *fence = from_fence(f); 494 struct nouveau_fence *fence = from_fence(f);
464 struct nouveau_fence_chan *fctx = nouveau_fctx(fence); 495 struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
465 496
466 return fence->channel ? fctx->name : "dead channel"; 497 return !fctx->dead ? fctx->name : "dead channel";
467} 498}
468 499
469/* 500/*
@@ -476,9 +507,16 @@ static bool nouveau_fence_is_signaled(struct fence *f)
476{ 507{
477 struct nouveau_fence *fence = from_fence(f); 508 struct nouveau_fence *fence = from_fence(f);
478 struct nouveau_fence_chan *fctx = nouveau_fctx(fence); 509 struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
479 struct nouveau_channel *chan = fence->channel; 510 struct nouveau_channel *chan;
511 bool ret = false;
512
513 rcu_read_lock();
514 chan = rcu_dereference(fence->channel);
515 if (chan)
516 ret = (int)(fctx->read(chan) - fence->base.seqno) >= 0;
517 rcu_read_unlock();
480 518
481 return (int)(fctx->read(chan) - fence->base.seqno) >= 0; 519 return ret;
482} 520}
483 521
484static bool nouveau_fence_no_signaling(struct fence *f) 522static bool nouveau_fence_no_signaling(struct fence *f)
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.h b/drivers/gpu/drm/nouveau/nouveau_fence.h
index 943b0b17b1fc..96e461c6f68f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.h
@@ -14,7 +14,7 @@ struct nouveau_fence {
14 14
15 bool sysmem; 15 bool sysmem;
16 16
17 struct nouveau_channel *channel; 17 struct nouveau_channel __rcu *channel;
18 unsigned long timeout; 18 unsigned long timeout;
19}; 19};
20 20
@@ -47,7 +47,7 @@ struct nouveau_fence_chan {
47 char name[32]; 47 char name[32];
48 48
49 struct nvif_notify notify; 49 struct nvif_notify notify;
50 int notify_ref; 50 int notify_ref, dead;
51}; 51};
52 52
53struct nouveau_fence_priv { 53struct nouveau_fence_priv {
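[Editor's note] The fence changes above turn fence->channel into an __rcu pointer: readers look it up under rcu_read_lock()/rcu_dereference(), the signal path unpublishes it, and nouveau_fence_context_del() calls synchronize_rcu() before the channel memory can go away. Below is a kernel-style sketch of that publish/read/retire shape with invented structure names; it is not the nouveau code, and it only makes sense inside kernel code, where these RCU primitives exist (it is not a standalone program).

    #include <linux/rcupdate.h>
    #include <linux/slab.h>
    #include <linux/types.h>

    /* invented structures -- shape of the pattern only */
    struct demo_chan {
        u32 last_seq;			/* last seqno the channel completed */
    };

    struct demo_fence {
        struct demo_chan __rcu *channel;	/* may be unpublished at any time */
        u32 seqno;
    };

    /* reader side: dereference under rcu_read_lock() and tolerate NULL */
    static bool demo_fence_signaled(struct demo_fence *f)
    {
        struct demo_chan *chan;
        bool ret = false;

        rcu_read_lock();
        chan = rcu_dereference(f->channel);
        if (chan)
            ret = (int)(READ_ONCE(chan->last_seq) - f->seqno) >= 0;
        rcu_read_unlock();
        return ret;
    }

    /* teardown side: unpublish first, wait for readers, then free */
    static void demo_fence_retire(struct demo_fence *f)
    {
        struct demo_chan *chan = rcu_dereference_protected(f->channel, 1);

        rcu_assign_pointer(f->channel, NULL);
        synchronize_rcu();		/* every rcu_read_lock() section is done */
        kfree(chan);
    }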
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index ae873d1a8d46..eb8b36714fa1 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -791,6 +791,22 @@ nv50_crtc_set_scale(struct nouveau_crtc *nv_crtc, bool update)
791} 791}
792 792
793static int 793static int
794nv50_crtc_set_raster_vblank_dmi(struct nouveau_crtc *nv_crtc, u32 usec)
795{
796 struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
797 u32 *push;
798
799 push = evo_wait(mast, 8);
800 if (!push)
801 return -ENOMEM;
802
803 evo_mthd(push, 0x0828 + (nv_crtc->index * 0x400), 1);
804 evo_data(push, usec);
805 evo_kick(push, mast);
806 return 0;
807}
808
809static int
794nv50_crtc_set_color_vibrance(struct nouveau_crtc *nv_crtc, bool update) 810nv50_crtc_set_color_vibrance(struct nouveau_crtc *nv_crtc, bool update)
795{ 811{
796 struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev); 812 struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
@@ -1104,14 +1120,14 @@ nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
1104 evo_mthd(push, 0x0804 + (nv_crtc->index * 0x400), 2); 1120 evo_mthd(push, 0x0804 + (nv_crtc->index * 0x400), 2);
1105 evo_data(push, 0x00800000 | mode->clock); 1121 evo_data(push, 0x00800000 | mode->clock);
1106 evo_data(push, (ilace == 2) ? 2 : 0); 1122 evo_data(push, (ilace == 2) ? 2 : 0);
1107 evo_mthd(push, 0x0810 + (nv_crtc->index * 0x400), 8); 1123 evo_mthd(push, 0x0810 + (nv_crtc->index * 0x400), 6);
1108 evo_data(push, 0x00000000); 1124 evo_data(push, 0x00000000);
1109 evo_data(push, (vactive << 16) | hactive); 1125 evo_data(push, (vactive << 16) | hactive);
1110 evo_data(push, ( vsynce << 16) | hsynce); 1126 evo_data(push, ( vsynce << 16) | hsynce);
1111 evo_data(push, (vblanke << 16) | hblanke); 1127 evo_data(push, (vblanke << 16) | hblanke);
1112 evo_data(push, (vblanks << 16) | hblanks); 1128 evo_data(push, (vblanks << 16) | hblanks);
1113 evo_data(push, (vblan2e << 16) | vblan2s); 1129 evo_data(push, (vblan2e << 16) | vblan2s);
1114 evo_data(push, vblankus); 1130 evo_mthd(push, 0x082c + (nv_crtc->index * 0x400), 1);
1115 evo_data(push, 0x00000000); 1131 evo_data(push, 0x00000000);
1116 evo_mthd(push, 0x0900 + (nv_crtc->index * 0x400), 2); 1132 evo_mthd(push, 0x0900 + (nv_crtc->index * 0x400), 2);
1117 evo_data(push, 0x00000311); 1133 evo_data(push, 0x00000311);
@@ -1141,6 +1157,11 @@ nv50_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *umode,
1141 nv_connector = nouveau_crtc_connector_get(nv_crtc); 1157 nv_connector = nouveau_crtc_connector_get(nv_crtc);
1142 nv50_crtc_set_dither(nv_crtc, false); 1158 nv50_crtc_set_dither(nv_crtc, false);
1143 nv50_crtc_set_scale(nv_crtc, false); 1159 nv50_crtc_set_scale(nv_crtc, false);
1160
1161 /* G94 only accepts this after setting scale */
1162 if (nv50_vers(mast) < GF110_DISP_CORE_CHANNEL_DMA)
1163 nv50_crtc_set_raster_vblank_dmi(nv_crtc, vblankus);
1164
1144 nv50_crtc_set_color_vibrance(nv_crtc, false); 1165 nv50_crtc_set_color_vibrance(nv_crtc, false);
1145 nv50_crtc_set_image(nv_crtc, crtc->primary->fb, x, y, false); 1166 nv50_crtc_set_image(nv_crtc, crtc->primary->fb, x, y, false);
1146 return 0; 1167 return 0;
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index af9e78546688..0d1396266857 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -572,7 +572,6 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
572 struct qxl_framebuffer *qfb; 572 struct qxl_framebuffer *qfb;
573 struct qxl_bo *bo, *old_bo = NULL; 573 struct qxl_bo *bo, *old_bo = NULL;
574 struct qxl_crtc *qcrtc = to_qxl_crtc(crtc); 574 struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
575 uint32_t width, height, base_offset;
576 bool recreate_primary = false; 575 bool recreate_primary = false;
577 int ret; 576 int ret;
578 int surf_id; 577 int surf_id;
@@ -602,9 +601,10 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
602 if (qcrtc->index == 0) 601 if (qcrtc->index == 0)
603 recreate_primary = true; 602 recreate_primary = true;
604 603
605 width = mode->hdisplay; 604 if (bo->surf.stride * bo->surf.height > qdev->vram_size) {
606 height = mode->vdisplay; 605 DRM_ERROR("Mode doesn't fit in vram size (vgamem)");
607 base_offset = 0; 606 return -EINVAL;
607 }
608 608
609 ret = qxl_bo_reserve(bo, false); 609 ret = qxl_bo_reserve(bo, false);
610 if (ret != 0) 610 if (ret != 0)
@@ -618,10 +618,10 @@ static int qxl_crtc_mode_set(struct drm_crtc *crtc,
618 if (recreate_primary) { 618 if (recreate_primary) {
619 qxl_io_destroy_primary(qdev); 619 qxl_io_destroy_primary(qdev);
620 qxl_io_log(qdev, 620 qxl_io_log(qdev,
621 "recreate primary: %dx%d (was %dx%d,%d,%d)\n", 621 "recreate primary: %dx%d,%d,%d\n",
622 width, height, bo->surf.width, 622 bo->surf.width, bo->surf.height,
623 bo->surf.height, bo->surf.stride, bo->surf.format); 623 bo->surf.stride, bo->surf.format);
624 qxl_io_create_primary(qdev, base_offset, bo); 624 qxl_io_create_primary(qdev, 0, bo);
625 bo->is_primary = true; 625 bo->is_primary = true;
626 } 626 }
627 627
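[Editor's note] The qxl change above rejects a mode whose primary surface (stride * height bytes) would not fit in VRAM before trying to create it. A hypothetical userspace sketch of that kind of size check follows; the structure and field names are made up, and doing the product in 64 bits is a defensive choice of this sketch rather than something the driver is shown relying on.

    /* Build: cc -o fbfit fbfit.c */
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct surf {
        uint32_t stride;	/* bytes per scanline */
        uint32_t height;	/* scanlines */
    };

    static bool surface_fits_vram(const struct surf *s, uint64_t vram_size)
    {
        /* 64-bit product so a huge mode cannot wrap around the check */
        return (uint64_t)s->stride * s->height <= vram_size;
    }

    int main(void)
    {
        struct surf small = { .stride = 1920 * 4,  .height = 1080 };
        struct surf huge  = { .stride = 16384 * 4, .height = 16384 };
        uint64_t vram = 16ULL << 20;	/* 16 MiB of VGA memory */

        printf("1920x1080:   %s\n", surface_fits_vram(&small, vram) ? "fits" : "too big");
        printf("16384x16384: %s\n", surface_fits_vram(&huge, vram) ? "fits" : "too big");
        return 0;
    }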
diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c
index 15da7ef344a4..ec1593a6a561 100644
--- a/drivers/gpu/drm/radeon/atom.c
+++ b/drivers/gpu/drm/radeon/atom.c
@@ -1217,7 +1217,7 @@ free:
1217 return ret; 1217 return ret;
1218} 1218}
1219 1219
1220int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) 1220int atom_execute_table_scratch_unlocked(struct atom_context *ctx, int index, uint32_t * params)
1221{ 1221{
1222 int r; 1222 int r;
1223 1223
@@ -1238,6 +1238,15 @@ int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
1238 return r; 1238 return r;
1239} 1239}
1240 1240
1241int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params)
1242{
1243 int r;
1244 mutex_lock(&ctx->scratch_mutex);
1245 r = atom_execute_table_scratch_unlocked(ctx, index, params);
1246 mutex_unlock(&ctx->scratch_mutex);
1247 return r;
1248}
1249
1241static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 }; 1250static int atom_iio_len[] = { 1, 2, 3, 3, 3, 3, 4, 4, 4, 3 };
1242 1251
1243static void atom_index_iio(struct atom_context *ctx, int base) 1252static void atom_index_iio(struct atom_context *ctx, int base)
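[Editor's note] atom_execute_table() is split above into a thin wrapper that takes scratch_mutex and an _unlocked worker, so the AUX and I2C paths can acquire scratch_mutex themselves in a fixed order relative to chan->mutex. A minimal pthread sketch of that locked-wrapper convention; the names and the guarded state are invented.

    /* Build: cc -pthread -o lockwrap lockwrap.c */
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t scratch_mutex = PTHREAD_MUTEX_INITIALIZER;
    static int scratch_buffer;		/* state guarded by scratch_mutex */

    /* caller must hold scratch_mutex */
    static int do_work_unlocked(int arg)
    {
        scratch_buffer += arg;
        return scratch_buffer;
    }

    /* convenience wrapper for callers with no lock-ordering constraints */
    static int do_work(int arg)
    {
        int ret;

        pthread_mutex_lock(&scratch_mutex);
        ret = do_work_unlocked(arg);
        pthread_mutex_unlock(&scratch_mutex);
        return ret;
    }

    int main(void)
    {
        printf("via wrapper: %d\n", do_work(2));

        /* a caller that must take scratch_mutex itself (e.g. to keep a
         * fixed ordering against another lock) uses the _unlocked variant */
        pthread_mutex_lock(&scratch_mutex);
        printf("via _unlocked under caller's lock: %d\n", do_work_unlocked(3));
        pthread_mutex_unlock(&scratch_mutex);
        return 0;
    }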
diff --git a/drivers/gpu/drm/radeon/atom.h b/drivers/gpu/drm/radeon/atom.h
index feba6b8d36b3..6d014ddb6b78 100644
--- a/drivers/gpu/drm/radeon/atom.h
+++ b/drivers/gpu/drm/radeon/atom.h
@@ -125,6 +125,7 @@ struct card_info {
125struct atom_context { 125struct atom_context {
126 struct card_info *card; 126 struct card_info *card;
127 struct mutex mutex; 127 struct mutex mutex;
128 struct mutex scratch_mutex;
128 void *bios; 129 void *bios;
129 uint32_t cmd_table, data_table; 130 uint32_t cmd_table, data_table;
130 uint16_t *iio; 131 uint16_t *iio;
@@ -145,6 +146,7 @@ extern int atom_debug;
145 146
146struct atom_context *atom_parse(struct card_info *, void *); 147struct atom_context *atom_parse(struct card_info *, void *);
147int atom_execute_table(struct atom_context *, int, uint32_t *); 148int atom_execute_table(struct atom_context *, int, uint32_t *);
149int atom_execute_table_scratch_unlocked(struct atom_context *, int, uint32_t *);
148int atom_asic_init(struct atom_context *); 150int atom_asic_init(struct atom_context *);
149void atom_destroy(struct atom_context *); 151void atom_destroy(struct atom_context *);
150bool atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size, 152bool atom_parse_data_header(struct atom_context *ctx, int index, uint16_t *size,
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 95d5d4ab3335..11ba9d21b89b 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -100,6 +100,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
100 memset(&args, 0, sizeof(args)); 100 memset(&args, 0, sizeof(args));
101 101
102 mutex_lock(&chan->mutex); 102 mutex_lock(&chan->mutex);
103 mutex_lock(&rdev->mode_info.atom_context->scratch_mutex);
103 104
104 base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1); 105 base = (unsigned char *)(rdev->mode_info.atom_context->scratch + 1);
105 106
@@ -113,7 +114,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
113 if (ASIC_IS_DCE4(rdev)) 114 if (ASIC_IS_DCE4(rdev))
114 args.v2.ucHPD_ID = chan->rec.hpd; 115 args.v2.ucHPD_ID = chan->rec.hpd;
115 116
116 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 117 atom_execute_table_scratch_unlocked(rdev->mode_info.atom_context, index, (uint32_t *)&args);
117 118
118 *ack = args.v1.ucReplyStatus; 119 *ack = args.v1.ucReplyStatus;
119 120
@@ -147,6 +148,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
147 148
148 r = recv_bytes; 149 r = recv_bytes;
149done: 150done:
151 mutex_unlock(&rdev->mode_info.atom_context->scratch_mutex);
150 mutex_unlock(&chan->mutex); 152 mutex_unlock(&chan->mutex);
151 153
152 return r; 154 return r;
diff --git a/drivers/gpu/drm/radeon/atombios_i2c.c b/drivers/gpu/drm/radeon/atombios_i2c.c
index 9c570fb15b8c..4157780585a0 100644
--- a/drivers/gpu/drm/radeon/atombios_i2c.c
+++ b/drivers/gpu/drm/radeon/atombios_i2c.c
@@ -48,6 +48,7 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
48 memset(&args, 0, sizeof(args)); 48 memset(&args, 0, sizeof(args));
49 49
50 mutex_lock(&chan->mutex); 50 mutex_lock(&chan->mutex);
51 mutex_lock(&rdev->mode_info.atom_context->scratch_mutex);
51 52
52 base = (unsigned char *)rdev->mode_info.atom_context->scratch; 53 base = (unsigned char *)rdev->mode_info.atom_context->scratch;
53 54
@@ -82,7 +83,7 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
82 args.ucSlaveAddr = slave_addr << 1; 83 args.ucSlaveAddr = slave_addr << 1;
83 args.ucLineNumber = chan->rec.i2c_id; 84 args.ucLineNumber = chan->rec.i2c_id;
84 85
85 atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); 86 atom_execute_table_scratch_unlocked(rdev->mode_info.atom_context, index, (uint32_t *)&args);
86 87
87 /* error */ 88 /* error */
88 if (args.ucStatus != HW_ASSISTED_I2C_STATUS_SUCCESS) { 89 if (args.ucStatus != HW_ASSISTED_I2C_STATUS_SUCCESS) {
@@ -95,6 +96,7 @@ static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan,
95 radeon_atom_copy_swap(buf, base, num, false); 96 radeon_atom_copy_swap(buf, base, num, false);
96 97
97done: 98done:
99 mutex_unlock(&rdev->mode_info.atom_context->scratch_mutex);
98 mutex_unlock(&chan->mutex); 100 mutex_unlock(&chan->mutex);
99 101
100 return r; 102 return r;
diff --git a/drivers/gpu/drm/radeon/btc_dpm.c b/drivers/gpu/drm/radeon/btc_dpm.c
index 300d971187c4..0b2929de9f41 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.c
+++ b/drivers/gpu/drm/radeon/btc_dpm.c
@@ -24,6 +24,7 @@
24 24
25#include "drmP.h" 25#include "drmP.h"
26#include "radeon.h" 26#include "radeon.h"
27#include "radeon_asic.h"
27#include "btcd.h" 28#include "btcd.h"
28#include "r600_dpm.h" 29#include "r600_dpm.h"
29#include "cypress_dpm.h" 30#include "cypress_dpm.h"
@@ -1170,6 +1171,23 @@ static const struct radeon_blacklist_clocks btc_blacklist_clocks[] =
1170 { 25000, 30000, RADEON_SCLK_UP } 1171 { 25000, 30000, RADEON_SCLK_UP }
1171}; 1172};
1172 1173
1174void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table,
1175 u32 *max_clock)
1176{
1177 u32 i, clock = 0;
1178
1179 if ((table == NULL) || (table->count == 0)) {
1180 *max_clock = clock;
1181 return;
1182 }
1183
1184 for (i = 0; i < table->count; i++) {
1185 if (clock < table->entries[i].clk)
1186 clock = table->entries[i].clk;
1187 }
1188 *max_clock = clock;
1189}
1190
1173void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table, 1191void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table,
1174 u32 clock, u16 max_voltage, u16 *voltage) 1192 u32 clock, u16 max_voltage, u16 *voltage)
1175{ 1193{
diff --git a/drivers/gpu/drm/radeon/btc_dpm.h b/drivers/gpu/drm/radeon/btc_dpm.h
index 1a15e0e41950..3b6f12b7760b 100644
--- a/drivers/gpu/drm/radeon/btc_dpm.h
+++ b/drivers/gpu/drm/radeon/btc_dpm.h
@@ -46,6 +46,8 @@ void btc_adjust_clock_combinations(struct radeon_device *rdev,
46 struct rv7xx_pl *pl); 46 struct rv7xx_pl *pl);
47void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table, 47void btc_apply_voltage_dependency_rules(struct radeon_clock_voltage_dependency_table *table,
48 u32 clock, u16 max_voltage, u16 *voltage); 48 u32 clock, u16 max_voltage, u16 *voltage);
49void btc_get_max_clock_from_voltage_dependency_table(struct radeon_clock_voltage_dependency_table *table,
50 u32 *max_clock);
49void btc_apply_voltage_delta_rules(struct radeon_device *rdev, 51void btc_apply_voltage_delta_rules(struct radeon_device *rdev,
50 u16 max_vddc, u16 max_vddci, 52 u16 max_vddc, u16 max_vddci,
51 u16 *vddc, u16 *vddci); 53 u16 *vddc, u16 *vddci);
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index f5c8c0445a94..11a55e9dad7f 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -24,6 +24,7 @@
24#include <linux/firmware.h> 24#include <linux/firmware.h>
25#include "drmP.h" 25#include "drmP.h"
26#include "radeon.h" 26#include "radeon.h"
27#include "radeon_asic.h"
27#include "radeon_ucode.h" 28#include "radeon_ucode.h"
28#include "cikd.h" 29#include "cikd.h"
29#include "r600_dpm.h" 30#include "r600_dpm.h"
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index 377afa504d2b..89c01fa6dd8e 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -4313,8 +4313,8 @@ static int cik_cp_gfx_start(struct radeon_device *rdev)
4313 /* init the CE partitions. CE only used for gfx on CIK */ 4313 /* init the CE partitions. CE only used for gfx on CIK */
4314 radeon_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2)); 4314 radeon_ring_write(ring, PACKET3(PACKET3_SET_BASE, 2));
4315 radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE)); 4315 radeon_ring_write(ring, PACKET3_BASE_INDEX(CE_PARTITION_BASE));
4316 radeon_ring_write(ring, 0xc000); 4316 radeon_ring_write(ring, 0x8000);
4317 radeon_ring_write(ring, 0xc000); 4317 radeon_ring_write(ring, 0x8000);
4318 4318
4319 /* setup clear context state */ 4319 /* setup clear context state */
4320 radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0)); 4320 radeon_ring_write(ring, PACKET3(PACKET3_PREAMBLE_CNTL, 0));
@@ -9447,6 +9447,9 @@ void dce8_bandwidth_update(struct radeon_device *rdev)
9447 u32 num_heads = 0, lb_size; 9447 u32 num_heads = 0, lb_size;
9448 int i; 9448 int i;
9449 9449
9450 if (!rdev->mode_info.mode_config_initialized)
9451 return;
9452
9450 radeon_update_display_priority(rdev); 9453 radeon_update_display_priority(rdev);
9451 9454
9452 for (i = 0; i < rdev->num_crtc; i++) { 9455 for (i = 0; i < rdev->num_crtc; i++) {
diff --git a/drivers/gpu/drm/radeon/cik_sdma.c b/drivers/gpu/drm/radeon/cik_sdma.c
index c77dad1a4576..d748963af08b 100644
--- a/drivers/gpu/drm/radeon/cik_sdma.c
+++ b/drivers/gpu/drm/radeon/cik_sdma.c
@@ -611,16 +611,19 @@ int cik_sdma_ring_test(struct radeon_device *rdev,
611{ 611{
612 unsigned i; 612 unsigned i;
613 int r; 613 int r;
614 void __iomem *ptr = (void *)rdev->vram_scratch.ptr; 614 unsigned index;
615 u32 tmp; 615 u32 tmp;
616 u64 gpu_addr;
616 617
617 if (!ptr) { 618 if (ring->idx == R600_RING_TYPE_DMA_INDEX)
618 DRM_ERROR("invalid vram scratch pointer\n"); 619 index = R600_WB_DMA_RING_TEST_OFFSET;
619 return -EINVAL; 620 else
620 } 621 index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;
622
623 gpu_addr = rdev->wb.gpu_addr + index;
621 624
622 tmp = 0xCAFEDEAD; 625 tmp = 0xCAFEDEAD;
623 writel(tmp, ptr); 626 rdev->wb.wb[index/4] = cpu_to_le32(tmp);
624 627
625 r = radeon_ring_lock(rdev, ring, 5); 628 r = radeon_ring_lock(rdev, ring, 5);
626 if (r) { 629 if (r) {
@@ -628,14 +631,14 @@ int cik_sdma_ring_test(struct radeon_device *rdev,
628 return r; 631 return r;
629 } 632 }
630 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0)); 633 radeon_ring_write(ring, SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0));
631 radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc); 634 radeon_ring_write(ring, lower_32_bits(gpu_addr));
632 radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr)); 635 radeon_ring_write(ring, upper_32_bits(gpu_addr));
633 radeon_ring_write(ring, 1); /* number of DWs to follow */ 636 radeon_ring_write(ring, 1); /* number of DWs to follow */
634 radeon_ring_write(ring, 0xDEADBEEF); 637 radeon_ring_write(ring, 0xDEADBEEF);
635 radeon_ring_unlock_commit(rdev, ring, false); 638 radeon_ring_unlock_commit(rdev, ring, false);
636 639
637 for (i = 0; i < rdev->usec_timeout; i++) { 640 for (i = 0; i < rdev->usec_timeout; i++) {
638 tmp = readl(ptr); 641 tmp = le32_to_cpu(rdev->wb.wb[index/4]);
639 if (tmp == 0xDEADBEEF) 642 if (tmp == 0xDEADBEEF)
640 break; 643 break;
641 DRM_UDELAY(1); 644 DRM_UDELAY(1);
@@ -664,17 +667,20 @@ int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
664{ 667{
665 struct radeon_ib ib; 668 struct radeon_ib ib;
666 unsigned i; 669 unsigned i;
670 unsigned index;
667 int r; 671 int r;
668 void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
669 u32 tmp = 0; 672 u32 tmp = 0;
673 u64 gpu_addr;
670 674
671 if (!ptr) { 675 if (ring->idx == R600_RING_TYPE_DMA_INDEX)
672 DRM_ERROR("invalid vram scratch pointer\n"); 676 index = R600_WB_DMA_RING_TEST_OFFSET;
673 return -EINVAL; 677 else
674 } 678 index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;
679
680 gpu_addr = rdev->wb.gpu_addr + index;
675 681
676 tmp = 0xCAFEDEAD; 682 tmp = 0xCAFEDEAD;
677 writel(tmp, ptr); 683 rdev->wb.wb[index/4] = cpu_to_le32(tmp);
678 684
679 r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256); 685 r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
680 if (r) { 686 if (r) {
@@ -683,8 +689,8 @@ int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
683 } 689 }
684 690
685 ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0); 691 ib.ptr[0] = SDMA_PACKET(SDMA_OPCODE_WRITE, SDMA_WRITE_SUB_OPCODE_LINEAR, 0);
686 ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc; 692 ib.ptr[1] = lower_32_bits(gpu_addr);
687 ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr); 693 ib.ptr[2] = upper_32_bits(gpu_addr);
688 ib.ptr[3] = 1; 694 ib.ptr[3] = 1;
689 ib.ptr[4] = 0xDEADBEEF; 695 ib.ptr[4] = 0xDEADBEEF;
690 ib.length_dw = 5; 696 ib.length_dw = 5;
@@ -701,7 +707,7 @@ int cik_sdma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
701 return r; 707 return r;
702 } 708 }
703 for (i = 0; i < rdev->usec_timeout; i++) { 709 for (i = 0; i < rdev->usec_timeout; i++) {
704 tmp = readl(ptr); 710 tmp = le32_to_cpu(rdev->wb.wb[index/4]);
705 if (tmp == 0xDEADBEEF) 711 if (tmp == 0xDEADBEEF)
706 break; 712 break;
707 DRM_UDELAY(1); 713 DRM_UDELAY(1);
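[Editor's note] The reworked DMA ring tests seed a write-back slot with 0xCAFEDEAD, ask the engine to write 0xDEADBEEF there, and poll (bounded by usec_timeout) until it lands. A standalone sketch of that seed/kick/poll pattern, with a thread standing in for the DMA engine; everything here is invented for illustration.

    /* Build: cc -pthread -o ringtest ringtest.c */
    #define _POSIX_C_SOURCE 200809L
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>
    #include <time.h>

    static atomic_uint wb_slot;		/* stands in for the write-back slot */

    static void *fake_engine(void *arg)
    {
        (void)arg;
        /* pretend the write takes a little while to land in memory */
        nanosleep(&(struct timespec){ .tv_nsec = 2 * 1000 * 1000 }, NULL);
        atomic_store(&wb_slot, 0xDEADBEEF);
        return NULL;
    }

    int main(void)
    {
        pthread_t engine;
        unsigned int tmp = 0;
        int i;

        atomic_store(&wb_slot, 0xCAFEDEAD);	/* seed with the sentinel */
        pthread_create(&engine, NULL, fake_engine, NULL);

        /* poll with a bounded timeout, like the usec_timeout loop above */
        for (i = 0; i < 100000; i++) {
            tmp = atomic_load(&wb_slot);
            if (tmp == 0xDEADBEEF)
                break;
            nanosleep(&(struct timespec){ .tv_nsec = 1000 }, NULL);
        }

        pthread_join(engine, NULL);
        printf("ring test %s (0x%08X)\n",
               tmp == 0xDEADBEEF ? "passed" : "failed", tmp);
        return 0;
    }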
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c
index 47d31e915758..9aad0327e4d1 100644
--- a/drivers/gpu/drm/radeon/cypress_dpm.c
+++ b/drivers/gpu/drm/radeon/cypress_dpm.c
@@ -24,6 +24,7 @@
24 24
25#include "drmP.h" 25#include "drmP.h"
26#include "radeon.h" 26#include "radeon.h"
27#include "radeon_asic.h"
27#include "evergreend.h" 28#include "evergreend.h"
28#include "r600_dpm.h" 29#include "r600_dpm.h"
29#include "cypress_dpm.h" 30#include "cypress_dpm.h"
diff --git a/drivers/gpu/drm/radeon/dce3_1_afmt.c b/drivers/gpu/drm/radeon/dce3_1_afmt.c
index 950af153f30e..2fe8cfc966d9 100644
--- a/drivers/gpu/drm/radeon/dce3_1_afmt.c
+++ b/drivers/gpu/drm/radeon/dce3_1_afmt.c
@@ -32,7 +32,7 @@ static void dce3_2_afmt_write_speaker_allocation(struct drm_encoder *encoder)
32 struct drm_connector *connector; 32 struct drm_connector *connector;
33 struct radeon_connector *radeon_connector = NULL; 33 struct radeon_connector *radeon_connector = NULL;
34 u32 tmp; 34 u32 tmp;
35 u8 *sadb; 35 u8 *sadb = NULL;
36 int sad_count; 36 int sad_count;
37 37
38 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { 38 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
@@ -49,8 +49,8 @@ static void dce3_2_afmt_write_speaker_allocation(struct drm_encoder *encoder)
49 49
50 sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb); 50 sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
51 if (sad_count < 0) { 51 if (sad_count < 0) {
52 DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); 52 DRM_DEBUG("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
53 return; 53 sad_count = 0;
54 } 54 }
55 55
56 /* program the speaker allocation */ 56 /* program the speaker allocation */
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index c0bbf68dbc27..f312edf4d50e 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -155,7 +155,7 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
155 struct drm_connector *connector; 155 struct drm_connector *connector;
156 struct radeon_connector *radeon_connector = NULL; 156 struct radeon_connector *radeon_connector = NULL;
157 u32 offset, tmp; 157 u32 offset, tmp;
158 u8 *sadb; 158 u8 *sadb = NULL;
159 int sad_count; 159 int sad_count;
160 160
161 if (!dig || !dig->afmt || !dig->afmt->pin) 161 if (!dig || !dig->afmt || !dig->afmt->pin)
@@ -176,9 +176,9 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
176 } 176 }
177 177
178 sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector), &sadb); 178 sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector), &sadb);
179 if (sad_count <= 0) { 179 if (sad_count < 0) {
180 DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); 180 DRM_DEBUG("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
181 return; 181 sad_count = 0;
182 } 182 }
183 183
184 /* program the speaker allocation */ 184 /* program the speaker allocation */
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index a31f1ca40c6a..85995b4e3338 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -2345,6 +2345,9 @@ void evergreen_bandwidth_update(struct radeon_device *rdev)
2345 u32 num_heads = 0, lb_size; 2345 u32 num_heads = 0, lb_size;
2346 int i; 2346 int i;
2347 2347
2348 if (!rdev->mode_info.mode_config_initialized)
2349 return;
2350
2348 radeon_update_display_priority(rdev); 2351 radeon_update_display_priority(rdev);
2349 2352
2350 for (i = 0; i < rdev->num_crtc; i++) { 2353 for (i = 0; i < rdev->num_crtc; i++) {
@@ -2552,6 +2555,7 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav
2552 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1); 2555 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 1);
2553 tmp |= EVERGREEN_CRTC_BLANK_DATA_EN; 2556 tmp |= EVERGREEN_CRTC_BLANK_DATA_EN;
2554 WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp); 2557 WREG32(EVERGREEN_CRTC_BLANK_CONTROL + crtc_offsets[i], tmp);
2558 WREG32(EVERGREEN_CRTC_UPDATE_LOCK + crtc_offsets[i], 0);
2555 } 2559 }
2556 } else { 2560 } else {
2557 tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]); 2561 tmp = RREG32(EVERGREEN_CRTC_CONTROL + crtc_offsets[i]);
@@ -3005,7 +3009,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
3005 u32 vgt_cache_invalidation; 3009 u32 vgt_cache_invalidation;
3006 u32 hdp_host_path_cntl, tmp; 3010 u32 hdp_host_path_cntl, tmp;
3007 u32 disabled_rb_mask; 3011 u32 disabled_rb_mask;
3008 int i, j, num_shader_engines, ps_thread_count; 3012 int i, j, ps_thread_count;
3009 3013
3010 switch (rdev->family) { 3014 switch (rdev->family) {
3011 case CHIP_CYPRESS: 3015 case CHIP_CYPRESS:
@@ -3303,8 +3307,6 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
3303 rdev->config.evergreen.tile_config |= 3307 rdev->config.evergreen.tile_config |=
3304 ((gb_addr_config & 0x30000000) >> 28) << 12; 3308 ((gb_addr_config & 0x30000000) >> 28) << 12;
3305 3309
3306 num_shader_engines = (gb_addr_config & NUM_SHADER_ENGINES(3) >> 12) + 1;
3307
3308 if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK)) { 3310 if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK)) {
3309 u32 efuse_straps_4; 3311 u32 efuse_straps_4;
3310 u32 efuse_straps_3; 3312 u32 efuse_straps_3;
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index 2514d659b1ba..53abd9b17a50 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -133,7 +133,7 @@ static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
133 struct drm_connector *connector; 133 struct drm_connector *connector;
134 struct radeon_connector *radeon_connector = NULL; 134 struct radeon_connector *radeon_connector = NULL;
135 u32 tmp; 135 u32 tmp;
136 u8 *sadb; 136 u8 *sadb = NULL;
137 int sad_count; 137 int sad_count;
138 138
139 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) { 139 list_for_each_entry(connector, &encoder->dev->mode_config.connector_list, head) {
@@ -149,9 +149,9 @@ static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
149 } 149 }
150 150
151 sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector), &sadb); 151 sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector), &sadb);
152 if (sad_count <= 0) { 152 if (sad_count < 0) {
153 DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count); 153 DRM_DEBUG("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
154 return; 154 sad_count = 0;
155 } 155 }
156 156
157 /* program the speaker allocation */ 157 /* program the speaker allocation */
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index 1dd976f447fa..9b42001295ba 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -2725,7 +2725,11 @@ int kv_dpm_init(struct radeon_device *rdev)
2725 2725
2726 pi->sram_end = SMC_RAM_END; 2726 pi->sram_end = SMC_RAM_END;
2727 2727
2728 pi->enable_nb_dpm = true; 2728 /* Enabling nb dpm on an ASRock system prevents dpm from working */
2729 if (rdev->pdev->subsystem_vendor == 0x1849)
2730 pi->enable_nb_dpm = false;
2731 else
2732 pi->enable_nb_dpm = true;
2729 2733
2730 pi->caps_power_containment = true; 2734 pi->caps_power_containment = true;
2731 pi->caps_cac = true; 2735 pi->caps_cac = true;
@@ -2740,10 +2744,19 @@ int kv_dpm_init(struct radeon_device *rdev)
2740 pi->caps_sclk_ds = true; 2744 pi->caps_sclk_ds = true;
2741 pi->enable_auto_thermal_throttling = true; 2745 pi->enable_auto_thermal_throttling = true;
2742 pi->disable_nb_ps3_in_battery = false; 2746 pi->disable_nb_ps3_in_battery = false;
2743 if (radeon_bapm == 0) 2747 if (radeon_bapm == -1) {
2748 /* There are stability issues reported on with
2749 * bapm enabled on an asrock system.
2750 */
2751 if (rdev->pdev->subsystem_vendor == 0x1849)
2752 pi->bapm_enable = false;
2753 else
2754 pi->bapm_enable = true;
2755 } else if (radeon_bapm == 0) {
2744 pi->bapm_enable = false; 2756 pi->bapm_enable = false;
2745 else 2757 } else {
2746 pi->bapm_enable = true; 2758 pi->bapm_enable = true;
2759 }
2747 pi->voltage_drop_t = 0; 2760 pi->voltage_drop_t = 0;
2748 pi->caps_sclk_throttle_low_notification = false; 2761 pi->caps_sclk_throttle_low_notification = false;
2749 pi->caps_fps = false; /* true? */ 2762 pi->caps_fps = false; /* true? */
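A minimal standalone C sketch of the auto/off/on tri-state pattern the kv_dpm.c hunk above applies to radeon_bapm, with a vendor-specific override only in "auto" mode. Helper name, sample vendor IDs, and the main() values are illustrative stand-ins, not taken from the patch.

/* Illustrative only: -1 means "auto", 0 forces off, anything else forces on.
 * In auto mode a board with a known-problematic subsystem vendor is kept off. */
#include <stdbool.h>
#include <stdio.h>

#define VENDOR_WITH_KNOWN_ISSUE 0x1849   /* example value, as in the hunk above */

static bool resolve_tristate(int param, unsigned int subsystem_vendor)
{
	if (param == -1)                                  /* auto */
		return subsystem_vendor != VENDOR_WITH_KNOWN_ISSUE;
	return param != 0;                                /* explicit off/on */
}

int main(void)
{
	printf("%d\n", resolve_tristate(-1, 0x1849));     /* 0: quirked board */
	printf("%d\n", resolve_tristate(-1, 0x1043));     /* 1: default on */
	printf("%d\n", resolve_tristate(0, 0x1043));      /* 0: forced off */
	return 0;
}
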
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index 715b181c6243..6d2f16cf2c1c 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -23,6 +23,7 @@
23 23
24#include "drmP.h" 24#include "drmP.h"
25#include "radeon.h" 25#include "radeon.h"
26#include "radeon_asic.h"
26#include "nid.h" 27#include "nid.h"
27#include "r600_dpm.h" 28#include "r600_dpm.h"
28#include "ni_dpm.h" 29#include "ni_dpm.h"
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index 10f8be0ee173..b53b31a7b76f 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -3207,6 +3207,9 @@ void r100_bandwidth_update(struct radeon_device *rdev)
3207 uint32_t pixel_bytes1 = 0; 3207 uint32_t pixel_bytes1 = 0;
3208 uint32_t pixel_bytes2 = 0; 3208 uint32_t pixel_bytes2 = 0;
3209 3209
3210 if (!rdev->mode_info.mode_config_initialized)
3211 return;
3212
3210 radeon_update_display_priority(rdev); 3213 radeon_update_display_priority(rdev);
3211 3214
3212 if (rdev->mode_info.crtcs[0]->base.enabled) { 3215 if (rdev->mode_info.crtcs[0]->base.enabled) {
diff --git a/drivers/gpu/drm/radeon/r600_dma.c b/drivers/gpu/drm/radeon/r600_dma.c
index 100189ec5fa8..cf0df45d455e 100644
--- a/drivers/gpu/drm/radeon/r600_dma.c
+++ b/drivers/gpu/drm/radeon/r600_dma.c
@@ -232,16 +232,19 @@ int r600_dma_ring_test(struct radeon_device *rdev,
232{ 232{
233 unsigned i; 233 unsigned i;
234 int r; 234 int r;
235 void __iomem *ptr = (void *)rdev->vram_scratch.ptr; 235 unsigned index;
236 u32 tmp; 236 u32 tmp;
237 u64 gpu_addr;
237 238
238 if (!ptr) { 239 if (ring->idx == R600_RING_TYPE_DMA_INDEX)
239 DRM_ERROR("invalid vram scratch pointer\n"); 240 index = R600_WB_DMA_RING_TEST_OFFSET;
240 return -EINVAL; 241 else
241 } 242 index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;
243
244 gpu_addr = rdev->wb.gpu_addr + index;
242 245
243 tmp = 0xCAFEDEAD; 246 tmp = 0xCAFEDEAD;
244 writel(tmp, ptr); 247 rdev->wb.wb[index/4] = cpu_to_le32(tmp);
245 248
246 r = radeon_ring_lock(rdev, ring, 4); 249 r = radeon_ring_lock(rdev, ring, 4);
247 if (r) { 250 if (r) {
@@ -249,13 +252,13 @@ int r600_dma_ring_test(struct radeon_device *rdev,
249 return r; 252 return r;
250 } 253 }
251 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1)); 254 radeon_ring_write(ring, DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1));
252 radeon_ring_write(ring, rdev->vram_scratch.gpu_addr & 0xfffffffc); 255 radeon_ring_write(ring, lower_32_bits(gpu_addr));
253 radeon_ring_write(ring, upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff); 256 radeon_ring_write(ring, upper_32_bits(gpu_addr) & 0xff);
254 radeon_ring_write(ring, 0xDEADBEEF); 257 radeon_ring_write(ring, 0xDEADBEEF);
255 radeon_ring_unlock_commit(rdev, ring, false); 258 radeon_ring_unlock_commit(rdev, ring, false);
256 259
257 for (i = 0; i < rdev->usec_timeout; i++) { 260 for (i = 0; i < rdev->usec_timeout; i++) {
258 tmp = readl(ptr); 261 tmp = le32_to_cpu(rdev->wb.wb[index/4]);
259 if (tmp == 0xDEADBEEF) 262 if (tmp == 0xDEADBEEF)
260 break; 263 break;
261 DRM_UDELAY(1); 264 DRM_UDELAY(1);
@@ -335,17 +338,17 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
335{ 338{
336 struct radeon_ib ib; 339 struct radeon_ib ib;
337 unsigned i; 340 unsigned i;
341 unsigned index;
338 int r; 342 int r;
339 void __iomem *ptr = (void *)rdev->vram_scratch.ptr;
340 u32 tmp = 0; 343 u32 tmp = 0;
344 u64 gpu_addr;
341 345
342 if (!ptr) { 346 if (ring->idx == R600_RING_TYPE_DMA_INDEX)
343 DRM_ERROR("invalid vram scratch pointer\n"); 347 index = R600_WB_DMA_RING_TEST_OFFSET;
344 return -EINVAL; 348 else
345 } 349 index = CAYMAN_WB_DMA1_RING_TEST_OFFSET;
346 350
347 tmp = 0xCAFEDEAD; 351 gpu_addr = rdev->wb.gpu_addr + index;
348 writel(tmp, ptr);
349 352
350 r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256); 353 r = radeon_ib_get(rdev, ring->idx, &ib, NULL, 256);
351 if (r) { 354 if (r) {
@@ -354,8 +357,8 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
354 } 357 }
355 358
356 ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1); 359 ib.ptr[0] = DMA_PACKET(DMA_PACKET_WRITE, 0, 0, 1);
357 ib.ptr[1] = rdev->vram_scratch.gpu_addr & 0xfffffffc; 360 ib.ptr[1] = lower_32_bits(gpu_addr);
358 ib.ptr[2] = upper_32_bits(rdev->vram_scratch.gpu_addr) & 0xff; 361 ib.ptr[2] = upper_32_bits(gpu_addr) & 0xff;
359 ib.ptr[3] = 0xDEADBEEF; 362 ib.ptr[3] = 0xDEADBEEF;
360 ib.length_dw = 4; 363 ib.length_dw = 4;
361 364
@@ -371,7 +374,7 @@ int r600_dma_ib_test(struct radeon_device *rdev, struct radeon_ring *ring)
371 return r; 374 return r;
372 } 375 }
373 for (i = 0; i < rdev->usec_timeout; i++) { 376 for (i = 0; i < rdev->usec_timeout; i++) {
374 tmp = readl(ptr); 377 tmp = le32_to_cpu(rdev->wb.wb[index/4]);
375 if (tmp == 0xDEADBEEF) 378 if (tmp == 0xDEADBEEF)
376 break; 379 break;
377 DRM_UDELAY(1); 380 DRM_UDELAY(1);
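A short sketch of the test pattern the r600_dma.c changes above rely on: seed a shared memory slot with one magic value, ask the engine to overwrite it with another, then poll from the CPU until it changes or a timeout expires. This is plain C with stand-in names; it does not use any radeon or DRM APIs.

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define MAGIC 0xDEADBEEFu

static void udelay_stub(unsigned usec) { (void)usec; }  /* stand-in for a 1 us delay */

static bool poll_for_magic(volatile uint32_t *slot, unsigned timeout_us,
			   void (*udelay)(unsigned))
{
	unsigned i;

	for (i = 0; i < timeout_us; i++) {
		if (*slot == MAGIC)     /* the engine wrote the expected value */
			return true;
		udelay(1);              /* wait a microsecond and retry */
	}
	return false;                   /* timed out: the ring/IB test failed */
}

int main(void)
{
	volatile uint32_t slot = 0xCAFEDEAD;  /* seeded value before the DMA write */

	slot = MAGIC;                         /* pretend the engine completed the write */
	printf("%s\n", poll_for_magic(&slot, 100, udelay_stub) ? "pass" : "fail");
	return 0;
}
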
diff --git a/drivers/gpu/drm/radeon/r600_dpm.c b/drivers/gpu/drm/radeon/r600_dpm.c
index 9c61b74ef441..b5c73df8e202 100644
--- a/drivers/gpu/drm/radeon/r600_dpm.c
+++ b/drivers/gpu/drm/radeon/r600_dpm.c
@@ -24,6 +24,7 @@
24 24
25#include "drmP.h" 25#include "drmP.h"
26#include "radeon.h" 26#include "radeon.h"
27#include "radeon_asic.h"
27#include "r600d.h" 28#include "r600d.h"
28#include "r600_dpm.h" 29#include "r600_dpm.h"
29#include "atom.h" 30#include "atom.h"
@@ -1255,7 +1256,7 @@ int r600_parse_extended_power_table(struct radeon_device *rdev)
1255 (mode_info->atom_context->bios + data_offset + 1256 (mode_info->atom_context->bios + data_offset +
1256 le16_to_cpu(ext_hdr->usPowerTuneTableOffset)); 1257 le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
1257 rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 1258 rdev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
1258 ppt->usMaximumPowerDeliveryLimit; 1259 le16_to_cpu(ppt->usMaximumPowerDeliveryLimit);
1259 pt = &ppt->power_tune_table; 1260 pt = &ppt->power_tune_table;
1260 } else { 1261 } else {
1261 ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *) 1262 ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index f7c4b226a284..a9717b3fbf1b 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1133,6 +1133,8 @@ struct radeon_wb {
1133#define R600_WB_EVENT_OFFSET 3072 1133#define R600_WB_EVENT_OFFSET 3072
1134#define CIK_WB_CP1_WPTR_OFFSET 3328 1134#define CIK_WB_CP1_WPTR_OFFSET 3328
1135#define CIK_WB_CP2_WPTR_OFFSET 3584 1135#define CIK_WB_CP2_WPTR_OFFSET 3584
1136#define R600_WB_DMA_RING_TEST_OFFSET 3588
1137#define CAYMAN_WB_DMA1_RING_TEST_OFFSET 3592
1136 1138
1137/** 1139/**
1138 * struct radeon_pm - power management datas 1140 * struct radeon_pm - power management datas
diff --git a/drivers/gpu/drm/radeon/radeon_bios.c b/drivers/gpu/drm/radeon/radeon_bios.c
index 6a03624fadaa..63ccb8fa799c 100644
--- a/drivers/gpu/drm/radeon/radeon_bios.c
+++ b/drivers/gpu/drm/radeon/radeon_bios.c
@@ -658,12 +658,10 @@ bool radeon_get_bios(struct radeon_device *rdev)
658 r = igp_read_bios_from_vram(rdev); 658 r = igp_read_bios_from_vram(rdev);
659 if (r == false) 659 if (r == false)
660 r = radeon_read_bios(rdev); 660 r = radeon_read_bios(rdev);
661 if (r == false) { 661 if (r == false)
662 r = radeon_read_disabled_bios(rdev); 662 r = radeon_read_disabled_bios(rdev);
663 } 663 if (r == false)
664 if (r == false) {
665 r = radeon_read_platform_bios(rdev); 664 r = radeon_read_platform_bios(rdev);
666 }
667 if (r == false || rdev->bios == NULL) { 665 if (r == false || rdev->bios == NULL) {
668 DRM_ERROR("Unable to locate a BIOS ROM\n"); 666 DRM_ERROR("Unable to locate a BIOS ROM\n");
669 rdev->bios = NULL; 667 rdev->bios = NULL;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 300c4b3d4669..26baa9c05f6c 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -322,6 +322,12 @@ static void radeon_connector_get_edid(struct drm_connector *connector)
322 } 322 }
323 323
324 if (!radeon_connector->edid) { 324 if (!radeon_connector->edid) {
325 /* don't fetch the edid from the vbios if ddc fails and runpm is
326 * enabled so we report disconnected.
327 */
328 if ((rdev->flags & RADEON_IS_PX) && (radeon_runtime_pm != 0))
329 return;
330
325 if (rdev->is_atom_bios) { 331 if (rdev->is_atom_bios) {
326 /* some laptops provide a hardcoded edid in rom for LCDs */ 332 /* some laptops provide a hardcoded edid in rom for LCDs */
327 if (((connector->connector_type == DRM_MODE_CONNECTOR_LVDS) || 333 if (((connector->connector_type == DRM_MODE_CONNECTOR_LVDS) ||
@@ -826,6 +832,8 @@ static int radeon_lvds_mode_valid(struct drm_connector *connector,
826static enum drm_connector_status 832static enum drm_connector_status
827radeon_lvds_detect(struct drm_connector *connector, bool force) 833radeon_lvds_detect(struct drm_connector *connector, bool force)
828{ 834{
835 struct drm_device *dev = connector->dev;
836 struct radeon_device *rdev = dev->dev_private;
829 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 837 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
830 struct drm_encoder *encoder = radeon_best_single_encoder(connector); 838 struct drm_encoder *encoder = radeon_best_single_encoder(connector);
831 enum drm_connector_status ret = connector_status_disconnected; 839 enum drm_connector_status ret = connector_status_disconnected;
@@ -842,7 +850,11 @@ radeon_lvds_detect(struct drm_connector *connector, bool force)
842 /* check if panel is valid */ 850 /* check if panel is valid */
843 if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240) 851 if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240)
844 ret = connector_status_connected; 852 ret = connector_status_connected;
845 853 /* don't fetch the edid from the vbios if ddc fails and runpm is
854 * enabled so we report disconnected.
855 */
856 if ((rdev->flags & RADEON_IS_PX) && (radeon_runtime_pm != 0))
857 ret = connector_status_disconnected;
846 } 858 }
847 859
848 /* check for edid as well */ 860 /* check for edid as well */
@@ -1589,6 +1601,11 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
1589 /* check if panel is valid */ 1601 /* check if panel is valid */
1590 if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240) 1602 if (native_mode->hdisplay >= 320 && native_mode->vdisplay >= 240)
1591 ret = connector_status_connected; 1603 ret = connector_status_connected;
1604 /* don't fetch the edid from the vbios if ddc fails and runpm is
1605 * enabled so we report disconnected.
1606 */
1607 if ((rdev->flags & RADEON_IS_PX) && (radeon_runtime_pm != 0))
1608 ret = connector_status_disconnected;
1592 } 1609 }
1593 /* eDP is always DP */ 1610 /* eDP is always DP */
1594 radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT; 1611 radeon_dig_connector->dp_sink_type = CONNECTOR_OBJECT_ID_DISPLAYPORT;
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 1c893447d7cd..6f377de099f9 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -251,22 +251,19 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
251 251
252static int radeon_cs_sync_rings(struct radeon_cs_parser *p) 252static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
253{ 253{
254 int i, r = 0; 254 struct radeon_cs_reloc *reloc;
255 int r;
255 256
256 for (i = 0; i < p->nrelocs; i++) { 257 list_for_each_entry(reloc, &p->validated, tv.head) {
257 struct reservation_object *resv; 258 struct reservation_object *resv;
258 259
259 if (!p->relocs[i].robj) 260 resv = reloc->robj->tbo.resv;
260 continue;
261
262 resv = p->relocs[i].robj->tbo.resv;
263 r = radeon_semaphore_sync_resv(p->rdev, p->ib.semaphore, resv, 261 r = radeon_semaphore_sync_resv(p->rdev, p->ib.semaphore, resv,
264 p->relocs[i].tv.shared); 262 reloc->tv.shared);
265
266 if (r) 263 if (r)
267 break; 264 return r;
268 } 265 }
269 return r; 266 return 0;
270} 267}
271 268
272/* XXX: note that this is called from the legacy UMS CS ioctl as well */ 269/* XXX: note that this is called from the legacy UMS CS ioctl as well */
@@ -450,7 +447,7 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error, bo
450 kfree(parser->track); 447 kfree(parser->track);
451 kfree(parser->relocs); 448 kfree(parser->relocs);
452 kfree(parser->relocs_ptr); 449 kfree(parser->relocs_ptr);
453 kfree(parser->vm_bos); 450 drm_free_large(parser->vm_bos);
454 for (i = 0; i < parser->nchunks; i++) 451 for (i = 0; i < parser->nchunks; i++)
455 drm_free_large(parser->chunks[i].kdata); 452 drm_free_large(parser->chunks[i].kdata);
456 kfree(parser->chunks); 453 kfree(parser->chunks);
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index f41cc1538e48..995a8b1770dd 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -952,6 +952,7 @@ int radeon_atombios_init(struct radeon_device *rdev)
952 } 952 }
953 953
954 mutex_init(&rdev->mode_info.atom_context->mutex); 954 mutex_init(&rdev->mode_info.atom_context->mutex);
955 mutex_init(&rdev->mode_info.atom_context->scratch_mutex);
955 radeon_atom_initialize_bios_scratch_regs(rdev->ddev); 956 radeon_atom_initialize_bios_scratch_regs(rdev->ddev);
956 atom_allocate_fb_scratch(rdev->mode_info.atom_context); 957 atom_allocate_fb_scratch(rdev->mode_info.atom_context);
957 return 0; 958 return 0;
@@ -1130,7 +1131,7 @@ static void radeon_check_arguments(struct radeon_device *rdev)
1130 if (radeon_vm_block_size == -1) { 1131 if (radeon_vm_block_size == -1) {
1131 1132
1132 /* Total bits covered by PD + PTs */ 1133 /* Total bits covered by PD + PTs */
1133 unsigned bits = ilog2(radeon_vm_size) + 17; 1134 unsigned bits = ilog2(radeon_vm_size) + 18;
1134 1135
1135 /* Make sure the PD is 4K in size up to 8GB address space. 1136 /* Make sure the PD is 4K in size up to 8GB address space.
1136 Above that split equal between PD and PTs */ 1137 Above that split equal between PD and PTs */
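A hedged sketch of the arithmetic behind the "+ 18" in the radeon_device.c hunk above, assuming the VM size parameter is expressed in GiB and pages are 4 KiB (assumptions made here, not stated in the diff): address bits = ilog2(size_gb) + 30, minus 12 bits of page offset, leaves ilog2(size_gb) + 18 index bits to split between the page directory and page tables.

#include <stdio.h>

static unsigned ilog2_u(unsigned v)
{
	unsigned r = 0;

	while (v >>= 1)         /* floor(log2(v)) for v > 0 */
		r++;
	return r;
}

int main(void)
{
	unsigned vm_size_gb = 8;                   /* example: 8 GiB of VM space */
	unsigned bits = ilog2_u(vm_size_gb) + 18;  /* PD + PT index bits */

	printf("vm_size=%uGiB -> %u index bits\n", vm_size_gb, bits);  /* 21 */
	return 0;
}
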
diff --git a/drivers/gpu/drm/radeon/radeon_encoders.c b/drivers/gpu/drm/radeon/radeon_encoders.c
index 9a19e52cc655..6b670b0bc47b 100644
--- a/drivers/gpu/drm/radeon/radeon_encoders.c
+++ b/drivers/gpu/drm/radeon/radeon_encoders.c
@@ -179,6 +179,9 @@ static void radeon_encoder_add_backlight(struct radeon_encoder *radeon_encoder,
179 (rdev->pdev->subsystem_vendor == 0x1734) && 179 (rdev->pdev->subsystem_vendor == 0x1734) &&
180 (rdev->pdev->subsystem_device == 0x1107)) 180 (rdev->pdev->subsystem_device == 0x1107))
181 use_bl = false; 181 use_bl = false;
182 /* disable native backlight control on older asics */
183 else if (rdev->family < CHIP_R600)
184 use_bl = false;
182 else 185 else
183 use_bl = true; 186 use_bl = true;
184 } 187 }
diff --git a/drivers/gpu/drm/radeon/radeon_irq_kms.c b/drivers/gpu/drm/radeon/radeon_irq_kms.c
index 7784911d78ef..00fc59762e0d 100644
--- a/drivers/gpu/drm/radeon/radeon_irq_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_irq_kms.c
@@ -185,6 +185,16 @@ static bool radeon_msi_ok(struct radeon_device *rdev)
185 if (rdev->flags & RADEON_IS_AGP) 185 if (rdev->flags & RADEON_IS_AGP)
186 return false; 186 return false;
187 187
188 /*
189 * Older chips have a HW limitation, they can only generate 40 bits
190 * of address for "64-bit" MSIs which breaks on some platforms, notably
191 * IBM POWER servers, so we limit them
192 */
193 if (rdev->family < CHIP_BONAIRE) {
194 dev_info(rdev->dev, "radeon: MSI limited to 32-bit\n");
195 rdev->pdev->no_64bit_msi = 1;
196 }
197
188 /* force MSI on */ 198 /* force MSI on */
189 if (radeon_msi == 1) 199 if (radeon_msi == 1)
190 return true; 200 return true;
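A small illustration of why the 40-bit MSI limitation in the radeon_irq_kms.c hunk above matters: if the platform places the MSI target address above what the device can drive, the write lands in the wrong place, so such devices are restricted to 32-bit MSI. The helper and sample values are illustrative, not kernel code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool msi_addr_reachable(uint64_t addr, unsigned addr_bits)
{
	if (addr_bits >= 64)
		return true;
	return addr < (1ULL << addr_bits);   /* address fits in the usable bits */
}

int main(void)
{
	/* a target above 2^40 is out of reach for a 40-bit-only device */
	printf("%d\n", msi_addr_reachable(1ULL << 41, 40));  /* 0 */
	printf("%d\n", msi_addr_reachable(1ULL << 41, 64));  /* 1 */
	return 0;
}
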
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 8309b11e674d..03586763ee86 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -795,6 +795,8 @@ int radeon_get_vblank_timestamp_kms(struct drm_device *dev, int crtc,
795 795
796 /* Get associated drm_crtc: */ 796 /* Get associated drm_crtc: */
797 drmcrtc = &rdev->mode_info.crtcs[crtc]->base; 797 drmcrtc = &rdev->mode_info.crtcs[crtc]->base;
798 if (!drmcrtc)
799 return -EINVAL;
798 800
799 /* Helper routine in DRM core does all the work: */ 801 /* Helper routine in DRM core does all the work: */
800 return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error, 802 return drm_calc_vbltimestamp_from_scanoutpos(dev, crtc, max_error,
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index 99a960a4f302..4c0d786d5c7a 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -213,6 +213,13 @@ int radeon_bo_create(struct radeon_device *rdev,
213 if (!(rdev->flags & RADEON_IS_PCIE)) 213 if (!(rdev->flags & RADEON_IS_PCIE))
214 bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC); 214 bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
215 215
216#ifdef CONFIG_X86_32
217 /* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
218 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
219 */
220 bo->flags &= ~RADEON_GEM_GTT_WC;
221#endif
222
216 radeon_ttm_placement_from_domain(bo, domain); 223 radeon_ttm_placement_from_domain(bo, domain);
217 /* Kernel allocation are uninterruptible */ 224 /* Kernel allocation are uninterruptible */
218 down_read(&rdev->pm.mclk_lock); 225 down_read(&rdev->pm.mclk_lock);
diff --git a/drivers/gpu/drm/radeon/radeon_ring.c b/drivers/gpu/drm/radeon/radeon_ring.c
index 3d17af34afa7..2456f69efd23 100644
--- a/drivers/gpu/drm/radeon/radeon_ring.c
+++ b/drivers/gpu/drm/radeon/radeon_ring.c
@@ -314,7 +314,7 @@ unsigned radeon_ring_backup(struct radeon_device *rdev, struct radeon_ring *ring
314 } 314 }
315 315
316 /* and then save the content of the ring */ 316 /* and then save the content of the ring */
317 *data = kmalloc_array(size, sizeof(uint32_t), GFP_KERNEL); 317 *data = drm_malloc_ab(size, sizeof(uint32_t));
318 if (!*data) { 318 if (!*data) {
319 mutex_unlock(&rdev->ring_lock); 319 mutex_unlock(&rdev->ring_lock);
320 return 0; 320 return 0;
@@ -356,7 +356,7 @@ int radeon_ring_restore(struct radeon_device *rdev, struct radeon_ring *ring,
356 } 356 }
357 357
358 radeon_ring_unlock_commit(rdev, ring, false); 358 radeon_ring_unlock_commit(rdev, ring, false);
359 kfree(data); 359 drm_free_large(data);
360 return 0; 360 return 0;
361} 361}
362 362
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index 4532cc76a0a6..dfde266529e2 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -132,8 +132,8 @@ struct radeon_cs_reloc *radeon_vm_get_bos(struct radeon_device *rdev,
132 struct radeon_cs_reloc *list; 132 struct radeon_cs_reloc *list;
133 unsigned i, idx; 133 unsigned i, idx;
134 134
135 list = kmalloc_array(vm->max_pde_used + 2, 135 list = drm_malloc_ab(vm->max_pde_used + 2,
136 sizeof(struct radeon_cs_reloc), GFP_KERNEL); 136 sizeof(struct radeon_cs_reloc));
137 if (!list) 137 if (!list)
138 return NULL; 138 return NULL;
139 139
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 5f6db4629aaa..9acb1c3c005b 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -879,6 +879,9 @@ void rs600_bandwidth_update(struct radeon_device *rdev)
879 u32 d1mode_priority_a_cnt, d2mode_priority_a_cnt; 879 u32 d1mode_priority_a_cnt, d2mode_priority_a_cnt;
880 /* FIXME: implement full support */ 880 /* FIXME: implement full support */
881 881
882 if (!rdev->mode_info.mode_config_initialized)
883 return;
884
882 radeon_update_display_priority(rdev); 885 radeon_update_display_priority(rdev);
883 886
884 if (rdev->mode_info.crtcs[0]->base.enabled) 887 if (rdev->mode_info.crtcs[0]->base.enabled)
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 3462b64369bf..0a2d36e81108 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -579,6 +579,9 @@ void rs690_bandwidth_update(struct radeon_device *rdev)
579 u32 d1mode_priority_a_cnt, d1mode_priority_b_cnt; 579 u32 d1mode_priority_a_cnt, d1mode_priority_b_cnt;
580 u32 d2mode_priority_a_cnt, d2mode_priority_b_cnt; 580 u32 d2mode_priority_a_cnt, d2mode_priority_b_cnt;
581 581
582 if (!rdev->mode_info.mode_config_initialized)
583 return;
584
582 radeon_update_display_priority(rdev); 585 radeon_update_display_priority(rdev);
583 586
584 if (rdev->mode_info.crtcs[0]->base.enabled) 587 if (rdev->mode_info.crtcs[0]->base.enabled)
diff --git a/drivers/gpu/drm/radeon/rs780_dpm.c b/drivers/gpu/drm/radeon/rs780_dpm.c
index 02f7710de470..9031f4b69824 100644
--- a/drivers/gpu/drm/radeon/rs780_dpm.c
+++ b/drivers/gpu/drm/radeon/rs780_dpm.c
@@ -24,6 +24,7 @@
24 24
25#include "drmP.h" 25#include "drmP.h"
26#include "radeon.h" 26#include "radeon.h"
27#include "radeon_asic.h"
27#include "rs780d.h" 28#include "rs780d.h"
28#include "r600_dpm.h" 29#include "r600_dpm.h"
29#include "rs780_dpm.h" 30#include "rs780_dpm.h"
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 8a477bf1fdb3..c55d653aaf5f 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -1277,6 +1277,9 @@ void rv515_bandwidth_update(struct radeon_device *rdev)
1277 struct drm_display_mode *mode0 = NULL; 1277 struct drm_display_mode *mode0 = NULL;
1278 struct drm_display_mode *mode1 = NULL; 1278 struct drm_display_mode *mode1 = NULL;
1279 1279
1280 if (!rdev->mode_info.mode_config_initialized)
1281 return;
1282
1280 radeon_update_display_priority(rdev); 1283 radeon_update_display_priority(rdev);
1281 1284
1282 if (rdev->mode_info.crtcs[0]->base.enabled) 1285 if (rdev->mode_info.crtcs[0]->base.enabled)
diff --git a/drivers/gpu/drm/radeon/rv6xx_dpm.c b/drivers/gpu/drm/radeon/rv6xx_dpm.c
index e7045b085715..6a5c233361e9 100644
--- a/drivers/gpu/drm/radeon/rv6xx_dpm.c
+++ b/drivers/gpu/drm/radeon/rv6xx_dpm.c
@@ -24,6 +24,7 @@
24 24
25#include "drmP.h" 25#include "drmP.h"
26#include "radeon.h" 26#include "radeon.h"
27#include "radeon_asic.h"
27#include "rv6xxd.h" 28#include "rv6xxd.h"
28#include "r600_dpm.h" 29#include "r600_dpm.h"
29#include "rv6xx_dpm.h" 30#include "rv6xx_dpm.h"
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index 3c76e1dcdf04..755a8f96fe46 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -24,6 +24,7 @@
24 24
25#include "drmP.h" 25#include "drmP.h"
26#include "radeon.h" 26#include "radeon.h"
27#include "radeon_asic.h"
27#include "rv770d.h" 28#include "rv770d.h"
28#include "r600_dpm.h" 29#include "r600_dpm.h"
29#include "rv770_dpm.h" 30#include "rv770_dpm.h"
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index eeea5b6a1775..7d5083dc4acb 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -2384,6 +2384,9 @@ void dce6_bandwidth_update(struct radeon_device *rdev)
2384 u32 num_heads = 0, lb_size; 2384 u32 num_heads = 0, lb_size;
2385 int i; 2385 int i;
2386 2386
2387 if (!rdev->mode_info.mode_config_initialized)
2388 return;
2389
2387 radeon_update_display_priority(rdev); 2390 radeon_update_display_priority(rdev);
2388 2391
2389 for (i = 0; i < rdev->num_crtc; i++) { 2392 for (i = 0; i < rdev->num_crtc; i++) {
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 9e4d5d7d348f..676e6c2ba90a 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -23,6 +23,7 @@
23 23
24#include "drmP.h" 24#include "drmP.h"
25#include "radeon.h" 25#include "radeon.h"
26#include "radeon_asic.h"
26#include "sid.h" 27#include "sid.h"
27#include "r600_dpm.h" 28#include "r600_dpm.h"
28#include "si_dpm.h" 29#include "si_dpm.h"
@@ -2916,6 +2917,7 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
2916 bool disable_sclk_switching = false; 2917 bool disable_sclk_switching = false;
2917 u32 mclk, sclk; 2918 u32 mclk, sclk;
2918 u16 vddc, vddci; 2919 u16 vddc, vddci;
2920 u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
2919 int i; 2921 int i;
2920 2922
2921 if ((rdev->pm.dpm.new_active_crtc_count > 1) || 2923 if ((rdev->pm.dpm.new_active_crtc_count > 1) ||
@@ -2949,6 +2951,29 @@ static void si_apply_state_adjust_rules(struct radeon_device *rdev,
2949 } 2951 }
2950 } 2952 }
2951 2953
2954 /* limit clocks to max supported clocks based on voltage dependency tables */
2955 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
2956 &max_sclk_vddc);
2957 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
2958 &max_mclk_vddci);
2959 btc_get_max_clock_from_voltage_dependency_table(&rdev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
2960 &max_mclk_vddc);
2961
2962 for (i = 0; i < ps->performance_level_count; i++) {
2963 if (max_sclk_vddc) {
2964 if (ps->performance_levels[i].sclk > max_sclk_vddc)
2965 ps->performance_levels[i].sclk = max_sclk_vddc;
2966 }
2967 if (max_mclk_vddci) {
2968 if (ps->performance_levels[i].mclk > max_mclk_vddci)
2969 ps->performance_levels[i].mclk = max_mclk_vddci;
2970 }
2971 if (max_mclk_vddc) {
2972 if (ps->performance_levels[i].mclk > max_mclk_vddc)
2973 ps->performance_levels[i].mclk = max_mclk_vddc;
2974 }
2975 }
2976
2952 /* XXX validate the min clocks required for display */ 2977 /* XXX validate the min clocks required for display */
2953 2978
2954 if (disable_mclk_switching) { 2979 if (disable_mclk_switching) {
@@ -6231,7 +6256,7 @@ static void si_parse_pplib_clock_info(struct radeon_device *rdev,
6231 if ((rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) && 6256 if ((rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) &&
6232 index == 0) { 6257 index == 0) {
6233 /* XXX disable for A0 tahiti */ 6258 /* XXX disable for A0 tahiti */
6234 si_pi->ulv.supported = true; 6259 si_pi->ulv.supported = false;
6235 si_pi->ulv.pl = *pl; 6260 si_pi->ulv.pl = *pl;
6236 si_pi->ulv.one_pcie_lane_in_ulv = false; 6261 si_pi->ulv.one_pcie_lane_in_ulv = false;
6237 si_pi->ulv.volt_change_delay = SISLANDS_ULVVOLTAGECHANGEDELAY_DFLT; 6262 si_pi->ulv.volt_change_delay = SISLANDS_ULVVOLTAGECHANGEDELAY_DFLT;
diff --git a/drivers/gpu/drm/radeon/sumo_dpm.c b/drivers/gpu/drm/radeon/sumo_dpm.c
index 3f0e8d7b8dbe..1f8a8833e1be 100644
--- a/drivers/gpu/drm/radeon/sumo_dpm.c
+++ b/drivers/gpu/drm/radeon/sumo_dpm.c
@@ -23,6 +23,7 @@
23 23
24#include "drmP.h" 24#include "drmP.h"
25#include "radeon.h" 25#include "radeon.h"
26#include "radeon_asic.h"
26#include "sumod.h" 27#include "sumod.h"
27#include "r600_dpm.h" 28#include "r600_dpm.h"
28#include "cypress_dpm.h" 29#include "cypress_dpm.h"
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index 57f780053b3e..b4ec5c4e7969 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -23,6 +23,7 @@
23 23
24#include "drmP.h" 24#include "drmP.h"
25#include "radeon.h" 25#include "radeon.h"
26#include "radeon_asic.h"
26#include "trinityd.h" 27#include "trinityd.h"
27#include "r600_dpm.h" 28#include "r600_dpm.h"
28#include "trinity_dpm.h" 29#include "trinity_dpm.h"
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index 6553fd238685..054a79f143ae 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -736,7 +736,6 @@ static const struct drm_crtc_funcs tegra_crtc_funcs = {
736 736
737static void tegra_crtc_disable(struct drm_crtc *crtc) 737static void tegra_crtc_disable(struct drm_crtc *crtc)
738{ 738{
739 struct tegra_dc *dc = to_tegra_dc(crtc);
740 struct drm_device *drm = crtc->dev; 739 struct drm_device *drm = crtc->dev;
741 struct drm_plane *plane; 740 struct drm_plane *plane;
742 741
@@ -752,7 +751,7 @@ static void tegra_crtc_disable(struct drm_crtc *crtc)
752 } 751 }
753 } 752 }
754 753
755 drm_vblank_off(drm, dc->pipe); 754 drm_crtc_vblank_off(crtc);
756} 755}
757 756
758static bool tegra_crtc_mode_fixup(struct drm_crtc *crtc, 757static bool tegra_crtc_mode_fixup(struct drm_crtc *crtc,
@@ -841,8 +840,6 @@ static int tegra_crtc_mode_set(struct drm_crtc *crtc,
841 u32 value; 840 u32 value;
842 int err; 841 int err;
843 842
844 drm_vblank_pre_modeset(crtc->dev, dc->pipe);
845
846 err = tegra_crtc_setup_clk(crtc, mode); 843 err = tegra_crtc_setup_clk(crtc, mode);
847 if (err) { 844 if (err) {
848 dev_err(dc->dev, "failed to setup clock for CRTC: %d\n", err); 845 dev_err(dc->dev, "failed to setup clock for CRTC: %d\n", err);
@@ -896,6 +893,8 @@ static void tegra_crtc_prepare(struct drm_crtc *crtc)
896 unsigned int syncpt; 893 unsigned int syncpt;
897 unsigned long value; 894 unsigned long value;
898 895
896 drm_crtc_vblank_off(crtc);
897
899 /* hardware initialization */ 898 /* hardware initialization */
900 reset_control_deassert(dc->rst); 899 reset_control_deassert(dc->rst);
901 usleep_range(10000, 20000); 900 usleep_range(10000, 20000);
@@ -943,7 +942,7 @@ static void tegra_crtc_commit(struct drm_crtc *crtc)
943 value = GENERAL_ACT_REQ | WIN_A_ACT_REQ; 942 value = GENERAL_ACT_REQ | WIN_A_ACT_REQ;
944 tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL); 943 tegra_dc_writel(dc, value, DC_CMD_STATE_CONTROL);
945 944
946 drm_vblank_post_modeset(crtc->dev, dc->pipe); 945 drm_crtc_vblank_on(crtc);
947} 946}
948 947
949static void tegra_crtc_load_lut(struct drm_crtc *crtc) 948static void tegra_crtc_load_lut(struct drm_crtc *crtc)
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 8f5cec67c47d..d395b0bef73b 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -709,6 +709,7 @@ out:
709 709
710static int ttm_mem_evict_first(struct ttm_bo_device *bdev, 710static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
711 uint32_t mem_type, 711 uint32_t mem_type,
712 const struct ttm_place *place,
712 bool interruptible, 713 bool interruptible,
713 bool no_wait_gpu) 714 bool no_wait_gpu)
714{ 715{
@@ -720,8 +721,21 @@ static int ttm_mem_evict_first(struct ttm_bo_device *bdev,
720 spin_lock(&glob->lru_lock); 721 spin_lock(&glob->lru_lock);
721 list_for_each_entry(bo, &man->lru, lru) { 722 list_for_each_entry(bo, &man->lru, lru) {
722 ret = __ttm_bo_reserve(bo, false, true, false, NULL); 723 ret = __ttm_bo_reserve(bo, false, true, false, NULL);
723 if (!ret) 724 if (!ret) {
725 if (place && (place->fpfn || place->lpfn)) {
726 /* Don't evict this BO if it's outside of the
727 * requested placement range
728 */
729 if (place->fpfn >= (bo->mem.start + bo->mem.size) ||
730 (place->lpfn && place->lpfn <= bo->mem.start)) {
731 __ttm_bo_unreserve(bo);
732 ret = -EBUSY;
733 continue;
734 }
735 }
736
724 break; 737 break;
738 }
725 } 739 }
726 740
727 if (ret) { 741 if (ret) {
@@ -782,7 +796,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
782 return ret; 796 return ret;
783 if (mem->mm_node) 797 if (mem->mm_node)
784 break; 798 break;
785 ret = ttm_mem_evict_first(bdev, mem_type, 799 ret = ttm_mem_evict_first(bdev, mem_type, place,
786 interruptible, no_wait_gpu); 800 interruptible, no_wait_gpu);
787 if (unlikely(ret != 0)) 801 if (unlikely(ret != 0))
788 return ret; 802 return ret;
@@ -994,9 +1008,9 @@ static bool ttm_bo_mem_compat(struct ttm_placement *placement,
994 1008
995 for (i = 0; i < placement->num_placement; i++) { 1009 for (i = 0; i < placement->num_placement; i++) {
996 const struct ttm_place *heap = &placement->placement[i]; 1010 const struct ttm_place *heap = &placement->placement[i];
997 if (mem->mm_node && heap->lpfn != 0 && 1011 if (mem->mm_node &&
998 (mem->start < heap->fpfn || 1012 (mem->start < heap->fpfn ||
999 mem->start + mem->num_pages > heap->lpfn)) 1013 (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
1000 continue; 1014 continue;
1001 1015
1002 *new_flags = heap->flags; 1016 *new_flags = heap->flags;
@@ -1007,9 +1021,9 @@ static bool ttm_bo_mem_compat(struct ttm_placement *placement,
1007 1021
1008 for (i = 0; i < placement->num_busy_placement; i++) { 1022 for (i = 0; i < placement->num_busy_placement; i++) {
1009 const struct ttm_place *heap = &placement->busy_placement[i]; 1023 const struct ttm_place *heap = &placement->busy_placement[i];
1010 if (mem->mm_node && heap->lpfn != 0 && 1024 if (mem->mm_node &&
1011 (mem->start < heap->fpfn || 1025 (mem->start < heap->fpfn ||
1012 mem->start + mem->num_pages > heap->lpfn)) 1026 (heap->lpfn != 0 && (mem->start + mem->num_pages) > heap->lpfn)))
1013 continue; 1027 continue;
1014 1028
1015 *new_flags = heap->flags; 1029 *new_flags = heap->flags;
@@ -1233,7 +1247,7 @@ static int ttm_bo_force_list_clean(struct ttm_bo_device *bdev,
1233 spin_lock(&glob->lru_lock); 1247 spin_lock(&glob->lru_lock);
1234 while (!list_empty(&man->lru)) { 1248 while (!list_empty(&man->lru)) {
1235 spin_unlock(&glob->lru_lock); 1249 spin_unlock(&glob->lru_lock);
1236 ret = ttm_mem_evict_first(bdev, mem_type, false, false); 1250 ret = ttm_mem_evict_first(bdev, mem_type, NULL, false, false);
1237 if (ret) { 1251 if (ret) {
1238 if (allow_errors) { 1252 if (allow_errors) {
1239 return ret; 1253 return ret;
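A plain-C sketch of the interval test added to the ttm_bo.c eviction loop above: a buffer occupying pages [start, start + size) is only worth evicting if it overlaps the requested placement window [fpfn, lpfn), where lpfn == 0 means "no upper bound". Names and sample values are illustrative, not TTM code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool overlaps_place(uint64_t start, uint64_t size,
			   uint64_t fpfn, uint64_t lpfn)
{
	if (fpfn >= start + size)      /* window begins past the buffer */
		return false;
	if (lpfn && lpfn <= start)     /* window ends before the buffer */
		return false;
	return true;                   /* evicting this buffer can free useful space */
}

int main(void)
{
	printf("%d\n", overlaps_place(0, 256, 512, 1024));    /* 0: outside the window */
	printf("%d\n", overlaps_place(600, 256, 512, 1024));  /* 1: inside the window */
	return 0;
}
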
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
index bfeb4b1f2acf..21e9b7f8dad0 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cmdbuf_res.c
@@ -246,7 +246,8 @@ int vmw_cmdbuf_res_remove(struct vmw_cmdbuf_res_manager *man,
246 struct drm_hash_item *hash; 246 struct drm_hash_item *hash;
247 int ret; 247 int ret;
248 248
249 ret = drm_ht_find_item(&man->resources, user_key, &hash); 249 ret = drm_ht_find_item(&man->resources, user_key | (res_type << 24),
250 &hash);
250 if (likely(ret != 0)) 251 if (likely(ret != 0))
251 return -EINVAL; 252 return -EINVAL;
252 253
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 7197af157313..25f3c250fd98 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
@@ -688,7 +688,11 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
688 goto out_err0; 688 goto out_err0;
689 } 689 }
690 690
691 if (unlikely(dev_priv->prim_bb_mem < dev_priv->vram_size)) 691 /*
692 * Limit back buffer size to VRAM size. Remove this once
693 * screen targets are implemented.
694 */
695 if (dev_priv->prim_bb_mem > dev_priv->vram_size)
692 dev_priv->prim_bb_mem = dev_priv->vram_size; 696 dev_priv->prim_bb_mem = dev_priv->vram_size;
693 697
694 mutex_unlock(&dev_priv->hw_mutex); 698 mutex_unlock(&dev_priv->hw_mutex);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index d2bc2b03d4c6..941a7bc0b791 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -187,7 +187,7 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
187 * can do this since the caller in the drm core doesn't check anything 187 * can do this since the caller in the drm core doesn't check anything
188 * which is protected by any looks. 188 * which is protected by any looks.
189 */ 189 */
190 drm_modeset_unlock(&crtc->mutex); 190 drm_modeset_unlock_crtc(crtc);
191 drm_modeset_lock_all(dev_priv->dev); 191 drm_modeset_lock_all(dev_priv->dev);
192 192
193 /* A lot of the code assumes this */ 193 /* A lot of the code assumes this */
@@ -252,7 +252,7 @@ int vmw_du_crtc_cursor_set(struct drm_crtc *crtc, struct drm_file *file_priv,
252 ret = 0; 252 ret = 0;
253out: 253out:
254 drm_modeset_unlock_all(dev_priv->dev); 254 drm_modeset_unlock_all(dev_priv->dev);
255 drm_modeset_lock(&crtc->mutex, NULL); 255 drm_modeset_lock_crtc(crtc);
256 256
257 return ret; 257 return ret;
258} 258}
@@ -273,7 +273,7 @@ int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
273 * can do this since the caller in the drm core doesn't check anything 273 * can do this since the caller in the drm core doesn't check anything
274 * which is protected by any looks. 274 * which is protected by any looks.
275 */ 275 */
276 drm_modeset_unlock(&crtc->mutex); 276 drm_modeset_unlock_crtc(crtc);
277 drm_modeset_lock_all(dev_priv->dev); 277 drm_modeset_lock_all(dev_priv->dev);
278 278
279 vmw_cursor_update_position(dev_priv, shown, 279 vmw_cursor_update_position(dev_priv, shown,
@@ -281,7 +281,7 @@ int vmw_du_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
281 du->cursor_y + du->hotspot_y); 281 du->cursor_y + du->hotspot_y);
282 282
283 drm_modeset_unlock_all(dev_priv->dev); 283 drm_modeset_unlock_all(dev_priv->dev);
284 drm_modeset_lock(&crtc->mutex, NULL); 284 drm_modeset_lock_crtc(crtc);
285 285
286 return 0; 286 return 0;
287} 287}
@@ -1950,6 +1950,14 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
1950 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC) 1950 DRM_MODE_FLAG_NHSYNC | DRM_MODE_FLAG_PVSYNC)
1951 }; 1951 };
1952 int i; 1952 int i;
1953 u32 assumed_bpp = 2;
1954
1955 /*
1956 * If using screen objects, then assume 32-bpp because that's what the
1957 * SVGA device is assuming
1958 */
1959 if (dev_priv->sou_priv)
1960 assumed_bpp = 4;
1953 1961
1954 /* Add preferred mode */ 1962 /* Add preferred mode */
1955 { 1963 {
@@ -1960,8 +1968,9 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
1960 mode->vdisplay = du->pref_height; 1968 mode->vdisplay = du->pref_height;
1961 vmw_guess_mode_timing(mode); 1969 vmw_guess_mode_timing(mode);
1962 1970
1963 if (vmw_kms_validate_mode_vram(dev_priv, mode->hdisplay * 2, 1971 if (vmw_kms_validate_mode_vram(dev_priv,
1964 mode->vdisplay)) { 1972 mode->hdisplay * assumed_bpp,
1973 mode->vdisplay)) {
1965 drm_mode_probed_add(connector, mode); 1974 drm_mode_probed_add(connector, mode);
1966 } else { 1975 } else {
1967 drm_mode_destroy(dev, mode); 1976 drm_mode_destroy(dev, mode);
@@ -1983,7 +1992,8 @@ int vmw_du_connector_fill_modes(struct drm_connector *connector,
1983 bmode->vdisplay > max_height) 1992 bmode->vdisplay > max_height)
1984 continue; 1993 continue;
1985 1994
1986 if (!vmw_kms_validate_mode_vram(dev_priv, bmode->hdisplay * 2, 1995 if (!vmw_kms_validate_mode_vram(dev_priv,
1996 bmode->hdisplay * assumed_bpp,
1987 bmode->vdisplay)) 1997 bmode->vdisplay))
1988 continue; 1998 continue;
1989 1999
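A sketch of the VRAM check the vmwgfx_kms.c change above tightens: a mode is usable only if width * height * bytes-per-pixel fits in the available memory, and with screen objects the device assumes 32 bpp, so 4 bytes per pixel must be used instead of 2. The helper and the VRAM figure below are illustrative.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool mode_fits_vram(uint32_t hdisplay, uint32_t vdisplay,
			   uint32_t assumed_bpp, uint64_t vram_size)
{
	uint64_t needed = (uint64_t)hdisplay * vdisplay * assumed_bpp;

	return needed <= vram_size;    /* the framebuffer must fit in VRAM */
}

int main(void)
{
	uint64_t vram = 12ULL * 1024 * 1024;  /* example: 12 MiB of VRAM */

	/* 2560x1600 fits at 2 bytes/pixel but not at the 4 bytes/pixel
	 * assumed when screen objects are in use */
	printf("%d\n", mode_fits_vram(2560, 1600, 2, vram));  /* 1 */
	printf("%d\n", mode_fits_vram(2560, 1600, 4, vram));  /* 0 */
	return 0;
}
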
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 73bd9e2e42bc..3402033fa52a 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1659,6 +1659,7 @@ void hid_disconnect(struct hid_device *hdev)
1659 hdev->hiddev_disconnect(hdev); 1659 hdev->hiddev_disconnect(hdev);
1660 if (hdev->claimed & HID_CLAIMED_HIDRAW) 1660 if (hdev->claimed & HID_CLAIMED_HIDRAW)
1661 hidraw_disconnect(hdev); 1661 hidraw_disconnect(hdev);
1662 hdev->claimed = 0;
1662} 1663}
1663EXPORT_SYMBOL_GPL(hid_disconnect); 1664EXPORT_SYMBOL_GPL(hid_disconnect);
1664 1665
diff --git a/drivers/hid/hid-debug.c b/drivers/hid/hid-debug.c
index 84c3cb15ccdd..8bf61d295ffd 100644
--- a/drivers/hid/hid-debug.c
+++ b/drivers/hid/hid-debug.c
@@ -946,6 +946,12 @@ static const char *keys[KEY_MAX + 1] = {
946 [KEY_BRIGHTNESS_MIN] = "BrightnessMin", 946 [KEY_BRIGHTNESS_MIN] = "BrightnessMin",
947 [KEY_BRIGHTNESS_MAX] = "BrightnessMax", 947 [KEY_BRIGHTNESS_MAX] = "BrightnessMax",
948 [KEY_BRIGHTNESS_AUTO] = "BrightnessAuto", 948 [KEY_BRIGHTNESS_AUTO] = "BrightnessAuto",
949 [KEY_KBDINPUTASSIST_PREV] = "KbdInputAssistPrev",
950 [KEY_KBDINPUTASSIST_NEXT] = "KbdInputAssistNext",
951 [KEY_KBDINPUTASSIST_PREVGROUP] = "KbdInputAssistPrevGroup",
952 [KEY_KBDINPUTASSIST_NEXTGROUP] = "KbdInputAssistNextGroup",
953 [KEY_KBDINPUTASSIST_ACCEPT] = "KbdInputAssistAccept",
954 [KEY_KBDINPUTASSIST_CANCEL] = "KbdInputAssistCancel",
949}; 955};
950 956
951static const char *relatives[REL_MAX + 1] = { 957static const char *relatives[REL_MAX + 1] = {
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index cd9c9e96cf0e..7c863738e419 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -298,6 +298,9 @@
298 298
299#define USB_VENDOR_ID_ELAN 0x04f3 299#define USB_VENDOR_ID_ELAN 0x04f3
300#define USB_DEVICE_ID_ELAN_TOUCHSCREEN 0x0089 300#define USB_DEVICE_ID_ELAN_TOUCHSCREEN 0x0089
301#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B 0x009b
302#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_0103 0x0103
303#define USB_DEVICE_ID_ELAN_TOUCHSCREEN_016F 0x016f
301 304
302#define USB_VENDOR_ID_ELECOM 0x056e 305#define USB_VENDOR_ID_ELECOM 0x056e
303#define USB_DEVICE_ID_ELECOM_BM084 0x0061 306#define USB_DEVICE_ID_ELECOM_BM084 0x0061
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 2df7fddbd119..725f22ca47fc 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -695,7 +695,10 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
695 break; 695 break;
696 696
697 case 0x5b: /* TransducerSerialNumber */ 697 case 0x5b: /* TransducerSerialNumber */
698 set_bit(MSC_SERIAL, input->mscbit); 698 usage->type = EV_MSC;
699 usage->code = MSC_SERIAL;
700 bit = input->mscbit;
701 max = MSC_MAX;
699 break; 702 break;
700 703
701 default: goto unknown; 704 default: goto unknown;
@@ -862,6 +865,13 @@ static void hidinput_configure_usage(struct hid_input *hidinput, struct hid_fiel
862 case 0x28b: map_key_clear(KEY_FORWARDMAIL); break; 865 case 0x28b: map_key_clear(KEY_FORWARDMAIL); break;
863 case 0x28c: map_key_clear(KEY_SEND); break; 866 case 0x28c: map_key_clear(KEY_SEND); break;
864 867
868 case 0x2c7: map_key_clear(KEY_KBDINPUTASSIST_PREV); break;
869 case 0x2c8: map_key_clear(KEY_KBDINPUTASSIST_NEXT); break;
870 case 0x2c9: map_key_clear(KEY_KBDINPUTASSIST_PREVGROUP); break;
871 case 0x2ca: map_key_clear(KEY_KBDINPUTASSIST_NEXTGROUP); break;
872 case 0x2cb: map_key_clear(KEY_KBDINPUTASSIST_ACCEPT); break;
873 case 0x2cc: map_key_clear(KEY_KBDINPUTASSIST_CANCEL); break;
874
865 default: goto ignore; 875 default: goto ignore;
866 } 876 }
867 break; 877 break;
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index f3cb5b0a4345..552671ee7c5d 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -71,6 +71,9 @@ static const struct hid_blacklist {
71 { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET }, 71 { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET },
72 { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, 72 { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET },
73 { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN, HID_QUIRK_ALWAYS_POLL }, 73 { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN, HID_QUIRK_ALWAYS_POLL },
74 { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_009B, HID_QUIRK_ALWAYS_POLL },
75 { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_0103, HID_QUIRK_ALWAYS_POLL },
76 { USB_VENDOR_ID_ELAN, USB_DEVICE_ID_ELAN_TOUCHSCREEN_016F, HID_QUIRK_ALWAYS_POLL },
74 { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET }, 77 { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET },
75 { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS }, 78 { USB_VENDOR_ID_FORMOSA, USB_DEVICE_ID_FORMOSA_IR_RECEIVER, HID_QUIRK_NO_INIT_REPORTS },
76 { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET }, 79 { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET },
diff --git a/drivers/hwmon/fam15h_power.c b/drivers/hwmon/fam15h_power.c
index fcdbde4ec692..3057dfc7e3bc 100644
--- a/drivers/hwmon/fam15h_power.c
+++ b/drivers/hwmon/fam15h_power.c
@@ -234,7 +234,7 @@ static const struct pci_device_id fam15h_power_id_table[] = {
234 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) }, 234 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
235 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) }, 235 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
236 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) }, 236 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
237 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) }, 237 { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
238 {} 238 {}
239}; 239};
240MODULE_DEVICE_TABLE(pci, fam15h_power_id_table); 240MODULE_DEVICE_TABLE(pci, fam15h_power_id_table);
diff --git a/drivers/hwmon/g762.c b/drivers/hwmon/g762.c
index 6aac695b1688..9b55e673b67c 100644
--- a/drivers/hwmon/g762.c
+++ b/drivers/hwmon/g762.c
@@ -1084,10 +1084,8 @@ static int g762_probe(struct i2c_client *client, const struct i2c_device_id *id)
1084 if (ret) 1084 if (ret)
1085 goto clock_dis; 1085 goto clock_dis;
1086 1086
1087 data->hwmon_dev = devm_hwmon_device_register_with_groups(dev, 1087 data->hwmon_dev = hwmon_device_register_with_groups(dev, client->name,
1088 client->name, 1088 data, g762_groups);
1089 data,
1090 g762_groups);
1091 if (IS_ERR(data->hwmon_dev)) { 1089 if (IS_ERR(data->hwmon_dev)) {
1092 ret = PTR_ERR(data->hwmon_dev); 1090 ret = PTR_ERR(data->hwmon_dev);
1093 goto clock_dis; 1091 goto clock_dis;
diff --git a/drivers/hwmon/ibmpowernv.c b/drivers/hwmon/ibmpowernv.c
index d2bf2c97ae70..6a30eeea94be 100644
--- a/drivers/hwmon/ibmpowernv.c
+++ b/drivers/hwmon/ibmpowernv.c
@@ -181,7 +181,7 @@ static int __init populate_attr_groups(struct platform_device *pdev)
181 181
182 opal = of_find_node_by_path("/ibm,opal/sensors"); 182 opal = of_find_node_by_path("/ibm,opal/sensors");
183 if (!opal) { 183 if (!opal) {
184 dev_err(&pdev->dev, "Opal node 'sensors' not found\n"); 184 dev_dbg(&pdev->dev, "Opal node 'sensors' not found\n");
185 return -ENODEV; 185 return -ENODEV;
186 } 186 }
187 187
@@ -335,7 +335,9 @@ static int __init ibmpowernv_init(void)
335 335
336 err = platform_driver_probe(&ibmpowernv_driver, ibmpowernv_probe); 336 err = platform_driver_probe(&ibmpowernv_driver, ibmpowernv_probe);
337 if (err) { 337 if (err) {
338 pr_err("Platfrom driver probe failed\n"); 338 if (err != -ENODEV)
339 pr_err("Platform driver probe failed (%d)\n", err);
340
339 goto exit_device_del; 341 goto exit_device_del;
340 } 342 }
341 343
diff --git a/drivers/hwmon/menf21bmc_hwmon.c b/drivers/hwmon/menf21bmc_hwmon.c
index c92229d321c9..afc6b58eaa62 100644
--- a/drivers/hwmon/menf21bmc_hwmon.c
+++ b/drivers/hwmon/menf21bmc_hwmon.c
@@ -21,6 +21,7 @@
21#include <linux/jiffies.h> 21#include <linux/jiffies.h>
22#include <linux/slab.h> 22#include <linux/slab.h>
23#include <linux/i2c.h> 23#include <linux/i2c.h>
24#include <linux/err.h>
24 25
25#define DRV_NAME "menf21bmc_hwmon" 26#define DRV_NAME "menf21bmc_hwmon"
26 27
diff --git a/drivers/hwmon/pwm-fan.c b/drivers/hwmon/pwm-fan.c
index 823c877a1ec0..1991d9032c38 100644
--- a/drivers/hwmon/pwm-fan.c
+++ b/drivers/hwmon/pwm-fan.c
@@ -161,10 +161,17 @@ static int pwm_fan_suspend(struct device *dev)
161static int pwm_fan_resume(struct device *dev) 161static int pwm_fan_resume(struct device *dev)
162{ 162{
163 struct pwm_fan_ctx *ctx = dev_get_drvdata(dev); 163 struct pwm_fan_ctx *ctx = dev_get_drvdata(dev);
164 unsigned long duty;
165 int ret;
164 166
165 if (ctx->pwm_value) 167 if (ctx->pwm_value == 0)
166 return pwm_enable(ctx->pwm); 168 return 0;
167 return 0; 169
170 duty = DIV_ROUND_UP(ctx->pwm_value * (ctx->pwm->period - 1), MAX_PWM);
171 ret = pwm_config(ctx->pwm, duty, ctx->pwm->period);
172 if (ret)
173 return ret;
174 return pwm_enable(ctx->pwm);
168} 175}
169#endif 176#endif
170 177
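A sketch of the duty-cycle scaling the pwm-fan.c resume path above performs before re-enabling the PWM: the stored fan value in [0, MAX_PWM] is rescaled to the PWM period with a rounding-up division, mirroring DIV_ROUND_UP. The constant and sample numbers are illustrative.

#include <stdio.h>

#define MAX_PWM 255U

static unsigned long scale_duty(unsigned int pwm_value, unsigned long period)
{
	/* ceiling division: (a + b - 1) / b, as DIV_ROUND_UP does */
	return (pwm_value * (period - 1) + MAX_PWM - 1) / MAX_PWM;
}

int main(void)
{
	printf("%lu\n", scale_duty(128, 40000));  /* roughly half the period */
	return 0;
}
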
diff --git a/drivers/i2c/algos/i2c-algo-bit.c b/drivers/i2c/algos/i2c-algo-bit.c
index 65ef9664d5da..899bede81b31 100644
--- a/drivers/i2c/algos/i2c-algo-bit.c
+++ b/drivers/i2c/algos/i2c-algo-bit.c
@@ -12,11 +12,6 @@
12 but WITHOUT ANY WARRANTY; without even the implied warranty of 12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details. 14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
19 MA 02110-1301 USA.
20 * ------------------------------------------------------------------------- */ 15 * ------------------------------------------------------------------------- */
21 16
22/* With some changes from Frodo Looijaard <frodol@dds.nl>, Kyösti Mälkki 17/* With some changes from Frodo Looijaard <frodol@dds.nl>, Kyösti Mälkki
diff --git a/drivers/i2c/algos/i2c-algo-pca.c b/drivers/i2c/algos/i2c-algo-pca.c
index 8b10f88b13d9..580dbf05c148 100644
--- a/drivers/i2c/algos/i2c-algo-pca.c
+++ b/drivers/i2c/algos/i2c-algo-pca.c
@@ -12,11 +12,6 @@
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
19 * MA 02110-1301 USA.
20 */ 15 */
21 16
22#include <linux/kernel.h> 17#include <linux/kernel.h>
diff --git a/drivers/i2c/algos/i2c-algo-pcf.c b/drivers/i2c/algos/i2c-algo-pcf.c
index 34370090b753..270d84bfc2c6 100644
--- a/drivers/i2c/algos/i2c-algo-pcf.c
+++ b/drivers/i2c/algos/i2c-algo-pcf.c
@@ -14,11 +14,6 @@
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details. 15 * GNU General Public License for more details.
16 * 16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
20 * MA 02110-1301 USA.
21 *
22 * With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and 17 * With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and
23 * Frodo Looijaard <frodol@dds.nl>, and also from Martin Bailey 18 * Frodo Looijaard <frodol@dds.nl>, and also from Martin Bailey
24 * <mbailey@littlefeet-inc.com> 19 * <mbailey@littlefeet-inc.com>
diff --git a/drivers/i2c/algos/i2c-algo-pcf.h b/drivers/i2c/algos/i2c-algo-pcf.h
index 1ec703ee788d..262ee801975b 100644
--- a/drivers/i2c/algos/i2c-algo-pcf.h
+++ b/drivers/i2c/algos/i2c-algo-pcf.h
@@ -12,12 +12,7 @@
12 This program is distributed in the hope that it will be useful, 12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of 13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details. 15 GNU General Public License for more details. */
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
20 MA 02110-1301 USA. */
21/* -------------------------------------------------------------------- */ 16/* -------------------------------------------------------------------- */
22 17
23/* With some changes from Frodo Looijaard <frodol@dds.nl> */ 18/* With some changes from Frodo Looijaard <frodol@dds.nl> */
diff --git a/drivers/i2c/busses/i2c-ali1535.c b/drivers/i2c/busses/i2c-ali1535.c
index 451e305f7971..4f2d78868281 100644
--- a/drivers/i2c/busses/i2c-ali1535.c
+++ b/drivers/i2c/busses/i2c-ali1535.c
@@ -14,10 +14,6 @@
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details. 16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21*/ 17*/
22 18
23/* 19/*
diff --git a/drivers/i2c/busses/i2c-ali15x3.c b/drivers/i2c/busses/i2c-ali15x3.c
index 2fa21ce9682b..45c5c4883022 100644
--- a/drivers/i2c/busses/i2c-ali15x3.c
+++ b/drivers/i2c/busses/i2c-ali15x3.c
@@ -12,10 +12,6 @@
12 but WITHOUT ANY WARRANTY; without even the implied warranty of 12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details. 14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/ 15*/
20 16
21/* 17/*
diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
index 41fc6837fb8b..65e324054970 100644
--- a/drivers/i2c/busses/i2c-amd756-s4882.c
+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
@@ -12,10 +12,6 @@
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */ 15 */
20 16
21/* 17/*
diff --git a/drivers/i2c/busses/i2c-amd756.c b/drivers/i2c/busses/i2c-amd756.c
index a16f72891358..6c7113d990f8 100644
--- a/drivers/i2c/busses/i2c-amd756.c
+++ b/drivers/i2c/busses/i2c-amd756.c
@@ -15,10 +15,6 @@
15 but WITHOUT ANY WARRANTY; without even the implied warranty of 15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details. 17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program; if not, write to the Free Software
21 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22*/ 18*/
23 19
24/* 20/*
diff --git a/drivers/i2c/busses/i2c-at91.c b/drivers/i2c/busses/i2c-at91.c
index 917d54588d95..e05a672db3e5 100644
--- a/drivers/i2c/busses/i2c-at91.c
+++ b/drivers/i2c/busses/i2c-at91.c
@@ -434,7 +434,7 @@ static int at91_do_twi_transfer(struct at91_twi_dev *dev)
434 } 434 }
435 } 435 }
436 436
437 ret = wait_for_completion_io_timeout(&dev->cmd_complete, 437 ret = wait_for_completion_timeout(&dev->cmd_complete,
438 dev->adapter.timeout); 438 dev->adapter.timeout);
439 if (ret == 0) { 439 if (ret == 0) {
440 dev_err(dev->dev, "controller timed out\n"); 440 dev_err(dev->dev, "controller timed out\n");
diff --git a/drivers/i2c/busses/i2c-au1550.c b/drivers/i2c/busses/i2c-au1550.c
index 8762458ca7da..6f8c0756e350 100644
--- a/drivers/i2c/busses/i2c-au1550.c
+++ b/drivers/i2c/busses/i2c-au1550.c
@@ -21,10 +21,6 @@
21 * but WITHOUT ANY WARRANTY; without even the implied warranty of 21 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 * GNU General Public License for more details. 23 * GNU General Public License for more details.
24 *
25 * You should have received a copy of the GNU General Public License
26 * along with this program; if not, write to the Free Software
27 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
28 */ 24 */
29 25
30#include <linux/delay.h> 26#include <linux/delay.h>
diff --git a/drivers/i2c/busses/i2c-cadence.c b/drivers/i2c/busses/i2c-cadence.c
index 63f3f03ecc9b..c604f4c3ac0d 100644
--- a/drivers/i2c/busses/i2c-cadence.c
+++ b/drivers/i2c/busses/i2c-cadence.c
@@ -111,6 +111,8 @@
111#define CDNS_I2C_DIVA_MAX 4 111#define CDNS_I2C_DIVA_MAX 4
112#define CDNS_I2C_DIVB_MAX 64 112#define CDNS_I2C_DIVB_MAX 64
113 113
114#define CDNS_I2C_TIMEOUT_MAX 0xFF
115
114#define cdns_i2c_readreg(offset) readl_relaxed(id->membase + offset) 116#define cdns_i2c_readreg(offset) readl_relaxed(id->membase + offset)
115#define cdns_i2c_writereg(val, offset) writel_relaxed(val, id->membase + offset) 117#define cdns_i2c_writereg(val, offset) writel_relaxed(val, id->membase + offset)
116 118
@@ -852,6 +854,15 @@ static int cdns_i2c_probe(struct platform_device *pdev)
852 goto err_clk_dis; 854 goto err_clk_dis;
853 } 855 }
854 856
857 /*
858 * Cadence I2C controller has a bug wherein it generates
859 * invalid read transaction after HW timeout in master receiver mode.
860 * HW timeout is not used by this driver and the interrupt is disabled.
861 * But the feature itself cannot be disabled. Hence maximum value
862 * is written to this register to reduce the chances of error.
863 */
864 cdns_i2c_writereg(CDNS_I2C_TIMEOUT_MAX, CDNS_I2C_TIME_OUT_OFFSET);
865
855 dev_info(&pdev->dev, "%u kHz mmio %08lx irq %d\n", 866 dev_info(&pdev->dev, "%u kHz mmio %08lx irq %d\n",
856 id->i2c_clk / 1000, (unsigned long)r_mem->start, id->irq); 867 id->i2c_clk / 1000, (unsigned long)r_mem->start, id->irq);
857 868
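
The comment in the cadence hunk explains the idea: the hardware timeout cannot be switched off, so probe writes the largest value the field accepts to keep it as harmless as possible. A small standalone C analogue of that probe-time workaround (the register names and the mock register file are made up for illustration):

#include <stdint.h>
#include <stdio.h>

#define MOCK_TIMEOUT_REG	0x1C	/* offset of the mock timeout register */
#define MOCK_TIMEOUT_MAX	0xFF	/* largest value the field accepts */

static uint32_t regs[64];		/* stand-in for the MMIO region */

static void mock_writereg(uint32_t val, uint32_t offset)
{
	regs[offset / 4] = val;		/* the real driver uses writel_relaxed() */
}

static void mock_probe(void)
{
	/* The feature stays enabled either way; the maximum value simply
	 * makes it trigger as late as possible. */
	mock_writereg(MOCK_TIMEOUT_MAX, MOCK_TIMEOUT_REG);
}

int main(void)
{
	mock_probe();
	printf("timeout register = 0x%02x\n",
	       (unsigned int)regs[MOCK_TIMEOUT_REG / 4]);
	return 0;
}
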
diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c
index f3b89a4698b6..5bdbc71698d0 100644
--- a/drivers/i2c/busses/i2c-cpm.c
+++ b/drivers/i2c/busses/i2c-cpm.c
@@ -23,10 +23,6 @@
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of 23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details. 25 * GNU General Public License for more details.
26 *
27 * You should have received a copy of the GNU General Public License
28 * along with this program; if not, write to the Free Software
29 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
30 */ 26 */
31 27
32#include <linux/kernel.h> 28#include <linux/kernel.h>
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index 4d9614719128..01f0cd87a4a5 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -17,10 +17,6 @@
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of 17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details. 19 * GNU General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program; if not, write to the Free Software
23 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24 * ---------------------------------------------------------------------------- 20 * ----------------------------------------------------------------------------
25 * 21 *
26 */ 22 */
@@ -411,11 +407,9 @@ i2c_davinci_xfer_msg(struct i2c_adapter *adap, struct i2c_msg *msg, int stop)
411 if (dev->cmd_err & DAVINCI_I2C_STR_NACK) { 407 if (dev->cmd_err & DAVINCI_I2C_STR_NACK) {
412 if (msg->flags & I2C_M_IGNORE_NAK) 408 if (msg->flags & I2C_M_IGNORE_NAK)
413 return msg->len; 409 return msg->len;
414 if (stop) { 410 w = davinci_i2c_read_reg(dev, DAVINCI_I2C_MDR_REG);
415 w = davinci_i2c_read_reg(dev, DAVINCI_I2C_MDR_REG); 411 w |= DAVINCI_I2C_MDR_STP;
416 w |= DAVINCI_I2C_MDR_STP; 412 davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, w);
417 davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, w);
418 }
419 return -EREMOTEIO; 413 return -EREMOTEIO;
420 } 414 }
421 return -EIO; 415 return -EIO;
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index 3c20e4bd6dd1..23628b7bfb8d 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -18,10 +18,6 @@
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of 18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details. 20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 * ---------------------------------------------------------------------------- 21 * ----------------------------------------------------------------------------
26 * 22 *
27 */ 23 */
@@ -363,7 +359,7 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
363 } 359 }
364 360
365 /* Configure Tx/Rx FIFO threshold levels */ 361 /* Configure Tx/Rx FIFO threshold levels */
366 dw_writel(dev, dev->tx_fifo_depth - 1, DW_IC_TX_TL); 362 dw_writel(dev, dev->tx_fifo_depth / 2, DW_IC_TX_TL);
367 dw_writel(dev, 0, DW_IC_RX_TL); 363 dw_writel(dev, 0, DW_IC_RX_TL);
368 364
369 /* configure the i2c master */ 365 /* configure the i2c master */
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index d66b6cbc9edc..5a410ef17abd 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -18,10 +18,6 @@
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of 18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details. 20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 * ---------------------------------------------------------------------------- 21 * ----------------------------------------------------------------------------
26 * 22 *
27 */ 23 */
diff --git a/drivers/i2c/busses/i2c-designware-pcidrv.c b/drivers/i2c/busses/i2c-designware-pcidrv.c
index d31d313ab4f7..acb40f95db78 100644
--- a/drivers/i2c/busses/i2c-designware-pcidrv.c
+++ b/drivers/i2c/busses/i2c-designware-pcidrv.c
@@ -19,10 +19,6 @@
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of 19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details. 21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; if not, write to the Free Software
25 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
26 * ---------------------------------------------------------------------------- 22 * ----------------------------------------------------------------------------
27 * 23 *
28 */ 24 */
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index a7431150acf7..373dd4d47765 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -18,10 +18,6 @@
18 * but WITHOUT ANY WARRANTY; without even the implied warranty of 18 * but WITHOUT ANY WARRANTY; without even the implied warranty of
19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 19 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 * GNU General Public License for more details. 20 * GNU General Public License for more details.
21 *
22 * You should have received a copy of the GNU General Public License
23 * along with this program; if not, write to the Free Software
24 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 * ---------------------------------------------------------------------------- 21 * ----------------------------------------------------------------------------
26 * 22 *
27 */ 23 */
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index a44ea13d1434..76e699f9ed97 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -9,10 +9,6 @@
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of 9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details. 11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
16 */ 12 */
17 13
18#include <linux/module.h> 14#include <linux/module.h>
diff --git a/drivers/i2c/busses/i2c-elektor.c b/drivers/i2c/busses/i2c-elektor.c
index 485497066ed7..92e8c0ce1625 100644
--- a/drivers/i2c/busses/i2c-elektor.c
+++ b/drivers/i2c/busses/i2c-elektor.c
@@ -12,11 +12,7 @@
12 This program is distributed in the hope that it will be useful, 12 This program is distributed in the hope that it will be useful,
13 but WITHOUT ANY WARRANTY; without even the implied warranty of 13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details. 15 GNU General Public License for more details. */
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
20/* ------------------------------------------------------------------------- */ 16/* ------------------------------------------------------------------------- */
21 17
22/* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and even 18/* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi> and even
diff --git a/drivers/i2c/busses/i2c-hydra.c b/drivers/i2c/busses/i2c-hydra.c
index 14d2b76de25f..b7864cf42a72 100644
--- a/drivers/i2c/busses/i2c-hydra.c
+++ b/drivers/i2c/busses/i2c-hydra.c
@@ -15,10 +15,6 @@
15 but WITHOUT ANY WARRANTY; without even the implied warranty of 15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details. 17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program; if not, write to the Free Software
21 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22*/ 18*/
23 19
24#include <linux/kernel.h> 20#include <linux/kernel.h>
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 7cfc183b3d63..6ab4f1cb21f3 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -15,10 +15,6 @@
15 but WITHOUT ANY WARRANTY; without even the implied warranty of 15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details. 17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program; if not, write to the Free Software
21 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
22*/ 18*/
23 19
24/* 20/*
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index c48e46af670a..e9fb7cf78612 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -11,11 +11,6 @@
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details. 12 * GNU General Public License for more details.
13 * 13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
17 * USA.
18 *
19 * Author: 14 * Author:
20 * Darius Augulis, Teltonika Inc. 15 * Darius Augulis, Teltonika Inc.
21 * 16 *
diff --git a/drivers/i2c/busses/i2c-iop3xx.h b/drivers/i2c/busses/i2c-iop3xx.h
index 097e270955d0..2d6929c2bd92 100644
--- a/drivers/i2c/busses/i2c-iop3xx.h
+++ b/drivers/i2c/busses/i2c-iop3xx.h
@@ -11,11 +11,7 @@
11 This program is distributed in the hope that it will be useful, 11 This program is distributed in the hope that it will be useful,
12 but WITHOUT ANY WARRANTY; without even the implied warranty of 12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details. 14 GNU General Public License for more details. */
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */
19/* ------------------------------------------------------------------------- */ 15/* ------------------------------------------------------------------------- */
20 16
21 17
diff --git a/drivers/i2c/busses/i2c-isch.c b/drivers/i2c/busses/i2c-isch.c
index cf99dbf21fd1..113293d275f6 100644
--- a/drivers/i2c/busses/i2c-isch.c
+++ b/drivers/i2c/busses/i2c-isch.c
@@ -14,10 +14,6 @@
14 but WITHOUT ANY WARRANTY; without even the implied warranty of 14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details. 16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; if not, write to the Free Software
20 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21*/ 17*/
22 18
23/* 19/*
diff --git a/drivers/i2c/busses/i2c-ismt.c b/drivers/i2c/busses/i2c-ismt.c
index 3f6ecbfb9a56..f2b0ff011631 100644
--- a/drivers/i2c/busses/i2c-ismt.c
+++ b/drivers/i2c/busses/i2c-ismt.c
@@ -14,10 +14,6 @@
14 * WITHOUT ANY WARRANTY; without even the implied warranty of 14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details. 16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution 17 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL. 18 * in the file called LICENSE.GPL.
23 * 19 *
diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
index b170bdffb5de..88eda09e73c0 100644
--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
@@ -12,10 +12,6 @@
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */ 15 */
20 16
21/* 17/*
diff --git a/drivers/i2c/busses/i2c-nforce2.c b/drivers/i2c/busses/i2c-nforce2.c
index ee3a76c7ae97..70b3c9158509 100644
--- a/drivers/i2c/busses/i2c-nforce2.c
+++ b/drivers/i2c/busses/i2c-nforce2.c
@@ -17,10 +17,6 @@
17 but WITHOUT ANY WARRANTY; without even the implied warranty of 17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 GNU General Public License for more details. 19 GNU General Public License for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with this program; if not, write to the Free Software
23 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24*/ 20*/
25 21
26/* 22/*
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 0dffb0e62c3b..277a2288d4a8 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -22,10 +22,6 @@
22 * but WITHOUT ANY WARRANTY; without even the implied warranty of 22 * but WITHOUT ANY WARRANTY; without even the implied warranty of
23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 23 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
24 * GNU General Public License for more details. 24 * GNU General Public License for more details.
25 *
26 * You should have received a copy of the GNU General Public License
27 * along with this program; if not, write to the Free Software
28 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
29 */ 25 */
30 26
31#include <linux/module.h> 27#include <linux/module.h>
@@ -926,14 +922,12 @@ omap_i2c_isr_thread(int this_irq, void *dev_id)
926 if (stat & OMAP_I2C_STAT_NACK) { 922 if (stat & OMAP_I2C_STAT_NACK) {
927 err |= OMAP_I2C_STAT_NACK; 923 err |= OMAP_I2C_STAT_NACK;
928 omap_i2c_ack_stat(dev, OMAP_I2C_STAT_NACK); 924 omap_i2c_ack_stat(dev, OMAP_I2C_STAT_NACK);
929 break;
930 } 925 }
931 926
932 if (stat & OMAP_I2C_STAT_AL) { 927 if (stat & OMAP_I2C_STAT_AL) {
933 dev_err(dev->dev, "Arbitration lost\n"); 928 dev_err(dev->dev, "Arbitration lost\n");
934 err |= OMAP_I2C_STAT_AL; 929 err |= OMAP_I2C_STAT_AL;
935 omap_i2c_ack_stat(dev, OMAP_I2C_STAT_AL); 930 omap_i2c_ack_stat(dev, OMAP_I2C_STAT_AL);
936 break;
937 } 931 }
938 932
939 /* 933 /*
@@ -958,11 +952,13 @@ omap_i2c_isr_thread(int this_irq, void *dev_id)
958 if (dev->fifo_size) 952 if (dev->fifo_size)
959 num_bytes = dev->buf_len; 953 num_bytes = dev->buf_len;
960 954
961 omap_i2c_receive_data(dev, num_bytes, true); 955 if (dev->errata & I2C_OMAP_ERRATA_I207) {
962
963 if (dev->errata & I2C_OMAP_ERRATA_I207)
964 i2c_omap_errata_i207(dev, stat); 956 i2c_omap_errata_i207(dev, stat);
957 num_bytes = (omap_i2c_read_reg(dev,
958 OMAP_I2C_BUFSTAT_REG) >> 8) & 0x3F;
959 }
965 960
961 omap_i2c_receive_data(dev, num_bytes, true);
966 omap_i2c_ack_stat(dev, OMAP_I2C_STAT_RDR); 962 omap_i2c_ack_stat(dev, OMAP_I2C_STAT_RDR);
967 continue; 963 continue;
968 } 964 }
diff --git a/drivers/i2c/busses/i2c-parport-light.c b/drivers/i2c/busses/i2c-parport-light.c
index 62f55fe624cb..d1f625f923c7 100644
--- a/drivers/i2c/busses/i2c-parport-light.c
+++ b/drivers/i2c/busses/i2c-parport-light.c
@@ -18,10 +18,6 @@
18 but WITHOUT ANY WARRANTY; without even the implied warranty of 18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 GNU General Public License for more details. 20 GNU General Public License for more details.
21
22 You should have received a copy of the GNU General Public License
23 along with this program; if not, write to the Free Software
24 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 * ------------------------------------------------------------------------ */ 21 * ------------------------------------------------------------------------ */
26 22
27#include <linux/kernel.h> 23#include <linux/kernel.h>
diff --git a/drivers/i2c/busses/i2c-parport.c b/drivers/i2c/busses/i2c-parport.c
index a27aae2d6757..a1fac5aa9bae 100644
--- a/drivers/i2c/busses/i2c-parport.c
+++ b/drivers/i2c/busses/i2c-parport.c
@@ -18,10 +18,6 @@
18 but WITHOUT ANY WARRANTY; without even the implied warranty of 18 but WITHOUT ANY WARRANTY; without even the implied warranty of
19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 19 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
20 GNU General Public License for more details. 20 GNU General Public License for more details.
21
22 You should have received a copy of the GNU General Public License
23 along with this program; if not, write to the Free Software
24 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
25 * ------------------------------------------------------------------------ */ 21 * ------------------------------------------------------------------------ */
26 22
27#include <linux/kernel.h> 23#include <linux/kernel.h>
diff --git a/drivers/i2c/busses/i2c-parport.h b/drivers/i2c/busses/i2c-parport.h
index e572f3aac0f7..4e1294536805 100644
--- a/drivers/i2c/busses/i2c-parport.h
+++ b/drivers/i2c/busses/i2c-parport.h
@@ -12,10 +12,6 @@
12 but WITHOUT ANY WARRANTY; without even the implied warranty of 12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details. 14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 * ------------------------------------------------------------------------ */ 15 * ------------------------------------------------------------------------ */
20 16
21#define PORT_DATA 0 17#define PORT_DATA 0
diff --git a/drivers/i2c/busses/i2c-pasemi.c b/drivers/i2c/busses/i2c-pasemi.c
index 7a9dce43e115..df1dbc92a024 100644
--- a/drivers/i2c/busses/i2c-pasemi.c
+++ b/drivers/i2c/busses/i2c-pasemi.c
@@ -11,10 +11,6 @@
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of 11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */ 14 */
19 15
20#include <linux/module.h> 16#include <linux/module.h>
diff --git a/drivers/i2c/busses/i2c-pca-isa.c b/drivers/i2c/busses/i2c-pca-isa.c
index 323f061a3163..e0eb4ca0102e 100644
--- a/drivers/i2c/busses/i2c-pca-isa.c
+++ b/drivers/i2c/busses/i2c-pca-isa.c
@@ -12,10 +12,6 @@
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */ 15 */
20 16
21#include <linux/kernel.h> 17#include <linux/kernel.h>
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index a6f54ba27e2a..67cbec6796a0 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -11,10 +11,6 @@
11 but WITHOUT ANY WARRANTY; without even the implied warranty of 11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details. 13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the Free Software
17 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18*/ 14*/
19 15
20/* 16/*
diff --git a/drivers/i2c/busses/i2c-pmcmsp.c b/drivers/i2c/busses/i2c-pmcmsp.c
index 8564768fee32..177834e2d841 100644
--- a/drivers/i2c/busses/i2c-pmcmsp.c
+++ b/drivers/i2c/busses/i2c-pmcmsp.c
@@ -18,10 +18,6 @@
18 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 18 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
19 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 19 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
20 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 20 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
21 *
22 * You should have received a copy of the GNU General Public License along
23 * with this program; if not, write to the Free Software Foundation, Inc.,
24 * 675 Mass Ave, Cambridge, MA 02139, USA.
25 */ 21 */
26 22
27#include <linux/kernel.h> 23#include <linux/kernel.h>
diff --git a/drivers/i2c/busses/i2c-powermac.c b/drivers/i2c/busses/i2c-powermac.c
index 01e967763c2a..60a53c169ed2 100644
--- a/drivers/i2c/busses/i2c-powermac.c
+++ b/drivers/i2c/busses/i2c-powermac.c
@@ -14,10 +14,6 @@
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details. 15 GNU General Public License for more details.
16 16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20
21*/ 17*/
22 18
23#include <linux/module.h> 19#include <linux/module.h>
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index e3b0337faeb7..65244774bfa3 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -14,10 +14,6 @@
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details. 16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21*/ 17*/
22 18
23#include <linux/kernel.h> 19#include <linux/kernel.h>
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 8b5e79cb4468..4855188747c9 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -14,10 +14,6 @@
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of 14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details. 16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */ 17 */
22 18
23#include <linux/kernel.h> 19#include <linux/kernel.h>
diff --git a/drivers/i2c/busses/i2c-sibyte.c b/drivers/i2c/busses/i2c-sibyte.c
index 0fe505d7abe9..2b6219d86b0f 100644
--- a/drivers/i2c/busses/i2c-sibyte.c
+++ b/drivers/i2c/busses/i2c-sibyte.c
@@ -12,10 +12,6 @@
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */ 15 */
20 16
21#include <linux/kernel.h> 17#include <linux/kernel.h>
diff --git a/drivers/i2c/busses/i2c-simtec.c b/drivers/i2c/busses/i2c-simtec.c
index 964e5c6f84ab..15ac8395dcd3 100644
--- a/drivers/i2c/busses/i2c-simtec.c
+++ b/drivers/i2c/busses/i2c-simtec.c
@@ -12,10 +12,6 @@
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of 12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details. 14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19*/ 15*/
20 16
21#include <linux/kernel.h> 17#include <linux/kernel.h>
diff --git a/drivers/i2c/busses/i2c-sis5595.c b/drivers/i2c/busses/i2c-sis5595.c
index ac9bc33acef4..7d58a40faf2d 100644
--- a/drivers/i2c/busses/i2c-sis5595.c
+++ b/drivers/i2c/busses/i2c-sis5595.c
@@ -11,10 +11,6 @@
11 but WITHOUT ANY WARRANTY; without even the implied warranty of 11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details. 13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the Free Software
17 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18*/ 14*/
19 15
20/* Note: we assume there can only be one SIS5595 with one SMBus interface */ 16/* Note: we assume there can only be one SIS5595 with one SMBus interface */
diff --git a/drivers/i2c/busses/i2c-sis630.c b/drivers/i2c/busses/i2c-sis630.c
index c6366733008d..1e6805b5cef2 100644
--- a/drivers/i2c/busses/i2c-sis630.c
+++ b/drivers/i2c/busses/i2c-sis630.c
@@ -10,10 +10,6 @@
10 but WITHOUT ANY WARRANTY; without even the implied warranty of 10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 GNU General Public License for more details. 12 GNU General Public License for more details.
13
14 You should have received a copy of the GNU General Public License
15 along with this program; if not, write to the Free Software
16 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17*/ 13*/
18 14
19/* 15/*
diff --git a/drivers/i2c/busses/i2c-sis96x.c b/drivers/i2c/busses/i2c-sis96x.c
index 8dc2fc5f74ff..44b904426073 100644
--- a/drivers/i2c/busses/i2c-sis96x.c
+++ b/drivers/i2c/busses/i2c-sis96x.c
@@ -10,10 +10,6 @@
10 but WITHOUT ANY WARRANTY; without even the implied warranty of 10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 GNU General Public License for more details. 12 GNU General Public License for more details.
13
14 You should have received a copy of the GNU General Public License
15 along with this program; if not, write to the Free Software
16 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
17*/ 13*/
18 14
19/* 15/*
diff --git a/drivers/i2c/busses/i2c-taos-evm.c b/drivers/i2c/busses/i2c-taos-evm.c
index 10855a0b7e7f..4c7fc2d47014 100644
--- a/drivers/i2c/busses/i2c-taos-evm.c
+++ b/drivers/i2c/busses/i2c-taos-evm.c
@@ -13,10 +13,6 @@
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details. 15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 */ 16 */
21 17
22#include <linux/delay.h> 18#include <linux/delay.h>
diff --git a/drivers/i2c/busses/i2c-via.c b/drivers/i2c/busses/i2c-via.c
index f4a1ed757612..59b1d233ca7b 100644
--- a/drivers/i2c/busses/i2c-via.c
+++ b/drivers/i2c/busses/i2c-via.c
@@ -12,10 +12,6 @@
12 but WITHOUT ANY WARRANTY; without even the implied warranty of 12 but WITHOUT ANY WARRANTY; without even the implied warranty of
13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 13 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 GNU General Public License for more details. 14 GNU General Public License for more details.
15
16 You should have received a copy of the GNU General Public License
17 along with this program; if not, write to the Free Software
18 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19*/ 15*/
20 16
21#include <linux/kernel.h> 17#include <linux/kernel.h>
diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c
index 6841200b6e50..0ee2646f3b00 100644
--- a/drivers/i2c/busses/i2c-viapro.c
+++ b/drivers/i2c/busses/i2c-viapro.c
@@ -13,10 +13,6 @@
13 but WITHOUT ANY WARRANTY; without even the implied warranty of 13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details. 15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20*/ 16*/
21 17
22/* 18/*
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index ade9223912d3..cc65ea0b818f 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -12,10 +12,6 @@
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details. 13 * GNU General Public License for more details.
14 * 14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
18 *
19 * 15 *
20 * This code was implemented by Mocean Laboratories AB when porting linux 16 * This code was implemented by Mocean Laboratories AB when porting linux
21 * to the automotive development board Russellville. The copyright holder 17 * to the automotive development board Russellville. The copyright holder
diff --git a/drivers/i2c/busses/scx200_acb.c b/drivers/i2c/busses/scx200_acb.c
index ff3f5747e43b..5153354b1a6b 100644
--- a/drivers/i2c/busses/scx200_acb.c
+++ b/drivers/i2c/busses/scx200_acb.c
@@ -17,10 +17,6 @@
17 but WITHOUT ANY WARRANTY; without even the implied warranty of 17 but WITHOUT ANY WARRANTY; without even the implied warranty of
18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 18 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 General Public License for more details. 19 General Public License for more details.
20
21 You should have received a copy of the GNU General Public License
22 along with this program; if not, write to the Free Software
23 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
24*/ 20*/
25 21
26#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 22#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
diff --git a/drivers/i2c/i2c-boardinfo.c b/drivers/i2c/i2c-boardinfo.c
index f24cc64e2e8c..90e322959303 100644
--- a/drivers/i2c/i2c-boardinfo.c
+++ b/drivers/i2c/i2c-boardinfo.c
@@ -10,11 +10,6 @@
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details. 12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
17 * MA 02110-1301 USA.
18 */ 13 */
19 14
20#include <linux/kernel.h> 15#include <linux/kernel.h>
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 2f90ac6a7f79..f43b4e11647a 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -10,12 +10,7 @@
10 This program is distributed in the hope that it will be useful, 10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of 11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details. 13 GNU General Public License for more details. */
14
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the Free Software
17 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
18 MA 02110-1301 USA. */
19/* ------------------------------------------------------------------------- */ 14/* ------------------------------------------------------------------------- */
20 15
21/* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi>. 16/* With some changes from Kyösti Mälkki <kmalkki@cc.hut.fi>.
@@ -670,6 +665,9 @@ static int i2c_device_remove(struct device *dev)
670 status = driver->remove(client); 665 status = driver->remove(client);
671 } 666 }
672 667
668 if (dev->of_node)
669 irq_dispose_mapping(client->irq);
670
673 dev_pm_domain_detach(&client->dev, true); 671 dev_pm_domain_detach(&client->dev, true);
674 return status; 672 return status;
675} 673}
diff --git a/drivers/i2c/i2c-core.h b/drivers/i2c/i2c-core.h
index 18a8fd21d2c2..17700bfddcf5 100644
--- a/drivers/i2c/i2c-core.h
+++ b/drivers/i2c/i2c-core.h
@@ -10,11 +10,6 @@
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of 10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details. 12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
17 * MA 02110-1301 USA.
18 */ 13 */
19 14
20#include <linux/rwsem.h> 15#include <linux/rwsem.h>
diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
index 80b47e8ce030..71c7a3975b62 100644
--- a/drivers/i2c/i2c-dev.c
+++ b/drivers/i2c/i2c-dev.c
@@ -14,11 +14,6 @@
14 but WITHOUT ANY WARRANTY; without even the implied warranty of 14 but WITHOUT ANY WARRANTY; without even the implied warranty of
15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 15 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 GNU General Public License for more details. 16 GNU General Public License for more details.
17
18 You should have received a copy of the GNU General Public License
19 along with this program; if not, write to the Free Software
20 Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
21 MA 02110-1301 USA.
22*/ 17*/
23 18
24/* Note that this is a complete rewrite of Simon Vogl's i2c-dev module. 19/* Note that this is a complete rewrite of Simon Vogl's i2c-dev module.
diff --git a/drivers/i2c/i2c-smbus.c b/drivers/i2c/i2c-smbus.c
index fc99f0d6b4a5..9ebf9cb4ad7a 100644
--- a/drivers/i2c/i2c-smbus.c
+++ b/drivers/i2c/i2c-smbus.c
@@ -13,11 +13,6 @@
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of 13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details. 15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
20 * MA 02110-1301 USA.
21 */ 16 */
22 17
23#include <linux/kernel.h> 18#include <linux/kernel.h>
diff --git a/drivers/i2c/i2c-stub.c b/drivers/i2c/i2c-stub.c
index d241aa295d96..af2a94e1140b 100644
--- a/drivers/i2c/i2c-stub.c
+++ b/drivers/i2c/i2c-stub.c
@@ -13,10 +13,6 @@
13 but WITHOUT ANY WARRANTY; without even the implied warranty of 13 but WITHOUT ANY WARRANTY; without even the implied warranty of
14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 14 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 GNU General Public License for more details. 15 GNU General Public License for more details.
16
17 You should have received a copy of the GNU General Public License
18 along with this program; if not, write to the Free Software
19 Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20*/ 16*/
21 17
22#define DEBUG 1 18#define DEBUG 1
diff --git a/drivers/iio/accel/bmc150-accel.c b/drivers/iio/accel/bmc150-accel.c
index 22c096ce39ad..513bd6d14293 100644
--- a/drivers/iio/accel/bmc150-accel.c
+++ b/drivers/iio/accel/bmc150-accel.c
@@ -44,6 +44,9 @@
44 44
45#define BMC150_ACCEL_REG_INT_STATUS_2 0x0B 45#define BMC150_ACCEL_REG_INT_STATUS_2 0x0B
46#define BMC150_ACCEL_ANY_MOTION_MASK 0x07 46#define BMC150_ACCEL_ANY_MOTION_MASK 0x07
47#define BMC150_ACCEL_ANY_MOTION_BIT_X BIT(0)
48#define BMC150_ACCEL_ANY_MOTION_BIT_Y BIT(1)
49#define BMC150_ACCEL_ANY_MOTION_BIT_Z BIT(2)
47#define BMC150_ACCEL_ANY_MOTION_BIT_SIGN BIT(3) 50#define BMC150_ACCEL_ANY_MOTION_BIT_SIGN BIT(3)
48 51
49#define BMC150_ACCEL_REG_PMU_LPW 0x11 52#define BMC150_ACCEL_REG_PMU_LPW 0x11
@@ -92,9 +95,9 @@
92#define BMC150_ACCEL_SLOPE_THRES_MASK 0xFF 95#define BMC150_ACCEL_SLOPE_THRES_MASK 0xFF
93 96
94/* Slope duration in terms of number of samples */ 97/* Slope duration in terms of number of samples */
95#define BMC150_ACCEL_DEF_SLOPE_DURATION 2 98#define BMC150_ACCEL_DEF_SLOPE_DURATION 1
96/* in terms of multiples of g's/LSB, based on range */ 99/* in terms of multiples of g's/LSB, based on range */
97#define BMC150_ACCEL_DEF_SLOPE_THRESHOLD 5 100#define BMC150_ACCEL_DEF_SLOPE_THRESHOLD 1
98 101
99#define BMC150_ACCEL_REG_XOUT_L 0x02 102#define BMC150_ACCEL_REG_XOUT_L 0x02
100 103
@@ -536,6 +539,9 @@ static int bmc150_accel_set_power_state(struct bmc150_accel_data *data, bool on)
536 if (ret < 0) { 539 if (ret < 0) {
537 dev_err(&data->client->dev, 540 dev_err(&data->client->dev,
538 "Failed: bmc150_accel_set_power_state for %d\n", on); 541 "Failed: bmc150_accel_set_power_state for %d\n", on);
542 if (on)
543 pm_runtime_put_noidle(&data->client->dev);
544
539 return ret; 545 return ret;
540 } 546 }
541 547
@@ -811,6 +817,7 @@ static int bmc150_accel_write_event_config(struct iio_dev *indio_dev,
811 817
812 ret = bmc150_accel_setup_any_motion_interrupt(data, state); 818 ret = bmc150_accel_setup_any_motion_interrupt(data, state);
813 if (ret < 0) { 819 if (ret < 0) {
820 bmc150_accel_set_power_state(data, false);
814 mutex_unlock(&data->mutex); 821 mutex_unlock(&data->mutex);
815 return ret; 822 return ret;
816 } 823 }
@@ -846,7 +853,7 @@ static const struct attribute_group bmc150_accel_attrs_group = {
846 853
847static const struct iio_event_spec bmc150_accel_event = { 854static const struct iio_event_spec bmc150_accel_event = {
848 .type = IIO_EV_TYPE_ROC, 855 .type = IIO_EV_TYPE_ROC,
849 .dir = IIO_EV_DIR_RISING | IIO_EV_DIR_FALLING, 856 .dir = IIO_EV_DIR_EITHER,
850 .mask_separate = BIT(IIO_EV_INFO_VALUE) | 857 .mask_separate = BIT(IIO_EV_INFO_VALUE) |
851 BIT(IIO_EV_INFO_ENABLE) | 858 BIT(IIO_EV_INFO_ENABLE) |
852 BIT(IIO_EV_INFO_PERIOD) 859 BIT(IIO_EV_INFO_PERIOD)
@@ -1054,6 +1061,7 @@ static int bmc150_accel_data_rdy_trigger_set_state(struct iio_trigger *trig,
1054 else 1061 else
1055 ret = bmc150_accel_setup_new_data_interrupt(data, state); 1062 ret = bmc150_accel_setup_new_data_interrupt(data, state);
1056 if (ret < 0) { 1063 if (ret < 0) {
1064 bmc150_accel_set_power_state(data, false);
1057 mutex_unlock(&data->mutex); 1065 mutex_unlock(&data->mutex);
1058 return ret; 1066 return ret;
1059 } 1067 }
@@ -1092,12 +1100,26 @@ static irqreturn_t bmc150_accel_event_handler(int irq, void *private)
1092 else 1100 else
1093 dir = IIO_EV_DIR_RISING; 1101 dir = IIO_EV_DIR_RISING;
1094 1102
1095 if (ret & BMC150_ACCEL_ANY_MOTION_MASK) 1103 if (ret & BMC150_ACCEL_ANY_MOTION_BIT_X)
1104 iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL,
1105 0,
1106 IIO_MOD_X,
1107 IIO_EV_TYPE_ROC,
1108 dir),
1109 data->timestamp);
1110 if (ret & BMC150_ACCEL_ANY_MOTION_BIT_Y)
1096 iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL, 1111 iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL,
1097 0, 1112 0,
1098 IIO_MOD_X_OR_Y_OR_Z, 1113 IIO_MOD_Y,
1099 IIO_EV_TYPE_ROC, 1114 IIO_EV_TYPE_ROC,
1100 IIO_EV_DIR_EITHER), 1115 dir),
1116 data->timestamp);
1117 if (ret & BMC150_ACCEL_ANY_MOTION_BIT_Z)
1118 iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ACCEL,
1119 0,
1120 IIO_MOD_Z,
1121 IIO_EV_TYPE_ROC,
1122 dir),
1101 data->timestamp); 1123 data->timestamp);
1102ack_intr_status: 1124ack_intr_status:
1103 if (!data->dready_trigger_on) 1125 if (!data->dready_trigger_on)
@@ -1354,10 +1376,14 @@ static int bmc150_accel_runtime_suspend(struct device *dev)
1354{ 1376{
1355 struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev)); 1377 struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
1356 struct bmc150_accel_data *data = iio_priv(indio_dev); 1378 struct bmc150_accel_data *data = iio_priv(indio_dev);
1379 int ret;
1357 1380
1358 dev_dbg(&data->client->dev, __func__); 1381 dev_dbg(&data->client->dev, __func__);
1382 ret = bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_SUSPEND, 0);
1383 if (ret < 0)
1384 return -EAGAIN;
1359 1385
1360 return bmc150_accel_set_mode(data, BMC150_ACCEL_SLEEP_MODE_SUSPEND, 0); 1386 return 0;
1361} 1387}
1362 1388
1363static int bmc150_accel_runtime_resume(struct device *dev) 1389static int bmc150_accel_runtime_resume(struct device *dev)
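
The bmc150 event-handler hunk replaces the single IIO_MOD_X_OR_Y_OR_Z event with one event per axis, keyed off the new per-axis status bits defined at the top of the diff. A standalone C sketch of that bitmask decode (the bit positions mirror the new defines; everything else is illustrative):

#include <stdint.h>
#include <stdio.h>

#define ANY_MOTION_BIT_X (1u << 0)
#define ANY_MOTION_BIT_Y (1u << 1)
#define ANY_MOTION_BIT_Z (1u << 2)

/* Report one event per axis whose status bit is set, instead of a single
 * combined "X or Y or Z" event. */
static void report_motion(uint8_t status)
{
	if (status & ANY_MOTION_BIT_X)
		printf("motion on X\n");
	if (status & ANY_MOTION_BIT_Y)
		printf("motion on Y\n");
	if (status & ANY_MOTION_BIT_Z)
		printf("motion on Z\n");
}

int main(void)
{
	report_motion(ANY_MOTION_BIT_X | ANY_MOTION_BIT_Z);
	return 0;
}
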
diff --git a/drivers/iio/accel/kxcjk-1013.c b/drivers/iio/accel/kxcjk-1013.c
index 98909a9e284e..320aa72c0349 100644
--- a/drivers/iio/accel/kxcjk-1013.c
+++ b/drivers/iio/accel/kxcjk-1013.c
@@ -269,6 +269,8 @@ static int kxcjk1013_set_range(struct kxcjk1013_data *data, int range_index)
269 return ret; 269 return ret;
270 } 270 }
271 271
272 ret &= ~(KXCJK1013_REG_CTRL1_BIT_GSEL0 |
273 KXCJK1013_REG_CTRL1_BIT_GSEL1);
272 ret |= (KXCJK1013_scale_table[range_index].gsel_0 << 3); 274 ret |= (KXCJK1013_scale_table[range_index].gsel_0 << 3);
273 ret |= (KXCJK1013_scale_table[range_index].gsel_1 << 4); 275 ret |= (KXCJK1013_scale_table[range_index].gsel_1 << 4);
274 276
@@ -894,7 +896,7 @@ static const struct attribute_group kxcjk1013_attrs_group = {
894 896
895static const struct iio_event_spec kxcjk1013_event = { 897static const struct iio_event_spec kxcjk1013_event = {
896 .type = IIO_EV_TYPE_THRESH, 898 .type = IIO_EV_TYPE_THRESH,
897 .dir = IIO_EV_DIR_RISING | IIO_EV_DIR_FALLING, 899 .dir = IIO_EV_DIR_EITHER,
898 .mask_separate = BIT(IIO_EV_INFO_VALUE) | 900 .mask_separate = BIT(IIO_EV_INFO_VALUE) |
899 BIT(IIO_EV_INFO_ENABLE) | 901 BIT(IIO_EV_INFO_ENABLE) |
900 BIT(IIO_EV_INFO_PERIOD) 902 BIT(IIO_EV_INFO_PERIOD)
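
The kxcjk-1013 range fix clears both GSEL bits before OR-ing in the new range; without the clear, bits left over from a previous range setting could corrupt the new one. A standalone C sketch of that read-modify-write pattern (the bit layout mirrors the driver, the function and values are illustrative):

#include <stdint.h>
#include <stdio.h>

#define GSEL0 (1u << 3)		/* mirrors KXCJK1013_REG_CTRL1_BIT_GSEL0 */
#define GSEL1 (1u << 4)		/* mirrors KXCJK1013_REG_CTRL1_BIT_GSEL1 */

/* Clear the field first, then set the new value, so stale bits from the
 * previous setting cannot linger. */
static uint8_t set_range(uint8_t ctrl1, uint8_t gsel0, uint8_t gsel1)
{
	ctrl1 &= (uint8_t)~(GSEL0 | GSEL1);	/* the line the patch adds */
	ctrl1 |= (uint8_t)(gsel0 << 3);
	ctrl1 |= (uint8_t)(gsel1 << 4);
	return ctrl1;
}

int main(void)
{
	uint8_t reg = 0xFF;			/* both GSEL bits currently set */
	printf("new ctrl1 = 0x%02x\n", (unsigned int)set_range(reg, 1, 0));
	return 0;				/* prints 0xef */
}
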
diff --git a/drivers/iio/adc/men_z188_adc.c b/drivers/iio/adc/men_z188_adc.c
index b58d6302521f..d095efe1ba14 100644
--- a/drivers/iio/adc/men_z188_adc.c
+++ b/drivers/iio/adc/men_z188_adc.c
@@ -152,6 +152,7 @@ static void men_z188_remove(struct mcb_device *dev)
152 152
153static const struct mcb_device_id men_z188_ids[] = { 153static const struct mcb_device_id men_z188_ids[] = {
154 { .device = 0xbc }, 154 { .device = 0xbc },
155 { }
155}; 156};
156MODULE_DEVICE_TABLE(mcb, men_z188_ids); 157MODULE_DEVICE_TABLE(mcb, men_z188_ids);
157 158
diff --git a/drivers/iio/common/st_sensors/st_sensors_buffer.c b/drivers/iio/common/st_sensors/st_sensors_buffer.c
index 1665c8e4b62b..e18bc6782256 100644
--- a/drivers/iio/common/st_sensors/st_sensors_buffer.c
+++ b/drivers/iio/common/st_sensors/st_sensors_buffer.c
@@ -71,7 +71,7 @@ int st_sensors_get_buffer_element(struct iio_dev *indio_dev, u8 *buf)
71 goto st_sensors_free_memory; 71 goto st_sensors_free_memory;
72 } 72 }
73 73
74 for (i = 0; i < n * num_data_channels; i++) { 74 for (i = 0; i < n * byte_for_channel; i++) {
75 if (i < n) 75 if (i < n)
76 buf[i] = rx_array[i]; 76 buf[i] = rx_array[i];
77 else 77 else
diff --git a/drivers/iio/gyro/bmg160.c b/drivers/iio/gyro/bmg160.c
index 1f967e0d688e..d2fa526740ca 100644
--- a/drivers/iio/gyro/bmg160.c
+++ b/drivers/iio/gyro/bmg160.c
@@ -67,6 +67,9 @@
67#define BMG160_REG_INT_EN_0 0x15 67#define BMG160_REG_INT_EN_0 0x15
68#define BMG160_DATA_ENABLE_INT BIT(7) 68#define BMG160_DATA_ENABLE_INT BIT(7)
69 69
70#define BMG160_REG_INT_EN_1 0x16
71#define BMG160_INT1_BIT_OD BIT(1)
72
70#define BMG160_REG_XOUT_L 0x02 73#define BMG160_REG_XOUT_L 0x02
71#define BMG160_AXIS_TO_REG(axis) (BMG160_REG_XOUT_L + (axis * 2)) 74#define BMG160_AXIS_TO_REG(axis) (BMG160_REG_XOUT_L + (axis * 2))
72 75
@@ -82,6 +85,9 @@
82 85
83#define BMG160_REG_INT_STATUS_2 0x0B 86#define BMG160_REG_INT_STATUS_2 0x0B
84#define BMG160_ANY_MOTION_MASK 0x07 87#define BMG160_ANY_MOTION_MASK 0x07
88#define BMG160_ANY_MOTION_BIT_X BIT(0)
89#define BMG160_ANY_MOTION_BIT_Y BIT(1)
90#define BMG160_ANY_MOTION_BIT_Z BIT(2)
85 91
86#define BMG160_REG_TEMP 0x08 92#define BMG160_REG_TEMP 0x08
87#define BMG160_TEMP_CENTER_VAL 23 93#define BMG160_TEMP_CENTER_VAL 23
@@ -222,6 +228,19 @@ static int bmg160_chip_init(struct bmg160_data *data)
222 data->slope_thres = ret; 228 data->slope_thres = ret;
223 229
224 /* Set default interrupt mode */ 230 /* Set default interrupt mode */
231 ret = i2c_smbus_read_byte_data(data->client, BMG160_REG_INT_EN_1);
232 if (ret < 0) {
233 dev_err(&data->client->dev, "Error reading reg_int_en_1\n");
234 return ret;
235 }
236 ret &= ~BMG160_INT1_BIT_OD;
237 ret = i2c_smbus_write_byte_data(data->client,
238 BMG160_REG_INT_EN_1, ret);
239 if (ret < 0) {
240 dev_err(&data->client->dev, "Error writing reg_int_en_1\n");
241 return ret;
242 }
243
225 ret = i2c_smbus_write_byte_data(data->client, 244 ret = i2c_smbus_write_byte_data(data->client,
226 BMG160_REG_INT_RST_LATCH, 245 BMG160_REG_INT_RST_LATCH,
227 BMG160_INT_MODE_LATCH_INT | 246 BMG160_INT_MODE_LATCH_INT |
@@ -250,6 +269,9 @@ static int bmg160_set_power_state(struct bmg160_data *data, bool on)
250 if (ret < 0) { 269 if (ret < 0) {
251 dev_err(&data->client->dev, 270 dev_err(&data->client->dev,
252 "Failed: bmg160_set_power_state for %d\n", on); 271 "Failed: bmg160_set_power_state for %d\n", on);
272 if (on)
273 pm_runtime_put_noidle(&data->client->dev);
274
253 return ret; 275 return ret;
254 } 276 }
255#endif 277#endif
@@ -705,6 +727,7 @@ static int bmg160_write_event_config(struct iio_dev *indio_dev,
705 727
706 ret = bmg160_setup_any_motion_interrupt(data, state); 728 ret = bmg160_setup_any_motion_interrupt(data, state);
707 if (ret < 0) { 729 if (ret < 0) {
730 bmg160_set_power_state(data, false);
708 mutex_unlock(&data->mutex); 731 mutex_unlock(&data->mutex);
709 return ret; 732 return ret;
710 } 733 }
@@ -743,7 +766,7 @@ static const struct attribute_group bmg160_attrs_group = {
743 766
744static const struct iio_event_spec bmg160_event = { 767static const struct iio_event_spec bmg160_event = {
745 .type = IIO_EV_TYPE_ROC, 768 .type = IIO_EV_TYPE_ROC,
746 .dir = IIO_EV_DIR_RISING | IIO_EV_DIR_FALLING, 769 .dir = IIO_EV_DIR_EITHER,
747 .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) | 770 .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) |
748 BIT(IIO_EV_INFO_ENABLE) 771 BIT(IIO_EV_INFO_ENABLE)
749}; 772};
@@ -871,6 +894,7 @@ static int bmg160_data_rdy_trigger_set_state(struct iio_trigger *trig,
871 else 894 else
872 ret = bmg160_setup_new_data_interrupt(data, state); 895 ret = bmg160_setup_new_data_interrupt(data, state);
873 if (ret < 0) { 896 if (ret < 0) {
897 bmg160_set_power_state(data, false);
874 mutex_unlock(&data->mutex); 898 mutex_unlock(&data->mutex);
875 return ret; 899 return ret;
876 } 900 }
@@ -908,10 +932,24 @@ static irqreturn_t bmg160_event_handler(int irq, void *private)
908 else 932 else
909 dir = IIO_EV_DIR_FALLING; 933 dir = IIO_EV_DIR_FALLING;
910 934
911 if (ret & BMG160_ANY_MOTION_MASK) 935 if (ret & BMG160_ANY_MOTION_BIT_X)
912 iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ANGL_VEL, 936 iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ANGL_VEL,
913 0, 937 0,
914 IIO_MOD_X_OR_Y_OR_Z, 938 IIO_MOD_X,
939 IIO_EV_TYPE_ROC,
940 dir),
941 data->timestamp);
942 if (ret & BMG160_ANY_MOTION_BIT_Y)
943 iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ANGL_VEL,
944 0,
945 IIO_MOD_Y,
946 IIO_EV_TYPE_ROC,
947 dir),
948 data->timestamp);
949 if (ret & BMG160_ANY_MOTION_BIT_Z)
950 iio_push_event(indio_dev, IIO_MOD_EVENT_CODE(IIO_ANGL_VEL,
951 0,
952 IIO_MOD_Z,
915 IIO_EV_TYPE_ROC, 953 IIO_EV_TYPE_ROC,
916 dir), 954 dir),
917 data->timestamp); 955 data->timestamp);
@@ -1169,8 +1207,15 @@ static int bmg160_runtime_suspend(struct device *dev)
1169{ 1207{
1170 struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev)); 1208 struct iio_dev *indio_dev = i2c_get_clientdata(to_i2c_client(dev));
1171 struct bmg160_data *data = iio_priv(indio_dev); 1209 struct bmg160_data *data = iio_priv(indio_dev);
1210 int ret;
1211
1212 ret = bmg160_set_mode(data, BMG160_MODE_SUSPEND);
1213 if (ret < 0) {
1214 dev_err(&data->client->dev, "set mode failed\n");
1215 return -EAGAIN;
1216 }
1172 1217
1173 return bmg160_set_mode(data, BMG160_MODE_SUSPEND); 1218 return 0;
1174} 1219}
1175 1220
1176static int bmg160_runtime_resume(struct device *dev) 1221static int bmg160_runtime_resume(struct device *dev)
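
As in the bmc150 hunk earlier, bmg160_set_power_state() now calls pm_runtime_put_noidle() when the power-up path fails, so the usage count taken on the way in is released again. A standalone C analogue of that balancing rule (the plain counter stands in for the runtime-PM usage count; all names and the failure injection are illustrative):

#include <stdio.h>

static int usage_count;			/* stands in for the runtime-PM usage count */

static int hw_power_on(void) { return -1; }	/* simulate a failing power-up */

/* The "on" path bumps the count before touching the hardware; if that
 * fails, the count must be dropped again or it leaks forever and the
 * device can never runtime-suspend. */
static int set_power_state(int on)
{
	int ret = 0;

	if (on) {
		usage_count++;		/* analogue of pm_runtime_get_sync() */
		ret = hw_power_on();
		if (ret < 0)
			usage_count--;	/* analogue of pm_runtime_put_noidle() */
	} else {
		usage_count--;
	}
	return ret;
}

int main(void)
{
	set_power_state(1);		/* fails, but the count stays balanced */
	printf("usage_count = %d\n", usage_count);	/* prints 0 */
	return 0;
}
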
diff --git a/drivers/iio/light/tsl4531.c b/drivers/iio/light/tsl4531.c
index a15006efa137..0763b8632573 100644
--- a/drivers/iio/light/tsl4531.c
+++ b/drivers/iio/light/tsl4531.c
@@ -230,9 +230,12 @@ static int tsl4531_resume(struct device *dev)
230 return i2c_smbus_write_byte_data(to_i2c_client(dev), TSL4531_CONTROL, 230 return i2c_smbus_write_byte_data(to_i2c_client(dev), TSL4531_CONTROL,
231 TSL4531_MODE_NORMAL); 231 TSL4531_MODE_NORMAL);
232} 232}
233#endif
234 233
235static SIMPLE_DEV_PM_OPS(tsl4531_pm_ops, tsl4531_suspend, tsl4531_resume); 234static SIMPLE_DEV_PM_OPS(tsl4531_pm_ops, tsl4531_suspend, tsl4531_resume);
235#define TSL4531_PM_OPS (&tsl4531_pm_ops)
236#else
237#define TSL4531_PM_OPS NULL
238#endif
236 239
237static const struct i2c_device_id tsl4531_id[] = { 240static const struct i2c_device_id tsl4531_id[] = {
238 { "tsl4531", 0 }, 241 { "tsl4531", 0 },
@@ -243,7 +246,7 @@ MODULE_DEVICE_TABLE(i2c, tsl4531_id);
243static struct i2c_driver tsl4531_driver = { 246static struct i2c_driver tsl4531_driver = {
244 .driver = { 247 .driver = {
245 .name = TSL4531_DRV_NAME, 248 .name = TSL4531_DRV_NAME,
246 .pm = &tsl4531_pm_ops, 249 .pm = TSL4531_PM_OPS,
247 .owner = THIS_MODULE, 250 .owner = THIS_MODULE,
248 }, 251 },
249 .probe = tsl4531_probe, 252 .probe = tsl4531_probe,
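The tsl4531 change moves the #endif so that the suspend/resume callbacks and SIMPLE_DEV_PM_OPS() only exist under CONFIG_PM_SLEEP, while the driver structure references a macro that degrades to NULL otherwise. A hedged sketch of that idiom with generic names:

	#include <linux/pm.h>

	#ifdef CONFIG_PM_SLEEP
	static int foo_suspend(struct device *dev) { return 0; }
	static int foo_resume(struct device *dev) { return 0; }

	static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
	#define FOO_PM_OPS (&foo_pm_ops)
	#else
	#define FOO_PM_OPS NULL	/* no callbacks when sleep support is disabled */
	#endif

	/* ...and in the driver definition:  .driver = { .pm = FOO_PM_OPS, }, */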
diff --git a/drivers/iio/proximity/as3935.c b/drivers/iio/proximity/as3935.c
index 5e780ef206f3..8349cc0fdf66 100644
--- a/drivers/iio/proximity/as3935.c
+++ b/drivers/iio/proximity/as3935.c
@@ -330,7 +330,7 @@ static int as3935_probe(struct spi_device *spi)
330 return -EINVAL; 330 return -EINVAL;
331 } 331 }
332 332
333 indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(st)); 333 indio_dev = devm_iio_device_alloc(&spi->dev, sizeof(*st));
334 if (!indio_dev) 334 if (!indio_dev)
335 return -ENOMEM; 335 return -ENOMEM;
336 336
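The one-character as3935 fix matters because sizeof(st) is the size of a pointer, not of the state structure it points to, so devm_iio_device_alloc() had been reserving far too little private space. A standalone illustration (foo_state is just an example type):

	#include <stdio.h>

	struct foo_state { int regs[16]; };

	int main(void)
	{
		struct foo_state *st = NULL;

		/* sizeof(st) is the pointer size; sizeof(*st) is the object size. */
		printf("sizeof(st)=%zu sizeof(*st)=%zu\n", sizeof(st), sizeof(*st));
		return 0;
	}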
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index bda5994ceb68..8b72cf392b34 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -1173,18 +1173,24 @@ static struct ib_flow *mlx4_ib_create_flow(struct ib_qp *qp,
1173 err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i], 1173 err = __mlx4_ib_create_flow(qp, flow_attr, domain, type[i],
1174 &mflow->reg_id[i]); 1174 &mflow->reg_id[i]);
1175 if (err) 1175 if (err)
1176 goto err_free; 1176 goto err_create_flow;
1177 i++; 1177 i++;
1178 } 1178 }
1179 1179
1180 if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) { 1180 if (i < ARRAY_SIZE(type) && flow_attr->type == IB_FLOW_ATTR_NORMAL) {
1181 err = mlx4_ib_tunnel_steer_add(qp, flow_attr, &mflow->reg_id[i]); 1181 err = mlx4_ib_tunnel_steer_add(qp, flow_attr, &mflow->reg_id[i]);
1182 if (err) 1182 if (err)
1183 goto err_free; 1183 goto err_create_flow;
1184 i++;
1184 } 1185 }
1185 1186
1186 return &mflow->ibflow; 1187 return &mflow->ibflow;
1187 1188
1189err_create_flow:
1190 while (i) {
1191 (void)__mlx4_ib_destroy_flow(to_mdev(qp->device)->dev, mflow->reg_id[i]);
1192 i--;
1193 }
1188err_free: 1194err_free:
1189 kfree(mflow); 1195 kfree(mflow);
1190 return ERR_PTR(err); 1196 return ERR_PTR(err);
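The new err_create_flow label is the usual partial-failure cleanup: whatever was registered before the failing step gets torn down before the containing structure is freed. A hedged sketch of that shape, with stub helpers rather than the mlx4 calls:

	/* Sketch: create n resources, unwind the ones already created on failure. */
	static int create_one(int *slot) { *slot = 1; return 0; }	/* stub */
	static void destroy_one(int *slot) { *slot = 0; }		/* stub */

	static int create_all(int *res, int n)
	{
		int i, err;

		for (i = 0; i < n; i++) {
			err = create_one(&res[i]);
			if (err)
				goto err_unwind;
		}
		return 0;

	err_unwind:
		while (i--)				/* reverse-order teardown */
			destroy_one(&res[i]);
		return err;
	}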
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 0bea5776bcbc..10641b7816f4 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -115,9 +115,12 @@ isert_conn_setup_qp(struct isert_conn *isert_conn, struct rdma_cm_id *cma_id,
115 attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS; 115 attr.cap.max_recv_wr = ISERT_QP_MAX_RECV_DTOS;
116 /* 116 /*
117 * FIXME: Use devattr.max_sge - 2 for max_send_sge as 117 * FIXME: Use devattr.max_sge - 2 for max_send_sge as
118 * work-around for RDMA_READ.. 118 * work-around for RDMA_READs with ConnectX-2.
119 *
120 * Also, still make sure to have at least two SGEs for
121 * outgoing control PDU responses.
119 */ 122 */
120 attr.cap.max_send_sge = device->dev_attr.max_sge - 2; 123 attr.cap.max_send_sge = max(2, device->dev_attr.max_sge - 2);
121 isert_conn->max_sge = attr.cap.max_send_sge; 124 isert_conn->max_sge = attr.cap.max_send_sge;
122 125
123 attr.cap.max_recv_sge = 1; 126 attr.cap.max_recv_sge = 1;
@@ -225,12 +228,16 @@ isert_create_device_ib_res(struct isert_device *device)
225 struct isert_cq_desc *cq_desc; 228 struct isert_cq_desc *cq_desc;
226 struct ib_device_attr *dev_attr; 229 struct ib_device_attr *dev_attr;
227 int ret = 0, i, j; 230 int ret = 0, i, j;
231 int max_rx_cqe, max_tx_cqe;
228 232
229 dev_attr = &device->dev_attr; 233 dev_attr = &device->dev_attr;
230 ret = isert_query_device(ib_dev, dev_attr); 234 ret = isert_query_device(ib_dev, dev_attr);
231 if (ret) 235 if (ret)
232 return ret; 236 return ret;
233 237
238 max_rx_cqe = min(ISER_MAX_RX_CQ_LEN, dev_attr->max_cqe);
239 max_tx_cqe = min(ISER_MAX_TX_CQ_LEN, dev_attr->max_cqe);
240
 234 /* assign function handlers */ 241 /* assign function handlers */
235 if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS && 242 if (dev_attr->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS &&
236 dev_attr->device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) { 243 dev_attr->device_cap_flags & IB_DEVICE_SIGNATURE_HANDOVER) {
@@ -272,7 +279,7 @@ isert_create_device_ib_res(struct isert_device *device)
272 isert_cq_rx_callback, 279 isert_cq_rx_callback,
273 isert_cq_event_callback, 280 isert_cq_event_callback,
274 (void *)&cq_desc[i], 281 (void *)&cq_desc[i],
275 ISER_MAX_RX_CQ_LEN, i); 282 max_rx_cqe, i);
276 if (IS_ERR(device->dev_rx_cq[i])) { 283 if (IS_ERR(device->dev_rx_cq[i])) {
277 ret = PTR_ERR(device->dev_rx_cq[i]); 284 ret = PTR_ERR(device->dev_rx_cq[i]);
278 device->dev_rx_cq[i] = NULL; 285 device->dev_rx_cq[i] = NULL;
@@ -284,7 +291,7 @@ isert_create_device_ib_res(struct isert_device *device)
284 isert_cq_tx_callback, 291 isert_cq_tx_callback,
285 isert_cq_event_callback, 292 isert_cq_event_callback,
286 (void *)&cq_desc[i], 293 (void *)&cq_desc[i],
287 ISER_MAX_TX_CQ_LEN, i); 294 max_tx_cqe, i);
288 if (IS_ERR(device->dev_tx_cq[i])) { 295 if (IS_ERR(device->dev_tx_cq[i])) {
289 ret = PTR_ERR(device->dev_tx_cq[i]); 296 ret = PTR_ERR(device->dev_tx_cq[i]);
290 device->dev_tx_cq[i] = NULL; 297 device->dev_tx_cq[i] = NULL;
@@ -803,14 +810,25 @@ wake_up:
803 complete(&isert_conn->conn_wait); 810 complete(&isert_conn->conn_wait);
804} 811}
805 812
806static void 813static int
807isert_disconnected_handler(struct rdma_cm_id *cma_id, bool disconnect) 814isert_disconnected_handler(struct rdma_cm_id *cma_id, bool disconnect)
808{ 815{
809 struct isert_conn *isert_conn = (struct isert_conn *)cma_id->context; 816 struct isert_conn *isert_conn;
817
818 if (!cma_id->qp) {
819 struct isert_np *isert_np = cma_id->context;
820
821 isert_np->np_cm_id = NULL;
822 return -1;
823 }
824
825 isert_conn = (struct isert_conn *)cma_id->context;
810 826
811 isert_conn->disconnect = disconnect; 827 isert_conn->disconnect = disconnect;
812 INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work); 828 INIT_WORK(&isert_conn->conn_logout_work, isert_disconnect_work);
813 schedule_work(&isert_conn->conn_logout_work); 829 schedule_work(&isert_conn->conn_logout_work);
830
831 return 0;
814} 832}
815 833
816static int 834static int
@@ -825,6 +843,9 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
825 switch (event->event) { 843 switch (event->event) {
826 case RDMA_CM_EVENT_CONNECT_REQUEST: 844 case RDMA_CM_EVENT_CONNECT_REQUEST:
827 ret = isert_connect_request(cma_id, event); 845 ret = isert_connect_request(cma_id, event);
846 if (ret)
847 pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n",
848 event->event, ret);
828 break; 849 break;
829 case RDMA_CM_EVENT_ESTABLISHED: 850 case RDMA_CM_EVENT_ESTABLISHED:
830 isert_connected_handler(cma_id); 851 isert_connected_handler(cma_id);
@@ -834,7 +855,7 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
834 case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */ 855 case RDMA_CM_EVENT_DEVICE_REMOVAL: /* FALLTHRU */
835 disconnect = true; 856 disconnect = true;
836 case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */ 857 case RDMA_CM_EVENT_TIMEWAIT_EXIT: /* FALLTHRU */
837 isert_disconnected_handler(cma_id, disconnect); 858 ret = isert_disconnected_handler(cma_id, disconnect);
838 break; 859 break;
839 case RDMA_CM_EVENT_CONNECT_ERROR: 860 case RDMA_CM_EVENT_CONNECT_ERROR:
840 default: 861 default:
@@ -842,12 +863,6 @@ isert_cma_handler(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
842 break; 863 break;
843 } 864 }
844 865
845 if (ret != 0) {
846 pr_err("isert_cma_handler failed RDMA_CM_EVENT: 0x%08x %d\n",
847 event->event, ret);
848 dump_stack();
849 }
850
851 return ret; 866 return ret;
852} 867}
853 868
@@ -2185,7 +2200,7 @@ isert_put_response(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
2185 isert_cmd->tx_desc.num_sge = 2; 2200 isert_cmd->tx_desc.num_sge = 2;
2186 } 2201 }
2187 2202
2188 isert_init_send_wr(isert_conn, isert_cmd, send_wr, true); 2203 isert_init_send_wr(isert_conn, isert_cmd, send_wr, false);
2189 2204
2190 pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n"); 2205 pr_debug("Posting SCSI Response IB_WR_SEND >>>>>>>>>>>>>>>>>>>>>>\n");
2191 2206
@@ -2871,7 +2886,7 @@ isert_put_datain(struct iscsi_conn *conn, struct iscsi_cmd *cmd)
2871 &isert_cmd->tx_desc.iscsi_header); 2886 &isert_cmd->tx_desc.iscsi_header);
2872 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc); 2887 isert_init_tx_hdrs(isert_conn, &isert_cmd->tx_desc);
2873 isert_init_send_wr(isert_conn, isert_cmd, 2888 isert_init_send_wr(isert_conn, isert_cmd,
2874 &isert_cmd->tx_desc.send_wr, true); 2889 &isert_cmd->tx_desc.send_wr, false);
2875 isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr; 2890 isert_cmd->rdma_wr.s_send_wr.next = &isert_cmd->tx_desc.send_wr;
2876 wr->send_wr_num += 1; 2891 wr->send_wr_num += 1;
2877 } 2892 }
@@ -3140,7 +3155,7 @@ isert_accept_np(struct iscsi_np *np, struct iscsi_conn *conn)
3140 3155
3141accept_wait: 3156accept_wait:
3142 ret = down_interruptible(&isert_np->np_sem); 3157 ret = down_interruptible(&isert_np->np_sem);
3143 if (max_accept > 5) 3158 if (ret || max_accept > 5)
3144 return -ENODEV; 3159 return -ENODEV;
3145 3160
3146 spin_lock_bh(&np->np_thread_lock); 3161 spin_lock_bh(&np->np_thread_lock);
@@ -3190,7 +3205,8 @@ isert_free_np(struct iscsi_np *np)
3190{ 3205{
3191 struct isert_np *isert_np = (struct isert_np *)np->np_context; 3206 struct isert_np *isert_np = (struct isert_np *)np->np_context;
3192 3207
3193 rdma_destroy_id(isert_np->np_cm_id); 3208 if (isert_np->np_cm_id)
3209 rdma_destroy_id(isert_np->np_cm_id);
3194 3210
3195 np->np_context = NULL; 3211 np->np_context = NULL;
3196 kfree(isert_np); 3212 kfree(isert_np);
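Several of the isert hunks share one idea: never ask the HCA for more than ib_query_device() reported, and never drop below the driver's own floor, hence the min() on the CQ depths and the max(2, ...) on max_send_sge. A small standalone sketch of the clamping:

	/* Sketch: clamp a requested queue depth to [floor, device_max]. */
	static int clamp_depth(int wanted, int floor, int device_max)
	{
		if (wanted > device_max)
			wanted = device_max;
		if (wanted < floor)
			wanted = floor;
		return wanted;
	}

	/* e.g. clamp_depth(ISER_MAX_RX_CQ_LEN, 1, dev_attr->max_cqe) */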
diff --git a/drivers/infiniband/ulp/srpt/ib_srpt.c b/drivers/infiniband/ulp/srpt/ib_srpt.c
index 7206547c13ce..dc829682701a 100644
--- a/drivers/infiniband/ulp/srpt/ib_srpt.c
+++ b/drivers/infiniband/ulp/srpt/ib_srpt.c
@@ -2092,6 +2092,7 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
2092 if (!qp_init) 2092 if (!qp_init)
2093 goto out; 2093 goto out;
2094 2094
2095retry:
2095 ch->cq = ib_create_cq(sdev->device, srpt_completion, NULL, ch, 2096 ch->cq = ib_create_cq(sdev->device, srpt_completion, NULL, ch,
2096 ch->rq_size + srp_sq_size, 0); 2097 ch->rq_size + srp_sq_size, 0);
2097 if (IS_ERR(ch->cq)) { 2098 if (IS_ERR(ch->cq)) {
@@ -2115,6 +2116,13 @@ static int srpt_create_ch_ib(struct srpt_rdma_ch *ch)
2115 ch->qp = ib_create_qp(sdev->pd, qp_init); 2116 ch->qp = ib_create_qp(sdev->pd, qp_init);
2116 if (IS_ERR(ch->qp)) { 2117 if (IS_ERR(ch->qp)) {
2117 ret = PTR_ERR(ch->qp); 2118 ret = PTR_ERR(ch->qp);
2119 if (ret == -ENOMEM) {
2120 srp_sq_size /= 2;
2121 if (srp_sq_size >= MIN_SRPT_SQ_SIZE) {
2122 ib_destroy_cq(ch->cq);
2123 goto retry;
2124 }
2125 }
2118 printk(KERN_ERR "failed to create_qp ret= %d\n", ret); 2126 printk(KERN_ERR "failed to create_qp ret= %d\n", ret);
2119 goto err_destroy_cq; 2127 goto err_destroy_cq;
2120 } 2128 }
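The srpt retry label is a back-off loop: when ib_create_qp() fails with -ENOMEM the send-queue size is halved, the CQ is destroyed, and the CQ/QP pair is created again until the size falls below the driver's minimum. Roughly, with a placeholder helper instead of the verbs calls:

	#include <errno.h>

	static int try_create_cq_and_qp(int size);	/* placeholder for the verbs calls */

	/* Hedged sketch of the halve-and-retry loop used above. */
	static int create_with_backoff(int size, int floor)
	{
		int err;

		for (;;) {
			err = try_create_cq_and_qp(size);
			if (err != -ENOMEM)
				return err;	/* success, or an error retrying will not fix */
			size /= 2;
			if (size < floor)	/* cf. MIN_SRPT_SQ_SIZE above */
				return err;
		}
	}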
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index bc203485716d..8afa28e4570e 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -421,7 +421,7 @@ static int evdev_open(struct inode *inode, struct file *file)
421 421
422 err_free_client: 422 err_free_client:
423 evdev_detach_client(evdev, client); 423 evdev_detach_client(evdev, client);
424 kfree(client); 424 kvfree(client);
425 return error; 425 return error;
426} 426}
427 427
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 2ed7905a068f..fc55f0d15b70 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -1179,9 +1179,19 @@ static int xpad_probe(struct usb_interface *intf, const struct usb_device_id *id
1179 } 1179 }
1180 1180
1181 ep_irq_in = &intf->cur_altsetting->endpoint[1].desc; 1181 ep_irq_in = &intf->cur_altsetting->endpoint[1].desc;
1182 usb_fill_bulk_urb(xpad->bulk_out, udev, 1182 if (usb_endpoint_is_bulk_out(ep_irq_in)) {
1183 usb_sndbulkpipe(udev, ep_irq_in->bEndpointAddress), 1183 usb_fill_bulk_urb(xpad->bulk_out, udev,
1184 xpad->bdata, XPAD_PKT_LEN, xpad_bulk_out, xpad); 1184 usb_sndbulkpipe(udev,
1185 ep_irq_in->bEndpointAddress),
1186 xpad->bdata, XPAD_PKT_LEN,
1187 xpad_bulk_out, xpad);
1188 } else {
1189 usb_fill_int_urb(xpad->bulk_out, udev,
1190 usb_sndintpipe(udev,
1191 ep_irq_in->bEndpointAddress),
1192 xpad->bdata, XPAD_PKT_LEN,
1193 xpad_bulk_out, xpad, 0);
1194 }
1185 1195
1186 /* 1196 /*
1187 * Submit the int URB immediately rather than waiting for open 1197 * Submit the int URB immediately rather than waiting for open
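The xpad hunk stops assuming the second endpoint is bulk-out; it inspects the descriptor and fills either a bulk or an interrupt URB. The general shape, hedged and with generic names (the bInterval argument here is an assumption, the driver itself passes 0):

	/* Sketch: choose the URB type from the endpoint descriptor. */
	if (usb_endpoint_is_bulk_out(ep))
		usb_fill_bulk_urb(urb, udev,
				  usb_sndbulkpipe(udev, ep->bEndpointAddress),
				  buf, len, complete_fn, ctx);
	else if (usb_endpoint_is_int_out(ep))
		usb_fill_int_urb(urb, udev,
				 usb_sndintpipe(udev, ep->bEndpointAddress),
				 buf, len, complete_fn, ctx, ep->bInterval);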
diff --git a/drivers/input/keyboard/opencores-kbd.c b/drivers/input/keyboard/opencores-kbd.c
index 62abe2c16670..f8502bb29176 100644
--- a/drivers/input/keyboard/opencores-kbd.c
+++ b/drivers/input/keyboard/opencores-kbd.c
@@ -70,7 +70,7 @@ static int opencores_kbd_probe(struct platform_device *pdev)
70 70
71 opencores_kbd->addr = devm_ioremap_resource(&pdev->dev, res); 71 opencores_kbd->addr = devm_ioremap_resource(&pdev->dev, res);
72 if (IS_ERR(opencores_kbd->addr)) 72 if (IS_ERR(opencores_kbd->addr))
73 error = PTR_ERR(opencores_kbd->addr); 73 return PTR_ERR(opencores_kbd->addr);
74 74
75 input->name = pdev->name; 75 input->name = pdev->name;
76 input->phys = "opencores-kbd/input0"; 76 input->phys = "opencores-kbd/input0";
diff --git a/drivers/input/keyboard/stmpe-keypad.c b/drivers/input/keyboard/stmpe-keypad.c
index c6727dda68f2..ef5e67fb567e 100644
--- a/drivers/input/keyboard/stmpe-keypad.c
+++ b/drivers/input/keyboard/stmpe-keypad.c
@@ -86,7 +86,7 @@ static const struct stmpe_keypad_variant stmpe_keypad_variants[] = {
86 .max_cols = 8, 86 .max_cols = 8,
87 .max_rows = 12, 87 .max_rows = 12,
88 .col_gpios = 0x0000ff, /* GPIO 0 - 7*/ 88 .col_gpios = 0x0000ff, /* GPIO 0 - 7*/
89 .row_gpios = 0x1fef00, /* GPIO 8-14, 16-20 */ 89 .row_gpios = 0x1f7f00, /* GPIO 8-14, 16-20 */
90 }, 90 },
91 [STMPE2403] = { 91 [STMPE2403] = {
92 .auto_increment = true, 92 .auto_increment = true,
diff --git a/drivers/input/misc/ims-pcu.c b/drivers/input/misc/ims-pcu.c
index 719410feb84b..afed8e2b2f94 100644
--- a/drivers/input/misc/ims-pcu.c
+++ b/drivers/input/misc/ims-pcu.c
@@ -1381,7 +1381,7 @@ static ssize_t ims_pcu_ofn_reg_addr_store(struct device *dev,
1381 pcu->ofn_reg_addr = value; 1381 pcu->ofn_reg_addr = value;
1382 mutex_unlock(&pcu->cmd_mutex); 1382 mutex_unlock(&pcu->cmd_mutex);
1383 1383
1384 return error ?: count; 1384 return count;
1385} 1385}
1386 1386
1387static DEVICE_ATTR(reg_addr, S_IRUGO | S_IWUSR, 1387static DEVICE_ATTR(reg_addr, S_IRUGO | S_IWUSR,
diff --git a/drivers/input/misc/max77693-haptic.c b/drivers/input/misc/max77693-haptic.c
index 7b1fde93799e..ef6a9d650d69 100644
--- a/drivers/input/misc/max77693-haptic.c
+++ b/drivers/input/misc/max77693-haptic.c
@@ -194,7 +194,7 @@ static int max77693_haptic_play_effect(struct input_dev *dev, void *data,
194 struct ff_effect *effect) 194 struct ff_effect *effect)
195{ 195{
196 struct max77693_haptic *haptic = input_get_drvdata(dev); 196 struct max77693_haptic *haptic = input_get_drvdata(dev);
197 uint64_t period_mag_multi; 197 u64 period_mag_multi;
198 198
199 haptic->magnitude = effect->u.rumble.strong_magnitude; 199 haptic->magnitude = effect->u.rumble.strong_magnitude;
200 if (!haptic->magnitude) 200 if (!haptic->magnitude)
@@ -205,8 +205,7 @@ static int max77693_haptic_play_effect(struct input_dev *dev, void *data,
205 * The formula to convert magnitude to pwm_duty as follows: 205 * The formula to convert magnitude to pwm_duty as follows:
206 * - pwm_duty = (magnitude * pwm_period) / MAX_MAGNITUDE(0xFFFF) 206 * - pwm_duty = (magnitude * pwm_period) / MAX_MAGNITUDE(0xFFFF)
207 */ 207 */
208 period_mag_multi = (int64_t)(haptic->pwm_dev->period * 208 period_mag_multi = (u64)haptic->pwm_dev->period * haptic->magnitude;
209 haptic->magnitude);
210 haptic->pwm_duty = (unsigned int)(period_mag_multi >> 209 haptic->pwm_duty = (unsigned int)(period_mag_multi >>
211 MAX_MAGNITUDE_SHIFT); 210 MAX_MAGNITUDE_SHIFT);
212 211
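The max77693 change widens the multiplication before it happens: casting one operand to u64 keeps period * magnitude (magnitude can be 0xFFFF) from wrapping in 32 bits before the >> MAX_MAGNITUDE_SHIFT scaling. A standalone demonstration with made-up numbers and an illustrative shift of 16:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t period = 1000000;	/* pwm period, illustrative value */
		uint32_t magnitude = 0xFFFF;	/* maximum rumble magnitude */

		uint32_t narrow = period * magnitude;		/* wraps: overflows 32 bits */
		uint64_t wide = (uint64_t)period * magnitude;	/* correct 64-bit product */

		printf("narrow=%u duty=%u\n", narrow, narrow >> 16);
		printf("wide=%llu duty=%u\n",
		       (unsigned long long)wide, (uint32_t)(wide >> 16));
		return 0;
	}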
diff --git a/drivers/input/misc/soc_button_array.c b/drivers/input/misc/soc_button_array.c
index 735604753568..e097f1ab427f 100644
--- a/drivers/input/misc/soc_button_array.c
+++ b/drivers/input/misc/soc_button_array.c
@@ -55,7 +55,7 @@ static int soc_button_lookup_gpio(struct device *dev, int acpi_index)
55 struct gpio_desc *desc; 55 struct gpio_desc *desc;
56 int gpio; 56 int gpio;
57 57
58 desc = gpiod_get_index(dev, KBUILD_MODNAME, acpi_index); 58 desc = gpiod_get_index(dev, KBUILD_MODNAME, acpi_index, GPIOD_ASIS);
59 if (IS_ERR(desc)) 59 if (IS_ERR(desc))
60 return PTR_ERR(desc); 60 return PTR_ERR(desc);
61 61
diff --git a/drivers/input/misc/twl4030-pwrbutton.c b/drivers/input/misc/twl4030-pwrbutton.c
index fb3b63b2f85c..8400a1a34d87 100644
--- a/drivers/input/misc/twl4030-pwrbutton.c
+++ b/drivers/input/misc/twl4030-pwrbutton.c
@@ -85,6 +85,7 @@ static int twl4030_pwrbutton_probe(struct platform_device *pdev)
85 } 85 }
86 86
87 platform_set_drvdata(pdev, pwr); 87 platform_set_drvdata(pdev, pwr);
88 device_init_wakeup(&pdev->dev, true);
88 89
89 return 0; 90 return 0;
90} 91}
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 2b0ae8cc8e51..d125a019383f 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -1156,7 +1156,13 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
1156{ 1156{
1157 struct alps_data *priv = psmouse->private; 1157 struct alps_data *priv = psmouse->private;
1158 1158
1159 if ((psmouse->packet[0] & 0xc8) == 0x08) { /* PS/2 packet */ 1159 /*
1160 * Check if we are dealing with a bare PS/2 packet, presumably from
1161 * a device connected to the external PS/2 port. Because bare PS/2
1162 * protocol does not have enough constant bits to self-synchronize
1163 * properly we only do this if the device is fully synchronized.
1164 */
1165 if (!psmouse->out_of_sync_cnt && (psmouse->packet[0] & 0xc8) == 0x08) {
1160 if (psmouse->pktcnt == 3) { 1166 if (psmouse->pktcnt == 3) {
1161 alps_report_bare_ps2_packet(psmouse, psmouse->packet, 1167 alps_report_bare_ps2_packet(psmouse, psmouse->packet,
1162 true); 1168 true);
@@ -1180,12 +1186,27 @@ static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
1180 } 1186 }
1181 1187
1182 /* Bytes 2 - pktsize should have 0 in the highest bit */ 1188 /* Bytes 2 - pktsize should have 0 in the highest bit */
1183 if ((priv->proto_version < ALPS_PROTO_V5) && 1189 if (priv->proto_version < ALPS_PROTO_V5 &&
1184 psmouse->pktcnt >= 2 && psmouse->pktcnt <= psmouse->pktsize && 1190 psmouse->pktcnt >= 2 && psmouse->pktcnt <= psmouse->pktsize &&
1185 (psmouse->packet[psmouse->pktcnt - 1] & 0x80)) { 1191 (psmouse->packet[psmouse->pktcnt - 1] & 0x80)) {
1186 psmouse_dbg(psmouse, "refusing packet[%i] = %x\n", 1192 psmouse_dbg(psmouse, "refusing packet[%i] = %x\n",
1187 psmouse->pktcnt - 1, 1193 psmouse->pktcnt - 1,
1188 psmouse->packet[psmouse->pktcnt - 1]); 1194 psmouse->packet[psmouse->pktcnt - 1]);
1195
1196 if (priv->proto_version == ALPS_PROTO_V3 &&
1197 psmouse->pktcnt == psmouse->pktsize) {
1198 /*
1199 * Some Dell boxes, such as Latitude E6440 or E7440
1200 * with closed lid, quite often smash last byte of
1201 * otherwise valid packet with 0xff. Given that the
1202 * next packet is very likely to be valid let's
1203 * report PSMOUSE_FULL_PACKET but not process data,
1204 * rather than reporting PSMOUSE_BAD_DATA and
1205 * filling the logs.
1206 */
1207 return PSMOUSE_FULL_PACKET;
1208 }
1209
1189 return PSMOUSE_BAD_DATA; 1210 return PSMOUSE_BAD_DATA;
1190 } 1211 }
1191 1212
@@ -2389,6 +2410,9 @@ int alps_init(struct psmouse *psmouse)
2389 /* We are having trouble resyncing ALPS touchpads so disable it for now */ 2410 /* We are having trouble resyncing ALPS touchpads so disable it for now */
2390 psmouse->resync_time = 0; 2411 psmouse->resync_time = 0;
2391 2412
2413 /* Allow 2 invalid packets without resetting device */
2414 psmouse->resetafter = psmouse->pktsize * 2;
2415
2392 return 0; 2416 return 0;
2393 2417
2394init_fail: 2418init_fail:
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 06fc6e76ffbe..f2b978026407 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -428,14 +428,6 @@ static void elantech_report_trackpoint(struct psmouse *psmouse,
428 int x, y; 428 int x, y;
429 u32 t; 429 u32 t;
430 430
431 if (dev_WARN_ONCE(&psmouse->ps2dev.serio->dev,
432 !tp_dev,
433 psmouse_fmt("Unexpected trackpoint message\n"))) {
434 if (etd->debug == 1)
435 elantech_packet_dump(psmouse);
436 return;
437 }
438
439 t = get_unaligned_le32(&packet[0]); 431 t = get_unaligned_le32(&packet[0]);
440 432
441 switch (t & ~7U) { 433 switch (t & ~7U) {
@@ -563,6 +555,7 @@ static void elantech_input_sync_v4(struct psmouse *psmouse)
563 } else { 555 } else {
564 input_report_key(dev, BTN_LEFT, packet[0] & 0x01); 556 input_report_key(dev, BTN_LEFT, packet[0] & 0x01);
565 input_report_key(dev, BTN_RIGHT, packet[0] & 0x02); 557 input_report_key(dev, BTN_RIGHT, packet[0] & 0x02);
558 input_report_key(dev, BTN_MIDDLE, packet[0] & 0x04);
566 } 559 }
567 560
568 input_mt_report_pointer_emulation(dev, true); 561 input_mt_report_pointer_emulation(dev, true);
@@ -792,6 +785,9 @@ static int elantech_packet_check_v4(struct psmouse *psmouse)
792 unsigned char packet_type = packet[3] & 0x03; 785 unsigned char packet_type = packet[3] & 0x03;
793 bool sanity_check; 786 bool sanity_check;
794 787
788 if (etd->tp_dev && (packet[3] & 0x0f) == 0x06)
789 return PACKET_TRACKPOINT;
790
795 /* 791 /*
796 * Sanity check based on the constant bits of a packet. 792 * Sanity check based on the constant bits of a packet.
797 * The constant bits change depending on the value of 793 * The constant bits change depending on the value of
@@ -877,10 +873,19 @@ static psmouse_ret_t elantech_process_byte(struct psmouse *psmouse)
877 873
878 case 4: 874 case 4:
879 packet_type = elantech_packet_check_v4(psmouse); 875 packet_type = elantech_packet_check_v4(psmouse);
880 if (packet_type == PACKET_UNKNOWN) 876 switch (packet_type) {
877 case PACKET_UNKNOWN:
881 return PSMOUSE_BAD_DATA; 878 return PSMOUSE_BAD_DATA;
882 879
883 elantech_report_absolute_v4(psmouse, packet_type); 880 case PACKET_TRACKPOINT:
881 elantech_report_trackpoint(psmouse, packet_type);
882 break;
883
884 default:
885 elantech_report_absolute_v4(psmouse, packet_type);
886 break;
887 }
888
884 break; 889 break;
885 } 890 }
886 891
@@ -1120,6 +1125,22 @@ static void elantech_set_buttonpad_prop(struct psmouse *psmouse)
1120} 1125}
1121 1126
1122/* 1127/*
1128 * Some hw_version 4 models do have a middle button
1129 */
1130static const struct dmi_system_id elantech_dmi_has_middle_button[] = {
1131#if defined(CONFIG_DMI) && defined(CONFIG_X86)
1132 {
1133 /* Fujitsu H730 has a middle button */
1134 .matches = {
1135 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
1136 DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H730"),
1137 },
1138 },
1139#endif
1140 { }
1141};
1142
1143/*
1123 * Set the appropriate event bits for the input subsystem 1144 * Set the appropriate event bits for the input subsystem
1124 */ 1145 */
1125static int elantech_set_input_params(struct psmouse *psmouse) 1146static int elantech_set_input_params(struct psmouse *psmouse)
@@ -1138,6 +1159,8 @@ static int elantech_set_input_params(struct psmouse *psmouse)
1138 __clear_bit(EV_REL, dev->evbit); 1159 __clear_bit(EV_REL, dev->evbit);
1139 1160
1140 __set_bit(BTN_LEFT, dev->keybit); 1161 __set_bit(BTN_LEFT, dev->keybit);
1162 if (dmi_check_system(elantech_dmi_has_middle_button))
1163 __set_bit(BTN_MIDDLE, dev->keybit);
1141 __set_bit(BTN_RIGHT, dev->keybit); 1164 __set_bit(BTN_RIGHT, dev->keybit);
1142 1165
1143 __set_bit(BTN_TOUCH, dev->keybit); 1166 __set_bit(BTN_TOUCH, dev->keybit);
@@ -1299,6 +1322,7 @@ ELANTECH_INT_ATTR(reg_25, 0x25);
1299ELANTECH_INT_ATTR(reg_26, 0x26); 1322ELANTECH_INT_ATTR(reg_26, 0x26);
1300ELANTECH_INT_ATTR(debug, 0); 1323ELANTECH_INT_ATTR(debug, 0);
1301ELANTECH_INT_ATTR(paritycheck, 0); 1324ELANTECH_INT_ATTR(paritycheck, 0);
1325ELANTECH_INT_ATTR(crc_enabled, 0);
1302 1326
1303static struct attribute *elantech_attrs[] = { 1327static struct attribute *elantech_attrs[] = {
1304 &psmouse_attr_reg_07.dattr.attr, 1328 &psmouse_attr_reg_07.dattr.attr,
@@ -1313,6 +1337,7 @@ static struct attribute *elantech_attrs[] = {
1313 &psmouse_attr_reg_26.dattr.attr, 1337 &psmouse_attr_reg_26.dattr.attr,
1314 &psmouse_attr_debug.dattr.attr, 1338 &psmouse_attr_debug.dattr.attr,
1315 &psmouse_attr_paritycheck.dattr.attr, 1339 &psmouse_attr_paritycheck.dattr.attr,
1340 &psmouse_attr_crc_enabled.dattr.attr,
1316 NULL 1341 NULL
1317}; 1342};
1318 1343
@@ -1439,6 +1464,22 @@ static int elantech_reconnect(struct psmouse *psmouse)
1439} 1464}
1440 1465
1441/* 1466/*
1467 * Some hw_version 4 models do not work with crc_disabled
1468 */
1469static const struct dmi_system_id elantech_dmi_force_crc_enabled[] = {
1470#if defined(CONFIG_DMI) && defined(CONFIG_X86)
1471 {
1472 /* Fujitsu H730 does not work with crc_enabled == 0 */
1473 .matches = {
1474 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
1475 DMI_MATCH(DMI_PRODUCT_NAME, "CELSIUS H730"),
1476 },
1477 },
1478#endif
1479 { }
1480};
1481
1482/*
1442 * Some hw_version 3 models go into error state when we try to set 1483 * Some hw_version 3 models go into error state when we try to set
1443 * bit 3 and/or bit 1 of r10. 1484 * bit 3 and/or bit 1 of r10.
1444 */ 1485 */
@@ -1513,7 +1554,8 @@ static int elantech_set_properties(struct elantech_data *etd)
1513 * The signatures of v3 and v4 packets change depending on the 1554 * The signatures of v3 and v4 packets change depending on the
1514 * value of this hardware flag. 1555 * value of this hardware flag.
1515 */ 1556 */
1516 etd->crc_enabled = ((etd->fw_version & 0x4000) == 0x4000); 1557 etd->crc_enabled = (etd->fw_version & 0x4000) == 0x4000 ||
1558 dmi_check_system(elantech_dmi_force_crc_enabled);
1517 1559
1518 /* Enable real hardware resolution on hw_version 3 ? */ 1560 /* Enable real hardware resolution on hw_version 3 ? */
1519 etd->set_hw_resolution = !dmi_check_system(no_hw_res_dmi_table); 1561 etd->set_hw_resolution = !dmi_check_system(no_hw_res_dmi_table);
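Both elantech additions (forced middle button, forced crc_enabled) use the same mechanism: a dmi_system_id table matched at probe time with dmi_check_system(). A hedged sketch of such a quirk table (the match strings are examples, not additional supported hardware):

	static const struct dmi_system_id example_quirk_table[] = {
		{
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
				DMI_MATCH(DMI_PRODUCT_NAME, "Example Model"),
			},
		},
		{ }					/* terminating entry */
	};

	/* later:  if (dmi_check_system(example_quirk_table)) apply_quirk(); */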
diff --git a/drivers/input/mouse/psmouse-base.c b/drivers/input/mouse/psmouse-base.c
index 26994f6a2b2a..95a3a6e2faf6 100644
--- a/drivers/input/mouse/psmouse-base.c
+++ b/drivers/input/mouse/psmouse-base.c
@@ -1536,16 +1536,9 @@ static int psmouse_reconnect(struct serio *serio)
1536{ 1536{
1537 struct psmouse *psmouse = serio_get_drvdata(serio); 1537 struct psmouse *psmouse = serio_get_drvdata(serio);
1538 struct psmouse *parent = NULL; 1538 struct psmouse *parent = NULL;
1539 struct serio_driver *drv = serio->drv;
1540 unsigned char type; 1539 unsigned char type;
1541 int rc = -1; 1540 int rc = -1;
1542 1541
1543 if (!drv || !psmouse) {
1544 psmouse_dbg(psmouse,
1545 "reconnect request, but serio is disconnected, ignoring...\n");
1546 return -1;
1547 }
1548
1549 mutex_lock(&psmouse_mutex); 1542 mutex_lock(&psmouse_mutex);
1550 1543
1551 if (serio->parent && serio->id.type == SERIO_PS_PSTHRU) { 1544 if (serio->parent && serio->id.type == SERIO_PS_PSTHRU) {
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 9031a0a28ea4..f9472920d986 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -135,14 +135,18 @@ static const struct min_max_quirk min_max_pnpid_table[] = {
135 1232, 5710, 1156, 4696 135 1232, 5710, 1156, 4696
136 }, 136 },
137 { 137 {
138 (const char * const []){"LEN0034", "LEN0036", "LEN2002", 138 (const char * const []){"LEN0034", "LEN0036", "LEN0039",
139 "LEN2004", NULL}, 139 "LEN2002", "LEN2004", NULL},
140 1024, 5112, 2024, 4832 140 1024, 5112, 2024, 4832
141 }, 141 },
142 { 142 {
143 (const char * const []){"LEN2001", NULL}, 143 (const char * const []){"LEN2001", NULL},
144 1024, 5022, 2508, 4832 144 1024, 5022, 2508, 4832
145 }, 145 },
146 {
147 (const char * const []){"LEN2006", NULL},
148 1264, 5675, 1171, 4688
149 },
146 { } 150 { }
147}; 151};
148 152
@@ -163,6 +167,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
163 "LEN0036", /* T440 */ 167 "LEN0036", /* T440 */
164 "LEN0037", 168 "LEN0037",
165 "LEN0038", 169 "LEN0038",
170 "LEN0039", /* T440s */
166 "LEN0041", 171 "LEN0041",
167 "LEN0042", /* Yoga */ 172 "LEN0042", /* Yoga */
168 "LEN0045", 173 "LEN0045",
diff --git a/drivers/input/mouse/vsxxxaa.c b/drivers/input/mouse/vsxxxaa.c
index 38298232124f..abd494411e69 100644
--- a/drivers/input/mouse/vsxxxaa.c
+++ b/drivers/input/mouse/vsxxxaa.c
@@ -128,7 +128,7 @@ static void vsxxxaa_drop_bytes(struct vsxxxaa *mouse, int num)
128 if (num >= mouse->count) { 128 if (num >= mouse->count) {
129 mouse->count = 0; 129 mouse->count = 0;
130 } else { 130 } else {
131 memmove(mouse->buf, mouse->buf + num - 1, BUFLEN - num); 131 memmove(mouse->buf, mouse->buf + num, BUFLEN - num);
132 mouse->count -= num; 132 mouse->count -= num;
133 } 133 }
134} 134}
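The vsxxxaa fix is a plain off-by-one: dropping num bytes from the front of the buffer has to copy from buf + num; copying from buf + num - 1 keeps the last byte that should have been discarded and shifts everything after it by one. A standalone illustration:

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char a[] = "ABCDEF", b[] = "ABCDEF";
		int num = 2;				/* drop the first two bytes */

		memmove(a, a + num - 1, sizeof(a) - num);	/* buggy: keeps 'B' */
		memmove(b, b + num, sizeof(b) - num);		/* fixed: starts at 'C' */

		printf("buggy: %s\nfixed: %s\n", a, b);
		return 0;
	}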
diff --git a/drivers/input/serio/altera_ps2.c b/drivers/input/serio/altera_ps2.c
index cce69d6b9587..58781c8a8aec 100644
--- a/drivers/input/serio/altera_ps2.c
+++ b/drivers/input/serio/altera_ps2.c
@@ -37,7 +37,7 @@ static irqreturn_t altera_ps2_rxint(int irq, void *dev_id)
37{ 37{
38 struct ps2if *ps2if = dev_id; 38 struct ps2if *ps2if = dev_id;
39 unsigned int status; 39 unsigned int status;
40 int handled = IRQ_NONE; 40 irqreturn_t handled = IRQ_NONE;
41 41
42 while ((status = readl(ps2if->base)) & 0xffff0000) { 42 while ((status = readl(ps2if->base)) & 0xffff0000) {
43 serio_interrupt(ps2if->io, status & 0xff, 0); 43 serio_interrupt(ps2if->io, status & 0xff, 0);
@@ -74,7 +74,7 @@ static void altera_ps2_close(struct serio *io)
74{ 74{
75 struct ps2if *ps2if = io->port_data; 75 struct ps2if *ps2if = io->port_data;
76 76
77 writel(0, ps2if->base); /* disable rx irq */ 77 writel(0, ps2if->base + 4); /* disable rx irq */
78} 78}
79 79
80/* 80/*
diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h
index a0bcbb64d06d..faeeb1372462 100644
--- a/drivers/input/serio/i8042-x86ia64io.h
+++ b/drivers/input/serio/i8042-x86ia64io.h
@@ -207,17 +207,282 @@ static const struct dmi_system_id __initconst i8042_dmi_noloop_table[] = {
207}; 207};
208 208
209/* 209/*
210 * Some laptops do implement active multiplexing mode correctly; 210 * Some Fujitsu notebooks are having trouble with touchpads if
211 * unfortunately they are in minority. 211 * active multiplexing mode is activated. Luckily they don't have
212 * external PS/2 ports so we can safely disable it.
213 * ... apparently some Toshibas don't like MUX mode either and
214 * die horrible death on reboot.
212 */ 215 */
213static const struct dmi_system_id __initconst i8042_dmi_mux_table[] = { 216static const struct dmi_system_id __initconst i8042_dmi_nomux_table[] = {
217 {
218 /* Fujitsu Lifebook P7010/P7010D */
219 .matches = {
220 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
221 DMI_MATCH(DMI_PRODUCT_NAME, "P7010"),
222 },
223 },
224 {
225 /* Fujitsu Lifebook P7010 */
226 .matches = {
227 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
228 DMI_MATCH(DMI_PRODUCT_NAME, "0000000000"),
229 },
230 },
231 {
232 /* Fujitsu Lifebook P5020D */
233 .matches = {
234 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
235 DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook P Series"),
236 },
237 },
238 {
239 /* Fujitsu Lifebook S2000 */
240 .matches = {
241 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
242 DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook S Series"),
243 },
244 },
245 {
246 /* Fujitsu Lifebook S6230 */
247 .matches = {
248 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
249 DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook S6230"),
250 },
251 },
252 {
253 /* Fujitsu T70H */
254 .matches = {
255 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
256 DMI_MATCH(DMI_PRODUCT_NAME, "FMVLT70H"),
257 },
258 },
259 {
260 /* Fujitsu-Siemens Lifebook T3010 */
261 .matches = {
262 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
263 DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK T3010"),
264 },
265 },
266 {
267 /* Fujitsu-Siemens Lifebook E4010 */
268 .matches = {
269 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
270 DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK E4010"),
271 },
272 },
273 {
274 /* Fujitsu-Siemens Amilo Pro 2010 */
275 .matches = {
276 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
277 DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Pro V2010"),
278 },
279 },
280 {
281 /* Fujitsu-Siemens Amilo Pro 2030 */
282 .matches = {
283 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"),
284 DMI_MATCH(DMI_PRODUCT_NAME, "AMILO PRO V2030"),
285 },
286 },
287 {
288 /*
289 * No data is coming from the touchscreen unless KBC
290 * is in legacy mode.
291 */
292 /* Panasonic CF-29 */
293 .matches = {
294 DMI_MATCH(DMI_SYS_VENDOR, "Matsushita"),
295 DMI_MATCH(DMI_PRODUCT_NAME, "CF-29"),
296 },
297 },
298 {
299 /*
300 * HP Pavilion DV4017EA -
301 * errors on MUX ports are reported without raising AUXDATA
302 * causing "spurious NAK" messages.
303 */
304 .matches = {
305 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
306 DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion dv4000 (EA032EA#ABF)"),
307 },
308 },
309 {
310 /*
311 * HP Pavilion ZT1000 -
312 * like DV4017EA does not raise AUXERR for errors on MUX ports.
313 */
314 .matches = {
315 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
316 DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion Notebook PC"),
317 DMI_MATCH(DMI_PRODUCT_VERSION, "HP Pavilion Notebook ZT1000"),
318 },
319 },
320 {
321 /*
322 * HP Pavilion DV4270ca -
323 * like DV4017EA does not raise AUXERR for errors on MUX ports.
324 */
325 .matches = {
326 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
327 DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion dv4000 (EH476UA#ABL)"),
328 },
329 },
330 {
331 .matches = {
332 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
333 DMI_MATCH(DMI_PRODUCT_NAME, "Satellite P10"),
334 },
335 },
336 {
337 .matches = {
338 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
339 DMI_MATCH(DMI_PRODUCT_NAME, "EQUIUM A110"),
340 },
341 },
342 {
343 .matches = {
344 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
345 DMI_MATCH(DMI_PRODUCT_NAME, "SATELLITE C850D"),
346 },
347 },
348 {
349 .matches = {
350 DMI_MATCH(DMI_SYS_VENDOR, "ALIENWARE"),
351 DMI_MATCH(DMI_PRODUCT_NAME, "Sentia"),
352 },
353 },
354 {
355 /* Sharp Actius MM20 */
356 .matches = {
357 DMI_MATCH(DMI_SYS_VENDOR, "SHARP"),
358 DMI_MATCH(DMI_PRODUCT_NAME, "PC-MM20 Series"),
359 },
360 },
361 {
362 /* Sony Vaio FS-115b */
363 .matches = {
364 DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
365 DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FS115B"),
366 },
367 },
368 {
369 /*
370 * Sony Vaio FZ-240E -
371 * reset and GET ID commands issued via KBD port are
372 * sometimes being delivered to AUX3.
373 */
374 .matches = {
375 DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
376 DMI_MATCH(DMI_PRODUCT_NAME, "VGN-FZ240E"),
377 },
378 },
214 { 379 {
215 /* 380 /*
216 * Panasonic CF-18 needs to be in MUX mode since the 381 * Most (all?) VAIOs do not have external PS/2 ports nor
217 * touchscreen is on serio3 and it also has touchpad. 382 * they implement active multiplexing properly, and
383 * MUX discovery usually messes up keyboard/touchpad.
218 */ 384 */
219 .matches = { 385 .matches = {
220 DMI_MATCH(DMI_PRODUCT_NAME, "CF-18"), 386 DMI_MATCH(DMI_SYS_VENDOR, "Sony Corporation"),
387 DMI_MATCH(DMI_BOARD_NAME, "VAIO"),
388 },
389 },
390 {
391 /* Amoi M636/A737 */
392 .matches = {
393 DMI_MATCH(DMI_SYS_VENDOR, "Amoi Electronics CO.,LTD."),
394 DMI_MATCH(DMI_PRODUCT_NAME, "M636/A737 platform"),
395 },
396 },
397 {
398 /* Lenovo 3000 n100 */
399 .matches = {
400 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
401 DMI_MATCH(DMI_PRODUCT_NAME, "076804U"),
402 },
403 },
404 {
405 .matches = {
406 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
407 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1360"),
408 },
409 },
410 {
411 /* Acer Aspire 5710 */
412 .matches = {
413 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
414 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5710"),
415 },
416 },
417 {
418 /* Gericom Bellagio */
419 .matches = {
420 DMI_MATCH(DMI_SYS_VENDOR, "Gericom"),
421 DMI_MATCH(DMI_PRODUCT_NAME, "N34AS6"),
422 },
423 },
424 {
425 /* IBM 2656 */
426 .matches = {
427 DMI_MATCH(DMI_SYS_VENDOR, "IBM"),
428 DMI_MATCH(DMI_PRODUCT_NAME, "2656"),
429 },
430 },
431 {
432 /* Dell XPS M1530 */
433 .matches = {
434 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
435 DMI_MATCH(DMI_PRODUCT_NAME, "XPS M1530"),
436 },
437 },
438 {
439 /* Compal HEL80I */
440 .matches = {
441 DMI_MATCH(DMI_SYS_VENDOR, "COMPAL"),
442 DMI_MATCH(DMI_PRODUCT_NAME, "HEL80I"),
443 },
444 },
445 {
446 /* Dell Vostro 1510 */
447 .matches = {
448 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
449 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro1510"),
450 },
451 },
452 {
453 /* Acer Aspire 5536 */
454 .matches = {
455 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
456 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5536"),
457 DMI_MATCH(DMI_PRODUCT_VERSION, "0100"),
458 },
459 },
460 {
461 /* Dell Vostro V13 */
462 .matches = {
463 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
464 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V13"),
465 },
466 },
467 {
468 /* Newer HP Pavilion dv4 models */
469 .matches = {
470 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
471 DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion dv4 Notebook PC"),
472 },
473 },
474 {
475 /* Asus X450LCP */
476 .matches = {
477 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
478 DMI_MATCH(DMI_PRODUCT_NAME, "X450LCP"),
479 },
480 },
481 {
482 /* Avatar AVIU-145A6 */
483 .matches = {
484 DMI_MATCH(DMI_SYS_VENDOR, "Intel"),
485 DMI_MATCH(DMI_PRODUCT_NAME, "IC4I"),
221 }, 486 },
222 }, 487 },
223 { } 488 { }
@@ -364,6 +629,22 @@ static const struct dmi_system_id __initconst i8042_dmi_notimeout_table[] = {
364 }, 629 },
365 }, 630 },
366 { 631 {
632 /* Fujitsu A544 laptop */
633 /* https://bugzilla.redhat.com/show_bug.cgi?id=1111138 */
634 .matches = {
635 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
636 DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK A544"),
637 },
638 },
639 {
640 /* Fujitsu AH544 laptop */
641 /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */
642 .matches = {
643 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
644 DMI_MATCH(DMI_PRODUCT_NAME, "LIFEBOOK AH544"),
645 },
646 },
647 {
367 /* Fujitsu U574 laptop */ 648 /* Fujitsu U574 laptop */
368 /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */ 649 /* https://bugzilla.kernel.org/show_bug.cgi?id=69731 */
369 .matches = { 650 .matches = {
@@ -740,8 +1021,8 @@ static int __init i8042_platform_init(void)
740 if (dmi_check_system(i8042_dmi_noloop_table)) 1021 if (dmi_check_system(i8042_dmi_noloop_table))
741 i8042_noloop = true; 1022 i8042_noloop = true;
742 1023
743 if (dmi_check_system(i8042_dmi_mux_table)) 1024 if (dmi_check_system(i8042_dmi_nomux_table))
744 i8042_nomux = false; 1025 i8042_nomux = true;
745 1026
746 if (dmi_check_system(i8042_dmi_notimeout_table)) 1027 if (dmi_check_system(i8042_dmi_notimeout_table))
747 i8042_notimeout = true; 1028 i8042_notimeout = true;
diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c
index 9a97c2b10926..f5a98af3b325 100644
--- a/drivers/input/serio/i8042.c
+++ b/drivers/input/serio/i8042.c
@@ -39,7 +39,7 @@ static bool i8042_noaux;
39module_param_named(noaux, i8042_noaux, bool, 0); 39module_param_named(noaux, i8042_noaux, bool, 0);
40MODULE_PARM_DESC(noaux, "Do not probe or use AUX (mouse) port."); 40MODULE_PARM_DESC(noaux, "Do not probe or use AUX (mouse) port.");
41 41
42static bool i8042_nomux = true; 42static bool i8042_nomux;
43module_param_named(nomux, i8042_nomux, bool, 0); 43module_param_named(nomux, i8042_nomux, bool, 0);
44MODULE_PARM_DESC(nomux, "Do not check whether an active multiplexing controller is present."); 44MODULE_PARM_DESC(nomux, "Do not check whether an active multiplexing controller is present.");
45 45
diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c
index d0ef91fc87d1..b1ae77995968 100644
--- a/drivers/input/touchscreen/wm97xx-core.c
+++ b/drivers/input/touchscreen/wm97xx-core.c
@@ -70,11 +70,11 @@
70 * Documentation/input/input-programming.txt for more details. 70 * Documentation/input/input-programming.txt for more details.
71 */ 71 */
72 72
73static int abs_x[3] = {350, 3900, 5}; 73static int abs_x[3] = {150, 4000, 5};
74module_param_array(abs_x, int, NULL, 0); 74module_param_array(abs_x, int, NULL, 0);
75MODULE_PARM_DESC(abs_x, "Touchscreen absolute X min, max, fuzz"); 75MODULE_PARM_DESC(abs_x, "Touchscreen absolute X min, max, fuzz");
76 76
77static int abs_y[3] = {320, 3750, 40}; 77static int abs_y[3] = {200, 4000, 40};
78module_param_array(abs_y, int, NULL, 0); 78module_param_array(abs_y, int, NULL, 0);
79MODULE_PARM_DESC(abs_y, "Touchscreen absolute Y min, max, fuzz"); 79MODULE_PARM_DESC(abs_y, "Touchscreen absolute Y min, max, fuzz");
80 80
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index 3e238cd049e6..6a2e168c3ab0 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -43,6 +43,7 @@
43#define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS (0x34) 43#define ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS (0x34)
44#define ARMADA_370_XP_INT_SOURCE_CTL(irq) (0x100 + irq*4) 44#define ARMADA_370_XP_INT_SOURCE_CTL(irq) (0x100 + irq*4)
45#define ARMADA_370_XP_INT_SOURCE_CPU_MASK 0xF 45#define ARMADA_370_XP_INT_SOURCE_CPU_MASK 0xF
46#define ARMADA_370_XP_INT_IRQ_FIQ_MASK(cpuid) ((BIT(0) | BIT(8)) << cpuid)
46 47
47#define ARMADA_370_XP_CPU_INTACK_OFFS (0x44) 48#define ARMADA_370_XP_CPU_INTACK_OFFS (0x44)
48#define ARMADA_375_PPI_CAUSE (0x10) 49#define ARMADA_375_PPI_CAUSE (0x10)
@@ -406,19 +407,29 @@ static void armada_370_xp_mpic_handle_cascade_irq(unsigned int irq,
406 struct irq_desc *desc) 407 struct irq_desc *desc)
407{ 408{
408 struct irq_chip *chip = irq_get_chip(irq); 409 struct irq_chip *chip = irq_get_chip(irq);
409 unsigned long irqmap, irqn; 410 unsigned long irqmap, irqn, irqsrc, cpuid;
410 unsigned int cascade_irq; 411 unsigned int cascade_irq;
411 412
412 chained_irq_enter(chip, desc); 413 chained_irq_enter(chip, desc);
413 414
414 irqmap = readl_relaxed(per_cpu_int_base + ARMADA_375_PPI_CAUSE); 415 irqmap = readl_relaxed(per_cpu_int_base + ARMADA_375_PPI_CAUSE);
415 416 cpuid = cpu_logical_map(smp_processor_id());
416 if (irqmap & BIT(0)) {
417 armada_370_xp_handle_msi_irq(NULL, true);
418 irqmap &= ~BIT(0);
419 }
420 417
421 for_each_set_bit(irqn, &irqmap, BITS_PER_LONG) { 418 for_each_set_bit(irqn, &irqmap, BITS_PER_LONG) {
419 irqsrc = readl_relaxed(main_int_base +
420 ARMADA_370_XP_INT_SOURCE_CTL(irqn));
421
422 /* Check if the interrupt is not masked on current CPU.
423 * Test IRQ (0-1) and FIQ (8-9) mask bits.
424 */
425 if (!(irqsrc & ARMADA_370_XP_INT_IRQ_FIQ_MASK(cpuid)))
426 continue;
427
428 if (irqn == 1) {
429 armada_370_xp_handle_msi_irq(NULL, true);
430 continue;
431 }
432
422 cascade_irq = irq_find_mapping(armada_370_xp_mpic_domain, irqn); 433 cascade_irq = irq_find_mapping(armada_370_xp_mpic_domain, irqn);
423 generic_handle_irq(cascade_irq); 434 generic_handle_irq(cascade_irq);
424 } 435 }
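The reworked cascade loop checks, for every set bit in the per-CPU cause register, whether that interrupt is actually enabled as IRQ or FIQ for the CPU doing the handling, and dispatches MSIs separately (irqn == 1 in the new loop). The mask macro added above expands per CPU as follows:

	/* ARMADA_370_XP_INT_IRQ_FIQ_MASK(cpuid) == ((BIT(0) | BIT(8)) << cpuid) */
	/*   cpu 0 -> 0x00000101   IRQ bit 0, FIQ bit 8  */
	/*   cpu 1 -> 0x00000202   IRQ bit 1, FIQ bit 9  */
	/*   cpu 2 -> 0x00000404   IRQ bit 2, FIQ bit 10 */
	/*   cpu 3 -> 0x00000808   IRQ bit 3, FIQ bit 11 */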
diff --git a/drivers/irqchip/irq-atmel-aic-common.c b/drivers/irqchip/irq-atmel-aic-common.c
index 6ae3cdee0681..cc4f9d80122e 100644
--- a/drivers/irqchip/irq-atmel-aic-common.c
+++ b/drivers/irqchip/irq-atmel-aic-common.c
@@ -217,8 +217,9 @@ struct irq_domain *__init aic_common_of_init(struct device_node *node,
217 } 217 }
218 218
219 ret = irq_alloc_domain_generic_chips(domain, 32, 1, name, 219 ret = irq_alloc_domain_generic_chips(domain, 32, 1, name,
220 handle_level_irq, 0, 0, 220 handle_fasteoi_irq,
221 IRQCHIP_SKIP_SET_WAKE); 221 IRQ_NOREQUEST | IRQ_NOPROBE |
222 IRQ_NOAUTOEN, 0, 0);
222 if (ret) 223 if (ret)
223 goto err_domain_remove; 224 goto err_domain_remove;
224 225
@@ -230,7 +231,6 @@ struct irq_domain *__init aic_common_of_init(struct device_node *node,
230 gc->unused = 0; 231 gc->unused = 0;
231 gc->wake_enabled = ~0; 232 gc->wake_enabled = ~0;
232 gc->chip_types[0].type = IRQ_TYPE_SENSE_MASK; 233 gc->chip_types[0].type = IRQ_TYPE_SENSE_MASK;
233 gc->chip_types[0].handler = handle_fasteoi_irq;
234 gc->chip_types[0].chip.irq_eoi = irq_gc_eoi; 234 gc->chip_types[0].chip.irq_eoi = irq_gc_eoi;
235 gc->chip_types[0].chip.irq_set_wake = irq_gc_set_wake; 235 gc->chip_types[0].chip.irq_set_wake = irq_gc_set_wake;
236 gc->chip_types[0].chip.irq_shutdown = aic_common_shutdown; 236 gc->chip_types[0].chip.irq_shutdown = aic_common_shutdown;
diff --git a/drivers/irqchip/irq-bcm7120-l2.c b/drivers/irqchip/irq-bcm7120-l2.c
index b9f4fb808e49..5fb38a2ac226 100644
--- a/drivers/irqchip/irq-bcm7120-l2.c
+++ b/drivers/irqchip/irq-bcm7120-l2.c
@@ -101,9 +101,9 @@ static int bcm7120_l2_intc_init_one(struct device_node *dn,
101 int parent_irq; 101 int parent_irq;
102 102
103 parent_irq = irq_of_parse_and_map(dn, irq); 103 parent_irq = irq_of_parse_and_map(dn, irq);
104 if (parent_irq < 0) { 104 if (!parent_irq) {
105 pr_err("failed to map interrupt %d\n", irq); 105 pr_err("failed to map interrupt %d\n", irq);
106 return parent_irq; 106 return -EINVAL;
107 } 107 }
108 108
109 data->irq_map_mask |= be32_to_cpup(map_mask + irq); 109 data->irq_map_mask |= be32_to_cpup(map_mask + irq);
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index c15c840987d2..14691a4cb84c 100644
--- a/drivers/irqchip/irq-brcmstb-l2.c
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -135,9 +135,9 @@ int __init brcmstb_l2_intc_of_init(struct device_node *np,
135 __raw_writel(0xffffffff, data->base + CPU_CLEAR); 135 __raw_writel(0xffffffff, data->base + CPU_CLEAR);
136 136
137 data->parent_irq = irq_of_parse_and_map(np, 0); 137 data->parent_irq = irq_of_parse_and_map(np, 0);
138 if (data->parent_irq < 0) { 138 if (!data->parent_irq) {
139 pr_err("failed to find parent interrupt\n"); 139 pr_err("failed to find parent interrupt\n");
140 ret = data->parent_irq; 140 ret = -EINVAL;
141 goto out_unmap; 141 goto out_unmap;
142 } 142 }
143 143
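The bcm7120-l2 and brcmstb-l2 fixes correct the same misconception: irq_of_parse_and_map() returns an unsigned virq number and yields 0 on failure, never a negative errno, so the check must be for zero and the caller has to pick its own error code. Sketch:

	/* Sketch: correct failure check for irq_of_parse_and_map(). */
	parent_irq = irq_of_parse_and_map(np, 0);
	if (!parent_irq) {		/* 0 means "no mapping", never negative */
		pr_err("failed to map parent interrupt\n");
		return -EINVAL;		/* choose an errno ourselves */
	}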
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index aa29198fca3e..7440c58b8e6f 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -9,26 +9,21 @@
9 * published by the Free Software Foundation. 9 * published by the Free Software Foundation.
10 */ 10 */
11 11
12#include <linux/module.h> 12#include <linux/ctype.h>
13#include <linux/kernel.h> 13#include <linux/device.h>
14#include <linux/err.h>
14#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/kernel.h>
17#include <linux/leds.h>
15#include <linux/list.h> 18#include <linux/list.h>
19#include <linux/module.h>
20#include <linux/slab.h>
16#include <linux/spinlock.h> 21#include <linux/spinlock.h>
17#include <linux/device.h>
18#include <linux/timer.h> 22#include <linux/timer.h>
19#include <linux/err.h>
20#include <linux/ctype.h>
21#include <linux/leds.h>
22#include "leds.h" 23#include "leds.h"
23 24
24static struct class *leds_class; 25static struct class *leds_class;
25 26
26static void led_update_brightness(struct led_classdev *led_cdev)
27{
28 if (led_cdev->brightness_get)
29 led_cdev->brightness = led_cdev->brightness_get(led_cdev);
30}
31
32static ssize_t brightness_show(struct device *dev, 27static ssize_t brightness_show(struct device *dev,
33 struct device_attribute *attr, char *buf) 28 struct device_attribute *attr, char *buf)
34{ 29{
@@ -59,14 +54,14 @@ static ssize_t brightness_store(struct device *dev,
59} 54}
60static DEVICE_ATTR_RW(brightness); 55static DEVICE_ATTR_RW(brightness);
61 56
62static ssize_t led_max_brightness_show(struct device *dev, 57static ssize_t max_brightness_show(struct device *dev,
63 struct device_attribute *attr, char *buf) 58 struct device_attribute *attr, char *buf)
64{ 59{
65 struct led_classdev *led_cdev = dev_get_drvdata(dev); 60 struct led_classdev *led_cdev = dev_get_drvdata(dev);
66 61
67 return sprintf(buf, "%u\n", led_cdev->max_brightness); 62 return sprintf(buf, "%u\n", led_cdev->max_brightness);
68} 63}
69static DEVICE_ATTR(max_brightness, 0444, led_max_brightness_show, NULL); 64static DEVICE_ATTR_RO(max_brightness);
70 65
71#ifdef CONFIG_LEDS_TRIGGERS 66#ifdef CONFIG_LEDS_TRIGGERS
72static DEVICE_ATTR(trigger, 0644, led_trigger_show, led_trigger_store); 67static DEVICE_ATTR(trigger, 0644, led_trigger_show, led_trigger_store);
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
index 71b40d3bf776..aaa8eba9099f 100644
--- a/drivers/leds/led-core.c
+++ b/drivers/leds/led-core.c
@@ -12,10 +12,11 @@
12 */ 12 */
13 13
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/leds.h>
15#include <linux/list.h> 16#include <linux/list.h>
16#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/mutex.h>
17#include <linux/rwsem.h> 19#include <linux/rwsem.h>
18#include <linux/leds.h>
19#include "leds.h" 20#include "leds.h"
20 21
21DECLARE_RWSEM(leds_list_lock); 22DECLARE_RWSEM(leds_list_lock);
@@ -126,3 +127,19 @@ void led_set_brightness(struct led_classdev *led_cdev,
126 __led_set_brightness(led_cdev, brightness); 127 __led_set_brightness(led_cdev, brightness);
127} 128}
128EXPORT_SYMBOL(led_set_brightness); 129EXPORT_SYMBOL(led_set_brightness);
130
131int led_update_brightness(struct led_classdev *led_cdev)
132{
133 int ret = 0;
134
135 if (led_cdev->brightness_get) {
136 ret = led_cdev->brightness_get(led_cdev);
137 if (ret >= 0) {
138 led_cdev->brightness = ret;
139 return 0;
140 }
141 }
142
143 return ret;
144}
145EXPORT_SYMBOL(led_update_brightness);
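The new exported led_update_brightness() turns the old silent refresh into one that can fail: a negative value from the driver's brightness_get() is returned instead of being written into the cached brightness. A hedged sketch of how a sysfs show routine might use it:

	/* Sketch only: refresh the cached value before reporting it. */
	ret = led_update_brightness(led_cdev);
	if (ret < 0)
		return ret;		/* the driver could not read the LED */

	return sprintf(buf, "%u\n", led_cdev->brightness);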
diff --git a/drivers/leds/leds-gpio-register.c b/drivers/leds/leds-gpio-register.c
index 1c4ed5510f35..75717ba68ae0 100644
--- a/drivers/leds/leds-gpio-register.c
+++ b/drivers/leds/leds-gpio-register.c
@@ -7,9 +7,9 @@
7 * Free Software Foundation. 7 * Free Software Foundation.
8 */ 8 */
9#include <linux/err.h> 9#include <linux/err.h>
10#include <linux/leds.h>
10#include <linux/platform_device.h> 11#include <linux/platform_device.h>
11#include <linux/slab.h> 12#include <linux/slab.h>
12#include <linux/leds.h>
13 13
14/** 14/**
15 * gpio_led_register_device - register a gpio-led device 15 * gpio_led_register_device - register a gpio-led device
@@ -28,6 +28,9 @@ struct platform_device *__init gpio_led_register_device(
28 struct platform_device *ret; 28 struct platform_device *ret;
29 struct gpio_led_platform_data _pdata = *pdata; 29 struct gpio_led_platform_data _pdata = *pdata;
30 30
31 if (!pdata->num_leds)
32 return ERR_PTR(-EINVAL);
33
31 _pdata.leds = kmemdup(pdata->leds, 34 _pdata.leds = kmemdup(pdata->leds,
32 pdata->num_leds * sizeof(*pdata->leds), GFP_KERNEL); 35 pdata->num_leds * sizeof(*pdata->leds), GFP_KERNEL);
33 if (!_pdata.leds) 36 if (!_pdata.leds)
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index 57ff20fecf57..b4518c8751c8 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -10,17 +10,17 @@
10 * published by the Free Software Foundation. 10 * published by the Free Software Foundation.
11 * 11 *
12 */ 12 */
13#include <linux/kernel.h> 13#include <linux/err.h>
14#include <linux/platform_device.h>
15#include <linux/gpio.h> 14#include <linux/gpio.h>
15#include <linux/kernel.h>
16#include <linux/leds.h> 16#include <linux/leds.h>
17#include <linux/module.h>
17#include <linux/of.h> 18#include <linux/of.h>
18#include <linux/of_platform.h>
19#include <linux/of_gpio.h> 19#include <linux/of_gpio.h>
20#include <linux/of_platform.h>
21#include <linux/platform_device.h>
20#include <linux/slab.h> 22#include <linux/slab.h>
21#include <linux/workqueue.h> 23#include <linux/workqueue.h>
22#include <linux/module.h>
23#include <linux/err.h>
24 24
25struct gpio_led_data { 25struct gpio_led_data {
26 struct led_classdev cdev; 26 struct led_classdev cdev;
@@ -36,7 +36,7 @@ struct gpio_led_data {
36 36
37static void gpio_led_work(struct work_struct *work) 37static void gpio_led_work(struct work_struct *work)
38{ 38{
39 struct gpio_led_data *led_dat = 39 struct gpio_led_data *led_dat =
40 container_of(work, struct gpio_led_data, work); 40 container_of(work, struct gpio_led_data, work);
41 41
42 if (led_dat->blinking) { 42 if (led_dat->blinking) {
@@ -235,14 +235,12 @@ static struct gpio_leds_priv *gpio_leds_create_of(struct platform_device *pdev)
235} 235}
236#endif /* CONFIG_OF_GPIO */ 236#endif /* CONFIG_OF_GPIO */
237 237
238
239static int gpio_led_probe(struct platform_device *pdev) 238static int gpio_led_probe(struct platform_device *pdev)
240{ 239{
241 struct gpio_led_platform_data *pdata = dev_get_platdata(&pdev->dev); 240 struct gpio_led_platform_data *pdata = dev_get_platdata(&pdev->dev);
242 struct gpio_leds_priv *priv; 241 struct gpio_leds_priv *priv;
243 int i, ret = 0; 242 int i, ret = 0;
244 243
245
246 if (pdata && pdata->num_leds) { 244 if (pdata && pdata->num_leds) {
247 priv = devm_kzalloc(&pdev->dev, 245 priv = devm_kzalloc(&pdev->dev,
248 sizeof_gpio_leds_priv(pdata->num_leds), 246 sizeof_gpio_leds_priv(pdata->num_leds),
diff --git a/drivers/leds/leds-lp3944.c b/drivers/leds/leds-lp3944.c
index 8e1abdcd4c9d..53144fb96167 100644
--- a/drivers/leds/leds-lp3944.c
+++ b/drivers/leds/leds-lp3944.c
@@ -335,7 +335,8 @@ static int lp3944_configure(struct i2c_client *client,
335 } 335 }
336 336
337 /* to expose the default value to userspace */ 337 /* to expose the default value to userspace */
338 led->ldev.brightness = led->status; 338 led->ldev.brightness =
339 (enum led_brightness) led->status;
339 340
340 /* Set the default led status */ 341 /* Set the default led status */
341 err = lp3944_led_set(led, led->status); 342 err = lp3944_led_set(led, led->status);
diff --git a/drivers/leds/trigger/ledtrig-gpio.c b/drivers/leds/trigger/ledtrig-gpio.c
index 35812e3a37f2..c86c41826476 100644
--- a/drivers/leds/trigger/ledtrig-gpio.c
+++ b/drivers/leds/trigger/ledtrig-gpio.c
@@ -48,7 +48,7 @@ static void gpio_trig_work(struct work_struct *work)
48 if (!gpio_data->gpio) 48 if (!gpio_data->gpio)
49 return; 49 return;
50 50
51 tmp = gpio_get_value(gpio_data->gpio); 51 tmp = gpio_get_value_cansleep(gpio_data->gpio);
52 if (gpio_data->inverted) 52 if (gpio_data->inverted)
53 tmp = !tmp; 53 tmp = !tmp;
54 54
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index 6d184dbcaca8..94ed7cefb14d 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -1,3 +1,7 @@
1# Generic MAILBOX API
2
3obj-$(CONFIG_MAILBOX) += mailbox.o
4
1obj-$(CONFIG_PL320_MBOX) += pl320-ipc.o 5obj-$(CONFIG_PL320_MBOX) += pl320-ipc.o
2 6
3obj-$(CONFIG_OMAP2PLUS_MBOX) += omap-mailbox.o 7obj-$(CONFIG_OMAP2PLUS_MBOX) += omap-mailbox.o
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
new file mode 100644
index 000000000000..afcb430508ec
--- /dev/null
+++ b/drivers/mailbox/mailbox.c
@@ -0,0 +1,465 @@
1/*
2 * Mailbox: Common code for Mailbox controllers and users
3 *
4 * Copyright (C) 2013-2014 Linaro Ltd.
5 * Author: Jassi Brar <jassisinghbrar@gmail.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/interrupt.h>
13#include <linux/spinlock.h>
14#include <linux/mutex.h>
15#include <linux/delay.h>
16#include <linux/slab.h>
17#include <linux/err.h>
18#include <linux/module.h>
19#include <linux/device.h>
20#include <linux/bitops.h>
21#include <linux/mailbox_client.h>
22#include <linux/mailbox_controller.h>
23
24#define TXDONE_BY_IRQ BIT(0) /* controller has remote RTR irq */
25#define TXDONE_BY_POLL BIT(1) /* controller can read status of last TX */
26#define TXDONE_BY_ACK	BIT(2) /* S/W ACK received by Client ticks the TX */
27
28static LIST_HEAD(mbox_cons);
29static DEFINE_MUTEX(con_mutex);
30
31static int add_to_rbuf(struct mbox_chan *chan, void *mssg)
32{
33 int idx;
34 unsigned long flags;
35
36 spin_lock_irqsave(&chan->lock, flags);
37
38 /* See if there is any space left */
39 if (chan->msg_count == MBOX_TX_QUEUE_LEN) {
40 spin_unlock_irqrestore(&chan->lock, flags);
41 return -ENOBUFS;
42 }
43
44 idx = chan->msg_free;
45 chan->msg_data[idx] = mssg;
46 chan->msg_count++;
47
48 if (idx == MBOX_TX_QUEUE_LEN - 1)
49 chan->msg_free = 0;
50 else
51 chan->msg_free++;
52
53 spin_unlock_irqrestore(&chan->lock, flags);
54
55 return idx;
56}
57
58static void msg_submit(struct mbox_chan *chan)
59{
60 unsigned count, idx;
61 unsigned long flags;
62 void *data;
63 int err;
64
65 spin_lock_irqsave(&chan->lock, flags);
66
67 if (!chan->msg_count || chan->active_req)
68 goto exit;
69
70 count = chan->msg_count;
71 idx = chan->msg_free;
72 if (idx >= count)
73 idx -= count;
74 else
75 idx += MBOX_TX_QUEUE_LEN - count;
76
77 data = chan->msg_data[idx];
78
79 /* Try to submit a message to the MBOX controller */
80 err = chan->mbox->ops->send_data(chan, data);
81 if (!err) {
82 chan->active_req = data;
83 chan->msg_count--;
84 }
85exit:
86 spin_unlock_irqrestore(&chan->lock, flags);
87}
88
89static void tx_tick(struct mbox_chan *chan, int r)
90{
91 unsigned long flags;
92 void *mssg;
93
94 spin_lock_irqsave(&chan->lock, flags);
95 mssg = chan->active_req;
96 chan->active_req = NULL;
97 spin_unlock_irqrestore(&chan->lock, flags);
98
99 /* Submit next message */
100 msg_submit(chan);
101
102 /* Notify the client */
103 if (mssg && chan->cl->tx_done)
104 chan->cl->tx_done(chan->cl, mssg, r);
105
106 if (chan->cl->tx_block)
107 complete(&chan->tx_complete);
108}
109
110static void poll_txdone(unsigned long data)
111{
112 struct mbox_controller *mbox = (struct mbox_controller *)data;
113 bool txdone, resched = false;
114 int i;
115
116 for (i = 0; i < mbox->num_chans; i++) {
117 struct mbox_chan *chan = &mbox->chans[i];
118
119 if (chan->active_req && chan->cl) {
120 resched = true;
121 txdone = chan->mbox->ops->last_tx_done(chan);
122 if (txdone)
123 tx_tick(chan, 0);
124 }
125 }
126
127 if (resched)
128 mod_timer(&mbox->poll, jiffies +
129 msecs_to_jiffies(mbox->txpoll_period));
130}
131
132/**
133 * mbox_chan_received_data - A way for controller driver to push data
134 * received from remote to the upper layer.
135 * @chan: Pointer to the mailbox channel on which RX happened.
136 * @mssg: Client specific message typecasted as void *
137 *
138 * After startup and before shutdown any data received on the chan
139 * is passed on to the API via atomic mbox_chan_received_data().
140 * The controller should ACK the RX only after this call returns.
141 */
142void mbox_chan_received_data(struct mbox_chan *chan, void *mssg)
143{
144 /* No buffering the received data */
145 if (chan->cl->rx_callback)
146 chan->cl->rx_callback(chan->cl, mssg);
147}
148EXPORT_SYMBOL_GPL(mbox_chan_received_data);
149
150/**
151 * mbox_chan_txdone - A way for controller driver to notify the
152 * framework that the last TX has completed.
153 * @chan: Pointer to the mailbox chan on which TX happened.
154 * @r: Status of last TX - OK or ERROR
155 *
156 * The controller that has IRQ for TX ACK calls this atomic API
157 * to tick the TX state machine. It works only if txdone_irq
158 * is set by the controller.
159 */
160void mbox_chan_txdone(struct mbox_chan *chan, int r)
161{
162 if (unlikely(!(chan->txdone_method & TXDONE_BY_IRQ))) {
163 dev_err(chan->mbox->dev,
164 "Controller can't run the TX ticker\n");
165 return;
166 }
167
168 tx_tick(chan, r);
169}
170EXPORT_SYMBOL_GPL(mbox_chan_txdone);
171
172/**
173 * mbox_client_txdone - The way for a client to run the TX state machine.
174 * @chan: Mailbox channel assigned to this client.
175 * @r: Success status of last transmission.
176 *
177 * The client/protocol had received some 'ACK' packet and it notifies
178 * the API that the last packet was sent successfully. This only works
179 * if the controller can't sense TX-Done.
180 */
181void mbox_client_txdone(struct mbox_chan *chan, int r)
182{
183 if (unlikely(!(chan->txdone_method & TXDONE_BY_ACK))) {
184 dev_err(chan->mbox->dev, "Client can't run the TX ticker\n");
185 return;
186 }
187
188 tx_tick(chan, r);
189}
190EXPORT_SYMBOL_GPL(mbox_client_txdone);
191
192/**
193 * mbox_client_peek_data - A way for client driver to pull data
194 * received from remote by the controller.
195 * @chan: Mailbox channel assigned to this client.
196 *
197 * A poke to controller driver for any received data.
198 * The data is actually passed onto client via the
199 * mbox_chan_received_data()
200 * The call can be made from atomic context, so the controller's
201 * implementation of peek_data() must not sleep.
202 *
203 * Return: True, if controller has, and is going to push after this,
204 * some data.
205 * False, if controller doesn't have any data to be read.
206 */
207bool mbox_client_peek_data(struct mbox_chan *chan)
208{
209 if (chan->mbox->ops->peek_data)
210 return chan->mbox->ops->peek_data(chan);
211
212 return false;
213}
214EXPORT_SYMBOL_GPL(mbox_client_peek_data);
215
216/**
217 * mbox_send_message - For client to submit a message to be
218 * sent to the remote.
219 * @chan: Mailbox channel assigned to this client.
220 * @mssg: Client specific message typecasted.
221 *
222 * For client to submit data to the controller destined for a remote
223 * processor. If the client had set 'tx_block', the call will return
224 * either when the remote receives the data or when 'tx_tout' millisecs
225 * run out.
226 * In non-blocking mode, the requests are buffered by the API and a
227 * non-negative token is returned for each queued request. If the request
228 * is not queued, a negative token is returned. Upon failure or successful
229 * TX, the API calls 'tx_done' from atomic context, from which the client
230 * could submit yet another request.
231 * The pointer to message should be preserved until it is sent
232 * over the chan, i.e., until tx_done() is called.
233 * This function could be called from atomic context as it simply
234 * queues the data and returns a token against the request.
235 *
236 * Return: Non-negative integer for successful submission (non-blocking mode)
237 * or transmission over chan (blocking mode).
238 * Negative value denotes failure.
239 */
240int mbox_send_message(struct mbox_chan *chan, void *mssg)
241{
242 int t;
243
244 if (!chan || !chan->cl)
245 return -EINVAL;
246
247 t = add_to_rbuf(chan, mssg);
248 if (t < 0) {
249 dev_err(chan->mbox->dev, "Try increasing MBOX_TX_QUEUE_LEN\n");
250 return t;
251 }
252
253 msg_submit(chan);
254
255 if (chan->txdone_method == TXDONE_BY_POLL)
256 poll_txdone((unsigned long)chan->mbox);
257
258 if (chan->cl->tx_block && chan->active_req) {
259 unsigned long wait;
260 int ret;
261
262 if (!chan->cl->tx_tout) /* wait forever */
263 wait = msecs_to_jiffies(3600000);
264 else
265 wait = msecs_to_jiffies(chan->cl->tx_tout);
266
267 ret = wait_for_completion_timeout(&chan->tx_complete, wait);
268 if (ret == 0) {
269 t = -EIO;
270 tx_tick(chan, -EIO);
271 }
272 }
273
274 return t;
275}
276EXPORT_SYMBOL_GPL(mbox_send_message);
277
278/**
279 * mbox_request_channel - Request a mailbox channel.
280 * @cl: Identity of the client requesting the channel.
281 * @index: Index of mailbox specifier in 'mboxes' property.
282 *
283 * The Client specifies its requirements and capabilities while asking for
284 * a mailbox channel. It can't be called from atomic context.
285 * The channel is exclusively allocated and can't be used by another
286 * client before the owner calls mbox_free_channel.
287 * After assignment, any packet received on this channel will be
288 * handed over to the client via the 'rx_callback'.
289 * The framework holds reference to the client, so the mbox_client
290 * structure shouldn't be modified until the mbox_free_channel returns.
291 *
292 * Return: Pointer to the channel assigned to the client if successful.
293 * ERR_PTR for request failure.
294 */
295struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index)
296{
297 struct device *dev = cl->dev;
298 struct mbox_controller *mbox;
299 struct of_phandle_args spec;
300 struct mbox_chan *chan;
301 unsigned long flags;
302 int ret;
303
304 if (!dev || !dev->of_node) {
305 pr_debug("%s: No owner device node\n", __func__);
306 return ERR_PTR(-ENODEV);
307 }
308
309 mutex_lock(&con_mutex);
310
311 if (of_parse_phandle_with_args(dev->of_node, "mboxes",
312 "#mbox-cells", index, &spec)) {
313 dev_dbg(dev, "%s: can't parse \"mboxes\" property\n", __func__);
314 mutex_unlock(&con_mutex);
315 return ERR_PTR(-ENODEV);
316 }
317
318 chan = NULL;
319 list_for_each_entry(mbox, &mbox_cons, node)
320 if (mbox->dev->of_node == spec.np) {
321 chan = mbox->of_xlate(mbox, &spec);
322 break;
323 }
324
325 of_node_put(spec.np);
326
327 if (!chan || chan->cl || !try_module_get(mbox->dev->driver->owner)) {
328 dev_dbg(dev, "%s: mailbox not free\n", __func__);
329 mutex_unlock(&con_mutex);
330 return ERR_PTR(-EBUSY);
331 }
332
333 spin_lock_irqsave(&chan->lock, flags);
334 chan->msg_free = 0;
335 chan->msg_count = 0;
336 chan->active_req = NULL;
337 chan->cl = cl;
338 init_completion(&chan->tx_complete);
339
340 if (chan->txdone_method == TXDONE_BY_POLL && cl->knows_txdone)
341 chan->txdone_method |= TXDONE_BY_ACK;
342
343 spin_unlock_irqrestore(&chan->lock, flags);
344
345 ret = chan->mbox->ops->startup(chan);
346 if (ret) {
347 dev_err(dev, "Unable to startup the chan (%d)\n", ret);
348 mbox_free_channel(chan);
349 chan = ERR_PTR(ret);
350 }
351
352 mutex_unlock(&con_mutex);
353 return chan;
354}
355EXPORT_SYMBOL_GPL(mbox_request_channel);
356
357/**
358 * mbox_free_channel - The client relinquishes control of a mailbox
359 * channel by this call.
360 * @chan: The mailbox channel to be freed.
361 */
362void mbox_free_channel(struct mbox_chan *chan)
363{
364 unsigned long flags;
365
366 if (!chan || !chan->cl)
367 return;
368
369 chan->mbox->ops->shutdown(chan);
370
371 /* The queued TX requests are simply aborted, no callbacks are made */
372 spin_lock_irqsave(&chan->lock, flags);
373 chan->cl = NULL;
374 chan->active_req = NULL;
375 if (chan->txdone_method == (TXDONE_BY_POLL | TXDONE_BY_ACK))
376 chan->txdone_method = TXDONE_BY_POLL;
377
378 module_put(chan->mbox->dev->driver->owner);
379 spin_unlock_irqrestore(&chan->lock, flags);
380}
381EXPORT_SYMBOL_GPL(mbox_free_channel);
382
383static struct mbox_chan *
384of_mbox_index_xlate(struct mbox_controller *mbox,
385 const struct of_phandle_args *sp)
386{
387 int ind = sp->args[0];
388
389 if (ind >= mbox->num_chans)
390 return NULL;
391
392 return &mbox->chans[ind];
393}
394
395/**
396 * mbox_controller_register - Register the mailbox controller
397 * @mbox: Pointer to the mailbox controller.
398 *
399 * The controller driver registers its communication channels
400 */
401int mbox_controller_register(struct mbox_controller *mbox)
402{
403 int i, txdone;
404
405 /* Sanity check */
406 if (!mbox || !mbox->dev || !mbox->ops || !mbox->num_chans)
407 return -EINVAL;
408
409 if (mbox->txdone_irq)
410 txdone = TXDONE_BY_IRQ;
411 else if (mbox->txdone_poll)
412 txdone = TXDONE_BY_POLL;
413 else /* It has to be ACK then */
414 txdone = TXDONE_BY_ACK;
415
416 if (txdone == TXDONE_BY_POLL) {
417 mbox->poll.function = &poll_txdone;
418 mbox->poll.data = (unsigned long)mbox;
419 init_timer(&mbox->poll);
420 }
421
422 for (i = 0; i < mbox->num_chans; i++) {
423 struct mbox_chan *chan = &mbox->chans[i];
424
425 chan->cl = NULL;
426 chan->mbox = mbox;
427 chan->txdone_method = txdone;
428 spin_lock_init(&chan->lock);
429 }
430
431 if (!mbox->of_xlate)
432 mbox->of_xlate = of_mbox_index_xlate;
433
434 mutex_lock(&con_mutex);
435 list_add_tail(&mbox->node, &mbox_cons);
436 mutex_unlock(&con_mutex);
437
438 return 0;
439}
440EXPORT_SYMBOL_GPL(mbox_controller_register);
441
442/**
443 * mbox_controller_unregister - Unregister the mailbox controller
444 * @mbox: Pointer to the mailbox controller.
445 */
446void mbox_controller_unregister(struct mbox_controller *mbox)
447{
448 int i;
449
450 if (!mbox)
451 return;
452
453 mutex_lock(&con_mutex);
454
455 list_del(&mbox->node);
456
457 for (i = 0; i < mbox->num_chans; i++)
458 mbox_free_channel(&mbox->chans[i]);
459
460 if (mbox->txdone_poll)
461 del_timer_sync(&mbox->poll);
462
463 mutex_unlock(&con_mutex);
464}
465EXPORT_SYMBOL_GPL(mbox_controller_unregister);
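For orientation, here is a minimal sketch of how a client driver could use the framework introduced above. It is not part of this series: the compatible string, driver name and message layout are invented for illustration, and it assumes the client's device-tree node carries an mboxes = <&mbox 0>; entry that the default of_mbox_index_xlate() can resolve.

#include <linux/err.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/mailbox_client.h>

struct demo_client {
	struct mbox_client cl;
	struct mbox_chan *chan;
};

static void demo_rx_callback(struct mbox_client *cl, void *mssg)
{
	/* Atomic context: called from mbox_chan_received_data() */
	dev_info(cl->dev, "reply: 0x%08x\n", *(u32 *)mssg);
}

static void demo_tx_done(struct mbox_client *cl, void *mssg, int r)
{
	/* Atomic context: called from tx_tick() once the TX completes */
	dev_info(cl->dev, "message %p sent, status %d\n", mssg, r);
}

static int demo_probe(struct platform_device *pdev)
{
	static u32 cmd = 0xdeadbeef;	/* must stay valid until tx_done() */
	struct demo_client *dc;
	int ret;

	dc = devm_kzalloc(&pdev->dev, sizeof(*dc), GFP_KERNEL);
	if (!dc)
		return -ENOMEM;

	dc->cl.dev = &pdev->dev;
	dc->cl.rx_callback = demo_rx_callback;
	dc->cl.tx_done = demo_tx_done;
	dc->cl.tx_block = true;		/* mbox_send_message() blocks... */
	dc->cl.tx_tout = 500;		/* ...for at most 500 ms */
	dc->cl.knows_txdone = false;

	/* index 0 of the "mboxes" property in pdev's DT node */
	dc->chan = mbox_request_channel(&dc->cl, 0);
	if (IS_ERR(dc->chan))
		return PTR_ERR(dc->chan);

	platform_set_drvdata(pdev, dc);

	/* Queue one message; a negative return means it was not sent */
	ret = mbox_send_message(dc->chan, &cmd);
	if (ret < 0) {
		mbox_free_channel(dc->chan);
		return ret;
	}

	return 0;
}

static int demo_remove(struct platform_device *pdev)
{
	struct demo_client *dc = platform_get_drvdata(pdev);

	mbox_free_channel(dc->chan);
	return 0;
}

static const struct of_device_id demo_of_match[] = {
	{ .compatible = "example,mbox-demo-client" },	/* invented for this sketch */
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, demo_of_match);

static struct platform_driver demo_driver = {
	.probe	= demo_probe,
	.remove	= demo_remove,
	.driver	= {
		.name		= "mbox-demo-client",
		.of_match_table	= demo_of_match,
	},
};
module_platform_driver(demo_driver);

MODULE_LICENSE("GPL v2");

With tx_block set, mbox_send_message() sleeps until the controller reports TX-done or the 500 ms tx_tout expires; a client whose protocol carries its own acknowledgements would instead set knows_txdone and call mbox_client_txdone() itself.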
diff --git a/drivers/mailbox/pl320-ipc.c b/drivers/mailbox/pl320-ipc.c
index d873cbae2fbb..f3755e0aa935 100644
--- a/drivers/mailbox/pl320-ipc.c
+++ b/drivers/mailbox/pl320-ipc.c
@@ -26,7 +26,7 @@
26#include <linux/device.h> 26#include <linux/device.h>
27#include <linux/amba/bus.h> 27#include <linux/amba/bus.h>
28 28
29#include <linux/mailbox.h> 29#include <linux/pl320-ipc.h>
30 30
31#define IPCMxSOURCE(m) ((m) * 0x40) 31#define IPCMxSOURCE(m) ((m) * 0x40)
32#define IPCMxDSET(m) (((m) * 0x40) + 0x004) 32#define IPCMxDSET(m) (((m) * 0x40) + 0x004)
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 825ca1f87639..afe79719ea32 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -1434,9 +1434,9 @@ static void drop_buffers(struct dm_bufio_client *c)
1434 1434
1435/* 1435/*
1436 * Test if the buffer is unused and too old, and commit it. 1436 * Test if the buffer is unused and too old, and commit it.
1437 * At if noio is set, we must not do any I/O because we hold 1437 * And if GFP_NOFS is used, we must not do any I/O because we hold
1438 * dm_bufio_clients_lock and we would risk deadlock if the I/O gets rerouted to 1438 * dm_bufio_clients_lock and we would risk deadlock if the I/O gets
1439 * different bufio client. 1439 * rerouted to different bufio client.
1440 */ 1440 */
1441static int __cleanup_old_buffer(struct dm_buffer *b, gfp_t gfp, 1441static int __cleanup_old_buffer(struct dm_buffer *b, gfp_t gfp,
1442 unsigned long max_jiffies) 1442 unsigned long max_jiffies)
@@ -1444,7 +1444,7 @@ static int __cleanup_old_buffer(struct dm_buffer *b, gfp_t gfp,
1444 if (jiffies - b->last_accessed < max_jiffies) 1444 if (jiffies - b->last_accessed < max_jiffies)
1445 return 0; 1445 return 0;
1446 1446
1447 if (!(gfp & __GFP_IO)) { 1447 if (!(gfp & __GFP_FS)) {
1448 if (test_bit(B_READING, &b->state) || 1448 if (test_bit(B_READING, &b->state) ||
1449 test_bit(B_WRITING, &b->state) || 1449 test_bit(B_WRITING, &b->state) ||
1450 test_bit(B_DIRTY, &b->state)) 1450 test_bit(B_DIRTY, &b->state))
@@ -1486,7 +1486,7 @@ dm_bufio_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
1486 unsigned long freed; 1486 unsigned long freed;
1487 1487
1488 c = container_of(shrink, struct dm_bufio_client, shrinker); 1488 c = container_of(shrink, struct dm_bufio_client, shrinker);
1489 if (sc->gfp_mask & __GFP_IO) 1489 if (sc->gfp_mask & __GFP_FS)
1490 dm_bufio_lock(c); 1490 dm_bufio_lock(c);
1491 else if (!dm_bufio_trylock(c)) 1491 else if (!dm_bufio_trylock(c))
1492 return SHRINK_STOP; 1492 return SHRINK_STOP;
@@ -1503,7 +1503,7 @@ dm_bufio_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
1503 unsigned long count; 1503 unsigned long count;
1504 1504
1505 c = container_of(shrink, struct dm_bufio_client, shrinker); 1505 c = container_of(shrink, struct dm_bufio_client, shrinker);
1506 if (sc->gfp_mask & __GFP_IO) 1506 if (sc->gfp_mask & __GFP_FS)
1507 dm_bufio_lock(c); 1507 dm_bufio_lock(c);
1508 else if (!dm_bufio_trylock(c)) 1508 else if (!dm_bufio_trylock(c))
1509 return 0; 1509 return 0;
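An aside on the flag change above: GFP_NOFS allocations still carry __GFP_IO, so the old __GFP_IO test did not keep them from taking the bufio lock; __GFP_FS is the bit actually cleared for NOFS (and NOIO) callers, which is what the shrinker needs to check before it may block. A small illustration, written as an assumption about how the gfp masks were defined at the time:

#include <linux/gfp.h>

/*
 * Illustration only, not part of the patch. At the time, gfp.h defined
 * (simplified):
 *   GFP_NOIO   = __GFP_WAIT
 *   GFP_NOFS   = __GFP_WAIT | __GFP_IO
 *   GFP_KERNEL = __GFP_WAIT | __GFP_IO | __GFP_FS
 */
static inline bool bufio_may_block(gfp_t gfp_mask)
{
	/* True only for GFP_KERNEL-like callers; false for NOFS and NOIO */
	return !!(gfp_mask & __GFP_FS);
}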
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index 4857fa4a5484..07c0fa0fa284 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -789,8 +789,7 @@ struct dm_raid_superblock {
789 __le32 layout; 789 __le32 layout;
790 __le32 stripe_sectors; 790 __le32 stripe_sectors;
791 791
792 __u8 pad[452]; /* Round struct to 512 bytes. */ 792 /* Remainder of a logical block is zero-filled when writing (see super_sync()). */
793 /* Always set to 0 when writing. */
794} __packed; 793} __packed;
795 794
796static int read_disk_sb(struct md_rdev *rdev, int size) 795static int read_disk_sb(struct md_rdev *rdev, int size)
@@ -827,7 +826,7 @@ static void super_sync(struct mddev *mddev, struct md_rdev *rdev)
827 test_bit(Faulty, &(rs->dev[i].rdev.flags))) 826 test_bit(Faulty, &(rs->dev[i].rdev.flags)))
828 failed_devices |= (1ULL << i); 827 failed_devices |= (1ULL << i);
829 828
830 memset(sb, 0, sizeof(*sb)); 829 memset(sb + 1, 0, rdev->sb_size - sizeof(*sb));
831 830
832 sb->magic = cpu_to_le32(DM_RAID_MAGIC); 831 sb->magic = cpu_to_le32(DM_RAID_MAGIC);
833 sb->features = cpu_to_le32(0); /* No features yet */ 832 sb->features = cpu_to_le32(0); /* No features yet */
@@ -862,7 +861,11 @@ static int super_load(struct md_rdev *rdev, struct md_rdev *refdev)
862 uint64_t events_sb, events_refsb; 861 uint64_t events_sb, events_refsb;
863 862
864 rdev->sb_start = 0; 863 rdev->sb_start = 0;
865 rdev->sb_size = sizeof(*sb); 864 rdev->sb_size = bdev_logical_block_size(rdev->meta_bdev);
865 if (rdev->sb_size < sizeof(*sb) || rdev->sb_size > PAGE_SIZE) {
866 DMERR("superblock size of a logical block is no longer valid");
867 return -EINVAL;
868 }
866 869
867 ret = read_disk_sb(rdev, rdev->sb_size); 870 ret = read_disk_sb(rdev, rdev->sb_size);
868 if (ret) 871 if (ret)
@@ -1169,8 +1172,12 @@ static void configure_discard_support(struct dm_target *ti, struct raid_set *rs)
1169 raid456 = (rs->md.level == 4 || rs->md.level == 5 || rs->md.level == 6); 1172 raid456 = (rs->md.level == 4 || rs->md.level == 5 || rs->md.level == 6);
1170 1173
1171 for (i = 0; i < rs->md.raid_disks; i++) { 1174 for (i = 0; i < rs->md.raid_disks; i++) {
1172 struct request_queue *q = bdev_get_queue(rs->dev[i].rdev.bdev); 1175 struct request_queue *q;
1176
1177 if (!rs->dev[i].rdev.bdev)
1178 continue;
1173 1179
1180 q = bdev_get_queue(rs->dev[i].rdev.bdev);
1174 if (!q || !blk_queue_discard(q)) 1181 if (!q || !blk_queue_discard(q))
1175 return; 1182 return;
1176 1183
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index d1600d2aa2e2..f8b37d4c05d8 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -159,8 +159,10 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
159 sc->stripes_shift = __ffs(stripes); 159 sc->stripes_shift = __ffs(stripes);
160 160
161 r = dm_set_target_max_io_len(ti, chunk_size); 161 r = dm_set_target_max_io_len(ti, chunk_size);
162 if (r) 162 if (r) {
163 kfree(sc);
163 return r; 164 return r;
165 }
164 166
165 ti->num_flush_bios = stripes; 167 ti->num_flush_bios = stripes;
166 ti->num_discard_bios = stripes; 168 ti->num_discard_bios = stripes;
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 4843801173fe..0f86d802b533 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -1936,6 +1936,14 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
1936 return DM_MAPIO_SUBMITTED; 1936 return DM_MAPIO_SUBMITTED;
1937 } 1937 }
1938 1938
1939 /*
1940 * We must hold the virtual cell before doing the lookup, otherwise
1941 * there's a race with discard.
1942 */
1943 build_virtual_key(tc->td, block, &key);
1944 if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1, &cell_result))
1945 return DM_MAPIO_SUBMITTED;
1946
1939 r = dm_thin_find_block(td, block, 0, &result); 1947 r = dm_thin_find_block(td, block, 0, &result);
1940 1948
1941 /* 1949 /*
@@ -1959,13 +1967,10 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
1959 * shared flag will be set in their case. 1967 * shared flag will be set in their case.
1960 */ 1968 */
1961 thin_defer_bio(tc, bio); 1969 thin_defer_bio(tc, bio);
1970 cell_defer_no_holder_no_free(tc, &cell1);
1962 return DM_MAPIO_SUBMITTED; 1971 return DM_MAPIO_SUBMITTED;
1963 } 1972 }
1964 1973
1965 build_virtual_key(tc->td, block, &key);
1966 if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1, &cell_result))
1967 return DM_MAPIO_SUBMITTED;
1968
1969 build_data_key(tc->td, result.block, &key); 1974 build_data_key(tc->td, result.block, &key);
1970 if (dm_bio_detain(tc->pool->prison, &key, bio, &cell2, &cell_result)) { 1975 if (dm_bio_detain(tc->pool->prison, &key, bio, &cell2, &cell_result)) {
1971 cell_defer_no_holder_no_free(tc, &cell1); 1976 cell_defer_no_holder_no_free(tc, &cell1);
@@ -1986,6 +1991,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
1986 * of doing so. 1991 * of doing so.
1987 */ 1992 */
1988 handle_unserviceable_bio(tc->pool, bio); 1993 handle_unserviceable_bio(tc->pool, bio);
1994 cell_defer_no_holder_no_free(tc, &cell1);
1989 return DM_MAPIO_SUBMITTED; 1995 return DM_MAPIO_SUBMITTED;
1990 } 1996 }
1991 /* fall through */ 1997 /* fall through */
@@ -1996,6 +2002,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
1996 * provide the hint to load the metadata into cache. 2002 * provide the hint to load the metadata into cache.
1997 */ 2003 */
1998 thin_defer_bio(tc, bio); 2004 thin_defer_bio(tc, bio);
2005 cell_defer_no_holder_no_free(tc, &cell1);
1999 return DM_MAPIO_SUBMITTED; 2006 return DM_MAPIO_SUBMITTED;
2000 2007
2001 default: 2008 default:
@@ -2005,6 +2012,7 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
2005 * pool is switched to fail-io mode. 2012 * pool is switched to fail-io mode.
2006 */ 2013 */
2007 bio_io_error(bio); 2014 bio_io_error(bio);
2015 cell_defer_no_holder_no_free(tc, &cell1);
2008 return DM_MAPIO_SUBMITTED; 2016 return DM_MAPIO_SUBMITTED;
2009 } 2017 }
2010} 2018}
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 4dfa15da9cb8..9233c71138f1 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5121,6 +5121,7 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
5121 printk("md: %s still in use.\n",mdname(mddev)); 5121 printk("md: %s still in use.\n",mdname(mddev));
5122 if (did_freeze) { 5122 if (did_freeze) {
5123 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 5123 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5124 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5124 md_wakeup_thread(mddev->thread); 5125 md_wakeup_thread(mddev->thread);
5125 } 5126 }
5126 err = -EBUSY; 5127 err = -EBUSY;
@@ -5135,6 +5136,8 @@ static int md_set_readonly(struct mddev *mddev, struct block_device *bdev)
5135 mddev->ro = 1; 5136 mddev->ro = 1;
5136 set_disk_ro(mddev->gendisk, 1); 5137 set_disk_ro(mddev->gendisk, 1);
5137 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 5138 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5139 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5140 md_wakeup_thread(mddev->thread);
5138 sysfs_notify_dirent_safe(mddev->sysfs_state); 5141 sysfs_notify_dirent_safe(mddev->sysfs_state);
5139 err = 0; 5142 err = 0;
5140 } 5143 }
@@ -5178,6 +5181,7 @@ static int do_md_stop(struct mddev *mddev, int mode,
5178 mutex_unlock(&mddev->open_mutex); 5181 mutex_unlock(&mddev->open_mutex);
5179 if (did_freeze) { 5182 if (did_freeze) {
5180 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery); 5183 clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
5184 set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
5181 md_wakeup_thread(mddev->thread); 5185 md_wakeup_thread(mddev->thread);
5182 } 5186 }
5183 return -EBUSY; 5187 return -EBUSY;
diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h
index 37d367bb9aa8..bf2b80d5c470 100644
--- a/drivers/md/persistent-data/dm-btree-internal.h
+++ b/drivers/md/persistent-data/dm-btree-internal.h
@@ -42,6 +42,12 @@ struct btree_node {
42} __packed; 42} __packed;
43 43
44 44
45/*
46 * Locks a block using the btree node validator.
47 */
48int bn_read_lock(struct dm_btree_info *info, dm_block_t b,
49 struct dm_block **result);
50
45void inc_children(struct dm_transaction_manager *tm, struct btree_node *n, 51void inc_children(struct dm_transaction_manager *tm, struct btree_node *n,
46 struct dm_btree_value_type *vt); 52 struct dm_btree_value_type *vt);
47 53
diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c
index cf9fd676ae44..1b5e13ec7f96 100644
--- a/drivers/md/persistent-data/dm-btree-spine.c
+++ b/drivers/md/persistent-data/dm-btree-spine.c
@@ -92,7 +92,7 @@ struct dm_block_validator btree_node_validator = {
92 92
93/*----------------------------------------------------------------*/ 93/*----------------------------------------------------------------*/
94 94
95static int bn_read_lock(struct dm_btree_info *info, dm_block_t b, 95int bn_read_lock(struct dm_btree_info *info, dm_block_t b,
96 struct dm_block **result) 96 struct dm_block **result)
97{ 97{
98 return dm_tm_read_lock(info->tm, b, &btree_node_validator, result); 98 return dm_tm_read_lock(info->tm, b, &btree_node_validator, result);
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index 416060c25709..200ac12a1d40 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -847,22 +847,26 @@ EXPORT_SYMBOL_GPL(dm_btree_find_lowest_key);
847 * FIXME: We shouldn't use a recursive algorithm when we have limited stack 847 * FIXME: We shouldn't use a recursive algorithm when we have limited stack
848 * space. Also this only works for single level trees. 848 * space. Also this only works for single level trees.
849 */ 849 */
850static int walk_node(struct ro_spine *s, dm_block_t block, 850static int walk_node(struct dm_btree_info *info, dm_block_t block,
851 int (*fn)(void *context, uint64_t *keys, void *leaf), 851 int (*fn)(void *context, uint64_t *keys, void *leaf),
852 void *context) 852 void *context)
853{ 853{
854 int r; 854 int r;
855 unsigned i, nr; 855 unsigned i, nr;
856 struct dm_block *node;
856 struct btree_node *n; 857 struct btree_node *n;
857 uint64_t keys; 858 uint64_t keys;
858 859
859 r = ro_step(s, block); 860 r = bn_read_lock(info, block, &node);
860 n = ro_node(s); 861 if (r)
862 return r;
863
864 n = dm_block_data(node);
861 865
862 nr = le32_to_cpu(n->header.nr_entries); 866 nr = le32_to_cpu(n->header.nr_entries);
863 for (i = 0; i < nr; i++) { 867 for (i = 0; i < nr; i++) {
864 if (le32_to_cpu(n->header.flags) & INTERNAL_NODE) { 868 if (le32_to_cpu(n->header.flags) & INTERNAL_NODE) {
865 r = walk_node(s, value64(n, i), fn, context); 869 r = walk_node(info, value64(n, i), fn, context);
866 if (r) 870 if (r)
867 goto out; 871 goto out;
868 } else { 872 } else {
@@ -874,7 +878,7 @@ static int walk_node(struct ro_spine *s, dm_block_t block,
874 } 878 }
875 879
876out: 880out:
877 ro_pop(s); 881 dm_tm_unlock(info->tm, node);
878 return r; 882 return r;
879} 883}
880 884
@@ -882,15 +886,7 @@ int dm_btree_walk(struct dm_btree_info *info, dm_block_t root,
882 int (*fn)(void *context, uint64_t *keys, void *leaf), 886 int (*fn)(void *context, uint64_t *keys, void *leaf),
883 void *context) 887 void *context)
884{ 888{
885 int r;
886 struct ro_spine spine;
887
888 BUG_ON(info->levels > 1); 889 BUG_ON(info->levels > 1);
889 890 return walk_node(info, root, fn, context);
890 init_ro_spine(&spine, info);
891 r = walk_node(&spine, root, fn, context);
892 exit_ro_spine(&spine);
893
894 return r;
895} 891}
896EXPORT_SYMBOL_GPL(dm_btree_walk); 892EXPORT_SYMBOL_GPL(dm_btree_walk);
diff --git a/drivers/media/common/saa7146/saa7146_core.c b/drivers/media/common/saa7146/saa7146_core.c
index 97afee672d07..4418119cf707 100644
--- a/drivers/media/common/saa7146/saa7146_core.c
+++ b/drivers/media/common/saa7146/saa7146_core.c
@@ -364,6 +364,9 @@ static int saa7146_init_one(struct pci_dev *pci, const struct pci_device_id *ent
364 goto out; 364 goto out;
365 } 365 }
366 366
367 /* create a nice device name */
368 sprintf(dev->name, "saa7146 (%d)", saa7146_num);
369
367 DEB_EE("pci:%p\n", pci); 370 DEB_EE("pci:%p\n", pci);
368 371
369 err = pci_enable_device(pci); 372 err = pci_enable_device(pci);
@@ -438,9 +441,6 @@ static int saa7146_init_one(struct pci_dev *pci, const struct pci_device_id *ent
438 441
439 /* the rest + print status message */ 442 /* the rest + print status message */
440 443
441 /* create a nice device name */
442 sprintf(dev->name, "saa7146 (%d)", saa7146_num);
443
444 pr_info("found saa7146 @ mem %p (revision %d, irq %d) (0x%04x,0x%04x)\n", 444 pr_info("found saa7146 @ mem %p (revision %d, irq %d) (0x%04x,0x%04x)\n",
445 dev->mem, dev->revision, pci->irq, 445 dev->mem, dev->revision, pci->irq,
446 pci->subsystem_vendor, pci->subsystem_device); 446 pci->subsystem_vendor, pci->subsystem_device);
diff --git a/drivers/media/dvb-core/dvb_frontend.c b/drivers/media/dvb-core/dvb_frontend.c
index b8579ee68bd6..2cf30576bf39 100644
--- a/drivers/media/dvb-core/dvb_frontend.c
+++ b/drivers/media/dvb-core/dvb_frontend.c
@@ -962,6 +962,11 @@ static int dvb_frontend_clear_cache(struct dvb_frontend *fe)
962 case SYS_ATSC: 962 case SYS_ATSC:
963 c->modulation = VSB_8; 963 c->modulation = VSB_8;
964 break; 964 break;
965 case SYS_ISDBS:
966 c->symbol_rate = 28860000;
967 c->rolloff = ROLLOFF_35;
968 c->bandwidth_hz = c->symbol_rate / 100 * 135;
969 break;
965 default: 970 default:
966 c->modulation = QAM_AUTO; 971 c->modulation = QAM_AUTO;
967 break; 972 break;
@@ -2072,6 +2077,7 @@ static int dtv_set_frontend(struct dvb_frontend *fe)
2072 break; 2077 break;
2073 case SYS_DVBS: 2078 case SYS_DVBS:
2074 case SYS_TURBO: 2079 case SYS_TURBO:
2080 case SYS_ISDBS:
2075 rolloff = 135; 2081 rolloff = 135;
2076 break; 2082 break;
2077 case SYS_DVBS2: 2083 case SYS_DVBS2:
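An aside on the ISDB-S defaults added above: both hunks encode the standard's fixed roll-off of 0.35, so the occupied bandwidth works out as

	bandwidth_hz = symbol_rate * (1 + rolloff)
	             = 28 860 000 * 1.35
	             = 38 961 000 Hz

which the code computes as symbol_rate / 100 * 135 to stay in integer arithmetic.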
diff --git a/drivers/media/dvb-frontends/ds3000.c b/drivers/media/dvb-frontends/ds3000.c
index 335daeff91b9..9d0d0347758f 100644
--- a/drivers/media/dvb-frontends/ds3000.c
+++ b/drivers/media/dvb-frontends/ds3000.c
@@ -864,6 +864,13 @@ struct dvb_frontend *ds3000_attach(const struct ds3000_config *config,
864 memcpy(&state->frontend.ops, &ds3000_ops, 864 memcpy(&state->frontend.ops, &ds3000_ops,
865 sizeof(struct dvb_frontend_ops)); 865 sizeof(struct dvb_frontend_ops));
866 state->frontend.demodulator_priv = state; 866 state->frontend.demodulator_priv = state;
867
868 /*
869 * Some devices like T480 starts with voltage on. Be sure
870 * to turn voltage off during init, as this can otherwise
871 * interfere with Unicable SCR systems.
872 */
873 ds3000_set_voltage(&state->frontend, SEC_VOLTAGE_OFF);
867 return &state->frontend; 874 return &state->frontend;
868 875
869error3: 876error3:
diff --git a/drivers/media/dvb-frontends/sp2.c b/drivers/media/dvb-frontends/sp2.c
index 9b684d5c8f91..15bf4318cb74 100644
--- a/drivers/media/dvb-frontends/sp2.c
+++ b/drivers/media/dvb-frontends/sp2.c
@@ -266,7 +266,7 @@ int sp2_ci_poll_slot_status(struct dvb_ca_en50221 *en50221,
266 return s->status; 266 return s->status;
267} 267}
268 268
269int sp2_init(struct sp2 *s) 269static int sp2_init(struct sp2 *s)
270{ 270{
271 int ret = 0; 271 int ret = 0;
272 u8 buf; 272 u8 buf;
@@ -348,7 +348,7 @@ err:
348 return ret; 348 return ret;
349} 349}
350 350
351int sp2_exit(struct i2c_client *client) 351static int sp2_exit(struct i2c_client *client)
352{ 352{
353 struct sp2 *s; 353 struct sp2 *s;
354 354
diff --git a/drivers/media/dvb-frontends/tc90522.c b/drivers/media/dvb-frontends/tc90522.c
index d9905fb52f84..b35d65c9cc05 100644
--- a/drivers/media/dvb-frontends/tc90522.c
+++ b/drivers/media/dvb-frontends/tc90522.c
@@ -216,32 +216,30 @@ static int tc90522s_get_frontend(struct dvb_frontend *fe)
216 c->delivery_system = SYS_ISDBS; 216 c->delivery_system = SYS_ISDBS;
217 217
218 layers = 0; 218 layers = 0;
219 ret = reg_read(state, 0xe8, val, 3); 219 ret = reg_read(state, 0xe6, val, 5);
220 if (ret == 0) { 220 if (ret == 0) {
221 int slots;
222 u8 v; 221 u8 v;
223 222
223 c->stream_id = val[0] << 8 | val[1];
224
224 /* high/single layer */ 225 /* high/single layer */
225 v = (val[0] & 0x70) >> 4; 226 v = (val[2] & 0x70) >> 4;
226 c->modulation = (v == 7) ? PSK_8 : QPSK; 227 c->modulation = (v == 7) ? PSK_8 : QPSK;
227 c->fec_inner = fec_conv_sat[v]; 228 c->fec_inner = fec_conv_sat[v];
228 c->layer[0].fec = c->fec_inner; 229 c->layer[0].fec = c->fec_inner;
229 c->layer[0].modulation = c->modulation; 230 c->layer[0].modulation = c->modulation;
230 c->layer[0].segment_count = val[1] & 0x3f; /* slots */ 231 c->layer[0].segment_count = val[3] & 0x3f; /* slots */
231 232
232 /* low layer */ 233 /* low layer */
233 v = (val[0] & 0x07); 234 v = (val[2] & 0x07);
234 c->layer[1].fec = fec_conv_sat[v]; 235 c->layer[1].fec = fec_conv_sat[v];
235 if (v == 0) /* no low layer */ 236 if (v == 0) /* no low layer */
236 c->layer[1].segment_count = 0; 237 c->layer[1].segment_count = 0;
237 else 238 else
238 c->layer[1].segment_count = val[2] & 0x3f; /* slots */ 239 c->layer[1].segment_count = val[4] & 0x3f; /* slots */
239 /* actually, BPSK if v==1, but not defined in fe_modulation_t */ 240 /* actually, BPSK if v==1, but not defined in fe_modulation_t */
240 c->layer[1].modulation = QPSK; 241 c->layer[1].modulation = QPSK;
241 layers = (v > 0) ? 2 : 1; 242 layers = (v > 0) ? 2 : 1;
242
243 slots = c->layer[0].segment_count + c->layer[1].segment_count;
244 c->symbol_rate = 28860000 * slots / 48;
245 } 243 }
246 244
247 /* statistics */ 245 /* statistics */
@@ -363,7 +361,7 @@ static int tc90522t_get_frontend(struct dvb_frontend *fe)
363 u8 v; 361 u8 v;
364 362
365 c->isdbt_partial_reception = val[0] & 0x01; 363 c->isdbt_partial_reception = val[0] & 0x01;
366 c->isdbt_sb_mode = (val[0] & 0xc0) == 0x01; 364 c->isdbt_sb_mode = (val[0] & 0xc0) == 0x40;
367 365
368 /* layer A */ 366 /* layer A */
369 v = (val[2] & 0x78) >> 3; 367 v = (val[2] & 0x78) >> 3;
diff --git a/drivers/media/i2c/smiapp/smiapp-core.c b/drivers/media/i2c/smiapp/smiapp-core.c
index 932ed9be9ff3..b10aaeda2bb4 100644
--- a/drivers/media/i2c/smiapp/smiapp-core.c
+++ b/drivers/media/i2c/smiapp/smiapp-core.c
@@ -2190,7 +2190,7 @@ static int smiapp_set_selection(struct v4l2_subdev *subdev,
2190 ret = smiapp_set_compose(subdev, fh, sel); 2190 ret = smiapp_set_compose(subdev, fh, sel);
2191 break; 2191 break;
2192 default: 2192 default:
2193 BUG(); 2193 ret = -EINVAL;
2194 } 2194 }
2195 2195
2196 mutex_unlock(&sensor->mutex); 2196 mutex_unlock(&sensor->mutex);
diff --git a/drivers/media/pci/cx23885/cx23885-core.c b/drivers/media/pci/cx23885/cx23885-core.c
index 331eddac7222..3bd386c371f7 100644
--- a/drivers/media/pci/cx23885/cx23885-core.c
+++ b/drivers/media/pci/cx23885/cx23885-core.c
@@ -1078,7 +1078,7 @@ static __le32 *cx23885_risc_field(__le32 *rp, struct scatterlist *sglist,
1078 for (line = 0; line < lines; line++) { 1078 for (line = 0; line < lines; line++) {
1079 while (offset && offset >= sg_dma_len(sg)) { 1079 while (offset && offset >= sg_dma_len(sg)) {
1080 offset -= sg_dma_len(sg); 1080 offset -= sg_dma_len(sg);
1081 sg++; 1081 sg = sg_next(sg);
1082 } 1082 }
1083 1083
1084 if (lpi && line > 0 && !(line % lpi)) 1084 if (lpi && line > 0 && !(line % lpi))
@@ -1101,14 +1101,14 @@ static __le32 *cx23885_risc_field(__le32 *rp, struct scatterlist *sglist,
1101 *(rp++) = cpu_to_le32(0); /* bits 63-32 */ 1101 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
1102 todo -= (sg_dma_len(sg)-offset); 1102 todo -= (sg_dma_len(sg)-offset);
1103 offset = 0; 1103 offset = 0;
1104 sg++; 1104 sg = sg_next(sg);
1105 while (todo > sg_dma_len(sg)) { 1105 while (todo > sg_dma_len(sg)) {
1106 *(rp++) = cpu_to_le32(RISC_WRITE| 1106 *(rp++) = cpu_to_le32(RISC_WRITE|
1107 sg_dma_len(sg)); 1107 sg_dma_len(sg));
1108 *(rp++) = cpu_to_le32(sg_dma_address(sg)); 1108 *(rp++) = cpu_to_le32(sg_dma_address(sg));
1109 *(rp++) = cpu_to_le32(0); /* bits 63-32 */ 1109 *(rp++) = cpu_to_le32(0); /* bits 63-32 */
1110 todo -= sg_dma_len(sg); 1110 todo -= sg_dma_len(sg);
1111 sg++; 1111 sg = sg_next(sg);
1112 } 1112 }
1113 *(rp++) = cpu_to_le32(RISC_WRITE|RISC_EOL|todo); 1113 *(rp++) = cpu_to_le32(RISC_WRITE|RISC_EOL|todo);
1114 *(rp++) = cpu_to_le32(sg_dma_address(sg)); 1114 *(rp++) = cpu_to_le32(sg_dma_address(sg));
diff --git a/drivers/media/pci/cx23885/cx23885-dvb.c b/drivers/media/pci/cx23885/cx23885-dvb.c
index 13734b8c7917..4cb90317ff45 100644
--- a/drivers/media/pci/cx23885/cx23885-dvb.c
+++ b/drivers/media/pci/cx23885/cx23885-dvb.c
@@ -1600,6 +1600,7 @@ static int dvb_register(struct cx23885_tsport *port)
1600 break; 1600 break;
1601 1601
1602 /* attach tuner */ 1602 /* attach tuner */
1603 memset(&m88ts2022_config, 0, sizeof(m88ts2022_config));
1603 m88ts2022_config.fe = fe0->dvb.frontend; 1604 m88ts2022_config.fe = fe0->dvb.frontend;
1604 m88ts2022_config.clock = 27000000; 1605 m88ts2022_config.clock = 27000000;
1605 memset(&info, 0, sizeof(struct i2c_board_info)); 1606 memset(&info, 0, sizeof(struct i2c_board_info));
@@ -1635,6 +1636,7 @@ static int dvb_register(struct cx23885_tsport *port)
1635 /* port c - terrestrial/cable */ 1636 /* port c - terrestrial/cable */
1636 case 2: 1637 case 2:
1637 /* attach frontend */ 1638 /* attach frontend */
1639 memset(&si2168_config, 0, sizeof(si2168_config));
1638 si2168_config.i2c_adapter = &adapter; 1640 si2168_config.i2c_adapter = &adapter;
1639 si2168_config.fe = &fe0->dvb.frontend; 1641 si2168_config.fe = &fe0->dvb.frontend;
1640 si2168_config.ts_mode = SI2168_TS_SERIAL; 1642 si2168_config.ts_mode = SI2168_TS_SERIAL;
@@ -1654,6 +1656,7 @@ static int dvb_register(struct cx23885_tsport *port)
1654 port->i2c_client_demod = client_demod; 1656 port->i2c_client_demod = client_demod;
1655 1657
1656 /* attach tuner */ 1658 /* attach tuner */
1659 memset(&si2157_config, 0, sizeof(si2157_config));
1657 si2157_config.fe = fe0->dvb.frontend; 1660 si2157_config.fe = fe0->dvb.frontend;
1658 memset(&info, 0, sizeof(struct i2c_board_info)); 1661 memset(&info, 0, sizeof(struct i2c_board_info));
1659 strlcpy(info.type, "si2157", I2C_NAME_SIZE); 1662 strlcpy(info.type, "si2157", I2C_NAME_SIZE);
diff --git a/drivers/media/pci/solo6x10/solo6x10-core.c b/drivers/media/pci/solo6x10/solo6x10-core.c
index 172583d736fe..8cbe6b49f4c2 100644
--- a/drivers/media/pci/solo6x10/solo6x10-core.c
+++ b/drivers/media/pci/solo6x10/solo6x10-core.c
@@ -105,11 +105,8 @@ static irqreturn_t solo_isr(int irq, void *data)
105 if (!status) 105 if (!status)
106 return IRQ_NONE; 106 return IRQ_NONE;
107 107
108 if (status & ~solo_dev->irq_mask) { 108 /* Acknowledge all interrupts immediately */
109 solo_reg_write(solo_dev, SOLO_IRQ_STAT, 109 solo_reg_write(solo_dev, SOLO_IRQ_STAT, status);
110 status & ~solo_dev->irq_mask);
111 status &= solo_dev->irq_mask;
112 }
113 110
114 if (status & SOLO_IRQ_PCI_ERR) 111 if (status & SOLO_IRQ_PCI_ERR)
115 solo_p2m_error_isr(solo_dev); 112 solo_p2m_error_isr(solo_dev);
@@ -132,9 +129,6 @@ static irqreturn_t solo_isr(int irq, void *data)
132 if (status & SOLO_IRQ_G723) 129 if (status & SOLO_IRQ_G723)
133 solo_g723_isr(solo_dev); 130 solo_g723_isr(solo_dev);
134 131
135 /* Clear all interrupts handled */
136 solo_reg_write(solo_dev, SOLO_IRQ_STAT, status);
137
138 return IRQ_HANDLED; 132 return IRQ_HANDLED;
139} 133}
140 134
diff --git a/drivers/media/pci/tw68/Kconfig b/drivers/media/pci/tw68/Kconfig
index 5425ba1e320d..95d5d5202048 100644
--- a/drivers/media/pci/tw68/Kconfig
+++ b/drivers/media/pci/tw68/Kconfig
@@ -1,7 +1,6 @@
1config VIDEO_TW68 1config VIDEO_TW68
2 tristate "Techwell tw68x Video For Linux" 2 tristate "Techwell tw68x Video For Linux"
3 depends on VIDEO_DEV && PCI && VIDEO_V4L2 3 depends on VIDEO_DEV && PCI && VIDEO_V4L2
4 select I2C_ALGOBIT
5 select VIDEOBUF2_DMA_SG 4 select VIDEOBUF2_DMA_SG
6 ---help--- 5 ---help---
7 Support for Techwell tw68xx based frame grabber boards. 6 Support for Techwell tw68xx based frame grabber boards.
diff --git a/drivers/media/pci/tw68/tw68-core.c b/drivers/media/pci/tw68/tw68-core.c
index a6fb48cf7aae..63f0b64057cb 100644
--- a/drivers/media/pci/tw68/tw68-core.c
+++ b/drivers/media/pci/tw68/tw68-core.c
@@ -306,7 +306,7 @@ static int tw68_initdev(struct pci_dev *pci_dev,
306 306
307 /* get irq */ 307 /* get irq */
308 err = devm_request_irq(&pci_dev->dev, pci_dev->irq, tw68_irq, 308 err = devm_request_irq(&pci_dev->dev, pci_dev->irq, tw68_irq,
309 IRQF_SHARED | IRQF_DISABLED, dev->name, dev); 309 IRQF_SHARED, dev->name, dev);
310 if (err < 0) { 310 if (err < 0) {
311 pr_err("%s: can't get IRQ %d\n", 311 pr_err("%s: can't get IRQ %d\n",
312 dev->name, pci_dev->irq); 312 dev->name, pci_dev->irq);
diff --git a/drivers/media/platform/Kconfig b/drivers/media/platform/Kconfig
index bee9074ebc13..3aac88f1d54a 100644
--- a/drivers/media/platform/Kconfig
+++ b/drivers/media/platform/Kconfig
@@ -166,7 +166,7 @@ config VIDEO_MEM2MEM_DEINTERLACE
166config VIDEO_SAMSUNG_S5P_G2D 166config VIDEO_SAMSUNG_S5P_G2D
167 tristate "Samsung S5P and EXYNOS4 G2D 2d graphics accelerator driver" 167 tristate "Samsung S5P and EXYNOS4 G2D 2d graphics accelerator driver"
168 depends on VIDEO_DEV && VIDEO_V4L2 168 depends on VIDEO_DEV && VIDEO_V4L2
169 depends on PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST 169 depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
170 depends on HAS_DMA 170 depends on HAS_DMA
171 select VIDEOBUF2_DMA_CONTIG 171 select VIDEOBUF2_DMA_CONTIG
172 select V4L2_MEM2MEM_DEV 172 select V4L2_MEM2MEM_DEV
@@ -178,7 +178,7 @@ config VIDEO_SAMSUNG_S5P_G2D
178config VIDEO_SAMSUNG_S5P_JPEG 178config VIDEO_SAMSUNG_S5P_JPEG
179 tristate "Samsung S5P/Exynos3250/Exynos4 JPEG codec driver" 179 tristate "Samsung S5P/Exynos3250/Exynos4 JPEG codec driver"
180 depends on VIDEO_DEV && VIDEO_V4L2 180 depends on VIDEO_DEV && VIDEO_V4L2
181 depends on PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST 181 depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
182 depends on HAS_DMA 182 depends on HAS_DMA
183 select VIDEOBUF2_DMA_CONTIG 183 select VIDEOBUF2_DMA_CONTIG
184 select V4L2_MEM2MEM_DEV 184 select V4L2_MEM2MEM_DEV
@@ -189,7 +189,7 @@ config VIDEO_SAMSUNG_S5P_JPEG
189config VIDEO_SAMSUNG_S5P_MFC 189config VIDEO_SAMSUNG_S5P_MFC
190 tristate "Samsung S5P MFC Video Codec" 190 tristate "Samsung S5P MFC Video Codec"
191 depends on VIDEO_DEV && VIDEO_V4L2 191 depends on VIDEO_DEV && VIDEO_V4L2
192 depends on PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST 192 depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
193 depends on HAS_DMA 193 depends on HAS_DMA
194 select VIDEOBUF2_DMA_CONTIG 194 select VIDEOBUF2_DMA_CONTIG
195 default n 195 default n
diff --git a/drivers/media/platform/exynos4-is/Kconfig b/drivers/media/platform/exynos4-is/Kconfig
index 77c951237744..b7b2e472240a 100644
--- a/drivers/media/platform/exynos4-is/Kconfig
+++ b/drivers/media/platform/exynos4-is/Kconfig
@@ -2,7 +2,7 @@
2config VIDEO_SAMSUNG_EXYNOS4_IS 2config VIDEO_SAMSUNG_EXYNOS4_IS
3 bool "Samsung S5P/EXYNOS4 SoC series Camera Subsystem driver" 3 bool "Samsung S5P/EXYNOS4 SoC series Camera Subsystem driver"
4 depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API 4 depends on VIDEO_V4L2 && VIDEO_V4L2_SUBDEV_API
5 depends on (PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST) 5 depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
6 depends on OF && COMMON_CLK 6 depends on OF && COMMON_CLK
7 help 7 help
8 Say Y here to enable camera host interface devices for 8 Say Y here to enable camera host interface devices for
diff --git a/drivers/media/platform/exynos4-is/fimc-core.c b/drivers/media/platform/exynos4-is/fimc-core.c
index b70fd996d794..aee92d908e49 100644
--- a/drivers/media/platform/exynos4-is/fimc-core.c
+++ b/drivers/media/platform/exynos4-is/fimc-core.c
@@ -832,6 +832,7 @@ err:
832 return -ENXIO; 832 return -ENXIO;
833} 833}
834 834
835#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
835static int fimc_m2m_suspend(struct fimc_dev *fimc) 836static int fimc_m2m_suspend(struct fimc_dev *fimc)
836{ 837{
837 unsigned long flags; 838 unsigned long flags;
@@ -870,6 +871,7 @@ static int fimc_m2m_resume(struct fimc_dev *fimc)
870 871
871 return 0; 872 return 0;
872} 873}
874#endif /* CONFIG_PM_RUNTIME || CONFIG_PM_SLEEP */
873 875
874static const struct of_device_id fimc_of_match[]; 876static const struct of_device_id fimc_of_match[];
875 877
diff --git a/drivers/media/platform/s5p-jpeg/jpeg-core.c b/drivers/media/platform/s5p-jpeg/jpeg-core.c
index e525a7c8d885..6fcc7f072ace 100644
--- a/drivers/media/platform/s5p-jpeg/jpeg-core.c
+++ b/drivers/media/platform/s5p-jpeg/jpeg-core.c
@@ -893,7 +893,7 @@ static bool s5p_jpeg_parse_hdr(struct s5p_jpeg_q_data *result,
893 unsigned long buffer, unsigned long size, 893 unsigned long buffer, unsigned long size,
894 struct s5p_jpeg_ctx *ctx) 894 struct s5p_jpeg_ctx *ctx)
895{ 895{
896 int c, components, notfound; 896 int c, components = 0, notfound;
897 unsigned int height, width, word, subsampling = 0; 897 unsigned int height, width, word, subsampling = 0;
898 long length; 898 long length;
899 struct s5p_jpeg_buffer jpeg_buffer; 899 struct s5p_jpeg_buffer jpeg_buffer;
@@ -2632,6 +2632,7 @@ static int s5p_jpeg_remove(struct platform_device *pdev)
2632 return 0; 2632 return 0;
2633} 2633}
2634 2634
2635#if defined(CONFIG_PM_RUNTIME) || defined(CONFIG_PM_SLEEP)
2635static int s5p_jpeg_runtime_suspend(struct device *dev) 2636static int s5p_jpeg_runtime_suspend(struct device *dev)
2636{ 2637{
2637 struct s5p_jpeg *jpeg = dev_get_drvdata(dev); 2638 struct s5p_jpeg *jpeg = dev_get_drvdata(dev);
@@ -2681,7 +2682,9 @@ static int s5p_jpeg_runtime_resume(struct device *dev)
2681 2682
2682 return 0; 2683 return 0;
2683} 2684}
2685#endif /* CONFIG_PM_RUNTIME || CONFIG_PM_SLEEP */
2684 2686
2687#ifdef CONFIG_PM_SLEEP
2685static int s5p_jpeg_suspend(struct device *dev) 2688static int s5p_jpeg_suspend(struct device *dev)
2686{ 2689{
2687 if (pm_runtime_suspended(dev)) 2690 if (pm_runtime_suspended(dev))
@@ -2697,6 +2700,7 @@ static int s5p_jpeg_resume(struct device *dev)
2697 2700
2698 return s5p_jpeg_runtime_resume(dev); 2701 return s5p_jpeg_runtime_resume(dev);
2699} 2702}
2703#endif
2700 2704
2701static const struct dev_pm_ops s5p_jpeg_pm_ops = { 2705static const struct dev_pm_ops s5p_jpeg_pm_ops = {
2702 SET_SYSTEM_SLEEP_PM_OPS(s5p_jpeg_suspend, s5p_jpeg_resume) 2706 SET_SYSTEM_SLEEP_PM_OPS(s5p_jpeg_suspend, s5p_jpeg_resume)
diff --git a/drivers/media/platform/s5p-tv/Kconfig b/drivers/media/platform/s5p-tv/Kconfig
index a9d56f8936b4..beb180e71ba0 100644
--- a/drivers/media/platform/s5p-tv/Kconfig
+++ b/drivers/media/platform/s5p-tv/Kconfig
@@ -9,7 +9,7 @@
9config VIDEO_SAMSUNG_S5P_TV 9config VIDEO_SAMSUNG_S5P_TV
10 bool "Samsung TV driver for S5P platform" 10 bool "Samsung TV driver for S5P platform"
11 depends on PM_RUNTIME 11 depends on PM_RUNTIME
12 depends on PLAT_S5P || ARCH_EXYNOS || COMPILE_TEST 12 depends on ARCH_S5PV210 || ARCH_EXYNOS || COMPILE_TEST
13 default n 13 default n
14 ---help--- 14 ---help---
15 Say Y here to enable selecting the TV output devices for 15 Say Y here to enable selecting the TV output devices for
diff --git a/drivers/media/platform/vivid/Kconfig b/drivers/media/platform/vivid/Kconfig
index d71139a2ae00..c3090932f06d 100644
--- a/drivers/media/platform/vivid/Kconfig
+++ b/drivers/media/platform/vivid/Kconfig
@@ -1,8 +1,11 @@
1config VIDEO_VIVID 1config VIDEO_VIVID
2 tristate "Virtual Video Test Driver" 2 tristate "Virtual Video Test Driver"
3 depends on VIDEO_DEV && VIDEO_V4L2 && !SPARC32 && !SPARC64 3 depends on VIDEO_DEV && VIDEO_V4L2 && !SPARC32 && !SPARC64 && FB
4 select FONT_SUPPORT 4 select FONT_SUPPORT
5 select FONT_8x16 5 select FONT_8x16
6 select FB_CFB_FILLRECT
7 select FB_CFB_COPYAREA
8 select FB_CFB_IMAGEBLIT
6 select VIDEOBUF2_VMALLOC 9 select VIDEOBUF2_VMALLOC
7 default n 10 default n
8 ---help--- 11 ---help---
diff --git a/drivers/media/platform/vivid/vivid-core.c b/drivers/media/platform/vivid/vivid-core.c
index 2c61a62ab48b..686c3c2ad05b 100644
--- a/drivers/media/platform/vivid/vivid-core.c
+++ b/drivers/media/platform/vivid/vivid-core.c
@@ -100,11 +100,9 @@ MODULE_PARM_DESC(ccs_out_mode, " output crop/compose/scale mode:\n"
100 "\t\t bit 0=crop, 1=compose, 2=scale,\n" 100 "\t\t bit 0=crop, 1=compose, 2=scale,\n"
101 "\t\t -1=user-controlled (default)"); 101 "\t\t -1=user-controlled (default)");
102 102
103static unsigned multiplanar[VIVID_MAX_DEVS]; 103static unsigned multiplanar[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = 1 };
104module_param_array(multiplanar, uint, NULL, 0444); 104module_param_array(multiplanar, uint, NULL, 0444);
105MODULE_PARM_DESC(multiplanar, " 0 (default) is alternating single and multiplanar devices,\n" 105MODULE_PARM_DESC(multiplanar, " 1 (default) creates a single planar device, 2 creates a multiplanar device.");
106 "\t\t 1 is single planar devices,\n"
107 "\t\t 2 is multiplanar devices");
108 106
109/* Default: video + vbi-cap (raw and sliced) + radio rx + radio tx + sdr + vbi-out + vid-out */ 107/* Default: video + vbi-cap (raw and sliced) + radio rx + radio tx + sdr + vbi-out + vid-out */
110static unsigned node_types[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = 0x1d3d }; 108static unsigned node_types[VIVID_MAX_DEVS] = { [0 ... (VIVID_MAX_DEVS - 1)] = 0x1d3d };
@@ -669,10 +667,7 @@ static int __init vivid_create_instance(int inst)
669 /* start detecting feature set */ 667 /* start detecting feature set */
670 668
671 /* do we use single- or multi-planar? */ 669 /* do we use single- or multi-planar? */
672 if (multiplanar[inst] == 0) 670 dev->multiplanar = multiplanar[inst] > 1;
673 dev->multiplanar = inst & 1;
674 else
675 dev->multiplanar = multiplanar[inst] > 1;
676 v4l2_info(&dev->v4l2_dev, "using %splanar format API\n", 671 v4l2_info(&dev->v4l2_dev, "using %splanar format API\n",
677 dev->multiplanar ? "multi" : "single "); 672 dev->multiplanar ? "multi" : "single ");
678 673
diff --git a/drivers/media/platform/vivid/vivid-tpg.c b/drivers/media/platform/vivid/vivid-tpg.c
index 0c6fa53fa646..cbcd6250e7b2 100644
--- a/drivers/media/platform/vivid/vivid-tpg.c
+++ b/drivers/media/platform/vivid/vivid-tpg.c
@@ -136,7 +136,7 @@ int tpg_alloc(struct tpg_data *tpg, unsigned max_w)
136 tpg->black_line[plane] = vzalloc(max_w * pixelsz); 136 tpg->black_line[plane] = vzalloc(max_w * pixelsz);
137 if (!tpg->black_line[plane]) 137 if (!tpg->black_line[plane])
138 return -ENOMEM; 138 return -ENOMEM;
139 tpg->random_line[plane] = vzalloc(max_w * pixelsz); 139 tpg->random_line[plane] = vzalloc(max_w * 2 * pixelsz);
140 if (!tpg->random_line[plane]) 140 if (!tpg->random_line[plane])
141 return -ENOMEM; 141 return -ENOMEM;
142 } 142 }
diff --git a/drivers/media/radio/wl128x/fmdrv_common.c b/drivers/media/radio/wl128x/fmdrv_common.c
index 6f28f6e02ea5..704397f3c106 100644
--- a/drivers/media/radio/wl128x/fmdrv_common.c
+++ b/drivers/media/radio/wl128x/fmdrv_common.c
@@ -1256,7 +1256,7 @@ static int fm_download_firmware(struct fmdev *fmdev, const u8 *fw_name)
1256 fmerr("Unable to read firmware(%s) content\n", fw_name); 1256 fmerr("Unable to read firmware(%s) content\n", fw_name);
1257 return ret; 1257 return ret;
1258 } 1258 }
1259 fmdbg("Firmware(%s) length : %d bytes\n", fw_name, fw_entry->size); 1259 fmdbg("Firmware(%s) length : %zu bytes\n", fw_name, fw_entry->size);
1260 1260
1261 fw_data = (void *)fw_entry->data; 1261 fw_data = (void *)fw_entry->data;
1262 fw_len = fw_entry->size; 1262 fw_len = fw_entry->size;
diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c
index b8837dd39bb2..65f80b8b9f7a 100644
--- a/drivers/media/rc/imon.c
+++ b/drivers/media/rc/imon.c
@@ -1678,7 +1678,8 @@ static void imon_incoming_packet(struct imon_context *ictx,
1678 if (press_type == 0) 1678 if (press_type == 0)
1679 rc_keyup(ictx->rdev); 1679 rc_keyup(ictx->rdev);
1680 else { 1680 else {
1681 if (ictx->rc_type == RC_BIT_RC6_MCE) 1681 if (ictx->rc_type == RC_BIT_RC6_MCE ||
1682 ictx->rc_type == RC_BIT_OTHER)
1682 rc_keydown(ictx->rdev, 1683 rc_keydown(ictx->rdev,
1683 ictx->rc_type == RC_BIT_RC6_MCE ? RC_TYPE_RC6_MCE : RC_TYPE_OTHER, 1684 ictx->rc_type == RC_BIT_RC6_MCE ? RC_TYPE_RC6_MCE : RC_TYPE_OTHER,
1684 ictx->rc_scancode, ictx->rc_toggle); 1685 ictx->rc_scancode, ictx->rc_toggle);
diff --git a/drivers/media/rc/ir-hix5hd2.c b/drivers/media/rc/ir-hix5hd2.c
index 08bbd4f508cd..b0df62961c14 100644
--- a/drivers/media/rc/ir-hix5hd2.c
+++ b/drivers/media/rc/ir-hix5hd2.c
@@ -297,7 +297,7 @@ static int hix5hd2_ir_remove(struct platform_device *pdev)
297 return 0; 297 return 0;
298} 298}
299 299
300#ifdef CONFIG_PM 300#ifdef CONFIG_PM_SLEEP
301static int hix5hd2_ir_suspend(struct device *dev) 301static int hix5hd2_ir_suspend(struct device *dev)
302{ 302{
303 struct hix5hd2_ir_priv *priv = dev_get_drvdata(dev); 303 struct hix5hd2_ir_priv *priv = dev_get_drvdata(dev);
diff --git a/drivers/media/rc/ir-rc5-decoder.c b/drivers/media/rc/ir-rc5-decoder.c
index 2ef763928ca4..84fa6e9b59a1 100644
--- a/drivers/media/rc/ir-rc5-decoder.c
+++ b/drivers/media/rc/ir-rc5-decoder.c
@@ -53,7 +53,7 @@ static int ir_rc5_decode(struct rc_dev *dev, struct ir_raw_event ev)
53 u32 scancode; 53 u32 scancode;
54 enum rc_type protocol; 54 enum rc_type protocol;
55 55
56 if (!(dev->enabled_protocols & (RC_BIT_RC5 | RC_BIT_RC5X))) 56 if (!(dev->enabled_protocols & (RC_BIT_RC5 | RC_BIT_RC5X | RC_BIT_RC5_SZ)))
57 return 0; 57 return 0;
58 58
59 if (!is_timing_event(ev)) { 59 if (!is_timing_event(ev)) {
diff --git a/drivers/media/rc/ir-rc6-decoder.c b/drivers/media/rc/ir-rc6-decoder.c
index f1f098e22f7e..d16bc67af732 100644
--- a/drivers/media/rc/ir-rc6-decoder.c
+++ b/drivers/media/rc/ir-rc6-decoder.c
@@ -259,8 +259,8 @@ again:
259 case 32: 259 case 32:
260 if ((scancode & RC6_6A_LCC_MASK) == RC6_6A_MCE_CC) { 260 if ((scancode & RC6_6A_LCC_MASK) == RC6_6A_MCE_CC) {
261 protocol = RC_TYPE_RC6_MCE; 261 protocol = RC_TYPE_RC6_MCE;
262 scancode &= ~RC6_6A_MCE_TOGGLE_MASK;
263 toggle = !!(scancode & RC6_6A_MCE_TOGGLE_MASK); 262 toggle = !!(scancode & RC6_6A_MCE_TOGGLE_MASK);
263 scancode &= ~RC6_6A_MCE_TOGGLE_MASK;
264 } else { 264 } else {
265 protocol = RC_BIT_RC6_6A_32; 265 protocol = RC_BIT_RC6_6A_32;
266 toggle = 0; 266 toggle = 0;
diff --git a/drivers/media/rc/rc-ir-raw.c b/drivers/media/rc/rc-ir-raw.c
index e8fff2add265..b732ac6a26d8 100644
--- a/drivers/media/rc/rc-ir-raw.c
+++ b/drivers/media/rc/rc-ir-raw.c
@@ -262,7 +262,6 @@ int ir_raw_event_register(struct rc_dev *dev)
262 return -ENOMEM; 262 return -ENOMEM;
263 263
264 dev->raw->dev = dev; 264 dev->raw->dev = dev;
265 dev->enabled_protocols = ~0;
266 dev->change_protocol = change_protocol; 265 dev->change_protocol = change_protocol;
267 rc = kfifo_alloc(&dev->raw->kfifo, 266 rc = kfifo_alloc(&dev->raw->kfifo,
268 sizeof(struct ir_raw_event) * MAX_IR_EVENT_SIZE, 267 sizeof(struct ir_raw_event) * MAX_IR_EVENT_SIZE,
diff --git a/drivers/media/rc/rc-main.c b/drivers/media/rc/rc-main.c
index a7991c7d010a..8d3b74c5a717 100644
--- a/drivers/media/rc/rc-main.c
+++ b/drivers/media/rc/rc-main.c
@@ -1421,6 +1421,8 @@ int rc_register_device(struct rc_dev *dev)
1421 1421
1422 if (dev->change_protocol) { 1422 if (dev->change_protocol) {
1423 u64 rc_type = (1 << rc_map->rc_type); 1423 u64 rc_type = (1 << rc_map->rc_type);
1424 if (dev->driver_type == RC_DRIVER_IR_RAW)
1425 rc_type |= RC_BIT_LIRC;
1424 rc = dev->change_protocol(dev, &rc_type); 1426 rc = dev->change_protocol(dev, &rc_type);
1425 if (rc < 0) 1427 if (rc < 0)
1426 goto out_raw; 1428 goto out_raw;
diff --git a/drivers/media/tuners/xc5000.c b/drivers/media/tuners/xc5000.c
index e44c8aba6074..803a0e63d47e 100644
--- a/drivers/media/tuners/xc5000.c
+++ b/drivers/media/tuners/xc5000.c
@@ -1333,9 +1333,9 @@ static int xc5000_release(struct dvb_frontend *fe)
1333 1333
1334 if (priv) { 1334 if (priv) {
1335 cancel_delayed_work(&priv->timer_sleep); 1335 cancel_delayed_work(&priv->timer_sleep);
1336 hybrid_tuner_release_state(priv);
1337 if (priv->firmware) 1336 if (priv->firmware)
1338 release_firmware(priv->firmware); 1337 release_firmware(priv->firmware);
1338 hybrid_tuner_release_state(priv);
1339 } 1339 }
1340 1340
1341 mutex_unlock(&xc5000_list_mutex); 1341 mutex_unlock(&xc5000_list_mutex);
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index 00758c83eec7..1896ab218b11 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -193,8 +193,8 @@ static int af9035_wr_reg_mask(struct dvb_usb_device *d, u32 reg, u8 val,
193 return af9035_wr_regs(d, reg, &val, 1); 193 return af9035_wr_regs(d, reg, &val, 1);
194} 194}
195 195
196static int af9035_add_i2c_dev(struct dvb_usb_device *d, char *type, u8 addr, 196static int af9035_add_i2c_dev(struct dvb_usb_device *d, const char *type,
197 void *platform_data, struct i2c_adapter *adapter) 197 u8 addr, void *platform_data, struct i2c_adapter *adapter)
198{ 198{
199 int ret, num; 199 int ret, num;
200 struct state *state = d_to_priv(d); 200 struct state *state = d_to_priv(d);
@@ -221,7 +221,7 @@ static int af9035_add_i2c_dev(struct dvb_usb_device *d, char *type, u8 addr,
221 goto err; 221 goto err;
222 } 222 }
223 223
224 request_module(board_info.type); 224 request_module("%s", board_info.type);
225 225
226 /* register I2C device */ 226 /* register I2C device */
227 client = i2c_new_device(adapter, &board_info); 227 client = i2c_new_device(adapter, &board_info);
diff --git a/drivers/media/usb/dvb-usb-v2/anysee.c b/drivers/media/usb/dvb-usb-v2/anysee.c
index d3c5f230e97a..ae917c042a52 100644
--- a/drivers/media/usb/dvb-usb-v2/anysee.c
+++ b/drivers/media/usb/dvb-usb-v2/anysee.c
@@ -630,8 +630,8 @@ error:
630 return ret; 630 return ret;
631} 631}
632 632
633static int anysee_add_i2c_dev(struct dvb_usb_device *d, char *type, u8 addr, 633static int anysee_add_i2c_dev(struct dvb_usb_device *d, const char *type,
634 void *platform_data) 634 u8 addr, void *platform_data)
635{ 635{
636 int ret, num; 636 int ret, num;
637 struct anysee_state *state = d_to_priv(d); 637 struct anysee_state *state = d_to_priv(d);
@@ -659,7 +659,7 @@ static int anysee_add_i2c_dev(struct dvb_usb_device *d, char *type, u8 addr,
659 goto err; 659 goto err;
660 } 660 }
661 661
662 request_module(board_info.type); 662 request_module("%s", board_info.type);
663 663
664 /* register I2C device */ 664 /* register I2C device */
665 client = i2c_new_device(adapter, &board_info); 665 client = i2c_new_device(adapter, &board_info);
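
[Note] Both the af9035 and anysee hunks above replace request_module(board_info.type) with request_module("%s", board_info.type): request_module() takes a printf-style format, so a bare name string containing '%' would be parsed as conversion specifiers. The userspace sketch below shows the same rule with printf(); the helper name is invented.

    /* Never pass a variable string as the format argument of a
     * printf-style function; route it through "%s" instead. */
    #include <stdio.h>

    static void announce(const char *name)
    {
        /* Unsafe: printf(name); a '%' in name becomes a specifier. */
        printf("%s\n", name);       /* safe, as in the patch */
    }

    int main(void)
    {
        announce("si2168");
        announce("odd%sname");      /* still printed literally */
        return 0;
    }
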
diff --git a/drivers/media/usb/em28xx/em28xx-core.c b/drivers/media/usb/em28xx/em28xx-core.c
index b5e52fe7957a..901cf2b952d7 100644
--- a/drivers/media/usb/em28xx/em28xx-core.c
+++ b/drivers/media/usb/em28xx/em28xx-core.c
@@ -504,7 +504,7 @@ EXPORT_SYMBOL_GPL(em28xx_audio_analog_set);
504int em28xx_audio_setup(struct em28xx *dev) 504int em28xx_audio_setup(struct em28xx *dev)
505{ 505{
506 int vid1, vid2, feat, cfg; 506 int vid1, vid2, feat, cfg;
507 u32 vid; 507 u32 vid = 0;
508 u8 i2s_samplerates; 508 u8 i2s_samplerates;
509 509
510 if (dev->chip_id == CHIP_ID_EM2870 || 510 if (dev->chip_id == CHIP_ID_EM2870 ||
diff --git a/drivers/media/usb/em28xx/em28xx-input.c b/drivers/media/usb/em28xx/em28xx-input.c
index 581f6dad4ca9..23f8f6afa2e0 100644
--- a/drivers/media/usb/em28xx/em28xx-input.c
+++ b/drivers/media/usb/em28xx/em28xx-input.c
@@ -712,8 +712,10 @@ static int em28xx_ir_init(struct em28xx *dev)
712 em28xx_info("Registering input extension\n"); 712 em28xx_info("Registering input extension\n");
713 713
714 ir = kzalloc(sizeof(*ir), GFP_KERNEL); 714 ir = kzalloc(sizeof(*ir), GFP_KERNEL);
715 if (!ir)
716 return -ENOMEM;
715 rc = rc_allocate_device(); 717 rc = rc_allocate_device();
716 if (!ir || !rc) 718 if (!rc)
717 goto error; 719 goto error;
718 720
719 /* record handles to ourself */ 721 /* record handles to ourself */
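
[Note] The em28xx-input hunk above checks the kzalloc() result immediately and returns -ENOMEM before anything else is allocated, so the shared error path never has to cope with a NULL ir. A compact userspace sketch of that fail-fast pattern, with invented names:

    #include <stdlib.h>

    struct ir_priv { int unused; };
    struct rc_handle { int unused; };

    static int ir_init(void)
    {
        struct ir_priv *ir;
        struct rc_handle *rc;

        ir = calloc(1, sizeof(*ir));
        if (!ir)
            return -1;              /* nothing to undo yet */

        rc = calloc(1, sizeof(*rc));
        if (!rc)
            goto err_free_ir;       /* only ir needs cleanup */

        /* ... would register here; plain teardown for the sketch ... */
        free(rc);
        free(ir);
        return 0;

    err_free_ir:
        free(ir);
        return -1;
    }

    int main(void)
    {
        return ir_init() ? 1 : 0;
    }
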
diff --git a/drivers/media/usb/hackrf/hackrf.c b/drivers/media/usb/hackrf/hackrf.c
index 328b5ba47a0a..fd1fa412e094 100644
--- a/drivers/media/usb/hackrf/hackrf.c
+++ b/drivers/media/usb/hackrf/hackrf.c
@@ -932,7 +932,7 @@ static int hackrf_set_bandwidth(struct hackrf_dev *dev)
932 dev->bandwidth->val = bandwidth; 932 dev->bandwidth->val = bandwidth;
933 dev->bandwidth->cur.val = bandwidth; 933 dev->bandwidth->cur.val = bandwidth;
934 934
935 dev_dbg(dev->dev, "bandwidth selected=%d\n", bandwidth_lut[i].freq); 935 dev_dbg(dev->dev, "bandwidth selected=%d\n", bandwidth);
936 936
937 u16tmp = 0; 937 u16tmp = 0;
938 u16tmp |= ((bandwidth >> 0) & 0xff) << 0; 938 u16tmp |= ((bandwidth >> 0) & 0xff) << 0;
diff --git a/drivers/media/usb/s2255/s2255drv.c b/drivers/media/usb/s2255/s2255drv.c
index ccc00099b261..1c0dbf428a3a 100644
--- a/drivers/media/usb/s2255/s2255drv.c
+++ b/drivers/media/usb/s2255/s2255drv.c
@@ -632,7 +632,7 @@ static void s2255_fillbuff(struct s2255_vc *vc,
632 break; 632 break;
633 case V4L2_PIX_FMT_JPEG: 633 case V4L2_PIX_FMT_JPEG:
634 case V4L2_PIX_FMT_MJPEG: 634 case V4L2_PIX_FMT_MJPEG:
635 buf->vb.v4l2_buf.length = jpgsize; 635 vb2_set_plane_payload(&buf->vb, 0, jpgsize);
636 memcpy(vbuf, tmpbuf, jpgsize); 636 memcpy(vbuf, tmpbuf, jpgsize);
637 break; 637 break;
638 case V4L2_PIX_FMT_YUV422P: 638 case V4L2_PIX_FMT_YUV422P:
diff --git a/drivers/media/usb/usbvision/usbvision-video.c b/drivers/media/usb/usbvision/usbvision-video.c
index 68bc9615660e..9bfa041e3316 100644
--- a/drivers/media/usb/usbvision/usbvision-video.c
+++ b/drivers/media/usb/usbvision/usbvision-video.c
@@ -446,6 +446,7 @@ static int usbvision_v4l2_close(struct file *file)
446 if (usbvision->remove_pending) { 446 if (usbvision->remove_pending) {
447 printk(KERN_INFO "%s: Final disconnect\n", __func__); 447 printk(KERN_INFO "%s: Final disconnect\n", __func__);
448 usbvision_release(usbvision); 448 usbvision_release(usbvision);
449 return 0;
449 } 450 }
450 mutex_unlock(&usbvision->v4l2_lock); 451 mutex_unlock(&usbvision->v4l2_lock);
451 452
@@ -1221,6 +1222,7 @@ static int usbvision_radio_close(struct file *file)
1221 if (usbvision->remove_pending) { 1222 if (usbvision->remove_pending) {
1222 printk(KERN_INFO "%s: Final disconnect\n", __func__); 1223 printk(KERN_INFO "%s: Final disconnect\n", __func__);
1223 usbvision_release(usbvision); 1224 usbvision_release(usbvision);
1225 return err_code;
1224 } 1226 }
1225 1227
1226 mutex_unlock(&usbvision->v4l2_lock); 1228 mutex_unlock(&usbvision->v4l2_lock);
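
[Note] Both usbvision hunks above add a return right after usbvision_release() in the remove_pending path, so the code no longer falls through and unlocks a mutex that lives inside the structure the release call just freed. A simplified pthread sketch of the rule (release the object, then return, and never touch it again):

    #include <pthread.h>
    #include <stdlib.h>

    struct dev {
        pthread_mutex_t lock;
        int remove_pending;
    };

    static void dev_release(struct dev *d)
    {
        pthread_mutex_unlock(&d->lock);
        pthread_mutex_destroy(&d->lock);
        free(d);
    }

    static int dev_close(struct dev *d)
    {
        pthread_mutex_lock(&d->lock);
        if (d->remove_pending) {
            dev_release(d);
            return 0;   /* d is gone; falling through would
                         * unlock freed memory */
        }
        pthread_mutex_unlock(&d->lock);
        return 0;
    }

    int main(void)
    {
        struct dev *d = calloc(1, sizeof(*d));

        if (!d)
            return 1;
        pthread_mutex_init(&d->lock, NULL);
        d->remove_pending = 1;
        return dev_close(d);
    }
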
diff --git a/drivers/media/usb/uvc/uvc_v4l2.c b/drivers/media/usb/uvc/uvc_v4l2.c
index 60a8e2c3631e..378ae02e593b 100644
--- a/drivers/media/usb/uvc/uvc_v4l2.c
+++ b/drivers/media/usb/uvc/uvc_v4l2.c
@@ -318,7 +318,6 @@ static int uvc_v4l2_set_format(struct uvc_streaming *stream,
318 stream->ctrl = probe; 318 stream->ctrl = probe;
319 stream->cur_format = format; 319 stream->cur_format = format;
320 stream->cur_frame = frame; 320 stream->cur_frame = frame;
321 stream->frame_size = fmt->fmt.pix.sizeimage;
322 321
323done: 322done:
324 mutex_unlock(&stream->mutex); 323 mutex_unlock(&stream->mutex);
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index 9ace520bb079..df81b9c4faf1 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -1143,7 +1143,7 @@ static int uvc_video_encode_data(struct uvc_streaming *stream,
1143static void uvc_video_validate_buffer(const struct uvc_streaming *stream, 1143static void uvc_video_validate_buffer(const struct uvc_streaming *stream,
1144 struct uvc_buffer *buf) 1144 struct uvc_buffer *buf)
1145{ 1145{
1146 if (stream->frame_size != buf->bytesused && 1146 if (stream->ctrl.dwMaxVideoFrameSize != buf->bytesused &&
1147 !(stream->cur_format->flags & UVC_FMT_FLAG_COMPRESSED)) 1147 !(stream->cur_format->flags & UVC_FMT_FLAG_COMPRESSED))
1148 buf->error = 1; 1148 buf->error = 1;
1149} 1149}
diff --git a/drivers/media/usb/uvc/uvcvideo.h b/drivers/media/usb/uvc/uvcvideo.h
index 6f676c29ec09..864ada740360 100644
--- a/drivers/media/usb/uvc/uvcvideo.h
+++ b/drivers/media/usb/uvc/uvcvideo.h
@@ -457,7 +457,6 @@ struct uvc_streaming {
457 struct uvc_format *def_format; 457 struct uvc_format *def_format;
458 struct uvc_format *cur_format; 458 struct uvc_format *cur_format;
459 struct uvc_frame *cur_frame; 459 struct uvc_frame *cur_frame;
460 size_t frame_size;
461 460
462 /* Protect access to ctrl, cur_format, cur_frame and hardware video 461 /* Protect access to ctrl, cur_format, cur_frame and hardware video
463 * probe control. 462 * probe control.
diff --git a/drivers/media/v4l2-core/videobuf-dma-contig.c b/drivers/media/v4l2-core/videobuf-dma-contig.c
index bf80f0f7dfb8..e02353e340dd 100644
--- a/drivers/media/v4l2-core/videobuf-dma-contig.c
+++ b/drivers/media/v4l2-core/videobuf-dma-contig.c
@@ -305,6 +305,15 @@ static int __videobuf_mmap_mapper(struct videobuf_queue *q,
305 /* Try to remap memory */ 305 /* Try to remap memory */
306 size = vma->vm_end - vma->vm_start; 306 size = vma->vm_end - vma->vm_start;
307 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); 307 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
308
309 /* the "vm_pgoff" is just used in v4l2 to find the
310 * corresponding buffer data structure which is allocated
311 * earlier and it does not mean the offset from the physical
312 * buffer start address as usual. So set it to 0 to pass
313 * the sanity check in vm_iomap_memory().
314 */
315 vma->vm_pgoff = 0;
316
308 retval = vm_iomap_memory(vma, mem->dma_handle, size); 317 retval = vm_iomap_memory(vma, mem->dma_handle, size);
309 if (retval) { 318 if (retval) {
310 dev_err(q->dev, "mmap: remap failed with error %d. ", 319 dev_err(q->dev, "mmap: remap failed with error %d. ",
diff --git a/drivers/mfd/max77693.c b/drivers/mfd/max77693.c
index cf008f45968c..711773e8e64b 100644
--- a/drivers/mfd/max77693.c
+++ b/drivers/mfd/max77693.c
@@ -240,7 +240,7 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
240 goto err_irq_charger; 240 goto err_irq_charger;
241 } 241 }
242 242
243 ret = regmap_add_irq_chip(max77693->regmap, max77693->irq, 243 ret = regmap_add_irq_chip(max77693->regmap_muic, max77693->irq,
244 IRQF_ONESHOT | IRQF_SHARED | 244 IRQF_ONESHOT | IRQF_SHARED |
245 IRQF_TRIGGER_FALLING, 0, 245 IRQF_TRIGGER_FALLING, 0,
246 &max77693_muic_irq_chip, 246 &max77693_muic_irq_chip,
@@ -250,6 +250,17 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
250 goto err_irq_muic; 250 goto err_irq_muic;
251 } 251 }
252 252
253 /* Unmask interrupts from all blocks in interrupt source register */
254 ret = regmap_update_bits(max77693->regmap,
255 MAX77693_PMIC_REG_INTSRC_MASK,
256 SRC_IRQ_ALL, (unsigned int)~SRC_IRQ_ALL);
257 if (ret < 0) {
258 dev_err(max77693->dev,
259 "Could not unmask interrupts in INTSRC: %d\n",
260 ret);
261 goto err_intsrc;
262 }
263
253 pm_runtime_set_active(max77693->dev); 264 pm_runtime_set_active(max77693->dev);
254 265
255 ret = mfd_add_devices(max77693->dev, -1, max77693_devs, 266 ret = mfd_add_devices(max77693->dev, -1, max77693_devs,
@@ -261,6 +272,7 @@ static int max77693_i2c_probe(struct i2c_client *i2c,
261 272
262err_mfd: 273err_mfd:
263 mfd_remove_devices(max77693->dev); 274 mfd_remove_devices(max77693->dev);
275err_intsrc:
264 regmap_del_irq_chip(max77693->irq, max77693->irq_data_muic); 276 regmap_del_irq_chip(max77693->irq, max77693->irq_data_muic);
265err_irq_muic: 277err_irq_muic:
266 regmap_del_irq_chip(max77693->irq, max77693->irq_data_charger); 278 regmap_del_irq_chip(max77693->irq, max77693->irq_data_charger);
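
[Note] The max77693 hunk above fixes the regmap passed for the MUIC interrupt chip and inserts an INTSRC unmask step with its own err_intsrc label, keeping the usual goto-unwind convention: each failure point jumps to a label that undoes exactly the steps completed before it. A generic sketch of that convention (step names are made up):

    #include <stdio.h>

    static int step_a(void) { return 0; }
    static int step_b(void) { return 0; }
    static int step_c(void) { return -1; } /* pretend this fails */
    static void undo_a(void) { puts("undo a"); }
    static void undo_b(void) { puts("undo b"); }

    static int probe(void)
    {
        int ret;

        ret = step_a();
        if (ret)
            return ret;
        ret = step_b();
        if (ret)
            goto err_a;
        ret = step_c();         /* the newly inserted step */
        if (ret)
            goto err_b;         /* new label: undo b, then a */
        return 0;

    err_b:
        undo_b();
    err_a:
        undo_a();
        return ret;
    }

    int main(void)
    {
        return probe() ? 1 : 0;
    }

Adding the label directly above the existing ones keeps the unwind order the exact reverse of the setup order.
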
diff --git a/drivers/mfd/rtsx_pcr.c b/drivers/mfd/rtsx_pcr.c
index f2643c221d34..30f7ca89a0e6 100644
--- a/drivers/mfd/rtsx_pcr.c
+++ b/drivers/mfd/rtsx_pcr.c
@@ -947,6 +947,7 @@ static void rtsx_pci_idle_work(struct work_struct *work)
947 mutex_unlock(&pcr->pcr_mutex); 947 mutex_unlock(&pcr->pcr_mutex);
948} 948}
949 949
950#ifdef CONFIG_PM
950static void rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state) 951static void rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state)
951{ 952{
952 if (pcr->ops->turn_off_led) 953 if (pcr->ops->turn_off_led)
@@ -961,6 +962,7 @@ static void rtsx_pci_power_off(struct rtsx_pcr *pcr, u8 pm_state)
961 if (pcr->ops->force_power_down) 962 if (pcr->ops->force_power_down)
962 pcr->ops->force_power_down(pcr, pm_state); 963 pcr->ops->force_power_down(pcr, pm_state);
963} 964}
965#endif
964 966
965static int rtsx_pci_init_hw(struct rtsx_pcr *pcr) 967static int rtsx_pci_init_hw(struct rtsx_pcr *pcr)
966{ 968{
diff --git a/drivers/mfd/stmpe.h b/drivers/mfd/stmpe.h
index 2d045f26f193..bee0abf82040 100644
--- a/drivers/mfd/stmpe.h
+++ b/drivers/mfd/stmpe.h
@@ -269,7 +269,7 @@ int stmpe_remove(struct stmpe *stmpe);
269#define STMPE24XX_REG_CHIP_ID 0x80 269#define STMPE24XX_REG_CHIP_ID 0x80
270#define STMPE24XX_REG_IEGPIOR_LSB 0x18 270#define STMPE24XX_REG_IEGPIOR_LSB 0x18
271#define STMPE24XX_REG_ISGPIOR_MSB 0x19 271#define STMPE24XX_REG_ISGPIOR_MSB 0x19
272#define STMPE24XX_REG_GPMR_LSB 0xA5 272#define STMPE24XX_REG_GPMR_LSB 0xA4
273#define STMPE24XX_REG_GPSR_LSB 0x85 273#define STMPE24XX_REG_GPSR_LSB 0x85
274#define STMPE24XX_REG_GPCR_LSB 0x88 274#define STMPE24XX_REG_GPCR_LSB 0x88
275#define STMPE24XX_REG_GPDR_LSB 0x8B 275#define STMPE24XX_REG_GPDR_LSB 0x8B
diff --git a/drivers/mfd/twl4030-power.c b/drivers/mfd/twl4030-power.c
index cf92a6d1c532..50f9091bcd38 100644
--- a/drivers/mfd/twl4030-power.c
+++ b/drivers/mfd/twl4030-power.c
@@ -44,6 +44,15 @@ static u8 twl4030_start_script_address = 0x2b;
44#define PWR_DEVSLP BIT(1) 44#define PWR_DEVSLP BIT(1)
45#define PWR_DEVOFF BIT(0) 45#define PWR_DEVOFF BIT(0)
46 46
47/* Register bits for CFG_P1_TRANSITION (also for P2 and P3) */
48#define STARTON_SWBUG BIT(7) /* Start on watchdog */
49#define STARTON_VBUS BIT(5) /* Start on VBUS */
50#define STARTON_VBAT BIT(4) /* Start on battery insert */
51#define STARTON_RTC BIT(3) /* Start on RTC */
52#define STARTON_USB BIT(2) /* Start on USB host */
53#define STARTON_CHG BIT(1) /* Start on charger */
54#define STARTON_PWON BIT(0) /* Start on PWRON button */
55
47#define SEQ_OFFSYNC (1 << 0) 56#define SEQ_OFFSYNC (1 << 0)
48 57
49#define PHY_TO_OFF_PM_MASTER(p) (p - 0x36) 58#define PHY_TO_OFF_PM_MASTER(p) (p - 0x36)
@@ -606,6 +615,44 @@ twl4030_power_configure_resources(const struct twl4030_power_data *pdata)
606 return 0; 615 return 0;
607} 616}
608 617
618static int twl4030_starton_mask_and_set(u8 bitmask, u8 bitvalues)
619{
620 u8 regs[3] = { TWL4030_PM_MASTER_CFG_P1_TRANSITION,
621 TWL4030_PM_MASTER_CFG_P2_TRANSITION,
622 TWL4030_PM_MASTER_CFG_P3_TRANSITION, };
623 u8 val;
624 int i, err;
625
626 err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, TWL4030_PM_MASTER_KEY_CFG1,
627 TWL4030_PM_MASTER_PROTECT_KEY);
628 if (err)
629 goto relock;
630 err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER,
631 TWL4030_PM_MASTER_KEY_CFG2,
632 TWL4030_PM_MASTER_PROTECT_KEY);
633 if (err)
634 goto relock;
635
636 for (i = 0; i < sizeof(regs); i++) {
637 err = twl_i2c_read_u8(TWL_MODULE_PM_MASTER,
638 &val, regs[i]);
639 if (err)
640 break;
641 val = (~bitmask & val) | (bitmask & bitvalues);
642 err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER,
643 val, regs[i]);
644 if (err)
645 break;
646 }
647
648 if (err)
649 pr_err("TWL4030 Register access failed: %i\n", err);
650
651relock:
652 return twl_i2c_write_u8(TWL_MODULE_PM_MASTER, 0,
653 TWL4030_PM_MASTER_PROTECT_KEY);
654}
655
609/* 656/*
610 * In master mode, start the power off sequence. 657 * In master mode, start the power off sequence.
611 * After a successful execution, TWL shuts down the power to the SoC 658 * After a successful execution, TWL shuts down the power to the SoC
@@ -615,6 +662,11 @@ void twl4030_power_off(void)
615{ 662{
616 int err; 663 int err;
617 664
665 /* Disable start on charger or VBUS as it can break poweroff */
666 err = twl4030_starton_mask_and_set(STARTON_VBUS | STARTON_CHG, 0);
667 if (err)
668 pr_err("TWL4030 Unable to configure start-up\n");
669
618 err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, PWR_DEVOFF, 670 err = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, PWR_DEVOFF,
619 TWL4030_PM_MASTER_P1_SW_EVENTS); 671 TWL4030_PM_MASTER_P1_SW_EVENTS);
620 if (err) 672 if (err)
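
[Note] The new twl4030_starton_mask_and_set() above is a read-modify-write over the three transition registers, bracketed by the two-stage PROTECT_KEY unlock and a relock on every exit path. The bit manipulation it relies on, val = (~bitmask & val) | (bitmask & bitvalues), updates only the bits selected by the mask; a standalone sketch of that helper:

    #include <assert.h>
    #include <stdint.h>

    /* Update only the bits selected by mask, leaving the rest of the
     * current register value untouched. */
    static uint8_t mask_and_set(uint8_t cur, uint8_t mask, uint8_t val)
    {
        return (uint8_t)((cur & ~mask) | (val & mask));
    }

    int main(void)
    {
        /* Clear STARTON_VBUS (bit 5) and STARTON_CHG (bit 1), as the
         * power-off path above does, keeping every other bit. */
        uint8_t cur = 0xff;
        uint8_t mask = (1u << 5) | (1u << 1);

        assert(mask_and_set(cur, mask, 0) == (uint8_t)(0xff & ~mask));
        assert(mask_and_set(0x00, mask, mask) == mask);
        return 0;
    }
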
diff --git a/drivers/mfd/viperboard.c b/drivers/mfd/viperboard.c
index e00f5340ed87..3c2b8f9e3c84 100644
--- a/drivers/mfd/viperboard.c
+++ b/drivers/mfd/viperboard.c
@@ -93,8 +93,9 @@ static int vprbrd_probe(struct usb_interface *interface,
93 version >> 8, version & 0xff, 93 version >> 8, version & 0xff,
94 vb->usb_dev->bus->busnum, vb->usb_dev->devnum); 94 vb->usb_dev->bus->busnum, vb->usb_dev->devnum);
95 95
96 ret = mfd_add_devices(&interface->dev, -1, vprbrd_devs, 96 ret = mfd_add_devices(&interface->dev, PLATFORM_DEVID_AUTO,
97 ARRAY_SIZE(vprbrd_devs), NULL, 0, NULL); 97 vprbrd_devs, ARRAY_SIZE(vprbrd_devs), NULL, 0,
98 NULL);
98 if (ret != 0) { 99 if (ret != 0) {
99 dev_err(&interface->dev, "Failed to add mfd devices to core."); 100 dev_err(&interface->dev, "Failed to add mfd devices to core.");
100 goto error; 101 goto error;
diff --git a/drivers/misc/cxl/fault.c b/drivers/misc/cxl/fault.c
index 69506ebd4d07..c99e896604ee 100644
--- a/drivers/misc/cxl/fault.c
+++ b/drivers/misc/cxl/fault.c
@@ -21,60 +21,64 @@
21 21
22#include "cxl.h" 22#include "cxl.h"
23 23
24static struct cxl_sste* find_free_sste(struct cxl_sste *primary_group, 24static bool sste_matches(struct cxl_sste *sste, struct copro_slb *slb)
25 bool sec_hash,
26 struct cxl_sste *secondary_group,
27 unsigned int *lru)
28{ 25{
29 unsigned int i, entry; 26 return ((sste->vsid_data == cpu_to_be64(slb->vsid)) &&
30 struct cxl_sste *sste, *group = primary_group; 27 (sste->esid_data == cpu_to_be64(slb->esid)));
31 28}
32 for (i = 0; i < 2; i++) { 29
33 for (entry = 0; entry < 8; entry++) { 30/*
34 sste = group + entry; 31 * This finds a free SSTE for the given SLB, or returns NULL if it's already in
35 if (!(be64_to_cpu(sste->esid_data) & SLB_ESID_V)) 32 * the segment table.
36 return sste; 33 */
37 } 34static struct cxl_sste* find_free_sste(struct cxl_context *ctx,
38 if (!sec_hash) 35 struct copro_slb *slb)
39 break; 36{
40 group = secondary_group; 37 struct cxl_sste *primary, *sste, *ret = NULL;
38 unsigned int mask = (ctx->sst_size >> 7) - 1; /* SSTP0[SegTableSize] */
39 unsigned int entry;
40 unsigned int hash;
41
42 if (slb->vsid & SLB_VSID_B_1T)
43 hash = (slb->esid >> SID_SHIFT_1T) & mask;
44 else /* 256M */
45 hash = (slb->esid >> SID_SHIFT) & mask;
46
47 primary = ctx->sstp + (hash << 3);
48
49 for (entry = 0, sste = primary; entry < 8; entry++, sste++) {
50 if (!ret && !(be64_to_cpu(sste->esid_data) & SLB_ESID_V))
51 ret = sste;
52 if (sste_matches(sste, slb))
53 return NULL;
41 } 54 }
55 if (ret)
56 return ret;
57
42 /* Nothing free, select an entry to cast out */ 58 /* Nothing free, select an entry to cast out */
43 if (sec_hash && (*lru & 0x8)) 59 ret = primary + ctx->sst_lru;
44 sste = secondary_group + (*lru & 0x7); 60 ctx->sst_lru = (ctx->sst_lru + 1) & 0x7;
45 else
46 sste = primary_group + (*lru & 0x7);
47 *lru = (*lru + 1) & 0xf;
48 61
49 return sste; 62 return ret;
50} 63}
51 64
52static void cxl_load_segment(struct cxl_context *ctx, struct copro_slb *slb) 65static void cxl_load_segment(struct cxl_context *ctx, struct copro_slb *slb)
53{ 66{
54 /* mask is the group index, we search primary and secondary here. */ 67 /* mask is the group index, we search primary and secondary here. */
55 unsigned int mask = (ctx->sst_size >> 7)-1; /* SSTP0[SegTableSize] */
56 bool sec_hash = 1;
57 struct cxl_sste *sste; 68 struct cxl_sste *sste;
58 unsigned int hash;
59 unsigned long flags; 69 unsigned long flags;
60 70
61
62 sec_hash = !!(cxl_p1n_read(ctx->afu, CXL_PSL_SR_An) & CXL_PSL_SR_An_SC);
63
64 if (slb->vsid & SLB_VSID_B_1T)
65 hash = (slb->esid >> SID_SHIFT_1T) & mask;
66 else /* 256M */
67 hash = (slb->esid >> SID_SHIFT) & mask;
68
69 spin_lock_irqsave(&ctx->sste_lock, flags); 71 spin_lock_irqsave(&ctx->sste_lock, flags);
70 sste = find_free_sste(ctx->sstp + (hash << 3), sec_hash, 72 sste = find_free_sste(ctx, slb);
71 ctx->sstp + ((~hash & mask) << 3), &ctx->sst_lru); 73 if (!sste)
74 goto out_unlock;
72 75
73 pr_devel("CXL Populating SST[%li]: %#llx %#llx\n", 76 pr_devel("CXL Populating SST[%li]: %#llx %#llx\n",
74 sste - ctx->sstp, slb->vsid, slb->esid); 77 sste - ctx->sstp, slb->vsid, slb->esid);
75 78
76 sste->vsid_data = cpu_to_be64(slb->vsid); 79 sste->vsid_data = cpu_to_be64(slb->vsid);
77 sste->esid_data = cpu_to_be64(slb->esid); 80 sste->esid_data = cpu_to_be64(slb->esid);
81out_unlock:
78 spin_unlock_irqrestore(&ctx->sste_lock, flags); 82 spin_unlock_irqrestore(&ctx->sste_lock, flags);
79} 83}
80 84
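
[Note] The rewritten find_free_sste() above hashes the ESID to select one eight-entry group, scans it once for both a free slot and an already-present match (returning NULL so the caller skips the update), and falls back to a round-robin cast-out when the group is full. A generic single-group sketch of that lookup policy (the real code additionally picks the group by hash):

    #include <stdio.h>
    #include <string.h>

    #define GROUP_SIZE 8

    struct entry {
        int valid;
        unsigned long key;
    };

    struct table {
        struct entry slots[GROUP_SIZE];
        unsigned int lru;       /* next slot to cast out */
    };

    /* Return a slot to fill for key, or NULL if key is already present,
     * mirroring the structure of the rewritten find_free_sste(). */
    static struct entry *find_slot(struct table *t, unsigned long key)
    {
        struct entry *free_slot = NULL;
        int i;

        for (i = 0; i < GROUP_SIZE; i++) {
            if (!free_slot && !t->slots[i].valid)
                free_slot = &t->slots[i];
            if (t->slots[i].valid && t->slots[i].key == key)
                return NULL;
        }
        if (free_slot)
            return free_slot;

        /* nothing free: evict round-robin */
        free_slot = &t->slots[t->lru];
        t->lru = (t->lru + 1) % GROUP_SIZE;
        return free_slot;
    }

    int main(void)
    {
        struct table t;
        struct entry *e;

        memset(&t, 0, sizeof(t));
        e = find_slot(&t, 42);
        if (e) {
            e->valid = 1;
            e->key = 42;
        }
        printf("second lookup: %s\n",
               find_slot(&t, 42) ? "insert" : "already present");
        return 0;
    }
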
diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c
index 623286a77114..d47532e8f4f1 100644
--- a/drivers/misc/cxl/native.c
+++ b/drivers/misc/cxl/native.c
@@ -417,7 +417,7 @@ static int attach_afu_directed(struct cxl_context *ctx, u64 wed, u64 amr)
417 ctx->elem->haurp = 0; /* disable */ 417 ctx->elem->haurp = 0; /* disable */
418 ctx->elem->sdr = cpu_to_be64(mfspr(SPRN_SDR1)); 418 ctx->elem->sdr = cpu_to_be64(mfspr(SPRN_SDR1));
419 419
420 sr = CXL_PSL_SR_An_SC; 420 sr = 0;
421 if (ctx->master) 421 if (ctx->master)
422 sr |= CXL_PSL_SR_An_MP; 422 sr |= CXL_PSL_SR_An_MP;
423 if (mfspr(SPRN_LPCR) & LPCR_TC) 423 if (mfspr(SPRN_LPCR) & LPCR_TC)
@@ -508,7 +508,7 @@ static int attach_dedicated(struct cxl_context *ctx, u64 wed, u64 amr)
508 u64 sr; 508 u64 sr;
509 int rc; 509 int rc;
510 510
511 sr = CXL_PSL_SR_An_SC; 511 sr = 0;
512 set_endian(sr); 512 set_endian(sr);
513 if (ctx->master) 513 if (ctx->master)
514 sr |= CXL_PSL_SR_An_MP; 514 sr |= CXL_PSL_SR_An_MP;
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index 03c53b72a2d6..270d58a4c43d 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -311,7 +311,8 @@ int mmc_of_parse(struct mmc_host *host)
311 struct device_node *np; 311 struct device_node *np;
312 u32 bus_width; 312 u32 bus_width;
313 int len, ret; 313 int len, ret;
314 bool cap_invert, gpio_invert; 314 bool cd_cap_invert, cd_gpio_invert = false;
315 bool ro_cap_invert, ro_gpio_invert = false;
315 316
316 if (!host->parent || !host->parent->of_node) 317 if (!host->parent || !host->parent->of_node)
317 return 0; 318 return 0;
@@ -359,16 +360,13 @@ int mmc_of_parse(struct mmc_host *host)
359 if (of_find_property(np, "non-removable", &len)) { 360 if (of_find_property(np, "non-removable", &len)) {
360 host->caps |= MMC_CAP_NONREMOVABLE; 361 host->caps |= MMC_CAP_NONREMOVABLE;
361 } else { 362 } else {
362 if (of_property_read_bool(np, "cd-inverted")) 363 cd_cap_invert = of_property_read_bool(np, "cd-inverted");
363 cap_invert = true;
364 else
365 cap_invert = false;
366 364
367 if (of_find_property(np, "broken-cd", &len)) 365 if (of_find_property(np, "broken-cd", &len))
368 host->caps |= MMC_CAP_NEEDS_POLL; 366 host->caps |= MMC_CAP_NEEDS_POLL;
369 367
370 ret = mmc_gpiod_request_cd(host, "cd", 0, true, 368 ret = mmc_gpiod_request_cd(host, "cd", 0, true,
371 0, &gpio_invert); 369 0, &cd_gpio_invert);
372 if (ret) { 370 if (ret) {
373 if (ret == -EPROBE_DEFER) 371 if (ret == -EPROBE_DEFER)
374 return ret; 372 return ret;
@@ -391,17 +389,14 @@ int mmc_of_parse(struct mmc_host *host)
391 * both inverted, the end result is that the CD line is 389 * both inverted, the end result is that the CD line is
392 * not inverted. 390 * not inverted.
393 */ 391 */
394 if (cap_invert ^ gpio_invert) 392 if (cd_cap_invert ^ cd_gpio_invert)
395 host->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH; 393 host->caps2 |= MMC_CAP2_CD_ACTIVE_HIGH;
396 } 394 }
397 395
398 /* Parse Write Protection */ 396 /* Parse Write Protection */
399 if (of_property_read_bool(np, "wp-inverted")) 397 ro_cap_invert = of_property_read_bool(np, "wp-inverted");
400 cap_invert = true;
401 else
402 cap_invert = false;
403 398
404 ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &gpio_invert); 399 ret = mmc_gpiod_request_ro(host, "wp", 0, false, 0, &ro_gpio_invert);
405 if (ret) { 400 if (ret) {
406 if (ret == -EPROBE_DEFER) 401 if (ret == -EPROBE_DEFER)
407 goto out; 402 goto out;
@@ -414,7 +409,7 @@ int mmc_of_parse(struct mmc_host *host)
414 dev_info(host->parent, "Got WP GPIO\n"); 409 dev_info(host->parent, "Got WP GPIO\n");
415 410
416 /* See the comment on CD inversion above */ 411 /* See the comment on CD inversion above */
417 if (cap_invert ^ gpio_invert) 412 if (ro_cap_invert ^ ro_gpio_invert)
418 host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH; 413 host->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
419 414
420 if (of_find_property(np, "cap-sd-highspeed", &len)) 415 if (of_find_property(np, "cap-sd-highspeed", &len))
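
[Note] The mmc_of_parse() change above gives card-detect and write-protect their own cap_invert/gpio_invert pairs (with the GPIO flags initialised to false), so the XOR that derives the final line polarity can no longer pick up a stale value left over from the other path. A tiny sketch of that polarity combination, with illustrative values:

    #include <stdbool.h>
    #include <stdio.h>

    /* A line ends up active-high when exactly one of the two inversion
     * sources (DT property, GPIO flag) reports "inverted". */
    static bool active_high(bool cap_invert, bool gpio_invert)
    {
        return cap_invert ^ gpio_invert;
    }

    int main(void)
    {
        bool cd_cap = true,  cd_gpio = false;   /* card detect */
        bool ro_cap = false, ro_gpio = false;   /* write protect */

        printf("CD active-high: %d\n", active_high(cd_cap, cd_gpio));
        printf("WP active-high: %d\n", active_high(ro_cap, ro_gpio));
        return 0;
    }
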
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index a7543ba3e190..3096f3ded3ad 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -2590,6 +2590,8 @@ static void cfi_intelext_resume(struct mtd_info *mtd)
2590 2590
2591 /* Go to known state. Chip may have been power cycled */ 2591 /* Go to known state. Chip may have been power cycled */
2592 if (chip->state == FL_PM_SUSPENDED) { 2592 if (chip->state == FL_PM_SUSPENDED) {
2593 /* Refresh LH28F640BF Partition Config. Register */
2594 fixup_LH28F640BF(mtd);
2593 map_write(map, CMD(0xFF), cfi->chips[i].start); 2595 map_write(map, CMD(0xFF), cfi->chips[i].start);
2594 chip->oldstate = chip->state = FL_READY; 2596 chip->oldstate = chip->state = FL_READY;
2595 wake_up(&chip->wq); 2597 wake_up(&chip->wq);
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index dcda6287228d..ed827cf894e4 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -193,10 +193,10 @@ static int m25p_probe(struct spi_device *spi)
193{ 193{
194 struct mtd_part_parser_data ppdata; 194 struct mtd_part_parser_data ppdata;
195 struct flash_platform_data *data; 195 struct flash_platform_data *data;
196 const struct spi_device_id *id = NULL;
197 struct m25p *flash; 196 struct m25p *flash;
198 struct spi_nor *nor; 197 struct spi_nor *nor;
199 enum read_mode mode = SPI_NOR_NORMAL; 198 enum read_mode mode = SPI_NOR_NORMAL;
199 char *flash_name = NULL;
200 int ret; 200 int ret;
201 201
202 data = dev_get_platdata(&spi->dev); 202 data = dev_get_platdata(&spi->dev);
@@ -236,13 +236,11 @@ static int m25p_probe(struct spi_device *spi)
236 * If that's the case, respect "type" and ignore a "name". 236 * If that's the case, respect "type" and ignore a "name".
237 */ 237 */
238 if (data && data->type) 238 if (data && data->type)
239 id = spi_nor_match_id(data->type); 239 flash_name = data->type;
240 else
241 flash_name = spi->modalias;
240 242
241 /* If we didn't get name from platform, simply use "modalias". */ 243 ret = spi_nor_scan(nor, flash_name, mode);
242 if (!id)
243 id = spi_get_device_id(spi);
244
245 ret = spi_nor_scan(nor, id, mode);
246 if (ret) 244 if (ret)
247 return ret; 245 return ret;
248 246
@@ -263,12 +261,62 @@ static int m25p_remove(struct spi_device *spi)
263} 261}
264 262
265 263
264/*
265 * XXX This needs to be kept in sync with spi_nor_ids. We can't share
266 * it with spi-nor, because if this is built as a module then modpost
267 * won't be able to read it and add appropriate aliases.
268 */
269static const struct spi_device_id m25p_ids[] = {
270 {"at25fs010"}, {"at25fs040"}, {"at25df041a"}, {"at25df321a"},
271 {"at25df641"}, {"at26f004"}, {"at26df081a"}, {"at26df161a"},
272 {"at26df321"}, {"at45db081d"},
273 {"en25f32"}, {"en25p32"}, {"en25q32b"}, {"en25p64"},
274 {"en25q64"}, {"en25qh128"}, {"en25qh256"},
275 {"f25l32pa"},
276 {"mr25h256"}, {"mr25h10"},
277 {"gd25q32"}, {"gd25q64"},
278 {"160s33b"}, {"320s33b"}, {"640s33b"},
279 {"mx25l2005a"}, {"mx25l4005a"}, {"mx25l8005"}, {"mx25l1606e"},
280 {"mx25l3205d"}, {"mx25l3255e"}, {"mx25l6405d"}, {"mx25l12805d"},
281 {"mx25l12855e"},{"mx25l25635e"},{"mx25l25655e"},{"mx66l51235l"},
282 {"mx66l1g55g"},
283 {"n25q064"}, {"n25q128a11"}, {"n25q128a13"}, {"n25q256a"},
284 {"n25q512a"}, {"n25q512ax3"}, {"n25q00"},
285 {"pm25lv512"}, {"pm25lv010"}, {"pm25lq032"},
286 {"s25sl032p"}, {"s25sl064p"}, {"s25fl256s0"}, {"s25fl256s1"},
287 {"s25fl512s"}, {"s70fl01gs"}, {"s25sl12800"}, {"s25sl12801"},
288 {"s25fl129p0"}, {"s25fl129p1"}, {"s25sl004a"}, {"s25sl008a"},
289 {"s25sl016a"}, {"s25sl032a"}, {"s25sl064a"}, {"s25fl008k"},
290 {"s25fl016k"}, {"s25fl064k"},
291 {"sst25vf040b"},{"sst25vf080b"},{"sst25vf016b"},{"sst25vf032b"},
292 {"sst25vf064c"},{"sst25wf512"}, {"sst25wf010"}, {"sst25wf020"},
293 {"sst25wf040"},
294 {"m25p05"}, {"m25p10"}, {"m25p20"}, {"m25p40"},
295 {"m25p80"}, {"m25p16"}, {"m25p32"}, {"m25p64"},
296 {"m25p128"}, {"n25q032"},
297 {"m25p05-nonjedec"}, {"m25p10-nonjedec"}, {"m25p20-nonjedec"},
298 {"m25p40-nonjedec"}, {"m25p80-nonjedec"}, {"m25p16-nonjedec"},
299 {"m25p32-nonjedec"}, {"m25p64-nonjedec"}, {"m25p128-nonjedec"},
300 {"m45pe10"}, {"m45pe80"}, {"m45pe16"},
301 {"m25pe20"}, {"m25pe80"}, {"m25pe16"},
302 {"m25px16"}, {"m25px32"}, {"m25px32-s0"}, {"m25px32-s1"},
303 {"m25px64"},
304 {"w25x10"}, {"w25x20"}, {"w25x40"}, {"w25x80"},
305 {"w25x16"}, {"w25x32"}, {"w25q32"}, {"w25q32dw"},
306 {"w25x64"}, {"w25q64"}, {"w25q128"}, {"w25q80"},
307 {"w25q80bl"}, {"w25q128"}, {"w25q256"}, {"cat25c11"},
308 {"cat25c03"}, {"cat25c09"}, {"cat25c17"}, {"cat25128"},
309 { },
310};
311MODULE_DEVICE_TABLE(spi, m25p_ids);
312
313
266static struct spi_driver m25p80_driver = { 314static struct spi_driver m25p80_driver = {
267 .driver = { 315 .driver = {
268 .name = "m25p80", 316 .name = "m25p80",
269 .owner = THIS_MODULE, 317 .owner = THIS_MODULE,
270 }, 318 },
271 .id_table = spi_nor_ids, 319 .id_table = m25p_ids,
272 .probe = m25p_probe, 320 .probe = m25p_probe,
273 .remove = m25p_remove, 321 .remove = m25p_remove,
274 322
diff --git a/drivers/mtd/nand/omap_elm.c b/drivers/mtd/nand/omap_elm.c
index b4f61c7fc161..058531044ceb 100644
--- a/drivers/mtd/nand/omap_elm.c
+++ b/drivers/mtd/nand/omap_elm.c
@@ -115,7 +115,7 @@ int elm_config(struct device *dev, enum bch_ecc bch_type,
115 115
116 if (!info) { 116 if (!info) {
117 dev_err(dev, "Unable to configure elm - device not probed?\n"); 117 dev_err(dev, "Unable to configure elm - device not probed?\n");
118 return -ENODEV; 118 return -EPROBE_DEFER;
119 } 119 }
120 /* ELM cannot detect ECC errors for chunks > 1KB */ 120 /* ELM cannot detect ECC errors for chunks > 1KB */
121 if (ecc_step_size > ((ELM_ECC_SIZE + 1) / 2)) { 121 if (ecc_step_size > ((ELM_ECC_SIZE + 1) / 2)) {
diff --git a/drivers/mtd/spi-nor/fsl-quadspi.c b/drivers/mtd/spi-nor/fsl-quadspi.c
index 8d659a2888d5..d5269a26c839 100644
--- a/drivers/mtd/spi-nor/fsl-quadspi.c
+++ b/drivers/mtd/spi-nor/fsl-quadspi.c
@@ -881,7 +881,6 @@ static int fsl_qspi_probe(struct platform_device *pdev)
881 881
882 /* iterate the subnodes. */ 882 /* iterate the subnodes. */
883 for_each_available_child_of_node(dev->of_node, np) { 883 for_each_available_child_of_node(dev->of_node, np) {
884 const struct spi_device_id *id;
885 char modalias[40]; 884 char modalias[40];
886 885
887 /* skip the holes */ 886 /* skip the holes */
@@ -909,10 +908,6 @@ static int fsl_qspi_probe(struct platform_device *pdev)
909 if (of_modalias_node(np, modalias, sizeof(modalias)) < 0) 908 if (of_modalias_node(np, modalias, sizeof(modalias)) < 0)
910 goto map_failed; 909 goto map_failed;
911 910
912 id = spi_nor_match_id(modalias);
913 if (!id)
914 goto map_failed;
915
916 ret = of_property_read_u32(np, "spi-max-frequency", 911 ret = of_property_read_u32(np, "spi-max-frequency",
917 &q->clk_rate); 912 &q->clk_rate);
918 if (ret < 0) 913 if (ret < 0)
@@ -921,7 +916,7 @@ static int fsl_qspi_probe(struct platform_device *pdev)
921 /* set the chip address for READID */ 916 /* set the chip address for READID */
922 fsl_qspi_set_base_addr(q, nor); 917 fsl_qspi_set_base_addr(q, nor);
923 918
924 ret = spi_nor_scan(nor, id, SPI_NOR_QUAD); 919 ret = spi_nor_scan(nor, modalias, SPI_NOR_QUAD);
925 if (ret) 920 if (ret)
926 goto map_failed; 921 goto map_failed;
927 922
diff --git a/drivers/mtd/spi-nor/spi-nor.c b/drivers/mtd/spi-nor/spi-nor.c
index ae16aa2f6885..c51ee52386a7 100644
--- a/drivers/mtd/spi-nor/spi-nor.c
+++ b/drivers/mtd/spi-nor/spi-nor.c
@@ -28,6 +28,8 @@
28 28
29#define JEDEC_MFR(_jedec_id) ((_jedec_id) >> 16) 29#define JEDEC_MFR(_jedec_id) ((_jedec_id) >> 16)
30 30
31static const struct spi_device_id *spi_nor_match_id(const char *name);
32
31/* 33/*
32 * Read the status register, returning its value in the location 34 * Read the status register, returning its value in the location
33 * Return the status register value. 35 * Return the status register value.
@@ -473,7 +475,7 @@ struct flash_info {
473 * more nor chips. This current list focusses on newer chips, which 475 * more nor chips. This current list focusses on newer chips, which
474 * have been converging on command sets which including JEDEC ID. 476 * have been converging on command sets which including JEDEC ID.
475 */ 477 */
476const struct spi_device_id spi_nor_ids[] = { 478static const struct spi_device_id spi_nor_ids[] = {
477 /* Atmel -- some are (confusingly) marketed as "DataFlash" */ 479 /* Atmel -- some are (confusingly) marketed as "DataFlash" */
478 { "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4, SECT_4K) }, 480 { "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4, SECT_4K) },
479 { "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K) }, 481 { "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K) },
@@ -637,7 +639,6 @@ const struct spi_device_id spi_nor_ids[] = {
637 { "cat25128", CAT25_INFO(2048, 8, 64, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) }, 639 { "cat25128", CAT25_INFO(2048, 8, 64, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) },
638 { }, 640 { },
639}; 641};
640EXPORT_SYMBOL_GPL(spi_nor_ids);
641 642
642static const struct spi_device_id *spi_nor_read_id(struct spi_nor *nor) 643static const struct spi_device_id *spi_nor_read_id(struct spi_nor *nor)
643{ 644{
@@ -911,9 +912,9 @@ static int spi_nor_check(struct spi_nor *nor)
911 return 0; 912 return 0;
912} 913}
913 914
914int spi_nor_scan(struct spi_nor *nor, const struct spi_device_id *id, 915int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode)
915 enum read_mode mode)
916{ 916{
917 const struct spi_device_id *id = NULL;
917 struct flash_info *info; 918 struct flash_info *info;
918 struct device *dev = nor->dev; 919 struct device *dev = nor->dev;
919 struct mtd_info *mtd = nor->mtd; 920 struct mtd_info *mtd = nor->mtd;
@@ -925,6 +926,10 @@ int spi_nor_scan(struct spi_nor *nor, const struct spi_device_id *id,
925 if (ret) 926 if (ret)
926 return ret; 927 return ret;
927 928
929 id = spi_nor_match_id(name);
930 if (!id)
931 return -ENOENT;
932
928 info = (void *)id->driver_data; 933 info = (void *)id->driver_data;
929 934
930 if (info->jedec_id) { 935 if (info->jedec_id) {
@@ -1113,7 +1118,7 @@ int spi_nor_scan(struct spi_nor *nor, const struct spi_device_id *id,
1113} 1118}
1114EXPORT_SYMBOL_GPL(spi_nor_scan); 1119EXPORT_SYMBOL_GPL(spi_nor_scan);
1115 1120
1116const struct spi_device_id *spi_nor_match_id(char *name) 1121static const struct spi_device_id *spi_nor_match_id(const char *name)
1117{ 1122{
1118 const struct spi_device_id *id = spi_nor_ids; 1123 const struct spi_device_id *id = spi_nor_ids;
1119 1124
@@ -1124,7 +1129,6 @@ const struct spi_device_id *spi_nor_match_id(char *name)
1124 } 1129 }
1125 return NULL; 1130 return NULL;
1126} 1131}
1127EXPORT_SYMBOL_GPL(spi_nor_match_id);
1128 1132
1129MODULE_LICENSE("GPL"); 1133MODULE_LICENSE("GPL");
1130MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>"); 1134MODULE_AUTHOR("Huang Shijie <shijie8@gmail.com>");
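
[Note] The m25p80, fsl-quadspi and spi-nor hunks above change spi_nor_scan() to take a flash name and resolve it internally via spi_nor_match_id(), letting spi_nor_ids become static; m25p80 then carries its own name-only id table purely so modpost can still generate module aliases. The core of the change is a name-keyed table lookup that can fail; a minimal standalone version (the JEDEC values shown are illustrative):

    #include <stdio.h>
    #include <string.h>

    struct flash_id {
        const char *name;
        unsigned int jedec;     /* stand-in for the real flash_info */
    };

    static const struct flash_id ids[] = {
        { "m25p80",  0x202014 },
        { "w25q128", 0xef4018 },
        { NULL, 0 }             /* table terminator, as in spi_nor_ids */
    };

    /* Return the matching entry or NULL, like spi_nor_match_id(). */
    static const struct flash_id *match_id(const char *name)
    {
        const struct flash_id *id;

        for (id = ids; id->name; id++)
            if (!strcmp(name, id->name))
                return id;
        return NULL;
    }

    int main(void)
    {
        const struct flash_id *id = match_id("w25q128");

        printf("%s\n", id ? "found" : "unknown flash, return -ENOENT");
        return 0;
    }
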
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index 4706386b7d34..f9009be3f307 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -135,6 +135,7 @@ config MACVLAN
135config MACVTAP 135config MACVTAP
136 tristate "MAC-VLAN based tap driver" 136 tristate "MAC-VLAN based tap driver"
137 depends on MACVLAN 137 depends on MACVLAN
138 depends on INET
138 help 139 help
139 This adds a specialized tap character device driver that is based 140 This adds a specialized tap character device driver that is based
140 on the MAC-VLAN network interface, called macvtap. A macvtap device 141 on the MAC-VLAN network interface, called macvtap. A macvtap device
@@ -200,6 +201,7 @@ config RIONET_RX_SIZE
200 201
201config TUN 202config TUN
202 tristate "Universal TUN/TAP device driver support" 203 tristate "Universal TUN/TAP device driver support"
204 depends on INET
203 select CRC32 205 select CRC32
204 ---help--- 206 ---help---
205 TUN/TAP provides packet reception and transmission for user space 207 TUN/TAP provides packet reception and transmission for user space
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index c9ac06cfe6b7..a5115fb7cf33 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -2471,7 +2471,8 @@ static void bond_loadbalance_arp_mon(struct work_struct *work)
2471 bond_slave_state_change(bond); 2471 bond_slave_state_change(bond);
2472 if (BOND_MODE(bond) == BOND_MODE_XOR) 2472 if (BOND_MODE(bond) == BOND_MODE_XOR)
2473 bond_update_slave_arr(bond, NULL); 2473 bond_update_slave_arr(bond, NULL);
2474 } else if (do_failover) { 2474 }
2475 if (do_failover) {
2475 block_netpoll_tx(); 2476 block_netpoll_tx();
2476 bond_select_active_slave(bond); 2477 bond_select_active_slave(bond);
2477 unblock_netpoll_tx(); 2478 unblock_netpoll_tx();
diff --git a/drivers/net/bonding/bond_netlink.c b/drivers/net/bonding/bond_netlink.c
index c13d83e15ace..45f09a66e6c9 100644
--- a/drivers/net/bonding/bond_netlink.c
+++ b/drivers/net/bonding/bond_netlink.c
@@ -225,7 +225,12 @@ static int bond_changelink(struct net_device *bond_dev,
225 225
226 bond_option_arp_ip_targets_clear(bond); 226 bond_option_arp_ip_targets_clear(bond);
227 nla_for_each_nested(attr, data[IFLA_BOND_ARP_IP_TARGET], rem) { 227 nla_for_each_nested(attr, data[IFLA_BOND_ARP_IP_TARGET], rem) {
228 __be32 target = nla_get_be32(attr); 228 __be32 target;
229
230 if (nla_len(attr) < sizeof(target))
231 return -EINVAL;
232
233 target = nla_get_be32(attr);
229 234
230 bond_opt_initval(&newval, (__force u64)target); 235 bond_opt_initval(&newval, (__force u64)target);
231 err = __bond_opt_set(bond, BOND_OPT_ARP_TARGETS, 236 err = __bond_opt_set(bond, BOND_OPT_ARP_TARGETS,
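
[Note] The bond_netlink hunk above validates nla_len(attr) before calling nla_get_be32(), so a malformed, too-short attribute is rejected with -EINVAL instead of being read past its end. The same check in plain C, using a toy attribute layout rather than the real struct nlattr:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct attr {
        uint16_t len;           /* payload length in bytes */
        const uint8_t *data;
    };

    /* Reject the attribute unless it carries at least a full 32-bit
     * value, the same rule the patch adds before nla_get_be32(). */
    static int get_be32(const struct attr *a, uint32_t *out)
    {
        if (a->len < sizeof(*out))
            return -1;          /* -EINVAL in the kernel */
        memcpy(out, a->data, sizeof(*out));
        return 0;
    }

    int main(void)
    {
        uint8_t short_payload[2] = { 0x0a, 0x00 };
        struct attr a = { sizeof(short_payload), short_payload };
        uint32_t v;

        printf("short attribute: %s\n",
               get_be32(&a, &v) ? "rejected" : "accepted");
        return 0;
    }
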
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 02492d241e4c..2cfe5012e4e5 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -110,7 +110,7 @@ static int can_calc_bittiming(struct net_device *dev, struct can_bittiming *bt,
110 long rate; 110 long rate;
111 u64 v64; 111 u64 v64;
112 112
113 /* Use CIA recommended sample points */ 113 /* Use CiA recommended sample points */
114 if (bt->sample_point) { 114 if (bt->sample_point) {
115 sampl_pt = bt->sample_point; 115 sampl_pt = bt->sample_point;
116 } else { 116 } else {
@@ -382,7 +382,7 @@ void can_free_echo_skb(struct net_device *dev, unsigned int idx)
382 BUG_ON(idx >= priv->echo_skb_max); 382 BUG_ON(idx >= priv->echo_skb_max);
383 383
384 if (priv->echo_skb[idx]) { 384 if (priv->echo_skb[idx]) {
385 kfree_skb(priv->echo_skb[idx]); 385 dev_kfree_skb_any(priv->echo_skb[idx]);
386 priv->echo_skb[idx] = NULL; 386 priv->echo_skb[idx] = NULL;
387 } 387 }
388} 388}
diff --git a/drivers/net/can/m_can/Kconfig b/drivers/net/can/m_can/Kconfig
index fca5482c09ac..04f20dd39007 100644
--- a/drivers/net/can/m_can/Kconfig
+++ b/drivers/net/can/m_can/Kconfig
@@ -1,4 +1,5 @@
1config CAN_M_CAN 1config CAN_M_CAN
2 depends on HAS_IOMEM
2 tristate "Bosch M_CAN devices" 3 tristate "Bosch M_CAN devices"
3 ---help--- 4 ---help---
4 Say Y here if you want to support for Bosch M_CAN controller. 5 Say Y here if you want to support for Bosch M_CAN controller.
diff --git a/drivers/net/can/m_can/m_can.c b/drivers/net/can/m_can/m_can.c
index 10d571eaed85..d7bc462aafdc 100644
--- a/drivers/net/can/m_can/m_can.c
+++ b/drivers/net/can/m_can/m_can.c
@@ -105,14 +105,36 @@ enum m_can_mram_cfg {
105 MRAM_CFG_NUM, 105 MRAM_CFG_NUM,
106}; 106};
107 107
108/* Fast Bit Timing & Prescaler Register (FBTP) */
109#define FBTR_FBRP_MASK 0x1f
110#define FBTR_FBRP_SHIFT 16
111#define FBTR_FTSEG1_SHIFT 8
112#define FBTR_FTSEG1_MASK (0xf << FBTR_FTSEG1_SHIFT)
113#define FBTR_FTSEG2_SHIFT 4
114#define FBTR_FTSEG2_MASK (0x7 << FBTR_FTSEG2_SHIFT)
115#define FBTR_FSJW_SHIFT 0
116#define FBTR_FSJW_MASK 0x3
117
108/* Test Register (TEST) */ 118/* Test Register (TEST) */
109#define TEST_LBCK BIT(4) 119#define TEST_LBCK BIT(4)
110 120
111/* CC Control Register(CCCR) */ 121/* CC Control Register(CCCR) */
112#define CCCR_TEST BIT(7) 122#define CCCR_TEST BIT(7)
113#define CCCR_MON BIT(5) 123#define CCCR_CMR_MASK 0x3
114#define CCCR_CCE BIT(1) 124#define CCCR_CMR_SHIFT 10
115#define CCCR_INIT BIT(0) 125#define CCCR_CMR_CANFD 0x1
126#define CCCR_CMR_CANFD_BRS 0x2
127#define CCCR_CMR_CAN 0x3
128#define CCCR_CME_MASK 0x3
129#define CCCR_CME_SHIFT 8
130#define CCCR_CME_CAN 0
131#define CCCR_CME_CANFD 0x1
132#define CCCR_CME_CANFD_BRS 0x2
133#define CCCR_TEST BIT(7)
134#define CCCR_MON BIT(5)
135#define CCCR_CCE BIT(1)
136#define CCCR_INIT BIT(0)
137#define CCCR_CANFD 0x10
116 138
117/* Bit Timing & Prescaler Register (BTP) */ 139/* Bit Timing & Prescaler Register (BTP) */
118#define BTR_BRP_MASK 0x3ff 140#define BTR_BRP_MASK 0x3ff
@@ -204,6 +226,7 @@ enum m_can_mram_cfg {
204 226
205/* Rx Buffer / FIFO Element Size Configuration (RXESC) */ 227/* Rx Buffer / FIFO Element Size Configuration (RXESC) */
206#define M_CAN_RXESC_8BYTES 0x0 228#define M_CAN_RXESC_8BYTES 0x0
229#define M_CAN_RXESC_64BYTES 0x777
207 230
208/* Tx Buffer Configuration(TXBC) */ 231/* Tx Buffer Configuration(TXBC) */
209#define TXBC_NDTB_OFF 16 232#define TXBC_NDTB_OFF 16
@@ -211,6 +234,7 @@ enum m_can_mram_cfg {
211 234
212/* Tx Buffer Element Size Configuration(TXESC) */ 235/* Tx Buffer Element Size Configuration(TXESC) */
213#define TXESC_TBDS_8BYTES 0x0 236#define TXESC_TBDS_8BYTES 0x0
237#define TXESC_TBDS_64BYTES 0x7
214 238
215/* Tx Event FIFO Configuration (TXEFC) */ 239/* Tx Event FIFO Configuration (TXEFC) */
216#define TXEFC_EFS_OFF 16 240#define TXEFC_EFS_OFF 16
@@ -219,11 +243,11 @@ enum m_can_mram_cfg {
219/* Message RAM Configuration (in bytes) */ 243/* Message RAM Configuration (in bytes) */
220#define SIDF_ELEMENT_SIZE 4 244#define SIDF_ELEMENT_SIZE 4
221#define XIDF_ELEMENT_SIZE 8 245#define XIDF_ELEMENT_SIZE 8
222#define RXF0_ELEMENT_SIZE 16 246#define RXF0_ELEMENT_SIZE 72
223#define RXF1_ELEMENT_SIZE 16 247#define RXF1_ELEMENT_SIZE 72
224#define RXB_ELEMENT_SIZE 16 248#define RXB_ELEMENT_SIZE 16
225#define TXE_ELEMENT_SIZE 8 249#define TXE_ELEMENT_SIZE 8
226#define TXB_ELEMENT_SIZE 16 250#define TXB_ELEMENT_SIZE 72
227 251
228/* Message RAM Elements */ 252/* Message RAM Elements */
229#define M_CAN_FIFO_ID 0x0 253#define M_CAN_FIFO_ID 0x0
@@ -231,11 +255,17 @@ enum m_can_mram_cfg {
231#define M_CAN_FIFO_DATA(n) (0x8 + ((n) << 2)) 255#define M_CAN_FIFO_DATA(n) (0x8 + ((n) << 2))
232 256
233/* Rx Buffer Element */ 257/* Rx Buffer Element */
258/* R0 */
234#define RX_BUF_ESI BIT(31) 259#define RX_BUF_ESI BIT(31)
235#define RX_BUF_XTD BIT(30) 260#define RX_BUF_XTD BIT(30)
236#define RX_BUF_RTR BIT(29) 261#define RX_BUF_RTR BIT(29)
262/* R1 */
263#define RX_BUF_ANMF BIT(31)
264#define RX_BUF_EDL BIT(21)
265#define RX_BUF_BRS BIT(20)
237 266
238/* Tx Buffer Element */ 267/* Tx Buffer Element */
268/* R0 */
239#define TX_BUF_XTD BIT(30) 269#define TX_BUF_XTD BIT(30)
240#define TX_BUF_RTR BIT(29) 270#define TX_BUF_RTR BIT(29)
241 271
@@ -296,6 +326,7 @@ static inline void m_can_config_endisable(const struct m_can_priv *priv,
296 if (enable) { 326 if (enable) {
297 /* enable m_can configuration */ 327 /* enable m_can configuration */
298 m_can_write(priv, M_CAN_CCCR, cccr | CCCR_INIT); 328 m_can_write(priv, M_CAN_CCCR, cccr | CCCR_INIT);
329 udelay(5);
299 /* CCCR.CCE can only be set/reset while CCCR.INIT = '1' */ 330 /* CCCR.CCE can only be set/reset while CCCR.INIT = '1' */
300 m_can_write(priv, M_CAN_CCCR, cccr | CCCR_INIT | CCCR_CCE); 331 m_can_write(priv, M_CAN_CCCR, cccr | CCCR_INIT | CCCR_CCE);
301 } else { 332 } else {
@@ -326,41 +357,67 @@ static inline void m_can_disable_all_interrupts(const struct m_can_priv *priv)
326 m_can_write(priv, M_CAN_ILE, 0x0); 357 m_can_write(priv, M_CAN_ILE, 0x0);
327} 358}
328 359
329static void m_can_read_fifo(const struct net_device *dev, struct can_frame *cf, 360static void m_can_read_fifo(struct net_device *dev, u32 rxfs)
330 u32 rxfs)
331{ 361{
362 struct net_device_stats *stats = &dev->stats;
332 struct m_can_priv *priv = netdev_priv(dev); 363 struct m_can_priv *priv = netdev_priv(dev);
333 u32 id, fgi; 364 struct canfd_frame *cf;
365 struct sk_buff *skb;
366 u32 id, fgi, dlc;
367 int i;
334 368
335 /* calculate the fifo get index for where to read data */ 369 /* calculate the fifo get index for where to read data */
336 fgi = (rxfs & RXFS_FGI_MASK) >> RXFS_FGI_OFF; 370 fgi = (rxfs & RXFS_FGI_MASK) >> RXFS_FGI_OFF;
371 dlc = m_can_fifo_read(priv, fgi, M_CAN_FIFO_DLC);
372 if (dlc & RX_BUF_EDL)
373 skb = alloc_canfd_skb(dev, &cf);
374 else
375 skb = alloc_can_skb(dev, (struct can_frame **)&cf);
376 if (!skb) {
377 stats->rx_dropped++;
378 return;
379 }
380
381 if (dlc & RX_BUF_EDL)
382 cf->len = can_dlc2len((dlc >> 16) & 0x0F);
383 else
384 cf->len = get_can_dlc((dlc >> 16) & 0x0F);
385
337 id = m_can_fifo_read(priv, fgi, M_CAN_FIFO_ID); 386 id = m_can_fifo_read(priv, fgi, M_CAN_FIFO_ID);
338 if (id & RX_BUF_XTD) 387 if (id & RX_BUF_XTD)
339 cf->can_id = (id & CAN_EFF_MASK) | CAN_EFF_FLAG; 388 cf->can_id = (id & CAN_EFF_MASK) | CAN_EFF_FLAG;
340 else 389 else
341 cf->can_id = (id >> 18) & CAN_SFF_MASK; 390 cf->can_id = (id >> 18) & CAN_SFF_MASK;
342 391
343 if (id & RX_BUF_RTR) { 392 if (id & RX_BUF_ESI) {
393 cf->flags |= CANFD_ESI;
394 netdev_dbg(dev, "ESI Error\n");
395 }
396
397 if (!(dlc & RX_BUF_EDL) && (id & RX_BUF_RTR)) {
344 cf->can_id |= CAN_RTR_FLAG; 398 cf->can_id |= CAN_RTR_FLAG;
345 } else { 399 } else {
346 id = m_can_fifo_read(priv, fgi, M_CAN_FIFO_DLC); 400 if (dlc & RX_BUF_BRS)
347 cf->can_dlc = get_can_dlc((id >> 16) & 0x0F); 401 cf->flags |= CANFD_BRS;
348 *(u32 *)(cf->data + 0) = m_can_fifo_read(priv, fgi, 402
349 M_CAN_FIFO_DATA(0)); 403 for (i = 0; i < cf->len; i += 4)
350 *(u32 *)(cf->data + 4) = m_can_fifo_read(priv, fgi, 404 *(u32 *)(cf->data + i) =
351 M_CAN_FIFO_DATA(1)); 405 m_can_fifo_read(priv, fgi,
406 M_CAN_FIFO_DATA(i / 4));
352 } 407 }
353 408
354 /* acknowledge rx fifo 0 */ 409 /* acknowledge rx fifo 0 */
355 m_can_write(priv, M_CAN_RXF0A, fgi); 410 m_can_write(priv, M_CAN_RXF0A, fgi);
411
412 stats->rx_packets++;
413 stats->rx_bytes += cf->len;
414
415 netif_receive_skb(skb);
356} 416}
357 417
358static int m_can_do_rx_poll(struct net_device *dev, int quota) 418static int m_can_do_rx_poll(struct net_device *dev, int quota)
359{ 419{
360 struct m_can_priv *priv = netdev_priv(dev); 420 struct m_can_priv *priv = netdev_priv(dev);
361 struct net_device_stats *stats = &dev->stats;
362 struct sk_buff *skb;
363 struct can_frame *frame;
364 u32 pkts = 0; 421 u32 pkts = 0;
365 u32 rxfs; 422 u32 rxfs;
366 423
@@ -374,18 +431,7 @@ static int m_can_do_rx_poll(struct net_device *dev, int quota)
374 if (rxfs & RXFS_RFL) 431 if (rxfs & RXFS_RFL)
375 netdev_warn(dev, "Rx FIFO 0 Message Lost\n"); 432 netdev_warn(dev, "Rx FIFO 0 Message Lost\n");
376 433
377 skb = alloc_can_skb(dev, &frame); 434 m_can_read_fifo(dev, rxfs);
378 if (!skb) {
379 stats->rx_dropped++;
380 return pkts;
381 }
382
383 m_can_read_fifo(dev, frame, rxfs);
384
385 stats->rx_packets++;
386 stats->rx_bytes += frame->can_dlc;
387
388 netif_receive_skb(skb);
389 435
390 quota--; 436 quota--;
391 pkts++; 437 pkts++;
@@ -481,11 +527,23 @@ static int m_can_handle_lec_err(struct net_device *dev,
481 return 1; 527 return 1;
482} 528}
483 529
530static int __m_can_get_berr_counter(const struct net_device *dev,
531 struct can_berr_counter *bec)
532{
533 struct m_can_priv *priv = netdev_priv(dev);
534 unsigned int ecr;
535
536 ecr = m_can_read(priv, M_CAN_ECR);
537 bec->rxerr = (ecr & ECR_REC_MASK) >> ECR_REC_SHIFT;
538 bec->txerr = ecr & ECR_TEC_MASK;
539
540 return 0;
541}
542
484static int m_can_get_berr_counter(const struct net_device *dev, 543static int m_can_get_berr_counter(const struct net_device *dev,
485 struct can_berr_counter *bec) 544 struct can_berr_counter *bec)
486{ 545{
487 struct m_can_priv *priv = netdev_priv(dev); 546 struct m_can_priv *priv = netdev_priv(dev);
488 unsigned int ecr;
489 int err; 547 int err;
490 548
491 err = clk_prepare_enable(priv->hclk); 549 err = clk_prepare_enable(priv->hclk);
@@ -498,9 +556,7 @@ static int m_can_get_berr_counter(const struct net_device *dev,
498 return err; 556 return err;
499 } 557 }
500 558
501 ecr = m_can_read(priv, M_CAN_ECR); 559 __m_can_get_berr_counter(dev, bec);
502 bec->rxerr = (ecr & ECR_REC_MASK) >> ECR_REC_SHIFT;
503 bec->txerr = ecr & ECR_TEC_MASK;
504 560
505 clk_disable_unprepare(priv->cclk); 561 clk_disable_unprepare(priv->cclk);
506 clk_disable_unprepare(priv->hclk); 562 clk_disable_unprepare(priv->hclk);
@@ -544,7 +600,7 @@ static int m_can_handle_state_change(struct net_device *dev,
544 if (unlikely(!skb)) 600 if (unlikely(!skb))
545 return 0; 601 return 0;
546 602
547 m_can_get_berr_counter(dev, &bec); 603 __m_can_get_berr_counter(dev, &bec);
548 604
549 switch (new_state) { 605 switch (new_state) {
550 case CAN_STATE_ERROR_ACTIVE: 606 case CAN_STATE_ERROR_ACTIVE:
@@ -596,14 +652,14 @@ static int m_can_handle_state_errors(struct net_device *dev, u32 psr)
596 652
597 if ((psr & PSR_EP) && 653 if ((psr & PSR_EP) &&
598 (priv->can.state != CAN_STATE_ERROR_PASSIVE)) { 654 (priv->can.state != CAN_STATE_ERROR_PASSIVE)) {
599 netdev_dbg(dev, "entered error warning state\n"); 655 netdev_dbg(dev, "entered error passive state\n");
600 work_done += m_can_handle_state_change(dev, 656 work_done += m_can_handle_state_change(dev,
601 CAN_STATE_ERROR_PASSIVE); 657 CAN_STATE_ERROR_PASSIVE);
602 } 658 }
603 659
604 if ((psr & PSR_BO) && 660 if ((psr & PSR_BO) &&
605 (priv->can.state != CAN_STATE_BUS_OFF)) { 661 (priv->can.state != CAN_STATE_BUS_OFF)) {
606 netdev_dbg(dev, "entered error warning state\n"); 662 netdev_dbg(dev, "entered error bus off state\n");
607 work_done += m_can_handle_state_change(dev, 663 work_done += m_can_handle_state_change(dev,
608 CAN_STATE_BUS_OFF); 664 CAN_STATE_BUS_OFF);
609 } 665 }
@@ -615,7 +671,7 @@ static void m_can_handle_other_err(struct net_device *dev, u32 irqstatus)
615{ 671{
616 if (irqstatus & IR_WDI) 672 if (irqstatus & IR_WDI)
617 netdev_err(dev, "Message RAM Watchdog event due to missing READY\n"); 673 netdev_err(dev, "Message RAM Watchdog event due to missing READY\n");
618 if (irqstatus & IR_BEU) 674 if (irqstatus & IR_ELO)
619 netdev_err(dev, "Error Logging Overflow\n"); 675 netdev_err(dev, "Error Logging Overflow\n");
620 if (irqstatus & IR_BEU) 676 if (irqstatus & IR_BEU)
621 netdev_err(dev, "Bit Error Uncorrected\n"); 677 netdev_err(dev, "Bit Error Uncorrected\n");
@@ -733,10 +789,23 @@ static const struct can_bittiming_const m_can_bittiming_const = {
733 .brp_inc = 1, 789 .brp_inc = 1,
734}; 790};
735 791
792static const struct can_bittiming_const m_can_data_bittiming_const = {
793 .name = KBUILD_MODNAME,
794 .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */
795 .tseg1_max = 16,
796 .tseg2_min = 1, /* Time segment 2 = phase_seg2 */
797 .tseg2_max = 8,
798 .sjw_max = 4,
799 .brp_min = 1,
800 .brp_max = 32,
801 .brp_inc = 1,
802};
803
736static int m_can_set_bittiming(struct net_device *dev) 804static int m_can_set_bittiming(struct net_device *dev)
737{ 805{
738 struct m_can_priv *priv = netdev_priv(dev); 806 struct m_can_priv *priv = netdev_priv(dev);
739 const struct can_bittiming *bt = &priv->can.bittiming; 807 const struct can_bittiming *bt = &priv->can.bittiming;
808 const struct can_bittiming *dbt = &priv->can.data_bittiming;
740 u16 brp, sjw, tseg1, tseg2; 809 u16 brp, sjw, tseg1, tseg2;
741 u32 reg_btp; 810 u32 reg_btp;
742 811
@@ -747,7 +816,17 @@ static int m_can_set_bittiming(struct net_device *dev)
747 reg_btp = (brp << BTR_BRP_SHIFT) | (sjw << BTR_SJW_SHIFT) | 816 reg_btp = (brp << BTR_BRP_SHIFT) | (sjw << BTR_SJW_SHIFT) |
748 (tseg1 << BTR_TSEG1_SHIFT) | (tseg2 << BTR_TSEG2_SHIFT); 817 (tseg1 << BTR_TSEG1_SHIFT) | (tseg2 << BTR_TSEG2_SHIFT);
749 m_can_write(priv, M_CAN_BTP, reg_btp); 818 m_can_write(priv, M_CAN_BTP, reg_btp);
750 netdev_dbg(dev, "setting BTP 0x%x\n", reg_btp); 819
820 if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
821 brp = dbt->brp - 1;
822 sjw = dbt->sjw - 1;
823 tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1;
824 tseg2 = dbt->phase_seg2 - 1;
825 reg_btp = (brp << FBTR_FBRP_SHIFT) | (sjw << FBTR_FSJW_SHIFT) |
826 (tseg1 << FBTR_FTSEG1_SHIFT) |
827 (tseg2 << FBTR_FTSEG2_SHIFT);
828 m_can_write(priv, M_CAN_FBTP, reg_btp);
829 }
751 830
752 return 0; 831 return 0;
753} 832}
@@ -767,8 +846,8 @@ static void m_can_chip_config(struct net_device *dev)
767 846
768 m_can_config_endisable(priv, true); 847 m_can_config_endisable(priv, true);
769 848
770 /* RX Buffer/FIFO Element Size 8 bytes data field */ 849 /* RX Buffer/FIFO Element Size 64 bytes data field */
771 m_can_write(priv, M_CAN_RXESC, M_CAN_RXESC_8BYTES); 850 m_can_write(priv, M_CAN_RXESC, M_CAN_RXESC_64BYTES);
772 851
773 /* Accept Non-matching Frames Into FIFO 0 */ 852 /* Accept Non-matching Frames Into FIFO 0 */
774 m_can_write(priv, M_CAN_GFC, 0x0); 853 m_can_write(priv, M_CAN_GFC, 0x0);
@@ -777,8 +856,8 @@ static void m_can_chip_config(struct net_device *dev)
777 m_can_write(priv, M_CAN_TXBC, (1 << TXBC_NDTB_OFF) | 856 m_can_write(priv, M_CAN_TXBC, (1 << TXBC_NDTB_OFF) |
778 priv->mcfg[MRAM_TXB].off); 857 priv->mcfg[MRAM_TXB].off);
779 858
780 /* only support 8 bytes firstly */ 859 /* support 64 bytes payload */
781 m_can_write(priv, M_CAN_TXESC, TXESC_TBDS_8BYTES); 860 m_can_write(priv, M_CAN_TXESC, TXESC_TBDS_64BYTES);
782 861
783 m_can_write(priv, M_CAN_TXEFC, (1 << TXEFC_EFS_OFF) | 862 m_can_write(priv, M_CAN_TXEFC, (1 << TXEFC_EFS_OFF) |
784 priv->mcfg[MRAM_TXE].off); 863 priv->mcfg[MRAM_TXE].off);
@@ -793,7 +872,8 @@ static void m_can_chip_config(struct net_device *dev)
793 RXFC_FWM_1 | priv->mcfg[MRAM_RXF1].off); 872 RXFC_FWM_1 | priv->mcfg[MRAM_RXF1].off);
794 873
795 cccr = m_can_read(priv, M_CAN_CCCR); 874 cccr = m_can_read(priv, M_CAN_CCCR);
796 cccr &= ~(CCCR_TEST | CCCR_MON); 875 cccr &= ~(CCCR_TEST | CCCR_MON | (CCCR_CMR_MASK << CCCR_CMR_SHIFT) |
876 (CCCR_CME_MASK << CCCR_CME_SHIFT));
797 test = m_can_read(priv, M_CAN_TEST); 877 test = m_can_read(priv, M_CAN_TEST);
798 test &= ~TEST_LBCK; 878 test &= ~TEST_LBCK;
799 879
@@ -805,6 +885,9 @@ static void m_can_chip_config(struct net_device *dev)
805 test |= TEST_LBCK; 885 test |= TEST_LBCK;
806 } 886 }
807 887
888 if (priv->can.ctrlmode & CAN_CTRLMODE_FD)
889 cccr |= CCCR_CME_CANFD_BRS << CCCR_CME_SHIFT;
890
808 m_can_write(priv, M_CAN_CCCR, cccr); 891 m_can_write(priv, M_CAN_CCCR, cccr);
809 m_can_write(priv, M_CAN_TEST, test); 892 m_can_write(priv, M_CAN_TEST, test);
810 893
@@ -869,11 +952,13 @@ static struct net_device *alloc_m_can_dev(void)
869 952
870 priv->dev = dev; 953 priv->dev = dev;
871 priv->can.bittiming_const = &m_can_bittiming_const; 954 priv->can.bittiming_const = &m_can_bittiming_const;
955 priv->can.data_bittiming_const = &m_can_data_bittiming_const;
872 priv->can.do_set_mode = m_can_set_mode; 956 priv->can.do_set_mode = m_can_set_mode;
873 priv->can.do_get_berr_counter = m_can_get_berr_counter; 957 priv->can.do_get_berr_counter = m_can_get_berr_counter;
874 priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | 958 priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK |
875 CAN_CTRLMODE_LISTENONLY | 959 CAN_CTRLMODE_LISTENONLY |
876 CAN_CTRLMODE_BERR_REPORTING; 960 CAN_CTRLMODE_BERR_REPORTING |
961 CAN_CTRLMODE_FD;
877 962
878 return dev; 963 return dev;
879} 964}
@@ -956,8 +1041,9 @@ static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
956 struct net_device *dev) 1041 struct net_device *dev)
957{ 1042{
958 struct m_can_priv *priv = netdev_priv(dev); 1043 struct m_can_priv *priv = netdev_priv(dev);
959 struct can_frame *cf = (struct can_frame *)skb->data; 1044 struct canfd_frame *cf = (struct canfd_frame *)skb->data;
960 u32 id; 1045 u32 id, cccr;
1046 int i;
961 1047
962 if (can_dropped_invalid_skb(dev, skb)) 1048 if (can_dropped_invalid_skb(dev, skb))
963 return NETDEV_TX_OK; 1049 return NETDEV_TX_OK;
@@ -976,11 +1062,28 @@ static netdev_tx_t m_can_start_xmit(struct sk_buff *skb,
976 1062
977 /* message ram configuration */ 1063 /* message ram configuration */
978 m_can_fifo_write(priv, 0, M_CAN_FIFO_ID, id); 1064 m_can_fifo_write(priv, 0, M_CAN_FIFO_ID, id);
979 m_can_fifo_write(priv, 0, M_CAN_FIFO_DLC, cf->can_dlc << 16); 1065 m_can_fifo_write(priv, 0, M_CAN_FIFO_DLC, can_len2dlc(cf->len) << 16);
980 m_can_fifo_write(priv, 0, M_CAN_FIFO_DATA(0), *(u32 *)(cf->data + 0)); 1066
981 m_can_fifo_write(priv, 0, M_CAN_FIFO_DATA(1), *(u32 *)(cf->data + 4)); 1067 for (i = 0; i < cf->len; i += 4)
1068 m_can_fifo_write(priv, 0, M_CAN_FIFO_DATA(i / 4),
1069 *(u32 *)(cf->data + i));
1070
982 can_put_echo_skb(skb, dev, 0); 1071 can_put_echo_skb(skb, dev, 0);
983 1072
1073 if (priv->can.ctrlmode & CAN_CTRLMODE_FD) {
1074 cccr = m_can_read(priv, M_CAN_CCCR);
1075 cccr &= ~(CCCR_CMR_MASK << CCCR_CMR_SHIFT);
1076 if (can_is_canfd_skb(skb)) {
1077 if (cf->flags & CANFD_BRS)
1078 cccr |= CCCR_CMR_CANFD_BRS << CCCR_CMR_SHIFT;
1079 else
1080 cccr |= CCCR_CMR_CANFD << CCCR_CMR_SHIFT;
1081 } else {
1082 cccr |= CCCR_CMR_CAN << CCCR_CMR_SHIFT;
1083 }
1084 m_can_write(priv, M_CAN_CCCR, cccr);
1085 }
1086
984 /* enable first TX buffer to start transfer */ 1087 /* enable first TX buffer to start transfer */
985 m_can_write(priv, M_CAN_TXBTIE, 0x1); 1088 m_can_write(priv, M_CAN_TXBTIE, 0x1);
986 m_can_write(priv, M_CAN_TXBAR, 0x1); 1089 m_can_write(priv, M_CAN_TXBAR, 0x1);
@@ -992,6 +1095,7 @@ static const struct net_device_ops m_can_netdev_ops = {
992 .ndo_open = m_can_open, 1095 .ndo_open = m_can_open,
993 .ndo_stop = m_can_close, 1096 .ndo_stop = m_can_close,
994 .ndo_start_xmit = m_can_start_xmit, 1097 .ndo_start_xmit = m_can_start_xmit,
1098 .ndo_change_mtu = can_change_mtu,
995}; 1099};
996 1100
997static int register_m_can_dev(struct net_device *dev) 1101static int register_m_can_dev(struct net_device *dev)
@@ -1009,7 +1113,7 @@ static int m_can_of_parse_mram(struct platform_device *pdev,
1009 struct resource *res; 1113 struct resource *res;
1010 void __iomem *addr; 1114 void __iomem *addr;
1011 u32 out_val[MRAM_CFG_LEN]; 1115 u32 out_val[MRAM_CFG_LEN];
1012 int ret; 1116 int i, start, end, ret;
1013 1117
1014 /* message ram could be shared */ 1118 /* message ram could be shared */
1015 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "message_ram"); 1119 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "message_ram");
@@ -1060,6 +1164,15 @@ static int m_can_of_parse_mram(struct platform_device *pdev,
1060 priv->mcfg[MRAM_TXE].off, priv->mcfg[MRAM_TXE].num, 1164 priv->mcfg[MRAM_TXE].off, priv->mcfg[MRAM_TXE].num,
1061 priv->mcfg[MRAM_TXB].off, priv->mcfg[MRAM_TXB].num); 1165 priv->mcfg[MRAM_TXB].off, priv->mcfg[MRAM_TXB].num);
1062 1166
1167 /* initialize the entire Message RAM in use to avoid possible
1168 * ECC/parity checksum errors when reading an uninitialized buffer
1169 */
1170 start = priv->mcfg[MRAM_SIDF].off;
1171 end = priv->mcfg[MRAM_TXB].off +
1172 priv->mcfg[MRAM_TXB].num * TXB_ELEMENT_SIZE;
1173 for (i = start; i < end; i += 4)
1174 writel(0x0, priv->mram_base + i);
1175
1063 return 0; 1176 return 0;
1064} 1177}
1065 1178
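
The m_can transmit hunk above replaces the fixed two-word copy of a classic CAN frame with can_len2dlc() on the CAN FD payload length, a word-at-a-time copy into Message RAM, and, when CAN_CTRLMODE_FD is enabled, per-frame CCCR command bits selecting classic, FD, or FD-with-bit-rate-switch transmission. A minimal userspace sketch of the length-to-DLC mapping and the 32-bit copy loop, using a simplified stand-in for struct canfd_frame rather than the kernel definition:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* simplified stand-in for struct canfd_frame (not the kernel layout) */
    struct fd_frame {
        uint32_t id;
        uint8_t  len;        /* payload length in bytes, 0..64 */
        uint8_t  data[64];
    };

    /* CAN FD payload length -> DLC code: 0..8 map 1:1, longer lengths round
     * up to the nearest representable size (12/16/20/24/32/48/64) */
    static uint8_t len2dlc(uint8_t len)
    {
        static const uint8_t step[] = { 12, 16, 20, 24, 32, 48, 64 };
        uint8_t dlc;

        if (len <= 8)
            return len;
        for (dlc = 9; dlc <= 15; dlc++)
            if (len <= step[dlc - 9])
                return dlc;
        return 15;
    }

    int main(void)
    {
        struct fd_frame cf = { .id = 0x123, .len = 20 };
        uint32_t tx_elem[16] = { 0 };    /* stand-in for the TX element data words */
        uint32_t dlc_word = (uint32_t)len2dlc(cf.len) << 16;
        int i;

        memset(cf.data, 0xAB, cf.len);

        /* copy the payload one 32-bit word at a time, as the new loop does
         * with m_can_fifo_write(priv, 0, M_CAN_FIFO_DATA(i / 4), ...) */
        for (i = 0; i < cf.len; i += 4)
            memcpy(&tx_elem[i / 4], cf.data + i, 4);

        printf("len=%u dlc=%u words=%d\n", cf.len,
               (unsigned)(dlc_word >> 16), (cf.len + 3) / 4);
        return 0;
    }

For classic frames (len <= 8) the mapping is the identity, so the same write path covers both frame formats.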
diff --git a/drivers/net/can/rcar_can.c b/drivers/net/can/rcar_can.c
index 1abe133d1594..9718248e55f1 100644
--- a/drivers/net/can/rcar_can.c
+++ b/drivers/net/can/rcar_can.c
@@ -628,6 +628,7 @@ static const struct net_device_ops rcar_can_netdev_ops = {
628 .ndo_open = rcar_can_open, 628 .ndo_open = rcar_can_open,
629 .ndo_stop = rcar_can_close, 629 .ndo_stop = rcar_can_close,
630 .ndo_start_xmit = rcar_can_start_xmit, 630 .ndo_start_xmit = rcar_can_start_xmit,
631 .ndo_change_mtu = can_change_mtu,
631}; 632};
632 633
633static void rcar_can_rx_pkt(struct rcar_can_priv *priv) 634static void rcar_can_rx_pkt(struct rcar_can_priv *priv)
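
rcar_can is one of several drivers in this series (gs_usb, xilinx_can and the m_can driver above do the same) that now route .ndo_change_mtu through the common can_change_mtu() helper, so the MTU can only be toggled between the classic CAN and CAN FD frame sizes. A rough userspace model of the checks such a helper performs, simplified and not the in-tree implementation:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    #define CAN_MTU    16   /* sizeof(struct can_frame) */
    #define CANFD_MTU  72   /* sizeof(struct canfd_frame) */

    /* simplified model of the state the MTU check looks at */
    struct fake_can_dev {
        bool up;            /* interface currently running? */
        bool fd_capable;    /* driver advertised CAN_CTRLMODE_FD support */
        bool fd_enabled;    /* CAN_CTRLMODE_FD currently selected */
        int  mtu;
    };

    static int change_mtu(struct fake_can_dev *dev, int new_mtu)
    {
        if (dev->up)                 /* no MTU changes while running */
            return -EBUSY;

        switch (new_mtu) {
        case CAN_MTU:                /* fall back to classic CAN framing */
            dev->fd_enabled = false;
            break;
        case CANFD_MTU:              /* only valid on CAN FD capable devices */
            if (!dev->fd_capable)
                return -EINVAL;
            dev->fd_enabled = true;
            break;
        default:
            return -EINVAL;          /* arbitrary MTUs are rejected */
        }

        dev->mtu = new_mtu;
        return 0;
    }

    int main(void)
    {
        struct fake_can_dev dev = { .fd_capable = true, .mtu = CAN_MTU };

        printf("switch to CANFD_MTU: %d\n", change_mtu(&dev, CANFD_MTU));
        printf("bogus MTU 1500:      %d\n", change_mtu(&dev, 1500));
        return 0;
    }

The important property is that CANFD_MTU is rejected unless the device advertised CAN FD support, and nothing changes while the interface is up.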
diff --git a/drivers/net/can/sja1000/kvaser_pci.c b/drivers/net/can/sja1000/kvaser_pci.c
index 8ff3424d5147..15c00faeec61 100644
--- a/drivers/net/can/sja1000/kvaser_pci.c
+++ b/drivers/net/can/sja1000/kvaser_pci.c
@@ -214,7 +214,7 @@ static int kvaser_pci_add_chan(struct pci_dev *pdev, int channel,
214 struct net_device *dev; 214 struct net_device *dev;
215 struct sja1000_priv *priv; 215 struct sja1000_priv *priv;
216 struct kvaser_pci *board; 216 struct kvaser_pci *board;
217 int err, init_step; 217 int err;
218 218
219 dev = alloc_sja1000dev(sizeof(struct kvaser_pci)); 219 dev = alloc_sja1000dev(sizeof(struct kvaser_pci));
220 if (dev == NULL) 220 if (dev == NULL)
@@ -235,7 +235,6 @@ static int kvaser_pci_add_chan(struct pci_dev *pdev, int channel,
235 if (channel == 0) { 235 if (channel == 0) {
236 board->xilinx_ver = 236 board->xilinx_ver =
237 ioread8(board->res_addr + XILINX_VERINT) >> 4; 237 ioread8(board->res_addr + XILINX_VERINT) >> 4;
238 init_step = 2;
239 238
240 /* Assert PTADR# - we're in passive mode so the other bits are 239 /* Assert PTADR# - we're in passive mode so the other bits are
241 not important */ 240 not important */
@@ -264,8 +263,6 @@ static int kvaser_pci_add_chan(struct pci_dev *pdev, int channel,
264 priv->irq_flags = IRQF_SHARED; 263 priv->irq_flags = IRQF_SHARED;
265 dev->irq = pdev->irq; 264 dev->irq = pdev->irq;
266 265
267 init_step = 4;
268
269 dev_info(&pdev->dev, "reg_base=%p conf_addr=%p irq=%d\n", 266 dev_info(&pdev->dev, "reg_base=%p conf_addr=%p irq=%d\n",
270 priv->reg_base, board->conf_addr, dev->irq); 267 priv->reg_base, board->conf_addr, dev->irq);
271 268
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 00f2534dde73..29d3f0938eb8 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -434,10 +434,9 @@ static void ems_usb_read_bulk_callback(struct urb *urb)
434 if (urb->actual_length > CPC_HEADER_SIZE) { 434 if (urb->actual_length > CPC_HEADER_SIZE) {
435 struct ems_cpc_msg *msg; 435 struct ems_cpc_msg *msg;
436 u8 *ibuf = urb->transfer_buffer; 436 u8 *ibuf = urb->transfer_buffer;
437 u8 msg_count, again, start; 437 u8 msg_count, start;
438 438
439 msg_count = ibuf[0] & ~0x80; 439 msg_count = ibuf[0] & ~0x80;
440 again = ibuf[0] & 0x80;
441 440
442 start = CPC_HEADER_SIZE; 441 start = CPC_HEADER_SIZE;
443 442
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index b7c9e8b11460..c063a54ab8dd 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -464,7 +464,6 @@ static void esd_usb2_write_bulk_callback(struct urb *urb)
464{ 464{
465 struct esd_tx_urb_context *context = urb->context; 465 struct esd_tx_urb_context *context = urb->context;
466 struct esd_usb2_net_priv *priv; 466 struct esd_usb2_net_priv *priv;
467 struct esd_usb2 *dev;
468 struct net_device *netdev; 467 struct net_device *netdev;
469 size_t size = sizeof(struct esd_usb2_msg); 468 size_t size = sizeof(struct esd_usb2_msg);
470 469
@@ -472,7 +471,6 @@ static void esd_usb2_write_bulk_callback(struct urb *urb)
472 471
473 priv = context->priv; 472 priv = context->priv;
474 netdev = priv->netdev; 473 netdev = priv->netdev;
475 dev = priv->usb2;
476 474
477 /* free up our allocated buffer */ 475 /* free up our allocated buffer */
478 usb_free_coherent(urb->dev, size, 476 usb_free_coherent(urb->dev, size,
@@ -1143,6 +1141,7 @@ static void esd_usb2_disconnect(struct usb_interface *intf)
1143 } 1141 }
1144 } 1142 }
1145 unlink_all_urbs(dev); 1143 unlink_all_urbs(dev);
1144 kfree(dev);
1146 } 1145 }
1147} 1146}
1148 1147
diff --git a/drivers/net/can/usb/gs_usb.c b/drivers/net/can/usb/gs_usb.c
index 04b0f84612f0..009acc8641fc 100644
--- a/drivers/net/can/usb/gs_usb.c
+++ b/drivers/net/can/usb/gs_usb.c
@@ -718,6 +718,7 @@ static const struct net_device_ops gs_usb_netdev_ops = {
718 .ndo_open = gs_can_open, 718 .ndo_open = gs_can_open,
719 .ndo_stop = gs_can_close, 719 .ndo_stop = gs_can_close,
720 .ndo_start_xmit = gs_can_start_xmit, 720 .ndo_start_xmit = gs_can_start_xmit,
721 .ndo_change_mtu = can_change_mtu,
721}; 722};
722 723
723static struct gs_can *gs_make_candev(unsigned int channel, struct usb_interface *intf) 724static struct gs_can *gs_make_candev(unsigned int channel, struct usb_interface *intf)
diff --git a/drivers/net/can/xilinx_can.c b/drivers/net/can/xilinx_can.c
index 5e8b5609c067..8a998e3884ce 100644
--- a/drivers/net/can/xilinx_can.c
+++ b/drivers/net/can/xilinx_can.c
@@ -300,7 +300,8 @@ static int xcan_set_bittiming(struct net_device *ndev)
300static int xcan_chip_start(struct net_device *ndev) 300static int xcan_chip_start(struct net_device *ndev)
301{ 301{
302 struct xcan_priv *priv = netdev_priv(ndev); 302 struct xcan_priv *priv = netdev_priv(ndev);
303 u32 err, reg_msr, reg_sr_mask; 303 u32 reg_msr, reg_sr_mask;
304 int err;
304 unsigned long timeout; 305 unsigned long timeout;
305 306
306 /* Check if it is in reset mode */ 307 /* Check if it is in reset mode */
@@ -961,6 +962,7 @@ static const struct net_device_ops xcan_netdev_ops = {
961 .ndo_open = xcan_open, 962 .ndo_open = xcan_open,
962 .ndo_stop = xcan_close, 963 .ndo_stop = xcan_close,
963 .ndo_start_xmit = xcan_start_xmit, 964 .ndo_start_xmit = xcan_start_xmit,
965 .ndo_change_mtu = can_change_mtu,
964}; 966};
965 967
966/** 968/**
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index b9625968daac..4f4c2a7888e5 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -377,6 +377,29 @@ static irqreturn_t bcm_sf2_switch_1_isr(int irq, void *dev_id)
377 return IRQ_HANDLED; 377 return IRQ_HANDLED;
378} 378}
379 379
380static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
381{
382 unsigned int timeout = 1000;
383 u32 reg;
384
385 reg = core_readl(priv, CORE_WATCHDOG_CTRL);
386 reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET;
387 core_writel(priv, reg, CORE_WATCHDOG_CTRL);
388
389 do {
390 reg = core_readl(priv, CORE_WATCHDOG_CTRL);
391 if (!(reg & SOFTWARE_RESET))
392 break;
393
394 usleep_range(1000, 2000);
395 } while (timeout-- > 0);
396
397 if (timeout == 0)
398 return -ETIMEDOUT;
399
400 return 0;
401}
402
380static int bcm_sf2_sw_setup(struct dsa_switch *ds) 403static int bcm_sf2_sw_setup(struct dsa_switch *ds)
381{ 404{
382 const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME; 405 const char *reg_names[BCM_SF2_REGS_NUM] = BCM_SF2_REGS_NAME;
@@ -404,11 +427,18 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds)
404 *base = of_iomap(dn, i); 427 *base = of_iomap(dn, i);
405 if (*base == NULL) { 428 if (*base == NULL) {
406 pr_err("unable to find register: %s\n", reg_names[i]); 429 pr_err("unable to find register: %s\n", reg_names[i]);
407 return -ENODEV; 430 ret = -ENOMEM;
431 goto out_unmap;
408 } 432 }
409 base++; 433 base++;
410 } 434 }
411 435
436 ret = bcm_sf2_sw_rst(priv);
437 if (ret) {
438 pr_err("unable to software reset switch: %d\n", ret);
439 goto out_unmap;
440 }
441
412 /* Disable all interrupts and request them */ 442 /* Disable all interrupts and request them */
413 intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET); 443 intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_MASK_SET);
414 intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR); 444 intrl2_0_writel(priv, 0xffffffff, INTRL2_CPU_CLEAR);
@@ -484,7 +514,8 @@ out_free_irq0:
484out_unmap: 514out_unmap:
485 base = &priv->core; 515 base = &priv->core;
486 for (i = 0; i < BCM_SF2_REGS_NUM; i++) { 516 for (i = 0; i < BCM_SF2_REGS_NUM; i++) {
487 iounmap(*base); 517 if (*base)
518 iounmap(*base);
488 base++; 519 base++;
489 } 520 }
490 return ret; 521 return ret;
@@ -733,29 +764,6 @@ static int bcm_sf2_sw_suspend(struct dsa_switch *ds)
733 return 0; 764 return 0;
734} 765}
735 766
736static int bcm_sf2_sw_rst(struct bcm_sf2_priv *priv)
737{
738 unsigned int timeout = 1000;
739 u32 reg;
740
741 reg = core_readl(priv, CORE_WATCHDOG_CTRL);
742 reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET;
743 core_writel(priv, reg, CORE_WATCHDOG_CTRL);
744
745 do {
746 reg = core_readl(priv, CORE_WATCHDOG_CTRL);
747 if (!(reg & SOFTWARE_RESET))
748 break;
749
750 usleep_range(1000, 2000);
751 } while (timeout-- > 0);
752
753 if (timeout == 0)
754 return -ETIMEDOUT;
755
756 return 0;
757}
758
759static int bcm_sf2_sw_resume(struct dsa_switch *ds) 767static int bcm_sf2_sw_resume(struct dsa_switch *ds)
760{ 768{
761 struct bcm_sf2_priv *priv = ds_to_priv(ds); 769 struct bcm_sf2_priv *priv = ds_to_priv(ds);
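
bcm_sf2_sw_rst() is moved ahead of bcm_sf2_sw_setup() so the switch is soft-reset before interrupts and registers are configured, and a reset failure now unwinds through out_unmap rather than being ignored. The function itself is a bounded poll: assert the reset bits, then re-read the watchdog control register until hardware clears SOFTWARE_RESET or the retry budget is exhausted. A self-contained sketch of that pattern against a faked register (bit positions and helper names here are placeholders):

    #include <errno.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SOFTWARE_RESET (1u << 0)   /* stand-in bit layout, not the real one */
    #define EN_CHIP_RST    (1u << 1)
    #define EN_SW_RESET    (1u << 2)

    /* fake watchdog-control register: pretends the reset completes on the
     * third read after it was requested */
    static uint32_t fake_reg;
    static int reads_left = 3;

    static uint32_t reg_read(void)
    {
        if (reads_left && --reads_left == 0)
            fake_reg &= ~SOFTWARE_RESET;
        return fake_reg;
    }

    static void reg_write(uint32_t val) { fake_reg = val; }
    static void sleep_us(unsigned int lo, unsigned int hi) { (void)lo; (void)hi; }

    /* bounded poll: request the reset, then wait for hardware to clear it */
    static int soft_reset(void)
    {
        unsigned int retries = 1000;
        uint32_t reg;

        reg = reg_read();
        reg |= SOFTWARE_RESET | EN_CHIP_RST | EN_SW_RESET;
        reg_write(reg);

        while (retries--) {
            if (!(reg_read() & SOFTWARE_RESET))
                return 0;            /* hardware finished the reset */
            sleep_us(1000, 2000);
        }

        return -ETIMEDOUT;           /* bit still set after all the polls */
    }

    int main(void)
    {
        printf("soft_reset() = %d\n", soft_reset());
        return 0;
    }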
diff --git a/drivers/net/dsa/mv88e6171.c b/drivers/net/dsa/mv88e6171.c
index 1020a7af67cf..78d8e876f3aa 100644
--- a/drivers/net/dsa/mv88e6171.c
+++ b/drivers/net/dsa/mv88e6171.c
@@ -395,7 +395,7 @@ static int mv88e6171_get_sset_count(struct dsa_switch *ds)
395} 395}
396 396
397struct dsa_switch_driver mv88e6171_switch_driver = { 397struct dsa_switch_driver mv88e6171_switch_driver = {
398 .tag_protocol = DSA_TAG_PROTO_DSA, 398 .tag_protocol = DSA_TAG_PROTO_EDSA,
399 .priv_size = sizeof(struct mv88e6xxx_priv_state), 399 .priv_size = sizeof(struct mv88e6xxx_priv_state),
400 .probe = mv88e6171_probe, 400 .probe = mv88e6171_probe,
401 .setup = mv88e6171_setup, 401 .setup = mv88e6171_setup,
diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
index 29554992215a..2349ea970255 100644
--- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
+++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c
@@ -1465,7 +1465,7 @@ static int xgbe_set_features(struct net_device *netdev,
1465{ 1465{
1466 struct xgbe_prv_data *pdata = netdev_priv(netdev); 1466 struct xgbe_prv_data *pdata = netdev_priv(netdev);
1467 struct xgbe_hw_if *hw_if = &pdata->hw_if; 1467 struct xgbe_hw_if *hw_if = &pdata->hw_if;
1468 unsigned int rxcsum, rxvlan, rxvlan_filter; 1468 netdev_features_t rxcsum, rxvlan, rxvlan_filter;
1469 1469
1470 rxcsum = pdata->netdev_features & NETIF_F_RXCSUM; 1470 rxcsum = pdata->netdev_features & NETIF_F_RXCSUM;
1471 rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX; 1471 rxvlan = pdata->netdev_features & NETIF_F_HW_VLAN_CTAG_RX;
@@ -1598,7 +1598,8 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
1598 struct skb_shared_hwtstamps *hwtstamps; 1598 struct skb_shared_hwtstamps *hwtstamps;
1599 unsigned int incomplete, error, context_next, context; 1599 unsigned int incomplete, error, context_next, context;
1600 unsigned int len, put_len, max_len; 1600 unsigned int len, put_len, max_len;
1601 int received = 0; 1601 unsigned int received = 0;
1602 int packet_count = 0;
1602 1603
1603 DBGPR("-->xgbe_rx_poll: budget=%d\n", budget); 1604 DBGPR("-->xgbe_rx_poll: budget=%d\n", budget);
1604 1605
@@ -1608,7 +1609,7 @@ static int xgbe_rx_poll(struct xgbe_channel *channel, int budget)
1608 1609
1609 rdata = XGBE_GET_DESC_DATA(ring, ring->cur); 1610 rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
1610 packet = &ring->packet_data; 1611 packet = &ring->packet_data;
1611 while (received < budget) { 1612 while (packet_count < budget) {
1612 DBGPR(" cur = %d\n", ring->cur); 1613 DBGPR(" cur = %d\n", ring->cur);
1613 1614
1614 /* First time in loop see if we need to restore state */ 1615 /* First time in loop see if we need to restore state */
@@ -1662,7 +1663,7 @@ read_again:
1662 if (packet->errors) 1663 if (packet->errors)
1663 DBGPR("Error in received packet\n"); 1664 DBGPR("Error in received packet\n");
1664 dev_kfree_skb(skb); 1665 dev_kfree_skb(skb);
1665 continue; 1666 goto next_packet;
1666 } 1667 }
1667 1668
1668 if (!context) { 1669 if (!context) {
@@ -1677,7 +1678,7 @@ read_again:
1677 } 1678 }
1678 1679
1679 dev_kfree_skb(skb); 1680 dev_kfree_skb(skb);
1680 continue; 1681 goto next_packet;
1681 } 1682 }
1682 memcpy(skb_tail_pointer(skb), rdata->skb->data, 1683 memcpy(skb_tail_pointer(skb), rdata->skb->data,
1683 put_len); 1684 put_len);
@@ -1694,7 +1695,7 @@ read_again:
1694 1695
1695 /* Stray Context Descriptor? */ 1696 /* Stray Context Descriptor? */
1696 if (!skb) 1697 if (!skb)
1697 continue; 1698 goto next_packet;
1698 1699
1699 /* Be sure we don't exceed the configured MTU */ 1700 /* Be sure we don't exceed the configured MTU */
1700 max_len = netdev->mtu + ETH_HLEN; 1701 max_len = netdev->mtu + ETH_HLEN;
@@ -1705,7 +1706,7 @@ read_again:
1705 if (skb->len > max_len) { 1706 if (skb->len > max_len) {
1706 DBGPR("packet length exceeds configured MTU\n"); 1707 DBGPR("packet length exceeds configured MTU\n");
1707 dev_kfree_skb(skb); 1708 dev_kfree_skb(skb);
1708 continue; 1709 goto next_packet;
1709 } 1710 }
1710 1711
1711#ifdef XGMAC_ENABLE_RX_PKT_DUMP 1712#ifdef XGMAC_ENABLE_RX_PKT_DUMP
@@ -1739,6 +1740,9 @@ read_again:
1739 1740
1740 netdev->last_rx = jiffies; 1741 netdev->last_rx = jiffies;
1741 napi_gro_receive(&pdata->napi, skb); 1742 napi_gro_receive(&pdata->napi, skb);
1743
1744next_packet:
1745 packet_count++;
1742 } 1746 }
1743 1747
1744 /* Check if we need to save state before leaving */ 1748 /* Check if we need to save state before leaving */
@@ -1752,9 +1756,9 @@ read_again:
1752 rdata->state.error = error; 1756 rdata->state.error = error;
1753 } 1757 }
1754 1758
1755 DBGPR("<--xgbe_rx_poll: received = %d\n", received); 1759 DBGPR("<--xgbe_rx_poll: packet_count = %d\n", packet_count);
1756 1760
1757 return received; 1761 return packet_count;
1758} 1762}
1759 1763
1760static int xgbe_poll(struct napi_struct *napi, int budget) 1764static int xgbe_poll(struct napi_struct *napi, int budget)
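
The xgbe_rx_poll() rework separates descriptors examined from frames delivered: every iteration now ends at a next_packet label that increments packet_count, the loop runs while packet_count < budget, and packet_count is what gets returned to NAPI, so the error, stray-context and oversized-frame paths that used to hit continue now still consume budget. A toy illustration of the accounting (the descriptor stream is made up):

    #include <stdio.h>

    /* made-up descriptor stream: 0 = error/context descriptor, 1 = good frame */
    static const int ring[] = { 1, 0, 1, 1, 0, 0, 1, 1, 1, 1 };
    static const int ring_len = sizeof(ring) / sizeof(ring[0]);

    /* every descriptor examined counts against the budget, whether or not it
     * produced a packet for the stack; this is what the goto achieves */
    static int rx_poll(int budget)
    {
        int idx = 0, received = 0, packet_count = 0;

        while (packet_count < budget && idx < ring_len) {
            int good = ring[idx++];

            if (!good)
                goto next_packet;    /* dropped frame: still consumes budget */

            received++;              /* frame handed to the stack */

    next_packet:
            packet_count++;
        }

        printf("delivered %d of %d descriptors examined\n", received, packet_count);
        return packet_count;         /* NAPI compares this against the budget */
    }

    int main(void)
    {
        rx_poll(8);
        return 0;
    }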
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
index 63ea1941e973..7ba83ffb08ac 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.c
@@ -575,10 +575,24 @@ static void xgene_gmac_tx_disable(struct xgene_enet_pdata *pdata)
575 xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~TX_EN); 575 xgene_enet_wr_mcx_mac(pdata, MAC_CONFIG_1_ADDR, data & ~TX_EN);
576} 576}
577 577
578static void xgene_enet_reset(struct xgene_enet_pdata *pdata) 578bool xgene_ring_mgr_init(struct xgene_enet_pdata *p)
579{
580 if (!ioread32(p->ring_csr_addr + CLKEN_ADDR))
581 return false;
582
583 if (ioread32(p->ring_csr_addr + SRST_ADDR))
584 return false;
585
586 return true;
587}
588
589static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
579{ 590{
580 u32 val; 591 u32 val;
581 592
593 if (!xgene_ring_mgr_init(pdata))
594 return -ENODEV;
595
582 clk_prepare_enable(pdata->clk); 596 clk_prepare_enable(pdata->clk);
583 clk_disable_unprepare(pdata->clk); 597 clk_disable_unprepare(pdata->clk);
584 clk_prepare_enable(pdata->clk); 598 clk_prepare_enable(pdata->clk);
@@ -590,6 +604,8 @@ static void xgene_enet_reset(struct xgene_enet_pdata *pdata)
590 val |= SCAN_AUTO_INCR; 604 val |= SCAN_AUTO_INCR;
591 MGMT_CLOCK_SEL_SET(&val, 1); 605 MGMT_CLOCK_SEL_SET(&val, 1);
592 xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, val); 606 xgene_enet_wr_mcx_mac(pdata, MII_MGMT_CONFIG_ADDR, val);
607
608 return 0;
593} 609}
594 610
595static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata) 611static void xgene_gport_shutdown(struct xgene_enet_pdata *pdata)
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
index 38558584080e..ec45f3256f0e 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_hw.h
@@ -104,6 +104,9 @@ enum xgene_enet_rm {
104#define BLOCK_ETH_MAC_OFFSET 0x0000 104#define BLOCK_ETH_MAC_OFFSET 0x0000
105#define BLOCK_ETH_MAC_CSR_OFFSET 0x2800 105#define BLOCK_ETH_MAC_CSR_OFFSET 0x2800
106 106
107#define CLKEN_ADDR 0xc208
108#define SRST_ADDR 0xc200
109
107#define MAC_ADDR_REG_OFFSET 0x00 110#define MAC_ADDR_REG_OFFSET 0x00
108#define MAC_COMMAND_REG_OFFSET 0x04 111#define MAC_COMMAND_REG_OFFSET 0x04
109#define MAC_WRITE_REG_OFFSET 0x08 112#define MAC_WRITE_REG_OFFSET 0x08
@@ -318,6 +321,7 @@ void xgene_enet_parse_error(struct xgene_enet_desc_ring *ring,
318 321
319int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata); 322int xgene_enet_mdio_config(struct xgene_enet_pdata *pdata);
320void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata); 323void xgene_enet_mdio_remove(struct xgene_enet_pdata *pdata);
324bool xgene_ring_mgr_init(struct xgene_enet_pdata *p);
321 325
322extern struct xgene_mac_ops xgene_gmac_ops; 326extern struct xgene_mac_ops xgene_gmac_ops;
323extern struct xgene_port_ops xgene_gport_ops; 327extern struct xgene_port_ops xgene_gport_ops;
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
index 3c208cc6f6bb..123669696184 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.c
@@ -639,9 +639,9 @@ static int xgene_enet_create_desc_rings(struct net_device *ndev)
639 struct device *dev = ndev_to_dev(ndev); 639 struct device *dev = ndev_to_dev(ndev);
640 struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring; 640 struct xgene_enet_desc_ring *rx_ring, *tx_ring, *cp_ring;
641 struct xgene_enet_desc_ring *buf_pool = NULL; 641 struct xgene_enet_desc_ring *buf_pool = NULL;
642 u8 cpu_bufnum = 0, eth_bufnum = 0; 642 u8 cpu_bufnum = 0, eth_bufnum = START_ETH_BUFNUM;
643 u8 bp_bufnum = 0x20; 643 u8 bp_bufnum = START_BP_BUFNUM;
644 u16 ring_id, ring_num = 0; 644 u16 ring_id, ring_num = START_RING_NUM;
645 int ret; 645 int ret;
646 646
647 /* allocate rx descriptor ring */ 647 /* allocate rx descriptor ring */
@@ -852,7 +852,9 @@ static int xgene_enet_init_hw(struct xgene_enet_pdata *pdata)
852 u16 dst_ring_num; 852 u16 dst_ring_num;
853 int ret; 853 int ret;
854 854
855 pdata->port_ops->reset(pdata); 855 ret = pdata->port_ops->reset(pdata);
856 if (ret)
857 return ret;
856 858
857 ret = xgene_enet_create_desc_rings(ndev); 859 ret = xgene_enet_create_desc_rings(ndev);
858 if (ret) { 860 if (ret) {
@@ -954,6 +956,7 @@ static int xgene_enet_probe(struct platform_device *pdev)
954 956
955 return ret; 957 return ret;
956err: 958err:
959 unregister_netdev(ndev);
957 free_netdev(ndev); 960 free_netdev(ndev);
958 return ret; 961 return ret;
959} 962}
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
index 874e5a01161f..f9958fae6ffd 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_main.h
@@ -38,6 +38,9 @@
38#define SKB_BUFFER_SIZE (XGENE_ENET_MAX_MTU - NET_IP_ALIGN) 38#define SKB_BUFFER_SIZE (XGENE_ENET_MAX_MTU - NET_IP_ALIGN)
39#define NUM_PKT_BUF 64 39#define NUM_PKT_BUF 64
40#define NUM_BUFPOOL 32 40#define NUM_BUFPOOL 32
41#define START_ETH_BUFNUM 2
42#define START_BP_BUFNUM 0x22
43#define START_RING_NUM 8
41 44
42#define PHY_POLL_LINK_ON (10 * HZ) 45#define PHY_POLL_LINK_ON (10 * HZ)
43#define PHY_POLL_LINK_OFF (PHY_POLL_LINK_ON / 5) 46#define PHY_POLL_LINK_OFF (PHY_POLL_LINK_ON / 5)
@@ -83,7 +86,7 @@ struct xgene_mac_ops {
83}; 86};
84 87
85struct xgene_port_ops { 88struct xgene_port_ops {
86 void (*reset)(struct xgene_enet_pdata *pdata); 89 int (*reset)(struct xgene_enet_pdata *pdata);
87 void (*cle_bypass)(struct xgene_enet_pdata *pdata, 90 void (*cle_bypass)(struct xgene_enet_pdata *pdata,
88 u32 dst_ring_num, u16 bufpool_id); 91 u32 dst_ring_num, u16 bufpool_id);
89 void (*shutdown)(struct xgene_enet_pdata *pdata); 92 void (*shutdown)(struct xgene_enet_pdata *pdata);
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
index e6d24c210198..f5d4f68c288c 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_sgmac.c
@@ -124,20 +124,18 @@ static int xgene_enet_ecc_init(struct xgene_enet_pdata *p)
124{ 124{
125 struct net_device *ndev = p->ndev; 125 struct net_device *ndev = p->ndev;
126 u32 data; 126 u32 data;
127 int i; 127 int i = 0;
128 128
129 xgene_enet_wr_diag_csr(p, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0); 129 xgene_enet_wr_diag_csr(p, ENET_CFG_MEM_RAM_SHUTDOWN_ADDR, 0);
130 for (i = 0; i < 10 && data != ~0U ; i++) { 130 do {
131 usleep_range(100, 110); 131 usleep_range(100, 110);
132 data = xgene_enet_rd_diag_csr(p, ENET_BLOCK_MEM_RDY_ADDR); 132 data = xgene_enet_rd_diag_csr(p, ENET_BLOCK_MEM_RDY_ADDR);
133 } 133 if (data == ~0U)
134 134 return 0;
135 if (data != ~0U) { 135 } while (++i < 10);
136 netdev_err(ndev, "Failed to release memory from shutdown\n");
137 return -ENODEV;
138 }
139 136
140 return 0; 137 netdev_err(ndev, "Failed to release memory from shutdown\n");
138 return -ENODEV;
141} 139}
142 140
143static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *p) 141static void xgene_enet_config_ring_if_assoc(struct xgene_enet_pdata *p)
@@ -313,14 +311,19 @@ static void xgene_sgmac_tx_disable(struct xgene_enet_pdata *p)
313 xgene_sgmac_rxtx(p, TX_EN, false); 311 xgene_sgmac_rxtx(p, TX_EN, false);
314} 312}
315 313
316static void xgene_enet_reset(struct xgene_enet_pdata *p) 314static int xgene_enet_reset(struct xgene_enet_pdata *p)
317{ 315{
316 if (!xgene_ring_mgr_init(p))
317 return -ENODEV;
318
318 clk_prepare_enable(p->clk); 319 clk_prepare_enable(p->clk);
319 clk_disable_unprepare(p->clk); 320 clk_disable_unprepare(p->clk);
320 clk_prepare_enable(p->clk); 321 clk_prepare_enable(p->clk);
321 322
322 xgene_enet_ecc_init(p); 323 xgene_enet_ecc_init(p);
323 xgene_enet_config_ring_if_assoc(p); 324 xgene_enet_config_ring_if_assoc(p);
325
326 return 0;
324} 327}
325 328
326static void xgene_enet_cle_bypass(struct xgene_enet_pdata *p, 329static void xgene_enet_cle_bypass(struct xgene_enet_pdata *p,
diff --git a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
index 67d07206b3c7..a18a9d1f1143 100644
--- a/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
+++ b/drivers/net/ethernet/apm/xgene/xgene_enet_xgmac.c
@@ -252,14 +252,19 @@ static void xgene_xgmac_tx_disable(struct xgene_enet_pdata *pdata)
252 xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data & ~HSTTFEN); 252 xgene_enet_wr_mac(pdata, AXGMAC_CONFIG_1, data & ~HSTTFEN);
253} 253}
254 254
255static void xgene_enet_reset(struct xgene_enet_pdata *pdata) 255static int xgene_enet_reset(struct xgene_enet_pdata *pdata)
256{ 256{
257 if (!xgene_ring_mgr_init(pdata))
258 return -ENODEV;
259
257 clk_prepare_enable(pdata->clk); 260 clk_prepare_enable(pdata->clk);
258 clk_disable_unprepare(pdata->clk); 261 clk_disable_unprepare(pdata->clk);
259 clk_prepare_enable(pdata->clk); 262 clk_prepare_enable(pdata->clk);
260 263
261 xgene_enet_ecc_init(pdata); 264 xgene_enet_ecc_init(pdata);
262 xgene_enet_config_ring_if_assoc(pdata); 265 xgene_enet_config_ring_if_assoc(pdata);
266
267 return 0;
263} 268}
264 269
265static void xgene_enet_xgcle_bypass(struct xgene_enet_pdata *pdata, 270static void xgene_enet_xgcle_bypass(struct xgene_enet_pdata *pdata,
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 9ae36979bdee..531bb7c57531 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -1110,7 +1110,8 @@ static int bcm_sysport_init_tx_ring(struct bcm_sysport_priv *priv,
1110 /* We just need one DMA descriptor which is DMA-able, since writing to 1110 /* We just need one DMA descriptor which is DMA-able, since writing to
1111 * the port will allocate a new descriptor in its internal linked-list 1111 * the port will allocate a new descriptor in its internal linked-list
1112 */ 1112 */
1113 p = dma_zalloc_coherent(kdev, 1, &ring->desc_dma, GFP_KERNEL); 1113 p = dma_zalloc_coherent(kdev, sizeof(struct dma_desc), &ring->desc_dma,
1114 GFP_KERNEL);
1114 if (!p) { 1115 if (!p) {
1115 netif_err(priv, hw, priv->netdev, "DMA alloc failed\n"); 1116 netif_err(priv, hw, priv->netdev, "DMA alloc failed\n");
1116 return -ENOMEM; 1117 return -ENOMEM;
@@ -1174,6 +1175,13 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
1174 if (!(reg & TDMA_DISABLED)) 1175 if (!(reg & TDMA_DISABLED))
1175 netdev_warn(priv->netdev, "TDMA not stopped!\n"); 1176 netdev_warn(priv->netdev, "TDMA not stopped!\n");
1176 1177
1178 /* ring->cbs is the last part in bcm_sysport_init_tx_ring which could
1179 * fail, so by checking this pointer we know whether the TX ring was
1180 * fully initialized or not.
1181 */
1182 if (!ring->cbs)
1183 return;
1184
1177 napi_disable(&ring->napi); 1185 napi_disable(&ring->napi);
1178 netif_napi_del(&ring->napi); 1186 netif_napi_del(&ring->napi);
1179 1187
@@ -1183,7 +1191,8 @@ static void bcm_sysport_fini_tx_ring(struct bcm_sysport_priv *priv,
1183 ring->cbs = NULL; 1191 ring->cbs = NULL;
1184 1192
1185 if (ring->desc_dma) { 1193 if (ring->desc_dma) {
1186 dma_free_coherent(kdev, 1, ring->desc_cpu, ring->desc_dma); 1194 dma_free_coherent(kdev, sizeof(struct dma_desc),
1195 ring->desc_cpu, ring->desc_dma);
1187 ring->desc_dma = 0; 1196 ring->desc_dma = 0;
1188 } 1197 }
1189 ring->size = 0; 1198 ring->size = 0;
@@ -1397,6 +1406,9 @@ static void bcm_sysport_netif_start(struct net_device *dev)
1397 /* Enable NAPI */ 1406 /* Enable NAPI */
1398 napi_enable(&priv->napi); 1407 napi_enable(&priv->napi);
1399 1408
1409 /* Enable RX interrupt and TX ring full interrupt */
1410 intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
1411
1400 phy_start(priv->phydev); 1412 phy_start(priv->phydev);
1401 1413
1402 /* Enable TX interrupts for the 32 TXQs */ 1414 /* Enable TX interrupts for the 32 TXQs */
@@ -1499,9 +1511,6 @@ static int bcm_sysport_open(struct net_device *dev)
1499 if (ret) 1511 if (ret)
1500 goto out_free_rx_ring; 1512 goto out_free_rx_ring;
1501 1513
1502 /* Enable RX interrupt and TX ring full interrupt */
1503 intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
1504
1505 /* Turn on TDMA */ 1514 /* Turn on TDMA */
1506 ret = tdma_enable_set(priv, 1); 1515 ret = tdma_enable_set(priv, 1);
1507 if (ret) 1516 if (ret)
@@ -1858,6 +1867,8 @@ static int bcm_sysport_resume(struct device *d)
1858 if (!netif_running(dev)) 1867 if (!netif_running(dev))
1859 return 0; 1868 return 0;
1860 1869
1870 umac_reset(priv);
1871
1861 /* We may have been suspended and never received a WOL event that 1872 /* We may have been suspended and never received a WOL event that
1862 * would turn off MPD detection, take care of that now 1873 * would turn off MPD detection, take care of that now
1863 */ 1874 */
@@ -1885,9 +1896,6 @@ static int bcm_sysport_resume(struct device *d)
1885 1896
1886 netif_device_attach(dev); 1897 netif_device_attach(dev);
1887 1898
1888 /* Enable RX interrupt and TX ring full interrupt */
1889 intrl2_0_mask_clear(priv, INTRL2_0_RDMA_MBDONE | INTRL2_0_TX_RING_FULL);
1890
1891 /* RX pipe enable */ 1899 /* RX pipe enable */
1892 topctrl_writel(priv, 0, RX_FLUSH_CNTL); 1900 topctrl_writel(priv, 0, RX_FLUSH_CNTL);
1893 1901
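
bcm_sysport_init_tx_ring() used to ask dma_zalloc_coherent() for a single byte, and the teardown path freed that same bogus length; both calls now use sizeof(struct dma_desc), and bcm_sysport_fini_tx_ring() bails out early when ring->cbs is NULL because initialization never completed. A toy userspace analogue of the sizing fix, with stand-ins for the coherent DMA helpers (layout and names invented for illustration):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* stand-in for the hardware TX descriptor (layout is made up) */
    struct dma_desc {
        uint32_t addr_lo;
        uint32_t addr_status_len;
    };

    /* toy stand-ins for dma_zalloc_coherent()/dma_free_coherent(): the point
     * is only that both sides use the same, type-derived size */
    static void *coherent_zalloc(size_t size, uintptr_t *dma_handle)
    {
        void *p = calloc(1, size);
        *dma_handle = (uintptr_t)p;      /* pretend bus address == CPU address */
        return p;
    }

    static void coherent_free(size_t size, void *cpu_addr, uintptr_t dma_handle)
    {
        (void)size; (void)dma_handle;
        free(cpu_addr);
    }

    int main(void)
    {
        uintptr_t dma;
        struct dma_desc *desc;

        /* was effectively coherent_zalloc(1, &dma): one byte is too small for
         * the descriptor the hardware will read and write back */
        desc = coherent_zalloc(sizeof(*desc), &dma);
        if (!desc)
            return 1;

        printf("descriptor of %zu bytes at %p\n", sizeof(*desc), (void *)desc);
        coherent_free(sizeof(*desc), desc, dma);
        return 0;
    }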
diff --git a/drivers/net/ethernet/broadcom/cnic.c b/drivers/net/ethernet/broadcom/cnic.c
index 23f23c97c2ad..f05fab65d78a 100644
--- a/drivers/net/ethernet/broadcom/cnic.c
+++ b/drivers/net/ethernet/broadcom/cnic.c
@@ -382,10 +382,8 @@ static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
382 if (l5_cid >= MAX_CM_SK_TBL_SZ) 382 if (l5_cid >= MAX_CM_SK_TBL_SZ)
383 break; 383 break;
384 384
385 rcu_read_lock();
386 if (!rcu_access_pointer(cp->ulp_ops[CNIC_ULP_L4])) { 385 if (!rcu_access_pointer(cp->ulp_ops[CNIC_ULP_L4])) {
387 rc = -ENODEV; 386 rc = -ENODEV;
388 rcu_read_unlock();
389 break; 387 break;
390 } 388 }
391 csk = &cp->csk_tbl[l5_cid]; 389 csk = &cp->csk_tbl[l5_cid];
@@ -414,7 +412,6 @@ static int cnic_iscsi_nl_msg_recv(struct cnic_dev *dev, u32 msg_type,
414 } 412 }
415 } 413 }
416 csk_put(csk); 414 csk_put(csk);
417 rcu_read_unlock();
418 rc = 0; 415 rc = 0;
419 } 416 }
420 } 417 }
@@ -615,7 +612,7 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
615 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL); 612 cnic_send_nlmsg(cp, ISCSI_KEVENT_IF_DOWN, NULL);
616 613
617 mutex_lock(&cnic_lock); 614 mutex_lock(&cnic_lock);
618 if (rcu_dereference(cp->ulp_ops[ulp_type])) { 615 if (rcu_access_pointer(cp->ulp_ops[ulp_type])) {
619 RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL); 616 RCU_INIT_POINTER(cp->ulp_ops[ulp_type], NULL);
620 cnic_put(dev); 617 cnic_put(dev);
621 } else { 618 } else {
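
The cnic changes drop an rcu_read_lock()/rcu_read_unlock() pair around code that only tests cp->ulp_ops[CNIC_ULP_L4] with rcu_access_pointer(), and cnic_unregister_device() now also uses rcu_access_pointer() instead of rcu_dereference(): when the question is merely whether the pointer is currently set, asked under the mutex that protects updates, no read-side critical section is needed. A hedged kernel-style fragment of the idiom (the surrounding structure is invented for illustration):

    #include <linux/mutex.h>
    #include <linux/rcupdate.h>

    struct ulp_ops;                         /* illustrative only */

    static DEFINE_MUTEX(ulp_lock);
    static struct ulp_ops __rcu *ulp_ops;   /* updated only under ulp_lock */

    static int ulp_unregister(void)
    {
        int ret = -EINVAL;

        mutex_lock(&ulp_lock);
        /* only testing for NULL, not dereferencing: rcu_access_pointer()
         * is sufficient and no rcu_read_lock() is required here */
        if (rcu_access_pointer(ulp_ops)) {
            RCU_INIT_POINTER(ulp_ops, NULL);
            ret = 0;
        }
        mutex_unlock(&ulp_lock);

        if (!ret)
            synchronize_rcu();              /* let readers drain before teardown */
        return ret;
    }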
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index fdc9ec09e453..da1a2500c91c 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2140,6 +2140,12 @@ static int bcmgenet_open(struct net_device *dev)
2140 goto err_irq0; 2140 goto err_irq0;
2141 } 2141 }
2142 2142
2143 /* Re-configure the port multiplexer towards the PHY device */
2144 bcmgenet_mii_config(priv->dev, false);
2145
2146 phy_connect_direct(dev, priv->phydev, bcmgenet_mii_setup,
2147 priv->phy_interface);
2148
2143 bcmgenet_netif_start(dev); 2149 bcmgenet_netif_start(dev);
2144 2150
2145 return 0; 2151 return 0;
@@ -2184,6 +2190,9 @@ static int bcmgenet_close(struct net_device *dev)
2184 2190
2185 bcmgenet_netif_stop(dev); 2191 bcmgenet_netif_stop(dev);
2186 2192
2193 /* Really kill the PHY state machine and disconnect from it */
2194 phy_disconnect(priv->phydev);
2195
2187 /* Disable MAC receive */ 2196 /* Disable MAC receive */
2188 umac_enable_set(priv, CMD_RX_EN, false); 2197 umac_enable_set(priv, CMD_RX_EN, false);
2189 2198
@@ -2685,7 +2694,7 @@ static int bcmgenet_resume(struct device *d)
2685 2694
2686 phy_init_hw(priv->phydev); 2695 phy_init_hw(priv->phydev);
2687 /* Speed settings must be restored */ 2696 /* Speed settings must be restored */
2688 bcmgenet_mii_config(priv->dev); 2697 bcmgenet_mii_config(priv->dev, false);
2689 2698
2690 /* disable ethernet MAC while updating its registers */ 2699 /* disable ethernet MAC while updating its registers */
2691 umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false); 2700 umac_enable_set(priv, CMD_TX_EN | CMD_RX_EN, false);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index dbf524ea3b19..31b2da5f9b82 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -617,9 +617,10 @@ GENET_IO_MACRO(rbuf, GENET_RBUF_OFF);
617 617
618/* MDIO routines */ 618/* MDIO routines */
619int bcmgenet_mii_init(struct net_device *dev); 619int bcmgenet_mii_init(struct net_device *dev);
620int bcmgenet_mii_config(struct net_device *dev); 620int bcmgenet_mii_config(struct net_device *dev, bool init);
621void bcmgenet_mii_exit(struct net_device *dev); 621void bcmgenet_mii_exit(struct net_device *dev);
622void bcmgenet_mii_reset(struct net_device *dev); 622void bcmgenet_mii_reset(struct net_device *dev);
623void bcmgenet_mii_setup(struct net_device *dev);
623 624
624/* Wake-on-LAN routines */ 625/* Wake-on-LAN routines */
625void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol); 626void bcmgenet_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol);
diff --git a/drivers/net/ethernet/broadcom/genet/bcmmii.c b/drivers/net/ethernet/broadcom/genet/bcmmii.c
index 9ff799a9f801..933cd7e7cd33 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmmii.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmmii.c
@@ -77,7 +77,7 @@ static int bcmgenet_mii_write(struct mii_bus *bus, int phy_id,
77/* setup netdev link state when PHY link status change and 77/* setup netdev link state when PHY link status change and
78 * update UMAC and RGMII block when link up 78 * update UMAC and RGMII block when link up
79 */ 79 */
80static void bcmgenet_mii_setup(struct net_device *dev) 80void bcmgenet_mii_setup(struct net_device *dev)
81{ 81{
82 struct bcmgenet_priv *priv = netdev_priv(dev); 82 struct bcmgenet_priv *priv = netdev_priv(dev);
83 struct phy_device *phydev = priv->phydev; 83 struct phy_device *phydev = priv->phydev;
@@ -211,7 +211,7 @@ static void bcmgenet_moca_phy_setup(struct bcmgenet_priv *priv)
211 bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL); 211 bcmgenet_sys_writel(priv, reg, SYS_PORT_CTRL);
212} 212}
213 213
214int bcmgenet_mii_config(struct net_device *dev) 214int bcmgenet_mii_config(struct net_device *dev, bool init)
215{ 215{
216 struct bcmgenet_priv *priv = netdev_priv(dev); 216 struct bcmgenet_priv *priv = netdev_priv(dev);
217 struct phy_device *phydev = priv->phydev; 217 struct phy_device *phydev = priv->phydev;
@@ -298,7 +298,8 @@ int bcmgenet_mii_config(struct net_device *dev)
298 return -EINVAL; 298 return -EINVAL;
299 } 299 }
300 300
301 dev_info(kdev, "configuring instance for %s\n", phy_name); 301 if (init)
302 dev_info(kdev, "configuring instance for %s\n", phy_name);
302 303
303 return 0; 304 return 0;
304} 305}
@@ -350,7 +351,7 @@ static int bcmgenet_mii_probe(struct net_device *dev)
350 * PHY speed which is needed for bcmgenet_mii_config() to configure 351 * PHY speed which is needed for bcmgenet_mii_config() to configure
351 * things appropriately. 352 * things appropriately.
352 */ 353 */
353 ret = bcmgenet_mii_config(dev); 354 ret = bcmgenet_mii_config(dev, true);
354 if (ret) { 355 if (ret) {
355 phy_disconnect(priv->phydev); 356 phy_disconnect(priv->phydev);
356 return ret; 357 return ret;
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index dbb41c1923e6..77f8f836cbbe 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -8563,7 +8563,8 @@ static int tg3_init_rings(struct tg3 *tp)
8563 if (tnapi->rx_rcb) 8563 if (tnapi->rx_rcb)
8564 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); 8564 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
8565 8565
8566 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) { 8566 if (tnapi->prodring.rx_std &&
8567 tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
8567 tg3_free_rings(tp); 8568 tg3_free_rings(tp);
8568 return -ENOMEM; 8569 return -ENOMEM;
8569 } 8570 }
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
index 8edf0f5bd679..4fe33606f372 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_dcb.c
@@ -60,6 +60,43 @@ void cxgb4_dcb_version_init(struct net_device *dev)
60 dcb->dcb_version = FW_PORT_DCB_VER_AUTO; 60 dcb->dcb_version = FW_PORT_DCB_VER_AUTO;
61} 61}
62 62
63static void cxgb4_dcb_cleanup_apps(struct net_device *dev)
64{
65 struct port_info *pi = netdev2pinfo(dev);
66 struct adapter *adap = pi->adapter;
67 struct port_dcb_info *dcb = &pi->dcb;
68 struct dcb_app app;
69 int i, err;
70
71 /* zero priority implies remove */
72 app.priority = 0;
73
74 for (i = 0; i < CXGB4_MAX_DCBX_APP_SUPPORTED; i++) {
75 /* Check if app list is exhausted */
76 if (!dcb->app_priority[i].protocolid)
77 break;
78
79 app.protocol = dcb->app_priority[i].protocolid;
80
81 if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE) {
82 app.priority = dcb->app_priority[i].user_prio_map;
83 app.selector = dcb->app_priority[i].sel_field + 1;
84 err = dcb_ieee_delapp(dev, &app);
85 } else {
86 app.selector = !!(dcb->app_priority[i].sel_field);
87 err = dcb_setapp(dev, &app);
88 }
89
90 if (err) {
91 dev_err(adap->pdev_dev,
92 "Failed DCB Clear %s Application Priority: sel=%d, prot=%d, , err=%d\n",
93 dcb_ver_array[dcb->dcb_version], app.selector,
94 app.protocol, -err);
95 break;
96 }
97 }
98}
99
63/* Finite State machine for Data Center Bridging. 100/* Finite State machine for Data Center Bridging.
64 */ 101 */
65void cxgb4_dcb_state_fsm(struct net_device *dev, 102void cxgb4_dcb_state_fsm(struct net_device *dev,
@@ -80,14 +117,17 @@ void cxgb4_dcb_state_fsm(struct net_device *dev,
80 /* we're going to use Host DCB */ 117 /* we're going to use Host DCB */
81 dcb->state = CXGB4_DCB_STATE_HOST; 118 dcb->state = CXGB4_DCB_STATE_HOST;
82 dcb->supported = CXGB4_DCBX_HOST_SUPPORT; 119 dcb->supported = CXGB4_DCBX_HOST_SUPPORT;
83 dcb->enabled = 1;
84 break; 120 break;
85 } 121 }
86 122
87 case CXGB4_DCB_INPUT_FW_ENABLED: { 123 case CXGB4_DCB_INPUT_FW_ENABLED: {
88 /* we're going to use Firmware DCB */ 124 /* we're going to use Firmware DCB */
89 dcb->state = CXGB4_DCB_STATE_FW_INCOMPLETE; 125 dcb->state = CXGB4_DCB_STATE_FW_INCOMPLETE;
90 dcb->supported = CXGB4_DCBX_FW_SUPPORT; 126 dcb->supported = DCB_CAP_DCBX_LLD_MANAGED;
127 if (dcb->dcb_version == FW_PORT_DCB_VER_IEEE)
128 dcb->supported |= DCB_CAP_DCBX_VER_IEEE;
129 else
130 dcb->supported |= DCB_CAP_DCBX_VER_CEE;
91 break; 131 break;
92 } 132 }
93 133
@@ -145,6 +185,7 @@ void cxgb4_dcb_state_fsm(struct net_device *dev,
145 * state. We need to reset back to a ground state 185 * state. We need to reset back to a ground state
146 * of incomplete. 186 * of incomplete.
147 */ 187 */
188 cxgb4_dcb_cleanup_apps(dev);
148 cxgb4_dcb_state_init(dev); 189 cxgb4_dcb_state_init(dev);
149 dcb->state = CXGB4_DCB_STATE_FW_INCOMPLETE; 190 dcb->state = CXGB4_DCB_STATE_FW_INCOMPLETE;
150 dcb->supported = CXGB4_DCBX_FW_SUPPORT; 191 dcb->supported = CXGB4_DCBX_FW_SUPPORT;
@@ -349,6 +390,12 @@ static u8 cxgb4_setstate(struct net_device *dev, u8 enabled)
349{ 390{
350 struct port_info *pi = netdev2pinfo(dev); 391 struct port_info *pi = netdev2pinfo(dev);
351 392
393 /* If DCBx is host-managed, dcb is enabled by outside lldp agents */
394 if (pi->dcb.state == CXGB4_DCB_STATE_HOST) {
395 pi->dcb.enabled = enabled;
396 return 0;
397 }
398
352 /* Firmware doesn't provide any mechanism to control the DCB state. 399 /* Firmware doesn't provide any mechanism to control the DCB state.
353 */ 400 */
354 if (enabled != (pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED)) 401 if (enabled != (pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED))
@@ -394,14 +441,17 @@ static void cxgb4_getpgtccfg(struct net_device *dev, int tc,
394 *up_tc_map = (1 << tc); 441 *up_tc_map = (1 << tc);
395 442
396 /* prio_type is link strict */ 443 /* prio_type is link strict */
397 *prio_type = 0x2; 444 if (*pgid != 0xF)
445 *prio_type = 0x2;
398} 446}
399 447
400static void cxgb4_getpgtccfg_tx(struct net_device *dev, int tc, 448static void cxgb4_getpgtccfg_tx(struct net_device *dev, int tc,
401 u8 *prio_type, u8 *pgid, u8 *bw_per, 449 u8 *prio_type, u8 *pgid, u8 *bw_per,
402 u8 *up_tc_map) 450 u8 *up_tc_map)
403{ 451{
404 return cxgb4_getpgtccfg(dev, tc, prio_type, pgid, bw_per, up_tc_map, 1); 452 /* tc 0 is written at MSB position */
453 return cxgb4_getpgtccfg(dev, (7 - tc), prio_type, pgid, bw_per,
454 up_tc_map, 1);
405} 455}
406 456
407 457
@@ -409,7 +459,9 @@ static void cxgb4_getpgtccfg_rx(struct net_device *dev, int tc,
409 u8 *prio_type, u8 *pgid, u8 *bw_per, 459 u8 *prio_type, u8 *pgid, u8 *bw_per,
410 u8 *up_tc_map) 460 u8 *up_tc_map)
411{ 461{
412 return cxgb4_getpgtccfg(dev, tc, prio_type, pgid, bw_per, up_tc_map, 0); 462 /* tc 0 is written at MSB position */
463 return cxgb4_getpgtccfg(dev, (7 - tc), prio_type, pgid, bw_per,
464 up_tc_map, 0);
413} 465}
414 466
415static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc, 467static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc,
@@ -419,6 +471,7 @@ static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc,
419 struct fw_port_cmd pcmd; 471 struct fw_port_cmd pcmd;
420 struct port_info *pi = netdev2pinfo(dev); 472 struct port_info *pi = netdev2pinfo(dev);
421 struct adapter *adap = pi->adapter; 473 struct adapter *adap = pi->adapter;
474 int fw_tc = 7 - tc;
422 u32 _pgid; 475 u32 _pgid;
423 int err; 476 int err;
424 477
@@ -437,8 +490,8 @@ static void cxgb4_setpgtccfg_tx(struct net_device *dev, int tc,
437 } 490 }
438 491
439 _pgid = be32_to_cpu(pcmd.u.dcb.pgid.pgid); 492 _pgid = be32_to_cpu(pcmd.u.dcb.pgid.pgid);
440 _pgid &= ~(0xF << (tc * 4)); 493 _pgid &= ~(0xF << (fw_tc * 4));
441 _pgid |= pgid << (tc * 4); 494 _pgid |= pgid << (fw_tc * 4);
442 pcmd.u.dcb.pgid.pgid = cpu_to_be32(_pgid); 495 pcmd.u.dcb.pgid.pgid = cpu_to_be32(_pgid);
443 496
444 INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id); 497 INIT_PORT_DCB_WRITE_CMD(pcmd, pi->port_id);
@@ -551,7 +604,7 @@ static void cxgb4_getpfccfg(struct net_device *dev, int priority, u8 *pfccfg)
551 priority >= CXGB4_MAX_PRIORITY) 604 priority >= CXGB4_MAX_PRIORITY)
552 *pfccfg = 0; 605 *pfccfg = 0;
553 else 606 else
554 *pfccfg = (pi->dcb.pfcen >> priority) & 1; 607 *pfccfg = (pi->dcb.pfcen >> (7 - priority)) & 1;
555} 608}
556 609
557/* Enable/disable Priority Pause Frames for the specified Traffic Class 610/* Enable/disable Priority Pause Frames for the specified Traffic Class
@@ -576,9 +629,9 @@ static void cxgb4_setpfccfg(struct net_device *dev, int priority, u8 pfccfg)
576 pcmd.u.dcb.pfc.pfcen = pi->dcb.pfcen; 629 pcmd.u.dcb.pfc.pfcen = pi->dcb.pfcen;
577 630
578 if (pfccfg) 631 if (pfccfg)
579 pcmd.u.dcb.pfc.pfcen |= (1 << priority); 632 pcmd.u.dcb.pfc.pfcen |= (1 << (7 - priority));
580 else 633 else
581 pcmd.u.dcb.pfc.pfcen &= (~(1 << priority)); 634 pcmd.u.dcb.pfc.pfcen &= (~(1 << (7 - priority)));
582 635
583 err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd); 636 err = t4_wr_mbox(adap, adap->mbox, &pcmd, sizeof(pcmd), &pcmd);
584 if (err != FW_PORT_DCB_CFG_SUCCESS) { 637 if (err != FW_PORT_DCB_CFG_SUCCESS) {
@@ -833,11 +886,16 @@ static int cxgb4_setapp(struct net_device *dev, u8 app_idtype, u16 app_id,
833 886
834/* Return whether IEEE Data Center Bridging has been negotiated. 887/* Return whether IEEE Data Center Bridging has been negotiated.
835 */ 888 */
836static inline int cxgb4_ieee_negotiation_complete(struct net_device *dev) 889static inline int
890cxgb4_ieee_negotiation_complete(struct net_device *dev,
891 enum cxgb4_dcb_fw_msgs dcb_subtype)
837{ 892{
838 struct port_info *pi = netdev2pinfo(dev); 893 struct port_info *pi = netdev2pinfo(dev);
839 struct port_dcb_info *dcb = &pi->dcb; 894 struct port_dcb_info *dcb = &pi->dcb;
840 895
896 if (dcb_subtype && !(dcb->msgs & dcb_subtype))
897 return 0;
898
841 return (dcb->state == CXGB4_DCB_STATE_FW_ALLSYNCED && 899 return (dcb->state == CXGB4_DCB_STATE_FW_ALLSYNCED &&
842 (dcb->supported & DCB_CAP_DCBX_VER_IEEE)); 900 (dcb->supported & DCB_CAP_DCBX_VER_IEEE));
843} 901}
@@ -850,7 +908,7 @@ static int cxgb4_ieee_getapp(struct net_device *dev, struct dcb_app *app)
850{ 908{
851 int prio; 909 int prio;
852 910
853 if (!cxgb4_ieee_negotiation_complete(dev)) 911 if (!cxgb4_ieee_negotiation_complete(dev, CXGB4_DCB_FW_APP_ID))
854 return -EINVAL; 912 return -EINVAL;
855 if (!(app->selector && app->protocol)) 913 if (!(app->selector && app->protocol))
856 return -EINVAL; 914 return -EINVAL;
@@ -872,7 +930,7 @@ static int cxgb4_ieee_setapp(struct net_device *dev, struct dcb_app *app)
872{ 930{
873 int ret; 931 int ret;
874 932
875 if (!cxgb4_ieee_negotiation_complete(dev)) 933 if (!cxgb4_ieee_negotiation_complete(dev, CXGB4_DCB_FW_APP_ID))
876 return -EINVAL; 934 return -EINVAL;
877 if (!(app->selector && app->protocol)) 935 if (!(app->selector && app->protocol))
878 return -EINVAL; 936 return -EINVAL;
@@ -1024,7 +1082,7 @@ static int cxgb4_cee_peer_getpg(struct net_device *dev, struct cee_pg *pg)
1024 pgid = be32_to_cpu(pcmd.u.dcb.pgid.pgid); 1082 pgid = be32_to_cpu(pcmd.u.dcb.pgid.pgid);
1025 1083
1026 for (i = 0; i < CXGB4_MAX_PRIORITY; i++) 1084 for (i = 0; i < CXGB4_MAX_PRIORITY; i++)
1027 pg->prio_pg[i] = (pgid >> (i * 4)) & 0xF; 1085 pg->prio_pg[7 - i] = (pgid >> (i * 4)) & 0xF;
1028 1086
1029 INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id); 1087 INIT_PORT_DCB_READ_PEER_CMD(pcmd, pi->port_id);
1030 pcmd.u.dcb.pgrate.type = FW_PORT_DCB_TYPE_PGRATE; 1088 pcmd.u.dcb.pgrate.type = FW_PORT_DCB_TYPE_PGRATE;
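
Several of the cxgb4 DCB fixes above share one root cause: the firmware packs per-priority fields with priority/traffic-class 0 in the most-significant position, so host indexing has to be mirrored with (7 - tc), both when reading or writing a 4-bit Priority Group ID nibble in pgid and when setting a PFC enable bit in pfcen. A small standalone illustration of the mirrored nibble indexing (the packing order follows the hunks above; everything else is made up):

    #include <stdint.h>
    #include <stdio.h>

    /* firmware packs eight 4-bit Priority Group IDs into one 32-bit word,
     * with traffic class 0 in the most-significant nibble */
    static unsigned int get_pgid(uint32_t pgid_word, int tc)
    {
        int fw_tc = 7 - tc;                     /* mirror the index */

        return (pgid_word >> (fw_tc * 4)) & 0xF;
    }

    static uint32_t set_pgid(uint32_t pgid_word, int tc, unsigned int pgid)
    {
        int fw_tc = 7 - tc;

        pgid_word &= ~(0xFu << (fw_tc * 4));
        pgid_word |= (uint32_t)pgid << (fw_tc * 4);
        return pgid_word;
    }

    int main(void)
    {
        uint32_t w = 0;

        w = set_pgid(w, 0, 0x3);                /* TC 0 lands in bits 31:28 */
        w = set_pgid(w, 7, 0x5);                /* TC 7 lands in bits 3:0  */
        printf("word=0x%08x tc0=%u tc7=%u\n", w, get_pgid(w, 0), get_pgid(w, 7));
        return 0;
    }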
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 3f60070f2519..279873cb6e3a 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -694,7 +694,11 @@ int cxgb4_dcb_enabled(const struct net_device *dev)
694#ifdef CONFIG_CHELSIO_T4_DCB 694#ifdef CONFIG_CHELSIO_T4_DCB
695 struct port_info *pi = netdev_priv(dev); 695 struct port_info *pi = netdev_priv(dev);
696 696
697 return pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED; 697 if (!pi->dcb.enabled)
698 return 0;
699
700 return ((pi->dcb.state == CXGB4_DCB_STATE_FW_ALLSYNCED) ||
701 (pi->dcb.state == CXGB4_DCB_STATE_HOST));
698#else 702#else
699 return 0; 703 return 0;
700#endif 704#endif
@@ -2438,9 +2442,13 @@ static unsigned int from_fw_linkcaps(unsigned int type, unsigned int caps)
2438 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full | 2442 SUPPORTED_10000baseKR_Full | SUPPORTED_1000baseKX_Full |
2439 SUPPORTED_10000baseKX4_Full; 2443 SUPPORTED_10000baseKX4_Full;
2440 else if (type == FW_PORT_TYPE_FIBER_XFI || 2444 else if (type == FW_PORT_TYPE_FIBER_XFI ||
2441 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP) 2445 type == FW_PORT_TYPE_FIBER_XAUI || type == FW_PORT_TYPE_SFP) {
2442 v |= SUPPORTED_FIBRE; 2446 v |= SUPPORTED_FIBRE;
2443 else if (type == FW_PORT_TYPE_BP40_BA) 2447 if (caps & FW_PORT_CAP_SPEED_1G)
2448 v |= SUPPORTED_1000baseT_Full;
2449 if (caps & FW_PORT_CAP_SPEED_10G)
2450 v |= SUPPORTED_10000baseT_Full;
2451 } else if (type == FW_PORT_TYPE_BP40_BA)
2444 v |= SUPPORTED_40000baseSR4_Full; 2452 v |= SUPPORTED_40000baseSR4_Full;
2445 2453
2446 if (caps & FW_PORT_CAP_ANEG) 2454 if (caps & FW_PORT_CAP_ANEG)
@@ -6610,6 +6618,7 @@ static int init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6610 6618
6611 spin_lock_init(&adapter->stats_lock); 6619 spin_lock_init(&adapter->stats_lock);
6612 spin_lock_init(&adapter->tid_release_lock); 6620 spin_lock_init(&adapter->tid_release_lock);
6621 spin_lock_init(&adapter->win0_lock);
6613 6622
6614 INIT_WORK(&adapter->tid_release_task, process_tid_release_list); 6623 INIT_WORK(&adapter->tid_release_task, process_tid_release_list);
6615 INIT_WORK(&adapter->db_full_task, process_db_full); 6624 INIT_WORK(&adapter->db_full_task, process_db_full);
diff --git a/drivers/net/ethernet/chelsio/cxgb4/sge.c b/drivers/net/ethernet/chelsio/cxgb4/sge.c
index 5e1b314e11af..39f2b13e66c7 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/sge.c
@@ -2914,7 +2914,8 @@ static int t4_sge_init_hard(struct adapter *adap)
2914int t4_sge_init(struct adapter *adap) 2914int t4_sge_init(struct adapter *adap)
2915{ 2915{
2916 struct sge *s = &adap->sge; 2916 struct sge *s = &adap->sge;
2917 u32 sge_control, sge_conm_ctrl; 2917 u32 sge_control, sge_control2, sge_conm_ctrl;
2918 unsigned int ingpadboundary, ingpackboundary;
2918 int ret, egress_threshold; 2919 int ret, egress_threshold;
2919 2920
2920 /* 2921 /*
@@ -2924,8 +2925,31 @@ int t4_sge_init(struct adapter *adap)
2924 sge_control = t4_read_reg(adap, SGE_CONTROL); 2925 sge_control = t4_read_reg(adap, SGE_CONTROL);
2925 s->pktshift = PKTSHIFT_GET(sge_control); 2926 s->pktshift = PKTSHIFT_GET(sge_control);
2926 s->stat_len = (sge_control & EGRSTATUSPAGESIZE_MASK) ? 128 : 64; 2927 s->stat_len = (sge_control & EGRSTATUSPAGESIZE_MASK) ? 128 : 64;
2927 s->fl_align = 1 << (INGPADBOUNDARY_GET(sge_control) + 2928
2928 X_INGPADBOUNDARY_SHIFT); 2929 /* T4 uses a single control field to specify both the PCIe Padding and
2930 * Packing Boundary. T5 introduced the ability to specify these
2931 * separately. The actual Ingress Packet Data alignment boundary
2932 * within Packed Buffer Mode is the maximum of these two
2933 * specifications.
2934 */
2935 ingpadboundary = 1 << (INGPADBOUNDARY_GET(sge_control) +
2936 X_INGPADBOUNDARY_SHIFT);
2937 if (is_t4(adap->params.chip)) {
2938 s->fl_align = ingpadboundary;
2939 } else {
2940 /* T5 has a different interpretation of one of the PCIe Packing
2941 * Boundary values.
2942 */
2943 sge_control2 = t4_read_reg(adap, SGE_CONTROL2_A);
2944 ingpackboundary = INGPACKBOUNDARY_G(sge_control2);
2945 if (ingpackboundary == INGPACKBOUNDARY_16B_X)
2946 ingpackboundary = 16;
2947 else
2948 ingpackboundary = 1 << (ingpackboundary +
2949 INGPACKBOUNDARY_SHIFT_X);
2950
2951 s->fl_align = max(ingpadboundary, ingpackboundary);
2952 }
2929 2953
2930 if (adap->flags & USING_SOFT_PARAMS) 2954 if (adap->flags & USING_SOFT_PARAMS)
2931 ret = t4_sge_init_soft(adap); 2955 ret = t4_sge_init_soft(adap);
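
t4_sge_init() now derives the free-list buffer alignment from two fields: on T4 the padding boundary alone, on T5 the maximum of the padding boundary and the new packing boundary, where a packing field value of 0 means 16 bytes and any other value n means 1 << (n + 5). A standalone sketch of that decode (the shift constants mirror the hunk; the sample field values are made up):

    #include <stdint.h>
    #include <stdio.h>

    #define INGPADBOUNDARY_SHIFT   5   /* pad boundary = 1 << (field + 5) */
    #define INGPACKBOUNDARY_SHIFT  5   /* pack boundary = 1 << (field + 5), except 0 */
    #define INGPACKBOUNDARY_16B    0   /* special case: field value 0 means 16 bytes */

    static unsigned int fl_align(unsigned int pad_field, unsigned int pack_field,
                                 int is_t4)
    {
        unsigned int pad = 1u << (pad_field + INGPADBOUNDARY_SHIFT);
        unsigned int pack;

        if (is_t4)
            return pad;                 /* T4: single control field */

        if (pack_field == INGPACKBOUNDARY_16B)
            pack = 16;
        else
            pack = 1u << (pack_field + INGPACKBOUNDARY_SHIFT);

        return pad > pack ? pad : pack; /* T5: max of the two boundaries */
    }

    int main(void)
    {
        /* made-up field values: pad field 0 (32 bytes), pack field 1 (64 bytes) */
        printf("T4 fl_align = %u\n", fl_align(0, 0, 1));
        printf("T5 fl_align = %u\n", fl_align(0, 1, 0));
        return 0;
    }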
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index a9d9d74e4f09..163a2a14948c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -3129,12 +3129,51 @@ int t4_fixup_host_params(struct adapter *adap, unsigned int page_size,
3129 HOSTPAGESIZEPF6(sge_hps) | 3129 HOSTPAGESIZEPF6(sge_hps) |
3130 HOSTPAGESIZEPF7(sge_hps)); 3130 HOSTPAGESIZEPF7(sge_hps));
3131 3131
3132 t4_set_reg_field(adap, SGE_CONTROL, 3132 if (is_t4(adap->params.chip)) {
3133 INGPADBOUNDARY_MASK | 3133 t4_set_reg_field(adap, SGE_CONTROL,
3134 EGRSTATUSPAGESIZE_MASK, 3134 INGPADBOUNDARY_MASK |
3135 INGPADBOUNDARY(fl_align_log - 5) | 3135 EGRSTATUSPAGESIZE_MASK,
3136 EGRSTATUSPAGESIZE(stat_len != 64)); 3136 INGPADBOUNDARY(fl_align_log - 5) |
3137 3137 EGRSTATUSPAGESIZE(stat_len != 64));
3138 } else {
3139 /* T5 introduced the separation of the Free List Padding and
3140 * Packing Boundaries. Thus, we can select a smaller Padding
3141 * Boundary to avoid uselessly chewing up PCIe Link and Memory
3142 * Bandwidth, and use a Packing Boundary which is large enough
3143 * to avoid false sharing between CPUs, etc.
3144 *
3145 * For the PCI Link, the smaller the Padding Boundary the
3146 * better. For the Memory Controller, a smaller Padding
3147 * Boundary is better until we cross under the Memory Line
3148 * Size (the minimum unit of transfer to/from Memory). If we
3149 * have a Padding Boundary which is smaller than the Memory
3150 * Line Size, that'll involve a Read-Modify-Write cycle on the
3151 * Memory Controller which is never good. For T5 the smallest
3152 * Padding Boundary which we can select is 32 bytes which is
3153 * larger than any known Memory Controller Line Size so we'll
3154 * use that.
3155 *
3156 * T5 has a different interpretation of the "0" value for the
3157 * Packing Boundary. This corresponds to 16 bytes instead of
3158 * the expected 32 bytes. We never have a Packing Boundary
3159 * less than 32 bytes so we can't use that special value but
3160 * on the other hand, if we wanted 32 bytes, the best we can
3161 * really do is 64 bytes.
3162 */
3163 if (fl_align <= 32) {
3164 fl_align = 64;
3165 fl_align_log = 6;
3166 }
3167 t4_set_reg_field(adap, SGE_CONTROL,
3168 INGPADBOUNDARY_MASK |
3169 EGRSTATUSPAGESIZE_MASK,
3170 INGPADBOUNDARY(INGPCIEBOUNDARY_32B_X) |
3171 EGRSTATUSPAGESIZE(stat_len != 64));
3172 t4_set_reg_field(adap, SGE_CONTROL2_A,
3173 INGPACKBOUNDARY_V(INGPACKBOUNDARY_M),
3174 INGPACKBOUNDARY_V(fl_align_log -
3175 INGPACKBOUNDARY_SHIFT_X));
3176 }
3138 /* 3177 /*
3139 * Adjust various SGE Free List Host Buffer Sizes. 3178 * Adjust various SGE Free List Host Buffer Sizes.
3140 * 3179 *
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
index a1024db5dc13..8d2de1006b08 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_regs.h
@@ -95,6 +95,7 @@
95#define X_INGPADBOUNDARY_SHIFT 5 95#define X_INGPADBOUNDARY_SHIFT 5
96 96
97#define SGE_CONTROL 0x1008 97#define SGE_CONTROL 0x1008
98#define SGE_CONTROL2_A 0x1124
98#define DCASYSTYPE 0x00080000U 99#define DCASYSTYPE 0x00080000U
99#define RXPKTCPLMODE_MASK 0x00040000U 100#define RXPKTCPLMODE_MASK 0x00040000U
100#define RXPKTCPLMODE_SHIFT 18 101#define RXPKTCPLMODE_SHIFT 18
@@ -106,6 +107,7 @@
106#define PKTSHIFT_SHIFT 10 107#define PKTSHIFT_SHIFT 10
107#define PKTSHIFT(x) ((x) << PKTSHIFT_SHIFT) 108#define PKTSHIFT(x) ((x) << PKTSHIFT_SHIFT)
108#define PKTSHIFT_GET(x) (((x) & PKTSHIFT_MASK) >> PKTSHIFT_SHIFT) 109#define PKTSHIFT_GET(x) (((x) & PKTSHIFT_MASK) >> PKTSHIFT_SHIFT)
110#define INGPCIEBOUNDARY_32B_X 0
109#define INGPCIEBOUNDARY_MASK 0x00000380U 111#define INGPCIEBOUNDARY_MASK 0x00000380U
110#define INGPCIEBOUNDARY_SHIFT 7 112#define INGPCIEBOUNDARY_SHIFT 7
111#define INGPCIEBOUNDARY(x) ((x) << INGPCIEBOUNDARY_SHIFT) 113#define INGPCIEBOUNDARY(x) ((x) << INGPCIEBOUNDARY_SHIFT)
@@ -114,6 +116,14 @@
114#define INGPADBOUNDARY(x) ((x) << INGPADBOUNDARY_SHIFT) 116#define INGPADBOUNDARY(x) ((x) << INGPADBOUNDARY_SHIFT)
115#define INGPADBOUNDARY_GET(x) (((x) & INGPADBOUNDARY_MASK) \ 117#define INGPADBOUNDARY_GET(x) (((x) & INGPADBOUNDARY_MASK) \
116 >> INGPADBOUNDARY_SHIFT) 118 >> INGPADBOUNDARY_SHIFT)
119#define INGPACKBOUNDARY_16B_X 0
120#define INGPACKBOUNDARY_SHIFT_X 5
121
122#define INGPACKBOUNDARY_S 16
123#define INGPACKBOUNDARY_M 0x7U
124#define INGPACKBOUNDARY_V(x) ((x) << INGPACKBOUNDARY_S)
125#define INGPACKBOUNDARY_G(x) (((x) >> INGPACKBOUNDARY_S) \
126 & INGPACKBOUNDARY_M)
117#define EGRPCIEBOUNDARY_MASK 0x0000000eU 127#define EGRPCIEBOUNDARY_MASK 0x0000000eU
118#define EGRPCIEBOUNDARY_SHIFT 1 128#define EGRPCIEBOUNDARY_SHIFT 1
119#define EGRPCIEBOUNDARY(x) ((x) << EGRPCIEBOUNDARY_SHIFT) 129#define EGRPCIEBOUNDARY(x) ((x) << EGRPCIEBOUNDARY_SHIFT)
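
The new INGPACKBOUNDARY_S/_M/_V/_G definitions follow the newer cxgb4 register-field convention: _S is the shift, _M the right-aligned mask, _V(x) positions a value in the register word, and _G(x) extracts it. A tiny standalone round-trip using a hypothetical field of the same shift and width:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    /* hypothetical 3-bit field at bits 18:16, mirroring the _S/_M/_V/_G style */
    #define DEMO_FIELD_S    16
    #define DEMO_FIELD_M    0x7U
    #define DEMO_FIELD_V(x) ((x) << DEMO_FIELD_S)
    #define DEMO_FIELD_G(x) (((x) >> DEMO_FIELD_S) & DEMO_FIELD_M)

    int main(void)
    {
        uint32_t reg = 0xdead0000u;

        /* read-modify-write of just this field, the same shape as the
         * t4_set_reg_field() call with (_V(_M), _V(new_value)) in the hunk */
        reg = (reg & ~DEMO_FIELD_V(DEMO_FIELD_M)) | DEMO_FIELD_V(0x5u);

        assert(DEMO_FIELD_G(reg) == 0x5u);
        printf("reg=0x%08x field=%u\n", (unsigned)reg, (unsigned)DEMO_FIELD_G(reg));
        return 0;
    }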
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
index 68eaa9c88c7d..3d06e77d7121 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/adapter.h
@@ -299,6 +299,14 @@ struct sge {
299 u16 timer_val[SGE_NTIMERS]; /* interrupt holdoff timer array */ 299 u16 timer_val[SGE_NTIMERS]; /* interrupt holdoff timer array */
300 u8 counter_val[SGE_NCOUNTERS]; /* interrupt RX threshold array */ 300 u8 counter_val[SGE_NCOUNTERS]; /* interrupt RX threshold array */
301 301
302 /* Decoded Adapter Parameters.
303 */
304 u32 fl_pg_order; /* large page allocation size */
305 u32 stat_len; /* length of status page at ring end */
306 u32 pktshift; /* padding between CPL & packet data */
307 u32 fl_align; /* response queue message alignment */
308 u32 fl_starve_thres; /* Free List starvation threshold */
309
302 /* 310 /*
303 * Reverse maps from Absolute Queue IDs to associated queue pointers. 311 * Reverse maps from Absolute Queue IDs to associated queue pointers.
304 * The absolute Queue IDs are in a compact range which start at a 312 * The absolute Queue IDs are in a compact range which start at a
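
The cxgb4vf side of the series replaces the file-scope globals FL_PG_ORDER, STAT_LEN, PKTSHIFT, FL_ALIGN and FL_STARVE_THRES with fields in the per-adapter struct sge, so helpers such as fl_starving() and get_buf_size() (see the sge.c hunks further below) take the adapter and read its own decoded parameters, a prerequisite for handling adapters with different SGE settings in one module. A minimal sketch of the resulting shape (types and values are illustrative only):

    #include <stdbool.h>
    #include <stdio.h>

    /* decoded parameters now live per adapter instead of in globals */
    struct sge_params {
        unsigned int fl_align;          /* free-list buffer alignment */
        unsigned int fl_starve_thres;   /* refill threshold */
    };

    struct adapter {
        struct sge_params sge;
    };

    struct sge_fl {
        unsigned int avail;             /* buffers the hardware can use */
        unsigned int pend_cred;         /* buffers not yet handed to hw */
    };

    /* was: fl_starving(fl) comparing against a global FL_STARVE_THRES */
    static bool fl_starving(const struct adapter *adap, const struct sge_fl *fl)
    {
        return fl->avail - fl->pend_cred <= adap->sge.fl_starve_thres;
    }

    int main(void)
    {
        struct adapter a = { .sge = { .fl_align = 64, .fl_starve_thres = 4 } };
        struct sge_fl fl = { .avail = 6, .pend_cred = 3 };

        printf("starving: %s\n", fl_starving(&a, &fl) ? "yes" : "no");
        return 0;
    }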
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
index bfa398d91826..0b42bddaf284 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/cxgb4vf_main.c
@@ -2929,14 +2929,14 @@ static const struct pci_device_id cxgb4vf_pci_tbl[] = {
2929 CH_DEVICE(0x480d), /* T480-cr */ 2929 CH_DEVICE(0x480d), /* T480-cr */
2930 CH_DEVICE(0x480e), /* T440-lp-cr */ 2930 CH_DEVICE(0x480e), /* T440-lp-cr */
2931 CH_DEVICE(0x4880), 2931 CH_DEVICE(0x4880),
2932 CH_DEVICE(0x4880), 2932 CH_DEVICE(0x4881),
2933 CH_DEVICE(0x4880), 2933 CH_DEVICE(0x4882),
2934 CH_DEVICE(0x4880), 2934 CH_DEVICE(0x4883),
2935 CH_DEVICE(0x4880), 2935 CH_DEVICE(0x4884),
2936 CH_DEVICE(0x4880), 2936 CH_DEVICE(0x4885),
2937 CH_DEVICE(0x4880), 2937 CH_DEVICE(0x4886),
2938 CH_DEVICE(0x4880), 2938 CH_DEVICE(0x4887),
2939 CH_DEVICE(0x4880), 2939 CH_DEVICE(0x4888),
2940 CH_DEVICE(0x5801), /* T520-cr */ 2940 CH_DEVICE(0x5801), /* T520-cr */
2941 CH_DEVICE(0x5802), /* T522-cr */ 2941 CH_DEVICE(0x5802), /* T522-cr */
2942 CH_DEVICE(0x5803), /* T540-cr */ 2942 CH_DEVICE(0x5803), /* T540-cr */
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
index 85036e6b42c4..fdd078d7d82c 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/sge.c
@@ -51,14 +51,6 @@
51#include "../cxgb4/t4_msg.h" 51#include "../cxgb4/t4_msg.h"
52 52
53/* 53/*
54 * Decoded Adapter Parameters.
55 */
56static u32 FL_PG_ORDER; /* large page allocation size */
57static u32 STAT_LEN; /* length of status page at ring end */
58static u32 PKTSHIFT; /* padding between CPL and packet data */
59static u32 FL_ALIGN; /* response queue message alignment */
60
61/*
62 * Constants ... 54 * Constants ...
63 */ 55 */
64enum { 56enum {
@@ -102,12 +94,6 @@ enum {
102 MAX_TIMER_TX_RECLAIM = 100, 94 MAX_TIMER_TX_RECLAIM = 100,
103 95
104 /* 96 /*
105 * An FL with <= FL_STARVE_THRES buffers is starving and a periodic
106 * timer will attempt to refill it.
107 */
108 FL_STARVE_THRES = 4,
109
110 /*
111 * Suspend an Ethernet TX queue with fewer available descriptors than 97 * Suspend an Ethernet TX queue with fewer available descriptors than
112 * this. We always want to have room for a maximum sized packet: 98 * this. We always want to have room for a maximum sized packet:
113 * inline immediate data + MAX_SKB_FRAGS. This is the same as 99 * inline immediate data + MAX_SKB_FRAGS. This is the same as
@@ -264,15 +250,19 @@ static inline unsigned int fl_cap(const struct sge_fl *fl)
264 250
265/** 251/**
266 * fl_starving - return whether a Free List is starving. 252 * fl_starving - return whether a Free List is starving.
253 * @adapter: pointer to the adapter
267 * @fl: the Free List 254 * @fl: the Free List
268 * 255 *
269 * Tests specified Free List to see whether the number of buffers 256 * Tests specified Free List to see whether the number of buffers
270 * available to the hardware has fallen below our "starvation" 257 * available to the hardware has fallen below our "starvation"
271 * threshold. 258 * threshold.
272 */ 259 */
273static inline bool fl_starving(const struct sge_fl *fl) 260static inline bool fl_starving(const struct adapter *adapter,
261 const struct sge_fl *fl)
274{ 262{
275 return fl->avail - fl->pend_cred <= FL_STARVE_THRES; 263 const struct sge *s = &adapter->sge;
264
265 return fl->avail - fl->pend_cred <= s->fl_starve_thres;
276} 266}
277 267
278/** 268/**
@@ -457,13 +447,16 @@ static inline void reclaim_completed_tx(struct adapter *adapter,
457 447
458/** 448/**
459 * get_buf_size - return the size of an RX Free List buffer. 449 * get_buf_size - return the size of an RX Free List buffer.
450 * @adapter: pointer to the associated adapter
460 * @sdesc: pointer to the software buffer descriptor 451 * @sdesc: pointer to the software buffer descriptor
461 */ 452 */
462static inline int get_buf_size(const struct rx_sw_desc *sdesc) 453static inline int get_buf_size(const struct adapter *adapter,
454 const struct rx_sw_desc *sdesc)
463{ 455{
464 return FL_PG_ORDER > 0 && (sdesc->dma_addr & RX_LARGE_BUF) 456 const struct sge *s = &adapter->sge;
465 ? (PAGE_SIZE << FL_PG_ORDER) 457
466 : PAGE_SIZE; 458 return (s->fl_pg_order > 0 && (sdesc->dma_addr & RX_LARGE_BUF)
459 ? (PAGE_SIZE << s->fl_pg_order) : PAGE_SIZE);
467} 460}
468 461
469/** 462/**
@@ -483,7 +476,8 @@ static void free_rx_bufs(struct adapter *adapter, struct sge_fl *fl, int n)
483 476
484 if (is_buf_mapped(sdesc)) 477 if (is_buf_mapped(sdesc))
485 dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc), 478 dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
486 get_buf_size(sdesc), PCI_DMA_FROMDEVICE); 479 get_buf_size(adapter, sdesc),
480 PCI_DMA_FROMDEVICE);
487 put_page(sdesc->page); 481 put_page(sdesc->page);
488 sdesc->page = NULL; 482 sdesc->page = NULL;
489 if (++fl->cidx == fl->size) 483 if (++fl->cidx == fl->size)
@@ -511,7 +505,8 @@ static void unmap_rx_buf(struct adapter *adapter, struct sge_fl *fl)
511 505
512 if (is_buf_mapped(sdesc)) 506 if (is_buf_mapped(sdesc))
513 dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc), 507 dma_unmap_page(adapter->pdev_dev, get_buf_addr(sdesc),
514 get_buf_size(sdesc), PCI_DMA_FROMDEVICE); 508 get_buf_size(adapter, sdesc),
509 PCI_DMA_FROMDEVICE);
515 sdesc->page = NULL; 510 sdesc->page = NULL;
516 if (++fl->cidx == fl->size) 511 if (++fl->cidx == fl->size)
517 fl->cidx = 0; 512 fl->cidx = 0;
@@ -589,6 +584,7 @@ static inline void poison_buf(struct page *page, size_t sz)
589static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl, 584static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
590 int n, gfp_t gfp) 585 int n, gfp_t gfp)
591{ 586{
587 struct sge *s = &adapter->sge;
592 struct page *page; 588 struct page *page;
593 dma_addr_t dma_addr; 589 dma_addr_t dma_addr;
594 unsigned int cred = fl->avail; 590 unsigned int cred = fl->avail;
@@ -608,12 +604,12 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
608 * If we don't support large pages, drop directly into the small page 604 * If we don't support large pages, drop directly into the small page
609 * allocation code. 605 * allocation code.
610 */ 606 */
611 if (FL_PG_ORDER == 0) 607 if (s->fl_pg_order == 0)
612 goto alloc_small_pages; 608 goto alloc_small_pages;
613 609
614 while (n) { 610 while (n) {
615 page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN, 611 page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
616 FL_PG_ORDER); 612 s->fl_pg_order);
617 if (unlikely(!page)) { 613 if (unlikely(!page)) {
618 /* 614 /*
619 * We've failed in our attempt to allocate a "large 615 * We've failed in our attempt to allocate a "large
@@ -623,10 +619,10 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
623 fl->large_alloc_failed++; 619 fl->large_alloc_failed++;
624 break; 620 break;
625 } 621 }
626 poison_buf(page, PAGE_SIZE << FL_PG_ORDER); 622 poison_buf(page, PAGE_SIZE << s->fl_pg_order);
627 623
628 dma_addr = dma_map_page(adapter->pdev_dev, page, 0, 624 dma_addr = dma_map_page(adapter->pdev_dev, page, 0,
629 PAGE_SIZE << FL_PG_ORDER, 625 PAGE_SIZE << s->fl_pg_order,
630 PCI_DMA_FROMDEVICE); 626 PCI_DMA_FROMDEVICE);
631 if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) { 627 if (unlikely(dma_mapping_error(adapter->pdev_dev, dma_addr))) {
632 /* 628 /*
@@ -637,7 +633,7 @@ static unsigned int refill_fl(struct adapter *adapter, struct sge_fl *fl,
637 * because DMA mapping resources are typically 633 * because DMA mapping resources are typically
638 * critical resources once they become scarce. 634 * critical resources once they become scarce.
639 */ 635 */
640 __free_pages(page, FL_PG_ORDER); 636 __free_pages(page, s->fl_pg_order);
641 goto out; 637 goto out;
642 } 638 }
643 dma_addr |= RX_LARGE_BUF; 639 dma_addr |= RX_LARGE_BUF;
@@ -693,7 +689,7 @@ out:
693 fl->pend_cred += cred; 689 fl->pend_cred += cred;
694 ring_fl_db(adapter, fl); 690 ring_fl_db(adapter, fl);
695 691
696 if (unlikely(fl_starving(fl))) { 692 if (unlikely(fl_starving(adapter, fl))) {
697 smp_wmb(); 693 smp_wmb();
698 set_bit(fl->cntxt_id, adapter->sge.starving_fl); 694 set_bit(fl->cntxt_id, adapter->sge.starving_fl);
699 } 695 }
@@ -1468,6 +1464,8 @@ static void t4vf_pktgl_free(const struct pkt_gl *gl)
1468static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl, 1464static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
1469 const struct cpl_rx_pkt *pkt) 1465 const struct cpl_rx_pkt *pkt)
1470{ 1466{
1467 struct adapter *adapter = rxq->rspq.adapter;
1468 struct sge *s = &adapter->sge;
1471 int ret; 1469 int ret;
1472 struct sk_buff *skb; 1470 struct sk_buff *skb;
1473 1471
@@ -1478,8 +1476,8 @@ static void do_gro(struct sge_eth_rxq *rxq, const struct pkt_gl *gl,
1478 return; 1476 return;
1479 } 1477 }
1480 1478
1481 copy_frags(skb, gl, PKTSHIFT); 1479 copy_frags(skb, gl, s->pktshift);
1482 skb->len = gl->tot_len - PKTSHIFT; 1480 skb->len = gl->tot_len - s->pktshift;
1483 skb->data_len = skb->len; 1481 skb->data_len = skb->len;
1484 skb->truesize += skb->data_len; 1482 skb->truesize += skb->data_len;
1485 skb->ip_summed = CHECKSUM_UNNECESSARY; 1483 skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -1516,6 +1514,8 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
1516 bool csum_ok = pkt->csum_calc && !pkt->err_vec && 1514 bool csum_ok = pkt->csum_calc && !pkt->err_vec &&
1517 (rspq->netdev->features & NETIF_F_RXCSUM); 1515 (rspq->netdev->features & NETIF_F_RXCSUM);
1518 struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq); 1516 struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
1517 struct adapter *adapter = rspq->adapter;
1518 struct sge *s = &adapter->sge;
1519 1519
1520 /* 1520 /*
1521 * If this is a good TCP packet and we have Generic Receive Offload 1521 * If this is a good TCP packet and we have Generic Receive Offload
@@ -1537,7 +1537,7 @@ int t4vf_ethrx_handler(struct sge_rspq *rspq, const __be64 *rsp,
1537 rxq->stats.rx_drops++; 1537 rxq->stats.rx_drops++;
1538 return 0; 1538 return 0;
1539 } 1539 }
1540 __skb_pull(skb, PKTSHIFT); 1540 __skb_pull(skb, s->pktshift);
1541 skb->protocol = eth_type_trans(skb, rspq->netdev); 1541 skb->protocol = eth_type_trans(skb, rspq->netdev);
1542 skb_record_rx_queue(skb, rspq->idx); 1542 skb_record_rx_queue(skb, rspq->idx);
1543 rxq->stats.pkts++; 1543 rxq->stats.pkts++;
@@ -1648,6 +1648,8 @@ static inline void rspq_next(struct sge_rspq *rspq)
1648static int process_responses(struct sge_rspq *rspq, int budget) 1648static int process_responses(struct sge_rspq *rspq, int budget)
1649{ 1649{
1650 struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq); 1650 struct sge_eth_rxq *rxq = container_of(rspq, struct sge_eth_rxq, rspq);
1651 struct adapter *adapter = rspq->adapter;
1652 struct sge *s = &adapter->sge;
1651 int budget_left = budget; 1653 int budget_left = budget;
1652 1654
1653 while (likely(budget_left)) { 1655 while (likely(budget_left)) {
@@ -1697,7 +1699,7 @@ static int process_responses(struct sge_rspq *rspq, int budget)
1697 BUG_ON(frag >= MAX_SKB_FRAGS); 1699 BUG_ON(frag >= MAX_SKB_FRAGS);
1698 BUG_ON(rxq->fl.avail == 0); 1700 BUG_ON(rxq->fl.avail == 0);
1699 sdesc = &rxq->fl.sdesc[rxq->fl.cidx]; 1701 sdesc = &rxq->fl.sdesc[rxq->fl.cidx];
1700 bufsz = get_buf_size(sdesc); 1702 bufsz = get_buf_size(adapter, sdesc);
1701 fp->page = sdesc->page; 1703 fp->page = sdesc->page;
1702 fp->offset = rspq->offset; 1704 fp->offset = rspq->offset;
1703 fp->size = min(bufsz, len); 1705 fp->size = min(bufsz, len);
@@ -1726,7 +1728,7 @@ static int process_responses(struct sge_rspq *rspq, int budget)
1726 */ 1728 */
1727 ret = rspq->handler(rspq, rspq->cur_desc, &gl); 1729 ret = rspq->handler(rspq, rspq->cur_desc, &gl);
1728 if (likely(ret == 0)) 1730 if (likely(ret == 0))
1729 rspq->offset += ALIGN(fp->size, FL_ALIGN); 1731 rspq->offset += ALIGN(fp->size, s->fl_align);
1730 else 1732 else
1731 restore_rx_bufs(&gl, &rxq->fl, frag); 1733 restore_rx_bufs(&gl, &rxq->fl, frag);
1732 } else if (likely(rsp_type == RSP_TYPE_CPL)) { 1734 } else if (likely(rsp_type == RSP_TYPE_CPL)) {
@@ -1963,7 +1965,7 @@ static void sge_rx_timer_cb(unsigned long data)
1963 * schedule napi but the FL is no longer starving. 1965 * schedule napi but the FL is no longer starving.
1964 * No biggie. 1966 * No biggie.
1965 */ 1967 */
1966 if (fl_starving(fl)) { 1968 if (fl_starving(adapter, fl)) {
1967 struct sge_eth_rxq *rxq; 1969 struct sge_eth_rxq *rxq;
1968 1970
1969 rxq = container_of(fl, struct sge_eth_rxq, fl); 1971 rxq = container_of(fl, struct sge_eth_rxq, fl);
@@ -2047,6 +2049,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
2047 int intr_dest, 2049 int intr_dest,
2048 struct sge_fl *fl, rspq_handler_t hnd) 2050 struct sge_fl *fl, rspq_handler_t hnd)
2049{ 2051{
2052 struct sge *s = &adapter->sge;
2050 struct port_info *pi = netdev_priv(dev); 2053 struct port_info *pi = netdev_priv(dev);
2051 struct fw_iq_cmd cmd, rpl; 2054 struct fw_iq_cmd cmd, rpl;
2052 int ret, iqandst, flsz = 0; 2055 int ret, iqandst, flsz = 0;
@@ -2117,7 +2120,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
2117 fl->size = roundup(fl->size, FL_PER_EQ_UNIT); 2120 fl->size = roundup(fl->size, FL_PER_EQ_UNIT);
2118 fl->desc = alloc_ring(adapter->pdev_dev, fl->size, 2121 fl->desc = alloc_ring(adapter->pdev_dev, fl->size,
2119 sizeof(__be64), sizeof(struct rx_sw_desc), 2122 sizeof(__be64), sizeof(struct rx_sw_desc),
2120 &fl->addr, &fl->sdesc, STAT_LEN); 2123 &fl->addr, &fl->sdesc, s->stat_len);
2121 if (!fl->desc) { 2124 if (!fl->desc) {
2122 ret = -ENOMEM; 2125 ret = -ENOMEM;
2123 goto err; 2126 goto err;
@@ -2129,7 +2132,7 @@ int t4vf_sge_alloc_rxq(struct adapter *adapter, struct sge_rspq *rspq,
2129 * free list ring) in Egress Queue Units. 2132 * free list ring) in Egress Queue Units.
2130 */ 2133 */
2131 flsz = (fl->size / FL_PER_EQ_UNIT + 2134 flsz = (fl->size / FL_PER_EQ_UNIT +
2132 STAT_LEN / EQ_UNIT); 2135 s->stat_len / EQ_UNIT);
2133 2136
2134 /* 2137 /*
2135 * Fill in all the relevant firmware Ingress Queue Command 2138 * Fill in all the relevant firmware Ingress Queue Command
@@ -2217,6 +2220,7 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
2217 struct net_device *dev, struct netdev_queue *devq, 2220 struct net_device *dev, struct netdev_queue *devq,
2218 unsigned int iqid) 2221 unsigned int iqid)
2219{ 2222{
2223 struct sge *s = &adapter->sge;
2220 int ret, nentries; 2224 int ret, nentries;
2221 struct fw_eq_eth_cmd cmd, rpl; 2225 struct fw_eq_eth_cmd cmd, rpl;
2222 struct port_info *pi = netdev_priv(dev); 2226 struct port_info *pi = netdev_priv(dev);
@@ -2225,7 +2229,7 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
2225 * Calculate the size of the hardware TX Queue (including the Status 2229 * Calculate the size of the hardware TX Queue (including the Status
2226 * Page on the end of the TX Queue) in units of TX Descriptors. 2230 * Page on the end of the TX Queue) in units of TX Descriptors.
2227 */ 2231 */
2228 nentries = txq->q.size + STAT_LEN / sizeof(struct tx_desc); 2232 nentries = txq->q.size + s->stat_len / sizeof(struct tx_desc);
2229 2233
2230 /* 2234 /*
2231 * Allocate the hardware ring for the TX ring (with space for its 2235 * Allocate the hardware ring for the TX ring (with space for its
@@ -2234,7 +2238,7 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
2234 txq->q.desc = alloc_ring(adapter->pdev_dev, txq->q.size, 2238 txq->q.desc = alloc_ring(adapter->pdev_dev, txq->q.size,
2235 sizeof(struct tx_desc), 2239 sizeof(struct tx_desc),
2236 sizeof(struct tx_sw_desc), 2240 sizeof(struct tx_sw_desc),
2237 &txq->q.phys_addr, &txq->q.sdesc, STAT_LEN); 2241 &txq->q.phys_addr, &txq->q.sdesc, s->stat_len);
2238 if (!txq->q.desc) 2242 if (!txq->q.desc)
2239 return -ENOMEM; 2243 return -ENOMEM;
2240 2244
@@ -2307,8 +2311,10 @@ int t4vf_sge_alloc_eth_txq(struct adapter *adapter, struct sge_eth_txq *txq,
2307 */ 2311 */
2308static void free_txq(struct adapter *adapter, struct sge_txq *tq) 2312static void free_txq(struct adapter *adapter, struct sge_txq *tq)
2309{ 2313{
2314 struct sge *s = &adapter->sge;
2315
2310 dma_free_coherent(adapter->pdev_dev, 2316 dma_free_coherent(adapter->pdev_dev,
2311 tq->size * sizeof(*tq->desc) + STAT_LEN, 2317 tq->size * sizeof(*tq->desc) + s->stat_len,
2312 tq->desc, tq->phys_addr); 2318 tq->desc, tq->phys_addr);
2313 tq->cntxt_id = 0; 2319 tq->cntxt_id = 0;
2314 tq->sdesc = NULL; 2320 tq->sdesc = NULL;
@@ -2322,6 +2328,7 @@ static void free_txq(struct adapter *adapter, struct sge_txq *tq)
2322static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq, 2328static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq,
2323 struct sge_fl *fl) 2329 struct sge_fl *fl)
2324{ 2330{
2331 struct sge *s = &adapter->sge;
2325 unsigned int flid = fl ? fl->cntxt_id : 0xffff; 2332 unsigned int flid = fl ? fl->cntxt_id : 0xffff;
2326 2333
2327 t4vf_iq_free(adapter, FW_IQ_TYPE_FL_INT_CAP, 2334 t4vf_iq_free(adapter, FW_IQ_TYPE_FL_INT_CAP,
@@ -2337,7 +2344,7 @@ static void free_rspq_fl(struct adapter *adapter, struct sge_rspq *rspq,
2337 if (fl) { 2344 if (fl) {
2338 free_rx_bufs(adapter, fl, fl->avail); 2345 free_rx_bufs(adapter, fl, fl->avail);
2339 dma_free_coherent(adapter->pdev_dev, 2346 dma_free_coherent(adapter->pdev_dev,
2340 fl->size * sizeof(*fl->desc) + STAT_LEN, 2347 fl->size * sizeof(*fl->desc) + s->stat_len,
2341 fl->desc, fl->addr); 2348 fl->desc, fl->addr);
2342 kfree(fl->sdesc); 2349 kfree(fl->sdesc);
2343 fl->sdesc = NULL; 2350 fl->sdesc = NULL;
@@ -2423,6 +2430,7 @@ int t4vf_sge_init(struct adapter *adapter)
2423 u32 fl0 = sge_params->sge_fl_buffer_size[0]; 2430 u32 fl0 = sge_params->sge_fl_buffer_size[0];
2424 u32 fl1 = sge_params->sge_fl_buffer_size[1]; 2431 u32 fl1 = sge_params->sge_fl_buffer_size[1];
2425 struct sge *s = &adapter->sge; 2432 struct sge *s = &adapter->sge;
2433 unsigned int ingpadboundary, ingpackboundary;
2426 2434
2427 /* 2435 /*
2428 * Start by vetting the basic SGE parameters which have been set up by 2436 * Start by vetting the basic SGE parameters which have been set up by
@@ -2443,12 +2451,48 @@ int t4vf_sge_init(struct adapter *adapter)
2443 * Now translate the adapter parameters into our internal forms. 2451 * Now translate the adapter parameters into our internal forms.
2444 */ 2452 */
2445 if (fl1) 2453 if (fl1)
2446 FL_PG_ORDER = ilog2(fl1) - PAGE_SHIFT; 2454 s->fl_pg_order = ilog2(fl1) - PAGE_SHIFT;
2447 STAT_LEN = ((sge_params->sge_control & EGRSTATUSPAGESIZE_MASK) 2455 s->stat_len = ((sge_params->sge_control & EGRSTATUSPAGESIZE_MASK)
2448 ? 128 : 64); 2456 ? 128 : 64);
2449 PKTSHIFT = PKTSHIFT_GET(sge_params->sge_control); 2457 s->pktshift = PKTSHIFT_GET(sge_params->sge_control);
2450 FL_ALIGN = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) + 2458
2451 SGE_INGPADBOUNDARY_SHIFT); 2459 /* T4 uses a single control field to specify both the PCIe Padding and
2460 * Packing Boundary. T5 introduced the ability to specify these
2461 * separately. The actual Ingress Packet Data alignment boundary
2462 * within Packed Buffer Mode is the maximum of these two
2463 * specifications. (Note that it makes no real practical sense to
2464 * have the Padding Boundary be larger than the Packing Boundary but you
2465 * could set the chip up that way and, in fact, legacy T4 code would
2466 * end up doing this because it would initialize the Padding Boundary and
2467 * leave the Packing Boundary initialized to 0 (16 bytes).)
2468 */
2469 ingpadboundary = 1 << (INGPADBOUNDARY_GET(sge_params->sge_control) +
2470 X_INGPADBOUNDARY_SHIFT);
2471 if (is_t4(adapter->params.chip)) {
2472 s->fl_align = ingpadboundary;
2473 } else {
2474 /* T5 has a different interpretation of one of the PCIe Packing
2475 * Boundary values.
2476 */
2477 ingpackboundary = INGPACKBOUNDARY_G(sge_params->sge_control2);
2478 if (ingpackboundary == INGPACKBOUNDARY_16B_X)
2479 ingpackboundary = 16;
2480 else
2481 ingpackboundary = 1 << (ingpackboundary +
2482 INGPACKBOUNDARY_SHIFT_X);
2483
2484 s->fl_align = max(ingpadboundary, ingpackboundary);
2485 }
2486
2487 /* An FL with <= fl_starve_thres buffers is starving and a periodic
2488 * timer will attempt to refill it. This needs to be larger than the
2489 * SGE's Egress Congestion Threshold. If it isn't, then we can get
2490 * stuck waiting for new packets while the SGE is waiting for us to
2491 * give it more Free List entries. (Note that the SGE's Egress
2492 * Congestion Threshold is in units of 2 Free List pointers.)
2493 */
2494 s->fl_starve_thres
2495 = EGRTHRESHOLD_GET(sge_params->sge_congestion_control)*2 + 1;
2452 2496
2453 /* 2497 /*
2454 * Set up tasklet timers. 2498 * Set up tasklet timers.
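The t4vf_sge_init() hunk above replaces the old module-global FL_ALIGN/FL_STARVE_THRES computation with per-adapter fields: on T5 the Free List alignment is the larger of the decoded Ingress Padding and Packing Boundaries (a packing field of 0 meaning 16 bytes), and the starvation threshold is derived from the SGE Egress Congestion Threshold, which counts pairs of Free List pointers. The stand-alone user-space sketch below mirrors that arithmetic; the X_INGPADBOUNDARY_SHIFT value of 5 is an assumption about the Chelsio register header, and the example field values are invented for illustration.

#include <stdio.h>

#define INGPACKBOUNDARY_16B_X   0    /* from the patch above */
#define INGPACKBOUNDARY_SHIFT_X 5    /* from the patch above */
#define X_INGPADBOUNDARY_SHIFT  5    /* assumed value of the kernel constant */

/* Decode the T5 Free List alignment from the two already-extracted
 * boundary fields, as the !is_t4() branch above does. */
static unsigned int t5_fl_align(unsigned int pad_field, unsigned int pack_field)
{
        unsigned int ingpadboundary = 1u << (pad_field + X_INGPADBOUNDARY_SHIFT);
        unsigned int ingpackboundary;

        if (pack_field == INGPACKBOUNDARY_16B_X)
                ingpackboundary = 16;    /* special-cased encoding */
        else
                ingpackboundary = 1u << (pack_field + INGPACKBOUNDARY_SHIFT_X);

        return ingpadboundary > ingpackboundary ? ingpadboundary : ingpackboundary;
}

int main(void)
{
        unsigned int egrthreshold = 64;  /* example threshold, in FL-pointer pairs */

        printf("fl_align = %u\n", t5_fl_align(1, 0));
        printf("fl_starve_thres = %u\n", 2u * egrthreshold + 1);
        return 0;
}

For a padding field of 1 and a packing field of 0 this prints fl_align = 64, matching the "use whichever boundary is larger" rule the patch adds.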
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
index 95df61dcb4ce..4b6a6d14d86d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_common.h
@@ -134,11 +134,13 @@ struct dev_params {
134 */ 134 */
135struct sge_params { 135struct sge_params {
136 u32 sge_control; /* padding, boundaries, lengths, etc. */ 136 u32 sge_control; /* padding, boundaries, lengths, etc. */
137 u32 sge_control2; /* T5: more of the same */
137 u32 sge_host_page_size; /* RDMA page sizes */ 138 u32 sge_host_page_size; /* RDMA page sizes */
138 u32 sge_queues_per_page; /* RDMA queues/page */ 139 u32 sge_queues_per_page; /* RDMA queues/page */
139 u32 sge_user_mode_limits; /* limits for BAR2 user mode accesses */ 140 u32 sge_user_mode_limits; /* limits for BAR2 user mode accesses */
140 u32 sge_fl_buffer_size[16]; /* free list buffer sizes */ 141 u32 sge_fl_buffer_size[16]; /* free list buffer sizes */
141 u32 sge_ingress_rx_threshold; /* RX counter interrupt threshold[4] */ 142 u32 sge_ingress_rx_threshold; /* RX counter interrupt threshold[4] */
143 u32 sge_congestion_control; /* congestion thresholds, etc. */
142 u32 sge_timer_value_0_and_1; /* interrupt coalescing timer values */ 144 u32 sge_timer_value_0_and_1; /* interrupt coalescing timer values */
143 u32 sge_timer_value_2_and_3; 145 u32 sge_timer_value_2_and_3;
144 u32 sge_timer_value_4_and_5; 146 u32 sge_timer_value_4_and_5;
diff --git a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
index e984fdc48ba2..1e896b923234 100644
--- a/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4vf/t4vf_hw.c
@@ -468,12 +468,38 @@ int t4vf_get_sge_params(struct adapter *adapter)
468 sge_params->sge_timer_value_2_and_3 = vals[5]; 468 sge_params->sge_timer_value_2_and_3 = vals[5];
469 sge_params->sge_timer_value_4_and_5 = vals[6]; 469 sge_params->sge_timer_value_4_and_5 = vals[6];
470 470
471 /* T4 uses a single control field to specify both the PCIe Padding and
472 * Packing Boundary. T5 introduced the ability to specify these
473 * separately with the Padding Boundary in SGE_CONTROL and the Packing
474 * Boundary in SGE_CONTROL2. So for T5 and later we need to grab
475 * SGE_CONTROL2 in order to determine how ingress packet data will be
476 * laid out in Packed Buffer Mode. Unfortunately, older versions of
477 * the firmware won't let us retrieve SGE_CONTROL2 so if we get a
478 * failure grabbing it we throw an error since we can't figure out the
479 * right value.
480 */
481 if (!is_t4(adapter->params.chip)) {
482 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
483 FW_PARAMS_PARAM_XYZ(SGE_CONTROL2_A));
484 v = t4vf_query_params(adapter, 1, params, vals);
485 if (v != FW_SUCCESS) {
486 dev_err(adapter->pdev_dev,
487 "Unable to get SGE Control2; "
488 "probably old firmware.\n");
489 return v;
490 }
491 sge_params->sge_control2 = vals[0];
492 }
493
471 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) | 494 params[0] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
472 FW_PARAMS_PARAM_XYZ(SGE_INGRESS_RX_THRESHOLD)); 495 FW_PARAMS_PARAM_XYZ(SGE_INGRESS_RX_THRESHOLD));
473 v = t4vf_query_params(adapter, 1, params, vals); 496 params[1] = (FW_PARAMS_MNEM(FW_PARAMS_MNEM_REG) |
497 FW_PARAMS_PARAM_XYZ(SGE_CONM_CTRL));
498 v = t4vf_query_params(adapter, 2, params, vals);
474 if (v) 499 if (v)
475 return v; 500 return v;
476 sge_params->sge_ingress_rx_threshold = vals[0]; 501 sge_params->sge_ingress_rx_threshold = vals[0];
502 sge_params->sge_congestion_control = vals[1];
477 503
478 return 0; 504 return 0;
479} 505}
diff --git a/drivers/net/ethernet/cisco/enic/enic_clsf.c b/drivers/net/ethernet/cisco/enic/enic_clsf.c
index 69dfd3c9e529..0be6850be8a2 100644
--- a/drivers/net/ethernet/cisco/enic/enic_clsf.c
+++ b/drivers/net/ethernet/cisco/enic/enic_clsf.c
@@ -86,7 +86,7 @@ void enic_rfs_flw_tbl_free(struct enic *enic)
86 int i; 86 int i;
87 87
88 enic_rfs_timer_stop(enic); 88 enic_rfs_timer_stop(enic);
89 spin_lock(&enic->rfs_h.lock); 89 spin_lock_bh(&enic->rfs_h.lock);
90 enic->rfs_h.free = 0; 90 enic->rfs_h.free = 0;
91 for (i = 0; i < (1 << ENIC_RFS_FLW_BITSHIFT); i++) { 91 for (i = 0; i < (1 << ENIC_RFS_FLW_BITSHIFT); i++) {
92 struct hlist_head *hhead; 92 struct hlist_head *hhead;
@@ -100,7 +100,7 @@ void enic_rfs_flw_tbl_free(struct enic *enic)
100 kfree(n); 100 kfree(n);
101 } 101 }
102 } 102 }
103 spin_unlock(&enic->rfs_h.lock); 103 spin_unlock_bh(&enic->rfs_h.lock);
104} 104}
105 105
106struct enic_rfs_fltr_node *htbl_fltr_search(struct enic *enic, u16 fltr_id) 106struct enic_rfs_fltr_node *htbl_fltr_search(struct enic *enic, u16 fltr_id)
@@ -128,7 +128,7 @@ void enic_flow_may_expire(unsigned long data)
128 bool res; 128 bool res;
129 int j; 129 int j;
130 130
131 spin_lock(&enic->rfs_h.lock); 131 spin_lock_bh(&enic->rfs_h.lock);
132 for (j = 0; j < ENIC_CLSF_EXPIRE_COUNT; j++) { 132 for (j = 0; j < ENIC_CLSF_EXPIRE_COUNT; j++) {
133 struct hlist_head *hhead; 133 struct hlist_head *hhead;
134 struct hlist_node *tmp; 134 struct hlist_node *tmp;
@@ -148,7 +148,7 @@ void enic_flow_may_expire(unsigned long data)
148 } 148 }
149 } 149 }
150 } 150 }
151 spin_unlock(&enic->rfs_h.lock); 151 spin_unlock_bh(&enic->rfs_h.lock);
152 mod_timer(&enic->rfs_h.rfs_may_expire, jiffies + HZ/4); 152 mod_timer(&enic->rfs_h.rfs_may_expire, jiffies + HZ/4);
153} 153}
154 154
@@ -183,7 +183,7 @@ int enic_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
183 return -EPROTONOSUPPORT; 183 return -EPROTONOSUPPORT;
184 184
185 tbl_idx = skb_get_hash_raw(skb) & ENIC_RFS_FLW_MASK; 185 tbl_idx = skb_get_hash_raw(skb) & ENIC_RFS_FLW_MASK;
186 spin_lock(&enic->rfs_h.lock); 186 spin_lock_bh(&enic->rfs_h.lock);
187 n = htbl_key_search(&enic->rfs_h.ht_head[tbl_idx], &keys); 187 n = htbl_key_search(&enic->rfs_h.ht_head[tbl_idx], &keys);
188 188
189 if (n) { /* entry already present */ 189 if (n) { /* entry already present */
@@ -277,7 +277,7 @@ int enic_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb,
277 } 277 }
278 278
279ret_unlock: 279ret_unlock:
280 spin_unlock(&enic->rfs_h.lock); 280 spin_unlock_bh(&enic->rfs_h.lock);
281 return res; 281 return res;
282} 282}
283 283
diff --git a/drivers/net/ethernet/cisco/enic/enic_main.c b/drivers/net/ethernet/cisco/enic/enic_main.c
index 929bfe70080a..73cf1653a4a3 100644
--- a/drivers/net/ethernet/cisco/enic/enic_main.c
+++ b/drivers/net/ethernet/cisco/enic/enic_main.c
@@ -940,18 +940,8 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
940 struct vnic_rq_buf *buf = rq->to_use; 940 struct vnic_rq_buf *buf = rq->to_use;
941 941
942 if (buf->os_buf) { 942 if (buf->os_buf) {
943 buf = buf->next; 943 enic_queue_rq_desc(rq, buf->os_buf, os_buf_index, buf->dma_addr,
944 rq->to_use = buf; 944 buf->len);
945 rq->ring.desc_avail--;
946 if ((buf->index & VNIC_RQ_RETURN_RATE) == 0) {
947 /* Adding write memory barrier prevents compiler and/or
948 * CPU reordering, thus avoiding descriptor posting
949 * before descriptor is initialized. Otherwise, hardware
950 * can read stale descriptor fields.
951 */
952 wmb();
953 iowrite32(buf->index, &rq->ctrl->posted_index);
954 }
955 945
956 return 0; 946 return 0;
957 } 947 }
@@ -1037,7 +1027,10 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
1037 enic->rq_truncated_pkts++; 1027 enic->rq_truncated_pkts++;
1038 } 1028 }
1039 1029
1030 pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
1031 PCI_DMA_FROMDEVICE);
1040 dev_kfree_skb_any(skb); 1032 dev_kfree_skb_any(skb);
1033 buf->os_buf = NULL;
1041 1034
1042 return; 1035 return;
1043 } 1036 }
@@ -1088,7 +1081,10 @@ static void enic_rq_indicate_buf(struct vnic_rq *rq,
1088 /* Buffer overflow 1081 /* Buffer overflow
1089 */ 1082 */
1090 1083
1084 pci_unmap_single(enic->pdev, buf->dma_addr, buf->len,
1085 PCI_DMA_FROMDEVICE);
1091 dev_kfree_skb_any(skb); 1086 dev_kfree_skb_any(skb);
1087 buf->os_buf = NULL;
1092 } 1088 }
1093} 1089}
1094 1090
@@ -1674,13 +1670,13 @@ static int enic_stop(struct net_device *netdev)
1674 1670
1675 enic_dev_disable(enic); 1671 enic_dev_disable(enic);
1676 1672
1677 local_bh_disable();
1678 for (i = 0; i < enic->rq_count; i++) { 1673 for (i = 0; i < enic->rq_count; i++) {
1679 napi_disable(&enic->napi[i]); 1674 napi_disable(&enic->napi[i]);
1675 local_bh_disable();
1680 while (!enic_poll_lock_napi(&enic->rq[i])) 1676 while (!enic_poll_lock_napi(&enic->rq[i]))
1681 mdelay(1); 1677 mdelay(1);
1678 local_bh_enable();
1682 } 1679 }
1683 local_bh_enable();
1684 1680
1685 netif_carrier_off(netdev); 1681 netif_carrier_off(netdev);
1686 netif_tx_disable(netdev); 1682 netif_tx_disable(netdev);
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 9a18e7930b31..597c463e384d 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -4309,11 +4309,16 @@ static int be_ndo_bridge_setlink(struct net_device *dev, struct nlmsghdr *nlh)
4309 return -EOPNOTSUPP; 4309 return -EOPNOTSUPP;
4310 4310
4311 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 4311 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
4312 if (!br_spec)
4313 return -EINVAL;
4312 4314
4313 nla_for_each_nested(attr, br_spec, rem) { 4315 nla_for_each_nested(attr, br_spec, rem) {
4314 if (nla_type(attr) != IFLA_BRIDGE_MODE) 4316 if (nla_type(attr) != IFLA_BRIDGE_MODE)
4315 continue; 4317 continue;
4316 4318
4319 if (nla_len(attr) < sizeof(mode))
4320 return -EINVAL;
4321
4317 mode = nla_get_u16(attr); 4322 mode = nla_get_u16(attr);
4318 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB) 4323 if (mode != BRIDGE_MODE_VEPA && mode != BRIDGE_MODE_VEB)
4319 return -EINVAL; 4324 return -EINVAL;
@@ -4421,6 +4426,11 @@ static void be_del_vxlan_port(struct net_device *netdev, sa_family_t sa_family,
4421 "Disabled VxLAN offloads for UDP port %d\n", 4426 "Disabled VxLAN offloads for UDP port %d\n",
4422 be16_to_cpu(port)); 4427 be16_to_cpu(port));
4423} 4428}
4429
4430static bool be_gso_check(struct sk_buff *skb, struct net_device *dev)
4431{
4432 return vxlan_gso_check(skb);
4433}
4424#endif 4434#endif
4425 4435
4426static const struct net_device_ops be_netdev_ops = { 4436static const struct net_device_ops be_netdev_ops = {
@@ -4450,6 +4460,7 @@ static const struct net_device_ops be_netdev_ops = {
4450#ifdef CONFIG_BE2NET_VXLAN 4460#ifdef CONFIG_BE2NET_VXLAN
4451 .ndo_add_vxlan_port = be_add_vxlan_port, 4461 .ndo_add_vxlan_port = be_add_vxlan_port,
4452 .ndo_del_vxlan_port = be_del_vxlan_port, 4462 .ndo_del_vxlan_port = be_del_vxlan_port,
4463 .ndo_gso_check = be_gso_check,
4453#endif 4464#endif
4454}; 4465};
4455 4466
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 81b96cf87574..3dca494797bd 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -298,6 +298,16 @@ static void *swap_buffer(void *bufaddr, int len)
298 return bufaddr; 298 return bufaddr;
299} 299}
300 300
301static void swap_buffer2(void *dst_buf, void *src_buf, int len)
302{
303 int i;
304 unsigned int *src = src_buf;
305 unsigned int *dst = dst_buf;
306
307 for (i = 0; i < len; i += 4, src++, dst++)
308 *dst = swab32p(src);
309}
310
301static void fec_dump(struct net_device *ndev) 311static void fec_dump(struct net_device *ndev)
302{ 312{
303 struct fec_enet_private *fep = netdev_priv(ndev); 313 struct fec_enet_private *fep = netdev_priv(ndev);
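The new swap_buffer2() helper above copies a receive frame into the copybreak skb while byte-swapping every 32-bit word, so controllers with the FEC_QUIRK_SWAP_FRAME quirk no longer need a second in-place pass over the data. A minimal user-space equivalent is sketched below; __builtin_bswap32() is used as a stand-in for the kernel's swab32p(), and, as in the driver, len is assumed to be a multiple of 4.

#include <stdio.h>
#include <stdint.h>

/* Copy len bytes from src_buf to dst_buf, byte-swapping each 32-bit word. */
static void swap_copy(void *dst_buf, const void *src_buf, int len)
{
        const uint32_t *src = src_buf;
        uint32_t *dst = dst_buf;
        int i;

        for (i = 0; i < len; i += 4, src++, dst++)
                *dst = __builtin_bswap32(*src);
}

int main(void)
{
        uint32_t src[2] = { 0x44332211, 0x88776655 };
        uint32_t dst[2];

        swap_copy(dst, src, sizeof(src));
        printf("%08x %08x\n", dst[0], dst[1]);   /* prints 11223344 55667788 */
        return 0;
}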
@@ -1307,7 +1317,7 @@ fec_enet_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct sk_buff
1307} 1317}
1308 1318
1309static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb, 1319static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
1310 struct bufdesc *bdp, u32 length) 1320 struct bufdesc *bdp, u32 length, bool swap)
1311{ 1321{
1312 struct fec_enet_private *fep = netdev_priv(ndev); 1322 struct fec_enet_private *fep = netdev_priv(ndev);
1313 struct sk_buff *new_skb; 1323 struct sk_buff *new_skb;
@@ -1322,7 +1332,10 @@ static bool fec_enet_copybreak(struct net_device *ndev, struct sk_buff **skb,
1322 dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr, 1332 dma_sync_single_for_cpu(&fep->pdev->dev, bdp->cbd_bufaddr,
1323 FEC_ENET_RX_FRSIZE - fep->rx_align, 1333 FEC_ENET_RX_FRSIZE - fep->rx_align,
1324 DMA_FROM_DEVICE); 1334 DMA_FROM_DEVICE);
1325 memcpy(new_skb->data, (*skb)->data, length); 1335 if (!swap)
1336 memcpy(new_skb->data, (*skb)->data, length);
1337 else
1338 swap_buffer2(new_skb->data, (*skb)->data, length);
1326 *skb = new_skb; 1339 *skb = new_skb;
1327 1340
1328 return true; 1341 return true;
@@ -1352,6 +1365,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
1352 u16 vlan_tag; 1365 u16 vlan_tag;
1353 int index = 0; 1366 int index = 0;
1354 bool is_copybreak; 1367 bool is_copybreak;
1368 bool need_swap = id_entry->driver_data & FEC_QUIRK_SWAP_FRAME;
1355 1369
1356#ifdef CONFIG_M532x 1370#ifdef CONFIG_M532x
1357 flush_cache_all(); 1371 flush_cache_all();
@@ -1415,7 +1429,8 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
1415 * include that when passing upstream as it messes up 1429 * include that when passing upstream as it messes up
1416 * bridging applications. 1430 * bridging applications.
1417 */ 1431 */
1418 is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4); 1432 is_copybreak = fec_enet_copybreak(ndev, &skb, bdp, pkt_len - 4,
1433 need_swap);
1419 if (!is_copybreak) { 1434 if (!is_copybreak) {
1420 skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE); 1435 skb_new = netdev_alloc_skb(ndev, FEC_ENET_RX_FRSIZE);
1421 if (unlikely(!skb_new)) { 1436 if (unlikely(!skb_new)) {
@@ -1430,7 +1445,7 @@ fec_enet_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
1430 prefetch(skb->data - NET_IP_ALIGN); 1445 prefetch(skb->data - NET_IP_ALIGN);
1431 skb_put(skb, pkt_len - 4); 1446 skb_put(skb, pkt_len - 4);
1432 data = skb->data; 1447 data = skb->data;
1433 if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME) 1448 if (!is_copybreak && need_swap)
1434 swap_buffer(data, pkt_len); 1449 swap_buffer(data, pkt_len);
1435 1450
1436 /* Extract the enhanced buffer descriptor */ 1451 /* Extract the enhanced buffer descriptor */
@@ -1581,7 +1596,8 @@ fec_enet_interrupt(int irq, void *dev_id)
1581 complete(&fep->mdio_done); 1596 complete(&fep->mdio_done);
1582 } 1597 }
1583 1598
1584 fec_ptp_check_pps_event(fep); 1599 if (fep->ptp_clock)
1600 fec_ptp_check_pps_event(fep);
1585 1601
1586 return ret; 1602 return ret;
1587} 1603}
@@ -3342,12 +3358,11 @@ static int __maybe_unused fec_suspend(struct device *dev)
3342 netif_device_detach(ndev); 3358 netif_device_detach(ndev);
3343 netif_tx_unlock_bh(ndev); 3359 netif_tx_unlock_bh(ndev);
3344 fec_stop(ndev); 3360 fec_stop(ndev);
3361 fec_enet_clk_enable(ndev, false);
3362 pinctrl_pm_select_sleep_state(&fep->pdev->dev);
3345 } 3363 }
3346 rtnl_unlock(); 3364 rtnl_unlock();
3347 3365
3348 fec_enet_clk_enable(ndev, false);
3349 pinctrl_pm_select_sleep_state(&fep->pdev->dev);
3350
3351 if (fep->reg_phy) 3366 if (fep->reg_phy)
3352 regulator_disable(fep->reg_phy); 3367 regulator_disable(fep->reg_phy);
3353 3368
@@ -3366,13 +3381,14 @@ static int __maybe_unused fec_resume(struct device *dev)
3366 return ret; 3381 return ret;
3367 } 3382 }
3368 3383
3369 pinctrl_pm_select_default_state(&fep->pdev->dev);
3370 ret = fec_enet_clk_enable(ndev, true);
3371 if (ret)
3372 goto failed_clk;
3373
3374 rtnl_lock(); 3384 rtnl_lock();
3375 if (netif_running(ndev)) { 3385 if (netif_running(ndev)) {
3386 pinctrl_pm_select_default_state(&fep->pdev->dev);
3387 ret = fec_enet_clk_enable(ndev, true);
3388 if (ret) {
3389 rtnl_unlock();
3390 goto failed_clk;
3391 }
3376 fec_restart(ndev); 3392 fec_restart(ndev);
3377 netif_tx_lock_bh(ndev); 3393 netif_tx_lock_bh(ndev);
3378 netif_device_attach(ndev); 3394 netif_device_attach(ndev);
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
index 3d4e08be1709..b34214e2df5f 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
@@ -341,6 +341,9 @@ static void restart(struct net_device *dev)
341 FC(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD disable */ 341 FC(fecp, x_cntrl, FEC_TCNTRL_FDEN); /* FD disable */
342 } 342 }
343 343
344 /* Restore multicast and promiscuous settings */
345 set_multicast_list(dev);
346
344 /* 347 /*
345 * Enable interrupts we wish to service. 348 * Enable interrupts we wish to service.
346 */ 349 */
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
index f30411f0701f..7a184e8816a4 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-scc.c
@@ -355,6 +355,9 @@ static void restart(struct net_device *dev)
355 if (fep->phydev->duplex) 355 if (fep->phydev->duplex)
356 S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE); 356 S16(sccp, scc_psmr, SCC_PSMR_LPB | SCC_PSMR_FDE);
357 357
358 /* Restore multicast and promiscuous settings */
359 set_multicast_list(dev);
360
358 S32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT); 361 S32(sccp, scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT);
359} 362}
360 363
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 5f6aded512f5..24f3986cfae2 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -1075,7 +1075,10 @@ static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1075 NETIF_F_HW_CSUM | 1075 NETIF_F_HW_CSUM |
1076 NETIF_F_SG); 1076 NETIF_F_SG);
1077 1077
1078 netdev->priv_flags |= IFF_UNICAST_FLT; 1078 /* Do not set IFF_UNICAST_FLT for VMWare's 82545EM */
1079 if (hw->device_id != E1000_DEV_ID_82545EM_COPPER ||
1080 hw->subsystem_vendor_id != PCI_VENDOR_ID_VMWARE)
1081 netdev->priv_flags |= IFF_UNICAST_FLT;
1079 1082
1080 adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw); 1083 adapter->en_mng_pt = e1000_enable_mng_pass_thru(hw);
1081 1084
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index ed5f1c15fb0f..c3a7f4a4b775 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -6151,7 +6151,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
6151 I40E_GL_MDET_TX_PF_NUM_SHIFT; 6151 I40E_GL_MDET_TX_PF_NUM_SHIFT;
6152 u8 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >> 6152 u8 vf_num = (reg & I40E_GL_MDET_TX_VF_NUM_MASK) >>
6153 I40E_GL_MDET_TX_VF_NUM_SHIFT; 6153 I40E_GL_MDET_TX_VF_NUM_SHIFT;
6154 u8 event = (reg & I40E_GL_MDET_TX_EVENT_SHIFT) >> 6154 u8 event = (reg & I40E_GL_MDET_TX_EVENT_MASK) >>
6155 I40E_GL_MDET_TX_EVENT_SHIFT; 6155 I40E_GL_MDET_TX_EVENT_SHIFT;
6156 u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >> 6156 u8 queue = (reg & I40E_GL_MDET_TX_QUEUE_MASK) >>
6157 I40E_GL_MDET_TX_QUEUE_SHIFT; 6157 I40E_GL_MDET_TX_QUEUE_SHIFT;
@@ -6165,7 +6165,7 @@ static void i40e_handle_mdd_event(struct i40e_pf *pf)
6165 if (reg & I40E_GL_MDET_RX_VALID_MASK) { 6165 if (reg & I40E_GL_MDET_RX_VALID_MASK) {
6166 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >> 6166 u8 func = (reg & I40E_GL_MDET_RX_FUNCTION_MASK) >>
6167 I40E_GL_MDET_RX_FUNCTION_SHIFT; 6167 I40E_GL_MDET_RX_FUNCTION_SHIFT;
6168 u8 event = (reg & I40E_GL_MDET_RX_EVENT_SHIFT) >> 6168 u8 event = (reg & I40E_GL_MDET_RX_EVENT_MASK) >>
6169 I40E_GL_MDET_RX_EVENT_SHIFT; 6169 I40E_GL_MDET_RX_EVENT_SHIFT;
6170 u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >> 6170 u8 queue = (reg & I40E_GL_MDET_RX_QUEUE_MASK) >>
6171 I40E_GL_MDET_RX_QUEUE_SHIFT; 6171 I40E_GL_MDET_RX_QUEUE_SHIFT;
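The small i40e fix above swaps I40E_GL_MDET_*_EVENT_SHIFT for I40E_GL_MDET_*_EVENT_MASK in the field extraction: a bitfield read must AND with the mask and then shift, whereas AND-ing with the shift constant only tests one unrelated bit. The toy program below reproduces the difference with made-up EVENT_SHIFT/EVENT_MASK values, not the real i40e register layout.

#include <stdio.h>
#include <stdint.h>

#define EVENT_SHIFT 8                        /* illustrative values only */
#define EVENT_MASK  (0xffu << EVENT_SHIFT)

int main(void)
{
        uint32_t reg = 0x00012a00;                            /* event field holds 0x2a */
        uint8_t buggy = (reg & EVENT_SHIFT) >> EVENT_SHIFT;   /* old code: ANDs with the shift value */
        uint8_t fixed = (reg & EVENT_MASK) >> EVENT_SHIFT;    /* fixed code: isolates the field */

        printf("buggy = 0x%02x, fixed = 0x%02x\n", buggy, fixed);
        return 0;
}

With this register value it prints buggy = 0x00, fixed = 0x2a.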
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index a21b14495ebd..487cd9c4ac0d 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -1012,7 +1012,8 @@ static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
1012 /* igb_get_stats64() might access the rings on this vector, 1012 /* igb_get_stats64() might access the rings on this vector,
1013 * we must wait a grace period before freeing it. 1013 * we must wait a grace period before freeing it.
1014 */ 1014 */
1015 kfree_rcu(q_vector, rcu); 1015 if (q_vector)
1016 kfree_rcu(q_vector, rcu);
1016} 1017}
1017 1018
1018/** 1019/**
@@ -1792,8 +1793,10 @@ void igb_down(struct igb_adapter *adapter)
1792 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE; 1793 adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
1793 1794
1794 for (i = 0; i < adapter->num_q_vectors; i++) { 1795 for (i = 0; i < adapter->num_q_vectors; i++) {
1795 napi_synchronize(&(adapter->q_vector[i]->napi)); 1796 if (adapter->q_vector[i]) {
1796 napi_disable(&(adapter->q_vector[i]->napi)); 1797 napi_synchronize(&adapter->q_vector[i]->napi);
1798 napi_disable(&adapter->q_vector[i]->napi);
1799 }
1797 } 1800 }
1798 1801
1799 1802
@@ -3717,7 +3720,8 @@ static void igb_free_all_tx_resources(struct igb_adapter *adapter)
3717 int i; 3720 int i;
3718 3721
3719 for (i = 0; i < adapter->num_tx_queues; i++) 3722 for (i = 0; i < adapter->num_tx_queues; i++)
3720 igb_free_tx_resources(adapter->tx_ring[i]); 3723 if (adapter->tx_ring[i])
3724 igb_free_tx_resources(adapter->tx_ring[i]);
3721} 3725}
3722 3726
3723void igb_unmap_and_free_tx_resource(struct igb_ring *ring, 3727void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
@@ -3782,7 +3786,8 @@ static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
3782 int i; 3786 int i;
3783 3787
3784 for (i = 0; i < adapter->num_tx_queues; i++) 3788 for (i = 0; i < adapter->num_tx_queues; i++)
3785 igb_clean_tx_ring(adapter->tx_ring[i]); 3789 if (adapter->tx_ring[i])
3790 igb_clean_tx_ring(adapter->tx_ring[i]);
3786} 3791}
3787 3792
3788/** 3793/**
@@ -3819,7 +3824,8 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter)
3819 int i; 3824 int i;
3820 3825
3821 for (i = 0; i < adapter->num_rx_queues; i++) 3826 for (i = 0; i < adapter->num_rx_queues; i++)
3822 igb_free_rx_resources(adapter->rx_ring[i]); 3827 if (adapter->rx_ring[i])
3828 igb_free_rx_resources(adapter->rx_ring[i]);
3823} 3829}
3824 3830
3825/** 3831/**
@@ -3874,7 +3880,8 @@ static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
3874 int i; 3880 int i;
3875 3881
3876 for (i = 0; i < adapter->num_rx_queues; i++) 3882 for (i = 0; i < adapter->num_rx_queues; i++)
3877 igb_clean_rx_ring(adapter->rx_ring[i]); 3883 if (adapter->rx_ring[i])
3884 igb_clean_rx_ring(adapter->rx_ring[i]);
3878} 3885}
3879 3886
3880/** 3887/**
@@ -6537,6 +6544,9 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
6537 if (unlikely(page_to_nid(page) != numa_node_id())) 6544 if (unlikely(page_to_nid(page) != numa_node_id()))
6538 return false; 6545 return false;
6539 6546
6547 if (unlikely(page->pfmemalloc))
6548 return false;
6549
6540#if (PAGE_SIZE < 8192) 6550#if (PAGE_SIZE < 8192)
6541 /* if we are only owner of page we can reuse it */ 6551 /* if we are only owner of page we can reuse it */
6542 if (unlikely(page_count(page) != 1)) 6552 if (unlikely(page_count(page) != 1))
@@ -6603,7 +6613,8 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
6603 memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); 6613 memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
6604 6614
6605 /* we can reuse buffer as-is, just make sure it is local */ 6615 /* we can reuse buffer as-is, just make sure it is local */
6606 if (likely(page_to_nid(page) == numa_node_id())) 6616 if (likely((page_to_nid(page) == numa_node_id()) &&
6617 !page->pfmemalloc))
6607 return true; 6618 return true;
6608 6619
6609 /* this page cannot be reused so discard it */ 6620 /* this page cannot be reused so discard it */
@@ -7400,6 +7411,8 @@ static int igb_resume(struct device *dev)
7400 pci_restore_state(pdev); 7411 pci_restore_state(pdev);
7401 pci_save_state(pdev); 7412 pci_save_state(pdev);
7402 7413
7414 if (!pci_device_is_present(pdev))
7415 return -ENODEV;
7403 err = pci_enable_device_mem(pdev); 7416 err = pci_enable_device_mem(pdev);
7404 if (err) { 7417 if (err) {
7405 dev_err(&pdev->dev, 7418 dev_err(&pdev->dev,
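The igb hunks above all apply the same defensive pattern: per-queue pointers (q_vector[], tx_ring[], rx_ring[]) may legitimately be NULL after a partial allocation failure or an earlier teardown, so every cleanup loop now checks the slot before touching it. The short sketch below shows the shape of that pattern, with plain malloc/free standing in for the driver's ring allocations.

#include <stdio.h>
#include <stdlib.h>

#define NUM_QUEUES 4

struct ring { int id; };

static void free_all_rings(struct ring *rings[], int n)
{
        int i;

        for (i = 0; i < n; i++)
                if (rings[i]) {              /* skip slots that were never set up */
                        free(rings[i]);
                        rings[i] = NULL;     /* make repeated teardown harmless */
                }
}

int main(void)
{
        struct ring *rings[NUM_QUEUES] = { NULL };
        int i;

        for (i = 0; i < NUM_QUEUES; i += 2) {    /* only some queues allocated */
                rings[i] = malloc(sizeof(*rings[i]));
                if (rings[i])
                        rings[i]->id = i;
        }
        free_all_rings(rings, NUM_QUEUES);
        free_all_rings(rings, NUM_QUEUES);       /* second pass is a no-op */
        printf("teardown ok\n");
        return 0;
}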
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
index 3ce4a258f945..0ae038b9af90 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c
@@ -342,12 +342,16 @@ static int ixgbe_set_settings(struct net_device *netdev,
342 if (old == advertised) 342 if (old == advertised)
343 return err; 343 return err;
344 /* this sets the link speed and restarts auto-neg */ 344 /* this sets the link speed and restarts auto-neg */
345 while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state))
346 usleep_range(1000, 2000);
347
345 hw->mac.autotry_restart = true; 348 hw->mac.autotry_restart = true;
346 err = hw->mac.ops.setup_link(hw, advertised, true); 349 err = hw->mac.ops.setup_link(hw, advertised, true);
347 if (err) { 350 if (err) {
348 e_info(probe, "setup link failed with code %d\n", err); 351 e_info(probe, "setup link failed with code %d\n", err);
349 hw->mac.ops.setup_link(hw, old, true); 352 hw->mac.ops.setup_link(hw, old, true);
350 } 353 }
354 clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state);
351 } else { 355 } else {
352 /* in this case we currently only support 10Gb/FULL */ 356 /* in this case we currently only support 10Gb/FULL */
353 u32 speed = ethtool_cmd_speed(ecmd); 357 u32 speed = ethtool_cmd_speed(ecmd);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index fec5212d4337..cc51554c9e99 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -3936,8 +3936,8 @@ void ixgbe_set_rx_mode(struct net_device *netdev)
3936 * if SR-IOV and VMDQ are disabled - otherwise ensure 3936 * if SR-IOV and VMDQ are disabled - otherwise ensure
3937 * that hardware VLAN filters remain enabled. 3937 * that hardware VLAN filters remain enabled.
3938 */ 3938 */
3939 if (!(adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED | 3939 if (adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED |
3940 IXGBE_FLAG_SRIOV_ENABLED))) 3940 IXGBE_FLAG_SRIOV_ENABLED))
3941 vlnctrl |= (IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN); 3941 vlnctrl |= (IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN);
3942 } else { 3942 } else {
3943 if (netdev->flags & IFF_ALLMULTI) { 3943 if (netdev->flags & IFF_ALLMULTI) {
@@ -4321,8 +4321,8 @@ static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
4321 IXGBE_CB(skb)->page_released = false; 4321 IXGBE_CB(skb)->page_released = false;
4322 } 4322 }
4323 dev_kfree_skb(skb); 4323 dev_kfree_skb(skb);
4324 rx_buffer->skb = NULL;
4324 } 4325 }
4325 rx_buffer->skb = NULL;
4326 if (rx_buffer->dma) 4326 if (rx_buffer->dma)
4327 dma_unmap_page(dev, rx_buffer->dma, 4327 dma_unmap_page(dev, rx_buffer->dma,
4328 ixgbe_rx_pg_size(rx_ring), 4328 ixgbe_rx_pg_size(rx_ring),
@@ -7669,6 +7669,8 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
7669 return -EOPNOTSUPP; 7669 return -EOPNOTSUPP;
7670 7670
7671 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC); 7671 br_spec = nlmsg_find_attr(nlh, sizeof(struct ifinfomsg), IFLA_AF_SPEC);
7672 if (!br_spec)
7673 return -EINVAL;
7672 7674
7673 nla_for_each_nested(attr, br_spec, rem) { 7675 nla_for_each_nested(attr, br_spec, rem) {
7674 __u16 mode; 7676 __u16 mode;
@@ -7677,6 +7679,9 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev,
7677 if (nla_type(attr) != IFLA_BRIDGE_MODE) 7679 if (nla_type(attr) != IFLA_BRIDGE_MODE)
7678 continue; 7680 continue;
7679 7681
7682 if (nla_len(attr) < sizeof(mode))
7683 return -EINVAL;
7684
7680 mode = nla_get_u16(attr); 7685 mode = nla_get_u16(attr);
7681 if (mode == BRIDGE_MODE_VEPA) { 7686 if (mode == BRIDGE_MODE_VEPA) {
7682 reg = 0; 7687 reg = 0;
@@ -7979,6 +7984,7 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
7979 int i, err, pci_using_dac, expected_gts; 7984 int i, err, pci_using_dac, expected_gts;
7980 unsigned int indices = MAX_TX_QUEUES; 7985 unsigned int indices = MAX_TX_QUEUES;
7981 u8 part_str[IXGBE_PBANUM_LENGTH]; 7986 u8 part_str[IXGBE_PBANUM_LENGTH];
7987 bool disable_dev = false;
7982#ifdef IXGBE_FCOE 7988#ifdef IXGBE_FCOE
7983 u16 device_caps; 7989 u16 device_caps;
7984#endif 7990#endif
@@ -8369,13 +8375,14 @@ err_sw_init:
8369 iounmap(adapter->io_addr); 8375 iounmap(adapter->io_addr);
8370 kfree(adapter->mac_table); 8376 kfree(adapter->mac_table);
8371err_ioremap: 8377err_ioremap:
8378 disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
8372 free_netdev(netdev); 8379 free_netdev(netdev);
8373err_alloc_etherdev: 8380err_alloc_etherdev:
8374 pci_release_selected_regions(pdev, 8381 pci_release_selected_regions(pdev,
8375 pci_select_bars(pdev, IORESOURCE_MEM)); 8382 pci_select_bars(pdev, IORESOURCE_MEM));
8376err_pci_reg: 8383err_pci_reg:
8377err_dma: 8384err_dma:
8378 if (!adapter || !test_and_set_bit(__IXGBE_DISABLED, &adapter->state)) 8385 if (!adapter || disable_dev)
8379 pci_disable_device(pdev); 8386 pci_disable_device(pdev);
8380 return err; 8387 return err;
8381} 8388}
@@ -8393,6 +8400,7 @@ static void ixgbe_remove(struct pci_dev *pdev)
8393{ 8400{
8394 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); 8401 struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
8395 struct net_device *netdev = adapter->netdev; 8402 struct net_device *netdev = adapter->netdev;
8403 bool disable_dev;
8396 8404
8397 ixgbe_dbg_adapter_exit(adapter); 8405 ixgbe_dbg_adapter_exit(adapter);
8398 8406
@@ -8442,11 +8450,12 @@ static void ixgbe_remove(struct pci_dev *pdev)
8442 e_dev_info("complete\n"); 8450 e_dev_info("complete\n");
8443 8451
8444 kfree(adapter->mac_table); 8452 kfree(adapter->mac_table);
8453 disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state);
8445 free_netdev(netdev); 8454 free_netdev(netdev);
8446 8455
8447 pci_disable_pcie_error_reporting(pdev); 8456 pci_disable_pcie_error_reporting(pdev);
8448 8457
8449 if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state)) 8458 if (disable_dev)
8450 pci_disable_device(pdev); 8459 pci_disable_device(pdev);
8451} 8460}
8452 8461
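Besides the bridge-attribute validation and the SFP-init locking in ixgbe_set_settings(), the probe/remove hunks above fix an ordering bug: the adapter (and with it the __IXGBE_DISABLED state bit) lives in memory that free_netdev() releases, so test_and_set_bit() must be sampled into a local disable_dev before the free, and only the local consulted afterwards. The sketch below shows the same discipline with a C11 atomic_flag standing in for the kernel bit operation; the names are illustrative.

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <stdatomic.h>

struct adapter {
        atomic_flag disabled;            /* mirrors the __IXGBE_DISABLED bit */
};

static void disable_device(void)
{
        printf("pci_disable_device()\n");
}

int main(void)
{
        struct adapter *adapter = malloc(sizeof(*adapter));
        bool disable_dev;

        if (!adapter)
                return 1;
        atomic_flag_clear(&adapter->disabled);

        /* sample the flag while the adapter is still valid ... */
        disable_dev = !atomic_flag_test_and_set(&adapter->disabled);
        free(adapter);                   /* free_netdev() releases the adapter too */

        /* ... and only use the saved local afterwards */
        if (disable_dev)
                disable_device();
        return 0;
}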
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
index d47b19f27c35..28b81ae09b5a 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c
@@ -635,7 +635,6 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed,
635 **/ 635 **/
636s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw) 636s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
637{ 637{
638 s32 status;
639 u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; 638 u16 autoneg_reg = IXGBE_MII_AUTONEG_REG;
640 bool autoneg = false; 639 bool autoneg = false;
641 ixgbe_link_speed speed; 640 ixgbe_link_speed speed;
@@ -700,8 +699,7 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw)
700 699
701 hw->phy.ops.write_reg(hw, MDIO_CTRL1, 700 hw->phy.ops.write_reg(hw, MDIO_CTRL1,
702 MDIO_MMD_AN, autoneg_reg); 701 MDIO_MMD_AN, autoneg_reg);
703 702 return 0;
704 return status;
705} 703}
706 704
707/** 705/**
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index b151a949f352..d44560d1d268 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -1047,7 +1047,6 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
1047 int tx_index; 1047 int tx_index;
1048 struct tx_desc *desc; 1048 struct tx_desc *desc;
1049 u32 cmd_sts; 1049 u32 cmd_sts;
1050 struct sk_buff *skb;
1051 1050
1052 tx_index = txq->tx_used_desc; 1051 tx_index = txq->tx_used_desc;
1053 desc = &txq->tx_desc_area[tx_index]; 1052 desc = &txq->tx_desc_area[tx_index];
@@ -1066,19 +1065,22 @@ static int txq_reclaim(struct tx_queue *txq, int budget, int force)
1066 reclaimed++; 1065 reclaimed++;
1067 txq->tx_desc_count--; 1066 txq->tx_desc_count--;
1068 1067
1069 skb = NULL; 1068 if (!IS_TSO_HEADER(txq, desc->buf_ptr))
1070 if (cmd_sts & TX_LAST_DESC) 1069 dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
1071 skb = __skb_dequeue(&txq->tx_skb); 1070 desc->byte_cnt, DMA_TO_DEVICE);
1071
1072 if (cmd_sts & TX_ENABLE_INTERRUPT) {
1073 struct sk_buff *skb = __skb_dequeue(&txq->tx_skb);
1074
1075 if (!WARN_ON(!skb))
1076 dev_kfree_skb(skb);
1077 }
1072 1078
1073 if (cmd_sts & ERROR_SUMMARY) { 1079 if (cmd_sts & ERROR_SUMMARY) {
1074 netdev_info(mp->dev, "tx error\n"); 1080 netdev_info(mp->dev, "tx error\n");
1075 mp->dev->stats.tx_errors++; 1081 mp->dev->stats.tx_errors++;
1076 } 1082 }
1077 1083
1078 if (!IS_TSO_HEADER(txq, desc->buf_ptr))
1079 dma_unmap_single(mp->dev->dev.parent, desc->buf_ptr,
1080 desc->byte_cnt, DMA_TO_DEVICE);
1081 dev_kfree_skb(skb);
1082 } 1084 }
1083 1085
1084 __netif_tx_unlock_bh(nq); 1086 __netif_tx_unlock_bh(nq);
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index ece83f101526..fdf3e382e464 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -1692,6 +1692,7 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
1692{ 1692{
1693 struct mvpp2_prs_entry *pe; 1693 struct mvpp2_prs_entry *pe;
1694 int tid_aux, tid; 1694 int tid_aux, tid;
1695 int ret = 0;
1695 1696
1696 pe = mvpp2_prs_vlan_find(priv, tpid, ai); 1697 pe = mvpp2_prs_vlan_find(priv, tpid, ai);
1697 1698
@@ -1723,8 +1724,10 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
1723 break; 1724 break;
1724 } 1725 }
1725 1726
1726 if (tid <= tid_aux) 1727 if (tid <= tid_aux) {
1727 return -EINVAL; 1728 ret = -EINVAL;
1729 goto error;
1730 }
1728 1731
1729 memset(pe, 0 , sizeof(struct mvpp2_prs_entry)); 1732 memset(pe, 0 , sizeof(struct mvpp2_prs_entry));
1730 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN); 1733 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
@@ -1756,9 +1759,10 @@ static int mvpp2_prs_vlan_add(struct mvpp2 *priv, unsigned short tpid, int ai,
1756 1759
1757 mvpp2_prs_hw_write(priv, pe); 1760 mvpp2_prs_hw_write(priv, pe);
1758 1761
1762error:
1759 kfree(pe); 1763 kfree(pe);
1760 1764
1761 return 0; 1765 return ret;
1762} 1766}
1763 1767
1764/* Get first free double vlan ai number */ 1768/* Get first free double vlan ai number */
@@ -1821,7 +1825,7 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
1821 unsigned int port_map) 1825 unsigned int port_map)
1822{ 1826{
1823 struct mvpp2_prs_entry *pe; 1827 struct mvpp2_prs_entry *pe;
1824 int tid_aux, tid, ai; 1828 int tid_aux, tid, ai, ret = 0;
1825 1829
1826 pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2); 1830 pe = mvpp2_prs_double_vlan_find(priv, tpid1, tpid2);
1827 1831
@@ -1838,8 +1842,10 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
1838 1842
1839 /* Set ai value for new double vlan entry */ 1843 /* Set ai value for new double vlan entry */
1840 ai = mvpp2_prs_double_vlan_ai_free_get(priv); 1844 ai = mvpp2_prs_double_vlan_ai_free_get(priv);
1841 if (ai < 0) 1845 if (ai < 0) {
1842 return ai; 1846 ret = ai;
1847 goto error;
1848 }
1843 1849
1844 /* Get first single/triple vlan tid */ 1850 /* Get first single/triple vlan tid */
1845 for (tid_aux = MVPP2_PE_FIRST_FREE_TID; 1851 for (tid_aux = MVPP2_PE_FIRST_FREE_TID;
@@ -1859,8 +1865,10 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
1859 break; 1865 break;
1860 } 1866 }
1861 1867
1862 if (tid >= tid_aux) 1868 if (tid >= tid_aux) {
1863 return -ERANGE; 1869 ret = -ERANGE;
1870 goto error;
1871 }
1864 1872
1865 memset(pe, 0, sizeof(struct mvpp2_prs_entry)); 1873 memset(pe, 0, sizeof(struct mvpp2_prs_entry));
1866 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN); 1874 mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_VLAN);
@@ -1887,8 +1895,9 @@ static int mvpp2_prs_double_vlan_add(struct mvpp2 *priv, unsigned short tpid1,
1887 mvpp2_prs_tcam_port_map_set(pe, port_map); 1895 mvpp2_prs_tcam_port_map_set(pe, port_map);
1888 mvpp2_prs_hw_write(priv, pe); 1896 mvpp2_prs_hw_write(priv, pe);
1889 1897
1898error:
1890 kfree(pe); 1899 kfree(pe);
1891 return 0; 1900 return ret;
1892} 1901}
1893 1902
1894/* IPv4 header parsing for fragmentation and L4 offset */ 1903/* IPv4 header parsing for fragmentation and L4 offset */
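The mvpp2 changes above convert two early "return -EINVAL / -ERANGE" paths into jumps to a shared error label so that the mvpp2_prs_entry allocated (or looked up) earlier is always freed and the right status is still returned. The sketch below shows that single-exit cleanup shape in plain C with malloc/free; the validation condition is invented for illustration.

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

static int add_entry(int tid, int tid_aux)
{
        char *pe = malloc(64);           /* stands in for the parser entry */
        int ret = 0;

        if (!pe)
                return -ENOMEM;

        if (tid <= tid_aux) {            /* validation failure after allocation */
                ret = -EINVAL;
                goto error;
        }

        /* ... program the entry here ... */

error:
        free(pe);                        /* reached on both success and failure */
        return ret;
}

int main(void)
{
        printf("ok=%d bad=%d\n", add_entry(5, 3), add_entry(2, 3));
        return 0;
}

add_entry(5, 3) returns 0 while add_entry(2, 3) returns -EINVAL, and both paths free the buffer, which is exactly the leak the driver fix closes.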
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index f3032fec8fce..4d69e382b4e5 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -1693,7 +1693,7 @@ int mlx4_en_start_port(struct net_device *dev)
1693 mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap); 1693 mlx4_set_stats_bitmap(mdev->dev, &priv->stats_bitmap);
1694 1694
1695#ifdef CONFIG_MLX4_EN_VXLAN 1695#ifdef CONFIG_MLX4_EN_VXLAN
1696 if (priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS) 1696 if (priv->mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
1697 vxlan_get_rx_port(dev); 1697 vxlan_get_rx_port(dev);
1698#endif 1698#endif
1699 priv->port_up = true; 1699 priv->port_up = true;
@@ -2281,8 +2281,16 @@ static void mlx4_en_add_vxlan_offloads(struct work_struct *work)
2281 ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, 2281 ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
2282 VXLAN_STEER_BY_OUTER_MAC, 1); 2282 VXLAN_STEER_BY_OUTER_MAC, 1);
2283out: 2283out:
2284 if (ret) 2284 if (ret) {
2285 en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret); 2285 en_err(priv, "failed setting L2 tunnel configuration ret %d\n", ret);
2286 return;
2287 }
2288
2289 /* set offloads */
2290 priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
2291 NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
2292 priv->dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
2293 priv->dev->features |= NETIF_F_GSO_UDP_TUNNEL;
2286} 2294}
2287 2295
2288static void mlx4_en_del_vxlan_offloads(struct work_struct *work) 2296static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
@@ -2290,6 +2298,11 @@ static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
2290 int ret; 2298 int ret;
2291 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv, 2299 struct mlx4_en_priv *priv = container_of(work, struct mlx4_en_priv,
2292 vxlan_del_task); 2300 vxlan_del_task);
2301 /* unset offloads */
2302 priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
2303 NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL);
2304 priv->dev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL;
2305 priv->dev->features &= ~NETIF_F_GSO_UDP_TUNNEL;
2293 2306
2294 ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port, 2307 ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
2295 VXLAN_STEER_BY_OUTER_MAC, 0); 2308 VXLAN_STEER_BY_OUTER_MAC, 0);
@@ -2342,6 +2355,11 @@ static void mlx4_en_del_vxlan_port(struct net_device *dev,
2342 2355
2343 queue_work(priv->mdev->workqueue, &priv->vxlan_del_task); 2356 queue_work(priv->mdev->workqueue, &priv->vxlan_del_task);
2344} 2357}
2358
2359static bool mlx4_en_gso_check(struct sk_buff *skb, struct net_device *dev)
2360{
2361 return vxlan_gso_check(skb);
2362}
2345#endif 2363#endif
2346 2364
2347static const struct net_device_ops mlx4_netdev_ops = { 2365static const struct net_device_ops mlx4_netdev_ops = {
@@ -2373,6 +2391,7 @@ static const struct net_device_ops mlx4_netdev_ops = {
2373#ifdef CONFIG_MLX4_EN_VXLAN 2391#ifdef CONFIG_MLX4_EN_VXLAN
2374 .ndo_add_vxlan_port = mlx4_en_add_vxlan_port, 2392 .ndo_add_vxlan_port = mlx4_en_add_vxlan_port,
2375 .ndo_del_vxlan_port = mlx4_en_del_vxlan_port, 2393 .ndo_del_vxlan_port = mlx4_en_del_vxlan_port,
2394 .ndo_gso_check = mlx4_en_gso_check,
2376#endif 2395#endif
2377}; 2396};
2378 2397
@@ -2403,6 +2422,11 @@ static const struct net_device_ops mlx4_netdev_ops_master = {
2403 .ndo_rx_flow_steer = mlx4_en_filter_rfs, 2422 .ndo_rx_flow_steer = mlx4_en_filter_rfs,
2404#endif 2423#endif
2405 .ndo_get_phys_port_id = mlx4_en_get_phys_port_id, 2424 .ndo_get_phys_port_id = mlx4_en_get_phys_port_id,
2425#ifdef CONFIG_MLX4_EN_VXLAN
2426 .ndo_add_vxlan_port = mlx4_en_add_vxlan_port,
2427 .ndo_del_vxlan_port = mlx4_en_del_vxlan_port,
2428 .ndo_gso_check = mlx4_en_gso_check,
2429#endif
2406}; 2430};
2407 2431
2408int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port, 2432int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
@@ -2568,13 +2592,6 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2568 if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0) 2592 if (mdev->dev->caps.steering_mode != MLX4_STEERING_MODE_A0)
2569 dev->priv_flags |= IFF_UNICAST_FLT; 2593 dev->priv_flags |= IFF_UNICAST_FLT;
2570 2594
2571 if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
2572 dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
2573 NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
2574 dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
2575 dev->features |= NETIF_F_GSO_UDP_TUNNEL;
2576 }
2577
2578 mdev->pndev[port] = dev; 2595 mdev->pndev[port] = dev;
2579 2596
2580 netif_carrier_off(dev); 2597 netif_carrier_off(dev);
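The en_netdev.c changes above stop advertising the VXLAN-related NETIF_F_* bits at probe time; instead they are switched on in the add-port work handler only after mlx4_SET_PORT_VXLAN succeeds, and switched off again in the del-port handler. The intent reads as a symmetric enable/disable of one feature mask; a condensed sketch under that reading (types and flag names are illustrative, not mlx4's):

    #include <stdbool.h>
    #include <stdint.h>

    #define F_IP_CSUM        (1u << 0)
    #define F_RXCSUM         (1u << 1)
    #define F_TSO            (1u << 2)
    #define F_GSO_UDP_TUNNEL (1u << 3)

    struct fake_netdev { uint32_t features; };

    static const uint32_t TUNNEL_FEATURES =
            F_IP_CSUM | F_RXCSUM | F_TSO | F_GSO_UDP_TUNNEL;

    /* Only advertise tunnel offloads while the hardware steering for the
     * tunnel port is actually programmed; clear them before it is removed.
     */
    static void tunnel_offload_set(struct fake_netdev *dev, bool enabled)
    {
            if (enabled)
                    dev->features |= TUNNEL_FEATURES;
            else
                    dev->features &= ~TUNNEL_FEATURES;
    }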
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 34c137878545..454d9fea640e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -836,8 +836,11 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
836 * whether LSO is used */ 836 * whether LSO is used */
837 tx_desc->ctrl.srcrb_flags = priv->ctrl_flags; 837 tx_desc->ctrl.srcrb_flags = priv->ctrl_flags;
838 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { 838 if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
839 tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM | 839 if (!skb->encapsulation)
840 MLX4_WQE_CTRL_TCP_UDP_CSUM); 840 tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM |
841 MLX4_WQE_CTRL_TCP_UDP_CSUM);
842 else
843 tx_desc->ctrl.srcrb_flags |= cpu_to_be32(MLX4_WQE_CTRL_IP_CSUM);
841 ring->tx_csum++; 844 ring->tx_csum++;
842 } 845 }
843 846
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index a49c9d11d8a5..49290a405903 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -1026,6 +1026,7 @@ static void mlx4_free_eq(struct mlx4_dev *dev,
1026 pr_cont("\n"); 1026 pr_cont("\n");
1027 } 1027 }
1028 } 1028 }
1029 synchronize_irq(eq->irq);
1029 1030
1030 mlx4_mtt_cleanup(dev, &eq->mtt); 1031 mlx4_mtt_cleanup(dev, &eq->mtt);
1031 for (i = 0; i < npages; ++i) 1032 for (i = 0; i < npages; ++i)
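Both the mlx4 and mlx5 EQ teardown hunks in this series add a synchronize_irq() call before the queue's memory is released, so a handler still running on another CPU cannot touch buffers that are about to be freed. A rough kernel-context sketch of that ordering (the structure and the disarm step are illustrative):

    #include <linux/interrupt.h>
    #include <linux/dma-mapping.h>

    struct my_eq {
            struct device *dev;
            unsigned int   irq;      /* Linux IRQ number backing this EQ */
            void          *buf;
            dma_addr_t     dma;
            size_t         buf_size;
    };

    static void my_eq_teardown(struct my_eq *eq)
    {
            /* 1. Tell the device to stop generating events for this EQ
             *    (device-specific command, omitted here).
             */

            /* 2. Wait for any handler already running on this IRQ to return,
             *    so it cannot dereference eq->buf after step 3.
             */
            synchronize_irq(eq->irq);

            /* 3. Only now is it safe to free the backing memory. */
            dma_free_coherent(eq->dev, eq->buf_size, eq->buf, eq->dma);
    }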
diff --git a/drivers/net/ethernet/mellanox/mlx4/mcg.c b/drivers/net/ethernet/mellanox/mlx4/mcg.c
index ca0f98c95105..872843179f44 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mcg.c
+++ b/drivers/net/ethernet/mellanox/mlx4/mcg.c
@@ -955,6 +955,10 @@ static void mlx4_err_rule(struct mlx4_dev *dev, char *str,
955 cur->ib.dst_gid_msk); 955 cur->ib.dst_gid_msk);
956 break; 956 break;
957 957
958 case MLX4_NET_TRANS_RULE_ID_VXLAN:
959 len += snprintf(buf + len, BUF_SIZE - len,
960 "VNID = %d ", be32_to_cpu(cur->vxlan.vni));
961 break;
958 case MLX4_NET_TRANS_RULE_ID_IPV6: 962 case MLX4_NET_TRANS_RULE_ID_IPV6:
959 break; 963 break;
960 964
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 5d2498dcf536..cd5cf6d957c7 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -1546,7 +1546,7 @@ static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1546 1546
1547 switch (op) { 1547 switch (op) {
1548 case RES_OP_RESERVE: 1548 case RES_OP_RESERVE:
1549 count = get_param_l(&in_param); 1549 count = get_param_l(&in_param) & 0xffffff;
1550 align = get_param_h(&in_param); 1550 align = get_param_h(&in_param);
1551 err = mlx4_grant_resource(dev, slave, RES_QP, count, 0); 1551 err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
1552 if (err) 1552 if (err)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eq.c b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
index ed53291468f3..ad2c96a02a53 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/eq.c
@@ -374,15 +374,14 @@ int mlx5_create_map_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq, u8 vecidx,
374 snprintf(eq->name, MLX5_MAX_EQ_NAME, "%s@pci:%s", 374 snprintf(eq->name, MLX5_MAX_EQ_NAME, "%s@pci:%s",
375 name, pci_name(dev->pdev)); 375 name, pci_name(dev->pdev));
376 eq->eqn = out.eq_number; 376 eq->eqn = out.eq_number;
377 eq->irqn = vecidx;
378 eq->dev = dev;
379 eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;
377 err = request_irq(table->msix_arr[vecidx].vector, mlx5_msix_handler, 0, 380 err = request_irq(table->msix_arr[vecidx].vector, mlx5_msix_handler, 0,
378 eq->name, eq); 381 eq->name, eq);
379 if (err) 382 if (err)
380 goto err_eq; 383 goto err_eq;
381 384
382 eq->irqn = vecidx;
383 eq->dev = dev;
384 eq->doorbell = uar->map + MLX5_EQ_DOORBEL_OFFSET;
385
386 err = mlx5_debug_eq_add(dev, eq); 385 err = mlx5_debug_eq_add(dev, eq);
387 if (err) 386 if (err)
388 goto err_irq; 387 goto err_irq;
@@ -420,6 +419,7 @@ int mlx5_destroy_unmap_eq(struct mlx5_core_dev *dev, struct mlx5_eq *eq)
420 if (err) 419 if (err)
421 mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n", 420 mlx5_core_warn(dev, "failed to destroy a previously created eq: eqn %d\n",
422 eq->eqn); 421 eq->eqn);
422 synchronize_irq(table->msix_arr[eq->irqn].vector);
423 mlx5_buf_free(dev, &eq->buf); 423 mlx5_buf_free(dev, &eq->buf);
424 424
425 return err; 425 return err;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index 3d8e8e489b2d..71b10b210792 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -864,14 +864,14 @@ static int init_one(struct pci_dev *pdev,
864 dev->profile = &profile[prof_sel]; 864 dev->profile = &profile[prof_sel];
865 dev->event = mlx5_core_event; 865 dev->event = mlx5_core_event;
866 866
867 INIT_LIST_HEAD(&priv->ctx_list);
868 spin_lock_init(&priv->ctx_lock);
867 err = mlx5_dev_init(dev, pdev); 869 err = mlx5_dev_init(dev, pdev);
868 if (err) { 870 if (err) {
869 dev_err(&pdev->dev, "mlx5_dev_init failed %d\n", err); 871 dev_err(&pdev->dev, "mlx5_dev_init failed %d\n", err);
870 goto out; 872 goto out;
871 } 873 }
872 874
873 INIT_LIST_HEAD(&priv->ctx_list);
874 spin_lock_init(&priv->ctx_lock);
875 err = mlx5_register_device(dev); 875 err = mlx5_register_device(dev);
876 if (err) { 876 if (err) {
877 dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err); 877 dev_err(&pdev->dev, "mlx5_register_device failed %d\n", err);
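The mlx5 main.c hunk above moves INIT_LIST_HEAD()/spin_lock_init() in front of mlx5_dev_init(): once device init can deliver events, the context list and its lock must already be usable. The general rule is to initialize a lock and the data it protects before publishing anything that can reach them; a small kernel-style sketch with made-up names:

    #include <linux/spinlock.h>
    #include <linux/list.h>

    struct my_priv {
            struct list_head ctx_list;
            spinlock_t       ctx_lock;
    };

    /* Event callback that may run as soon as the device is brought up. */
    static void my_event(struct my_priv *priv)
    {
            spin_lock(&priv->ctx_lock);          /* must already be initialized */
            /* ... walk priv->ctx_list ... */
            spin_unlock(&priv->ctx_lock);
    }

    static int my_probe(struct my_priv *priv)
    {
            INIT_LIST_HEAD(&priv->ctx_list);     /* before anything can call my_event() */
            spin_lock_init(&priv->ctx_lock);

            /* bring the device up; events may now arrive at any time */
            return 0;
    }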
diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
index 0b2a1ccd276d..613037584d08 100644
--- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
+++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
@@ -2762,7 +2762,8 @@ netxen_fw_poll_work(struct work_struct *work)
2762 if (test_bit(__NX_RESETTING, &adapter->state)) 2762 if (test_bit(__NX_RESETTING, &adapter->state))
2763 goto reschedule; 2763 goto reschedule;
2764 2764
2765 if (test_bit(__NX_DEV_UP, &adapter->state)) { 2765 if (test_bit(__NX_DEV_UP, &adapter->state) &&
2766 !(adapter->capabilities & NX_FW_CAPABILITY_LINK_NOTIFICATION)) {
2766 if (!adapter->has_link_events) { 2767 if (!adapter->has_link_events) {
2767 2768
2768 netxen_nic_handle_phy_intr(adapter); 2769 netxen_nic_handle_phy_intr(adapter);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
index f5e29f7bdae3..a913b3ad2f89 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_main.c
@@ -503,6 +503,11 @@ static void qlcnic_del_vxlan_port(struct net_device *netdev,
503 503
504 adapter->flags |= QLCNIC_DEL_VXLAN_PORT; 504 adapter->flags |= QLCNIC_DEL_VXLAN_PORT;
505} 505}
506
507static bool qlcnic_gso_check(struct sk_buff *skb, struct net_device *dev)
508{
509 return vxlan_gso_check(skb);
510}
506#endif 511#endif
507 512
508static const struct net_device_ops qlcnic_netdev_ops = { 513static const struct net_device_ops qlcnic_netdev_ops = {
@@ -526,6 +531,7 @@ static const struct net_device_ops qlcnic_netdev_ops = {
526#ifdef CONFIG_QLCNIC_VXLAN 531#ifdef CONFIG_QLCNIC_VXLAN
527 .ndo_add_vxlan_port = qlcnic_add_vxlan_port, 532 .ndo_add_vxlan_port = qlcnic_add_vxlan_port,
528 .ndo_del_vxlan_port = qlcnic_del_vxlan_port, 533 .ndo_del_vxlan_port = qlcnic_del_vxlan_port,
534 .ndo_gso_check = qlcnic_gso_check,
529#endif 535#endif
530#ifdef CONFIG_NET_POLL_CONTROLLER 536#ifdef CONFIG_NET_POLL_CONTROLLER
531 .ndo_poll_controller = qlcnic_poll_controller, 537 .ndo_poll_controller = qlcnic_poll_controller,
diff --git a/drivers/net/ethernet/qualcomm/Kconfig b/drivers/net/ethernet/qualcomm/Kconfig
index f3a47147937d..9a49f42ac2ba 100644
--- a/drivers/net/ethernet/qualcomm/Kconfig
+++ b/drivers/net/ethernet/qualcomm/Kconfig
@@ -5,7 +5,6 @@
5config NET_VENDOR_QUALCOMM 5config NET_VENDOR_QUALCOMM
6 bool "Qualcomm devices" 6 bool "Qualcomm devices"
7 default y 7 default y
8 depends on SPI_MASTER && OF_GPIO
9 ---help--- 8 ---help---
10 If you have a network (Ethernet) card belonging to this class, say Y 9 If you have a network (Ethernet) card belonging to this class, say Y
11 and read the Ethernet-HOWTO, available from 10 and read the Ethernet-HOWTO, available from
@@ -20,7 +19,7 @@ if NET_VENDOR_QUALCOMM
20 19
21config QCA7000 20config QCA7000
22 tristate "Qualcomm Atheros QCA7000 support" 21 tristate "Qualcomm Atheros QCA7000 support"
23 depends on SPI_MASTER && OF_GPIO 22 depends on SPI_MASTER && OF
24 ---help--- 23 ---help---
25 This SPI protocol driver supports the Qualcomm Atheros QCA7000. 24 This SPI protocol driver supports the Qualcomm Atheros QCA7000.
26 25
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index 60e9c2cd051e..b5db6b3f939f 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -917,21 +917,13 @@ static int sh_eth_reset(struct net_device *ndev)
917 return ret; 917 return ret;
918} 918}
919 919
920#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
921static void sh_eth_set_receive_align(struct sk_buff *skb) 920static void sh_eth_set_receive_align(struct sk_buff *skb)
922{ 921{
923 int reserve; 922 uintptr_t reserve = (uintptr_t)skb->data & (SH_ETH_RX_ALIGN - 1);
924 923
925 reserve = SH4_SKB_RX_ALIGN - ((u32)skb->data & (SH4_SKB_RX_ALIGN - 1));
926 if (reserve) 924 if (reserve)
927 skb_reserve(skb, reserve); 925 skb_reserve(skb, SH_ETH_RX_ALIGN - reserve);
928} 926}
929#else
930static void sh_eth_set_receive_align(struct sk_buff *skb)
931{
932 skb_reserve(skb, SH2_SH3_SKB_RX_ALIGN);
933}
934#endif
935 927
936 928
937/* CPU <-> EDMAC endian convert */ 929/* CPU <-> EDMAC endian convert */
@@ -1119,6 +1111,7 @@ static void sh_eth_ring_format(struct net_device *ndev)
1119 struct sh_eth_txdesc *txdesc = NULL; 1111 struct sh_eth_txdesc *txdesc = NULL;
1120 int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring; 1112 int rx_ringsize = sizeof(*rxdesc) * mdp->num_rx_ring;
1121 int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring; 1113 int tx_ringsize = sizeof(*txdesc) * mdp->num_tx_ring;
1114 int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
1122 1115
1123 mdp->cur_rx = 0; 1116 mdp->cur_rx = 0;
1124 mdp->cur_tx = 0; 1117 mdp->cur_tx = 0;
@@ -1131,21 +1124,21 @@ static void sh_eth_ring_format(struct net_device *ndev)
1131 for (i = 0; i < mdp->num_rx_ring; i++) { 1124 for (i = 0; i < mdp->num_rx_ring; i++) {
1132 /* skb */ 1125 /* skb */
1133 mdp->rx_skbuff[i] = NULL; 1126 mdp->rx_skbuff[i] = NULL;
1134 skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz); 1127 skb = netdev_alloc_skb(ndev, skbuff_size);
1135 mdp->rx_skbuff[i] = skb; 1128 mdp->rx_skbuff[i] = skb;
1136 if (skb == NULL) 1129 if (skb == NULL)
1137 break; 1130 break;
1138 dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
1139 DMA_FROM_DEVICE);
1140 sh_eth_set_receive_align(skb); 1131 sh_eth_set_receive_align(skb);
1141 1132
1142 /* RX descriptor */ 1133 /* RX descriptor */
1143 rxdesc = &mdp->rx_ring[i]; 1134 rxdesc = &mdp->rx_ring[i];
1135 /* The size of the buffer is a multiple of 16 bytes. */
1136 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
1137 dma_map_single(&ndev->dev, skb->data, rxdesc->buffer_length,
1138 DMA_FROM_DEVICE);
1144 rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4)); 1139 rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
1145 rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP); 1140 rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);
1146 1141
1147 /* The size of the buffer is 16 byte boundary. */
1148 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
1149 /* Rx descriptor address set */ 1142 /* Rx descriptor address set */
1150 if (i == 0) { 1143 if (i == 0) {
1151 sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR); 1144 sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
@@ -1397,6 +1390,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1397 struct sk_buff *skb; 1390 struct sk_buff *skb;
1398 u16 pkt_len = 0; 1391 u16 pkt_len = 0;
1399 u32 desc_status; 1392 u32 desc_status;
1393 int skbuff_size = mdp->rx_buf_sz + SH_ETH_RX_ALIGN - 1;
1400 1394
1401 rxdesc = &mdp->rx_ring[entry]; 1395 rxdesc = &mdp->rx_ring[entry];
1402 while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) { 1396 while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
@@ -1448,7 +1442,7 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1448 if (mdp->cd->rpadir) 1442 if (mdp->cd->rpadir)
1449 skb_reserve(skb, NET_IP_ALIGN); 1443 skb_reserve(skb, NET_IP_ALIGN);
1450 dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr, 1444 dma_sync_single_for_cpu(&ndev->dev, rxdesc->addr,
1451 mdp->rx_buf_sz, 1445 ALIGN(mdp->rx_buf_sz, 16),
1452 DMA_FROM_DEVICE); 1446 DMA_FROM_DEVICE);
1453 skb_put(skb, pkt_len); 1447 skb_put(skb, pkt_len);
1454 skb->protocol = eth_type_trans(skb, ndev); 1448 skb->protocol = eth_type_trans(skb, ndev);
@@ -1468,13 +1462,13 @@ static int sh_eth_rx(struct net_device *ndev, u32 intr_status, int *quota)
1468 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16); 1462 rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
1469 1463
1470 if (mdp->rx_skbuff[entry] == NULL) { 1464 if (mdp->rx_skbuff[entry] == NULL) {
1471 skb = netdev_alloc_skb(ndev, mdp->rx_buf_sz); 1465 skb = netdev_alloc_skb(ndev, skbuff_size);
1472 mdp->rx_skbuff[entry] = skb; 1466 mdp->rx_skbuff[entry] = skb;
1473 if (skb == NULL) 1467 if (skb == NULL)
1474 break; /* Better luck next round. */ 1468 break; /* Better luck next round. */
1475 dma_map_single(&ndev->dev, skb->data, mdp->rx_buf_sz,
1476 DMA_FROM_DEVICE);
1477 sh_eth_set_receive_align(skb); 1469 sh_eth_set_receive_align(skb);
1470 dma_map_single(&ndev->dev, skb->data,
1471 rxdesc->buffer_length, DMA_FROM_DEVICE);
1478 1472
1479 skb_checksum_none_assert(skb); 1473 skb_checksum_none_assert(skb);
1480 rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4)); 1474 rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
@@ -2042,6 +2036,8 @@ static int sh_eth_open(struct net_device *ndev)
2042 if (ret) 2036 if (ret)
2043 goto out_free_irq; 2037 goto out_free_irq;
2044 2038
2039 mdp->is_opened = 1;
2040
2045 return ret; 2041 return ret;
2046 2042
2047out_free_irq: 2043out_free_irq:
@@ -2131,6 +2127,36 @@ static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
2131 return NETDEV_TX_OK; 2127 return NETDEV_TX_OK;
2132} 2128}
2133 2129
2130static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
2131{
2132 struct sh_eth_private *mdp = netdev_priv(ndev);
2133
2134 if (sh_eth_is_rz_fast_ether(mdp))
2135 return &ndev->stats;
2136
2137 if (!mdp->is_opened)
2138 return &ndev->stats;
2139
2140 ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
2141 sh_eth_write(ndev, 0, TROCR); /* (write clear) */
2142 ndev->stats.collisions += sh_eth_read(ndev, CDCR);
2143 sh_eth_write(ndev, 0, CDCR); /* (write clear) */
2144 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
2145 sh_eth_write(ndev, 0, LCCR); /* (write clear) */
2146
2147 if (sh_eth_is_gether(mdp)) {
2148 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
2149 sh_eth_write(ndev, 0, CERCR); /* (write clear) */
2150 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
2151 sh_eth_write(ndev, 0, CEECR); /* (write clear) */
2152 } else {
2153 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
2154 sh_eth_write(ndev, 0, CNDCR); /* (write clear) */
2155 }
2156
2157 return &ndev->stats;
2158}
2159
2134/* device close function */ 2160/* device close function */
2135static int sh_eth_close(struct net_device *ndev) 2161static int sh_eth_close(struct net_device *ndev)
2136{ 2162{
@@ -2145,6 +2171,7 @@ static int sh_eth_close(struct net_device *ndev)
2145 sh_eth_write(ndev, 0, EDTRR); 2171 sh_eth_write(ndev, 0, EDTRR);
2146 sh_eth_write(ndev, 0, EDRRR); 2172 sh_eth_write(ndev, 0, EDRRR);
2147 2173
2174 sh_eth_get_stats(ndev);
2148 /* PHY Disconnect */ 2175 /* PHY Disconnect */
2149 if (mdp->phydev) { 2176 if (mdp->phydev) {
2150 phy_stop(mdp->phydev); 2177 phy_stop(mdp->phydev);
@@ -2163,36 +2190,9 @@ static int sh_eth_close(struct net_device *ndev)
2163 2190
2164 pm_runtime_put_sync(&mdp->pdev->dev); 2191 pm_runtime_put_sync(&mdp->pdev->dev);
2165 2192
2166 return 0; 2193 mdp->is_opened = 0;
2167}
2168
2169static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
2170{
2171 struct sh_eth_private *mdp = netdev_priv(ndev);
2172
2173 if (sh_eth_is_rz_fast_ether(mdp))
2174 return &ndev->stats;
2175 2194
2176 pm_runtime_get_sync(&mdp->pdev->dev); 2195 return 0;
2177
2178 ndev->stats.tx_dropped += sh_eth_read(ndev, TROCR);
2179 sh_eth_write(ndev, 0, TROCR); /* (write clear) */
2180 ndev->stats.collisions += sh_eth_read(ndev, CDCR);
2181 sh_eth_write(ndev, 0, CDCR); /* (write clear) */
2182 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, LCCR);
2183 sh_eth_write(ndev, 0, LCCR); /* (write clear) */
2184 if (sh_eth_is_gether(mdp)) {
2185 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CERCR);
2186 sh_eth_write(ndev, 0, CERCR); /* (write clear) */
2187 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CEECR);
2188 sh_eth_write(ndev, 0, CEECR); /* (write clear) */
2189 } else {
2190 ndev->stats.tx_carrier_errors += sh_eth_read(ndev, CNDCR);
2191 sh_eth_write(ndev, 0, CNDCR); /* (write clear) */
2192 }
2193 pm_runtime_put_sync(&mdp->pdev->dev);
2194
2195 return &ndev->stats;
2196} 2196}
2197 2197
2198/* ioctl to device function */ 2198/* ioctl to device function */
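The sh_eth hunks above move the counter reads into sh_eth_get_stats() behind a new is_opened flag, and flush them once more from sh_eth_close(), because touching TROCR/CDCR/LCCR while the interface (and its clock) is down is unsafe. The underlying accumulation pattern for write-clear hardware counters, as a stand-alone sketch with stubbed register accessors:

    #include <stdint.h>

    /* Stubbed MMIO accessors; a real driver would use its register helpers. */
    static uint32_t reg_read(volatile uint32_t *reg)              { return *reg; }
    static void     reg_write(volatile uint32_t *reg, uint32_t v) { *reg = v; }

    struct stats { uint64_t tx_dropped; };

    /* TROCR-style counter: a read returns the count since the last clear,
     * and writing 0 clears it, so each delta is added exactly once.
     */
    static void accumulate_tx_dropped(struct stats *s, volatile uint32_t *trocr,
                                      int hw_accessible)
    {
            if (!hw_accessible)      /* e.g. interface closed, clocks gated */
                    return;

            s->tx_dropped += reg_read(trocr);
            reg_write(trocr, 0);     /* write-clear, otherwise it is counted twice */
    }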
diff --git a/drivers/net/ethernet/renesas/sh_eth.h b/drivers/net/ethernet/renesas/sh_eth.h
index b37c427144ee..22301bf9c21d 100644
--- a/drivers/net/ethernet/renesas/sh_eth.h
+++ b/drivers/net/ethernet/renesas/sh_eth.h
@@ -162,9 +162,9 @@ enum {
162 162
163/* Driver's parameters */ 163/* Driver's parameters */
164#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE) 164#if defined(CONFIG_CPU_SH4) || defined(CONFIG_ARCH_SHMOBILE)
165#define SH4_SKB_RX_ALIGN 32 165#define SH_ETH_RX_ALIGN 32
166#else 166#else
167#define SH2_SH3_SKB_RX_ALIGN 2 167#define SH_ETH_RX_ALIGN 2
168#endif 168#endif
169 169
170/* Register's bits 170/* Register's bits
@@ -522,6 +522,7 @@ struct sh_eth_private {
522 522
523 unsigned no_ether_link:1; 523 unsigned no_ether_link:1;
524 unsigned ether_link_active_low:1; 524 unsigned ether_link_active_low:1;
525 unsigned is_opened:1;
525}; 526};
526 527
527static inline void sh_eth_soft_swap(char *src, int len) 528static inline void sh_eth_soft_swap(char *src, int len)
diff --git a/drivers/net/ethernet/sfc/ef10.c b/drivers/net/ethernet/sfc/ef10.c
index 002d4cdc319f..a77f05ce8325 100644
--- a/drivers/net/ethernet/sfc/ef10.c
+++ b/drivers/net/ethernet/sfc/ef10.c
@@ -180,7 +180,8 @@ static int efx_ef10_probe(struct efx_nic *efx)
180 EFX_MAX_CHANNELS, 180 EFX_MAX_CHANNELS,
181 resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]) / 181 resource_size(&efx->pci_dev->resource[EFX_MEM_BAR]) /
182 (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES)); 182 (EFX_VI_PAGE_SIZE * EFX_TXQ_TYPES));
183 BUG_ON(efx->max_channels == 0); 183 if (WARN_ON(efx->max_channels == 0))
184 return -EIO;
184 185
185 nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL); 186 nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
186 if (!nic_data) 187 if (!nic_data)
diff --git a/drivers/net/ethernet/sfc/tx.c b/drivers/net/ethernet/sfc/tx.c
index ee84a90e371c..aaf2987512b5 100644
--- a/drivers/net/ethernet/sfc/tx.c
+++ b/drivers/net/ethernet/sfc/tx.c
@@ -343,8 +343,6 @@ netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue, struct sk_buff *skb)
343 unsigned short dma_flags; 343 unsigned short dma_flags;
344 int i = 0; 344 int i = 0;
345 345
346 EFX_BUG_ON_PARANOID(tx_queue->write_count > tx_queue->insert_count);
347
348 if (skb_shinfo(skb)->gso_size) 346 if (skb_shinfo(skb)->gso_size)
349 return efx_enqueue_skb_tso(tx_queue, skb); 347 return efx_enqueue_skb_tso(tx_queue, skb);
350 348
@@ -1258,8 +1256,6 @@ static int efx_enqueue_skb_tso(struct efx_tx_queue *tx_queue,
1258 /* Find the packet protocol and sanity-check it */ 1256 /* Find the packet protocol and sanity-check it */
1259 state.protocol = efx_tso_check_protocol(skb); 1257 state.protocol = efx_tso_check_protocol(skb);
1260 1258
1261 EFX_BUG_ON_PARANOID(tx_queue->write_count > tx_queue->insert_count);
1262
1263 rc = tso_start(&state, efx, skb); 1259 rc = tso_start(&state, efx, skb);
1264 if (rc) 1260 if (rc)
1265 goto mem_err; 1261 goto mem_err;
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 5e94d00b96b3..6cc3cf6f17c8 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -81,6 +81,7 @@ static const char version[] =
81#include <linux/workqueue.h> 81#include <linux/workqueue.h>
82#include <linux/of.h> 82#include <linux/of.h>
83#include <linux/of_device.h> 83#include <linux/of_device.h>
84#include <linux/of_gpio.h>
84 85
85#include <linux/netdevice.h> 86#include <linux/netdevice.h>
86#include <linux/etherdevice.h> 87#include <linux/etherdevice.h>
@@ -2188,6 +2189,41 @@ static const struct of_device_id smc91x_match[] = {
2188 {}, 2189 {},
2189}; 2190};
2190MODULE_DEVICE_TABLE(of, smc91x_match); 2191MODULE_DEVICE_TABLE(of, smc91x_match);
2192
2193/**
2194 * of_try_set_control_gpio - configure a gpio if it exists
2195 */
2196static int try_toggle_control_gpio(struct device *dev,
2197 struct gpio_desc **desc,
2198 const char *name, int index,
2199 int value, unsigned int nsdelay)
2200{
2201 struct gpio_desc *gpio = *desc;
2202 int res;
2203
2204 gpio = devm_gpiod_get_index(dev, name, index);
2205 if (IS_ERR(gpio)) {
2206 if (PTR_ERR(gpio) == -ENOENT) {
2207 *desc = NULL;
2208 return 0;
2209 }
2210
2211 return PTR_ERR(gpio);
2212 }
2213 res = gpiod_direction_output(gpio, !value);
2214 if (res) {
2215 dev_err(dev, "unable to toggle gpio %s: %i\n", name, res);
2216 devm_gpiod_put(dev, gpio);
2217 gpio = NULL;
2218 return res;
2219 }
2220 if (nsdelay)
2221 usleep_range(nsdelay, 2 * nsdelay);
2222 gpiod_set_value_cansleep(gpio, value);
2223 *desc = gpio;
2224
2225 return 0;
2226}
2191#endif 2227#endif
2192 2228
2193/* 2229/*
@@ -2207,9 +2243,10 @@ static int smc_drv_probe(struct platform_device *pdev)
2207 const struct of_device_id *match = NULL; 2243 const struct of_device_id *match = NULL;
2208 struct smc_local *lp; 2244 struct smc_local *lp;
2209 struct net_device *ndev; 2245 struct net_device *ndev;
2210 struct resource *res, *ires; 2246 struct resource *res;
2211 unsigned int __iomem *addr; 2247 unsigned int __iomem *addr;
2212 unsigned long irq_flags = SMC_IRQ_FLAGS; 2248 unsigned long irq_flags = SMC_IRQ_FLAGS;
2249 unsigned long irq_resflags;
2213 int ret; 2250 int ret;
2214 2251
2215 ndev = alloc_etherdev(sizeof(struct smc_local)); 2252 ndev = alloc_etherdev(sizeof(struct smc_local));
@@ -2237,6 +2274,28 @@ static int smc_drv_probe(struct platform_device *pdev)
2237 struct device_node *np = pdev->dev.of_node; 2274 struct device_node *np = pdev->dev.of_node;
2238 u32 val; 2275 u32 val;
2239 2276
2277 /* Optional pwrdwn GPIO configured? */
2278 ret = try_toggle_control_gpio(&pdev->dev, &lp->power_gpio,
2279 "power", 0, 0, 100);
2280 if (ret)
2281 return ret;
2282
2283 /*
2284 * Optional reset GPIO configured? Minimum 100 ns reset needed
2285 * according to LAN91C96 datasheet page 14.
2286 */
2287 ret = try_toggle_control_gpio(&pdev->dev, &lp->reset_gpio,
2288 "reset", 0, 0, 100);
2289 if (ret)
2290 return ret;
2291
2292 /*
2293 * Need to wait for optional EEPROM to load, max 750 us according
2294 * to LAN91C96 datasheet page 55.
2295 */
2296 if (lp->reset_gpio)
2297 usleep_range(750, 1000);
2298
2240 /* Combination of IO widths supported, default to 16-bit */ 2299 /* Combination of IO widths supported, default to 16-bit */
2241 if (!of_property_read_u32(np, "reg-io-width", &val)) { 2300 if (!of_property_read_u32(np, "reg-io-width", &val)) {
2242 if (val & 1) 2301 if (val & 1)
@@ -2279,16 +2338,19 @@ static int smc_drv_probe(struct platform_device *pdev)
2279 goto out_free_netdev; 2338 goto out_free_netdev;
2280 } 2339 }
2281 2340
2282 ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 2341 ndev->irq = platform_get_irq(pdev, 0);
2283 if (!ires) { 2342 if (ndev->irq <= 0) {
2284 ret = -ENODEV; 2343 ret = -ENODEV;
2285 goto out_release_io; 2344 goto out_release_io;
2286 } 2345 }
2287 2346 /*
2288 ndev->irq = ires->start; 2347 * If this platform does not specify any special irqflags, or if
2289 2348 * the resource supplies a trigger, override the irqflags with
2290 if (irq_flags == -1 || ires->flags & IRQF_TRIGGER_MASK) 2349 * the trigger flags from the resource.
2291 irq_flags = ires->flags & IRQF_TRIGGER_MASK; 2350 */
2351 irq_resflags = irqd_get_trigger_type(irq_get_irq_data(ndev->irq));
2352 if (irq_flags == -1 || irq_resflags & IRQF_TRIGGER_MASK)
2353 irq_flags = irq_resflags & IRQF_TRIGGER_MASK;
2292 2354
2293 ret = smc_request_attrib(pdev, ndev); 2355 ret = smc_request_attrib(pdev, ndev);
2294 if (ret) 2356 if (ret)
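The smc91x probe changes above drop the IORESOURCE_IRQ lookup in favour of platform_get_irq() and recover the trigger type from the irq_data itself via irqd_get_trigger_type(). A bare-bones kernel-context sketch of that combination (error handling trimmed; dflt_flags stands in for the driver's compile-time default):

    #include <linux/platform_device.h>
    #include <linux/interrupt.h>
    #include <linux/irq.h>
    #include <linux/errno.h>

    static int my_probe_irq(struct platform_device *pdev, unsigned long dflt_flags,
                            int *irq_out, unsigned long *flags_out)
    {
            unsigned long trigger;
            int irq;

            irq = platform_get_irq(pdev, 0);     /* also covers DT-described IRQs */
            if (irq <= 0)
                    return -ENODEV;

            /* Prefer the trigger encoded in the IRQ itself (e.g. from the
             * device tree) over the driver's default flags.
             */
            trigger = irqd_get_trigger_type(irq_get_irq_data(irq));
            *flags_out = (trigger & IRQF_TRIGGER_MASK) ? trigger : dflt_flags;
            *irq_out = irq;
            return 0;
    }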
diff --git a/drivers/net/ethernet/smsc/smc91x.h b/drivers/net/ethernet/smsc/smc91x.h
index 47dce918eb0f..2a38dacbbd27 100644
--- a/drivers/net/ethernet/smsc/smc91x.h
+++ b/drivers/net/ethernet/smsc/smc91x.h
@@ -298,6 +298,9 @@ struct smc_local {
298 struct sk_buff *pending_tx_skb; 298 struct sk_buff *pending_tx_skb;
299 struct tasklet_struct tx_task; 299 struct tasklet_struct tx_task;
300 300
301 struct gpio_desc *power_gpio;
302 struct gpio_desc *reset_gpio;
303
301 /* version/revision of the SMC91x chip */ 304 /* version/revision of the SMC91x chip */
302 int version; 305 int version;
303 306
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index affb29da353e..77ed74561e5f 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -1342,6 +1342,42 @@ static void smsc911x_rx_multicast_update_workaround(struct smsc911x_data *pdata)
1342 spin_unlock(&pdata->mac_lock); 1342 spin_unlock(&pdata->mac_lock);
1343} 1343}
1344 1344
1345static int smsc911x_phy_general_power_up(struct smsc911x_data *pdata)
1346{
1347 int rc = 0;
1348
1349 if (!pdata->phy_dev)
1350 return rc;
1351
1352 /* If the internal PHY is in General Power-Down mode, all, except the
1353 * management interface, is powered-down and stays in that condition as
1354 * long as Phy register bit 0.11 is HIGH.
1355 *
1356 * In that case, clear the bit 0.11, so the PHY powers up and we can
1357 * access to the phy registers.
1358 */
1359 rc = phy_read(pdata->phy_dev, MII_BMCR);
1360 if (rc < 0) {
1361 SMSC_WARN(pdata, drv, "Failed reading PHY control reg");
1362 return rc;
1363 }
1364
1365 /* If the PHY general power-down bit is not set, it is not necessary to
1366 * disable the general power down-mode.
1367 */
1368 if (rc & BMCR_PDOWN) {
1369 rc = phy_write(pdata->phy_dev, MII_BMCR, rc & ~BMCR_PDOWN);
1370 if (rc < 0) {
1371 SMSC_WARN(pdata, drv, "Failed writing PHY control reg");
1372 return rc;
1373 }
1374
1375 usleep_range(1000, 1500);
1376 }
1377
1378 return 0;
1379}
1380
1345static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata) 1381static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata)
1346{ 1382{
1347 int rc = 0; 1383 int rc = 0;
@@ -1356,12 +1392,8 @@ static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata)
1356 return rc; 1392 return rc;
1357 } 1393 }
1358 1394
1359 /* 1395 /* Only disable if energy detect mode is already enabled */
1360 * If energy is detected the PHY is already awake so is not necessary 1396 if (rc & MII_LAN83C185_EDPWRDOWN) {
1361 * to disable the energy detect power-down mode.
1362 */
1363 if ((rc & MII_LAN83C185_EDPWRDOWN) &&
1364 !(rc & MII_LAN83C185_ENERGYON)) {
1365 /* Disable energy detect mode for this SMSC Transceivers */ 1397 /* Disable energy detect mode for this SMSC Transceivers */
1366 rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS, 1398 rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS,
1367 rc & (~MII_LAN83C185_EDPWRDOWN)); 1399 rc & (~MII_LAN83C185_EDPWRDOWN));
@@ -1370,8 +1402,8 @@ static int smsc911x_phy_disable_energy_detect(struct smsc911x_data *pdata)
1370 SMSC_WARN(pdata, drv, "Failed writing PHY control reg"); 1402 SMSC_WARN(pdata, drv, "Failed writing PHY control reg");
1371 return rc; 1403 return rc;
1372 } 1404 }
1373 1405 /* Allow PHY to wakeup */
1374 mdelay(1); 1406 mdelay(2);
1375 } 1407 }
1376 1408
1377 return 0; 1409 return 0;
@@ -1393,7 +1425,6 @@ static int smsc911x_phy_enable_energy_detect(struct smsc911x_data *pdata)
1393 1425
1394 /* Only enable if energy detect mode is already disabled */ 1426 /* Only enable if energy detect mode is already disabled */
1395 if (!(rc & MII_LAN83C185_EDPWRDOWN)) { 1427 if (!(rc & MII_LAN83C185_EDPWRDOWN)) {
1396 mdelay(100);
1397 /* Enable energy detect mode for this SMSC Transceivers */ 1428 /* Enable energy detect mode for this SMSC Transceivers */
1398 rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS, 1429 rc = phy_write(pdata->phy_dev, MII_LAN83C185_CTRL_STATUS,
1399 rc | MII_LAN83C185_EDPWRDOWN); 1430 rc | MII_LAN83C185_EDPWRDOWN);
@@ -1402,8 +1433,6 @@ static int smsc911x_phy_enable_energy_detect(struct smsc911x_data *pdata)
1402 SMSC_WARN(pdata, drv, "Failed writing PHY control reg"); 1433 SMSC_WARN(pdata, drv, "Failed writing PHY control reg");
1403 return rc; 1434 return rc;
1404 } 1435 }
1405
1406 mdelay(1);
1407 } 1436 }
1408 return 0; 1437 return 0;
1409} 1438}
@@ -1415,6 +1444,16 @@ static int smsc911x_soft_reset(struct smsc911x_data *pdata)
1415 int ret; 1444 int ret;
1416 1445
1417 /* 1446 /*
1447 * Make sure to power-up the PHY chip before doing a reset, otherwise
1448 * the reset fails.
1449 */
1450 ret = smsc911x_phy_general_power_up(pdata);
1451 if (ret) {
1452 SMSC_WARN(pdata, drv, "Failed to power-up the PHY chip");
1453 return ret;
1454 }
1455
1456 /*
1418 * LAN9210/LAN9211/LAN9220/LAN9221 chips have an internal PHY that 1457 * LAN9210/LAN9211/LAN9220/LAN9221 chips have an internal PHY that
1419 * are initialized in a Energy Detect Power-Down mode that prevents 1458 * are initialized in a Energy Detect Power-Down mode that prevents
1420 * the MAC chip to be software reseted. So we have to wakeup the PHY 1459 * the MAC chip to be software reseted. So we have to wakeup the PHY
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 6f77a46c7e2c..18c46bb0f3bf 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -276,6 +276,7 @@ static void stmmac_eee_ctrl_timer(unsigned long arg)
276bool stmmac_eee_init(struct stmmac_priv *priv) 276bool stmmac_eee_init(struct stmmac_priv *priv)
277{ 277{
278 char *phy_bus_name = priv->plat->phy_bus_name; 278 char *phy_bus_name = priv->plat->phy_bus_name;
279 unsigned long flags;
279 bool ret = false; 280 bool ret = false;
280 281
281 /* Using PCS we cannot dial with the phy registers at this stage 282 /* Using PCS we cannot dial with the phy registers at this stage
@@ -300,6 +301,7 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
300 * changed). 301 * changed).
301 * In that case the driver disable own timers. 302 * In that case the driver disable own timers.
302 */ 303 */
304 spin_lock_irqsave(&priv->lock, flags);
303 if (priv->eee_active) { 305 if (priv->eee_active) {
304 pr_debug("stmmac: disable EEE\n"); 306 pr_debug("stmmac: disable EEE\n");
305 del_timer_sync(&priv->eee_ctrl_timer); 307 del_timer_sync(&priv->eee_ctrl_timer);
@@ -307,9 +309,11 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
307 tx_lpi_timer); 309 tx_lpi_timer);
308 } 310 }
309 priv->eee_active = 0; 311 priv->eee_active = 0;
312 spin_unlock_irqrestore(&priv->lock, flags);
310 goto out; 313 goto out;
311 } 314 }
312 /* Activate the EEE and start timers */ 315 /* Activate the EEE and start timers */
316 spin_lock_irqsave(&priv->lock, flags);
313 if (!priv->eee_active) { 317 if (!priv->eee_active) {
314 priv->eee_active = 1; 318 priv->eee_active = 1;
315 init_timer(&priv->eee_ctrl_timer); 319 init_timer(&priv->eee_ctrl_timer);
@@ -325,9 +329,10 @@ bool stmmac_eee_init(struct stmmac_priv *priv)
325 /* Set HW EEE according to the speed */ 329 /* Set HW EEE according to the speed */
326 priv->hw->mac->set_eee_pls(priv->hw, priv->phydev->link); 330 priv->hw->mac->set_eee_pls(priv->hw, priv->phydev->link);
327 331
328 pr_debug("stmmac: Energy-Efficient Ethernet initialized\n");
329
330 ret = true; 332 ret = true;
333 spin_unlock_irqrestore(&priv->lock, flags);
334
335 pr_debug("stmmac: Energy-Efficient Ethernet initialized\n");
331 } 336 }
332out: 337out:
333 return ret; 338 return ret;
@@ -760,12 +765,12 @@ static void stmmac_adjust_link(struct net_device *dev)
760 if (new_state && netif_msg_link(priv)) 765 if (new_state && netif_msg_link(priv))
761 phy_print_status(phydev); 766 phy_print_status(phydev);
762 767
768 spin_unlock_irqrestore(&priv->lock, flags);
769
763 /* At this stage, it could be needed to setup the EEE or adjust some 770 /* At this stage, it could be needed to setup the EEE or adjust some
764 * MAC related HW registers. 771 * MAC related HW registers.
765 */ 772 */
766 priv->eee_enabled = stmmac_eee_init(priv); 773 priv->eee_enabled = stmmac_eee_init(priv);
767
768 spin_unlock_irqrestore(&priv->lock, flags);
769} 774}
770 775
771/** 776/**
@@ -959,12 +964,12 @@ static void stmmac_clear_descriptors(struct stmmac_priv *priv)
959} 964}
960 965
961static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p, 966static int stmmac_init_rx_buffers(struct stmmac_priv *priv, struct dma_desc *p,
962 int i) 967 int i, gfp_t flags)
963{ 968{
964 struct sk_buff *skb; 969 struct sk_buff *skb;
965 970
966 skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN, 971 skb = __netdev_alloc_skb(priv->dev, priv->dma_buf_sz + NET_IP_ALIGN,
967 GFP_KERNEL); 972 flags);
968 if (!skb) { 973 if (!skb) {
969 pr_err("%s: Rx init fails; skb is NULL\n", __func__); 974 pr_err("%s: Rx init fails; skb is NULL\n", __func__);
970 return -ENOMEM; 975 return -ENOMEM;
@@ -1006,7 +1011,7 @@ static void stmmac_free_rx_buffers(struct stmmac_priv *priv, int i)
1006 * and allocates the socket buffers. It suppors the chained and ring 1011 * and allocates the socket buffers. It suppors the chained and ring
1007 * modes. 1012 * modes.
1008 */ 1013 */
1009static int init_dma_desc_rings(struct net_device *dev) 1014static int init_dma_desc_rings(struct net_device *dev, gfp_t flags)
1010{ 1015{
1011 int i; 1016 int i;
1012 struct stmmac_priv *priv = netdev_priv(dev); 1017 struct stmmac_priv *priv = netdev_priv(dev);
@@ -1041,7 +1046,7 @@ static int init_dma_desc_rings(struct net_device *dev)
1041 else 1046 else
1042 p = priv->dma_rx + i; 1047 p = priv->dma_rx + i;
1043 1048
1044 ret = stmmac_init_rx_buffers(priv, p, i); 1049 ret = stmmac_init_rx_buffers(priv, p, i, flags);
1045 if (ret) 1050 if (ret)
1046 goto err_init_rx_buffers; 1051 goto err_init_rx_buffers;
1047 1052
@@ -1647,11 +1652,6 @@ static int stmmac_hw_setup(struct net_device *dev)
1647 struct stmmac_priv *priv = netdev_priv(dev); 1652 struct stmmac_priv *priv = netdev_priv(dev);
1648 int ret; 1653 int ret;
1649 1654
1650 ret = init_dma_desc_rings(dev);
1651 if (ret < 0) {
1652 pr_err("%s: DMA descriptors initialization failed\n", __func__);
1653 return ret;
1654 }
1655 /* DMA initialization and SW reset */ 1655 /* DMA initialization and SW reset */
1656 ret = stmmac_init_dma_engine(priv); 1656 ret = stmmac_init_dma_engine(priv);
1657 if (ret < 0) { 1657 if (ret < 0) {
@@ -1705,10 +1705,6 @@ static int stmmac_hw_setup(struct net_device *dev)
1705 } 1705 }
1706 priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS; 1706 priv->tx_lpi_timer = STMMAC_DEFAULT_TWT_LS;
1707 1707
1708 priv->eee_enabled = stmmac_eee_init(priv);
1709
1710 stmmac_init_tx_coalesce(priv);
1711
1712 if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) { 1708 if ((priv->use_riwt) && (priv->hw->dma->rx_watchdog)) {
1713 priv->rx_riwt = MAX_DMA_RIWT; 1709 priv->rx_riwt = MAX_DMA_RIWT;
1714 priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT); 1710 priv->hw->dma->rx_watchdog(priv->ioaddr, MAX_DMA_RIWT);
@@ -1761,12 +1757,20 @@ static int stmmac_open(struct net_device *dev)
1761 goto dma_desc_error; 1757 goto dma_desc_error;
1762 } 1758 }
1763 1759
1760 ret = init_dma_desc_rings(dev, GFP_KERNEL);
1761 if (ret < 0) {
1762 pr_err("%s: DMA descriptors initialization failed\n", __func__);
1763 goto init_error;
1764 }
1765
1764 ret = stmmac_hw_setup(dev); 1766 ret = stmmac_hw_setup(dev);
1765 if (ret < 0) { 1767 if (ret < 0) {
1766 pr_err("%s: Hw setup failed\n", __func__); 1768 pr_err("%s: Hw setup failed\n", __func__);
1767 goto init_error; 1769 goto init_error;
1768 } 1770 }
1769 1771
1772 stmmac_init_tx_coalesce(priv);
1773
1770 if (priv->phydev) 1774 if (priv->phydev)
1771 phy_start(priv->phydev); 1775 phy_start(priv->phydev);
1772 1776
@@ -1894,7 +1898,10 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1894 unsigned int nopaged_len = skb_headlen(skb); 1898 unsigned int nopaged_len = skb_headlen(skb);
1895 unsigned int enh_desc = priv->plat->enh_desc; 1899 unsigned int enh_desc = priv->plat->enh_desc;
1896 1900
1901 spin_lock(&priv->tx_lock);
1902
1897 if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) { 1903 if (unlikely(stmmac_tx_avail(priv) < nfrags + 1)) {
1904 spin_unlock(&priv->tx_lock);
1898 if (!netif_queue_stopped(dev)) { 1905 if (!netif_queue_stopped(dev)) {
1899 netif_stop_queue(dev); 1906 netif_stop_queue(dev);
1900 /* This is a hard error, log it. */ 1907 /* This is a hard error, log it. */
@@ -1903,8 +1910,6 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
1903 return NETDEV_TX_BUSY; 1910 return NETDEV_TX_BUSY;
1904 } 1911 }
1905 1912
1906 spin_lock(&priv->tx_lock);
1907
1908 if (priv->tx_path_in_lpi_mode) 1913 if (priv->tx_path_in_lpi_mode)
1909 stmmac_disable_eee_mode(priv); 1914 stmmac_disable_eee_mode(priv);
1910 1915
@@ -2025,6 +2030,7 @@ static netdev_tx_t stmmac_xmit(struct sk_buff *skb, struct net_device *dev)
2025 return NETDEV_TX_OK; 2030 return NETDEV_TX_OK;
2026 2031
2027dma_map_err: 2032dma_map_err:
2033 spin_unlock(&priv->tx_lock);
2028 dev_err(priv->device, "Tx dma map failed\n"); 2034 dev_err(priv->device, "Tx dma map failed\n");
2029 dev_kfree_skb(skb); 2035 dev_kfree_skb(skb);
2030 priv->dev->stats.tx_dropped++; 2036 priv->dev->stats.tx_dropped++;
@@ -2281,9 +2287,7 @@ static void stmmac_set_rx_mode(struct net_device *dev)
2281{ 2287{
2282 struct stmmac_priv *priv = netdev_priv(dev); 2288 struct stmmac_priv *priv = netdev_priv(dev);
2283 2289
2284 spin_lock(&priv->lock);
2285 priv->hw->mac->set_filter(priv->hw, dev); 2290 priv->hw->mac->set_filter(priv->hw, dev);
2286 spin_unlock(&priv->lock);
2287} 2291}
2288 2292
2289/** 2293/**
@@ -2950,7 +2954,7 @@ int stmmac_suspend(struct net_device *ndev)
2950 stmmac_set_mac(priv->ioaddr, false); 2954 stmmac_set_mac(priv->ioaddr, false);
2951 pinctrl_pm_select_sleep_state(priv->device); 2955 pinctrl_pm_select_sleep_state(priv->device);
2952 /* Disable clock in case of PWM is off */ 2956 /* Disable clock in case of PWM is off */
2953 clk_disable_unprepare(priv->stmmac_clk); 2957 clk_disable(priv->stmmac_clk);
2954 } 2958 }
2955 spin_unlock_irqrestore(&priv->lock, flags); 2959 spin_unlock_irqrestore(&priv->lock, flags);
2956 2960
@@ -2982,7 +2986,7 @@ int stmmac_resume(struct net_device *ndev)
2982 } else { 2986 } else {
2983 pinctrl_pm_select_default_state(priv->device); 2987 pinctrl_pm_select_default_state(priv->device);
2984 /* enable the clk prevously disabled */ 2988 /* enable the clk prevously disabled */
2985 clk_prepare_enable(priv->stmmac_clk); 2989 clk_enable(priv->stmmac_clk);
2986 /* reset the phy so that it's ready */ 2990 /* reset the phy so that it's ready */
2987 if (priv->mii) 2991 if (priv->mii)
2988 stmmac_mdio_reset(priv->mii); 2992 stmmac_mdio_reset(priv->mii);
@@ -2990,7 +2994,9 @@ int stmmac_resume(struct net_device *ndev)
2990 2994
2991 netif_device_attach(ndev); 2995 netif_device_attach(ndev);
2992 2996
2997 init_dma_desc_rings(ndev, GFP_ATOMIC);
2993 stmmac_hw_setup(ndev); 2998 stmmac_hw_setup(ndev);
2999 stmmac_init_tx_coalesce(priv);
2994 3000
2995 napi_enable(&priv->napi); 3001 napi_enable(&priv->napi);
2996 3002
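The stmmac changes above pull descriptor-ring setup out of stmmac_hw_setup() so that the caller chooses the allocation context: GFP_KERNEL from open(), where sleeping is fine, and GFP_ATOMIC from resume(), where it is not. Passing the gfp_t down to the buffer allocator is the whole trick; a trimmed kernel-context sketch (RX_BUF_SIZE and the ring bookkeeping are placeholders):

    #include <linux/netdevice.h>
    #include <linux/skbuff.h>
    #include <linux/gfp.h>

    #define RX_BUF_SIZE 1536   /* placeholder buffer size */

    static int my_init_rx_buffer(struct net_device *dev, gfp_t flags)
    {
            struct sk_buff *skb;

            /* GFP_KERNEL may sleep and reclaim; GFP_ATOMIC must not sleep. */
            skb = __netdev_alloc_skb(dev, RX_BUF_SIZE, flags);
            if (!skb)
                    return -ENOMEM;

            /* ... DMA-map skb->data and park it in the descriptor ring ... */
            dev_kfree_skb(skb);    /* sketch only: nothing keeps the buffer here */
            return 0;
    }

    /* open():   my_init_rx_buffer(dev, GFP_KERNEL);
     * resume(): my_init_rx_buffer(dev, GFP_ATOMIC);
     */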
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 655a23bbc451..e17a970eaf2b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -33,6 +33,7 @@ static struct stmmac_dma_cfg dma_cfg;
33static void stmmac_default_data(void) 33static void stmmac_default_data(void)
34{ 34{
35 memset(&plat_dat, 0, sizeof(struct plat_stmmacenet_data)); 35 memset(&plat_dat, 0, sizeof(struct plat_stmmacenet_data));
36
36 plat_dat.bus_id = 1; 37 plat_dat.bus_id = 1;
37 plat_dat.phy_addr = 0; 38 plat_dat.phy_addr = 0;
38 plat_dat.interface = PHY_INTERFACE_MODE_GMII; 39 plat_dat.interface = PHY_INTERFACE_MODE_GMII;
@@ -47,6 +48,12 @@ static void stmmac_default_data(void)
47 dma_cfg.pbl = 32; 48 dma_cfg.pbl = 32;
48 dma_cfg.burst_len = DMA_AXI_BLEN_256; 49 dma_cfg.burst_len = DMA_AXI_BLEN_256;
49 plat_dat.dma_cfg = &dma_cfg; 50 plat_dat.dma_cfg = &dma_cfg;
51
52 /* Set default value for multicast hash bins */
53 plat_dat.multicast_filter_bins = HASH_TABLE_SIZE;
54
55 /* Set default value for unicast filter entries */
56 plat_dat.unicast_filter_entries = 1;
50} 57}
51 58
52/** 59/**
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index db56fa7ce8f9..58a1a0a423d4 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -177,12 +177,6 @@ static int stmmac_probe_config_dt(struct platform_device *pdev,
177 */ 177 */
178 plat->maxmtu = JUMBO_LEN; 178 plat->maxmtu = JUMBO_LEN;
179 179
180 /* Set default value for multicast hash bins */
181 plat->multicast_filter_bins = HASH_TABLE_SIZE;
182
183 /* Set default value for unicast filter entries */
184 plat->unicast_filter_entries = 1;
185
186 /* 180 /*
187 * Currently only the properties needed on SPEAr600 181 * Currently only the properties needed on SPEAr600
188 * are provided. All other properties should be added 182 * are provided. All other properties should be added
@@ -270,16 +264,23 @@ static int stmmac_pltfr_probe(struct platform_device *pdev)
270 return PTR_ERR(addr); 264 return PTR_ERR(addr);
271 265
272 plat_dat = dev_get_platdata(&pdev->dev); 266 plat_dat = dev_get_platdata(&pdev->dev);
273 if (pdev->dev.of_node) { 267
274 if (!plat_dat) 268 if (!plat_dat)
275 plat_dat = devm_kzalloc(&pdev->dev, 269 plat_dat = devm_kzalloc(&pdev->dev,
276 sizeof(struct plat_stmmacenet_data), 270 sizeof(struct plat_stmmacenet_data),
277 GFP_KERNEL); 271 GFP_KERNEL);
278 if (!plat_dat) { 272 if (!plat_dat) {
279 pr_err("%s: ERROR: no memory", __func__); 273 pr_err("%s: ERROR: no memory", __func__);
280 return -ENOMEM; 274 return -ENOMEM;
281 } 275 }
276
277 /* Set default value for multicast hash bins */
278 plat_dat->multicast_filter_bins = HASH_TABLE_SIZE;
282 279
280 /* Set default value for unicast filter entries */
281 plat_dat->unicast_filter_entries = 1;
282
283 if (pdev->dev.of_node) {
283 ret = stmmac_probe_config_dt(pdev, plat_dat, &mac); 284 ret = stmmac_probe_config_dt(pdev, plat_dat, &mac);
284 if (ret) { 285 if (ret) {
285 pr_err("%s: main dt probe failed", __func__); 286 pr_err("%s: main dt probe failed", __func__);
diff --git a/drivers/net/ethernet/sun/sunhme.c b/drivers/net/ethernet/sun/sunhme.c
index 72c8525d5457..9c014803b03b 100644
--- a/drivers/net/ethernet/sun/sunhme.c
+++ b/drivers/net/ethernet/sun/sunhme.c
@@ -1262,6 +1262,7 @@ static void happy_meal_init_rings(struct happy_meal *hp)
1262 HMD(("init rxring, ")); 1262 HMD(("init rxring, "));
1263 for (i = 0; i < RX_RING_SIZE; i++) { 1263 for (i = 0; i < RX_RING_SIZE; i++) {
1264 struct sk_buff *skb; 1264 struct sk_buff *skb;
1265 u32 mapping;
1265 1266
1266 skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC); 1267 skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
1267 if (!skb) { 1268 if (!skb) {
@@ -1272,10 +1273,16 @@ static void happy_meal_init_rings(struct happy_meal *hp)
1272 1273
1273 /* Because we reserve afterwards. */ 1274 /* Because we reserve afterwards. */
1274 skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET + 4)); 1275 skb_put(skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
1276 mapping = dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE,
1277 DMA_FROM_DEVICE);
1278 if (dma_mapping_error(hp->dma_dev, mapping)) {
1279 dev_kfree_skb_any(skb);
1280 hme_write_rxd(hp, &hb->happy_meal_rxd[i], 0, 0);
1281 continue;
1282 }
1275 hme_write_rxd(hp, &hb->happy_meal_rxd[i], 1283 hme_write_rxd(hp, &hb->happy_meal_rxd[i],
1276 (RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16)), 1284 (RXFLAG_OWN | ((RX_BUF_ALLOC_SIZE - RX_OFFSET) << 16)),
1277 dma_map_single(hp->dma_dev, skb->data, RX_BUF_ALLOC_SIZE, 1285 mapping);
1278 DMA_FROM_DEVICE));
1279 skb_reserve(skb, RX_OFFSET); 1286 skb_reserve(skb, RX_OFFSET);
1280 } 1287 }
1281 1288
@@ -2020,6 +2027,7 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
2020 skb = hp->rx_skbs[elem]; 2027 skb = hp->rx_skbs[elem];
2021 if (len > RX_COPY_THRESHOLD) { 2028 if (len > RX_COPY_THRESHOLD) {
2022 struct sk_buff *new_skb; 2029 struct sk_buff *new_skb;
2030 u32 mapping;
2023 2031
2024 /* Now refill the entry, if we can. */ 2032 /* Now refill the entry, if we can. */
2025 new_skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC); 2033 new_skb = happy_meal_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
@@ -2027,13 +2035,21 @@ static void happy_meal_rx(struct happy_meal *hp, struct net_device *dev)
2027 drops++; 2035 drops++;
2028 goto drop_it; 2036 goto drop_it;
2029 } 2037 }
2038 skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
2039 mapping = dma_map_single(hp->dma_dev, new_skb->data,
2040 RX_BUF_ALLOC_SIZE,
2041 DMA_FROM_DEVICE);
2042 if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) {
2043 dev_kfree_skb_any(new_skb);
2044 drops++;
2045 goto drop_it;
2046 }
2047
2030 dma_unmap_single(hp->dma_dev, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE); 2048 dma_unmap_single(hp->dma_dev, dma_addr, RX_BUF_ALLOC_SIZE, DMA_FROM_DEVICE);
2031 hp->rx_skbs[elem] = new_skb; 2049 hp->rx_skbs[elem] = new_skb;
2032 skb_put(new_skb, (ETH_FRAME_LEN + RX_OFFSET + 4));
2033 hme_write_rxd(hp, this, 2050 hme_write_rxd(hp, this,
2034 (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)), 2051 (RXFLAG_OWN|((RX_BUF_ALLOC_SIZE-RX_OFFSET)<<16)),
2035 dma_map_single(hp->dma_dev, new_skb->data, RX_BUF_ALLOC_SIZE, 2052 mapping);
2036 DMA_FROM_DEVICE));
2037 skb_reserve(new_skb, RX_OFFSET); 2053 skb_reserve(new_skb, RX_OFFSET);
2038 2054
2039 /* Trim the original skb for the netif. */ 2055 /* Trim the original skb for the netif. */
@@ -2248,6 +2264,25 @@ static void happy_meal_tx_timeout(struct net_device *dev)
2248 netif_wake_queue(dev); 2264 netif_wake_queue(dev);
2249} 2265}
2250 2266
2267static void unmap_partial_tx_skb(struct happy_meal *hp, u32 first_mapping,
2268 u32 first_len, u32 first_entry, u32 entry)
2269{
2270 struct happy_meal_txd *txbase = &hp->happy_block->happy_meal_txd[0];
2271
2272 dma_unmap_single(hp->dma_dev, first_mapping, first_len, DMA_TO_DEVICE);
2273
2274 first_entry = NEXT_TX(first_entry);
2275 while (first_entry != entry) {
2276 struct happy_meal_txd *this = &txbase[first_entry];
2277 u32 addr, len;
2278
2279 addr = hme_read_desc32(hp, &this->tx_addr);
2280 len = hme_read_desc32(hp, &this->tx_flags);
2281 len &= TXFLAG_SIZE;
2282 dma_unmap_page(hp->dma_dev, addr, len, DMA_TO_DEVICE);
2283 }
2284}
2285
2251static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb, 2286static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
2252 struct net_device *dev) 2287 struct net_device *dev)
2253{ 2288{
@@ -2284,6 +2319,8 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
2284 2319
2285 len = skb->len; 2320 len = skb->len;
2286 mapping = dma_map_single(hp->dma_dev, skb->data, len, DMA_TO_DEVICE); 2321 mapping = dma_map_single(hp->dma_dev, skb->data, len, DMA_TO_DEVICE);
2322 if (unlikely(dma_mapping_error(hp->dma_dev, mapping)))
2323 goto out_dma_error;
2287 tx_flags |= (TXFLAG_SOP | TXFLAG_EOP); 2324 tx_flags |= (TXFLAG_SOP | TXFLAG_EOP);
2288 hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry], 2325 hme_write_txd(hp, &hp->happy_block->happy_meal_txd[entry],
2289 (tx_flags | (len & TXFLAG_SIZE)), 2326 (tx_flags | (len & TXFLAG_SIZE)),
@@ -2299,6 +2336,8 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
2299 first_len = skb_headlen(skb); 2336 first_len = skb_headlen(skb);
2300 first_mapping = dma_map_single(hp->dma_dev, skb->data, first_len, 2337 first_mapping = dma_map_single(hp->dma_dev, skb->data, first_len,
2301 DMA_TO_DEVICE); 2338 DMA_TO_DEVICE);
2339 if (unlikely(dma_mapping_error(hp->dma_dev, first_mapping)))
2340 goto out_dma_error;
2302 entry = NEXT_TX(entry); 2341 entry = NEXT_TX(entry);
2303 2342
2304 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) { 2343 for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
@@ -2308,6 +2347,11 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
2308 len = skb_frag_size(this_frag); 2347 len = skb_frag_size(this_frag);
2309 mapping = skb_frag_dma_map(hp->dma_dev, this_frag, 2348 mapping = skb_frag_dma_map(hp->dma_dev, this_frag,
2310 0, len, DMA_TO_DEVICE); 2349 0, len, DMA_TO_DEVICE);
2350 if (unlikely(dma_mapping_error(hp->dma_dev, mapping))) {
2351 unmap_partial_tx_skb(hp, first_mapping, first_len,
2352 first_entry, entry);
2353 goto out_dma_error;
2354 }
2311 this_txflags = tx_flags; 2355 this_txflags = tx_flags;
2312 if (frag == skb_shinfo(skb)->nr_frags - 1) 2356 if (frag == skb_shinfo(skb)->nr_frags - 1)
2313 this_txflags |= TXFLAG_EOP; 2357 this_txflags |= TXFLAG_EOP;
@@ -2333,6 +2377,14 @@ static netdev_tx_t happy_meal_start_xmit(struct sk_buff *skb,
2333 2377
2334 tx_add_log(hp, TXLOG_ACTION_TXMIT, 0); 2378 tx_add_log(hp, TXLOG_ACTION_TXMIT, 0);
2335 return NETDEV_TX_OK; 2379 return NETDEV_TX_OK;
2380
2381out_dma_error:
2382 hp->tx_skbs[hp->tx_new] = NULL;
2383 spin_unlock_irq(&hp->happy_lock);
2384
2385 dev_kfree_skb_any(skb);
2386 dev->stats.tx_dropped++;
2387 return NETDEV_TX_OK;
2336} 2388}
2337 2389
2338static struct net_device_stats *happy_meal_get_stats(struct net_device *dev) 2390static struct net_device_stats *happy_meal_get_stats(struct net_device *dev)
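The sunhme hunks above add the dma_mapping_error() checks the DMA API expects: every dma_map_single()/skb_frag_dma_map() result is validated before it is written into a descriptor, and a failure mid-skb unwinds the fragments that were already mapped. The core check looks roughly like this (kernel context assumed; the descriptor write and unwind loop are elided):

    #include <linux/dma-mapping.h>
    #include <linux/skbuff.h>
    #include <linux/errno.h>

    static int my_map_head(struct device *dev, struct sk_buff *skb,
                           dma_addr_t *mapping_out)
    {
            dma_addr_t mapping;

            mapping = dma_map_single(dev, skb->data, skb_headlen(skb),
                                     DMA_TO_DEVICE);
            if (dma_mapping_error(dev, mapping))
                    return -ENOMEM;     /* caller drops the skb, counts tx_dropped */

            *mapping_out = mapping;     /* safe to hand to the descriptor now */
            return 0;
    }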
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index 952e1e4764b7..c560f9aeb55d 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -129,9 +129,9 @@ do { \
129#define CPSW_VLAN_AWARE BIT(1) 129#define CPSW_VLAN_AWARE BIT(1)
130#define CPSW_ALE_VLAN_AWARE 1 130#define CPSW_ALE_VLAN_AWARE 1
131 131
132#define CPSW_FIFO_NORMAL_MODE (0 << 15) 132#define CPSW_FIFO_NORMAL_MODE (0 << 16)
133#define CPSW_FIFO_DUAL_MAC_MODE (1 << 15) 133#define CPSW_FIFO_DUAL_MAC_MODE (1 << 16)
134#define CPSW_FIFO_RATE_LIMIT_MODE (2 << 15) 134#define CPSW_FIFO_RATE_LIMIT_MODE (2 << 16)
135 135
136#define CPSW_INTPACEEN (0x3f << 16) 136#define CPSW_INTPACEEN (0x3f << 16)
137#define CPSW_INTPRESCALE_MASK (0x7FF << 0) 137#define CPSW_INTPRESCALE_MASK (0x7FF << 0)
@@ -591,8 +591,8 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
591 if (enable) { 591 if (enable) {
592 unsigned long timeout = jiffies + HZ; 592 unsigned long timeout = jiffies + HZ;
593 593
594 /* Disable Learn for all ports */ 594 /* Disable Learn for all ports (host is port 0 and slaves are port 1 and up) */
595 for (i = 0; i < priv->data.slaves; i++) { 595 for (i = 0; i <= priv->data.slaves; i++) {
596 cpsw_ale_control_set(ale, i, 596 cpsw_ale_control_set(ale, i,
597 ALE_PORT_NOLEARN, 1); 597 ALE_PORT_NOLEARN, 1);
598 cpsw_ale_control_set(ale, i, 598 cpsw_ale_control_set(ale, i,
@@ -616,11 +616,11 @@ static void cpsw_set_promiscious(struct net_device *ndev, bool enable)
616 cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1); 616 cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 1);
617 dev_dbg(&ndev->dev, "promiscuity enabled\n"); 617 dev_dbg(&ndev->dev, "promiscuity enabled\n");
618 } else { 618 } else {
619 /* Flood All Unicast Packets to Host port */ 619 /* Don't Flood All Unicast Packets to Host port */
620 cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0); 620 cpsw_ale_control_set(ale, 0, ALE_P0_UNI_FLOOD, 0);
621 621
622 /* Enable Learn for all ports */ 622 /* Enable Learn for all ports (host is port 0 and slaves are port 1 and up) */
623 for (i = 0; i < priv->data.slaves; i++) { 623 for (i = 0; i <= priv->data.slaves; i++) {
624 cpsw_ale_control_set(ale, i, 624 cpsw_ale_control_set(ale, i,
625 ALE_PORT_NOLEARN, 0); 625 ALE_PORT_NOLEARN, 0);
626 cpsw_ale_control_set(ale, i, 626 cpsw_ale_control_set(ale, i,
@@ -638,12 +638,16 @@ static void cpsw_ndo_set_rx_mode(struct net_device *ndev)
638 if (ndev->flags & IFF_PROMISC) { 638 if (ndev->flags & IFF_PROMISC) {
639 /* Enable promiscuous mode */ 639 /* Enable promiscuous mode */
640 cpsw_set_promiscious(ndev, true); 640 cpsw_set_promiscious(ndev, true);
641 cpsw_ale_set_allmulti(priv->ale, IFF_ALLMULTI);
641 return; 642 return;
642 } else { 643 } else {
643 /* Disable promiscuous mode */ 644 /* Disable promiscuous mode */
644 cpsw_set_promiscious(ndev, false); 645 cpsw_set_promiscious(ndev, false);
645 } 646 }
646 647
648 /* Restore allmulti on vlans if necessary */
649 cpsw_ale_set_allmulti(priv->ale, priv->ndev->flags & IFF_ALLMULTI);
650
647 /* Clear all mcast from ALE */ 651 /* Clear all mcast from ALE */
648 cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port); 652 cpsw_ale_flush_multicast(priv->ale, ALE_ALL_PORTS << priv->host_port);
649 653
@@ -1149,6 +1153,7 @@ static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
1149 const int port = priv->host_port; 1153 const int port = priv->host_port;
1150 u32 reg; 1154 u32 reg;
1151 int i; 1155 int i;
1156 int unreg_mcast_mask;
1152 1157
1153 reg = (priv->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN : 1158 reg = (priv->version == CPSW_VERSION_1) ? CPSW1_PORT_VLAN :
1154 CPSW2_PORT_VLAN; 1159 CPSW2_PORT_VLAN;
@@ -1158,9 +1163,14 @@ static inline void cpsw_add_default_vlan(struct cpsw_priv *priv)
1158 for (i = 0; i < priv->data.slaves; i++) 1163 for (i = 0; i < priv->data.slaves; i++)
1159 slave_write(priv->slaves + i, vlan, reg); 1164 slave_write(priv->slaves + i, vlan, reg);
1160 1165
1166 if (priv->ndev->flags & IFF_ALLMULTI)
1167 unreg_mcast_mask = ALE_ALL_PORTS;
1168 else
1169 unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;
1170
1161 cpsw_ale_add_vlan(priv->ale, vlan, ALE_ALL_PORTS << port, 1171 cpsw_ale_add_vlan(priv->ale, vlan, ALE_ALL_PORTS << port,
1162 ALE_ALL_PORTS << port, ALE_ALL_PORTS << port, 1172 ALE_ALL_PORTS << port, ALE_ALL_PORTS << port,
1163 (ALE_PORT_1 | ALE_PORT_2) << port); 1173 unreg_mcast_mask << port);
1164} 1174}
1165 1175
1166static void cpsw_init_host_port(struct cpsw_priv *priv) 1176static void cpsw_init_host_port(struct cpsw_priv *priv)
@@ -1620,11 +1630,17 @@ static inline int cpsw_add_vlan_ale_entry(struct cpsw_priv *priv,
1620 unsigned short vid) 1630 unsigned short vid)
1621{ 1631{
1622 int ret; 1632 int ret;
1633 int unreg_mcast_mask;
1634
1635 if (priv->ndev->flags & IFF_ALLMULTI)
1636 unreg_mcast_mask = ALE_ALL_PORTS;
1637 else
1638 unreg_mcast_mask = ALE_PORT_1 | ALE_PORT_2;
1623 1639
1624 ret = cpsw_ale_add_vlan(priv->ale, vid, 1640 ret = cpsw_ale_add_vlan(priv->ale, vid,
1625 ALE_ALL_PORTS << priv->host_port, 1641 ALE_ALL_PORTS << priv->host_port,
1626 0, ALE_ALL_PORTS << priv->host_port, 1642 0, ALE_ALL_PORTS << priv->host_port,
1627 (ALE_PORT_1 | ALE_PORT_2) << priv->host_port); 1643 unreg_mcast_mask << priv->host_port);
1628 if (ret != 0) 1644 if (ret != 0)
1629 return ret; 1645 return ret;
1630 1646
@@ -2006,7 +2022,7 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
2006 parp = of_get_property(slave_node, "phy_id", &lenp); 2022 parp = of_get_property(slave_node, "phy_id", &lenp);
2007 if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) { 2023 if ((parp == NULL) || (lenp != (sizeof(void *) * 2))) {
2008 dev_err(&pdev->dev, "Missing slave[%d] phy_id property\n", i); 2024 dev_err(&pdev->dev, "Missing slave[%d] phy_id property\n", i);
2009 return -EINVAL; 2025 goto no_phy_slave;
2010 } 2026 }
2011 mdio_node = of_find_node_by_phandle(be32_to_cpup(parp)); 2027 mdio_node = of_find_node_by_phandle(be32_to_cpup(parp));
2012 phyid = be32_to_cpup(parp+1); 2028 phyid = be32_to_cpup(parp+1);
@@ -2019,6 +2035,14 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
2019 snprintf(slave_data->phy_id, sizeof(slave_data->phy_id), 2035 snprintf(slave_data->phy_id, sizeof(slave_data->phy_id),
2020 PHY_ID_FMT, mdio->name, phyid); 2036 PHY_ID_FMT, mdio->name, phyid);
2021 2037
2038 slave_data->phy_if = of_get_phy_mode(slave_node);
2039 if (slave_data->phy_if < 0) {
2040 dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
2041 i);
2042 return slave_data->phy_if;
2043 }
2044
2045no_phy_slave:
2022 mac_addr = of_get_mac_address(slave_node); 2046 mac_addr = of_get_mac_address(slave_node);
2023 if (mac_addr) { 2047 if (mac_addr) {
2024 memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN); 2048 memcpy(slave_data->mac_addr, mac_addr, ETH_ALEN);
@@ -2030,14 +2054,6 @@ static int cpsw_probe_dt(struct cpsw_platform_data *data,
2030 return ret; 2054 return ret;
2031 } 2055 }
2032 } 2056 }
2033
2034 slave_data->phy_if = of_get_phy_mode(slave_node);
2035 if (slave_data->phy_if < 0) {
2036 dev_err(&pdev->dev, "Missing or malformed slave[%d] phy-mode property\n",
2037 i);
2038 return slave_data->phy_if;
2039 }
2040
2041 if (data->dual_emac) { 2057 if (data->dual_emac) {
2042 if (of_property_read_u32(slave_node, "dual_emac_res_vlan", 2058 if (of_property_read_u32(slave_node, "dual_emac_res_vlan",
2043 &prop)) { 2059 &prop)) {
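Both cpsw hunks that build VLAN ALE entries pick the same unregistered-multicast flood mask from the interface flags, so the logic is worth seeing in isolation. A minimal sketch follows, assuming port bit values that mirror the driver's ALE_PORT_* layout; the helper name is made up for illustration.

#include <linux/bits.h>
#include <linux/if.h>		/* IFF_ALLMULTI */

#define ALE_PORT_HOST	BIT(0)	/* CPU/host port */
#define ALE_PORT_1	BIT(1)
#define ALE_PORT_2	BIT(2)
#define ALE_ALL_PORTS	(ALE_PORT_HOST | ALE_PORT_1 | ALE_PORT_2)

/* Hypothetical helper: which ports may flood unregistered multicast? */
static int cpsw_unreg_mcast_mask(unsigned int ndev_flags)
{
	/*
	 * With IFF_ALLMULTI the host must also see unregistered multicast,
	 * so flood to all ports; otherwise keep it between the slaves.
	 */
	if (ndev_flags & IFF_ALLMULTI)
		return ALE_ALL_PORTS;
	return ALE_PORT_1 | ALE_PORT_2;
}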
diff --git a/drivers/net/ethernet/ti/cpsw_ale.c b/drivers/net/ethernet/ti/cpsw_ale.c
index 0579b2243bb6..097ebe7077ac 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.c
+++ b/drivers/net/ethernet/ti/cpsw_ale.c
@@ -443,6 +443,35 @@ int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port_mask)
443 return 0; 443 return 0;
444} 444}
445 445
446void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti)
447{
448 u32 ale_entry[ALE_ENTRY_WORDS];
449 int type, idx;
450 int unreg_mcast = 0;
451
452 /* Only bother doing the work if the setting is actually changing */
453 if (ale->allmulti == allmulti)
454 return;
455
456 /* Remember the new setting to check against next time */
457 ale->allmulti = allmulti;
458
459 for (idx = 0; idx < ale->params.ale_entries; idx++) {
460 cpsw_ale_read(ale, idx, ale_entry);
461 type = cpsw_ale_get_entry_type(ale_entry);
462 if (type != ALE_TYPE_VLAN)
463 continue;
464
465 unreg_mcast = cpsw_ale_get_vlan_unreg_mcast(ale_entry);
466 if (allmulti)
467 unreg_mcast |= 1;
468 else
469 unreg_mcast &= ~1;
470 cpsw_ale_set_vlan_unreg_mcast(ale_entry, unreg_mcast);
471 cpsw_ale_write(ale, idx, ale_entry);
472 }
473}
474
446struct ale_control_info { 475struct ale_control_info {
447 const char *name; 476 const char *name;
448 int offset, port_offset; 477 int offset, port_offset;
@@ -756,7 +785,6 @@ int cpsw_ale_destroy(struct cpsw_ale *ale)
756{ 785{
757 if (!ale) 786 if (!ale)
758 return -EINVAL; 787 return -EINVAL;
759 cpsw_ale_stop(ale);
760 cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0); 788 cpsw_ale_control_set(ale, 0, ALE_ENABLE, 0);
761 kfree(ale); 789 kfree(ale);
762 return 0; 790 return 0;
diff --git a/drivers/net/ethernet/ti/cpsw_ale.h b/drivers/net/ethernet/ti/cpsw_ale.h
index 31cf43cab42e..c0d4127aa549 100644
--- a/drivers/net/ethernet/ti/cpsw_ale.h
+++ b/drivers/net/ethernet/ti/cpsw_ale.h
@@ -27,6 +27,7 @@ struct cpsw_ale {
27 struct cpsw_ale_params params; 27 struct cpsw_ale_params params;
28 struct timer_list timer; 28 struct timer_list timer;
29 unsigned long ageout; 29 unsigned long ageout;
30 int allmulti;
30}; 31};
31 32
32enum cpsw_ale_control { 33enum cpsw_ale_control {
@@ -103,6 +104,7 @@ int cpsw_ale_del_mcast(struct cpsw_ale *ale, u8 *addr, int port_mask,
103int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag, 104int cpsw_ale_add_vlan(struct cpsw_ale *ale, u16 vid, int port, int untag,
104 int reg_mcast, int unreg_mcast); 105 int reg_mcast, int unreg_mcast);
105int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port); 106int cpsw_ale_del_vlan(struct cpsw_ale *ale, u16 vid, int port);
107void cpsw_ale_set_allmulti(struct cpsw_ale *ale, int allmulti);
106 108
107int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control); 109int cpsw_ale_control_get(struct cpsw_ale *ale, int port, int control);
108int cpsw_ale_control_set(struct cpsw_ale *ale, int port, 110int cpsw_ale_control_set(struct cpsw_ale *ale, int port,
diff --git a/drivers/net/ethernet/ti/cpts.c b/drivers/net/ethernet/ti/cpts.c
index ab92f67da035..4a4388b813ac 100644
--- a/drivers/net/ethernet/ti/cpts.c
+++ b/drivers/net/ethernet/ti/cpts.c
@@ -264,7 +264,7 @@ static int cpts_match(struct sk_buff *skb, unsigned int ptp_class,
264 264
265 switch (ptp_class & PTP_CLASS_PMASK) { 265 switch (ptp_class & PTP_CLASS_PMASK) {
266 case PTP_CLASS_IPV4: 266 case PTP_CLASS_IPV4:
267 offset += ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN; 267 offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
268 break; 268 break;
269 case PTP_CLASS_IPV6: 269 case PTP_CLASS_IPV6:
270 offset += ETH_HLEN + IP6_HLEN + UDP_HLEN; 270 offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
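The one-line cpts change (repeated later for dp83640) matters because IPV4_HLEN() dereferences the IHL field of the IP header: when extra L2 bytes such as a VLAN tag sit in front of the IP header, that header no longer starts at data, so the macro must be applied to data + offset. A small sketch of the offset computation for PTP over UDP/IPv4 is below; the IPV4_HDR_LEN() macro and the prefix_len parameter are illustrative stand-ins for the ptp_classify.h definitions and the VLAN accounting in the real functions.

#include <linux/if_ether.h>	/* ETH_HLEN */
#include <linux/ip.h>		/* struct iphdr */
#include <linux/types.h>

#define UDP_HLEN	8
/* Header length from the IHL field, same idea as IPV4_HLEN(). */
#define IPV4_HDR_LEN(p)	(((const struct iphdr *)(p))->ihl << 2)

/*
 * Byte offset of the PTP header within @data.  @prefix_len covers any
 * extra L2 bytes (e.g. a VLAN tag) accumulated by the caller.
 */
static unsigned int ptp_ipv4_offset(const u8 *data, unsigned int prefix_len)
{
	unsigned int offset = prefix_len + ETH_HLEN;

	/* The IP header lives at data + offset, not at data itself. */
	return offset + IPV4_HDR_LEN(data + offset) + UDP_HLEN;
}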
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 9e17d1a91e71..78ec33f5100b 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -550,6 +550,7 @@ do_lso:
550do_send: 550do_send:
551 /* Start filling in the page buffers with the rndis hdr */ 551 /* Start filling in the page buffers with the rndis hdr */
552 rndis_msg->msg_len += rndis_msg_size; 552 rndis_msg->msg_len += rndis_msg_size;
553 packet->total_data_buflen = rndis_msg->msg_len;
553 packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size, 554 packet->page_buf_cnt = init_page_array(rndis_msg, rndis_msg_size,
554 skb, &packet->page_buf[0]); 555 skb, &packet->page_buf[0]);
555 556
diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
index 9ce854f43917..6cbc56ad9ff4 100644
--- a/drivers/net/ieee802154/fakehard.c
+++ b/drivers/net/ieee802154/fakehard.c
@@ -377,17 +377,20 @@ static int ieee802154fake_probe(struct platform_device *pdev)
377 377
378 err = wpan_phy_register(phy); 378 err = wpan_phy_register(phy);
379 if (err) 379 if (err)
380 goto out; 380 goto err_phy_reg;
381 381
382 err = register_netdev(dev); 382 err = register_netdev(dev);
383 if (err < 0) 383 if (err)
384 goto out; 384 goto err_netdev_reg;
385 385
386 dev_info(&pdev->dev, "Added ieee802154 HardMAC hardware\n"); 386 dev_info(&pdev->dev, "Added ieee802154 HardMAC hardware\n");
387 return 0; 387 return 0;
388 388
389out: 389err_netdev_reg:
390 unregister_netdev(dev); 390 wpan_phy_unregister(phy);
391err_phy_reg:
392 free_netdev(dev);
393 wpan_phy_free(phy);
391 return err; 394 return err;
392} 395}
393 396
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 29b3bb410781..bfb0b6ec8c56 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -272,7 +272,7 @@ static void macvlan_process_broadcast(struct work_struct *w)
272 struct sk_buff *skb; 272 struct sk_buff *skb;
273 struct sk_buff_head list; 273 struct sk_buff_head list;
274 274
275 skb_queue_head_init(&list); 275 __skb_queue_head_init(&list);
276 276
277 spin_lock_bh(&port->bc_queue.lock); 277 spin_lock_bh(&port->bc_queue.lock);
278 skb_queue_splice_tail_init(&port->bc_queue, &list); 278 skb_queue_splice_tail_init(&port->bc_queue, &list);
@@ -1082,9 +1082,15 @@ static void macvlan_port_destroy(struct net_device *dev)
1082{ 1082{
1083 struct macvlan_port *port = macvlan_port_get_rtnl(dev); 1083 struct macvlan_port *port = macvlan_port_get_rtnl(dev);
1084 1084
1085 cancel_work_sync(&port->bc_work);
1086 dev->priv_flags &= ~IFF_MACVLAN_PORT; 1085 dev->priv_flags &= ~IFF_MACVLAN_PORT;
1087 netdev_rx_handler_unregister(dev); 1086 netdev_rx_handler_unregister(dev);
1087
1088 /* After this point, no packet can schedule bc_work anymore,
1089 * but we need to cancel it and purge left skbs if any.
1090 */
1091 cancel_work_sync(&port->bc_work);
1092 __skb_queue_purge(&port->bc_queue);
1093
1088 kfree_rcu(port, rcu); 1094 kfree_rcu(port, rcu);
1089} 1095}
1090 1096
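The macvlan reordering is about teardown sequencing: stop the producer first (unregister the rx handler so nothing can queue new broadcast work), then wait for the worker, then purge whatever is still queued, because nothing will ever drain it. A condensed sketch of that ordering follows; the struct and function names are illustrative, only the three-step order matches the hunk.

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>

struct bcast_port {
	struct sk_buff_head bc_queue;	/* filled from the rx handler */
	struct work_struct  bc_work;	/* drains bc_queue */
};

static void bcast_port_destroy(struct net_device *dev, struct bcast_port *port)
{
	/* 1. No rx handler means no new skbs and no new work scheduling. */
	netdev_rx_handler_unregister(dev);

	/* 2. Wait for an in-flight drain, if any, to finish. */
	cancel_work_sync(&port->bc_work);

	/* 3. Anything left on the queue is now orphaned; free it. */
	__skb_queue_purge(&port->bc_queue);
}

Cancelling before unregistering, as the old code did, leaves a window where a packet arriving after the cancel re-schedules bc_work against a port that is about to be freed.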
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 65e2892342bd..880cc090dc44 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -16,6 +16,7 @@
16#include <linux/idr.h> 16#include <linux/idr.h>
17#include <linux/fs.h> 17#include <linux/fs.h>
18 18
19#include <net/ipv6.h>
19#include <net/net_namespace.h> 20#include <net/net_namespace.h>
20#include <net/rtnetlink.h> 21#include <net/rtnetlink.h>
21#include <net/sock.h> 22#include <net/sock.h>
@@ -65,7 +66,7 @@ static struct cdev macvtap_cdev;
65static const struct proto_ops macvtap_socket_ops; 66static const struct proto_ops macvtap_socket_ops;
66 67
67#define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \ 68#define TUN_OFFLOADS (NETIF_F_HW_CSUM | NETIF_F_TSO_ECN | NETIF_F_TSO | \
68 NETIF_F_TSO6 | NETIF_F_UFO) 69 NETIF_F_TSO6)
69#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO) 70#define RX_OFFLOADS (NETIF_F_GRO | NETIF_F_LRO)
70#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG) 71#define TAP_FEATURES (NETIF_F_GSO | NETIF_F_SG)
71 72
@@ -569,7 +570,11 @@ static int macvtap_skb_from_vnet_hdr(struct sk_buff *skb,
569 gso_type = SKB_GSO_TCPV6; 570 gso_type = SKB_GSO_TCPV6;
570 break; 571 break;
571 case VIRTIO_NET_HDR_GSO_UDP: 572 case VIRTIO_NET_HDR_GSO_UDP:
573 pr_warn_once("macvtap: %s: using disabled UFO feature; please fix this program\n",
574 current->comm);
572 gso_type = SKB_GSO_UDP; 575 gso_type = SKB_GSO_UDP;
576 if (skb->protocol == htons(ETH_P_IPV6))
577 ipv6_proxy_select_ident(skb);
573 break; 578 break;
574 default: 579 default:
575 return -EINVAL; 580 return -EINVAL;
@@ -614,8 +619,6 @@ static void macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
614 vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4; 619 vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
615 else if (sinfo->gso_type & SKB_GSO_TCPV6) 620 else if (sinfo->gso_type & SKB_GSO_TCPV6)
616 vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6; 621 vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
617 else if (sinfo->gso_type & SKB_GSO_UDP)
618 vnet_hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
619 else 622 else
620 BUG(); 623 BUG();
621 if (sinfo->gso_type & SKB_GSO_TCP_ECN) 624 if (sinfo->gso_type & SKB_GSO_TCP_ECN)
@@ -626,6 +629,8 @@ static void macvtap_skb_to_vnet_hdr(const struct sk_buff *skb,
626 if (skb->ip_summed == CHECKSUM_PARTIAL) { 629 if (skb->ip_summed == CHECKSUM_PARTIAL) {
627 vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; 630 vnet_hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
628 vnet_hdr->csum_start = skb_checksum_start_offset(skb); 631 vnet_hdr->csum_start = skb_checksum_start_offset(skb);
632 if (vlan_tx_tag_present(skb))
633 vnet_hdr->csum_start += VLAN_HLEN;
629 vnet_hdr->csum_offset = skb->csum_offset; 634 vnet_hdr->csum_offset = skb->csum_offset;
630 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { 635 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
631 vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID; 636 vnet_hdr->flags = VIRTIO_NET_HDR_F_DATA_VALID;
@@ -950,9 +955,6 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg)
950 if (arg & TUN_F_TSO6) 955 if (arg & TUN_F_TSO6)
951 feature_mask |= NETIF_F_TSO6; 956 feature_mask |= NETIF_F_TSO6;
952 } 957 }
953
954 if (arg & TUN_F_UFO)
955 feature_mask |= NETIF_F_UFO;
956 } 958 }
957 959
958 /* tun/tap driver inverts the usage for TSO offloads, where 960 /* tun/tap driver inverts the usage for TSO offloads, where
@@ -963,7 +965,7 @@ static int set_offload(struct macvtap_queue *q, unsigned long arg)
963 * When user space turns off TSO, we turn off GSO/LRO so that 965 * When user space turns off TSO, we turn off GSO/LRO so that
964 * user-space will not receive TSO frames. 966 * user-space will not receive TSO frames.
965 */ 967 */
966 if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_UFO)) 968 if (feature_mask & (NETIF_F_TSO | NETIF_F_TSO6))
967 features |= RX_OFFLOADS; 969 features |= RX_OFFLOADS;
968 else 970 else
969 features &= ~RX_OFFLOADS; 971 features &= ~RX_OFFLOADS;
@@ -1064,7 +1066,7 @@ static long macvtap_ioctl(struct file *file, unsigned int cmd,
1064 case TUNSETOFFLOAD: 1066 case TUNSETOFFLOAD:
1065 /* let the user check for future flags */ 1067 /* let the user check for future flags */
1066 if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 | 1068 if (arg & ~(TUN_F_CSUM | TUN_F_TSO4 | TUN_F_TSO6 |
1067 TUN_F_TSO_ECN | TUN_F_UFO)) 1069 TUN_F_TSO_ECN))
1068 return -EINVAL; 1070 return -EINVAL;
1069 1071
1070 rtnl_lock(); 1072 rtnl_lock();
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 2954052706e8..e22e602beef3 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -791,7 +791,7 @@ static int match(struct sk_buff *skb, unsigned int type, struct rxts *rxts)
791 791
792 switch (type & PTP_CLASS_PMASK) { 792 switch (type & PTP_CLASS_PMASK) {
793 case PTP_CLASS_IPV4: 793 case PTP_CLASS_IPV4:
794 offset += ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN; 794 offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
795 break; 795 break;
796 case PTP_CLASS_IPV6: 796 case PTP_CLASS_IPV6:
797 offset += ETH_HLEN + IP6_HLEN + UDP_HLEN; 797 offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
@@ -934,7 +934,7 @@ static int is_sync(struct sk_buff *skb, int type)
934 934
935 switch (type & PTP_CLASS_PMASK) { 935 switch (type & PTP_CLASS_PMASK) {
936 case PTP_CLASS_IPV4: 936 case PTP_CLASS_IPV4:
937 offset += ETH_HLEN + IPV4_HLEN(data) + UDP_HLEN; 937 offset += ETH_HLEN + IPV4_HLEN(data + offset) + UDP_HLEN;
938 break; 938 break;
939 case PTP_CLASS_IPV6: 939 case PTP_CLASS_IPV6:
940 offset += ETH_HLEN + IP6_HLEN + UDP_HLEN; 940 offset += ETH_HLEN + IP6_HLEN + UDP_HLEN;
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index bd37e45c89c0..225c033b08f3 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -50,10 +50,15 @@
50#define MII_M1011_PHY_SCR 0x10 50#define MII_M1011_PHY_SCR 0x10
51#define MII_M1011_PHY_SCR_AUTO_CROSS 0x0060 51#define MII_M1011_PHY_SCR_AUTO_CROSS 0x0060
52 52
53#define MII_M1145_PHY_EXT_SR 0x1b
53#define MII_M1145_PHY_EXT_CR 0x14 54#define MII_M1145_PHY_EXT_CR 0x14
54#define MII_M1145_RGMII_RX_DELAY 0x0080 55#define MII_M1145_RGMII_RX_DELAY 0x0080
55#define MII_M1145_RGMII_TX_DELAY 0x0002 56#define MII_M1145_RGMII_TX_DELAY 0x0002
56 57
58#define MII_M1145_HWCFG_MODE_SGMII_NO_CLK 0x4
59#define MII_M1145_HWCFG_MODE_MASK 0xf
60#define MII_M1145_HWCFG_FIBER_COPPER_AUTO 0x8000
61
57#define MII_M1111_PHY_LED_CONTROL 0x18 62#define MII_M1111_PHY_LED_CONTROL 0x18
58#define MII_M1111_PHY_LED_DIRECT 0x4100 63#define MII_M1111_PHY_LED_DIRECT 0x4100
59#define MII_M1111_PHY_LED_COMBINE 0x411c 64#define MII_M1111_PHY_LED_COMBINE 0x411c
@@ -676,6 +681,20 @@ static int m88e1145_config_init(struct phy_device *phydev)
676 } 681 }
677 } 682 }
678 683
684 if (phydev->interface == PHY_INTERFACE_MODE_SGMII) {
685 int temp = phy_read(phydev, MII_M1145_PHY_EXT_SR);
686 if (temp < 0)
687 return temp;
688
689 temp &= ~MII_M1145_HWCFG_MODE_MASK;
690 temp |= MII_M1145_HWCFG_MODE_SGMII_NO_CLK;
691 temp |= MII_M1145_HWCFG_FIBER_COPPER_AUTO;
692
693 err = phy_write(phydev, MII_M1145_PHY_EXT_SR, temp);
694 if (err < 0)
695 return err;
696 }
697
679 err = marvell_of_reg_init(phydev); 698 err = marvell_of_reg_init(phydev);
680 if (err < 0) 699 if (err < 0)
681 return err; 700 return err;
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 1dfffdc9dfc3..767cd110f496 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -352,6 +352,7 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
352{ 352{
353 struct mii_ioctl_data *mii_data = if_mii(ifr); 353 struct mii_ioctl_data *mii_data = if_mii(ifr);
354 u16 val = mii_data->val_in; 354 u16 val = mii_data->val_in;
355 bool change_autoneg = false;
355 356
356 switch (cmd) { 357 switch (cmd) {
357 case SIOCGMIIPHY: 358 case SIOCGMIIPHY:
@@ -367,22 +368,29 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
367 if (mii_data->phy_id == phydev->addr) { 368 if (mii_data->phy_id == phydev->addr) {
368 switch (mii_data->reg_num) { 369 switch (mii_data->reg_num) {
369 case MII_BMCR: 370 case MII_BMCR:
370 if ((val & (BMCR_RESET | BMCR_ANENABLE)) == 0) 371 if ((val & (BMCR_RESET | BMCR_ANENABLE)) == 0) {
372 if (phydev->autoneg == AUTONEG_ENABLE)
373 change_autoneg = true;
371 phydev->autoneg = AUTONEG_DISABLE; 374 phydev->autoneg = AUTONEG_DISABLE;
372 else 375 if (val & BMCR_FULLDPLX)
376 phydev->duplex = DUPLEX_FULL;
377 else
378 phydev->duplex = DUPLEX_HALF;
379 if (val & BMCR_SPEED1000)
380 phydev->speed = SPEED_1000;
381 else if (val & BMCR_SPEED100)
382 phydev->speed = SPEED_100;
383 else phydev->speed = SPEED_10;
384 }
385 else {
386 if (phydev->autoneg == AUTONEG_DISABLE)
387 change_autoneg = true;
373 phydev->autoneg = AUTONEG_ENABLE; 388 phydev->autoneg = AUTONEG_ENABLE;
374 if (!phydev->autoneg && (val & BMCR_FULLDPLX)) 389 }
375 phydev->duplex = DUPLEX_FULL;
376 else
377 phydev->duplex = DUPLEX_HALF;
378 if (!phydev->autoneg && (val & BMCR_SPEED1000))
379 phydev->speed = SPEED_1000;
380 else if (!phydev->autoneg &&
381 (val & BMCR_SPEED100))
382 phydev->speed = SPEED_100;
383 break; 390 break;
384 case MII_ADVERTISE: 391 case MII_ADVERTISE:
385 phydev->advertising = val; 392 phydev->advertising = mii_adv_to_ethtool_adv_t(val);
393 change_autoneg = true;
386 break; 394 break;
387 default: 395 default:
388 /* do nothing */ 396 /* do nothing */
@@ -396,6 +404,10 @@ int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd)
396 if (mii_data->reg_num == MII_BMCR && 404 if (mii_data->reg_num == MII_BMCR &&
397 val & BMCR_RESET) 405 val & BMCR_RESET)
398 return phy_init_hw(phydev); 406 return phy_init_hw(phydev);
407
408 if (change_autoneg)
409 return phy_start_aneg(phydev);
410
399 return 0; 411 return 0;
400 412
401 case SIOCSHWTSTAMP: 413 case SIOCSHWTSTAMP:
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 68c3a3f4e0ab..794a47329368 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -755,23 +755,23 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
755 755
756 err = get_filter(argp, &code); 756 err = get_filter(argp, &code);
757 if (err >= 0) { 757 if (err >= 0) {
758 struct bpf_prog *pass_filter = NULL;
758 struct sock_fprog_kern fprog = { 759 struct sock_fprog_kern fprog = {
759 .len = err, 760 .len = err,
760 .filter = code, 761 .filter = code,
761 }; 762 };
762 763
763 ppp_lock(ppp); 764 err = 0;
764 if (ppp->pass_filter) { 765 if (fprog.filter)
765 bpf_prog_destroy(ppp->pass_filter); 766 err = bpf_prog_create(&pass_filter, &fprog);
766 ppp->pass_filter = NULL; 767 if (!err) {
768 ppp_lock(ppp);
769 if (ppp->pass_filter)
770 bpf_prog_destroy(ppp->pass_filter);
771 ppp->pass_filter = pass_filter;
772 ppp_unlock(ppp);
767 } 773 }
768 if (fprog.filter != NULL)
769 err = bpf_prog_create(&ppp->pass_filter,
770 &fprog);
771 else
772 err = 0;
773 kfree(code); 774 kfree(code);
774 ppp_unlock(ppp);
775 } 775 }
776 break; 776 break;
777 } 777 }
@@ -781,23 +781,23 @@ static long ppp_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
781 781
782 err = get_filter(argp, &code); 782 err = get_filter(argp, &code);
783 if (err >= 0) { 783 if (err >= 0) {
784 struct bpf_prog *active_filter = NULL;
784 struct sock_fprog_kern fprog = { 785 struct sock_fprog_kern fprog = {
785 .len = err, 786 .len = err,
786 .filter = code, 787 .filter = code,
787 }; 788 };
788 789
789 ppp_lock(ppp); 790 err = 0;
790 if (ppp->active_filter) { 791 if (fprog.filter)
791 bpf_prog_destroy(ppp->active_filter); 792 err = bpf_prog_create(&active_filter, &fprog);
792 ppp->active_filter = NULL; 793 if (!err) {
794 ppp_lock(ppp);
795 if (ppp->active_filter)
796 bpf_prog_destroy(ppp->active_filter);
797 ppp->active_filter = active_filter;
798 ppp_unlock(ppp);
793 } 799 }
794 if (fprog.filter != NULL)
795 err = bpf_prog_create(&ppp->active_filter,
796 &fprog);
797 else
798 err = 0;
799 kfree(code); 800 kfree(code);
800 ppp_unlock(ppp);
801 } 801 }
802 break; 802 break;
803 } 803 }
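The ppp_ioctl() rework exists because bpf_prog_create() can sleep and the old code called it while holding ppp_lock(), a spinlock. The fix builds the new program first and takes the lock only for the pointer swap. A stripped-down sketch of that allocate-then-swap pattern, with a reduced stand-in for the driver's state, is below.

#include <linux/filter.h>	/* bpf_prog_create(), struct sock_fprog_kern */
#include <linux/spinlock.h>

/* Reduced stand-in; the real struct ppp carries far more state. */
struct ppp_like {
	spinlock_t	 lock;
	struct bpf_prog	*pass_filter;
};

static int set_pass_filter(struct ppp_like *ppp, struct sock_fprog_kern *fprog)
{
	struct bpf_prog *new_prog = NULL;
	int err = 0;

	/* May sleep: must happen before the spinlock is taken. */
	if (fprog->filter)
		err = bpf_prog_create(&new_prog, fprog);
	if (err)
		return err;

	spin_lock_bh(&ppp->lock);
	if (ppp->pass_filter)
		bpf_prog_destroy(ppp->pass_filter);
	ppp->pass_filter = new_prog;	/* NULL simply clears the filter */
	spin_unlock_bh(&ppp->lock);

	return 0;
}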
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 1aff970be33e..1dc628ffce2b 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -506,7 +506,9 @@ static int pptp_getname(struct socket *sock, struct sockaddr *uaddr,
506 int len = sizeof(struct sockaddr_pppox); 506 int len = sizeof(struct sockaddr_pppox);
507 struct sockaddr_pppox sp; 507 struct sockaddr_pppox sp;
508 508
509 sp.sa_family = AF_PPPOX; 509 memset(&sp.sa_addr, 0, sizeof(sp.sa_addr));
510
511 sp.sa_family = AF_PPPOX;
510 sp.sa_protocol = PX_PROTO_PPTP; 512 sp.sa_protocol = PX_PROTO_PPTP;
511 sp.sa_addr.pptp = pppox_sk(sock->sk)->proto.pptp.src_addr; 513 sp.sa_addr.pptp = pppox_sk(sock->sk)->proto.pptp.src_addr;
512 514
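The two added lines in pptp_getname() fix a kernel-to-user information leak: struct sockaddr_pppox contains padding and union space inside sa_addr, and copying it to userspace without clearing it first exposes whatever was on the stack. The general rule, sketched below with a simplified structure (the field layout is illustrative, not the pppox one), is to zero the whole object before filling in the fields you mean to expose.

#include <string.h>

/* Simplified sockaddr-like struct; padding follows the protocol byte. */
struct demo_sockaddr {
	unsigned short family;
	unsigned char  protocol;
	unsigned int   addr;
};

static void fill_demo_sockaddr(struct demo_sockaddr *sp)
{
	/* Clear everything so padding never carries stale stack bytes. */
	memset(sp, 0, sizeof(*sp));

	sp->family   = 24;		/* AF_PPPOX on Linux */
	sp->protocol = 2;		/* e.g. PX_PROTO_PPTP */
	sp->addr     = 0x0a000001;	/* 10.0.0.1, for illustration */
}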
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 186ce541c657..9dd3746994a4 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -65,6 +65,7 @@
65#include <linux/nsproxy.h> 65#include <linux/nsproxy.h>
66#include <linux/virtio_net.h> 66#include <linux/virtio_net.h>
67#include <linux/rcupdate.h> 67#include <linux/rcupdate.h>
68#include <net/ipv6.h>
68#include <net/net_namespace.h> 69#include <net/net_namespace.h>
69#include <net/netns/generic.h> 70#include <net/netns/generic.h>
70#include <net/rtnetlink.h> 71#include <net/rtnetlink.h>
@@ -174,7 +175,7 @@ struct tun_struct {
174 struct net_device *dev; 175 struct net_device *dev;
175 netdev_features_t set_features; 176 netdev_features_t set_features;
176#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \ 177#define TUN_USER_FEATURES (NETIF_F_HW_CSUM|NETIF_F_TSO_ECN|NETIF_F_TSO| \
177 NETIF_F_TSO6|NETIF_F_UFO) 178 NETIF_F_TSO6)
178 179
179 int vnet_hdr_sz; 180 int vnet_hdr_sz;
180 int sndbuf; 181 int sndbuf;
@@ -1139,6 +1140,8 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1139 break; 1140 break;
1140 } 1141 }
1141 1142
1143 skb_reset_network_header(skb);
1144
1142 if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) { 1145 if (gso.gso_type != VIRTIO_NET_HDR_GSO_NONE) {
1143 pr_debug("GSO!\n"); 1146 pr_debug("GSO!\n");
1144 switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) { 1147 switch (gso.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
@@ -1149,8 +1152,20 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1149 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; 1152 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
1150 break; 1153 break;
1151 case VIRTIO_NET_HDR_GSO_UDP: 1154 case VIRTIO_NET_HDR_GSO_UDP:
1155 {
1156 static bool warned;
1157
1158 if (!warned) {
1159 warned = true;
1160 netdev_warn(tun->dev,
1161 "%s: using disabled UFO feature; please fix this program\n",
1162 current->comm);
1163 }
1152 skb_shinfo(skb)->gso_type = SKB_GSO_UDP; 1164 skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
1165 if (skb->protocol == htons(ETH_P_IPV6))
1166 ipv6_proxy_select_ident(skb);
1153 break; 1167 break;
1168 }
1154 default: 1169 default:
1155 tun->dev->stats.rx_frame_errors++; 1170 tun->dev->stats.rx_frame_errors++;
1156 kfree_skb(skb); 1171 kfree_skb(skb);
@@ -1179,7 +1194,6 @@ static ssize_t tun_get_user(struct tun_struct *tun, struct tun_file *tfile,
1179 skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG; 1194 skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
1180 } 1195 }
1181 1196
1182 skb_reset_network_header(skb);
1183 skb_probe_transport_header(skb, 0); 1197 skb_probe_transport_header(skb, 0);
1184 1198
1185 rxhash = skb_get_hash(skb); 1199 rxhash = skb_get_hash(skb);
@@ -1221,12 +1235,20 @@ static ssize_t tun_put_user(struct tun_struct *tun,
1221 struct tun_pi pi = { 0, skb->protocol }; 1235 struct tun_pi pi = { 0, skb->protocol };
1222 ssize_t total = 0; 1236 ssize_t total = 0;
1223 int vlan_offset = 0, copied; 1237 int vlan_offset = 0, copied;
1238 int vlan_hlen = 0;
1239 int vnet_hdr_sz = 0;
1240
1241 if (vlan_tx_tag_present(skb))
1242 vlan_hlen = VLAN_HLEN;
1243
1244 if (tun->flags & TUN_VNET_HDR)
1245 vnet_hdr_sz = tun->vnet_hdr_sz;
1224 1246
1225 if (!(tun->flags & TUN_NO_PI)) { 1247 if (!(tun->flags & TUN_NO_PI)) {
1226 if ((len -= sizeof(pi)) < 0) 1248 if ((len -= sizeof(pi)) < 0)
1227 return -EINVAL; 1249 return -EINVAL;
1228 1250
1229 if (len < skb->len) { 1251 if (len < skb->len + vlan_hlen + vnet_hdr_sz) {
1230 /* Packet will be striped */ 1252 /* Packet will be striped */
1231 pi.flags |= TUN_PKT_STRIP; 1253 pi.flags |= TUN_PKT_STRIP;
1232 } 1254 }
@@ -1236,9 +1258,9 @@ static ssize_t tun_put_user(struct tun_struct *tun,
1236 total += sizeof(pi); 1258 total += sizeof(pi);
1237 } 1259 }
1238 1260
1239 if (tun->flags & TUN_VNET_HDR) { 1261 if (vnet_hdr_sz) {
1240 struct virtio_net_hdr gso = { 0 }; /* no info leak */ 1262 struct virtio_net_hdr gso = { 0 }; /* no info leak */
1241 if ((len -= tun->vnet_hdr_sz) < 0) 1263 if ((len -= vnet_hdr_sz) < 0)
1242 return -EINVAL; 1264 return -EINVAL;
1243 1265
1244 if (skb_is_gso(skb)) { 1266 if (skb_is_gso(skb)) {
@@ -1251,8 +1273,6 @@ static ssize_t tun_put_user(struct tun_struct *tun,
1251 gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; 1273 gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
1252 else if (sinfo->gso_type & SKB_GSO_TCPV6) 1274 else if (sinfo->gso_type & SKB_GSO_TCPV6)
1253 gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; 1275 gso.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
1254 else if (sinfo->gso_type & SKB_GSO_UDP)
1255 gso.gso_type = VIRTIO_NET_HDR_GSO_UDP;
1256 else { 1276 else {
1257 pr_err("unexpected GSO type: " 1277 pr_err("unexpected GSO type: "
1258 "0x%x, gso_size %d, hdr_len %d\n", 1278 "0x%x, gso_size %d, hdr_len %d\n",
@@ -1272,7 +1292,8 @@ static ssize_t tun_put_user(struct tun_struct *tun,
1272 1292
1273 if (skb->ip_summed == CHECKSUM_PARTIAL) { 1293 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1274 gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM; 1294 gso.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
1275 gso.csum_start = skb_checksum_start_offset(skb); 1295 gso.csum_start = skb_checksum_start_offset(skb) +
1296 vlan_hlen;
1276 gso.csum_offset = skb->csum_offset; 1297 gso.csum_offset = skb->csum_offset;
1277 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) { 1298 } else if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1278 gso.flags = VIRTIO_NET_HDR_F_DATA_VALID; 1299 gso.flags = VIRTIO_NET_HDR_F_DATA_VALID;
@@ -1281,14 +1302,13 @@ static ssize_t tun_put_user(struct tun_struct *tun,
1281 if (unlikely(memcpy_toiovecend(iv, (void *)&gso, total, 1302 if (unlikely(memcpy_toiovecend(iv, (void *)&gso, total,
1282 sizeof(gso)))) 1303 sizeof(gso))))
1283 return -EFAULT; 1304 return -EFAULT;
1284 total += tun->vnet_hdr_sz; 1305 total += vnet_hdr_sz;
1285 } 1306 }
1286 1307
1287 copied = total; 1308 copied = total;
1288 total += skb->len; 1309 len = min_t(int, skb->len + vlan_hlen, len);
1289 if (!vlan_tx_tag_present(skb)) { 1310 total += skb->len + vlan_hlen;
1290 len = min_t(int, skb->len, len); 1311 if (vlan_hlen) {
1291 } else {
1292 int copy, ret; 1312 int copy, ret;
1293 struct { 1313 struct {
1294 __be16 h_vlan_proto; 1314 __be16 h_vlan_proto;
@@ -1299,8 +1319,6 @@ static ssize_t tun_put_user(struct tun_struct *tun,
1299 veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb)); 1319 veth.h_vlan_TCI = htons(vlan_tx_tag_get(skb));
1300 1320
1301 vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto); 1321 vlan_offset = offsetof(struct vlan_ethhdr, h_vlan_proto);
1302 len = min_t(int, skb->len + VLAN_HLEN, len);
1303 total += VLAN_HLEN;
1304 1322
1305 copy = min_t(int, vlan_offset, len); 1323 copy = min_t(int, vlan_offset, len);
1306 ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy); 1324 ret = skb_copy_datagram_const_iovec(skb, 0, iv, copied, copy);
@@ -1762,11 +1780,6 @@ static int set_offload(struct tun_struct *tun, unsigned long arg)
1762 features |= NETIF_F_TSO6; 1780 features |= NETIF_F_TSO6;
1763 arg &= ~(TUN_F_TSO4|TUN_F_TSO6); 1781 arg &= ~(TUN_F_TSO4|TUN_F_TSO6);
1764 } 1782 }
1765
1766 if (arg & TUN_F_UFO) {
1767 features |= NETIF_F_UFO;
1768 arg &= ~TUN_F_UFO;
1769 }
1770 } 1783 }
1771 1784
1772 /* This gives the user a way to test for new features in future by 1785 /* This gives the user a way to test for new features in future by
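The tun_put_user() changes all serve one accounting rule: the read buffer must be budgeted for everything the driver emits around the payload — the optional packet-info struct, the virtio-net header, and a VLAN tag re-inserted into the byte stream — before deciding whether TUN_PKT_STRIP applies and how much of the skb to copy. A small, self-contained sketch of that budgeting decision (names are illustrative) is below.

#include <stdbool.h>
#include <stddef.h>

#define VLAN_TAG_LEN	4	/* 802.1Q tag re-inserted into the stream */

/* True if a read of @len bytes cannot hold the whole packet. */
static bool packet_will_be_stripped(size_t len, size_t pi_len,
				    size_t vnet_hdr_sz, bool has_vlan_tag,
				    size_t skb_len)
{
	size_t vlan_hlen = has_vlan_tag ? VLAN_TAG_LEN : 0;

	if (len < pi_len)
		return true;
	len -= pi_len;

	/* Payload plus tag plus vnet header must all fit in what is left. */
	return len < skb_len + vlan_hlen + vnet_hdr_sz;
}

The same vlan_hlen value is also added to csum_start in the hunk, since the checksum start offset is relative to the bytes userspace actually receives, which now include the tag.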
diff --git a/drivers/net/usb/asix_devices.c b/drivers/net/usb/asix_devices.c
index 2c05f6cdb12f..816d511e34d3 100644
--- a/drivers/net/usb/asix_devices.c
+++ b/drivers/net/usb/asix_devices.c
@@ -465,19 +465,7 @@ static int ax88772_bind(struct usbnet *dev, struct usb_interface *intf)
465 return ret; 465 return ret;
466 } 466 }
467 467
468 ret = asix_sw_reset(dev, AX_SWRESET_IPPD | AX_SWRESET_PRL); 468 ax88772_reset(dev);
469 if (ret < 0)
470 return ret;
471
472 msleep(150);
473
474 ret = asix_sw_reset(dev, AX_SWRESET_CLEAR);
475 if (ret < 0)
476 return ret;
477
478 msleep(150);
479
480 ret = asix_sw_reset(dev, embd_phy ? AX_SWRESET_IPRL : AX_SWRESET_PRTE);
481 469
482 /* Read PHYID register *AFTER* the PHY was reset properly */ 470 /* Read PHYID register *AFTER* the PHY was reset properly */
483 phyid = asix_get_phyid(dev); 471 phyid = asix_get_phyid(dev);
diff --git a/drivers/net/usb/ax88179_178a.c b/drivers/net/usb/ax88179_178a.c
index be4275721039..e6338c16081a 100644
--- a/drivers/net/usb/ax88179_178a.c
+++ b/drivers/net/usb/ax88179_178a.c
@@ -937,6 +937,7 @@ static int ax88179_set_mac_addr(struct net_device *net, void *p)
937{ 937{
938 struct usbnet *dev = netdev_priv(net); 938 struct usbnet *dev = netdev_priv(net);
939 struct sockaddr *addr = p; 939 struct sockaddr *addr = p;
940 int ret;
940 941
941 if (netif_running(net)) 942 if (netif_running(net))
942 return -EBUSY; 943 return -EBUSY;
@@ -946,8 +947,12 @@ static int ax88179_set_mac_addr(struct net_device *net, void *p)
946 memcpy(net->dev_addr, addr->sa_data, ETH_ALEN); 947 memcpy(net->dev_addr, addr->sa_data, ETH_ALEN);
947 948
948 /* Set the MAC address */ 949 /* Set the MAC address */
949 return ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN, 950 ret = ax88179_write_cmd(dev, AX_ACCESS_MAC, AX_NODE_ID, ETH_ALEN,
950 ETH_ALEN, net->dev_addr); 951 ETH_ALEN, net->dev_addr);
952 if (ret < 0)
953 return ret;
954
955 return 0;
951} 956}
952 957
953static const struct net_device_ops ax88179_netdev_ops = { 958static const struct net_device_ops ax88179_netdev_ops = {
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 2a32d9167d3b..d3920b54a92c 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -67,6 +67,35 @@ static const u8 mbm_guid[16] = {
67 0xa6, 0x07, 0xc0, 0xff, 0xcb, 0x7e, 0x39, 0x2a, 67 0xa6, 0x07, 0xc0, 0xff, 0xcb, 0x7e, 0x39, 0x2a,
68}; 68};
69 69
70static void usbnet_cdc_update_filter(struct usbnet *dev)
71{
72 struct cdc_state *info = (void *) &dev->data;
73 struct usb_interface *intf = info->control;
74
75 u16 cdc_filter =
76 USB_CDC_PACKET_TYPE_ALL_MULTICAST | USB_CDC_PACKET_TYPE_DIRECTED |
77 USB_CDC_PACKET_TYPE_BROADCAST;
78
79 if (dev->net->flags & IFF_PROMISC)
80 cdc_filter |= USB_CDC_PACKET_TYPE_PROMISCUOUS;
81
82 /* FIXME cdc-ether has some multicast code too, though it complains
83 * in routine cases. info->ether describes the multicast support.
84 * Implement that here, manipulating the cdc filter as needed.
85 */
86
87 usb_control_msg(dev->udev,
88 usb_sndctrlpipe(dev->udev, 0),
89 USB_CDC_SET_ETHERNET_PACKET_FILTER,
90 USB_TYPE_CLASS | USB_RECIP_INTERFACE,
91 cdc_filter,
92 intf->cur_altsetting->desc.bInterfaceNumber,
93 NULL,
94 0,
95 USB_CTRL_SET_TIMEOUT
96 );
97}
98
70/* probes control interface, claims data interface, collects the bulk 99/* probes control interface, claims data interface, collects the bulk
71 * endpoints, activates data interface (if needed), maybe sets MTU. 100 * endpoints, activates data interface (if needed), maybe sets MTU.
72 * all pure cdc, except for certain firmware workarounds, and knowing 101 * all pure cdc, except for certain firmware workarounds, and knowing
@@ -347,16 +376,8 @@ next_desc:
347 * don't do reset all the way. So the packet filter should 376 * don't do reset all the way. So the packet filter should
348 * be set to a sane initial value. 377 * be set to a sane initial value.
349 */ 378 */
350 usb_control_msg(dev->udev, 379 usbnet_cdc_update_filter(dev);
351 usb_sndctrlpipe(dev->udev, 0), 380
352 USB_CDC_SET_ETHERNET_PACKET_FILTER,
353 USB_TYPE_CLASS | USB_RECIP_INTERFACE,
354 USB_CDC_PACKET_TYPE_ALL_MULTICAST | USB_CDC_PACKET_TYPE_DIRECTED | USB_CDC_PACKET_TYPE_BROADCAST,
355 intf->cur_altsetting->desc.bInterfaceNumber,
356 NULL,
357 0,
358 USB_CTRL_SET_TIMEOUT
359 );
360 return 0; 381 return 0;
361 382
362bad_desc: 383bad_desc:
@@ -468,10 +489,6 @@ int usbnet_cdc_bind(struct usbnet *dev, struct usb_interface *intf)
468 return status; 489 return status;
469 } 490 }
470 491
471 /* FIXME cdc-ether has some multicast code too, though it complains
472 * in routine cases. info->ether describes the multicast support.
473 * Implement that here, manipulating the cdc filter as needed.
474 */
475 return 0; 492 return 0;
476} 493}
477EXPORT_SYMBOL_GPL(usbnet_cdc_bind); 494EXPORT_SYMBOL_GPL(usbnet_cdc_bind);
@@ -482,6 +499,7 @@ static const struct driver_info cdc_info = {
482 .bind = usbnet_cdc_bind, 499 .bind = usbnet_cdc_bind,
483 .unbind = usbnet_cdc_unbind, 500 .unbind = usbnet_cdc_unbind,
484 .status = usbnet_cdc_status, 501 .status = usbnet_cdc_status,
502 .set_rx_mode = usbnet_cdc_update_filter,
485 .manage_power = usbnet_manage_power, 503 .manage_power = usbnet_manage_power,
486}; 504};
487 505
@@ -491,6 +509,7 @@ static const struct driver_info wwan_info = {
491 .bind = usbnet_cdc_bind, 509 .bind = usbnet_cdc_bind,
492 .unbind = usbnet_cdc_unbind, 510 .unbind = usbnet_cdc_unbind,
493 .status = usbnet_cdc_status, 511 .status = usbnet_cdc_status,
512 .set_rx_mode = usbnet_cdc_update_filter,
494 .manage_power = usbnet_manage_power, 513 .manage_power = usbnet_manage_power,
495}; 514};
496 515
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 22756db53dca..b8a82b86f909 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -780,6 +780,7 @@ static const struct usb_device_id products[] = {
780 {QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */ 780 {QMI_FIXED_INTF(0x413c, 0x81a4, 8)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
781 {QMI_FIXED_INTF(0x413c, 0x81a8, 8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */ 781 {QMI_FIXED_INTF(0x413c, 0x81a8, 8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
782 {QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */ 782 {QMI_FIXED_INTF(0x413c, 0x81a9, 8)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
783 {QMI_FIXED_INTF(0x03f0, 0x581d, 4)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Module (Huawei me906e) */
783 784
784 /* 4. Gobi 1000 devices */ 785 /* 4. Gobi 1000 devices */
785 {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */ 786 {QMI_GOBI1K_DEVICE(0x05c6, 0x9212)}, /* Acer Gobi Modem Device */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index e3d84c322e4e..c6554c7a8147 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -1162,6 +1162,9 @@ static void intr_callback(struct urb *urb)
1162 case -ESHUTDOWN: 1162 case -ESHUTDOWN:
1163 netif_device_detach(tp->netdev); 1163 netif_device_detach(tp->netdev);
1164 case -ENOENT: 1164 case -ENOENT:
1165 case -EPROTO:
1166 netif_info(tp, intr, tp->netdev,
1167 "Stop submitting intr, status %d\n", status);
1165 return; 1168 return;
1166 case -EOVERFLOW: 1169 case -EOVERFLOW:
1167 netif_info(tp, intr, tp->netdev, "intr status -EOVERFLOW\n"); 1170 netif_info(tp, intr, tp->netdev, "intr status -EOVERFLOW\n");
@@ -2891,6 +2894,9 @@ static int rtl8152_open(struct net_device *netdev)
2891 if (res) 2894 if (res)
2892 goto out; 2895 goto out;
2893 2896
2897 /* set speed to 0 to avoid autoresume try to submit rx */
2898 tp->speed = 0;
2899
2894 res = usb_autopm_get_interface(tp->intf); 2900 res = usb_autopm_get_interface(tp->intf);
2895 if (res < 0) { 2901 if (res < 0) {
2896 free_all_mem(tp); 2902 free_all_mem(tp);
@@ -2904,6 +2910,8 @@ static int rtl8152_open(struct net_device *netdev)
2904 clear_bit(WORK_ENABLE, &tp->flags); 2910 clear_bit(WORK_ENABLE, &tp->flags);
2905 usb_kill_urb(tp->intr_urb); 2911 usb_kill_urb(tp->intr_urb);
2906 cancel_delayed_work_sync(&tp->schedule); 2912 cancel_delayed_work_sync(&tp->schedule);
2913
2914 /* disable the tx/rx, if the workqueue has enabled them. */
2907 if (tp->speed & LINK_STATUS) 2915 if (tp->speed & LINK_STATUS)
2908 tp->rtl_ops.disable(tp); 2916 tp->rtl_ops.disable(tp);
2909 } 2917 }
@@ -2955,10 +2963,7 @@ static int rtl8152_close(struct net_device *netdev)
2955 * be disable when autoresume occurs, because the 2963 * be disable when autoresume occurs, because the
2956 * netif_running() would be false. 2964 * netif_running() would be false.
2957 */ 2965 */
2958 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { 2966 rtl_runtime_suspend_enable(tp, false);
2959 rtl_runtime_suspend_enable(tp, false);
2960 clear_bit(SELECTIVE_SUSPEND, &tp->flags);
2961 }
2962 2967
2963 tasklet_disable(&tp->tl); 2968 tasklet_disable(&tp->tl);
2964 tp->rtl_ops.down(tp); 2969 tp->rtl_ops.down(tp);
@@ -3205,7 +3210,7 @@ static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
3205 netif_device_detach(netdev); 3210 netif_device_detach(netdev);
3206 } 3211 }
3207 3212
3208 if (netif_running(netdev)) { 3213 if (netif_running(netdev) && test_bit(WORK_ENABLE, &tp->flags)) {
3209 clear_bit(WORK_ENABLE, &tp->flags); 3214 clear_bit(WORK_ENABLE, &tp->flags);
3210 usb_kill_urb(tp->intr_urb); 3215 usb_kill_urb(tp->intr_urb);
3211 tasklet_disable(&tp->tl); 3216 tasklet_disable(&tp->tl);
@@ -3253,6 +3258,8 @@ static int rtl8152_resume(struct usb_interface *intf)
3253 set_bit(WORK_ENABLE, &tp->flags); 3258 set_bit(WORK_ENABLE, &tp->flags);
3254 } 3259 }
3255 usb_submit_urb(tp->intr_urb, GFP_KERNEL); 3260 usb_submit_urb(tp->intr_urb, GFP_KERNEL);
3261 } else if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
3262 clear_bit(SELECTIVE_SUSPEND, &tp->flags);
3256 } 3263 }
3257 3264
3258 mutex_unlock(&tp->control); 3265 mutex_unlock(&tp->control);
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index 20615bbd693b..3a6770a65d78 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1052,6 +1052,21 @@ static void __handle_link_change(struct usbnet *dev)
1052 clear_bit(EVENT_LINK_CHANGE, &dev->flags); 1052 clear_bit(EVENT_LINK_CHANGE, &dev->flags);
1053} 1053}
1054 1054
1055static void usbnet_set_rx_mode(struct net_device *net)
1056{
1057 struct usbnet *dev = netdev_priv(net);
1058
1059 usbnet_defer_kevent(dev, EVENT_SET_RX_MODE);
1060}
1061
1062static void __handle_set_rx_mode(struct usbnet *dev)
1063{
1064 if (dev->driver_info->set_rx_mode)
1065 (dev->driver_info->set_rx_mode)(dev);
1066
1067 clear_bit(EVENT_SET_RX_MODE, &dev->flags);
1068}
1069
1055/* work that cannot be done in interrupt context uses keventd. 1070/* work that cannot be done in interrupt context uses keventd.
1056 * 1071 *
1057 * NOTE: with 2.5 we could do more of this using completion callbacks, 1072 * NOTE: with 2.5 we could do more of this using completion callbacks,
@@ -1157,6 +1172,10 @@ skip_reset:
1157 if (test_bit (EVENT_LINK_CHANGE, &dev->flags)) 1172 if (test_bit (EVENT_LINK_CHANGE, &dev->flags))
1158 __handle_link_change(dev); 1173 __handle_link_change(dev);
1159 1174
1175 if (test_bit (EVENT_SET_RX_MODE, &dev->flags))
1176 __handle_set_rx_mode(dev);
1177
1178
1160 if (dev->flags) 1179 if (dev->flags)
1161 netdev_dbg(dev->net, "kevent done, flags = 0x%lx\n", dev->flags); 1180 netdev_dbg(dev->net, "kevent done, flags = 0x%lx\n", dev->flags);
1162} 1181}
@@ -1525,6 +1544,7 @@ static const struct net_device_ops usbnet_netdev_ops = {
1525 .ndo_stop = usbnet_stop, 1544 .ndo_stop = usbnet_stop,
1526 .ndo_start_xmit = usbnet_start_xmit, 1545 .ndo_start_xmit = usbnet_start_xmit,
1527 .ndo_tx_timeout = usbnet_tx_timeout, 1546 .ndo_tx_timeout = usbnet_tx_timeout,
1547 .ndo_set_rx_mode = usbnet_set_rx_mode,
1528 .ndo_change_mtu = usbnet_change_mtu, 1548 .ndo_change_mtu = usbnet_change_mtu,
1529 .ndo_set_mac_address = eth_mac_addr, 1549 .ndo_set_mac_address = eth_mac_addr,
1530 .ndo_validate_addr = eth_validate_addr, 1550 .ndo_validate_addr = eth_validate_addr,
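.ndo_set_rx_mode is called in atomic context, while reprogramming a USB device's packet filter needs a sleeping control transfer, so the usbnet hunk defers the update to the existing kevent worker through a new flag bit. A generic sketch of that defer-via-flag pattern is below; the names are illustrative, not the usbnet symbols.

#include <linux/bitops.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>

#define MY_EVENT_SET_RX_MODE	5	/* illustrative flag bit */

struct my_usb_netdev {
	unsigned long	   flags;
	struct work_struct kevent;
};

/* Atomic context: just record the request and kick the worker. */
static void my_set_rx_mode(struct my_usb_netdev *dev)
{
	set_bit(MY_EVENT_SET_RX_MODE, &dev->flags);
	schedule_work(&dev->kevent);
}

static void my_update_hw_filter(struct my_usb_netdev *dev)
{
	/* Sleeping usb_control_msg() call would go here. */
}

/* Process context: safe to talk to the device. */
static void my_kevent(struct work_struct *work)
{
	struct my_usb_netdev *dev =
		container_of(work, struct my_usb_netdev, kevent);

	if (test_and_clear_bit(MY_EVENT_SET_RX_MODE, &dev->flags))
		my_update_hw_filter(dev);
}

The cdc_ether hunk earlier supplies the actual filter update (usbnet_cdc_update_filter) and wires it in through the new set_rx_mode driver_info callback.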
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index d75256bd1a6a..b0bc8ead47de 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -491,8 +491,17 @@ static void receive_buf(struct receive_queue *rq, void *buf, unsigned int len)
491 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; 491 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
492 break; 492 break;
493 case VIRTIO_NET_HDR_GSO_UDP: 493 case VIRTIO_NET_HDR_GSO_UDP:
494 {
495 static bool warned;
496
497 if (!warned) {
498 warned = true;
499 netdev_warn(dev,
500 "host using disabled UFO feature; please fix it\n");
501 }
494 skb_shinfo(skb)->gso_type = SKB_GSO_UDP; 502 skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
495 break; 503 break;
504 }
496 case VIRTIO_NET_HDR_GSO_TCPV6: 505 case VIRTIO_NET_HDR_GSO_TCPV6:
497 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6; 506 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
498 break; 507 break;
@@ -881,8 +890,6 @@ static int xmit_skb(struct send_queue *sq, struct sk_buff *skb)
881 hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4; 890 hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
882 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) 891 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
883 hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6; 892 hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
884 else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
885 hdr->hdr.gso_type = VIRTIO_NET_HDR_GSO_UDP;
886 else 893 else
887 BUG(); 894 BUG();
888 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN) 895 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
@@ -1666,6 +1673,40 @@ static const struct attribute_group virtio_net_mrg_rx_group = {
1666}; 1673};
1667#endif 1674#endif
1668 1675
1676static bool virtnet_fail_on_feature(struct virtio_device *vdev,
1677 unsigned int fbit,
1678 const char *fname, const char *dname)
1679{
1680 if (!virtio_has_feature(vdev, fbit))
1681 return false;
1682
1683 dev_err(&vdev->dev, "device advertises feature %s but not %s",
1684 fname, dname);
1685
1686 return true;
1687}
1688
1689#define VIRTNET_FAIL_ON(vdev, fbit, dbit) \
1690 virtnet_fail_on_feature(vdev, fbit, #fbit, dbit)
1691
1692static bool virtnet_validate_features(struct virtio_device *vdev)
1693{
1694 if (!virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ) &&
1695 (VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_RX,
1696 "VIRTIO_NET_F_CTRL_VQ") ||
1697 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_VLAN,
1698 "VIRTIO_NET_F_CTRL_VQ") ||
1699 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_GUEST_ANNOUNCE,
1700 "VIRTIO_NET_F_CTRL_VQ") ||
1701 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_MQ, "VIRTIO_NET_F_CTRL_VQ") ||
1702 VIRTNET_FAIL_ON(vdev, VIRTIO_NET_F_CTRL_MAC_ADDR,
1703 "VIRTIO_NET_F_CTRL_VQ"))) {
1704 return false;
1705 }
1706
1707 return true;
1708}
1709
1669static int virtnet_probe(struct virtio_device *vdev) 1710static int virtnet_probe(struct virtio_device *vdev)
1670{ 1711{
1671 int i, err; 1712 int i, err;
@@ -1673,6 +1714,9 @@ static int virtnet_probe(struct virtio_device *vdev)
1673 struct virtnet_info *vi; 1714 struct virtnet_info *vi;
1674 u16 max_queue_pairs; 1715 u16 max_queue_pairs;
1675 1716
1717 if (!virtnet_validate_features(vdev))
1718 return -EINVAL;
1719
1676 /* Find if host supports multiqueue virtio_net device */ 1720 /* Find if host supports multiqueue virtio_net device */
1677 err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ, 1721 err = virtio_cread_feature(vdev, VIRTIO_NET_F_MQ,
1678 struct virtio_net_config, 1722 struct virtio_net_config,
@@ -1705,7 +1749,7 @@ static int virtnet_probe(struct virtio_device *vdev)
1705 dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; 1749 dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
1706 1750
1707 if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { 1751 if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
1708 dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO 1752 dev->hw_features |= NETIF_F_TSO
1709 | NETIF_F_TSO_ECN | NETIF_F_TSO6; 1753 | NETIF_F_TSO_ECN | NETIF_F_TSO6;
1710 } 1754 }
1711 /* Individual feature bits: what can host handle? */ 1755 /* Individual feature bits: what can host handle? */
@@ -1715,11 +1759,9 @@ static int virtnet_probe(struct virtio_device *vdev)
1715 dev->hw_features |= NETIF_F_TSO6; 1759 dev->hw_features |= NETIF_F_TSO6;
1716 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN)) 1760 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
1717 dev->hw_features |= NETIF_F_TSO_ECN; 1761 dev->hw_features |= NETIF_F_TSO_ECN;
1718 if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
1719 dev->hw_features |= NETIF_F_UFO;
1720 1762
1721 if (gso) 1763 if (gso)
1722 dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO); 1764 dev->features |= dev->hw_features & NETIF_F_ALL_TSO;
1723 /* (!csum && gso) case will be fixed by register_netdev() */ 1765 /* (!csum && gso) case will be fixed by register_netdev() */
1724 } 1766 }
1725 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM)) 1767 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_CSUM))
@@ -1757,8 +1799,7 @@ static int virtnet_probe(struct virtio_device *vdev)
1757 /* If we can receive ANY GSO packets, we must allocate large ones. */ 1799 /* If we can receive ANY GSO packets, we must allocate large ones. */
1758 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) || 1800 if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
1759 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) || 1801 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
1760 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN) || 1802 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
1761 virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_UFO))
1762 vi->big_packets = true; 1803 vi->big_packets = true;
1763 1804
1764 if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF)) 1805 if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
@@ -1952,9 +1993,9 @@ static struct virtio_device_id id_table[] = {
1952static unsigned int features[] = { 1993static unsigned int features[] = {
1953 VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM, 1994 VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
1954 VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC, 1995 VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
1955 VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6, 1996 VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_TSO6,
1956 VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6, 1997 VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
1957 VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO, 1998 VIRTIO_NET_F_GUEST_ECN,
1958 VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ, 1999 VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
1959 VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN, 2000 VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
1960 VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ, 2001 VIRTIO_NET_F_GUEST_ANNOUNCE, VIRTIO_NET_F_MQ,
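virtnet_validate_features() encodes a dependency rule: the RX-filter, VLAN, announce, multiqueue and MAC-address control features are only usable through the control virtqueue, so a device offering any of them without VIRTIO_NET_F_CTRL_VQ is rejected at probe time instead of misbehaving later. The same check re-expressed as a compact, hosted-C illustration (feature bit values are hypothetical) looks like this:

#include <stdbool.h>
#include <stdio.h>

enum {
	F_CTRL_VQ   = 1u << 0,	/* hypothetical bit numbers */
	F_CTRL_RX   = 1u << 1,
	F_CTRL_VLAN = 1u << 2,
	F_MQ        = 1u << 3,
};

/* Reject feature sets where a dependent bit appears without its parent. */
static bool features_consistent(unsigned int feats)
{
	const unsigned int need_ctrl_vq = F_CTRL_RX | F_CTRL_VLAN | F_MQ;

	if (!(feats & F_CTRL_VQ) && (feats & need_ctrl_vq)) {
		fprintf(stderr, "feature offered without CTRL_VQ\n");
		return false;
	}
	return true;
}

For example, features_consistent(F_MQ) fails, while features_consistent(F_MQ | F_CTRL_VQ) passes.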
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index ca309820d39e..be4649a49c5e 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -67,12 +67,6 @@
67 67
68#define VXLAN_FLAGS 0x08000000 /* struct vxlanhdr.vx_flags required value. */ 68#define VXLAN_FLAGS 0x08000000 /* struct vxlanhdr.vx_flags required value. */
69 69
70/* VXLAN protocol header */
71struct vxlanhdr {
72 __be32 vx_flags;
73 __be32 vx_vni;
74};
75
76/* UDP port for VXLAN traffic. 70/* UDP port for VXLAN traffic.
77 * The IANA assigned port is 4789, but the Linux default is 8472 71 * The IANA assigned port is 4789, but the Linux default is 8472
78 * for compatibility with early adopters. 72 * for compatibility with early adopters.
@@ -275,13 +269,15 @@ static inline struct vxlan_rdst *first_remote_rtnl(struct vxlan_fdb *fdb)
275 return list_first_entry(&fdb->remotes, struct vxlan_rdst, list); 269 return list_first_entry(&fdb->remotes, struct vxlan_rdst, list);
276} 270}
277 271
278/* Find VXLAN socket based on network namespace and UDP port */ 272/* Find VXLAN socket based on network namespace, address family and UDP port */
279static struct vxlan_sock *vxlan_find_sock(struct net *net, __be16 port) 273static struct vxlan_sock *vxlan_find_sock(struct net *net,
274 sa_family_t family, __be16 port)
280{ 275{
281 struct vxlan_sock *vs; 276 struct vxlan_sock *vs;
282 277
283 hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) { 278 hlist_for_each_entry_rcu(vs, vs_head(net, port), hlist) {
284 if (inet_sk(vs->sock->sk)->inet_sport == port) 279 if (inet_sk(vs->sock->sk)->inet_sport == port &&
280 inet_sk(vs->sock->sk)->sk.sk_family == family)
285 return vs; 281 return vs;
286 } 282 }
287 return NULL; 283 return NULL;
@@ -300,11 +296,12 @@ static struct vxlan_dev *vxlan_vs_find_vni(struct vxlan_sock *vs, u32 id)
300} 296}
301 297
302/* Look up VNI in a per net namespace table */ 298/* Look up VNI in a per net namespace table */
303static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id, __be16 port) 299static struct vxlan_dev *vxlan_find_vni(struct net *net, u32 id,
300 sa_family_t family, __be16 port)
304{ 301{
305 struct vxlan_sock *vs; 302 struct vxlan_sock *vs;
306 303
307 vs = vxlan_find_sock(net, port); 304 vs = vxlan_find_sock(net, family, port);
308 if (!vs) 305 if (!vs)
309 return NULL; 306 return NULL;
310 307
@@ -621,6 +618,8 @@ static int vxlan_gro_complete(struct sk_buff *skb, int nhoff)
621 int vxlan_len = sizeof(struct vxlanhdr) + sizeof(struct ethhdr); 618 int vxlan_len = sizeof(struct vxlanhdr) + sizeof(struct ethhdr);
622 int err = -ENOSYS; 619 int err = -ENOSYS;
623 620
621 udp_tunnel_gro_complete(skb, nhoff);
622
624 eh = (struct ethhdr *)(skb->data + nhoff + sizeof(struct vxlanhdr)); 623 eh = (struct ethhdr *)(skb->data + nhoff + sizeof(struct vxlanhdr));
625 type = eh->h_proto; 624 type = eh->h_proto;
626 625
@@ -1771,7 +1770,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
1771 struct vxlan_dev *dst_vxlan; 1770 struct vxlan_dev *dst_vxlan;
1772 1771
1773 ip_rt_put(rt); 1772 ip_rt_put(rt);
1774 dst_vxlan = vxlan_find_vni(vxlan->net, vni, dst_port); 1773 dst_vxlan = vxlan_find_vni(vxlan->net, vni,
1774 dst->sa.sa_family, dst_port);
1775 if (!dst_vxlan) 1775 if (!dst_vxlan)
1776 goto tx_error; 1776 goto tx_error;
1777 vxlan_encap_bypass(skb, vxlan, dst_vxlan); 1777 vxlan_encap_bypass(skb, vxlan, dst_vxlan);
@@ -1825,7 +1825,8 @@ static void vxlan_xmit_one(struct sk_buff *skb, struct net_device *dev,
1825 struct vxlan_dev *dst_vxlan; 1825 struct vxlan_dev *dst_vxlan;
1826 1826
1827 dst_release(ndst); 1827 dst_release(ndst);
1828 dst_vxlan = vxlan_find_vni(vxlan->net, vni, dst_port); 1828 dst_vxlan = vxlan_find_vni(vxlan->net, vni,
1829 dst->sa.sa_family, dst_port);
1829 if (!dst_vxlan) 1830 if (!dst_vxlan)
1830 goto tx_error; 1831 goto tx_error;
1831 vxlan_encap_bypass(skb, vxlan, dst_vxlan); 1832 vxlan_encap_bypass(skb, vxlan, dst_vxlan);
@@ -1985,13 +1986,15 @@ static int vxlan_init(struct net_device *dev)
1985 struct vxlan_dev *vxlan = netdev_priv(dev); 1986 struct vxlan_dev *vxlan = netdev_priv(dev);
1986 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id); 1987 struct vxlan_net *vn = net_generic(vxlan->net, vxlan_net_id);
1987 struct vxlan_sock *vs; 1988 struct vxlan_sock *vs;
1989 bool ipv6 = vxlan->flags & VXLAN_F_IPV6;
1988 1990
1989 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); 1991 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
1990 if (!dev->tstats) 1992 if (!dev->tstats)
1991 return -ENOMEM; 1993 return -ENOMEM;
1992 1994
1993 spin_lock(&vn->sock_lock); 1995 spin_lock(&vn->sock_lock);
1994 vs = vxlan_find_sock(vxlan->net, vxlan->dst_port); 1996 vs = vxlan_find_sock(vxlan->net, ipv6 ? AF_INET6 : AF_INET,
1997 vxlan->dst_port);
1995 if (vs) { 1998 if (vs) {
1996 /* If we have a socket with same port already, reuse it */ 1999 /* If we have a socket with same port already, reuse it */
1997 atomic_inc(&vs->refcnt); 2000 atomic_inc(&vs->refcnt);
@@ -2303,9 +2306,9 @@ static struct socket *vxlan_create_sock(struct net *net, bool ipv6,
2303 if (ipv6) { 2306 if (ipv6) {
2304 udp_conf.family = AF_INET6; 2307 udp_conf.family = AF_INET6;
2305 udp_conf.use_udp6_tx_checksums = 2308 udp_conf.use_udp6_tx_checksums =
2306 !!(flags & VXLAN_F_UDP_ZERO_CSUM6_TX); 2309 !(flags & VXLAN_F_UDP_ZERO_CSUM6_TX);
2307 udp_conf.use_udp6_rx_checksums = 2310 udp_conf.use_udp6_rx_checksums =
2308 !!(flags & VXLAN_F_UDP_ZERO_CSUM6_RX); 2311 !(flags & VXLAN_F_UDP_ZERO_CSUM6_RX);
2309 } else { 2312 } else {
2310 udp_conf.family = AF_INET; 2313 udp_conf.family = AF_INET;
2311 udp_conf.local_ip.s_addr = INADDR_ANY; 2314 udp_conf.local_ip.s_addr = INADDR_ANY;
@@ -2382,6 +2385,7 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
2382{ 2385{
2383 struct vxlan_net *vn = net_generic(net, vxlan_net_id); 2386 struct vxlan_net *vn = net_generic(net, vxlan_net_id);
2384 struct vxlan_sock *vs; 2387 struct vxlan_sock *vs;
2388 bool ipv6 = flags & VXLAN_F_IPV6;
2385 2389
2386 vs = vxlan_socket_create(net, port, rcv, data, flags); 2390 vs = vxlan_socket_create(net, port, rcv, data, flags);
2387 if (!IS_ERR(vs)) 2391 if (!IS_ERR(vs))
@@ -2391,7 +2395,7 @@ struct vxlan_sock *vxlan_sock_add(struct net *net, __be16 port,
2391 return vs; 2395 return vs;
2392 2396
2393 spin_lock(&vn->sock_lock); 2397 spin_lock(&vn->sock_lock);
2394 vs = vxlan_find_sock(net, port); 2398 vs = vxlan_find_sock(net, ipv6 ? AF_INET6 : AF_INET, port);
2395 if (vs) { 2399 if (vs) {
2396 if (vs->rcv == rcv) 2400 if (vs->rcv == rcv)
2397 atomic_inc(&vs->refcnt); 2401 atomic_inc(&vs->refcnt);
@@ -2550,7 +2554,8 @@ static int vxlan_newlink(struct net *net, struct net_device *dev,
2550 nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX])) 2554 nla_get_u8(data[IFLA_VXLAN_UDP_ZERO_CSUM6_RX]))
2551 vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX; 2555 vxlan->flags |= VXLAN_F_UDP_ZERO_CSUM6_RX;
2552 2556
2553 if (vxlan_find_vni(net, vni, vxlan->dst_port)) { 2557 if (vxlan_find_vni(net, vni, use_ipv6 ? AF_INET6 : AF_INET,
2558 vxlan->dst_port)) {
2554 pr_info("duplicate VNI %u\n", vni); 2559 pr_info("duplicate VNI %u\n", vni);
2555 return -EEXIST; 2560 return -EEXIST;
2556 } 2561 }
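
The vxlan hunks above thread an address family into the socket and VNI lookups, derived from the VXLAN_F_IPV6 flag. A minimal standalone sketch of that selection follows (the flag value below is illustrative, not taken from the kernel header):

#include <stdio.h>
#include <sys/socket.h>

#define VXLAN_F_IPV6 (1u << 4)	/* illustrative bit position, not the kernel's */

/* Mirrors the "ipv6 ? AF_INET6 : AF_INET" choice made before
 * vxlan_find_sock()/vxlan_find_vni() in the patch above. */
static int vxlan_lookup_family(unsigned int flags)
{
	return (flags & VXLAN_F_IPV6) ? AF_INET6 : AF_INET;
}

int main(void)
{
	printf("ipv4 tunnel -> family %d\n", vxlan_lookup_family(0));
	printf("ipv6 tunnel -> family %d\n", vxlan_lookup_family(VXLAN_F_IPV6));
	return 0;
}
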
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index e5ba6faf3281..86907e5ba6ca 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -80,6 +80,7 @@ struct reg_dmn_pair_mapping {
80 80
81struct ath_regulatory { 81struct ath_regulatory {
82 char alpha2[2]; 82 char alpha2[2];
83 enum nl80211_dfs_regions region;
83 u16 country_code; 84 u16 country_code;
84 u16 max_power_level; 85 u16 max_power_level;
85 u16 current_rd; 86 u16 current_rd;
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
index 697c4ae90af0..1e8ea5e4d4ca 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c
@@ -664,6 +664,19 @@ static void ar9003_hw_override_ini(struct ath_hw *ah)
664 ah->enabled_cals |= TX_CL_CAL; 664 ah->enabled_cals |= TX_CL_CAL;
665 else 665 else
666 ah->enabled_cals &= ~TX_CL_CAL; 666 ah->enabled_cals &= ~TX_CL_CAL;
667
668 if (AR_SREV_9340(ah) || AR_SREV_9531(ah) || AR_SREV_9550(ah)) {
669 if (ah->is_clk_25mhz) {
670 REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x17c << 1);
671 REG_WRITE(ah, AR_SLP32_MODE, 0x0010f3d7);
672 REG_WRITE(ah, AR_SLP32_INC, 0x0001e7ae);
673 } else {
674 REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x261 << 1);
675 REG_WRITE(ah, AR_SLP32_MODE, 0x0010f400);
676 REG_WRITE(ah, AR_SLP32_INC, 0x0001e800);
677 }
678 udelay(100);
679 }
667} 680}
668 681
669static void ar9003_hw_prog_ini(struct ath_hw *ah, 682static void ar9003_hw_prog_ini(struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath/ath9k/common.c b/drivers/net/wireless/ath/ath9k/common.c
index c6dd7f1fed65..33b0c7aef2ea 100644
--- a/drivers/net/wireless/ath/ath9k/common.c
+++ b/drivers/net/wireless/ath/ath9k/common.c
@@ -368,11 +368,11 @@ void ath9k_cmn_update_txpow(struct ath_hw *ah, u16 cur_txpow,
368{ 368{
369 struct ath_regulatory *reg = ath9k_hw_regulatory(ah); 369 struct ath_regulatory *reg = ath9k_hw_regulatory(ah);
370 370
371 if (reg->power_limit != new_txpow) { 371 if (reg->power_limit != new_txpow)
372 ath9k_hw_set_txpowerlimit(ah, new_txpow, false); 372 ath9k_hw_set_txpowerlimit(ah, new_txpow, false);
373 /* read back in case value is clamped */ 373
374 *txpower = reg->max_power_level; 374 /* read back in case value is clamped */
375 } 375 *txpower = reg->max_power_level;
376} 376}
377EXPORT_SYMBOL(ath9k_cmn_update_txpow); 377EXPORT_SYMBOL(ath9k_cmn_update_txpow);
378 378
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 46f20a309b5f..5c45e787814e 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -455,7 +455,7 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf,
455 "%2d %2x %1x %2x %2x\n", 455 "%2d %2x %1x %2x %2x\n",
456 i, (*qcuBase & (0x7 << qcuOffset)) >> qcuOffset, 456 i, (*qcuBase & (0x7 << qcuOffset)) >> qcuOffset,
457 (*qcuBase & (0x8 << qcuOffset)) >> (qcuOffset + 3), 457 (*qcuBase & (0x8 << qcuOffset)) >> (qcuOffset + 3),
458 val[2] & (0x7 << (i * 3)) >> (i * 3), 458 (val[2] & (0x7 << (i * 3))) >> (i * 3),
459 (*dcuBase & (0x1f << dcuOffset)) >> dcuOffset); 459 (*dcuBase & (0x1f << dcuOffset)) >> dcuOffset);
460 } 460 }
461 461
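
The debug.c hunk is an operator-precedence fix: in C, >> binds tighter than &, so the unparenthesized expression masked val[2] with an already-shifted constant. A small standalone demonstration of the difference:

#include <stdio.h>

int main(void)
{
	unsigned int val = 0x38;	/* a 3-bit field stored at bit offset 3 */
	int i = 1;			/* field index, offset = i * 3 */

	/* Old form: parses as val & ((0x7 << 3) >> 3) == val & 0x7 == 0. */
	unsigned int before = val & (0x7 << (i * 3)) >> (i * 3);
	/* Fixed form: extract the field first, then shift it down -> 7. */
	unsigned int after = (val & (0x7 << (i * 3))) >> (i * 3);

	printf("before = %u, after = %u\n", before, after);
	return 0;
}
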
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 8be4b1453394..2ad605760e21 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -861,19 +861,6 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
861 udelay(RTC_PLL_SETTLE_DELAY); 861 udelay(RTC_PLL_SETTLE_DELAY);
862 862
863 REG_WRITE(ah, AR_RTC_SLEEP_CLK, AR_RTC_FORCE_DERIVED_CLK); 863 REG_WRITE(ah, AR_RTC_SLEEP_CLK, AR_RTC_FORCE_DERIVED_CLK);
864
865 if (AR_SREV_9340(ah) || AR_SREV_9550(ah)) {
866 if (ah->is_clk_25mhz) {
867 REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x17c << 1);
868 REG_WRITE(ah, AR_SLP32_MODE, 0x0010f3d7);
869 REG_WRITE(ah, AR_SLP32_INC, 0x0001e7ae);
870 } else {
871 REG_WRITE(ah, AR_RTC_DERIVED_CLK, 0x261 << 1);
872 REG_WRITE(ah, AR_SLP32_MODE, 0x0010f400);
873 REG_WRITE(ah, AR_SLP32_INC, 0x0001e800);
874 }
875 udelay(100);
876 }
877} 864}
878 865
879static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah, 866static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index 156a944134dc..3bd030494986 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -734,6 +734,32 @@ static const struct ieee80211_iface_combination if_comb[] = {
734#endif 734#endif
735}; 735};
736 736
737#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
738static void ath9k_set_mcc_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
739{
740 struct ath_hw *ah = sc->sc_ah;
741 struct ath_common *common = ath9k_hw_common(ah);
742
743 if (!ath9k_is_chanctx_enabled())
744 return;
745
746 hw->flags |= IEEE80211_HW_QUEUE_CONTROL;
747 hw->queues = ATH9K_NUM_TX_QUEUES;
748 hw->offchannel_tx_hw_queue = hw->queues - 1;
749 hw->wiphy->interface_modes &= ~ BIT(NL80211_IFTYPE_WDS);
750 hw->wiphy->iface_combinations = if_comb_multi;
751 hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb_multi);
752 hw->wiphy->max_scan_ssids = 255;
753 hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
754 hw->wiphy->max_remain_on_channel_duration = 10000;
755 hw->chanctx_data_size = sizeof(void *);
756 hw->extra_beacon_tailroom =
757 sizeof(struct ieee80211_p2p_noa_attr) + 9;
758
759 ath_dbg(common, CHAN_CTX, "Use channel contexts\n");
760}
761#endif /* CONFIG_ATH9K_CHANNEL_CONTEXT */
762
737static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) 763static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
738{ 764{
739 struct ath_hw *ah = sc->sc_ah; 765 struct ath_hw *ah = sc->sc_ah;
@@ -746,7 +772,6 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
746 IEEE80211_HW_SPECTRUM_MGMT | 772 IEEE80211_HW_SPECTRUM_MGMT |
747 IEEE80211_HW_REPORTS_TX_ACK_STATUS | 773 IEEE80211_HW_REPORTS_TX_ACK_STATUS |
748 IEEE80211_HW_SUPPORTS_RC_TABLE | 774 IEEE80211_HW_SUPPORTS_RC_TABLE |
749 IEEE80211_HW_QUEUE_CONTROL |
750 IEEE80211_HW_SUPPORTS_HT_CCK_RATES; 775 IEEE80211_HW_SUPPORTS_HT_CCK_RATES;
751 776
752 if (ath9k_ps_enable) 777 if (ath9k_ps_enable)
@@ -781,24 +806,6 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
781 hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb); 806 hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb);
782 } 807 }
783 808
784#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
785
786 if (ath9k_is_chanctx_enabled()) {
787 hw->wiphy->interface_modes &= ~ BIT(NL80211_IFTYPE_WDS);
788 hw->wiphy->iface_combinations = if_comb_multi;
789 hw->wiphy->n_iface_combinations = ARRAY_SIZE(if_comb_multi);
790 hw->wiphy->max_scan_ssids = 255;
791 hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
792 hw->wiphy->max_remain_on_channel_duration = 10000;
793 hw->chanctx_data_size = sizeof(void *);
794 hw->extra_beacon_tailroom =
795 sizeof(struct ieee80211_p2p_noa_attr) + 9;
796
797 ath_dbg(common, CHAN_CTX, "Use channel contexts\n");
798 }
799
800#endif /* CONFIG_ATH9K_CHANNEL_CONTEXT */
801
802 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; 809 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
803 810
804 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; 811 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
@@ -808,12 +815,7 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
808 hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH; 815 hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
809 hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD; 816 hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
810 817
811 /* allow 4 queues per channel context + 818 hw->queues = 4;
812 * 1 cab queue + 1 offchannel tx queue
813 */
814 hw->queues = ATH9K_NUM_TX_QUEUES;
815 /* last queue for offchannel */
816 hw->offchannel_tx_hw_queue = hw->queues - 1;
817 hw->max_rates = 4; 819 hw->max_rates = 4;
818 hw->max_listen_interval = 10; 820 hw->max_listen_interval = 10;
819 hw->max_rate_tries = 10; 821 hw->max_rate_tries = 10;
@@ -837,6 +839,9 @@ static void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
837 hw->wiphy->bands[IEEE80211_BAND_5GHZ] = 839 hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
838 &common->sbands[IEEE80211_BAND_5GHZ]; 840 &common->sbands[IEEE80211_BAND_5GHZ];
839 841
842#ifdef CONFIG_ATH9K_CHANNEL_CONTEXT
843 ath9k_set_mcc_capab(sc, hw);
844#endif
840 ath9k_init_wow(hw); 845 ath9k_init_wow(hw);
841 ath9k_cmn_reload_chainmask(ah); 846 ath9k_cmn_reload_chainmask(ah);
842 847
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 6f6a974f7fdb..4f18a6be0c7d 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -974,9 +974,8 @@ void ath9k_calculate_iter_data(struct ath_softc *sc,
974 struct ath_vif *avp; 974 struct ath_vif *avp;
975 975
976 /* 976 /*
977 * Pick the MAC address of the first interface as the new hardware 977 * The hardware will use primary station addr together with the
978 * MAC address. The hardware will use it together with the BSSID mask 978 * BSSID mask when matching addresses.
979 * when matching addresses.
980 */ 979 */
981 memset(iter_data, 0, sizeof(*iter_data)); 980 memset(iter_data, 0, sizeof(*iter_data));
982 memset(&iter_data->mask, 0xff, ETH_ALEN); 981 memset(&iter_data->mask, 0xff, ETH_ALEN);
@@ -1162,6 +1161,9 @@ static void ath9k_assign_hw_queues(struct ieee80211_hw *hw,
1162{ 1161{
1163 int i; 1162 int i;
1164 1163
1164 if (!ath9k_is_chanctx_enabled())
1165 return;
1166
1165 for (i = 0; i < IEEE80211_NUM_ACS; i++) 1167 for (i = 0; i < IEEE80211_NUM_ACS; i++)
1166 vif->hw_queue[i] = i; 1168 vif->hw_queue[i] = i;
1167 1169
@@ -1202,6 +1204,8 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
1202 list_add_tail(&avp->list, &avp->chanctx->vifs); 1204 list_add_tail(&avp->list, &avp->chanctx->vifs);
1203 } 1205 }
1204 1206
1207 ath9k_calculate_summary_state(sc, avp->chanctx);
1208
1205 ath9k_assign_hw_queues(hw, vif); 1209 ath9k_assign_hw_queues(hw, vif);
1206 1210
1207 an->sc = sc; 1211 an->sc = sc;
@@ -1271,6 +1275,8 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
1271 1275
1272 ath_tx_node_cleanup(sc, &avp->mcast_node); 1276 ath_tx_node_cleanup(sc, &avp->mcast_node);
1273 1277
1278 ath9k_calculate_summary_state(sc, avp->chanctx);
1279
1274 mutex_unlock(&sc->mutex); 1280 mutex_unlock(&sc->mutex);
1275} 1281}
1276 1282
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 493a183d0aaf..d6e54a3c88f6 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -169,7 +169,10 @@ static void ath_txq_skb_done(struct ath_softc *sc, struct ath_txq *txq,
169 169
170 if (txq->stopped && 170 if (txq->stopped &&
171 txq->pending_frames < sc->tx.txq_max_pending[q]) { 171 txq->pending_frames < sc->tx.txq_max_pending[q]) {
172 ieee80211_wake_queue(sc->hw, info->hw_queue); 172 if (ath9k_is_chanctx_enabled())
173 ieee80211_wake_queue(sc->hw, info->hw_queue);
174 else
175 ieee80211_wake_queue(sc->hw, q);
173 txq->stopped = false; 176 txq->stopped = false;
174 } 177 }
175} 178}
@@ -2247,7 +2250,10 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
2247 fi->txq = q; 2250 fi->txq = q;
2248 if (++txq->pending_frames > sc->tx.txq_max_pending[q] && 2251 if (++txq->pending_frames > sc->tx.txq_max_pending[q] &&
2249 !txq->stopped) { 2252 !txq->stopped) {
2250 ieee80211_stop_queue(sc->hw, info->hw_queue); 2253 if (ath9k_is_chanctx_enabled())
2254 ieee80211_stop_queue(sc->hw, info->hw_queue);
2255 else
2256 ieee80211_stop_queue(sc->hw, q);
2251 txq->stopped = true; 2257 txq->stopped = true;
2252 } 2258 }
2253 } 2259 }
diff --git a/drivers/net/wireless/ath/regd.c b/drivers/net/wireless/ath/regd.c
index 415393dfb6fc..06ea6cc9e30a 100644
--- a/drivers/net/wireless/ath/regd.c
+++ b/drivers/net/wireless/ath/regd.c
@@ -515,6 +515,7 @@ void ath_reg_notifier_apply(struct wiphy *wiphy,
515 if (!request) 515 if (!request)
516 return; 516 return;
517 517
518 reg->region = request->dfs_region;
518 switch (request->initiator) { 519 switch (request->initiator) {
519 case NL80211_REGDOM_SET_BY_CORE: 520 case NL80211_REGDOM_SET_BY_CORE:
520 /* 521 /*
@@ -779,6 +780,19 @@ u32 ath_regd_get_band_ctl(struct ath_regulatory *reg,
779 return SD_NO_CTL; 780 return SD_NO_CTL;
780 } 781 }
781 782
783 if (ath_regd_get_eepromRD(reg) == CTRY_DEFAULT) {
784 switch (reg->region) {
785 case NL80211_DFS_FCC:
786 return CTL_FCC;
787 case NL80211_DFS_ETSI:
788 return CTL_ETSI;
789 case NL80211_DFS_JP:
790 return CTL_MKK;
791 default:
792 break;
793 }
794 }
795
782 switch (band) { 796 switch (band) {
783 case IEEE80211_BAND_2GHZ: 797 case IEEE80211_BAND_2GHZ:
784 return reg->regpair->reg_2ghz_ctl; 798 return reg->regpair->reg_2ghz_ctl;
diff --git a/drivers/net/wireless/b43/phy_common.c b/drivers/net/wireless/b43/phy_common.c
index 1dfc682a8055..ee27b06074e1 100644
--- a/drivers/net/wireless/b43/phy_common.c
+++ b/drivers/net/wireless/b43/phy_common.c
@@ -300,9 +300,7 @@ void b43_phy_write(struct b43_wldev *dev, u16 reg, u16 value)
300 300
301void b43_phy_copy(struct b43_wldev *dev, u16 destreg, u16 srcreg) 301void b43_phy_copy(struct b43_wldev *dev, u16 destreg, u16 srcreg)
302{ 302{
303 assert_mac_suspended(dev); 303 b43_phy_write(dev, destreg, b43_phy_read(dev, srcreg));
304 dev->phy.ops->phy_write(dev, destreg,
305 dev->phy.ops->phy_read(dev, srcreg));
306} 304}
307 305
308void b43_phy_mask(struct b43_wldev *dev, u16 offset, u16 mask) 306void b43_phy_mask(struct b43_wldev *dev, u16 offset, u16 mask)
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
index f55f625fd06b..d20d4e6f391a 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/dhd_sdio.c
@@ -670,7 +670,6 @@ static int brcmf_sdio_get_fwnames(struct brcmf_chip *ci,
670 struct brcmf_sdio_dev *sdiodev) 670 struct brcmf_sdio_dev *sdiodev)
671{ 671{
672 int i; 672 int i;
673 uint fw_len, nv_len;
674 char end; 673 char end;
675 674
676 for (i = 0; i < ARRAY_SIZE(brcmf_fwname_data); i++) { 675 for (i = 0; i < ARRAY_SIZE(brcmf_fwname_data); i++) {
@@ -684,25 +683,25 @@ static int brcmf_sdio_get_fwnames(struct brcmf_chip *ci,
684 return -ENODEV; 683 return -ENODEV;
685 } 684 }
686 685
687 fw_len = sizeof(sdiodev->fw_name) - 1;
688 nv_len = sizeof(sdiodev->nvram_name) - 1;
689 /* check if firmware path is provided by module parameter */ 686 /* check if firmware path is provided by module parameter */
690 if (brcmf_firmware_path[0] != '\0') { 687 if (brcmf_firmware_path[0] != '\0') {
691 strncpy(sdiodev->fw_name, brcmf_firmware_path, fw_len); 688 strlcpy(sdiodev->fw_name, brcmf_firmware_path,
692 strncpy(sdiodev->nvram_name, brcmf_firmware_path, nv_len); 689 sizeof(sdiodev->fw_name));
693 fw_len -= strlen(sdiodev->fw_name); 690 strlcpy(sdiodev->nvram_name, brcmf_firmware_path,
694 nv_len -= strlen(sdiodev->nvram_name); 691 sizeof(sdiodev->nvram_name));
695 692
696 end = brcmf_firmware_path[strlen(brcmf_firmware_path) - 1]; 693 end = brcmf_firmware_path[strlen(brcmf_firmware_path) - 1];
697 if (end != '/') { 694 if (end != '/') {
698 strncat(sdiodev->fw_name, "/", fw_len); 695 strlcat(sdiodev->fw_name, "/",
699 strncat(sdiodev->nvram_name, "/", nv_len); 696 sizeof(sdiodev->fw_name));
700 fw_len--; 697 strlcat(sdiodev->nvram_name, "/",
701 nv_len--; 698 sizeof(sdiodev->nvram_name));
702 } 699 }
703 } 700 }
704 strncat(sdiodev->fw_name, brcmf_fwname_data[i].bin, fw_len); 701 strlcat(sdiodev->fw_name, brcmf_fwname_data[i].bin,
705 strncat(sdiodev->nvram_name, brcmf_fwname_data[i].nv, nv_len); 702 sizeof(sdiodev->fw_name));
703 strlcat(sdiodev->nvram_name, brcmf_fwname_data[i].nv,
704 sizeof(sdiodev->nvram_name));
706 705
707 return 0; 706 return 0;
708} 707}
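
The dhd_sdio.c hunk drops the hand-maintained fw_len/nv_len counters in favour of strlcpy()/strlcat() sized by the destination buffer. A userspace sketch of that sizing pattern follows; the local my_strlcat() stands in for the kernel helper (glibc does not provide strlcat), and the firmware path is purely illustrative:

#include <stdio.h>
#include <string.h>

/* Minimal strlcat work-alike: append src to dst without ever writing more
 * than size bytes total, always NUL-terminating when there is room. */
static size_t my_strlcat(char *dst, const char *src, size_t size)
{
	size_t dlen = strnlen(dst, size);
	size_t slen = strlen(src);
	size_t copy = slen;

	if (dlen == size)
		return size + slen;	/* dst was not terminated within size */
	if (copy >= size - dlen)
		copy = size - dlen - 1;
	memcpy(dst + dlen, src, copy);
	dst[dlen + copy] = '\0';
	return dlen + slen;		/* length it tried to create */
}

int main(void)
{
	char fw_name[32] = "";

	/* Every call is bounded by sizeof(fw_name); no separate remaining-length
	 * counter has to be decremented and kept in sync, which is the bug class
	 * the patch removes. */
	my_strlcat(fw_name, "/lib/firmware/", sizeof(fw_name));
	my_strlcat(fw_name, "example-fw.bin", sizeof(fw_name));
	printf("%s (len %zu)\n", fw_name, strlen(fw_name));
	return 0;
}
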
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/of.c b/drivers/net/wireless/brcm80211/brcmfmac/of.c
index f05f5270fec1..927bffd5be64 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/of.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/of.c
@@ -40,8 +40,8 @@ void brcmf_of_probe(struct brcmf_sdio_dev *sdiodev)
40 return; 40 return;
41 41
42 irq = irq_of_parse_and_map(np, 0); 42 irq = irq_of_parse_and_map(np, 0);
43 if (irq < 0) { 43 if (!irq) {
44 brcmf_err("interrupt could not be mapped: err=%d\n", irq); 44 brcmf_err("interrupt could not be mapped\n");
45 devm_kfree(dev, sdiodev->pdata); 45 devm_kfree(dev, sdiodev->pdata);
46 return; 46 return;
47 } 47 }
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c
index 8c0632ec9f7a..16fef3382019 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/pcie.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/pcie.c
@@ -19,10 +19,10 @@
19#include <linux/pci.h> 19#include <linux/pci.h>
20#include <linux/vmalloc.h> 20#include <linux/vmalloc.h>
21#include <linux/delay.h> 21#include <linux/delay.h>
22#include <linux/unaligned/access_ok.h>
23#include <linux/interrupt.h> 22#include <linux/interrupt.h>
24#include <linux/bcma/bcma.h> 23#include <linux/bcma/bcma.h>
25#include <linux/sched.h> 24#include <linux/sched.h>
25#include <asm/unaligned.h>
26 26
27#include <soc.h> 27#include <soc.h>
28#include <chipcommon.h> 28#include <chipcommon.h>
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index dc135915470d..875d1142c8b0 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -669,10 +669,12 @@ static int brcmf_usb_dl_cmd(struct brcmf_usbdev_info *devinfo, u8 cmd,
669 goto finalize; 669 goto finalize;
670 } 670 }
671 671
672 if (!brcmf_usb_ioctl_resp_wait(devinfo)) 672 if (!brcmf_usb_ioctl_resp_wait(devinfo)) {
673 usb_kill_urb(devinfo->ctl_urb);
673 ret = -ETIMEDOUT; 674 ret = -ETIMEDOUT;
674 else 675 } else {
675 memcpy(buffer, tmpbuf, buflen); 676 memcpy(buffer, tmpbuf, buflen);
677 }
676 678
677finalize: 679finalize:
678 kfree(tmpbuf); 680 kfree(tmpbuf);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
index 28fa25b509db..39b45c038a93 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/wl_cfg80211.c
@@ -299,6 +299,7 @@ static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf,
299 primary_offset = ch->center_freq1 - ch->chan->center_freq; 299 primary_offset = ch->center_freq1 - ch->chan->center_freq;
300 switch (ch->width) { 300 switch (ch->width) {
301 case NL80211_CHAN_WIDTH_20: 301 case NL80211_CHAN_WIDTH_20:
302 case NL80211_CHAN_WIDTH_20_NOHT:
302 ch_inf.bw = BRCMU_CHAN_BW_20; 303 ch_inf.bw = BRCMU_CHAN_BW_20;
303 WARN_ON(primary_offset != 0); 304 WARN_ON(primary_offset != 0);
304 break; 305 break;
@@ -323,6 +324,10 @@ static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf,
323 ch_inf.sb = BRCMU_CHAN_SB_LU; 324 ch_inf.sb = BRCMU_CHAN_SB_LU;
324 } 325 }
325 break; 326 break;
327 case NL80211_CHAN_WIDTH_80P80:
328 case NL80211_CHAN_WIDTH_160:
329 case NL80211_CHAN_WIDTH_5:
330 case NL80211_CHAN_WIDTH_10:
326 default: 331 default:
327 WARN_ON_ONCE(1); 332 WARN_ON_ONCE(1);
328 } 333 }
@@ -333,6 +338,7 @@ static u16 chandef_to_chanspec(struct brcmu_d11inf *d11inf,
333 case IEEE80211_BAND_5GHZ: 338 case IEEE80211_BAND_5GHZ:
334 ch_inf.band = BRCMU_CHAN_BAND_5G; 339 ch_inf.band = BRCMU_CHAN_BAND_5G;
335 break; 340 break;
341 case IEEE80211_BAND_60GHZ:
336 default: 342 default:
337 WARN_ON_ONCE(1); 343 WARN_ON_ONCE(1);
338 } 344 }
diff --git a/drivers/net/wireless/iwlwifi/dvm/mac80211.c b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
index 2364a3c09b9e..cae692ff1013 100644
--- a/drivers/net/wireless/iwlwifi/dvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/dvm/mac80211.c
@@ -1095,6 +1095,7 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1095 u32 queues, bool drop) 1095 u32 queues, bool drop)
1096{ 1096{
1097 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw); 1097 struct iwl_priv *priv = IWL_MAC80211_GET_DVM(hw);
1098 u32 scd_queues;
1098 1099
1099 mutex_lock(&priv->mutex); 1100 mutex_lock(&priv->mutex);
1100 IWL_DEBUG_MAC80211(priv, "enter\n"); 1101 IWL_DEBUG_MAC80211(priv, "enter\n");
@@ -1108,18 +1109,19 @@ static void iwlagn_mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
1108 goto done; 1109 goto done;
1109 } 1110 }
1110 1111
1111 /* 1112 scd_queues = BIT(priv->cfg->base_params->num_of_queues) - 1;
1112 * mac80211 will not push any more frames for transmit 1113 scd_queues &= ~(BIT(IWL_IPAN_CMD_QUEUE_NUM) |
1113 * until the flush is completed 1114 BIT(IWL_DEFAULT_CMD_QUEUE_NUM));
1114 */ 1115
1115 if (drop) { 1116 if (vif)
1116 IWL_DEBUG_MAC80211(priv, "send flush command\n"); 1117 scd_queues &= ~BIT(vif->hw_queue[IEEE80211_AC_VO]);
1117 if (iwlagn_txfifo_flush(priv, 0)) { 1118
1118 IWL_ERR(priv, "flush request fail\n"); 1119 IWL_DEBUG_TX_QUEUES(priv, "Flushing SCD queues: 0x%x\n", scd_queues);
1119 goto done; 1120 if (iwlagn_txfifo_flush(priv, scd_queues)) {
1120 } 1121 IWL_ERR(priv, "flush request fail\n");
1122 goto done;
1121 } 1123 }
1122 IWL_DEBUG_MAC80211(priv, "wait transmit/flush all frames\n"); 1124 IWL_DEBUG_TX_QUEUES(priv, "wait transmit/flush all frames\n");
1123 iwl_trans_wait_tx_queue_empty(priv->trans, 0xffffffff); 1125 iwl_trans_wait_tx_queue_empty(priv->trans, 0xffffffff);
1124done: 1126done:
1125 mutex_unlock(&priv->mutex); 1127 mutex_unlock(&priv->mutex);
diff --git a/drivers/net/wireless/iwlwifi/iwl-8000.c b/drivers/net/wireless/iwlwifi/iwl-8000.c
index e4351487ca72..d2b7234b1c73 100644
--- a/drivers/net/wireless/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-8000.c
@@ -82,7 +82,8 @@
82#define IWL8000_TX_POWER_VERSION 0xffff /* meaningless */ 82#define IWL8000_TX_POWER_VERSION 0xffff /* meaningless */
83 83
84#define IWL8000_FW_PRE "iwlwifi-8000" 84#define IWL8000_FW_PRE "iwlwifi-8000"
85#define IWL8000_MODULE_FIRMWARE(api) IWL8000_FW_PRE __stringify(api) ".ucode" 85#define IWL8000_MODULE_FIRMWARE(api) \
86 IWL8000_FW_PRE "-" __stringify(api) ".ucode"
86 87
87#define NVM_HW_SECTION_NUM_FAMILY_8000 10 88#define NVM_HW_SECTION_NUM_FAMILY_8000 10
88#define DEFAULT_NVM_FILE_FAMILY_8000 "iwl_nvm_8000.bin" 89#define DEFAULT_NVM_FILE_FAMILY_8000 "iwl_nvm_8000.bin"
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index 4f6e66892acc..b894a84e8393 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -155,6 +155,7 @@ enum iwl_ucode_tlv_api {
155 * @IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT: supports Quiet Period requests 155 * @IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT: supports Quiet Period requests
156 * @IWL_UCODE_TLV_CAPA_DQA_SUPPORT: supports dynamic queue allocation (DQA), 156 * @IWL_UCODE_TLV_CAPA_DQA_SUPPORT: supports dynamic queue allocation (DQA),
157 * which also implies support for the scheduler configuration command 157 * which also implies support for the scheduler configuration command
158 * @IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT: supports Hot Spot Command
158 */ 159 */
159enum iwl_ucode_tlv_capa { 160enum iwl_ucode_tlv_capa {
160 IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = BIT(0), 161 IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = BIT(0),
@@ -163,6 +164,7 @@ enum iwl_ucode_tlv_capa {
163 IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT = BIT(10), 164 IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT = BIT(10),
164 IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT = BIT(11), 165 IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT = BIT(11),
165 IWL_UCODE_TLV_CAPA_DQA_SUPPORT = BIT(12), 166 IWL_UCODE_TLV_CAPA_DQA_SUPPORT = BIT(12),
167 IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT = BIT(18),
166}; 168};
167 169
168/* The default calibrate table size if not specified by firmware file */ 170/* The default calibrate table size if not specified by firmware file */
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans.h b/drivers/net/wireless/iwlwifi/iwl-trans.h
index 9eb85249e89c..d8fc548c0d6c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans.h
@@ -563,6 +563,7 @@ enum iwl_trans_state {
563 * Set during transport allocation. 563 * Set during transport allocation.
564 * @hw_id_str: a string with info about HW ID. Set during transport allocation. 564 * @hw_id_str: a string with info about HW ID. Set during transport allocation.
565 * @pm_support: set to true in start_hw if link pm is supported 565 * @pm_support: set to true in start_hw if link pm is supported
566 * @ltr_enabled: set to true if the LTR is enabled
566 * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only. 567 * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
567 * The user should use iwl_trans_{alloc,free}_tx_cmd. 568 * The user should use iwl_trans_{alloc,free}_tx_cmd.
568 * @dev_cmd_headroom: room needed for the transport's private use before the 569 * @dev_cmd_headroom: room needed for the transport's private use before the
@@ -589,6 +590,7 @@ struct iwl_trans {
589 u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size; 590 u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;
590 591
591 bool pm_support; 592 bool pm_support;
593 bool ltr_enabled;
592 594
593 /* The following fields are internal only */ 595 /* The following fields are internal only */
594 struct kmem_cache *dev_cmd_pool; 596 struct kmem_cache *dev_cmd_pool;
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex.c b/drivers/net/wireless/iwlwifi/mvm/coex.c
index 8df2021f9856..da2ffb785194 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex.c
@@ -303,8 +303,8 @@ static const __le64 iwl_ci_mask[][3] = {
303}; 303};
304 304
305static const __le32 iwl_bt_mprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE] = { 305static const __le32 iwl_bt_mprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE] = {
306 cpu_to_le32(0x28412201), 306 cpu_to_le32(0x2e402280),
307 cpu_to_le32(0x11118451), 307 cpu_to_le32(0x7711a751),
308}; 308};
309 309
310struct corunning_block_luts { 310struct corunning_block_luts {
diff --git a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
index 585c0ab4a3ec..8a1d2f33d5b7 100644
--- a/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
+++ b/drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
@@ -291,8 +291,8 @@ static const __le64 iwl_ci_mask[][3] = {
291}; 291};
292 292
293static const __le32 iwl_bt_mprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE] = { 293static const __le32 iwl_bt_mprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE] = {
294 cpu_to_le32(0x28412201), 294 cpu_to_le32(0x2e402280),
295 cpu_to_le32(0x11118451), 295 cpu_to_le32(0x7711a751),
296}; 296};
297 297
298struct corunning_block_luts { 298struct corunning_block_luts {
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
index 27dd86395b39..2fd8ad4633e0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
@@ -68,13 +68,46 @@
68 68
69/* Power Management Commands, Responses, Notifications */ 69/* Power Management Commands, Responses, Notifications */
70 70
71/**
72 * enum iwl_ltr_config_flags - masks for LTR config command flags
73 * @LTR_CFG_FLAG_FEATURE_ENABLE: Feature operational status
74 * @LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS: allow LTR change on shadow
75 * memory access
76 * @LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH: allow LTR msg send on ANY LTR
77 * reg change
78 * @LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3: allow LTR msg send on transition from
79 * D0 to D3
80 * @LTR_CFG_FLAG_SW_SET_SHORT: fixed static short LTR register
81 * @LTR_CFG_FLAG_SW_SET_LONG: fixed static short LONG register
82 * @LTR_CFG_FLAG_DENIE_C10_ON_PD: allow going into C10 on PD
83 */
84enum iwl_ltr_config_flags {
85 LTR_CFG_FLAG_FEATURE_ENABLE = BIT(0),
86 LTR_CFG_FLAG_HW_DIS_ON_SHADOW_REG_ACCESS = BIT(1),
87 LTR_CFG_FLAG_HW_EN_SHRT_WR_THROUGH = BIT(2),
88 LTR_CFG_FLAG_HW_DIS_ON_D0_2_D3 = BIT(3),
89 LTR_CFG_FLAG_SW_SET_SHORT = BIT(4),
90 LTR_CFG_FLAG_SW_SET_LONG = BIT(5),
91 LTR_CFG_FLAG_DENIE_C10_ON_PD = BIT(6),
92};
93
94/**
95 * struct iwl_ltr_config_cmd - configures the LTR
96 * @flags: See %enum iwl_ltr_config_flags
97 */
98struct iwl_ltr_config_cmd {
99 __le32 flags;
100 __le32 static_long;
101 __le32 static_short;
102} __packed;
103
71/* Radio LP RX Energy Threshold measured in dBm */ 104/* Radio LP RX Energy Threshold measured in dBm */
72#define POWER_LPRX_RSSI_THRESHOLD 75 105#define POWER_LPRX_RSSI_THRESHOLD 75
73#define POWER_LPRX_RSSI_THRESHOLD_MAX 94 106#define POWER_LPRX_RSSI_THRESHOLD_MAX 94
74#define POWER_LPRX_RSSI_THRESHOLD_MIN 30 107#define POWER_LPRX_RSSI_THRESHOLD_MIN 30
75 108
76/** 109/**
77 * enum iwl_scan_flags - masks for power table command flags 110 * enum iwl_power_flags - masks for power table command flags
78 * @POWER_FLAGS_POWER_SAVE_ENA_MSK: '1' Allow to save power by turning off 111 * @POWER_FLAGS_POWER_SAVE_ENA_MSK: '1' Allow to save power by turning off
79 * receiver and transmitter. '0' - does not allow. 112 * receiver and transmitter. '0' - does not allow.
80 * @POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK: '0' Driver disables power management, 113 * @POWER_FLAGS_POWER_MANAGEMENT_ENA_MSK: '0' Driver disables power management,
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api.h b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
index 667a92274c87..c62575d86bcd 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api.h
@@ -157,6 +157,7 @@ enum {
157 /* Power - legacy power table command */ 157 /* Power - legacy power table command */
158 POWER_TABLE_CMD = 0x77, 158 POWER_TABLE_CMD = 0x77,
159 PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78, 159 PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION = 0x78,
160 LTR_CONFIG = 0xee,
160 161
161 /* Thermal Throttling*/ 162 /* Thermal Throttling*/
162 REPLY_THERMAL_MNG_BACKOFF = 0x7e, 163 REPLY_THERMAL_MNG_BACKOFF = 0x7e,
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw.c b/drivers/net/wireless/iwlwifi/mvm/fw.c
index 23fd711a67e4..eb03943f8463 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw.c
+++ b/drivers/net/wireless/iwlwifi/mvm/fw.c
@@ -284,7 +284,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
284 284
285 lockdep_assert_held(&mvm->mutex); 285 lockdep_assert_held(&mvm->mutex);
286 286
287 if (WARN_ON_ONCE(mvm->init_ucode_complete)) 287 if (WARN_ON_ONCE(mvm->init_ucode_complete || mvm->calibrating))
288 return 0; 288 return 0;
289 289
290 iwl_init_notification_wait(&mvm->notif_wait, 290 iwl_init_notification_wait(&mvm->notif_wait,
@@ -334,6 +334,8 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
334 goto out; 334 goto out;
335 } 335 }
336 336
337 mvm->calibrating = true;
338
337 /* Send TX valid antennas before triggering calibrations */ 339 /* Send TX valid antennas before triggering calibrations */
338 ret = iwl_send_tx_ant_cfg(mvm, mvm->fw->valid_tx_ant); 340 ret = iwl_send_tx_ant_cfg(mvm, mvm->fw->valid_tx_ant);
339 if (ret) 341 if (ret)
@@ -358,11 +360,17 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
358 MVM_UCODE_CALIB_TIMEOUT); 360 MVM_UCODE_CALIB_TIMEOUT);
359 if (!ret) 361 if (!ret)
360 mvm->init_ucode_complete = true; 362 mvm->init_ucode_complete = true;
363
364 if (ret && iwl_mvm_is_radio_killed(mvm)) {
365 IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n");
366 ret = 1;
367 }
361 goto out; 368 goto out;
362 369
363error: 370error:
364 iwl_remove_notification(&mvm->notif_wait, &calib_wait); 371 iwl_remove_notification(&mvm->notif_wait, &calib_wait);
365out: 372out:
373 mvm->calibrating = false;
366 if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) { 374 if (iwlmvm_mod_params.init_dbg && !mvm->nvm_data) {
367 /* we want to debug INIT and we have no NVM - fake */ 375 /* we want to debug INIT and we have no NVM - fake */
368 mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) + 376 mvm->nvm_data = kzalloc(sizeof(struct iwl_nvm_data) +
@@ -480,6 +488,15 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
480 /* Initialize tx backoffs to the minimal possible */ 488 /* Initialize tx backoffs to the minimal possible */
481 iwl_mvm_tt_tx_backoff(mvm, 0); 489 iwl_mvm_tt_tx_backoff(mvm, 0);
482 490
491 if (mvm->trans->ltr_enabled) {
492 struct iwl_ltr_config_cmd cmd = {
493 .flags = cpu_to_le32(LTR_CFG_FLAG_FEATURE_ENABLE),
494 };
495
496 WARN_ON(iwl_mvm_send_cmd_pdu(mvm, LTR_CONFIG, 0,
497 sizeof(cmd), &cmd));
498 }
499
483 ret = iwl_mvm_power_update_device(mvm); 500 ret = iwl_mvm_power_update_device(mvm);
484 if (ret) 501 if (ret)
485 goto error; 502 goto error;
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index c7a73c68bdab..b6d2683da3a9 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -526,7 +526,8 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
526 } 526 }
527 527
528 if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE && 528 if (IEEE80211_SKB_CB(skb)->hw_queue == IWL_MVM_OFFCHANNEL_QUEUE &&
529 !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status)) 529 !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) &&
530 !test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
530 goto drop; 531 goto drop;
531 532
532 /* treat non-bufferable MMPDUs as broadcast if sta is sleeping */ 533 /* treat non-bufferable MMPDUs as broadcast if sta is sleeping */
@@ -787,6 +788,7 @@ static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
787 788
788 mvm->scan_status = IWL_MVM_SCAN_NONE; 789 mvm->scan_status = IWL_MVM_SCAN_NONE;
789 mvm->ps_disabled = false; 790 mvm->ps_disabled = false;
791 mvm->calibrating = false;
790 792
791 /* just in case one was running */ 793 /* just in case one was running */
792 ieee80211_remain_on_channel_expired(mvm->hw); 794 ieee80211_remain_on_channel_expired(mvm->hw);
@@ -1734,6 +1736,13 @@ iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm,
1734 if (changes & BSS_CHANGED_BEACON && 1736 if (changes & BSS_CHANGED_BEACON &&
1735 iwl_mvm_mac_ctxt_beacon_changed(mvm, vif)) 1737 iwl_mvm_mac_ctxt_beacon_changed(mvm, vif))
1736 IWL_WARN(mvm, "Failed updating beacon data\n"); 1738 IWL_WARN(mvm, "Failed updating beacon data\n");
1739
1740 if (changes & BSS_CHANGED_TXPOWER) {
1741 IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n",
1742 bss_conf->txpower);
1743 iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower);
1744 }
1745
1737} 1746}
1738 1747
1739static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw, 1748static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw,
@@ -2367,14 +2376,19 @@ static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm,
2367 /* Set the node address */ 2376 /* Set the node address */
2368 memcpy(aux_roc_req.node_addr, vif->addr, ETH_ALEN); 2377 memcpy(aux_roc_req.node_addr, vif->addr, ETH_ALEN);
2369 2378
2379 lockdep_assert_held(&mvm->mutex);
2380
2381 spin_lock_bh(&mvm->time_event_lock);
2382
2383 if (WARN_ON(te_data->id == HOT_SPOT_CMD)) {
2384 spin_unlock_bh(&mvm->time_event_lock);
2385 return -EIO;
2386 }
2387
2370 te_data->vif = vif; 2388 te_data->vif = vif;
2371 te_data->duration = duration; 2389 te_data->duration = duration;
2372 te_data->id = HOT_SPOT_CMD; 2390 te_data->id = HOT_SPOT_CMD;
2373 2391
2374 lockdep_assert_held(&mvm->mutex);
2375
2376 spin_lock_bh(&mvm->time_event_lock);
2377 list_add_tail(&te_data->list, &mvm->time_event_list);
2378 spin_unlock_bh(&mvm->time_event_lock); 2392 spin_unlock_bh(&mvm->time_event_lock);
2379 2393
2380 /* 2394 /*
@@ -2430,22 +2444,29 @@ static int iwl_mvm_roc(struct ieee80211_hw *hw,
2430 IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value, 2444 IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value,
2431 duration, type); 2445 duration, type);
2432 2446
2447 mutex_lock(&mvm->mutex);
2448
2433 switch (vif->type) { 2449 switch (vif->type) {
2434 case NL80211_IFTYPE_STATION: 2450 case NL80211_IFTYPE_STATION:
2435 /* Use aux roc framework (HS20) */ 2451 if (mvm->fw->ucode_capa.capa[0] &
2436 ret = iwl_mvm_send_aux_roc_cmd(mvm, channel, 2452 IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT) {
2437 vif, duration); 2453 /* Use aux roc framework (HS20) */
2438 return ret; 2454 ret = iwl_mvm_send_aux_roc_cmd(mvm, channel,
2455 vif, duration);
2456 goto out_unlock;
2457 }
2458 IWL_ERR(mvm, "hotspot not supported\n");
2459 ret = -EINVAL;
2460 goto out_unlock;
2439 case NL80211_IFTYPE_P2P_DEVICE: 2461 case NL80211_IFTYPE_P2P_DEVICE:
2440 /* handle below */ 2462 /* handle below */
2441 break; 2463 break;
2442 default: 2464 default:
2443 IWL_ERR(mvm, "vif isn't P2P_DEVICE: %d\n", vif->type); 2465 IWL_ERR(mvm, "vif isn't P2P_DEVICE: %d\n", vif->type);
2444 return -EINVAL; 2466 ret = -EINVAL;
2467 goto out_unlock;
2445 } 2468 }
2446 2469
2447 mutex_lock(&mvm->mutex);
2448
2449 for (i = 0; i < NUM_PHY_CTX; i++) { 2470 for (i = 0; i < NUM_PHY_CTX; i++) {
2450 phy_ctxt = &mvm->phy_ctxts[i]; 2471 phy_ctxt = &mvm->phy_ctxts[i];
2451 if (phy_ctxt->ref == 0 || mvmvif->phy_ctxt == phy_ctxt) 2472 if (phy_ctxt->ref == 0 || mvmvif->phy_ctxt == phy_ctxt)
diff --git a/drivers/net/wireless/iwlwifi/mvm/mvm.h b/drivers/net/wireless/iwlwifi/mvm/mvm.h
index b153ced7015b..845429c88cf4 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mvm.h
+++ b/drivers/net/wireless/iwlwifi/mvm/mvm.h
@@ -548,6 +548,7 @@ struct iwl_mvm {
548 enum iwl_ucode_type cur_ucode; 548 enum iwl_ucode_type cur_ucode;
549 bool ucode_loaded; 549 bool ucode_loaded;
550 bool init_ucode_complete; 550 bool init_ucode_complete;
551 bool calibrating;
551 u32 error_event_table; 552 u32 error_event_table;
552 u32 log_event_table; 553 u32 log_event_table;
553 u32 umac_error_event_table; 554 u32 umac_error_event_table;
diff --git a/drivers/net/wireless/iwlwifi/mvm/ops.c b/drivers/net/wireless/iwlwifi/mvm/ops.c
index 15aa298ee79c..5b719ee8e789 100644
--- a/drivers/net/wireless/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/iwlwifi/mvm/ops.c
@@ -336,6 +336,7 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
336 CMD(DTS_MEASUREMENT_NOTIFICATION), 336 CMD(DTS_MEASUREMENT_NOTIFICATION),
337 CMD(REPLY_THERMAL_MNG_BACKOFF), 337 CMD(REPLY_THERMAL_MNG_BACKOFF),
338 CMD(MAC_PM_POWER_TABLE), 338 CMD(MAC_PM_POWER_TABLE),
339 CMD(LTR_CONFIG),
339 CMD(BT_COEX_CI), 340 CMD(BT_COEX_CI),
340 CMD(BT_COEX_UPDATE_SW_BOOST), 341 CMD(BT_COEX_UPDATE_SW_BOOST),
341 CMD(BT_COEX_UPDATE_CORUN_LUT), 342 CMD(BT_COEX_UPDATE_CORUN_LUT),
@@ -423,6 +424,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
423 } 424 }
424 mvm->sf_state = SF_UNINIT; 425 mvm->sf_state = SF_UNINIT;
425 mvm->low_latency_agg_frame_limit = 6; 426 mvm->low_latency_agg_frame_limit = 6;
427 mvm->cur_ucode = IWL_UCODE_INIT;
426 428
427 mutex_init(&mvm->mutex); 429 mutex_init(&mvm->mutex);
428 mutex_init(&mvm->d0i3_suspend_mutex); 430 mutex_init(&mvm->d0i3_suspend_mutex);
@@ -751,6 +753,7 @@ void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
751static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state) 753static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
752{ 754{
753 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode); 755 struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
756 bool calibrating = ACCESS_ONCE(mvm->calibrating);
754 757
755 if (state) 758 if (state)
756 set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status); 759 set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
@@ -759,7 +762,15 @@ static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
759 762
760 wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm)); 763 wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm));
761 764
762 return state && mvm->cur_ucode != IWL_UCODE_INIT; 765 /* iwl_run_init_mvm_ucode is waiting for results, abort it */
766 if (calibrating)
767 iwl_abort_notification_waits(&mvm->notif_wait);
768
769 /*
770 * Stop the device if we run OPERATIONAL firmware or if we are in the
771 * middle of the calibrations.
772 */
773 return state && (mvm->cur_ucode != IWL_UCODE_INIT || calibrating);
763} 774}
764 775
765static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb) 776static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index cb85e63c20aa..7554f7053830 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -459,7 +459,8 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
459 basic_ssid ? 1 : 0); 459 basic_ssid ? 1 : 0);
460 460
461 cmd->tx_cmd.tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL | 461 cmd->tx_cmd.tx_flags = cpu_to_le32(TX_CMD_FLG_SEQ_CTL |
462 TX_CMD_FLG_BT_DIS); 462 3 << TX_CMD_FLG_BT_PRIO_POS);
463
463 cmd->tx_cmd.sta_id = mvm->aux_sta.sta_id; 464 cmd->tx_cmd.sta_id = mvm->aux_sta.sta_id;
464 cmd->tx_cmd.life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE); 465 cmd->tx_cmd.life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
465 cmd->tx_cmd.rate_n_flags = 466 cmd->tx_cmd.rate_n_flags =
@@ -601,16 +602,6 @@ static int iwl_mvm_cancel_regular_scan(struct iwl_mvm *mvm)
601 SCAN_COMPLETE_NOTIFICATION }; 602 SCAN_COMPLETE_NOTIFICATION };
602 int ret; 603 int ret;
603 604
604 if (mvm->scan_status == IWL_MVM_SCAN_NONE)
605 return 0;
606
607 if (iwl_mvm_is_radio_killed(mvm)) {
608 ieee80211_scan_completed(mvm->hw, true);
609 iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
610 mvm->scan_status = IWL_MVM_SCAN_NONE;
611 return 0;
612 }
613
614 iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_abort, 605 iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_abort,
615 scan_abort_notif, 606 scan_abort_notif,
616 ARRAY_SIZE(scan_abort_notif), 607 ARRAY_SIZE(scan_abort_notif),
@@ -1399,6 +1390,16 @@ int iwl_mvm_unified_sched_scan_lmac(struct iwl_mvm *mvm,
1399 1390
1400int iwl_mvm_cancel_scan(struct iwl_mvm *mvm) 1391int iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
1401{ 1392{
1393 if (mvm->scan_status == IWL_MVM_SCAN_NONE)
1394 return 0;
1395
1396 if (iwl_mvm_is_radio_killed(mvm)) {
1397 ieee80211_scan_completed(mvm->hw, true);
1398 iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
1399 mvm->scan_status = IWL_MVM_SCAN_NONE;
1400 return 0;
1401 }
1402
1402 if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN) 1403 if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_LMAC_SCAN)
1403 return iwl_mvm_scan_offload_stop(mvm, true); 1404 return iwl_mvm_scan_offload_stop(mvm, true);
1404 return iwl_mvm_cancel_regular_scan(mvm); 1405 return iwl_mvm_cancel_regular_scan(mvm);
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index b7f9e61d14e2..6dfad230be5e 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -305,8 +305,8 @@ static int iwl_mvm_aux_roc_te_handle_notif(struct iwl_mvm *mvm,
305 te_data->running = false; 305 te_data->running = false;
306 te_data->vif = NULL; 306 te_data->vif = NULL;
307 te_data->uid = 0; 307 te_data->uid = 0;
308 te_data->id = TE_MAX;
308 } else if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_START) { 309 } else if (le32_to_cpu(notif->action) == TE_V2_NOTIF_HOST_EVENT_START) {
309 set_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status);
310 set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status); 310 set_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status);
311 te_data->running = true; 311 te_data->running = true;
312 ieee80211_ready_on_channel(mvm->hw); /* Start TE */ 312 ieee80211_ready_on_channel(mvm->hw); /* Start TE */
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 1cb793a498ac..c6a517c771df 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -175,14 +175,10 @@ static void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm,
175 175
176 /* 176 /*
177 * for data packets, rate info comes from the table inside the fw. This 177 * for data packets, rate info comes from the table inside the fw. This
178 * table is controlled by LINK_QUALITY commands. Exclude ctrl port 178 * table is controlled by LINK_QUALITY commands
179 * frames like EAPOLs which should be treated as mgmt frames. This
180 * avoids them being sent initially in high rates which increases the
181 * chances for completion of the 4-Way handshake.
182 */ 179 */
183 180
184 if (ieee80211_is_data(fc) && sta && 181 if (ieee80211_is_data(fc) && sta) {
185 !(info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO)) {
186 tx_cmd->initial_rate_index = 0; 182 tx_cmd->initial_rate_index = 0;
187 tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE); 183 tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
188 return; 184 return;
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 1393bac0025c..dd2f3f8baa9d 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -174,6 +174,7 @@ static void iwl_pcie_apm_config(struct iwl_trans *trans)
174{ 174{
175 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 175 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
176 u16 lctl; 176 u16 lctl;
177 u16 cap;
177 178
178 /* 179 /*
179 * HW bug W/A for instability in PCIe bus L0S->L1 transition. 180 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
@@ -184,16 +185,17 @@ static void iwl_pcie_apm_config(struct iwl_trans *trans)
184 * power savings, even without L1. 185 * power savings, even without L1.
185 */ 186 */
186 pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl); 187 pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
187 if (lctl & PCI_EXP_LNKCTL_ASPM_L1) { 188 if (lctl & PCI_EXP_LNKCTL_ASPM_L1)
188 /* L1-ASPM enabled; disable(!) L0S */
189 iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED); 189 iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
190 dev_info(trans->dev, "L1 Enabled; Disabling L0S\n"); 190 else
191 } else {
192 /* L1-ASPM disabled; enable(!) L0S */
193 iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED); 191 iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
194 dev_info(trans->dev, "L1 Disabled; Enabling L0S\n");
195 }
196 trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S); 192 trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
193
194 pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
195 trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
196 dev_info(trans->dev, "L1 %sabled - LTR %sabled\n",
197 (lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
198 trans->ltr_enabled ? "En" : "Dis");
197} 199}
198 200
199/* 201/*
@@ -428,7 +430,7 @@ static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
428 ret = iwl_poll_bit(trans, CSR_RESET, 430 ret = iwl_poll_bit(trans, CSR_RESET,
429 CSR_RESET_REG_FLAG_MASTER_DISABLED, 431 CSR_RESET_REG_FLAG_MASTER_DISABLED,
430 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100); 432 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
431 if (ret) 433 if (ret < 0)
432 IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n"); 434 IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");
433 435
434 IWL_DEBUG_INFO(trans, "stop master\n"); 436 IWL_DEBUG_INFO(trans, "stop master\n");
@@ -544,7 +546,7 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
544 msleep(25); 546 msleep(25);
545 } 547 }
546 548
547 IWL_DEBUG_INFO(trans, "got NIC after %d iterations\n", iter); 549 IWL_ERR(trans, "Couldn't prepare the card\n");
548 550
549 return ret; 551 return ret;
550} 552}
@@ -913,7 +915,8 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
913 * restart. So don't process again if the device is 915 * restart. So don't process again if the device is
914 * already dead. 916 * already dead.
915 */ 917 */
916 if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) { 918 if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
919 IWL_DEBUG_INFO(trans, "DEVICE_ENABLED bit was set and is now cleared\n");
917 iwl_pcie_tx_stop(trans); 920 iwl_pcie_tx_stop(trans);
918 iwl_pcie_rx_stop(trans); 921 iwl_pcie_rx_stop(trans);
919 922
@@ -943,7 +946,6 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
943 /* clear all status bits */ 946 /* clear all status bits */
944 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); 947 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
945 clear_bit(STATUS_INT_ENABLED, &trans->status); 948 clear_bit(STATUS_INT_ENABLED, &trans->status);
946 clear_bit(STATUS_DEVICE_ENABLED, &trans->status);
947 clear_bit(STATUS_TPOWER_PMI, &trans->status); 949 clear_bit(STATUS_TPOWER_PMI, &trans->status);
948 clear_bit(STATUS_RFKILL, &trans->status); 950 clear_bit(STATUS_RFKILL, &trans->status);
949 951
@@ -1043,7 +1045,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
1043 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 1045 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
1044 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 1046 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
1045 25000); 1047 25000);
1046 if (ret) { 1048 if (ret < 0) {
1047 IWL_ERR(trans, "Failed to resume the device (mac ready)\n"); 1049 IWL_ERR(trans, "Failed to resume the device (mac ready)\n");
1048 return ret; 1050 return ret;
1049 } 1051 }
@@ -1892,8 +1894,7 @@ static u32 iwl_trans_pcie_dump_prph(struct iwl_trans *trans,
1892 int reg; 1894 int reg;
1893 __le32 *val; 1895 __le32 *val;
1894 1896
1895 prph_len += sizeof(*data) + sizeof(*prph) + 1897 prph_len += sizeof(**data) + sizeof(*prph) + num_bytes_in_chunk;
1896 num_bytes_in_chunk;
1897 1898
1898 (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PRPH); 1899 (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PRPH);
1899 (*data)->len = cpu_to_le32(sizeof(*prph) + 1900 (*data)->len = cpu_to_le32(sizeof(*prph) +
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index babbdc1ce741..c9ad4cf1adfb 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -1987,7 +1987,7 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
1987 if (err != 0) { 1987 if (err != 0) {
1988 printk(KERN_DEBUG "mac80211_hwsim: device_bind_driver failed (%d)\n", 1988 printk(KERN_DEBUG "mac80211_hwsim: device_bind_driver failed (%d)\n",
1989 err); 1989 err);
1990 goto failed_hw; 1990 goto failed_bind;
1991 } 1991 }
1992 1992
1993 skb_queue_head_init(&data->pending); 1993 skb_queue_head_init(&data->pending);
@@ -2183,6 +2183,8 @@ static int mac80211_hwsim_create_radio(int channels, const char *reg_alpha2,
2183 return idx; 2183 return idx;
2184 2184
2185failed_hw: 2185failed_hw:
2186 device_release_driver(data->dev);
2187failed_bind:
2186 device_unregister(data->dev); 2188 device_unregister(data->dev);
2187failed_drvdata: 2189failed_drvdata:
2188 ieee80211_free_hw(hw); 2190 ieee80211_free_hw(hw);
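
The hwsim hunk adds a failed_bind label so that device_release_driver() only runs once device_bind_driver() has succeeded, keeping the error unwind a mirror image of the setup order. A generic, self-contained sketch of that goto-unwind pattern (hypothetical step names, not hwsim code):

#include <stdio.h>

/* Hypothetical setup steps; each returns 0 on success. The last one is
 * made to fail so the unwind path is exercised. */
static int step_a(void)  { puts("a done");  return 0; }
static int step_b(void)  { puts("b done");  return 0; }
static int step_c(void)  { puts("c fails"); return -1; }
static void undo_b(void) { puts("undo b"); }
static void undo_a(void) { puts("undo a"); }

static int create(void)
{
	int err;

	err = step_a();
	if (err)
		return err;		/* nothing to unwind yet */
	err = step_b();
	if (err)
		goto fail_a;		/* only step_a() completed */
	err = step_c();
	if (err)
		goto fail_b;		/* step_c() did not complete: no undo_c() */
	return 0;

fail_b:
	undo_b();
fail_a:
	undo_a();
	return err;
}

int main(void)
{
	return create() ? 1 : 0;
}
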
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.c b/drivers/net/wireless/mwifiex/11n_rxreorder.c
index 40057079ffb9..5ef5a0eeba50 100644
--- a/drivers/net/wireless/mwifiex/11n_rxreorder.c
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.c
@@ -196,6 +196,7 @@ mwifiex_del_rx_reorder_entry(struct mwifiex_private *priv,
196 mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, start_win); 196 mwifiex_11n_dispatch_pkt_until_start_win(priv, tbl, start_win);
197 197
198 del_timer_sync(&tbl->timer_context.timer); 198 del_timer_sync(&tbl->timer_context.timer);
199 tbl->timer_context.timer_is_set = false;
199 200
200 spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags); 201 spin_lock_irqsave(&priv->rx_reorder_tbl_lock, flags);
201 list_del(&tbl->list); 202 list_del(&tbl->list);
@@ -297,6 +298,7 @@ mwifiex_flush_data(unsigned long context)
297 (struct reorder_tmr_cnxt *) context; 298 (struct reorder_tmr_cnxt *) context;
298 int start_win, seq_num; 299 int start_win, seq_num;
299 300
301 ctx->timer_is_set = false;
300 seq_num = mwifiex_11n_find_last_seq_num(ctx); 302 seq_num = mwifiex_11n_find_last_seq_num(ctx);
301 303
302 if (seq_num < 0) 304 if (seq_num < 0)
@@ -385,6 +387,7 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
385 387
386 new_node->timer_context.ptr = new_node; 388 new_node->timer_context.ptr = new_node;
387 new_node->timer_context.priv = priv; 389 new_node->timer_context.priv = priv;
390 new_node->timer_context.timer_is_set = false;
388 391
389 init_timer(&new_node->timer_context.timer); 392 init_timer(&new_node->timer_context.timer);
390 new_node->timer_context.timer.function = mwifiex_flush_data; 393 new_node->timer_context.timer.function = mwifiex_flush_data;
@@ -399,6 +402,22 @@ mwifiex_11n_create_rx_reorder_tbl(struct mwifiex_private *priv, u8 *ta,
399 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags); 402 spin_unlock_irqrestore(&priv->rx_reorder_tbl_lock, flags);
400} 403}
401 404
405static void
406mwifiex_11n_rxreorder_timer_restart(struct mwifiex_rx_reorder_tbl *tbl)
407{
408 u32 min_flush_time;
409
410 if (tbl->win_size >= MWIFIEX_BA_WIN_SIZE_32)
411 min_flush_time = MIN_FLUSH_TIMER_15_MS;
412 else
413 min_flush_time = MIN_FLUSH_TIMER_MS;
414
415 mod_timer(&tbl->timer_context.timer,
416 jiffies + msecs_to_jiffies(min_flush_time * tbl->win_size));
417
418 tbl->timer_context.timer_is_set = true;
419}
420
402/* 421/*
403 * This function prepares command for adding a BA request. 422 * This function prepares command for adding a BA request.
404 * 423 *
@@ -523,31 +542,31 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
523 u8 *ta, u8 pkt_type, void *payload) 542 u8 *ta, u8 pkt_type, void *payload)
524{ 543{
525 struct mwifiex_rx_reorder_tbl *tbl; 544 struct mwifiex_rx_reorder_tbl *tbl;
526 int start_win, end_win, win_size; 545 int prev_start_win, start_win, end_win, win_size;
527 u16 pkt_index; 546 u16 pkt_index;
528 bool init_window_shift = false; 547 bool init_window_shift = false;
548 int ret = 0;
529 549
530 tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta); 550 tbl = mwifiex_11n_get_rx_reorder_tbl(priv, tid, ta);
531 if (!tbl) { 551 if (!tbl) {
532 if (pkt_type != PKT_TYPE_BAR) 552 if (pkt_type != PKT_TYPE_BAR)
533 mwifiex_11n_dispatch_pkt(priv, payload); 553 mwifiex_11n_dispatch_pkt(priv, payload);
534 return 0; 554 return ret;
535 } 555 }
536 556
537 if ((pkt_type == PKT_TYPE_AMSDU) && !tbl->amsdu) { 557 if ((pkt_type == PKT_TYPE_AMSDU) && !tbl->amsdu) {
538 mwifiex_11n_dispatch_pkt(priv, payload); 558 mwifiex_11n_dispatch_pkt(priv, payload);
539 return 0; 559 return ret;
540 } 560 }
541 561
542 start_win = tbl->start_win; 562 start_win = tbl->start_win;
563 prev_start_win = start_win;
543 win_size = tbl->win_size; 564 win_size = tbl->win_size;
544 end_win = ((start_win + win_size) - 1) & (MAX_TID_VALUE - 1); 565 end_win = ((start_win + win_size) - 1) & (MAX_TID_VALUE - 1);
545 if (tbl->flags & RXREOR_INIT_WINDOW_SHIFT) { 566 if (tbl->flags & RXREOR_INIT_WINDOW_SHIFT) {
546 init_window_shift = true; 567 init_window_shift = true;
547 tbl->flags &= ~RXREOR_INIT_WINDOW_SHIFT; 568 tbl->flags &= ~RXREOR_INIT_WINDOW_SHIFT;
548 } 569 }
549 mod_timer(&tbl->timer_context.timer,
550 jiffies + msecs_to_jiffies(MIN_FLUSH_TIMER_MS * win_size));
551 570
552 if (tbl->flags & RXREOR_FORCE_NO_DROP) { 571 if (tbl->flags & RXREOR_FORCE_NO_DROP) {
553 dev_dbg(priv->adapter->dev, 572 dev_dbg(priv->adapter->dev,
@@ -568,11 +587,14 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
568 if ((start_win + TWOPOW11) > (MAX_TID_VALUE - 1)) { 587 if ((start_win + TWOPOW11) > (MAX_TID_VALUE - 1)) {
569 if (seq_num >= ((start_win + TWOPOW11) & 588 if (seq_num >= ((start_win + TWOPOW11) &
570 (MAX_TID_VALUE - 1)) && 589 (MAX_TID_VALUE - 1)) &&
571 seq_num < start_win) 590 seq_num < start_win) {
572 return -1; 591 ret = -1;
592 goto done;
593 }
573 } else if ((seq_num < start_win) || 594 } else if ((seq_num < start_win) ||
574 (seq_num > (start_win + TWOPOW11))) { 595 (seq_num >= (start_win + TWOPOW11))) {
575 return -1; 596 ret = -1;
597 goto done;
576 } 598 }
577 } 599 }
578 600
@@ -601,8 +623,10 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
601 else 623 else
602 pkt_index = (seq_num+MAX_TID_VALUE) - start_win; 624 pkt_index = (seq_num+MAX_TID_VALUE) - start_win;
603 625
604 if (tbl->rx_reorder_ptr[pkt_index]) 626 if (tbl->rx_reorder_ptr[pkt_index]) {
605 return -1; 627 ret = -1;
628 goto done;
629 }
606 630
607 tbl->rx_reorder_ptr[pkt_index] = payload; 631 tbl->rx_reorder_ptr[pkt_index] = payload;
608 } 632 }
@@ -613,7 +637,11 @@ int mwifiex_11n_rx_reorder_pkt(struct mwifiex_private *priv,
613 */ 637 */
614 mwifiex_11n_scan_and_dispatch(priv, tbl); 638 mwifiex_11n_scan_and_dispatch(priv, tbl);
615 639
616 return 0; 640done:
641 if (!tbl->timer_context.timer_is_set ||
642 prev_start_win != tbl->start_win)
643 mwifiex_11n_rxreorder_timer_restart(tbl);
644 return ret;
617} 645}
618 646
619/* 647/*
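As an aside for readers tracing the 11n_rxreorder.c change above: the flush timer is no longer re-armed on every received frame; it is restarted only when it is not already pending or when the reorder window has advanced. Below is a minimal userspace sketch of that decision logic — struct reorder_tbl and the helper names are stand-ins, not the driver's real definitions; only the per-slot timeout values are taken from the header hunk that follows.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the driver's reorder table and timer state. */
struct reorder_tbl {
	int start_win;      /* first sequence number the window accepts */
	int win_size;       /* block-ack window size */
	bool timer_is_set;  /* flush timer currently pending? */
};

/* Shorter per-slot timeout for large windows (15 ms vs 50 ms, as in the
 * MIN_FLUSH_TIMER_15_MS / MIN_FLUSH_TIMER_MS definitions below). */
static unsigned int flush_timeout_ms(const struct reorder_tbl *tbl)
{
	unsigned int per_slot = (tbl->win_size >= 32) ? 15 : 50;

	return per_slot * tbl->win_size;
}

/* Restart the flush timer only when it is not already pending or the
 * window has advanced since it was armed. */
static void maybe_restart_flush_timer(struct reorder_tbl *tbl, int prev_start_win)
{
	if (!tbl->timer_is_set || prev_start_win != tbl->start_win) {
		printf("re-arm flush timer for %u ms\n", flush_timeout_ms(tbl));
		tbl->timer_is_set = true;
	}
}

int main(void)
{
	struct reorder_tbl tbl = { .start_win = 10, .win_size = 64 };

	maybe_restart_flush_timer(&tbl, 10);  /* timer not set: arm it */
	maybe_restart_flush_timer(&tbl, 10);  /* already set, window unchanged: no-op */
	tbl.start_win = 12;
	maybe_restart_flush_timer(&tbl, 10);  /* window advanced: re-arm */
	return 0;
}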
diff --git a/drivers/net/wireless/mwifiex/11n_rxreorder.h b/drivers/net/wireless/mwifiex/11n_rxreorder.h
index 3a87bb0e3a62..63ecea89b4ab 100644
--- a/drivers/net/wireless/mwifiex/11n_rxreorder.h
+++ b/drivers/net/wireless/mwifiex/11n_rxreorder.h
@@ -21,6 +21,8 @@
21#define _MWIFIEX_11N_RXREORDER_H_ 21#define _MWIFIEX_11N_RXREORDER_H_
22 22
23#define MIN_FLUSH_TIMER_MS 50 23#define MIN_FLUSH_TIMER_MS 50
24#define MIN_FLUSH_TIMER_15_MS 15
25#define MWIFIEX_BA_WIN_SIZE_32 32
24 26
25#define PKT_TYPE_BAR 0xE7 27#define PKT_TYPE_BAR 0xE7
26#define MAX_TID_VALUE (2 << 11) 28#define MAX_TID_VALUE (2 << 11)
diff --git a/drivers/net/wireless/mwifiex/main.h b/drivers/net/wireless/mwifiex/main.h
index e2635747d966..f55658d15c60 100644
--- a/drivers/net/wireless/mwifiex/main.h
+++ b/drivers/net/wireless/mwifiex/main.h
@@ -592,6 +592,7 @@ struct reorder_tmr_cnxt {
592 struct timer_list timer; 592 struct timer_list timer;
593 struct mwifiex_rx_reorder_tbl *ptr; 593 struct mwifiex_rx_reorder_tbl *ptr;
594 struct mwifiex_private *priv; 594 struct mwifiex_private *priv;
595 u8 timer_is_set;
595}; 596};
596 597
597struct mwifiex_rx_reorder_tbl { 598struct mwifiex_rx_reorder_tbl {
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 573897b8e878..8444313eabe2 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -1111,6 +1111,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
1111 /* Ovislink */ 1111 /* Ovislink */
1112 { USB_DEVICE(0x1b75, 0x3071) }, 1112 { USB_DEVICE(0x1b75, 0x3071) },
1113 { USB_DEVICE(0x1b75, 0x3072) }, 1113 { USB_DEVICE(0x1b75, 0x3072) },
1114 { USB_DEVICE(0x1b75, 0xa200) },
1114 /* Para */ 1115 /* Para */
1115 { USB_DEVICE(0x20b8, 0x8888) }, 1116 { USB_DEVICE(0x20b8, 0x8888) },
1116 /* Pegatron */ 1117 /* Pegatron */
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 8e68f87ab13c..66ff36447b94 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -158,55 +158,29 @@ void rt2x00queue_align_frame(struct sk_buff *skb)
158 skb_trim(skb, frame_length); 158 skb_trim(skb, frame_length);
159} 159}
160 160
161void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int header_length) 161/*
 162 * H/W needs L2 padding between the header and the payload if header size
163 * is not 4 bytes aligned.
164 */
165void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int hdr_len)
162{ 166{
163 unsigned int payload_length = skb->len - header_length; 167 unsigned int l2pad = (skb->len > hdr_len) ? L2PAD_SIZE(hdr_len) : 0;
164 unsigned int header_align = ALIGN_SIZE(skb, 0);
165 unsigned int payload_align = ALIGN_SIZE(skb, header_length);
166 unsigned int l2pad = payload_length ? L2PAD_SIZE(header_length) : 0;
167 168
168 /* 169 if (!l2pad)
169 * Adjust the header alignment if the payload needs to be moved more
170 * than the header.
171 */
172 if (payload_align > header_align)
173 header_align += 4;
174
175 /* There is nothing to do if no alignment is needed */
176 if (!header_align)
177 return; 170 return;
178 171
179 /* Reserve the amount of space needed in front of the frame */ 172 skb_push(skb, l2pad);
180 skb_push(skb, header_align); 173 memmove(skb->data, skb->data + l2pad, hdr_len);
181
182 /*
183 * Move the header.
184 */
185 memmove(skb->data, skb->data + header_align, header_length);
186
187 /* Move the payload, if present and if required */
188 if (payload_length && payload_align)
189 memmove(skb->data + header_length + l2pad,
190 skb->data + header_length + l2pad + payload_align,
191 payload_length);
192
193 /* Trim the skb to the correct size */
194 skb_trim(skb, header_length + l2pad + payload_length);
195} 174}
196 175
197void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int header_length) 176void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int hdr_len)
198{ 177{
199 /* 178 unsigned int l2pad = (skb->len > hdr_len) ? L2PAD_SIZE(hdr_len) : 0;
200 * L2 padding is only present if the skb contains more than just the
201 * IEEE 802.11 header.
202 */
203 unsigned int l2pad = (skb->len > header_length) ?
204 L2PAD_SIZE(header_length) : 0;
205 179
206 if (!l2pad) 180 if (!l2pad)
207 return; 181 return;
208 182
209 memmove(skb->data + l2pad, skb->data, header_length); 183 memmove(skb->data + l2pad, skb->data, hdr_len);
210 skb_pull(skb, l2pad); 184 skb_pull(skb, l2pad);
211} 185}
212 186
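The rewritten rt2x00queue_insert_l2pad() above simply reserves the pad in the skb headroom and moves only the header, rather than re-aligning header and payload separately. The self-contained sketch below models that move on a plain byte buffer; l2pad_size() is an assumption mirroring what L2PAD_SIZE() is expected to compute.

#include <stdio.h>
#include <string.h>

/* Pad needed so the payload starts 4-byte aligned after the header;
 * assumed to mirror L2PAD_SIZE() in the driver. */
static unsigned int l2pad_size(unsigned int hdr_len)
{
	return (4 - (hdr_len & 3)) & 3;
}

/* Insert l2pad bytes between header and payload, like the new
 * rt2x00queue_insert_l2pad(): grow the frame at the front by l2pad
 * (the "skb_push") and move only the header back to the new start. */
static unsigned int insert_l2pad(unsigned char *buf, unsigned int headroom,
				 unsigned int frame_len, unsigned int hdr_len)
{
	unsigned int l2pad = (frame_len > hdr_len) ? l2pad_size(hdr_len) : 0;
	unsigned char *frame = buf + headroom;

	if (!l2pad)
		return frame_len;

	memmove(frame - l2pad, frame, hdr_len);
	return frame_len + l2pad;
}

int main(void)
{
	/* 4 bytes headroom, 6-byte header, 8-byte payload */
	unsigned char buf[64] = "....HHHHHHPPPPPPPP";
	unsigned int new_len = insert_l2pad(buf, 4, 14, 6);

	printf("frame grew from 14 to %u bytes; 2 pad bytes now sit before the payload\n",
	       new_len);
	return 0;
}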
diff --git a/drivers/net/wireless/rtlwifi/base.c b/drivers/net/wireless/rtlwifi/base.c
index 58ba71830886..40b6d1d006d7 100644
--- a/drivers/net/wireless/rtlwifi/base.c
+++ b/drivers/net/wireless/rtlwifi/base.c
@@ -467,7 +467,7 @@ static void _rtl_init_deferred_work(struct ieee80211_hw *hw)
467 rtl_easy_concurrent_retrytimer_callback, (unsigned long)hw); 467 rtl_easy_concurrent_retrytimer_callback, (unsigned long)hw);
468 /* <2> work queue */ 468 /* <2> work queue */
469 rtlpriv->works.hw = hw; 469 rtlpriv->works.hw = hw;
470 rtlpriv->works.rtl_wq = alloc_workqueue(rtlpriv->cfg->name, 0, 0); 470 rtlpriv->works.rtl_wq = alloc_workqueue("%s", 0, 0, rtlpriv->cfg->name);
471 INIT_DELAYED_WORK(&rtlpriv->works.watchdog_wq, 471 INIT_DELAYED_WORK(&rtlpriv->works.watchdog_wq,
472 (void *)rtl_watchdog_wq_callback); 472 (void *)rtl_watchdog_wq_callback);
473 INIT_DELAYED_WORK(&rtlpriv->works.ips_nic_off_wq, 473 INIT_DELAYED_WORK(&rtlpriv->works.ips_nic_off_wq,
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index f6179bc06086..07dae0d44abc 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -1828,3 +1828,9 @@ const struct ieee80211_ops rtl_ops = {
1828 .flush = rtl_op_flush, 1828 .flush = rtl_op_flush,
1829}; 1829};
1830EXPORT_SYMBOL_GPL(rtl_ops); 1830EXPORT_SYMBOL_GPL(rtl_ops);
1831
1832bool rtl_btc_status_false(void)
1833{
1834 return false;
1835}
1836EXPORT_SYMBOL_GPL(rtl_btc_status_false);
diff --git a/drivers/net/wireless/rtlwifi/core.h b/drivers/net/wireless/rtlwifi/core.h
index 59cd3b9dca25..624e1dc16d31 100644
--- a/drivers/net/wireless/rtlwifi/core.h
+++ b/drivers/net/wireless/rtlwifi/core.h
@@ -42,5 +42,6 @@ void rtl_rfreg_delay(struct ieee80211_hw *hw, enum radio_path rfpath, u32 addr,
42 u32 mask, u32 data); 42 u32 mask, u32 data);
43void rtl_bb_delay(struct ieee80211_hw *hw, u32 addr, u32 data); 43void rtl_bb_delay(struct ieee80211_hw *hw, u32 addr, u32 data);
44bool rtl_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb); 44bool rtl_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb);
45bool rtl_btc_status_false(void);
45 46
46#endif 47#endif
diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c
index 667aba81246c..846a2e6e34d8 100644
--- a/drivers/net/wireless/rtlwifi/pci.c
+++ b/drivers/net/wireless/rtlwifi/pci.c
@@ -842,7 +842,8 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw)
842 break; 842 break;
843 } 843 }
844 /* handle command packet here */ 844 /* handle command packet here */
845 if (rtlpriv->cfg->ops->rx_command_packet(hw, stats, skb)) { 845 if (rtlpriv->cfg->ops->rx_command_packet &&
846 rtlpriv->cfg->ops->rx_command_packet(hw, stats, skb)) {
846 dev_kfree_skb_any(skb); 847 dev_kfree_skb_any(skb);
847 goto end; 848 goto end;
848 } 849 }
@@ -1127,9 +1128,14 @@ static void _rtl_pci_prepare_bcn_tasklet(struct ieee80211_hw *hw)
1127 1128
1128 __skb_queue_tail(&ring->queue, pskb); 1129 __skb_queue_tail(&ring->queue, pskb);
1129 1130
1130 rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, true, HW_DESC_OWN, 1131 if (rtlpriv->use_new_trx_flow) {
1131 &temp_one); 1132 temp_one = 4;
1132 1133 rtlpriv->cfg->ops->set_desc(hw, (u8 *)pbuffer_desc, true,
1134 HW_DESC_OWN, (u8 *)&temp_one);
1135 } else {
1136 rtlpriv->cfg->ops->set_desc(hw, (u8 *)pdesc, true, HW_DESC_OWN,
1137 &temp_one);
1138 }
1133 return; 1139 return;
1134} 1140}
1135 1141
@@ -1370,9 +1376,9 @@ static void _rtl_pci_free_tx_ring(struct ieee80211_hw *hw,
1370 ring->desc = NULL; 1376 ring->desc = NULL;
1371 if (rtlpriv->use_new_trx_flow) { 1377 if (rtlpriv->use_new_trx_flow) {
1372 pci_free_consistent(rtlpci->pdev, 1378 pci_free_consistent(rtlpci->pdev,
1373 sizeof(*ring->desc) * ring->entries, 1379 sizeof(*ring->buffer_desc) * ring->entries,
1374 ring->buffer_desc, ring->buffer_desc_dma); 1380 ring->buffer_desc, ring->buffer_desc_dma);
1375 ring->desc = NULL; 1381 ring->buffer_desc = NULL;
1376 } 1382 }
1377} 1383}
1378 1384
@@ -1543,7 +1549,6 @@ int rtl_pci_reset_trx_ring(struct ieee80211_hw *hw)
1543 true, 1549 true,
1544 HW_DESC_TXBUFF_ADDR), 1550 HW_DESC_TXBUFF_ADDR),
1545 skb->len, PCI_DMA_TODEVICE); 1551 skb->len, PCI_DMA_TODEVICE);
1546 ring->idx = (ring->idx + 1) % ring->entries;
1547 kfree_skb(skb); 1552 kfree_skb(skb);
1548 ring->idx = (ring->idx + 1) % ring->entries; 1553 ring->idx = (ring->idx + 1) % ring->entries;
1549 } 1554 }
@@ -1796,7 +1801,8 @@ static int rtl_pci_start(struct ieee80211_hw *hw)
1796 rtl_pci_reset_trx_ring(hw); 1801 rtl_pci_reset_trx_ring(hw);
1797 1802
1798 rtlpci->driver_is_goingto_unload = false; 1803 rtlpci->driver_is_goingto_unload = false;
1799 if (rtlpriv->cfg->ops->get_btc_status()) { 1804 if (rtlpriv->cfg->ops->get_btc_status &&
1805 rtlpriv->cfg->ops->get_btc_status()) {
1800 rtlpriv->btcoexist.btc_ops->btc_init_variables(rtlpriv); 1806 rtlpriv->btcoexist.btc_ops->btc_init_variables(rtlpriv);
1801 rtlpriv->btcoexist.btc_ops->btc_init_hal_vars(rtlpriv); 1807 rtlpriv->btcoexist.btc_ops->btc_init_hal_vars(rtlpriv);
1802 } 1808 }
@@ -2243,6 +2249,16 @@ int rtl_pci_probe(struct pci_dev *pdev,
2243 /*like read eeprom and so on */ 2249 /*like read eeprom and so on */
2244 rtlpriv->cfg->ops->read_eeprom_info(hw); 2250 rtlpriv->cfg->ops->read_eeprom_info(hw);
2245 2251
2252 if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
2253 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n");
2254 err = -ENODEV;
2255 goto fail3;
2256 }
2257 rtlpriv->cfg->ops->init_sw_leds(hw);
2258
2259 /*aspm */
2260 rtl_pci_init_aspm(hw);
2261
2246 /* Init mac80211 sw */ 2262 /* Init mac80211 sw */
2247 err = rtl_init_core(hw); 2263 err = rtl_init_core(hw);
2248 if (err) { 2264 if (err) {
@@ -2258,16 +2274,6 @@ int rtl_pci_probe(struct pci_dev *pdev,
2258 goto fail3; 2274 goto fail3;
2259 } 2275 }
2260 2276
2261 if (rtlpriv->cfg->ops->init_sw_vars(hw)) {
2262 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, "Can't init_sw_vars\n");
2263 err = -ENODEV;
2264 goto fail3;
2265 }
2266 rtlpriv->cfg->ops->init_sw_leds(hw);
2267
2268 /*aspm */
2269 rtl_pci_init_aspm(hw);
2270
2271 err = ieee80211_register_hw(hw); 2277 err = ieee80211_register_hw(hw);
2272 if (err) { 2278 if (err) {
2273 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 2279 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
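Several pci.c hunks above guard optional hal_ops callbacks (rx_command_packet, get_btc_status) with a NULL check before calling them, since not every rtlwifi chip driver fills in those members. A generic sketch of that optional-callback pattern follows — struct ops and the helper names are invented for illustration, not the driver's own.

#include <stdbool.h>
#include <stdio.h>

/* A table of optional callbacks; any member may legitimately be NULL. */
struct ops {
	bool (*get_btc_status)(void);        /* optional */
	bool (*rx_command_packet)(int pkt);  /* optional */
};

static bool btc_status_false(void) { return false; }
static bool is_command_packet(int pkt) { return pkt == 1; }

static void handle_rx(const struct ops *ops, int pkt)
{
	/* Only dereference the callback if the driver provided one,
	 * as the patched rx interrupt path now does. */
	if (ops->rx_command_packet && ops->rx_command_packet(pkt)) {
		printf("packet %d consumed as a command packet\n", pkt);
		return;
	}
	printf("packet %d takes the normal rx path\n", pkt);
}

int main(void)
{
	struct ops full = { .get_btc_status = btc_status_false,
			    .rx_command_packet = is_command_packet };
	struct ops bare = { 0 };

	handle_rx(&full, 1);   /* command path */
	handle_rx(&bare, 1);   /* callback missing: normal path, no crash */

	if (full.get_btc_status && full.get_btc_status())
		printf("init BT-coexist state\n");
	else
		printf("no BT-coexist support\n");
	return 0;
}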
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
index a00861b26ece..29983bc96a89 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.c
@@ -656,7 +656,8 @@ static u8 reserved_page_packet[TOTAL_RESERVED_PKT_LEN] = {
656 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 656 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
657}; 657};
658 658
659void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished) 659void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw,
660 bool (*cmd_send_packet)(struct ieee80211_hw *, struct sk_buff *))
660{ 661{
661 struct rtl_priv *rtlpriv = rtl_priv(hw); 662 struct rtl_priv *rtlpriv = rtl_priv(hw);
662 struct rtl_mac *mac = rtl_mac(rtl_priv(hw)); 663 struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
@@ -722,7 +723,10 @@ void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished)
722 memcpy((u8 *)skb_put(skb, totalpacketlen), 723 memcpy((u8 *)skb_put(skb, totalpacketlen),
723 &reserved_page_packet, totalpacketlen); 724 &reserved_page_packet, totalpacketlen);
724 725
725 rtstatus = rtl_cmd_send_packet(hw, skb); 726 if (cmd_send_packet)
727 rtstatus = cmd_send_packet(hw, skb);
728 else
729 rtstatus = rtl_cmd_send_packet(hw, skb);
726 730
727 if (rtstatus) 731 if (rtstatus)
728 b_dlok = true; 732 b_dlok = true;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
index a815bd6273da..b64ae45dc674 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192c/fw_common.h
@@ -109,7 +109,9 @@ void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw, u8 element_id,
109 u32 cmd_len, u8 *p_cmdbuffer); 109 u32 cmd_len, u8 *p_cmdbuffer);
110void rtl92c_firmware_selfreset(struct ieee80211_hw *hw); 110void rtl92c_firmware_selfreset(struct ieee80211_hw *hw);
111void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode); 111void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode);
112void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool b_dl_finished); 112void rtl92c_set_fw_rsvdpagepkt
113 (struct ieee80211_hw *hw,
114 bool (*cmd_send_packet)(struct ieee80211_hw *, struct sk_buff *));
113void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus); 115void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
114void usb_writeN_async(struct rtl_priv *rtlpriv, u32 addr, void *data, u16 len); 116void usb_writeN_async(struct rtl_priv *rtlpriv, u32 addr, void *data, u16 len);
115void rtl92c_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state); 117void rtl92c_set_p2p_ps_offload_cmd(struct ieee80211_hw *hw, u8 p2p_ps_state);
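rtl92c_set_fw_rsvdpagepkt() now takes a cmd_send_packet function pointer so the USB code can inject its own transmit routine while PCI callers pass NULL and keep the default. A small sketch of that override-with-fallback idiom, using illustrative names rather than the driver's:

#include <stdbool.h>
#include <stdio.h>

static bool default_send(const char *buf)
{
	printf("default path sends: %s\n", buf);
	return true;
}

static bool usb_send(const char *buf)
{
	printf("usb-specific path sends: %s\n", buf);
	return true;
}

/* Caller may inject its own sender; NULL means "use the default",
 * mirroring the cmd_send_packet parameter added by the patch. */
static bool send_reserved_page(const char *buf,
			       bool (*sender)(const char *))
{
	if (sender)
		return sender(buf);
	return default_send(buf);
}

int main(void)
{
	send_reserved_page("rsvd page", NULL);      /* PCI-style caller */
	send_reserved_page("rsvd page", usb_send);  /* USB-style caller */
	return 0;
}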
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h b/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
index 831df101d7b7..9b660df6fd71 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/def.h
@@ -114,6 +114,8 @@
114 LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 16, 4) 114 LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 16, 4)
115#define GET_C2H_CMD_FEEDBACK_CCX_SEQ(__pcmdfbhdr) \ 115#define GET_C2H_CMD_FEEDBACK_CCX_SEQ(__pcmdfbhdr) \
116 LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 20, 12) 116 LE_BITS_TO_4BYTE(((__pcmdfbhdr) + 4), 20, 12)
117#define GET_RX_STATUS_DESC_BUFF_ADDR(__pdesc) \
118 SHIFT_AND_MASK_LE(__pdesc + 24, 0, 32)
117 119
118#define CHIP_VER_B BIT(4) 120#define CHIP_VER_B BIT(4)
119#define CHIP_BONDING_IDENTIFIER(_value) (((_value) >> 22) & 0x3) 121#define CHIP_BONDING_IDENTIFIER(_value) (((_value) >> 22) & 0x3)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
index 8ec0f031f48a..55357d69397a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/hw.c
@@ -459,7 +459,7 @@ void rtl92ce_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
459 rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, 459 rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2,
460 tmp_reg422 & (~BIT(6))); 460 tmp_reg422 & (~BIT(6)));
461 461
462 rtl92c_set_fw_rsvdpagepkt(hw, 0); 462 rtl92c_set_fw_rsvdpagepkt(hw, NULL);
463 463
464 _rtl92ce_set_bcn_ctrl_reg(hw, BIT(3), 0); 464 _rtl92ce_set_bcn_ctrl_reg(hw, BIT(3), 0);
465 _rtl92ce_set_bcn_ctrl_reg(hw, 0, BIT(4)); 465 _rtl92ce_set_bcn_ctrl_reg(hw, 0, BIT(4));
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
index d86b5b566444..46ea07605eb4 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/sw.c
@@ -244,6 +244,7 @@ static struct rtl_hal_ops rtl8192ce_hal_ops = {
244 .phy_lc_calibrate = _rtl92ce_phy_lc_calibrate, 244 .phy_lc_calibrate = _rtl92ce_phy_lc_calibrate,
245 .phy_set_bw_mode_callback = rtl92ce_phy_set_bw_mode_callback, 245 .phy_set_bw_mode_callback = rtl92ce_phy_set_bw_mode_callback,
246 .dm_dynamic_txpower = rtl92ce_dm_dynamic_txpower, 246 .dm_dynamic_txpower = rtl92ce_dm_dynamic_txpower,
247 .get_btc_status = rtl_btc_status_false,
247}; 248};
248 249
249static struct rtl_mod_params rtl92ce_mod_params = { 250static struct rtl_mod_params rtl92ce_mod_params = {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
index 2fb9c7acb76a..dc3d20b17a26 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ce/trx.c
@@ -728,6 +728,9 @@ u32 rtl92ce_get_desc(u8 *p_desc, bool istx, u8 desc_name)
728 case HW_DESC_RXPKT_LEN: 728 case HW_DESC_RXPKT_LEN:
729 ret = GET_RX_DESC_PKT_LEN(pdesc); 729 ret = GET_RX_DESC_PKT_LEN(pdesc);
730 break; 730 break;
731 case HW_DESC_RXBUFF_ADDR:
732 ret = GET_RX_STATUS_DESC_BUFF_ADDR(pdesc);
733 break;
731 default: 734 default:
732 RT_ASSERT(false, "ERR rxdesc :%d not process\n", 735 RT_ASSERT(false, "ERR rxdesc :%d not process\n",
733 desc_name); 736 desc_name);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
index 04aa0b5f5b3d..873363acbacf 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.c
@@ -1592,6 +1592,20 @@ void rtl92cu_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
1592 } 1592 }
1593} 1593}
1594 1594
1595bool usb_cmd_send_packet(struct ieee80211_hw *hw, struct sk_buff *skb)
1596{
1597 /* Currently nothing happens here.
1598 * Traffic stops after some seconds in WPA2 802.11n mode.
 1599 * Maybe the rtl8192cu chip needs to be configured from here?
 1600 * If I understand correctly, the Realtek vendor driver sends some URBs
 1601 * at this point.
 1602 *
 1603 * This may be necessary:
1604 * rtlpriv->cfg->ops->fill_tx_cmddesc(hw, buffer, 1, 1, skb);
1605 */
1606 return true;
1607}
1608
1595void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val) 1609void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
1596{ 1610{
1597 struct rtl_priv *rtlpriv = rtl_priv(hw); 1611 struct rtl_priv *rtlpriv = rtl_priv(hw);
@@ -1939,7 +1953,8 @@ void rtl92cu_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
1939 recover = true; 1953 recover = true;
1940 rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2, 1954 rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL + 2,
1941 tmp_reg422 & (~BIT(6))); 1955 tmp_reg422 & (~BIT(6)));
1942 rtl92c_set_fw_rsvdpagepkt(hw, 0); 1956 rtl92c_set_fw_rsvdpagepkt(hw,
1957 &usb_cmd_send_packet);
1943 _rtl92cu_set_bcn_ctrl_reg(hw, BIT(3), 0); 1958 _rtl92cu_set_bcn_ctrl_reg(hw, BIT(3), 0);
1944 _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(4)); 1959 _rtl92cu_set_bcn_ctrl_reg(hw, 0, BIT(4));
1945 if (recover) 1960 if (recover)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
index 0f7812e0c8aa..c1e33b0228c0 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/hw.h
@@ -104,7 +104,6 @@ bool rtl92cu_gpio_radio_on_off_checking(struct ieee80211_hw *hw, u8 * valid);
104void rtl92cu_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid); 104void rtl92cu_set_check_bssid(struct ieee80211_hw *hw, bool check_bssid);
105int rtl92c_download_fw(struct ieee80211_hw *hw); 105int rtl92c_download_fw(struct ieee80211_hw *hw);
106void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode); 106void rtl92c_set_fw_pwrmode_cmd(struct ieee80211_hw *hw, u8 mode);
107void rtl92c_set_fw_rsvdpagepkt(struct ieee80211_hw *hw, bool dl_finished);
108void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus); 107void rtl92c_set_fw_joinbss_report_cmd(struct ieee80211_hw *hw, u8 mstatus);
109void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw, 108void rtl92c_fill_h2c_cmd(struct ieee80211_hw *hw,
110 u8 element_id, u32 cmd_len, u8 *p_cmdbuffer); 109 u8 element_id, u32 cmd_len, u8 *p_cmdbuffer);
diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
index 7c5fbaf5fee0..e06bafee37f9 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c
@@ -101,6 +101,12 @@ static void rtl92cu_deinit_sw_vars(struct ieee80211_hw *hw)
101 } 101 }
102} 102}
103 103
104/* get bt coexist status */
105static bool rtl92cu_get_btc_status(void)
106{
107 return false;
108}
109
104static struct rtl_hal_ops rtl8192cu_hal_ops = { 110static struct rtl_hal_ops rtl8192cu_hal_ops = {
105 .init_sw_vars = rtl92cu_init_sw_vars, 111 .init_sw_vars = rtl92cu_init_sw_vars,
106 .deinit_sw_vars = rtl92cu_deinit_sw_vars, 112 .deinit_sw_vars = rtl92cu_deinit_sw_vars,
@@ -148,6 +154,7 @@ static struct rtl_hal_ops rtl8192cu_hal_ops = {
148 .phy_set_bw_mode_callback = rtl92cu_phy_set_bw_mode_callback, 154 .phy_set_bw_mode_callback = rtl92cu_phy_set_bw_mode_callback,
149 .dm_dynamic_txpower = rtl92cu_dm_dynamic_txpower, 155 .dm_dynamic_txpower = rtl92cu_dm_dynamic_txpower,
150 .fill_h2c_cmd = rtl92c_fill_h2c_cmd, 156 .fill_h2c_cmd = rtl92c_fill_h2c_cmd,
157 .get_btc_status = rtl92cu_get_btc_status,
151}; 158};
152 159
153static struct rtl_mod_params rtl92cu_mod_params = { 160static struct rtl_mod_params rtl92cu_mod_params = {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
index edab5a5351b5..a0aba088259a 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192de/sw.c
@@ -251,6 +251,7 @@ static struct rtl_hal_ops rtl8192de_hal_ops = {
251 .get_rfreg = rtl92d_phy_query_rf_reg, 251 .get_rfreg = rtl92d_phy_query_rf_reg,
252 .set_rfreg = rtl92d_phy_set_rf_reg, 252 .set_rfreg = rtl92d_phy_set_rf_reg,
253 .linked_set_reg = rtl92d_linked_set_reg, 253 .linked_set_reg = rtl92d_linked_set_reg,
254 .get_btc_status = rtl_btc_status_false,
254}; 255};
255 256
256static struct rtl_mod_params rtl92de_mod_params = { 257static struct rtl_mod_params rtl92de_mod_params = {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c b/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c
index dfdc9b20e4ad..1a87edca2c3f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192ee/hw.c
@@ -362,7 +362,7 @@ void rtl92ee_get_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
362 } 362 }
363 break; 363 break;
364 default: 364 default:
365 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 365 RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
366 "switch case not process %x\n", variable); 366 "switch case not process %x\n", variable);
367 break; 367 break;
368 } 368 }
@@ -591,7 +591,7 @@ void rtl92ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
591 acm_ctrl &= (~ACMHW_BEQEN); 591 acm_ctrl &= (~ACMHW_BEQEN);
592 break; 592 break;
593 default: 593 default:
594 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 594 RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
595 "switch case not process\n"); 595 "switch case not process\n");
596 break; 596 break;
597 } 597 }
@@ -710,7 +710,7 @@ void rtl92ee_set_hw_reg(struct ieee80211_hw *hw, u8 variable, u8 *val)
710 } 710 }
711 break; 711 break;
712 default: 712 default:
713 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 713 RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
714 "switch case not process %x\n", variable); 714 "switch case not process %x\n", variable);
715 break; 715 break;
716 } 716 }
@@ -2424,7 +2424,7 @@ void rtl92ee_set_key(struct ieee80211_hw *hw, u32 key_index,
2424 enc_algo = CAM_AES; 2424 enc_algo = CAM_AES;
2425 break; 2425 break;
2426 default: 2426 default:
2427 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG, 2427 RT_TRACE(rtlpriv, COMP_ERR, DBG_DMESG,
2428 "switch case not process\n"); 2428 "switch case not process\n");
2429 enc_algo = CAM_TKIP; 2429 enc_algo = CAM_TKIP;
2430 break; 2430 break;
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/def.h b/drivers/net/wireless/rtlwifi/rtl8192se/def.h
index 83c98674bfd3..6e7a70b43949 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/def.h
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/def.h
@@ -446,6 +446,8 @@
446/* DWORD 6 */ 446/* DWORD 6 */
447#define SET_RX_STATUS__DESC_BUFF_ADDR(__pdesc, __val) \ 447#define SET_RX_STATUS__DESC_BUFF_ADDR(__pdesc, __val) \
448 SET_BITS_OFFSET_LE(__pdesc + 24, 0, 32, __val) 448 SET_BITS_OFFSET_LE(__pdesc + 24, 0, 32, __val)
449#define GET_RX_STATUS_DESC_BUFF_ADDR(__pdesc) \
450 SHIFT_AND_MASK_LE(__pdesc + 24, 0, 32)
449 451
450#define SE_RX_HAL_IS_CCK_RATE(_pdesc)\ 452#define SE_RX_HAL_IS_CCK_RATE(_pdesc)\
451 (GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC92_RATE1M || \ 453 (GET_RX_STATUS_DESC_RX_MCS(_pdesc) == DESC92_RATE1M || \
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
index 00e067044c08..5761d5b49e39 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/hw.c
@@ -1201,6 +1201,9 @@ static int _rtl92se_set_media_status(struct ieee80211_hw *hw,
1201 1201
1202 } 1202 }
1203 1203
1204 if (type != NL80211_IFTYPE_AP &&
1205 rtlpriv->mac80211.link_state < MAC80211_LINKED)
1206 bt_msr = rtl_read_byte(rtlpriv, MSR) & ~MSR_LINK_MASK;
1204 rtl_write_byte(rtlpriv, (MSR), bt_msr); 1207 rtl_write_byte(rtlpriv, (MSR), bt_msr);
1205 1208
1206 temp = rtl_read_dword(rtlpriv, TCR); 1209 temp = rtl_read_dword(rtlpriv, TCR);
@@ -1262,6 +1265,7 @@ void rtl92se_enable_interrupt(struct ieee80211_hw *hw)
1262 rtl_write_dword(rtlpriv, INTA_MASK, rtlpci->irq_mask[0]); 1265 rtl_write_dword(rtlpriv, INTA_MASK, rtlpci->irq_mask[0]);
1263 /* Support Bit 32-37(Assign as Bit 0-5) interrupt setting now */ 1266 /* Support Bit 32-37(Assign as Bit 0-5) interrupt setting now */
1264 rtl_write_dword(rtlpriv, INTA_MASK + 4, rtlpci->irq_mask[1] & 0x3F); 1267 rtl_write_dword(rtlpriv, INTA_MASK + 4, rtlpci->irq_mask[1] & 0x3F);
1268 rtlpci->irq_enabled = true;
1265} 1269}
1266 1270
1267void rtl92se_disable_interrupt(struct ieee80211_hw *hw) 1271void rtl92se_disable_interrupt(struct ieee80211_hw *hw)
@@ -1276,8 +1280,7 @@ void rtl92se_disable_interrupt(struct ieee80211_hw *hw)
1276 rtlpci = rtl_pcidev(rtl_pcipriv(hw)); 1280 rtlpci = rtl_pcidev(rtl_pcipriv(hw));
1277 rtl_write_dword(rtlpriv, INTA_MASK, 0); 1281 rtl_write_dword(rtlpriv, INTA_MASK, 0);
1278 rtl_write_dword(rtlpriv, INTA_MASK + 4, 0); 1282 rtl_write_dword(rtlpriv, INTA_MASK + 4, 0);
1279 1283 rtlpci->irq_enabled = false;
1280 synchronize_irq(rtlpci->pdev->irq);
1281} 1284}
1282 1285
1283static u8 _rtl92s_set_sysclk(struct ieee80211_hw *hw, u8 data) 1286static u8 _rtl92s_set_sysclk(struct ieee80211_hw *hw, u8 data)
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
index 77c5b5f35244..4b4612fe2fdb 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/phy.c
@@ -399,6 +399,8 @@ static bool _rtl92s_phy_sw_chnl_step_by_step(struct ieee80211_hw *hw,
399 case 2: 399 case 2:
400 currentcmd = &postcommoncmd[*step]; 400 currentcmd = &postcommoncmd[*step];
401 break; 401 break;
402 default:
403 return true;
402 } 404 }
403 405
404 if (currentcmd->cmdid == CMDID_END) { 406 if (currentcmd->cmdid == CMDID_END) {
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
index 1bff2a0f7600..fb003868bdef 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/sw.c
@@ -87,11 +87,8 @@ static void rtl92s_init_aspm_vars(struct ieee80211_hw *hw)
87static void rtl92se_fw_cb(const struct firmware *firmware, void *context) 87static void rtl92se_fw_cb(const struct firmware *firmware, void *context)
88{ 88{
89 struct ieee80211_hw *hw = context; 89 struct ieee80211_hw *hw = context;
90 struct rtl_pci_priv *pcipriv = rtl_pcipriv(hw);
91 struct rtl_priv *rtlpriv = rtl_priv(hw); 90 struct rtl_priv *rtlpriv = rtl_priv(hw);
92 struct rtl_pci *rtlpci = rtl_pcidev(pcipriv);
93 struct rt_firmware *pfirmware = NULL; 91 struct rt_firmware *pfirmware = NULL;
94 int err;
95 92
96 RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD, 93 RT_TRACE(rtlpriv, COMP_ERR, DBG_LOUD,
97 "Firmware callback routine entered!\n"); 94 "Firmware callback routine entered!\n");
@@ -112,20 +109,6 @@ static void rtl92se_fw_cb(const struct firmware *firmware, void *context)
112 memcpy(pfirmware->sz_fw_tmpbuffer, firmware->data, firmware->size); 109 memcpy(pfirmware->sz_fw_tmpbuffer, firmware->data, firmware->size);
113 pfirmware->sz_fw_tmpbufferlen = firmware->size; 110 pfirmware->sz_fw_tmpbufferlen = firmware->size;
114 release_firmware(firmware); 111 release_firmware(firmware);
115
116 err = ieee80211_register_hw(hw);
117 if (err) {
118 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
119 "Can't register mac80211 hw\n");
120 return;
121 } else {
122 rtlpriv->mac80211.mac80211_registered = 1;
123 }
124 rtlpci->irq_alloc = 1;
125 set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
126
127 /*init rfkill */
128 rtl_init_rfkill(hw);
129} 112}
130 113
131static int rtl92s_init_sw_vars(struct ieee80211_hw *hw) 114static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)
@@ -226,8 +209,8 @@ static int rtl92s_init_sw_vars(struct ieee80211_hw *hw)
226 if (!rtlpriv->rtlhal.pfirmware) 209 if (!rtlpriv->rtlhal.pfirmware)
227 return 1; 210 return 1;
228 211
229 rtlpriv->max_fw_size = RTL8190_MAX_RAW_FIRMWARE_CODE_SIZE; 212 rtlpriv->max_fw_size = RTL8190_MAX_FIRMWARE_CODE_SIZE*2 +
230 213 sizeof(struct fw_hdr);
231 pr_info("Driver for Realtek RTL8192SE/RTL8191SE\n" 214 pr_info("Driver for Realtek RTL8192SE/RTL8191SE\n"
232 "Loading firmware %s\n", rtlpriv->cfg->fw_name); 215 "Loading firmware %s\n", rtlpriv->cfg->fw_name);
233 /* request fw */ 216 /* request fw */
@@ -253,6 +236,19 @@ static void rtl92s_deinit_sw_vars(struct ieee80211_hw *hw)
253 } 236 }
254} 237}
255 238
239static bool rtl92se_is_tx_desc_closed(struct ieee80211_hw *hw, u8 hw_queue,
240 u16 index)
241{
242 struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
243 struct rtl8192_tx_ring *ring = &rtlpci->tx_ring[hw_queue];
244 u8 *entry = (u8 *)(&ring->desc[ring->idx]);
245 u8 own = (u8)rtl92se_get_desc(entry, true, HW_DESC_OWN);
246
247 if (own)
248 return false;
249 return true;
250}
251
256static struct rtl_hal_ops rtl8192se_hal_ops = { 252static struct rtl_hal_ops rtl8192se_hal_ops = {
257 .init_sw_vars = rtl92s_init_sw_vars, 253 .init_sw_vars = rtl92s_init_sw_vars,
258 .deinit_sw_vars = rtl92s_deinit_sw_vars, 254 .deinit_sw_vars = rtl92s_deinit_sw_vars,
@@ -286,6 +282,7 @@ static struct rtl_hal_ops rtl8192se_hal_ops = {
286 .led_control = rtl92se_led_control, 282 .led_control = rtl92se_led_control,
287 .set_desc = rtl92se_set_desc, 283 .set_desc = rtl92se_set_desc,
288 .get_desc = rtl92se_get_desc, 284 .get_desc = rtl92se_get_desc,
285 .is_tx_desc_closed = rtl92se_is_tx_desc_closed,
289 .tx_polling = rtl92se_tx_polling, 286 .tx_polling = rtl92se_tx_polling,
290 .enable_hw_sec = rtl92se_enable_hw_security_config, 287 .enable_hw_sec = rtl92se_enable_hw_security_config,
291 .set_key = rtl92se_set_key, 288 .set_key = rtl92se_set_key,
@@ -294,6 +291,7 @@ static struct rtl_hal_ops rtl8192se_hal_ops = {
294 .set_bbreg = rtl92s_phy_set_bb_reg, 291 .set_bbreg = rtl92s_phy_set_bb_reg,
295 .get_rfreg = rtl92s_phy_query_rf_reg, 292 .get_rfreg = rtl92s_phy_query_rf_reg,
296 .set_rfreg = rtl92s_phy_set_rf_reg, 293 .set_rfreg = rtl92s_phy_set_rf_reg,
294 .get_btc_status = rtl_btc_status_false,
297}; 295};
298 296
299static struct rtl_mod_params rtl92se_mod_params = { 297static struct rtl_mod_params rtl92se_mod_params = {
@@ -322,6 +320,8 @@ static struct rtl_hal_cfg rtl92se_hal_cfg = {
322 .maps[MAC_RCR_ACRC32] = RCR_ACRC32, 320 .maps[MAC_RCR_ACRC32] = RCR_ACRC32,
323 .maps[MAC_RCR_ACF] = RCR_ACF, 321 .maps[MAC_RCR_ACF] = RCR_ACF,
324 .maps[MAC_RCR_AAP] = RCR_AAP, 322 .maps[MAC_RCR_AAP] = RCR_AAP,
323 .maps[MAC_HIMR] = INTA_MASK,
324 .maps[MAC_HIMRE] = INTA_MASK + 4,
325 325
326 .maps[EFUSE_TEST] = REG_EFUSE_TEST, 326 .maps[EFUSE_TEST] = REG_EFUSE_TEST,
327 .maps[EFUSE_CTRL] = REG_EFUSE_CTRL, 327 .maps[EFUSE_CTRL] = REG_EFUSE_CTRL,
diff --git a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
index b358ebce8942..672fd3b02835 100644
--- a/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
+++ b/drivers/net/wireless/rtlwifi/rtl8192se/trx.c
@@ -640,6 +640,9 @@ u32 rtl92se_get_desc(u8 *desc, bool istx, u8 desc_name)
640 case HW_DESC_RXPKT_LEN: 640 case HW_DESC_RXPKT_LEN:
641 ret = GET_RX_STATUS_DESC_PKT_LEN(desc); 641 ret = GET_RX_STATUS_DESC_PKT_LEN(desc);
642 break; 642 break;
643 case HW_DESC_RXBUFF_ADDR:
644 ret = GET_RX_STATUS_DESC_BUFF_ADDR(desc);
645 break;
643 default: 646 default:
644 RT_ASSERT(false, "ERR rxdesc :%d not process\n", 647 RT_ASSERT(false, "ERR rxdesc :%d not process\n",
645 desc_name); 648 desc_name);
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
index 310d3163dc5b..8ec8200002c7 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8821ae/hw.c
@@ -3672,8 +3672,9 @@ static void rtl8821ae_update_hal_rate_mask(struct ieee80211_hw *hw,
3672 mac->opmode == NL80211_IFTYPE_ADHOC) 3672 mac->opmode == NL80211_IFTYPE_ADHOC)
3673 macid = sta->aid + 1; 3673 macid = sta->aid + 1;
3674 if (wirelessmode == WIRELESS_MODE_N_5G || 3674 if (wirelessmode == WIRELESS_MODE_N_5G ||
3675 wirelessmode == WIRELESS_MODE_AC_5G) 3675 wirelessmode == WIRELESS_MODE_AC_5G ||
3676 ratr_bitmap = sta->supp_rates[NL80211_BAND_5GHZ]; 3676 wirelessmode == WIRELESS_MODE_A)
3677 ratr_bitmap = sta->supp_rates[NL80211_BAND_5GHZ] << 4;
3677 else 3678 else
3678 ratr_bitmap = sta->supp_rates[NL80211_BAND_2GHZ]; 3679 ratr_bitmap = sta->supp_rates[NL80211_BAND_2GHZ];
3679 3680
diff --git a/drivers/net/wireless/rtlwifi/rtl8821ae/phy.c b/drivers/net/wireless/rtlwifi/rtl8821ae/phy.c
index 9786313dc62f..1e9570fa874f 100644
--- a/drivers/net/wireless/rtlwifi/rtl8821ae/phy.c
+++ b/drivers/net/wireless/rtlwifi/rtl8821ae/phy.c
@@ -1889,15 +1889,18 @@ static void _rtl8821ae_store_tx_power_by_rate(struct ieee80211_hw *hw,
1889 struct rtl_phy *rtlphy = &rtlpriv->phy; 1889 struct rtl_phy *rtlphy = &rtlpriv->phy;
1890 u8 rate_section = _rtl8821ae_get_rate_section_index(regaddr); 1890 u8 rate_section = _rtl8821ae_get_rate_section_index(regaddr);
1891 1891
1892 if (band != BAND_ON_2_4G && band != BAND_ON_5G) 1892 if (band != BAND_ON_2_4G && band != BAND_ON_5G) {
1893 RT_TRACE(rtlpriv, COMP_INIT, DBG_WARNING, "Invalid Band %d\n", band); 1893 RT_TRACE(rtlpriv, COMP_INIT, DBG_WARNING, "Invalid Band %d\n", band);
1894 1894 band = BAND_ON_2_4G;
1895 if (rfpath >= MAX_RF_PATH) 1895 }
1896 if (rfpath >= MAX_RF_PATH) {
1896 RT_TRACE(rtlpriv, COMP_INIT, DBG_WARNING, "Invalid RfPath %d\n", rfpath); 1897 RT_TRACE(rtlpriv, COMP_INIT, DBG_WARNING, "Invalid RfPath %d\n", rfpath);
1897 1898 rfpath = MAX_RF_PATH - 1;
1898 if (txnum >= MAX_RF_PATH) 1899 }
1900 if (txnum >= MAX_RF_PATH) {
1899 RT_TRACE(rtlpriv, COMP_INIT, DBG_WARNING, "Invalid TxNum %d\n", txnum); 1901 RT_TRACE(rtlpriv, COMP_INIT, DBG_WARNING, "Invalid TxNum %d\n", txnum);
1900 1902 txnum = MAX_RF_PATH - 1;
1903 }
1901 rtlphy->tx_power_by_rate_offset[band][rfpath][txnum][rate_section] = data; 1904 rtlphy->tx_power_by_rate_offset[band][rfpath][txnum][rate_section] = data;
1902 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, 1905 RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
1903 "TxPwrByRateOffset[Band %d][RfPath %d][TxNum %d][RateSection %d] = 0x%x\n", 1906 "TxPwrByRateOffset[Band %d][RfPath %d][TxNum %d][RateSection %d] = 0x%x\n",
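The phy.c hunk above makes _rtl8821ae_store_tx_power_by_rate() clamp an out-of-range band, RF path, or TX count before the array store, instead of only logging a warning. A minimal sketch of that clamp-before-index pattern; the array shape and limits below are assumptions, not the driver's real dimensions.

#include <stdio.h>

#define MAX_BAND 2
#define MAX_PATH 4

static int tx_power[MAX_BAND][MAX_PATH];

static void store_tx_power(int band, int path, int value)
{
	/* Warn, then clamp, so the indexing below always stays in bounds. */
	if (band < 0 || band >= MAX_BAND) {
		fprintf(stderr, "invalid band %d, clamping\n", band);
		band = 0;
	}
	if (path < 0 || path >= MAX_PATH) {
		fprintf(stderr, "invalid path %d, clamping\n", path);
		path = MAX_PATH - 1;
	}
	tx_power[band][path] = value;
}

int main(void)
{
	store_tx_power(1, 2, 40);   /* valid indices */
	store_tx_power(5, 9, 40);   /* clamped to [0][3] instead of corrupting memory */
	printf("tx_power[0][3] = %d\n", tx_power[0][3]);
	return 0;
}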
diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
index 10cf69c4bc42..46ee956d0235 100644
--- a/drivers/net/wireless/rtlwifi/usb.c
+++ b/drivers/net/wireless/rtlwifi/usb.c
@@ -1117,7 +1117,18 @@ int rtl_usb_probe(struct usb_interface *intf,
1117 } 1117 }
1118 rtlpriv->cfg->ops->init_sw_leds(hw); 1118 rtlpriv->cfg->ops->init_sw_leds(hw);
1119 1119
1120 err = ieee80211_register_hw(hw);
1121 if (err) {
1122 RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
1123 "Can't register mac80211 hw.\n");
1124 err = -ENODEV;
1125 goto error_out;
1126 }
1127 rtlpriv->mac80211.mac80211_registered = 1;
1128
1129 set_bit(RTL_STATUS_INTERFACE_START, &rtlpriv->status);
1120 return 0; 1130 return 0;
1131
1121error_out: 1132error_out:
1122 rtl_deinit_core(hw); 1133 rtl_deinit_core(hw);
1123 _rtl_usb_io_handler_release(hw); 1134 _rtl_usb_io_handler_release(hw);
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index d4eb8d2e9cb7..083ecc93fe5e 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -176,10 +176,11 @@ struct xenvif_queue { /* Per-queue data for xenvif */
176 char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */ 176 char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */
177 struct xen_netif_rx_back_ring rx; 177 struct xen_netif_rx_back_ring rx;
178 struct sk_buff_head rx_queue; 178 struct sk_buff_head rx_queue;
179 RING_IDX rx_last_skb_slots;
180 unsigned long status;
181 179
182 struct timer_list rx_stalled; 180 unsigned int rx_queue_max;
181 unsigned int rx_queue_len;
182 unsigned long last_rx_time;
183 bool stalled;
183 184
184 struct gnttab_copy grant_copy_op[MAX_GRANT_COPY_OPS]; 185 struct gnttab_copy grant_copy_op[MAX_GRANT_COPY_OPS];
185 186
@@ -199,18 +200,14 @@ struct xenvif_queue { /* Per-queue data for xenvif */
199 struct xenvif_stats stats; 200 struct xenvif_stats stats;
200}; 201};
201 202
203/* Maximum number of Rx slots a to-guest packet may use, including the
204 * slot needed for GSO meta-data.
205 */
206#define XEN_NETBK_RX_SLOTS_MAX (MAX_SKB_FRAGS + 1)
207
202enum state_bit_shift { 208enum state_bit_shift {
203 /* This bit marks that the vif is connected */ 209 /* This bit marks that the vif is connected */
204 VIF_STATUS_CONNECTED, 210 VIF_STATUS_CONNECTED,
205 /* This bit signals the RX thread that queuing was stopped (in
206 * start_xmit), and either the timer fired or an RX interrupt came
207 */
208 QUEUE_STATUS_RX_PURGE_EVENT,
209 /* This bit tells the interrupt handler that this queue was the reason
210 * for the carrier off, so it should kick the thread. Only queues which
211 * brought it down can turn on the carrier.
212 */
213 QUEUE_STATUS_RX_STALLED
214}; 211};
215 212
216struct xenvif { 213struct xenvif {
@@ -228,9 +225,6 @@ struct xenvif {
228 u8 ip_csum:1; 225 u8 ip_csum:1;
229 u8 ipv6_csum:1; 226 u8 ipv6_csum:1;
230 227
231 /* Internal feature information. */
232 u8 can_queue:1; /* can queue packets for receiver? */
233
234 /* Is this interface disabled? True when backend discovers 228 /* Is this interface disabled? True when backend discovers
235 * frontend is rogue. 229 * frontend is rogue.
236 */ 230 */
@@ -240,6 +234,9 @@ struct xenvif {
240 /* Queues */ 234 /* Queues */
241 struct xenvif_queue *queues; 235 struct xenvif_queue *queues;
242 unsigned int num_queues; /* active queues, resource allocated */ 236 unsigned int num_queues; /* active queues, resource allocated */
237 unsigned int stalled_queues;
238
239 spinlock_t lock;
243 240
244#ifdef CONFIG_DEBUG_FS 241#ifdef CONFIG_DEBUG_FS
245 struct dentry *xenvif_dbg_root; 242 struct dentry *xenvif_dbg_root;
@@ -249,6 +246,14 @@ struct xenvif {
249 struct net_device *dev; 246 struct net_device *dev;
250}; 247};
251 248
249struct xenvif_rx_cb {
250 unsigned long expires;
251 int meta_slots_used;
252 bool full_coalesce;
253};
254
255#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)
256
252static inline struct xenbus_device *xenvif_to_xenbus_device(struct xenvif *vif) 257static inline struct xenbus_device *xenvif_to_xenbus_device(struct xenvif *vif)
253{ 258{
254 return to_xenbus_device(vif->dev->dev.parent); 259 return to_xenbus_device(vif->dev->dev.parent);
@@ -272,8 +277,6 @@ void xenvif_xenbus_fini(void);
272 277
273int xenvif_schedulable(struct xenvif *vif); 278int xenvif_schedulable(struct xenvif *vif);
274 279
275int xenvif_must_stop_queue(struct xenvif_queue *queue);
276
277int xenvif_queue_stopped(struct xenvif_queue *queue); 280int xenvif_queue_stopped(struct xenvif_queue *queue);
278void xenvif_wake_queue(struct xenvif_queue *queue); 281void xenvif_wake_queue(struct xenvif_queue *queue);
279 282
@@ -296,6 +299,8 @@ void xenvif_kick_thread(struct xenvif_queue *queue);
296 299
297int xenvif_dealloc_kthread(void *data); 300int xenvif_dealloc_kthread(void *data);
298 301
302void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);
303
299/* Determine whether the needed number of slots (req) are available, 304/* Determine whether the needed number of slots (req) are available,
300 * and set req_event if not. 305 * and set req_event if not.
301 */ 306 */
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index f379689dde30..895fe84011e7 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -43,6 +43,9 @@
43#define XENVIF_QUEUE_LENGTH 32 43#define XENVIF_QUEUE_LENGTH 32
44#define XENVIF_NAPI_WEIGHT 64 44#define XENVIF_NAPI_WEIGHT 64
45 45
46/* Number of bytes allowed on the internal guest Rx queue. */
47#define XENVIF_RX_QUEUE_BYTES (XEN_NETIF_RX_RING_SIZE/2 * PAGE_SIZE)
48
46/* This function is used to set SKBTX_DEV_ZEROCOPY as well as 49/* This function is used to set SKBTX_DEV_ZEROCOPY as well as
47 * increasing the inflight counter. We need to increase the inflight 50 * increasing the inflight counter. We need to increase the inflight
48 * counter because core driver calls into xenvif_zerocopy_callback 51 * counter because core driver calls into xenvif_zerocopy_callback
@@ -60,20 +63,11 @@ void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
60 atomic_dec(&queue->inflight_packets); 63 atomic_dec(&queue->inflight_packets);
61} 64}
62 65
63static inline void xenvif_stop_queue(struct xenvif_queue *queue)
64{
65 struct net_device *dev = queue->vif->dev;
66
67 if (!queue->vif->can_queue)
68 return;
69
70 netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
71}
72
73int xenvif_schedulable(struct xenvif *vif) 66int xenvif_schedulable(struct xenvif *vif)
74{ 67{
75 return netif_running(vif->dev) && 68 return netif_running(vif->dev) &&
76 test_bit(VIF_STATUS_CONNECTED, &vif->status); 69 test_bit(VIF_STATUS_CONNECTED, &vif->status) &&
70 !vif->disabled;
77} 71}
78 72
79static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id) 73static irqreturn_t xenvif_tx_interrupt(int irq, void *dev_id)
@@ -114,16 +108,7 @@ int xenvif_poll(struct napi_struct *napi, int budget)
114static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id) 108static irqreturn_t xenvif_rx_interrupt(int irq, void *dev_id)
115{ 109{
116 struct xenvif_queue *queue = dev_id; 110 struct xenvif_queue *queue = dev_id;
117 struct netdev_queue *net_queue =
118 netdev_get_tx_queue(queue->vif->dev, queue->id);
119 111
120 /* QUEUE_STATUS_RX_PURGE_EVENT is only set if either QDisc was off OR
121 * the carrier went down and this queue was previously blocked
122 */
123 if (unlikely(netif_tx_queue_stopped(net_queue) ||
124 (!netif_carrier_ok(queue->vif->dev) &&
125 test_bit(QUEUE_STATUS_RX_STALLED, &queue->status))))
126 set_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status);
127 xenvif_kick_thread(queue); 112 xenvif_kick_thread(queue);
128 113
129 return IRQ_HANDLED; 114 return IRQ_HANDLED;
@@ -151,24 +136,13 @@ void xenvif_wake_queue(struct xenvif_queue *queue)
151 netif_tx_wake_queue(netdev_get_tx_queue(dev, id)); 136 netif_tx_wake_queue(netdev_get_tx_queue(dev, id));
152} 137}
153 138
154/* Callback to wake the queue's thread and turn the carrier off on timeout */
155static void xenvif_rx_stalled(unsigned long data)
156{
157 struct xenvif_queue *queue = (struct xenvif_queue *)data;
158
159 if (xenvif_queue_stopped(queue)) {
160 set_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status);
161 xenvif_kick_thread(queue);
162 }
163}
164
165static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) 139static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
166{ 140{
167 struct xenvif *vif = netdev_priv(dev); 141 struct xenvif *vif = netdev_priv(dev);
168 struct xenvif_queue *queue = NULL; 142 struct xenvif_queue *queue = NULL;
169 unsigned int num_queues = vif->num_queues; 143 unsigned int num_queues = vif->num_queues;
170 u16 index; 144 u16 index;
171 int min_slots_needed; 145 struct xenvif_rx_cb *cb;
172 146
173 BUG_ON(skb->dev != dev); 147 BUG_ON(skb->dev != dev);
174 148
@@ -191,30 +165,10 @@ static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
191 !xenvif_schedulable(vif)) 165 !xenvif_schedulable(vif))
192 goto drop; 166 goto drop;
193 167
194 /* At best we'll need one slot for the header and one for each 168 cb = XENVIF_RX_CB(skb);
195 * frag. 169 cb->expires = jiffies + rx_drain_timeout_jiffies;
196 */
197 min_slots_needed = 1 + skb_shinfo(skb)->nr_frags;
198
199 /* If the skb is GSO then we'll also need an extra slot for the
200 * metadata.
201 */
202 if (skb_is_gso(skb))
203 min_slots_needed++;
204 170
205 /* If the skb can't possibly fit in the remaining slots 171 xenvif_rx_queue_tail(queue, skb);
206 * then turn off the queue to give the ring a chance to
207 * drain.
208 */
209 if (!xenvif_rx_ring_slots_available(queue, min_slots_needed)) {
210 queue->rx_stalled.function = xenvif_rx_stalled;
211 queue->rx_stalled.data = (unsigned long)queue;
212 xenvif_stop_queue(queue);
213 mod_timer(&queue->rx_stalled,
214 jiffies + rx_drain_timeout_jiffies);
215 }
216
217 skb_queue_tail(&queue->rx_queue, skb);
218 xenvif_kick_thread(queue); 172 xenvif_kick_thread(queue);
219 173
220 return NETDEV_TX_OK; 174 return NETDEV_TX_OK;
@@ -465,6 +419,8 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
465 vif->queues = NULL; 419 vif->queues = NULL;
466 vif->num_queues = 0; 420 vif->num_queues = 0;
467 421
422 spin_lock_init(&vif->lock);
423
468 dev->netdev_ops = &xenvif_netdev_ops; 424 dev->netdev_ops = &xenvif_netdev_ops;
469 dev->hw_features = NETIF_F_SG | 425 dev->hw_features = NETIF_F_SG |
470 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 426 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -508,6 +464,8 @@ int xenvif_init_queue(struct xenvif_queue *queue)
508 init_timer(&queue->credit_timeout); 464 init_timer(&queue->credit_timeout);
509 queue->credit_window_start = get_jiffies_64(); 465 queue->credit_window_start = get_jiffies_64();
510 466
467 queue->rx_queue_max = XENVIF_RX_QUEUE_BYTES;
468
511 skb_queue_head_init(&queue->rx_queue); 469 skb_queue_head_init(&queue->rx_queue);
512 skb_queue_head_init(&queue->tx_queue); 470 skb_queue_head_init(&queue->tx_queue);
513 471
@@ -539,8 +497,6 @@ int xenvif_init_queue(struct xenvif_queue *queue)
539 queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE; 497 queue->grant_tx_handle[i] = NETBACK_INVALID_HANDLE;
540 } 498 }
541 499
542 init_timer(&queue->rx_stalled);
543
544 return 0; 500 return 0;
545} 501}
546 502
@@ -551,7 +507,6 @@ void xenvif_carrier_on(struct xenvif *vif)
551 dev_set_mtu(vif->dev, ETH_DATA_LEN); 507 dev_set_mtu(vif->dev, ETH_DATA_LEN);
552 netdev_update_features(vif->dev); 508 netdev_update_features(vif->dev);
553 set_bit(VIF_STATUS_CONNECTED, &vif->status); 509 set_bit(VIF_STATUS_CONNECTED, &vif->status);
554 netif_carrier_on(vif->dev);
555 if (netif_running(vif->dev)) 510 if (netif_running(vif->dev))
556 xenvif_up(vif); 511 xenvif_up(vif);
557 rtnl_unlock(); 512 rtnl_unlock();
@@ -611,6 +566,8 @@ int xenvif_connect(struct xenvif_queue *queue, unsigned long tx_ring_ref,
611 disable_irq(queue->rx_irq); 566 disable_irq(queue->rx_irq);
612 } 567 }
613 568
569 queue->stalled = true;
570
614 task = kthread_create(xenvif_kthread_guest_rx, 571 task = kthread_create(xenvif_kthread_guest_rx,
615 (void *)queue, "%s-guest-rx", queue->name); 572 (void *)queue, "%s-guest-rx", queue->name);
616 if (IS_ERR(task)) { 573 if (IS_ERR(task)) {
@@ -674,7 +631,6 @@ void xenvif_disconnect(struct xenvif *vif)
674 netif_napi_del(&queue->napi); 631 netif_napi_del(&queue->napi);
675 632
676 if (queue->task) { 633 if (queue->task) {
677 del_timer_sync(&queue->rx_stalled);
678 kthread_stop(queue->task); 634 kthread_stop(queue->task);
679 queue->task = NULL; 635 queue->task = NULL;
680 } 636 }
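Taken together, the xen-netback changes in interface.c and the netback.c hunks that follow replace the per-queue stall timer with an internal guest-Rx queue that is bounded in bytes (XENVIF_RX_QUEUE_BYTES) and whose packets carry an expiry stamp, so stale ones are dropped after the drain timeout. The model below captures that queue policy with plain C structures in place of sk_buffs, jiffies, and the netif_tx_* calls; it is an illustration of the policy, not kernel code.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy packet: length in bytes plus the time at which it expires. */
struct pkt {
	unsigned int len;
	unsigned long expires;
	struct pkt *next;
};

struct rx_queue {
	struct pkt *head, *tail;
	unsigned int queued_bytes;
	unsigned int max_bytes;   /* analogue of XENVIF_RX_QUEUE_BYTES */
	bool tx_stopped;          /* analogue of netif_tx_stop_queue() */
};

/* Enqueue and stop the upper layer once the byte budget is exceeded. */
static void rx_queue_tail(struct rx_queue *q, struct pkt *p)
{
	p->next = NULL;
	if (q->tail)
		q->tail->next = p;
	else
		q->head = p;
	q->tail = p;
	q->queued_bytes += p->len;
	if (q->queued_bytes > q->max_bytes)
		q->tx_stopped = true;
}

/* Drop packets from the head whose deadline has passed, then wake the
 * upper layer again if the queue dropped back under the byte budget. */
static void rx_queue_drop_expired(struct rx_queue *q, unsigned long now)
{
	while (q->head && now >= q->head->expires) {
		struct pkt *p = q->head;

		q->head = p->next;
		if (!q->head)
			q->tail = NULL;
		q->queued_bytes -= p->len;
		free(p);
	}
	if (q->queued_bytes < q->max_bytes)
		q->tx_stopped = false;   /* analogue of netif_tx_wake_queue() */
}

int main(void)
{
	struct rx_queue q = { .max_bytes = 3000 };

	for (int i = 0; i < 3; i++) {
		struct pkt *p = malloc(sizeof(*p));
		p->len = 1500;
		p->expires = 100 + i;    /* pretend "jiffies + drain timeout" */
		rx_queue_tail(&q, p);
	}
	printf("queued %u bytes, tx stopped: %d\n", q.queued_bytes, q.tx_stopped);
	rx_queue_drop_expired(&q, 101);  /* two packets have expired by now */
	printf("queued %u bytes, tx stopped: %d\n", q.queued_bytes, q.tx_stopped);
	return 0;
}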
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 08f65996534c..6563f0713fc0 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -55,13 +55,20 @@
55bool separate_tx_rx_irq = 1; 55bool separate_tx_rx_irq = 1;
56module_param(separate_tx_rx_irq, bool, 0644); 56module_param(separate_tx_rx_irq, bool, 0644);
57 57
58/* When guest ring is filled up, qdisc queues the packets for us, but we have 58/* The time that packets can stay on the guest Rx internal queue
59 * to timeout them, otherwise other guests' packets can get stuck there 59 * before they are dropped.
60 */ 60 */
61unsigned int rx_drain_timeout_msecs = 10000; 61unsigned int rx_drain_timeout_msecs = 10000;
62module_param(rx_drain_timeout_msecs, uint, 0444); 62module_param(rx_drain_timeout_msecs, uint, 0444);
63unsigned int rx_drain_timeout_jiffies; 63unsigned int rx_drain_timeout_jiffies;
64 64
65/* The length of time before the frontend is considered unresponsive
66 * because it isn't providing Rx slots.
67 */
68static unsigned int rx_stall_timeout_msecs = 60000;
69module_param(rx_stall_timeout_msecs, uint, 0444);
70static unsigned int rx_stall_timeout_jiffies;
71
65unsigned int xenvif_max_queues; 72unsigned int xenvif_max_queues;
66module_param_named(max_queues, xenvif_max_queues, uint, 0644); 73module_param_named(max_queues, xenvif_max_queues, uint, 0644);
67MODULE_PARM_DESC(max_queues, 74MODULE_PARM_DESC(max_queues,
@@ -83,7 +90,6 @@ static void make_tx_response(struct xenvif_queue *queue,
83 s8 st); 90 s8 st);
84 91
85static inline int tx_work_todo(struct xenvif_queue *queue); 92static inline int tx_work_todo(struct xenvif_queue *queue);
86static inline int rx_work_todo(struct xenvif_queue *queue);
87 93
88static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue, 94static struct xen_netif_rx_response *make_rx_response(struct xenvif_queue *queue,
89 u16 id, 95 u16 id,
@@ -163,6 +169,69 @@ bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue, int needed)
163 return false; 169 return false;
164} 170}
165 171
172void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
173{
174 unsigned long flags;
175
176 spin_lock_irqsave(&queue->rx_queue.lock, flags);
177
178 __skb_queue_tail(&queue->rx_queue, skb);
179
180 queue->rx_queue_len += skb->len;
181 if (queue->rx_queue_len > queue->rx_queue_max)
182 netif_tx_stop_queue(netdev_get_tx_queue(queue->vif->dev, queue->id));
183
184 spin_unlock_irqrestore(&queue->rx_queue.lock, flags);
185}
186
187static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
188{
189 struct sk_buff *skb;
190
191 spin_lock_irq(&queue->rx_queue.lock);
192
193 skb = __skb_dequeue(&queue->rx_queue);
194 if (skb)
195 queue->rx_queue_len -= skb->len;
196
197 spin_unlock_irq(&queue->rx_queue.lock);
198
199 return skb;
200}
201
202static void xenvif_rx_queue_maybe_wake(struct xenvif_queue *queue)
203{
204 spin_lock_irq(&queue->rx_queue.lock);
205
206 if (queue->rx_queue_len < queue->rx_queue_max)
207 netif_tx_wake_queue(netdev_get_tx_queue(queue->vif->dev, queue->id));
208
209 spin_unlock_irq(&queue->rx_queue.lock);
210}
211
212
213static void xenvif_rx_queue_purge(struct xenvif_queue *queue)
214{
215 struct sk_buff *skb;
216 while ((skb = xenvif_rx_dequeue(queue)) != NULL)
217 kfree_skb(skb);
218}
219
220static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
221{
222 struct sk_buff *skb;
223
224 for(;;) {
225 skb = skb_peek(&queue->rx_queue);
226 if (!skb)
227 break;
228 if (time_before(jiffies, XENVIF_RX_CB(skb)->expires))
229 break;
230 xenvif_rx_dequeue(queue);
231 kfree_skb(skb);
232 }
233}
234
166/* 235/*
167 * Returns true if we should start a new receive buffer instead of 236 * Returns true if we should start a new receive buffer instead of
168 * adding 'size' bytes to a buffer which currently contains 'offset' 237 * adding 'size' bytes to a buffer which currently contains 'offset'
@@ -237,13 +306,6 @@ static struct xenvif_rx_meta *get_next_rx_buffer(struct xenvif_queue *queue,
237 return meta; 306 return meta;
238} 307}
239 308
240struct xenvif_rx_cb {
241 int meta_slots_used;
242 bool full_coalesce;
243};
244
245#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)
246
247/* 309/*
248 * Set up the grant operations for this fragment. If it's a flipping 310 * Set up the grant operations for this fragment. If it's a flipping
249 * interface, we also set up the unmap request from here. 311 * interface, we also set up the unmap request from here.
@@ -587,12 +649,15 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
587 649
588 skb_queue_head_init(&rxq); 650 skb_queue_head_init(&rxq);
589 651
590 while ((skb = skb_dequeue(&queue->rx_queue)) != NULL) { 652 while (xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX)
653 && (skb = xenvif_rx_dequeue(queue)) != NULL) {
591 RING_IDX max_slots_needed; 654 RING_IDX max_slots_needed;
592 RING_IDX old_req_cons; 655 RING_IDX old_req_cons;
593 RING_IDX ring_slots_used; 656 RING_IDX ring_slots_used;
594 int i; 657 int i;
595 658
659 queue->last_rx_time = jiffies;
660
596 /* We need a cheap worse case estimate for the number of 661 /* We need a cheap worse case estimate for the number of
597 * slots we'll use. 662 * slots we'll use.
598 */ 663 */
@@ -634,15 +699,6 @@ static void xenvif_rx_action(struct xenvif_queue *queue)
634 skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)) 699 skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6))
635 max_slots_needed++; 700 max_slots_needed++;
636 701
637 /* If the skb may not fit then bail out now */
638 if (!xenvif_rx_ring_slots_available(queue, max_slots_needed)) {
639 skb_queue_head(&queue->rx_queue, skb);
640 need_to_notify = true;
641 queue->rx_last_skb_slots = max_slots_needed;
642 break;
643 } else
644 queue->rx_last_skb_slots = 0;
645
646 old_req_cons = queue->rx.req_cons; 702 old_req_cons = queue->rx.req_cons;
647 XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue); 703 XENVIF_RX_CB(skb)->meta_slots_used = xenvif_gop_skb(skb, &npo, queue);
648 ring_slots_used = queue->rx.req_cons - old_req_cons; 704 ring_slots_used = queue->rx.req_cons - old_req_cons;
@@ -1869,12 +1925,6 @@ void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx)
1869 } 1925 }
1870} 1926}
1871 1927
1872static inline int rx_work_todo(struct xenvif_queue *queue)
1873{
1874 return (!skb_queue_empty(&queue->rx_queue) &&
1875 xenvif_rx_ring_slots_available(queue, queue->rx_last_skb_slots));
1876}
1877
1878static inline int tx_work_todo(struct xenvif_queue *queue) 1928static inline int tx_work_todo(struct xenvif_queue *queue)
1879{ 1929{
1880 if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx))) 1930 if (likely(RING_HAS_UNCONSUMED_REQUESTS(&queue->tx)))
@@ -1931,92 +1981,121 @@ err:
1931 return err; 1981 return err;
1932} 1982}
1933 1983
1934static void xenvif_start_queue(struct xenvif_queue *queue) 1984static void xenvif_queue_carrier_off(struct xenvif_queue *queue)
1935{ 1985{
1936 if (xenvif_schedulable(queue->vif)) 1986 struct xenvif *vif = queue->vif;
1937 xenvif_wake_queue(queue); 1987
1988 queue->stalled = true;
1989
1990 /* At least one queue has stalled? Disable the carrier. */
1991 spin_lock(&vif->lock);
1992 if (vif->stalled_queues++ == 0) {
1993 netdev_info(vif->dev, "Guest Rx stalled");
1994 netif_carrier_off(vif->dev);
1995 }
1996 spin_unlock(&vif->lock);
1938} 1997}
1939 1998
1940/* Only called from the queue's thread, it handles the situation when the guest 1999static void xenvif_queue_carrier_on(struct xenvif_queue *queue)
1941 * doesn't post enough requests on the receiving ring.
1942 * First xenvif_start_xmit disables QDisc and start a timer, and then either the
1943 * timer fires, or the guest send an interrupt after posting new request. If it
1944 * is the timer, the carrier is turned off here.
1945 * */
1946static void xenvif_rx_purge_event(struct xenvif_queue *queue)
1947{ 2000{
1948 /* Either the last unsuccesful skb or at least 1 slot should fit */ 2001 struct xenvif *vif = queue->vif;
1949 int needed = queue->rx_last_skb_slots ?
1950 queue->rx_last_skb_slots : 1;
1951 2002
1952 /* It is assumed that if the guest post new slots after this, the RX 2003 queue->last_rx_time = jiffies; /* Reset Rx stall detection. */
1953 * interrupt will set the QUEUE_STATUS_RX_PURGE_EVENT bit and wake up 2004 queue->stalled = false;
1954 * the thread again
1955 */
1956 set_bit(QUEUE_STATUS_RX_STALLED, &queue->status);
1957 if (!xenvif_rx_ring_slots_available(queue, needed)) {
1958 rtnl_lock();
1959 if (netif_carrier_ok(queue->vif->dev)) {
1960 /* Timer fired and there are still no slots. Turn off
1961 * everything except the interrupts
1962 */
1963 netif_carrier_off(queue->vif->dev);
1964 skb_queue_purge(&queue->rx_queue);
1965 queue->rx_last_skb_slots = 0;
1966 if (net_ratelimit())
1967 netdev_err(queue->vif->dev, "Carrier off due to lack of guest response on queue %d\n", queue->id);
1968 } else {
1969 /* Probably an another queue already turned the carrier
1970 * off, make sure nothing is stucked in the internal
1971 * queue of this queue
1972 */
1973 skb_queue_purge(&queue->rx_queue);
1974 queue->rx_last_skb_slots = 0;
1975 }
1976 rtnl_unlock();
1977 } else if (!netif_carrier_ok(queue->vif->dev)) {
1978 unsigned int num_queues = queue->vif->num_queues;
1979 unsigned int i;
1980 /* The carrier was down, but an interrupt kicked
1981 * the thread again after new requests were
1982 * posted
1983 */
1984 clear_bit(QUEUE_STATUS_RX_STALLED,
1985 &queue->status);
1986 rtnl_lock();
1987 netif_carrier_on(queue->vif->dev);
1988 netif_tx_wake_all_queues(queue->vif->dev);
1989 rtnl_unlock();
1990 2005
1991 for (i = 0; i < num_queues; i++) { 2006 /* All queues are ready? Enable the carrier. */
1992 struct xenvif_queue *temp = &queue->vif->queues[i]; 2007 spin_lock(&vif->lock);
2008 if (--vif->stalled_queues == 0) {
2009 netdev_info(vif->dev, "Guest Rx ready");
2010 netif_carrier_on(vif->dev);
2011 }
2012 spin_unlock(&vif->lock);
2013}
1993 2014
1994 xenvif_napi_schedule_or_enable_events(temp); 2015static bool xenvif_rx_queue_stalled(struct xenvif_queue *queue)
1995 } 2016{
1996 if (net_ratelimit()) 2017 RING_IDX prod, cons;
1997 netdev_err(queue->vif->dev, "Carrier on again\n"); 2018
1998 } else { 2019 prod = queue->rx.sring->req_prod;
1999 /* Queuing were stopped, but the guest posted 2020 cons = queue->rx.req_cons;
2000 * new requests and sent an interrupt 2021
2001 */ 2022 return !queue->stalled
2002 clear_bit(QUEUE_STATUS_RX_STALLED, 2023 && prod - cons < XEN_NETBK_RX_SLOTS_MAX
2003 &queue->status); 2024 && time_after(jiffies,
2004 del_timer_sync(&queue->rx_stalled); 2025 queue->last_rx_time + rx_stall_timeout_jiffies);
2005 xenvif_start_queue(queue); 2026}
2027
2028static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
2029{
2030 RING_IDX prod, cons;
2031
2032 prod = queue->rx.sring->req_prod;
2033 cons = queue->rx.req_cons;
2034
2035 return queue->stalled
2036 && prod - cons >= XEN_NETBK_RX_SLOTS_MAX;
2037}
2038
2039static bool xenvif_have_rx_work(struct xenvif_queue *queue)
2040{
2041 return (!skb_queue_empty(&queue->rx_queue)
2042 && xenvif_rx_ring_slots_available(queue, XEN_NETBK_RX_SLOTS_MAX))
2043 || xenvif_rx_queue_stalled(queue)
2044 || xenvif_rx_queue_ready(queue)
2045 || kthread_should_stop()
2046 || queue->vif->disabled;
2047}
2048
2049static long xenvif_rx_queue_timeout(struct xenvif_queue *queue)
2050{
2051 struct sk_buff *skb;
2052 long timeout;
2053
2054 skb = skb_peek(&queue->rx_queue);
2055 if (!skb)
2056 return MAX_SCHEDULE_TIMEOUT;
2057
2058 timeout = XENVIF_RX_CB(skb)->expires - jiffies;
2059 return timeout < 0 ? 0 : timeout;
2060}
2061
2062/* Wait until the guest Rx thread has work.
2063 *
2064 * The timeout needs to be adjusted based on the current head of the
2065 * queue (and not just the head at the beginning). In particular, if
2066 * the queue is initially empty an infinite timeout is used and this
2067 * needs to be reduced when a skb is queued.
2068 *
2069 * This cannot be done with wait_event_timeout() because it only
2070 * calculates the timeout once.
2071 */
2072static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
2073{
2074 DEFINE_WAIT(wait);
2075
2076 if (xenvif_have_rx_work(queue))
2077 return;
2078
2079 for (;;) {
2080 long ret;
2081
2082 prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
2083 if (xenvif_have_rx_work(queue))
2084 break;
2085 ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
2086 if (!ret)
2087 break;
2006 } 2088 }
2089 finish_wait(&queue->wq, &wait);
2007} 2090}
2008 2091
2009int xenvif_kthread_guest_rx(void *data) 2092int xenvif_kthread_guest_rx(void *data)
2010{ 2093{
2011 struct xenvif_queue *queue = data; 2094 struct xenvif_queue *queue = data;
2012 struct sk_buff *skb; 2095 struct xenvif *vif = queue->vif;
2013 2096
2014 while (!kthread_should_stop()) { 2097 for (;;) {
2015 wait_event_interruptible(queue->wq, 2098 xenvif_wait_for_rx_work(queue);
2016 rx_work_todo(queue) ||
2017 queue->vif->disabled ||
2018 test_bit(QUEUE_STATUS_RX_PURGE_EVENT, &queue->status) ||
2019 kthread_should_stop());
2020 2099
2021 if (kthread_should_stop()) 2100 if (kthread_should_stop())
2022 break; 2101 break;
@@ -2028,35 +2107,38 @@ int xenvif_kthread_guest_rx(void *data)
2028 * context so we defer it here, if this thread is 2107 * context so we defer it here, if this thread is
2029 * associated with queue 0. 2108 * associated with queue 0.
2030 */ 2109 */
2031 if (unlikely(queue->vif->disabled && queue->id == 0)) { 2110 if (unlikely(vif->disabled && queue->id == 0)) {
2032 xenvif_carrier_off(queue->vif); 2111 xenvif_carrier_off(vif);
2033 } else if (unlikely(queue->vif->disabled)) { 2112 xenvif_rx_queue_purge(queue);
2034 /* kthread_stop() would be called upon this thread soon, 2113 continue;
2035 * be a bit proactive
2036 */
2037 skb_queue_purge(&queue->rx_queue);
2038 queue->rx_last_skb_slots = 0;
2039 } else if (unlikely(test_and_clear_bit(QUEUE_STATUS_RX_PURGE_EVENT,
2040 &queue->status))) {
2041 xenvif_rx_purge_event(queue);
2042 } else if (!netif_carrier_ok(queue->vif->dev)) {
2043 /* Another queue stalled and turned the carrier off, so
2044 * purge the internal queue of queues which were not
2045 * blocked
2046 */
2047 skb_queue_purge(&queue->rx_queue);
2048 queue->rx_last_skb_slots = 0;
2049 } 2114 }
2050 2115
2051 if (!skb_queue_empty(&queue->rx_queue)) 2116 if (!skb_queue_empty(&queue->rx_queue))
2052 xenvif_rx_action(queue); 2117 xenvif_rx_action(queue);
2053 2118
2119 /* If the guest hasn't provided any Rx slots for a
2120 * while it's probably not responsive, drop the
2121 * carrier so packets are dropped earlier.
2122 */
2123 if (xenvif_rx_queue_stalled(queue))
2124 xenvif_queue_carrier_off(queue);
2125 else if (xenvif_rx_queue_ready(queue))
2126 xenvif_queue_carrier_on(queue);
2127
2128 /* Queued packets may have foreign pages from other
2129 * domains. These cannot be queued indefinitely as
2130 * this would starve guests of grant refs and transmit
2131 * slots.
2132 */
2133 xenvif_rx_queue_drop_expired(queue);
2134
2135 xenvif_rx_queue_maybe_wake(queue);
2136
2054 cond_resched(); 2137 cond_resched();
2055 } 2138 }
2056 2139
2057 /* Bin any remaining skbs */ 2140 /* Bin any remaining skbs */
2058 while ((skb = skb_dequeue(&queue->rx_queue)) != NULL) 2141 xenvif_rx_queue_purge(queue);
2059 dev_kfree_skb(skb);
2060 2142
2061 return 0; 2143 return 0;
2062} 2144}
@@ -2113,6 +2195,7 @@ static int __init netback_init(void)
2113 goto failed_init; 2195 goto failed_init;
2114 2196
2115 rx_drain_timeout_jiffies = msecs_to_jiffies(rx_drain_timeout_msecs); 2197 rx_drain_timeout_jiffies = msecs_to_jiffies(rx_drain_timeout_msecs);
2198 rx_stall_timeout_jiffies = msecs_to_jiffies(rx_stall_timeout_msecs);
2116 2199
2117#ifdef CONFIG_DEBUG_FS 2200#ifdef CONFIG_DEBUG_FS
2118 xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL); 2201 xen_netback_dbg_root = debugfs_create_dir("xen-netback", NULL);
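Note on the guest-Rx rework above: every queued skb now carries its own deadline. xenvif_rx_queue_timeout() derives the wait timeout from XENVIF_RX_CB(skb)->expires of the packet at the head of rx_queue, and xenvif_rx_queue_drop_expired() discards packets once that deadline passes. The enqueue side is not part of the hunks shown here; a minimal sketch of how such a deadline could be stamped at queue time (the helper name and the use of rx_drain_timeout_jiffies are assumptions, not taken from this diff):

/* Sketch only, in the context of drivers/net/xen-netback/netback.c:
 * stamp a per-packet expiry when the packet enters the internal
 * rx_queue, so the wait loop and the expiry scan above have a
 * deadline to work with.
 */
static void xenvif_rx_enqueue_sketch(struct xenvif_queue *queue,
				     struct sk_buff *skb)
{
	XENVIF_RX_CB(skb)->expires = jiffies + rx_drain_timeout_jiffies;
	skb_queue_tail(&queue->rx_queue, skb);
}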
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 8079c31ac5e6..fab0d4b42f58 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -39,7 +39,7 @@ struct backend_info {
39static int connect_rings(struct backend_info *be, struct xenvif_queue *queue); 39static int connect_rings(struct backend_info *be, struct xenvif_queue *queue);
40static void connect(struct backend_info *be); 40static void connect(struct backend_info *be);
41static int read_xenbus_vif_flags(struct backend_info *be); 41static int read_xenbus_vif_flags(struct backend_info *be);
42static void backend_create_xenvif(struct backend_info *be); 42static int backend_create_xenvif(struct backend_info *be);
43static void unregister_hotplug_status_watch(struct backend_info *be); 43static void unregister_hotplug_status_watch(struct backend_info *be);
44static void set_backend_state(struct backend_info *be, 44static void set_backend_state(struct backend_info *be,
45 enum xenbus_state state); 45 enum xenbus_state state);
@@ -52,6 +52,7 @@ static int xenvif_read_io_ring(struct seq_file *m, void *v)
52 struct xenvif_queue *queue = m->private; 52 struct xenvif_queue *queue = m->private;
53 struct xen_netif_tx_back_ring *tx_ring = &queue->tx; 53 struct xen_netif_tx_back_ring *tx_ring = &queue->tx;
54 struct xen_netif_rx_back_ring *rx_ring = &queue->rx; 54 struct xen_netif_rx_back_ring *rx_ring = &queue->rx;
55 struct netdev_queue *dev_queue;
55 56
56 if (tx_ring->sring) { 57 if (tx_ring->sring) {
57 struct xen_netif_tx_sring *sring = tx_ring->sring; 58 struct xen_netif_tx_sring *sring = tx_ring->sring;
@@ -112,6 +113,13 @@ static int xenvif_read_io_ring(struct seq_file *m, void *v)
112 queue->credit_timeout.expires, 113 queue->credit_timeout.expires,
113 jiffies); 114 jiffies);
114 115
116 dev_queue = netdev_get_tx_queue(queue->vif->dev, queue->id);
117
118 seq_printf(m, "\nRx internal queue: len %u max %u pkts %u %s\n",
119 queue->rx_queue_len, queue->rx_queue_max,
120 skb_queue_len(&queue->rx_queue),
121 netif_tx_queue_stopped(dev_queue) ? "stopped" : "running");
122
115 return 0; 123 return 0;
116} 124}
117 125
@@ -344,7 +352,9 @@ static int netback_probe(struct xenbus_device *dev,
344 be->state = XenbusStateInitWait; 352 be->state = XenbusStateInitWait;
345 353
346 /* This kicks hotplug scripts, so do it immediately. */ 354 /* This kicks hotplug scripts, so do it immediately. */
347 backend_create_xenvif(be); 355 err = backend_create_xenvif(be);
356 if (err)
357 goto fail;
348 358
349 return 0; 359 return 0;
350 360
@@ -389,19 +399,19 @@ static int netback_uevent(struct xenbus_device *xdev,
389} 399}
390 400
391 401
392static void backend_create_xenvif(struct backend_info *be) 402static int backend_create_xenvif(struct backend_info *be)
393{ 403{
394 int err; 404 int err;
395 long handle; 405 long handle;
396 struct xenbus_device *dev = be->dev; 406 struct xenbus_device *dev = be->dev;
397 407
398 if (be->vif != NULL) 408 if (be->vif != NULL)
399 return; 409 return 0;
400 410
401 err = xenbus_scanf(XBT_NIL, dev->nodename, "handle", "%li", &handle); 411 err = xenbus_scanf(XBT_NIL, dev->nodename, "handle", "%li", &handle);
402 if (err != 1) { 412 if (err != 1) {
403 xenbus_dev_fatal(dev, err, "reading handle"); 413 xenbus_dev_fatal(dev, err, "reading handle");
404 return; 414 return (err < 0) ? err : -EINVAL;
405 } 415 }
406 416
407 be->vif = xenvif_alloc(&dev->dev, dev->otherend_id, handle); 417 be->vif = xenvif_alloc(&dev->dev, dev->otherend_id, handle);
@@ -409,10 +419,11 @@ static void backend_create_xenvif(struct backend_info *be)
409 err = PTR_ERR(be->vif); 419 err = PTR_ERR(be->vif);
410 be->vif = NULL; 420 be->vif = NULL;
411 xenbus_dev_fatal(dev, err, "creating interface"); 421 xenbus_dev_fatal(dev, err, "creating interface");
412 return; 422 return err;
413 } 423 }
414 424
415 kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE); 425 kobject_uevent(&dev->dev.kobj, KOBJ_ONLINE);
426 return 0;
416} 427}
417 428
418static void backend_disconnect(struct backend_info *be) 429static void backend_disconnect(struct backend_info *be)
@@ -703,6 +714,7 @@ static void connect(struct backend_info *be)
703 be->vif->queues = vzalloc(requested_num_queues * 714 be->vif->queues = vzalloc(requested_num_queues *
704 sizeof(struct xenvif_queue)); 715 sizeof(struct xenvif_queue));
705 be->vif->num_queues = requested_num_queues; 716 be->vif->num_queues = requested_num_queues;
717 be->vif->stalled_queues = requested_num_queues;
706 718
707 for (queue_index = 0; queue_index < requested_num_queues; ++queue_index) { 719 for (queue_index = 0; queue_index < requested_num_queues; ++queue_index) {
708 queue = &be->vif->queues[queue_index]; 720 queue = &be->vif->queues[queue_index];
@@ -873,15 +885,10 @@ static int read_xenbus_vif_flags(struct backend_info *be)
873 if (!rx_copy) 885 if (!rx_copy)
874 return -EOPNOTSUPP; 886 return -EOPNOTSUPP;
875 887
876 if (vif->dev->tx_queue_len != 0) { 888 if (xenbus_scanf(XBT_NIL, dev->otherend,
877 if (xenbus_scanf(XBT_NIL, dev->otherend, 889 "feature-rx-notify", "%d", &val) < 0 || val == 0) {
878 "feature-rx-notify", "%d", &val) < 0) 890 xenbus_dev_fatal(dev, -EINVAL, "feature-rx-notify is mandatory");
879 val = 0; 891 return -EINVAL;
880 if (val)
881 vif->can_queue = 1;
882 else
883 /* Must be non-zero for pfifo_fast to work. */
884 vif->dev->tx_queue_len = 1;
885 } 892 }
886 893
887 if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg", 894 if (xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg",
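Note on the xenbus.c hunk above: feature-rx-notify is now mandatory, so the backend fails the connection with -EINVAL instead of falling back to a queueless mode. On the frontend side the flag is advertised with a plain xenbus write; a sketch of what that advertisement can look like (the surrounding transaction and error handling are omitted, and the function name is a placeholder):

#include <xen/xenbus.h>

/* Hypothetical frontend-side advertisement of the feature the backend
 * above now insists on.
 */
static int advertise_rx_notify(struct xenbus_transaction xbt,
			       struct xenbus_device *dev)
{
	return xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
}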
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index cca871346a0f..ece8d1804d13 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -496,9 +496,6 @@ static void xennet_make_frags(struct sk_buff *skb, struct netfront_queue *queue,
496 len = skb_frag_size(frag); 496 len = skb_frag_size(frag);
497 offset = frag->page_offset; 497 offset = frag->page_offset;
498 498
499 /* Data must not cross a page boundary. */
500 BUG_ON(len + offset > PAGE_SIZE<<compound_order(page));
501
502 /* Skip unused frames from start of page */ 499 /* Skip unused frames from start of page */
503 page += offset >> PAGE_SHIFT; 500 page += offset >> PAGE_SHIFT;
504 offset &= ~PAGE_MASK; 501 offset &= ~PAGE_MASK;
@@ -506,8 +503,6 @@ static void xennet_make_frags(struct sk_buff *skb, struct netfront_queue *queue,
506 while (len > 0) { 503 while (len > 0) {
507 unsigned long bytes; 504 unsigned long bytes;
508 505
509 BUG_ON(offset >= PAGE_SIZE);
510
511 bytes = PAGE_SIZE - offset; 506 bytes = PAGE_SIZE - offset;
512 if (bytes > len) 507 if (bytes > len)
513 bytes = len; 508 bytes = len;
diff --git a/drivers/of/address.c b/drivers/of/address.c
index afdb78299f61..06af494184d6 100644
--- a/drivers/of/address.c
+++ b/drivers/of/address.c
@@ -450,6 +450,21 @@ static struct of_bus *of_match_bus(struct device_node *np)
450 return NULL; 450 return NULL;
451} 451}
452 452
453static int of_empty_ranges_quirk(void)
454{
455 if (IS_ENABLED(CONFIG_PPC)) {
456 /* To save cycles, we cache the result */
457 static int quirk_state = -1;
458
459 if (quirk_state < 0)
460 quirk_state =
461 of_machine_is_compatible("Power Macintosh") ||
462 of_machine_is_compatible("MacRISC");
463 return quirk_state;
464 }
465 return false;
466}
467
453static int of_translate_one(struct device_node *parent, struct of_bus *bus, 468static int of_translate_one(struct device_node *parent, struct of_bus *bus,
454 struct of_bus *pbus, __be32 *addr, 469 struct of_bus *pbus, __be32 *addr,
455 int na, int ns, int pna, const char *rprop) 470 int na, int ns, int pna, const char *rprop)
@@ -475,12 +490,10 @@ static int of_translate_one(struct device_node *parent, struct of_bus *bus,
475 * This code is only enabled on powerpc. --gcl 490 * This code is only enabled on powerpc. --gcl
476 */ 491 */
477 ranges = of_get_property(parent, rprop, &rlen); 492 ranges = of_get_property(parent, rprop, &rlen);
478#if !defined(CONFIG_PPC) 493 if (ranges == NULL && !of_empty_ranges_quirk()) {
479 if (ranges == NULL) {
480 pr_err("OF: no ranges; cannot translate\n"); 494 pr_err("OF: no ranges; cannot translate\n");
481 return 1; 495 return 1;
482 } 496 }
483#endif /* !defined(CONFIG_PPC) */
484 if (ranges == NULL || rlen == 0) { 497 if (ranges == NULL || rlen == 0) {
485 offset = of_read_number(addr, na); 498 offset = of_read_number(addr, na);
486 memset(addr, 0, pna * 4); 499 memset(addr, 0, pna * 4);
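Note on of_empty_ranges_quirk() above: it caches its machine-compatibility lookup in a function-local static so the string matching only runs once. The same compute-once idiom in isolation (the compatible string is a placeholder):

#include <linux/of.h>

/* Compute an expensive boolean once and reuse the cached answer.
 * -1 means "not decided yet"; 0/1 are the cached result.
 */
static bool example_cached_check(void)
{
	static int state = -1;

	if (state < 0)
		state = of_machine_is_compatible("vendor,quirky-board");
	return state;
}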
diff --git a/drivers/of/base.c b/drivers/of/base.c
index 2305dc0382bc..3823edf2d012 100644
--- a/drivers/of/base.c
+++ b/drivers/of/base.c
@@ -1280,52 +1280,6 @@ int of_property_read_string(struct device_node *np, const char *propname,
1280EXPORT_SYMBOL_GPL(of_property_read_string); 1280EXPORT_SYMBOL_GPL(of_property_read_string);
1281 1281
1282/** 1282/**
1283 * of_property_read_string_index - Find and read a string from a multiple
1284 * strings property.
1285 * @np: device node from which the property value is to be read.
1286 * @propname: name of the property to be searched.
1287 * @index: index of the string in the list of strings
1288 * @out_string: pointer to null terminated return string, modified only if
1289 * return value is 0.
1290 *
1291 * Search for a property in a device tree node and retrieve a null
1292 * terminated string value (pointer to data, not a copy) in the list of strings
1293 * contained in that property.
1294 * Returns 0 on success, -EINVAL if the property does not exist, -ENODATA if
1295 * property does not have a value, and -EILSEQ if the string is not
1296 * null-terminated within the length of the property data.
1297 *
1298 * The out_string pointer is modified only if a valid string can be decoded.
1299 */
1300int of_property_read_string_index(struct device_node *np, const char *propname,
1301 int index, const char **output)
1302{
1303 struct property *prop = of_find_property(np, propname, NULL);
1304 int i = 0;
1305 size_t l = 0, total = 0;
1306 const char *p;
1307
1308 if (!prop)
1309 return -EINVAL;
1310 if (!prop->value)
1311 return -ENODATA;
1312 if (strnlen(prop->value, prop->length) >= prop->length)
1313 return -EILSEQ;
1314
1315 p = prop->value;
1316
1317 for (i = 0; total < prop->length; total += l, p += l) {
1318 l = strlen(p) + 1;
1319 if (i++ == index) {
1320 *output = p;
1321 return 0;
1322 }
1323 }
1324 return -ENODATA;
1325}
1326EXPORT_SYMBOL_GPL(of_property_read_string_index);
1327
1328/**
1329 * of_property_match_string() - Find string in a list and return index 1283 * of_property_match_string() - Find string in a list and return index
1330 * @np: pointer to node containing string list property 1284 * @np: pointer to node containing string list property
1331 * @propname: string list property name 1285 * @propname: string list property name
@@ -1351,7 +1305,7 @@ int of_property_match_string(struct device_node *np, const char *propname,
1351 end = p + prop->length; 1305 end = p + prop->length;
1352 1306
1353 for (i = 0; p < end; i++, p += l) { 1307 for (i = 0; p < end; i++, p += l) {
1354 l = strlen(p) + 1; 1308 l = strnlen(p, end - p) + 1;
1355 if (p + l > end) 1309 if (p + l > end)
1356 return -EILSEQ; 1310 return -EILSEQ;
1357 pr_debug("comparing %s with %s\n", string, p); 1311 pr_debug("comparing %s with %s\n", string, p);
@@ -1363,39 +1317,41 @@ int of_property_match_string(struct device_node *np, const char *propname,
1363EXPORT_SYMBOL_GPL(of_property_match_string); 1317EXPORT_SYMBOL_GPL(of_property_match_string);
1364 1318
1365/** 1319/**
1366 * of_property_count_strings - Find and return the number of strings from a 1320 * of_property_read_string_util() - Utility helper for parsing string properties
1367 * multiple strings property.
1368 * @np: device node from which the property value is to be read. 1321 * @np: device node from which the property value is to be read.
1369 * @propname: name of the property to be searched. 1322 * @propname: name of the property to be searched.
1323 * @out_strs: output array of string pointers.
1324 * @sz: number of array elements to read.
1325 * @skip: Number of strings to skip over at beginning of list.
1370 * 1326 *
1371 * Search for a property in a device tree node and retrieve the number of null 1327 * Don't call this function directly. It is a utility helper for the
1372 * terminated string contain in it. Returns the number of strings on 1328 * of_property_read_string*() family of functions.
1373 * success, -EINVAL if the property does not exist, -ENODATA if property
1374 * does not have a value, and -EILSEQ if the string is not null-terminated
1375 * within the length of the property data.
1376 */ 1329 */
1377int of_property_count_strings(struct device_node *np, const char *propname) 1330int of_property_read_string_helper(struct device_node *np, const char *propname,
1331 const char **out_strs, size_t sz, int skip)
1378{ 1332{
1379 struct property *prop = of_find_property(np, propname, NULL); 1333 struct property *prop = of_find_property(np, propname, NULL);
1380 int i = 0; 1334 int l = 0, i = 0;
1381 size_t l = 0, total = 0; 1335 const char *p, *end;
1382 const char *p;
1383 1336
1384 if (!prop) 1337 if (!prop)
1385 return -EINVAL; 1338 return -EINVAL;
1386 if (!prop->value) 1339 if (!prop->value)
1387 return -ENODATA; 1340 return -ENODATA;
1388 if (strnlen(prop->value, prop->length) >= prop->length)
1389 return -EILSEQ;
1390
1391 p = prop->value; 1341 p = prop->value;
1342 end = p + prop->length;
1392 1343
1393 for (i = 0; total < prop->length; total += l, p += l, i++) 1344 for (i = 0; p < end && (!out_strs || i < skip + sz); i++, p += l) {
1394 l = strlen(p) + 1; 1345 l = strnlen(p, end - p) + 1;
1395 1346 if (p + l > end)
1396 return i; 1347 return -EILSEQ;
1348 if (out_strs && i >= skip)
1349 *out_strs++ = p;
1350 }
1351 i -= skip;
1352 return i <= 0 ? -ENODATA : i;
1397} 1353}
1398EXPORT_SYMBOL_GPL(of_property_count_strings); 1354EXPORT_SYMBOL_GPL(of_property_read_string_helper);
1399 1355
1400void of_print_phandle_args(const char *msg, const struct of_phandle_args *args) 1356void of_print_phandle_args(const char *msg, const struct of_phandle_args *args)
1401{ 1357{
diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c
index f297891d8529..d4994177dec2 100644
--- a/drivers/of/dynamic.c
+++ b/drivers/of/dynamic.c
@@ -247,7 +247,7 @@ void of_node_release(struct kobject *kobj)
247 * @allocflags: Allocation flags (typically pass GFP_KERNEL) 247 * @allocflags: Allocation flags (typically pass GFP_KERNEL)
248 * 248 *
249 * Copy a property by dynamically allocating the memory of both the 249 * Copy a property by dynamically allocating the memory of both the
250 * property stucture and the property name & contents. The property's 250 * property structure and the property name & contents. The property's
251 * flags have the OF_DYNAMIC bit set so that we can differentiate between 251 * flags have the OF_DYNAMIC bit set so that we can differentiate between
252 * dynamically allocated properties and not. 252 * dynamically allocated properties and not.
253 * Returns the newly allocated property or NULL on out of memory error. 253 * Returns the newly allocated property or NULL on out of memory error.
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index d1ffca8b34ea..d134710de96d 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -773,7 +773,7 @@ int __init early_init_dt_scan_chosen_serial(void)
773 if (offset < 0) 773 if (offset < 0)
774 return -ENODEV; 774 return -ENODEV;
775 775
776 while (match->compatible) { 776 while (match->compatible[0]) {
777 unsigned long addr; 777 unsigned long addr;
778 if (fdt_node_check_compatible(fdt, offset, match->compatible)) { 778 if (fdt_node_check_compatible(fdt, offset, match->compatible)) {
779 match++; 779 match++;
@@ -964,8 +964,6 @@ void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
964int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base, 964int __init __weak early_init_dt_reserve_memory_arch(phys_addr_t base,
965 phys_addr_t size, bool nomap) 965 phys_addr_t size, bool nomap)
966{ 966{
967 if (memblock_is_region_reserved(base, size))
968 return -EBUSY;
969 if (nomap) 967 if (nomap)
970 return memblock_remove(base, size); 968 return memblock_remove(base, size);
971 return memblock_reserve(base, size); 969 return memblock_reserve(base, size);
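Note on the early_init_dt_scan_chosen_serial() fix above: it works because struct of_device_id embeds compatible as a fixed-size char array, so match->compatible can never compare as NULL and only an empty string marks the table terminator. A small standalone illustration of that sentinel convention (the table contents are hypothetical):

#include <linux/mod_devicetable.h>
#include <linux/printk.h>

/* Hypothetical sentinel-terminated match table. */
static const struct of_device_id example_earlycon_matches[] = {
	{ .compatible = "vendor,uart-a" },
	{ .compatible = "vendor,uart-b" },
	{ /* terminator: zeroed entry, so compatible[0] == '\0' */ }
};

static void walk_example_matches(void)
{
	const struct of_device_id *match = example_earlycon_matches;

	/* "while (match->compatible)" would never terminate here, since
	 * the embedded array decays to a non-NULL pointer for every
	 * entry; testing the first character is the correct
	 * end-of-table check.
	 */
	while (match->compatible[0]) {
		pr_info("candidate: %s\n", match->compatible);
		match++;
	}
}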
diff --git a/drivers/of/of_reserved_mem.c b/drivers/of/of_reserved_mem.c
index 59fb12e84e6b..dc566b38645f 100644
--- a/drivers/of/of_reserved_mem.c
+++ b/drivers/of/of_reserved_mem.c
@@ -243,23 +243,27 @@ static inline struct reserved_mem *__find_rmem(struct device_node *node)
243 * This function assign memory region pointed by "memory-region" device tree 243 * This function assign memory region pointed by "memory-region" device tree
244 * property to the given device. 244 * property to the given device.
245 */ 245 */
246void of_reserved_mem_device_init(struct device *dev) 246int of_reserved_mem_device_init(struct device *dev)
247{ 247{
248 struct reserved_mem *rmem; 248 struct reserved_mem *rmem;
249 struct device_node *np; 249 struct device_node *np;
250 int ret;
250 251
251 np = of_parse_phandle(dev->of_node, "memory-region", 0); 252 np = of_parse_phandle(dev->of_node, "memory-region", 0);
252 if (!np) 253 if (!np)
253 return; 254 return -ENODEV;
254 255
255 rmem = __find_rmem(np); 256 rmem = __find_rmem(np);
256 of_node_put(np); 257 of_node_put(np);
257 258
258 if (!rmem || !rmem->ops || !rmem->ops->device_init) 259 if (!rmem || !rmem->ops || !rmem->ops->device_init)
259 return; 260 return -EINVAL;
261
262 ret = rmem->ops->device_init(rmem, dev);
263 if (ret == 0)
264 dev_info(dev, "assigned reserved memory node %s\n", rmem->name);
260 265
261 rmem->ops->device_init(rmem, dev); 266 return ret;
262 dev_info(dev, "assigned reserved memory node %s\n", rmem->name);
263} 267}
264 268
265/** 269/**
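Note on of_reserved_mem_device_init() above: now that it reports errors, a consumer can tell "no memory-region property" (-ENODEV) apart from a region that exists but could not be assigned (-EINVAL or the ops->device_init() result). A hypothetical probe-time caller (the driver and function names are placeholders):

#include <linux/of_reserved_mem.h>
#include <linux/platform_device.h>

/* Hypothetical consumer of the new return value. */
static int example_probe(struct platform_device *pdev)
{
	int ret;

	ret = of_reserved_mem_device_init(&pdev->dev);
	if (ret == -ENODEV)
		dev_dbg(&pdev->dev, "no dedicated reserved memory\n");
	else if (ret)
		return ret;	/* region declared but could not be assigned */

	/* ... remainder of probe ... */
	return 0;
}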
diff --git a/drivers/of/selftest.c b/drivers/of/selftest.c
index 78001270a598..e2d79afa9dc6 100644
--- a/drivers/of/selftest.c
+++ b/drivers/of/selftest.c
@@ -339,8 +339,9 @@ static void __init of_selftest_parse_phandle_with_args(void)
339 selftest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc); 339 selftest(rc == -EINVAL, "expected:%i got:%i\n", -EINVAL, rc);
340} 340}
341 341
342static void __init of_selftest_property_match_string(void) 342static void __init of_selftest_property_string(void)
343{ 343{
344 const char *strings[4];
344 struct device_node *np; 345 struct device_node *np;
345 int rc; 346 int rc;
346 347
@@ -357,13 +358,66 @@ static void __init of_selftest_property_match_string(void)
357 rc = of_property_match_string(np, "phandle-list-names", "third"); 358 rc = of_property_match_string(np, "phandle-list-names", "third");
358 selftest(rc == 2, "third expected:0 got:%i\n", rc); 359 selftest(rc == 2, "third expected:0 got:%i\n", rc);
359 rc = of_property_match_string(np, "phandle-list-names", "fourth"); 360 rc = of_property_match_string(np, "phandle-list-names", "fourth");
360 selftest(rc == -ENODATA, "unmatched string; rc=%i", rc); 361 selftest(rc == -ENODATA, "unmatched string; rc=%i\n", rc);
361 rc = of_property_match_string(np, "missing-property", "blah"); 362 rc = of_property_match_string(np, "missing-property", "blah");
362 selftest(rc == -EINVAL, "missing property; rc=%i", rc); 363 selftest(rc == -EINVAL, "missing property; rc=%i\n", rc);
363 rc = of_property_match_string(np, "empty-property", "blah"); 364 rc = of_property_match_string(np, "empty-property", "blah");
364 selftest(rc == -ENODATA, "empty property; rc=%i", rc); 365 selftest(rc == -ENODATA, "empty property; rc=%i\n", rc);
365 rc = of_property_match_string(np, "unterminated-string", "blah"); 366 rc = of_property_match_string(np, "unterminated-string", "blah");
366 selftest(rc == -EILSEQ, "unterminated string; rc=%i", rc); 367 selftest(rc == -EILSEQ, "unterminated string; rc=%i\n", rc);
368
369 /* of_property_count_strings() tests */
370 rc = of_property_count_strings(np, "string-property");
371 selftest(rc == 1, "Incorrect string count; rc=%i\n", rc);
372 rc = of_property_count_strings(np, "phandle-list-names");
373 selftest(rc == 3, "Incorrect string count; rc=%i\n", rc);
374 rc = of_property_count_strings(np, "unterminated-string");
375 selftest(rc == -EILSEQ, "unterminated string; rc=%i\n", rc);
376 rc = of_property_count_strings(np, "unterminated-string-list");
377 selftest(rc == -EILSEQ, "unterminated string array; rc=%i\n", rc);
378
379 /* of_property_read_string_index() tests */
380 rc = of_property_read_string_index(np, "string-property", 0, strings);
381 selftest(rc == 0 && !strcmp(strings[0], "foobar"), "of_property_read_string_index() failure; rc=%i\n", rc);
382 strings[0] = NULL;
383 rc = of_property_read_string_index(np, "string-property", 1, strings);
384 selftest(rc == -ENODATA && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc);
385 rc = of_property_read_string_index(np, "phandle-list-names", 0, strings);
386 selftest(rc == 0 && !strcmp(strings[0], "first"), "of_property_read_string_index() failure; rc=%i\n", rc);
387 rc = of_property_read_string_index(np, "phandle-list-names", 1, strings);
388 selftest(rc == 0 && !strcmp(strings[0], "second"), "of_property_read_string_index() failure; rc=%i\n", rc);
389 rc = of_property_read_string_index(np, "phandle-list-names", 2, strings);
390 selftest(rc == 0 && !strcmp(strings[0], "third"), "of_property_read_string_index() failure; rc=%i\n", rc);
391 strings[0] = NULL;
392 rc = of_property_read_string_index(np, "phandle-list-names", 3, strings);
393 selftest(rc == -ENODATA && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc);
394 strings[0] = NULL;
395 rc = of_property_read_string_index(np, "unterminated-string", 0, strings);
396 selftest(rc == -EILSEQ && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc);
397 rc = of_property_read_string_index(np, "unterminated-string-list", 0, strings);
398 selftest(rc == 0 && !strcmp(strings[0], "first"), "of_property_read_string_index() failure; rc=%i\n", rc);
399 strings[0] = NULL;
400 rc = of_property_read_string_index(np, "unterminated-string-list", 2, strings); /* should fail */
401 selftest(rc == -EILSEQ && strings[0] == NULL, "of_property_read_string_index() failure; rc=%i\n", rc);
402 strings[1] = NULL;
403
404 /* of_property_read_string_array() tests */
405 rc = of_property_read_string_array(np, "string-property", strings, 4);
406 selftest(rc == 1, "Incorrect string count; rc=%i\n", rc);
407 rc = of_property_read_string_array(np, "phandle-list-names", strings, 4);
408 selftest(rc == 3, "Incorrect string count; rc=%i\n", rc);
409 rc = of_property_read_string_array(np, "unterminated-string", strings, 4);
410 selftest(rc == -EILSEQ, "unterminated string; rc=%i\n", rc);
411 /* -- An incorrectly formed string should cause a failure */
412 rc = of_property_read_string_array(np, "unterminated-string-list", strings, 4);
413 selftest(rc == -EILSEQ, "unterminated string array; rc=%i\n", rc);
414 /* -- parsing the correctly formed strings should still work: */
415 strings[2] = NULL;
416 rc = of_property_read_string_array(np, "unterminated-string-list", strings, 2);
417 selftest(rc == 2 && strings[2] == NULL, "of_property_read_string_array() failure; rc=%i\n", rc);
418 strings[1] = NULL;
419 rc = of_property_read_string_array(np, "phandle-list-names", strings, 1);
420 selftest(rc == 1 && strings[1] == NULL, "Overwrote end of string array; rc=%i, str='%s'\n", rc, strings[1]);
367} 421}
368 422
369#define propcmp(p1, p2) (((p1)->length == (p2)->length) && \ 423#define propcmp(p1, p2) (((p1)->length == (p2)->length) && \
@@ -842,10 +896,14 @@ static void selftest_data_remove(void)
842 return; 896 return;
843 } 897 }
844 898
845 while (last_node_index >= 0) { 899 while (last_node_index-- > 0) {
846 if (nodes[last_node_index]) { 900 if (nodes[last_node_index]) {
847 np = of_find_node_by_path(nodes[last_node_index]->full_name); 901 np = of_find_node_by_path(nodes[last_node_index]->full_name);
848 if (strcmp(np->full_name, "/aliases") != 0) { 902 if (np == nodes[last_node_index]) {
903 if (of_aliases == np) {
904 of_node_put(of_aliases);
905 of_aliases = NULL;
906 }
849 detach_node_and_children(np); 907 detach_node_and_children(np);
850 } else { 908 } else {
851 for_each_property_of_node(np, prop) { 909 for_each_property_of_node(np, prop) {
@@ -854,7 +912,6 @@ static void selftest_data_remove(void)
854 } 912 }
855 } 913 }
856 } 914 }
857 last_node_index--;
858 } 915 }
859} 916}
860 917
@@ -867,6 +924,8 @@ static int __init of_selftest(void)
867 res = selftest_data_add(); 924 res = selftest_data_add();
868 if (res) 925 if (res)
869 return res; 926 return res;
927 if (!of_aliases)
928 of_aliases = of_find_node_by_path("/aliases");
870 929
871 np = of_find_node_by_path("/testcase-data/phandle-tests/consumer-a"); 930 np = of_find_node_by_path("/testcase-data/phandle-tests/consumer-a");
872 if (!np) { 931 if (!np) {
@@ -881,7 +940,7 @@ static int __init of_selftest(void)
881 of_selftest_find_node_by_name(); 940 of_selftest_find_node_by_name();
882 of_selftest_dynamic(); 941 of_selftest_dynamic();
883 of_selftest_parse_phandle_with_args(); 942 of_selftest_parse_phandle_with_args();
884 of_selftest_property_match_string(); 943 of_selftest_property_string();
885 of_selftest_property_copy(); 944 of_selftest_property_copy();
886 of_selftest_changeset(); 945 of_selftest_changeset();
887 of_selftest_parse_interrupts(); 946 of_selftest_parse_interrupts();
diff --git a/drivers/of/testcase-data/tests-phandle.dtsi b/drivers/of/testcase-data/tests-phandle.dtsi
index ce0fe083d406..5b1527e8a7fb 100644
--- a/drivers/of/testcase-data/tests-phandle.dtsi
+++ b/drivers/of/testcase-data/tests-phandle.dtsi
@@ -39,7 +39,9 @@
39 phandle-list-bad-args = <&provider2 1 0>, 39 phandle-list-bad-args = <&provider2 1 0>,
40 <&provider3 0>; 40 <&provider3 0>;
41 empty-property; 41 empty-property;
42 string-property = "foobar";
42 unterminated-string = [40 41 42 43]; 43 unterminated-string = [40 41 42 43];
44 unterminated-string-list = "first", "second", [40 41 42 43];
43 }; 45 };
44 }; 46 };
45 }; 47 };
diff --git a/drivers/pci/access.c b/drivers/pci/access.c
index d292d7cb3417..49dd766852ba 100644
--- a/drivers/pci/access.c
+++ b/drivers/pci/access.c
@@ -444,7 +444,7 @@ static inline int pcie_cap_version(const struct pci_dev *dev)
444 return pcie_caps_reg(dev) & PCI_EXP_FLAGS_VERS; 444 return pcie_caps_reg(dev) & PCI_EXP_FLAGS_VERS;
445} 445}
446 446
447static inline bool pcie_cap_has_lnkctl(const struct pci_dev *dev) 447bool pcie_cap_has_lnkctl(const struct pci_dev *dev)
448{ 448{
449 int type = pci_pcie_type(dev); 449 int type = pci_pcie_type(dev);
450 450
diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c
index 233fe8a88264..69202d1eb8fb 100644
--- a/drivers/pci/host/pci-imx6.c
+++ b/drivers/pci/host/pci-imx6.c
@@ -275,15 +275,22 @@ static int imx6_pcie_deassert_core_reset(struct pcie_port *pp)
275 goto err_pcie; 275 goto err_pcie;
276 } 276 }
277 277
278 /* allow the clocks to stabilize */
279 usleep_range(200, 500);
280
281 /* power up core phy and enable ref clock */ 278 /* power up core phy and enable ref clock */
282 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, 279 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
283 IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18); 280 IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
281 /*
 282	 * The async reset input needs the ref clock to synchronize internally;
 283	 * when the ref clock only arrives after reset, the internally synced
 284	 * reset time is too short and cannot meet the requirement,
 285	 * so add a ~10us delay here.
286 */
287 udelay(10);
284 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, 288 regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
285 IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16); 289 IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
286 290
291 /* allow the clocks to stabilize */
292 usleep_range(200, 500);
293
287 /* Some boards don't have PCIe reset GPIO. */ 294 /* Some boards don't have PCIe reset GPIO. */
288 if (gpio_is_valid(imx6_pcie->reset_gpio)) { 295 if (gpio_is_valid(imx6_pcie->reset_gpio)) {
289 gpio_set_value(imx6_pcie->reset_gpio, 0); 296 gpio_set_value(imx6_pcie->reset_gpio, 0);
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 3d43874319be..19bb19c7db4a 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -276,6 +276,7 @@ struct tegra_pcie {
276 276
277 struct resource all; 277 struct resource all;
278 struct resource io; 278 struct resource io;
279 struct resource pio;
279 struct resource mem; 280 struct resource mem;
280 struct resource prefetch; 281 struct resource prefetch;
281 struct resource busn; 282 struct resource busn;
@@ -658,7 +659,6 @@ static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
658{ 659{
659 struct tegra_pcie *pcie = sys_to_pcie(sys); 660 struct tegra_pcie *pcie = sys_to_pcie(sys);
660 int err; 661 int err;
661 phys_addr_t io_start;
662 662
663 err = devm_request_resource(pcie->dev, &pcie->all, &pcie->mem); 663 err = devm_request_resource(pcie->dev, &pcie->all, &pcie->mem);
664 if (err < 0) 664 if (err < 0)
@@ -668,14 +668,12 @@ static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
668 if (err) 668 if (err)
669 return err; 669 return err;
670 670
671 io_start = pci_pio_to_address(pcie->io.start);
672
673 pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset); 671 pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
674 pci_add_resource_offset(&sys->resources, &pcie->prefetch, 672 pci_add_resource_offset(&sys->resources, &pcie->prefetch,
675 sys->mem_offset); 673 sys->mem_offset);
676 pci_add_resource(&sys->resources, &pcie->busn); 674 pci_add_resource(&sys->resources, &pcie->busn);
677 675
678 pci_ioremap_io(nr * SZ_64K, io_start); 676 pci_ioremap_io(pcie->pio.start, pcie->io.start);
679 677
680 return 1; 678 return 1;
681} 679}
@@ -786,7 +784,6 @@ static irqreturn_t tegra_pcie_isr(int irq, void *arg)
786static void tegra_pcie_setup_translations(struct tegra_pcie *pcie) 784static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
787{ 785{
788 u32 fpci_bar, size, axi_address; 786 u32 fpci_bar, size, axi_address;
789 phys_addr_t io_start = pci_pio_to_address(pcie->io.start);
790 787
791 /* Bar 0: type 1 extended configuration space */ 788 /* Bar 0: type 1 extended configuration space */
792 fpci_bar = 0xfe100000; 789 fpci_bar = 0xfe100000;
@@ -799,7 +796,7 @@ static void tegra_pcie_setup_translations(struct tegra_pcie *pcie)
799 /* Bar 1: downstream IO bar */ 796 /* Bar 1: downstream IO bar */
800 fpci_bar = 0xfdfc0000; 797 fpci_bar = 0xfdfc0000;
801 size = resource_size(&pcie->io); 798 size = resource_size(&pcie->io);
802 axi_address = io_start; 799 axi_address = pcie->io.start;
803 afi_writel(pcie, axi_address, AFI_AXI_BAR1_START); 800 afi_writel(pcie, axi_address, AFI_AXI_BAR1_START);
804 afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ); 801 afi_writel(pcie, size >> 12, AFI_AXI_BAR1_SZ);
805 afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1); 802 afi_writel(pcie, fpci_bar, AFI_FPCI_BAR1);
@@ -1690,8 +1687,23 @@ static int tegra_pcie_parse_dt(struct tegra_pcie *pcie)
1690 1687
1691 switch (res.flags & IORESOURCE_TYPE_BITS) { 1688 switch (res.flags & IORESOURCE_TYPE_BITS) {
1692 case IORESOURCE_IO: 1689 case IORESOURCE_IO:
1693 memcpy(&pcie->io, &res, sizeof(res)); 1690 memcpy(&pcie->pio, &res, sizeof(res));
1694 pcie->io.name = np->full_name; 1691 pcie->pio.name = np->full_name;
1692
1693 /*
1694 * The Tegra PCIe host bridge uses this to program the
1695 * mapping of the I/O space to the physical address,
1696 * so we override the .start and .end fields here that
1697 * of_pci_range_to_resource() converted to I/O space.
1698 * We also set the IORESOURCE_MEM type to clarify that
1699 * the resource is in the physical memory space.
1700 */
1701 pcie->io.start = range.cpu_addr;
1702 pcie->io.end = range.cpu_addr + range.size - 1;
1703 pcie->io.flags = IORESOURCE_MEM;
1704 pcie->io.name = "I/O";
1705
1706 memcpy(&res, &pcie->io, sizeof(res));
1695 break; 1707 break;
1696 1708
1697 case IORESOURCE_MEM: 1709 case IORESOURCE_MEM:
diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c
index 9ecabfa8c634..2988fe136c1e 100644
--- a/drivers/pci/host/pci-xgene.c
+++ b/drivers/pci/host/pci-xgene.c
@@ -631,10 +631,15 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev)
631 if (ret) 631 if (ret)
632 return ret; 632 return ret;
633 633
634 bus = pci_scan_root_bus(&pdev->dev, 0, &xgene_pcie_ops, port, &res); 634 bus = pci_create_root_bus(&pdev->dev, 0,
635 &xgene_pcie_ops, port, &res);
635 if (!bus) 636 if (!bus)
636 return -ENOMEM; 637 return -ENOMEM;
637 638
639 pci_scan_child_bus(bus);
640 pci_assign_unassigned_bus_resources(bus);
641 pci_bus_add_devices(bus);
642
638 platform_set_drvdata(pdev, port); 643 platform_set_drvdata(pdev, port);
639 return 0; 644 return 0;
640} 645}
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 3a5e7e28b874..07aa722bb12c 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -262,13 +262,6 @@ static int pciehp_probe(struct pcie_device *dev)
262 goto err_out_none; 262 goto err_out_none;
263 } 263 }
264 264
265 if (!dev->port->subordinate) {
266 /* Can happen if we run out of bus numbers during probe */
267 dev_err(&dev->device,
268 "Hotplug bridge without secondary bus, ignoring\n");
269 goto err_out_none;
270 }
271
272 ctrl = pcie_init(dev); 265 ctrl = pcie_init(dev);
273 if (!ctrl) { 266 if (!ctrl) {
274 dev_err(&dev->device, "Controller initialization failed\n"); 267 dev_err(&dev->device, "Controller initialization failed\n");
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index 9fab30af0e75..084587d7cd13 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -590,6 +590,20 @@ static struct msi_desc *msi_setup_entry(struct pci_dev *dev)
590 return entry; 590 return entry;
591} 591}
592 592
593static int msi_verify_entries(struct pci_dev *dev)
594{
595 struct msi_desc *entry;
596
597 list_for_each_entry(entry, &dev->msi_list, list) {
598 if (!dev->no_64bit_msi || !entry->msg.address_hi)
599 continue;
600 dev_err(&dev->dev, "Device has broken 64-bit MSI but arch"
601 " tried to assign one above 4G\n");
602 return -EIO;
603 }
604 return 0;
605}
606
593/** 607/**
594 * msi_capability_init - configure device's MSI capability structure 608 * msi_capability_init - configure device's MSI capability structure
595 * @dev: pointer to the pci_dev data structure of MSI device function 609 * @dev: pointer to the pci_dev data structure of MSI device function
@@ -627,6 +641,13 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
627 return ret; 641 return ret;
628 } 642 }
629 643
644 ret = msi_verify_entries(dev);
645 if (ret) {
646 msi_mask_irq(entry, mask, ~mask);
647 free_msi_irqs(dev);
648 return ret;
649 }
650
630 ret = populate_msi_sysfs(dev); 651 ret = populate_msi_sysfs(dev);
631 if (ret) { 652 if (ret) {
632 msi_mask_irq(entry, mask, ~mask); 653 msi_mask_irq(entry, mask, ~mask);
@@ -739,6 +760,11 @@ static int msix_capability_init(struct pci_dev *dev,
739 if (ret) 760 if (ret)
740 goto out_avail; 761 goto out_avail;
741 762
763 /* Check if all MSI entries honor device restrictions */
764 ret = msi_verify_entries(dev);
765 if (ret)
766 goto out_free;
767
742 /* 768 /*
743 * Some devices require MSI-X to be enabled before we can touch the 769 * Some devices require MSI-X to be enabled before we can touch the
744 * MSI-X registers. We need to mask all the vectors to prevent 770 * MSI-X registers. We need to mask all the vectors to prevent
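Note on msi_verify_entries() above: it is driven by dev->no_64bit_msi, a flag an arch or quirk sets for devices whose MSI capability advertises 64-bit addressing they cannot actually use; the new check then refuses any message address programmed above 4G. A hypothetical quirk that sets the flag (the vendor/device IDs are placeholders):

#include <linux/pci.h>

/* Hypothetical quirk: this device must only be given 32-bit MSI
 * addresses, so msi_verify_entries() will reject address_hi != 0.
 */
static void quirk_msi_32bit_only(struct pci_dev *dev)
{
	dev->no_64bit_msi = 1;
}
DECLARE_PCI_FIXUP_EARLY(0x1234, 0x5678, quirk_msi_32bit_only);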
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 92b6d9ab00e4..2c6643fdc0cf 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -185,7 +185,7 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr,
185} 185}
186static DEVICE_ATTR_RO(modalias); 186static DEVICE_ATTR_RO(modalias);
187 187
188static ssize_t enabled_store(struct device *dev, struct device_attribute *attr, 188static ssize_t enable_store(struct device *dev, struct device_attribute *attr,
189 const char *buf, size_t count) 189 const char *buf, size_t count)
190{ 190{
191 struct pci_dev *pdev = to_pci_dev(dev); 191 struct pci_dev *pdev = to_pci_dev(dev);
@@ -210,7 +210,7 @@ static ssize_t enabled_store(struct device *dev, struct device_attribute *attr,
210 return result < 0 ? result : count; 210 return result < 0 ? result : count;
211} 211}
212 212
213static ssize_t enabled_show(struct device *dev, struct device_attribute *attr, 213static ssize_t enable_show(struct device *dev, struct device_attribute *attr,
214 char *buf) 214 char *buf)
215{ 215{
216 struct pci_dev *pdev; 216 struct pci_dev *pdev;
@@ -218,7 +218,7 @@ static ssize_t enabled_show(struct device *dev, struct device_attribute *attr,
218 pdev = to_pci_dev(dev); 218 pdev = to_pci_dev(dev);
219 return sprintf(buf, "%u\n", atomic_read(&pdev->enable_cnt)); 219 return sprintf(buf, "%u\n", atomic_read(&pdev->enable_cnt));
220} 220}
221static DEVICE_ATTR_RW(enabled); 221static DEVICE_ATTR_RW(enable);
222 222
223#ifdef CONFIG_NUMA 223#ifdef CONFIG_NUMA
224static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr, 224static ssize_t numa_node_show(struct device *dev, struct device_attribute *attr,
@@ -563,7 +563,7 @@ static struct attribute *pci_dev_attrs[] = {
563#endif 563#endif
564 &dev_attr_dma_mask_bits.attr, 564 &dev_attr_dma_mask_bits.attr,
565 &dev_attr_consistent_dma_mask_bits.attr, 565 &dev_attr_consistent_dma_mask_bits.attr,
566 &dev_attr_enabled.attr, 566 &dev_attr_enable.attr,
567 &dev_attr_broken_parity_status.attr, 567 &dev_attr_broken_parity_status.attr,
568 &dev_attr_msi_bus.attr, 568 &dev_attr_msi_bus.attr,
569#if defined(CONFIG_PM_RUNTIME) && defined(CONFIG_ACPI) 569#if defined(CONFIG_PM_RUNTIME) && defined(CONFIG_ACPI)
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 0601890db22d..4a3902d8e6fe 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -6,6 +6,8 @@
6 6
7extern const unsigned char pcie_link_speed[]; 7extern const unsigned char pcie_link_speed[];
8 8
9bool pcie_cap_has_lnkctl(const struct pci_dev *dev);
10
9/* Functions internal to the PCI core code */ 11/* Functions internal to the PCI core code */
10 12
11int pci_create_sysfs_dev_files(struct pci_dev *pdev); 13int pci_create_sysfs_dev_files(struct pci_dev *pdev);
diff --git a/drivers/pci/pcie/pme.c b/drivers/pci/pcie/pme.c
index a9f9c46e5022..63fc63911295 100644
--- a/drivers/pci/pcie/pme.c
+++ b/drivers/pci/pcie/pme.c
@@ -397,6 +397,7 @@ static int pcie_pme_suspend(struct pcie_device *srv)
397 struct pcie_pme_service_data *data = get_service_data(srv); 397 struct pcie_pme_service_data *data = get_service_data(srv);
398 struct pci_dev *port = srv->port; 398 struct pci_dev *port = srv->port;
399 bool wakeup; 399 bool wakeup;
400 int ret;
400 401
401 if (device_may_wakeup(&port->dev)) { 402 if (device_may_wakeup(&port->dev)) {
402 wakeup = true; 403 wakeup = true;
@@ -407,9 +408,10 @@ static int pcie_pme_suspend(struct pcie_device *srv)
407 } 408 }
408 spin_lock_irq(&data->lock); 409 spin_lock_irq(&data->lock);
409 if (wakeup) { 410 if (wakeup) {
410 enable_irq_wake(srv->irq); 411 ret = enable_irq_wake(srv->irq);
411 data->suspend_level = PME_SUSPEND_WAKEUP; 412 data->suspend_level = PME_SUSPEND_WAKEUP;
412 } else { 413 }
414 if (!wakeup || ret) {
413 struct pci_dev *port = srv->port; 415 struct pci_dev *port = srv->port;
414 416
415 pcie_pme_interrupt_enable(port, false); 417 pcie_pme_interrupt_enable(port, false);
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 5ed99309c758..c8ca98c2b480 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -407,15 +407,16 @@ static void pci_read_bridge_mmio_pref(struct pci_bus *child)
407{ 407{
408 struct pci_dev *dev = child->self; 408 struct pci_dev *dev = child->self;
409 u16 mem_base_lo, mem_limit_lo; 409 u16 mem_base_lo, mem_limit_lo;
410 unsigned long base, limit; 410 u64 base64, limit64;
411 dma_addr_t base, limit;
411 struct pci_bus_region region; 412 struct pci_bus_region region;
412 struct resource *res; 413 struct resource *res;
413 414
414 res = child->resource[2]; 415 res = child->resource[2];
415 pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo); 416 pci_read_config_word(dev, PCI_PREF_MEMORY_BASE, &mem_base_lo);
416 pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo); 417 pci_read_config_word(dev, PCI_PREF_MEMORY_LIMIT, &mem_limit_lo);
417 base = ((unsigned long) mem_base_lo & PCI_PREF_RANGE_MASK) << 16; 418 base64 = (mem_base_lo & PCI_PREF_RANGE_MASK) << 16;
418 limit = ((unsigned long) mem_limit_lo & PCI_PREF_RANGE_MASK) << 16; 419 limit64 = (mem_limit_lo & PCI_PREF_RANGE_MASK) << 16;
419 420
420 if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) { 421 if ((mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) == PCI_PREF_RANGE_TYPE_64) {
421 u32 mem_base_hi, mem_limit_hi; 422 u32 mem_base_hi, mem_limit_hi;
@@ -429,17 +430,20 @@ static void pci_read_bridge_mmio_pref(struct pci_bus *child)
429 * this, just assume they are not being used. 430 * this, just assume they are not being used.
430 */ 431 */
431 if (mem_base_hi <= mem_limit_hi) { 432 if (mem_base_hi <= mem_limit_hi) {
432#if BITS_PER_LONG == 64 433 base64 |= (u64) mem_base_hi << 32;
433 base |= ((unsigned long) mem_base_hi) << 32; 434 limit64 |= (u64) mem_limit_hi << 32;
434 limit |= ((unsigned long) mem_limit_hi) << 32;
435#else
436 if (mem_base_hi || mem_limit_hi) {
437 dev_err(&dev->dev, "can't handle 64-bit address space for bridge\n");
438 return;
439 }
440#endif
441 } 435 }
442 } 436 }
437
438 base = (dma_addr_t) base64;
439 limit = (dma_addr_t) limit64;
440
441 if (base != base64) {
442 dev_err(&dev->dev, "can't handle bridge window above 4GB (bus address %#010llx)\n",
443 (unsigned long long) base64);
444 return;
445 }
446
443 if (base <= limit) { 447 if (base <= limit) {
444 res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) | 448 res->flags = (mem_base_lo & PCI_PREF_RANGE_TYPE_MASK) |
445 IORESOURCE_MEM | IORESOURCE_PREFETCH; 449 IORESOURCE_MEM | IORESOURCE_PREFETCH;
@@ -1323,7 +1327,7 @@ static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
1323 ~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or); 1327 ~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or);
1324 1328
1325 /* Initialize Link Control Register */ 1329 /* Initialize Link Control Register */
1326 if (dev->subordinate) 1330 if (pcie_cap_has_lnkctl(dev))
1327 pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL, 1331 pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
1328 ~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or); 1332 ~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or);
1329 1333
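Note on the pci_read_bridge_mmio_pref() hunk above: the prefetchable window is now assembled in a u64 and only then narrowed to dma_addr_t, with the window rejected when the narrowing would truncate it (a 32-bit dma_addr_t and a window above 4GB). The round-trip test in isolation (the helper name is illustrative):

#include <linux/types.h>

/* Narrow a 64-bit bridge window base to dma_addr_t, failing instead of
 * silently truncating when dma_addr_t is only 32 bits wide.
 */
static bool bridge_base_fits(u64 base64, dma_addr_t *out)
{
	dma_addr_t base = (dma_addr_t)base64;

	if ((u64)base != base64)
		return false;

	*out = base;
	return true;
}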
diff --git a/drivers/phy/phy-omap-usb2.c b/drivers/phy/phy-omap-usb2.c
index 8c842980834a..f091576b6449 100644
--- a/drivers/phy/phy-omap-usb2.c
+++ b/drivers/phy/phy-omap-usb2.c
@@ -258,14 +258,16 @@ static int omap_usb2_probe(struct platform_device *pdev)
258 otg->phy = &phy->phy; 258 otg->phy = &phy->phy;
259 259
260 platform_set_drvdata(pdev, phy); 260 platform_set_drvdata(pdev, phy);
261 pm_runtime_enable(phy->dev);
261 262
262 generic_phy = devm_phy_create(phy->dev, NULL, &ops, NULL); 263 generic_phy = devm_phy_create(phy->dev, NULL, &ops, NULL);
263 if (IS_ERR(generic_phy)) 264 if (IS_ERR(generic_phy)) {
265 pm_runtime_disable(phy->dev);
264 return PTR_ERR(generic_phy); 266 return PTR_ERR(generic_phy);
267 }
265 268
266 phy_set_drvdata(generic_phy, phy); 269 phy_set_drvdata(generic_phy, phy);
267 270
268 pm_runtime_enable(phy->dev);
269 phy_provider = devm_of_phy_provider_register(phy->dev, 271 phy_provider = devm_of_phy_provider_register(phy->dev,
270 of_phy_simple_xlate); 272 of_phy_simple_xlate);
271 if (IS_ERR(phy_provider)) { 273 if (IS_ERR(phy_provider)) {
diff --git a/drivers/pinctrl/pinctrl-baytrail.c b/drivers/pinctrl/pinctrl-baytrail.c
index e12e5b07f6d7..9dc38140194b 100644
--- a/drivers/pinctrl/pinctrl-baytrail.c
+++ b/drivers/pinctrl/pinctrl-baytrail.c
@@ -227,10 +227,14 @@ static int byt_irq_type(struct irq_data *d, unsigned type)
227 spin_lock_irqsave(&vg->lock, flags); 227 spin_lock_irqsave(&vg->lock, flags);
228 value = readl(reg); 228 value = readl(reg);
229 229
230 WARN(value & BYT_DIRECT_IRQ_EN,
231 "Bad pad config for io mode, force direct_irq_en bit clearing");
232
230 /* For level trigges the BYT_TRIG_POS and BYT_TRIG_NEG bits 233 /* For level trigges the BYT_TRIG_POS and BYT_TRIG_NEG bits
231 * are used to indicate high and low level triggering 234 * are used to indicate high and low level triggering
232 */ 235 */
233 value &= ~(BYT_TRIG_POS | BYT_TRIG_NEG | BYT_TRIG_LVL); 236 value &= ~(BYT_DIRECT_IRQ_EN | BYT_TRIG_POS | BYT_TRIG_NEG |
237 BYT_TRIG_LVL);
234 238
235 switch (type) { 239 switch (type) {
236 case IRQ_TYPE_LEVEL_HIGH: 240 case IRQ_TYPE_LEVEL_HIGH:
@@ -318,7 +322,7 @@ static int byt_gpio_direction_output(struct gpio_chip *chip,
318 "Potential Error: Setting GPIO with direct_irq_en to output"); 322 "Potential Error: Setting GPIO with direct_irq_en to output");
319 323
320 reg_val = readl(reg) | BYT_DIR_MASK; 324 reg_val = readl(reg) | BYT_DIR_MASK;
321 reg_val &= ~BYT_OUTPUT_EN; 325 reg_val &= ~(BYT_OUTPUT_EN | BYT_INPUT_EN);
322 326
323 if (value) 327 if (value)
324 writel(reg_val | BYT_LEVEL, reg); 328 writel(reg_val | BYT_LEVEL, reg);
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 4dcfb7116a04..a2eabe6ff9ad 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -202,6 +202,7 @@ config TC1100_WMI
202config HP_ACCEL 202config HP_ACCEL
203 tristate "HP laptop accelerometer" 203 tristate "HP laptop accelerometer"
204 depends on INPUT && ACPI 204 depends on INPUT && ACPI
205 depends on SERIO_I8042
205 select SENSORS_LIS3LV02D 206 select SENSORS_LIS3LV02D
206 select NEW_LEDS 207 select NEW_LEDS
207 select LEDS_CLASS 208 select LEDS_CLASS
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index 96a0b75c52c9..26c4fd1394da 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -579,6 +579,17 @@ static const struct dmi_system_id video_vendor_dmi_table[] __initconst = {
579 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5741"), 579 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5741"),
580 }, 580 },
581 }, 581 },
582 {
583 /*
584 * Note no video_set_backlight_video_vendor, we must use the
585 * acer interface, as there is no native backlight interface.
586 */
587 .ident = "Acer KAV80",
588 .matches = {
589 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
590 DMI_MATCH(DMI_PRODUCT_NAME, "KAV80"),
591 },
592 },
582 {} 593 {}
583}; 594};
584 595
diff --git a/drivers/platform/x86/asus-nb-wmi.c b/drivers/platform/x86/asus-nb-wmi.c
index 3a4951f46065..c1a6cd66af42 100644
--- a/drivers/platform/x86/asus-nb-wmi.c
+++ b/drivers/platform/x86/asus-nb-wmi.c
@@ -182,6 +182,15 @@ static const struct dmi_system_id asus_quirks[] = {
182 }, 182 },
183 { 183 {
184 .callback = dmi_matched, 184 .callback = dmi_matched,
185 .ident = "ASUSTeK COMPUTER INC. X550VB",
186 .matches = {
187 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
188 DMI_MATCH(DMI_PRODUCT_NAME, "X550VB"),
189 },
190 .driver_data = &quirk_asus_wapf4,
191 },
192 {
193 .callback = dmi_matched,
185 .ident = "ASUSTeK COMPUTER INC. X55A", 194 .ident = "ASUSTeK COMPUTER INC. X55A",
186 .matches = { 195 .matches = {
187 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), 196 DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."),
diff --git a/drivers/platform/x86/hp_accel.c b/drivers/platform/x86/hp_accel.c
index 13e14ec1d3d7..6bec745b6b92 100644
--- a/drivers/platform/x86/hp_accel.c
+++ b/drivers/platform/x86/hp_accel.c
@@ -37,6 +37,8 @@
37#include <linux/leds.h> 37#include <linux/leds.h>
38#include <linux/atomic.h> 38#include <linux/atomic.h>
39#include <linux/acpi.h> 39#include <linux/acpi.h>
40#include <linux/i8042.h>
41#include <linux/serio.h>
40#include "../../misc/lis3lv02d/lis3lv02d.h" 42#include "../../misc/lis3lv02d/lis3lv02d.h"
41 43
42#define DRIVER_NAME "hp_accel" 44#define DRIVER_NAME "hp_accel"
@@ -73,6 +75,13 @@ static inline void delayed_sysfs_set(struct led_classdev *led_cdev,
73 75
74/* HP-specific accelerometer driver ------------------------------------ */ 76/* HP-specific accelerometer driver ------------------------------------ */
75 77
78/* e0 25, e0 26, e0 27, e0 28 are scan codes that the accelerometer with acpi id
79 * HPQ6000 sends through the keyboard bus */
80#define ACCEL_1 0x25
81#define ACCEL_2 0x26
82#define ACCEL_3 0x27
83#define ACCEL_4 0x28
84
76/* For automatic insertion of the module */ 85/* For automatic insertion of the module */
77static const struct acpi_device_id lis3lv02d_device_ids[] = { 86static const struct acpi_device_id lis3lv02d_device_ids[] = {
78 {"HPQ0004", 0}, /* HP Mobile Data Protection System PNP */ 87 {"HPQ0004", 0}, /* HP Mobile Data Protection System PNP */
@@ -294,6 +303,35 @@ static void lis3lv02d_enum_resources(struct acpi_device *device)
294 printk(KERN_DEBUG DRIVER_NAME ": Error getting resources\n"); 303 printk(KERN_DEBUG DRIVER_NAME ": Error getting resources\n");
295} 304}
296 305
306static bool hp_accel_i8042_filter(unsigned char data, unsigned char str,
307 struct serio *port)
308{
309 static bool extended;
310
311 if (str & I8042_STR_AUXDATA)
312 return false;
313
314 if (data == 0xe0) {
315 extended = true;
316 return true;
317 } else if (unlikely(extended)) {
318 extended = false;
319
320 switch (data) {
321 case ACCEL_1:
322 case ACCEL_2:
323 case ACCEL_3:
324 case ACCEL_4:
325 return true;
326 default:
327 serio_interrupt(port, 0xe0, 0);
328 return false;
329 }
330 }
331
332 return false;
333}
334
297static int lis3lv02d_add(struct acpi_device *device) 335static int lis3lv02d_add(struct acpi_device *device)
298{ 336{
299 int ret; 337 int ret;
@@ -326,6 +364,11 @@ static int lis3lv02d_add(struct acpi_device *device)
326 if (ret) 364 if (ret)
327 return ret; 365 return ret;
328 366
367 /* filter to remove HPQ6000 accelerometer data
368 * from keyboard bus stream */
369 if (strstr(dev_name(&device->dev), "HPQ6000"))
370 i8042_install_filter(hp_accel_i8042_filter);
371
329 INIT_WORK(&hpled_led.work, delayed_set_status_worker); 372 INIT_WORK(&hpled_led.work, delayed_set_status_worker);
330 ret = led_classdev_register(NULL, &hpled_led.led_classdev); 373 ret = led_classdev_register(NULL, &hpled_led.led_classdev);
331 if (ret) { 374 if (ret) {
@@ -343,6 +386,7 @@ static int lis3lv02d_remove(struct acpi_device *device)
343 if (!device) 386 if (!device)
344 return -EINVAL; 387 return -EINVAL;
345 388
389 i8042_remove_filter(hp_accel_i8042_filter);
346 lis3lv02d_joystick_disable(&lis3_dev); 390 lis3lv02d_joystick_disable(&lis3_dev);
347 lis3lv02d_poweroff(&lis3_dev); 391 lis3lv02d_poweroff(&lis3_dev);
348 392
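
Taken together with the Kconfig change above, hp_accel now hooks the i8042 byte stream: the filter swallows the e0 25..e0 28 sequences the HPQ6000 accelerometer injects on the keyboard bus, and re-injects the e0 prefix with serio_interrupt() whenever the following byte turns out to be an ordinary extended key. For reference, a minimal sketch of the same install/remove pattern in isolation (the function names are invented; only the i8042 filter API is real):

#include <linux/i8042.h>
#include <linux/serio.h>

static bool example_i8042_filter(unsigned char data, unsigned char str,
				 struct serio *port)
{
	if (str & I8042_STR_AUXDATA)	/* ignore AUX (mouse) bytes */
		return false;

	/* return true to consume the byte, false to pass it to atkbd */
	return false;
}

static int example_attach(void)
{
	/* fails with -EBUSY if another filter is already installed */
	return i8042_install_filter(example_i8042_filter);
}

static void example_detach(void)
{
	i8042_remove_filter(example_i8042_filter);
}
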
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index 02152de135b5..ed494f37c40f 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -837,6 +837,13 @@ static const struct dmi_system_id no_hw_rfkill_list[] = {
837 DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Yoga 2"), 837 DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo Yoga 2"),
838 }, 838 },
839 }, 839 },
840 {
841 .ident = "Lenovo Yoga 3 Pro 1370",
842 .matches = {
843 DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
844 DMI_MATCH(DMI_PRODUCT_VERSION, "Lenovo YOGA 3 Pro-1370"),
845 },
846 },
840 {} 847 {}
841}; 848};
842 849
diff --git a/drivers/platform/x86/samsung-laptop.c b/drivers/platform/x86/samsung-laptop.c
index 5a5966512277..ff765d8e1a09 100644
--- a/drivers/platform/x86/samsung-laptop.c
+++ b/drivers/platform/x86/samsung-laptop.c
@@ -1561,6 +1561,16 @@ static struct dmi_system_id __initdata samsung_dmi_table[] = {
1561 }, 1561 },
1562 { 1562 {
1563 .callback = samsung_dmi_matched, 1563 .callback = samsung_dmi_matched,
1564 .ident = "NC210",
1565 .matches = {
1566 DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
1567 DMI_MATCH(DMI_PRODUCT_NAME, "NC210/NC110"),
1568 DMI_MATCH(DMI_BOARD_NAME, "NC210/NC110"),
1569 },
1570 .driver_data = &samsung_broken_acpi_video,
1571 },
1572 {
1573 .callback = samsung_dmi_matched,
1564 .ident = "730U3E/740U3E", 1574 .ident = "730U3E/740U3E",
1565 .matches = { 1575 .matches = {
1566 DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."), 1576 DMI_MATCH(DMI_SYS_VENDOR, "SAMSUNG ELECTRONICS CO., LTD."),
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index ef3a1904e92f..ab6151f05420 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -240,6 +240,12 @@ static const struct dmi_system_id toshiba_alt_keymap_dmi[] = {
240 DMI_MATCH(DMI_PRODUCT_NAME, "Qosmio X75-A"), 240 DMI_MATCH(DMI_PRODUCT_NAME, "Qosmio X75-A"),
241 }, 241 },
242 }, 242 },
243 {
244 .matches = {
245 DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"),
246 DMI_MATCH(DMI_PRODUCT_NAME, "TECRA A50-A"),
247 },
248 },
243 {} 249 {}
244}; 250};
245 251
diff --git a/drivers/power/ab8500_fg.c b/drivers/power/ab8500_fg.c
index 217da4b2ca86..99a78d365ceb 100644
--- a/drivers/power/ab8500_fg.c
+++ b/drivers/power/ab8500_fg.c
@@ -25,6 +25,7 @@
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/delay.h> 26#include <linux/delay.h>
27#include <linux/time.h> 27#include <linux/time.h>
28#include <linux/time64.h>
28#include <linux/of.h> 29#include <linux/of.h>
29#include <linux/completion.h> 30#include <linux/completion.h>
30#include <linux/mfd/core.h> 31#include <linux/mfd/core.h>
@@ -108,7 +109,7 @@ enum ab8500_fg_calibration_state {
108struct ab8500_fg_avg_cap { 109struct ab8500_fg_avg_cap {
109 int avg; 110 int avg;
110 int samples[NBR_AVG_SAMPLES]; 111 int samples[NBR_AVG_SAMPLES];
111 __kernel_time_t time_stamps[NBR_AVG_SAMPLES]; 112 time64_t time_stamps[NBR_AVG_SAMPLES];
112 int pos; 113 int pos;
113 int nbr_samples; 114 int nbr_samples;
114 int sum; 115 int sum;
@@ -386,15 +387,15 @@ static int ab8500_fg_is_low_curr(struct ab8500_fg *di, int curr)
386 */ 387 */
387static int ab8500_fg_add_cap_sample(struct ab8500_fg *di, int sample) 388static int ab8500_fg_add_cap_sample(struct ab8500_fg *di, int sample)
388{ 389{
389 struct timespec ts; 390 struct timespec64 ts64;
390 struct ab8500_fg_avg_cap *avg = &di->avg_cap; 391 struct ab8500_fg_avg_cap *avg = &di->avg_cap;
391 392
392 getnstimeofday(&ts); 393 getnstimeofday64(&ts64);
393 394
394 do { 395 do {
395 avg->sum += sample - avg->samples[avg->pos]; 396 avg->sum += sample - avg->samples[avg->pos];
396 avg->samples[avg->pos] = sample; 397 avg->samples[avg->pos] = sample;
397 avg->time_stamps[avg->pos] = ts.tv_sec; 398 avg->time_stamps[avg->pos] = ts64.tv_sec;
398 avg->pos++; 399 avg->pos++;
399 400
400 if (avg->pos == NBR_AVG_SAMPLES) 401 if (avg->pos == NBR_AVG_SAMPLES)
@@ -407,7 +408,7 @@ static int ab8500_fg_add_cap_sample(struct ab8500_fg *di, int sample)
407 * Check the time stamp for each sample. If too old, 408 * Check the time stamp for each sample. If too old,
408 * replace with latest sample 409 * replace with latest sample
409 */ 410 */
410 } while (ts.tv_sec - VALID_CAPACITY_SEC > avg->time_stamps[avg->pos]); 411 } while (ts64.tv_sec - VALID_CAPACITY_SEC > avg->time_stamps[avg->pos]);
411 412
412 avg->avg = avg->sum / avg->nbr_samples; 413 avg->avg = avg->sum / avg->nbr_samples;
413 414
@@ -446,14 +447,14 @@ static void ab8500_fg_clear_cap_samples(struct ab8500_fg *di)
446static void ab8500_fg_fill_cap_sample(struct ab8500_fg *di, int sample) 447static void ab8500_fg_fill_cap_sample(struct ab8500_fg *di, int sample)
447{ 448{
448 int i; 449 int i;
449 struct timespec ts; 450 struct timespec64 ts64;
450 struct ab8500_fg_avg_cap *avg = &di->avg_cap; 451 struct ab8500_fg_avg_cap *avg = &di->avg_cap;
451 452
452 getnstimeofday(&ts); 453 getnstimeofday64(&ts64);
453 454
454 for (i = 0; i < NBR_AVG_SAMPLES; i++) { 455 for (i = 0; i < NBR_AVG_SAMPLES; i++) {
455 avg->samples[i] = sample; 456 avg->samples[i] = sample;
456 avg->time_stamps[i] = ts.tv_sec; 457 avg->time_stamps[i] = ts64.tv_sec;
457 } 458 }
458 459
459 avg->pos = 0; 460 avg->pos = 0;
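
The ab8500_fg change is a straight y2038 conversion: __kernel_time_t/timespec become time64_t/timespec64 and getnstimeofday() becomes getnstimeofday64(), so the stored second stamps stay valid past 2038 on 32-bit builds. The pattern in isolation (a sketch, not driver code):

#include <linux/time.h>
#include <linux/time64.h>

static time64_t example_stamp(void)
{
	struct timespec64 ts64;

	getnstimeofday64(&ts64);	/* 64-bit seconds on all arches */
	return ts64.tv_sec;
}
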
diff --git a/drivers/power/bq2415x_charger.c b/drivers/power/bq2415x_charger.c
index e384844a1ae1..1f49986fc605 100644
--- a/drivers/power/bq2415x_charger.c
+++ b/drivers/power/bq2415x_charger.c
@@ -1579,8 +1579,15 @@ static int bq2415x_probe(struct i2c_client *client,
1579 if (np) { 1579 if (np) {
1580 bq->notify_psy = power_supply_get_by_phandle(np, "ti,usb-charger-detection"); 1580 bq->notify_psy = power_supply_get_by_phandle(np, "ti,usb-charger-detection");
1581 1581
1582 if (!bq->notify_psy) 1582 if (IS_ERR(bq->notify_psy)) {
1583 return -EPROBE_DEFER; 1583 dev_info(&client->dev,
1584 "no 'ti,usb-charger-detection' property (err=%ld)\n",
1585 PTR_ERR(bq->notify_psy));
1586 bq->notify_psy = NULL;
1587 } else if (!bq->notify_psy) {
1588 ret = -EPROBE_DEFER;
1589 goto error_2;
1590 }
1584 } 1591 }
1585 else if (pdata->notify_device) 1592 else if (pdata->notify_device)
1586 bq->notify_psy = power_supply_get_by_name(pdata->notify_device); 1593 bq->notify_psy = power_supply_get_by_name(pdata->notify_device);
@@ -1602,27 +1609,27 @@ static int bq2415x_probe(struct i2c_client *client,
1602 ret = of_property_read_u32(np, "ti,current-limit", 1609 ret = of_property_read_u32(np, "ti,current-limit",
1603 &bq->init_data.current_limit); 1610 &bq->init_data.current_limit);
1604 if (ret) 1611 if (ret)
1605 return ret; 1612 goto error_2;
1606 ret = of_property_read_u32(np, "ti,weak-battery-voltage", 1613 ret = of_property_read_u32(np, "ti,weak-battery-voltage",
1607 &bq->init_data.weak_battery_voltage); 1614 &bq->init_data.weak_battery_voltage);
1608 if (ret) 1615 if (ret)
1609 return ret; 1616 goto error_2;
1610 ret = of_property_read_u32(np, "ti,battery-regulation-voltage", 1617 ret = of_property_read_u32(np, "ti,battery-regulation-voltage",
1611 &bq->init_data.battery_regulation_voltage); 1618 &bq->init_data.battery_regulation_voltage);
1612 if (ret) 1619 if (ret)
1613 return ret; 1620 goto error_2;
1614 ret = of_property_read_u32(np, "ti,charge-current", 1621 ret = of_property_read_u32(np, "ti,charge-current",
1615 &bq->init_data.charge_current); 1622 &bq->init_data.charge_current);
1616 if (ret) 1623 if (ret)
1617 return ret; 1624 goto error_2;
1618 ret = of_property_read_u32(np, "ti,termination-current", 1625 ret = of_property_read_u32(np, "ti,termination-current",
1619 &bq->init_data.termination_current); 1626 &bq->init_data.termination_current);
1620 if (ret) 1627 if (ret)
1621 return ret; 1628 goto error_2;
1622 ret = of_property_read_u32(np, "ti,resistor-sense", 1629 ret = of_property_read_u32(np, "ti,resistor-sense",
1623 &bq->init_data.resistor_sense); 1630 &bq->init_data.resistor_sense);
1624 if (ret) 1631 if (ret)
1625 return ret; 1632 goto error_2;
1626 } else { 1633 } else {
1627 memcpy(&bq->init_data, pdata, sizeof(bq->init_data)); 1634 memcpy(&bq->init_data, pdata, sizeof(bq->init_data));
1628 } 1635 }
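
Besides treating an ERR_PTR from power_supply_get_by_phandle() as "property absent" rather than a hard failure, the probe path above stops returning directly once later steps can fail and funnels everything through the driver's error_2 label instead. The usual goto-unwind shape, sketched with made-up helpers:

static int example_probe(void)
{
	int ret;

	ret = acquire_first();		/* hypothetical step 1 */
	if (ret)
		return ret;		/* nothing to undo yet */

	ret = acquire_second();		/* hypothetical step 2 */
	if (ret)
		goto err_first;		/* undo step 1 on the way out */

	return 0;

err_first:
	release_first();
	return ret;
}
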
diff --git a/drivers/power/charger-manager.c b/drivers/power/charger-manager.c
index 7098a1ce2d3c..ef8094a61f1e 100644
--- a/drivers/power/charger-manager.c
+++ b/drivers/power/charger-manager.c
@@ -97,6 +97,7 @@ static struct charger_global_desc *g_desc; /* init with setup_charger_manager */
97static bool is_batt_present(struct charger_manager *cm) 97static bool is_batt_present(struct charger_manager *cm)
98{ 98{
99 union power_supply_propval val; 99 union power_supply_propval val;
100 struct power_supply *psy;
100 bool present = false; 101 bool present = false;
101 int i, ret; 102 int i, ret;
102 103
@@ -107,16 +108,27 @@ static bool is_batt_present(struct charger_manager *cm)
107 case CM_NO_BATTERY: 108 case CM_NO_BATTERY:
108 break; 109 break;
109 case CM_FUEL_GAUGE: 110 case CM_FUEL_GAUGE:
110 ret = cm->fuel_gauge->get_property(cm->fuel_gauge, 111 psy = power_supply_get_by_name(cm->desc->psy_fuel_gauge);
112 if (!psy)
113 break;
114
115 ret = psy->get_property(psy,
111 POWER_SUPPLY_PROP_PRESENT, &val); 116 POWER_SUPPLY_PROP_PRESENT, &val);
112 if (ret == 0 && val.intval) 117 if (ret == 0 && val.intval)
113 present = true; 118 present = true;
114 break; 119 break;
115 case CM_CHARGER_STAT: 120 case CM_CHARGER_STAT:
116 for (i = 0; cm->charger_stat[i]; i++) { 121 for (i = 0; cm->desc->psy_charger_stat[i]; i++) {
117 ret = cm->charger_stat[i]->get_property( 122 psy = power_supply_get_by_name(
118 cm->charger_stat[i], 123 cm->desc->psy_charger_stat[i]);
119 POWER_SUPPLY_PROP_PRESENT, &val); 124 if (!psy) {
125 dev_err(cm->dev, "Cannot find power supply \"%s\"\n",
126 cm->desc->psy_charger_stat[i]);
127 continue;
128 }
129
130 ret = psy->get_property(psy, POWER_SUPPLY_PROP_PRESENT,
131 &val);
120 if (ret == 0 && val.intval) { 132 if (ret == 0 && val.intval) {
121 present = true; 133 present = true;
122 break; 134 break;
@@ -139,14 +151,20 @@ static bool is_batt_present(struct charger_manager *cm)
139static bool is_ext_pwr_online(struct charger_manager *cm) 151static bool is_ext_pwr_online(struct charger_manager *cm)
140{ 152{
141 union power_supply_propval val; 153 union power_supply_propval val;
154 struct power_supply *psy;
142 bool online = false; 155 bool online = false;
143 int i, ret; 156 int i, ret;
144 157
145 /* If at least one of them has one, it's yes. */ 158 /* If at least one of them has one, it's yes. */
146 for (i = 0; cm->charger_stat[i]; i++) { 159 for (i = 0; cm->desc->psy_charger_stat[i]; i++) {
147 ret = cm->charger_stat[i]->get_property( 160 psy = power_supply_get_by_name(cm->desc->psy_charger_stat[i]);
148 cm->charger_stat[i], 161 if (!psy) {
149 POWER_SUPPLY_PROP_ONLINE, &val); 162 dev_err(cm->dev, "Cannot find power supply \"%s\"\n",
163 cm->desc->psy_charger_stat[i]);
164 continue;
165 }
166
167 ret = psy->get_property(psy, POWER_SUPPLY_PROP_ONLINE, &val);
150 if (ret == 0 && val.intval) { 168 if (ret == 0 && val.intval) {
151 online = true; 169 online = true;
152 break; 170 break;
@@ -167,12 +185,14 @@ static bool is_ext_pwr_online(struct charger_manager *cm)
167static int get_batt_uV(struct charger_manager *cm, int *uV) 185static int get_batt_uV(struct charger_manager *cm, int *uV)
168{ 186{
169 union power_supply_propval val; 187 union power_supply_propval val;
188 struct power_supply *fuel_gauge;
170 int ret; 189 int ret;
171 190
172 if (!cm->fuel_gauge) 191 fuel_gauge = power_supply_get_by_name(cm->desc->psy_fuel_gauge);
192 if (!fuel_gauge)
173 return -ENODEV; 193 return -ENODEV;
174 194
175 ret = cm->fuel_gauge->get_property(cm->fuel_gauge, 195 ret = fuel_gauge->get_property(fuel_gauge,
176 POWER_SUPPLY_PROP_VOLTAGE_NOW, &val); 196 POWER_SUPPLY_PROP_VOLTAGE_NOW, &val);
177 if (ret) 197 if (ret)
178 return ret; 198 return ret;
@@ -189,6 +209,7 @@ static bool is_charging(struct charger_manager *cm)
189{ 209{
190 int i, ret; 210 int i, ret;
191 bool charging = false; 211 bool charging = false;
212 struct power_supply *psy;
192 union power_supply_propval val; 213 union power_supply_propval val;
193 214
194 /* If there is no battery, it cannot be charged */ 215 /* If there is no battery, it cannot be charged */
@@ -196,17 +217,22 @@ static bool is_charging(struct charger_manager *cm)
196 return false; 217 return false;
197 218
198 /* If at least one of the charger is charging, return yes */ 219 /* If at least one of the charger is charging, return yes */
199 for (i = 0; cm->charger_stat[i]; i++) { 220 for (i = 0; cm->desc->psy_charger_stat[i]; i++) {
200 /* 1. The charger sholuld not be DISABLED */ 221 /* 1. The charger sholuld not be DISABLED */
201 if (cm->emergency_stop) 222 if (cm->emergency_stop)
202 continue; 223 continue;
203 if (!cm->charger_enabled) 224 if (!cm->charger_enabled)
204 continue; 225 continue;
205 226
227 psy = power_supply_get_by_name(cm->desc->psy_charger_stat[i]);
228 if (!psy) {
229 dev_err(cm->dev, "Cannot find power supply \"%s\"\n",
230 cm->desc->psy_charger_stat[i]);
231 continue;
232 }
233
206 /* 2. The charger should be online (ext-power) */ 234 /* 2. The charger should be online (ext-power) */
207 ret = cm->charger_stat[i]->get_property( 235 ret = psy->get_property(psy, POWER_SUPPLY_PROP_ONLINE, &val);
208 cm->charger_stat[i],
209 POWER_SUPPLY_PROP_ONLINE, &val);
210 if (ret) { 236 if (ret) {
211 dev_warn(cm->dev, "Cannot read ONLINE value from %s\n", 237 dev_warn(cm->dev, "Cannot read ONLINE value from %s\n",
212 cm->desc->psy_charger_stat[i]); 238 cm->desc->psy_charger_stat[i]);
@@ -219,9 +245,7 @@ static bool is_charging(struct charger_manager *cm)
219 * 3. The charger should not be FULL, DISCHARGING, 245 * 3. The charger should not be FULL, DISCHARGING,
220 * or NOT_CHARGING. 246 * or NOT_CHARGING.
221 */ 247 */
222 ret = cm->charger_stat[i]->get_property( 248 ret = psy->get_property(psy, POWER_SUPPLY_PROP_STATUS, &val);
223 cm->charger_stat[i],
224 POWER_SUPPLY_PROP_STATUS, &val);
225 if (ret) { 249 if (ret) {
226 dev_warn(cm->dev, "Cannot read STATUS value from %s\n", 250 dev_warn(cm->dev, "Cannot read STATUS value from %s\n",
227 cm->desc->psy_charger_stat[i]); 251 cm->desc->psy_charger_stat[i]);
@@ -248,6 +272,7 @@ static bool is_full_charged(struct charger_manager *cm)
248{ 272{
249 struct charger_desc *desc = cm->desc; 273 struct charger_desc *desc = cm->desc;
250 union power_supply_propval val; 274 union power_supply_propval val;
275 struct power_supply *fuel_gauge;
251 int ret = 0; 276 int ret = 0;
252 int uV; 277 int uV;
253 278
@@ -255,11 +280,15 @@ static bool is_full_charged(struct charger_manager *cm)
255 if (!is_batt_present(cm)) 280 if (!is_batt_present(cm))
256 return false; 281 return false;
257 282
258 if (cm->fuel_gauge && desc->fullbatt_full_capacity > 0) { 283 fuel_gauge = power_supply_get_by_name(cm->desc->psy_fuel_gauge);
284 if (!fuel_gauge)
285 return false;
286
287 if (desc->fullbatt_full_capacity > 0) {
259 val.intval = 0; 288 val.intval = 0;
260 289
261 /* Not full if capacity of fuel gauge isn't full */ 290 /* Not full if capacity of fuel gauge isn't full */
262 ret = cm->fuel_gauge->get_property(cm->fuel_gauge, 291 ret = fuel_gauge->get_property(fuel_gauge,
263 POWER_SUPPLY_PROP_CHARGE_FULL, &val); 292 POWER_SUPPLY_PROP_CHARGE_FULL, &val);
264 if (!ret && val.intval > desc->fullbatt_full_capacity) 293 if (!ret && val.intval > desc->fullbatt_full_capacity)
265 return true; 294 return true;
@@ -273,10 +302,10 @@ static bool is_full_charged(struct charger_manager *cm)
273 } 302 }
274 303
275 /* Full, if the capacity is more than fullbatt_soc */ 304 /* Full, if the capacity is more than fullbatt_soc */
276 if (cm->fuel_gauge && desc->fullbatt_soc > 0) { 305 if (desc->fullbatt_soc > 0) {
277 val.intval = 0; 306 val.intval = 0;
278 307
279 ret = cm->fuel_gauge->get_property(cm->fuel_gauge, 308 ret = fuel_gauge->get_property(fuel_gauge,
280 POWER_SUPPLY_PROP_CAPACITY, &val); 309 POWER_SUPPLY_PROP_CAPACITY, &val);
281 if (!ret && val.intval >= desc->fullbatt_soc) 310 if (!ret && val.intval >= desc->fullbatt_soc)
282 return true; 311 return true;
@@ -551,6 +580,20 @@ static int check_charging_duration(struct charger_manager *cm)
551 return ret; 580 return ret;
552} 581}
553 582
583static int cm_get_battery_temperature_by_psy(struct charger_manager *cm,
584 int *temp)
585{
586 struct power_supply *fuel_gauge;
587
588 fuel_gauge = power_supply_get_by_name(cm->desc->psy_fuel_gauge);
589 if (!fuel_gauge)
590 return -ENODEV;
591
592 return fuel_gauge->get_property(fuel_gauge,
593 POWER_SUPPLY_PROP_TEMP,
594 (union power_supply_propval *)temp);
595}
596
554static int cm_get_battery_temperature(struct charger_manager *cm, 597static int cm_get_battery_temperature(struct charger_manager *cm,
555 int *temp) 598 int *temp)
556{ 599{
@@ -560,15 +603,18 @@ static int cm_get_battery_temperature(struct charger_manager *cm,
560 return -ENODEV; 603 return -ENODEV;
561 604
562#ifdef CONFIG_THERMAL 605#ifdef CONFIG_THERMAL
563 ret = thermal_zone_get_temp(cm->tzd_batt, (unsigned long *)temp); 606 if (cm->tzd_batt) {
564 if (!ret) 607 ret = thermal_zone_get_temp(cm->tzd_batt, (unsigned long *)temp);
565 /* Calibrate temperature unit */ 608 if (!ret)
566 *temp /= 100; 609 /* Calibrate temperature unit */
567#else 610 *temp /= 100;
568 ret = cm->fuel_gauge->get_property(cm->fuel_gauge, 611 } else
569 POWER_SUPPLY_PROP_TEMP,
570 (union power_supply_propval *)temp);
571#endif 612#endif
613 {
614 /* if-else continued from CONFIG_THERMAL */
615 ret = cm_get_battery_temperature_by_psy(cm, temp);
616 }
617
572 return ret; 618 return ret;
573} 619}
574 620
@@ -827,6 +873,7 @@ static int charger_get_property(struct power_supply *psy,
827 struct charger_manager *cm = container_of(psy, 873 struct charger_manager *cm = container_of(psy,
828 struct charger_manager, charger_psy); 874 struct charger_manager, charger_psy);
829 struct charger_desc *desc = cm->desc; 875 struct charger_desc *desc = cm->desc;
876 struct power_supply *fuel_gauge;
830 int ret = 0; 877 int ret = 0;
831 int uV; 878 int uV;
832 879
@@ -857,14 +904,20 @@ static int charger_get_property(struct power_supply *psy,
857 ret = get_batt_uV(cm, &val->intval); 904 ret = get_batt_uV(cm, &val->intval);
858 break; 905 break;
859 case POWER_SUPPLY_PROP_CURRENT_NOW: 906 case POWER_SUPPLY_PROP_CURRENT_NOW:
860 ret = cm->fuel_gauge->get_property(cm->fuel_gauge, 907 fuel_gauge = power_supply_get_by_name(cm->desc->psy_fuel_gauge);
908 if (!fuel_gauge) {
909 ret = -ENODEV;
910 break;
911 }
912 ret = fuel_gauge->get_property(fuel_gauge,
861 POWER_SUPPLY_PROP_CURRENT_NOW, val); 913 POWER_SUPPLY_PROP_CURRENT_NOW, val);
862 break; 914 break;
863 case POWER_SUPPLY_PROP_TEMP: 915 case POWER_SUPPLY_PROP_TEMP:
864 case POWER_SUPPLY_PROP_TEMP_AMBIENT: 916 case POWER_SUPPLY_PROP_TEMP_AMBIENT:
865 return cm_get_battery_temperature(cm, &val->intval); 917 return cm_get_battery_temperature(cm, &val->intval);
866 case POWER_SUPPLY_PROP_CAPACITY: 918 case POWER_SUPPLY_PROP_CAPACITY:
867 if (!cm->fuel_gauge) { 919 fuel_gauge = power_supply_get_by_name(cm->desc->psy_fuel_gauge);
920 if (!fuel_gauge) {
868 ret = -ENODEV; 921 ret = -ENODEV;
869 break; 922 break;
870 } 923 }
@@ -875,7 +928,7 @@ static int charger_get_property(struct power_supply *psy,
875 break; 928 break;
876 } 929 }
877 930
878 ret = cm->fuel_gauge->get_property(cm->fuel_gauge, 931 ret = fuel_gauge->get_property(fuel_gauge,
879 POWER_SUPPLY_PROP_CAPACITY, val); 932 POWER_SUPPLY_PROP_CAPACITY, val);
880 if (ret) 933 if (ret)
881 break; 934 break;
@@ -924,7 +977,14 @@ static int charger_get_property(struct power_supply *psy,
924 break; 977 break;
925 case POWER_SUPPLY_PROP_CHARGE_NOW: 978 case POWER_SUPPLY_PROP_CHARGE_NOW:
926 if (is_charging(cm)) { 979 if (is_charging(cm)) {
927 ret = cm->fuel_gauge->get_property(cm->fuel_gauge, 980 fuel_gauge = power_supply_get_by_name(
981 cm->desc->psy_fuel_gauge);
982 if (!fuel_gauge) {
983 ret = -ENODEV;
984 break;
985 }
986
987 ret = fuel_gauge->get_property(fuel_gauge,
928 POWER_SUPPLY_PROP_CHARGE_NOW, 988 POWER_SUPPLY_PROP_CHARGE_NOW,
929 val); 989 val);
930 if (ret) { 990 if (ret) {
@@ -970,6 +1030,7 @@ static struct power_supply psy_default = {
970 .properties = default_charger_props, 1030 .properties = default_charger_props,
971 .num_properties = ARRAY_SIZE(default_charger_props), 1031 .num_properties = ARRAY_SIZE(default_charger_props),
972 .get_property = charger_get_property, 1032 .get_property = charger_get_property,
1033 .no_thermal = true,
973}; 1034};
974 1035
975/** 1036/**
@@ -1485,14 +1546,15 @@ err:
1485 return ret; 1546 return ret;
1486} 1547}
1487 1548
1488static int cm_init_thermal_data(struct charger_manager *cm) 1549static int cm_init_thermal_data(struct charger_manager *cm,
1550 struct power_supply *fuel_gauge)
1489{ 1551{
1490 struct charger_desc *desc = cm->desc; 1552 struct charger_desc *desc = cm->desc;
1491 union power_supply_propval val; 1553 union power_supply_propval val;
1492 int ret; 1554 int ret;
1493 1555
1494 /* Verify whether fuel gauge provides battery temperature */ 1556 /* Verify whether fuel gauge provides battery temperature */
1495 ret = cm->fuel_gauge->get_property(cm->fuel_gauge, 1557 ret = fuel_gauge->get_property(fuel_gauge,
1496 POWER_SUPPLY_PROP_TEMP, &val); 1558 POWER_SUPPLY_PROP_TEMP, &val);
1497 1559
1498 if (!ret) { 1560 if (!ret) {
@@ -1502,8 +1564,6 @@ static int cm_init_thermal_data(struct charger_manager *cm)
1502 cm->desc->measure_battery_temp = true; 1564 cm->desc->measure_battery_temp = true;
1503 } 1565 }
1504#ifdef CONFIG_THERMAL 1566#ifdef CONFIG_THERMAL
1505 cm->tzd_batt = cm->fuel_gauge->tzd;
1506
1507 if (ret && desc->thermal_zone) { 1567 if (ret && desc->thermal_zone) {
1508 cm->tzd_batt = 1568 cm->tzd_batt =
1509 thermal_zone_get_zone_by_name(desc->thermal_zone); 1569 thermal_zone_get_zone_by_name(desc->thermal_zone);
@@ -1666,6 +1726,7 @@ static int charger_manager_probe(struct platform_device *pdev)
1666 int ret = 0, i = 0; 1726 int ret = 0, i = 0;
1667 int j = 0; 1727 int j = 0;
1668 union power_supply_propval val; 1728 union power_supply_propval val;
1729 struct power_supply *fuel_gauge;
1669 1730
1670 if (g_desc && !rtc_dev && g_desc->rtc_name) { 1731 if (g_desc && !rtc_dev && g_desc->rtc_name) {
1671 rtc_dev = rtc_class_open(g_desc->rtc_name); 1732 rtc_dev = rtc_class_open(g_desc->rtc_name);
@@ -1729,23 +1790,20 @@ static int charger_manager_probe(struct platform_device *pdev)
1729 while (desc->psy_charger_stat[i]) 1790 while (desc->psy_charger_stat[i])
1730 i++; 1791 i++;
1731 1792
1732 cm->charger_stat = devm_kzalloc(&pdev->dev, 1793 /* Check if charger's supplies are present at probe */
1733 sizeof(struct power_supply *) * i, GFP_KERNEL);
1734 if (!cm->charger_stat)
1735 return -ENOMEM;
1736
1737 for (i = 0; desc->psy_charger_stat[i]; i++) { 1794 for (i = 0; desc->psy_charger_stat[i]; i++) {
1738 cm->charger_stat[i] = power_supply_get_by_name( 1795 struct power_supply *psy;
1739 desc->psy_charger_stat[i]); 1796
1740 if (!cm->charger_stat[i]) { 1797 psy = power_supply_get_by_name(desc->psy_charger_stat[i]);
1798 if (!psy) {
1741 dev_err(&pdev->dev, "Cannot find power supply \"%s\"\n", 1799 dev_err(&pdev->dev, "Cannot find power supply \"%s\"\n",
1742 desc->psy_charger_stat[i]); 1800 desc->psy_charger_stat[i]);
1743 return -ENODEV; 1801 return -ENODEV;
1744 } 1802 }
1745 } 1803 }
1746 1804
1747 cm->fuel_gauge = power_supply_get_by_name(desc->psy_fuel_gauge); 1805 fuel_gauge = power_supply_get_by_name(desc->psy_fuel_gauge);
1748 if (!cm->fuel_gauge) { 1806 if (!fuel_gauge) {
1749 dev_err(&pdev->dev, "Cannot find power supply \"%s\"\n", 1807 dev_err(&pdev->dev, "Cannot find power supply \"%s\"\n",
1750 desc->psy_fuel_gauge); 1808 desc->psy_fuel_gauge);
1751 return -ENODEV; 1809 return -ENODEV;
@@ -1788,13 +1846,13 @@ static int charger_manager_probe(struct platform_device *pdev)
1788 cm->charger_psy.num_properties = psy_default.num_properties; 1846 cm->charger_psy.num_properties = psy_default.num_properties;
1789 1847
1790 /* Find which optional psy-properties are available */ 1848 /* Find which optional psy-properties are available */
1791 if (!cm->fuel_gauge->get_property(cm->fuel_gauge, 1849 if (!fuel_gauge->get_property(fuel_gauge,
1792 POWER_SUPPLY_PROP_CHARGE_NOW, &val)) { 1850 POWER_SUPPLY_PROP_CHARGE_NOW, &val)) {
1793 cm->charger_psy.properties[cm->charger_psy.num_properties] = 1851 cm->charger_psy.properties[cm->charger_psy.num_properties] =
1794 POWER_SUPPLY_PROP_CHARGE_NOW; 1852 POWER_SUPPLY_PROP_CHARGE_NOW;
1795 cm->charger_psy.num_properties++; 1853 cm->charger_psy.num_properties++;
1796 } 1854 }
1797 if (!cm->fuel_gauge->get_property(cm->fuel_gauge, 1855 if (!fuel_gauge->get_property(fuel_gauge,
1798 POWER_SUPPLY_PROP_CURRENT_NOW, 1856 POWER_SUPPLY_PROP_CURRENT_NOW,
1799 &val)) { 1857 &val)) {
1800 cm->charger_psy.properties[cm->charger_psy.num_properties] = 1858 cm->charger_psy.properties[cm->charger_psy.num_properties] =
@@ -1802,7 +1860,7 @@ static int charger_manager_probe(struct platform_device *pdev)
1802 cm->charger_psy.num_properties++; 1860 cm->charger_psy.num_properties++;
1803 } 1861 }
1804 1862
1805 ret = cm_init_thermal_data(cm); 1863 ret = cm_init_thermal_data(cm, fuel_gauge);
1806 if (ret) { 1864 if (ret) {
1807 dev_err(&pdev->dev, "Failed to initialize thermal data\n"); 1865 dev_err(&pdev->dev, "Failed to initialize thermal data\n");
1808 cm->desc->measure_battery_temp = false; 1866 cm->desc->measure_battery_temp = false;
@@ -2066,8 +2124,8 @@ static bool find_power_supply(struct charger_manager *cm,
2066 int i; 2124 int i;
2067 bool found = false; 2125 bool found = false;
2068 2126
2069 for (i = 0; cm->charger_stat[i]; i++) { 2127 for (i = 0; cm->desc->psy_charger_stat[i]; i++) {
2070 if (psy == cm->charger_stat[i]) { 2128 if (!strcmp(psy->name, cm->desc->psy_charger_stat[i])) {
2071 found = true; 2129 found = true;
2072 break; 2130 break;
2073 } 2131 }
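
The recurring change throughout charger-manager.c is that the cached struct power_supply pointers (cm->charger_stat[], cm->fuel_gauge) are gone; each helper now resolves the supply by name at the point of use with power_supply_get_by_name(), so supplies that come and go after probe are tolerated, and find_power_supply() compares names instead of pointers. The lookup-and-read pattern on its own (the supply name below is made up):

#include <linux/power_supply.h>

static int example_read_soc(int *soc)
{
	union power_supply_propval val;
	struct power_supply *psy;
	int ret;

	psy = power_supply_get_by_name("example-fuel-gauge");
	if (!psy)
		return -ENODEV;		/* not (or no longer) registered */

	ret = psy->get_property(psy, POWER_SUPPLY_PROP_CAPACITY, &val);
	if (!ret)
		*soc = val.intval;

	return ret;
}
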
diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
index 6cb7fe5c022d..694e8cddd5c1 100644
--- a/drivers/power/power_supply_core.c
+++ b/drivers/power/power_supply_core.c
@@ -417,6 +417,9 @@ static int psy_register_thermal(struct power_supply *psy)
417{ 417{
418 int i; 418 int i;
419 419
420 if (psy->no_thermal)
421 return 0;
422
420 /* Register battery zone device psy reports temperature */ 423 /* Register battery zone device psy reports temperature */
421 for (i = 0; i < psy->num_properties; i++) { 424 for (i = 0; i < psy->num_properties; i++) {
422 if (psy->properties[i] == POWER_SUPPLY_PROP_TEMP) { 425 if (psy->properties[i] == POWER_SUPPLY_PROP_TEMP) {
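
The small power_supply_core.c hunk adds the opt-out that charger-manager's psy_default relies on: a supply flagged no_thermal skips thermal zone registration even if it exposes POWER_SUPPLY_PROP_TEMP, presumably because the aggregate charger psy only forwards the fuel gauge's temperature and a second thermal zone for the same sensor would be redundant.
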
diff --git a/drivers/power/reset/at91-reset.c b/drivers/power/reset/at91-reset.c
index 3611806c9cfd..3cb36693343a 100644
--- a/drivers/power/reset/at91-reset.c
+++ b/drivers/power/reset/at91-reset.c
@@ -100,11 +100,11 @@ static void at91sam9g45_restart(enum reboot_mode mode, const char *cmd)
100 /* Disable SDRAM0 accesses */ 100 /* Disable SDRAM0 accesses */
101 "1: str %3, [%0, #" __stringify(AT91_DDRSDRC_RTR) "]\n\t" 101 "1: str %3, [%0, #" __stringify(AT91_DDRSDRC_RTR) "]\n\t"
102 /* Power down SDRAM0 */ 102 /* Power down SDRAM0 */
103 " str %4, [%0, #" __stringify(AT91_DDRSDRC_RTR) "]\n\t" 103 " str %4, [%0, #" __stringify(AT91_DDRSDRC_LPR) "]\n\t"
104 /* Disable SDRAM1 accesses */ 104 /* Disable SDRAM1 accesses */
105 " strne %3, [%1, #" __stringify(AT91_DDRSDRC_RTR) "]\n\t" 105 " strne %3, [%1, #" __stringify(AT91_DDRSDRC_RTR) "]\n\t"
106 /* Power down SDRAM1 */ 106 /* Power down SDRAM1 */
107 " strne %4, [%1, #" __stringify(AT91_DDRSDRC_RTR) "]\n\t" 107 " strne %4, [%1, #" __stringify(AT91_DDRSDRC_LPR) "]\n\t"
108 /* Reset CPU */ 108 /* Reset CPU */
109 " str %5, [%2, #" __stringify(AT91_RSTC_CR) "]\n\t" 109 " str %5, [%2, #" __stringify(AT91_RSTC_CR) "]\n\t"
110 110
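
The at91-reset fix is register selection only: both "Power down SDRAMx" writes were hitting AT91_DDRSDRC_RTR (the refresh-timer register) a second time; they now target AT91_DDRSDRC_LPR, the low-power register the surrounding comments were describing, so the SDRAM controllers are actually put into low-power mode before the CPU reset is triggered.
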
diff --git a/drivers/pwm/Kconfig b/drivers/pwm/Kconfig
index b800783800a3..ef2dd2e4754b 100644
--- a/drivers/pwm/Kconfig
+++ b/drivers/pwm/Kconfig
@@ -83,6 +83,7 @@ config PWM_BFIN
83config PWM_CLPS711X 83config PWM_CLPS711X
84 tristate "CLPS711X PWM support" 84 tristate "CLPS711X PWM support"
85 depends on ARCH_CLPS711X || COMPILE_TEST 85 depends on ARCH_CLPS711X || COMPILE_TEST
86 depends on HAS_IOMEM
86 help 87 help
87 Generic PWM framework driver for Cirrus Logic CLPS711X. 88 Generic PWM framework driver for Cirrus Logic CLPS711X.
88 89
@@ -101,6 +102,7 @@ config PWM_EP93XX
101config PWM_FSL_FTM 102config PWM_FSL_FTM
102 tristate "Freescale FlexTimer Module (FTM) PWM support" 103 tristate "Freescale FlexTimer Module (FTM) PWM support"
103 depends on OF 104 depends on OF
105 select REGMAP_MMIO
104 help 106 help
105 Generic FTM PWM framework driver for Freescale VF610 and 107 Generic FTM PWM framework driver for Freescale VF610 and
106 Layerscape LS-1 SoCs. 108 Layerscape LS-1 SoCs.
@@ -149,7 +151,7 @@ config PWM_LPC32XX
149 151
150config PWM_LPSS 152config PWM_LPSS
151 tristate "Intel LPSS PWM support" 153 tristate "Intel LPSS PWM support"
152 depends on ACPI 154 depends on X86
153 help 155 help
154 Generic PWM framework driver for Intel Low Power Subsystem PWM 156 Generic PWM framework driver for Intel Low Power Subsystem PWM
155 controller. 157 controller.
@@ -157,6 +159,24 @@ config PWM_LPSS
157 To compile this driver as a module, choose M here: the module 159 To compile this driver as a module, choose M here: the module
158 will be called pwm-lpss. 160 will be called pwm-lpss.
159 161
162config PWM_LPSS_PCI
163 tristate "Intel LPSS PWM PCI driver"
164 depends on PWM_LPSS && PCI
165 help
166 The PCI driver for Intel Low Power Subsystem PWM controller.
167
168 To compile this driver as a module, choose M here: the module
169 will be called pwm-lpss-pci.
170
171config PWM_LPSS_PLATFORM
172 tristate "Intel LPSS PWM platform driver"
173 depends on PWM_LPSS && ACPI
174 help
175 The platform driver for Intel Low Power Subsystem PWM controller.
176
177 To compile this driver as a module, choose M here: the module
178 will be called pwm-lpss-platform.
179
160config PWM_MXS 180config PWM_MXS
161 tristate "Freescale MXS PWM support" 181 tristate "Freescale MXS PWM support"
162 depends on ARCH_MXS && OF 182 depends on ARCH_MXS && OF
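
The Kconfig block above tracks the pwm-lpss split further down in this series: PWM_LPSS itself becomes the shared core (now depending on X86 rather than ACPI, since the ACPI bits move out), while the new PWM_LPSS_PCI and PWM_LPSS_PLATFORM options build the thin PCI and ACPI/platform front ends from pwm-lpss-pci.c and pwm-lpss-platform.c.
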
diff --git a/drivers/pwm/Makefile b/drivers/pwm/Makefile
index f8c577d41091..c458606c3755 100644
--- a/drivers/pwm/Makefile
+++ b/drivers/pwm/Makefile
@@ -13,6 +13,8 @@ obj-$(CONFIG_PWM_JZ4740) += pwm-jz4740.o
13obj-$(CONFIG_PWM_LP3943) += pwm-lp3943.o 13obj-$(CONFIG_PWM_LP3943) += pwm-lp3943.o
14obj-$(CONFIG_PWM_LPC32XX) += pwm-lpc32xx.o 14obj-$(CONFIG_PWM_LPC32XX) += pwm-lpc32xx.o
15obj-$(CONFIG_PWM_LPSS) += pwm-lpss.o 15obj-$(CONFIG_PWM_LPSS) += pwm-lpss.o
16obj-$(CONFIG_PWM_LPSS_PCI) += pwm-lpss-pci.o
17obj-$(CONFIG_PWM_LPSS_PLATFORM) += pwm-lpss-platform.o
16obj-$(CONFIG_PWM_MXS) += pwm-mxs.o 18obj-$(CONFIG_PWM_MXS) += pwm-mxs.o
17obj-$(CONFIG_PWM_PCA9685) += pwm-pca9685.o 19obj-$(CONFIG_PWM_PCA9685) += pwm-pca9685.o
18obj-$(CONFIG_PWM_PUV3) += pwm-puv3.o 20obj-$(CONFIG_PWM_PUV3) += pwm-puv3.o
diff --git a/drivers/pwm/core.c b/drivers/pwm/core.c
index d2c35920ff08..966497d10c6e 100644
--- a/drivers/pwm/core.c
+++ b/drivers/pwm/core.c
@@ -236,7 +236,7 @@ int pwmchip_add(struct pwm_chip *chip)
236 int ret; 236 int ret;
237 237
238 if (!chip || !chip->dev || !chip->ops || !chip->ops->config || 238 if (!chip || !chip->dev || !chip->ops || !chip->ops->config ||
239 !chip->ops->enable || !chip->ops->disable) 239 !chip->ops->enable || !chip->ops->disable || !chip->npwm)
240 return -EINVAL; 240 return -EINVAL;
241 241
242 mutex_lock(&pwm_lock); 242 mutex_lock(&pwm_lock);
@@ -602,12 +602,9 @@ struct pwm_device *pwm_get(struct device *dev, const char *con_id)
602 struct pwm_device *pwm = ERR_PTR(-EPROBE_DEFER); 602 struct pwm_device *pwm = ERR_PTR(-EPROBE_DEFER);
603 const char *dev_id = dev ? dev_name(dev) : NULL; 603 const char *dev_id = dev ? dev_name(dev) : NULL;
604 struct pwm_chip *chip = NULL; 604 struct pwm_chip *chip = NULL;
605 unsigned int index = 0;
606 unsigned int best = 0; 605 unsigned int best = 0;
607 struct pwm_lookup *p; 606 struct pwm_lookup *p, *chosen = NULL;
608 unsigned int match; 607 unsigned int match;
609 unsigned int period;
610 enum pwm_polarity polarity;
611 608
612 /* look up via DT first */ 609 /* look up via DT first */
613 if (IS_ENABLED(CONFIG_OF) && dev && dev->of_node) 610 if (IS_ENABLED(CONFIG_OF) && dev && dev->of_node)
@@ -653,10 +650,7 @@ struct pwm_device *pwm_get(struct device *dev, const char *con_id)
653 } 650 }
654 651
655 if (match > best) { 652 if (match > best) {
656 chip = pwmchip_find_by_name(p->provider); 653 chosen = p;
657 index = p->index;
658 period = p->period;
659 polarity = p->polarity;
660 654
661 if (match != 3) 655 if (match != 3)
662 best = match; 656 best = match;
@@ -665,17 +659,22 @@ struct pwm_device *pwm_get(struct device *dev, const char *con_id)
665 } 659 }
666 } 660 }
667 661
668 mutex_unlock(&pwm_lookup_lock); 662 if (!chosen)
663 goto out;
669 664
670 if (chip) 665 chip = pwmchip_find_by_name(chosen->provider);
671 pwm = pwm_request_from_chip(chip, index, con_id ?: dev_id); 666 if (!chip)
672 if (IS_ERR(pwm)) 667 goto out;
673 return pwm;
674 668
675 pwm_set_period(pwm, period); 669 pwm = pwm_request_from_chip(chip, chosen->index, con_id ?: dev_id);
676 pwm_set_polarity(pwm, polarity); 670 if (IS_ERR(pwm))
671 goto out;
677 672
673 pwm_set_period(pwm, chosen->period);
674 pwm_set_polarity(pwm, chosen->polarity);
678 675
676out:
677 mutex_unlock(&pwm_lookup_lock);
679 return pwm; 678 return pwm;
680} 679}
681EXPORT_SYMBOL_GPL(pwm_get); 680EXPORT_SYMBOL_GPL(pwm_get);
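
In pwm_get(), the table walk now just remembers the best-matching entry ("chosen") and defers the chip lookup and the period/polarity programming until after the loop, dropping local copies that could look uninitialised to the compiler; every exit funnels through the single out label so pwm_lookup_lock is released exactly once. For context, a sketch of the consumer side that feeds this table (the provider, device name and period values are illustrative):

#include <linux/kernel.h>
#include <linux/pwm.h>

static struct pwm_lookup board_pwm_lookup[] = {
	{
		.provider = "pwm-lpss",		/* chip name to match */
		.index = 0,			/* hwpwm index on that chip */
		.dev_id = "pwm-backlight",	/* consumer device name */
		.con_id = NULL,			/* optional connection id */
		.period = 5000000,		/* ns, applied by pwm_get() */
		.polarity = PWM_POLARITY_NORMAL,
	},
};

static void board_register_pwm_lookup(void)
{
	pwm_add_table(board_pwm_lookup, ARRAY_SIZE(board_pwm_lookup));
}
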
diff --git a/drivers/pwm/pwm-atmel.c b/drivers/pwm/pwm-atmel.c
index 6e700a541ca3..d3c22de9ee47 100644
--- a/drivers/pwm/pwm-atmel.c
+++ b/drivers/pwm/pwm-atmel.c
@@ -102,7 +102,7 @@ static int atmel_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
102 int duty_ns, int period_ns) 102 int duty_ns, int period_ns)
103{ 103{
104 struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip); 104 struct atmel_pwm_chip *atmel_pwm = to_atmel_pwm_chip(chip);
105 unsigned long clk_rate, prd, dty; 105 unsigned long prd, dty;
106 unsigned long long div; 106 unsigned long long div;
107 unsigned int pres = 0; 107 unsigned int pres = 0;
108 u32 val; 108 u32 val;
@@ -113,20 +113,18 @@ static int atmel_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
113 return -EBUSY; 113 return -EBUSY;
114 } 114 }
115 115
116 clk_rate = clk_get_rate(atmel_pwm->clk); 116 /* Calculate the period cycles and prescale value */
117 div = clk_rate; 117 div = (unsigned long long)clk_get_rate(atmel_pwm->clk) * period_ns;
118 do_div(div, NSEC_PER_SEC);
118 119
119 /* Calculate the period cycles */
120 while (div > PWM_MAX_PRD) { 120 while (div > PWM_MAX_PRD) {
121 div = clk_rate / (1 << pres); 121 div >>= 1;
122 div = div * period_ns; 122 pres++;
123 /* 1/Hz = 100000000 ns */ 123 }
124 do_div(div, 1000000000); 124
125 125 if (pres > PRD_MAX_PRES) {
126 if (pres++ > PRD_MAX_PRES) { 126 dev_err(chip->dev, "pres exceeds the maximum value\n");
127 dev_err(chip->dev, "pres exceeds the maximum value\n"); 127 return -EINVAL;
128 return -EINVAL;
129 }
130 } 128 }
131 129
132 /* Calculate the duty cycles */ 130 /* Calculate the duty cycles */
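
The pwm-atmel change replaces the loop that recomputed clk_rate / (1 << pres) and re-multiplied by period_ns on every pass with a single 64-bit cycle count (clk_rate * period_ns / NSEC_PER_SEC) that is then halved while the prescaler is bumped. Worked example with illustrative numbers, taking PWM_MAX_PRD as 0xffff purely for illustration: a 133 MHz clock and a 10 ms period give div = 133000000 * 10000000 / 1000000000 = 1330000 cycles; five halvings (pres = 5, i.e. clock/32) bring that down to 41562, which fits the period register.
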
diff --git a/drivers/pwm/pwm-fsl-ftm.c b/drivers/pwm/pwm-fsl-ftm.c
index a18bc8fea385..0f2cc7ef7784 100644
--- a/drivers/pwm/pwm-fsl-ftm.c
+++ b/drivers/pwm/pwm-fsl-ftm.c
@@ -18,14 +18,14 @@
18#include <linux/of_address.h> 18#include <linux/of_address.h>
19#include <linux/platform_device.h> 19#include <linux/platform_device.h>
20#include <linux/pwm.h> 20#include <linux/pwm.h>
21#include <linux/regmap.h>
21#include <linux/slab.h> 22#include <linux/slab.h>
22 23
23#define FTM_SC 0x00 24#define FTM_SC 0x00
24#define FTM_SC_CLK_MASK 0x3 25#define FTM_SC_CLK_MASK_SHIFT 3
25#define FTM_SC_CLK_SHIFT 3 26#define FTM_SC_CLK_MASK (3 << FTM_SC_CLK_MASK_SHIFT)
26#define FTM_SC_CLK(c) (((c) + 1) << FTM_SC_CLK_SHIFT) 27#define FTM_SC_CLK(c) (((c) + 1) << FTM_SC_CLK_MASK_SHIFT)
27#define FTM_SC_PS_MASK 0x7 28#define FTM_SC_PS_MASK 0x7
28#define FTM_SC_PS_SHIFT 0
29 29
30#define FTM_CNT 0x04 30#define FTM_CNT 0x04
31#define FTM_MOD 0x08 31#define FTM_MOD 0x08
@@ -83,7 +83,7 @@ struct fsl_pwm_chip {
83 unsigned int cnt_select; 83 unsigned int cnt_select;
84 unsigned int clk_ps; 84 unsigned int clk_ps;
85 85
86 void __iomem *base; 86 struct regmap *regmap;
87 87
88 int period_ns; 88 int period_ns;
89 89
@@ -219,10 +219,11 @@ static unsigned long fsl_pwm_calculate_duty(struct fsl_pwm_chip *fpc,
219 unsigned long period_ns, 219 unsigned long period_ns,
220 unsigned long duty_ns) 220 unsigned long duty_ns)
221{ 221{
222 unsigned long long val, duty; 222 unsigned long long duty;
223 u32 val;
223 224
224 val = readl(fpc->base + FTM_MOD); 225 regmap_read(fpc->regmap, FTM_MOD, &val);
225 duty = duty_ns * (val + 1); 226 duty = (unsigned long long)duty_ns * (val + 1);
226 do_div(duty, period_ns); 227 do_div(duty, period_ns);
227 228
228 return (unsigned long)duty; 229 return (unsigned long)duty;
@@ -232,7 +233,7 @@ static int fsl_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
232 int duty_ns, int period_ns) 233 int duty_ns, int period_ns)
233{ 234{
234 struct fsl_pwm_chip *fpc = to_fsl_chip(chip); 235 struct fsl_pwm_chip *fpc = to_fsl_chip(chip);
235 u32 val, period, duty; 236 u32 period, duty;
236 237
237 mutex_lock(&fpc->lock); 238 mutex_lock(&fpc->lock);
238 239
@@ -257,11 +258,9 @@ static int fsl_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
257 return -EINVAL; 258 return -EINVAL;
258 } 259 }
259 260
260 val = readl(fpc->base + FTM_SC); 261 regmap_update_bits(fpc->regmap, FTM_SC, FTM_SC_PS_MASK,
261 val &= ~(FTM_SC_PS_MASK << FTM_SC_PS_SHIFT); 262 fpc->clk_ps);
262 val |= fpc->clk_ps; 263 regmap_write(fpc->regmap, FTM_MOD, period - 1);
263 writel(val, fpc->base + FTM_SC);
264 writel(period - 1, fpc->base + FTM_MOD);
265 264
266 fpc->period_ns = period_ns; 265 fpc->period_ns = period_ns;
267 } 266 }
@@ -270,8 +269,9 @@ static int fsl_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
270 269
271 duty = fsl_pwm_calculate_duty(fpc, period_ns, duty_ns); 270 duty = fsl_pwm_calculate_duty(fpc, period_ns, duty_ns);
272 271
273 writel(FTM_CSC_MSB | FTM_CSC_ELSB, fpc->base + FTM_CSC(pwm->hwpwm)); 272 regmap_write(fpc->regmap, FTM_CSC(pwm->hwpwm),
274 writel(duty, fpc->base + FTM_CV(pwm->hwpwm)); 273 FTM_CSC_MSB | FTM_CSC_ELSB);
274 regmap_write(fpc->regmap, FTM_CV(pwm->hwpwm), duty);
275 275
276 return 0; 276 return 0;
277} 277}
@@ -283,31 +283,28 @@ static int fsl_pwm_set_polarity(struct pwm_chip *chip,
283 struct fsl_pwm_chip *fpc = to_fsl_chip(chip); 283 struct fsl_pwm_chip *fpc = to_fsl_chip(chip);
284 u32 val; 284 u32 val;
285 285
286 val = readl(fpc->base + FTM_POL); 286 regmap_read(fpc->regmap, FTM_POL, &val);
287 287
288 if (polarity == PWM_POLARITY_INVERSED) 288 if (polarity == PWM_POLARITY_INVERSED)
289 val |= BIT(pwm->hwpwm); 289 val |= BIT(pwm->hwpwm);
290 else 290 else
291 val &= ~BIT(pwm->hwpwm); 291 val &= ~BIT(pwm->hwpwm);
292 292
293 writel(val, fpc->base + FTM_POL); 293 regmap_write(fpc->regmap, FTM_POL, val);
294 294
295 return 0; 295 return 0;
296} 296}
297 297
298static int fsl_counter_clock_enable(struct fsl_pwm_chip *fpc) 298static int fsl_counter_clock_enable(struct fsl_pwm_chip *fpc)
299{ 299{
300 u32 val;
301 int ret; 300 int ret;
302 301
303 if (fpc->use_count != 0) 302 if (fpc->use_count != 0)
304 return 0; 303 return 0;
305 304
306 /* select counter clock source */ 305 /* select counter clock source */
307 val = readl(fpc->base + FTM_SC); 306 regmap_update_bits(fpc->regmap, FTM_SC, FTM_SC_CLK_MASK,
308 val &= ~(FTM_SC_CLK_MASK << FTM_SC_CLK_SHIFT); 307 FTM_SC_CLK(fpc->cnt_select));
309 val |= FTM_SC_CLK(fpc->cnt_select);
310 writel(val, fpc->base + FTM_SC);
311 308
312 ret = clk_prepare_enable(fpc->clk[fpc->cnt_select]); 309 ret = clk_prepare_enable(fpc->clk[fpc->cnt_select]);
313 if (ret) 310 if (ret)
@@ -327,13 +324,10 @@ static int fsl_counter_clock_enable(struct fsl_pwm_chip *fpc)
327static int fsl_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) 324static int fsl_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
328{ 325{
329 struct fsl_pwm_chip *fpc = to_fsl_chip(chip); 326 struct fsl_pwm_chip *fpc = to_fsl_chip(chip);
330 u32 val;
331 int ret; 327 int ret;
332 328
333 mutex_lock(&fpc->lock); 329 mutex_lock(&fpc->lock);
334 val = readl(fpc->base + FTM_OUTMASK); 330 regmap_update_bits(fpc->regmap, FTM_OUTMASK, BIT(pwm->hwpwm), 0);
335 val &= ~BIT(pwm->hwpwm);
336 writel(val, fpc->base + FTM_OUTMASK);
337 331
338 ret = fsl_counter_clock_enable(fpc); 332 ret = fsl_counter_clock_enable(fpc);
339 mutex_unlock(&fpc->lock); 333 mutex_unlock(&fpc->lock);
@@ -343,8 +337,6 @@ static int fsl_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
343 337
344static void fsl_counter_clock_disable(struct fsl_pwm_chip *fpc) 338static void fsl_counter_clock_disable(struct fsl_pwm_chip *fpc)
345{ 339{
346 u32 val;
347
348 /* 340 /*
349 * already disabled, do nothing 341 * already disabled, do nothing
350 */ 342 */
@@ -356,9 +348,7 @@ static void fsl_counter_clock_disable(struct fsl_pwm_chip *fpc)
356 return; 348 return;
357 349
358 /* no users left, disable PWM counter clock */ 350 /* no users left, disable PWM counter clock */
359 val = readl(fpc->base + FTM_SC); 351 regmap_update_bits(fpc->regmap, FTM_SC, FTM_SC_CLK_MASK, 0);
360 val &= ~(FTM_SC_CLK_MASK << FTM_SC_CLK_SHIFT);
361 writel(val, fpc->base + FTM_SC);
362 352
363 clk_disable_unprepare(fpc->clk[FSL_PWM_CLK_CNTEN]); 353 clk_disable_unprepare(fpc->clk[FSL_PWM_CLK_CNTEN]);
364 clk_disable_unprepare(fpc->clk[fpc->cnt_select]); 354 clk_disable_unprepare(fpc->clk[fpc->cnt_select]);
@@ -370,14 +360,12 @@ static void fsl_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
370 u32 val; 360 u32 val;
371 361
372 mutex_lock(&fpc->lock); 362 mutex_lock(&fpc->lock);
373 val = readl(fpc->base + FTM_OUTMASK); 363 regmap_update_bits(fpc->regmap, FTM_OUTMASK, BIT(pwm->hwpwm),
374 val |= BIT(pwm->hwpwm); 364 BIT(pwm->hwpwm));
375 writel(val, fpc->base + FTM_OUTMASK);
376 365
377 fsl_counter_clock_disable(fpc); 366 fsl_counter_clock_disable(fpc);
378 367
379 val = readl(fpc->base + FTM_OUTMASK); 368 regmap_read(fpc->regmap, FTM_OUTMASK, &val);
380
381 if ((val & 0xFF) == 0xFF) 369 if ((val & 0xFF) == 0xFF)
382 fpc->period_ns = 0; 370 fpc->period_ns = 0;
383 371
@@ -402,19 +390,28 @@ static int fsl_pwm_init(struct fsl_pwm_chip *fpc)
402 if (ret) 390 if (ret)
403 return ret; 391 return ret;
404 392
405 writel(0x00, fpc->base + FTM_CNTIN); 393 regmap_write(fpc->regmap, FTM_CNTIN, 0x00);
406 writel(0x00, fpc->base + FTM_OUTINIT); 394 regmap_write(fpc->regmap, FTM_OUTINIT, 0x00);
407 writel(0xFF, fpc->base + FTM_OUTMASK); 395 regmap_write(fpc->regmap, FTM_OUTMASK, 0xFF);
408 396
409 clk_disable_unprepare(fpc->clk[FSL_PWM_CLK_SYS]); 397 clk_disable_unprepare(fpc->clk[FSL_PWM_CLK_SYS]);
410 398
411 return 0; 399 return 0;
412} 400}
413 401
402static const struct regmap_config fsl_pwm_regmap_config = {
403 .reg_bits = 32,
404 .reg_stride = 4,
405 .val_bits = 32,
406
407 .max_register = FTM_PWMLOAD,
408};
409
414static int fsl_pwm_probe(struct platform_device *pdev) 410static int fsl_pwm_probe(struct platform_device *pdev)
415{ 411{
416 struct fsl_pwm_chip *fpc; 412 struct fsl_pwm_chip *fpc;
417 struct resource *res; 413 struct resource *res;
414 void __iomem *base;
418 int ret; 415 int ret;
419 416
420 fpc = devm_kzalloc(&pdev->dev, sizeof(*fpc), GFP_KERNEL); 417 fpc = devm_kzalloc(&pdev->dev, sizeof(*fpc), GFP_KERNEL);
@@ -426,9 +423,16 @@ static int fsl_pwm_probe(struct platform_device *pdev)
426 fpc->chip.dev = &pdev->dev; 423 fpc->chip.dev = &pdev->dev;
427 424
428 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 425 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
429 fpc->base = devm_ioremap_resource(&pdev->dev, res); 426 base = devm_ioremap_resource(&pdev->dev, res);
430 if (IS_ERR(fpc->base)) 427 if (IS_ERR(base))
431 return PTR_ERR(fpc->base); 428 return PTR_ERR(base);
429
430 fpc->regmap = devm_regmap_init_mmio_clk(&pdev->dev, NULL, base,
431 &fsl_pwm_regmap_config);
432 if (IS_ERR(fpc->regmap)) {
433 dev_err(&pdev->dev, "regmap init failed\n");
434 return PTR_ERR(fpc->regmap);
435 }
432 436
433 fpc->clk[FSL_PWM_CLK_SYS] = devm_clk_get(&pdev->dev, "ftm_sys"); 437 fpc->clk[FSL_PWM_CLK_SYS] = devm_clk_get(&pdev->dev, "ftm_sys");
434 if (IS_ERR(fpc->clk[FSL_PWM_CLK_SYS])) { 438 if (IS_ERR(fpc->clk[FSL_PWM_CLK_SYS])) {
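
The pwm-fsl-ftm conversion swaps direct readl()/writel() access for a regmap, so every read-modify-write collapses into regmap_update_bits() and the FTM_SC clock-select field gets a proper mask definition. The conversion pattern in miniature (register and bit names below are invented):

#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/regmap.h>

#define EXAMPLE_CTRL		0x10
#define EXAMPLE_CTRL_EN		BIT(0)

static const struct regmap_config example_regmap_config = {
	.reg_bits = 32,
	.reg_stride = 4,
	.val_bits = 32,
};

static int example_enable(struct device *dev, void __iomem *base)
{
	struct regmap *map;

	map = devm_regmap_init_mmio(dev, base, &example_regmap_config);
	if (IS_ERR(map))
		return PTR_ERR(map);

	/* replaces: val = readl(base + EXAMPLE_CTRL); val |= EXAMPLE_CTRL_EN;
	 *           writel(val, base + EXAMPLE_CTRL); */
	return regmap_update_bits(map, EXAMPLE_CTRL, EXAMPLE_CTRL_EN,
				  EXAMPLE_CTRL_EN);
}
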
diff --git a/drivers/pwm/pwm-imx.c b/drivers/pwm/pwm-imx.c
index 5449d9150d40..f8b5f109c1ab 100644
--- a/drivers/pwm/pwm-imx.c
+++ b/drivers/pwm/pwm-imx.c
@@ -14,6 +14,7 @@
14#include <linux/slab.h> 14#include <linux/slab.h>
15#include <linux/err.h> 15#include <linux/err.h>
16#include <linux/clk.h> 16#include <linux/clk.h>
17#include <linux/delay.h>
17#include <linux/io.h> 18#include <linux/io.h>
18#include <linux/pwm.h> 19#include <linux/pwm.h>
19#include <linux/of.h> 20#include <linux/of.h>
@@ -21,24 +22,30 @@
21 22
22/* i.MX1 and i.MX21 share the same PWM function block: */ 23/* i.MX1 and i.MX21 share the same PWM function block: */
23 24
24#define MX1_PWMC 0x00 /* PWM Control Register */ 25#define MX1_PWMC 0x00 /* PWM Control Register */
25#define MX1_PWMS 0x04 /* PWM Sample Register */ 26#define MX1_PWMS 0x04 /* PWM Sample Register */
26#define MX1_PWMP 0x08 /* PWM Period Register */ 27#define MX1_PWMP 0x08 /* PWM Period Register */
27 28
28#define MX1_PWMC_EN (1 << 4) 29#define MX1_PWMC_EN (1 << 4)
29 30
30/* i.MX27, i.MX31, i.MX35 share the same PWM function block: */ 31/* i.MX27, i.MX31, i.MX35 share the same PWM function block: */
31 32
32#define MX3_PWMCR 0x00 /* PWM Control Register */ 33#define MX3_PWMCR 0x00 /* PWM Control Register */
33#define MX3_PWMSAR 0x0C /* PWM Sample Register */ 34#define MX3_PWMSR 0x04 /* PWM Status Register */
34#define MX3_PWMPR 0x10 /* PWM Period Register */ 35#define MX3_PWMSAR 0x0C /* PWM Sample Register */
35#define MX3_PWMCR_PRESCALER(x) (((x - 1) & 0xFFF) << 4) 36#define MX3_PWMPR 0x10 /* PWM Period Register */
36#define MX3_PWMCR_DOZEEN (1 << 24) 37#define MX3_PWMCR_PRESCALER(x) ((((x) - 1) & 0xFFF) << 4)
37#define MX3_PWMCR_WAITEN (1 << 23) 38#define MX3_PWMCR_DOZEEN (1 << 24)
39#define MX3_PWMCR_WAITEN (1 << 23)
38#define MX3_PWMCR_DBGEN (1 << 22) 40#define MX3_PWMCR_DBGEN (1 << 22)
39#define MX3_PWMCR_CLKSRC_IPG_HIGH (2 << 16) 41#define MX3_PWMCR_CLKSRC_IPG_HIGH (2 << 16)
40#define MX3_PWMCR_CLKSRC_IPG (1 << 16) 42#define MX3_PWMCR_CLKSRC_IPG (1 << 16)
41#define MX3_PWMCR_EN (1 << 0) 43#define MX3_PWMCR_SWR (1 << 3)
44#define MX3_PWMCR_EN (1 << 0)
45#define MX3_PWMSR_FIFOAV_4WORDS 0x4
46#define MX3_PWMSR_FIFOAV_MASK 0x7
47
48#define MX3_PWM_SWR_LOOP 5
42 49
43struct imx_chip { 50struct imx_chip {
44 struct clk *clk_per; 51 struct clk *clk_per;
@@ -103,9 +110,43 @@ static int imx_pwm_config_v2(struct pwm_chip *chip,
103 struct pwm_device *pwm, int duty_ns, int period_ns) 110 struct pwm_device *pwm, int duty_ns, int period_ns)
104{ 111{
105 struct imx_chip *imx = to_imx_chip(chip); 112 struct imx_chip *imx = to_imx_chip(chip);
113 struct device *dev = chip->dev;
106 unsigned long long c; 114 unsigned long long c;
107 unsigned long period_cycles, duty_cycles, prescale; 115 unsigned long period_cycles, duty_cycles, prescale;
108 u32 cr; 116 unsigned int period_ms;
117 bool enable = test_bit(PWMF_ENABLED, &pwm->flags);
118 int wait_count = 0, fifoav;
119 u32 cr, sr;
120
121 /*
122 * i.MX PWMv2 has a 4-word sample FIFO.
123 * In order to avoid FIFO overflow issue, we do software reset
124 * to clear all sample FIFO if the controller is disabled or
125 * wait for a full PWM cycle to get a relinquished FIFO slot
126 * when the controller is enabled and the FIFO is fully loaded.
127 */
128 if (enable) {
129 sr = readl(imx->mmio_base + MX3_PWMSR);
130 fifoav = sr & MX3_PWMSR_FIFOAV_MASK;
131 if (fifoav == MX3_PWMSR_FIFOAV_4WORDS) {
132 period_ms = DIV_ROUND_UP(pwm->period, NSEC_PER_MSEC);
133 msleep(period_ms);
134
135 sr = readl(imx->mmio_base + MX3_PWMSR);
136 if (fifoav == (sr & MX3_PWMSR_FIFOAV_MASK))
137 dev_warn(dev, "there is no free FIFO slot\n");
138 }
139 } else {
140 writel(MX3_PWMCR_SWR, imx->mmio_base + MX3_PWMCR);
141 do {
142 usleep_range(200, 1000);
143 cr = readl(imx->mmio_base + MX3_PWMCR);
144 } while ((cr & MX3_PWMCR_SWR) &&
145 (wait_count++ < MX3_PWM_SWR_LOOP));
146
147 if (cr & MX3_PWMCR_SWR)
148 dev_warn(dev, "software reset timeout\n");
149 }
109 150
110 c = clk_get_rate(imx->clk_per); 151 c = clk_get_rate(imx->clk_per);
111 c = c * period_ns; 152 c = c * period_ns;
@@ -135,7 +176,7 @@ static int imx_pwm_config_v2(struct pwm_chip *chip,
135 MX3_PWMCR_DOZEEN | MX3_PWMCR_WAITEN | 176 MX3_PWMCR_DOZEEN | MX3_PWMCR_WAITEN |
136 MX3_PWMCR_DBGEN | MX3_PWMCR_CLKSRC_IPG_HIGH; 177 MX3_PWMCR_DBGEN | MX3_PWMCR_CLKSRC_IPG_HIGH;
137 178
138 if (test_bit(PWMF_ENABLED, &pwm->flags)) 179 if (enable)
139 cr |= MX3_PWMCR_EN; 180 cr |= MX3_PWMCR_EN;
140 181
141 writel(cr, imx->mmio_base + MX3_PWMCR); 182 writel(cr, imx->mmio_base + MX3_PWMCR);
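
The new imx_pwm_config_v2() preamble deals with the 4-word sample FIFO: if the PWM is already enabled and the FIFO reports 4 words, it sleeps one full period (DIV_ROUND_UP of pwm->period to milliseconds, so a 20 ms / 50 Hz period becomes msleep(20)) and warns if no slot has freed up; if the PWM is disabled, it instead issues the MX3_PWMCR_SWR software reset and polls up to MX3_PWM_SWR_LOOP (5) times at 200-1000 us intervals for the bit to self-clear before loading the new sample.
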
diff --git a/drivers/pwm/pwm-lpss-pci.c b/drivers/pwm/pwm-lpss-pci.c
new file mode 100644
index 000000000000..cf20d2beacdd
--- /dev/null
+++ b/drivers/pwm/pwm-lpss-pci.c
@@ -0,0 +1,64 @@
1/*
2 * Intel Low Power Subsystem PWM controller PCI driver
3 *
4 * Copyright (C) 2014, Intel Corporation
5 *
6 * Derived from the original pwm-lpss.c
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/kernel.h>
14#include <linux/module.h>
15#include <linux/pci.h>
16
17#include "pwm-lpss.h"
18
19static int pwm_lpss_probe_pci(struct pci_dev *pdev,
20 const struct pci_device_id *id)
21{
22 const struct pwm_lpss_boardinfo *info;
23 struct pwm_lpss_chip *lpwm;
24 int err;
25
26 err = pcim_enable_device(pdev);
27 if (err < 0)
28 return err;
29
30 info = (struct pwm_lpss_boardinfo *)id->driver_data;
31 lpwm = pwm_lpss_probe(&pdev->dev, &pdev->resource[0], info);
32 if (IS_ERR(lpwm))
33 return PTR_ERR(lpwm);
34
35 pci_set_drvdata(pdev, lpwm);
36 return 0;
37}
38
39static void pwm_lpss_remove_pci(struct pci_dev *pdev)
40{
41 struct pwm_lpss_chip *lpwm = pci_get_drvdata(pdev);
42
43 pwm_lpss_remove(lpwm);
44}
45
46static const struct pci_device_id pwm_lpss_pci_ids[] = {
47 { PCI_VDEVICE(INTEL, 0x0f08), (unsigned long)&pwm_lpss_byt_info},
48 { PCI_VDEVICE(INTEL, 0x0f09), (unsigned long)&pwm_lpss_byt_info},
49 { PCI_VDEVICE(INTEL, 0x2288), (unsigned long)&pwm_lpss_bsw_info},
50 { PCI_VDEVICE(INTEL, 0x2289), (unsigned long)&pwm_lpss_bsw_info},
51 { },
52};
53MODULE_DEVICE_TABLE(pci, pwm_lpss_pci_ids);
54
55static struct pci_driver pwm_lpss_driver_pci = {
56 .name = "pwm-lpss",
57 .id_table = pwm_lpss_pci_ids,
58 .probe = pwm_lpss_probe_pci,
59 .remove = pwm_lpss_remove_pci,
60};
61module_pci_driver(pwm_lpss_driver_pci);
62
63MODULE_DESCRIPTION("PWM PCI driver for Intel LPSS");
64MODULE_LICENSE("GPL v2");
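
Compared with the PCI glue that used to live inside pwm-lpss.c, this new front end switches to the managed pcim_enable_device(), which is why the remove path no longer needs an explicit pci_disable_device(), and it extends the ID table with the Braswell PWM devices (0x2288/0x2289) next to the existing BayTrail ones.
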
diff --git a/drivers/pwm/pwm-lpss-platform.c b/drivers/pwm/pwm-lpss-platform.c
new file mode 100644
index 000000000000..18a9c880a76d
--- /dev/null
+++ b/drivers/pwm/pwm-lpss-platform.c
@@ -0,0 +1,68 @@
1/*
2 * Intel Low Power Subsystem PWM controller driver
3 *
4 * Copyright (C) 2014, Intel Corporation
5 *
6 * Derived from the original pwm-lpss.c
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/acpi.h>
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/platform_device.h>
17
18#include "pwm-lpss.h"
19
20static int pwm_lpss_probe_platform(struct platform_device *pdev)
21{
22 const struct pwm_lpss_boardinfo *info;
23 const struct acpi_device_id *id;
24 struct pwm_lpss_chip *lpwm;
25 struct resource *r;
26
27 id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev);
28 if (!id)
29 return -ENODEV;
30
31 info = (const struct pwm_lpss_boardinfo *)id->driver_data;
32 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
33
34 lpwm = pwm_lpss_probe(&pdev->dev, r, info);
35 if (IS_ERR(lpwm))
36 return PTR_ERR(lpwm);
37
38 platform_set_drvdata(pdev, lpwm);
39 return 0;
40}
41
42static int pwm_lpss_remove_platform(struct platform_device *pdev)
43{
44 struct pwm_lpss_chip *lpwm = platform_get_drvdata(pdev);
45
46 return pwm_lpss_remove(lpwm);
47}
48
49static const struct acpi_device_id pwm_lpss_acpi_match[] = {
50 { "80860F09", (unsigned long)&pwm_lpss_byt_info },
51 { "80862288", (unsigned long)&pwm_lpss_bsw_info },
52 { },
53};
54MODULE_DEVICE_TABLE(acpi, pwm_lpss_acpi_match);
55
56static struct platform_driver pwm_lpss_driver_platform = {
57 .driver = {
58 .name = "pwm-lpss",
59 .acpi_match_table = pwm_lpss_acpi_match,
60 },
61 .probe = pwm_lpss_probe_platform,
62 .remove = pwm_lpss_remove_platform,
63};
64module_platform_driver(pwm_lpss_driver_platform);
65
66MODULE_DESCRIPTION("PWM platform driver for Intel LPSS");
67MODULE_LICENSE("GPL v2");
68MODULE_ALIAS("platform:pwm-lpss");
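
The platform front end is the ACPI counterpart: it looks up the matching acpi_device_id with acpi_match_device() and takes the board info from its driver_data, just as the PCI variant does from pci_device_id, and it likewise adds the Braswell ID ("80862288") that the old built-in table did not have.
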
diff --git a/drivers/pwm/pwm-lpss.c b/drivers/pwm/pwm-lpss.c
index 4df994f72d96..e9798253a16f 100644
--- a/drivers/pwm/pwm-lpss.c
+++ b/drivers/pwm/pwm-lpss.c
@@ -13,15 +13,11 @@
13 * published by the Free Software Foundation. 13 * published by the Free Software Foundation.
14 */ 14 */
15 15
16#include <linux/acpi.h> 16#include <linux/io.h>
17#include <linux/device.h>
18#include <linux/kernel.h> 17#include <linux/kernel.h>
19#include <linux/module.h> 18#include <linux/module.h>
20#include <linux/pwm.h>
21#include <linux/platform_device.h>
22#include <linux/pci.h>
23 19
24static int pci_drv, plat_drv; /* So we know which drivers registered */ 20#include "pwm-lpss.h"
25 21
26#define PWM 0x00000000 22#define PWM 0x00000000
27#define PWM_ENABLE BIT(31) 23#define PWM_ENABLE BIT(31)
@@ -39,14 +35,17 @@ struct pwm_lpss_chip {
39 unsigned long clk_rate; 35 unsigned long clk_rate;
40}; 36};
41 37
42struct pwm_lpss_boardinfo { 38/* BayTrail */
43 unsigned long clk_rate; 39const struct pwm_lpss_boardinfo pwm_lpss_byt_info = {
40 .clk_rate = 25000000
44}; 41};
42EXPORT_SYMBOL_GPL(pwm_lpss_byt_info);
45 43
46/* BayTrail */ 44/* Braswell */
47static const struct pwm_lpss_boardinfo byt_info = { 45const struct pwm_lpss_boardinfo pwm_lpss_bsw_info = {
48 25000000 46 .clk_rate = 19200000
49}; 47};
48EXPORT_SYMBOL_GPL(pwm_lpss_bsw_info);
50 49
51static inline struct pwm_lpss_chip *to_lpwm(struct pwm_chip *chip) 50static inline struct pwm_lpss_chip *to_lpwm(struct pwm_chip *chip)
52{ 51{
@@ -118,9 +117,8 @@ static const struct pwm_ops pwm_lpss_ops = {
118 .owner = THIS_MODULE, 117 .owner = THIS_MODULE,
119}; 118};
120 119
121static struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, 120struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r,
122 struct resource *r, 121 const struct pwm_lpss_boardinfo *info)
123 const struct pwm_lpss_boardinfo *info)
124{ 122{
125 struct pwm_lpss_chip *lpwm; 123 struct pwm_lpss_chip *lpwm;
126 int ret; 124 int ret;
@@ -147,8 +145,9 @@ static struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev,
147 145
148 return lpwm; 146 return lpwm;
149} 147}
148EXPORT_SYMBOL_GPL(pwm_lpss_probe);
150 149
151static int pwm_lpss_remove(struct pwm_lpss_chip *lpwm) 150int pwm_lpss_remove(struct pwm_lpss_chip *lpwm)
152{ 151{
153 u32 ctrl; 152 u32 ctrl;
154 153
@@ -157,114 +156,8 @@ static int pwm_lpss_remove(struct pwm_lpss_chip *lpwm)
157 156
158 return pwmchip_remove(&lpwm->chip); 157 return pwmchip_remove(&lpwm->chip);
159} 158}
160 159EXPORT_SYMBOL_GPL(pwm_lpss_remove);
161static int pwm_lpss_probe_pci(struct pci_dev *pdev,
162 const struct pci_device_id *id)
163{
164 const struct pwm_lpss_boardinfo *info;
165 struct pwm_lpss_chip *lpwm;
166 int err;
167
168 err = pci_enable_device(pdev);
169 if (err < 0)
170 return err;
171
172 info = (struct pwm_lpss_boardinfo *)id->driver_data;
173 lpwm = pwm_lpss_probe(&pdev->dev, &pdev->resource[0], info);
174 if (IS_ERR(lpwm))
175 return PTR_ERR(lpwm);
176
177 pci_set_drvdata(pdev, lpwm);
178 return 0;
179}
180
181static void pwm_lpss_remove_pci(struct pci_dev *pdev)
182{
183 struct pwm_lpss_chip *lpwm = pci_get_drvdata(pdev);
184
185 pwm_lpss_remove(lpwm);
186 pci_disable_device(pdev);
187}
188
189static struct pci_device_id pwm_lpss_pci_ids[] = {
190 { PCI_VDEVICE(INTEL, 0x0f08), (unsigned long)&byt_info},
191 { PCI_VDEVICE(INTEL, 0x0f09), (unsigned long)&byt_info},
192 { },
193};
194MODULE_DEVICE_TABLE(pci, pwm_lpss_pci_ids);
195
196static struct pci_driver pwm_lpss_driver_pci = {
197 .name = "pwm-lpss",
198 .id_table = pwm_lpss_pci_ids,
199 .probe = pwm_lpss_probe_pci,
200 .remove = pwm_lpss_remove_pci,
201};
202
203static int pwm_lpss_probe_platform(struct platform_device *pdev)
204{
205 const struct pwm_lpss_boardinfo *info;
206 const struct acpi_device_id *id;
207 struct pwm_lpss_chip *lpwm;
208 struct resource *r;
209
210 id = acpi_match_device(pdev->dev.driver->acpi_match_table, &pdev->dev);
211 if (!id)
212 return -ENODEV;
213
214 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
215
216 info = (struct pwm_lpss_boardinfo *)id->driver_data;
217 lpwm = pwm_lpss_probe(&pdev->dev, r, info);
218 if (IS_ERR(lpwm))
219 return PTR_ERR(lpwm);
220
221 platform_set_drvdata(pdev, lpwm);
222 return 0;
223}
224
225static int pwm_lpss_remove_platform(struct platform_device *pdev)
226{
227 struct pwm_lpss_chip *lpwm = platform_get_drvdata(pdev);
228
229 return pwm_lpss_remove(lpwm);
230}
231
232static const struct acpi_device_id pwm_lpss_acpi_match[] = {
233 { "80860F09", (unsigned long)&byt_info },
234 { },
235};
236MODULE_DEVICE_TABLE(acpi, pwm_lpss_acpi_match);
237
238static struct platform_driver pwm_lpss_driver_platform = {
239 .driver = {
240 .name = "pwm-lpss",
241 .acpi_match_table = pwm_lpss_acpi_match,
242 },
243 .probe = pwm_lpss_probe_platform,
244 .remove = pwm_lpss_remove_platform,
245};
246
247static int __init pwm_init(void)
248{
249 pci_drv = pci_register_driver(&pwm_lpss_driver_pci);
250 plat_drv = platform_driver_register(&pwm_lpss_driver_platform);
251 if (pci_drv && plat_drv)
252 return pci_drv;
253
254 return 0;
255}
256module_init(pwm_init);
257
258static void __exit pwm_exit(void)
259{
260 if (!pci_drv)
261 pci_unregister_driver(&pwm_lpss_driver_pci);
262 if (!plat_drv)
263 platform_driver_unregister(&pwm_lpss_driver_platform);
264}
265module_exit(pwm_exit);
266 160
267MODULE_DESCRIPTION("PWM driver for Intel LPSS"); 161MODULE_DESCRIPTION("PWM driver for Intel LPSS");
268MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>"); 162MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
269MODULE_LICENSE("GPL v2"); 163MODULE_LICENSE("GPL v2");
270MODULE_ALIAS("platform:pwm-lpss");
diff --git a/drivers/pwm/pwm-lpss.h b/drivers/pwm/pwm-lpss.h
new file mode 100644
index 000000000000..aa041bb1b67d
--- /dev/null
+++ b/drivers/pwm/pwm-lpss.h
@@ -0,0 +1,32 @@
1/*
2 * Intel Low Power Subsystem PWM controller driver
3 *
4 * Copyright (C) 2014, Intel Corporation
5 *
6 * Derived from the original pwm-lpss.c
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#ifndef __PWM_LPSS_H
14#define __PWM_LPSS_H
15
16#include <linux/device.h>
17#include <linux/pwm.h>
18
19struct pwm_lpss_chip;
20
21struct pwm_lpss_boardinfo {
22 unsigned long clk_rate;
23};
24
25extern const struct pwm_lpss_boardinfo pwm_lpss_byt_info;
26extern const struct pwm_lpss_boardinfo pwm_lpss_bsw_info;
27
28struct pwm_lpss_chip *pwm_lpss_probe(struct device *dev, struct resource *r,
29 const struct pwm_lpss_boardinfo *info);
30int pwm_lpss_remove(struct pwm_lpss_chip *lpwm);
31
32#endif /* __PWM_LPSS_H */
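
Note: the header above defines the interface now shared between the PWM core logic and its bus-specific glue; the PCI handling stripped out of pwm-lpss.c below presumably moves into a matching pwm-lpss-pci.c module. A minimal sketch of such a PCI front end, reconstructed from the code removed in this patch (device IDs and boardinfo symbols are taken from the hunks here, not verified against the final file):

#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>

#include "pwm-lpss.h"

static int pwm_lpss_probe_pci(struct pci_dev *pdev,
			      const struct pci_device_id *id)
{
	const struct pwm_lpss_boardinfo *info;
	struct pwm_lpss_chip *lpwm;
	int err;

	err = pci_enable_device(pdev);
	if (err < 0)
		return err;

	info = (const struct pwm_lpss_boardinfo *)id->driver_data;
	lpwm = pwm_lpss_probe(&pdev->dev, &pdev->resource[0], info);
	if (IS_ERR(lpwm))
		return PTR_ERR(lpwm);

	pci_set_drvdata(pdev, lpwm);
	return 0;
}

static void pwm_lpss_remove_pci(struct pci_dev *pdev)
{
	struct pwm_lpss_chip *lpwm = pci_get_drvdata(pdev);

	pwm_lpss_remove(lpwm);
	pci_disable_device(pdev);
}

static const struct pci_device_id pwm_lpss_pci_ids[] = {
	{ PCI_VDEVICE(INTEL, 0x0f08), (unsigned long)&pwm_lpss_byt_info },
	{ PCI_VDEVICE(INTEL, 0x0f09), (unsigned long)&pwm_lpss_byt_info },
	{ },
};
MODULE_DEVICE_TABLE(pci, pwm_lpss_pci_ids);

static struct pci_driver pwm_lpss_driver_pci = {
	.name = "pwm-lpss",
	.id_table = pwm_lpss_pci_ids,
	.probe = pwm_lpss_probe_pci,
	.remove = pwm_lpss_remove_pci,
};
module_pci_driver(pwm_lpss_driver_pci);

MODULE_DESCRIPTION("PWM PCI driver for Intel LPSS");
MODULE_LICENSE("GPL v2");
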
diff --git a/drivers/pwm/pwm-rockchip.c b/drivers/pwm/pwm-rockchip.c
index bdd8644c01cf..9442df244101 100644
--- a/drivers/pwm/pwm-rockchip.c
+++ b/drivers/pwm/pwm-rockchip.c
@@ -24,7 +24,9 @@
24#define PWM_ENABLE (1 << 0) 24#define PWM_ENABLE (1 << 0)
25#define PWM_CONTINUOUS (1 << 1) 25#define PWM_CONTINUOUS (1 << 1)
26#define PWM_DUTY_POSITIVE (1 << 3) 26#define PWM_DUTY_POSITIVE (1 << 3)
27#define PWM_DUTY_NEGATIVE (0 << 3)
27#define PWM_INACTIVE_NEGATIVE (0 << 4) 28#define PWM_INACTIVE_NEGATIVE (0 << 4)
29#define PWM_INACTIVE_POSITIVE (1 << 4)
28#define PWM_OUTPUT_LEFT (0 << 5) 30#define PWM_OUTPUT_LEFT (0 << 5)
29#define PWM_LP_DISABLE (0 << 8) 31#define PWM_LP_DISABLE (0 << 8)
30 32
@@ -45,8 +47,10 @@ struct rockchip_pwm_regs {
45struct rockchip_pwm_data { 47struct rockchip_pwm_data {
46 struct rockchip_pwm_regs regs; 48 struct rockchip_pwm_regs regs;
47 unsigned int prescaler; 49 unsigned int prescaler;
50 const struct pwm_ops *ops;
48 51
49 void (*set_enable)(struct pwm_chip *chip, bool enable); 52 void (*set_enable)(struct pwm_chip *chip,
53 struct pwm_device *pwm, bool enable);
50}; 54};
51 55
52static inline struct rockchip_pwm_chip *to_rockchip_pwm_chip(struct pwm_chip *c) 56static inline struct rockchip_pwm_chip *to_rockchip_pwm_chip(struct pwm_chip *c)
@@ -54,7 +58,8 @@ static inline struct rockchip_pwm_chip *to_rockchip_pwm_chip(struct pwm_chip *c)
54 return container_of(c, struct rockchip_pwm_chip, chip); 58 return container_of(c, struct rockchip_pwm_chip, chip);
55} 59}
56 60
57static void rockchip_pwm_set_enable_v1(struct pwm_chip *chip, bool enable) 61static void rockchip_pwm_set_enable_v1(struct pwm_chip *chip,
62 struct pwm_device *pwm, bool enable)
58{ 63{
59 struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip); 64 struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip);
60 u32 enable_conf = PWM_CTRL_OUTPUT_EN | PWM_CTRL_TIMER_EN; 65 u32 enable_conf = PWM_CTRL_OUTPUT_EN | PWM_CTRL_TIMER_EN;
@@ -70,14 +75,19 @@ static void rockchip_pwm_set_enable_v1(struct pwm_chip *chip, bool enable)
70 writel_relaxed(val, pc->base + pc->data->regs.ctrl); 75 writel_relaxed(val, pc->base + pc->data->regs.ctrl);
71} 76}
72 77
73static void rockchip_pwm_set_enable_v2(struct pwm_chip *chip, bool enable) 78static void rockchip_pwm_set_enable_v2(struct pwm_chip *chip,
79 struct pwm_device *pwm, bool enable)
74{ 80{
75 struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip); 81 struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip);
76 u32 enable_conf = PWM_OUTPUT_LEFT | PWM_LP_DISABLE | PWM_ENABLE | 82 u32 enable_conf = PWM_OUTPUT_LEFT | PWM_LP_DISABLE | PWM_ENABLE |
77 PWM_CONTINUOUS | PWM_DUTY_POSITIVE | 83 PWM_CONTINUOUS;
78 PWM_INACTIVE_NEGATIVE;
79 u32 val; 84 u32 val;
80 85
86 if (pwm->polarity == PWM_POLARITY_INVERSED)
87 enable_conf |= PWM_DUTY_NEGATIVE | PWM_INACTIVE_POSITIVE;
88 else
89 enable_conf |= PWM_DUTY_POSITIVE | PWM_INACTIVE_NEGATIVE;
90
81 val = readl_relaxed(pc->base + pc->data->regs.ctrl); 91 val = readl_relaxed(pc->base + pc->data->regs.ctrl);
82 92
83 if (enable) 93 if (enable)
@@ -124,6 +134,19 @@ static int rockchip_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
124 return 0; 134 return 0;
125} 135}
126 136
137static int rockchip_pwm_set_polarity(struct pwm_chip *chip,
138 struct pwm_device *pwm,
139 enum pwm_polarity polarity)
140{
141 /*
142 * No action needed here because pwm->polarity will be set by the core
143 * and the core will only change polarity when the PWM is not enabled.
144 * We'll handle things in set_enable().
145 */
146
147 return 0;
148}
149
127static int rockchip_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm) 150static int rockchip_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
128{ 151{
129 struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip); 152 struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip);
@@ -133,7 +156,7 @@ static int rockchip_pwm_enable(struct pwm_chip *chip, struct pwm_device *pwm)
133 if (ret) 156 if (ret)
134 return ret; 157 return ret;
135 158
136 pc->data->set_enable(chip, true); 159 pc->data->set_enable(chip, pwm, true);
137 160
138 return 0; 161 return 0;
139} 162}
@@ -142,18 +165,26 @@ static void rockchip_pwm_disable(struct pwm_chip *chip, struct pwm_device *pwm)
142{ 165{
143 struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip); 166 struct rockchip_pwm_chip *pc = to_rockchip_pwm_chip(chip);
144 167
145 pc->data->set_enable(chip, false); 168 pc->data->set_enable(chip, pwm, false);
146 169
147 clk_disable(pc->clk); 170 clk_disable(pc->clk);
148} 171}
149 172
150static const struct pwm_ops rockchip_pwm_ops = { 173static const struct pwm_ops rockchip_pwm_ops_v1 = {
151 .config = rockchip_pwm_config, 174 .config = rockchip_pwm_config,
152 .enable = rockchip_pwm_enable, 175 .enable = rockchip_pwm_enable,
153 .disable = rockchip_pwm_disable, 176 .disable = rockchip_pwm_disable,
154 .owner = THIS_MODULE, 177 .owner = THIS_MODULE,
155}; 178};
156 179
180static const struct pwm_ops rockchip_pwm_ops_v2 = {
181 .config = rockchip_pwm_config,
182 .set_polarity = rockchip_pwm_set_polarity,
183 .enable = rockchip_pwm_enable,
184 .disable = rockchip_pwm_disable,
185 .owner = THIS_MODULE,
186};
187
157static const struct rockchip_pwm_data pwm_data_v1 = { 188static const struct rockchip_pwm_data pwm_data_v1 = {
158 .regs = { 189 .regs = {
159 .duty = 0x04, 190 .duty = 0x04,
@@ -162,6 +193,7 @@ static const struct rockchip_pwm_data pwm_data_v1 = {
162 .ctrl = 0x0c, 193 .ctrl = 0x0c,
163 }, 194 },
164 .prescaler = 2, 195 .prescaler = 2,
196 .ops = &rockchip_pwm_ops_v1,
165 .set_enable = rockchip_pwm_set_enable_v1, 197 .set_enable = rockchip_pwm_set_enable_v1,
166}; 198};
167 199
@@ -173,6 +205,7 @@ static const struct rockchip_pwm_data pwm_data_v2 = {
173 .ctrl = 0x0c, 205 .ctrl = 0x0c,
174 }, 206 },
175 .prescaler = 1, 207 .prescaler = 1,
208 .ops = &rockchip_pwm_ops_v2,
176 .set_enable = rockchip_pwm_set_enable_v2, 209 .set_enable = rockchip_pwm_set_enable_v2,
177}; 210};
178 211
@@ -184,6 +217,7 @@ static const struct rockchip_pwm_data pwm_data_vop = {
184 .ctrl = 0x00, 217 .ctrl = 0x00,
185 }, 218 },
186 .prescaler = 1, 219 .prescaler = 1,
220 .ops = &rockchip_pwm_ops_v2,
187 .set_enable = rockchip_pwm_set_enable_v2, 221 .set_enable = rockchip_pwm_set_enable_v2,
188}; 222};
189 223
@@ -227,10 +261,15 @@ static int rockchip_pwm_probe(struct platform_device *pdev)
227 261
228 pc->data = id->data; 262 pc->data = id->data;
229 pc->chip.dev = &pdev->dev; 263 pc->chip.dev = &pdev->dev;
230 pc->chip.ops = &rockchip_pwm_ops; 264 pc->chip.ops = pc->data->ops;
231 pc->chip.base = -1; 265 pc->chip.base = -1;
232 pc->chip.npwm = 1; 266 pc->chip.npwm = 1;
233 267
268 if (pc->data->ops->set_polarity) {
269 pc->chip.of_xlate = of_pwm_xlate_with_flags;
270 pc->chip.of_pwm_n_cells = 3;
271 }
272
234 ret = pwmchip_add(&pc->chip); 273 ret = pwmchip_add(&pc->chip);
235 if (ret < 0) { 274 if (ret < 0) {
236 clk_unprepare(pc->clk); 275 clk_unprepare(pc->clk);
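
Note: with the v2 ops gaining set_polarity and the chip switching to of_pwm_xlate_with_flags with three specifier cells, consumers can request an inverted output through the generic PWM API. A minimal consumer-side sketch, assuming a hypothetical client device and period (names and numbers are placeholders, not part of this patch):

#include <linux/err.h>
#include <linux/pwm.h>

static int example_enable_inverted_pwm(struct device *dev)
{
	struct pwm_device *pwm;
	int ret;

	pwm = pwm_get(dev, NULL);		/* or devm_pwm_get() */
	if (IS_ERR(pwm))
		return PTR_ERR(pwm);

	/*
	 * Polarity may only be changed while the PWM is disabled; the
	 * reworked rockchip_pwm_set_enable_v2() then programs the duty and
	 * inactive-level bits to match pwm->polarity on enable.
	 */
	ret = pwm_set_polarity(pwm, PWM_POLARITY_INVERSED);
	if (ret)
		goto put;

	ret = pwm_config(pwm, 500000, 1000000);	/* 50% of a 1 ms period */
	if (ret)
		goto put;

	ret = pwm_enable(pwm);
	if (ret)
		goto put;

	return 0;
put:
	pwm_put(pwm);
	return ret;
}
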
diff --git a/drivers/regulator/max1586.c b/drivers/regulator/max1586.c
index 86db310d5304..d2a8c64cae42 100644
--- a/drivers/regulator/max1586.c
+++ b/drivers/regulator/max1586.c
@@ -163,7 +163,7 @@ static int of_get_max1586_platform_data(struct device *dev,
163 struct max1586_platform_data *pdata) 163 struct max1586_platform_data *pdata)
164{ 164{
165 struct max1586_subdev_data *sub; 165 struct max1586_subdev_data *sub;
166 struct of_regulator_match rmatch[ARRAY_SIZE(max1586_reg)]; 166 struct of_regulator_match rmatch[ARRAY_SIZE(max1586_reg)] = { };
167 struct device_node *np = dev->of_node; 167 struct device_node *np = dev->of_node;
168 int i, matched; 168 int i, matched;
169 169
diff --git a/drivers/regulator/max77686.c b/drivers/regulator/max77686.c
index ef1af2debbd2..f69320e1738f 100644
--- a/drivers/regulator/max77686.c
+++ b/drivers/regulator/max77686.c
@@ -395,7 +395,7 @@ static int max77686_pmic_dt_parse_pdata(struct platform_device *pdev,
395 struct max77686_dev *iodev = dev_get_drvdata(pdev->dev.parent); 395 struct max77686_dev *iodev = dev_get_drvdata(pdev->dev.parent);
396 struct device_node *pmic_np, *regulators_np; 396 struct device_node *pmic_np, *regulators_np;
397 struct max77686_regulator_data *rdata; 397 struct max77686_regulator_data *rdata;
398 struct of_regulator_match rmatch; 398 struct of_regulator_match rmatch = { };
399 unsigned int i; 399 unsigned int i;
400 400
401 pmic_np = iodev->dev->of_node; 401 pmic_np = iodev->dev->of_node;
diff --git a/drivers/regulator/max77693.c b/drivers/regulator/max77693.c
index c67ff05fc1dd..d158f71fa128 100644
--- a/drivers/regulator/max77693.c
+++ b/drivers/regulator/max77693.c
@@ -227,7 +227,7 @@ static int max77693_pmic_probe(struct platform_device *pdev)
227 struct max77693_dev *iodev = dev_get_drvdata(pdev->dev.parent); 227 struct max77693_dev *iodev = dev_get_drvdata(pdev->dev.parent);
228 struct max77693_regulator_data *rdata = NULL; 228 struct max77693_regulator_data *rdata = NULL;
229 int num_rdata, i; 229 int num_rdata, i;
230 struct regulator_config config; 230 struct regulator_config config = { };
231 231
232 num_rdata = max77693_pmic_init_rdata(&pdev->dev, &rdata); 232 num_rdata = max77693_pmic_init_rdata(&pdev->dev, &rdata);
233 if (!rdata || num_rdata <= 0) { 233 if (!rdata || num_rdata <= 0) {
diff --git a/drivers/regulator/max77802.c b/drivers/regulator/max77802.c
index d89792b084e9..45fa240fe243 100644
--- a/drivers/regulator/max77802.c
+++ b/drivers/regulator/max77802.c
@@ -454,7 +454,7 @@ static int max77802_pmic_dt_parse_pdata(struct platform_device *pdev,
454 struct max77686_dev *iodev = dev_get_drvdata(pdev->dev.parent); 454 struct max77686_dev *iodev = dev_get_drvdata(pdev->dev.parent);
455 struct device_node *pmic_np, *regulators_np; 455 struct device_node *pmic_np, *regulators_np;
456 struct max77686_regulator_data *rdata; 456 struct max77686_regulator_data *rdata;
457 struct of_regulator_match rmatch; 457 struct of_regulator_match rmatch = { };
458 unsigned int i; 458 unsigned int i;
459 459
460 pmic_np = iodev->dev->of_node; 460 pmic_np = iodev->dev->of_node;
diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
index 2fc411188794..7eee2ca18541 100644
--- a/drivers/regulator/max8660.c
+++ b/drivers/regulator/max8660.c
@@ -335,7 +335,7 @@ static int max8660_pdata_from_dt(struct device *dev,
335 int matched, i; 335 int matched, i;
336 struct device_node *np; 336 struct device_node *np;
337 struct max8660_subdev_data *sub; 337 struct max8660_subdev_data *sub;
338 struct of_regulator_match rmatch[ARRAY_SIZE(max8660_reg)]; 338 struct of_regulator_match rmatch[ARRAY_SIZE(max8660_reg)] = { };
339 339
340 np = of_get_child_by_name(dev->of_node, "regulators"); 340 np = of_get_child_by_name(dev->of_node, "regulators");
341 if (!np) { 341 if (!np) {
diff --git a/drivers/regulator/of_regulator.c b/drivers/regulator/of_regulator.c
index 7a51814abdc5..5a1d4afa4776 100644
--- a/drivers/regulator/of_regulator.c
+++ b/drivers/regulator/of_regulator.c
@@ -211,7 +211,8 @@ struct regulator_init_data *regulator_of_get_init_data(struct device *dev,
211 search = dev->of_node; 211 search = dev->of_node;
212 212
213 if (!search) { 213 if (!search) {
214 dev_err(dev, "Failed to find regulator container node\n"); 214 dev_dbg(dev, "Failed to find regulator container node '%s'\n",
215 desc->regulators_node);
215 return NULL; 216 return NULL;
216 } 217 }
217 218
diff --git a/drivers/regulator/rk808-regulator.c b/drivers/regulator/rk808-regulator.c
index e305416d7697..196a5c8838c4 100644
--- a/drivers/regulator/rk808-regulator.c
+++ b/drivers/regulator/rk808-regulator.c
@@ -44,7 +44,7 @@ static const int rk808_buck_config_regs[] = {
44}; 44};
45 45
46static const struct regulator_linear_range rk808_buck_voltage_ranges[] = { 46static const struct regulator_linear_range rk808_buck_voltage_ranges[] = {
47 REGULATOR_LINEAR_RANGE(700000, 0, 63, 12500), 47 REGULATOR_LINEAR_RANGE(712500, 0, 63, 12500),
48}; 48};
49 49
50static const struct regulator_linear_range rk808_buck4_voltage_ranges[] = { 50static const struct regulator_linear_range rk808_buck4_voltage_ranges[] = {
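
Note: the rk808 hunk only corrects the minimum of the rk808_buck_voltage_ranges table. With REGULATOR_LINEAR_RANGE(min_uV, min_sel, max_sel, step_uV) mapping volt(sel) = min_uV + (sel - min_sel) * step_uV, the corrected range works out as follows (comment-style summary, not new driver code):

/* REGULATOR_LINEAR_RANGE(712500, 0, 63, 12500):
 *   sel 0  -> 712500 uV
 *   sel 63 -> 712500 + 63 * 12500 = 1500000 uV
 * The old 700000 uV base understated every reported voltage by 12.5 mV.
 */
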
diff --git a/drivers/regulator/s2mpa01.c b/drivers/regulator/s2mpa01.c
index 4acefa6b462e..7633b9bfbe6e 100644
--- a/drivers/regulator/s2mpa01.c
+++ b/drivers/regulator/s2mpa01.c
@@ -341,7 +341,7 @@ static int s2mpa01_pmic_probe(struct platform_device *pdev)
341{ 341{
342 struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent); 342 struct sec_pmic_dev *iodev = dev_get_drvdata(pdev->dev.parent);
343 struct sec_platform_data *pdata = dev_get_platdata(iodev->dev); 343 struct sec_platform_data *pdata = dev_get_platdata(iodev->dev);
344 struct of_regulator_match rdata[S2MPA01_REGULATOR_MAX]; 344 struct of_regulator_match rdata[S2MPA01_REGULATOR_MAX] = { };
345 struct device_node *reg_np = NULL; 345 struct device_node *reg_np = NULL;
346 struct regulator_config config = { }; 346 struct regulator_config config = { };
347 struct s2mpa01_info *s2mpa01; 347 struct s2mpa01_info *s2mpa01;
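
Note: the max1586, max77686, max77693, max77802, max8660 and s2mpa01 hunks all apply the same fix: on-stack of_regulator_match and regulator_config structures handed to the regulator core must start out zeroed, otherwise stale stack contents are read back as init_data, of_node or driver_data for unmatched entries. A minimal sketch of the pattern, assuming a hypothetical regulator count and name table:

#include <linux/device.h>
#include <linux/of.h>
#include <linux/regulator/of_regulator.h>

#define EXAMPLE_NUM_REGULATORS	3	/* placeholder count */

static int example_parse_regulators(struct device *dev,
				    struct device_node *regulators_np)
{
	/* Zero-initialize so entries the DT parser does not fill stay NULL. */
	struct of_regulator_match rmatch[EXAMPLE_NUM_REGULATORS] = { };
	static const char * const names[EXAMPLE_NUM_REGULATORS] = {
		"LDO1", "LDO2", "BUCK1",	/* placeholder names */
	};
	int i, matched;

	for (i = 0; i < EXAMPLE_NUM_REGULATORS; i++)
		rmatch[i].name = names[i];

	matched = of_regulator_match(dev, regulators_np, rmatch,
				     EXAMPLE_NUM_REGULATORS);

	/*
	 * Only matched entries have .init_data and .of_node filled in; the
	 * rest must still read as NULL, which is what "= { }" guarantees.
	 */
	return matched;
}
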
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 8cd0beebdc3f..6dd12ddbabc6 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -830,7 +830,7 @@ config RTC_DRV_DA9063
830 830
831config RTC_DRV_EFI 831config RTC_DRV_EFI
832 tristate "EFI RTC" 832 tristate "EFI RTC"
833 depends on EFI 833 depends on EFI && !X86
834 help 834 help
835 If you say yes here you will get support for the EFI 835 If you say yes here you will get support for the EFI
836 Real Time Clock. 836 Real Time Clock.
@@ -1320,7 +1320,7 @@ config RTC_DRV_LPC32XX
1320 1320
1321config RTC_DRV_PM8XXX 1321config RTC_DRV_PM8XXX
1322 tristate "Qualcomm PMIC8XXX RTC" 1322 tristate "Qualcomm PMIC8XXX RTC"
1323 depends on MFD_PM8XXX 1323 depends on MFD_PM8XXX || MFD_SPMI_PMIC
1324 help 1324 help
1325 If you say yes here you get support for the 1325 If you say yes here you get support for the
1326 Qualcomm PMIC8XXX RTC. 1326 Qualcomm PMIC8XXX RTC.
diff --git a/drivers/rtc/rtc-bq32k.c b/drivers/rtc/rtc-bq32k.c
index 314129e66d6e..92679df6d6e2 100644
--- a/drivers/rtc/rtc-bq32k.c
+++ b/drivers/rtc/rtc-bq32k.c
@@ -160,7 +160,7 @@ static int trickle_charger_of_init(struct device *dev, struct device_node *node)
160 dev_err(dev, "bq32k: diode and resistor mismatch\n"); 160 dev_err(dev, "bq32k: diode and resistor mismatch\n");
161 return -EINVAL; 161 return -EINVAL;
162 } 162 }
163 reg = 0x25; 163 reg = 0x45;
164 break; 164 break;
165 165
166 default: 166 default:
diff --git a/drivers/rtc/rtc-efi.c b/drivers/rtc/rtc-efi.c
index c384fec6d173..53b589dc34eb 100644
--- a/drivers/rtc/rtc-efi.c
+++ b/drivers/rtc/rtc-efi.c
@@ -236,3 +236,4 @@ MODULE_ALIAS("platform:rtc-efi");
236MODULE_AUTHOR("dann frazier <dannf@hp.com>"); 236MODULE_AUTHOR("dann frazier <dannf@hp.com>");
237MODULE_LICENSE("GPL"); 237MODULE_LICENSE("GPL");
238MODULE_DESCRIPTION("EFI RTC driver"); 238MODULE_DESCRIPTION("EFI RTC driver");
239MODULE_ALIAS("platform:rtc-efi");
diff --git a/drivers/rtc/rtc-pm8xxx.c b/drivers/rtc/rtc-pm8xxx.c
index 197699f358c7..5adcf111fc14 100644
--- a/drivers/rtc/rtc-pm8xxx.c
+++ b/drivers/rtc/rtc-pm8xxx.c
@@ -27,21 +27,36 @@
27 27
28/* RTC_CTRL register bit fields */ 28/* RTC_CTRL register bit fields */
29#define PM8xxx_RTC_ENABLE BIT(7) 29#define PM8xxx_RTC_ENABLE BIT(7)
30#define PM8xxx_RTC_ALARM_ENABLE BIT(1)
31#define PM8xxx_RTC_ALARM_CLEAR BIT(0) 30#define PM8xxx_RTC_ALARM_CLEAR BIT(0)
32 31
33#define NUM_8_BIT_RTC_REGS 0x4 32#define NUM_8_BIT_RTC_REGS 0x4
34 33
35/** 34/**
35 * struct pm8xxx_rtc_regs - describe RTC registers per PMIC versions
36 * @ctrl: base address of control register
37 * @write: base address of write register
38 * @read: base address of read register
39 * @alarm_ctrl: base address of alarm control register
40 * @alarm_ctrl2: base address of alarm control2 register
41 * @alarm_rw: base address of alarm read-write register
42 * @alarm_en: alarm enable mask
43 */
44struct pm8xxx_rtc_regs {
45 unsigned int ctrl;
46 unsigned int write;
47 unsigned int read;
48 unsigned int alarm_ctrl;
49 unsigned int alarm_ctrl2;
50 unsigned int alarm_rw;
51 unsigned int alarm_en;
52};
53
54/**
36 * struct pm8xxx_rtc - rtc driver internal structure 55 * struct pm8xxx_rtc - rtc driver internal structure
37 * @rtc: rtc device for this driver. 56 * @rtc: rtc device for this driver.
38 * @regmap: regmap used to access RTC registers 57 * @regmap: regmap used to access RTC registers
39 * @allow_set_time: indicates whether writing to the RTC is allowed 58 * @allow_set_time: indicates whether writing to the RTC is allowed
40 * @rtc_alarm_irq: rtc alarm irq number. 59 * @rtc_alarm_irq: rtc alarm irq number.
41 * @rtc_base: address of rtc control register.
42 * @rtc_read_base: base address of read registers.
43 * @rtc_write_base: base address of write registers.
44 * @alarm_rw_base: base address of alarm registers.
45 * @ctrl_reg: rtc control register. 60 * @ctrl_reg: rtc control register.
46 * @rtc_dev: device structure. 61 * @rtc_dev: device structure.
47 * @ctrl_reg_lock: spinlock protecting access to ctrl_reg. 62 * @ctrl_reg_lock: spinlock protecting access to ctrl_reg.
@@ -51,11 +66,7 @@ struct pm8xxx_rtc {
51 struct regmap *regmap; 66 struct regmap *regmap;
52 bool allow_set_time; 67 bool allow_set_time;
53 int rtc_alarm_irq; 68 int rtc_alarm_irq;
54 int rtc_base; 69 const struct pm8xxx_rtc_regs *regs;
55 int rtc_read_base;
56 int rtc_write_base;
57 int alarm_rw_base;
58 u8 ctrl_reg;
59 struct device *rtc_dev; 70 struct device *rtc_dev;
60 spinlock_t ctrl_reg_lock; 71 spinlock_t ctrl_reg_lock;
61}; 72};
@@ -71,8 +82,10 @@ static int pm8xxx_rtc_set_time(struct device *dev, struct rtc_time *tm)
71{ 82{
72 int rc, i; 83 int rc, i;
73 unsigned long secs, irq_flags; 84 unsigned long secs, irq_flags;
74 u8 value[NUM_8_BIT_RTC_REGS], alarm_enabled = 0, ctrl_reg; 85 u8 value[NUM_8_BIT_RTC_REGS], alarm_enabled = 0;
86 unsigned int ctrl_reg;
75 struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev); 87 struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
88 const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
76 89
77 if (!rtc_dd->allow_set_time) 90 if (!rtc_dd->allow_set_time)
78 return -EACCES; 91 return -EACCES;
@@ -87,30 +100,30 @@ static int pm8xxx_rtc_set_time(struct device *dev, struct rtc_time *tm)
87 dev_dbg(dev, "Seconds value to be written to RTC = %lu\n", secs); 100 dev_dbg(dev, "Seconds value to be written to RTC = %lu\n", secs);
88 101
89 spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags); 102 spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags);
90 ctrl_reg = rtc_dd->ctrl_reg;
91 103
92 if (ctrl_reg & PM8xxx_RTC_ALARM_ENABLE) { 104 rc = regmap_read(rtc_dd->regmap, regs->ctrl, &ctrl_reg);
105 if (rc)
106 goto rtc_rw_fail;
107
108 if (ctrl_reg & regs->alarm_en) {
93 alarm_enabled = 1; 109 alarm_enabled = 1;
94 ctrl_reg &= ~PM8xxx_RTC_ALARM_ENABLE; 110 ctrl_reg &= ~regs->alarm_en;
95 rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_base, ctrl_reg); 111 rc = regmap_write(rtc_dd->regmap, regs->ctrl, ctrl_reg);
96 if (rc) { 112 if (rc) {
97 dev_err(dev, "Write to RTC control register failed\n"); 113 dev_err(dev, "Write to RTC control register failed\n");
98 goto rtc_rw_fail; 114 goto rtc_rw_fail;
99 } 115 }
100 rtc_dd->ctrl_reg = ctrl_reg;
101 } else {
102 spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
103 } 116 }
104 117
105 /* Write 0 to Byte[0] */ 118 /* Write 0 to Byte[0] */
106 rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_write_base, 0); 119 rc = regmap_write(rtc_dd->regmap, regs->write, 0);
107 if (rc) { 120 if (rc) {
108 dev_err(dev, "Write to RTC write data register failed\n"); 121 dev_err(dev, "Write to RTC write data register failed\n");
109 goto rtc_rw_fail; 122 goto rtc_rw_fail;
110 } 123 }
111 124
112 /* Write Byte[1], Byte[2], Byte[3] */ 125 /* Write Byte[1], Byte[2], Byte[3] */
113 rc = regmap_bulk_write(rtc_dd->regmap, rtc_dd->rtc_write_base + 1, 126 rc = regmap_bulk_write(rtc_dd->regmap, regs->write + 1,
114 &value[1], sizeof(value) - 1); 127 &value[1], sizeof(value) - 1);
115 if (rc) { 128 if (rc) {
116 dev_err(dev, "Write to RTC write data register failed\n"); 129 dev_err(dev, "Write to RTC write data register failed\n");
@@ -118,25 +131,23 @@ static int pm8xxx_rtc_set_time(struct device *dev, struct rtc_time *tm)
118 } 131 }
119 132
120 /* Write Byte[0] */ 133 /* Write Byte[0] */
121 rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_write_base, value[0]); 134 rc = regmap_write(rtc_dd->regmap, regs->write, value[0]);
122 if (rc) { 135 if (rc) {
123 dev_err(dev, "Write to RTC write data register failed\n"); 136 dev_err(dev, "Write to RTC write data register failed\n");
124 goto rtc_rw_fail; 137 goto rtc_rw_fail;
125 } 138 }
126 139
127 if (alarm_enabled) { 140 if (alarm_enabled) {
128 ctrl_reg |= PM8xxx_RTC_ALARM_ENABLE; 141 ctrl_reg |= regs->alarm_en;
129 rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_base, ctrl_reg); 142 rc = regmap_write(rtc_dd->regmap, regs->ctrl, ctrl_reg);
130 if (rc) { 143 if (rc) {
131 dev_err(dev, "Write to RTC control register failed\n"); 144 dev_err(dev, "Write to RTC control register failed\n");
132 goto rtc_rw_fail; 145 goto rtc_rw_fail;
133 } 146 }
134 rtc_dd->ctrl_reg = ctrl_reg;
135 } 147 }
136 148
137rtc_rw_fail: 149rtc_rw_fail:
138 if (alarm_enabled) 150 spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
139 spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
140 151
141 return rc; 152 return rc;
142} 153}
@@ -148,9 +159,9 @@ static int pm8xxx_rtc_read_time(struct device *dev, struct rtc_time *tm)
148 unsigned long secs; 159 unsigned long secs;
149 unsigned int reg; 160 unsigned int reg;
150 struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev); 161 struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
162 const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
151 163
152 rc = regmap_bulk_read(rtc_dd->regmap, rtc_dd->rtc_read_base, 164 rc = regmap_bulk_read(rtc_dd->regmap, regs->read, value, sizeof(value));
153 value, sizeof(value));
154 if (rc) { 165 if (rc) {
155 dev_err(dev, "RTC read data register failed\n"); 166 dev_err(dev, "RTC read data register failed\n");
156 return rc; 167 return rc;
@@ -160,14 +171,14 @@ static int pm8xxx_rtc_read_time(struct device *dev, struct rtc_time *tm)
160 * Read the LSB again and check if there has been a carry over. 171 * Read the LSB again and check if there has been a carry over.
161 * If there is, redo the read operation. 172 * If there is, redo the read operation.
162 */ 173 */
163 rc = regmap_read(rtc_dd->regmap, rtc_dd->rtc_read_base, &reg); 174 rc = regmap_read(rtc_dd->regmap, regs->read, &reg);
164 if (rc < 0) { 175 if (rc < 0) {
165 dev_err(dev, "RTC read data register failed\n"); 176 dev_err(dev, "RTC read data register failed\n");
166 return rc; 177 return rc;
167 } 178 }
168 179
169 if (unlikely(reg < value[0])) { 180 if (unlikely(reg < value[0])) {
170 rc = regmap_bulk_read(rtc_dd->regmap, rtc_dd->rtc_read_base, 181 rc = regmap_bulk_read(rtc_dd->regmap, regs->read,
171 value, sizeof(value)); 182 value, sizeof(value));
172 if (rc) { 183 if (rc) {
173 dev_err(dev, "RTC read data register failed\n"); 184 dev_err(dev, "RTC read data register failed\n");
@@ -195,9 +206,11 @@ static int pm8xxx_rtc_read_time(struct device *dev, struct rtc_time *tm)
195static int pm8xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) 206static int pm8xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
196{ 207{
197 int rc, i; 208 int rc, i;
198 u8 value[NUM_8_BIT_RTC_REGS], ctrl_reg; 209 u8 value[NUM_8_BIT_RTC_REGS];
210 unsigned int ctrl_reg;
199 unsigned long secs, irq_flags; 211 unsigned long secs, irq_flags;
200 struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev); 212 struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
213 const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
201 214
202 rtc_tm_to_time(&alarm->time, &secs); 215 rtc_tm_to_time(&alarm->time, &secs);
203 216
@@ -208,28 +221,28 @@ static int pm8xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm)
208 221
209 spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags); 222 spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags);
210 223
211 rc = regmap_bulk_write(rtc_dd->regmap, rtc_dd->alarm_rw_base, value, 224 rc = regmap_bulk_write(rtc_dd->regmap, regs->alarm_rw, value,
212 sizeof(value)); 225 sizeof(value));
213 if (rc) { 226 if (rc) {
214 dev_err(dev, "Write to RTC ALARM register failed\n"); 227 dev_err(dev, "Write to RTC ALARM register failed\n");
215 goto rtc_rw_fail; 228 goto rtc_rw_fail;
216 } 229 }
217 230
218 ctrl_reg = rtc_dd->ctrl_reg; 231 rc = regmap_read(rtc_dd->regmap, regs->alarm_ctrl, &ctrl_reg);
232 if (rc)
233 goto rtc_rw_fail;
219 234
220 if (alarm->enabled) 235 if (alarm->enabled)
221 ctrl_reg |= PM8xxx_RTC_ALARM_ENABLE; 236 ctrl_reg |= regs->alarm_en;
222 else 237 else
223 ctrl_reg &= ~PM8xxx_RTC_ALARM_ENABLE; 238 ctrl_reg &= ~regs->alarm_en;
224 239
225 rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_base, ctrl_reg); 240 rc = regmap_write(rtc_dd->regmap, regs->alarm_ctrl, ctrl_reg);
226 if (rc) { 241 if (rc) {
227 dev_err(dev, "Write to RTC control register failed\n"); 242 dev_err(dev, "Write to RTC alarm control register failed\n");
228 goto rtc_rw_fail; 243 goto rtc_rw_fail;
229 } 244 }
230 245
231 rtc_dd->ctrl_reg = ctrl_reg;
232
233 dev_dbg(dev, "Alarm Set for h:r:s=%d:%d:%d, d/m/y=%d/%d/%d\n", 246 dev_dbg(dev, "Alarm Set for h:r:s=%d:%d:%d, d/m/y=%d/%d/%d\n",
234 alarm->time.tm_hour, alarm->time.tm_min, 247 alarm->time.tm_hour, alarm->time.tm_min,
235 alarm->time.tm_sec, alarm->time.tm_mday, 248 alarm->time.tm_sec, alarm->time.tm_mday,
@@ -245,8 +258,9 @@ static int pm8xxx_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm)
245 u8 value[NUM_8_BIT_RTC_REGS]; 258 u8 value[NUM_8_BIT_RTC_REGS];
246 unsigned long secs; 259 unsigned long secs;
247 struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev); 260 struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
261 const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
248 262
249 rc = regmap_bulk_read(rtc_dd->regmap, rtc_dd->alarm_rw_base, value, 263 rc = regmap_bulk_read(rtc_dd->regmap, regs->alarm_rw, value,
250 sizeof(value)); 264 sizeof(value));
251 if (rc) { 265 if (rc) {
252 dev_err(dev, "RTC alarm time read failed\n"); 266 dev_err(dev, "RTC alarm time read failed\n");
@@ -276,25 +290,26 @@ static int pm8xxx_rtc_alarm_irq_enable(struct device *dev, unsigned int enable)
276 int rc; 290 int rc;
277 unsigned long irq_flags; 291 unsigned long irq_flags;
278 struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev); 292 struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev);
279 u8 ctrl_reg; 293 const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
294 unsigned int ctrl_reg;
280 295
281 spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags); 296 spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags);
282 297
283 ctrl_reg = rtc_dd->ctrl_reg; 298 rc = regmap_read(rtc_dd->regmap, regs->alarm_ctrl, &ctrl_reg);
299 if (rc)
300 goto rtc_rw_fail;
284 301
285 if (enable) 302 if (enable)
286 ctrl_reg |= PM8xxx_RTC_ALARM_ENABLE; 303 ctrl_reg |= regs->alarm_en;
287 else 304 else
288 ctrl_reg &= ~PM8xxx_RTC_ALARM_ENABLE; 305 ctrl_reg &= ~regs->alarm_en;
289 306
290 rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_base, ctrl_reg); 307 rc = regmap_write(rtc_dd->regmap, regs->alarm_ctrl, ctrl_reg);
291 if (rc) { 308 if (rc) {
292 dev_err(dev, "Write to RTC control register failed\n"); 309 dev_err(dev, "Write to RTC control register failed\n");
293 goto rtc_rw_fail; 310 goto rtc_rw_fail;
294 } 311 }
295 312
296 rtc_dd->ctrl_reg = ctrl_reg;
297
298rtc_rw_fail: 313rtc_rw_fail:
299 spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags); 314 spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
300 return rc; 315 return rc;
@@ -311,6 +326,7 @@ static const struct rtc_class_ops pm8xxx_rtc_ops = {
311static irqreturn_t pm8xxx_alarm_trigger(int irq, void *dev_id) 326static irqreturn_t pm8xxx_alarm_trigger(int irq, void *dev_id)
312{ 327{
313 struct pm8xxx_rtc *rtc_dd = dev_id; 328 struct pm8xxx_rtc *rtc_dd = dev_id;
329 const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
314 unsigned int ctrl_reg; 330 unsigned int ctrl_reg;
315 int rc; 331 int rc;
316 unsigned long irq_flags; 332 unsigned long irq_flags;
@@ -320,48 +336,100 @@ static irqreturn_t pm8xxx_alarm_trigger(int irq, void *dev_id)
320 spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags); 336 spin_lock_irqsave(&rtc_dd->ctrl_reg_lock, irq_flags);
321 337
322 /* Clear the alarm enable bit */ 338 /* Clear the alarm enable bit */
323 ctrl_reg = rtc_dd->ctrl_reg; 339 rc = regmap_read(rtc_dd->regmap, regs->alarm_ctrl, &ctrl_reg);
324 ctrl_reg &= ~PM8xxx_RTC_ALARM_ENABLE; 340 if (rc) {
341 spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
342 goto rtc_alarm_handled;
343 }
344
345 ctrl_reg &= ~regs->alarm_en;
325 346
326 rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_base, ctrl_reg); 347 rc = regmap_write(rtc_dd->regmap, regs->alarm_ctrl, ctrl_reg);
327 if (rc) { 348 if (rc) {
328 spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags); 349 spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
329 dev_err(rtc_dd->rtc_dev, 350 dev_err(rtc_dd->rtc_dev,
330 "Write to RTC control register failed\n"); 351 "Write to alarm control register failed\n");
331 goto rtc_alarm_handled; 352 goto rtc_alarm_handled;
332 } 353 }
333 354
334 rtc_dd->ctrl_reg = ctrl_reg;
335 spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags); 355 spin_unlock_irqrestore(&rtc_dd->ctrl_reg_lock, irq_flags);
336 356
337 /* Clear RTC alarm register */ 357 /* Clear RTC alarm register */
338 rc = regmap_read(rtc_dd->regmap, 358 rc = regmap_read(rtc_dd->regmap, regs->alarm_ctrl2, &ctrl_reg);
339 rtc_dd->rtc_base + PM8XXX_ALARM_CTRL_OFFSET,
340 &ctrl_reg);
341 if (rc) { 359 if (rc) {
342 dev_err(rtc_dd->rtc_dev, 360 dev_err(rtc_dd->rtc_dev,
343 "RTC Alarm control register read failed\n"); 361 "RTC Alarm control2 register read failed\n");
344 goto rtc_alarm_handled; 362 goto rtc_alarm_handled;
345 } 363 }
346 364
347 ctrl_reg &= ~PM8xxx_RTC_ALARM_CLEAR; 365 ctrl_reg |= PM8xxx_RTC_ALARM_CLEAR;
348 rc = regmap_write(rtc_dd->regmap, 366 rc = regmap_write(rtc_dd->regmap, regs->alarm_ctrl2, ctrl_reg);
349 rtc_dd->rtc_base + PM8XXX_ALARM_CTRL_OFFSET,
350 ctrl_reg);
351 if (rc) 367 if (rc)
352 dev_err(rtc_dd->rtc_dev, 368 dev_err(rtc_dd->rtc_dev,
353 "Write to RTC Alarm control register failed\n"); 369 "Write to RTC Alarm control2 register failed\n");
354 370
355rtc_alarm_handled: 371rtc_alarm_handled:
356 return IRQ_HANDLED; 372 return IRQ_HANDLED;
357} 373}
358 374
375static int pm8xxx_rtc_enable(struct pm8xxx_rtc *rtc_dd)
376{
377 const struct pm8xxx_rtc_regs *regs = rtc_dd->regs;
378 unsigned int ctrl_reg;
379 int rc;
380
381 /* Check if the RTC is on, else turn it on */
382 rc = regmap_read(rtc_dd->regmap, regs->ctrl, &ctrl_reg);
383 if (rc)
384 return rc;
385
386 if (!(ctrl_reg & PM8xxx_RTC_ENABLE)) {
387 ctrl_reg |= PM8xxx_RTC_ENABLE;
388 rc = regmap_write(rtc_dd->regmap, regs->ctrl, ctrl_reg);
389 if (rc)
390 return rc;
391 }
392
393 return 0;
394}
395
396static const struct pm8xxx_rtc_regs pm8921_regs = {
397 .ctrl = 0x11d,
398 .write = 0x11f,
399 .read = 0x123,
400 .alarm_rw = 0x127,
401 .alarm_ctrl = 0x11d,
402 .alarm_ctrl2 = 0x11e,
403 .alarm_en = BIT(1),
404};
405
406static const struct pm8xxx_rtc_regs pm8058_regs = {
407 .ctrl = 0x1e8,
408 .write = 0x1ea,
409 .read = 0x1ee,
410 .alarm_rw = 0x1f2,
411 .alarm_ctrl = 0x1e8,
412 .alarm_ctrl2 = 0x1e9,
413 .alarm_en = BIT(1),
414};
415
416static const struct pm8xxx_rtc_regs pm8941_regs = {
417 .ctrl = 0x6046,
418 .write = 0x6040,
419 .read = 0x6048,
420 .alarm_rw = 0x6140,
421 .alarm_ctrl = 0x6146,
422 .alarm_ctrl2 = 0x6148,
423 .alarm_en = BIT(7),
424};
425
359/* 426/*
360 * Hardcoded RTC bases until IORESOURCE_REG mapping is figured out 427 * Hardcoded RTC bases until IORESOURCE_REG mapping is figured out
361 */ 428 */
362static const struct of_device_id pm8xxx_id_table[] = { 429static const struct of_device_id pm8xxx_id_table[] = {
363 { .compatible = "qcom,pm8921-rtc", .data = (void *) 0x11D }, 430 { .compatible = "qcom,pm8921-rtc", .data = &pm8921_regs },
364 { .compatible = "qcom,pm8058-rtc", .data = (void *) 0x1E8 }, 431 { .compatible = "qcom,pm8058-rtc", .data = &pm8058_regs },
432 { .compatible = "qcom,pm8941-rtc", .data = &pm8941_regs },
365 { }, 433 { },
366}; 434};
367MODULE_DEVICE_TABLE(of, pm8xxx_id_table); 435MODULE_DEVICE_TABLE(of, pm8xxx_id_table);
@@ -369,7 +437,6 @@ MODULE_DEVICE_TABLE(of, pm8xxx_id_table);
369static int pm8xxx_rtc_probe(struct platform_device *pdev) 437static int pm8xxx_rtc_probe(struct platform_device *pdev)
370{ 438{
371 int rc; 439 int rc;
372 unsigned int ctrl_reg;
373 struct pm8xxx_rtc *rtc_dd; 440 struct pm8xxx_rtc *rtc_dd;
374 const struct of_device_id *match; 441 const struct of_device_id *match;
375 442
@@ -399,33 +466,12 @@ static int pm8xxx_rtc_probe(struct platform_device *pdev)
399 rtc_dd->allow_set_time = of_property_read_bool(pdev->dev.of_node, 466 rtc_dd->allow_set_time = of_property_read_bool(pdev->dev.of_node,
400 "allow-set-time"); 467 "allow-set-time");
401 468
402 rtc_dd->rtc_base = (long) match->data; 469 rtc_dd->regs = match->data;
403
404 /* Setup RTC register addresses */
405 rtc_dd->rtc_write_base = rtc_dd->rtc_base + PM8XXX_RTC_WRITE_OFFSET;
406 rtc_dd->rtc_read_base = rtc_dd->rtc_base + PM8XXX_RTC_READ_OFFSET;
407 rtc_dd->alarm_rw_base = rtc_dd->rtc_base + PM8XXX_ALARM_RW_OFFSET;
408
409 rtc_dd->rtc_dev = &pdev->dev; 470 rtc_dd->rtc_dev = &pdev->dev;
410 471
411 /* Check if the RTC is on, else turn it on */ 472 rc = pm8xxx_rtc_enable(rtc_dd);
412 rc = regmap_read(rtc_dd->regmap, rtc_dd->rtc_base, &ctrl_reg); 473 if (rc)
413 if (rc) {
414 dev_err(&pdev->dev, "RTC control register read failed!\n");
415 return rc; 474 return rc;
416 }
417
418 if (!(ctrl_reg & PM8xxx_RTC_ENABLE)) {
419 ctrl_reg |= PM8xxx_RTC_ENABLE;
420 rc = regmap_write(rtc_dd->regmap, rtc_dd->rtc_base, ctrl_reg);
421 if (rc) {
422 dev_err(&pdev->dev,
423 "Write to RTC control register failed\n");
424 return rc;
425 }
426 }
427
428 rtc_dd->ctrl_reg = ctrl_reg;
429 475
430 platform_set_drvdata(pdev, rtc_dd); 476 platform_set_drvdata(pdev, rtc_dd);
431 477
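
Note: with the register offsets and the alarm-enable bit moved into struct pm8xxx_rtc_regs, supporting a further PMIC becomes a data-only change. A hedged sketch of what such an addition would look like; the compatible string and register addresses below are invented placeholders, not a real PMIC:

/* Hypothetical new variant: only a register map and a match entry. */
static const struct pm8xxx_rtc_regs pmXXXX_regs = {
	.ctrl		= 0x7000,	/* placeholder addresses */
	.write		= 0x7002,
	.read		= 0x7006,
	.alarm_rw	= 0x7100,
	.alarm_ctrl	= 0x7106,
	.alarm_ctrl2	= 0x7108,
	.alarm_en	= BIT(7),
};

/* ...plus one more entry in pm8xxx_id_table[]: */
	{ .compatible = "qcom,pmXXXX-rtc", .data = &pmXXXX_regs },
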
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index a6b1252c9941..806072238c00 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -535,13 +535,15 @@ static int s3c_rtc_probe(struct platform_device *pdev)
535 } 535 }
536 clk_prepare_enable(info->rtc_clk); 536 clk_prepare_enable(info->rtc_clk);
537 537
538 info->rtc_src_clk = devm_clk_get(&pdev->dev, "rtc_src"); 538 if (info->data->needs_src_clk) {
539 if (IS_ERR(info->rtc_src_clk)) { 539 info->rtc_src_clk = devm_clk_get(&pdev->dev, "rtc_src");
540 dev_err(&pdev->dev, "failed to find rtc source clock\n"); 540 if (IS_ERR(info->rtc_src_clk)) {
541 return PTR_ERR(info->rtc_src_clk); 541 dev_err(&pdev->dev,
542 "failed to find rtc source clock\n");
543 return PTR_ERR(info->rtc_src_clk);
544 }
545 clk_prepare_enable(info->rtc_src_clk);
542 } 546 }
543 clk_prepare_enable(info->rtc_src_clk);
544
545 547
546 /* check to see if everything is setup correctly */ 548 /* check to see if everything is setup correctly */
547 if (info->data->enable) 549 if (info->data->enable)
diff --git a/drivers/s390/char/Kconfig b/drivers/s390/char/Kconfig
index dc24ecfac2d1..db2cb1f8a1b5 100644
--- a/drivers/s390/char/Kconfig
+++ b/drivers/s390/char/Kconfig
@@ -105,7 +105,7 @@ config SCLP_ASYNC
105config HMC_DRV 105config HMC_DRV
106 def_tristate m 106 def_tristate m
107 prompt "Support for file transfers from HMC drive CD/DVD-ROM" 107 prompt "Support for file transfers from HMC drive CD/DVD-ROM"
108 depends on 64BIT 108 depends on S390 && 64BIT
109 select CRC16 109 select CRC16
110 help 110 help
111 This option enables support for file transfers from a Hardware 111 This option enables support for file transfers from a Hardware
diff --git a/drivers/s390/kvm/virtio_ccw.c b/drivers/s390/kvm/virtio_ccw.c
index 6cbe6ef3c889..bda52f18e967 100644
--- a/drivers/s390/kvm/virtio_ccw.c
+++ b/drivers/s390/kvm/virtio_ccw.c
@@ -888,7 +888,6 @@ static void virtio_ccw_int_handler(struct ccw_device *cdev,
888 struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev); 888 struct virtio_ccw_device *vcdev = dev_get_drvdata(&cdev->dev);
889 int i; 889 int i;
890 struct virtqueue *vq; 890 struct virtqueue *vq;
891 struct virtio_driver *drv;
892 891
893 if (!vcdev) 892 if (!vcdev)
894 return; 893 return;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_els.c b/drivers/scsi/bnx2fc/bnx2fc_els.c
index ca75c7ca2559..ef355c13ccc4 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_els.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_els.c
@@ -480,9 +480,7 @@ void bnx2fc_rec_compl(struct bnx2fc_els_cb_arg *cb_arg)
480 bnx2fc_initiate_cleanup(orig_io_req); 480 bnx2fc_initiate_cleanup(orig_io_req);
481 /* Post a new IO req with the same sc_cmd */ 481 /* Post a new IO req with the same sc_cmd */
482 BNX2FC_IO_DBG(rec_req, "Post IO request again\n"); 482 BNX2FC_IO_DBG(rec_req, "Post IO request again\n");
483 spin_unlock_bh(&tgt->tgt_lock);
484 rc = bnx2fc_post_io_req(tgt, new_io_req); 483 rc = bnx2fc_post_io_req(tgt, new_io_req);
485 spin_lock_bh(&tgt->tgt_lock);
486 if (!rc) 484 if (!rc)
487 goto free_frame; 485 goto free_frame;
488 BNX2FC_IO_DBG(rec_req, "REC: io post err\n"); 486 BNX2FC_IO_DBG(rec_req, "REC: io post err\n");
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index 79e5c94107a9..72533c58c1f3 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -412,6 +412,7 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
412 struct fc_frame_header *fh; 412 struct fc_frame_header *fh;
413 struct fcoe_rcv_info *fr; 413 struct fcoe_rcv_info *fr;
414 struct fcoe_percpu_s *bg; 414 struct fcoe_percpu_s *bg;
415 struct sk_buff *tmp_skb;
415 unsigned short oxid; 416 unsigned short oxid;
416 417
417 interface = container_of(ptype, struct bnx2fc_interface, 418 interface = container_of(ptype, struct bnx2fc_interface,
@@ -424,6 +425,12 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
424 goto err; 425 goto err;
425 } 426 }
426 427
428 tmp_skb = skb_share_check(skb, GFP_ATOMIC);
429 if (!tmp_skb)
430 goto err;
431
432 skb = tmp_skb;
433
427 if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) { 434 if (unlikely(eth_hdr(skb)->h_proto != htons(ETH_P_FCOE))) {
428 printk(KERN_ERR PFX "bnx2fc_rcv: Wrong FC type frame\n"); 435 printk(KERN_ERR PFX "bnx2fc_rcv: Wrong FC type frame\n");
429 goto err; 436 goto err;
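
Note: skb_share_check() is the standard way for a receive handler to make sure it owns the buffer before modifying it: if the skb is shared it returns a private clone, otherwise it returns the original, and it returns NULL when the clone allocation fails (having already freed the original in that case). A schematic of the semantics relied on here, with error handling shown only in outline:

/* Work on a private copy if the skb is shared. */
skb = skb_share_check(skb, GFP_ATOMIC);
if (!skb)
	return -1;	/* clone failed; the frame has already been freed */
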
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 0679782d9d15..5b99844ef6bf 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -1894,18 +1894,24 @@ int bnx2fc_queuecommand(struct Scsi_Host *host,
1894 goto exit_qcmd; 1894 goto exit_qcmd;
1895 } 1895 }
1896 } 1896 }
1897
1898 spin_lock_bh(&tgt->tgt_lock);
1899
1897 io_req = bnx2fc_cmd_alloc(tgt); 1900 io_req = bnx2fc_cmd_alloc(tgt);
1898 if (!io_req) { 1901 if (!io_req) {
1899 rc = SCSI_MLQUEUE_HOST_BUSY; 1902 rc = SCSI_MLQUEUE_HOST_BUSY;
1900 goto exit_qcmd; 1903 goto exit_qcmd_tgtlock;
1901 } 1904 }
1902 io_req->sc_cmd = sc_cmd; 1905 io_req->sc_cmd = sc_cmd;
1903 1906
1904 if (bnx2fc_post_io_req(tgt, io_req)) { 1907 if (bnx2fc_post_io_req(tgt, io_req)) {
1905 printk(KERN_ERR PFX "Unable to post io_req\n"); 1908 printk(KERN_ERR PFX "Unable to post io_req\n");
1906 rc = SCSI_MLQUEUE_HOST_BUSY; 1909 rc = SCSI_MLQUEUE_HOST_BUSY;
1907 goto exit_qcmd; 1910 goto exit_qcmd_tgtlock;
1908 } 1911 }
1912
1913exit_qcmd_tgtlock:
1914 spin_unlock_bh(&tgt->tgt_lock);
1909exit_qcmd: 1915exit_qcmd:
1910 return rc; 1916 return rc;
1911} 1917}
@@ -2020,6 +2026,8 @@ int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
2020 int task_idx, index; 2026 int task_idx, index;
2021 u16 xid; 2027 u16 xid;
2022 2028
2029 /* bnx2fc_post_io_req() is called with the tgt_lock held */
2030
2023 /* Initialize rest of io_req fields */ 2031 /* Initialize rest of io_req fields */
2024 io_req->cmd_type = BNX2FC_SCSI_CMD; 2032 io_req->cmd_type = BNX2FC_SCSI_CMD;
2025 io_req->port = port; 2033 io_req->port = port;
@@ -2047,9 +2055,7 @@ int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
2047 /* Build buffer descriptor list for firmware from sg list */ 2055 /* Build buffer descriptor list for firmware from sg list */
2048 if (bnx2fc_build_bd_list_from_sg(io_req)) { 2056 if (bnx2fc_build_bd_list_from_sg(io_req)) {
2049 printk(KERN_ERR PFX "BD list creation failed\n"); 2057 printk(KERN_ERR PFX "BD list creation failed\n");
2050 spin_lock_bh(&tgt->tgt_lock);
2051 kref_put(&io_req->refcount, bnx2fc_cmd_release); 2058 kref_put(&io_req->refcount, bnx2fc_cmd_release);
2052 spin_unlock_bh(&tgt->tgt_lock);
2053 return -EAGAIN; 2059 return -EAGAIN;
2054 } 2060 }
2055 2061
@@ -2061,19 +2067,15 @@ int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
2061 task = &(task_page[index]); 2067 task = &(task_page[index]);
2062 bnx2fc_init_task(io_req, task); 2068 bnx2fc_init_task(io_req, task);
2063 2069
2064 spin_lock_bh(&tgt->tgt_lock);
2065
2066 if (tgt->flush_in_prog) { 2070 if (tgt->flush_in_prog) {
2067 printk(KERN_ERR PFX "Flush in progress..Host Busy\n"); 2071 printk(KERN_ERR PFX "Flush in progress..Host Busy\n");
2068 kref_put(&io_req->refcount, bnx2fc_cmd_release); 2072 kref_put(&io_req->refcount, bnx2fc_cmd_release);
2069 spin_unlock_bh(&tgt->tgt_lock);
2070 return -EAGAIN; 2073 return -EAGAIN;
2071 } 2074 }
2072 2075
2073 if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) { 2076 if (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags)) {
2074 printk(KERN_ERR PFX "Session not ready...post_io\n"); 2077 printk(KERN_ERR PFX "Session not ready...post_io\n");
2075 kref_put(&io_req->refcount, bnx2fc_cmd_release); 2078 kref_put(&io_req->refcount, bnx2fc_cmd_release);
2076 spin_unlock_bh(&tgt->tgt_lock);
2077 return -EAGAIN; 2079 return -EAGAIN;
2078 } 2080 }
2079 2081
@@ -2091,6 +2093,5 @@ int bnx2fc_post_io_req(struct bnx2fc_rport *tgt,
2091 2093
2092 /* Ring doorbell */ 2094 /* Ring doorbell */
2093 bnx2fc_ring_doorbell(tgt); 2095 bnx2fc_ring_doorbell(tgt);
2094 spin_unlock_bh(&tgt->tgt_lock);
2095 return 0; 2096 return 0;
2096} 2097}
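
Note: taken together, the bnx2fc_els.c and bnx2fc_io.c hunks change the locking contract: bnx2fc_post_io_req() no longer takes tgt->tgt_lock itself, and every caller must already hold it, which closes the window where the lock was dropped and retaken around command allocation and posting. A sketch of the calling convention after this patch, mirroring the reworked queuecommand path:

spin_lock_bh(&tgt->tgt_lock);

io_req = bnx2fc_cmd_alloc(tgt);
if (io_req) {
	io_req->sc_cmd = sc_cmd;
	/* Called with tgt_lock held; its error paths return without
	 * unlocking, so the caller always performs the unlock. */
	rc = bnx2fc_post_io_req(tgt, io_req);
}

spin_unlock_bh(&tgt->tgt_lock);
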
diff --git a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
index 3e0a0d315f72..15081257cfc8 100644
--- a/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
+++ b/drivers/scsi/cxgbi/cxgb4i/cxgb4i.c
@@ -828,6 +828,8 @@ static void do_act_open_rpl(struct cxgbi_device *cdev, struct sk_buff *skb)
828 if (status == CPL_ERR_RTX_NEG_ADVICE) 828 if (status == CPL_ERR_RTX_NEG_ADVICE)
829 goto rel_skb; 829 goto rel_skb;
830 830
831 module_put(THIS_MODULE);
832
831 if (status && status != CPL_ERR_TCAM_FULL && 833 if (status && status != CPL_ERR_TCAM_FULL &&
832 status != CPL_ERR_CONN_EXIST && 834 status != CPL_ERR_CONN_EXIST &&
833 status != CPL_ERR_ARP_MISS) 835 status != CPL_ERR_ARP_MISS)
@@ -936,20 +938,23 @@ static void do_abort_req_rss(struct cxgbi_device *cdev, struct sk_buff *skb)
936 cxgbi_sock_get(csk); 938 cxgbi_sock_get(csk);
937 spin_lock_bh(&csk->lock); 939 spin_lock_bh(&csk->lock);
938 940
939 if (!cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD)) { 941 cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD);
940 cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD); 942
941 cxgbi_sock_set_state(csk, CTP_ABORTING); 943 if (!cxgbi_sock_flag(csk, CTPF_TX_DATA_SENT)) {
942 goto done; 944 send_tx_flowc_wr(csk);
945 cxgbi_sock_set_flag(csk, CTPF_TX_DATA_SENT);
943 } 946 }
944 947
945 cxgbi_sock_clear_flag(csk, CTPF_ABORT_REQ_RCVD); 948 cxgbi_sock_set_flag(csk, CTPF_ABORT_REQ_RCVD);
949 cxgbi_sock_set_state(csk, CTP_ABORTING);
950
946 send_abort_rpl(csk, rst_status); 951 send_abort_rpl(csk, rst_status);
947 952
948 if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) { 953 if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
949 csk->err = abort_status_to_errno(csk, req->status, &rst_status); 954 csk->err = abort_status_to_errno(csk, req->status, &rst_status);
950 cxgbi_sock_closed(csk); 955 cxgbi_sock_closed(csk);
951 } 956 }
952done: 957
953 spin_unlock_bh(&csk->lock); 958 spin_unlock_bh(&csk->lock);
954 cxgbi_sock_put(csk); 959 cxgbi_sock_put(csk);
955rel_skb: 960rel_skb:
diff --git a/drivers/scsi/cxgbi/libcxgbi.c b/drivers/scsi/cxgbi/libcxgbi.c
index 54fa6e0bc1bb..7da59c38a69e 100644
--- a/drivers/scsi/cxgbi/libcxgbi.c
+++ b/drivers/scsi/cxgbi/libcxgbi.c
@@ -399,6 +399,35 @@ EXPORT_SYMBOL_GPL(cxgbi_hbas_add);
399 * If the source port is outside our allocation range, the caller is 399 * If the source port is outside our allocation range, the caller is
400 * responsible for keeping track of their port usage. 400 * responsible for keeping track of their port usage.
401 */ 401 */
402
403static struct cxgbi_sock *find_sock_on_port(struct cxgbi_device *cdev,
404 unsigned char port_id)
405{
406 struct cxgbi_ports_map *pmap = &cdev->pmap;
407 unsigned int i;
408 unsigned int used;
409
410 if (!pmap->max_connect || !pmap->used)
411 return NULL;
412
413 spin_lock_bh(&pmap->lock);
414 used = pmap->used;
415 for (i = 0; used && i < pmap->max_connect; i++) {
416 struct cxgbi_sock *csk = pmap->port_csk[i];
417
418 if (csk) {
419 if (csk->port_id == port_id) {
420 spin_unlock_bh(&pmap->lock);
421 return csk;
422 }
423 used--;
424 }
425 }
426 spin_unlock_bh(&pmap->lock);
427
428 return NULL;
429}
430
402static int sock_get_port(struct cxgbi_sock *csk) 431static int sock_get_port(struct cxgbi_sock *csk)
403{ 432{
404 struct cxgbi_device *cdev = csk->cdev; 433 struct cxgbi_device *cdev = csk->cdev;
@@ -749,6 +778,7 @@ static struct cxgbi_sock *cxgbi_check_route6(struct sockaddr *dst_addr)
749 csk->daddr6.sin6_addr = daddr6->sin6_addr; 778 csk->daddr6.sin6_addr = daddr6->sin6_addr;
750 csk->daddr6.sin6_port = daddr6->sin6_port; 779 csk->daddr6.sin6_port = daddr6->sin6_port;
751 csk->daddr6.sin6_family = daddr6->sin6_family; 780 csk->daddr6.sin6_family = daddr6->sin6_family;
781 csk->saddr6.sin6_family = daddr6->sin6_family;
752 csk->saddr6.sin6_addr = pref_saddr; 782 csk->saddr6.sin6_addr = pref_saddr;
753 783
754 neigh_release(n); 784 neigh_release(n);
@@ -786,7 +816,7 @@ static void cxgbi_inform_iscsi_conn_closing(struct cxgbi_sock *csk)
786 read_lock_bh(&csk->callback_lock); 816 read_lock_bh(&csk->callback_lock);
787 if (csk->user_data) 817 if (csk->user_data)
788 iscsi_conn_failure(csk->user_data, 818 iscsi_conn_failure(csk->user_data,
789 ISCSI_ERR_CONN_FAILED); 819 ISCSI_ERR_TCP_CONN_CLOSE);
790 read_unlock_bh(&csk->callback_lock); 820 read_unlock_bh(&csk->callback_lock);
791 } 821 }
792} 822}
@@ -875,18 +905,16 @@ void cxgbi_sock_rcv_abort_rpl(struct cxgbi_sock *csk)
875{ 905{
876 cxgbi_sock_get(csk); 906 cxgbi_sock_get(csk);
877 spin_lock_bh(&csk->lock); 907 spin_lock_bh(&csk->lock);
908
909 cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_RCVD);
878 if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) { 910 if (cxgbi_sock_flag(csk, CTPF_ABORT_RPL_PENDING)) {
879 if (!cxgbi_sock_flag(csk, CTPF_ABORT_RPL_RCVD)) 911 cxgbi_sock_clear_flag(csk, CTPF_ABORT_RPL_PENDING);
880 cxgbi_sock_set_flag(csk, CTPF_ABORT_RPL_RCVD); 912 if (cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD))
881 else { 913 pr_err("csk 0x%p,%u,0x%lx,%u,ABT_RPL_RSS.\n",
882 cxgbi_sock_clear_flag(csk, CTPF_ABORT_RPL_RCVD); 914 csk, csk->state, csk->flags, csk->tid);
883 cxgbi_sock_clear_flag(csk, CTPF_ABORT_RPL_PENDING); 915 cxgbi_sock_closed(csk);
884 if (cxgbi_sock_flag(csk, CTPF_ABORT_REQ_RCVD))
885 pr_err("csk 0x%p,%u,0x%lx,%u,ABT_RPL_RSS.\n",
886 csk, csk->state, csk->flags, csk->tid);
887 cxgbi_sock_closed(csk);
888 }
889 } 916 }
917
890 spin_unlock_bh(&csk->lock); 918 spin_unlock_bh(&csk->lock);
891 cxgbi_sock_put(csk); 919 cxgbi_sock_put(csk);
892} 920}
@@ -2647,12 +2675,14 @@ int cxgbi_get_host_param(struct Scsi_Host *shost, enum iscsi_host_param param,
2647 break; 2675 break;
2648 case ISCSI_HOST_PARAM_IPADDRESS: 2676 case ISCSI_HOST_PARAM_IPADDRESS:
2649 { 2677 {
2650 __be32 addr; 2678 struct cxgbi_sock *csk = find_sock_on_port(chba->cdev,
2651 2679 chba->port_id);
2652 addr = cxgbi_get_iscsi_ipv4(chba); 2680 if (csk) {
2653 len = sprintf(buf, "%pI4", &addr); 2681 len = sprintf(buf, "%pIS",
2682 (struct sockaddr *)&csk->saddr);
2683 }
2654 log_debug(1 << CXGBI_DBG_ISCSI, 2684 log_debug(1 << CXGBI_DBG_ISCSI,
2655 "hba %s, ipv4 %pI4.\n", chba->ndev->name, &addr); 2685 "hba %s, addr %s.\n", chba->ndev->name, buf);
2656 break; 2686 break;
2657 } 2687 }
2658 default: 2688 default:
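
Note: reporting ISCSI_HOST_PARAM_IPADDRESS now looks up a live connection on the port and prints its bound source address with the %pIS sockaddr format specifier, which handles both AF_INET and AF_INET6, whereas the removed cxgbi_get_iscsi_ipv4() could only ever report the IPv4 address. The assumption behind the new path, in isolation:

struct cxgbi_sock *csk = find_sock_on_port(chba->cdev, chba->port_id);

if (csk)
	len = sprintf(buf, "%pIS", (struct sockaddr *)&csk->saddr);	/* v4 or v6 */
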
diff --git a/drivers/scsi/cxgbi/libcxgbi.h b/drivers/scsi/cxgbi/libcxgbi.h
index 1d98fad6a0ab..2c7cb1c0c453 100644
--- a/drivers/scsi/cxgbi/libcxgbi.h
+++ b/drivers/scsi/cxgbi/libcxgbi.h
@@ -700,11 +700,6 @@ static inline void cxgbi_set_iscsi_ipv4(struct cxgbi_hba *chba, __be32 ipaddr)
700 chba->ndev->name); 700 chba->ndev->name);
701} 701}
702 702
703static inline __be32 cxgbi_get_iscsi_ipv4(struct cxgbi_hba *chba)
704{
705 return chba->ipv4addr;
706}
707
708struct cxgbi_device *cxgbi_device_register(unsigned int, unsigned int); 703struct cxgbi_device *cxgbi_device_register(unsigned int, unsigned int);
709void cxgbi_device_unregister(struct cxgbi_device *); 704void cxgbi_device_unregister(struct cxgbi_device *);
710void cxgbi_device_unregister_all(unsigned int flag); 705void cxgbi_device_unregister_all(unsigned int flag);
diff --git a/drivers/scsi/device_handler/scsi_dh_alua.c b/drivers/scsi/device_handler/scsi_dh_alua.c
index e99507ed0e3c..fd78bdc53528 100644
--- a/drivers/scsi/device_handler/scsi_dh_alua.c
+++ b/drivers/scsi/device_handler/scsi_dh_alua.c
@@ -474,6 +474,13 @@ static int alua_check_sense(struct scsi_device *sdev,
474 * LUN Not Ready -- Offline 474 * LUN Not Ready -- Offline
475 */ 475 */
476 return SUCCESS; 476 return SUCCESS;
477 if (sdev->allow_restart &&
478 sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x02)
479 /*
480 * if the device is not started, we need to wake
481 * the error handler to start the motor
482 */
483 return FAILED;
477 break; 484 break;
478 case UNIT_ATTENTION: 485 case UNIT_ATTENTION:
479 if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00) 486 if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00)
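
Note: the new NOT READY branch keys off ASC/ASCQ 0x04/0x02, "Logical unit not ready, initializing command required (start unit)". Returning FAILED routes the command into the SCSI error handler, which can issue START UNIT for devices that opted in via allow_restart; a comment-style summary of the intent (not new driver code):

/* NOT_READY, ASC 0x04 / ASCQ 0x02: the device wants a START UNIT.
 * FAILED  -> escalate to the error handler so it can spin the unit up
 *            when sdev->allow_restart is set.
 * SUCCESS -> would treat the condition as handled without ever
 *            restarting the unit. */
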
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index f6a69a3b1b3f..5640ad1c8214 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -4453,7 +4453,7 @@ static int megasas_init_fw(struct megasas_instance *instance)
4453 instance->msixentry[i].entry = i; 4453 instance->msixentry[i].entry = i;
4454 i = pci_enable_msix_range(instance->pdev, instance->msixentry, 4454 i = pci_enable_msix_range(instance->pdev, instance->msixentry,
4455 1, instance->msix_vectors); 4455 1, instance->msix_vectors);
4456 if (i) 4456 if (i > 0)
4457 instance->msix_vectors = i; 4457 instance->msix_vectors = i;
4458 else 4458 else
4459 instance->msix_vectors = 0; 4459 instance->msix_vectors = 0;
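
Note: pci_enable_msix_range() returns the number of vectors actually allocated (at least the requested minimum) or a negative errno, so the old "if (i)" treated a negative errno as a valid vector count. The corrected pattern in isolation, with variable names as placeholders mirroring the driver's locals:

ret = pci_enable_msix_range(pdev, entries, 1, max_vecs);
if (ret > 0)
	nvec = ret;	/* MSI-X enabled with ret vectors */
else
	nvec = 0;	/* negative errno: fall back to legacy INTx */
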
diff --git a/drivers/scsi/osd/Kbuild b/drivers/scsi/osd/Kbuild
index 5fd73d77c3af..58cecd45b0f5 100644
--- a/drivers/scsi/osd/Kbuild
+++ b/drivers/scsi/osd/Kbuild
@@ -4,7 +4,7 @@
4# Copyright (C) 2008 Panasas Inc. All rights reserved. 4# Copyright (C) 2008 Panasas Inc. All rights reserved.
5# 5#
6# Authors: 6# Authors:
7# Boaz Harrosh <bharrosh@panasas.com> 7# Boaz Harrosh <ooo@electrozaur.com>
8# Benny Halevy <bhalevy@panasas.com> 8# Benny Halevy <bhalevy@panasas.com>
9# 9#
10# This program is free software; you can redistribute it and/or modify 10# This program is free software; you can redistribute it and/or modify
diff --git a/drivers/scsi/osd/Kconfig b/drivers/scsi/osd/Kconfig
index a0703514eb0f..347cc5e33749 100644
--- a/drivers/scsi/osd/Kconfig
+++ b/drivers/scsi/osd/Kconfig
@@ -4,7 +4,7 @@
4# Copyright (C) 2008 Panasas Inc. All rights reserved. 4# Copyright (C) 2008 Panasas Inc. All rights reserved.
5# 5#
6# Authors: 6# Authors:
7# Boaz Harrosh <bharrosh@panasas.com> 7# Boaz Harrosh <ooo@electrozaur.com>
8# Benny Halevy <bhalevy@panasas.com> 8# Benny Halevy <bhalevy@panasas.com>
9# 9#
10# This program is free software; you can redistribute it and/or modify 10# This program is free software; you can redistribute it and/or modify
diff --git a/drivers/scsi/osd/osd_debug.h b/drivers/scsi/osd/osd_debug.h
index 579e491f11df..26341261bb5c 100644
--- a/drivers/scsi/osd/osd_debug.h
+++ b/drivers/scsi/osd/osd_debug.h
@@ -4,7 +4,7 @@
4 * Copyright (C) 2008 Panasas Inc. All rights reserved. 4 * Copyright (C) 2008 Panasas Inc. All rights reserved.
5 * 5 *
6 * Authors: 6 * Authors:
7 * Boaz Harrosh <bharrosh@panasas.com> 7 * Boaz Harrosh <ooo@electrozaur.com>
8 * Benny Halevy <bhalevy@panasas.com> 8 * Benny Halevy <bhalevy@panasas.com>
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index fd19fd8468ac..488c3929f19a 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -7,7 +7,7 @@
7 * Copyright (C) 2008 Panasas Inc. All rights reserved. 7 * Copyright (C) 2008 Panasas Inc. All rights reserved.
8 * 8 *
9 * Authors: 9 * Authors:
10 * Boaz Harrosh <bharrosh@panasas.com> 10 * Boaz Harrosh <ooo@electrozaur.com>
11 * Benny Halevy <bhalevy@panasas.com> 11 * Benny Halevy <bhalevy@panasas.com>
12 * 12 *
13 * This program is free software; you can redistribute it and/or modify 13 * This program is free software; you can redistribute it and/or modify
@@ -57,7 +57,7 @@
57 57
58enum { OSD_REQ_RETRIES = 1 }; 58enum { OSD_REQ_RETRIES = 1 };
59 59
60MODULE_AUTHOR("Boaz Harrosh <bharrosh@panasas.com>"); 60MODULE_AUTHOR("Boaz Harrosh <ooo@electrozaur.com>");
61MODULE_DESCRIPTION("open-osd initiator library libosd.ko"); 61MODULE_DESCRIPTION("open-osd initiator library libosd.ko");
62MODULE_LICENSE("GPL"); 62MODULE_LICENSE("GPL");
63 63
diff --git a/drivers/scsi/osd/osd_uld.c b/drivers/scsi/osd/osd_uld.c
index e1d9a4c4c4b3..92cdd4b06526 100644
--- a/drivers/scsi/osd/osd_uld.c
+++ b/drivers/scsi/osd/osd_uld.c
@@ -10,7 +10,7 @@
10 * Copyright (C) 2008 Panasas Inc. All rights reserved. 10 * Copyright (C) 2008 Panasas Inc. All rights reserved.
11 * 11 *
12 * Authors: 12 * Authors:
13 * Boaz Harrosh <bharrosh@panasas.com> 13 * Boaz Harrosh <ooo@electrozaur.com>
14 * Benny Halevy <bhalevy@panasas.com> 14 * Benny Halevy <bhalevy@panasas.com>
15 * 15 *
16 * This program is free software; you can redistribute it and/or modify 16 * This program is free software; you can redistribute it and/or modify
@@ -74,7 +74,7 @@
74static const char osd_name[] = "osd"; 74static const char osd_name[] = "osd";
75static const char *osd_version_string = "open-osd 0.2.1"; 75static const char *osd_version_string = "open-osd 0.2.1";
76 76
77MODULE_AUTHOR("Boaz Harrosh <bharrosh@panasas.com>"); 77MODULE_AUTHOR("Boaz Harrosh <ooo@electrozaur.com>");
78MODULE_DESCRIPTION("open-osd Upper-Layer-Driver osd.ko"); 78MODULE_DESCRIPTION("open-osd Upper-Layer-Driver osd.ko");
79MODULE_LICENSE("GPL"); 79MODULE_LICENSE("GPL");
80MODULE_ALIAS_CHARDEV_MAJOR(SCSI_OSD_MAJOR); 80MODULE_ALIAS_CHARDEV_MAJOR(SCSI_OSD_MAJOR);
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 829752cfd73f..a902fa1db7af 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -112,6 +112,7 @@ static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha,
112 struct qla_tgt_cmd *cmd); 112 struct qla_tgt_cmd *cmd);
113static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha, 113static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
114 struct atio_from_isp *atio, uint16_t status, int qfull); 114 struct atio_from_isp *atio, uint16_t status, int qfull);
115static void qlt_disable_vha(struct scsi_qla_host *vha);
115/* 116/*
116 * Global Variables 117 * Global Variables
117 */ 118 */
@@ -210,7 +211,7 @@ static inline void qlt_decr_num_pend_cmds(struct scsi_qla_host *vha)
210 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags); 211 spin_unlock_irqrestore(&vha->hw->tgt.q_full_lock, flags);
211} 212}
212 213
213void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha, 214static void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
214 struct atio_from_isp *atio) 215 struct atio_from_isp *atio)
215{ 216{
216 ql_dbg(ql_dbg_tgt, vha, 0xe072, 217 ql_dbg(ql_dbg_tgt, vha, 0xe072,
@@ -433,7 +434,7 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
433#if 0 /* FIXME: Re-enable Global event handling.. */ 434#if 0 /* FIXME: Re-enable Global event handling.. */
434 /* Global event */ 435 /* Global event */
435 atomic_inc(&ha->tgt.qla_tgt->tgt_global_resets_count); 436 atomic_inc(&ha->tgt.qla_tgt->tgt_global_resets_count);
436 qlt_clear_tgt_db(ha->tgt.qla_tgt, 1); 437 qlt_clear_tgt_db(ha->tgt.qla_tgt);
437 if (!list_empty(&ha->tgt.qla_tgt->sess_list)) { 438 if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
438 sess = list_entry(ha->tgt.qla_tgt->sess_list.next, 439 sess = list_entry(ha->tgt.qla_tgt->sess_list.next,
439 typeof(*sess), sess_list_entry); 440 typeof(*sess), sess_list_entry);
@@ -515,7 +516,7 @@ static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
515} 516}
516 517
517/* ha->hardware_lock supposed to be held on entry */ 518/* ha->hardware_lock supposed to be held on entry */
518static void qlt_clear_tgt_db(struct qla_tgt *tgt, bool local_only) 519static void qlt_clear_tgt_db(struct qla_tgt *tgt)
519{ 520{
520 struct qla_tgt_sess *sess; 521 struct qla_tgt_sess *sess;
521 522
@@ -867,7 +868,7 @@ int qlt_stop_phase1(struct qla_tgt *tgt)
867 mutex_lock(&vha->vha_tgt.tgt_mutex); 868 mutex_lock(&vha->vha_tgt.tgt_mutex);
868 spin_lock_irqsave(&ha->hardware_lock, flags); 869 spin_lock_irqsave(&ha->hardware_lock, flags);
869 tgt->tgt_stop = 1; 870 tgt->tgt_stop = 1;
870 qlt_clear_tgt_db(tgt, true); 871 qlt_clear_tgt_db(tgt);
871 spin_unlock_irqrestore(&ha->hardware_lock, flags); 872 spin_unlock_irqrestore(&ha->hardware_lock, flags);
872 mutex_unlock(&vha->vha_tgt.tgt_mutex); 873 mutex_unlock(&vha->vha_tgt.tgt_mutex);
873 mutex_unlock(&qla_tgt_mutex); 874 mutex_unlock(&qla_tgt_mutex);
@@ -1462,12 +1463,13 @@ out_err:
1462 return -1; 1463 return -1;
1463} 1464}
1464 1465
1465static inline void qlt_unmap_sg(struct scsi_qla_host *vha, 1466static void qlt_unmap_sg(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
1466 struct qla_tgt_cmd *cmd)
1467{ 1467{
1468 struct qla_hw_data *ha = vha->hw; 1468 struct qla_hw_data *ha = vha->hw;
1469 1469
1470 BUG_ON(!cmd->sg_mapped); 1470 if (!cmd->sg_mapped)
1471 return;
1472
1471 pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction); 1473 pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
1472 cmd->sg_mapped = 0; 1474 cmd->sg_mapped = 0;
1473 1475
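Replacing the BUG_ON() with an early return makes qlt_unmap_sg() an idempotent cleanup helper, which is why the following hunks can drop the "if (cmd->sg_mapped)" guards at every call site. The shape, as a hedged sketch with hypothetical names:

    struct example_cmd {                    /* stand-in for qla_tgt_cmd */
            struct pci_dev *pdev;
            struct scatterlist *sg;
            int sg_cnt;
            int dma_dir;
            unsigned int sg_mapped:1;
    };

    static void example_unmap_sg(struct example_cmd *cmd)
    {
            if (!cmd->sg_mapped)
                    return;         /* nothing mapped: silently do nothing */

            pci_unmap_sg(cmd->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_dir);
            cmd->sg_mapped = 0;     /* safe to call again */
    }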
@@ -2428,8 +2430,7 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
2428 return 0; 2430 return 0;
2429 2431
2430out_unmap_unlock: 2432out_unmap_unlock:
2431 if (cmd->sg_mapped) 2433 qlt_unmap_sg(vha, cmd);
2432 qlt_unmap_sg(vha, cmd);
2433 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2434 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2434 2435
2435 return res; 2436 return res;
@@ -2506,8 +2507,7 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
2506 return res; 2507 return res;
2507 2508
2508out_unlock_free_unmap: 2509out_unlock_free_unmap:
2509 if (cmd->sg_mapped) 2510 qlt_unmap_sg(vha, cmd);
2510 qlt_unmap_sg(vha, cmd);
2511 spin_unlock_irqrestore(&ha->hardware_lock, flags); 2511 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2512 2512
2513 return res; 2513 return res;
@@ -2741,8 +2741,7 @@ done:
2741 if (!ha_locked && !in_interrupt()) 2741 if (!ha_locked && !in_interrupt())
2742 msleep(250); /* just in case */ 2742 msleep(250); /* just in case */
2743 2743
2744 if (cmd->sg_mapped) 2744 qlt_unmap_sg(vha, cmd);
2745 qlt_unmap_sg(vha, cmd);
2746 vha->hw->tgt.tgt_ops->free_cmd(cmd); 2745 vha->hw->tgt.tgt_ops->free_cmd(cmd);
2747 } 2746 }
2748 return; 2747 return;
@@ -3087,8 +3086,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
3087 tfo = se_cmd->se_tfo; 3086 tfo = se_cmd->se_tfo;
3088 cmd->cmd_sent_to_fw = 0; 3087 cmd->cmd_sent_to_fw = 0;
3089 3088
3090 if (cmd->sg_mapped) 3089 qlt_unmap_sg(vha, cmd);
3091 qlt_unmap_sg(vha, cmd);
3092 3090
3093 if (unlikely(status != CTIO_SUCCESS)) { 3091 if (unlikely(status != CTIO_SUCCESS)) {
3094 switch (status & 0xFFFF) { 3092 switch (status & 0xFFFF) {
@@ -5343,7 +5341,7 @@ void qlt_lport_deregister(struct scsi_qla_host *vha)
5343EXPORT_SYMBOL(qlt_lport_deregister); 5341EXPORT_SYMBOL(qlt_lport_deregister);
5344 5342
5345/* Must be called under HW lock */ 5343/* Must be called under HW lock */
5346void qlt_set_mode(struct scsi_qla_host *vha) 5344static void qlt_set_mode(struct scsi_qla_host *vha)
5347{ 5345{
5348 struct qla_hw_data *ha = vha->hw; 5346 struct qla_hw_data *ha = vha->hw;
5349 5347
@@ -5364,7 +5362,7 @@ void qlt_set_mode(struct scsi_qla_host *vha)
5364} 5362}
5365 5363
5366/* Must be called under HW lock */ 5364/* Must be called under HW lock */
5367void qlt_clear_mode(struct scsi_qla_host *vha) 5365static void qlt_clear_mode(struct scsi_qla_host *vha)
5368{ 5366{
5369 struct qla_hw_data *ha = vha->hw; 5367 struct qla_hw_data *ha = vha->hw;
5370 5368
@@ -5428,8 +5426,7 @@ EXPORT_SYMBOL(qlt_enable_vha);
5428 * 5426 *
5429 * Disable Target Mode and reset the adapter 5427 * Disable Target Mode and reset the adapter
5430 */ 5428 */
5431void 5429static void qlt_disable_vha(struct scsi_qla_host *vha)
5432qlt_disable_vha(struct scsi_qla_host *vha)
5433{ 5430{
5434 struct qla_hw_data *ha = vha->hw; 5431 struct qla_hw_data *ha = vha->hw;
5435 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 5432 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 8ff330f7d6f5..332086776dfe 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -1001,11 +1001,11 @@ struct qla_tgt_prm {
1001 struct qla_tgt *tgt; 1001 struct qla_tgt *tgt;
1002 void *pkt; 1002 void *pkt;
1003 struct scatterlist *sg; /* cmd data buffer SG vector */ 1003 struct scatterlist *sg; /* cmd data buffer SG vector */
1004 unsigned char *sense_buffer;
1004 int seg_cnt; 1005 int seg_cnt;
1005 int req_cnt; 1006 int req_cnt;
1006 uint16_t rq_result; 1007 uint16_t rq_result;
1007 uint16_t scsi_status; 1008 uint16_t scsi_status;
1008 unsigned char *sense_buffer;
1009 int sense_buffer_len; 1009 int sense_buffer_len;
1010 int residual; 1010 int residual;
1011 int add_status_pkt; 1011 int add_status_pkt;
@@ -1033,10 +1033,6 @@ struct qla_tgt_srr_ctio {
1033 1033
1034 1034
1035extern struct qla_tgt_data qla_target; 1035extern struct qla_tgt_data qla_target;
1036/*
1037 * Internal function prototypes
1038 */
1039void qlt_disable_vha(struct scsi_qla_host *);
1040 1036
1041/* 1037/*
1042 * Function prototypes for qla_target.c logic used by qla2xxx LLD code. 1038 * Function prototypes for qla_target.c logic used by qla2xxx LLD code.
@@ -1049,8 +1045,6 @@ extern void qlt_lport_deregister(struct scsi_qla_host *);
1049extern void qlt_unreg_sess(struct qla_tgt_sess *); 1045extern void qlt_unreg_sess(struct qla_tgt_sess *);
1050extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *); 1046extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *);
1051extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *); 1047extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *);
1052extern void qlt_set_mode(struct scsi_qla_host *ha);
1053extern void qlt_clear_mode(struct scsi_qla_host *ha);
1054extern int __init qlt_init(void); 1048extern int __init qlt_init(void);
1055extern void qlt_exit(void); 1049extern void qlt_exit(void);
1056extern void qlt_update_vp_map(struct scsi_qla_host *, int); 1050extern void qlt_update_vp_map(struct scsi_qla_host *, int);
@@ -1083,13 +1077,9 @@ static inline void qla_reverse_ini_mode(struct scsi_qla_host *ha)
1083/* 1077/*
1084 * Exported symbols from qla_target.c LLD logic used by qla2xxx code.. 1078 * Exported symbols from qla_target.c LLD logic used by qla2xxx code..
1085 */ 1079 */
1086extern void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *,
1087 struct atio_from_isp *);
1088extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *); 1080extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *);
1089extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *); 1081extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *);
1090extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t); 1082extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t);
1091extern int qlt_rdy_to_xfer_dif(struct qla_tgt_cmd *);
1092extern int qlt_xmit_response_dif(struct qla_tgt_cmd *, int, uint8_t);
1093extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *); 1083extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *);
1094extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *); 1084extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *);
1095extern void qlt_free_cmd(struct qla_tgt_cmd *cmd); 1085extern void qlt_free_cmd(struct qla_tgt_cmd *cmd);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index 031b2961c6b7..73f9feecda72 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -786,7 +786,16 @@ static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess)
786 pr_debug("fc_rport domain: port_id 0x%06x\n", nacl->nport_id); 786 pr_debug("fc_rport domain: port_id 0x%06x\n", nacl->nport_id);
787 787
788 node = btree_remove32(&lport->lport_fcport_map, nacl->nport_id); 788 node = btree_remove32(&lport->lport_fcport_map, nacl->nport_id);
789 WARN_ON(node && (node != se_nacl)); 789 if (WARN_ON(node && (node != se_nacl))) {
790 /*
791 * The nacl no longer matches what we think it should be.
792 * Most likely a new dynamic acl has been added while
793 * someone dropped the hardware lock. It clearly is a
794 * bug elsewhere, but this bit can't make things worse.
795 */
796 btree_insert32(&lport->lport_fcport_map, nacl->nport_id,
797 node, GFP_ATOMIC);
798 }
790 799
791 pr_debug("Removed from fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%06x\n", 800 pr_debug("Removed from fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%06x\n",
792 se_nacl, nacl->nport_wwnn, nacl->nport_id); 801 se_nacl, nacl->nport_wwnn, nacl->nport_id);
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 49014a143c6a..c1d04d4d3c6c 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -202,6 +202,7 @@ static struct {
202 {"IOMEGA", "Io20S *F", NULL, BLIST_KEY}, 202 {"IOMEGA", "Io20S *F", NULL, BLIST_KEY},
203 {"INSITE", "Floptical F*8I", NULL, BLIST_KEY}, 203 {"INSITE", "Floptical F*8I", NULL, BLIST_KEY},
204 {"INSITE", "I325VM", NULL, BLIST_KEY}, 204 {"INSITE", "I325VM", NULL, BLIST_KEY},
205 {"Intel", "Multi-Flex", NULL, BLIST_NO_RSOC},
205 {"iRiver", "iFP Mass Driver", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36}, 206 {"iRiver", "iFP Mass Driver", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
206 {"LASOUND", "CDX7405", "3.10", BLIST_MAX5LUN | BLIST_SINGLELUN}, 207 {"LASOUND", "CDX7405", "3.10", BLIST_MAX5LUN | BLIST_SINGLELUN},
207 {"MATSHITA", "PD-1", NULL, BLIST_FORCELUN | BLIST_SINGLELUN}, 208 {"MATSHITA", "PD-1", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 9a6f8468225f..bc5ff6ff9c79 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -459,14 +459,6 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
459 if (! scsi_command_normalize_sense(scmd, &sshdr)) 459 if (! scsi_command_normalize_sense(scmd, &sshdr))
460 return FAILED; /* no valid sense data */ 460 return FAILED; /* no valid sense data */
461 461
462 if (scmd->cmnd[0] == TEST_UNIT_READY && scmd->scsi_done != scsi_eh_done)
463 /*
464 * nasty: for mid-layer issued TURs, we need to return the
465 * actual sense data without any recovery attempt. For eh
466 * issued ones, we need to try to recover and interpret
467 */
468 return SUCCESS;
469
470 scsi_report_sense(sdev, &sshdr); 462 scsi_report_sense(sdev, &sshdr);
471 463
472 if (scsi_sense_is_deferred(&sshdr)) 464 if (scsi_sense_is_deferred(&sshdr))
@@ -482,6 +474,14 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
482 /* handler does not care. Drop down to default handling */ 474 /* handler does not care. Drop down to default handling */
483 } 475 }
484 476
477 if (scmd->cmnd[0] == TEST_UNIT_READY && scmd->scsi_done != scsi_eh_done)
478 /*
479 * nasty: for mid-layer issued TURs, we need to return the
480 * actual sense data without any recovery attempt. For eh
481 * issued ones, we need to try to recover and interpret
482 */
483 return SUCCESS;
484
485 /* 485 /*
486 * Previous logic looked for FILEMARK, EOM or ILI which are 486 * Previous logic looked for FILEMARK, EOM or ILI which are
487 * mainly associated with tapes and returned SUCCESS. 487 * mainly associated with tapes and returned SUCCESS.
@@ -2001,8 +2001,10 @@ static void scsi_restart_operations(struct Scsi_Host *shost)
2001 * is no point trying to lock the door of an off-line device. 2001 * is no point trying to lock the door of an off-line device.
2002 */ 2002 */
2003 shost_for_each_device(sdev, shost) { 2003 shost_for_each_device(sdev, shost) {
2004 if (scsi_device_online(sdev) && sdev->locked) 2004 if (scsi_device_online(sdev) && sdev->was_reset && sdev->locked) {
2005 scsi_eh_lock_door(sdev); 2005 scsi_eh_lock_door(sdev);
2006 sdev->was_reset = 0;
2007 }
2006 } 2008 }
2007 2009
2008 /* 2010 /*
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 9eff8a375132..50a6e1ac8d9c 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1893,6 +1893,11 @@ static int scsi_queue_rq(struct blk_mq_hw_ctx *hctx, struct request *req,
1893 blk_mq_start_request(req); 1893 blk_mq_start_request(req);
1894 } 1894 }
1895 1895
1896 if (blk_queue_tagged(q))
1897 req->cmd_flags |= REQ_QUEUED;
1898 else
1899 req->cmd_flags &= ~REQ_QUEUED;
1900
1896 scsi_init_cmd_errh(cmd); 1901 scsi_init_cmd_errh(cmd);
1897 cmd->scsi_done = scsi_mq_done; 1902 cmd->scsi_done = scsi_mq_done;
1898 1903
diff --git a/drivers/scsi/ufs/ufshcd-pltfrm.c b/drivers/scsi/ufs/ufshcd-pltfrm.c
index 8adf067ff019..1c3467b82566 100644
--- a/drivers/scsi/ufs/ufshcd-pltfrm.c
+++ b/drivers/scsi/ufs/ufshcd-pltfrm.c
@@ -102,7 +102,6 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba)
102 clkfreq = devm_kzalloc(dev, sz * sizeof(*clkfreq), 102 clkfreq = devm_kzalloc(dev, sz * sizeof(*clkfreq),
103 GFP_KERNEL); 103 GFP_KERNEL);
104 if (!clkfreq) { 104 if (!clkfreq) {
105 dev_err(dev, "%s: no memory\n", "freq-table-hz");
106 ret = -ENOMEM; 105 ret = -ENOMEM;
107 goto out; 106 goto out;
108 } 107 }
@@ -112,19 +111,19 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba)
112 if (ret && (ret != -EINVAL)) { 111 if (ret && (ret != -EINVAL)) {
113 dev_err(dev, "%s: error reading array %d\n", 112 dev_err(dev, "%s: error reading array %d\n",
114 "freq-table-hz", ret); 113 "freq-table-hz", ret);
115 goto free_clkfreq; 114 return ret;
116 } 115 }
117 116
118 for (i = 0; i < sz; i += 2) { 117 for (i = 0; i < sz; i += 2) {
119 ret = of_property_read_string_index(np, 118 ret = of_property_read_string_index(np,
120 "clock-names", i/2, (const char **)&name); 119 "clock-names", i/2, (const char **)&name);
121 if (ret) 120 if (ret)
122 goto free_clkfreq; 121 goto out;
123 122
124 clki = devm_kzalloc(dev, sizeof(*clki), GFP_KERNEL); 123 clki = devm_kzalloc(dev, sizeof(*clki), GFP_KERNEL);
125 if (!clki) { 124 if (!clki) {
126 ret = -ENOMEM; 125 ret = -ENOMEM;
127 goto free_clkfreq; 126 goto out;
128 } 127 }
129 128
130 clki->min_freq = clkfreq[i]; 129 clki->min_freq = clkfreq[i];
@@ -134,8 +133,6 @@ static int ufshcd_parse_clock_info(struct ufs_hba *hba)
134 clki->min_freq, clki->max_freq, clki->name); 133 clki->min_freq, clki->max_freq, clki->name);
135 list_add_tail(&clki->list, &hba->clk_list_head); 134 list_add_tail(&clki->list, &hba->clk_list_head);
136 } 135 }
137free_clkfreq:
138 kfree(clkfreq);
139out: 136out:
140 return ret; 137 return ret;
141} 138}
@@ -162,10 +159,8 @@ static int ufshcd_populate_vreg(struct device *dev, const char *name,
162 } 159 }
163 160
164 vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL); 161 vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
165 if (!vreg) { 162 if (!vreg)
166 dev_err(dev, "No memory for %s regulator\n", name); 163 return -ENOMEM;
167 goto out;
168 }
169 164
170 vreg->name = kstrdup(name, GFP_KERNEL); 165 vreg->name = kstrdup(name, GFP_KERNEL);
171 166
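Both hunks in this file lean on the devm_* rules: memory from devm_kzalloc() is released by the driver core when the device goes away, so the old kfree(clkfreq) under the free_clkfreq label was wrong, and a dev_err() on allocation failure is redundant because the allocator already logs out-of-memory conditions. A minimal sketch of the resulting pattern (names illustrative):

    static int example_parse(struct device *dev, int entries)
    {
            u32 *table = devm_kzalloc(dev, entries * sizeof(*table),
                                      GFP_KERNEL);

            if (!table)
                    return -ENOMEM; /* no message, no kfree() on any path */

            /* ... fill and keep using 'table'; devm frees it on detach ... */
            return 0;
    }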
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 497c38a4a866..605ca60e8a10 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -744,6 +744,8 @@ static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
744 if (!ufshcd_is_clkgating_allowed(hba)) 744 if (!ufshcd_is_clkgating_allowed(hba))
745 return; 745 return;
746 device_remove_file(hba->dev, &hba->clk_gating.delay_attr); 746 device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
747 cancel_work_sync(&hba->clk_gating.ungate_work);
748 cancel_delayed_work_sync(&hba->clk_gating.gate_work);
747} 749}
748 750
749/* Must be called with host lock acquired */ 751/* Must be called with host lock acquired */
@@ -2246,6 +2248,22 @@ static int ufshcd_uic_hibern8_exit(struct ufs_hba *hba)
2246 return ret; 2248 return ret;
2247} 2249}
2248 2250
2251 /**
2252 * ufshcd_init_pwr_info - setting the POR (power on reset)
2253 * values in hba power info
2254 * @hba: per-adapter instance
2255 */
2256static void ufshcd_init_pwr_info(struct ufs_hba *hba)
2257{
2258 hba->pwr_info.gear_rx = UFS_PWM_G1;
2259 hba->pwr_info.gear_tx = UFS_PWM_G1;
2260 hba->pwr_info.lane_rx = 1;
2261 hba->pwr_info.lane_tx = 1;
2262 hba->pwr_info.pwr_rx = SLOWAUTO_MODE;
2263 hba->pwr_info.pwr_tx = SLOWAUTO_MODE;
2264 hba->pwr_info.hs_rate = 0;
2265}
2266
2249/** 2267/**
2250 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device 2268 * ufshcd_get_max_pwr_mode - reads the max power mode negotiated with device
2251 * @hba: per-adapter instance 2269 * @hba: per-adapter instance
@@ -2844,8 +2862,13 @@ static void ufshcd_slave_destroy(struct scsi_device *sdev)
2844 hba = shost_priv(sdev->host); 2862 hba = shost_priv(sdev->host);
2845 scsi_deactivate_tcq(sdev, hba->nutrs); 2863 scsi_deactivate_tcq(sdev, hba->nutrs);
2846 /* Drop the reference as it won't be needed anymore */ 2864 /* Drop the reference as it won't be needed anymore */
2847 if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) 2865 if (ufshcd_scsi_to_upiu_lun(sdev->lun) == UFS_UPIU_UFS_DEVICE_WLUN) {
2866 unsigned long flags;
2867
2868 spin_lock_irqsave(hba->host->host_lock, flags);
2848 hba->sdev_ufs_device = NULL; 2869 hba->sdev_ufs_device = NULL;
2870 spin_unlock_irqrestore(hba->host->host_lock, flags);
2871 }
2849} 2872}
2850 2873
2851/** 2874/**
@@ -4062,6 +4085,8 @@ static void ufshcd_init_icc_levels(struct ufs_hba *hba)
4062static int ufshcd_scsi_add_wlus(struct ufs_hba *hba) 4085static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
4063{ 4086{
4064 int ret = 0; 4087 int ret = 0;
4088 struct scsi_device *sdev_rpmb;
4089 struct scsi_device *sdev_boot;
4065 4090
4066 hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0, 4091 hba->sdev_ufs_device = __scsi_add_device(hba->host, 0, 0,
4067 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL); 4092 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_UFS_DEVICE_WLUN), NULL);
@@ -4070,26 +4095,27 @@ static int ufshcd_scsi_add_wlus(struct ufs_hba *hba)
4070 hba->sdev_ufs_device = NULL; 4095 hba->sdev_ufs_device = NULL;
4071 goto out; 4096 goto out;
4072 } 4097 }
4098 scsi_device_put(hba->sdev_ufs_device);
4073 4099
4074 hba->sdev_boot = __scsi_add_device(hba->host, 0, 0, 4100 sdev_boot = __scsi_add_device(hba->host, 0, 0,
4075 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL); 4101 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_BOOT_WLUN), NULL);
4076 if (IS_ERR(hba->sdev_boot)) { 4102 if (IS_ERR(sdev_boot)) {
4077 ret = PTR_ERR(hba->sdev_boot); 4103 ret = PTR_ERR(sdev_boot);
4078 hba->sdev_boot = NULL;
4079 goto remove_sdev_ufs_device; 4104 goto remove_sdev_ufs_device;
4080 } 4105 }
4106 scsi_device_put(sdev_boot);
4081 4107
4082 hba->sdev_rpmb = __scsi_add_device(hba->host, 0, 0, 4108 sdev_rpmb = __scsi_add_device(hba->host, 0, 0,
4083 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL); 4109 ufshcd_upiu_wlun_to_scsi_wlun(UFS_UPIU_RPMB_WLUN), NULL);
4084 if (IS_ERR(hba->sdev_rpmb)) { 4110 if (IS_ERR(sdev_rpmb)) {
4085 ret = PTR_ERR(hba->sdev_rpmb); 4111 ret = PTR_ERR(sdev_rpmb);
4086 hba->sdev_rpmb = NULL;
4087 goto remove_sdev_boot; 4112 goto remove_sdev_boot;
4088 } 4113 }
4114 scsi_device_put(sdev_rpmb);
4089 goto out; 4115 goto out;
4090 4116
4091remove_sdev_boot: 4117remove_sdev_boot:
4092 scsi_remove_device(hba->sdev_boot); 4118 scsi_remove_device(sdev_boot);
4093remove_sdev_ufs_device: 4119remove_sdev_ufs_device:
4094 scsi_remove_device(hba->sdev_ufs_device); 4120 scsi_remove_device(hba->sdev_ufs_device);
4095out: 4121out:
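The scsi_device_put() calls added above follow the reference rule of __scsi_add_device(): it returns the new device with a reference held for the caller, and since the boot and RPMB W-LUs are no longer cached in struct ufs_hba, that reference can be dropped immediately. Hedged fragment of the shape (error handling trimmed, wlun assumed):

    sdev = __scsi_add_device(hba->host, 0, 0, wlun, NULL);
    if (IS_ERR(sdev))
            return PTR_ERR(sdev);
    scsi_device_put(sdev);  /* device stays registered; drop our reference */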
@@ -4097,30 +4123,6 @@ out:
4097} 4123}
4098 4124
4099/** 4125/**
4100 * ufshcd_scsi_remove_wlus - Removes the W-LUs which were added by
4101 * ufshcd_scsi_add_wlus()
4102 * @hba: per-adapter instance
4103 *
4104 */
4105static void ufshcd_scsi_remove_wlus(struct ufs_hba *hba)
4106{
4107 if (hba->sdev_ufs_device) {
4108 scsi_remove_device(hba->sdev_ufs_device);
4109 hba->sdev_ufs_device = NULL;
4110 }
4111
4112 if (hba->sdev_boot) {
4113 scsi_remove_device(hba->sdev_boot);
4114 hba->sdev_boot = NULL;
4115 }
4116
4117 if (hba->sdev_rpmb) {
4118 scsi_remove_device(hba->sdev_rpmb);
4119 hba->sdev_rpmb = NULL;
4120 }
4121}
4122
4123/**
4124 * ufshcd_probe_hba - probe hba to detect device and initialize 4126 * ufshcd_probe_hba - probe hba to detect device and initialize
4125 * @hba: per-adapter instance 4127 * @hba: per-adapter instance
4126 * 4128 *
@@ -4134,6 +4136,8 @@ static int ufshcd_probe_hba(struct ufs_hba *hba)
4134 if (ret) 4136 if (ret)
4135 goto out; 4137 goto out;
4136 4138
4139 ufshcd_init_pwr_info(hba);
4140
4137 /* UniPro link is active now */ 4141 /* UniPro link is active now */
4138 ufshcd_set_link_active(hba); 4142 ufshcd_set_link_active(hba);
4139 4143
@@ -4264,12 +4268,18 @@ static int ufshcd_config_vreg_load(struct device *dev, struct ufs_vreg *vreg,
4264static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba, 4268static inline int ufshcd_config_vreg_lpm(struct ufs_hba *hba,
4265 struct ufs_vreg *vreg) 4269 struct ufs_vreg *vreg)
4266{ 4270{
4271 if (!vreg)
4272 return 0;
4273
4267 return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA); 4274 return ufshcd_config_vreg_load(hba->dev, vreg, UFS_VREG_LPM_LOAD_UA);
4268} 4275}
4269 4276
4270static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba, 4277static inline int ufshcd_config_vreg_hpm(struct ufs_hba *hba,
4271 struct ufs_vreg *vreg) 4278 struct ufs_vreg *vreg)
4272{ 4279{
4280 if (!vreg)
4281 return 0;
4282
4273 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA); 4283 return ufshcd_config_vreg_load(hba->dev, vreg, vreg->max_uA);
4274} 4284}
4275 4285
@@ -4471,7 +4481,7 @@ out:
4471 if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled) 4481 if (!IS_ERR_OR_NULL(clki->clk) && clki->enabled)
4472 clk_disable_unprepare(clki->clk); 4482 clk_disable_unprepare(clki->clk);
4473 } 4483 }
4474 } else if (!ret && on) { 4484 } else if (on) {
4475 spin_lock_irqsave(hba->host->host_lock, flags); 4485 spin_lock_irqsave(hba->host->host_lock, flags);
4476 hba->clk_gating.state = CLKS_ON; 4486 hba->clk_gating.state = CLKS_ON;
4477 spin_unlock_irqrestore(hba->host->host_lock, flags); 4487 spin_unlock_irqrestore(hba->host->host_lock, flags);
@@ -4675,11 +4685,25 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
4675{ 4685{
4676 unsigned char cmd[6] = { START_STOP }; 4686 unsigned char cmd[6] = { START_STOP };
4677 struct scsi_sense_hdr sshdr; 4687 struct scsi_sense_hdr sshdr;
4678 struct scsi_device *sdp = hba->sdev_ufs_device; 4688 struct scsi_device *sdp;
4689 unsigned long flags;
4679 int ret; 4690 int ret;
4680 4691
4681 if (!sdp || !scsi_device_online(sdp)) 4692 spin_lock_irqsave(hba->host->host_lock, flags);
4682 return -ENODEV; 4693 sdp = hba->sdev_ufs_device;
4694 if (sdp) {
4695 ret = scsi_device_get(sdp);
4696 if (!ret && !scsi_device_online(sdp)) {
4697 ret = -ENODEV;
4698 scsi_device_put(sdp);
4699 }
4700 } else {
4701 ret = -ENODEV;
4702 }
4703 spin_unlock_irqrestore(hba->host->host_lock, flags);
4704
4705 if (ret)
4706 return ret;
4683 4707
4684 /* 4708 /*
4685 * If scsi commands fail, the scsi mid-layer schedules scsi error- 4709 * If scsi commands fail, the scsi mid-layer schedules scsi error-
@@ -4718,6 +4742,7 @@ static int ufshcd_set_dev_pwr_mode(struct ufs_hba *hba,
4718 if (!ret) 4742 if (!ret)
4719 hba->curr_dev_pwr_mode = pwr_mode; 4743 hba->curr_dev_pwr_mode = pwr_mode;
4720out: 4744out:
4745 scsi_device_put(sdp);
4721 hba->host->eh_noresume = 0; 4746 hba->host->eh_noresume = 0;
4722 return ret; 4747 return ret;
4723} 4748}
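This rework closes a race with ufshcd_slave_destroy() clearing hba->sdev_ufs_device: the pointer is now sampled under the host lock and pinned with scsi_device_get() before the START STOP UNIT is issued, with a matching scsi_device_put() on the way out. Simplified sketch of the pin-then-use pattern (online check and command setup trimmed):

    spin_lock_irqsave(hba->host->host_lock, flags);
    sdp = hba->sdev_ufs_device;
    ret = sdp ? scsi_device_get(sdp) : -ENODEV;
    spin_unlock_irqrestore(hba->host->host_lock, flags);
    if (ret)
            return ret;

    /* ... issue the SCSI command against sdp ... */

    scsi_device_put(sdp);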
@@ -5087,7 +5112,7 @@ int ufshcd_system_suspend(struct ufs_hba *hba)
5087 int ret = 0; 5112 int ret = 0;
5088 5113
5089 if (!hba || !hba->is_powered) 5114 if (!hba || !hba->is_powered)
5090 goto out; 5115 return 0;
5091 5116
5092 if (pm_runtime_suspended(hba->dev)) { 5117 if (pm_runtime_suspended(hba->dev)) {
5093 if (hba->rpm_lvl == hba->spm_lvl) 5118 if (hba->rpm_lvl == hba->spm_lvl)
@@ -5231,7 +5256,6 @@ EXPORT_SYMBOL(ufshcd_shutdown);
5231void ufshcd_remove(struct ufs_hba *hba) 5256void ufshcd_remove(struct ufs_hba *hba)
5232{ 5257{
5233 scsi_remove_host(hba->host); 5258 scsi_remove_host(hba->host);
5234 ufshcd_scsi_remove_wlus(hba);
5235 /* disable interrupts */ 5259 /* disable interrupts */
5236 ufshcd_disable_intr(hba, hba->intr_mask); 5260 ufshcd_disable_intr(hba, hba->intr_mask);
5237 ufshcd_hba_stop(hba); 5261 ufshcd_hba_stop(hba);
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index 58ecdff5065c..4a574aa45855 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -392,8 +392,6 @@ struct ufs_hba {
392 * "UFS device" W-LU. 392 * "UFS device" W-LU.
393 */ 393 */
394 struct scsi_device *sdev_ufs_device; 394 struct scsi_device *sdev_ufs_device;
395 struct scsi_device *sdev_rpmb;
396 struct scsi_device *sdev_boot;
397 395
398 enum ufs_dev_pwr_mode curr_dev_pwr_mode; 396 enum ufs_dev_pwr_mode curr_dev_pwr_mode;
399 enum uic_link_state uic_link_state; 397 enum uic_link_state uic_link_state;
diff --git a/drivers/soc/versatile/soc-realview.c b/drivers/soc/versatile/soc-realview.c
index cea8ea3491d2..1a07bf540fec 100644
--- a/drivers/soc/versatile/soc-realview.c
+++ b/drivers/soc/versatile/soc-realview.c
@@ -26,6 +26,7 @@ static const struct of_device_id realview_soc_of_match[] = {
26 { .compatible = "arm,realview-pb11mp-soc", }, 26 { .compatible = "arm,realview-pb11mp-soc", },
27 { .compatible = "arm,realview-pba8-soc", }, 27 { .compatible = "arm,realview-pba8-soc", },
28 { .compatible = "arm,realview-pbx-soc", }, 28 { .compatible = "arm,realview-pbx-soc", },
29 { }
29}; 30};
30 31
31static u32 realview_coreid; 32static u32 realview_coreid;
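The added "{ }" entry is the required sentinel: the OF matching code walks an of_device_id table until it reaches an all-zero element, so without it the walk runs off the end of the array. Minimal sketch of a correctly terminated table (compatible string illustrative):

    static const struct of_device_id example_of_match[] = {
            { .compatible = "vendor,example-soc", },
            { /* sentinel */ }
    };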
diff --git a/drivers/spi/spi-dw.c b/drivers/spi/spi-dw.c
index 729215885250..d0d5542efc06 100644
--- a/drivers/spi/spi-dw.c
+++ b/drivers/spi/spi-dw.c
@@ -376,9 +376,6 @@ static void pump_transfers(unsigned long data)
376 chip = dws->cur_chip; 376 chip = dws->cur_chip;
377 spi = message->spi; 377 spi = message->spi;
378 378
379 if (unlikely(!chip->clk_div))
380 chip->clk_div = dws->max_freq / chip->speed_hz;
381
382 if (message->state == ERROR_STATE) { 379 if (message->state == ERROR_STATE) {
383 message->status = -EIO; 380 message->status = -EIO;
384 goto early_exit; 381 goto early_exit;
@@ -419,7 +416,7 @@ static void pump_transfers(unsigned long data)
419 if (transfer->speed_hz) { 416 if (transfer->speed_hz) {
420 speed = chip->speed_hz; 417 speed = chip->speed_hz;
421 418
422 if (transfer->speed_hz != speed) { 419 if ((transfer->speed_hz != speed) || (!chip->clk_div)) {
423 speed = transfer->speed_hz; 420 speed = transfer->speed_hz;
424 421
425 /* clk_div doesn't support odd number */ 422 /* clk_div doesn't support odd number */
@@ -581,7 +578,6 @@ static int dw_spi_setup(struct spi_device *spi)
581 dev_err(&spi->dev, "No max speed HZ parameter\n"); 578 dev_err(&spi->dev, "No max speed HZ parameter\n");
582 return -EINVAL; 579 return -EINVAL;
583 } 580 }
584 chip->speed_hz = spi->max_speed_hz;
585 581
586 chip->tmode = 0; /* Tx & Rx */ 582 chip->tmode = 0; /* Tx & Rx */
587 /* Default SPI mode is SCPOL = 0, SCPH = 0 */ 583 /* Default SPI mode is SCPOL = 0, SCPH = 0 */
@@ -669,6 +665,7 @@ int dw_spi_add_host(struct device *dev, struct dw_spi *dws)
669 master->cleanup = dw_spi_cleanup; 665 master->cleanup = dw_spi_cleanup;
670 master->transfer_one_message = dw_spi_transfer_one_message; 666 master->transfer_one_message = dw_spi_transfer_one_message;
671 master->max_speed_hz = dws->max_freq; 667 master->max_speed_hz = dws->max_freq;
668 master->dev.of_node = dev->of_node;
672 669
673 /* Basic HW init */ 670 /* Basic HW init */
674 spi_hw_init(dws); 671 spi_hw_init(dws);
diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c
index 448216025ce8..831ceb4a91f6 100644
--- a/drivers/spi/spi-fsl-dspi.c
+++ b/drivers/spi/spi-fsl-dspi.c
@@ -46,7 +46,7 @@
46 46
47#define SPI_TCR 0x08 47#define SPI_TCR 0x08
48 48
49#define SPI_CTAR(x) (0x0c + (x * 4)) 49#define SPI_CTAR(x) (0x0c + (((x) & 0x3) * 4))
50#define SPI_CTAR_FMSZ(x) (((x) & 0x0000000f) << 27) 50#define SPI_CTAR_FMSZ(x) (((x) & 0x0000000f) << 27)
51#define SPI_CTAR_CPOL(x) ((x) << 26) 51#define SPI_CTAR_CPOL(x) ((x) << 26)
52#define SPI_CTAR_CPHA(x) ((x) << 25) 52#define SPI_CTAR_CPHA(x) ((x) << 25)
@@ -70,7 +70,7 @@
70 70
71#define SPI_PUSHR 0x34 71#define SPI_PUSHR 0x34
72#define SPI_PUSHR_CONT (1 << 31) 72#define SPI_PUSHR_CONT (1 << 31)
73#define SPI_PUSHR_CTAS(x) (((x) & 0x00000007) << 28) 73#define SPI_PUSHR_CTAS(x) (((x) & 0x00000003) << 28)
74#define SPI_PUSHR_EOQ (1 << 27) 74#define SPI_PUSHR_EOQ (1 << 27)
75#define SPI_PUSHR_CTCNT (1 << 26) 75#define SPI_PUSHR_CTCNT (1 << 26)
76#define SPI_PUSHR_PCS(x) (((1 << x) & 0x0000003f) << 16) 76#define SPI_PUSHR_PCS(x) (((1 << x) & 0x0000003f) << 16)
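Masking the index in SPI_CTAR() (and narrowing SPI_PUSHR_CTAS() to two bits) keeps an out-of-range value from computing an offset past the CTAR register block. A small user-space C demo of the difference (macro names illustrative; compiles and runs as-is):

    #include <stdio.h>

    #define CTAR_UNBOUNDED(x)   (0x0c + ((x) * 4))
    #define CTAR_MASKED(x)      (0x0c + (((x) & 0x3) * 4))

    int main(void)
    {
            /* index 5 is outside the four CTAR slots */
            printf("unbounded: 0x%x, masked: 0x%x\n",
                   CTAR_UNBOUNDED(5), CTAR_MASKED(5));
            return 0;       /* prints: unbounded: 0x20, masked: 0x10 */
    }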
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index 835cdda6f4f5..c76b7d7879df 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -454,7 +454,7 @@ static int orion_spi_probe(struct platform_device *pdev)
454 spi->master = master; 454 spi->master = master;
455 455
456 of_id = of_match_device(orion_spi_of_match_table, &pdev->dev); 456 of_id = of_match_device(orion_spi_of_match_table, &pdev->dev);
457 devdata = of_id->data; 457 devdata = (of_id) ? of_id->data : &orion_spi_dev_data;
458 spi->devdata = devdata; 458 spi->devdata = devdata;
459 459
460 spi->clk = devm_clk_get(&pdev->dev, NULL); 460 spi->clk = devm_clk_get(&pdev->dev, NULL);
diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
index f35f723816ea..fc2dd8441608 100644
--- a/drivers/spi/spi-pl022.c
+++ b/drivers/spi/spi-pl022.c
@@ -1106,7 +1106,7 @@ err_rxdesc:
1106 pl022->sgt_tx.nents, DMA_TO_DEVICE); 1106 pl022->sgt_tx.nents, DMA_TO_DEVICE);
1107err_tx_sgmap: 1107err_tx_sgmap:
1108 dma_unmap_sg(rxchan->device->dev, pl022->sgt_rx.sgl, 1108 dma_unmap_sg(rxchan->device->dev, pl022->sgt_rx.sgl,
1109 pl022->sgt_tx.nents, DMA_FROM_DEVICE); 1109 pl022->sgt_rx.nents, DMA_FROM_DEVICE);
1110err_rx_sgmap: 1110err_rx_sgmap:
1111 sg_free_table(&pl022->sgt_tx); 1111 sg_free_table(&pl022->sgt_tx);
1112err_alloc_tx_sg: 1112err_alloc_tx_sg:
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index d8a105f76837..9e9e0f971e6c 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -1274,7 +1274,9 @@ static int pxa2xx_spi_suspend(struct device *dev)
1274 if (status != 0) 1274 if (status != 0)
1275 return status; 1275 return status;
1276 write_SSCR0(0, drv_data->ioaddr); 1276 write_SSCR0(0, drv_data->ioaddr);
1277 clk_disable_unprepare(ssp->clk); 1277
1278 if (!pm_runtime_suspended(dev))
1279 clk_disable_unprepare(ssp->clk);
1278 1280
1279 return 0; 1281 return 0;
1280} 1282}
@@ -1288,7 +1290,8 @@ static int pxa2xx_spi_resume(struct device *dev)
1288 pxa2xx_spi_dma_resume(drv_data); 1290 pxa2xx_spi_dma_resume(drv_data);
1289 1291
1290 /* Enable the SSP clock */ 1292 /* Enable the SSP clock */
1291 clk_prepare_enable(ssp->clk); 1293 if (!pm_runtime_suspended(dev))
1294 clk_prepare_enable(ssp->clk);
1292 1295
1293 /* Restore LPSS private register bits */ 1296 /* Restore LPSS private register bits */
1294 lpss_ssp_setup(drv_data); 1297 lpss_ssp_setup(drv_data);
diff --git a/drivers/spi/spi-rockchip.c b/drivers/spi/spi-rockchip.c
index f96ea8a38d64..87bc16f491f0 100644
--- a/drivers/spi/spi-rockchip.c
+++ b/drivers/spi/spi-rockchip.c
@@ -145,6 +145,9 @@
145#define RXBUSY (1 << 0) 145#define RXBUSY (1 << 0)
146#define TXBUSY (1 << 1) 146#define TXBUSY (1 << 1)
147 147
148/* sclk_out: spi master internal logic in rk3x can support 50Mhz */
149#define MAX_SCLK_OUT 50000000
150
148enum rockchip_ssi_type { 151enum rockchip_ssi_type {
149 SSI_MOTO_SPI = 0, 152 SSI_MOTO_SPI = 0,
150 SSI_TI_SSP, 153 SSI_TI_SSP,
@@ -325,6 +328,8 @@ static int rockchip_spi_unprepare_message(struct spi_master *master,
325 328
326 spin_unlock_irqrestore(&rs->lock, flags); 329 spin_unlock_irqrestore(&rs->lock, flags);
327 330
331 spi_enable_chip(rs, 0);
332
328 return 0; 333 return 0;
329} 334}
330 335
@@ -381,6 +386,8 @@ static int rockchip_spi_pio_transfer(struct rockchip_spi *rs)
381 if (rs->tx) 386 if (rs->tx)
382 wait_for_idle(rs); 387 wait_for_idle(rs);
383 388
389 spi_enable_chip(rs, 0);
390
384 return 0; 391 return 0;
385} 392}
386 393
@@ -392,8 +399,10 @@ static void rockchip_spi_dma_rxcb(void *data)
392 spin_lock_irqsave(&rs->lock, flags); 399 spin_lock_irqsave(&rs->lock, flags);
393 400
394 rs->state &= ~RXBUSY; 401 rs->state &= ~RXBUSY;
395 if (!(rs->state & TXBUSY)) 402 if (!(rs->state & TXBUSY)) {
403 spi_enable_chip(rs, 0);
396 spi_finalize_current_transfer(rs->master); 404 spi_finalize_current_transfer(rs->master);
405 }
397 406
398 spin_unlock_irqrestore(&rs->lock, flags); 407 spin_unlock_irqrestore(&rs->lock, flags);
399} 408}
@@ -409,8 +418,10 @@ static void rockchip_spi_dma_txcb(void *data)
409 spin_lock_irqsave(&rs->lock, flags); 418 spin_lock_irqsave(&rs->lock, flags);
410 419
411 rs->state &= ~TXBUSY; 420 rs->state &= ~TXBUSY;
412 if (!(rs->state & RXBUSY)) 421 if (!(rs->state & RXBUSY)) {
422 spi_enable_chip(rs, 0);
413 spi_finalize_current_transfer(rs->master); 423 spi_finalize_current_transfer(rs->master);
424 }
414 425
415 spin_unlock_irqrestore(&rs->lock, flags); 426 spin_unlock_irqrestore(&rs->lock, flags);
416} 427}
@@ -496,12 +507,19 @@ static void rockchip_spi_config(struct rockchip_spi *rs)
496 dmacr |= RF_DMA_EN; 507 dmacr |= RF_DMA_EN;
497 } 508 }
498 509
510 if (WARN_ON(rs->speed > MAX_SCLK_OUT))
511 rs->speed = MAX_SCLK_OUT;
512
513 /* the minimum divsor is 2 */
514 if (rs->max_freq < 2 * rs->speed) {
515 clk_set_rate(rs->spiclk, 2 * rs->speed);
516 rs->max_freq = clk_get_rate(rs->spiclk);
517 }
518
499 /* div doesn't support odd number */ 519 /* div doesn't support odd number */
500 div = max_t(u32, rs->max_freq / rs->speed, 1); 520 div = max_t(u32, rs->max_freq / rs->speed, 1);
501 div = (div + 1) & 0xfffe; 521 div = (div + 1) & 0xfffe;
502 522
503 spi_enable_chip(rs, 0);
504
505 writel_relaxed(cr0, rs->regs + ROCKCHIP_SPI_CTRLR0); 523 writel_relaxed(cr0, rs->regs + ROCKCHIP_SPI_CTRLR0);
506 524
507 writel_relaxed(rs->len - 1, rs->regs + ROCKCHIP_SPI_CTRLR1); 525 writel_relaxed(rs->len - 1, rs->regs + ROCKCHIP_SPI_CTRLR1);
@@ -515,8 +533,6 @@ static void rockchip_spi_config(struct rockchip_spi *rs)
515 spi_set_clk(rs, div); 533 spi_set_clk(rs, div);
516 534
517 dev_dbg(rs->dev, "cr0 0x%x, div %d\n", cr0, div); 535 dev_dbg(rs->dev, "cr0 0x%x, div %d\n", cr0, div);
518
519 spi_enable_chip(rs, 1);
520} 536}
521 537
522static int rockchip_spi_transfer_one( 538static int rockchip_spi_transfer_one(
@@ -524,7 +540,7 @@ static int rockchip_spi_transfer_one(
524 struct spi_device *spi, 540 struct spi_device *spi,
525 struct spi_transfer *xfer) 541 struct spi_transfer *xfer)
526{ 542{
527 int ret = 0; 543 int ret = 1;
528 struct rockchip_spi *rs = spi_master_get_devdata(master); 544 struct rockchip_spi *rs = spi_master_get_devdata(master);
529 545
530 WARN_ON(readl_relaxed(rs->regs + ROCKCHIP_SPI_SSIENR) && 546 WARN_ON(readl_relaxed(rs->regs + ROCKCHIP_SPI_SSIENR) &&
@@ -556,17 +572,27 @@ static int rockchip_spi_transfer_one(
556 rs->tmode = CR0_XFM_RO; 572 rs->tmode = CR0_XFM_RO;
557 573
558 /* we need prepare dma before spi was enabled */ 574 /* we need prepare dma before spi was enabled */
559 if (master->can_dma && master->can_dma(master, spi, xfer)) { 575 if (master->can_dma && master->can_dma(master, spi, xfer))
560 rs->use_dma = 1; 576 rs->use_dma = 1;
561 rockchip_spi_prepare_dma(rs); 577 else
562 } else {
563 rs->use_dma = 0; 578 rs->use_dma = 0;
564 }
565 579
566 rockchip_spi_config(rs); 580 rockchip_spi_config(rs);
567 581
568 if (!rs->use_dma) 582 if (rs->use_dma) {
583 if (rs->tmode == CR0_XFM_RO) {
584 /* rx: dma must be prepared first */
585 rockchip_spi_prepare_dma(rs);
586 spi_enable_chip(rs, 1);
587 } else {
588 /* tx or tr: spi must be enabled first */
589 spi_enable_chip(rs, 1);
590 rockchip_spi_prepare_dma(rs);
591 }
592 } else {
593 spi_enable_chip(rs, 1);
569 ret = rockchip_spi_pio_transfer(rs); 594 ret = rockchip_spi_pio_transfer(rs);
595 }
570 596
571 return ret; 597 return ret;
572} 598}
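The "int ret = 1" change uses the spi core's transfer_one() contract: a positive return means the transfer is still in flight and the driver will call spi_finalize_current_transfer() later (here from the DMA callbacks above), 0 means it already completed, and a negative value is an error. Hedged sketch of the contract (helper names hypothetical):

    static int example_transfer_one(struct spi_master *master,
                                    struct spi_device *spi,
                                    struct spi_transfer *xfer)
    {
            if (example_uses_dma(xfer))
                    return 1;       /* finalize later from the DMA callback */

            return example_pio(xfer);       /* 0 on success, -errno on error */
    }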
diff --git a/drivers/spi/spi-sirf.c b/drivers/spi/spi-sirf.c
index 39e2c0a55a28..f63de781c729 100644
--- a/drivers/spi/spi-sirf.c
+++ b/drivers/spi/spi-sirf.c
@@ -562,9 +562,9 @@ spi_sirfsoc_setup_transfer(struct spi_device *spi, struct spi_transfer *t)
562 562
563 sspi->word_width = DIV_ROUND_UP(bits_per_word, 8); 563 sspi->word_width = DIV_ROUND_UP(bits_per_word, 8);
564 txfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) | 564 txfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
565 sspi->word_width; 565 (sspi->word_width >> 1);
566 rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) | 566 rxfifo_ctrl = SIRFSOC_SPI_FIFO_THD(SIRFSOC_SPI_FIFO_SIZE / 2) |
567 sspi->word_width; 567 (sspi->word_width >> 1);
568 568
569 if (!(spi->mode & SPI_CS_HIGH)) 569 if (!(spi->mode & SPI_CS_HIGH))
570 regval |= SIRFSOC_SPI_CS_IDLE_STAT; 570 regval |= SIRFSOC_SPI_CS_IDLE_STAT;
diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
index ebcb33df2eb2..50f20f243981 100644
--- a/drivers/spi/spi.c
+++ b/drivers/spi/spi.c
@@ -615,13 +615,13 @@ static int spi_map_buf(struct spi_master *master, struct device *dev,
615 sg_free_table(sgt); 615 sg_free_table(sgt);
616 return -ENOMEM; 616 return -ENOMEM;
617 } 617 }
618 sg_buf = page_address(vm_page) + 618 sg_set_page(&sgt->sgl[i], vm_page,
619 ((size_t)buf & ~PAGE_MASK); 619 min, offset_in_page(buf));
620 } else { 620 } else {
621 sg_buf = buf; 621 sg_buf = buf;
622 sg_set_buf(&sgt->sgl[i], sg_buf, min);
622 } 623 }
623 624
624 sg_set_buf(&sgt->sgl[i], sg_buf, min);
625 625
626 buf += min; 626 buf += min;
627 len -= min; 627 len -= min;
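For vmalloc'ed buffers sg_set_buf() is the wrong tool: page_address() plus an offset assumes the memory is physically contiguous, while a vmalloc area is only virtually contiguous, so each scatterlist entry must record its real page via sg_set_page(). Hedged sketch of the per-page walk (simplified loop; sg, buf and len are assumed locals, error handling omitted):

    while (len) {
            size_t chunk = min_t(size_t, len,
                                 PAGE_SIZE - offset_in_page(buf));
            struct page *pg = vmalloc_to_page(buf);

            sg_set_page(sg, pg, chunk, offset_in_page(buf));
            sg = sg_next(sg);
            buf += chunk;
            len -= chunk;
    }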
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index e3bc23bb5883..e50039fb1474 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -82,10 +82,11 @@ struct spidev_data {
82 struct spi_device *spi; 82 struct spi_device *spi;
83 struct list_head device_entry; 83 struct list_head device_entry;
84 84
85 /* buffer is NULL unless this device is open (users > 0) */ 85 /* TX/RX buffers are NULL unless this device is open (users > 0) */
86 struct mutex buf_lock; 86 struct mutex buf_lock;
87 unsigned users; 87 unsigned users;
88 u8 *buffer; 88 u8 *tx_buffer;
89 u8 *rx_buffer;
89}; 90};
90 91
91static LIST_HEAD(device_list); 92static LIST_HEAD(device_list);
@@ -135,7 +136,7 @@ static inline ssize_t
135spidev_sync_write(struct spidev_data *spidev, size_t len) 136spidev_sync_write(struct spidev_data *spidev, size_t len)
136{ 137{
137 struct spi_transfer t = { 138 struct spi_transfer t = {
138 .tx_buf = spidev->buffer, 139 .tx_buf = spidev->tx_buffer,
139 .len = len, 140 .len = len,
140 }; 141 };
141 struct spi_message m; 142 struct spi_message m;
@@ -149,7 +150,7 @@ static inline ssize_t
149spidev_sync_read(struct spidev_data *spidev, size_t len) 150spidev_sync_read(struct spidev_data *spidev, size_t len)
150{ 151{
151 struct spi_transfer t = { 152 struct spi_transfer t = {
152 .rx_buf = spidev->buffer, 153 .rx_buf = spidev->rx_buffer,
153 .len = len, 154 .len = len,
154 }; 155 };
155 struct spi_message m; 156 struct spi_message m;
@@ -179,7 +180,7 @@ spidev_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
179 if (status > 0) { 180 if (status > 0) {
180 unsigned long missing; 181 unsigned long missing;
181 182
182 missing = copy_to_user(buf, spidev->buffer, status); 183 missing = copy_to_user(buf, spidev->rx_buffer, status);
183 if (missing == status) 184 if (missing == status)
184 status = -EFAULT; 185 status = -EFAULT;
185 else 186 else
@@ -206,7 +207,7 @@ spidev_write(struct file *filp, const char __user *buf,
206 spidev = filp->private_data; 207 spidev = filp->private_data;
207 208
208 mutex_lock(&spidev->buf_lock); 209 mutex_lock(&spidev->buf_lock);
209 missing = copy_from_user(spidev->buffer, buf, count); 210 missing = copy_from_user(spidev->tx_buffer, buf, count);
210 if (missing == 0) 211 if (missing == 0)
211 status = spidev_sync_write(spidev, count); 212 status = spidev_sync_write(spidev, count);
212 else 213 else
@@ -224,7 +225,7 @@ static int spidev_message(struct spidev_data *spidev,
224 struct spi_transfer *k_tmp; 225 struct spi_transfer *k_tmp;
225 struct spi_ioc_transfer *u_tmp; 226 struct spi_ioc_transfer *u_tmp;
226 unsigned n, total; 227 unsigned n, total;
227 u8 *buf; 228 u8 *tx_buf, *rx_buf;
228 int status = -EFAULT; 229 int status = -EFAULT;
229 230
230 spi_message_init(&msg); 231 spi_message_init(&msg);
@@ -236,7 +237,8 @@ static int spidev_message(struct spidev_data *spidev,
236 * We walk the array of user-provided transfers, using each one 237 * We walk the array of user-provided transfers, using each one
237 * to initialize a kernel version of the same transfer. 238 * to initialize a kernel version of the same transfer.
238 */ 239 */
239 buf = spidev->buffer; 240 tx_buf = spidev->tx_buffer;
241 rx_buf = spidev->rx_buffer;
240 total = 0; 242 total = 0;
241 for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers; 243 for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers;
242 n; 244 n;
@@ -250,20 +252,21 @@ static int spidev_message(struct spidev_data *spidev,
250 } 252 }
251 253
252 if (u_tmp->rx_buf) { 254 if (u_tmp->rx_buf) {
253 k_tmp->rx_buf = buf; 255 k_tmp->rx_buf = rx_buf;
254 if (!access_ok(VERIFY_WRITE, (u8 __user *) 256 if (!access_ok(VERIFY_WRITE, (u8 __user *)
255 (uintptr_t) u_tmp->rx_buf, 257 (uintptr_t) u_tmp->rx_buf,
256 u_tmp->len)) 258 u_tmp->len))
257 goto done; 259 goto done;
258 } 260 }
259 if (u_tmp->tx_buf) { 261 if (u_tmp->tx_buf) {
260 k_tmp->tx_buf = buf; 262 k_tmp->tx_buf = tx_buf;
261 if (copy_from_user(buf, (const u8 __user *) 263 if (copy_from_user(tx_buf, (const u8 __user *)
262 (uintptr_t) u_tmp->tx_buf, 264 (uintptr_t) u_tmp->tx_buf,
263 u_tmp->len)) 265 u_tmp->len))
264 goto done; 266 goto done;
265 } 267 }
266 buf += k_tmp->len; 268 tx_buf += k_tmp->len;
269 rx_buf += k_tmp->len;
267 270
268 k_tmp->cs_change = !!u_tmp->cs_change; 271 k_tmp->cs_change = !!u_tmp->cs_change;
269 k_tmp->tx_nbits = u_tmp->tx_nbits; 272 k_tmp->tx_nbits = u_tmp->tx_nbits;
@@ -290,17 +293,17 @@ static int spidev_message(struct spidev_data *spidev,
290 goto done; 293 goto done;
291 294
292 /* copy any rx data out of bounce buffer */ 295 /* copy any rx data out of bounce buffer */
293 buf = spidev->buffer; 296 rx_buf = spidev->rx_buffer;
294 for (n = n_xfers, u_tmp = u_xfers; n; n--, u_tmp++) { 297 for (n = n_xfers, u_tmp = u_xfers; n; n--, u_tmp++) {
295 if (u_tmp->rx_buf) { 298 if (u_tmp->rx_buf) {
296 if (__copy_to_user((u8 __user *) 299 if (__copy_to_user((u8 __user *)
297 (uintptr_t) u_tmp->rx_buf, buf, 300 (uintptr_t) u_tmp->rx_buf, rx_buf,
298 u_tmp->len)) { 301 u_tmp->len)) {
299 status = -EFAULT; 302 status = -EFAULT;
300 goto done; 303 goto done;
301 } 304 }
302 } 305 }
303 buf += u_tmp->len; 306 rx_buf += u_tmp->len;
304 } 307 }
305 status = total; 308 status = total;
306 309
@@ -508,22 +511,41 @@ static int spidev_open(struct inode *inode, struct file *filp)
508 break; 511 break;
509 } 512 }
510 } 513 }
511 if (status == 0) { 514
512 if (!spidev->buffer) { 515 if (status) {
513 spidev->buffer = kmalloc(bufsiz, GFP_KERNEL); 516 pr_debug("spidev: nothing for minor %d\n", iminor(inode));
514 if (!spidev->buffer) { 517 goto err_find_dev;
518 }
519
520 if (!spidev->tx_buffer) {
521 spidev->tx_buffer = kmalloc(bufsiz, GFP_KERNEL);
522 if (!spidev->tx_buffer) {
515 dev_dbg(&spidev->spi->dev, "open/ENOMEM\n"); 523 dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
516 status = -ENOMEM; 524 status = -ENOMEM;
525 goto err_find_dev;
517 } 526 }
518 } 527 }
519 if (status == 0) { 528
520 spidev->users++; 529 if (!spidev->rx_buffer) {
521 filp->private_data = spidev; 530 spidev->rx_buffer = kmalloc(bufsiz, GFP_KERNEL);
522 nonseekable_open(inode, filp); 531 if (!spidev->rx_buffer) {
532 dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
533 status = -ENOMEM;
534 goto err_alloc_rx_buf;
523 } 535 }
524 } else 536 }
525 pr_debug("spidev: nothing for minor %d\n", iminor(inode)); 537
538 spidev->users++;
539 filp->private_data = spidev;
540 nonseekable_open(inode, filp);
541
542 mutex_unlock(&device_list_lock);
543 return 0;
526 544
545err_alloc_rx_buf:
546 kfree(spidev->tx_buffer);
547 spidev->tx_buffer = NULL;
548err_find_dev:
527 mutex_unlock(&device_list_lock); 549 mutex_unlock(&device_list_lock);
528 return status; 550 return status;
529} 551}
@@ -542,8 +564,11 @@ static int spidev_release(struct inode *inode, struct file *filp)
542 if (!spidev->users) { 564 if (!spidev->users) {
543 int dofree; 565 int dofree;
544 566
545 kfree(spidev->buffer); 567 kfree(spidev->tx_buffer);
546 spidev->buffer = NULL; 568 spidev->tx_buffer = NULL;
569
570 kfree(spidev->rx_buffer);
571 spidev->rx_buffer = NULL;
547 572
548 /* ... after we unbound from the underlying device? */ 573 /* ... after we unbound from the underlying device? */
549 spin_lock_irq(&spidev->spi_lock); 574 spin_lock_irq(&spidev->spi_lock);
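Splitting the single bounce buffer into tx_buffer and rx_buffer is what makes full-duplex SPI_IOC_MESSAGE transfers behave: previously the received bytes landed in the same buffer region the outgoing bytes had been staged in. From user space the full-duplex case looks roughly like this (fragment; fd, tx and rx are assumed to exist):

    struct spi_ioc_transfer xfer = {
            .tx_buf = (unsigned long)tx,    /* data to shift out */
            .rx_buf = (unsigned long)rx,    /* data clocked in at the same time */
            .len    = sizeof(tx),
    };

    if (ioctl(fd, SPI_IOC_MESSAGE(1), &xfer) < 1)
            perror("SPI_IOC_MESSAGE");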
diff --git a/drivers/staging/android/logger.c b/drivers/staging/android/logger.c
index 28b93d39a94e..a673ffa34aa3 100644
--- a/drivers/staging/android/logger.c
+++ b/drivers/staging/android/logger.c
@@ -420,7 +420,7 @@ static ssize_t logger_write_iter(struct kiocb *iocb, struct iov_iter *from)
420 struct logger_log *log = file_get_log(iocb->ki_filp); 420 struct logger_log *log = file_get_log(iocb->ki_filp);
421 struct logger_entry header; 421 struct logger_entry header;
422 struct timespec now; 422 struct timespec now;
423 size_t len, count; 423 size_t len, count, w_off;
424 424
425 count = min_t(size_t, iocb->ki_nbytes, LOGGER_ENTRY_MAX_PAYLOAD); 425 count = min_t(size_t, iocb->ki_nbytes, LOGGER_ENTRY_MAX_PAYLOAD);
426 426
@@ -452,11 +452,14 @@ static ssize_t logger_write_iter(struct kiocb *iocb, struct iov_iter *from)
452 memcpy(log->buffer + log->w_off, &header, len); 452 memcpy(log->buffer + log->w_off, &header, len);
453 memcpy(log->buffer, (char *)&header + len, sizeof(header) - len); 453 memcpy(log->buffer, (char *)&header + len, sizeof(header) - len);
454 454
455 len = min(count, log->size - log->w_off); 455 /* Work with a copy until we are ready to commit the whole entry */
456 w_off = logger_offset(log, log->w_off + sizeof(struct logger_entry));
456 457
457 if (copy_from_iter(log->buffer + log->w_off, len, from) != len) { 458 len = min(count, log->size - w_off);
459
460 if (copy_from_iter(log->buffer + w_off, len, from) != len) {
458 /* 461 /*
459 * Note that by not updating w_off, this abandons the 462 * Note that by not updating log->w_off, this abandons the
460 * portion of the new entry that *was* successfully 463 * portion of the new entry that *was* successfully
461 * copied, just above. This is intentional to avoid 464 * copied, just above. This is intentional to avoid
462 * message corruption from missing fragments. 465 * message corruption from missing fragments.
@@ -470,7 +473,7 @@ static ssize_t logger_write_iter(struct kiocb *iocb, struct iov_iter *from)
470 return -EFAULT; 473 return -EFAULT;
471 } 474 }
472 475
473 log->w_off = logger_offset(log, log->w_off + count); 476 log->w_off = logger_offset(log, w_off + count);
474 mutex_unlock(&log->mutex); 477 mutex_unlock(&log->mutex);
475 478
476 /* wake up any blocked readers */ 479 /* wake up any blocked readers */
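The local w_off copy gives the write path a stage-then-publish shape: the payload is copied at the prospective offset first, and the shared log->w_off only advances once the copy_from_iter() calls have all succeeded, so readers never see a half-written entry. Simplified sketch (copy helper hypothetical):

    size_t staged = logger_offset(log, log->w_off + sizeof(header));

    if (example_copy_payload(log, staged, from, count)) {
            /* log->w_off untouched: the partial entry is simply abandoned */
            return -EFAULT;
    }

    log->w_off = logger_offset(log, staged + count);        /* publish */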
diff --git a/drivers/staging/comedi/Kconfig b/drivers/staging/comedi/Kconfig
index a8bc2b567789..152f4c12ea43 100644
--- a/drivers/staging/comedi/Kconfig
+++ b/drivers/staging/comedi/Kconfig
@@ -426,6 +426,7 @@ config COMEDI_AIO_IIRO_16
426 426
427config COMEDI_II_PCI20KC 427config COMEDI_II_PCI20KC
428 tristate "Intelligent Instruments PCI-20001C carrier support" 428 tristate "Intelligent Instruments PCI-20001C carrier support"
429 depends on HAS_IOMEM
429 ---help--- 430 ---help---
430 Enable support for Intelligent Instruments PCI-20001C carrier 431 Enable support for Intelligent Instruments PCI-20001C carrier
431 PCI-20001, PCI-20006 and PCI-20341 432 PCI-20001, PCI-20006 and PCI-20341
@@ -667,7 +668,6 @@ config COMEDI_ADDI_APCI_2200
667config COMEDI_ADDI_APCI_3120 668config COMEDI_ADDI_APCI_3120
668 tristate "ADDI-DATA APCI_3120/3001 support" 669 tristate "ADDI-DATA APCI_3120/3001 support"
669 depends on HAS_DMA 670 depends on HAS_DMA
670 depends on VIRT_TO_BUS
671 ---help--- 671 ---help---
672 Enable support for ADDI-DATA APCI_3120/3001 cards 672 Enable support for ADDI-DATA APCI_3120/3001 cards
673 673
diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c
index 495969f46e76..9c32f0276009 100644
--- a/drivers/staging/comedi/comedi_fops.c
+++ b/drivers/staging/comedi/comedi_fops.c
@@ -1462,10 +1462,7 @@ static int __comedi_get_user_chanlist(struct comedi_device *dev,
1462 unsigned int *chanlist; 1462 unsigned int *chanlist;
1463 int ret; 1463 int ret;
1464 1464
1465 /* user_chanlist could be NULL for do_cmdtest ioctls */ 1465 cmd->chanlist = NULL;
1466 if (!user_chanlist)
1467 return 0;
1468
1469 chanlist = memdup_user(user_chanlist, 1466 chanlist = memdup_user(user_chanlist,
1470 cmd->chanlist_len * sizeof(unsigned int)); 1467 cmd->chanlist_len * sizeof(unsigned int));
1471 if (IS_ERR(chanlist)) 1468 if (IS_ERR(chanlist))
@@ -1609,13 +1606,18 @@ static int do_cmdtest_ioctl(struct comedi_device *dev,
1609 1606
1610 s = &dev->subdevices[cmd.subdev]; 1607 s = &dev->subdevices[cmd.subdev];
1611 1608
1612 /* load channel/gain list */ 1609 /* user_chanlist can be NULL for COMEDI_CMDTEST ioctl */
1613 ret = __comedi_get_user_chanlist(dev, s, user_chanlist, &cmd); 1610 if (user_chanlist) {
1614 if (ret) 1611 /* load channel/gain list */
1615 return ret; 1612 ret = __comedi_get_user_chanlist(dev, s, user_chanlist, &cmd);
1613 if (ret)
1614 return ret;
1615 }
1616 1616
1617 ret = s->do_cmdtest(dev, s, &cmd); 1617 ret = s->do_cmdtest(dev, s, &cmd);
1618 1618
1619 kfree(cmd.chanlist); /* free kernel copy of user chanlist */
1620
1619 /* restore chanlist pointer before copying back */ 1621 /* restore chanlist pointer before copying back */
1620 cmd.chanlist = (unsigned int __force *)user_chanlist; 1622 cmd.chanlist = (unsigned int __force *)user_chanlist;
1621 1623
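The helper relies on the memdup_user() convention: it returns either a fresh kernel copy or an ERR_PTR-encoded error, never NULL. With the change above, the CMDTEST path also frees that kernel copy itself before the user-space chanlist pointer is restored for the copy back. Minimal sketch of the convention (fragment):

    chanlist = memdup_user(user_chanlist,
                           cmd->chanlist_len * sizeof(unsigned int));
    if (IS_ERR(chanlist))
            return PTR_ERR(chanlist);

    /* ... validate and use the kernel copy ... */

    kfree(chanlist);        /* the caller owns the copy */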
@@ -1642,7 +1644,7 @@ static int do_cmdtest_ioctl(struct comedi_device *dev,
1642 1644
1643*/ 1645*/
1644 1646
1645static int do_lock_ioctl(struct comedi_device *dev, unsigned int arg, 1647static int do_lock_ioctl(struct comedi_device *dev, unsigned long arg,
1646 void *file) 1648 void *file)
1647{ 1649{
1648 int ret = 0; 1650 int ret = 0;
@@ -1679,7 +1681,7 @@ static int do_lock_ioctl(struct comedi_device *dev, unsigned int arg,
1679 This function isn't protected by the semaphore, since 1681 This function isn't protected by the semaphore, since
1680 we already own the lock. 1682 we already own the lock.
1681*/ 1683*/
1682static int do_unlock_ioctl(struct comedi_device *dev, unsigned int arg, 1684static int do_unlock_ioctl(struct comedi_device *dev, unsigned long arg,
1683 void *file) 1685 void *file)
1684{ 1686{
1685 struct comedi_subdevice *s; 1687 struct comedi_subdevice *s;
@@ -1714,7 +1716,7 @@ static int do_unlock_ioctl(struct comedi_device *dev, unsigned int arg,
1714 nothing 1716 nothing
1715 1717
1716*/ 1718*/
1717static int do_cancel_ioctl(struct comedi_device *dev, unsigned int arg, 1719static int do_cancel_ioctl(struct comedi_device *dev, unsigned long arg,
1718 void *file) 1720 void *file)
1719{ 1721{
1720 struct comedi_subdevice *s; 1722 struct comedi_subdevice *s;
@@ -1751,7 +1753,7 @@ static int do_cancel_ioctl(struct comedi_device *dev, unsigned int arg,
1751 nothing 1753 nothing
1752 1754
1753*/ 1755*/
1754static int do_poll_ioctl(struct comedi_device *dev, unsigned int arg, 1756static int do_poll_ioctl(struct comedi_device *dev, unsigned long arg,
1755 void *file) 1757 void *file)
1756{ 1758{
1757 struct comedi_subdevice *s; 1759 struct comedi_subdevice *s;
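
Note: the comedi_fops.c hunks above move the user_chanlist NULL check out of __comedi_get_user_chanlist() and into the COMEDI_CMDTEST path, and free the kernel copy once do_cmdtest() has run. A minimal sketch of the resulting copy-in pattern, assuming a memdup_user()-based helper of the same shape (the names here are hypothetical):

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>

/*
 * Sketch: copy an optional user-supplied channel list into kernel memory.
 * A NULL user pointer is legal for the cmdtest path and simply leaves
 * *chanlist NULL; the caller kfree()s the copy when it is done with it.
 */
static int copy_user_chanlist(unsigned int __user *user_chanlist,
			      unsigned int len, unsigned int **chanlist)
{
	unsigned int *tmp;

	*chanlist = NULL;
	if (!user_chanlist)
		return 0;

	tmp = memdup_user(user_chanlist, len * sizeof(*tmp));
	if (IS_ERR(tmp))
		return PTR_ERR(tmp);

	*chanlist = tmp;
	return 0;
}
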
diff --git a/drivers/staging/iio/adc/mxs-lradc.c b/drivers/staging/iio/adc/mxs-lradc.c
index 32a19264a170..2a29b9baec0d 100644
--- a/drivers/staging/iio/adc/mxs-lradc.c
+++ b/drivers/staging/iio/adc/mxs-lradc.c
@@ -1559,14 +1559,16 @@ static int mxs_lradc_probe(struct platform_device *pdev)
1559 /* Grab all IRQ sources */ 1559 /* Grab all IRQ sources */
1560 for (i = 0; i < of_cfg->irq_count; i++) { 1560 for (i = 0; i < of_cfg->irq_count; i++) {
1561 lradc->irq[i] = platform_get_irq(pdev, i); 1561 lradc->irq[i] = platform_get_irq(pdev, i);
1562 if (lradc->irq[i] < 0) 1562 if (lradc->irq[i] < 0) {
1563 return lradc->irq[i]; 1563 ret = lradc->irq[i];
1564 goto err_clk;
1565 }
1564 1566
1565 ret = devm_request_irq(dev, lradc->irq[i], 1567 ret = devm_request_irq(dev, lradc->irq[i],
1566 mxs_lradc_handle_irq, 0, 1568 mxs_lradc_handle_irq, 0,
1567 of_cfg->irq_name[i], iio); 1569 of_cfg->irq_name[i], iio);
1568 if (ret) 1570 if (ret)
1569 return ret; 1571 goto err_clk;
1570 } 1572 }
1571 1573
1572 lradc->vref_mv = of_cfg->vref_mv; 1574 lradc->vref_mv = of_cfg->vref_mv;
@@ -1588,7 +1590,7 @@ static int mxs_lradc_probe(struct platform_device *pdev)
1588 &mxs_lradc_trigger_handler, 1590 &mxs_lradc_trigger_handler,
1589 &mxs_lradc_buffer_ops); 1591 &mxs_lradc_buffer_ops);
1590 if (ret) 1592 if (ret)
1591 return ret; 1593 goto err_clk;
1592 1594
1593 ret = mxs_lradc_trigger_init(iio); 1595 ret = mxs_lradc_trigger_init(iio);
1594 if (ret) 1596 if (ret)
@@ -1643,6 +1645,8 @@ err_dev:
1643 mxs_lradc_trigger_remove(iio); 1645 mxs_lradc_trigger_remove(iio);
1644err_trig: 1646err_trig:
1645 iio_triggered_buffer_cleanup(iio); 1647 iio_triggered_buffer_cleanup(iio);
1648err_clk:
1649 clk_disable_unprepare(lradc->clk);
1646 return ret; 1650 return ret;
1647} 1651}
1648 1652
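
Note: the mxs-lradc hunks route every failure after the clock has been enabled through a new err_clk label, so clk_disable_unprepare() runs on all error paths. A generic sketch of that unwind pattern (device and IRQ names are hypothetical):

#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>

static irqreturn_t example_irq_handler(int irq, void *data)
{
	return IRQ_HANDLED;
}

/*
 * Sketch: goto-based error unwinding in probe(), so a resource acquired
 * earlier (here, an enabled clock) is released exactly once on every
 * failure path discovered later.
 */
static int example_probe(struct platform_device *pdev)
{
	struct clk *clk;
	int irq, ret;

	clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(clk))
		return PTR_ERR(clk);

	ret = clk_prepare_enable(clk);
	if (ret)
		return ret;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto err_clk;		/* clock is running, switch it off */
	}

	ret = devm_request_irq(&pdev->dev, irq, example_irq_handler, 0,
			       "example", NULL);
	if (ret)
		goto err_clk;

	return 0;

err_clk:
	clk_disable_unprepare(clk);
	return ret;
}
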
diff --git a/drivers/staging/iio/impedance-analyzer/ad5933.c b/drivers/staging/iio/impedance-analyzer/ad5933.c
index d0c89d0457de..b6bd609c3655 100644
--- a/drivers/staging/iio/impedance-analyzer/ad5933.c
+++ b/drivers/staging/iio/impedance-analyzer/ad5933.c
@@ -115,6 +115,7 @@ static const struct iio_chan_spec ad5933_channels[] = {
115 .channel = 0, 115 .channel = 0,
116 .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED), 116 .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
117 .address = AD5933_REG_TEMP_DATA, 117 .address = AD5933_REG_TEMP_DATA,
118 .scan_index = -1,
118 .scan_type = { 119 .scan_type = {
119 .sign = 's', 120 .sign = 's',
120 .realbits = 14, 121 .realbits = 14,
@@ -124,9 +125,7 @@ static const struct iio_chan_spec ad5933_channels[] = {
124 .type = IIO_VOLTAGE, 125 .type = IIO_VOLTAGE,
125 .indexed = 1, 126 .indexed = 1,
126 .channel = 0, 127 .channel = 0,
127 .extend_name = "real_raw", 128 .extend_name = "real",
128 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
129 BIT(IIO_CHAN_INFO_SCALE),
130 .address = AD5933_REG_REAL_DATA, 129 .address = AD5933_REG_REAL_DATA,
131 .scan_index = 0, 130 .scan_index = 0,
132 .scan_type = { 131 .scan_type = {
@@ -138,9 +137,7 @@ static const struct iio_chan_spec ad5933_channels[] = {
138 .type = IIO_VOLTAGE, 137 .type = IIO_VOLTAGE,
139 .indexed = 1, 138 .indexed = 1,
140 .channel = 0, 139 .channel = 0,
141 .extend_name = "imag_raw", 140 .extend_name = "imag",
142 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) |
143 BIT(IIO_CHAN_INFO_SCALE),
144 .address = AD5933_REG_IMAG_DATA, 141 .address = AD5933_REG_IMAG_DATA,
145 .scan_index = 1, 142 .scan_index = 1,
146 .scan_type = { 143 .scan_type = {
@@ -749,14 +746,14 @@ static int ad5933_probe(struct i2c_client *client,
749 indio_dev->name = id->name; 746 indio_dev->name = id->name;
750 indio_dev->modes = INDIO_DIRECT_MODE; 747 indio_dev->modes = INDIO_DIRECT_MODE;
751 indio_dev->channels = ad5933_channels; 748 indio_dev->channels = ad5933_channels;
752 indio_dev->num_channels = 1; /* only register temp0_input */ 749 indio_dev->num_channels = ARRAY_SIZE(ad5933_channels);
753 750
754 ret = ad5933_register_ring_funcs_and_init(indio_dev); 751 ret = ad5933_register_ring_funcs_and_init(indio_dev);
755 if (ret) 752 if (ret)
756 goto error_disable_reg; 753 goto error_disable_reg;
757 754
758 /* skip temp0_input, register in0_(real|imag)_raw */ 755 ret = iio_buffer_register(indio_dev, ad5933_channels,
759 ret = iio_buffer_register(indio_dev, &ad5933_channels[1], 2); 756 ARRAY_SIZE(ad5933_channels));
760 if (ret) 757 if (ret)
761 goto error_unreg_ring; 758 goto error_unreg_ring;
762 759
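
Note: the ad5933 hunks mark the temperature channel with scan_index = -1 and register the whole channel array for both sysfs and the buffer instead of hand-picking array slices. This relies on the IIO convention that a negative scan_index keeps a channel out of buffered capture; a minimal illustration with a hypothetical channel table:

#include <linux/iio/iio.h>

/*
 * Sketch: scan_index = -1 marks a sysfs-only channel that can never be
 * selected for the capture buffer; buffered channels get unique scan
 * indices and a scan_type describing their sample layout.
 */
static const struct iio_chan_spec example_channels[] = {
	{
		.type = IIO_TEMP,
		.info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
		.scan_index = -1,		/* no buffer slot */
	}, {
		.type = IIO_VOLTAGE,
		.indexed = 1,
		.channel = 0,
		.scan_index = 0,
		.scan_type = {
			.sign = 's',
			.realbits = 16,
			.storagebits = 16,
		},
	},
};
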
diff --git a/drivers/staging/iio/meter/ade7758.h b/drivers/staging/iio/meter/ade7758.h
index 07318203a836..e8c98cf57070 100644
--- a/drivers/staging/iio/meter/ade7758.h
+++ b/drivers/staging/iio/meter/ade7758.h
@@ -119,7 +119,6 @@ struct ade7758_state {
119 u8 *tx; 119 u8 *tx;
120 u8 *rx; 120 u8 *rx;
121 struct mutex buf_lock; 121 struct mutex buf_lock;
122 const struct iio_chan_spec *ade7758_ring_channels;
123 struct spi_transfer ring_xfer[4]; 122 struct spi_transfer ring_xfer[4];
124 struct spi_message ring_msg; 123 struct spi_message ring_msg;
125 /* 124 /*
diff --git a/drivers/staging/iio/meter/ade7758_core.c b/drivers/staging/iio/meter/ade7758_core.c
index abc60067cd72..fb373b89dcc2 100644
--- a/drivers/staging/iio/meter/ade7758_core.c
+++ b/drivers/staging/iio/meter/ade7758_core.c
@@ -634,9 +634,6 @@ static const struct iio_chan_spec ade7758_channels[] = {
634 .type = IIO_VOLTAGE, 634 .type = IIO_VOLTAGE,
635 .indexed = 1, 635 .indexed = 1,
636 .channel = 0, 636 .channel = 0,
637 .extend_name = "raw",
638 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
639 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
640 .address = AD7758_WT(AD7758_PHASE_A, AD7758_VOLTAGE), 637 .address = AD7758_WT(AD7758_PHASE_A, AD7758_VOLTAGE),
641 .scan_index = 0, 638 .scan_index = 0,
642 .scan_type = { 639 .scan_type = {
@@ -648,9 +645,6 @@ static const struct iio_chan_spec ade7758_channels[] = {
648 .type = IIO_CURRENT, 645 .type = IIO_CURRENT,
649 .indexed = 1, 646 .indexed = 1,
650 .channel = 0, 647 .channel = 0,
651 .extend_name = "raw",
652 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
653 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
654 .address = AD7758_WT(AD7758_PHASE_A, AD7758_CURRENT), 648 .address = AD7758_WT(AD7758_PHASE_A, AD7758_CURRENT),
655 .scan_index = 1, 649 .scan_index = 1,
656 .scan_type = { 650 .scan_type = {
@@ -662,9 +656,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
662 .type = IIO_POWER, 656 .type = IIO_POWER,
663 .indexed = 1, 657 .indexed = 1,
664 .channel = 0, 658 .channel = 0,
665 .extend_name = "apparent_raw", 659 .extend_name = "apparent",
666 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
667 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
668 .address = AD7758_WT(AD7758_PHASE_A, AD7758_APP_PWR), 660 .address = AD7758_WT(AD7758_PHASE_A, AD7758_APP_PWR),
669 .scan_index = 2, 661 .scan_index = 2,
670 .scan_type = { 662 .scan_type = {
@@ -676,9 +668,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
676 .type = IIO_POWER, 668 .type = IIO_POWER,
677 .indexed = 1, 669 .indexed = 1,
678 .channel = 0, 670 .channel = 0,
679 .extend_name = "active_raw", 671 .extend_name = "active",
680 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
681 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
682 .address = AD7758_WT(AD7758_PHASE_A, AD7758_ACT_PWR), 672 .address = AD7758_WT(AD7758_PHASE_A, AD7758_ACT_PWR),
683 .scan_index = 3, 673 .scan_index = 3,
684 .scan_type = { 674 .scan_type = {
@@ -690,9 +680,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
690 .type = IIO_POWER, 680 .type = IIO_POWER,
691 .indexed = 1, 681 .indexed = 1,
692 .channel = 0, 682 .channel = 0,
693 .extend_name = "reactive_raw", 683 .extend_name = "reactive",
694 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
695 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
696 .address = AD7758_WT(AD7758_PHASE_A, AD7758_REACT_PWR), 684 .address = AD7758_WT(AD7758_PHASE_A, AD7758_REACT_PWR),
697 .scan_index = 4, 685 .scan_index = 4,
698 .scan_type = { 686 .scan_type = {
@@ -704,9 +692,6 @@ static const struct iio_chan_spec ade7758_channels[] = {
704 .type = IIO_VOLTAGE, 692 .type = IIO_VOLTAGE,
705 .indexed = 1, 693 .indexed = 1,
706 .channel = 1, 694 .channel = 1,
707 .extend_name = "raw",
708 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
709 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
710 .address = AD7758_WT(AD7758_PHASE_B, AD7758_VOLTAGE), 695 .address = AD7758_WT(AD7758_PHASE_B, AD7758_VOLTAGE),
711 .scan_index = 5, 696 .scan_index = 5,
712 .scan_type = { 697 .scan_type = {
@@ -718,9 +703,6 @@ static const struct iio_chan_spec ade7758_channels[] = {
718 .type = IIO_CURRENT, 703 .type = IIO_CURRENT,
719 .indexed = 1, 704 .indexed = 1,
720 .channel = 1, 705 .channel = 1,
721 .extend_name = "raw",
722 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
723 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
724 .address = AD7758_WT(AD7758_PHASE_B, AD7758_CURRENT), 706 .address = AD7758_WT(AD7758_PHASE_B, AD7758_CURRENT),
725 .scan_index = 6, 707 .scan_index = 6,
726 .scan_type = { 708 .scan_type = {
@@ -732,9 +714,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
732 .type = IIO_POWER, 714 .type = IIO_POWER,
733 .indexed = 1, 715 .indexed = 1,
734 .channel = 1, 716 .channel = 1,
735 .extend_name = "apparent_raw", 717 .extend_name = "apparent",
736 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
737 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
738 .address = AD7758_WT(AD7758_PHASE_B, AD7758_APP_PWR), 718 .address = AD7758_WT(AD7758_PHASE_B, AD7758_APP_PWR),
739 .scan_index = 7, 719 .scan_index = 7,
740 .scan_type = { 720 .scan_type = {
@@ -746,9 +726,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
746 .type = IIO_POWER, 726 .type = IIO_POWER,
747 .indexed = 1, 727 .indexed = 1,
748 .channel = 1, 728 .channel = 1,
749 .extend_name = "active_raw", 729 .extend_name = "active",
750 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
751 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
752 .address = AD7758_WT(AD7758_PHASE_B, AD7758_ACT_PWR), 730 .address = AD7758_WT(AD7758_PHASE_B, AD7758_ACT_PWR),
753 .scan_index = 8, 731 .scan_index = 8,
754 .scan_type = { 732 .scan_type = {
@@ -760,9 +738,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
760 .type = IIO_POWER, 738 .type = IIO_POWER,
761 .indexed = 1, 739 .indexed = 1,
762 .channel = 1, 740 .channel = 1,
763 .extend_name = "reactive_raw", 741 .extend_name = "reactive",
764 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
765 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
766 .address = AD7758_WT(AD7758_PHASE_B, AD7758_REACT_PWR), 742 .address = AD7758_WT(AD7758_PHASE_B, AD7758_REACT_PWR),
767 .scan_index = 9, 743 .scan_index = 9,
768 .scan_type = { 744 .scan_type = {
@@ -774,9 +750,6 @@ static const struct iio_chan_spec ade7758_channels[] = {
774 .type = IIO_VOLTAGE, 750 .type = IIO_VOLTAGE,
775 .indexed = 1, 751 .indexed = 1,
776 .channel = 2, 752 .channel = 2,
777 .extend_name = "raw",
778 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
779 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
780 .address = AD7758_WT(AD7758_PHASE_C, AD7758_VOLTAGE), 753 .address = AD7758_WT(AD7758_PHASE_C, AD7758_VOLTAGE),
781 .scan_index = 10, 754 .scan_index = 10,
782 .scan_type = { 755 .scan_type = {
@@ -788,9 +761,6 @@ static const struct iio_chan_spec ade7758_channels[] = {
788 .type = IIO_CURRENT, 761 .type = IIO_CURRENT,
789 .indexed = 1, 762 .indexed = 1,
790 .channel = 2, 763 .channel = 2,
791 .extend_name = "raw",
792 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
793 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
794 .address = AD7758_WT(AD7758_PHASE_C, AD7758_CURRENT), 764 .address = AD7758_WT(AD7758_PHASE_C, AD7758_CURRENT),
795 .scan_index = 11, 765 .scan_index = 11,
796 .scan_type = { 766 .scan_type = {
@@ -802,9 +772,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
802 .type = IIO_POWER, 772 .type = IIO_POWER,
803 .indexed = 1, 773 .indexed = 1,
804 .channel = 2, 774 .channel = 2,
805 .extend_name = "apparent_raw", 775 .extend_name = "apparent",
806 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
807 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
808 .address = AD7758_WT(AD7758_PHASE_C, AD7758_APP_PWR), 776 .address = AD7758_WT(AD7758_PHASE_C, AD7758_APP_PWR),
809 .scan_index = 12, 777 .scan_index = 12,
810 .scan_type = { 778 .scan_type = {
@@ -816,9 +784,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
816 .type = IIO_POWER, 784 .type = IIO_POWER,
817 .indexed = 1, 785 .indexed = 1,
818 .channel = 2, 786 .channel = 2,
819 .extend_name = "active_raw", 787 .extend_name = "active",
820 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
821 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
822 .address = AD7758_WT(AD7758_PHASE_C, AD7758_ACT_PWR), 788 .address = AD7758_WT(AD7758_PHASE_C, AD7758_ACT_PWR),
823 .scan_index = 13, 789 .scan_index = 13,
824 .scan_type = { 790 .scan_type = {
@@ -830,9 +796,7 @@ static const struct iio_chan_spec ade7758_channels[] = {
830 .type = IIO_POWER, 796 .type = IIO_POWER,
831 .indexed = 1, 797 .indexed = 1,
832 .channel = 2, 798 .channel = 2,
833 .extend_name = "reactive_raw", 799 .extend_name = "reactive",
834 .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
835 .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_SCALE),
836 .address = AD7758_WT(AD7758_PHASE_C, AD7758_REACT_PWR), 800 .address = AD7758_WT(AD7758_PHASE_C, AD7758_REACT_PWR),
837 .scan_index = 14, 801 .scan_index = 14,
838 .scan_type = { 802 .scan_type = {
@@ -873,13 +837,14 @@ static int ade7758_probe(struct spi_device *spi)
873 goto error_free_rx; 837 goto error_free_rx;
874 } 838 }
875 st->us = spi; 839 st->us = spi;
876 st->ade7758_ring_channels = &ade7758_channels[0];
877 mutex_init(&st->buf_lock); 840 mutex_init(&st->buf_lock);
878 841
879 indio_dev->name = spi->dev.driver->name; 842 indio_dev->name = spi->dev.driver->name;
880 indio_dev->dev.parent = &spi->dev; 843 indio_dev->dev.parent = &spi->dev;
881 indio_dev->info = &ade7758_info; 844 indio_dev->info = &ade7758_info;
882 indio_dev->modes = INDIO_DIRECT_MODE; 845 indio_dev->modes = INDIO_DIRECT_MODE;
846 indio_dev->channels = ade7758_channels;
847 indio_dev->num_channels = ARRAY_SIZE(ade7758_channels);
883 848
884 ret = ade7758_configure_ring(indio_dev); 849 ret = ade7758_configure_ring(indio_dev);
885 if (ret) 850 if (ret)
diff --git a/drivers/staging/iio/meter/ade7758_ring.c b/drivers/staging/iio/meter/ade7758_ring.c
index c0accf8cce93..6e9006490742 100644
--- a/drivers/staging/iio/meter/ade7758_ring.c
+++ b/drivers/staging/iio/meter/ade7758_ring.c
@@ -85,17 +85,16 @@ static irqreturn_t ade7758_trigger_handler(int irq, void *p)
85 **/ 85 **/
86static int ade7758_ring_preenable(struct iio_dev *indio_dev) 86static int ade7758_ring_preenable(struct iio_dev *indio_dev)
87{ 87{
88 struct ade7758_state *st = iio_priv(indio_dev);
89 unsigned channel; 88 unsigned channel;
90 89
91 if (!bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength)) 90 if (bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
92 return -EINVAL; 91 return -EINVAL;
93 92
94 channel = find_first_bit(indio_dev->active_scan_mask, 93 channel = find_first_bit(indio_dev->active_scan_mask,
95 indio_dev->masklength); 94 indio_dev->masklength);
96 95
97 ade7758_write_waveform_type(&indio_dev->dev, 96 ade7758_write_waveform_type(&indio_dev->dev,
98 st->ade7758_ring_channels[channel].address); 97 indio_dev->channels[channel].address);
99 98
100 return 0; 99 return 0;
101} 100}
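
Note: ade7758_ring_preenable() previously returned -EINVAL when the scan mask was *not* empty (an inverted test) and looked channels up through a private copy of the table; the fix rejects an empty mask and indexes indio_dev->channels directly. A hedged sketch of the corrected lookup, where write_waveform() stands in for the driver's register write:

#include <linux/bitmap.h>
#include <linux/iio/iio.h>

/* Hypothetical stand-in for the driver's waveform register write. */
static int write_waveform(struct iio_dev *indio_dev, unsigned int address);

/*
 * Sketch: pick the first enabled scan element and program the device
 * from the canonical indio_dev->channels[] table.
 */
static int example_ring_preenable(struct iio_dev *indio_dev)
{
	unsigned int channel;

	if (bitmap_empty(indio_dev->active_scan_mask, indio_dev->masklength))
		return -EINVAL;		/* nothing enabled, nothing to do */

	channel = find_first_bit(indio_dev->active_scan_mask,
				 indio_dev->masklength);

	return write_waveform(indio_dev, indio_dev->channels[channel].address);
}
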
diff --git a/drivers/staging/rtl8188eu/core/rtw_cmd.c b/drivers/staging/rtl8188eu/core/rtw_cmd.c
index 9935e66935af..eddef9cd2e16 100644
--- a/drivers/staging/rtl8188eu/core/rtw_cmd.c
+++ b/drivers/staging/rtl8188eu/core/rtw_cmd.c
@@ -275,11 +275,11 @@ u8 rtw_sitesurvey_cmd(struct adapter *padapter, struct ndis_802_11_ssid *ssid,
275 if (check_fwstate(pmlmepriv, _FW_LINKED) == true) 275 if (check_fwstate(pmlmepriv, _FW_LINKED) == true)
276 rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_SCAN, 1); 276 rtw_lps_ctrl_wk_cmd(padapter, LPS_CTRL_SCAN, 1);
277 277
278 ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); 278 ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
279 if (ph2c == NULL) 279 if (ph2c == NULL)
280 return _FAIL; 280 return _FAIL;
281 281
282 psurveyPara = kzalloc(sizeof(struct sitesurvey_parm), GFP_KERNEL); 282 psurveyPara = kzalloc(sizeof(struct sitesurvey_parm), GFP_ATOMIC);
283 if (psurveyPara == NULL) { 283 if (psurveyPara == NULL) {
284 kfree(ph2c); 284 kfree(ph2c);
285 return _FAIL; 285 return _FAIL;
@@ -405,7 +405,7 @@ u8 rtw_joinbss_cmd(struct adapter *padapter, struct wlan_network *pnetwork)
405 else 405 else
406 RT_TRACE(_module_rtl871x_cmd_c_, _drv_notice_, ("+Join cmd: SSid =[%s]\n", pmlmepriv->assoc_ssid.Ssid)); 406 RT_TRACE(_module_rtl871x_cmd_c_, _drv_notice_, ("+Join cmd: SSid =[%s]\n", pmlmepriv->assoc_ssid.Ssid));
407 407
408 pcmd = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); 408 pcmd = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
409 if (pcmd == NULL) { 409 if (pcmd == NULL) {
410 res = _FAIL; 410 res = _FAIL;
411 RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("rtw_joinbss_cmd: memory allocate for cmd_obj fail!!!\n")); 411 RT_TRACE(_module_rtl871x_cmd_c_, _drv_err_, ("rtw_joinbss_cmd: memory allocate for cmd_obj fail!!!\n"));
@@ -755,13 +755,13 @@ u8 rtw_dynamic_chk_wk_cmd(struct adapter *padapter)
755 u8 res = _SUCCESS; 755 u8 res = _SUCCESS;
756 756
757 757
758 ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); 758 ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
759 if (ph2c == NULL) { 759 if (ph2c == NULL) {
760 res = _FAIL; 760 res = _FAIL;
761 goto exit; 761 goto exit;
762 } 762 }
763 763
764 pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_KERNEL); 764 pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC);
765 if (pdrvextra_cmd_parm == NULL) { 765 if (pdrvextra_cmd_parm == NULL) {
766 kfree(ph2c); 766 kfree(ph2c);
767 res = _FAIL; 767 res = _FAIL;
@@ -967,13 +967,13 @@ u8 rtw_lps_ctrl_wk_cmd(struct adapter *padapter, u8 lps_ctrl_type, u8 enqueue)
967 u8 res = _SUCCESS; 967 u8 res = _SUCCESS;
968 968
969 if (enqueue) { 969 if (enqueue) {
970 ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); 970 ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
971 if (ph2c == NULL) { 971 if (ph2c == NULL) {
972 res = _FAIL; 972 res = _FAIL;
973 goto exit; 973 goto exit;
974 } 974 }
975 975
976 pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_KERNEL); 976 pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC);
977 if (pdrvextra_cmd_parm == NULL) { 977 if (pdrvextra_cmd_parm == NULL) {
978 kfree(ph2c); 978 kfree(ph2c);
979 res = _FAIL; 979 res = _FAIL;
@@ -1010,13 +1010,13 @@ u8 rtw_rpt_timer_cfg_cmd(struct adapter *padapter, u16 min_time)
1010 1010
1011 u8 res = _SUCCESS; 1011 u8 res = _SUCCESS;
1012 1012
1013 ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); 1013 ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
1014 if (ph2c == NULL) { 1014 if (ph2c == NULL) {
1015 res = _FAIL; 1015 res = _FAIL;
1016 goto exit; 1016 goto exit;
1017 } 1017 }
1018 1018
1019 pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_KERNEL); 1019 pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC);
1020 if (pdrvextra_cmd_parm == NULL) { 1020 if (pdrvextra_cmd_parm == NULL) {
1021 kfree(ph2c); 1021 kfree(ph2c);
1022 res = _FAIL; 1022 res = _FAIL;
@@ -1088,13 +1088,13 @@ u8 rtw_ps_cmd(struct adapter *padapter)
1088 1088
1089 u8 res = _SUCCESS; 1089 u8 res = _SUCCESS;
1090 1090
1091 ppscmd = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); 1091 ppscmd = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
1092 if (ppscmd == NULL) { 1092 if (ppscmd == NULL) {
1093 res = _FAIL; 1093 res = _FAIL;
1094 goto exit; 1094 goto exit;
1095 } 1095 }
1096 1096
1097 pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_KERNEL); 1097 pdrvextra_cmd_parm = kzalloc(sizeof(struct drvextra_cmd_parm), GFP_ATOMIC);
1098 if (pdrvextra_cmd_parm == NULL) { 1098 if (pdrvextra_cmd_parm == NULL) {
1099 kfree(ppscmd); 1099 kfree(ppscmd);
1100 res = _FAIL; 1100 res = _FAIL;
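
Note: the rtw_cmd.c hunks switch these command allocations from GFP_KERNEL to GFP_ATOMIC because the callers can run in atomic context (from timers or with spinlocks held), where a sleeping allocation is not allowed. A minimal illustration of the rule, with hypothetical names:

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

struct cmd_work {
	struct list_head list;
	int op;
};

/*
 * Sketch: code running under a spinlock (or from a timer/softirq) must
 * not sleep, so any allocation there has to use GFP_ATOMIC.
 */
static void queue_cmd_atomic(spinlock_t *lock, struct list_head *queue, int op)
{
	struct cmd_work *w;
	unsigned long flags;

	spin_lock_irqsave(lock, flags);
	w = kzalloc(sizeof(*w), GFP_ATOMIC);	/* GFP_KERNEL could sleep here */
	if (w) {
		w->op = op;
		list_add_tail(&w->list, queue);
	}
	spin_unlock_irqrestore(lock, flags);
}
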
diff --git a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
index 5ba5099ec20d..70b1bc3e0e63 100644
--- a/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
+++ b/drivers/staging/rtl8188eu/core/rtw_mlme_ext.c
@@ -4241,12 +4241,12 @@ void report_survey_event(struct adapter *padapter,
4241 pcmdpriv = &padapter->cmdpriv; 4241 pcmdpriv = &padapter->cmdpriv;
4242 4242
4243 4243
4244 pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); 4244 pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
4245 if (pcmd_obj == NULL) 4245 if (pcmd_obj == NULL)
4246 return; 4246 return;
4247 4247
4248 cmdsz = (sizeof(struct survey_event) + sizeof(struct C2HEvent_Header)); 4248 cmdsz = (sizeof(struct survey_event) + sizeof(struct C2HEvent_Header));
4249 pevtcmd = kzalloc(cmdsz, GFP_KERNEL); 4249 pevtcmd = kzalloc(cmdsz, GFP_ATOMIC);
4250 if (pevtcmd == NULL) { 4250 if (pevtcmd == NULL) {
4251 kfree(pcmd_obj); 4251 kfree(pcmd_obj);
4252 return; 4252 return;
@@ -4339,12 +4339,12 @@ void report_join_res(struct adapter *padapter, int res)
4339 struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info); 4339 struct mlme_ext_info *pmlmeinfo = &(pmlmeext->mlmext_info);
4340 struct cmd_priv *pcmdpriv = &padapter->cmdpriv; 4340 struct cmd_priv *pcmdpriv = &padapter->cmdpriv;
4341 4341
4342 pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); 4342 pcmd_obj = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
4343 if (pcmd_obj == NULL) 4343 if (pcmd_obj == NULL)
4344 return; 4344 return;
4345 4345
4346 cmdsz = (sizeof(struct joinbss_event) + sizeof(struct C2HEvent_Header)); 4346 cmdsz = (sizeof(struct joinbss_event) + sizeof(struct C2HEvent_Header));
4347 pevtcmd = kzalloc(cmdsz, GFP_KERNEL); 4347 pevtcmd = kzalloc(cmdsz, GFP_ATOMIC);
4348 if (pevtcmd == NULL) { 4348 if (pevtcmd == NULL) {
4349 kfree(pcmd_obj); 4349 kfree(pcmd_obj);
4350 return; 4350 return;
@@ -4854,11 +4854,11 @@ void survey_timer_hdl(void *function_context)
4854 pmlmeext->scan_abort = false;/* reset */ 4854 pmlmeext->scan_abort = false;/* reset */
4855 } 4855 }
4856 4856
4857 ph2c = kzalloc(sizeof(struct cmd_obj), GFP_KERNEL); 4857 ph2c = kzalloc(sizeof(struct cmd_obj), GFP_ATOMIC);
4858 if (ph2c == NULL) 4858 if (ph2c == NULL)
4859 goto exit_survey_timer_hdl; 4859 goto exit_survey_timer_hdl;
4860 4860
4861 psurveyPara = kzalloc(sizeof(struct sitesurvey_parm), GFP_KERNEL); 4861 psurveyPara = kzalloc(sizeof(struct sitesurvey_parm), GFP_ATOMIC);
4862 if (psurveyPara == NULL) { 4862 if (psurveyPara == NULL) {
4863 kfree(ph2c); 4863 kfree(ph2c);
4864 goto exit_survey_timer_hdl; 4864 goto exit_survey_timer_hdl;
diff --git a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
index 33ccbbbd8ed6..d300369977fa 100644
--- a/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
+++ b/drivers/staging/rtl8188eu/core/rtw_wlan_util.c
@@ -935,7 +935,7 @@ int rtw_check_bcn_info(struct adapter *Adapter, u8 *pframe, u32 packet_len)
935 return true; 935 return true;
936 } 936 }
937 937
938 bssid = kzalloc(sizeof(struct wlan_bssid_ex), GFP_KERNEL); 938 bssid = kzalloc(sizeof(struct wlan_bssid_ex), GFP_ATOMIC);
939 939
940 subtype = GetFrameSubType(pframe) >> 4; 940 subtype = GetFrameSubType(pframe) >> 4;
941 941
diff --git a/drivers/staging/rtl8188eu/os_dep/usb_intf.c b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
index 407a318b09db..2f87150a21b7 100644
--- a/drivers/staging/rtl8188eu/os_dep/usb_intf.c
+++ b/drivers/staging/rtl8188eu/os_dep/usb_intf.c
@@ -47,6 +47,7 @@ static struct usb_device_id rtw_usb_id_tbl[] = {
47 {USB_DEVICE(0x07b8, 0x8179)}, /* Abocom - Abocom */ 47 {USB_DEVICE(0x07b8, 0x8179)}, /* Abocom - Abocom */
48 {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */ 48 {USB_DEVICE(0x2001, 0x330F)}, /* DLink DWA-125 REV D1 */
49 {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */ 49 {USB_DEVICE(0x2001, 0x3310)}, /* Dlink DWA-123 REV D1 */
50 {USB_DEVICE(0x2001, 0x3311)}, /* DLink GO-USB-N150 REV B1 */
50 {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */ 51 {USB_DEVICE(0x0df6, 0x0076)}, /* Sitecom N150 v2 */
51 {} /* Terminating entry */ 52 {} /* Terminating entry */
52}; 53};
diff --git a/drivers/staging/rtl8723au/include/rtw_eeprom.h b/drivers/staging/rtl8723au/include/rtw_eeprom.h
index e5121a2a64b4..a86f36e49dd1 100644
--- a/drivers/staging/rtl8723au/include/rtw_eeprom.h
+++ b/drivers/staging/rtl8723au/include/rtw_eeprom.h
@@ -107,12 +107,12 @@ enum rt_customer_id
107}; 107};
108 108
109struct eeprom_priv { 109struct eeprom_priv {
110 u8 mac_addr[6]; /* PermanentAddress */
110 u8 bautoload_fail_flag; 111 u8 bautoload_fail_flag;
111 u8 bloadfile_fail_flag; 112 u8 bloadfile_fail_flag;
112 u8 bloadmac_fail_flag; 113 u8 bloadmac_fail_flag;
113 /* u8 bempty; */ 114 /* u8 bempty; */
114 /* u8 sys_config; */ 115 /* u8 sys_config; */
115 u8 mac_addr[6]; /* PermanentAddress */
116 /* u8 config0; */ 116 /* u8 config0; */
117 u16 channel_plan; 117 u16 channel_plan;
118 /* u8 country_string[3]; */ 118 /* u8 country_string[3]; */
diff --git a/drivers/target/Kconfig b/drivers/target/Kconfig
index dc2d84ac5a0e..81d44c477a5b 100644
--- a/drivers/target/Kconfig
+++ b/drivers/target/Kconfig
@@ -31,6 +31,13 @@ config TCM_PSCSI
31 Say Y here to enable the TCM/pSCSI subsystem plugin for non-buffered 31 Say Y here to enable the TCM/pSCSI subsystem plugin for non-buffered
32 passthrough access to Linux/SCSI device 32 passthrough access to Linux/SCSI device
33 33
34config TCM_USER
35 tristate "TCM/USER Subsystem Plugin for Linux"
36 depends on UIO && NET
37 help
38 Say Y here to enable the TCM/USER subsystem plugin for a userspace
39 process to handle requests
40
34source "drivers/target/loopback/Kconfig" 41source "drivers/target/loopback/Kconfig"
35source "drivers/target/tcm_fc/Kconfig" 42source "drivers/target/tcm_fc/Kconfig"
36source "drivers/target/iscsi/Kconfig" 43source "drivers/target/iscsi/Kconfig"
diff --git a/drivers/target/Makefile b/drivers/target/Makefile
index 85b012d2f89b..bbb4a7d638ef 100644
--- a/drivers/target/Makefile
+++ b/drivers/target/Makefile
@@ -22,6 +22,7 @@ obj-$(CONFIG_TARGET_CORE) += target_core_mod.o
22obj-$(CONFIG_TCM_IBLOCK) += target_core_iblock.o 22obj-$(CONFIG_TCM_IBLOCK) += target_core_iblock.o
23obj-$(CONFIG_TCM_FILEIO) += target_core_file.o 23obj-$(CONFIG_TCM_FILEIO) += target_core_file.o
24obj-$(CONFIG_TCM_PSCSI) += target_core_pscsi.o 24obj-$(CONFIG_TCM_PSCSI) += target_core_pscsi.o
25obj-$(CONFIG_TCM_USER) += target_core_user.o
25 26
26# Fabric modules 27# Fabric modules
27obj-$(CONFIG_LOOPBACK_TARGET) += loopback/ 28obj-$(CONFIG_LOOPBACK_TARGET) += loopback/
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 260c3e1e312c..73e58d22e325 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -3491,7 +3491,7 @@ iscsit_build_sendtargets_response(struct iscsi_cmd *cmd,
3491 len = sprintf(buf, "TargetAddress=" 3491 len = sprintf(buf, "TargetAddress="
3492 "%s:%hu,%hu", 3492 "%s:%hu,%hu",
3493 inaddr_any ? conn->local_ip : np->np_ip, 3493 inaddr_any ? conn->local_ip : np->np_ip,
3494 inaddr_any ? conn->local_port : np->np_port, 3494 np->np_port,
3495 tpg->tpgt); 3495 tpg->tpgt);
3496 len += 1; 3496 len += 1;
3497 3497
@@ -3709,7 +3709,6 @@ static inline void iscsit_thread_check_cpumask(
3709 struct task_struct *p, 3709 struct task_struct *p,
3710 int mode) 3710 int mode)
3711{ 3711{
3712 char buf[128];
3713 /* 3712 /*
3714 * mode == 1 signals iscsi_target_tx_thread() usage. 3713 * mode == 1 signals iscsi_target_tx_thread() usage.
3715 * mode == 0 signals iscsi_target_rx_thread() usage. 3714 * mode == 0 signals iscsi_target_rx_thread() usage.
@@ -3728,8 +3727,6 @@ static inline void iscsit_thread_check_cpumask(
3728 * both TX and RX kthreads are scheduled to run on the 3727 * both TX and RX kthreads are scheduled to run on the
3729 * same CPU. 3728 * same CPU.
3730 */ 3729 */
3731 memset(buf, 0, 128);
3732 cpumask_scnprintf(buf, 128, conn->conn_cpumask);
3733 set_cpus_allowed_ptr(p, conn->conn_cpumask); 3730 set_cpus_allowed_ptr(p, conn->conn_cpumask);
3734} 3731}
3735 3732
@@ -4326,8 +4323,7 @@ int iscsit_close_connection(
4326 if (conn->conn_tx_hash.tfm) 4323 if (conn->conn_tx_hash.tfm)
4327 crypto_free_hash(conn->conn_tx_hash.tfm); 4324 crypto_free_hash(conn->conn_tx_hash.tfm);
4328 4325
4329 if (conn->conn_cpumask) 4326 free_cpumask_var(conn->conn_cpumask);
4330 free_cpumask_var(conn->conn_cpumask);
4331 4327
4332 kfree(conn->conn_ops); 4328 kfree(conn->conn_ops);
4333 conn->conn_ops = NULL; 4329 conn->conn_ops = NULL;
diff --git a/drivers/target/iscsi/iscsi_target_configfs.c b/drivers/target/iscsi/iscsi_target_configfs.c
index ae03f3e5de1e..9059c1e0b26e 100644
--- a/drivers/target/iscsi/iscsi_target_configfs.c
+++ b/drivers/target/iscsi/iscsi_target_configfs.c
@@ -669,12 +669,10 @@ static ssize_t lio_target_nacl_show_info(
669 } else { 669 } else {
670 sess = se_sess->fabric_sess_ptr; 670 sess = se_sess->fabric_sess_ptr;
671 671
672 if (sess->sess_ops->InitiatorName) 672 rb += sprintf(page+rb, "InitiatorName: %s\n",
673 rb += sprintf(page+rb, "InitiatorName: %s\n", 673 sess->sess_ops->InitiatorName);
674 sess->sess_ops->InitiatorName); 674 rb += sprintf(page+rb, "InitiatorAlias: %s\n",
675 if (sess->sess_ops->InitiatorAlias) 675 sess->sess_ops->InitiatorAlias);
676 rb += sprintf(page+rb, "InitiatorAlias: %s\n",
677 sess->sess_ops->InitiatorAlias);
678 676
679 rb += sprintf(page+rb, "LIO Session ID: %u " 677 rb += sprintf(page+rb, "LIO Session ID: %u "
680 "ISID: 0x%02x %02x %02x %02x %02x %02x " 678 "ISID: 0x%02x %02x %02x %02x %02x %02x "
diff --git a/drivers/target/iscsi/iscsi_target_erl0.c b/drivers/target/iscsi/iscsi_target_erl0.c
index 0d1e6ee3e992..a0ae5fc0ad75 100644
--- a/drivers/target/iscsi/iscsi_target_erl0.c
+++ b/drivers/target/iscsi/iscsi_target_erl0.c
@@ -345,7 +345,6 @@ static int iscsit_dataout_check_datasn(
345 struct iscsi_cmd *cmd, 345 struct iscsi_cmd *cmd,
346 unsigned char *buf) 346 unsigned char *buf)
347{ 347{
348 int dump = 0, recovery = 0;
349 u32 data_sn = 0; 348 u32 data_sn = 0;
350 struct iscsi_conn *conn = cmd->conn; 349 struct iscsi_conn *conn = cmd->conn;
351 struct iscsi_data *hdr = (struct iscsi_data *) buf; 350 struct iscsi_data *hdr = (struct iscsi_data *) buf;
@@ -370,13 +369,11 @@ static int iscsit_dataout_check_datasn(
370 pr_err("Command ITT: 0x%08x, received DataSN: 0x%08x" 369 pr_err("Command ITT: 0x%08x, received DataSN: 0x%08x"
371 " higher than expected 0x%08x.\n", cmd->init_task_tag, 370 " higher than expected 0x%08x.\n", cmd->init_task_tag,
372 be32_to_cpu(hdr->datasn), data_sn); 371 be32_to_cpu(hdr->datasn), data_sn);
373 recovery = 1;
374 goto recover; 372 goto recover;
375 } else if (be32_to_cpu(hdr->datasn) < data_sn) { 373 } else if (be32_to_cpu(hdr->datasn) < data_sn) {
376 pr_err("Command ITT: 0x%08x, received DataSN: 0x%08x" 374 pr_err("Command ITT: 0x%08x, received DataSN: 0x%08x"
377 " lower than expected 0x%08x, discarding payload.\n", 375 " lower than expected 0x%08x, discarding payload.\n",
378 cmd->init_task_tag, be32_to_cpu(hdr->datasn), data_sn); 376 cmd->init_task_tag, be32_to_cpu(hdr->datasn), data_sn);
379 dump = 1;
380 goto dump; 377 goto dump;
381 } 378 }
382 379
@@ -392,8 +389,7 @@ dump:
392 if (iscsit_dump_data_payload(conn, payload_length, 1) < 0) 389 if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
393 return DATAOUT_CANNOT_RECOVER; 390 return DATAOUT_CANNOT_RECOVER;
394 391
395 return (recovery || dump) ? DATAOUT_WITHIN_COMMAND_RECOVERY : 392 return DATAOUT_WITHIN_COMMAND_RECOVERY;
396 DATAOUT_NORMAL;
397} 393}
398 394
399static int iscsit_dataout_pre_datapduinorder_yes( 395static int iscsit_dataout_pre_datapduinorder_yes(
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 5e71ac609418..480f2e0ecc11 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -978,8 +978,7 @@ int iscsit_setup_np(
978 return 0; 978 return 0;
979fail: 979fail:
980 np->np_socket = NULL; 980 np->np_socket = NULL;
981 if (sock) 981 sock_release(sock);
982 sock_release(sock);
983 return ret; 982 return ret;
984} 983}
985 984
@@ -1190,8 +1189,7 @@ old_sess_out:
1190 if (!IS_ERR(conn->conn_tx_hash.tfm)) 1189 if (!IS_ERR(conn->conn_tx_hash.tfm))
1191 crypto_free_hash(conn->conn_tx_hash.tfm); 1190 crypto_free_hash(conn->conn_tx_hash.tfm);
1192 1191
1193 if (conn->conn_cpumask) 1192 free_cpumask_var(conn->conn_cpumask);
1194 free_cpumask_var(conn->conn_cpumask);
1195 1193
1196 kfree(conn->conn_ops); 1194 kfree(conn->conn_ops);
1197 1195
@@ -1268,8 +1266,6 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
1268 iscsit_put_transport(conn->conn_transport); 1266 iscsit_put_transport(conn->conn_transport);
1269 kfree(conn); 1267 kfree(conn);
1270 conn = NULL; 1268 conn = NULL;
1271 if (ret == -ENODEV)
1272 goto out;
1273 /* Get another socket */ 1269 /* Get another socket */
1274 return 1; 1270 return 1;
1275 } 1271 }
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 73355f4fca74..ce87ce9bdb9c 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -1481,8 +1481,9 @@ void iscsit_collect_login_stats(
1481 if (conn->param_list) 1481 if (conn->param_list)
1482 intrname = iscsi_find_param_from_key(INITIATORNAME, 1482 intrname = iscsi_find_param_from_key(INITIATORNAME,
1483 conn->param_list); 1483 conn->param_list);
1484 strcpy(ls->last_intr_fail_name, 1484 strlcpy(ls->last_intr_fail_name,
1485 (intrname ? intrname->value : "Unknown")); 1485 (intrname ? intrname->value : "Unknown"),
1486 sizeof(ls->last_intr_fail_name));
1486 1487
1487 ls->last_intr_fail_ip_family = conn->login_family; 1488 ls->last_intr_fail_ip_family = conn->login_family;
1488 1489
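
Note: the strcpy() into last_intr_fail_name is replaced with strlcpy() bounded by the destination size, so an oversized initiator name can no longer overrun the fixed-size field. The general pattern, with hypothetical names and field size:

#include <linux/string.h>

/* Hypothetical stats structure with a fixed-size name field. */
struct login_stats {
	char last_fail_name[64];
};

/*
 * Sketch: always bound string copies into fixed-size fields by the
 * destination size; strlcpy() truncates and guarantees NUL termination.
 */
static void record_fail_name(struct login_stats *ls, const char *name)
{
	strlcpy(ls->last_fail_name, name ? name : "Unknown",
		sizeof(ls->last_fail_name));
}
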
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 340de9d92b15..ab3ab27d49b7 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -153,18 +153,11 @@ static int tcm_loop_change_queue_type(struct scsi_device *sdev, int tag)
153/* 153/*
154 * Locate the SAM Task Attr from struct scsi_cmnd * 154 * Locate the SAM Task Attr from struct scsi_cmnd *
155 */ 155 */
156static int tcm_loop_sam_attr(struct scsi_cmnd *sc) 156static int tcm_loop_sam_attr(struct scsi_cmnd *sc, int tag)
157{ 157{
158 if (sc->device->tagged_supported) { 158 if (sc->device->tagged_supported &&
159 switch (sc->tag) { 159 sc->device->ordered_tags && tag >= 0)
160 case HEAD_OF_QUEUE_TAG: 160 return MSG_ORDERED_TAG;
161 return MSG_HEAD_TAG;
162 case ORDERED_QUEUE_TAG:
163 return MSG_ORDERED_TAG;
164 default:
165 break;
166 }
167 }
168 161
169 return MSG_SIMPLE_TAG; 162 return MSG_SIMPLE_TAG;
170} 163}
@@ -227,7 +220,7 @@ static void tcm_loop_submission_work(struct work_struct *work)
227 220
228 rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd, 221 rc = target_submit_cmd_map_sgls(se_cmd, tl_nexus->se_sess, sc->cmnd,
229 &tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun, 222 &tl_cmd->tl_sense_buf[0], tl_cmd->sc->device->lun,
230 transfer_length, tcm_loop_sam_attr(sc), 223 transfer_length, tcm_loop_sam_attr(sc, tl_cmd->sc_cmd_tag),
231 sc->sc_data_direction, 0, 224 sc->sc_data_direction, 0,
232 scsi_sglist(sc), scsi_sg_count(sc), 225 scsi_sglist(sc), scsi_sg_count(sc),
233 sgl_bidi, sgl_bidi_count, 226 sgl_bidi, sgl_bidi_count,
@@ -266,7 +259,7 @@ static int tcm_loop_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *sc)
266 } 259 }
267 260
268 tl_cmd->sc = sc; 261 tl_cmd->sc = sc;
269 tl_cmd->sc_cmd_tag = sc->tag; 262 tl_cmd->sc_cmd_tag = sc->request->tag;
270 INIT_WORK(&tl_cmd->work, tcm_loop_submission_work); 263 INIT_WORK(&tl_cmd->work, tcm_loop_submission_work);
271 queue_work(tcm_loop_workqueue, &tl_cmd->work); 264 queue_work(tcm_loop_workqueue, &tl_cmd->work);
272 return 0; 265 return 0;
@@ -370,7 +363,7 @@ static int tcm_loop_abort_task(struct scsi_cmnd *sc)
370 */ 363 */
371 tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id]; 364 tl_tpg = &tl_hba->tl_hba_tpgs[sc->device->id];
372 ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun, 365 ret = tcm_loop_issue_tmr(tl_tpg, tl_nexus, sc->device->lun,
373 sc->tag, TMR_ABORT_TASK); 366 sc->request->tag, TMR_ABORT_TASK);
374 return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED; 367 return (ret == TMR_FUNCTION_COMPLETE) ? SUCCESS : FAILED;
375} 368}
376 369
@@ -960,8 +953,7 @@ static int tcm_loop_port_link(
960 struct tcm_loop_tpg, tl_se_tpg); 953 struct tcm_loop_tpg, tl_se_tpg);
961 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba; 954 struct tcm_loop_hba *tl_hba = tl_tpg->tl_hba;
962 955
963 atomic_inc(&tl_tpg->tl_tpg_port_count); 956 atomic_inc_mb(&tl_tpg->tl_tpg_port_count);
964 smp_mb__after_atomic();
965 /* 957 /*
966 * Add Linux/SCSI struct scsi_device by HCTL 958 * Add Linux/SCSI struct scsi_device by HCTL
967 */ 959 */
@@ -995,8 +987,7 @@ static void tcm_loop_port_unlink(
995 scsi_remove_device(sd); 987 scsi_remove_device(sd);
996 scsi_device_put(sd); 988 scsi_device_put(sd);
997 989
998 atomic_dec(&tl_tpg->tl_tpg_port_count); 990 atomic_dec_mb(&tl_tpg->tl_tpg_port_count);
999 smp_mb__after_atomic();
1000 991
1001 pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n"); 992 pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
1002} 993}
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index fbc5ebb5f761..fb87780929d2 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -392,8 +392,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
392 if (tg_pt_id != tg_pt_gp->tg_pt_gp_id) 392 if (tg_pt_id != tg_pt_gp->tg_pt_gp_id)
393 continue; 393 continue;
394 394
395 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); 395 atomic_inc_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
396 smp_mb__after_atomic();
397 396
398 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 397 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
399 398
@@ -403,8 +402,7 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
403 found = true; 402 found = true;
404 403
405 spin_lock(&dev->t10_alua.tg_pt_gps_lock); 404 spin_lock(&dev->t10_alua.tg_pt_gps_lock);
406 atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); 405 atomic_dec_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
407 smp_mb__after_atomic();
408 break; 406 break;
409 } 407 }
410 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 408 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
@@ -998,8 +996,7 @@ static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
998 * every I_T nexus other than the I_T nexus on which the SET 996 * every I_T nexus other than the I_T nexus on which the SET
999 * TARGET PORT GROUPS command 997 * TARGET PORT GROUPS command
1000 */ 998 */
1001 atomic_inc(&mem->tg_pt_gp_mem_ref_cnt); 999 atomic_inc_mb(&mem->tg_pt_gp_mem_ref_cnt);
1002 smp_mb__after_atomic();
1003 spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 1000 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1004 1001
1005 spin_lock_bh(&port->sep_alua_lock); 1002 spin_lock_bh(&port->sep_alua_lock);
@@ -1028,8 +1025,7 @@ static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
1028 spin_unlock_bh(&port->sep_alua_lock); 1025 spin_unlock_bh(&port->sep_alua_lock);
1029 1026
1030 spin_lock(&tg_pt_gp->tg_pt_gp_lock); 1027 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
1031 atomic_dec(&mem->tg_pt_gp_mem_ref_cnt); 1028 atomic_dec_mb(&mem->tg_pt_gp_mem_ref_cnt);
1032 smp_mb__after_atomic();
1033 } 1029 }
1034 spin_unlock(&tg_pt_gp->tg_pt_gp_lock); 1030 spin_unlock(&tg_pt_gp->tg_pt_gp_lock);
1035 /* 1031 /*
@@ -1063,7 +1059,6 @@ static void core_alua_do_transition_tg_pt_work(struct work_struct *work)
1063 core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state)); 1059 core_alua_dump_state(tg_pt_gp->tg_pt_gp_alua_pending_state));
1064 spin_lock(&dev->t10_alua.tg_pt_gps_lock); 1060 spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1065 atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); 1061 atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt);
1066 smp_mb__after_atomic();
1067 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1062 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1068 1063
1069 if (tg_pt_gp->tg_pt_gp_transition_complete) 1064 if (tg_pt_gp->tg_pt_gp_transition_complete)
@@ -1125,7 +1120,6 @@ static int core_alua_do_transition_tg_pt(
1125 */ 1120 */
1126 spin_lock(&dev->t10_alua.tg_pt_gps_lock); 1121 spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1127 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); 1122 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
1128 smp_mb__after_atomic();
1129 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1123 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1130 1124
1131 if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs) { 1125 if (!explicit && tg_pt_gp->tg_pt_gp_implicit_trans_secs) {
@@ -1168,7 +1162,6 @@ int core_alua_do_port_transition(
1168 spin_lock(&local_lu_gp_mem->lu_gp_mem_lock); 1162 spin_lock(&local_lu_gp_mem->lu_gp_mem_lock);
1169 lu_gp = local_lu_gp_mem->lu_gp; 1163 lu_gp = local_lu_gp_mem->lu_gp;
1170 atomic_inc(&lu_gp->lu_gp_ref_cnt); 1164 atomic_inc(&lu_gp->lu_gp_ref_cnt);
1171 smp_mb__after_atomic();
1172 spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock); 1165 spin_unlock(&local_lu_gp_mem->lu_gp_mem_lock);
1173 /* 1166 /*
1174 * For storage objects that are members of the 'default_lu_gp', 1167 * For storage objects that are members of the 'default_lu_gp',
@@ -1184,8 +1177,7 @@ int core_alua_do_port_transition(
1184 l_tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl; 1177 l_tg_pt_gp->tg_pt_gp_alua_nacl = l_nacl;
1185 rc = core_alua_do_transition_tg_pt(l_tg_pt_gp, 1178 rc = core_alua_do_transition_tg_pt(l_tg_pt_gp,
1186 new_state, explicit); 1179 new_state, explicit);
1187 atomic_dec(&lu_gp->lu_gp_ref_cnt); 1180 atomic_dec_mb(&lu_gp->lu_gp_ref_cnt);
1188 smp_mb__after_atomic();
1189 return rc; 1181 return rc;
1190 } 1182 }
1191 /* 1183 /*
@@ -1198,8 +1190,7 @@ int core_alua_do_port_transition(
1198 lu_gp_mem_list) { 1190 lu_gp_mem_list) {
1199 1191
1200 dev = lu_gp_mem->lu_gp_mem_dev; 1192 dev = lu_gp_mem->lu_gp_mem_dev;
1201 atomic_inc(&lu_gp_mem->lu_gp_mem_ref_cnt); 1193 atomic_inc_mb(&lu_gp_mem->lu_gp_mem_ref_cnt);
1202 smp_mb__after_atomic();
1203 spin_unlock(&lu_gp->lu_gp_lock); 1194 spin_unlock(&lu_gp->lu_gp_lock);
1204 1195
1205 spin_lock(&dev->t10_alua.tg_pt_gps_lock); 1196 spin_lock(&dev->t10_alua.tg_pt_gps_lock);
@@ -1227,8 +1218,7 @@ int core_alua_do_port_transition(
1227 tg_pt_gp->tg_pt_gp_alua_port = NULL; 1218 tg_pt_gp->tg_pt_gp_alua_port = NULL;
1228 tg_pt_gp->tg_pt_gp_alua_nacl = NULL; 1219 tg_pt_gp->tg_pt_gp_alua_nacl = NULL;
1229 } 1220 }
1230 atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt); 1221 atomic_inc_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
1231 smp_mb__after_atomic();
1232 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1222 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1233 /* 1223 /*
1234 * core_alua_do_transition_tg_pt() will always return 1224 * core_alua_do_transition_tg_pt() will always return
@@ -1238,16 +1228,14 @@ int core_alua_do_port_transition(
1238 new_state, explicit); 1228 new_state, explicit);
1239 1229
1240 spin_lock(&dev->t10_alua.tg_pt_gps_lock); 1230 spin_lock(&dev->t10_alua.tg_pt_gps_lock);
1241 atomic_dec(&tg_pt_gp->tg_pt_gp_ref_cnt); 1231 atomic_dec_mb(&tg_pt_gp->tg_pt_gp_ref_cnt);
1242 smp_mb__after_atomic();
1243 if (rc) 1232 if (rc)
1244 break; 1233 break;
1245 } 1234 }
1246 spin_unlock(&dev->t10_alua.tg_pt_gps_lock); 1235 spin_unlock(&dev->t10_alua.tg_pt_gps_lock);
1247 1236
1248 spin_lock(&lu_gp->lu_gp_lock); 1237 spin_lock(&lu_gp->lu_gp_lock);
1249 atomic_dec(&lu_gp_mem->lu_gp_mem_ref_cnt); 1238 atomic_dec_mb(&lu_gp_mem->lu_gp_mem_ref_cnt);
1250 smp_mb__after_atomic();
1251 } 1239 }
1252 spin_unlock(&lu_gp->lu_gp_lock); 1240 spin_unlock(&lu_gp->lu_gp_lock);
1253 1241
@@ -1260,8 +1248,7 @@ int core_alua_do_port_transition(
1260 core_alua_dump_state(new_state)); 1248 core_alua_dump_state(new_state));
1261 } 1249 }
1262 1250
1263 atomic_dec(&lu_gp->lu_gp_ref_cnt); 1251 atomic_dec_mb(&lu_gp->lu_gp_ref_cnt);
1264 smp_mb__after_atomic();
1265 return rc; 1252 return rc;
1266} 1253}
1267 1254
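
Note: most of the target_core_alua.c hunks (and the tcm_loop/target_core_device ones above) fold the open-coded atomic_inc()/atomic_dec() followed by smp_mb__after_atomic() into atomic_inc_mb()/atomic_dec_mb() helpers. A hedged sketch of what such wrappers look like (illustrative only; the real definitions live in the target core headers):

#include <linux/atomic.h>

/*
 * Sketch: reference-count bumps that must also act as full memory
 * barriers, wrapped so callers cannot forget the barrier half.
 */
#define example_atomic_inc_mb(v)		\
	do {					\
		smp_mb__before_atomic();	\
		atomic_inc((v));		\
		smp_mb__after_atomic();		\
	} while (0)

#define example_atomic_dec_mb(v)		\
	do {					\
		smp_mb__before_atomic();	\
		atomic_dec((v));		\
		smp_mb__after_atomic();		\
	} while (0)
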
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 756def38c77a..79f9296a08ae 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -665,6 +665,9 @@ SE_DEV_ATTR(is_nonrot, S_IRUGO | S_IWUSR);
665DEF_DEV_ATTRIB(emulate_rest_reord); 665DEF_DEV_ATTRIB(emulate_rest_reord);
666SE_DEV_ATTR(emulate_rest_reord, S_IRUGO | S_IWUSR); 666SE_DEV_ATTR(emulate_rest_reord, S_IRUGO | S_IWUSR);
667 667
668DEF_DEV_ATTRIB(force_pr_aptpl);
669SE_DEV_ATTR(force_pr_aptpl, S_IRUGO | S_IWUSR);
670
668DEF_DEV_ATTRIB_RO(hw_block_size); 671DEF_DEV_ATTRIB_RO(hw_block_size);
669SE_DEV_ATTR_RO(hw_block_size); 672SE_DEV_ATTR_RO(hw_block_size);
670 673
@@ -719,6 +722,7 @@ static struct configfs_attribute *target_core_dev_attrib_attrs[] = {
719 &target_core_dev_attrib_hw_pi_prot_type.attr, 722 &target_core_dev_attrib_hw_pi_prot_type.attr,
720 &target_core_dev_attrib_pi_prot_format.attr, 723 &target_core_dev_attrib_pi_prot_format.attr,
721 &target_core_dev_attrib_enforce_pr_isids.attr, 724 &target_core_dev_attrib_enforce_pr_isids.attr,
725 &target_core_dev_attrib_force_pr_aptpl.attr,
722 &target_core_dev_attrib_is_nonrot.attr, 726 &target_core_dev_attrib_is_nonrot.attr,
723 &target_core_dev_attrib_emulate_rest_reord.attr, 727 &target_core_dev_attrib_emulate_rest_reord.attr,
724 &target_core_dev_attrib_hw_block_size.attr, 728 &target_core_dev_attrib_hw_block_size.attr,
@@ -1263,7 +1267,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
1263{ 1267{
1264 unsigned char *i_fabric = NULL, *i_port = NULL, *isid = NULL; 1268 unsigned char *i_fabric = NULL, *i_port = NULL, *isid = NULL;
1265 unsigned char *t_fabric = NULL, *t_port = NULL; 1269 unsigned char *t_fabric = NULL, *t_port = NULL;
1266 char *orig, *ptr, *arg_p, *opts; 1270 char *orig, *ptr, *opts;
1267 substring_t args[MAX_OPT_ARGS]; 1271 substring_t args[MAX_OPT_ARGS];
1268 unsigned long long tmp_ll; 1272 unsigned long long tmp_ll;
1269 u64 sa_res_key = 0; 1273 u64 sa_res_key = 0;
@@ -1295,14 +1299,14 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
1295 token = match_token(ptr, tokens, args); 1299 token = match_token(ptr, tokens, args);
1296 switch (token) { 1300 switch (token) {
1297 case Opt_initiator_fabric: 1301 case Opt_initiator_fabric:
1298 i_fabric = match_strdup(&args[0]); 1302 i_fabric = match_strdup(args);
1299 if (!i_fabric) { 1303 if (!i_fabric) {
1300 ret = -ENOMEM; 1304 ret = -ENOMEM;
1301 goto out; 1305 goto out;
1302 } 1306 }
1303 break; 1307 break;
1304 case Opt_initiator_node: 1308 case Opt_initiator_node:
1305 i_port = match_strdup(&args[0]); 1309 i_port = match_strdup(args);
1306 if (!i_port) { 1310 if (!i_port) {
1307 ret = -ENOMEM; 1311 ret = -ENOMEM;
1308 goto out; 1312 goto out;
@@ -1316,7 +1320,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
1316 } 1320 }
1317 break; 1321 break;
1318 case Opt_initiator_sid: 1322 case Opt_initiator_sid:
1319 isid = match_strdup(&args[0]); 1323 isid = match_strdup(args);
1320 if (!isid) { 1324 if (!isid) {
1321 ret = -ENOMEM; 1325 ret = -ENOMEM;
1322 goto out; 1326 goto out;
@@ -1330,15 +1334,9 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
1330 } 1334 }
1331 break; 1335 break;
1332 case Opt_sa_res_key: 1336 case Opt_sa_res_key:
1333 arg_p = match_strdup(&args[0]); 1337 ret = kstrtoull(args->from, 0, &tmp_ll);
1334 if (!arg_p) {
1335 ret = -ENOMEM;
1336 goto out;
1337 }
1338 ret = kstrtoull(arg_p, 0, &tmp_ll);
1339 if (ret < 0) { 1338 if (ret < 0) {
1340 pr_err("kstrtoull() failed for" 1339 pr_err("kstrtoull() failed for sa_res_key=\n");
1341 " sa_res_key=\n");
1342 goto out; 1340 goto out;
1343 } 1341 }
1344 sa_res_key = (u64)tmp_ll; 1342 sa_res_key = (u64)tmp_ll;
@@ -1370,14 +1368,14 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
1370 * PR APTPL Metadata for Target Port 1368 * PR APTPL Metadata for Target Port
1371 */ 1369 */
1372 case Opt_target_fabric: 1370 case Opt_target_fabric:
1373 t_fabric = match_strdup(&args[0]); 1371 t_fabric = match_strdup(args);
1374 if (!t_fabric) { 1372 if (!t_fabric) {
1375 ret = -ENOMEM; 1373 ret = -ENOMEM;
1376 goto out; 1374 goto out;
1377 } 1375 }
1378 break; 1376 break;
1379 case Opt_target_node: 1377 case Opt_target_node:
1380 t_port = match_strdup(&args[0]); 1378 t_port = match_strdup(args);
1381 if (!t_port) { 1379 if (!t_port) {
1382 ret = -ENOMEM; 1380 ret = -ENOMEM;
1383 goto out; 1381 goto out;
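
Note: the res_aptpl_metadata hunks drop the redundant &args[0] (args already decays to a pointer to the first substring) and parse sa_res_key directly from the matched substring instead of taking a temporary strdup. A condensed sketch of the match_token() option-parsing pattern used here, with a hypothetical token table:

#include <linux/errno.h>
#include <linux/parser.h>
#include <linux/slab.h>
#include <linux/string.h>

enum { Opt_name, Opt_count, Opt_err };

static const match_table_t tokens = {
	{Opt_name,  "name=%s"},
	{Opt_count, "count=%d"},
	{Opt_err,   NULL}
};

/* Sketch: opts must be a writable copy, since strsep() modifies it. */
static int parse_opts(char *opts, char **name, int *count)
{
	substring_t args[MAX_OPT_ARGS];
	char *ptr;

	while ((ptr = strsep(&opts, ",")) != NULL) {
		if (!*ptr)
			continue;
		switch (match_token(ptr, tokens, args)) {
		case Opt_name:
			*name = match_strdup(args);	/* args == &args[0] */
			if (!*name)
				return -ENOMEM;
			break;
		case Opt_count:
			if (match_int(args, count))
				return -EINVAL;
			break;
		default:
			return -EINVAL;
		}
	}
	return 0;
}
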
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 98da90167159..c45f9e907e44 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -224,8 +224,7 @@ struct se_dev_entry *core_get_se_deve_from_rtpi(
224 if (port->sep_rtpi != rtpi) 224 if (port->sep_rtpi != rtpi)
225 continue; 225 continue;
226 226
227 atomic_inc(&deve->pr_ref_count); 227 atomic_inc_mb(&deve->pr_ref_count);
228 smp_mb__after_atomic();
229 spin_unlock_irq(&nacl->device_list_lock); 228 spin_unlock_irq(&nacl->device_list_lock);
230 229
231 return deve; 230 return deve;
@@ -1019,6 +1018,23 @@ int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
1019 return 0; 1018 return 0;
1020} 1019}
1021 1020
1021int se_dev_set_force_pr_aptpl(struct se_device *dev, int flag)
1022{
1023 if ((flag != 0) && (flag != 1)) {
1024 printk(KERN_ERR "Illegal value %d\n", flag);
1025 return -EINVAL;
1026 }
1027 if (dev->export_count) {
1028 pr_err("dev[%p]: Unable to set force_pr_aptpl while"
1029 " export_count is %d\n", dev, dev->export_count);
1030 return -EINVAL;
1031 }
1032
1033 dev->dev_attrib.force_pr_aptpl = flag;
1034 pr_debug("dev[%p]: SE Device force_pr_aptpl: %d\n", dev, flag);
1035 return 0;
1036}
1037
1022int se_dev_set_is_nonrot(struct se_device *dev, int flag) 1038int se_dev_set_is_nonrot(struct se_device *dev, int flag)
1023{ 1039{
1024 if ((flag != 0) && (flag != 1)) { 1040 if ((flag != 0) && (flag != 1)) {
@@ -1250,24 +1266,16 @@ struct se_lun *core_dev_add_lun(
1250 * 1266 *
1251 * 1267 *
1252 */ 1268 */
1253int core_dev_del_lun( 1269void core_dev_del_lun(
1254 struct se_portal_group *tpg, 1270 struct se_portal_group *tpg,
1255 u32 unpacked_lun) 1271 struct se_lun *lun)
1256{ 1272{
1257 struct se_lun *lun; 1273 pr_debug("%s_TPG[%u]_LUN[%u] - Deactivating %s Logical Unit from"
1258
1259 lun = core_tpg_pre_dellun(tpg, unpacked_lun);
1260 if (IS_ERR(lun))
1261 return PTR_ERR(lun);
1262
1263 core_tpg_post_dellun(tpg, lun);
1264
1265 pr_debug("%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
1266 " device object\n", tpg->se_tpg_tfo->get_fabric_name(), 1274 " device object\n", tpg->se_tpg_tfo->get_fabric_name(),
1267 tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, 1275 tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
1268 tpg->se_tpg_tfo->get_fabric_name()); 1276 tpg->se_tpg_tfo->get_fabric_name());
1269 1277
1270 return 0; 1278 core_tpg_remove_lun(tpg, lun);
1271} 1279}
1272 1280
1273struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun) 1281struct se_lun *core_get_lun_from_tpg(struct se_portal_group *tpg, u32 unpacked_lun)
@@ -1396,8 +1404,7 @@ int core_dev_add_initiator_node_lun_acl(
1396 1404
1397 spin_lock(&lun->lun_acl_lock); 1405 spin_lock(&lun->lun_acl_lock);
1398 list_add_tail(&lacl->lacl_list, &lun->lun_acl_list); 1406 list_add_tail(&lacl->lacl_list, &lun->lun_acl_list);
1399 atomic_inc(&lun->lun_acl_count); 1407 atomic_inc_mb(&lun->lun_acl_count);
1400 smp_mb__after_atomic();
1401 spin_unlock(&lun->lun_acl_lock); 1408 spin_unlock(&lun->lun_acl_lock);
1402 1409
1403 pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for " 1410 pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for "
@@ -1409,7 +1416,8 @@ int core_dev_add_initiator_node_lun_acl(
1409 * Check to see if there are any existing persistent reservation APTPL 1416 * Check to see if there are any existing persistent reservation APTPL
1410 * pre-registrations that need to be enabled for this LUN ACL.. 1417 * pre-registrations that need to be enabled for this LUN ACL..
1411 */ 1418 */
1412 core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, lacl); 1419 core_scsi3_check_aptpl_registration(lun->lun_se_dev, tpg, lun, nacl,
1420 lacl->mapped_lun);
1413 return 0; 1421 return 0;
1414} 1422}
1415 1423
@@ -1430,8 +1438,7 @@ int core_dev_del_initiator_node_lun_acl(
1430 1438
1431 spin_lock(&lun->lun_acl_lock); 1439 spin_lock(&lun->lun_acl_lock);
1432 list_del(&lacl->lacl_list); 1440 list_del(&lacl->lacl_list);
1433 atomic_dec(&lun->lun_acl_count); 1441 atomic_dec_mb(&lun->lun_acl_count);
1434 smp_mb__after_atomic();
1435 spin_unlock(&lun->lun_acl_lock); 1442 spin_unlock(&lun->lun_acl_lock);
1436 1443
1437 core_disable_device_list_for_node(lun, NULL, lacl->mapped_lun, 1444 core_disable_device_list_for_node(lun, NULL, lacl->mapped_lun,
@@ -1554,6 +1561,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
1554 dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC; 1561 dev->dev_attrib.emulate_3pc = DA_EMULATE_3PC;
1555 dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT; 1562 dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE0_PROT;
1556 dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS; 1563 dev->dev_attrib.enforce_pr_isids = DA_ENFORCE_PR_ISIDS;
1564 dev->dev_attrib.force_pr_aptpl = DA_FORCE_PR_APTPL;
1557 dev->dev_attrib.is_nonrot = DA_IS_NONROT; 1565 dev->dev_attrib.is_nonrot = DA_IS_NONROT;
1558 dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD; 1566 dev->dev_attrib.emulate_rest_reord = DA_EMULATE_REST_REORD;
1559 dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT; 1567 dev->dev_attrib.max_unmap_lba_count = DA_MAX_UNMAP_LBA_COUNT;
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index 7de9f0475d05..0c3f90130b7d 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -320,7 +320,7 @@ static struct config_group *target_fabric_make_mappedlun(
320 struct se_node_acl, acl_group); 320 struct se_node_acl, acl_group);
321 struct se_portal_group *se_tpg = se_nacl->se_tpg; 321 struct se_portal_group *se_tpg = se_nacl->se_tpg;
322 struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf; 322 struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
323 struct se_lun_acl *lacl; 323 struct se_lun_acl *lacl = NULL;
324 struct config_item *acl_ci; 324 struct config_item *acl_ci;
325 struct config_group *lacl_cg = NULL, *ml_stat_grp = NULL; 325 struct config_group *lacl_cg = NULL, *ml_stat_grp = NULL;
326 char *buf; 326 char *buf;
@@ -406,6 +406,7 @@ static struct config_group *target_fabric_make_mappedlun(
406out: 406out:
407 if (lacl_cg) 407 if (lacl_cg)
408 kfree(lacl_cg->default_groups); 408 kfree(lacl_cg->default_groups);
409 kfree(lacl);
409 kfree(buf); 410 kfree(buf);
410 return ERR_PTR(ret); 411 return ERR_PTR(ret);
411} 412}
@@ -821,7 +822,7 @@ static int target_fabric_port_unlink(
821 tf->tf_ops.fabric_pre_unlink(se_tpg, lun); 822 tf->tf_ops.fabric_pre_unlink(se_tpg, lun);
822 } 823 }
823 824
824 core_dev_del_lun(se_tpg, lun->unpacked_lun); 825 core_dev_del_lun(se_tpg, lun);
825 return 0; 826 return 0;
826} 827}
827 828
@@ -910,16 +911,12 @@ static struct config_group *target_fabric_make_lun(
910 GFP_KERNEL); 911 GFP_KERNEL);
911 if (!port_stat_grp->default_groups) { 912 if (!port_stat_grp->default_groups) {
912 pr_err("Unable to allocate port_stat_grp->default_groups\n"); 913 pr_err("Unable to allocate port_stat_grp->default_groups\n");
913 errno = -ENOMEM; 914 kfree(lun_cg->default_groups);
914 goto out; 915 return ERR_PTR(-ENOMEM);
915 } 916 }
916 target_stat_setup_port_default_groups(lun); 917 target_stat_setup_port_default_groups(lun);
917 918
918 return &lun->lun_group; 919 return &lun->lun_group;
919out:
920 if (lun_cg)
921 kfree(lun_cg->default_groups);
922 return ERR_PTR(errno);
923} 920}
924 921
925static void target_fabric_drop_lun( 922static void target_fabric_drop_lun(
diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c
index 0d1cf8b4f49f..35bfe77160d8 100644
--- a/drivers/target/target_core_fabric_lib.c
+++ b/drivers/target/target_core_fabric_lib.c
@@ -394,9 +394,9 @@ char *iscsi_parse_pr_out_transport_id(
394 * If the caller wants the TransportID Length, we set that value for the 394 * If the caller wants the TransportID Length, we set that value for the
395 * entire iSCSI Tarnsport ID now. 395 * entire iSCSI Tarnsport ID now.
396 */ 396 */
397 if (out_tid_len != NULL) { 397 if (out_tid_len) {
398 add_len = ((buf[2] >> 8) & 0xff); 398 /* The shift works thanks to integer promotion rules */
399 add_len |= (buf[3] & 0xff); 399 add_len = (buf[2] << 8) | buf[3];
400 400
401 tid_len = strlen(&buf[4]); 401 tid_len = strlen(&buf[4]);
402 tid_len += 4; /* Add four bytes for iSCSI Transport ID header */ 402 tid_len += 4; /* Add four bytes for iSCSI Transport ID header */
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 7d6cddaec525..72c83d98662b 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -415,7 +415,7 @@ fd_execute_sync_cache(struct se_cmd *cmd)
415 } else { 415 } else {
416 start = cmd->t_task_lba * dev->dev_attrib.block_size; 416 start = cmd->t_task_lba * dev->dev_attrib.block_size;
417 if (cmd->data_length) 417 if (cmd->data_length)
418 end = start + cmd->data_length; 418 end = start + cmd->data_length - 1;
419 else 419 else
420 end = LLONG_MAX; 420 end = LLONG_MAX;
421 } 421 }
@@ -680,7 +680,12 @@ fd_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
680 struct fd_dev *fd_dev = FD_DEV(dev); 680 struct fd_dev *fd_dev = FD_DEV(dev);
681 loff_t start = cmd->t_task_lba * 681 loff_t start = cmd->t_task_lba *
682 dev->dev_attrib.block_size; 682 dev->dev_attrib.block_size;
683 loff_t end = start + cmd->data_length; 683 loff_t end;
684
685 if (cmd->data_length)
686 end = start + cmd->data_length - 1;
687 else
688 end = LLONG_MAX;
684 689
685 vfs_fsync_range(fd_dev->fd_file, start, end, 1); 690 vfs_fsync_range(fd_dev->fd_file, start, end, 1);
686 } 691 }
@@ -762,7 +767,9 @@ static ssize_t fd_set_configfs_dev_params(struct se_device *dev,
762 fd_dev->fbd_flags |= FBDF_HAS_SIZE; 767 fd_dev->fbd_flags |= FBDF_HAS_SIZE;
763 break; 768 break;
764 case Opt_fd_buffered_io: 769 case Opt_fd_buffered_io:
765 match_int(args, &arg); 770 ret = match_int(args, &arg);
771 if (ret)
772 goto out;
766 if (arg != 1) { 773 if (arg != 1) {
767 pr_err("bogus fd_buffered_io=%d value\n", arg); 774 pr_err("bogus fd_buffered_io=%d value\n", arg);
768 ret = -EINVAL; 775 ret = -EINVAL;
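The two fd_execute_*() hunks above switch the end offset passed to vfs_fsync_range() from start + cmd->data_length to start + cmd->data_length - 1 and keep LLONG_MAX for zero-length requests, i.e. the synced range is treated as inclusive of its last byte. A small sketch of that convention with a hypothetical helper:

/* Sync exactly the bytes backing an LBA extent; [start, end] is inclusive,
 * so a zero-length request falls back to end-of-file via LLONG_MAX. */
static int fd_sync_extent(struct file *file, u64 lba, u32 block_size, u32 length)
{
	loff_t start = lba * block_size;
	loff_t end = length ? start + length - 1 : LLONG_MAX;

	return vfs_fsync_range(file, start, end, 1);
}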
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index de9cab708f45..e31f42f369ff 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -38,6 +38,7 @@ int se_dev_set_emulate_3pc(struct se_device *, int);
38int se_dev_set_pi_prot_type(struct se_device *, int); 38int se_dev_set_pi_prot_type(struct se_device *, int);
39int se_dev_set_pi_prot_format(struct se_device *, int); 39int se_dev_set_pi_prot_format(struct se_device *, int);
40int se_dev_set_enforce_pr_isids(struct se_device *, int); 40int se_dev_set_enforce_pr_isids(struct se_device *, int);
41int se_dev_set_force_pr_aptpl(struct se_device *, int);
41int se_dev_set_is_nonrot(struct se_device *, int); 42int se_dev_set_is_nonrot(struct se_device *, int);
42int se_dev_set_emulate_rest_reord(struct se_device *dev, int); 43int se_dev_set_emulate_rest_reord(struct se_device *dev, int);
43int se_dev_set_queue_depth(struct se_device *, u32); 44int se_dev_set_queue_depth(struct se_device *, u32);
@@ -46,7 +47,7 @@ int se_dev_set_fabric_max_sectors(struct se_device *, u32);
46int se_dev_set_optimal_sectors(struct se_device *, u32); 47int se_dev_set_optimal_sectors(struct se_device *, u32);
47int se_dev_set_block_size(struct se_device *, u32); 48int se_dev_set_block_size(struct se_device *, u32);
48struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_device *, u32); 49struct se_lun *core_dev_add_lun(struct se_portal_group *, struct se_device *, u32);
49int core_dev_del_lun(struct se_portal_group *, u32); 50void core_dev_del_lun(struct se_portal_group *, struct se_lun *);
50struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32); 51struct se_lun *core_get_lun_from_tpg(struct se_portal_group *, u32);
51struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *, 52struct se_lun_acl *core_dev_init_initiator_node_lun_acl(struct se_portal_group *,
52 struct se_node_acl *, u32, int *); 53 struct se_node_acl *, u32, int *);
@@ -82,8 +83,7 @@ void core_tpg_wait_for_nacl_pr_ref(struct se_node_acl *);
82struct se_lun *core_tpg_alloc_lun(struct se_portal_group *, u32); 83struct se_lun *core_tpg_alloc_lun(struct se_portal_group *, u32);
83int core_tpg_add_lun(struct se_portal_group *, struct se_lun *, 84int core_tpg_add_lun(struct se_portal_group *, struct se_lun *,
84 u32, struct se_device *); 85 u32, struct se_device *);
85struct se_lun *core_tpg_pre_dellun(struct se_portal_group *, u32 unpacked_lun); 86void core_tpg_remove_lun(struct se_portal_group *, struct se_lun *);
86int core_tpg_post_dellun(struct se_portal_group *, struct se_lun *);
87 87
88/* target_core_transport.c */ 88/* target_core_transport.c */
89extern struct kmem_cache *se_tmr_req_cache; 89extern struct kmem_cache *se_tmr_req_cache;
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index df357862286e..9f93b8234095 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -674,8 +674,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
674 */ 674 */
675 spin_lock(&dev->se_port_lock); 675 spin_lock(&dev->se_port_lock);
676 list_for_each_entry_safe(port, port_tmp, &dev->dev_sep_list, sep_list) { 676 list_for_each_entry_safe(port, port_tmp, &dev->dev_sep_list, sep_list) {
677 atomic_inc(&port->sep_tg_pt_ref_cnt); 677 atomic_inc_mb(&port->sep_tg_pt_ref_cnt);
678 smp_mb__after_atomic();
679 spin_unlock(&dev->se_port_lock); 678 spin_unlock(&dev->se_port_lock);
680 679
681 spin_lock_bh(&port->sep_alua_lock); 680 spin_lock_bh(&port->sep_alua_lock);
@@ -709,8 +708,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
709 if (strcmp(nacl->initiatorname, nacl_tmp->initiatorname)) 708 if (strcmp(nacl->initiatorname, nacl_tmp->initiatorname))
710 continue; 709 continue;
711 710
712 atomic_inc(&deve_tmp->pr_ref_count); 711 atomic_inc_mb(&deve_tmp->pr_ref_count);
713 smp_mb__after_atomic();
714 spin_unlock_bh(&port->sep_alua_lock); 712 spin_unlock_bh(&port->sep_alua_lock);
715 /* 713 /*
716 * Grab a configfs group dependency that is released 714 * Grab a configfs group dependency that is released
@@ -722,10 +720,8 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
722 if (ret < 0) { 720 if (ret < 0) {
723 pr_err("core_scsi3_lunacl_depend" 721 pr_err("core_scsi3_lunacl_depend"
724 "_item() failed\n"); 722 "_item() failed\n");
725 atomic_dec(&port->sep_tg_pt_ref_cnt); 723 atomic_dec_mb(&port->sep_tg_pt_ref_cnt);
726 smp_mb__after_atomic(); 724 atomic_dec_mb(&deve_tmp->pr_ref_count);
727 atomic_dec(&deve_tmp->pr_ref_count);
728 smp_mb__after_atomic();
729 goto out; 725 goto out;
730 } 726 }
731 /* 727 /*
@@ -739,10 +735,8 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
739 nacl_tmp, deve_tmp, NULL, 735 nacl_tmp, deve_tmp, NULL,
740 sa_res_key, all_tg_pt, aptpl); 736 sa_res_key, all_tg_pt, aptpl);
741 if (!pr_reg_atp) { 737 if (!pr_reg_atp) {
742 atomic_dec(&port->sep_tg_pt_ref_cnt); 738 atomic_dec_mb(&port->sep_tg_pt_ref_cnt);
743 smp_mb__after_atomic(); 739 atomic_dec_mb(&deve_tmp->pr_ref_count);
744 atomic_dec(&deve_tmp->pr_ref_count);
745 smp_mb__after_atomic();
746 core_scsi3_lunacl_undepend_item(deve_tmp); 740 core_scsi3_lunacl_undepend_item(deve_tmp);
747 goto out; 741 goto out;
748 } 742 }
@@ -754,8 +748,7 @@ static struct t10_pr_registration *__core_scsi3_alloc_registration(
754 spin_unlock_bh(&port->sep_alua_lock); 748 spin_unlock_bh(&port->sep_alua_lock);
755 749
756 spin_lock(&dev->se_port_lock); 750 spin_lock(&dev->se_port_lock);
757 atomic_dec(&port->sep_tg_pt_ref_cnt); 751 atomic_dec_mb(&port->sep_tg_pt_ref_cnt);
758 smp_mb__after_atomic();
759 } 752 }
760 spin_unlock(&dev->se_port_lock); 753 spin_unlock(&dev->se_port_lock);
761 754
@@ -902,6 +895,7 @@ static int __core_scsi3_check_aptpl_registration(
902 spin_lock(&pr_tmpl->aptpl_reg_lock); 895 spin_lock(&pr_tmpl->aptpl_reg_lock);
903 list_for_each_entry_safe(pr_reg, pr_reg_tmp, &pr_tmpl->aptpl_reg_list, 896 list_for_each_entry_safe(pr_reg, pr_reg_tmp, &pr_tmpl->aptpl_reg_list,
904 pr_reg_aptpl_list) { 897 pr_reg_aptpl_list) {
898
905 if (!strcmp(pr_reg->pr_iport, i_port) && 899 if (!strcmp(pr_reg->pr_iport, i_port) &&
906 (pr_reg->pr_res_mapped_lun == deve->mapped_lun) && 900 (pr_reg->pr_res_mapped_lun == deve->mapped_lun) &&
907 !(strcmp(pr_reg->pr_tport, t_port)) && 901 !(strcmp(pr_reg->pr_tport, t_port)) &&
@@ -944,10 +938,10 @@ int core_scsi3_check_aptpl_registration(
944 struct se_device *dev, 938 struct se_device *dev,
945 struct se_portal_group *tpg, 939 struct se_portal_group *tpg,
946 struct se_lun *lun, 940 struct se_lun *lun,
947 struct se_lun_acl *lun_acl) 941 struct se_node_acl *nacl,
942 u32 mapped_lun)
948{ 943{
949 struct se_node_acl *nacl = lun_acl->se_lun_nacl; 944 struct se_dev_entry *deve = nacl->device_list[mapped_lun];
950 struct se_dev_entry *deve = nacl->device_list[lun_acl->mapped_lun];
951 945
952 if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) 946 if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
953 return 0; 947 return 0;
@@ -1109,8 +1103,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
1109 if (dev->dev_attrib.enforce_pr_isids) 1103 if (dev->dev_attrib.enforce_pr_isids)
1110 continue; 1104 continue;
1111 } 1105 }
1112 atomic_inc(&pr_reg->pr_res_holders); 1106 atomic_inc_mb(&pr_reg->pr_res_holders);
1113 smp_mb__after_atomic();
1114 spin_unlock(&pr_tmpl->registration_lock); 1107 spin_unlock(&pr_tmpl->registration_lock);
1115 return pr_reg; 1108 return pr_reg;
1116 } 1109 }
@@ -1124,8 +1117,7 @@ static struct t10_pr_registration *__core_scsi3_locate_pr_reg(
1124 if (strcmp(isid, pr_reg->pr_reg_isid)) 1117 if (strcmp(isid, pr_reg->pr_reg_isid))
1125 continue; 1118 continue;
1126 1119
1127 atomic_inc(&pr_reg->pr_res_holders); 1120 atomic_inc_mb(&pr_reg->pr_res_holders);
1128 smp_mb__after_atomic();
1129 spin_unlock(&pr_tmpl->registration_lock); 1121 spin_unlock(&pr_tmpl->registration_lock);
1130 return pr_reg; 1122 return pr_reg;
1131 } 1123 }
@@ -1154,8 +1146,7 @@ static struct t10_pr_registration *core_scsi3_locate_pr_reg(
1154 1146
1155static void core_scsi3_put_pr_reg(struct t10_pr_registration *pr_reg) 1147static void core_scsi3_put_pr_reg(struct t10_pr_registration *pr_reg)
1156{ 1148{
1157 atomic_dec(&pr_reg->pr_res_holders); 1149 atomic_dec_mb(&pr_reg->pr_res_holders);
1158 smp_mb__after_atomic();
1159} 1150}
1160 1151
1161static int core_scsi3_check_implicit_release( 1152static int core_scsi3_check_implicit_release(
@@ -1348,8 +1339,7 @@ static void core_scsi3_tpg_undepend_item(struct se_portal_group *tpg)
1348 configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys, 1339 configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys,
1349 &tpg->tpg_group.cg_item); 1340 &tpg->tpg_group.cg_item);
1350 1341
1351 atomic_dec(&tpg->tpg_pr_ref_count); 1342 atomic_dec_mb(&tpg->tpg_pr_ref_count);
1352 smp_mb__after_atomic();
1353} 1343}
1354 1344
1355static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl) 1345static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl)
@@ -1368,16 +1358,14 @@ static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl)
1368 struct se_portal_group *tpg = nacl->se_tpg; 1358 struct se_portal_group *tpg = nacl->se_tpg;
1369 1359
1370 if (nacl->dynamic_node_acl) { 1360 if (nacl->dynamic_node_acl) {
1371 atomic_dec(&nacl->acl_pr_ref_count); 1361 atomic_dec_mb(&nacl->acl_pr_ref_count);
1372 smp_mb__after_atomic();
1373 return; 1362 return;
1374 } 1363 }
1375 1364
1376 configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys, 1365 configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys,
1377 &nacl->acl_group.cg_item); 1366 &nacl->acl_group.cg_item);
1378 1367
1379 atomic_dec(&nacl->acl_pr_ref_count); 1368 atomic_dec_mb(&nacl->acl_pr_ref_count);
1380 smp_mb__after_atomic();
1381} 1369}
1382 1370
1383static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve) 1371static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve)
@@ -1407,8 +1395,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
1407 * For nacl->dynamic_node_acl=1 1395 * For nacl->dynamic_node_acl=1
1408 */ 1396 */
1409 if (!lun_acl) { 1397 if (!lun_acl) {
1410 atomic_dec(&se_deve->pr_ref_count); 1398 atomic_dec_mb(&se_deve->pr_ref_count);
1411 smp_mb__after_atomic();
1412 return; 1399 return;
1413 } 1400 }
1414 nacl = lun_acl->se_lun_nacl; 1401 nacl = lun_acl->se_lun_nacl;
@@ -1417,8 +1404,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
1417 configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys, 1404 configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys,
1418 &lun_acl->se_lun_group.cg_item); 1405 &lun_acl->se_lun_group.cg_item);
1419 1406
1420 atomic_dec(&se_deve->pr_ref_count); 1407 atomic_dec_mb(&se_deve->pr_ref_count);
1421 smp_mb__after_atomic();
1422} 1408}
1423 1409
1424static sense_reason_t 1410static sense_reason_t
@@ -1551,15 +1537,13 @@ core_scsi3_decode_spec_i_port(
1551 if (!i_str) 1537 if (!i_str)
1552 continue; 1538 continue;
1553 1539
1554 atomic_inc(&tmp_tpg->tpg_pr_ref_count); 1540 atomic_inc_mb(&tmp_tpg->tpg_pr_ref_count);
1555 smp_mb__after_atomic();
1556 spin_unlock(&dev->se_port_lock); 1541 spin_unlock(&dev->se_port_lock);
1557 1542
1558 if (core_scsi3_tpg_depend_item(tmp_tpg)) { 1543 if (core_scsi3_tpg_depend_item(tmp_tpg)) {
1559 pr_err(" core_scsi3_tpg_depend_item()" 1544 pr_err(" core_scsi3_tpg_depend_item()"
1560 " for tmp_tpg\n"); 1545 " for tmp_tpg\n");
1561 atomic_dec(&tmp_tpg->tpg_pr_ref_count); 1546 atomic_dec_mb(&tmp_tpg->tpg_pr_ref_count);
1562 smp_mb__after_atomic();
1563 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1547 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1564 goto out_unmap; 1548 goto out_unmap;
1565 } 1549 }
@@ -1571,10 +1555,8 @@ core_scsi3_decode_spec_i_port(
1571 spin_lock_irq(&tmp_tpg->acl_node_lock); 1555 spin_lock_irq(&tmp_tpg->acl_node_lock);
1572 dest_node_acl = __core_tpg_get_initiator_node_acl( 1556 dest_node_acl = __core_tpg_get_initiator_node_acl(
1573 tmp_tpg, i_str); 1557 tmp_tpg, i_str);
1574 if (dest_node_acl) { 1558 if (dest_node_acl)
1575 atomic_inc(&dest_node_acl->acl_pr_ref_count); 1559 atomic_inc_mb(&dest_node_acl->acl_pr_ref_count);
1576 smp_mb__after_atomic();
1577 }
1578 spin_unlock_irq(&tmp_tpg->acl_node_lock); 1560 spin_unlock_irq(&tmp_tpg->acl_node_lock);
1579 1561
1580 if (!dest_node_acl) { 1562 if (!dest_node_acl) {
@@ -1586,8 +1568,7 @@ core_scsi3_decode_spec_i_port(
1586 if (core_scsi3_nodeacl_depend_item(dest_node_acl)) { 1568 if (core_scsi3_nodeacl_depend_item(dest_node_acl)) {
1587 pr_err("configfs_depend_item() failed" 1569 pr_err("configfs_depend_item() failed"
1588 " for dest_node_acl->acl_group\n"); 1570 " for dest_node_acl->acl_group\n");
1589 atomic_dec(&dest_node_acl->acl_pr_ref_count); 1571 atomic_dec_mb(&dest_node_acl->acl_pr_ref_count);
1590 smp_mb__after_atomic();
1591 core_scsi3_tpg_undepend_item(tmp_tpg); 1572 core_scsi3_tpg_undepend_item(tmp_tpg);
1592 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1573 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1593 goto out_unmap; 1574 goto out_unmap;
@@ -1646,8 +1627,7 @@ core_scsi3_decode_spec_i_port(
1646 if (core_scsi3_lunacl_depend_item(dest_se_deve)) { 1627 if (core_scsi3_lunacl_depend_item(dest_se_deve)) {
1647 pr_err("core_scsi3_lunacl_depend_item()" 1628 pr_err("core_scsi3_lunacl_depend_item()"
1648 " failed\n"); 1629 " failed\n");
1649 atomic_dec(&dest_se_deve->pr_ref_count); 1630 atomic_dec_mb(&dest_se_deve->pr_ref_count);
1650 smp_mb__after_atomic();
1651 core_scsi3_nodeacl_undepend_item(dest_node_acl); 1631 core_scsi3_nodeacl_undepend_item(dest_node_acl);
1652 core_scsi3_tpg_undepend_item(dest_tpg); 1632 core_scsi3_tpg_undepend_item(dest_tpg);
1653 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 1633 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -2758,7 +2738,8 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
2758 struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder; 2738 struct t10_pr_registration *pr_reg, *pr_reg_tmp, *pr_reg_n, *pr_res_holder;
2759 struct t10_reservation *pr_tmpl = &dev->t10_pr; 2739 struct t10_reservation *pr_tmpl = &dev->t10_pr;
2760 u32 pr_res_mapped_lun = 0; 2740 u32 pr_res_mapped_lun = 0;
2761 int all_reg = 0, calling_it_nexus = 0, released_regs = 0; 2741 int all_reg = 0, calling_it_nexus = 0;
2742 bool sa_res_key_unmatched = sa_res_key != 0;
2762 int prh_type = 0, prh_scope = 0; 2743 int prh_type = 0, prh_scope = 0;
2763 2744
2764 if (!se_sess) 2745 if (!se_sess)
@@ -2833,6 +2814,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
2833 if (!all_reg) { 2814 if (!all_reg) {
2834 if (pr_reg->pr_res_key != sa_res_key) 2815 if (pr_reg->pr_res_key != sa_res_key)
2835 continue; 2816 continue;
2817 sa_res_key_unmatched = false;
2836 2818
2837 calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0; 2819 calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
2838 pr_reg_nacl = pr_reg->pr_reg_nacl; 2820 pr_reg_nacl = pr_reg->pr_reg_nacl;
@@ -2840,7 +2822,6 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
2840 __core_scsi3_free_registration(dev, pr_reg, 2822 __core_scsi3_free_registration(dev, pr_reg,
2841 (preempt_type == PREEMPT_AND_ABORT) ? &preempt_and_abort_list : 2823 (preempt_type == PREEMPT_AND_ABORT) ? &preempt_and_abort_list :
2842 NULL, calling_it_nexus); 2824 NULL, calling_it_nexus);
2843 released_regs++;
2844 } else { 2825 } else {
2845 /* 2826 /*
2846 * Case for any existing all registrants type 2827 * Case for any existing all registrants type
@@ -2858,6 +2839,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
2858 if ((sa_res_key) && 2839 if ((sa_res_key) &&
2859 (pr_reg->pr_res_key != sa_res_key)) 2840 (pr_reg->pr_res_key != sa_res_key))
2860 continue; 2841 continue;
2842 sa_res_key_unmatched = false;
2861 2843
2862 calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0; 2844 calling_it_nexus = (pr_reg_n == pr_reg) ? 1 : 0;
2863 if (calling_it_nexus) 2845 if (calling_it_nexus)
@@ -2868,7 +2850,6 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
2868 __core_scsi3_free_registration(dev, pr_reg, 2850 __core_scsi3_free_registration(dev, pr_reg,
2869 (preempt_type == PREEMPT_AND_ABORT) ? &preempt_and_abort_list : 2851 (preempt_type == PREEMPT_AND_ABORT) ? &preempt_and_abort_list :
2870 NULL, 0); 2852 NULL, 0);
2871 released_regs++;
2872 } 2853 }
2873 if (!calling_it_nexus) 2854 if (!calling_it_nexus)
2874 core_scsi3_ua_allocate(pr_reg_nacl, 2855 core_scsi3_ua_allocate(pr_reg_nacl,
@@ -2883,7 +2864,7 @@ core_scsi3_pro_preempt(struct se_cmd *cmd, int type, int scope, u64 res_key,
2883 * registered reservation key, then the device server shall 2864 * registered reservation key, then the device server shall
2884 * complete the command with RESERVATION CONFLICT status. 2865 * complete the command with RESERVATION CONFLICT status.
2885 */ 2866 */
2886 if (!released_regs) { 2867 if (sa_res_key_unmatched) {
2887 spin_unlock(&dev->dev_reservation_lock); 2868 spin_unlock(&dev->dev_reservation_lock);
2888 core_scsi3_put_pr_reg(pr_reg_n); 2869 core_scsi3_put_pr_reg(pr_reg_n);
2889 return TCM_RESERVATION_CONFLICT; 2870 return TCM_RESERVATION_CONFLICT;
@@ -3167,15 +3148,13 @@ core_scsi3_emulate_pro_register_and_move(struct se_cmd *cmd, u64 res_key,
3167 if (!dest_tf_ops) 3148 if (!dest_tf_ops)
3168 continue; 3149 continue;
3169 3150
3170 atomic_inc(&dest_se_tpg->tpg_pr_ref_count); 3151 atomic_inc_mb(&dest_se_tpg->tpg_pr_ref_count);
3171 smp_mb__after_atomic();
3172 spin_unlock(&dev->se_port_lock); 3152 spin_unlock(&dev->se_port_lock);
3173 3153
3174 if (core_scsi3_tpg_depend_item(dest_se_tpg)) { 3154 if (core_scsi3_tpg_depend_item(dest_se_tpg)) {
3175 pr_err("core_scsi3_tpg_depend_item() failed" 3155 pr_err("core_scsi3_tpg_depend_item() failed"
3176 " for dest_se_tpg\n"); 3156 " for dest_se_tpg\n");
3177 atomic_dec(&dest_se_tpg->tpg_pr_ref_count); 3157 atomic_dec_mb(&dest_se_tpg->tpg_pr_ref_count);
3178 smp_mb__after_atomic();
3179 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 3158 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3180 goto out_put_pr_reg; 3159 goto out_put_pr_reg;
3181 } 3160 }
@@ -3271,10 +3250,8 @@ after_iport_check:
3271 spin_lock_irq(&dest_se_tpg->acl_node_lock); 3250 spin_lock_irq(&dest_se_tpg->acl_node_lock);
3272 dest_node_acl = __core_tpg_get_initiator_node_acl(dest_se_tpg, 3251 dest_node_acl = __core_tpg_get_initiator_node_acl(dest_se_tpg,
3273 initiator_str); 3252 initiator_str);
3274 if (dest_node_acl) { 3253 if (dest_node_acl)
3275 atomic_inc(&dest_node_acl->acl_pr_ref_count); 3254 atomic_inc_mb(&dest_node_acl->acl_pr_ref_count);
3276 smp_mb__after_atomic();
3277 }
3278 spin_unlock_irq(&dest_se_tpg->acl_node_lock); 3255 spin_unlock_irq(&dest_se_tpg->acl_node_lock);
3279 3256
3280 if (!dest_node_acl) { 3257 if (!dest_node_acl) {
@@ -3288,8 +3265,7 @@ after_iport_check:
3288 if (core_scsi3_nodeacl_depend_item(dest_node_acl)) { 3265 if (core_scsi3_nodeacl_depend_item(dest_node_acl)) {
3289 pr_err("core_scsi3_nodeacl_depend_item() for" 3266 pr_err("core_scsi3_nodeacl_depend_item() for"
3290 " dest_node_acl\n"); 3267 " dest_node_acl\n");
3291 atomic_dec(&dest_node_acl->acl_pr_ref_count); 3268 atomic_dec_mb(&dest_node_acl->acl_pr_ref_count);
3292 smp_mb__after_atomic();
3293 dest_node_acl = NULL; 3269 dest_node_acl = NULL;
3294 ret = TCM_INVALID_PARAMETER_LIST; 3270 ret = TCM_INVALID_PARAMETER_LIST;
3295 goto out; 3271 goto out;
@@ -3313,8 +3289,7 @@ after_iport_check:
3313 3289
3314 if (core_scsi3_lunacl_depend_item(dest_se_deve)) { 3290 if (core_scsi3_lunacl_depend_item(dest_se_deve)) {
3315 pr_err("core_scsi3_lunacl_depend_item() failed\n"); 3291 pr_err("core_scsi3_lunacl_depend_item() failed\n");
3316 atomic_dec(&dest_se_deve->pr_ref_count); 3292 atomic_dec_mb(&dest_se_deve->pr_ref_count);
3317 smp_mb__after_atomic();
3318 dest_se_deve = NULL; 3293 dest_se_deve = NULL;
3319 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 3294 ret = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3320 goto out; 3295 goto out;
@@ -3497,6 +3472,7 @@ static unsigned long long core_scsi3_extract_reservation_key(unsigned char *cdb)
3497sense_reason_t 3472sense_reason_t
3498target_scsi3_emulate_pr_out(struct se_cmd *cmd) 3473target_scsi3_emulate_pr_out(struct se_cmd *cmd)
3499{ 3474{
3475 struct se_device *dev = cmd->se_dev;
3500 unsigned char *cdb = &cmd->t_task_cdb[0]; 3476 unsigned char *cdb = &cmd->t_task_cdb[0];
3501 unsigned char *buf; 3477 unsigned char *buf;
3502 u64 res_key, sa_res_key; 3478 u64 res_key, sa_res_key;
@@ -3561,6 +3537,13 @@ target_scsi3_emulate_pr_out(struct se_cmd *cmd)
3561 aptpl = (buf[17] & 0x01); 3537 aptpl = (buf[17] & 0x01);
3562 unreg = (buf[17] & 0x02); 3538 unreg = (buf[17] & 0x02);
3563 } 3539 }
3540 /*
3541 * If the backend device has been configured to force APTPL metadata
3542 * write-out, go ahead and propagate aptpl=1 down now.
3543 */
3544 if (dev->dev_attrib.force_pr_aptpl)
3545 aptpl = 1;
3546
3564 transport_kunmap_data_sg(cmd); 3547 transport_kunmap_data_sg(cmd);
3565 buf = NULL; 3548 buf = NULL;
3566 3549
@@ -3803,7 +3786,7 @@ core_scsi3_pri_report_capabilities(struct se_cmd *cmd)
3803 if (!buf) 3786 if (!buf)
3804 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; 3787 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
3805 3788
3806 buf[0] = ((add_len << 8) & 0xff); 3789 buf[0] = ((add_len >> 8) & 0xff);
3807 buf[1] = (add_len & 0xff); 3790 buf[1] = (add_len & 0xff);
3808 buf[2] |= 0x10; /* CRH: Compatible Reservation Handling bit. */ 3791 buf[2] |= 0x10; /* CRH: Compatible Reservation Handling bit. */
3809 buf[2] |= 0x08; /* SIP_C: Specify Initiator Ports Capable bit */ 3792 buf[2] |= 0x08; /* SIP_C: Specify Initiator Ports Capable bit */
@@ -3879,8 +3862,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
3879 se_tpg = pr_reg->pr_reg_nacl->se_tpg; 3862 se_tpg = pr_reg->pr_reg_nacl->se_tpg;
3880 add_desc_len = 0; 3863 add_desc_len = 0;
3881 3864
3882 atomic_inc(&pr_reg->pr_res_holders); 3865 atomic_inc_mb(&pr_reg->pr_res_holders);
3883 smp_mb__after_atomic();
3884 spin_unlock(&pr_tmpl->registration_lock); 3866 spin_unlock(&pr_tmpl->registration_lock);
3885 /* 3867 /*
3886 * Determine expected length of $FABRIC_MOD specific 3868 * Determine expected length of $FABRIC_MOD specific
@@ -3893,8 +3875,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
3893 pr_warn("SPC-3 PRIN READ_FULL_STATUS ran" 3875 pr_warn("SPC-3 PRIN READ_FULL_STATUS ran"
3894 " out of buffer: %d\n", cmd->data_length); 3876 " out of buffer: %d\n", cmd->data_length);
3895 spin_lock(&pr_tmpl->registration_lock); 3877 spin_lock(&pr_tmpl->registration_lock);
3896 atomic_dec(&pr_reg->pr_res_holders); 3878 atomic_dec_mb(&pr_reg->pr_res_holders);
3897 smp_mb__after_atomic();
3898 break; 3879 break;
3899 } 3880 }
3900 /* 3881 /*
@@ -3955,8 +3936,7 @@ core_scsi3_pri_read_full_status(struct se_cmd *cmd)
3955 se_nacl, pr_reg, &format_code, &buf[off+4]); 3936 se_nacl, pr_reg, &format_code, &buf[off+4]);
3956 3937
3957 spin_lock(&pr_tmpl->registration_lock); 3938 spin_lock(&pr_tmpl->registration_lock);
3958 atomic_dec(&pr_reg->pr_res_holders); 3939 atomic_dec_mb(&pr_reg->pr_res_holders);
3959 smp_mb__after_atomic();
3960 /* 3940 /*
3961 * Set the ADDITIONAL DESCRIPTOR LENGTH 3941 * Set the ADDITIONAL DESCRIPTOR LENGTH
3962 */ 3942 */
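Two of the hunks above correct 16-bit big-endian field handling: iscsi_parse_pr_out_transport_id() now assembles the additional length as (buf[2] << 8) | buf[3], and REPORT CAPABILITIES writes the high byte with add_len >> 8 instead of add_len << 8. The underlying pattern, sketched with hypothetical helper names (in-tree code can also use get_unaligned_be16()/put_unaligned_be16() for the same thing):

static inline u16 spc_get_be16(const unsigned char *p)
{
	return (p[0] << 8) | p[1];	/* integer promotion keeps the shift safe */
}

static inline void spc_put_be16(unsigned char *p, u16 val)
{
	p[0] = (val >> 8) & 0xff;
	p[1] = val & 0xff;
}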
diff --git a/drivers/target/target_core_pr.h b/drivers/target/target_core_pr.h
index 2ee2936fa0bd..749fd7bb7510 100644
--- a/drivers/target/target_core_pr.h
+++ b/drivers/target/target_core_pr.h
@@ -60,7 +60,7 @@ extern int core_scsi3_alloc_aptpl_registration(
60 unsigned char *, u16, u32, int, int, u8); 60 unsigned char *, u16, u32, int, int, u8);
61extern int core_scsi3_check_aptpl_registration(struct se_device *, 61extern int core_scsi3_check_aptpl_registration(struct se_device *,
62 struct se_portal_group *, struct se_lun *, 62 struct se_portal_group *, struct se_lun *,
63 struct se_lun_acl *); 63 struct se_node_acl *, u32);
64extern void core_scsi3_free_pr_reg_from_nacl(struct se_device *, 64extern void core_scsi3_free_pr_reg_from_nacl(struct se_device *,
65 struct se_node_acl *); 65 struct se_node_acl *);
66extern void core_scsi3_free_all_registrations(struct se_device *); 66extern void core_scsi3_free_all_registrations(struct se_device *);
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 70d9f6dabba0..7c8291f0bbbc 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -749,14 +749,18 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_device *dev,
749 ret = -EINVAL; 749 ret = -EINVAL;
750 goto out; 750 goto out;
751 } 751 }
752 match_int(args, &arg); 752 ret = match_int(args, &arg);
753 if (ret)
754 goto out;
753 pdv->pdv_host_id = arg; 755 pdv->pdv_host_id = arg;
754 pr_debug("PSCSI[%d]: Referencing SCSI Host ID:" 756 pr_debug("PSCSI[%d]: Referencing SCSI Host ID:"
755 " %d\n", phv->phv_host_id, pdv->pdv_host_id); 757 " %d\n", phv->phv_host_id, pdv->pdv_host_id);
756 pdv->pdv_flags |= PDF_HAS_VIRT_HOST_ID; 758 pdv->pdv_flags |= PDF_HAS_VIRT_HOST_ID;
757 break; 759 break;
758 case Opt_scsi_channel_id: 760 case Opt_scsi_channel_id:
759 match_int(args, &arg); 761 ret = match_int(args, &arg);
762 if (ret)
763 goto out;
760 pdv->pdv_channel_id = arg; 764 pdv->pdv_channel_id = arg;
761 pr_debug("PSCSI[%d]: Referencing SCSI Channel" 765 pr_debug("PSCSI[%d]: Referencing SCSI Channel"
762 " ID: %d\n", phv->phv_host_id, 766 " ID: %d\n", phv->phv_host_id,
@@ -764,7 +768,9 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_device *dev,
764 pdv->pdv_flags |= PDF_HAS_CHANNEL_ID; 768 pdv->pdv_flags |= PDF_HAS_CHANNEL_ID;
765 break; 769 break;
766 case Opt_scsi_target_id: 770 case Opt_scsi_target_id:
767 match_int(args, &arg); 771 ret = match_int(args, &arg);
772 if (ret)
773 goto out;
768 pdv->pdv_target_id = arg; 774 pdv->pdv_target_id = arg;
769 pr_debug("PSCSI[%d]: Referencing SCSI Target" 775 pr_debug("PSCSI[%d]: Referencing SCSI Target"
770 " ID: %d\n", phv->phv_host_id, 776 " ID: %d\n", phv->phv_host_id,
@@ -772,7 +778,9 @@ static ssize_t pscsi_set_configfs_dev_params(struct se_device *dev,
772 pdv->pdv_flags |= PDF_HAS_TARGET_ID; 778 pdv->pdv_flags |= PDF_HAS_TARGET_ID;
773 break; 779 break;
774 case Opt_scsi_lun_id: 780 case Opt_scsi_lun_id:
775 match_int(args, &arg); 781 ret = match_int(args, &arg);
782 if (ret)
783 goto out;
776 pdv->pdv_lun_id = arg; 784 pdv->pdv_lun_id = arg;
777 pr_debug("PSCSI[%d]: Referencing SCSI LUN ID:" 785 pr_debug("PSCSI[%d]: Referencing SCSI LUN ID:"
778 " %d\n", phv->phv_host_id, pdv->pdv_lun_id); 786 " %d\n", phv->phv_host_id, pdv->pdv_lun_id);
diff --git a/drivers/target/target_core_sbc.c b/drivers/target/target_core_sbc.c
index bd78d9235ac6..ebe62afb957d 100644
--- a/drivers/target/target_core_sbc.c
+++ b/drivers/target/target_core_sbc.c
@@ -948,7 +948,7 @@ sbc_parse_cdb(struct se_cmd *cmd, struct sbc_ops *ops)
948 } 948 }
949 949
950 /* reject any command that we don't have a handler for */ 950 /* reject any command that we don't have a handler for */
951 if (!(cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) && !cmd->execute_cmd) 951 if (!cmd->execute_cmd)
952 return TCM_UNSUPPORTED_SCSI_OPCODE; 952 return TCM_UNSUPPORTED_SCSI_OPCODE;
953 953
954 if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) { 954 if (cmd->se_cmd_flags & SCF_SCSI_DATA_CDB) {
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index f7cd95e8111a..fa5e157db47b 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -64,21 +64,17 @@ int core_tmr_alloc_req(
64} 64}
65EXPORT_SYMBOL(core_tmr_alloc_req); 65EXPORT_SYMBOL(core_tmr_alloc_req);
66 66
67void core_tmr_release_req( 67void core_tmr_release_req(struct se_tmr_req *tmr)
68 struct se_tmr_req *tmr)
69{ 68{
70 struct se_device *dev = tmr->tmr_dev; 69 struct se_device *dev = tmr->tmr_dev;
71 unsigned long flags; 70 unsigned long flags;
72 71
73 if (!dev) { 72 if (dev) {
74 kfree(tmr); 73 spin_lock_irqsave(&dev->se_tmr_lock, flags);
75 return; 74 list_del(&tmr->tmr_list);
75 spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
76 } 76 }
77 77
78 spin_lock_irqsave(&dev->se_tmr_lock, flags);
79 list_del(&tmr->tmr_list);
80 spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
81
82 kfree(tmr); 78 kfree(tmr);
83} 79}
84 80
@@ -90,9 +86,8 @@ static void core_tmr_handle_tas_abort(
90 bool remove = true; 86 bool remove = true;
91 /* 87 /*
92 * TASK ABORTED status (TAS) bit support 88 * TASK ABORTED status (TAS) bit support
93 */ 89 */
94 if ((tmr_nacl && 90 if ((tmr_nacl && (tmr_nacl != cmd->se_sess->se_node_acl)) && tas) {
95 (tmr_nacl != cmd->se_sess->se_node_acl)) && tas) {
96 remove = false; 91 remove = false;
97 transport_send_task_abort(cmd); 92 transport_send_task_abort(cmd);
98 } 93 }
@@ -120,13 +115,12 @@ void core_tmr_abort_task(
120 struct se_tmr_req *tmr, 115 struct se_tmr_req *tmr,
121 struct se_session *se_sess) 116 struct se_session *se_sess)
122{ 117{
123 struct se_cmd *se_cmd, *tmp_cmd; 118 struct se_cmd *se_cmd;
124 unsigned long flags; 119 unsigned long flags;
125 int ref_tag; 120 int ref_tag;
126 121
127 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags); 122 spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
128 list_for_each_entry_safe(se_cmd, tmp_cmd, 123 list_for_each_entry(se_cmd, &se_sess->sess_cmd_list, se_cmd_list) {
129 &se_sess->sess_cmd_list, se_cmd_list) {
130 124
131 if (dev != se_cmd->se_dev) 125 if (dev != se_cmd->se_dev)
132 continue; 126 continue;
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index be783f717f19..0696de9553d3 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -40,6 +40,7 @@
40#include <target/target_core_fabric.h> 40#include <target/target_core_fabric.h>
41 41
42#include "target_core_internal.h" 42#include "target_core_internal.h"
43#include "target_core_pr.h"
43 44
44extern struct se_device *g_lun0_dev; 45extern struct se_device *g_lun0_dev;
45 46
@@ -166,6 +167,13 @@ void core_tpg_add_node_to_devs(
166 167
167 core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun, 168 core_enable_device_list_for_node(lun, NULL, lun->unpacked_lun,
168 lun_access, acl, tpg); 169 lun_access, acl, tpg);
170 /*
171 * Check to see if there are any existing persistent reservation
172 * APTPL pre-registrations that need to be enabled for this dynamic
173 * LUN ACL now..
174 */
175 core_scsi3_check_aptpl_registration(dev, tpg, lun, acl,
176 lun->unpacked_lun);
169 spin_lock(&tpg->tpg_lun_lock); 177 spin_lock(&tpg->tpg_lun_lock);
170 } 178 }
171 spin_unlock(&tpg->tpg_lun_lock); 179 spin_unlock(&tpg->tpg_lun_lock);
@@ -335,7 +343,7 @@ void core_tpg_clear_object_luns(struct se_portal_group *tpg)
335 continue; 343 continue;
336 344
337 spin_unlock(&tpg->tpg_lun_lock); 345 spin_unlock(&tpg->tpg_lun_lock);
338 core_dev_del_lun(tpg, lun->unpacked_lun); 346 core_dev_del_lun(tpg, lun);
339 spin_lock(&tpg->tpg_lun_lock); 347 spin_lock(&tpg->tpg_lun_lock);
340 } 348 }
341 spin_unlock(&tpg->tpg_lun_lock); 349 spin_unlock(&tpg->tpg_lun_lock);
@@ -663,13 +671,6 @@ static int core_tpg_setup_virtual_lun0(struct se_portal_group *se_tpg)
663 return 0; 671 return 0;
664} 672}
665 673
666static void core_tpg_release_virtual_lun0(struct se_portal_group *se_tpg)
667{
668 struct se_lun *lun = &se_tpg->tpg_virt_lun0;
669
670 core_tpg_post_dellun(se_tpg, lun);
671}
672
673int core_tpg_register( 674int core_tpg_register(
674 struct target_core_fabric_ops *tfo, 675 struct target_core_fabric_ops *tfo,
675 struct se_wwn *se_wwn, 676 struct se_wwn *se_wwn,
@@ -773,7 +774,7 @@ int core_tpg_deregister(struct se_portal_group *se_tpg)
773 spin_unlock_irq(&se_tpg->acl_node_lock); 774 spin_unlock_irq(&se_tpg->acl_node_lock);
774 775
775 if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) 776 if (se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL)
776 core_tpg_release_virtual_lun0(se_tpg); 777 core_tpg_remove_lun(se_tpg, &se_tpg->tpg_virt_lun0);
777 778
778 se_tpg->se_tpg_fabric_ptr = NULL; 779 se_tpg->se_tpg_fabric_ptr = NULL;
779 array_free(se_tpg->tpg_lun_list, TRANSPORT_MAX_LUNS_PER_TPG); 780 array_free(se_tpg->tpg_lun_list, TRANSPORT_MAX_LUNS_PER_TPG);
@@ -838,37 +839,7 @@ int core_tpg_add_lun(
838 return 0; 839 return 0;
839} 840}
840 841
841struct se_lun *core_tpg_pre_dellun( 842void core_tpg_remove_lun(
842 struct se_portal_group *tpg,
843 u32 unpacked_lun)
844{
845 struct se_lun *lun;
846
847 if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
848 pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
849 "-1: %u for Target Portal Group: %u\n",
850 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
851 TRANSPORT_MAX_LUNS_PER_TPG-1,
852 tpg->se_tpg_tfo->tpg_get_tag(tpg));
853 return ERR_PTR(-EOVERFLOW);
854 }
855
856 spin_lock(&tpg->tpg_lun_lock);
857 lun = tpg->tpg_lun_list[unpacked_lun];
858 if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
859 pr_err("%s Logical Unit Number: %u is not active on"
860 " Target Portal Group: %u, ignoring request.\n",
861 tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
862 tpg->se_tpg_tfo->tpg_get_tag(tpg));
863 spin_unlock(&tpg->tpg_lun_lock);
864 return ERR_PTR(-ENODEV);
865 }
866 spin_unlock(&tpg->tpg_lun_lock);
867
868 return lun;
869}
870
871int core_tpg_post_dellun(
872 struct se_portal_group *tpg, 843 struct se_portal_group *tpg,
873 struct se_lun *lun) 844 struct se_lun *lun)
874{ 845{
@@ -882,6 +853,4 @@ int core_tpg_post_dellun(
882 spin_unlock(&tpg->tpg_lun_lock); 853 spin_unlock(&tpg->tpg_lun_lock);
883 854
884 percpu_ref_exit(&lun->lun_ref); 855 percpu_ref_exit(&lun->lun_ref);
885
886 return 0;
887} 856}
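The target_core_tpg.c hunks above, together with the prototype changes in target_core_internal.h, fold the old lookup-then-remove pair core_tpg_pre_dellun()/core_tpg_post_dellun() into a single void core_tpg_remove_lun() that takes the se_lun the caller already holds. A rough before/after sketch of the caller side (the exact caller body is assumed, not shown in this diff):

	/* before: two steps, with an error-pointer lookup by LUN number */
	lun = core_tpg_pre_dellun(tpg, unpacked_lun);
	if (IS_ERR(lun))
		return PTR_ERR(lun);
	core_tpg_post_dellun(tpg, lun);

	/* after: the se_lun pointer is passed straight in */
	core_tpg_remove_lun(tpg, lun);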
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 7fa62fc93e0b..be877bf6f730 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -232,6 +232,10 @@ void transport_subsystem_check_init(void)
232 if (ret != 0) 232 if (ret != 0)
233 pr_err("Unable to load target_core_pscsi\n"); 233 pr_err("Unable to load target_core_pscsi\n");
234 234
235 ret = request_module("target_core_user");
236 if (ret != 0)
237 pr_err("Unable to load target_core_user\n");
238
235 sub_api_initialized = 1; 239 sub_api_initialized = 1;
236} 240}
237 241
@@ -752,8 +756,7 @@ void target_qf_do_work(struct work_struct *work)
752 756
753 list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) { 757 list_for_each_entry_safe(cmd, cmd_tmp, &qf_cmd_list, se_qf_node) {
754 list_del(&cmd->se_qf_node); 758 list_del(&cmd->se_qf_node);
755 atomic_dec(&dev->dev_qf_count); 759 atomic_dec_mb(&dev->dev_qf_count);
756 smp_mb__after_atomic();
757 760
758 pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue" 761 pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
759 " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd, 762 " context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
@@ -1166,7 +1169,6 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
1166 * Dormant to Active status. 1169 * Dormant to Active status.
1167 */ 1170 */
1168 cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id); 1171 cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
1169 smp_mb__after_atomic();
1170 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n", 1172 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
1171 cmd->se_ordered_id, cmd->sam_task_attr, 1173 cmd->se_ordered_id, cmd->sam_task_attr,
1172 dev->transport->name); 1174 dev->transport->name);
@@ -1722,8 +1724,7 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
1722 cmd->t_task_cdb[0], cmd->se_ordered_id); 1724 cmd->t_task_cdb[0], cmd->se_ordered_id);
1723 return false; 1725 return false;
1724 case MSG_ORDERED_TAG: 1726 case MSG_ORDERED_TAG:
1725 atomic_inc(&dev->dev_ordered_sync); 1727 atomic_inc_mb(&dev->dev_ordered_sync);
1726 smp_mb__after_atomic();
1727 1728
1728 pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, " 1729 pr_debug("Added ORDERED for CDB: 0x%02x to ordered list, "
1729 " se_ordered_id: %u\n", 1730 " se_ordered_id: %u\n",
@@ -1740,8 +1741,7 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
1740 /* 1741 /*
1741 * For SIMPLE and UNTAGGED Task Attribute commands 1742 * For SIMPLE and UNTAGGED Task Attribute commands
1742 */ 1743 */
1743 atomic_inc(&dev->simple_cmds); 1744 atomic_inc_mb(&dev->simple_cmds);
1744 smp_mb__after_atomic();
1745 break; 1745 break;
1746 } 1746 }
1747 1747
@@ -1845,8 +1845,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
1845 return; 1845 return;
1846 1846
1847 if (cmd->sam_task_attr == MSG_SIMPLE_TAG) { 1847 if (cmd->sam_task_attr == MSG_SIMPLE_TAG) {
1848 atomic_dec(&dev->simple_cmds); 1848 atomic_dec_mb(&dev->simple_cmds);
1849 smp_mb__after_atomic();
1850 dev->dev_cur_ordered_id++; 1849 dev->dev_cur_ordered_id++;
1851 pr_debug("Incremented dev->dev_cur_ordered_id: %u for" 1850 pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
1852 " SIMPLE: %u\n", dev->dev_cur_ordered_id, 1851 " SIMPLE: %u\n", dev->dev_cur_ordered_id,
@@ -1857,8 +1856,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
1857 " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id, 1856 " HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
1858 cmd->se_ordered_id); 1857 cmd->se_ordered_id);
1859 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) { 1858 } else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
1860 atomic_dec(&dev->dev_ordered_sync); 1859 atomic_dec_mb(&dev->dev_ordered_sync);
1861 smp_mb__after_atomic();
1862 1860
1863 dev->dev_cur_ordered_id++; 1861 dev->dev_cur_ordered_id++;
1864 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:" 1862 pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
@@ -1877,8 +1875,7 @@ static void transport_complete_qf(struct se_cmd *cmd)
1877 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) { 1875 if (cmd->se_cmd_flags & SCF_TRANSPORT_TASK_SENSE) {
1878 trace_target_cmd_complete(cmd); 1876 trace_target_cmd_complete(cmd);
1879 ret = cmd->se_tfo->queue_status(cmd); 1877 ret = cmd->se_tfo->queue_status(cmd);
1880 if (ret) 1878 goto out;
1881 goto out;
1882 } 1879 }
1883 1880
1884 switch (cmd->data_direction) { 1881 switch (cmd->data_direction) {
@@ -1916,8 +1913,7 @@ static void transport_handle_queue_full(
1916{ 1913{
1917 spin_lock_irq(&dev->qf_cmd_lock); 1914 spin_lock_irq(&dev->qf_cmd_lock);
1918 list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list); 1915 list_add_tail(&cmd->se_qf_node, &cmd->se_dev->qf_cmd_list);
1919 atomic_inc(&dev->dev_qf_count); 1916 atomic_inc_mb(&dev->dev_qf_count);
1920 smp_mb__after_atomic();
1921 spin_unlock_irq(&cmd->se_dev->qf_cmd_lock); 1917 spin_unlock_irq(&cmd->se_dev->qf_cmd_lock);
1922 1918
1923 schedule_work(&cmd->se_dev->qf_work_queue); 1919 schedule_work(&cmd->se_dev->qf_work_queue);
@@ -2296,7 +2292,7 @@ transport_generic_new_cmd(struct se_cmd *cmd)
2296 * and let it call back once the write buffers are ready. 2292 * and let it call back once the write buffers are ready.
2297 */ 2293 */
2298 target_add_to_state_list(cmd); 2294 target_add_to_state_list(cmd);
2299 if (cmd->data_direction != DMA_TO_DEVICE) { 2295 if (cmd->data_direction != DMA_TO_DEVICE || cmd->data_length == 0) {
2300 target_execute_cmd(cmd); 2296 target_execute_cmd(cmd);
2301 return 0; 2297 return 0;
2302 } 2298 }
@@ -2896,7 +2892,6 @@ void transport_send_task_abort(struct se_cmd *cmd)
2896 if (cmd->se_tfo->write_pending_status(cmd) != 0) { 2892 if (cmd->se_tfo->write_pending_status(cmd) != 0) {
2897 cmd->transport_state |= CMD_T_ABORTED; 2893 cmd->transport_state |= CMD_T_ABORTED;
2898 cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS; 2894 cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
2899 smp_mb__after_atomic();
2900 return; 2895 return;
2901 } 2896 }
2902 } 2897 }
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
index 101858e245b3..1738b1646988 100644
--- a/drivers/target/target_core_ua.c
+++ b/drivers/target/target_core_ua.c
@@ -161,8 +161,7 @@ int core_scsi3_ua_allocate(
161 spin_unlock(&deve->ua_lock); 161 spin_unlock(&deve->ua_lock);
162 spin_unlock_irq(&nacl->device_list_lock); 162 spin_unlock_irq(&nacl->device_list_lock);
163 163
164 atomic_inc(&deve->ua_count); 164 atomic_inc_mb(&deve->ua_count);
165 smp_mb__after_atomic();
166 return 0; 165 return 0;
167 } 166 }
168 list_add_tail(&ua->ua_nacl_list, &deve->ua_list); 167 list_add_tail(&ua->ua_nacl_list, &deve->ua_list);
@@ -174,8 +173,7 @@ int core_scsi3_ua_allocate(
174 nacl->se_tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun, 173 nacl->se_tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
175 asc, ascq); 174 asc, ascq);
176 175
177 atomic_inc(&deve->ua_count); 176 atomic_inc_mb(&deve->ua_count);
178 smp_mb__after_atomic();
179 return 0; 177 return 0;
180} 178}
181 179
@@ -189,8 +187,7 @@ void core_scsi3_ua_release_all(
189 list_del(&ua->ua_nacl_list); 187 list_del(&ua->ua_nacl_list);
190 kmem_cache_free(se_ua_cache, ua); 188 kmem_cache_free(se_ua_cache, ua);
191 189
192 atomic_dec(&deve->ua_count); 190 atomic_dec_mb(&deve->ua_count);
193 smp_mb__after_atomic();
194 } 191 }
195 spin_unlock(&deve->ua_lock); 192 spin_unlock(&deve->ua_lock);
196} 193}
@@ -250,8 +247,7 @@ void core_scsi3_ua_for_check_condition(
250 list_del(&ua->ua_nacl_list); 247 list_del(&ua->ua_nacl_list);
251 kmem_cache_free(se_ua_cache, ua); 248 kmem_cache_free(se_ua_cache, ua);
252 249
253 atomic_dec(&deve->ua_count); 250 atomic_dec_mb(&deve->ua_count);
254 smp_mb__after_atomic();
255 } 251 }
256 spin_unlock(&deve->ua_lock); 252 spin_unlock(&deve->ua_lock);
257 spin_unlock_irq(&nacl->device_list_lock); 253 spin_unlock_irq(&nacl->device_list_lock);
@@ -309,8 +305,7 @@ int core_scsi3_ua_clear_for_request_sense(
309 list_del(&ua->ua_nacl_list); 305 list_del(&ua->ua_nacl_list);
310 kmem_cache_free(se_ua_cache, ua); 306 kmem_cache_free(se_ua_cache, ua);
311 307
312 atomic_dec(&deve->ua_count); 308 atomic_dec_mb(&deve->ua_count);
313 smp_mb__after_atomic();
314 } 309 }
315 spin_unlock(&deve->ua_lock); 310 spin_unlock(&deve->ua_lock);
316 spin_unlock_irq(&nacl->device_list_lock); 311 spin_unlock_irq(&nacl->device_list_lock);
diff --git a/drivers/target/target_core_ua.h b/drivers/target/target_core_ua.h
index be912b36daae..a6b56b364e7a 100644
--- a/drivers/target/target_core_ua.h
+++ b/drivers/target/target_core_ua.h
@@ -1,4 +1,5 @@
1#ifndef TARGET_CORE_UA_H 1#ifndef TARGET_CORE_UA_H
2#define TARGET_CORE_UA_H
2 3
3/* 4/*
4 * From spc4r17, Table D.1: ASC and ASCQ Assignment 5 * From spc4r17, Table D.1: ASC and ASCQ Assignment
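The one-line hunk above supplies the #define half of the header's include guard, which was missing. The complete guard pattern this assumes:

#ifndef TARGET_CORE_UA_H
#define TARGET_CORE_UA_H

/* ... declarations ... */

#endif /* TARGET_CORE_UA_H */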
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
new file mode 100644
index 000000000000..9a1b314f6482
--- /dev/null
+++ b/drivers/target/target_core_user.c
@@ -0,0 +1,1167 @@
1/*
2 * Copyright (C) 2013 Shaohua Li <shli@kernel.org>
3 * Copyright (C) 2014 Red Hat, Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
17 */
18
19#include <linux/spinlock.h>
20#include <linux/module.h>
21#include <linux/idr.h>
22#include <linux/timer.h>
23#include <linux/parser.h>
24#include <scsi/scsi.h>
25#include <scsi/scsi_host.h>
26#include <linux/uio_driver.h>
27#include <net/genetlink.h>
28#include <target/target_core_base.h>
29#include <target/target_core_fabric.h>
30#include <target/target_core_backend.h>
31#include <linux/target_core_user.h>
32
33/*
34 * Define a shared-memory interface for LIO to pass SCSI commands and
35 * data to userspace for processing. This is to allow backends that
36 * are too complex for in-kernel support to be possible.
37 *
38 * It uses the UIO framework to do a lot of the device-creation and
39 * introspection work for us.
40 *
41 * See the .h file for how the ring is laid out. Note that while the
42 * command ring is defined, the particulars of the data area are
43 * not. Offset values in the command entry point to other locations
44 * internal to the mmap()ed area. There is separate space outside the
45 * command ring for data buffers. This leaves maximum flexibility for
46 * moving buffer allocations, or even page flipping or other
47 * allocation techniques, without altering the command ring layout.
48 *
49 * SECURITY:
50 * The user process must be assumed to be malicious. There's no way to
51 * prevent it breaking the command ring protocol if it wants, but in
52 * order to prevent other issues we must only ever read *data* from
53 * the shared memory area, not offsets or sizes. This applies to
54 * command ring entries as well as the mailbox. Extra code needed for
55 * this may have a 'UAM' comment.
56 */
57
58
59#define TCMU_TIME_OUT (30 * MSEC_PER_SEC)
60
61#define CMDR_SIZE (16 * 4096)
62#define DATA_SIZE (257 * 4096)
63
64#define TCMU_RING_SIZE (CMDR_SIZE + DATA_SIZE)
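Rough geometry implied by the constants above for each mapped TCM-USER device (a sketch; the exact placement of the data area is an assumption based on the fields defined further down):

/*
 *   CMDR_SIZE      = 16  * 4096 =   64 KiB of command ring
 *   DATA_SIZE      = 257 * 4096 = 1028 KiB of data area
 *   TCMU_RING_SIZE = 273 * 4096 = ~1.07 MiB mapped per device:
 *   struct tcmu_mailbox at offset 0, ring entries from CMDR_OFF onward,
 *   and the data area at data_off covering the remainder.
 */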
65
66static struct device *tcmu_root_device;
67
68struct tcmu_hba {
69 u32 host_id;
70};
71
72/* User wants all cmds or just some */
73enum passthru_level {
74 TCMU_PASS_ALL = 0,
75 TCMU_PASS_IO,
76 TCMU_PASS_INVALID,
77};
78
79#define TCMU_CONFIG_LEN 256
80
81struct tcmu_dev {
82 struct se_device se_dev;
83
84 char *name;
85 struct se_hba *hba;
86
87#define TCMU_DEV_BIT_OPEN 0
88#define TCMU_DEV_BIT_BROKEN 1
89 unsigned long flags;
90 enum passthru_level pass_level;
91
92 struct uio_info uio_info;
93
94 struct tcmu_mailbox *mb_addr;
95 size_t dev_size;
96 u32 cmdr_size;
97 u32 cmdr_last_cleaned;
98 /* Offset of data ring from start of mb */
99 size_t data_off;
100 size_t data_size;
101 /* Ring head + tail values. */
102 /* Must add data_off and mb_addr to get the address */
103 size_t data_head;
104 size_t data_tail;
105
106 wait_queue_head_t wait_cmdr;
107 /* TODO should this be a mutex? */
108 spinlock_t cmdr_lock;
109
110 struct idr commands;
111 spinlock_t commands_lock;
112
113 struct timer_list timeout;
114
115 char dev_config[TCMU_CONFIG_LEN];
116};
117
118#define TCMU_DEV(_se_dev) container_of(_se_dev, struct tcmu_dev, se_dev)
119
120#define CMDR_OFF sizeof(struct tcmu_mailbox)
121
122struct tcmu_cmd {
123 struct se_cmd *se_cmd;
124 struct tcmu_dev *tcmu_dev;
125
126 uint16_t cmd_id;
127
128 /* Can't use se_cmd->data_length when cleaning up expired cmds, because if
129 cmd has been completed then accessing se_cmd is off limits */
130 size_t data_length;
131
132 unsigned long deadline;
133
134#define TCMU_CMD_BIT_EXPIRED 0
135 unsigned long flags;
136};
137
138static struct kmem_cache *tcmu_cmd_cache;
139
140/* multicast group */
141enum tcmu_multicast_groups {
142 TCMU_MCGRP_CONFIG,
143};
144
145static const struct genl_multicast_group tcmu_mcgrps[] = {
146 [TCMU_MCGRP_CONFIG] = { .name = "config", },
147};
148
149/* Our generic netlink family */
150static struct genl_family tcmu_genl_family = {
151 .id = GENL_ID_GENERATE,
152 .hdrsize = 0,
153 .name = "TCM-USER",
154 .version = 1,
155 .maxattr = TCMU_ATTR_MAX,
156 .mcgrps = tcmu_mcgrps,
157 .n_mcgrps = ARRAY_SIZE(tcmu_mcgrps),
158};
159
160static struct tcmu_cmd *tcmu_alloc_cmd(struct se_cmd *se_cmd)
161{
162 struct se_device *se_dev = se_cmd->se_dev;
163 struct tcmu_dev *udev = TCMU_DEV(se_dev);
164 struct tcmu_cmd *tcmu_cmd;
165 int cmd_id;
166
167 tcmu_cmd = kmem_cache_zalloc(tcmu_cmd_cache, GFP_KERNEL);
168 if (!tcmu_cmd)
169 return NULL;
170
171 tcmu_cmd->se_cmd = se_cmd;
172 tcmu_cmd->tcmu_dev = udev;
173 tcmu_cmd->data_length = se_cmd->data_length;
174
175 tcmu_cmd->deadline = jiffies + msecs_to_jiffies(TCMU_TIME_OUT);
176
177 idr_preload(GFP_KERNEL);
178 spin_lock_irq(&udev->commands_lock);
179 cmd_id = idr_alloc(&udev->commands, tcmu_cmd, 0,
180 USHRT_MAX, GFP_NOWAIT);
181 spin_unlock_irq(&udev->commands_lock);
182 idr_preload_end();
183
184 if (cmd_id < 0) {
185 kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
186 return NULL;
187 }
188 tcmu_cmd->cmd_id = cmd_id;
189
190 return tcmu_cmd;
191}
192
193static inline void tcmu_flush_dcache_range(void *vaddr, size_t size)
194{
195 unsigned long offset = (unsigned long) vaddr & ~PAGE_MASK;
196
197 size = round_up(size+offset, PAGE_SIZE);
198 vaddr -= offset;
199
200 while (size) {
201 flush_dcache_page(virt_to_page(vaddr));
202 size -= PAGE_SIZE;
203 }
204}
205
206/*
207 * Some ring helper functions. We don't assume size is a power of 2 so
208 * we can't use circ_buf.h.
209 */
210static inline size_t spc_used(size_t head, size_t tail, size_t size)
211{
212 int diff = head - tail;
213
214 if (diff >= 0)
215 return diff;
216 else
217 return size + diff;
218}
219
220static inline size_t spc_free(size_t head, size_t tail, size_t size)
221{
222 /* Keep 1 byte unused or we can't tell full from empty */
223 return (size - spc_used(head, tail, size) - 1);
224}
225
226static inline size_t head_to_end(size_t head, size_t size)
227{
228 return size - head;
229}
230
231#define UPDATE_HEAD(head, used, size) smp_store_release(&head, ((head % size) + used) % size)
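A worked example of the ring arithmetic above, with an arbitrarily small size:

/*
 * size = 8, head = 2, tail = 5 (the ring has wrapped):
 *   spc_used() = 8 + (2 - 5) = 5
 *   spc_free() = 8 - 5 - 1   = 2   (one slot is always kept unused so a
 *                                   full ring can be told apart from an
 *                                   empty one, where head == tail)
 *   UPDATE_HEAD(head, 3, 8) then moves head from 2 to (2 + 3) % 8 = 5,
 *   using a release store so the data written is visible before the index.
 */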
232
233/*
234 * We can't queue a command until we have space available on the cmd ring *and*
235 * space avail on the data ring.
236 *
237 * Called with ring lock held.
238 */
239static bool is_ring_space_avail(struct tcmu_dev *udev, size_t cmd_size, size_t data_needed)
240{
241 struct tcmu_mailbox *mb = udev->mb_addr;
242 size_t space;
243 u32 cmd_head;
244 size_t cmd_needed;
245
246 tcmu_flush_dcache_range(mb, sizeof(*mb));
247
248 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
249
250 /*
251 * If cmd end-of-ring space is too small then we need space for a NOP plus
252 * original cmd - cmds are internally contiguous.
253 */
254 if (head_to_end(cmd_head, udev->cmdr_size) >= cmd_size)
255 cmd_needed = cmd_size;
256 else
257 cmd_needed = cmd_size + head_to_end(cmd_head, udev->cmdr_size);
258
259 space = spc_free(cmd_head, udev->cmdr_last_cleaned, udev->cmdr_size);
260 if (space < cmd_needed) {
261 pr_debug("no cmd space: %u %u %u\n", cmd_head,
262 udev->cmdr_last_cleaned, udev->cmdr_size);
263 return false;
264 }
265
266 space = spc_free(udev->data_head, udev->data_tail, udev->data_size);
267 if (space < data_needed) {
268 pr_debug("no data space: %zu %zu %zu\n", udev->data_head,
269 udev->data_tail, udev->data_size);
270 return false;
271 }
272
273 return true;
274}
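A worked example of the end-of-ring accounting in is_ring_space_avail() (numbers are illustrative):

/*
 * cmdr_size = 64 KiB, cmd_head = 63 KiB, command_size = 2 KiB:
 * head_to_end() = 1 KiB < 2 KiB, so the entry cannot sit at the tail of
 * the ring. cmd_needed becomes 2 KiB + 1 KiB = 3 KiB, enough for the
 * 1 KiB PAD entry that tcmu_queue_cmd_ring() inserts below plus the real
 * command placed again at offset 0.
 */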
275
276static int tcmu_queue_cmd_ring(struct tcmu_cmd *tcmu_cmd)
277{
278 struct tcmu_dev *udev = tcmu_cmd->tcmu_dev;
279 struct se_cmd *se_cmd = tcmu_cmd->se_cmd;
280 size_t base_command_size, command_size;
281 struct tcmu_mailbox *mb;
282 struct tcmu_cmd_entry *entry;
283 int i;
284 struct scatterlist *sg;
285 struct iovec *iov;
286 int iov_cnt = 0;
287 uint32_t cmd_head;
288 uint64_t cdb_off;
289
290 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags))
291 return -EINVAL;
292
293 /*
294 * Must be a certain minimum size for response sense info, but
295 * also may be larger if the iov array is large.
296 *
297 * iovs = sgl_nents+1, for end-of-ring case, plus another 1
298 * b/c size == offsetof one-past-element.
299 */
300 base_command_size = max(offsetof(struct tcmu_cmd_entry,
301 req.iov[se_cmd->t_data_nents + 2]),
302 sizeof(struct tcmu_cmd_entry));
303 command_size = base_command_size
304 + round_up(scsi_command_size(se_cmd->t_task_cdb), TCMU_OP_ALIGN_SIZE);
305
306 WARN_ON(command_size & (TCMU_OP_ALIGN_SIZE-1));
307
308 spin_lock_irq(&udev->cmdr_lock);
309
310 mb = udev->mb_addr;
311 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
312 if ((command_size > (udev->cmdr_size / 2))
313 || tcmu_cmd->data_length > (udev->data_size - 1))
314 pr_warn("TCMU: Request of size %zu/%zu may be too big for %u/%zu "
315 "cmd/data ring buffers\n", command_size, tcmu_cmd->data_length,
316 udev->cmdr_size, udev->data_size);
317
318 while (!is_ring_space_avail(udev, command_size, tcmu_cmd->data_length)) {
319 int ret;
320 DEFINE_WAIT(__wait);
321
322 prepare_to_wait(&udev->wait_cmdr, &__wait, TASK_INTERRUPTIBLE);
323
324 pr_debug("sleeping for ring space\n");
325 spin_unlock_irq(&udev->cmdr_lock);
326 ret = schedule_timeout(msecs_to_jiffies(TCMU_TIME_OUT));
327 finish_wait(&udev->wait_cmdr, &__wait);
328 if (!ret) {
329 pr_warn("tcmu: command timed out\n");
330 return -ETIMEDOUT;
331 }
332
333 spin_lock_irq(&udev->cmdr_lock);
334
335 /* We dropped cmdr_lock, cmd_head is stale */
336 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
337 }
338
339 /* Insert a PAD if end-of-ring space is too small */
340 if (head_to_end(cmd_head, udev->cmdr_size) < command_size) {
341 size_t pad_size = head_to_end(cmd_head, udev->cmdr_size);
342
343 entry = (void *) mb + CMDR_OFF + cmd_head;
344 tcmu_flush_dcache_range(entry, sizeof(*entry));
345 tcmu_hdr_set_op(&entry->hdr, TCMU_OP_PAD);
346 tcmu_hdr_set_len(&entry->hdr, pad_size);
347
348 UPDATE_HEAD(mb->cmd_head, pad_size, udev->cmdr_size);
349
350 cmd_head = mb->cmd_head % udev->cmdr_size; /* UAM */
351 WARN_ON(cmd_head != 0);
352 }
353
354 entry = (void *) mb + CMDR_OFF + cmd_head;
355 tcmu_flush_dcache_range(entry, sizeof(*entry));
356 tcmu_hdr_set_op(&entry->hdr, TCMU_OP_CMD);
357 tcmu_hdr_set_len(&entry->hdr, command_size);
358 entry->cmd_id = tcmu_cmd->cmd_id;
359
360 /*
361 * Fix up iovecs, and handle if allocation in data ring wrapped.
362 */
363 iov = &entry->req.iov[0];
364 for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, i) {
365 size_t copy_bytes = min((size_t)sg->length,
366 head_to_end(udev->data_head, udev->data_size));
367 void *from = kmap_atomic(sg_page(sg)) + sg->offset;
368 void *to = (void *) mb + udev->data_off + udev->data_head;
369
370 if (tcmu_cmd->se_cmd->data_direction == DMA_TO_DEVICE) {
371 memcpy(to, from, copy_bytes);
372 tcmu_flush_dcache_range(to, copy_bytes);
373 }
374
375 /* Even iov_base is relative to mb_addr */
376 iov->iov_len = copy_bytes;
377 iov->iov_base = (void *) udev->data_off + udev->data_head;
378 iov_cnt++;
379 iov++;
380
381 UPDATE_HEAD(udev->data_head, copy_bytes, udev->data_size);
382
383 /* Uh oh, we wrapped the buffer. Must split sg across 2 iovs. */
384 if (sg->length != copy_bytes) {
385 from += copy_bytes;
386 copy_bytes = sg->length - copy_bytes;
387
388 iov->iov_len = copy_bytes;
389 iov->iov_base = (void *) udev->data_off + udev->data_head;
390
391 if (se_cmd->data_direction == DMA_TO_DEVICE) {
392 to = (void *) mb + udev->data_off + udev->data_head;
393 memcpy(to, from, copy_bytes);
394 tcmu_flush_dcache_range(to, copy_bytes);
395 }
396
397 iov_cnt++;
398 iov++;
399
400 UPDATE_HEAD(udev->data_head, copy_bytes, udev->data_size);
401 }
402
403 kunmap_atomic(from);
404 }
405 entry->req.iov_cnt = iov_cnt;
406
407 /* All offsets relative to mb_addr, not start of entry! */
408 cdb_off = CMDR_OFF + cmd_head + base_command_size;
409 memcpy((void *) mb + cdb_off, se_cmd->t_task_cdb, scsi_command_size(se_cmd->t_task_cdb));
410 entry->req.cdb_off = cdb_off;
411 tcmu_flush_dcache_range(entry, sizeof(*entry));
412
413 UPDATE_HEAD(mb->cmd_head, command_size, udev->cmdr_size);
414 tcmu_flush_dcache_range(mb, sizeof(*mb));
415
416 spin_unlock_irq(&udev->cmdr_lock);
417
418 /* TODO: only if FLUSH and FUA? */
419 uio_event_notify(&udev->uio_info);
420
421 mod_timer(&udev->timeout,
422 round_jiffies_up(jiffies + msecs_to_jiffies(TCMU_TIME_OUT)));
423
424 return 0;
425}
426
427static int tcmu_queue_cmd(struct se_cmd *se_cmd)
428{
429 struct se_device *se_dev = se_cmd->se_dev;
430 struct tcmu_dev *udev = TCMU_DEV(se_dev);
431 struct tcmu_cmd *tcmu_cmd;
432 int ret;
433
434 tcmu_cmd = tcmu_alloc_cmd(se_cmd);
435 if (!tcmu_cmd)
436 return -ENOMEM;
437
438 ret = tcmu_queue_cmd_ring(tcmu_cmd);
439 if (ret < 0) {
440 pr_err("TCMU: Could not queue command\n");
441 spin_lock_irq(&udev->commands_lock);
442 idr_remove(&udev->commands, tcmu_cmd->cmd_id);
443 spin_unlock_irq(&udev->commands_lock);
444
445 kmem_cache_free(tcmu_cmd_cache, tcmu_cmd);
446 }
447
448 return ret;
449}
450
451static void tcmu_handle_completion(struct tcmu_cmd *cmd, struct tcmu_cmd_entry *entry)
452{
453 struct se_cmd *se_cmd = cmd->se_cmd;
454 struct tcmu_dev *udev = cmd->tcmu_dev;
455
456 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags)) {
457 /* cmd has been completed already from timeout, just reclaim data
458 ring space */
459 UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
460 return;
461 }
462
463 if (entry->rsp.scsi_status == SAM_STAT_CHECK_CONDITION) {
464 memcpy(se_cmd->sense_buffer, entry->rsp.sense_buffer,
465 se_cmd->scsi_sense_length);
466
467 UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
468 }
469 else if (se_cmd->data_direction == DMA_FROM_DEVICE) {
470 struct scatterlist *sg;
471 int i;
472
473 /* It'd be easier to look at entry's iovec again, but UAM */
474 for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, i) {
475 size_t copy_bytes;
476 void *to;
477 void *from;
478
479 copy_bytes = min((size_t)sg->length,
480 head_to_end(udev->data_tail, udev->data_size));
481
482 to = kmap_atomic(sg_page(sg)) + sg->offset;
483 WARN_ON(sg->length + sg->offset > PAGE_SIZE);
484 from = (void *) udev->mb_addr + udev->data_off + udev->data_tail;
485 tcmu_flush_dcache_range(from, copy_bytes);
486 memcpy(to, from, copy_bytes);
487
488 UPDATE_HEAD(udev->data_tail, copy_bytes, udev->data_size);
489
490 /* Uh oh, wrapped the data buffer for this sg's data */
491 if (sg->length != copy_bytes) {
492 from = (void *) udev->mb_addr + udev->data_off + udev->data_tail;
493 WARN_ON(udev->data_tail);
494 to += copy_bytes;
495 copy_bytes = sg->length - copy_bytes;
496 tcmu_flush_dcache_range(from, copy_bytes);
497 memcpy(to, from, copy_bytes);
498
499 UPDATE_HEAD(udev->data_tail, copy_bytes, udev->data_size);
500 }
501
502 kunmap_atomic(to);
503 }
504
505 } else if (se_cmd->data_direction == DMA_TO_DEVICE) {
506 UPDATE_HEAD(udev->data_tail, cmd->data_length, udev->data_size);
507 } else {
508 pr_warn("TCMU: data direction was %d!\n", se_cmd->data_direction);
509 }
510
511 target_complete_cmd(cmd->se_cmd, entry->rsp.scsi_status);
512 cmd->se_cmd = NULL;
513
514 kmem_cache_free(tcmu_cmd_cache, cmd);
515}
516
517static unsigned int tcmu_handle_completions(struct tcmu_dev *udev)
518{
519 struct tcmu_mailbox *mb;
520 LIST_HEAD(cpl_cmds);
521 unsigned long flags;
522 int handled = 0;
523
524 if (test_bit(TCMU_DEV_BIT_BROKEN, &udev->flags)) {
525 pr_err("ring broken, not handling completions\n");
526 return 0;
527 }
528
529 spin_lock_irqsave(&udev->cmdr_lock, flags);
530
531 mb = udev->mb_addr;
532 tcmu_flush_dcache_range(mb, sizeof(*mb));
533
534 while (udev->cmdr_last_cleaned != ACCESS_ONCE(mb->cmd_tail)) {
535
536 struct tcmu_cmd_entry *entry = (void *) mb + CMDR_OFF + udev->cmdr_last_cleaned;
537 struct tcmu_cmd *cmd;
538
539 tcmu_flush_dcache_range(entry, sizeof(*entry));
540
541 if (tcmu_hdr_get_op(&entry->hdr) == TCMU_OP_PAD) {
542 UPDATE_HEAD(udev->cmdr_last_cleaned, tcmu_hdr_get_len(&entry->hdr), udev->cmdr_size);
543 continue;
544 }
545 WARN_ON(tcmu_hdr_get_op(&entry->hdr) != TCMU_OP_CMD);
546
547 spin_lock(&udev->commands_lock);
548 cmd = idr_find(&udev->commands, entry->cmd_id);
549 if (cmd)
550 idr_remove(&udev->commands, cmd->cmd_id);
551 spin_unlock(&udev->commands_lock);
552
553 if (!cmd) {
554 pr_err("cmd_id not found, ring is broken\n");
555 set_bit(TCMU_DEV_BIT_BROKEN, &udev->flags);
556 break;
557 }
558
559 tcmu_handle_completion(cmd, entry);
560
561 UPDATE_HEAD(udev->cmdr_last_cleaned, tcmu_hdr_get_len(&entry->hdr), udev->cmdr_size);
562
563 handled++;
564 }
565
566 if (mb->cmd_tail == mb->cmd_head)
567 del_timer(&udev->timeout); /* no more pending cmds */
568
569 spin_unlock_irqrestore(&udev->cmdr_lock, flags);
570
571 wake_up(&udev->wait_cmdr);
572
573 return handled;
574}
575
576static int tcmu_check_expired_cmd(int id, void *p, void *data)
577{
578 struct tcmu_cmd *cmd = p;
579
580 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
581 return 0;
582
583 if (!time_after(cmd->deadline, jiffies))
584 return 0;
585
586 set_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags);
587 target_complete_cmd(cmd->se_cmd, SAM_STAT_CHECK_CONDITION);
588 cmd->se_cmd = NULL;
589
590 kmem_cache_free(tcmu_cmd_cache, cmd);
591
592 return 0;
593}
594
595static void tcmu_device_timedout(unsigned long data)
596{
597 struct tcmu_dev *udev = (struct tcmu_dev *)data;
598 unsigned long flags;
599 int handled;
600
601 handled = tcmu_handle_completions(udev);
602
603 pr_warn("%d completions handled from timeout\n", handled);
604
605 spin_lock_irqsave(&udev->commands_lock, flags);
606 idr_for_each(&udev->commands, tcmu_check_expired_cmd, NULL);
607 spin_unlock_irqrestore(&udev->commands_lock, flags);
608
609 /*
610 * We don't need to wakeup threads on wait_cmdr since they have their
611 * own timeout.
612 */
613}
614
615static int tcmu_attach_hba(struct se_hba *hba, u32 host_id)
616{
617 struct tcmu_hba *tcmu_hba;
618
619 tcmu_hba = kzalloc(sizeof(struct tcmu_hba), GFP_KERNEL);
620 if (!tcmu_hba)
621 return -ENOMEM;
622
623 tcmu_hba->host_id = host_id;
624 hba->hba_ptr = tcmu_hba;
625
626 return 0;
627}
628
629static void tcmu_detach_hba(struct se_hba *hba)
630{
631 kfree(hba->hba_ptr);
632 hba->hba_ptr = NULL;
633}
634
635static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
636{
637 struct tcmu_dev *udev;
638
639 udev = kzalloc(sizeof(struct tcmu_dev), GFP_KERNEL);
640 if (!udev)
641 return NULL;
642
643 udev->name = kstrdup(name, GFP_KERNEL);
644 if (!udev->name) {
645 kfree(udev);
646 return NULL;
647 }
648
649 udev->hba = hba;
650
651 init_waitqueue_head(&udev->wait_cmdr);
652 spin_lock_init(&udev->cmdr_lock);
653
654 idr_init(&udev->commands);
655 spin_lock_init(&udev->commands_lock);
656
657 setup_timer(&udev->timeout, tcmu_device_timedout,
658 (unsigned long)udev);
659
660 udev->pass_level = TCMU_PASS_ALL;
661
662 return &udev->se_dev;
663}
664
665static int tcmu_irqcontrol(struct uio_info *info, s32 irq_on)
666{
667 struct tcmu_dev *tcmu_dev = container_of(info, struct tcmu_dev, uio_info);
668
669 tcmu_handle_completions(tcmu_dev);
670
671 return 0;
672}
673
674/*
675 * mmap code from uio.c. Copied here because we want to hook mmap()
676 * and this stuff must come along.
677 */
678static int tcmu_find_mem_index(struct vm_area_struct *vma)
679{
680 struct tcmu_dev *udev = vma->vm_private_data;
681 struct uio_info *info = &udev->uio_info;
682
683 if (vma->vm_pgoff < MAX_UIO_MAPS) {
684 if (info->mem[vma->vm_pgoff].size == 0)
685 return -1;
686 return (int)vma->vm_pgoff;
687 }
688 return -1;
689}
690
691static int tcmu_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
692{
693 struct tcmu_dev *udev = vma->vm_private_data;
694 struct uio_info *info = &udev->uio_info;
695 struct page *page;
696 unsigned long offset;
697 void *addr;
698
699 int mi = tcmu_find_mem_index(vma);
700 if (mi < 0)
701 return VM_FAULT_SIGBUS;
702
703 /*
704 * We need to subtract mi because userspace uses offset = N*PAGE_SIZE
705 * to use mem[N].
706 */
707 offset = (vmf->pgoff - mi) << PAGE_SHIFT;
708
709 addr = (void *)(unsigned long)info->mem[mi].addr + offset;
710 if (info->mem[mi].memtype == UIO_MEM_LOGICAL)
711 page = virt_to_page(addr);
712 else
713 page = vmalloc_to_page(addr);
714 get_page(page);
715 vmf->page = page;
716 return 0;
717}
718
719static const struct vm_operations_struct tcmu_vm_ops = {
720 .fault = tcmu_vma_fault,
721};
722
723static int tcmu_mmap(struct uio_info *info, struct vm_area_struct *vma)
724{
725 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
726
727 vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
728 vma->vm_ops = &tcmu_vm_ops;
729
730 vma->vm_private_data = udev;
731
732 /* Ensure the mmap is exactly the right size */
733 if (vma_pages(vma) != (TCMU_RING_SIZE >> PAGE_SHIFT))
734 return -EINVAL;
735
736 return 0;
737}
738
739static int tcmu_open(struct uio_info *info, struct inode *inode)
740{
741 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
742
743 /* O_EXCL not supported for char devs, so fake it? */
744 if (test_and_set_bit(TCMU_DEV_BIT_OPEN, &udev->flags))
745 return -EBUSY;
746
747 pr_debug("open\n");
748
749 return 0;
750}
751
752static int tcmu_release(struct uio_info *info, struct inode *inode)
753{
754 struct tcmu_dev *udev = container_of(info, struct tcmu_dev, uio_info);
755
756 clear_bit(TCMU_DEV_BIT_OPEN, &udev->flags);
757
758 pr_debug("close\n");
759
760 return 0;
761}
762
763static int tcmu_netlink_event(enum tcmu_genl_cmd cmd, const char *name, int minor)
764{
765 struct sk_buff *skb;
766 void *msg_header;
767 int ret = -ENOMEM;
768
769 skb = genlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
770 if (!skb)
771 return ret;
772
773 msg_header = genlmsg_put(skb, 0, 0, &tcmu_genl_family, 0, cmd);
774 if (!msg_header)
775 goto free_skb;
776
777 ret = nla_put_string(skb, TCMU_ATTR_DEVICE, name);
778 if (ret < 0)
779 goto free_skb;
780
781 ret = nla_put_u32(skb, TCMU_ATTR_MINOR, minor);
782 if (ret < 0)
783 goto free_skb;
784
785 ret = genlmsg_end(skb, msg_header);
786 if (ret < 0)
787 goto free_skb;
788
789 ret = genlmsg_multicast(&tcmu_genl_family, skb, 0,
790 TCMU_MCGRP_CONFIG, GFP_KERNEL);
791
792 /* We don't care if no one is listening */
793 if (ret == -ESRCH)
794 ret = 0;
795
796 return ret;
797free_skb:
798 nlmsg_free(skb);
799 return ret;
800}
801
802static int tcmu_configure_device(struct se_device *dev)
803{
804 struct tcmu_dev *udev = TCMU_DEV(dev);
805 struct tcmu_hba *hba = udev->hba->hba_ptr;
806 struct uio_info *info;
807 struct tcmu_mailbox *mb;
808 size_t size;
809 size_t used;
810 int ret = 0;
811 char *str;
812
813 info = &udev->uio_info;
814
815 size = snprintf(NULL, 0, "tcm-user/%u/%s/%s", hba->host_id, udev->name,
816 udev->dev_config);
817 size += 1; /* for \0 */
818 str = kmalloc(size, GFP_KERNEL);
819 if (!str)
820 return -ENOMEM;
821
822 used = snprintf(str, size, "tcm-user/%u/%s", hba->host_id, udev->name);
823
824 if (udev->dev_config[0])
825 snprintf(str + used, size - used, "/%s", udev->dev_config);
826
827 info->name = str;
828
829 udev->mb_addr = vzalloc(TCMU_RING_SIZE);
830 if (!udev->mb_addr) {
831 ret = -ENOMEM;
832 goto err_vzalloc;
833 }
834
835 /* mailbox fits in first part of CMDR space */
836 udev->cmdr_size = CMDR_SIZE - CMDR_OFF;
837 udev->data_off = CMDR_SIZE;
838 udev->data_size = TCMU_RING_SIZE - CMDR_SIZE;
839
840 mb = udev->mb_addr;
841 mb->version = 1;
842 mb->cmdr_off = CMDR_OFF;
843 mb->cmdr_size = udev->cmdr_size;
844
845 WARN_ON(!PAGE_ALIGNED(udev->data_off));
846 WARN_ON(udev->data_size % PAGE_SIZE);
847
848 info->version = "1";
849
850 info->mem[0].name = "tcm-user command & data buffer";
851 info->mem[0].addr = (phys_addr_t) udev->mb_addr;
852 info->mem[0].size = TCMU_RING_SIZE;
853 info->mem[0].memtype = UIO_MEM_VIRTUAL;
854
855 info->irqcontrol = tcmu_irqcontrol;
856 info->irq = UIO_IRQ_CUSTOM;
857
858 info->mmap = tcmu_mmap;
859 info->open = tcmu_open;
860 info->release = tcmu_release;
861
862 ret = uio_register_device(tcmu_root_device, info);
863 if (ret)
864 goto err_register;
865
866 /* Other attributes can be configured in userspace */
867 dev->dev_attrib.hw_block_size = 512;
868 dev->dev_attrib.hw_max_sectors = 128;
869 dev->dev_attrib.hw_queue_depth = 128;
870
871 ret = tcmu_netlink_event(TCMU_CMD_ADDED_DEVICE, udev->uio_info.name,
872 udev->uio_info.uio_dev->minor);
873 if (ret)
874 goto err_netlink;
875
876 return 0;
877
878err_netlink:
879 uio_unregister_device(&udev->uio_info);
880err_register:
881 vfree(udev->mb_addr);
882err_vzalloc:
883 kfree(info->name);
884
885 return ret;
886}
887
888static int tcmu_check_pending_cmd(int id, void *p, void *data)
889{
890 struct tcmu_cmd *cmd = p;
891
892 if (test_bit(TCMU_CMD_BIT_EXPIRED, &cmd->flags))
893 return 0;
894 return -EINVAL;
895}
896
897static void tcmu_free_device(struct se_device *dev)
898{
899 struct tcmu_dev *udev = TCMU_DEV(dev);
900 int i;
901
902 del_timer_sync(&udev->timeout);
903
904 vfree(udev->mb_addr);
905
906 /* Upper layer should drain all requests before calling this */
907 spin_lock_irq(&udev->commands_lock);
908 i = idr_for_each(&udev->commands, tcmu_check_pending_cmd, NULL);
909 idr_destroy(&udev->commands);
910 spin_unlock_irq(&udev->commands_lock);
911 WARN_ON(i);
912
913 /* Device was configured */
914 if (udev->uio_info.uio_dev) {
915 tcmu_netlink_event(TCMU_CMD_REMOVED_DEVICE, udev->uio_info.name,
916 udev->uio_info.uio_dev->minor);
917
918 uio_unregister_device(&udev->uio_info);
919 kfree(udev->uio_info.name);
920 kfree(udev->name);
921 }
922
923 kfree(udev);
924}
925
926enum {
927 Opt_dev_config, Opt_dev_size, Opt_err, Opt_pass_level,
928};
929
930static match_table_t tokens = {
931 {Opt_dev_config, "dev_config=%s"},
932 {Opt_dev_size, "dev_size=%u"},
933 {Opt_pass_level, "pass_level=%u"},
934 {Opt_err, NULL}
935};
936
937static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
938 const char *page, ssize_t count)
939{
940 struct tcmu_dev *udev = TCMU_DEV(dev);
941 char *orig, *ptr, *opts, *arg_p;
942 substring_t args[MAX_OPT_ARGS];
943 int ret = 0, token;
944 int arg;
945
946 opts = kstrdup(page, GFP_KERNEL);
947 if (!opts)
948 return -ENOMEM;
949
950 orig = opts;
951
952 while ((ptr = strsep(&opts, ",\n")) != NULL) {
953 if (!*ptr)
954 continue;
955
956 token = match_token(ptr, tokens, args);
957 switch (token) {
958 case Opt_dev_config:
959 if (match_strlcpy(udev->dev_config, &args[0],
960 TCMU_CONFIG_LEN) == 0) {
961 ret = -EINVAL;
962 break;
963 }
964 pr_debug("TCMU: Referencing Path: %s\n", udev->dev_config);
965 break;
966 case Opt_dev_size:
967 arg_p = match_strdup(&args[0]);
968 if (!arg_p) {
969 ret = -ENOMEM;
970 break;
971 }
972 ret = kstrtoul(arg_p, 0, (unsigned long *) &udev->dev_size);
973 kfree(arg_p);
974 if (ret < 0)
975 pr_err("kstrtoul() failed for dev_size=\n");
976 break;
977 case Opt_pass_level:
978 match_int(args, &arg);
979 if (arg >= TCMU_PASS_INVALID) {
980 pr_warn("TCMU: Invalid pass_level: %d\n", arg);
981 break;
982 }
983
984 pr_debug("TCMU: Setting pass_level to %d\n", arg);
985 udev->pass_level = arg;
986 break;
987 default:
988 break;
989 }
990 }
991
992 kfree(orig);
993 return (!ret) ? count : ret;
994}
995
996static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b)
997{
998 struct tcmu_dev *udev = TCMU_DEV(dev);
999 ssize_t bl = 0;
1000
1001 bl = sprintf(b + bl, "Config: %s ",
1002 udev->dev_config[0] ? udev->dev_config : "NULL");
1003 bl += sprintf(b + bl, "Size: %zu PassLevel: %u\n",
1004 udev->dev_size, udev->pass_level);
1005
1006 return bl;
1007}
1008
1009static sector_t tcmu_get_blocks(struct se_device *dev)
1010{
1011 struct tcmu_dev *udev = TCMU_DEV(dev);
1012
1013 return div_u64(udev->dev_size - dev->dev_attrib.block_size,
1014 dev->dev_attrib.block_size);
1015}
1016
1017static sense_reason_t
1018tcmu_execute_rw(struct se_cmd *se_cmd, struct scatterlist *sgl, u32 sgl_nents,
1019 enum dma_data_direction data_direction)
1020{
1021 int ret;
1022
1023 ret = tcmu_queue_cmd(se_cmd);
1024
1025 if (ret != 0)
1026 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1027 else
1028 return TCM_NO_SENSE;
1029}
1030
1031static sense_reason_t
1032tcmu_pass_op(struct se_cmd *se_cmd)
1033{
1034 int ret = tcmu_queue_cmd(se_cmd);
1035
1036 if (ret != 0)
1037 return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
1038 else
1039 return TCM_NO_SENSE;
1040}
1041
1042static struct sbc_ops tcmu_sbc_ops = {
1043 .execute_rw = tcmu_execute_rw,
1044 .execute_sync_cache = tcmu_pass_op,
1045 .execute_write_same = tcmu_pass_op,
1046 .execute_write_same_unmap = tcmu_pass_op,
1047 .execute_unmap = tcmu_pass_op,
1048};
1049
1050static sense_reason_t
1051tcmu_parse_cdb(struct se_cmd *cmd)
1052{
1053 unsigned char *cdb = cmd->t_task_cdb;
1054 struct tcmu_dev *udev = TCMU_DEV(cmd->se_dev);
1055 sense_reason_t ret;
1056
1057 switch (udev->pass_level) {
1058 case TCMU_PASS_ALL:
1059 /* We're just like pscsi, then */
1060 /*
1061 * For REPORT LUNS we always need to emulate the response, for everything
1062 * else, pass it up.
1063 */
1064 switch (cdb[0]) {
1065 case REPORT_LUNS:
1066 cmd->execute_cmd = spc_emulate_report_luns;
1067 break;
1068 case READ_6:
1069 case READ_10:
1070 case READ_12:
1071 case READ_16:
1072 case WRITE_6:
1073 case WRITE_10:
1074 case WRITE_12:
1075 case WRITE_16:
1076 case WRITE_VERIFY:
1077 cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
1078 /* FALLTHROUGH */
1079 default:
1080 cmd->execute_cmd = tcmu_pass_op;
1081 }
1082 ret = TCM_NO_SENSE;
1083 break;
1084 case TCMU_PASS_IO:
1085 ret = sbc_parse_cdb(cmd, &tcmu_sbc_ops);
1086 break;
1087 default:
1088 pr_err("Unknown tcm-user pass level %d\n", udev->pass_level);
1089 ret = TCM_CHECK_CONDITION_ABORT_CMD;
1090 }
1091
1092 return ret;
1093}
1094
1095static struct se_subsystem_api tcmu_template = {
1096 .name = "user",
1097 .inquiry_prod = "USER",
1098 .inquiry_rev = TCMU_VERSION,
1099 .owner = THIS_MODULE,
1100 .transport_type = TRANSPORT_PLUGIN_VHBA_PDEV,
1101 .attach_hba = tcmu_attach_hba,
1102 .detach_hba = tcmu_detach_hba,
1103 .alloc_device = tcmu_alloc_device,
1104 .configure_device = tcmu_configure_device,
1105 .free_device = tcmu_free_device,
1106 .parse_cdb = tcmu_parse_cdb,
1107 .set_configfs_dev_params = tcmu_set_configfs_dev_params,
1108 .show_configfs_dev_params = tcmu_show_configfs_dev_params,
1109 .get_device_type = sbc_get_device_type,
1110 .get_blocks = tcmu_get_blocks,
1111};
1112
1113static int __init tcmu_module_init(void)
1114{
1115 int ret;
1116
1117 BUILD_BUG_ON((sizeof(struct tcmu_cmd_entry) % TCMU_OP_ALIGN_SIZE) != 0);
1118
1119 tcmu_cmd_cache = kmem_cache_create("tcmu_cmd_cache",
1120 sizeof(struct tcmu_cmd),
1121 __alignof__(struct tcmu_cmd),
1122 0, NULL);
1123 if (!tcmu_cmd_cache)
1124 return -ENOMEM;
1125
1126 tcmu_root_device = root_device_register("tcm_user");
1127 if (IS_ERR(tcmu_root_device)) {
1128 ret = PTR_ERR(tcmu_root_device);
1129 goto out_free_cache;
1130 }
1131
1132 ret = genl_register_family(&tcmu_genl_family);
1133 if (ret < 0) {
1134 goto out_unreg_device;
1135 }
1136
1137 ret = transport_subsystem_register(&tcmu_template);
1138 if (ret)
1139 goto out_unreg_genl;
1140
1141 return 0;
1142
1143out_unreg_genl:
1144 genl_unregister_family(&tcmu_genl_family);
1145out_unreg_device:
1146 root_device_unregister(tcmu_root_device);
1147out_free_cache:
1148 kmem_cache_destroy(tcmu_cmd_cache);
1149
1150 return ret;
1151}
1152
1153static void __exit tcmu_module_exit(void)
1154{
1155 transport_subsystem_release(&tcmu_template);
1156 genl_unregister_family(&tcmu_genl_family);
1157 root_device_unregister(tcmu_root_device);
1158 kmem_cache_destroy(tcmu_cmd_cache);
1159}
1160
1161MODULE_DESCRIPTION("TCM USER subsystem plugin");
1162MODULE_AUTHOR("Shaohua Li <shli@kernel.org>");
1163MODULE_AUTHOR("Andy Grover <agrover@redhat.com>");
1164MODULE_LICENSE("GPL");
1165
1166module_init(tcmu_module_init);
1167module_exit(tcmu_module_exit);
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index 21ce50880c79..ccee7e332a4d 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -98,7 +98,7 @@ static void ft_tport_delete(struct ft_tport *tport)
98 ft_sess_delete_all(tport); 98 ft_sess_delete_all(tport);
99 lport = tport->lport; 99 lport = tport->lport;
100 BUG_ON(tport != lport->prov[FC_TYPE_FCP]); 100 BUG_ON(tport != lport->prov[FC_TYPE_FCP]);
101 rcu_assign_pointer(lport->prov[FC_TYPE_FCP], NULL); 101 RCU_INIT_POINTER(lport->prov[FC_TYPE_FCP], NULL);
102 102
103 tpg = tport->tpg; 103 tpg = tport->tpg;
104 if (tpg) { 104 if (tpg) {
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index ef5587fe2c69..f554d25b4399 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -84,6 +84,16 @@ config THERMAL_GOV_STEP_WISE
84 Enable this to manage platform thermals using a simple linear 84 Enable this to manage platform thermals using a simple linear
85 governor. 85 governor.
86 86
87config THERMAL_GOV_BANG_BANG
88 bool "Bang Bang thermal governor"
89 default n
90 help
91 Enable this to manage platform thermals using the bang bang governor.
92
93 Say 'Y' here if you want to use two-point temperature regulation
94 for fans without throttling. Some fan drivers depend on this
95 governor being enabled (e.g. acerhdf).
96
87config THERMAL_GOV_USER_SPACE 97config THERMAL_GOV_USER_SPACE
88 bool "User_space thermal governor" 98 bool "User_space thermal governor"
89 help 99 help
@@ -207,21 +217,6 @@ config X86_PKG_TEMP_THERMAL
207 two trip points which can be set by user to get notifications via thermal 217 two trip points which can be set by user to get notifications via thermal
208 notification methods. 218 notification methods.
209 219
210config ACPI_INT3403_THERMAL
211 tristate "ACPI INT3403 thermal driver"
212 depends on X86 && ACPI
213 help
214 Newer laptops and tablets that use ACPI may have thermal sensors
215 outside the core CPU/SOC for thermal safety reasons. These
216 temperature sensors are also exposed for the OS to use via the so
217 called INT3403 ACPI object. This driver will, on devices that have
218 such sensors, expose the temperature information from these sensors
219 to userspace via the normal thermal framework. This means that a wide
220 range of applications and GUI widgets can show this information to
221 the user or use this information for making decisions. For example,
222 the Intel Thermal Daemon can use this information to allow the user
223 to select his laptop to run without turning on the fans.
224
225config INTEL_SOC_DTS_THERMAL 220config INTEL_SOC_DTS_THERMAL
226 tristate "Intel SoCs DTS thermal driver" 221 tristate "Intel SoCs DTS thermal driver"
227 depends on X86 && IOSF_MBI 222 depends on X86 && IOSF_MBI
@@ -234,6 +229,30 @@ config INTEL_SOC_DTS_THERMAL
234 notification methods.The other trip is a critical trip point, which 229 notification methods.The other trip is a critical trip point, which
235 was set by the driver based on the TJ MAX temperature. 230 was set by the driver based on the TJ MAX temperature.
236 231
232config INT340X_THERMAL
233 tristate "ACPI INT340X thermal drivers"
234 depends on X86 && ACPI
235 select THERMAL_GOV_USER_SPACE
236 select ACPI_THERMAL_REL
237 select ACPI_FAN
238 help
239 Newer laptops and tablets that use ACPI may have thermal sensors and
240 other devices with thermal control capabilities outside the core
241 CPU/SOC, for thermal safety reasons.
242 They are exposed for the OS to use via the INT3400 ACPI device object
243 as the master, and INT3401~INT340B ACPI device objects as the slaves.
244 Enable this to expose the temperature information and cooling ability
245 from these objects to userspace via the normal thermal framework.
246 This means that a wide range of applications and GUI widgets can show
247 the information to the user or use this information for making
248 decisions. For example, the Intel Thermal Daemon can use this
249 information to let the user run the laptop without
250 turning on the fans.
251
252config ACPI_THERMAL_REL
253 tristate
254 depends on ACPI
255
237menu "Texas Instruments thermal drivers" 256menu "Texas Instruments thermal drivers"
238source "drivers/thermal/ti-soc-thermal/Kconfig" 257source "drivers/thermal/ti-soc-thermal/Kconfig"
239endmenu 258endmenu
diff --git a/drivers/thermal/Makefile b/drivers/thermal/Makefile
index 31e232f84b6b..39c4fe87da2f 100644
--- a/drivers/thermal/Makefile
+++ b/drivers/thermal/Makefile
@@ -11,6 +11,7 @@ thermal_sys-$(CONFIG_THERMAL_OF) += of-thermal.o
11 11
12# governors 12# governors
13thermal_sys-$(CONFIG_THERMAL_GOV_FAIR_SHARE) += fair_share.o 13thermal_sys-$(CONFIG_THERMAL_GOV_FAIR_SHARE) += fair_share.o
14thermal_sys-$(CONFIG_THERMAL_GOV_BANG_BANG) += gov_bang_bang.o
14thermal_sys-$(CONFIG_THERMAL_GOV_STEP_WISE) += step_wise.o 15thermal_sys-$(CONFIG_THERMAL_GOV_STEP_WISE) += step_wise.o
15thermal_sys-$(CONFIG_THERMAL_GOV_USER_SPACE) += user_space.o 16thermal_sys-$(CONFIG_THERMAL_GOV_USER_SPACE) += user_space.o
16 17
@@ -31,5 +32,5 @@ obj-$(CONFIG_INTEL_POWERCLAMP) += intel_powerclamp.o
31obj-$(CONFIG_X86_PKG_TEMP_THERMAL) += x86_pkg_temp_thermal.o 32obj-$(CONFIG_X86_PKG_TEMP_THERMAL) += x86_pkg_temp_thermal.o
32obj-$(CONFIG_INTEL_SOC_DTS_THERMAL) += intel_soc_dts_thermal.o 33obj-$(CONFIG_INTEL_SOC_DTS_THERMAL) += intel_soc_dts_thermal.o
33obj-$(CONFIG_TI_SOC_THERMAL) += ti-soc-thermal/ 34obj-$(CONFIG_TI_SOC_THERMAL) += ti-soc-thermal/
34obj-$(CONFIG_ACPI_INT3403_THERMAL) += int3403_thermal.o 35obj-$(CONFIG_INT340X_THERMAL) += int340x_thermal/
35obj-$(CONFIG_ST_THERMAL) += st/ 36obj-$(CONFIG_ST_THERMAL) += st/
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 1ab0018271c5..ad09e51ffae4 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -50,15 +50,14 @@ struct cpufreq_cooling_device {
50 unsigned int cpufreq_state; 50 unsigned int cpufreq_state;
51 unsigned int cpufreq_val; 51 unsigned int cpufreq_val;
52 struct cpumask allowed_cpus; 52 struct cpumask allowed_cpus;
53 struct list_head node;
53}; 54};
54static DEFINE_IDR(cpufreq_idr); 55static DEFINE_IDR(cpufreq_idr);
55static DEFINE_MUTEX(cooling_cpufreq_lock); 56static DEFINE_MUTEX(cooling_cpufreq_lock);
56 57
57static unsigned int cpufreq_dev_count; 58static unsigned int cpufreq_dev_count;
58 59
59/* notify_table passes value to the CPUFREQ_ADJUST callback function. */ 60static LIST_HEAD(cpufreq_dev_list);
60#define NOTIFY_INVALID NULL
61static struct cpufreq_cooling_device *notify_device;
62 61
63/** 62/**
64 * get_idr - function to get a unique id. 63 * get_idr - function to get a unique id.
@@ -287,15 +286,12 @@ static int cpufreq_apply_cooling(struct cpufreq_cooling_device *cpufreq_device,
287 286
288 cpufreq_device->cpufreq_state = cooling_state; 287 cpufreq_device->cpufreq_state = cooling_state;
289 cpufreq_device->cpufreq_val = clip_freq; 288 cpufreq_device->cpufreq_val = clip_freq;
290 notify_device = cpufreq_device;
291 289
292 for_each_cpu(cpuid, mask) { 290 for_each_cpu(cpuid, mask) {
293 if (is_cpufreq_valid(cpuid)) 291 if (is_cpufreq_valid(cpuid))
294 cpufreq_update_policy(cpuid); 292 cpufreq_update_policy(cpuid);
295 } 293 }
296 294
297 notify_device = NOTIFY_INVALID;
298
299 return 0; 295 return 0;
300} 296}
301 297
@@ -316,21 +312,28 @@ static int cpufreq_thermal_notifier(struct notifier_block *nb,
316{ 312{
317 struct cpufreq_policy *policy = data; 313 struct cpufreq_policy *policy = data;
318 unsigned long max_freq = 0; 314 unsigned long max_freq = 0;
315 struct cpufreq_cooling_device *cpufreq_dev;
319 316
320 if (event != CPUFREQ_ADJUST || notify_device == NOTIFY_INVALID) 317 if (event != CPUFREQ_ADJUST)
321 return 0; 318 return 0;
322 319
323 if (cpumask_test_cpu(policy->cpu, &notify_device->allowed_cpus)) 320 mutex_lock(&cooling_cpufreq_lock);
324 max_freq = notify_device->cpufreq_val; 321 list_for_each_entry(cpufreq_dev, &cpufreq_dev_list, node) {
325 else 322 if (!cpumask_test_cpu(policy->cpu,
326 return 0; 323 &cpufreq_dev->allowed_cpus))
324 continue;
325
326 if (!cpufreq_dev->cpufreq_val)
327 cpufreq_dev->cpufreq_val = get_cpu_frequency(
328 cpumask_any(&cpufreq_dev->allowed_cpus),
329 cpufreq_dev->cpufreq_state);
327 330
328 /* Never exceed user_policy.max */ 331 max_freq = cpufreq_dev->cpufreq_val;
329 if (max_freq > policy->user_policy.max)
330 max_freq = policy->user_policy.max;
331 332
332 if (policy->max != max_freq) 333 if (policy->max != max_freq)
333 cpufreq_verify_within_limits(policy, 0, max_freq); 334 cpufreq_verify_within_limits(policy, 0, max_freq);
335 }
336 mutex_unlock(&cooling_cpufreq_lock);
334 337
335 return 0; 338 return 0;
336} 339}
@@ -486,6 +489,7 @@ __cpufreq_cooling_register(struct device_node *np,
486 cpufreq_register_notifier(&thermal_cpufreq_notifier_block, 489 cpufreq_register_notifier(&thermal_cpufreq_notifier_block,
487 CPUFREQ_POLICY_NOTIFIER); 490 CPUFREQ_POLICY_NOTIFIER);
488 cpufreq_dev_count++; 491 cpufreq_dev_count++;
492 list_add(&cpufreq_dev->node, &cpufreq_dev_list);
489 493
490 mutex_unlock(&cooling_cpufreq_lock); 494 mutex_unlock(&cooling_cpufreq_lock);
491 495
@@ -549,6 +553,7 @@ void cpufreq_cooling_unregister(struct thermal_cooling_device *cdev)
549 553
550 cpufreq_dev = cdev->devdata; 554 cpufreq_dev = cdev->devdata;
551 mutex_lock(&cooling_cpufreq_lock); 555 mutex_lock(&cooling_cpufreq_lock);
556 list_del(&cpufreq_dev->node);
552 cpufreq_dev_count--; 557 cpufreq_dev_count--;
553 558
554 /* Unregister the notifier for the last cpufreq cooling device */ 559 /* Unregister the notifier for the last cpufreq cooling device */
diff --git a/drivers/thermal/fair_share.c b/drivers/thermal/fair_share.c
index 944ba2f340c8..6e0a3fbfae86 100644
--- a/drivers/thermal/fair_share.c
+++ b/drivers/thermal/fair_share.c
@@ -23,6 +23,7 @@
23 */ 23 */
24 24
25#include <linux/thermal.h> 25#include <linux/thermal.h>
26#include <trace/events/thermal.h>
26 27
27#include "thermal_core.h" 28#include "thermal_core.h"
28 29
@@ -34,6 +35,7 @@ static int get_trip_level(struct thermal_zone_device *tz)
34{ 35{
35 int count = 0; 36 int count = 0;
36 unsigned long trip_temp; 37 unsigned long trip_temp;
38 enum thermal_trip_type trip_type;
37 39
38 if (tz->trips == 0 || !tz->ops->get_trip_temp) 40 if (tz->trips == 0 || !tz->ops->get_trip_temp)
39 return 0; 41 return 0;
@@ -43,6 +45,16 @@ static int get_trip_level(struct thermal_zone_device *tz)
43 if (tz->temperature < trip_temp) 45 if (tz->temperature < trip_temp)
44 break; 46 break;
45 } 47 }
48
49 /*
50 * count > 0 only if temperature is greater than first trip
51 * point, in which case, trip_point = count - 1
52 */
53 if (count > 0) {
54 tz->ops->get_trip_type(tz, count - 1, &trip_type);
55 trace_thermal_zone_trip(tz, count - 1, trip_type);
56 }
57
46 return count; 58 return count;
47} 59}
48 60
diff --git a/drivers/thermal/gov_bang_bang.c b/drivers/thermal/gov_bang_bang.c
new file mode 100644
index 000000000000..c5dd76b2ee74
--- /dev/null
+++ b/drivers/thermal/gov_bang_bang.c
@@ -0,0 +1,131 @@
1/*
2 * gov_bang_bang.c - A simple thermal throttling governor using hysteresis
3 *
4 * Copyright (C) 2014 Peter Feuerer <peter@piie.net>
5 *
6 * Based on step_wise.c with following Copyrights:
7 * Copyright (C) 2012 Intel Corp
8 * Copyright (C) 2012 Durgadoss R <durgadoss.r@intel.com>
9 *
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation, version 2.
14 *
15 * This program is distributed in the hope that it will be useful,
16 * but WITHOUT ANY WARRANTY; without even the implied warranty of
17 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
18 * the GNU General Public License for more details.
19 *
20 */
21
22#include <linux/thermal.h>
23
24#include "thermal_core.h"
25
26static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
27{
28 long trip_temp;
29 unsigned long trip_hyst;
30 struct thermal_instance *instance;
31
32 tz->ops->get_trip_temp(tz, trip, &trip_temp);
33 tz->ops->get_trip_hyst(tz, trip, &trip_hyst);
34
35 dev_dbg(&tz->device, "Trip%d[temp=%ld]:temp=%d:hyst=%ld\n",
36 trip, trip_temp, tz->temperature,
37 trip_hyst);
38
39 mutex_lock(&tz->lock);
40
41 list_for_each_entry(instance, &tz->thermal_instances, tz_node) {
42 if (instance->trip != trip)
43 continue;
44
45 /* in case fan is in initial state, switch the fan off */
46 if (instance->target == THERMAL_NO_TARGET)
47 instance->target = 0;
48
50 /* in case the fan is neither on nor off, set it to active */
50 if (instance->target != 0 && instance->target != 1) {
51 pr_warn("Thermal instance %s controlled by bang-bang has unexpected state: %ld\n",
52 instance->name, instance->target);
53 instance->target = 1;
54 }
55
56 /*
57 * enable fan when temperature exceeds trip_temp and disable
58 * the fan in case it falls below trip_temp minus hysteresis
59 */
60 if (instance->target == 0 && tz->temperature >= trip_temp)
61 instance->target = 1;
62 else if (instance->target == 1 &&
63 tz->temperature < trip_temp - trip_hyst)
64 instance->target = 0;
65
66 dev_dbg(&instance->cdev->device, "target=%d\n",
67 (int)instance->target);
68
69 instance->cdev->updated = false; /* cdev needs update */
70 }
71
72 mutex_unlock(&tz->lock);
73}
74
75/**
76 * bang_bang_control - controls devices associated with the given zone
77 * @tz: thermal_zone_device
78 * @trip: the trip point
79 *
80 * Regulation Logic: two-point regulation that delivers a cooling state
81 * depending on the previous state, as shown in this diagram:
82 *
83 * Fan: OFF ON
84 *
85 * |
86 * |
87 * trip_temp: +---->+
88 * | | ^
89 * | | |
90 * | | Temperature
91 * (trip_temp - hyst): +<----+
92 * |
93 * |
94 * |
95 *
96 * * If the fan is not running and temperature exceeds trip_temp, the fan
97 * gets turned on.
98 * * In case the fan is running, temperature must fall below
99 * (trip_temp - hyst) so that the fan gets turned off again.
100 *
101 */
102static int bang_bang_control(struct thermal_zone_device *tz, int trip)
103{
104 struct thermal_instance *instance;
105
106 thermal_zone_trip_update(tz, trip);
107
108 mutex_lock(&tz->lock);
109
110 list_for_each_entry(instance, &tz->thermal_instances, tz_node)
111 thermal_cdev_update(instance->cdev);
112
113 mutex_unlock(&tz->lock);
114
115 return 0;
116}
117
118static struct thermal_governor thermal_gov_bang_bang = {
119 .name = "bang_bang",
120 .throttle = bang_bang_control,
121};
122
123int thermal_gov_bang_bang_register(void)
124{
125 return thermal_register_governor(&thermal_gov_bang_bang);
126}
127
128void thermal_gov_bang_bang_unregister(void)
129{
130 thermal_unregister_governor(&thermal_gov_bang_bang);
131}
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index 2c516f2eebed..5a1f1070b702 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -19,6 +19,7 @@
19#include <linux/mfd/syscon.h> 19#include <linux/mfd/syscon.h>
20#include <linux/module.h> 20#include <linux/module.h>
21#include <linux/of.h> 21#include <linux/of.h>
22#include <linux/of_device.h>
22#include <linux/platform_device.h> 23#include <linux/platform_device.h>
23#include <linux/regmap.h> 24#include <linux/regmap.h>
24#include <linux/slab.h> 25#include <linux/slab.h>
@@ -31,6 +32,11 @@
31 32
32#define MISC0 0x0150 33#define MISC0 0x0150
33#define MISC0_REFTOP_SELBIASOFF (1 << 3) 34#define MISC0_REFTOP_SELBIASOFF (1 << 3)
35#define MISC1 0x0160
36#define MISC1_IRQ_TEMPHIGH (1 << 29)
37/* Below LOW and PANIC bits are only for TEMPMON_IMX6SX */
38#define MISC1_IRQ_TEMPLOW (1 << 28)
39#define MISC1_IRQ_TEMPPANIC (1 << 27)
34 40
35#define TEMPSENSE0 0x0180 41#define TEMPSENSE0 0x0180
36#define TEMPSENSE0_ALARM_VALUE_SHIFT 20 42#define TEMPSENSE0_ALARM_VALUE_SHIFT 20
@@ -43,6 +49,12 @@
43 49
44#define TEMPSENSE1 0x0190 50#define TEMPSENSE1 0x0190
45#define TEMPSENSE1_MEASURE_FREQ 0xffff 51#define TEMPSENSE1_MEASURE_FREQ 0xffff
52/* Below TEMPSENSE2 is only for TEMPMON_IMX6SX */
53#define TEMPSENSE2 0x0290
54#define TEMPSENSE2_LOW_VALUE_SHIFT 0
55#define TEMPSENSE2_LOW_VALUE_MASK 0xfff
56#define TEMPSENSE2_PANIC_VALUE_SHIFT 16
57#define TEMPSENSE2_PANIC_VALUE_MASK 0xfff0000
46 58
47#define OCOTP_ANA1 0x04e0 59#define OCOTP_ANA1 0x04e0
48 60
@@ -66,6 +78,21 @@ enum imx_thermal_trip {
66#define FACTOR1 15976 78#define FACTOR1 15976
67#define FACTOR2 4297157 79#define FACTOR2 4297157
68 80
81#define TEMPMON_IMX6Q 1
82#define TEMPMON_IMX6SX 2
83
84struct thermal_soc_data {
85 u32 version;
86};
87
88static struct thermal_soc_data thermal_imx6q_data = {
89 .version = TEMPMON_IMX6Q,
90};
91
92static struct thermal_soc_data thermal_imx6sx_data = {
93 .version = TEMPMON_IMX6SX,
94};
95
69struct imx_thermal_data { 96struct imx_thermal_data {
70 struct thermal_zone_device *tz; 97 struct thermal_zone_device *tz;
71 struct thermal_cooling_device *cdev; 98 struct thermal_cooling_device *cdev;
@@ -79,8 +106,21 @@ struct imx_thermal_data {
79 bool irq_enabled; 106 bool irq_enabled;
80 int irq; 107 int irq;
81 struct clk *thermal_clk; 108 struct clk *thermal_clk;
109 const struct thermal_soc_data *socdata;
82}; 110};
83 111
112static void imx_set_panic_temp(struct imx_thermal_data *data,
113 signed long panic_temp)
114{
115 struct regmap *map = data->tempmon;
116 int critical_value;
117
118 critical_value = (data->c2 - panic_temp) / data->c1;
119 regmap_write(map, TEMPSENSE2 + REG_CLR, TEMPSENSE2_PANIC_VALUE_MASK);
120 regmap_write(map, TEMPSENSE2 + REG_SET, critical_value <<
121 TEMPSENSE2_PANIC_VALUE_SHIFT);
122}
123
84static void imx_set_alarm_temp(struct imx_thermal_data *data, 124static void imx_set_alarm_temp(struct imx_thermal_data *data,
85 signed long alarm_temp) 125 signed long alarm_temp)
86{ 126{
@@ -142,13 +182,17 @@ static int imx_get_temp(struct thermal_zone_device *tz, unsigned long *temp)
142 /* See imx_get_sensor_data() for formula derivation */ 182 /* See imx_get_sensor_data() for formula derivation */
143 *temp = data->c2 - n_meas * data->c1; 183 *temp = data->c2 - n_meas * data->c1;
144 184
145 /* Update alarm value to next higher trip point */ 185 /* Update alarm value to next higher trip point for TEMPMON_IMX6Q */
146 if (data->alarm_temp == data->temp_passive && *temp >= data->temp_passive) 186 if (data->socdata->version == TEMPMON_IMX6Q) {
147 imx_set_alarm_temp(data, data->temp_critical); 187 if (data->alarm_temp == data->temp_passive &&
148 if (data->alarm_temp == data->temp_critical && *temp < data->temp_passive) { 188 *temp >= data->temp_passive)
149 imx_set_alarm_temp(data, data->temp_passive); 189 imx_set_alarm_temp(data, data->temp_critical);
150 dev_dbg(&tz->device, "thermal alarm off: T < %lu\n", 190 if (data->alarm_temp == data->temp_critical &&
151 data->alarm_temp / 1000); 191 *temp < data->temp_passive) {
192 imx_set_alarm_temp(data, data->temp_passive);
193 dev_dbg(&tz->device, "thermal alarm off: T < %lu\n",
194 data->alarm_temp / 1000);
195 }
152 } 196 }
153 197
154 if (*temp != data->last_temp) { 198 if (*temp != data->last_temp) {
@@ -398,14 +442,27 @@ static irqreturn_t imx_thermal_alarm_irq_thread(int irq, void *dev)
398 return IRQ_HANDLED; 442 return IRQ_HANDLED;
399} 443}
400 444
445static const struct of_device_id of_imx_thermal_match[] = {
446 { .compatible = "fsl,imx6q-tempmon", .data = &thermal_imx6q_data, },
447 { .compatible = "fsl,imx6sx-tempmon", .data = &thermal_imx6sx_data, },
448 { /* end */ }
449};
450MODULE_DEVICE_TABLE(of, of_imx_thermal_match);
451
401static int imx_thermal_probe(struct platform_device *pdev) 452static int imx_thermal_probe(struct platform_device *pdev)
402{ 453{
454 const struct of_device_id *of_id =
455 of_match_device(of_imx_thermal_match, &pdev->dev);
403 struct imx_thermal_data *data; 456 struct imx_thermal_data *data;
404 struct cpumask clip_cpus; 457 struct cpumask clip_cpus;
405 struct regmap *map; 458 struct regmap *map;
406 int measure_freq; 459 int measure_freq;
407 int ret; 460 int ret;
408 461
462 if (!cpufreq_get_current_driver()) {
463 dev_dbg(&pdev->dev, "no cpufreq driver!");
464 return -EPROBE_DEFER;
465 }
409 data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); 466 data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
410 if (!data) 467 if (!data)
411 return -ENOMEM; 468 return -ENOMEM;
@@ -418,6 +475,20 @@ static int imx_thermal_probe(struct platform_device *pdev)
418 } 475 }
419 data->tempmon = map; 476 data->tempmon = map;
420 477
478 data->socdata = of_id->data;
479
480 /* make sure the IRQ flag is clear before enabling irq on i.MX6SX */
481 if (data->socdata->version == TEMPMON_IMX6SX) {
482 regmap_write(map, MISC1 + REG_CLR, MISC1_IRQ_TEMPHIGH |
483 MISC1_IRQ_TEMPLOW | MISC1_IRQ_TEMPPANIC);
484 /*
485 * The reset value of LOW ALARM is incorrect; set it to the lowest
486 * value to avoid a false low-alarm trigger.
487 */
488 regmap_write(map, TEMPSENSE2 + REG_SET,
489 TEMPSENSE2_LOW_VALUE_MASK);
490 }
491
421 data->irq = platform_get_irq(pdev, 0); 492 data->irq = platform_get_irq(pdev, 0);
422 if (data->irq < 0) 493 if (data->irq < 0)
423 return data->irq; 494 return data->irq;
@@ -454,6 +525,30 @@ static int imx_thermal_probe(struct platform_device *pdev)
454 return ret; 525 return ret;
455 } 526 }
456 527
528 data->thermal_clk = devm_clk_get(&pdev->dev, NULL);
529 if (IS_ERR(data->thermal_clk)) {
530 ret = PTR_ERR(data->thermal_clk);
531 if (ret != -EPROBE_DEFER)
532 dev_err(&pdev->dev,
533 "failed to get thermal clk: %d\n", ret);
534 cpufreq_cooling_unregister(data->cdev);
535 return ret;
536 }
537
538 /*
539 * Thermal sensor needs clk on to get correct value, normally
540 * we should enable its clk before taking measurement and disable
541 * clk after measurement is done, but if alarm function is enabled,
542 * hardware will auto measure the temperature periodically, so we
543 * need to keep the clk always on for alarm function.
544 */
545 ret = clk_prepare_enable(data->thermal_clk);
546 if (ret) {
547 dev_err(&pdev->dev, "failed to enable thermal clk: %d\n", ret);
548 cpufreq_cooling_unregister(data->cdev);
549 return ret;
550 }
551
457 data->tz = thermal_zone_device_register("imx_thermal_zone", 552 data->tz = thermal_zone_device_register("imx_thermal_zone",
458 IMX_TRIP_NUM, 553 IMX_TRIP_NUM,
459 BIT(IMX_TRIP_PASSIVE), data, 554 BIT(IMX_TRIP_PASSIVE), data,
@@ -464,31 +559,20 @@ static int imx_thermal_probe(struct platform_device *pdev)
464 ret = PTR_ERR(data->tz); 559 ret = PTR_ERR(data->tz);
465 dev_err(&pdev->dev, 560 dev_err(&pdev->dev,
466 "failed to register thermal zone device %d\n", ret); 561 "failed to register thermal zone device %d\n", ret);
562 clk_disable_unprepare(data->thermal_clk);
467 cpufreq_cooling_unregister(data->cdev); 563 cpufreq_cooling_unregister(data->cdev);
468 return ret; 564 return ret;
469 } 565 }
470 566
471 data->thermal_clk = devm_clk_get(&pdev->dev, NULL);
472 if (IS_ERR(data->thermal_clk)) {
473 dev_warn(&pdev->dev, "failed to get thermal clk!\n");
474 } else {
475 /*
476 * Thermal sensor needs clk on to get correct value, normally
477 * we should enable its clk before taking measurement and disable
478 * clk after measurement is done, but if alarm function is enabled,
479 * hardware will auto measure the temperature periodically, so we
480 * need to keep the clk always on for alarm function.
481 */
482 ret = clk_prepare_enable(data->thermal_clk);
483 if (ret)
484 dev_warn(&pdev->dev, "failed to enable thermal clk: %d\n", ret);
485 }
486
487 /* Enable measurements at ~ 10 Hz */ 567 /* Enable measurements at ~ 10 Hz */
488 regmap_write(map, TEMPSENSE1 + REG_CLR, TEMPSENSE1_MEASURE_FREQ); 568 regmap_write(map, TEMPSENSE1 + REG_CLR, TEMPSENSE1_MEASURE_FREQ);
489 measure_freq = DIV_ROUND_UP(32768, 10); /* 10 Hz */ 569 measure_freq = DIV_ROUND_UP(32768, 10); /* 10 Hz */
490 regmap_write(map, TEMPSENSE1 + REG_SET, measure_freq); 570 regmap_write(map, TEMPSENSE1 + REG_SET, measure_freq);
491 imx_set_alarm_temp(data, data->temp_passive); 571 imx_set_alarm_temp(data, data->temp_passive);
572
573 if (data->socdata->version == TEMPMON_IMX6SX)
574 imx_set_panic_temp(data, data->temp_critical);
575
492 regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_POWER_DOWN); 576 regmap_write(map, TEMPSENSE0 + REG_CLR, TEMPSENSE0_POWER_DOWN);
493 regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_MEASURE_TEMP); 577 regmap_write(map, TEMPSENSE0 + REG_SET, TEMPSENSE0_MEASURE_TEMP);
494 578
@@ -550,12 +634,6 @@ static int imx_thermal_resume(struct device *dev)
550static SIMPLE_DEV_PM_OPS(imx_thermal_pm_ops, 634static SIMPLE_DEV_PM_OPS(imx_thermal_pm_ops,
551 imx_thermal_suspend, imx_thermal_resume); 635 imx_thermal_suspend, imx_thermal_resume);
552 636
553static const struct of_device_id of_imx_thermal_match[] = {
554 { .compatible = "fsl,imx6q-tempmon", },
555 { /* end */ }
556};
557MODULE_DEVICE_TABLE(of, of_imx_thermal_match);
558
559static struct platform_driver imx_thermal = { 637static struct platform_driver imx_thermal = {
560 .driver = { 638 .driver = {
561 .name = "imx_thermal", 639 .name = "imx_thermal",
diff --git a/drivers/thermal/int3403_thermal.c b/drivers/thermal/int3403_thermal.c
deleted file mode 100644
index 17554eeb3953..000000000000
--- a/drivers/thermal/int3403_thermal.c
+++ /dev/null
@@ -1,296 +0,0 @@
1/*
2 * ACPI INT3403 thermal driver
3 * Copyright (c) 2013, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 */
14
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/init.h>
18#include <linux/types.h>
19#include <linux/acpi.h>
20#include <linux/thermal.h>
21
22#define INT3403_TYPE_SENSOR 0x03
23#define INT3403_PERF_CHANGED_EVENT 0x80
24#define INT3403_THERMAL_EVENT 0x90
25
26#define DECI_KELVIN_TO_MILLI_CELSIUS(t, off) (((t) - (off)) * 100)
27#define KELVIN_OFFSET 2732
28#define MILLI_CELSIUS_TO_DECI_KELVIN(t, off) (((t) / 100) + (off))
29
30#define ACPI_INT3403_CLASS "int3403"
31#define ACPI_INT3403_FILE_STATE "state"
32
33struct int3403_sensor {
34 struct thermal_zone_device *tzone;
35 unsigned long *thresholds;
36 unsigned long crit_temp;
37 int crit_trip_id;
38 unsigned long psv_temp;
39 int psv_trip_id;
40};
41
42static int sys_get_curr_temp(struct thermal_zone_device *tzone,
43 unsigned long *temp)
44{
45 struct acpi_device *device = tzone->devdata;
46 unsigned long long tmp;
47 acpi_status status;
48
49 status = acpi_evaluate_integer(device->handle, "_TMP", NULL, &tmp);
50 if (ACPI_FAILURE(status))
51 return -EIO;
52
53 *temp = DECI_KELVIN_TO_MILLI_CELSIUS(tmp, KELVIN_OFFSET);
54
55 return 0;
56}
57
58static int sys_get_trip_hyst(struct thermal_zone_device *tzone,
59 int trip, unsigned long *temp)
60{
61 struct acpi_device *device = tzone->devdata;
62 unsigned long long hyst;
63 acpi_status status;
64
65 status = acpi_evaluate_integer(device->handle, "GTSH", NULL, &hyst);
66 if (ACPI_FAILURE(status))
67 return -EIO;
68
69 /*
70 * Thermal hysteresis represents a temperature difference.
71 * Kelvin and Celsius have same degree size. So the
72 * conversion here between tenths of degree Kelvin unit
73 * and Milli-Celsius unit is just to multiply 100.
74 */
75 *temp = hyst * 100;
76
77 return 0;
78}
79
80static int sys_get_trip_temp(struct thermal_zone_device *tzone,
81 int trip, unsigned long *temp)
82{
83 struct acpi_device *device = tzone->devdata;
84 struct int3403_sensor *obj = acpi_driver_data(device);
85
86 if (trip == obj->crit_trip_id)
87 *temp = obj->crit_temp;
88 else if (trip == obj->psv_trip_id)
89 *temp = obj->psv_temp;
90 else {
91 /*
92 * get_trip_temp is a mandatory callback but
93 * PATx method doesn't return any value, so return
94 * cached value, which was last set from user space.
95 */
96 *temp = obj->thresholds[trip];
97 }
98
99 return 0;
100}
101
102static int sys_get_trip_type(struct thermal_zone_device *thermal,
103 int trip, enum thermal_trip_type *type)
104{
105 struct acpi_device *device = thermal->devdata;
106 struct int3403_sensor *obj = acpi_driver_data(device);
107
108 /* Mandatory callback, may not mean much here */
109 if (trip == obj->crit_trip_id)
110 *type = THERMAL_TRIP_CRITICAL;
111 else
112 *type = THERMAL_TRIP_PASSIVE;
113
114 return 0;
115}
116
117int sys_set_trip_temp(struct thermal_zone_device *tzone, int trip,
118 unsigned long temp)
119{
120 struct acpi_device *device = tzone->devdata;
121 acpi_status status;
122 char name[10];
123 int ret = 0;
124 struct int3403_sensor *obj = acpi_driver_data(device);
125
126 snprintf(name, sizeof(name), "PAT%d", trip);
127 if (acpi_has_method(device->handle, name)) {
128 status = acpi_execute_simple_method(device->handle, name,
129 MILLI_CELSIUS_TO_DECI_KELVIN(temp,
130 KELVIN_OFFSET));
131 if (ACPI_FAILURE(status))
132 ret = -EIO;
133 else
134 obj->thresholds[trip] = temp;
135 } else {
136 ret = -EIO;
137 dev_err(&device->dev, "sys_set_trip_temp: method not found\n");
138 }
139
140 return ret;
141}
142
143static struct thermal_zone_device_ops tzone_ops = {
144 .get_temp = sys_get_curr_temp,
145 .get_trip_temp = sys_get_trip_temp,
146 .get_trip_type = sys_get_trip_type,
147 .set_trip_temp = sys_set_trip_temp,
148 .get_trip_hyst = sys_get_trip_hyst,
149};
150
151static void acpi_thermal_notify(struct acpi_device *device, u32 event)
152{
153 struct int3403_sensor *obj;
154
155 if (!device)
156 return;
157
158 obj = acpi_driver_data(device);
159 if (!obj)
160 return;
161
162 switch (event) {
163 case INT3403_PERF_CHANGED_EVENT:
164 break;
165 case INT3403_THERMAL_EVENT:
166 thermal_zone_device_update(obj->tzone);
167 break;
168 default:
169 dev_err(&device->dev, "Unsupported event [0x%x]\n", event);
170 break;
171 }
172}
173
174static int sys_get_trip_crt(struct acpi_device *device, unsigned long *temp)
175{
176 unsigned long long crt;
177 acpi_status status;
178
179 status = acpi_evaluate_integer(device->handle, "_CRT", NULL, &crt);
180 if (ACPI_FAILURE(status))
181 return -EIO;
182
183 *temp = DECI_KELVIN_TO_MILLI_CELSIUS(crt, KELVIN_OFFSET);
184
185 return 0;
186}
187
188static int sys_get_trip_psv(struct acpi_device *device, unsigned long *temp)
189{
190 unsigned long long psv;
191 acpi_status status;
192
193 status = acpi_evaluate_integer(device->handle, "_PSV", NULL, &psv);
194 if (ACPI_FAILURE(status))
195 return -EIO;
196
197 *temp = DECI_KELVIN_TO_MILLI_CELSIUS(psv, KELVIN_OFFSET);
198
199 return 0;
200}
201
202static int acpi_int3403_add(struct acpi_device *device)
203{
204 int result = 0;
205 unsigned long long ptyp;
206 acpi_status status;
207 struct int3403_sensor *obj;
208 unsigned long long trip_cnt;
209 int trip_mask = 0;
210
211 if (!device)
212 return -EINVAL;
213
214 status = acpi_evaluate_integer(device->handle, "PTYP", NULL, &ptyp);
215 if (ACPI_FAILURE(status))
216 return -EINVAL;
217
218 if (ptyp != INT3403_TYPE_SENSOR)
219 return -EINVAL;
220
221 obj = devm_kzalloc(&device->dev, sizeof(*obj), GFP_KERNEL);
222 if (!obj)
223 return -ENOMEM;
224
225 device->driver_data = obj;
226
227 status = acpi_evaluate_integer(device->handle, "PATC", NULL,
228 &trip_cnt);
229 if (ACPI_FAILURE(status))
230 trip_cnt = 0;
231
232 if (trip_cnt) {
233 /* We have to cache, thresholds can't be readback */
234 obj->thresholds = devm_kzalloc(&device->dev,
235 sizeof(*obj->thresholds) * trip_cnt,
236 GFP_KERNEL);
237 if (!obj->thresholds)
238 return -ENOMEM;
239 trip_mask = BIT(trip_cnt) - 1;
240 }
241
242 obj->psv_trip_id = -1;
243 if (!sys_get_trip_psv(device, &obj->psv_temp))
244 obj->psv_trip_id = trip_cnt++;
245
246 obj->crit_trip_id = -1;
247 if (!sys_get_trip_crt(device, &obj->crit_temp))
248 obj->crit_trip_id = trip_cnt++;
249
250 obj->tzone = thermal_zone_device_register(acpi_device_bid(device),
251 trip_cnt, trip_mask, device, &tzone_ops,
252 NULL, 0, 0);
253 if (IS_ERR(obj->tzone)) {
254 result = PTR_ERR(obj->tzone);
255 return result;
256 }
257
258 strcpy(acpi_device_name(device), "INT3403");
259 strcpy(acpi_device_class(device), ACPI_INT3403_CLASS);
260
261 return 0;
262}
263
264static int acpi_int3403_remove(struct acpi_device *device)
265{
266 struct int3403_sensor *obj;
267
268 obj = acpi_driver_data(device);
269 thermal_zone_device_unregister(obj->tzone);
270
271 return 0;
272}
273
274ACPI_MODULE_NAME("int3403");
275static const struct acpi_device_id int3403_device_ids[] = {
276 {"INT3403", 0},
277 {"", 0},
278};
279MODULE_DEVICE_TABLE(acpi, int3403_device_ids);
280
281static struct acpi_driver acpi_int3403_driver = {
282 .name = "INT3403",
283 .class = ACPI_INT3403_CLASS,
284 .ids = int3403_device_ids,
285 .ops = {
286 .add = acpi_int3403_add,
287 .remove = acpi_int3403_remove,
288 .notify = acpi_thermal_notify,
289 },
290};
291
292module_acpi_driver(acpi_int3403_driver);
293
294MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
295MODULE_LICENSE("GPL v2");
296MODULE_DESCRIPTION("ACPI INT3403 thermal driver");
diff --git a/drivers/thermal/int340x_thermal/Makefile b/drivers/thermal/int340x_thermal/Makefile
new file mode 100644
index 000000000000..ffe40bffaf1a
--- /dev/null
+++ b/drivers/thermal/int340x_thermal/Makefile
@@ -0,0 +1,4 @@
1obj-$(CONFIG_INT340X_THERMAL) += int3400_thermal.o
2obj-$(CONFIG_INT340X_THERMAL) += int3402_thermal.o
3obj-$(CONFIG_INT340X_THERMAL) += int3403_thermal.o
4obj-$(CONFIG_ACPI_THERMAL_REL) += acpi_thermal_rel.o
diff --git a/drivers/thermal/int340x_thermal/acpi_thermal_rel.c b/drivers/thermal/int340x_thermal/acpi_thermal_rel.c
new file mode 100644
index 000000000000..0d8db808f0ae
--- /dev/null
+++ b/drivers/thermal/int340x_thermal/acpi_thermal_rel.c
@@ -0,0 +1,400 @@
1/* acpi_thermal_rel.c driver for exporting ACPI thermal relationship
2 *
3 * Copyright (c) 2014 Intel Corp
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 */
10
11/*
12 * Two functionalities included:
13 * 1. Export _TRT, _ART, via misc device interface to the userspace.
14 * 2. Provide parsing result to kernel drivers
15 *
16 */
17#include <linux/init.h>
18#include <linux/export.h>
19#include <linux/module.h>
20#include <linux/device.h>
21#include <linux/platform_device.h>
22#include <linux/io.h>
23#include <linux/acpi.h>
24#include <linux/uaccess.h>
25#include <linux/miscdevice.h>
26#include "acpi_thermal_rel.h"
27
28static acpi_handle acpi_thermal_rel_handle;
29static DEFINE_SPINLOCK(acpi_thermal_rel_chrdev_lock);
30static int acpi_thermal_rel_chrdev_count; /* #times opened */
31static int acpi_thermal_rel_chrdev_exclu; /* already open exclusive? */
32
33static int acpi_thermal_rel_open(struct inode *inode, struct file *file)
34{
35 spin_lock(&acpi_thermal_rel_chrdev_lock);
36 if (acpi_thermal_rel_chrdev_exclu ||
37 (acpi_thermal_rel_chrdev_count && (file->f_flags & O_EXCL))) {
38 spin_unlock(&acpi_thermal_rel_chrdev_lock);
39 return -EBUSY;
40 }
41
42 if (file->f_flags & O_EXCL)
43 acpi_thermal_rel_chrdev_exclu = 1;
44 acpi_thermal_rel_chrdev_count++;
45
46 spin_unlock(&acpi_thermal_rel_chrdev_lock);
47
48 return nonseekable_open(inode, file);
49}
50
51static int acpi_thermal_rel_release(struct inode *inode, struct file *file)
52{
53 spin_lock(&acpi_thermal_rel_chrdev_lock);
54 acpi_thermal_rel_chrdev_count--;
55 acpi_thermal_rel_chrdev_exclu = 0;
56 spin_unlock(&acpi_thermal_rel_chrdev_lock);
57
58 return 0;
59}
60
61/**
62 * acpi_parse_trt - Parse Thermal Relationship Table _TRT for passive cooling
63 *
64 * @handle: ACPI handle of the device contains _TRT
65 * @art_count: the number of valid entries resulted from parsing _TRT
66 * @artp: pointer to pointer of array of art entries in parsing result
67 * @create_dev: whether to create platform devices for target and source
68 *
69 */
70int acpi_parse_trt(acpi_handle handle, int *trt_count, struct trt **trtp,
71 bool create_dev)
72{
73 acpi_status status;
74 int result = 0;
75 int i;
76 int nr_bad_entries = 0;
77 struct trt *trts;
78 struct acpi_device *adev;
79 union acpi_object *p;
80 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
81 struct acpi_buffer element = { 0, NULL };
82 struct acpi_buffer trt_format = { sizeof("RRNNNNNN"), "RRNNNNNN" };
83
84 if (!acpi_has_method(handle, "_TRT"))
85 return 0;
86
87 status = acpi_evaluate_object(handle, "_TRT", NULL, &buffer);
88 if (ACPI_FAILURE(status))
89 return -ENODEV;
90
91 p = buffer.pointer;
92 if (!p || (p->type != ACPI_TYPE_PACKAGE)) {
93 pr_err("Invalid _TRT data\n");
94 result = -EFAULT;
95 goto end;
96 }
97
98 *trt_count = p->package.count;
99 trts = kzalloc(*trt_count * sizeof(struct trt), GFP_KERNEL);
100 if (!trts) {
101 result = -ENOMEM;
102 goto end;
103 }
104
105 for (i = 0; i < *trt_count; i++) {
106 struct trt *trt = &trts[i - nr_bad_entries];
107
108 element.length = sizeof(struct trt);
109 element.pointer = trt;
110
111 status = acpi_extract_package(&(p->package.elements[i]),
112 &trt_format, &element);
113 if (ACPI_FAILURE(status)) {
114 nr_bad_entries++;
115 pr_warn("_TRT package %d is invalid, ignored\n", i);
116 continue;
117 }
118 if (!create_dev)
119 continue;
120
121 result = acpi_bus_get_device(trt->source, &adev);
122 if (!result)
123 acpi_create_platform_device(adev);
124 else
125 pr_warn("Failed to get source ACPI device\n");
126
127 result = acpi_bus_get_device(trt->target, &adev);
128 if (!result)
129 acpi_create_platform_device(adev);
130 else
131 pr_warn("Failed to get target ACPI device\n");
132 }
133
134 *trtp = trts;
135 /* don't count bad entries */
136 *trt_count -= nr_bad_entries;
137end:
138 kfree(buffer.pointer);
139 return result;
140}
141EXPORT_SYMBOL(acpi_parse_trt);
142
143/**
144 * acpi_parse_art - Parse Active Relationship Table _ART
145 *
 146 * @handle: ACPI handle of the device that contains _ART
 147 * @art_count: the number of valid entries resulting from parsing _ART
 148 * @artp: pointer used to return the array of parsed art entries
149 * @create_dev: whether to create platform devices for target and source
150 *
151 */
152int acpi_parse_art(acpi_handle handle, int *art_count, struct art **artp,
153 bool create_dev)
154{
155 acpi_status status;
156 int result = 0;
157 int i;
158 int nr_bad_entries = 0;
159 struct art *arts;
160 struct acpi_device *adev;
161 union acpi_object *p;
162 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
163 struct acpi_buffer element = { 0, NULL };
164 struct acpi_buffer art_format = {
165 sizeof("RRNNNNNNNNNNN"), "RRNNNNNNNNNNN" };
166
167 if (!acpi_has_method(handle, "_ART"))
168 return 0;
169
170 status = acpi_evaluate_object(handle, "_ART", NULL, &buffer);
171 if (ACPI_FAILURE(status))
172 return -ENODEV;
173
174 p = buffer.pointer;
175 if (!p || (p->type != ACPI_TYPE_PACKAGE)) {
176 pr_err("Invalid _ART data\n");
177 result = -EFAULT;
178 goto end;
179 }
180
181 /* ignore p->package.elements[0], as this is _ART Revision field */
182 *art_count = p->package.count - 1;
183 arts = kzalloc(*art_count * sizeof(struct art), GFP_KERNEL);
184 if (!arts) {
185 result = -ENOMEM;
186 goto end;
187 }
188
189 for (i = 0; i < *art_count; i++) {
190 struct art *art = &arts[i - nr_bad_entries];
191
192 element.length = sizeof(struct art);
193 element.pointer = art;
194
195 status = acpi_extract_package(&(p->package.elements[i + 1]),
196 &art_format, &element);
197 if (ACPI_FAILURE(status)) {
 198 pr_warn("_ART package %d is invalid, ignored\n", i);
199 nr_bad_entries++;
200 continue;
201 }
202 if (!create_dev)
203 continue;
204
205 if (art->source) {
206 result = acpi_bus_get_device(art->source, &adev);
207 if (!result)
208 acpi_create_platform_device(adev);
209 else
210 pr_warn("Failed to get source ACPI device\n");
211 }
212 if (art->target) {
213 result = acpi_bus_get_device(art->target, &adev);
214 if (!result)
215 acpi_create_platform_device(adev);
216 else
 217 pr_warn("Failed to get target ACPI device\n");
218 }
219 }
220
221 *artp = arts;
222 /* don't count bad entries */
223 *art_count -= nr_bad_entries;
224end:
225 kfree(buffer.pointer);
226 return result;
227}
228EXPORT_SYMBOL(acpi_parse_art);
229
230
231/* get device name from acpi handle */
232static void get_single_name(acpi_handle handle, char *name)
233{
234 struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER};
235
236 if (ACPI_FAILURE(acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer)))
 237 pr_warn("Failed to get name from handle\n");
238 else {
239 memcpy(name, buffer.pointer, ACPI_NAME_SIZE);
240 kfree(buffer.pointer);
241 }
242}
243
244static int fill_art(char __user *ubuf)
245{
246 int i;
247 int ret;
248 int count;
249 int art_len;
250 struct art *arts = NULL;
251 union art_object *art_user;
252
253 ret = acpi_parse_art(acpi_thermal_rel_handle, &count, &arts, false);
254 if (ret)
255 goto free_art;
256 art_len = count * sizeof(union art_object);
257 art_user = kzalloc(art_len, GFP_KERNEL);
258 if (!art_user) {
259 ret = -ENOMEM;
260 goto free_art;
261 }
262 /* now fill in user art data */
263 for (i = 0; i < count; i++) {
264 /* userspace art needs device name instead of acpi reference */
265 get_single_name(arts[i].source, art_user[i].source_device);
266 get_single_name(arts[i].target, art_user[i].target_device);
 267 /* copy the remaining integer fields after source and target */
268 memcpy(&art_user[i].weight, &arts[i].weight,
269 sizeof(u64) * (ACPI_NR_ART_ELEMENTS - 2));
270 }
271
272 if (copy_to_user(ubuf, art_user, art_len))
273 ret = -EFAULT;
274 kfree(art_user);
275free_art:
276 kfree(arts);
277 return ret;
278}
279
280static int fill_trt(char __user *ubuf)
281{
282 int i;
283 int ret;
284 int count;
285 int trt_len;
286 struct trt *trts = NULL;
287 union trt_object *trt_user;
288
289 ret = acpi_parse_trt(acpi_thermal_rel_handle, &count, &trts, false);
290 if (ret)
291 goto free_trt;
292 trt_len = count * sizeof(union trt_object);
293 trt_user = kzalloc(trt_len, GFP_KERNEL);
294 if (!trt_user) {
295 ret = -ENOMEM;
296 goto free_trt;
297 }
298 /* now fill in user trt data */
299 for (i = 0; i < count; i++) {
300 /* userspace trt needs device name instead of acpi reference */
301 get_single_name(trts[i].source, trt_user[i].source_device);
302 get_single_name(trts[i].target, trt_user[i].target_device);
303 trt_user[i].sample_period = trts[i].sample_period;
304 trt_user[i].influence = trts[i].influence;
305 }
306
307 if (copy_to_user(ubuf, trt_user, trt_len))
308 ret = -EFAULT;
309 kfree(trt_user);
310free_trt:
311 kfree(trts);
312 return ret;
313}
314
315static long acpi_thermal_rel_ioctl(struct file *f, unsigned int cmd,
316 unsigned long __arg)
317{
318 int ret = 0;
319 unsigned long length = 0;
320 unsigned long count = 0;
321 char __user *arg = (void __user *)__arg;
322 struct trt *trts;
323 struct art *arts;
324
325 switch (cmd) {
326 case ACPI_THERMAL_GET_TRT_COUNT:
327 ret = acpi_parse_trt(acpi_thermal_rel_handle, (int *)&count,
328 &trts, false);
329 kfree(trts);
330 if (!ret)
331 return put_user(count, (unsigned long __user *)__arg);
332 return ret;
333 case ACPI_THERMAL_GET_TRT_LEN:
334 ret = acpi_parse_trt(acpi_thermal_rel_handle, (int *)&count,
335 &trts, false);
336 kfree(trts);
337 length = count * sizeof(union trt_object);
338 if (!ret)
339 return put_user(length, (unsigned long __user *)__arg);
340 return ret;
341 case ACPI_THERMAL_GET_TRT:
342 return fill_trt(arg);
343 case ACPI_THERMAL_GET_ART_COUNT:
344 ret = acpi_parse_art(acpi_thermal_rel_handle, (int *)&count,
345 &arts, false);
346 kfree(arts);
347 if (!ret)
348 return put_user(count, (unsigned long __user *)__arg);
349 return ret;
350 case ACPI_THERMAL_GET_ART_LEN:
351 ret = acpi_parse_art(acpi_thermal_rel_handle, (int *)&count,
352 &arts, false);
353 kfree(arts);
354 length = count * sizeof(union art_object);
355 if (!ret)
356 return put_user(length, (unsigned long __user *)__arg);
357 return ret;
358
359 case ACPI_THERMAL_GET_ART:
360 return fill_art(arg);
361
362 default:
363 return -ENOTTY;
364 }
365}
366
367static const struct file_operations acpi_thermal_rel_fops = {
368 .owner = THIS_MODULE,
369 .open = acpi_thermal_rel_open,
370 .release = acpi_thermal_rel_release,
371 .unlocked_ioctl = acpi_thermal_rel_ioctl,
372 .llseek = no_llseek,
373};
374
375static struct miscdevice acpi_thermal_rel_misc_device = {
376 .minor = MISC_DYNAMIC_MINOR,
377 "acpi_thermal_rel",
378 &acpi_thermal_rel_fops
379};
380
381int acpi_thermal_rel_misc_device_add(acpi_handle handle)
382{
383 acpi_thermal_rel_handle = handle;
384
385 return misc_register(&acpi_thermal_rel_misc_device);
386}
387EXPORT_SYMBOL(acpi_thermal_rel_misc_device_add);
388
389int acpi_thermal_rel_misc_device_remove(acpi_handle handle)
390{
391 misc_deregister(&acpi_thermal_rel_misc_device);
392
393 return 0;
394}
395EXPORT_SYMBOL(acpi_thermal_rel_misc_device_remove);
396
397MODULE_AUTHOR("Zhang Rui <rui.zhang@intel.com>");
 398MODULE_AUTHOR("Jacob Pan <jacob.jun.pan@intel.com>");
399MODULE_DESCRIPTION("Intel acpi thermal rel misc dev driver");
400MODULE_LICENSE("GPL v2");
diff --git a/drivers/thermal/int340x_thermal/acpi_thermal_rel.h b/drivers/thermal/int340x_thermal/acpi_thermal_rel.h
new file mode 100644
index 000000000000..f00700bc9d79
--- /dev/null
+++ b/drivers/thermal/int340x_thermal/acpi_thermal_rel.h
@@ -0,0 +1,84 @@
1#ifndef __ACPI_ACPI_THERMAL_H
2#define __ACPI_ACPI_THERMAL_H
3
4#include <asm/ioctl.h>
5
6#define ACPI_THERMAL_MAGIC 's'
7
8#define ACPI_THERMAL_GET_TRT_LEN _IOR(ACPI_THERMAL_MAGIC, 1, unsigned long)
9#define ACPI_THERMAL_GET_ART_LEN _IOR(ACPI_THERMAL_MAGIC, 2, unsigned long)
10#define ACPI_THERMAL_GET_TRT_COUNT _IOR(ACPI_THERMAL_MAGIC, 3, unsigned long)
11#define ACPI_THERMAL_GET_ART_COUNT _IOR(ACPI_THERMAL_MAGIC, 4, unsigned long)
12
13#define ACPI_THERMAL_GET_TRT _IOR(ACPI_THERMAL_MAGIC, 5, unsigned long)
14#define ACPI_THERMAL_GET_ART _IOR(ACPI_THERMAL_MAGIC, 6, unsigned long)
15
16struct art {
17 acpi_handle source;
18 acpi_handle target;
19 u64 weight;
20 u64 ac0_max;
21 u64 ac1_max;
22 u64 ac2_max;
23 u64 ac3_max;
24 u64 ac4_max;
25 u64 ac5_max;
26 u64 ac6_max;
27 u64 ac7_max;
28 u64 ac8_max;
29 u64 ac9_max;
30} __packed;
31
32struct trt {
33 acpi_handle source;
34 acpi_handle target;
35 u64 influence;
36 u64 sample_period;
 37 u64 reserved1;
 38 u64 reserved2;
 39 u64 reserved3;
 40 u64 reserved4;
41} __packed;
42
43#define ACPI_NR_ART_ELEMENTS 13
 44/* for userspace */
45union art_object {
46 struct {
47 char source_device[8]; /* ACPI single name */
48 char target_device[8]; /* ACPI single name */
49 u64 weight;
50 u64 ac0_max_level;
51 u64 ac1_max_level;
52 u64 ac2_max_level;
53 u64 ac3_max_level;
54 u64 ac4_max_level;
55 u64 ac5_max_level;
56 u64 ac6_max_level;
57 u64 ac7_max_level;
58 u64 ac8_max_level;
59 u64 ac9_max_level;
60 };
61 u64 __data[ACPI_NR_ART_ELEMENTS];
62};
63
64union trt_object {
65 struct {
66 char source_device[8]; /* ACPI single name */
67 char target_device[8]; /* ACPI single name */
68 u64 influence;
69 u64 sample_period;
70 u64 reserved[4];
71 };
72 u64 __data[8];
73};
74
75#ifdef __KERNEL__
76int acpi_thermal_rel_misc_device_add(acpi_handle handle);
77int acpi_thermal_rel_misc_device_remove(acpi_handle handle);
78int acpi_parse_art(acpi_handle handle, int *art_count, struct art **arts,
79 bool create_dev);
80int acpi_parse_trt(acpi_handle handle, int *trt_count, struct trt **trts,
81 bool create_dev);
82#endif
83
84#endif /* __ACPI_ACPI_THERMAL_H */
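
The ioctls above are meant to be driven from a userspace thermal daemon in a count/length/get sequence. A minimal client sketch follows; the /dev/acpi_thermal_rel node name follows miscdevice.name, and the ioctl numbers and trt_object layout are re-declared with userspace types since this kernel header is not directly usable from userspace. The sketch is illustrative only and is not part of this patch.

/*
 * Hypothetical userspace client for the acpi_thermal_rel misc device.
 * Mirrors the ioctl numbers and union trt_object layout defined above.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/ioctl.h>

#define ACPI_THERMAL_MAGIC		's'
#define ACPI_THERMAL_GET_TRT_LEN	_IOR(ACPI_THERMAL_MAGIC, 1, unsigned long)
#define ACPI_THERMAL_GET_TRT_COUNT	_IOR(ACPI_THERMAL_MAGIC, 3, unsigned long)
#define ACPI_THERMAL_GET_TRT		_IOR(ACPI_THERMAL_MAGIC, 5, unsigned long)

union trt_object {
	struct {
		char source_device[8];	/* ACPI single name */
		char target_device[8];	/* ACPI single name */
		uint64_t influence;
		uint64_t sample_period;
		uint64_t reserved[4];
	};
	uint64_t __data[8];
};

int main(void)
{
	unsigned long count = 0, len = 0, i;
	union trt_object *trt;
	int fd = open("/dev/acpi_thermal_rel", O_RDONLY);

	if (fd < 0)
		return 1;

	/* ask for the number of _TRT entries and the buffer size first */
	if (ioctl(fd, ACPI_THERMAL_GET_TRT_COUNT, &count) ||
	    ioctl(fd, ACPI_THERMAL_GET_TRT_LEN, &len) ||
	    !(trt = calloc(1, len))) {
		close(fd);
		return 1;
	}

	/* then fetch the table; entries carry ACPI single names, not handles */
	if (!ioctl(fd, ACPI_THERMAL_GET_TRT, trt))
		for (i = 0; i < count; i++)
			printf("%.8s -> %.8s influence %llu\n",
			       trt[i].source_device, trt[i].target_device,
			       (unsigned long long)trt[i].influence);

	free(trt);
	close(fd);
	return 0;
}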
diff --git a/drivers/thermal/int340x_thermal/int3400_thermal.c b/drivers/thermal/int340x_thermal/int3400_thermal.c
new file mode 100644
index 000000000000..edc1cce117ba
--- /dev/null
+++ b/drivers/thermal/int340x_thermal/int3400_thermal.c
@@ -0,0 +1,271 @@
1/*
2 * INT3400 thermal driver
3 *
4 * Copyright (C) 2014, Intel Corporation
5 * Authors: Zhang Rui <rui.zhang@intel.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 */
12
13#include <linux/module.h>
14#include <linux/platform_device.h>
15#include <linux/acpi.h>
16#include <linux/thermal.h>
17#include "acpi_thermal_rel.h"
18
19enum int3400_thermal_uuid {
20 INT3400_THERMAL_PASSIVE_1,
21 INT3400_THERMAL_PASSIVE_2,
22 INT3400_THERMAL_ACTIVE,
23 INT3400_THERMAL_CRITICAL,
24 INT3400_THERMAL_COOLING_MODE,
25 INT3400_THERMAL_MAXIMUM_UUID,
26};
27
28static u8 *int3400_thermal_uuids[INT3400_THERMAL_MAXIMUM_UUID] = {
29 "42A441D6-AE6A-462b-A84B-4A8CE79027D3",
30 "9E04115A-AE87-4D1C-9500-0F3E340BFE75",
31 "3A95C389-E4B8-4629-A526-C52C88626BAE",
32 "97C68AE7-15FA-499c-B8C9-5DA81D606E0A",
33 "16CAF1B7-DD38-40ed-B1C1-1B8A1913D531",
34};
35
36struct int3400_thermal_priv {
37 struct acpi_device *adev;
38 struct thermal_zone_device *thermal;
39 int mode;
40 int art_count;
41 struct art *arts;
42 int trt_count;
43 struct trt *trts;
44 u8 uuid_bitmap;
45 int rel_misc_dev_res;
46};
47
48static int int3400_thermal_get_uuids(struct int3400_thermal_priv *priv)
49{
50 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL};
51 union acpi_object *obja, *objb;
52 int i, j;
53 int result = 0;
54 acpi_status status;
55
56 status = acpi_evaluate_object(priv->adev->handle, "IDSP", NULL, &buf);
57 if (ACPI_FAILURE(status))
58 return -ENODEV;
59
60 obja = (union acpi_object *)buf.pointer;
61 if (obja->type != ACPI_TYPE_PACKAGE) {
62 result = -EINVAL;
63 goto end;
64 }
65
66 for (i = 0; i < obja->package.count; i++) {
67 objb = &obja->package.elements[i];
68 if (objb->type != ACPI_TYPE_BUFFER) {
69 result = -EINVAL;
70 goto end;
71 }
72
73 /* UUID must be 16 bytes */
74 if (objb->buffer.length != 16) {
75 result = -EINVAL;
76 goto end;
77 }
78
79 for (j = 0; j < INT3400_THERMAL_MAXIMUM_UUID; j++) {
80 u8 uuid[16];
81
82 acpi_str_to_uuid(int3400_thermal_uuids[j], uuid);
83 if (!strncmp(uuid, objb->buffer.pointer, 16)) {
84 priv->uuid_bitmap |= (1 << j);
85 break;
86 }
87 }
88 }
89
90end:
91 kfree(buf.pointer);
92 return result;
93}
94
95static int int3400_thermal_run_osc(acpi_handle handle,
96 enum int3400_thermal_uuid uuid, bool enable)
97{
98 u32 ret, buf[2];
99 acpi_status status;
100 int result = 0;
101 struct acpi_osc_context context = {
102 .uuid_str = int3400_thermal_uuids[uuid],
103 .rev = 1,
104 .cap.length = 8,
105 };
106
107 buf[OSC_QUERY_DWORD] = 0;
108 buf[OSC_SUPPORT_DWORD] = enable;
109
110 context.cap.pointer = buf;
111
112 status = acpi_run_osc(handle, &context);
113 if (ACPI_SUCCESS(status)) {
114 ret = *((u32 *)(context.ret.pointer + 4));
115 if (ret != enable)
116 result = -EPERM;
117 } else
118 result = -EPERM;
119
120 kfree(context.ret.pointer);
121 return result;
122}
123
124static int int3400_thermal_get_temp(struct thermal_zone_device *thermal,
125 unsigned long *temp)
126{
 127 *temp = 20 * 1000; /* fake temperature sensor fixed at 20C */
128 return 0;
129}
130
131static int int3400_thermal_get_mode(struct thermal_zone_device *thermal,
132 enum thermal_device_mode *mode)
133{
134 struct int3400_thermal_priv *priv = thermal->devdata;
135
136 if (!priv)
137 return -EINVAL;
138
139 *mode = priv->mode;
140
141 return 0;
142}
143
144static int int3400_thermal_set_mode(struct thermal_zone_device *thermal,
145 enum thermal_device_mode mode)
146{
147 struct int3400_thermal_priv *priv = thermal->devdata;
148 bool enable;
149 int result = 0;
150
151 if (!priv)
152 return -EINVAL;
153
154 if (mode == THERMAL_DEVICE_ENABLED)
155 enable = true;
156 else if (mode == THERMAL_DEVICE_DISABLED)
157 enable = false;
158 else
159 return -EINVAL;
160
161 if (enable != priv->mode) {
162 priv->mode = enable;
163 /* currently, only PASSIVE COOLING is supported */
164 result = int3400_thermal_run_osc(priv->adev->handle,
165 INT3400_THERMAL_PASSIVE_1, enable);
166 }
167 return result;
168}
169
170static struct thermal_zone_device_ops int3400_thermal_ops = {
171 .get_temp = int3400_thermal_get_temp,
172};
173
174static struct thermal_zone_params int3400_thermal_params = {
175 .governor_name = "user_space",
176 .no_hwmon = true,
177};
178
179static int int3400_thermal_probe(struct platform_device *pdev)
180{
181 struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
182 struct int3400_thermal_priv *priv;
183 int result;
184
185 if (!adev)
186 return -ENODEV;
187
188 priv = kzalloc(sizeof(struct int3400_thermal_priv), GFP_KERNEL);
189 if (!priv)
190 return -ENOMEM;
191
192 priv->adev = adev;
193
194 result = int3400_thermal_get_uuids(priv);
195 if (result)
196 goto free_priv;
197
198 result = acpi_parse_art(priv->adev->handle, &priv->art_count,
199 &priv->arts, true);
200 if (result)
201 goto free_priv;
202
203
204 result = acpi_parse_trt(priv->adev->handle, &priv->trt_count,
205 &priv->trts, true);
206 if (result)
207 goto free_art;
208
209 platform_set_drvdata(pdev, priv);
210
211 if (priv->uuid_bitmap & 1 << INT3400_THERMAL_PASSIVE_1) {
212 int3400_thermal_ops.get_mode = int3400_thermal_get_mode;
213 int3400_thermal_ops.set_mode = int3400_thermal_set_mode;
214 }
215 priv->thermal = thermal_zone_device_register("INT3400 Thermal", 0, 0,
216 priv, &int3400_thermal_ops,
217 &int3400_thermal_params, 0, 0);
218 if (IS_ERR(priv->thermal)) {
219 result = PTR_ERR(priv->thermal);
220 goto free_trt;
221 }
222
223 priv->rel_misc_dev_res = acpi_thermal_rel_misc_device_add(
224 priv->adev->handle);
225
226 return 0;
227free_trt:
228 kfree(priv->trts);
229free_art:
230 kfree(priv->arts);
231free_priv:
232 kfree(priv);
233 return result;
234}
235
236static int int3400_thermal_remove(struct platform_device *pdev)
237{
238 struct int3400_thermal_priv *priv = platform_get_drvdata(pdev);
239
240 if (!priv->rel_misc_dev_res)
241 acpi_thermal_rel_misc_device_remove(priv->adev->handle);
242
243 thermal_zone_device_unregister(priv->thermal);
244 kfree(priv->trts);
245 kfree(priv->arts);
246 kfree(priv);
247 return 0;
248}
249
250static const struct acpi_device_id int3400_thermal_match[] = {
251 {"INT3400", 0},
252 {}
253};
254
255MODULE_DEVICE_TABLE(acpi, int3400_thermal_match);
256
257static struct platform_driver int3400_thermal_driver = {
258 .probe = int3400_thermal_probe,
259 .remove = int3400_thermal_remove,
260 .driver = {
261 .name = "int3400 thermal",
262 .owner = THIS_MODULE,
263 .acpi_match_table = ACPI_PTR(int3400_thermal_match),
264 },
265};
266
267module_platform_driver(int3400_thermal_driver);
268
269MODULE_DESCRIPTION("INT3400 Thermal driver");
270MODULE_AUTHOR("Zhang Rui <rui.zhang@intel.com>");
271MODULE_LICENSE("GPL");
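
Policy enabling in this driver goes through ACPI _OSC rather than any register write: int3400_thermal_run_osc() submits a two-DWORD capability buffer (query word 0, support word set to the requested enable value) and requires the support word echoed back by the firmware to match, otherwise the mode change fails with -EPERM. A stand-alone sketch of that handshake, with hypothetical values:

/*
 * Illustration of the _OSC request/response convention used by
 * int3400_thermal_run_osc() above. The firmware reply is faked here;
 * only the buffer layout and the result check mirror the driver.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define OSC_QUERY_DWORD		0	/* first DWORD of the capability buffer */
#define OSC_SUPPORT_DWORD	1	/* second DWORD of the capability buffer */

static bool osc_policy_applied(const uint32_t *reply, uint32_t enable)
{
	/* the driver reads the DWORD at offset 4 of the returned buffer */
	return reply[OSC_SUPPORT_DWORD] == enable;
}

int main(void)
{
	uint32_t request[2], reply[2];
	uint32_t enable = 1;			/* THERMAL_DEVICE_ENABLED */

	request[OSC_QUERY_DWORD] = 0;		/* apply the change, not a query */
	request[OSC_SUPPORT_DWORD] = enable;	/* 1 = turn the passive policy on */

	reply[OSC_QUERY_DWORD] = request[OSC_QUERY_DWORD];
	reply[OSC_SUPPORT_DWORD] = enable;	/* pretend the firmware agreed */

	printf("passive policy %s\n",
	       osc_policy_applied(reply, enable) ? "enabled" : "rejected (-EPERM)");
	return 0;
}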
diff --git a/drivers/thermal/int340x_thermal/int3402_thermal.c b/drivers/thermal/int340x_thermal/int3402_thermal.c
new file mode 100644
index 000000000000..a5d08c14ba24
--- /dev/null
+++ b/drivers/thermal/int340x_thermal/int3402_thermal.c
@@ -0,0 +1,242 @@
1/*
2 * INT3402 thermal driver for memory temperature reporting
3 *
4 * Copyright (C) 2014, Intel Corporation
5 * Authors: Aaron Lu <aaron.lu@intel.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 */
12
13#include <linux/module.h>
14#include <linux/platform_device.h>
15#include <linux/acpi.h>
16#include <linux/thermal.h>
17
18#define ACPI_ACTIVE_COOLING_MAX_NR 10
19
20struct active_trip {
21 unsigned long temp;
22 int id;
23 bool valid;
24};
25
26struct int3402_thermal_data {
27 unsigned long *aux_trips;
28 int aux_trip_nr;
29 unsigned long psv_temp;
30 int psv_trip_id;
31 unsigned long crt_temp;
32 int crt_trip_id;
33 unsigned long hot_temp;
34 int hot_trip_id;
35 struct active_trip act_trips[ACPI_ACTIVE_COOLING_MAX_NR];
36 acpi_handle *handle;
37};
38
39static int int3402_thermal_get_zone_temp(struct thermal_zone_device *zone,
40 unsigned long *temp)
41{
42 struct int3402_thermal_data *d = zone->devdata;
43 unsigned long long tmp;
44 acpi_status status;
45
46 status = acpi_evaluate_integer(d->handle, "_TMP", NULL, &tmp);
47 if (ACPI_FAILURE(status))
48 return -ENODEV;
49
50 /* _TMP returns the temperature in tenths of degrees Kelvin */
51 *temp = DECI_KELVIN_TO_MILLICELSIUS(tmp);
52
53 return 0;
54}
55
56static int int3402_thermal_get_trip_temp(struct thermal_zone_device *zone,
57 int trip, unsigned long *temp)
58{
59 struct int3402_thermal_data *d = zone->devdata;
60 int i;
61
62 if (trip < d->aux_trip_nr)
63 *temp = d->aux_trips[trip];
64 else if (trip == d->crt_trip_id)
65 *temp = d->crt_temp;
66 else if (trip == d->psv_trip_id)
67 *temp = d->psv_temp;
68 else if (trip == d->hot_trip_id)
69 *temp = d->hot_temp;
70 else {
71 for (i = 0; i < ACPI_ACTIVE_COOLING_MAX_NR; i++) {
72 if (d->act_trips[i].valid &&
73 d->act_trips[i].id == trip) {
74 *temp = d->act_trips[i].temp;
75 break;
76 }
77 }
78 if (i == ACPI_ACTIVE_COOLING_MAX_NR)
79 return -EINVAL;
80 }
81 return 0;
82}
83
84static int int3402_thermal_get_trip_type(struct thermal_zone_device *zone,
85 int trip, enum thermal_trip_type *type)
86{
87 struct int3402_thermal_data *d = zone->devdata;
88 int i;
89
90 if (trip < d->aux_trip_nr)
91 *type = THERMAL_TRIP_PASSIVE;
92 else if (trip == d->crt_trip_id)
93 *type = THERMAL_TRIP_CRITICAL;
94 else if (trip == d->hot_trip_id)
95 *type = THERMAL_TRIP_HOT;
96 else if (trip == d->psv_trip_id)
97 *type = THERMAL_TRIP_PASSIVE;
98 else {
99 for (i = 0; i < ACPI_ACTIVE_COOLING_MAX_NR; i++) {
100 if (d->act_trips[i].valid &&
101 d->act_trips[i].id == trip) {
102 *type = THERMAL_TRIP_ACTIVE;
103 break;
104 }
105 }
106 if (i == ACPI_ACTIVE_COOLING_MAX_NR)
107 return -EINVAL;
108 }
109 return 0;
110}
111
112static int int3402_thermal_set_trip_temp(struct thermal_zone_device *zone, int trip,
113 unsigned long temp)
114{
115 struct int3402_thermal_data *d = zone->devdata;
116 acpi_status status;
117 char name[10];
118
119 snprintf(name, sizeof(name), "PAT%d", trip);
120 status = acpi_execute_simple_method(d->handle, name,
121 MILLICELSIUS_TO_DECI_KELVIN(temp));
122 if (ACPI_FAILURE(status))
123 return -EIO;
124
125 d->aux_trips[trip] = temp;
126 return 0;
127}
128
129static struct thermal_zone_device_ops int3402_thermal_zone_ops = {
130 .get_temp = int3402_thermal_get_zone_temp,
131 .get_trip_temp = int3402_thermal_get_trip_temp,
132 .get_trip_type = int3402_thermal_get_trip_type,
133 .set_trip_temp = int3402_thermal_set_trip_temp,
134};
135
136static struct thermal_zone_params int3402_thermal_params = {
137 .governor_name = "user_space",
138 .no_hwmon = true,
139};
140
141static int int3402_thermal_get_temp(acpi_handle handle, char *name,
142 unsigned long *temp)
143{
144 unsigned long long r;
145 acpi_status status;
146
147 status = acpi_evaluate_integer(handle, name, NULL, &r);
148 if (ACPI_FAILURE(status))
149 return -EIO;
150
151 *temp = DECI_KELVIN_TO_MILLICELSIUS(r);
152 return 0;
153}
154
155static int int3402_thermal_probe(struct platform_device *pdev)
156{
157 struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
158 struct int3402_thermal_data *d;
159 struct thermal_zone_device *zone;
160 acpi_status status;
161 unsigned long long trip_cnt;
162 int trip_mask = 0, i;
163
164 if (!acpi_has_method(adev->handle, "_TMP"))
165 return -ENODEV;
166
167 d = devm_kzalloc(&pdev->dev, sizeof(*d), GFP_KERNEL);
168 if (!d)
169 return -ENOMEM;
170
171 status = acpi_evaluate_integer(adev->handle, "PATC", NULL, &trip_cnt);
172 if (ACPI_FAILURE(status))
173 trip_cnt = 0;
174 else {
175 d->aux_trips = devm_kzalloc(&pdev->dev,
176 sizeof(*d->aux_trips) * trip_cnt, GFP_KERNEL);
177 if (!d->aux_trips)
178 return -ENOMEM;
179 trip_mask = trip_cnt - 1;
180 d->handle = adev->handle;
181 d->aux_trip_nr = trip_cnt;
182 }
183
184 d->crt_trip_id = -1;
185 if (!int3402_thermal_get_temp(adev->handle, "_CRT", &d->crt_temp))
186 d->crt_trip_id = trip_cnt++;
187 d->hot_trip_id = -1;
188 if (!int3402_thermal_get_temp(adev->handle, "_HOT", &d->hot_temp))
189 d->hot_trip_id = trip_cnt++;
190 d->psv_trip_id = -1;
191 if (!int3402_thermal_get_temp(adev->handle, "_PSV", &d->psv_temp))
192 d->psv_trip_id = trip_cnt++;
193 for (i = 0; i < ACPI_ACTIVE_COOLING_MAX_NR; i++) {
194 char name[5] = { '_', 'A', 'C', '0' + i, '\0' };
195 if (int3402_thermal_get_temp(adev->handle, name,
196 &d->act_trips[i].temp))
197 break;
198 d->act_trips[i].id = trip_cnt++;
199 d->act_trips[i].valid = true;
200 }
201
202 zone = thermal_zone_device_register(acpi_device_bid(adev), trip_cnt,
203 trip_mask, d,
204 &int3402_thermal_zone_ops,
205 &int3402_thermal_params,
206 0, 0);
207 if (IS_ERR(zone))
208 return PTR_ERR(zone);
209 platform_set_drvdata(pdev, zone);
210
211 return 0;
212}
213
214static int int3402_thermal_remove(struct platform_device *pdev)
215{
216 struct thermal_zone_device *zone = platform_get_drvdata(pdev);
217
218 thermal_zone_device_unregister(zone);
219 return 0;
220}
221
222static const struct acpi_device_id int3402_thermal_match[] = {
223 {"INT3402", 0},
224 {}
225};
226
227MODULE_DEVICE_TABLE(acpi, int3402_thermal_match);
228
229static struct platform_driver int3402_thermal_driver = {
230 .probe = int3402_thermal_probe,
231 .remove = int3402_thermal_remove,
232 .driver = {
233 .name = "int3402 thermal",
234 .owner = THIS_MODULE,
235 .acpi_match_table = int3402_thermal_match,
236 },
237};
238
239module_platform_driver(int3402_thermal_driver);
240
241MODULE_DESCRIPTION("INT3402 Thermal driver");
242MODULE_LICENSE("GPL");
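
The trip numbering this probe builds is implicit in the order of the lookups: the PATC-sized block of writable aux trips comes first, followed by _CRT, _HOT, _PSV and then the _ACx trips, each taking the next free id. A stand-alone sketch of the resulting layout for a hypothetical device with PATC = 2, _CRT, _PSV and _AC0 present and _HOT absent:

/*
 * Illustration of the trip-id layout produced by int3402_thermal_probe()
 * above. The presence flags and PATC value are hypothetical.
 */
#include <stdio.h>
#include <stdbool.h>

int main(void)
{
	int trip_cnt = 2;		/* PATC: aux trips take ids 0 and 1 */
	int trip_mask = trip_cnt - 1;	/* computed as in the probe above */
	int crt_id = -1, hot_id = -1, psv_id = -1, ac0_id = -1;
	bool has_crt = true, has_hot = false, has_psv = true, has_ac0 = true;

	if (has_crt)
		crt_id = trip_cnt++;	/* -> 2, reported as THERMAL_TRIP_CRITICAL */
	if (has_hot)
		hot_id = trip_cnt++;	/* absent, stays -1 */
	if (has_psv)
		psv_id = trip_cnt++;	/* -> 3, reported as THERMAL_TRIP_PASSIVE */
	if (has_ac0)
		ac0_id = trip_cnt++;	/* -> 4, reported as THERMAL_TRIP_ACTIVE */

	printf("zone registered with %d trips, writable mask 0x%x\n",
	       trip_cnt, trip_mask);
	printf("crt=%d hot=%d psv=%d ac0=%d\n", crt_id, hot_id, psv_id, ac0_id);
	return 0;
}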
diff --git a/drivers/thermal/int340x_thermal/int3403_thermal.c b/drivers/thermal/int340x_thermal/int3403_thermal.c
new file mode 100644
index 000000000000..6e9fb62eb817
--- /dev/null
+++ b/drivers/thermal/int340x_thermal/int3403_thermal.c
@@ -0,0 +1,483 @@
1/*
2 * ACPI INT3403 thermal driver
3 * Copyright (c) 2013, Intel Corporation.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 */
14
15#include <linux/kernel.h>
16#include <linux/module.h>
17#include <linux/init.h>
18#include <linux/types.h>
19#include <linux/acpi.h>
20#include <linux/thermal.h>
21#include <linux/platform_device.h>
22
23#define INT3403_TYPE_SENSOR 0x03
24#define INT3403_TYPE_CHARGER 0x0B
25#define INT3403_TYPE_BATTERY 0x0C
26#define INT3403_PERF_CHANGED_EVENT 0x80
27#define INT3403_THERMAL_EVENT 0x90
28
29#define DECI_KELVIN_TO_MILLI_CELSIUS(t, off) (((t) - (off)) * 100)
30#define KELVIN_OFFSET 2732
31#define MILLI_CELSIUS_TO_DECI_KELVIN(t, off) (((t) / 100) + (off))
32
33struct int3403_sensor {
34 struct thermal_zone_device *tzone;
35 unsigned long *thresholds;
36 unsigned long crit_temp;
37 int crit_trip_id;
38 unsigned long psv_temp;
39 int psv_trip_id;
40
41};
42
43struct int3403_performance_state {
44 u64 performance;
45 u64 power;
46 u64 latency;
47 u64 linear;
48 u64 control;
 49 u64 raw_performance;
50 char *raw_unit;
51 int reserved;
52};
53
54struct int3403_cdev {
55 struct thermal_cooling_device *cdev;
56 unsigned long max_state;
57};
58
59struct int3403_priv {
60 struct platform_device *pdev;
61 struct acpi_device *adev;
62 unsigned long long type;
63 void *priv;
64};
65
66static int sys_get_curr_temp(struct thermal_zone_device *tzone,
67 unsigned long *temp)
68{
69 struct int3403_priv *priv = tzone->devdata;
70 struct acpi_device *device = priv->adev;
71 unsigned long long tmp;
72 acpi_status status;
73
74 status = acpi_evaluate_integer(device->handle, "_TMP", NULL, &tmp);
75 if (ACPI_FAILURE(status))
76 return -EIO;
77
78 *temp = DECI_KELVIN_TO_MILLI_CELSIUS(tmp, KELVIN_OFFSET);
79
80 return 0;
81}
82
83static int sys_get_trip_hyst(struct thermal_zone_device *tzone,
84 int trip, unsigned long *temp)
85{
86 struct int3403_priv *priv = tzone->devdata;
87 struct acpi_device *device = priv->adev;
88 unsigned long long hyst;
89 acpi_status status;
90
91 status = acpi_evaluate_integer(device->handle, "GTSH", NULL, &hyst);
92 if (ACPI_FAILURE(status))
93 return -EIO;
94
95 /*
 96 * Thermal hysteresis represents a temperature difference.
 97 * Kelvin and Celsius have the same degree size, so converting
 98 * from tenths of a degree Kelvin to millidegrees Celsius is
 99 * simply a multiplication by 100.
100 */
101 *temp = hyst * 100;
102
103 return 0;
104}
105
106static int sys_get_trip_temp(struct thermal_zone_device *tzone,
107 int trip, unsigned long *temp)
108{
109 struct int3403_priv *priv = tzone->devdata;
110 struct int3403_sensor *obj = priv->priv;
111
112 if (priv->type != INT3403_TYPE_SENSOR || !obj)
113 return -EINVAL;
114
115 if (trip == obj->crit_trip_id)
116 *temp = obj->crit_temp;
117 else if (trip == obj->psv_trip_id)
118 *temp = obj->psv_temp;
119 else {
120 /*
 121 * get_trip_temp is a mandatory callback, but the
 122 * PATx method doesn't return any value, so return the
 123 * cached value, which was last set from user space
124 */
125 *temp = obj->thresholds[trip];
126 }
127
128 return 0;
129}
130
131static int sys_get_trip_type(struct thermal_zone_device *thermal,
132 int trip, enum thermal_trip_type *type)
133{
134 struct int3403_priv *priv = thermal->devdata;
135 struct int3403_sensor *obj = priv->priv;
136
137 /* Mandatory callback, may not mean much here */
138 if (trip == obj->crit_trip_id)
139 *type = THERMAL_TRIP_CRITICAL;
140 else
141 *type = THERMAL_TRIP_PASSIVE;
142
143 return 0;
144}
145
 146static int sys_set_trip_temp(struct thermal_zone_device *tzone, int trip,
147 unsigned long temp)
148{
149 struct int3403_priv *priv = tzone->devdata;
150 struct acpi_device *device = priv->adev;
151 struct int3403_sensor *obj = priv->priv;
152 acpi_status status;
153 char name[10];
154 int ret = 0;
155
156 snprintf(name, sizeof(name), "PAT%d", trip);
157 if (acpi_has_method(device->handle, name)) {
158 status = acpi_execute_simple_method(device->handle, name,
159 MILLI_CELSIUS_TO_DECI_KELVIN(temp,
160 KELVIN_OFFSET));
161 if (ACPI_FAILURE(status))
162 ret = -EIO;
163 else
164 obj->thresholds[trip] = temp;
165 } else {
166 ret = -EIO;
167 dev_err(&device->dev, "sys_set_trip_temp: method not found\n");
168 }
169
170 return ret;
171}
172
173static struct thermal_zone_device_ops tzone_ops = {
174 .get_temp = sys_get_curr_temp,
175 .get_trip_temp = sys_get_trip_temp,
176 .get_trip_type = sys_get_trip_type,
177 .set_trip_temp = sys_set_trip_temp,
178 .get_trip_hyst = sys_get_trip_hyst,
179};
180
181static struct thermal_zone_params int3403_thermal_params = {
182 .governor_name = "user_space",
183 .no_hwmon = true,
184};
185
186static void int3403_notify(acpi_handle handle,
187 u32 event, void *data)
188{
189 struct int3403_priv *priv = data;
190 struct int3403_sensor *obj;
191
192 if (!priv)
193 return;
194
195 obj = priv->priv;
196 if (priv->type != INT3403_TYPE_SENSOR || !obj)
197 return;
198
199 switch (event) {
200 case INT3403_PERF_CHANGED_EVENT:
201 break;
202 case INT3403_THERMAL_EVENT:
203 thermal_zone_device_update(obj->tzone);
204 break;
205 default:
206 dev_err(&priv->pdev->dev, "Unsupported event [0x%x]\n", event);
207 break;
208 }
209}
210
211static int sys_get_trip_crt(struct acpi_device *device, unsigned long *temp)
212{
213 unsigned long long crt;
214 acpi_status status;
215
216 status = acpi_evaluate_integer(device->handle, "_CRT", NULL, &crt);
217 if (ACPI_FAILURE(status))
218 return -EIO;
219
220 *temp = DECI_KELVIN_TO_MILLI_CELSIUS(crt, KELVIN_OFFSET);
221
222 return 0;
223}
224
225static int sys_get_trip_psv(struct acpi_device *device, unsigned long *temp)
226{
227 unsigned long long psv;
228 acpi_status status;
229
230 status = acpi_evaluate_integer(device->handle, "_PSV", NULL, &psv);
231 if (ACPI_FAILURE(status))
232 return -EIO;
233
234 *temp = DECI_KELVIN_TO_MILLI_CELSIUS(psv, KELVIN_OFFSET);
235
236 return 0;
237}
238
239static int int3403_sensor_add(struct int3403_priv *priv)
240{
241 int result = 0;
242 acpi_status status;
243 struct int3403_sensor *obj;
244 unsigned long long trip_cnt;
245 int trip_mask = 0;
246
247 obj = devm_kzalloc(&priv->pdev->dev, sizeof(*obj), GFP_KERNEL);
248 if (!obj)
249 return -ENOMEM;
250
251 priv->priv = obj;
252
253 status = acpi_evaluate_integer(priv->adev->handle, "PATC", NULL,
254 &trip_cnt);
255 if (ACPI_FAILURE(status))
256 trip_cnt = 0;
257
258 if (trip_cnt) {
 259 /* We have to cache the thresholds; they can't be read back */
260 obj->thresholds = devm_kzalloc(&priv->pdev->dev,
261 sizeof(*obj->thresholds) * trip_cnt,
262 GFP_KERNEL);
263 if (!obj->thresholds) {
264 result = -ENOMEM;
265 goto err_free_obj;
266 }
267 trip_mask = BIT(trip_cnt) - 1;
268 }
269
270 obj->psv_trip_id = -1;
271 if (!sys_get_trip_psv(priv->adev, &obj->psv_temp))
272 obj->psv_trip_id = trip_cnt++;
273
274 obj->crit_trip_id = -1;
275 if (!sys_get_trip_crt(priv->adev, &obj->crit_temp))
276 obj->crit_trip_id = trip_cnt++;
277
278 obj->tzone = thermal_zone_device_register(acpi_device_bid(priv->adev),
279 trip_cnt, trip_mask, priv, &tzone_ops,
280 &int3403_thermal_params, 0, 0);
281 if (IS_ERR(obj->tzone)) {
282 result = PTR_ERR(obj->tzone);
283 obj->tzone = NULL;
284 goto err_free_obj;
285 }
286
287 result = acpi_install_notify_handler(priv->adev->handle,
288 ACPI_DEVICE_NOTIFY, int3403_notify,
289 (void *)priv);
290 if (result)
291 goto err_free_obj;
292
293 return 0;
294
295 err_free_obj:
296 if (obj->tzone)
297 thermal_zone_device_unregister(obj->tzone);
298 return result;
299}
300
301static int int3403_sensor_remove(struct int3403_priv *priv)
302{
303 struct int3403_sensor *obj = priv->priv;
304
305 thermal_zone_device_unregister(obj->tzone);
306 return 0;
307}
308
309/* INT3403 Cooling devices */
310static int int3403_get_max_state(struct thermal_cooling_device *cdev,
311 unsigned long *state)
312{
313 struct int3403_priv *priv = cdev->devdata;
314 struct int3403_cdev *obj = priv->priv;
315
316 *state = obj->max_state;
317 return 0;
318}
319
320static int int3403_get_cur_state(struct thermal_cooling_device *cdev,
321 unsigned long *state)
322{
323 struct int3403_priv *priv = cdev->devdata;
324 unsigned long long level;
325 acpi_status status;
326
327 status = acpi_evaluate_integer(priv->adev->handle, "PPPC", NULL, &level);
328 if (ACPI_SUCCESS(status)) {
329 *state = level;
330 return 0;
331 } else
332 return -EINVAL;
333}
334
335static int
336int3403_set_cur_state(struct thermal_cooling_device *cdev, unsigned long state)
337{
338 struct int3403_priv *priv = cdev->devdata;
339 acpi_status status;
340
341 status = acpi_execute_simple_method(priv->adev->handle, "SPPC", state);
342 if (ACPI_SUCCESS(status))
343 return 0;
344 else
345 return -EINVAL;
346}
347
348static const struct thermal_cooling_device_ops int3403_cooling_ops = {
349 .get_max_state = int3403_get_max_state,
350 .get_cur_state = int3403_get_cur_state,
351 .set_cur_state = int3403_set_cur_state,
352};
353
354static int int3403_cdev_add(struct int3403_priv *priv)
355{
356 int result = 0;
357 acpi_status status;
358 struct int3403_cdev *obj;
359 struct acpi_buffer buf = { ACPI_ALLOCATE_BUFFER, NULL };
360 union acpi_object *p;
361
362 obj = devm_kzalloc(&priv->pdev->dev, sizeof(*obj), GFP_KERNEL);
363 if (!obj)
364 return -ENOMEM;
365
366 status = acpi_evaluate_object(priv->adev->handle, "PPSS", NULL, &buf);
367 if (ACPI_FAILURE(status))
368 return -ENODEV;
369
370 p = buf.pointer;
371 if (!p || (p->type != ACPI_TYPE_PACKAGE)) {
372 printk(KERN_WARNING "Invalid PPSS data\n");
373 return -EFAULT;
374 }
375
376 obj->max_state = p->package.count - 1;
377 obj->cdev =
378 thermal_cooling_device_register(acpi_device_bid(priv->adev),
379 priv, &int3403_cooling_ops);
380 if (IS_ERR(obj->cdev))
381 result = PTR_ERR(obj->cdev);
382
383 priv->priv = obj;
384
385 /* TODO: add ACPI notification support */
386
387 return result;
388}
389
390static int int3403_cdev_remove(struct int3403_priv *priv)
391{
392 struct int3403_cdev *obj = priv->priv;
393
394 thermal_cooling_device_unregister(obj->cdev);
395 return 0;
396}
397
398static int int3403_add(struct platform_device *pdev)
399{
400 struct int3403_priv *priv;
401 int result = 0;
402 acpi_status status;
403
404 priv = devm_kzalloc(&pdev->dev, sizeof(struct int3403_priv),
405 GFP_KERNEL);
406 if (!priv)
407 return -ENOMEM;
408
409 priv->pdev = pdev;
410 priv->adev = ACPI_COMPANION(&(pdev->dev));
411 if (!priv->adev) {
412 result = -EINVAL;
413 goto err;
414 }
415
416 status = acpi_evaluate_integer(priv->adev->handle, "PTYP",
417 NULL, &priv->type);
418 if (ACPI_FAILURE(status)) {
419 result = -EINVAL;
420 goto err;
421 }
422
423 platform_set_drvdata(pdev, priv);
424 switch (priv->type) {
425 case INT3403_TYPE_SENSOR:
426 result = int3403_sensor_add(priv);
427 break;
428 case INT3403_TYPE_CHARGER:
429 case INT3403_TYPE_BATTERY:
430 result = int3403_cdev_add(priv);
431 break;
432 default:
433 result = -EINVAL;
434 }
435
436 if (result)
437 goto err;
438 return result;
439
440err:
441 return result;
442}
443
444static int int3403_remove(struct platform_device *pdev)
445{
446 struct int3403_priv *priv = platform_get_drvdata(pdev);
447
448 switch (priv->type) {
449 case INT3403_TYPE_SENSOR:
450 int3403_sensor_remove(priv);
451 break;
452 case INT3403_TYPE_CHARGER:
453 case INT3403_TYPE_BATTERY:
454 int3403_cdev_remove(priv);
455 break;
456 default:
457 break;
458 }
459
460 return 0;
461}
462
463static const struct acpi_device_id int3403_device_ids[] = {
464 {"INT3403", 0},
465 {"", 0},
466};
467MODULE_DEVICE_TABLE(acpi, int3403_device_ids);
468
469static struct platform_driver int3403_driver = {
470 .probe = int3403_add,
471 .remove = int3403_remove,
472 .driver = {
473 .name = "int3403 thermal",
474 .owner = THIS_MODULE,
475 .acpi_match_table = int3403_device_ids,
476 },
477};
478
479module_platform_driver(int3403_driver);
480
481MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
482MODULE_LICENSE("GPL v2");
483MODULE_DESCRIPTION("ACPI INT3403 thermal driver");
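
All temperature traffic in this driver goes through the two conversion macros at the top of the file. As a worked example with hypothetical values: a _TMP reading of 3082 deci-Kelvin becomes (3082 - 2732) * 100 = 35000 millidegrees Celsius, and a 45000 millidegree trip set from user space is written back to PATx as 45000 / 100 + 2732 = 3182 deci-Kelvin. A stand-alone sketch:

/*
 * Stand-alone illustration of the deci-Kelvin conversions used by the
 * INT3403 driver above; the sample values are hypothetical.
 */
#include <stdio.h>

#define DECI_KELVIN_TO_MILLI_CELSIUS(t, off)	(((t) - (off)) * 100)
#define MILLI_CELSIUS_TO_DECI_KELVIN(t, off)	(((t) / 100) + (off))
#define KELVIN_OFFSET				2732

int main(void)
{
	long tmp = 3082;	/* e.g. a _TMP result, in deci-Kelvin */
	long trip = 45000;	/* e.g. a trip point from sysfs, in millidegrees C */

	printf("_TMP %ld dK -> %ld mC\n",
	       tmp, DECI_KELVIN_TO_MILLI_CELSIUS(tmp, KELVIN_OFFSET));
	printf("trip %ld mC -> PAT argument %ld dK\n",
	       trip, MILLI_CELSIUS_TO_DECI_KELVIN(trip, KELVIN_OFFSET));
	return 0;
}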
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index 4b2b999b7611..62143ba31001 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -387,20 +387,27 @@ thermal_zone_of_sensor_register(struct device *dev, int sensor_id,
387 int (*get_trend)(void *, long *)) 387 int (*get_trend)(void *, long *))
388{ 388{
389 struct device_node *np, *child, *sensor_np; 389 struct device_node *np, *child, *sensor_np;
390 struct thermal_zone_device *tzd = ERR_PTR(-ENODEV);
390 391
391 np = of_find_node_by_name(NULL, "thermal-zones"); 392 np = of_find_node_by_name(NULL, "thermal-zones");
392 if (!np) 393 if (!np)
393 return ERR_PTR(-ENODEV); 394 return ERR_PTR(-ENODEV);
394 395
395 if (!dev || !dev->of_node) 396 if (!dev || !dev->of_node) {
397 of_node_put(np);
396 return ERR_PTR(-EINVAL); 398 return ERR_PTR(-EINVAL);
399 }
397 400
398 sensor_np = dev->of_node; 401 sensor_np = of_node_get(dev->of_node);
399 402
400 for_each_child_of_node(np, child) { 403 for_each_child_of_node(np, child) {
401 struct of_phandle_args sensor_specs; 404 struct of_phandle_args sensor_specs;
402 int ret, id; 405 int ret, id;
403 406
407 /* Check whether child is enabled or not */
408 if (!of_device_is_available(child))
409 continue;
410
404 /* For now, thermal framework supports only 1 sensor per zone */ 411 /* For now, thermal framework supports only 1 sensor per zone */
405 ret = of_parse_phandle_with_args(child, "thermal-sensors", 412 ret = of_parse_phandle_with_args(child, "thermal-sensors",
406 "#thermal-sensor-cells", 413 "#thermal-sensor-cells",
@@ -418,16 +425,21 @@ thermal_zone_of_sensor_register(struct device *dev, int sensor_id,
418 } 425 }
419 426
420 if (sensor_specs.np == sensor_np && id == sensor_id) { 427 if (sensor_specs.np == sensor_np && id == sensor_id) {
421 of_node_put(np); 428 tzd = thermal_zone_of_add_sensor(child, sensor_np,
422 return thermal_zone_of_add_sensor(child, sensor_np, 429 data,
423 data, 430 get_temp,
424 get_temp, 431 get_trend);
425 get_trend); 432 of_node_put(sensor_specs.np);
433 of_node_put(child);
434 goto exit;
426 } 435 }
436 of_node_put(sensor_specs.np);
427 } 437 }
438exit:
439 of_node_put(sensor_np);
428 of_node_put(np); 440 of_node_put(np);
429 441
430 return ERR_PTR(-ENODEV); 442 return tzd;
431} 443}
432EXPORT_SYMBOL_GPL(thermal_zone_of_sensor_register); 444EXPORT_SYMBOL_GPL(thermal_zone_of_sensor_register);
433 445
@@ -619,6 +631,7 @@ static int thermal_of_populate_trip(struct device_node *np,
619 631
620 /* Required for cooling map matching */ 632 /* Required for cooling map matching */
621 trip->np = np; 633 trip->np = np;
634 of_node_get(np);
622 635
623 return 0; 636 return 0;
624} 637}
@@ -726,9 +739,14 @@ finish:
726 return tz; 739 return tz;
727 740
728free_tbps: 741free_tbps:
742 for (i = 0; i < tz->num_tbps; i++)
743 of_node_put(tz->tbps[i].cooling_device);
729 kfree(tz->tbps); 744 kfree(tz->tbps);
730free_trips: 745free_trips:
746 for (i = 0; i < tz->ntrips; i++)
747 of_node_put(tz->trips[i].np);
731 kfree(tz->trips); 748 kfree(tz->trips);
749 of_node_put(gchild);
732free_tz: 750free_tz:
733 kfree(tz); 751 kfree(tz);
734 of_node_put(child); 752 of_node_put(child);
@@ -738,7 +756,13 @@ free_tz:
738 756
739static inline void of_thermal_free_zone(struct __thermal_zone *tz) 757static inline void of_thermal_free_zone(struct __thermal_zone *tz)
740{ 758{
759 int i;
760
761 for (i = 0; i < tz->num_tbps; i++)
762 of_node_put(tz->tbps[i].cooling_device);
741 kfree(tz->tbps); 763 kfree(tz->tbps);
764 for (i = 0; i < tz->ntrips; i++)
765 of_node_put(tz->trips[i].np);
742 kfree(tz->trips); 766 kfree(tz->trips);
743 kfree(tz); 767 kfree(tz);
744} 768}
@@ -771,6 +795,10 @@ int __init of_parse_thermal_zones(void)
771 struct thermal_zone_device *zone; 795 struct thermal_zone_device *zone;
772 struct thermal_zone_params *tzp; 796 struct thermal_zone_params *tzp;
773 797
798 /* Check whether child is enabled or not */
799 if (!of_device_is_available(child))
800 continue;
801
774 tz = thermal_of_build_thermal_zone(child); 802 tz = thermal_of_build_thermal_zone(child);
775 if (IS_ERR(tz)) { 803 if (IS_ERR(tz)) {
776 pr_err("failed to build thermal zone %s: %ld\n", 804 pr_err("failed to build thermal zone %s: %ld\n",
@@ -806,10 +834,13 @@ int __init of_parse_thermal_zones(void)
806 /* attempting to build remaining zones still */ 834 /* attempting to build remaining zones still */
807 } 835 }
808 } 836 }
837 of_node_put(np);
809 838
810 return 0; 839 return 0;
811 840
812exit_free: 841exit_free:
842 of_node_put(child);
843 of_node_put(np);
813 of_thermal_free_zone(tz); 844 of_thermal_free_zone(tz);
814 845
815 /* no memory available, so free what we have built */ 846 /* no memory available, so free what we have built */
@@ -838,6 +869,10 @@ void of_thermal_destroy_zones(void)
838 for_each_child_of_node(np, child) { 869 for_each_child_of_node(np, child) {
839 struct thermal_zone_device *zone; 870 struct thermal_zone_device *zone;
840 871
872 /* Check whether child is enabled or not */
873 if (!of_device_is_available(child))
874 continue;
875
841 zone = thermal_zone_get_zone_by_name(child->name); 876 zone = thermal_zone_get_zone_by_name(child->name);
842 if (IS_ERR(zone)) 877 if (IS_ERR(zone))
843 continue; 878 continue;
@@ -847,4 +882,5 @@ void of_thermal_destroy_zones(void)
847 kfree(zone->ops); 882 kfree(zone->ops);
848 of_thermal_free_zone(zone->devdata); 883 of_thermal_free_zone(zone->devdata);
849 } 884 }
885 of_node_put(np);
850} 886}
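
The of-thermal hunks above are mostly about device-node reference counting: every node obtained through of_find_node_by_name(), of_node_get(), of_parse_phandle_with_args() or the child iterators now has a matching of_node_put() on every exit path, and zone nodes that are not enabled are skipped. A minimal sketch of the pairing being enforced, using a hypothetical helper:

/*
 * Minimal sketch of the of_node get/put pairing applied above; the
 * helper itself is hypothetical and only shows the pattern.
 */
#include <linux/of.h>
#include <linux/errno.h>

static int example_count_enabled_zones(void)
{
	struct device_node *np, *child;
	int enabled = 0;

	np = of_find_node_by_name(NULL, "thermal-zones");	/* takes a reference */
	if (!np)
		return -ENODEV;

	for_each_child_of_node(np, child) {
		if (!of_device_is_available(child))	/* skip disabled zones */
			continue;
		enabled++;
	}

	of_node_put(np);	/* drop the reference on every exit path */
	return enabled;
}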
diff --git a/drivers/thermal/samsung/exynos_thermal_common.c b/drivers/thermal/samsung/exynos_thermal_common.c
index 3f5ad25ddca8..b6be572704a4 100644
--- a/drivers/thermal/samsung/exynos_thermal_common.c
+++ b/drivers/thermal/samsung/exynos_thermal_common.c
@@ -417,13 +417,10 @@ void exynos_unregister_thermal(struct thermal_sensor_conf *sensor_conf)
417 417
418 th_zone = sensor_conf->pzone_data; 418 th_zone = sensor_conf->pzone_data;
419 419
420 if (th_zone->therm_dev) 420 thermal_zone_device_unregister(th_zone->therm_dev);
421 thermal_zone_device_unregister(th_zone->therm_dev);
422 421
423 for (i = 0; i < th_zone->cool_dev_size; i++) { 422 for (i = 0; i < th_zone->cool_dev_size; ++i)
424 if (th_zone->cool_dev[i]) 423 cpufreq_cooling_unregister(th_zone->cool_dev[i]);
425 cpufreq_cooling_unregister(th_zone->cool_dev[i]);
426 }
427 424
428 dev_info(sensor_conf->dev, 425 dev_info(sensor_conf->dev,
429 "Exynos: Kernel Thermal management unregistered\n"); 426 "Exynos: Kernel Thermal management unregistered\n");
diff --git a/drivers/thermal/samsung/exynos_thermal_common.h b/drivers/thermal/samsung/exynos_thermal_common.h
index 3eb2ed9ea3a4..158f5aa8dc5d 100644
--- a/drivers/thermal/samsung/exynos_thermal_common.h
+++ b/drivers/thermal/samsung/exynos_thermal_common.h
@@ -27,7 +27,7 @@
27#define SENSOR_NAME_LEN 16 27#define SENSOR_NAME_LEN 16
28#define MAX_TRIP_COUNT 8 28#define MAX_TRIP_COUNT 8
29#define MAX_COOLING_DEVICE 4 29#define MAX_COOLING_DEVICE 4
30#define MAX_THRESHOLD_LEVS 5 30#define MAX_TRIMINFO_CTRL_REG 2
31 31
32#define ACTIVE_INTERVAL 500 32#define ACTIVE_INTERVAL 500
33#define IDLE_INTERVAL 10000 33#define IDLE_INTERVAL 10000
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index acbff14da3a4..49c09243fd38 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -77,16 +77,6 @@ static int temp_to_code(struct exynos_tmu_data *data, u8 temp)
77 struct exynos_tmu_platform_data *pdata = data->pdata; 77 struct exynos_tmu_platform_data *pdata = data->pdata;
78 int temp_code; 78 int temp_code;
79 79
80 if (pdata->cal_mode == HW_MODE)
81 return temp;
82
83 if (data->soc == SOC_ARCH_EXYNOS4210)
84 /* temp should range between 25 and 125 */
85 if (temp < 25 || temp > 125) {
86 temp_code = -EINVAL;
87 goto out;
88 }
89
90 switch (pdata->cal_type) { 80 switch (pdata->cal_type) {
91 case TYPE_TWO_POINT_TRIMMING: 81 case TYPE_TWO_POINT_TRIMMING:
92 temp_code = (temp - pdata->first_point_trim) * 82 temp_code = (temp - pdata->first_point_trim) *
@@ -101,7 +91,7 @@ static int temp_to_code(struct exynos_tmu_data *data, u8 temp)
101 temp_code = temp + pdata->default_temp_offset; 91 temp_code = temp + pdata->default_temp_offset;
102 break; 92 break;
103 } 93 }
104out: 94
105 return temp_code; 95 return temp_code;
106} 96}
107 97
@@ -114,16 +104,6 @@ static int code_to_temp(struct exynos_tmu_data *data, u8 temp_code)
114 struct exynos_tmu_platform_data *pdata = data->pdata; 104 struct exynos_tmu_platform_data *pdata = data->pdata;
115 int temp; 105 int temp;
116 106
117 if (pdata->cal_mode == HW_MODE)
118 return temp_code;
119
120 if (data->soc == SOC_ARCH_EXYNOS4210)
121 /* temp_code should range between 75 and 175 */
122 if (temp_code < 75 || temp_code > 175) {
123 temp = -ENODATA;
124 goto out;
125 }
126
127 switch (pdata->cal_type) { 107 switch (pdata->cal_type) {
128 case TYPE_TWO_POINT_TRIMMING: 108 case TYPE_TWO_POINT_TRIMMING:
129 temp = (temp_code - data->temp_error1) * 109 temp = (temp_code - data->temp_error1) *
@@ -138,18 +118,35 @@ static int code_to_temp(struct exynos_tmu_data *data, u8 temp_code)
138 temp = temp_code - pdata->default_temp_offset; 118 temp = temp_code - pdata->default_temp_offset;
139 break; 119 break;
140 } 120 }
141out: 121
142 return temp; 122 return temp;
143} 123}
144 124
125static void exynos_tmu_clear_irqs(struct exynos_tmu_data *data)
126{
127 const struct exynos_tmu_registers *reg = data->pdata->registers;
128 unsigned int val_irq;
129
130 val_irq = readl(data->base + reg->tmu_intstat);
131 /*
132 * Clear the interrupts. Please note that the documentation for
133 * Exynos3250, Exynos4412, Exynos5250 and Exynos5260 incorrectly
134 * states that INTCLEAR register has a different placing of bits
135 * responsible for FALL IRQs than INTSTAT register. Exynos5420
136 * and Exynos5440 documentation is correct (Exynos4210 doesn't
137 * support FALL IRQs at all).
138 */
139 writel(val_irq, data->base + reg->tmu_intclear);
140}
141
145static int exynos_tmu_initialize(struct platform_device *pdev) 142static int exynos_tmu_initialize(struct platform_device *pdev)
146{ 143{
147 struct exynos_tmu_data *data = platform_get_drvdata(pdev); 144 struct exynos_tmu_data *data = platform_get_drvdata(pdev);
148 struct exynos_tmu_platform_data *pdata = data->pdata; 145 struct exynos_tmu_platform_data *pdata = data->pdata;
149 const struct exynos_tmu_registers *reg = pdata->registers; 146 const struct exynos_tmu_registers *reg = pdata->registers;
150 unsigned int status, trim_info = 0, con; 147 unsigned int status, trim_info = 0, con, ctrl;
151 unsigned int rising_threshold = 0, falling_threshold = 0; 148 unsigned int rising_threshold = 0, falling_threshold = 0;
152 int ret = 0, threshold_code, i, trigger_levs = 0; 149 int ret = 0, threshold_code, i;
153 150
154 mutex_lock(&data->lock); 151 mutex_lock(&data->lock);
155 clk_enable(data->clk); 152 clk_enable(data->clk);
@@ -164,11 +161,17 @@ static int exynos_tmu_initialize(struct platform_device *pdev)
164 } 161 }
165 } 162 }
166 163
167 if (TMU_SUPPORTS(pdata, TRIM_RELOAD)) 164 if (TMU_SUPPORTS(pdata, TRIM_RELOAD)) {
168 __raw_writel(1, data->base + reg->triminfo_ctrl); 165 for (i = 0; i < reg->triminfo_ctrl_count; i++) {
169 166 if (pdata->triminfo_reload[i]) {
170 if (pdata->cal_mode == HW_MODE) 167 ctrl = readl(data->base +
171 goto skip_calib_data; 168 reg->triminfo_ctrl[i]);
169 ctrl |= pdata->triminfo_reload[i];
170 writel(ctrl, data->base +
171 reg->triminfo_ctrl[i]);
172 }
173 }
174 }
172 175
173 /* Save trimming info in order to perform calibration */ 176 /* Save trimming info in order to perform calibration */
174 if (data->soc == SOC_ARCH_EXYNOS5440) { 177 if (data->soc == SOC_ARCH_EXYNOS5440) {
@@ -197,7 +200,7 @@ static int exynos_tmu_initialize(struct platform_device *pdev)
197 trim_info = readl(data->base + reg->triminfo_data); 200 trim_info = readl(data->base + reg->triminfo_data);
198 } 201 }
199 data->temp_error1 = trim_info & EXYNOS_TMU_TEMP_MASK; 202 data->temp_error1 = trim_info & EXYNOS_TMU_TEMP_MASK;
200 data->temp_error2 = ((trim_info >> reg->triminfo_85_shift) & 203 data->temp_error2 = ((trim_info >> EXYNOS_TRIMINFO_85_SHIFT) &
201 EXYNOS_TMU_TEMP_MASK); 204 EXYNOS_TMU_TEMP_MASK);
202 205
203 if (!data->temp_error1 || 206 if (!data->temp_error1 ||
@@ -207,67 +210,33 @@ static int exynos_tmu_initialize(struct platform_device *pdev)
207 210
208 if (!data->temp_error2) 211 if (!data->temp_error2)
209 data->temp_error2 = 212 data->temp_error2 =
210 (pdata->efuse_value >> reg->triminfo_85_shift) & 213 (pdata->efuse_value >> EXYNOS_TRIMINFO_85_SHIFT) &
211 EXYNOS_TMU_TEMP_MASK; 214 EXYNOS_TMU_TEMP_MASK;
212 215
213skip_calib_data:
214 if (pdata->max_trigger_level > MAX_THRESHOLD_LEVS) {
215 dev_err(&pdev->dev, "Invalid max trigger level\n");
216 ret = -EINVAL;
217 goto out;
218 }
219
220 for (i = 0; i < pdata->max_trigger_level; i++) {
221 if (!pdata->trigger_levels[i])
222 continue;
223
224 if ((pdata->trigger_type[i] == HW_TRIP) &&
225 (!pdata->trigger_levels[pdata->max_trigger_level - 1])) {
226 dev_err(&pdev->dev, "Invalid hw trigger level\n");
227 ret = -EINVAL;
228 goto out;
229 }
230
231 /* Count trigger levels except the HW trip*/
232 if (!(pdata->trigger_type[i] == HW_TRIP))
233 trigger_levs++;
234 }
235
236 rising_threshold = readl(data->base + reg->threshold_th0); 216 rising_threshold = readl(data->base + reg->threshold_th0);
237 217
238 if (data->soc == SOC_ARCH_EXYNOS4210) { 218 if (data->soc == SOC_ARCH_EXYNOS4210) {
239 /* Write temperature code for threshold */ 219 /* Write temperature code for threshold */
240 threshold_code = temp_to_code(data, pdata->threshold); 220 threshold_code = temp_to_code(data, pdata->threshold);
241 if (threshold_code < 0) {
242 ret = threshold_code;
243 goto out;
244 }
245 writeb(threshold_code, 221 writeb(threshold_code,
246 data->base + reg->threshold_temp); 222 data->base + reg->threshold_temp);
247 for (i = 0; i < trigger_levs; i++) 223 for (i = 0; i < pdata->non_hw_trigger_levels; i++)
248 writeb(pdata->trigger_levels[i], data->base + 224 writeb(pdata->trigger_levels[i], data->base +
249 reg->threshold_th0 + i * sizeof(reg->threshold_th0)); 225 reg->threshold_th0 + i * sizeof(reg->threshold_th0));
250 226
251 writel(reg->intclr_rise_mask, data->base + reg->tmu_intclear); 227 exynos_tmu_clear_irqs(data);
252 } else { 228 } else {
253 /* Write temperature code for rising and falling threshold */ 229 /* Write temperature code for rising and falling threshold */
254 for (i = 0; 230 for (i = 0; i < pdata->non_hw_trigger_levels; i++) {
255 i < trigger_levs && i < EXYNOS_MAX_TRIGGER_PER_REG; i++) {
256 threshold_code = temp_to_code(data, 231 threshold_code = temp_to_code(data,
257 pdata->trigger_levels[i]); 232 pdata->trigger_levels[i]);
258 if (threshold_code < 0) {
259 ret = threshold_code;
260 goto out;
261 }
262 rising_threshold &= ~(0xff << 8 * i); 233 rising_threshold &= ~(0xff << 8 * i);
263 rising_threshold |= threshold_code << 8 * i; 234 rising_threshold |= threshold_code << 8 * i;
264 if (pdata->threshold_falling) { 235 if (pdata->threshold_falling) {
265 threshold_code = temp_to_code(data, 236 threshold_code = temp_to_code(data,
266 pdata->trigger_levels[i] - 237 pdata->trigger_levels[i] -
267 pdata->threshold_falling); 238 pdata->threshold_falling);
268 if (threshold_code > 0) 239 falling_threshold |= threshold_code << 8 * i;
269 falling_threshold |=
270 threshold_code << 8 * i;
271 } 240 }
272 } 241 }
273 242
@@ -276,9 +245,7 @@ skip_calib_data:
276 writel(falling_threshold, 245 writel(falling_threshold,
277 data->base + reg->threshold_th1); 246 data->base + reg->threshold_th1);
278 247
279 writel((reg->intclr_rise_mask << reg->intclr_rise_shift) | 248 exynos_tmu_clear_irqs(data);
280 (reg->intclr_fall_mask << reg->intclr_fall_shift),
281 data->base + reg->tmu_intclear);
282 249
283 /* if last threshold limit is also present */ 250 /* if last threshold limit is also present */
284 i = pdata->max_trigger_level - 1; 251 i = pdata->max_trigger_level - 1;
@@ -286,10 +253,6 @@ skip_calib_data:
286 (pdata->trigger_type[i] == HW_TRIP)) { 253 (pdata->trigger_type[i] == HW_TRIP)) {
287 threshold_code = temp_to_code(data, 254 threshold_code = temp_to_code(data,
288 pdata->trigger_levels[i]); 255 pdata->trigger_levels[i]);
289 if (threshold_code < 0) {
290 ret = threshold_code;
291 goto out;
292 }
293 if (i == EXYNOS_MAX_TRIGGER_PER_REG - 1) { 256 if (i == EXYNOS_MAX_TRIGGER_PER_REG - 1) {
294 /* 1-4 level to be assigned in th0 reg */ 257 /* 1-4 level to be assigned in th0 reg */
295 rising_threshold &= ~(0xff << 8 * i); 258 rising_threshold &= ~(0xff << 8 * i);
@@ -325,7 +288,7 @@ static void exynos_tmu_control(struct platform_device *pdev, bool on)
325 struct exynos_tmu_data *data = platform_get_drvdata(pdev); 288 struct exynos_tmu_data *data = platform_get_drvdata(pdev);
326 struct exynos_tmu_platform_data *pdata = data->pdata; 289 struct exynos_tmu_platform_data *pdata = data->pdata;
327 const struct exynos_tmu_registers *reg = pdata->registers; 290 const struct exynos_tmu_registers *reg = pdata->registers;
328 unsigned int con, interrupt_en, cal_val; 291 unsigned int con, interrupt_en;
329 292
330 mutex_lock(&data->lock); 293 mutex_lock(&data->lock);
331 clk_enable(data->clk); 294 clk_enable(data->clk);
@@ -335,15 +298,11 @@ static void exynos_tmu_control(struct platform_device *pdev, bool on)
335 if (pdata->test_mux) 298 if (pdata->test_mux)
336 con |= (pdata->test_mux << reg->test_mux_addr_shift); 299 con |= (pdata->test_mux << reg->test_mux_addr_shift);
337 300
338 if (pdata->reference_voltage) { 301 con &= ~(EXYNOS_TMU_REF_VOLTAGE_MASK << EXYNOS_TMU_REF_VOLTAGE_SHIFT);
339 con &= ~(reg->buf_vref_sel_mask << reg->buf_vref_sel_shift); 302 con |= pdata->reference_voltage << EXYNOS_TMU_REF_VOLTAGE_SHIFT;
340 con |= pdata->reference_voltage << reg->buf_vref_sel_shift;
341 }
342 303
343 if (pdata->gain) { 304 con &= ~(EXYNOS_TMU_BUF_SLOPE_SEL_MASK << EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT);
344 con &= ~(reg->buf_slope_sel_mask << reg->buf_slope_sel_shift); 305 con |= (pdata->gain << EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT);
345 con |= (pdata->gain << reg->buf_slope_sel_shift);
346 }
347 306
348 if (pdata->noise_cancel_mode) { 307 if (pdata->noise_cancel_mode) {
349 con &= ~(reg->therm_trip_mode_mask << 308 con &= ~(reg->therm_trip_mode_mask <<
@@ -351,29 +310,8 @@ static void exynos_tmu_control(struct platform_device *pdev, bool on)
351 con |= (pdata->noise_cancel_mode << reg->therm_trip_mode_shift); 310 con |= (pdata->noise_cancel_mode << reg->therm_trip_mode_shift);
352 } 311 }
353 312
354 if (pdata->cal_mode == HW_MODE) {
355 con &= ~(reg->calib_mode_mask << reg->calib_mode_shift);
356 cal_val = 0;
357 switch (pdata->cal_type) {
358 case TYPE_TWO_POINT_TRIMMING:
359 cal_val = 3;
360 break;
361 case TYPE_ONE_POINT_TRIMMING_85:
362 cal_val = 2;
363 break;
364 case TYPE_ONE_POINT_TRIMMING_25:
365 cal_val = 1;
366 break;
367 case TYPE_NONE:
368 break;
369 default:
370 dev_err(&pdev->dev, "Invalid calibration type, using none\n");
371 }
372 con |= cal_val << reg->calib_mode_shift;
373 }
374
375 if (on) { 313 if (on) {
376 con |= (1 << reg->core_en_shift); 314 con |= (1 << EXYNOS_TMU_CORE_EN_SHIFT);
377 interrupt_en = 315 interrupt_en =
378 pdata->trigger_enable[3] << reg->inten_rise3_shift | 316 pdata->trigger_enable[3] << reg->inten_rise3_shift |
379 pdata->trigger_enable[2] << reg->inten_rise2_shift | 317 pdata->trigger_enable[2] << reg->inten_rise2_shift |
@@ -383,7 +321,7 @@ static void exynos_tmu_control(struct platform_device *pdev, bool on)
383 interrupt_en |= 321 interrupt_en |=
384 interrupt_en << reg->inten_fall0_shift; 322 interrupt_en << reg->inten_fall0_shift;
385 } else { 323 } else {
386 con &= ~(1 << reg->core_en_shift); 324 con &= ~(1 << EXYNOS_TMU_CORE_EN_SHIFT);
387 interrupt_en = 0; /* Disable all interrupts */ 325 interrupt_en = 0; /* Disable all interrupts */
388 } 326 }
389 writel(interrupt_en, data->base + reg->tmu_inten); 327 writel(interrupt_en, data->base + reg->tmu_inten);
@@ -404,8 +342,16 @@ static int exynos_tmu_read(struct exynos_tmu_data *data)
404 clk_enable(data->clk); 342 clk_enable(data->clk);
405 343
406 temp_code = readb(data->base + reg->tmu_cur_temp); 344 temp_code = readb(data->base + reg->tmu_cur_temp);
407 temp = code_to_temp(data, temp_code);
408 345
346 if (data->soc == SOC_ARCH_EXYNOS4210)
347 /* temp_code should range between 75 and 175 */
348 if (temp_code < 75 || temp_code > 175) {
349 temp = -ENODATA;
350 goto out;
351 }
352
353 temp = code_to_temp(data, temp_code);
354out:
409 clk_disable(data->clk); 355 clk_disable(data->clk);
410 mutex_unlock(&data->lock); 356 mutex_unlock(&data->lock);
411 357
@@ -465,7 +411,7 @@ static void exynos_tmu_work(struct work_struct *work)
465 struct exynos_tmu_data, irq_work); 411 struct exynos_tmu_data, irq_work);
466 struct exynos_tmu_platform_data *pdata = data->pdata; 412 struct exynos_tmu_platform_data *pdata = data->pdata;
467 const struct exynos_tmu_registers *reg = pdata->registers; 413 const struct exynos_tmu_registers *reg = pdata->registers;
468 unsigned int val_irq, val_type; 414 unsigned int val_type;
469 415
470 if (!IS_ERR(data->clk_sec)) 416 if (!IS_ERR(data->clk_sec))
471 clk_enable(data->clk_sec); 417 clk_enable(data->clk_sec);
@@ -483,9 +429,7 @@ static void exynos_tmu_work(struct work_struct *work)
483 clk_enable(data->clk); 429 clk_enable(data->clk);
484 430
485 /* TODO: take action based on particular interrupt */ 431 /* TODO: take action based on particular interrupt */
486 val_irq = readl(data->base + reg->tmu_intstat); 432 exynos_tmu_clear_irqs(data);
487 /* clear the interrupts */
488 writel(val_irq, data->base + reg->tmu_intclear);
489 433
490 clk_disable(data->clk); 434 clk_disable(data->clk);
491 mutex_unlock(&data->lock); 435 mutex_unlock(&data->lock);
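
Note: the exynos_tmu.c hunks above drop the per-SoC shift/mask descriptors in favour of the common EXYNOS_TMU_* defines and make the Exynos4210 read path reject raw codes outside 75..175. A minimal standalone sketch of those two patterns; the define values and helper names below are illustrative, not taken from the driver:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define REF_VOLTAGE_SHIFT  24      /* illustrative values, not the TMU layout */
#define REF_VOLTAGE_MASK   0x1f
#define BUF_SLOPE_SHIFT    8
#define BUF_SLOPE_MASK     0xf

/* Read-modify-write of a control word: clear the field, then OR in the value. */
static uint32_t set_field(uint32_t con, uint32_t mask, int shift, uint32_t val)
{
	con &= ~(mask << shift);
	con |= (val & mask) << shift;
	return con;
}

/* Reject raw sensor codes outside the calibrated window (75..175 here). */
static int code_valid(int temp_code)
{
	if (temp_code < 75 || temp_code > 175)
		return -ENODATA;
	return 0;
}

int main(void)
{
	uint32_t con = 0;

	con = set_field(con, REF_VOLTAGE_MASK, REF_VOLTAGE_SHIFT, 16);
	con = set_field(con, BUF_SLOPE_MASK, BUF_SLOPE_SHIFT, 8);
	printf("con = %#x, code 80 valid = %d\n", con, code_valid(80));
	return 0;
}
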
diff --git a/drivers/thermal/samsung/exynos_tmu.h b/drivers/thermal/samsung/exynos_tmu.h
index 1b4a6444ea61..c58c7663a3fe 100644
--- a/drivers/thermal/samsung/exynos_tmu.h
+++ b/drivers/thermal/samsung/exynos_tmu.h
@@ -34,11 +34,6 @@ enum calibration_type {
34 TYPE_NONE, 34 TYPE_NONE,
35}; 35};
36 36
37enum calibration_mode {
38 SW_MODE,
39 HW_MODE,
40};
41
42enum soc_type { 37enum soc_type {
43 SOC_ARCH_EXYNOS3250 = 1, 38 SOC_ARCH_EXYNOS3250 = 1,
44 SOC_ARCH_EXYNOS4210, 39 SOC_ARCH_EXYNOS4210,
@@ -82,46 +77,19 @@ enum soc_type {
82 * bitfields. The register validity, offsets and bitfield values may vary 77 * bitfields. The register validity, offsets and bitfield values may vary
83 * slightly across different exynos SOC's. 78 * slightly across different exynos SOC's.
84 * @triminfo_data: register containing 2 point trimming data 79 * @triminfo_data: register containing 2 point trimming data
85 * @triminfo_25_shift: shift bit of the 25 C trim value in triminfo_data reg.
86 * @triminfo_85_shift: shift bit of the 85 C trim value in triminfo_data reg.
87 * @triminfo_ctrl: trim info controller register. 80 * @triminfo_ctrl: trim info controller register.
88 * @triminfo_reload_shift: shift of triminfo reload enable bit in triminfo_ctrl 81 * @triminfo_ctrl_count: the number of trim info controller register.
89 reg.
90 * @tmu_ctrl: TMU main controller register. 82 * @tmu_ctrl: TMU main controller register.
91 * @test_mux_addr_shift: shift bits of test mux address. 83 * @test_mux_addr_shift: shift bits of test mux address.
92 * @buf_vref_sel_shift: shift bits of reference voltage in tmu_ctrl register.
93 * @buf_vref_sel_mask: mask bits of reference voltage in tmu_ctrl register.
94 * @therm_trip_mode_shift: shift bits of tripping mode in tmu_ctrl register. 84 * @therm_trip_mode_shift: shift bits of tripping mode in tmu_ctrl register.
95 * @therm_trip_mode_mask: mask bits of tripping mode in tmu_ctrl register. 85 * @therm_trip_mode_mask: mask bits of tripping mode in tmu_ctrl register.
96 * @therm_trip_en_shift: shift bits of tripping enable in tmu_ctrl register. 86 * @therm_trip_en_shift: shift bits of tripping enable in tmu_ctrl register.
97 * @buf_slope_sel_shift: shift bits of amplifier gain value in tmu_ctrl
98 register.
99 * @buf_slope_sel_mask: mask bits of amplifier gain value in tmu_ctrl register.
100 * @calib_mode_shift: shift bits of calibration mode value in tmu_ctrl
101 register.
102 * @calib_mode_mask: mask bits of calibration mode value in tmu_ctrl
103 register.
104 * @therm_trip_tq_en_shift: shift bits of thermal trip enable by TQ pin in
105 tmu_ctrl register.
106 * @core_en_shift: shift bits of TMU core enable bit in tmu_ctrl register.
107 * @tmu_status: register describing the TMU status. 87 * @tmu_status: register describing the TMU status.
108 * @tmu_cur_temp: register containing the current temperature of the TMU. 88 * @tmu_cur_temp: register containing the current temperature of the TMU.
109 * @tmu_cur_temp_shift: shift bits of current temp value in tmu_cur_temp
110 register.
111 * @threshold_temp: register containing the base threshold level. 89 * @threshold_temp: register containing the base threshold level.
112 * @threshold_th0: Register containing first set of rising levels. 90 * @threshold_th0: Register containing first set of rising levels.
113 * @threshold_th0_l0_shift: shift bits of level0 threshold temperature.
114 * @threshold_th0_l1_shift: shift bits of level1 threshold temperature.
115 * @threshold_th0_l2_shift: shift bits of level2 threshold temperature.
116 * @threshold_th0_l3_shift: shift bits of level3 threshold temperature.
117 * @threshold_th1: Register containing second set of rising levels. 91 * @threshold_th1: Register containing second set of rising levels.
118 * @threshold_th1_l0_shift: shift bits of level0 threshold temperature.
119 * @threshold_th1_l1_shift: shift bits of level1 threshold temperature.
120 * @threshold_th1_l2_shift: shift bits of level2 threshold temperature.
121 * @threshold_th1_l3_shift: shift bits of level3 threshold temperature.
122 * @threshold_th2: Register containing third set of rising levels. 92 * @threshold_th2: Register containing third set of rising levels.
123 * @threshold_th2_l0_shift: shift bits of level0 threshold temperature.
124 * @threshold_th3: Register containing fourth set of rising levels.
125 * @threshold_th3_l0_shift: shift bits of level0 threshold temperature. 93 * @threshold_th3_l0_shift: shift bits of level0 threshold temperature.
126 * @tmu_inten: register containing the different threshold interrupt 94 * @tmu_inten: register containing the different threshold interrupt
127 enable bits. 95 enable bits.
@@ -130,68 +98,35 @@ enum soc_type {
130 * @inten_rise2_shift: shift bits of rising 2 interrupt bits. 98 * @inten_rise2_shift: shift bits of rising 2 interrupt bits.
131 * @inten_rise3_shift: shift bits of rising 3 interrupt bits. 99 * @inten_rise3_shift: shift bits of rising 3 interrupt bits.
132 * @inten_fall0_shift: shift bits of falling 0 interrupt bits. 100 * @inten_fall0_shift: shift bits of falling 0 interrupt bits.
133 * @inten_fall1_shift: shift bits of falling 1 interrupt bits.
134 * @inten_fall2_shift: shift bits of falling 2 interrupt bits.
135 * @inten_fall3_shift: shift bits of falling 3 interrupt bits.
136 * @tmu_intstat: Register containing the interrupt status values. 101 * @tmu_intstat: Register containing the interrupt status values.
137 * @tmu_intclear: Register for clearing the raised interrupt status. 102 * @tmu_intclear: Register for clearing the raised interrupt status.
138 * @intclr_fall_shift: shift bits for interrupt clear fall 0
139 * @intclr_rise_shift: shift bits of all rising interrupt bits.
140 * @intclr_rise_mask: mask bits of all rising interrupt bits.
141 * @intclr_fall_mask: mask bits of all rising interrupt bits.
142 * @emul_con: TMU emulation controller register. 103 * @emul_con: TMU emulation controller register.
143 * @emul_temp_shift: shift bits of emulation temperature. 104 * @emul_temp_shift: shift bits of emulation temperature.
144 * @emul_time_shift: shift bits of emulation time. 105 * @emul_time_shift: shift bits of emulation time.
145 * @emul_time_mask: mask bits of emulation time.
146 * @tmu_irqstatus: register to find which TMU generated interrupts. 106 * @tmu_irqstatus: register to find which TMU generated interrupts.
147 * @tmu_pmin: register to get/set the Pmin value. 107 * @tmu_pmin: register to get/set the Pmin value.
148 */ 108 */
149struct exynos_tmu_registers { 109struct exynos_tmu_registers {
150 u32 triminfo_data; 110 u32 triminfo_data;
151 u32 triminfo_25_shift;
152 u32 triminfo_85_shift;
153 111
154 u32 triminfo_ctrl; 112 u32 triminfo_ctrl[MAX_TRIMINFO_CTRL_REG];
155 u32 triminfo_ctrl1; 113 u32 triminfo_ctrl_count;
156 u32 triminfo_reload_shift;
157 114
158 u32 tmu_ctrl; 115 u32 tmu_ctrl;
159 u32 test_mux_addr_shift; 116 u32 test_mux_addr_shift;
160 u32 buf_vref_sel_shift;
161 u32 buf_vref_sel_mask;
162 u32 therm_trip_mode_shift; 117 u32 therm_trip_mode_shift;
163 u32 therm_trip_mode_mask; 118 u32 therm_trip_mode_mask;
164 u32 therm_trip_en_shift; 119 u32 therm_trip_en_shift;
165 u32 buf_slope_sel_shift;
166 u32 buf_slope_sel_mask;
167 u32 calib_mode_shift;
168 u32 calib_mode_mask;
169 u32 therm_trip_tq_en_shift;
170 u32 core_en_shift;
171 120
172 u32 tmu_status; 121 u32 tmu_status;
173 122
174 u32 tmu_cur_temp; 123 u32 tmu_cur_temp;
175 u32 tmu_cur_temp_shift;
176 124
177 u32 threshold_temp; 125 u32 threshold_temp;
178 126
179 u32 threshold_th0; 127 u32 threshold_th0;
180 u32 threshold_th0_l0_shift;
181 u32 threshold_th0_l1_shift;
182 u32 threshold_th0_l2_shift;
183 u32 threshold_th0_l3_shift;
184
185 u32 threshold_th1; 128 u32 threshold_th1;
186 u32 threshold_th1_l0_shift;
187 u32 threshold_th1_l1_shift;
188 u32 threshold_th1_l2_shift;
189 u32 threshold_th1_l3_shift;
190
191 u32 threshold_th2; 129 u32 threshold_th2;
192 u32 threshold_th2_l0_shift;
193
194 u32 threshold_th3;
195 u32 threshold_th3_l0_shift; 130 u32 threshold_th3_l0_shift;
196 131
197 u32 tmu_inten; 132 u32 tmu_inten;
@@ -200,22 +135,14 @@ struct exynos_tmu_registers {
200 u32 inten_rise2_shift; 135 u32 inten_rise2_shift;
201 u32 inten_rise3_shift; 136 u32 inten_rise3_shift;
202 u32 inten_fall0_shift; 137 u32 inten_fall0_shift;
203 u32 inten_fall1_shift;
204 u32 inten_fall2_shift;
205 u32 inten_fall3_shift;
206 138
207 u32 tmu_intstat; 139 u32 tmu_intstat;
208 140
209 u32 tmu_intclear; 141 u32 tmu_intclear;
210 u32 intclr_fall_shift;
211 u32 intclr_rise_shift;
212 u32 intclr_fall_mask;
213 u32 intclr_rise_mask;
214 142
215 u32 emul_con; 143 u32 emul_con;
216 u32 emul_temp_shift; 144 u32 emul_temp_shift;
217 u32 emul_time_shift; 145 u32 emul_time_shift;
218 u32 emul_time_mask;
219 146
220 u32 tmu_irqstatus; 147 u32 tmu_irqstatus;
221 u32 tmu_pmin; 148 u32 tmu_pmin;
@@ -250,11 +177,12 @@ struct exynos_tmu_registers {
250 * 1 = enable trigger_level[] interrupt, 177 * 1 = enable trigger_level[] interrupt,
251 * 0 = disable trigger_level[] interrupt 178 * 0 = disable trigger_level[] interrupt
252 * @max_trigger_level: max trigger level supported by the TMU 179 * @max_trigger_level: max trigger level supported by the TMU
180 * @non_hw_trigger_levels: number of defined non-hardware trigger levels
253 * @gain: gain of amplifier in the positive-TC generator block 181 * @gain: gain of amplifier in the positive-TC generator block
254 * 0 <= gain <= 15 182 * 0 < gain <= 15
255 * @reference_voltage: reference voltage of amplifier 183 * @reference_voltage: reference voltage of amplifier
256 * in the positive-TC generator block 184 * in the positive-TC generator block
257 * 0 <= reference_voltage <= 31 185 * 0 < reference_voltage <= 31
258 * @noise_cancel_mode: noise cancellation mode 186 * @noise_cancel_mode: noise cancellation mode
259 * 000, 100, 101, 110 and 111 can be different modes 187 * 000, 100, 101, 110 and 111 can be different modes
260 * @type: determines the type of SOC 188 * @type: determines the type of SOC
@@ -265,8 +193,8 @@ struct exynos_tmu_registers {
265 * @second_point_trim: temp value of the second point trimming 193 * @second_point_trim: temp value of the second point trimming
266 * @default_temp_offset: default temperature offset in case of no trimming 194 * @default_temp_offset: default temperature offset in case of no trimming
267 * @test_mux: information if SoC supports test MUX 195 * @test_mux: information if SoC supports test MUX
196 * @triminfo_reload: reload value to read TRIMINFO register
268 * @cal_type: calibration type for temperature 197 * @cal_type: calibration type for temperature
269 * @cal_mode: calibration mode for temperature
270 * @freq_clip_table: Table representing frequency reduction percentage. 198 * @freq_clip_table: Table representing frequency reduction percentage.
271 * @freq_tab_count: Count of the above table as frequency reduction may 199 * @freq_tab_count: Count of the above table as frequency reduction may
272 * be applicable to only some of the trigger levels. 200 * be applicable to only some of the trigger levels.
@@ -284,6 +212,7 @@ struct exynos_tmu_platform_data {
284 enum trigger_type trigger_type[MAX_TRIP_COUNT]; 212 enum trigger_type trigger_type[MAX_TRIP_COUNT];
285 bool trigger_enable[MAX_TRIP_COUNT]; 213 bool trigger_enable[MAX_TRIP_COUNT];
286 u8 max_trigger_level; 214 u8 max_trigger_level;
215 u8 non_hw_trigger_levels;
287 u8 gain; 216 u8 gain;
288 u8 reference_voltage; 217 u8 reference_voltage;
289 u8 noise_cancel_mode; 218 u8 noise_cancel_mode;
@@ -295,9 +224,9 @@ struct exynos_tmu_platform_data {
295 u8 second_point_trim; 224 u8 second_point_trim;
296 u8 default_temp_offset; 225 u8 default_temp_offset;
297 u8 test_mux; 226 u8 test_mux;
227 u8 triminfo_reload[MAX_TRIMINFO_CTRL_REG];
298 228
299 enum calibration_type cal_type; 229 enum calibration_type cal_type;
300 enum calibration_mode cal_mode;
301 enum soc_type type; 230 enum soc_type type;
302 struct freq_clip_table freq_tab[4]; 231 struct freq_clip_table freq_tab[4];
303 unsigned int freq_tab_count; 232 unsigned int freq_tab_count;
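
Note: the header now describes triminfo_ctrl as an array sized by triminfo_ctrl_count, with a matching per-register triminfo_reload value in the platform data. The code that walks these arrays is not part of this excerpt, so the consumer below is a hypothetical sketch of how such a loop could look:

#include <stdint.h>
#include <stdio.h>

#define MAX_TRIMINFO_CTRL_REG 2

struct regs  { uint32_t triminfo_ctrl[MAX_TRIMINFO_CTRL_REG]; uint32_t triminfo_ctrl_count; };
struct pdata { uint8_t  triminfo_reload[MAX_TRIMINFO_CTRL_REG]; };

/* Stand-in for an MMIO write; just records what would be written where. */
static void writel_sim(uint32_t val, uint32_t off)
{
	printf("writel(%#x) -> +%#x\n", val, off);
}

static void reload_triminfo(const struct regs *r, const struct pdata *p)
{
	uint32_t i;

	for (i = 0; i < r->triminfo_ctrl_count; i++)
		if (p->triminfo_reload[i])
			writel_sim(p->triminfo_reload[i], r->triminfo_ctrl[i]);
}

int main(void)
{
	struct regs r = { .triminfo_ctrl = { 0x10, 0x14 }, .triminfo_ctrl_count = 2 };
	struct pdata p = { .triminfo_reload = { 1, 1 } };

	reload_triminfo(&r, &p);
	return 0;
}
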
diff --git a/drivers/thermal/samsung/exynos_tmu_data.c b/drivers/thermal/samsung/exynos_tmu_data.c
index aa8e0dee2055..1724f6cdaef8 100644
--- a/drivers/thermal/samsung/exynos_tmu_data.c
+++ b/drivers/thermal/samsung/exynos_tmu_data.c
@@ -27,14 +27,7 @@
27#if defined(CONFIG_CPU_EXYNOS4210) 27#if defined(CONFIG_CPU_EXYNOS4210)
28static const struct exynos_tmu_registers exynos4210_tmu_registers = { 28static const struct exynos_tmu_registers exynos4210_tmu_registers = {
29 .triminfo_data = EXYNOS_TMU_REG_TRIMINFO, 29 .triminfo_data = EXYNOS_TMU_REG_TRIMINFO,
30 .triminfo_25_shift = EXYNOS_TRIMINFO_25_SHIFT,
31 .triminfo_85_shift = EXYNOS_TRIMINFO_85_SHIFT,
32 .tmu_ctrl = EXYNOS_TMU_REG_CONTROL, 30 .tmu_ctrl = EXYNOS_TMU_REG_CONTROL,
33 .buf_vref_sel_shift = EXYNOS_TMU_REF_VOLTAGE_SHIFT,
34 .buf_vref_sel_mask = EXYNOS_TMU_REF_VOLTAGE_MASK,
35 .buf_slope_sel_shift = EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT,
36 .buf_slope_sel_mask = EXYNOS_TMU_BUF_SLOPE_SEL_MASK,
37 .core_en_shift = EXYNOS_TMU_CORE_EN_SHIFT,
38 .tmu_status = EXYNOS_TMU_REG_STATUS, 31 .tmu_status = EXYNOS_TMU_REG_STATUS,
39 .tmu_cur_temp = EXYNOS_TMU_REG_CURRENT_TEMP, 32 .tmu_cur_temp = EXYNOS_TMU_REG_CURRENT_TEMP,
40 .threshold_temp = EXYNOS4210_TMU_REG_THRESHOLD_TEMP, 33 .threshold_temp = EXYNOS4210_TMU_REG_THRESHOLD_TEMP,
@@ -46,7 +39,6 @@ static const struct exynos_tmu_registers exynos4210_tmu_registers = {
46 .inten_rise3_shift = EXYNOS_TMU_INTEN_RISE3_SHIFT, 39 .inten_rise3_shift = EXYNOS_TMU_INTEN_RISE3_SHIFT,
47 .tmu_intstat = EXYNOS_TMU_REG_INTSTAT, 40 .tmu_intstat = EXYNOS_TMU_REG_INTSTAT,
48 .tmu_intclear = EXYNOS_TMU_REG_INTCLEAR, 41 .tmu_intclear = EXYNOS_TMU_REG_INTCLEAR,
49 .intclr_rise_mask = EXYNOS4210_TMU_TRIG_LEVEL_MASK,
50}; 42};
51 43
52struct exynos_tmu_init_data const exynos4210_default_tmu_data = { 44struct exynos_tmu_init_data const exynos4210_default_tmu_data = {
@@ -64,6 +56,7 @@ struct exynos_tmu_init_data const exynos4210_default_tmu_data = {
64 .trigger_type[1] = THROTTLE_ACTIVE, 56 .trigger_type[1] = THROTTLE_ACTIVE,
65 .trigger_type[2] = SW_TRIP, 57 .trigger_type[2] = SW_TRIP,
66 .max_trigger_level = 4, 58 .max_trigger_level = 4,
59 .non_hw_trigger_levels = 3,
67 .gain = 15, 60 .gain = 15,
68 .reference_voltage = 7, 61 .reference_voltage = 7,
69 .cal_type = TYPE_ONE_POINT_TRIMMING, 62 .cal_type = TYPE_ONE_POINT_TRIMMING,
@@ -93,18 +86,14 @@ struct exynos_tmu_init_data const exynos4210_default_tmu_data = {
93#if defined(CONFIG_SOC_EXYNOS3250) 86#if defined(CONFIG_SOC_EXYNOS3250)
94static const struct exynos_tmu_registers exynos3250_tmu_registers = { 87static const struct exynos_tmu_registers exynos3250_tmu_registers = {
95 .triminfo_data = EXYNOS_TMU_REG_TRIMINFO, 88 .triminfo_data = EXYNOS_TMU_REG_TRIMINFO,
96 .triminfo_25_shift = EXYNOS_TRIMINFO_25_SHIFT, 89 .triminfo_ctrl[0] = EXYNOS_TMU_TRIMINFO_CON1,
97 .triminfo_85_shift = EXYNOS_TRIMINFO_85_SHIFT, 90 .triminfo_ctrl[1] = EXYNOS_TMU_TRIMINFO_CON2,
91 .triminfo_ctrl_count = 2,
98 .tmu_ctrl = EXYNOS_TMU_REG_CONTROL, 92 .tmu_ctrl = EXYNOS_TMU_REG_CONTROL,
99 .test_mux_addr_shift = EXYNOS4412_MUX_ADDR_SHIFT, 93 .test_mux_addr_shift = EXYNOS4412_MUX_ADDR_SHIFT,
100 .buf_vref_sel_shift = EXYNOS_TMU_REF_VOLTAGE_SHIFT,
101 .buf_vref_sel_mask = EXYNOS_TMU_REF_VOLTAGE_MASK,
102 .therm_trip_mode_shift = EXYNOS_TMU_TRIP_MODE_SHIFT, 94 .therm_trip_mode_shift = EXYNOS_TMU_TRIP_MODE_SHIFT,
103 .therm_trip_mode_mask = EXYNOS_TMU_TRIP_MODE_MASK, 95 .therm_trip_mode_mask = EXYNOS_TMU_TRIP_MODE_MASK,
104 .therm_trip_en_shift = EXYNOS_TMU_THERM_TRIP_EN_SHIFT, 96 .therm_trip_en_shift = EXYNOS_TMU_THERM_TRIP_EN_SHIFT,
105 .buf_slope_sel_shift = EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT,
106 .buf_slope_sel_mask = EXYNOS_TMU_BUF_SLOPE_SEL_MASK,
107 .core_en_shift = EXYNOS_TMU_CORE_EN_SHIFT,
108 .tmu_status = EXYNOS_TMU_REG_STATUS, 97 .tmu_status = EXYNOS_TMU_REG_STATUS,
109 .tmu_cur_temp = EXYNOS_TMU_REG_CURRENT_TEMP, 98 .tmu_cur_temp = EXYNOS_TMU_REG_CURRENT_TEMP,
110 .threshold_th0 = EXYNOS_THD_TEMP_RISE, 99 .threshold_th0 = EXYNOS_THD_TEMP_RISE,
@@ -116,14 +105,9 @@ static const struct exynos_tmu_registers exynos3250_tmu_registers = {
116 .inten_fall0_shift = EXYNOS_TMU_INTEN_FALL0_SHIFT, 105 .inten_fall0_shift = EXYNOS_TMU_INTEN_FALL0_SHIFT,
117 .tmu_intstat = EXYNOS_TMU_REG_INTSTAT, 106 .tmu_intstat = EXYNOS_TMU_REG_INTSTAT,
118 .tmu_intclear = EXYNOS_TMU_REG_INTCLEAR, 107 .tmu_intclear = EXYNOS_TMU_REG_INTCLEAR,
119 .intclr_fall_shift = EXYNOS_TMU_CLEAR_FALL_INT_SHIFT,
120 .intclr_rise_shift = EXYNOS_TMU_RISE_INT_SHIFT,
121 .intclr_rise_mask = EXYNOS_TMU_RISE_INT_MASK,
122 .intclr_fall_mask = EXYNOS_TMU_FALL_INT_MASK,
123 .emul_con = EXYNOS_EMUL_CON, 108 .emul_con = EXYNOS_EMUL_CON,
124 .emul_temp_shift = EXYNOS_EMUL_DATA_SHIFT, 109 .emul_temp_shift = EXYNOS_EMUL_DATA_SHIFT,
125 .emul_time_shift = EXYNOS_EMUL_TIME_SHIFT, 110 .emul_time_shift = EXYNOS_EMUL_TIME_SHIFT,
126 .emul_time_mask = EXYNOS_EMUL_TIME_MASK,
127}; 111};
128 112
129#define EXYNOS3250_TMU_DATA \ 113#define EXYNOS3250_TMU_DATA \
@@ -141,6 +125,7 @@ static const struct exynos_tmu_registers exynos3250_tmu_registers = {
141 .trigger_type[2] = SW_TRIP, \ 125 .trigger_type[2] = SW_TRIP, \
142 .trigger_type[3] = HW_TRIP, \ 126 .trigger_type[3] = HW_TRIP, \
143 .max_trigger_level = 4, \ 127 .max_trigger_level = 4, \
128 .non_hw_trigger_levels = 3, \
144 .gain = 8, \ 129 .gain = 8, \
145 .reference_voltage = 16, \ 130 .reference_voltage = 16, \
146 .noise_cancel_mode = 4, \ 131 .noise_cancel_mode = 4, \
@@ -160,8 +145,10 @@ static const struct exynos_tmu_registers exynos3250_tmu_registers = {
160 .temp_level = 95, \ 145 .temp_level = 95, \
161 }, \ 146 }, \
162 .freq_tab_count = 2, \ 147 .freq_tab_count = 2, \
148 .triminfo_reload[0] = EXYNOS_TRIMINFO_RELOAD_ENABLE, \
149 .triminfo_reload[1] = EXYNOS_TRIMINFO_RELOAD_ENABLE, \
163 .registers = &exynos3250_tmu_registers, \ 150 .registers = &exynos3250_tmu_registers, \
164 .features = (TMU_SUPPORT_EMULATION | \ 151 .features = (TMU_SUPPORT_EMULATION | TMU_SUPPORT_TRIM_RELOAD | \
165 TMU_SUPPORT_FALLING_TRIP | TMU_SUPPORT_READY_STATUS | \ 152 TMU_SUPPORT_FALLING_TRIP | TMU_SUPPORT_READY_STATUS | \
166 TMU_SUPPORT_EMUL_TIME) 153 TMU_SUPPORT_EMUL_TIME)
167#endif 154#endif
@@ -182,20 +169,13 @@ struct exynos_tmu_init_data const exynos3250_default_tmu_data = {
182#if defined(CONFIG_SOC_EXYNOS4412) || defined(CONFIG_SOC_EXYNOS5250) 169#if defined(CONFIG_SOC_EXYNOS4412) || defined(CONFIG_SOC_EXYNOS5250)
183static const struct exynos_tmu_registers exynos4412_tmu_registers = { 170static const struct exynos_tmu_registers exynos4412_tmu_registers = {
184 .triminfo_data = EXYNOS_TMU_REG_TRIMINFO, 171 .triminfo_data = EXYNOS_TMU_REG_TRIMINFO,
185 .triminfo_25_shift = EXYNOS_TRIMINFO_25_SHIFT, 172 .triminfo_ctrl[0] = EXYNOS_TMU_TRIMINFO_CON2,
186 .triminfo_85_shift = EXYNOS_TRIMINFO_85_SHIFT, 173 .triminfo_ctrl_count = 1,
187 .triminfo_ctrl = EXYNOS_TMU_TRIMINFO_CON,
188 .triminfo_reload_shift = EXYNOS_TRIMINFO_RELOAD_SHIFT,
189 .tmu_ctrl = EXYNOS_TMU_REG_CONTROL, 174 .tmu_ctrl = EXYNOS_TMU_REG_CONTROL,
190 .test_mux_addr_shift = EXYNOS4412_MUX_ADDR_SHIFT, 175 .test_mux_addr_shift = EXYNOS4412_MUX_ADDR_SHIFT,
191 .buf_vref_sel_shift = EXYNOS_TMU_REF_VOLTAGE_SHIFT,
192 .buf_vref_sel_mask = EXYNOS_TMU_REF_VOLTAGE_MASK,
193 .therm_trip_mode_shift = EXYNOS_TMU_TRIP_MODE_SHIFT, 176 .therm_trip_mode_shift = EXYNOS_TMU_TRIP_MODE_SHIFT,
194 .therm_trip_mode_mask = EXYNOS_TMU_TRIP_MODE_MASK, 177 .therm_trip_mode_mask = EXYNOS_TMU_TRIP_MODE_MASK,
195 .therm_trip_en_shift = EXYNOS_TMU_THERM_TRIP_EN_SHIFT, 178 .therm_trip_en_shift = EXYNOS_TMU_THERM_TRIP_EN_SHIFT,
196 .buf_slope_sel_shift = EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT,
197 .buf_slope_sel_mask = EXYNOS_TMU_BUF_SLOPE_SEL_MASK,
198 .core_en_shift = EXYNOS_TMU_CORE_EN_SHIFT,
199 .tmu_status = EXYNOS_TMU_REG_STATUS, 179 .tmu_status = EXYNOS_TMU_REG_STATUS,
200 .tmu_cur_temp = EXYNOS_TMU_REG_CURRENT_TEMP, 180 .tmu_cur_temp = EXYNOS_TMU_REG_CURRENT_TEMP,
201 .threshold_th0 = EXYNOS_THD_TEMP_RISE, 181 .threshold_th0 = EXYNOS_THD_TEMP_RISE,
@@ -208,14 +188,9 @@ static const struct exynos_tmu_registers exynos4412_tmu_registers = {
208 .inten_fall0_shift = EXYNOS_TMU_INTEN_FALL0_SHIFT, 188 .inten_fall0_shift = EXYNOS_TMU_INTEN_FALL0_SHIFT,
209 .tmu_intstat = EXYNOS_TMU_REG_INTSTAT, 189 .tmu_intstat = EXYNOS_TMU_REG_INTSTAT,
210 .tmu_intclear = EXYNOS_TMU_REG_INTCLEAR, 190 .tmu_intclear = EXYNOS_TMU_REG_INTCLEAR,
211 .intclr_fall_shift = EXYNOS_TMU_CLEAR_FALL_INT_SHIFT,
212 .intclr_rise_shift = EXYNOS_TMU_RISE_INT_SHIFT,
213 .intclr_rise_mask = EXYNOS_TMU_RISE_INT_MASK,
214 .intclr_fall_mask = EXYNOS_TMU_FALL_INT_MASK,
215 .emul_con = EXYNOS_EMUL_CON, 191 .emul_con = EXYNOS_EMUL_CON,
216 .emul_temp_shift = EXYNOS_EMUL_DATA_SHIFT, 192 .emul_temp_shift = EXYNOS_EMUL_DATA_SHIFT,
217 .emul_time_shift = EXYNOS_EMUL_TIME_SHIFT, 193 .emul_time_shift = EXYNOS_EMUL_TIME_SHIFT,
218 .emul_time_mask = EXYNOS_EMUL_TIME_MASK,
219}; 194};
220 195
221#define EXYNOS4412_TMU_DATA \ 196#define EXYNOS4412_TMU_DATA \
@@ -233,6 +208,7 @@ static const struct exynos_tmu_registers exynos4412_tmu_registers = {
233 .trigger_type[2] = SW_TRIP, \ 208 .trigger_type[2] = SW_TRIP, \
234 .trigger_type[3] = HW_TRIP, \ 209 .trigger_type[3] = HW_TRIP, \
235 .max_trigger_level = 4, \ 210 .max_trigger_level = 4, \
211 .non_hw_trigger_levels = 3, \
236 .gain = 8, \ 212 .gain = 8, \
237 .reference_voltage = 16, \ 213 .reference_voltage = 16, \
238 .noise_cancel_mode = 4, \ 214 .noise_cancel_mode = 4, \
@@ -252,6 +228,7 @@ static const struct exynos_tmu_registers exynos4412_tmu_registers = {
252 .temp_level = 95, \ 228 .temp_level = 95, \
253 }, \ 229 }, \
254 .freq_tab_count = 2, \ 230 .freq_tab_count = 2, \
231 .triminfo_reload[0] = EXYNOS_TRIMINFO_RELOAD_ENABLE, \
255 .registers = &exynos4412_tmu_registers, \ 232 .registers = &exynos4412_tmu_registers, \
256 .features = (TMU_SUPPORT_EMULATION | TMU_SUPPORT_TRIM_RELOAD | \ 233 .features = (TMU_SUPPORT_EMULATION | TMU_SUPPORT_TRIM_RELOAD | \
257 TMU_SUPPORT_FALLING_TRIP | TMU_SUPPORT_READY_STATUS | \ 234 TMU_SUPPORT_FALLING_TRIP | TMU_SUPPORT_READY_STATUS | \
@@ -286,18 +263,10 @@ struct exynos_tmu_init_data const exynos5250_default_tmu_data = {
286#if defined(CONFIG_SOC_EXYNOS5260) 263#if defined(CONFIG_SOC_EXYNOS5260)
287static const struct exynos_tmu_registers exynos5260_tmu_registers = { 264static const struct exynos_tmu_registers exynos5260_tmu_registers = {
288 .triminfo_data = EXYNOS_TMU_REG_TRIMINFO, 265 .triminfo_data = EXYNOS_TMU_REG_TRIMINFO,
289 .triminfo_25_shift = EXYNOS_TRIMINFO_25_SHIFT,
290 .triminfo_85_shift = EXYNOS_TRIMINFO_85_SHIFT,
291 .tmu_ctrl = EXYNOS_TMU_REG_CONTROL, 266 .tmu_ctrl = EXYNOS_TMU_REG_CONTROL,
292 .tmu_ctrl = EXYNOS_TMU_REG_CONTROL1,
293 .buf_vref_sel_shift = EXYNOS_TMU_REF_VOLTAGE_SHIFT,
294 .buf_vref_sel_mask = EXYNOS_TMU_REF_VOLTAGE_MASK,
295 .therm_trip_mode_shift = EXYNOS_TMU_TRIP_MODE_SHIFT, 267 .therm_trip_mode_shift = EXYNOS_TMU_TRIP_MODE_SHIFT,
296 .therm_trip_mode_mask = EXYNOS_TMU_TRIP_MODE_MASK, 268 .therm_trip_mode_mask = EXYNOS_TMU_TRIP_MODE_MASK,
297 .therm_trip_en_shift = EXYNOS_TMU_THERM_TRIP_EN_SHIFT, 269 .therm_trip_en_shift = EXYNOS_TMU_THERM_TRIP_EN_SHIFT,
298 .buf_slope_sel_shift = EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT,
299 .buf_slope_sel_mask = EXYNOS_TMU_BUF_SLOPE_SEL_MASK,
300 .core_en_shift = EXYNOS_TMU_CORE_EN_SHIFT,
301 .tmu_status = EXYNOS_TMU_REG_STATUS, 270 .tmu_status = EXYNOS_TMU_REG_STATUS,
302 .tmu_cur_temp = EXYNOS_TMU_REG_CURRENT_TEMP, 271 .tmu_cur_temp = EXYNOS_TMU_REG_CURRENT_TEMP,
303 .threshold_th0 = EXYNOS_THD_TEMP_RISE, 272 .threshold_th0 = EXYNOS_THD_TEMP_RISE,
@@ -310,14 +279,9 @@ static const struct exynos_tmu_registers exynos5260_tmu_registers = {
310 .inten_fall0_shift = EXYNOS_TMU_INTEN_FALL0_SHIFT, 279 .inten_fall0_shift = EXYNOS_TMU_INTEN_FALL0_SHIFT,
311 .tmu_intstat = EXYNOS5260_TMU_REG_INTSTAT, 280 .tmu_intstat = EXYNOS5260_TMU_REG_INTSTAT,
312 .tmu_intclear = EXYNOS5260_TMU_REG_INTCLEAR, 281 .tmu_intclear = EXYNOS5260_TMU_REG_INTCLEAR,
313 .intclr_fall_shift = EXYNOS5420_TMU_CLEAR_FALL_INT_SHIFT,
314 .intclr_rise_shift = EXYNOS_TMU_RISE_INT_SHIFT,
315 .intclr_rise_mask = EXYNOS5260_TMU_RISE_INT_MASK,
316 .intclr_fall_mask = EXYNOS5260_TMU_FALL_INT_MASK,
317 .emul_con = EXYNOS5260_EMUL_CON, 282 .emul_con = EXYNOS5260_EMUL_CON,
318 .emul_temp_shift = EXYNOS_EMUL_DATA_SHIFT, 283 .emul_temp_shift = EXYNOS_EMUL_DATA_SHIFT,
319 .emul_time_shift = EXYNOS_EMUL_TIME_SHIFT, 284 .emul_time_shift = EXYNOS_EMUL_TIME_SHIFT,
320 .emul_time_mask = EXYNOS_EMUL_TIME_MASK,
321}; 285};
322 286
323#define __EXYNOS5260_TMU_DATA \ 287#define __EXYNOS5260_TMU_DATA \
@@ -335,6 +299,7 @@ static const struct exynos_tmu_registers exynos5260_tmu_registers = {
335 .trigger_type[2] = SW_TRIP, \ 299 .trigger_type[2] = SW_TRIP, \
336 .trigger_type[3] = HW_TRIP, \ 300 .trigger_type[3] = HW_TRIP, \
337 .max_trigger_level = 4, \ 301 .max_trigger_level = 4, \
302 .non_hw_trigger_levels = 3, \
338 .gain = 8, \ 303 .gain = 8, \
339 .reference_voltage = 16, \ 304 .reference_voltage = 16, \
340 .noise_cancel_mode = 4, \ 305 .noise_cancel_mode = 4, \
@@ -359,9 +324,8 @@ static const struct exynos_tmu_registers exynos5260_tmu_registers = {
359#define EXYNOS5260_TMU_DATA \ 324#define EXYNOS5260_TMU_DATA \
360 __EXYNOS5260_TMU_DATA \ 325 __EXYNOS5260_TMU_DATA \
361 .type = SOC_ARCH_EXYNOS5260, \ 326 .type = SOC_ARCH_EXYNOS5260, \
362 .features = (TMU_SUPPORT_EMULATION | TMU_SUPPORT_TRIM_RELOAD | \ 327 .features = (TMU_SUPPORT_EMULATION | TMU_SUPPORT_FALLING_TRIP | \
363 TMU_SUPPORT_FALLING_TRIP | TMU_SUPPORT_READY_STATUS | \ 328 TMU_SUPPORT_READY_STATUS | TMU_SUPPORT_EMUL_TIME)
364 TMU_SUPPORT_EMUL_TIME)
365 329
366struct exynos_tmu_init_data const exynos5260_default_tmu_data = { 330struct exynos_tmu_init_data const exynos5260_default_tmu_data = {
367 .tmu_data = { 331 .tmu_data = {
@@ -378,17 +342,10 @@ struct exynos_tmu_init_data const exynos5260_default_tmu_data = {
378#if defined(CONFIG_SOC_EXYNOS5420) 342#if defined(CONFIG_SOC_EXYNOS5420)
379static const struct exynos_tmu_registers exynos5420_tmu_registers = { 343static const struct exynos_tmu_registers exynos5420_tmu_registers = {
380 .triminfo_data = EXYNOS_TMU_REG_TRIMINFO, 344 .triminfo_data = EXYNOS_TMU_REG_TRIMINFO,
381 .triminfo_25_shift = EXYNOS_TRIMINFO_25_SHIFT,
382 .triminfo_85_shift = EXYNOS_TRIMINFO_85_SHIFT,
383 .tmu_ctrl = EXYNOS_TMU_REG_CONTROL, 345 .tmu_ctrl = EXYNOS_TMU_REG_CONTROL,
384 .buf_vref_sel_shift = EXYNOS_TMU_REF_VOLTAGE_SHIFT,
385 .buf_vref_sel_mask = EXYNOS_TMU_REF_VOLTAGE_MASK,
386 .therm_trip_mode_shift = EXYNOS_TMU_TRIP_MODE_SHIFT, 346 .therm_trip_mode_shift = EXYNOS_TMU_TRIP_MODE_SHIFT,
387 .therm_trip_mode_mask = EXYNOS_TMU_TRIP_MODE_MASK, 347 .therm_trip_mode_mask = EXYNOS_TMU_TRIP_MODE_MASK,
388 .therm_trip_en_shift = EXYNOS_TMU_THERM_TRIP_EN_SHIFT, 348 .therm_trip_en_shift = EXYNOS_TMU_THERM_TRIP_EN_SHIFT,
389 .buf_slope_sel_shift = EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT,
390 .buf_slope_sel_mask = EXYNOS_TMU_BUF_SLOPE_SEL_MASK,
391 .core_en_shift = EXYNOS_TMU_CORE_EN_SHIFT,
392 .tmu_status = EXYNOS_TMU_REG_STATUS, 349 .tmu_status = EXYNOS_TMU_REG_STATUS,
393 .tmu_cur_temp = EXYNOS_TMU_REG_CURRENT_TEMP, 350 .tmu_cur_temp = EXYNOS_TMU_REG_CURRENT_TEMP,
394 .threshold_th0 = EXYNOS_THD_TEMP_RISE, 351 .threshold_th0 = EXYNOS_THD_TEMP_RISE,
@@ -402,14 +359,9 @@ static const struct exynos_tmu_registers exynos5420_tmu_registers = {
402 .inten_fall0_shift = EXYNOS_TMU_INTEN_FALL0_SHIFT, 359 .inten_fall0_shift = EXYNOS_TMU_INTEN_FALL0_SHIFT,
403 .tmu_intstat = EXYNOS_TMU_REG_INTSTAT, 360 .tmu_intstat = EXYNOS_TMU_REG_INTSTAT,
404 .tmu_intclear = EXYNOS_TMU_REG_INTCLEAR, 361 .tmu_intclear = EXYNOS_TMU_REG_INTCLEAR,
405 .intclr_fall_shift = EXYNOS5420_TMU_CLEAR_FALL_INT_SHIFT,
406 .intclr_rise_shift = EXYNOS_TMU_RISE_INT_SHIFT,
407 .intclr_rise_mask = EXYNOS_TMU_RISE_INT_MASK,
408 .intclr_fall_mask = EXYNOS_TMU_FALL_INT_MASK,
409 .emul_con = EXYNOS_EMUL_CON, 362 .emul_con = EXYNOS_EMUL_CON,
410 .emul_temp_shift = EXYNOS_EMUL_DATA_SHIFT, 363 .emul_temp_shift = EXYNOS_EMUL_DATA_SHIFT,
411 .emul_time_shift = EXYNOS_EMUL_TIME_SHIFT, 364 .emul_time_shift = EXYNOS_EMUL_TIME_SHIFT,
412 .emul_time_mask = EXYNOS_EMUL_TIME_MASK,
413}; 365};
414 366
415#define __EXYNOS5420_TMU_DATA \ 367#define __EXYNOS5420_TMU_DATA \
@@ -427,6 +379,7 @@ static const struct exynos_tmu_registers exynos5420_tmu_registers = {
427 .trigger_type[2] = SW_TRIP, \ 379 .trigger_type[2] = SW_TRIP, \
428 .trigger_type[3] = HW_TRIP, \ 380 .trigger_type[3] = HW_TRIP, \
429 .max_trigger_level = 4, \ 381 .max_trigger_level = 4, \
382 .non_hw_trigger_levels = 3, \
430 .gain = 8, \ 383 .gain = 8, \
431 .reference_voltage = 16, \ 384 .reference_voltage = 16, \
432 .noise_cancel_mode = 4, \ 385 .noise_cancel_mode = 4, \
@@ -451,16 +404,15 @@ static const struct exynos_tmu_registers exynos5420_tmu_registers = {
451#define EXYNOS5420_TMU_DATA \ 404#define EXYNOS5420_TMU_DATA \
452 __EXYNOS5420_TMU_DATA \ 405 __EXYNOS5420_TMU_DATA \
453 .type = SOC_ARCH_EXYNOS5250, \ 406 .type = SOC_ARCH_EXYNOS5250, \
454 .features = (TMU_SUPPORT_EMULATION | TMU_SUPPORT_TRIM_RELOAD | \ 407 .features = (TMU_SUPPORT_EMULATION | TMU_SUPPORT_FALLING_TRIP | \
455 TMU_SUPPORT_FALLING_TRIP | TMU_SUPPORT_READY_STATUS | \ 408 TMU_SUPPORT_READY_STATUS | TMU_SUPPORT_EMUL_TIME)
456 TMU_SUPPORT_EMUL_TIME)
457 409
458#define EXYNOS5420_TMU_DATA_SHARED \ 410#define EXYNOS5420_TMU_DATA_SHARED \
459 __EXYNOS5420_TMU_DATA \ 411 __EXYNOS5420_TMU_DATA \
460 .type = SOC_ARCH_EXYNOS5420_TRIMINFO, \ 412 .type = SOC_ARCH_EXYNOS5420_TRIMINFO, \
461 .features = (TMU_SUPPORT_EMULATION | TMU_SUPPORT_TRIM_RELOAD | \ 413 .features = (TMU_SUPPORT_EMULATION | TMU_SUPPORT_FALLING_TRIP | \
462 TMU_SUPPORT_FALLING_TRIP | TMU_SUPPORT_READY_STATUS | \ 414 TMU_SUPPORT_READY_STATUS | TMU_SUPPORT_EMUL_TIME | \
463 TMU_SUPPORT_EMUL_TIME | TMU_SUPPORT_ADDRESS_MULTIPLE) 415 TMU_SUPPORT_ADDRESS_MULTIPLE)
464 416
465struct exynos_tmu_init_data const exynos5420_default_tmu_data = { 417struct exynos_tmu_init_data const exynos5420_default_tmu_data = {
466 .tmu_data = { 418 .tmu_data = {
@@ -477,19 +429,10 @@ struct exynos_tmu_init_data const exynos5420_default_tmu_data = {
477#if defined(CONFIG_SOC_EXYNOS5440) 429#if defined(CONFIG_SOC_EXYNOS5440)
478static const struct exynos_tmu_registers exynos5440_tmu_registers = { 430static const struct exynos_tmu_registers exynos5440_tmu_registers = {
479 .triminfo_data = EXYNOS5440_TMU_S0_7_TRIM, 431 .triminfo_data = EXYNOS5440_TMU_S0_7_TRIM,
480 .triminfo_25_shift = EXYNOS_TRIMINFO_25_SHIFT,
481 .triminfo_85_shift = EXYNOS_TRIMINFO_85_SHIFT,
482 .tmu_ctrl = EXYNOS5440_TMU_S0_7_CTRL, 432 .tmu_ctrl = EXYNOS5440_TMU_S0_7_CTRL,
483 .buf_vref_sel_shift = EXYNOS_TMU_REF_VOLTAGE_SHIFT,
484 .buf_vref_sel_mask = EXYNOS_TMU_REF_VOLTAGE_MASK,
485 .therm_trip_mode_shift = EXYNOS_TMU_TRIP_MODE_SHIFT, 433 .therm_trip_mode_shift = EXYNOS_TMU_TRIP_MODE_SHIFT,
486 .therm_trip_mode_mask = EXYNOS_TMU_TRIP_MODE_MASK, 434 .therm_trip_mode_mask = EXYNOS_TMU_TRIP_MODE_MASK,
487 .therm_trip_en_shift = EXYNOS_TMU_THERM_TRIP_EN_SHIFT, 435 .therm_trip_en_shift = EXYNOS_TMU_THERM_TRIP_EN_SHIFT,
488 .buf_slope_sel_shift = EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT,
489 .buf_slope_sel_mask = EXYNOS_TMU_BUF_SLOPE_SEL_MASK,
490 .calib_mode_shift = EXYNOS_TMU_CALIB_MODE_SHIFT,
491 .calib_mode_mask = EXYNOS_TMU_CALIB_MODE_MASK,
492 .core_en_shift = EXYNOS_TMU_CORE_EN_SHIFT,
493 .tmu_status = EXYNOS5440_TMU_S0_7_STATUS, 436 .tmu_status = EXYNOS5440_TMU_S0_7_STATUS,
494 .tmu_cur_temp = EXYNOS5440_TMU_S0_7_TEMP, 437 .tmu_cur_temp = EXYNOS5440_TMU_S0_7_TEMP,
495 .threshold_th0 = EXYNOS5440_TMU_S0_7_TH0, 438 .threshold_th0 = EXYNOS5440_TMU_S0_7_TH0,
@@ -504,10 +447,6 @@ static const struct exynos_tmu_registers exynos5440_tmu_registers = {
504 .inten_fall0_shift = EXYNOS5440_TMU_INTEN_FALL0_SHIFT, 447 .inten_fall0_shift = EXYNOS5440_TMU_INTEN_FALL0_SHIFT,
505 .tmu_intstat = EXYNOS5440_TMU_S0_7_IRQ, 448 .tmu_intstat = EXYNOS5440_TMU_S0_7_IRQ,
506 .tmu_intclear = EXYNOS5440_TMU_S0_7_IRQ, 449 .tmu_intclear = EXYNOS5440_TMU_S0_7_IRQ,
507 .intclr_fall_shift = EXYNOS5440_TMU_CLEAR_FALL_INT_SHIFT,
508 .intclr_rise_shift = EXYNOS5440_TMU_RISE_INT_SHIFT,
509 .intclr_rise_mask = EXYNOS5440_TMU_RISE_INT_MASK,
510 .intclr_fall_mask = EXYNOS5440_TMU_FALL_INT_MASK,
511 .tmu_irqstatus = EXYNOS5440_TMU_IRQ_STATUS, 450 .tmu_irqstatus = EXYNOS5440_TMU_IRQ_STATUS,
512 .emul_con = EXYNOS5440_TMU_S0_7_DEBUG, 451 .emul_con = EXYNOS5440_TMU_S0_7_DEBUG,
513 .emul_temp_shift = EXYNOS_EMUL_DATA_SHIFT, 452 .emul_temp_shift = EXYNOS_EMUL_DATA_SHIFT,
@@ -521,11 +460,11 @@ static const struct exynos_tmu_registers exynos5440_tmu_registers = {
521 .trigger_type[0] = SW_TRIP, \ 460 .trigger_type[0] = SW_TRIP, \
522 .trigger_type[4] = HW_TRIP, \ 461 .trigger_type[4] = HW_TRIP, \
523 .max_trigger_level = 5, \ 462 .max_trigger_level = 5, \
463 .non_hw_trigger_levels = 1, \
524 .gain = 5, \ 464 .gain = 5, \
525 .reference_voltage = 16, \ 465 .reference_voltage = 16, \
526 .noise_cancel_mode = 4, \ 466 .noise_cancel_mode = 4, \
527 .cal_type = TYPE_ONE_POINT_TRIMMING, \ 467 .cal_type = TYPE_ONE_POINT_TRIMMING, \
528 .cal_mode = 0, \
529 .efuse_value = 0x5b2d, \ 468 .efuse_value = 0x5b2d, \
530 .min_efuse_value = 16, \ 469 .min_efuse_value = 16, \
531 .max_efuse_value = 76, \ 470 .max_efuse_value = 76, \
diff --git a/drivers/thermal/samsung/exynos_tmu_data.h b/drivers/thermal/samsung/exynos_tmu_data.h
index f0979e598491..63de598c9c2c 100644
--- a/drivers/thermal/samsung/exynos_tmu_data.h
+++ b/drivers/thermal/samsung/exynos_tmu_data.h
@@ -39,55 +39,31 @@
39#define EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT 8 39#define EXYNOS_TMU_BUF_SLOPE_SEL_SHIFT 8
40#define EXYNOS_TMU_CORE_EN_SHIFT 0 40#define EXYNOS_TMU_CORE_EN_SHIFT 0
41 41
42/* Exynos3250 specific registers */
43#define EXYNOS_TMU_TRIMINFO_CON1 0x10
44
42/* Exynos4210 specific registers */ 45/* Exynos4210 specific registers */
43#define EXYNOS4210_TMU_REG_THRESHOLD_TEMP 0x44 46#define EXYNOS4210_TMU_REG_THRESHOLD_TEMP 0x44
44#define EXYNOS4210_TMU_REG_TRIG_LEVEL0 0x50 47#define EXYNOS4210_TMU_REG_TRIG_LEVEL0 0x50
45#define EXYNOS4210_TMU_REG_TRIG_LEVEL1 0x54 48
46#define EXYNOS4210_TMU_REG_TRIG_LEVEL2 0x58 49/* Exynos5250, Exynos4412, Exynos3250 specific registers */
47#define EXYNOS4210_TMU_REG_TRIG_LEVEL3 0x5C 50#define EXYNOS_TMU_TRIMINFO_CON2 0x14
48#define EXYNOS4210_TMU_REG_PAST_TEMP0 0x60
49#define EXYNOS4210_TMU_REG_PAST_TEMP1 0x64
50#define EXYNOS4210_TMU_REG_PAST_TEMP2 0x68
51#define EXYNOS4210_TMU_REG_PAST_TEMP3 0x6C
52
53#define EXYNOS4210_TMU_TRIG_LEVEL0_MASK 0x1
54#define EXYNOS4210_TMU_TRIG_LEVEL1_MASK 0x10
55#define EXYNOS4210_TMU_TRIG_LEVEL2_MASK 0x100
56#define EXYNOS4210_TMU_TRIG_LEVEL3_MASK 0x1000
57#define EXYNOS4210_TMU_TRIG_LEVEL_MASK 0x1111
58#define EXYNOS4210_TMU_INTCLEAR_VAL 0x1111
59
60/* Exynos5250 and Exynos4412 specific registers */
61#define EXYNOS_TMU_TRIMINFO_CON 0x14
62#define EXYNOS_THD_TEMP_RISE 0x50 51#define EXYNOS_THD_TEMP_RISE 0x50
63#define EXYNOS_THD_TEMP_FALL 0x54 52#define EXYNOS_THD_TEMP_FALL 0x54
64#define EXYNOS_EMUL_CON 0x80 53#define EXYNOS_EMUL_CON 0x80
65 54
66#define EXYNOS_TRIMINFO_RELOAD_SHIFT 1 55#define EXYNOS_TRIMINFO_RELOAD_ENABLE 1
67#define EXYNOS_TRIMINFO_25_SHIFT 0 56#define EXYNOS_TRIMINFO_25_SHIFT 0
68#define EXYNOS_TRIMINFO_85_SHIFT 8 57#define EXYNOS_TRIMINFO_85_SHIFT 8
69#define EXYNOS_TMU_RISE_INT_MASK 0x111
70#define EXYNOS_TMU_RISE_INT_SHIFT 0
71#define EXYNOS_TMU_FALL_INT_MASK 0x111
72#define EXYNOS_TMU_CLEAR_RISE_INT 0x111
73#define EXYNOS_TMU_CLEAR_FALL_INT (0x111 << 12)
74#define EXYNOS_TMU_CLEAR_FALL_INT_SHIFT 12
75#define EXYNOS5420_TMU_CLEAR_FALL_INT_SHIFT 16
76#define EXYNOS5440_TMU_CLEAR_FALL_INT_SHIFT 4
77#define EXYNOS_TMU_TRIP_MODE_SHIFT 13 58#define EXYNOS_TMU_TRIP_MODE_SHIFT 13
78#define EXYNOS_TMU_TRIP_MODE_MASK 0x7 59#define EXYNOS_TMU_TRIP_MODE_MASK 0x7
79#define EXYNOS_TMU_THERM_TRIP_EN_SHIFT 12 60#define EXYNOS_TMU_THERM_TRIP_EN_SHIFT 12
80#define EXYNOS_TMU_CALIB_MODE_SHIFT 4
81#define EXYNOS_TMU_CALIB_MODE_MASK 0x3
82 61
83#define EXYNOS_TMU_INTEN_RISE0_SHIFT 0 62#define EXYNOS_TMU_INTEN_RISE0_SHIFT 0
84#define EXYNOS_TMU_INTEN_RISE1_SHIFT 4 63#define EXYNOS_TMU_INTEN_RISE1_SHIFT 4
85#define EXYNOS_TMU_INTEN_RISE2_SHIFT 8 64#define EXYNOS_TMU_INTEN_RISE2_SHIFT 8
86#define EXYNOS_TMU_INTEN_RISE3_SHIFT 12 65#define EXYNOS_TMU_INTEN_RISE3_SHIFT 12
87#define EXYNOS_TMU_INTEN_FALL0_SHIFT 16 66#define EXYNOS_TMU_INTEN_FALL0_SHIFT 16
88#define EXYNOS_TMU_INTEN_FALL1_SHIFT 20
89#define EXYNOS_TMU_INTEN_FALL2_SHIFT 24
90#define EXYNOS_TMU_INTEN_FALL3_SHIFT 28
91 67
92#define EXYNOS_EMUL_TIME 0x57F0 68#define EXYNOS_EMUL_TIME 0x57F0
93#define EXYNOS_EMUL_TIME_MASK 0xffff 69#define EXYNOS_EMUL_TIME_MASK 0xffff
@@ -99,14 +75,9 @@
99#define EXYNOS_MAX_TRIGGER_PER_REG 4 75#define EXYNOS_MAX_TRIGGER_PER_REG 4
100 76
101/* Exynos5260 specific */ 77/* Exynos5260 specific */
102#define EXYNOS_TMU_REG_CONTROL1 0x24
103#define EXYNOS5260_TMU_REG_INTEN 0xC0 78#define EXYNOS5260_TMU_REG_INTEN 0xC0
104#define EXYNOS5260_TMU_REG_INTSTAT 0xC4 79#define EXYNOS5260_TMU_REG_INTSTAT 0xC4
105#define EXYNOS5260_TMU_REG_INTCLEAR 0xC8 80#define EXYNOS5260_TMU_REG_INTCLEAR 0xC8
106#define EXYNOS5260_TMU_CLEAR_RISE_INT 0x1111
107#define EXYNOS5260_TMU_CLEAR_FALL_INT (0x1111 << 16)
108#define EXYNOS5260_TMU_RISE_INT_MASK 0x1111
109#define EXYNOS5260_TMU_FALL_INT_MASK 0x1111
110#define EXYNOS5260_EMUL_CON 0x100 81#define EXYNOS5260_EMUL_CON 0x100
111 82
112/* Exynos4412 specific */ 83/* Exynos4412 specific */
@@ -122,29 +93,17 @@
122#define EXYNOS5440_TMU_S0_7_TH0 0x110 93#define EXYNOS5440_TMU_S0_7_TH0 0x110
123#define EXYNOS5440_TMU_S0_7_TH1 0x130 94#define EXYNOS5440_TMU_S0_7_TH1 0x130
124#define EXYNOS5440_TMU_S0_7_TH2 0x150 95#define EXYNOS5440_TMU_S0_7_TH2 0x150
125#define EXYNOS5440_TMU_S0_7_EVTEN 0x1F0
126#define EXYNOS5440_TMU_S0_7_IRQEN 0x210 96#define EXYNOS5440_TMU_S0_7_IRQEN 0x210
127#define EXYNOS5440_TMU_S0_7_IRQ 0x230 97#define EXYNOS5440_TMU_S0_7_IRQ 0x230
128/* exynos5440 common registers */ 98/* exynos5440 common registers */
129#define EXYNOS5440_TMU_IRQ_STATUS 0x000 99#define EXYNOS5440_TMU_IRQ_STATUS 0x000
130#define EXYNOS5440_TMU_PMIN 0x004 100#define EXYNOS5440_TMU_PMIN 0x004
131#define EXYNOS5440_TMU_TEMP 0x008
132 101
133#define EXYNOS5440_TMU_RISE_INT_MASK 0xf
134#define EXYNOS5440_TMU_RISE_INT_SHIFT 0
135#define EXYNOS5440_TMU_FALL_INT_MASK 0xf
136#define EXYNOS5440_TMU_INTEN_RISE0_SHIFT 0 102#define EXYNOS5440_TMU_INTEN_RISE0_SHIFT 0
137#define EXYNOS5440_TMU_INTEN_RISE1_SHIFT 1 103#define EXYNOS5440_TMU_INTEN_RISE1_SHIFT 1
138#define EXYNOS5440_TMU_INTEN_RISE2_SHIFT 2 104#define EXYNOS5440_TMU_INTEN_RISE2_SHIFT 2
139#define EXYNOS5440_TMU_INTEN_RISE3_SHIFT 3 105#define EXYNOS5440_TMU_INTEN_RISE3_SHIFT 3
140#define EXYNOS5440_TMU_INTEN_FALL0_SHIFT 4 106#define EXYNOS5440_TMU_INTEN_FALL0_SHIFT 4
141#define EXYNOS5440_TMU_INTEN_FALL1_SHIFT 5
142#define EXYNOS5440_TMU_INTEN_FALL2_SHIFT 6
143#define EXYNOS5440_TMU_INTEN_FALL3_SHIFT 7
144#define EXYNOS5440_TMU_TH_RISE0_SHIFT 0
145#define EXYNOS5440_TMU_TH_RISE1_SHIFT 8
146#define EXYNOS5440_TMU_TH_RISE2_SHIFT 16
147#define EXYNOS5440_TMU_TH_RISE3_SHIFT 24
148#define EXYNOS5440_TMU_TH_RISE4_SHIFT 24 107#define EXYNOS5440_TMU_TH_RISE4_SHIFT 24
149#define EXYNOS5440_EFUSE_SWAP_OFFSET 8 108#define EXYNOS5440_EFUSE_SWAP_OFFSET 8
150 109
diff --git a/drivers/thermal/st/st_thermal.c b/drivers/thermal/st/st_thermal.c
index 90163b384660..d1ec5804c0bb 100644
--- a/drivers/thermal/st/st_thermal.c
+++ b/drivers/thermal/st/st_thermal.c
@@ -275,6 +275,7 @@ int st_thermal_unregister(struct platform_device *pdev)
275} 275}
276EXPORT_SYMBOL_GPL(st_thermal_unregister); 276EXPORT_SYMBOL_GPL(st_thermal_unregister);
277 277
278#ifdef CONFIG_PM_SLEEP
278static int st_thermal_suspend(struct device *dev) 279static int st_thermal_suspend(struct device *dev)
279{ 280{
280 struct platform_device *pdev = to_platform_device(dev); 281 struct platform_device *pdev = to_platform_device(dev);
@@ -305,6 +306,8 @@ static int st_thermal_resume(struct device *dev)
305 306
306 return 0; 307 return 0;
307} 308}
309#endif
310
308SIMPLE_DEV_PM_OPS(st_thermal_pm_ops, st_thermal_suspend, st_thermal_resume); 311SIMPLE_DEV_PM_OPS(st_thermal_pm_ops, st_thermal_suspend, st_thermal_resume);
309EXPORT_SYMBOL_GPL(st_thermal_pm_ops); 312EXPORT_SYMBOL_GPL(st_thermal_pm_ops);
310 313
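
Note: the st_thermal.c hunk wraps the suspend/resume callbacks in CONFIG_PM_SLEEP, presumably because SIMPLE_DEV_PM_OPS only references them when that option is set, leaving them otherwise unused. A reduced standalone model of the pattern; the macro and struct here are stand-ins for the kernel ones:

#include <stdio.h>

struct dev_pm_ops { int (*suspend)(void); int (*resume)(void); };

#ifdef CONFIG_PM_SLEEP
static int st_suspend(void) { puts("suspend"); return 0; }
static int st_resume(void)  { puts("resume");  return 0; }
#define SIMPLE_PM_OPS(s, r) { .suspend = (s), .resume = (r) }
#else
/* Callbacks are compiled out entirely; the ops table holds no pointers. */
#define SIMPLE_PM_OPS(s, r) { 0 }
#endif

static const struct dev_pm_ops pm_ops = SIMPLE_PM_OPS(st_suspend, st_resume);

int main(void)
{
	if (pm_ops.suspend)
		pm_ops.suspend();
	return 0;
}
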
diff --git a/drivers/thermal/step_wise.c b/drivers/thermal/step_wise.c
index f251521baaa2..fdd1f523a1ed 100644
--- a/drivers/thermal/step_wise.c
+++ b/drivers/thermal/step_wise.c
@@ -23,6 +23,7 @@
23 */ 23 */
24 24
25#include <linux/thermal.h> 25#include <linux/thermal.h>
26#include <trace/events/thermal.h>
26 27
27#include "thermal_core.h" 28#include "thermal_core.h"
28 29
@@ -76,7 +77,7 @@ static unsigned long get_target_state(struct thermal_instance *instance,
76 next_target = instance->upper; 77 next_target = instance->upper;
77 break; 78 break;
78 case THERMAL_TREND_DROPPING: 79 case THERMAL_TREND_DROPPING:
79 if (cur_state == instance->lower) { 80 if (cur_state <= instance->lower) {
80 if (!throttle) 81 if (!throttle)
81 next_target = THERMAL_NO_TARGET; 82 next_target = THERMAL_NO_TARGET;
82 } else { 83 } else {
@@ -129,8 +130,10 @@ static void thermal_zone_trip_update(struct thermal_zone_device *tz, int trip)
129 130
130 trend = get_tz_trend(tz, trip); 131 trend = get_tz_trend(tz, trip);
131 132
132 if (tz->temperature >= trip_temp) 133 if (tz->temperature >= trip_temp) {
133 throttle = true; 134 throttle = true;
135 trace_thermal_zone_trip(tz, trip, trip_type);
136 }
134 137
135 dev_dbg(&tz->device, "Trip%d[type=%d,temp=%ld]:trend=%d,throttle=%d\n", 138 dev_dbg(&tz->device, "Trip%d[type=%d,temp=%ld]:trend=%d,throttle=%d\n",
136 trip, trip_type, trip_temp, trend, throttle); 139 trip, trip_type, trip_temp, trend, throttle);
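
Note: the step_wise change relaxes the dropping-trend comparison from == to <=, so a cooling state already below the instance floor is treated like one sitting on it, and it adds a trace point when a trip throttles. A simplified model of the dropping branch, with illustrative names and a reduced return convention:

#include <stdio.h>

#define NO_TARGET (~0UL)

static unsigned long dropping_target(unsigned long cur, unsigned long lower,
				     int throttle)
{
	if (cur <= lower)
		return throttle ? lower : NO_TARGET;
	return cur - 1;                 /* step one level down */
}

int main(void)
{
	printf("%lu\n", dropping_target(3, 1, 1));  /* 2: step down */
	printf("%lu\n", dropping_target(1, 1, 1));  /* 1: hold at the floor */
	printf("%lu\n", dropping_target(0, 1, 0));  /* NO_TARGET (prints as a huge value) */
	return 0;
}
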
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 1e23f4f8d2c2..43b90709585f 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -38,6 +38,9 @@
38#include <net/netlink.h> 38#include <net/netlink.h>
39#include <net/genetlink.h> 39#include <net/genetlink.h>
40 40
41#define CREATE_TRACE_POINTS
42#include <trace/events/thermal.h>
43
41#include "thermal_core.h" 44#include "thermal_core.h"
42#include "thermal_hwmon.h" 45#include "thermal_hwmon.h"
43 46
@@ -368,6 +371,8 @@ static void handle_critical_trips(struct thermal_zone_device *tz,
368 if (tz->temperature < trip_temp) 371 if (tz->temperature < trip_temp)
369 return; 372 return;
370 373
374 trace_thermal_zone_trip(tz, trip, trip_type);
375
371 if (tz->ops->notify) 376 if (tz->ops->notify)
372 tz->ops->notify(tz, trip, trip_type); 377 tz->ops->notify(tz, trip, trip_type);
373 378
@@ -463,6 +468,7 @@ static void update_temperature(struct thermal_zone_device *tz)
463 tz->temperature = temp; 468 tz->temperature = temp;
464 mutex_unlock(&tz->lock); 469 mutex_unlock(&tz->lock);
465 470
471 trace_thermal_temperature(tz);
466 dev_dbg(&tz->device, "last_temperature=%d, current_temperature=%d\n", 472 dev_dbg(&tz->device, "last_temperature=%d, current_temperature=%d\n",
467 tz->last_temperature, tz->temperature); 473 tz->last_temperature, tz->temperature);
468} 474}
@@ -1287,6 +1293,7 @@ void thermal_cdev_update(struct thermal_cooling_device *cdev)
1287 mutex_unlock(&cdev->lock); 1293 mutex_unlock(&cdev->lock);
1288 cdev->ops->set_cur_state(cdev, target); 1294 cdev->ops->set_cur_state(cdev, target);
1289 cdev->updated = true; 1295 cdev->updated = true;
1296 trace_cdev_update(cdev, target);
1290 dev_dbg(&cdev->device, "set to state %lu\n", target); 1297 dev_dbg(&cdev->device, "set to state %lu\n", target);
1291} 1298}
1292EXPORT_SYMBOL(thermal_cdev_update); 1299EXPORT_SYMBOL(thermal_cdev_update);
@@ -1568,8 +1575,7 @@ struct thermal_zone_device *thermal_zone_device_register(const char *type,
1568 1575
1569 thermal_zone_device_update(tz); 1576 thermal_zone_device_update(tz);
1570 1577
1571 if (!result) 1578 return tz;
1572 return tz;
1573 1579
1574unregister: 1580unregister:
1575 release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id); 1581 release_idr(&thermal_tz_idr, &thermal_idr_lock, tz->id);
@@ -1790,6 +1796,10 @@ static int __init thermal_register_governors(void)
1790 if (result) 1796 if (result)
1791 return result; 1797 return result;
1792 1798
1799 result = thermal_gov_bang_bang_register();
1800 if (result)
1801 return result;
1802
1793 return thermal_gov_user_space_register(); 1803 return thermal_gov_user_space_register();
1794} 1804}
1795 1805
@@ -1797,6 +1807,7 @@ static void thermal_unregister_governors(void)
1797{ 1807{
1798 thermal_gov_step_wise_unregister(); 1808 thermal_gov_step_wise_unregister();
1799 thermal_gov_fair_share_unregister(); 1809 thermal_gov_fair_share_unregister();
1810 thermal_gov_bang_bang_unregister();
1800 thermal_gov_user_space_unregister(); 1811 thermal_gov_user_space_unregister();
1801} 1812}
1802 1813
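
Note: the thermal_core.c hunks define the trace points, emit them on trip, temperature and cooling-device updates, and insert the bang-bang governor into the registration chain. A standalone sketch of that early-return registration sequence, with stand-in function names:

#include <stdio.h>

static int reg_step_wise(void)  { return 0; }
static int reg_fair_share(void) { return 0; }
static int reg_bang_bang(void)  { return 0; }
static int reg_user_space(void) { return 0; }

static int register_governors(void)
{
	int result;

	result = reg_step_wise();
	if (result)
		return result;

	result = reg_fair_share();
	if (result)
		return result;

	result = reg_bang_bang();   /* the newly inserted step */
	if (result)
		return result;

	return reg_user_space();
}

int main(void)
{
	printf("register_governors() = %d\n", register_governors());
	return 0;
}
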
diff --git a/drivers/thermal/thermal_core.h b/drivers/thermal/thermal_core.h
index 3db339fb636f..d15d243de27a 100644
--- a/drivers/thermal/thermal_core.h
+++ b/drivers/thermal/thermal_core.h
@@ -69,6 +69,14 @@ static inline int thermal_gov_fair_share_register(void) { return 0; }
69static inline void thermal_gov_fair_share_unregister(void) {} 69static inline void thermal_gov_fair_share_unregister(void) {}
70#endif /* CONFIG_THERMAL_GOV_FAIR_SHARE */ 70#endif /* CONFIG_THERMAL_GOV_FAIR_SHARE */
71 71
72#ifdef CONFIG_THERMAL_GOV_BANG_BANG
73int thermal_gov_bang_bang_register(void);
74void thermal_gov_bang_bang_unregister(void);
75#else
76static inline int thermal_gov_bang_bang_register(void) { return 0; }
77static inline void thermal_gov_bang_bang_unregister(void) {}
78#endif /* CONFIG_THERMAL_GOV_BANG_BANG */
79
72#ifdef CONFIG_THERMAL_GOV_USER_SPACE 80#ifdef CONFIG_THERMAL_GOV_USER_SPACE
73int thermal_gov_user_space_register(void); 81int thermal_gov_user_space_register(void);
74void thermal_gov_user_space_unregister(void); 82void thermal_gov_user_space_unregister(void);
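
Note: the header follows the existing pattern of pairing real prototypes with static inline no-ops when a governor is configured out, so call sites need no #ifdefs. The same idiom reduced to a standalone example around a hypothetical CONFIG_GOV_EXAMPLE option:

#include <stdio.h>

#ifdef CONFIG_GOV_EXAMPLE
int gov_example_register(void);            /* real implementation elsewhere */
void gov_example_unregister(void);
#else
static inline int gov_example_register(void) { return 0; }
static inline void gov_example_unregister(void) {}
#endif

int main(void)
{
	int ret = gov_example_register();   /* always valid to call */

	gov_example_unregister();
	return ret;
}
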
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 89c4cee253e3..2e900a98c3e3 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -2413,12 +2413,17 @@ static unsigned int n_tty_poll(struct tty_struct *tty, struct file *file,
2413 2413
2414 poll_wait(file, &tty->read_wait, wait); 2414 poll_wait(file, &tty->read_wait, wait);
2415 poll_wait(file, &tty->write_wait, wait); 2415 poll_wait(file, &tty->write_wait, wait);
2416 if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
2417 mask |= POLLHUP;
2416 if (input_available_p(tty, 1)) 2418 if (input_available_p(tty, 1))
2417 mask |= POLLIN | POLLRDNORM; 2419 mask |= POLLIN | POLLRDNORM;
2420 else if (mask & POLLHUP) {
2421 tty_flush_to_ldisc(tty);
2422 if (input_available_p(tty, 1))
2423 mask |= POLLIN | POLLRDNORM;
2424 }
2418 if (tty->packet && tty->link->ctrl_status) 2425 if (tty->packet && tty->link->ctrl_status)
2419 mask |= POLLPRI | POLLIN | POLLRDNORM; 2426 mask |= POLLPRI | POLLIN | POLLRDNORM;
2420 if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
2421 mask |= POLLHUP;
2422 if (tty_hung_up_p(file)) 2427 if (tty_hung_up_p(file))
2423 mask |= POLLHUP; 2428 mask |= POLLHUP;
2424 if (!(mask & (POLLHUP | POLLIN | POLLRDNORM))) { 2429 if (!(mask & (POLLHUP | POLLIN | POLLRDNORM))) {
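
Note: the n_tty_poll() hunk computes POLLHUP before the readability check and, when the other end has closed but nothing is visible yet, flushes pending work to the line discipline and re-checks, so data queued behind a hangup is still reported readable. A rough userspace model of that ordering; the helpers are stand-ins for the tty internals:

#include <stdio.h>

#define POLLIN_  0x1
#define POLLHUP_ 0x10

static int input_available(int queued) { return queued > 0; }

static int poll_mask(int other_closed, int queued_before_flush, int queued_after_flush)
{
	int mask = 0;

	if (other_closed)
		mask |= POLLHUP_;
	if (input_available(queued_before_flush))
		mask |= POLLIN_;
	else if (mask & POLLHUP_) {
		/* flush_to_ldisc() would run here, then readability is re-checked */
		if (input_available(queued_after_flush))
			mask |= POLLIN_;
	}
	return mask;
}

int main(void)
{
	printf("%#x\n", poll_mask(1, 0, 3));  /* POLLHUP | POLLIN after the flush */
	printf("%#x\n", poll_mask(0, 0, 0));  /* 0: nothing to report */
	return 0;
}
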
diff --git a/drivers/tty/serial/8250/8250_mtk.c b/drivers/tty/serial/8250/8250_mtk.c
index 8f37d57165ec..de7aae523b37 100644
--- a/drivers/tty/serial/8250/8250_mtk.c
+++ b/drivers/tty/serial/8250/8250_mtk.c
@@ -81,7 +81,7 @@ mtk8250_set_termios(struct uart_port *port, struct ktermios *termios,
81 /* Set to highest baudrate supported */ 81 /* Set to highest baudrate supported */
82 if (baud >= 1152000) 82 if (baud >= 1152000)
83 baud = 921600; 83 baud = 921600;
84 quot = DIV_ROUND_CLOSEST(port->uartclk, 256 * baud); 84 quot = (port->uartclk / (256 * baud)) + 1;
85 } 85 }
86 86
87 /* 87 /*
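
Note: the 8250_mtk quotient change replaces round-to-closest with floor-plus-one. One arithmetic consequence is that the quotient can no longer round down to zero when the requested rate is high relative to the UART clock; the clock value in this sketch is illustrative, not a claim about the hardware:

#include <stdio.h>

int main(void)
{
	unsigned int uartclk = 26000000, baud = 921600;   /* illustrative values */
	unsigned int d = 256 * baud;
	unsigned int old_quot = (uartclk + d / 2) / d;    /* DIV_ROUND_CLOSEST */
	unsigned int new_quot = uartclk / d + 1;

	/* old_quot can round down to 0 here; the replacement always yields >= 1 */
	printf("old=%u new=%u\n", old_quot, new_quot);
	return 0;
}
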
diff --git a/drivers/tty/serial/of_serial.c b/drivers/tty/serial/of_serial.c
index 8bc2563335ae..bf355050eab6 100644
--- a/drivers/tty/serial/of_serial.c
+++ b/drivers/tty/serial/of_serial.c
@@ -158,7 +158,7 @@ static int of_platform_serial_probe(struct platform_device *ofdev)
158 if (of_find_property(ofdev->dev.of_node, "used-by-rtas", NULL)) 158 if (of_find_property(ofdev->dev.of_node, "used-by-rtas", NULL))
159 return -EBUSY; 159 return -EBUSY;
160 160
161 info = kmalloc(sizeof(*info), GFP_KERNEL); 161 info = kzalloc(sizeof(*info), GFP_KERNEL);
162 if (info == NULL) 162 if (info == NULL)
163 return -ENOMEM; 163 return -ENOMEM;
164 164
@@ -240,32 +240,6 @@ static int of_platform_serial_remove(struct platform_device *ofdev)
240 return 0; 240 return 0;
241} 241}
242 242
243#ifdef CONFIG_PM_SLEEP
244static int of_serial_suspend(struct device *dev)
245{
246 struct of_serial_info *info = dev_get_drvdata(dev);
247
248 serial8250_suspend_port(info->line);
249 if (info->clk)
250 clk_disable_unprepare(info->clk);
251
252 return 0;
253}
254
255static int of_serial_resume(struct device *dev)
256{
257 struct of_serial_info *info = dev_get_drvdata(dev);
258
259 if (info->clk)
260 clk_prepare_enable(info->clk);
261
262 serial8250_resume_port(info->line);
263
264 return 0;
265}
266#endif
267static SIMPLE_DEV_PM_OPS(of_serial_pm_ops, of_serial_suspend, of_serial_resume);
268
269/* 243/*
270 * A few common types, add more as needed. 244 * A few common types, add more as needed.
271 */ 245 */
@@ -297,7 +271,6 @@ static struct platform_driver of_platform_serial_driver = {
297 .name = "of_serial", 271 .name = "of_serial",
298 .owner = THIS_MODULE, 272 .owner = THIS_MODULE,
299 .of_match_table = of_platform_serial_table, 273 .of_match_table = of_platform_serial_table,
300 .pm = &of_serial_pm_ops,
301 }, 274 },
302 .probe = of_platform_serial_probe, 275 .probe = of_platform_serial_probe,
303 .remove = of_platform_serial_remove, 276 .remove = of_platform_serial_remove,
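
Note: besides dropping the PM ops, the of_serial probe now uses kzalloc, so every field of the info structure starts out zero rather than holding stale heap contents. A userspace illustration with calloc as the analogue; the structure is a cut-down stand-in:

#include <stdio.h>
#include <stdlib.h>

struct of_serial_info_sim {
	void *clk;      /* optional resource; later code may test this pointer */
	int   line;
};

int main(void)
{
	struct of_serial_info_sim *info = calloc(1, sizeof(*info));

	if (!info)
		return 1;
	/* With a zeroing allocator the optional clk pointer is reliably NULL
	 * until it is explicitly set, so checks like `if (info->clk)` are safe. */
	printf("clk=%p line=%d\n", info->clk, info->line);
	free(info);
	return 0;
}
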
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index df3a8c74358e..eaeb9a02c7fe 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -363,7 +363,7 @@ uart_get_baud_rate(struct uart_port *port, struct ktermios *termios,
363 * The spd_hi, spd_vhi, spd_shi, spd_warp kludge... 363 * The spd_hi, spd_vhi, spd_shi, spd_warp kludge...
364 * Die! Die! Die! 364 * Die! Die! Die!
365 */ 365 */
366 if (baud == 38400) 366 if (try == 0 && baud == 38400)
367 baud = altbaud; 367 baud = altbaud;
368 368
369 /* 369 /*
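
Note: the serial_core hunk restricts the 38400 -> altbaud substitution to the first pass of the baud-selection loop (try == 0), so a retry can fall back to the plain rate. A simplified model of such a loop; the structure is a sketch, not uart_get_baud_rate() itself:

#include <stdio.h>

static unsigned int pick_baud(unsigned int requested, unsigned int altbaud,
			      int (*acceptable)(unsigned int))
{
	for (int try = 0; try < 2; try++) {
		unsigned int baud = requested;

		if (try == 0 && baud == 38400)
			baud = altbaud;         /* spd_* substitution, first pass only */
		if (acceptable(baud))
			return baud;
	}
	return 9600;                            /* last-resort fallback */
}

static int reject_high(unsigned int baud) { return baud <= 115200; }

int main(void)
{
	/* altbaud 460800 is rejected, so the second pass keeps the plain 38400 */
	printf("%u\n", pick_baud(38400, 460800, reject_high));
	return 0;
}
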
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 16a2c0237dd6..0508a1d8e4cd 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1709,6 +1709,8 @@ int tty_release(struct inode *inode, struct file *filp)
1709 int pty_master, tty_closing, o_tty_closing, do_sleep; 1709 int pty_master, tty_closing, o_tty_closing, do_sleep;
1710 int idx; 1710 int idx;
1711 char buf[64]; 1711 char buf[64];
1712 long timeout = 0;
1713 int once = 1;
1712 1714
1713 if (tty_paranoia_check(tty, inode, __func__)) 1715 if (tty_paranoia_check(tty, inode, __func__))
1714 return 0; 1716 return 0;
@@ -1789,11 +1791,18 @@ int tty_release(struct inode *inode, struct file *filp)
1789 if (!do_sleep) 1791 if (!do_sleep)
1790 break; 1792 break;
1791 1793
1792 printk(KERN_WARNING "%s: %s: read/write wait queue active!\n", 1794 if (once) {
1793 __func__, tty_name(tty, buf)); 1795 once = 0;
1796 printk(KERN_WARNING "%s: %s: read/write wait queue active!\n",
1797 __func__, tty_name(tty, buf));
1798 }
1794 tty_unlock_pair(tty, o_tty); 1799 tty_unlock_pair(tty, o_tty);
1795 mutex_unlock(&tty_mutex); 1800 mutex_unlock(&tty_mutex);
1796 schedule(); 1801 schedule_timeout_killable(timeout);
1802 if (timeout < 120 * HZ)
1803 timeout = 2 * timeout + 1;
1804 else
1805 timeout = MAX_SCHEDULE_TIMEOUT;
1797 } 1806 }
1798 1807
1799 /* 1808 /*
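
Note: the tty_release() hunk replaces the repeated warning and bare schedule() with a single warning plus a killable sleep whose length grows as 2*t + 1 and is capped once it passes 120 * HZ. The backoff sequence modelled standalone (the HZ value is illustrative):

#include <limits.h>
#include <stdio.h>

#define HZ 100
#define MAX_TIMEOUT LONG_MAX

int main(void)
{
	long timeout = 0;

	for (int i = 0; i < 20; i++) {
		printf("wait %ld jiffies\n", timeout);  /* 0, 1, 3, 7, 15, ... */
		if (timeout < 120 * HZ)
			timeout = 2 * timeout + 1;
		else
			timeout = MAX_TIMEOUT;          /* cap: sleep until woken */
	}
	return 0;
}
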
diff --git a/drivers/tty/vt/consolemap.c b/drivers/tty/vt/consolemap.c
index 610b720d3b91..59b25e039968 100644
--- a/drivers/tty/vt/consolemap.c
+++ b/drivers/tty/vt/consolemap.c
@@ -539,6 +539,12 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
539 539
540 /* Save original vc_unipagdir_loc in case we allocate a new one */ 540 /* Save original vc_unipagdir_loc in case we allocate a new one */
541 p = *vc->vc_uni_pagedir_loc; 541 p = *vc->vc_uni_pagedir_loc;
542
543 if (!p) {
544 err = -EINVAL;
545
546 goto out_unlock;
547 }
542 548
543 if (p->refcount > 1) { 549 if (p->refcount > 1) {
544 int j, k; 550 int j, k;
@@ -623,6 +629,7 @@ int con_set_unimap(struct vc_data *vc, ushort ct, struct unipair __user *list)
623 set_inverse_transl(vc, p, i); /* Update inverse translations */ 629 set_inverse_transl(vc, p, i); /* Update inverse translations */
624 set_inverse_trans_unicode(vc, p); 630 set_inverse_trans_unicode(vc, p);
625 631
632out_unlock:
626 console_unlock(); 633 console_unlock();
627 return err; 634 return err;
628} 635}
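
Note: the consolemap change adds a NULL check on the saved pagedir pointer and routes the failure through a shared out_unlock label so the console lock is always released. The goto-unwinding idiom reduced to a standalone example with stand-in lock helpers:

#include <stdio.h>

static int locked;
static void lock_sim(void)   { locked = 1; }
static void unlock_sim(void) { locked = 0; }

static int update_map(void *pagedir)
{
	int err = 0;

	lock_sim();
	if (!pagedir) {
		err = -22;              /* -EINVAL */
		goto out_unlock;
	}
	/* ... modify the map here ... */
out_unlock:
	unlock_sim();                   /* single exit path drops the lock */
	return err;
}

int main(void)
{
	printf("err=%d locked=%d\n", update_map(NULL), locked);
	return 0;
}
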
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index a673e5b6a2e0..60fa6278fbce 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -28,18 +28,6 @@
28 28
29#define UIO_MAX_DEVICES (1U << MINORBITS) 29#define UIO_MAX_DEVICES (1U << MINORBITS)
30 30
31struct uio_device {
32 struct module *owner;
33 struct device *dev;
34 int minor;
35 atomic_t event;
36 struct fasync_struct *async_queue;
37 wait_queue_head_t wait;
38 struct uio_info *info;
39 struct kobject *map_dir;
40 struct kobject *portio_dir;
41};
42
43static int uio_major; 31static int uio_major;
44static struct cdev *uio_cdev; 32static struct cdev *uio_cdev;
45static DEFINE_IDR(uio_idr); 33static DEFINE_IDR(uio_idr);
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 3df5005c554d..9bdc6bd73432 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -742,7 +742,6 @@ static int ci_hdrc_remove(struct platform_device *pdev)
742 ci_role_destroy(ci); 742 ci_role_destroy(ci);
743 ci_hdrc_enter_lpm(ci, true); 743 ci_hdrc_enter_lpm(ci, true);
744 usb_phy_shutdown(ci->transceiver); 744 usb_phy_shutdown(ci->transceiver);
745 kfree(ci->hw_bank.regmap);
746 745
747 return 0; 746 return 0;
748} 747}
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index e934e19f49f5..077d58ac3dcb 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -60,6 +60,9 @@ static struct acm *acm_table[ACM_TTY_MINORS];
60 60
61static DEFINE_MUTEX(acm_table_lock); 61static DEFINE_MUTEX(acm_table_lock);
62 62
63static void acm_tty_set_termios(struct tty_struct *tty,
64 struct ktermios *termios_old);
65
63/* 66/*
64 * acm_table accessors 67 * acm_table accessors
65 */ 68 */
@@ -145,8 +148,15 @@ static int acm_ctrl_msg(struct acm *acm, int request, int value,
145/* devices aren't required to support these requests. 148/* devices aren't required to support these requests.
146 * the cdc acm descriptor tells whether they do... 149 * the cdc acm descriptor tells whether they do...
147 */ 150 */
148#define acm_set_control(acm, control) \ 151static inline int acm_set_control(struct acm *acm, int control)
149 acm_ctrl_msg(acm, USB_CDC_REQ_SET_CONTROL_LINE_STATE, control, NULL, 0) 152{
153 if (acm->quirks & QUIRK_CONTROL_LINE_STATE)
154 return -EOPNOTSUPP;
155
156 return acm_ctrl_msg(acm, USB_CDC_REQ_SET_CONTROL_LINE_STATE,
157 control, NULL, 0);
158}
159
150#define acm_set_line(acm, line) \ 160#define acm_set_line(acm, line) \
151 acm_ctrl_msg(acm, USB_CDC_REQ_SET_LINE_CODING, 0, line, sizeof *(line)) 161 acm_ctrl_msg(acm, USB_CDC_REQ_SET_LINE_CODING, 0, line, sizeof *(line))
152#define acm_send_break(acm, ms) \ 162#define acm_send_break(acm, ms) \
@@ -554,6 +564,8 @@ static int acm_port_activate(struct tty_port *port, struct tty_struct *tty)
554 goto error_submit_urb; 564 goto error_submit_urb;
555 } 565 }
556 566
567 acm_tty_set_termios(tty, NULL);
568
557 /* 569 /*
558 * Unthrottle device in case the TTY was closed while throttled. 570 * Unthrottle device in case the TTY was closed while throttled.
559 */ 571 */
@@ -980,11 +992,12 @@ static void acm_tty_set_termios(struct tty_struct *tty,
980 /* FIXME: Needs to clear unsupported bits in the termios */ 992 /* FIXME: Needs to clear unsupported bits in the termios */
981 acm->clocal = ((termios->c_cflag & CLOCAL) != 0); 993 acm->clocal = ((termios->c_cflag & CLOCAL) != 0);
982 994
983 if (!newline.dwDTERate) { 995 if (C_BAUD(tty) == B0) {
984 newline.dwDTERate = acm->line.dwDTERate; 996 newline.dwDTERate = acm->line.dwDTERate;
985 newctrl &= ~ACM_CTRL_DTR; 997 newctrl &= ~ACM_CTRL_DTR;
986 } else 998 } else if (termios_old && (termios_old->c_cflag & CBAUD) == B0) {
987 newctrl |= ACM_CTRL_DTR; 999 newctrl |= ACM_CTRL_DTR;
1000 }
988 1001
989 if (newctrl != acm->ctrlout) 1002 if (newctrl != acm->ctrlout)
990 acm_set_control(acm, acm->ctrlout = newctrl); 1003 acm_set_control(acm, acm->ctrlout = newctrl);
@@ -1314,6 +1327,7 @@ made_compressed_probe:
1314 tty_port_init(&acm->port); 1327 tty_port_init(&acm->port);
1315 acm->port.ops = &acm_port_ops; 1328 acm->port.ops = &acm_port_ops;
1316 init_usb_anchor(&acm->delayed); 1329 init_usb_anchor(&acm->delayed);
1330 acm->quirks = quirks;
1317 1331
1318 buf = usb_alloc_coherent(usb_dev, ctrlsize, GFP_KERNEL, &acm->ctrl_dma); 1332 buf = usb_alloc_coherent(usb_dev, ctrlsize, GFP_KERNEL, &acm->ctrl_dma);
1319 if (!buf) { 1333 if (!buf) {
@@ -1681,6 +1695,9 @@ static const struct usb_device_id acm_ids[] = {
1681 { USB_DEVICE(0x0572, 0x1328), /* Shiro / Aztech USB MODEM UM-3100 */ 1695 { USB_DEVICE(0x0572, 0x1328), /* Shiro / Aztech USB MODEM UM-3100 */
1682 .driver_info = NO_UNION_NORMAL, /* has no union descriptor */ 1696 .driver_info = NO_UNION_NORMAL, /* has no union descriptor */
1683 }, 1697 },
1698 { USB_DEVICE(0x20df, 0x0001), /* Simtec Electronics Entropy Key */
1699 .driver_info = QUIRK_CONTROL_LINE_STATE, },
1700 { USB_DEVICE(0x2184, 0x001c) }, /* GW Instek AFG-2225 */
1684 { USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */ 1701 { USB_DEVICE(0x22b8, 0x6425), /* Motorola MOTOMAGX phones */
1685 }, 1702 },
1686 /* Motorola H24 HSPA module: */ 1703 /* Motorola H24 HSPA module: */
diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h
index fc75651afe1c..d3251ebd09e2 100644
--- a/drivers/usb/class/cdc-acm.h
+++ b/drivers/usb/class/cdc-acm.h
@@ -121,6 +121,7 @@ struct acm {
121 unsigned int throttle_req:1; /* throttle requested */ 121 unsigned int throttle_req:1; /* throttle requested */
122 u8 bInterval; 122 u8 bInterval;
123 struct usb_anchor delayed; /* writes queued for a device about to be woken */ 123 struct usb_anchor delayed; /* writes queued for a device about to be woken */
124 unsigned long quirks;
124}; 125};
125 126
126#define CDC_DATA_INTERFACE_TYPE 0x0a 127#define CDC_DATA_INTERFACE_TYPE 0x0a
@@ -132,3 +133,4 @@ struct acm {
132#define NOT_A_MODEM BIT(3) 133#define NOT_A_MODEM BIT(3)
133#define NO_DATA_INTERFACE BIT(4) 134#define NO_DATA_INTERFACE BIT(4)
134#define IGNORE_DEVICE BIT(5) 135#define IGNORE_DEVICE BIT(5)
136#define QUIRK_CONTROL_LINE_STATE BIT(6)
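The cdc-acm changes above record per-device quirk bits on the acm structure so that acm_set_control() can refuse the SET_CONTROL_LINE_STATE request for devices flagged with QUIRK_CONTROL_LINE_STATE. A small sketch of the general quirk-flag pattern; the BIT() macro, the quirk name, and the context structure below are written out purely for illustration and are not the driver's actual definitions:

#include <errno.h>

#define BIT(n)			(1UL << (n))
#define QUIRK_NO_LINE_STATE	BIT(6)	/* illustrative name */

struct dev_ctx {
	unsigned long quirks;	/* filled from a quirk table at probe time */
};

static int send_line_state(struct dev_ctx *dev, int control)
{
	if (dev->quirks & QUIRK_NO_LINE_STATE)
		return -EOPNOTSUPP;	/* device is known to choke on this request */

	/* ... issue the control transfer here ... */
	(void)control;
	return 0;
}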
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index b84fb141e122..a6efb4184f2b 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -2060,6 +2060,8 @@ int usb_alloc_streams(struct usb_interface *interface,
2060 return -EINVAL; 2060 return -EINVAL;
2061 if (dev->speed != USB_SPEED_SUPER) 2061 if (dev->speed != USB_SPEED_SUPER)
2062 return -EINVAL; 2062 return -EINVAL;
2063 if (dev->state < USB_STATE_CONFIGURED)
2064 return -ENODEV;
2063 2065
2064 for (i = 0; i < num_eps; i++) { 2066 for (i = 0; i < num_eps; i++) {
2065 /* Streams only apply to bulk endpoints. */ 2067 /* Streams only apply to bulk endpoints. */
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 11e80ac31324..b649fef2e35d 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -4468,9 +4468,6 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1,
4468 if (retval) 4468 if (retval)
4469 goto fail; 4469 goto fail;
4470 4470
4471 if (hcd->usb_phy && !hdev->parent)
4472 usb_phy_notify_connect(hcd->usb_phy, udev->speed);
4473
4474 /* 4471 /*
4475 * Some superspeed devices have finished the link training process 4472 * Some superspeed devices have finished the link training process
4476 * and attached to a superspeed hub port, but the device descriptor 4473 * and attached to a superspeed hub port, but the device descriptor
@@ -4627,8 +4624,7 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
4627 4624
4628 /* Disconnect any existing devices under this port */ 4625 /* Disconnect any existing devices under this port */
4629 if (udev) { 4626 if (udev) {
4630 if (hcd->usb_phy && !hdev->parent && 4627 if (hcd->usb_phy && !hdev->parent)
4631 !(portstatus & USB_PORT_STAT_CONNECTION))
4632 usb_phy_notify_disconnect(hcd->usb_phy, udev->speed); 4628 usb_phy_notify_disconnect(hcd->usb_phy, udev->speed);
4633 usb_disconnect(&port_dev->child); 4629 usb_disconnect(&port_dev->child);
4634 } 4630 }
@@ -4783,6 +4779,10 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus,
4783 port_dev->child = NULL; 4779 port_dev->child = NULL;
4784 spin_unlock_irq(&device_state_lock); 4780 spin_unlock_irq(&device_state_lock);
4785 mutex_unlock(&usb_port_peer_mutex); 4781 mutex_unlock(&usb_port_peer_mutex);
4782 } else {
4783 if (hcd->usb_phy && !hdev->parent)
4784 usb_phy_notify_connect(hcd->usb_phy,
4785 udev->speed);
4786 } 4786 }
4787 } 4787 }
4788 4788
diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c
index 5ae883dc21f5..96fafed92b76 100644
--- a/drivers/usb/core/quirks.c
+++ b/drivers/usb/core/quirks.c
@@ -44,6 +44,9 @@ static const struct usb_device_id usb_quirk_list[] = {
44 /* Creative SB Audigy 2 NX */ 44 /* Creative SB Audigy 2 NX */
45 { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME }, 45 { USB_DEVICE(0x041e, 0x3020), .driver_info = USB_QUIRK_RESET_RESUME },
46 46
47 /* Microsoft Wireless Laser Mouse 6000 Receiver */
48 { USB_DEVICE(0x045e, 0x00e1), .driver_info = USB_QUIRK_RESET_RESUME },
49
47 /* Microsoft LifeCam-VX700 v2.0 */ 50 /* Microsoft LifeCam-VX700 v2.0 */
48 { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME }, 51 { USB_DEVICE(0x045e, 0x0770), .driver_info = USB_QUIRK_RESET_RESUME },
49 52
@@ -97,6 +100,12 @@ static const struct usb_device_id usb_quirk_list[] = {
97 { USB_DEVICE(0x04f3, 0x0089), .driver_info = 100 { USB_DEVICE(0x04f3, 0x0089), .driver_info =
98 USB_QUIRK_DEVICE_QUALIFIER }, 101 USB_QUIRK_DEVICE_QUALIFIER },
99 102
103 { USB_DEVICE(0x04f3, 0x009b), .driver_info =
104 USB_QUIRK_DEVICE_QUALIFIER },
105
106 { USB_DEVICE(0x04f3, 0x016f), .driver_info =
107 USB_QUIRK_DEVICE_QUALIFIER },
108
100 /* Roland SC-8820 */ 109 /* Roland SC-8820 */
101 { USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME }, 110 { USB_DEVICE(0x0582, 0x0007), .driver_info = USB_QUIRK_RESET_RESUME },
102 111
diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h
index bf015ab3b44c..55c90c53f2d6 100644
--- a/drivers/usb/dwc2/core.h
+++ b/drivers/usb/dwc2/core.h
@@ -619,7 +619,7 @@ struct dwc2_hsotg {
619 unsigned port_suspend_change:1; 619 unsigned port_suspend_change:1;
620 unsigned port_over_current_change:1; 620 unsigned port_over_current_change:1;
621 unsigned port_l1_change:1; 621 unsigned port_l1_change:1;
622 unsigned reserved:26; 622 unsigned reserved:25;
623 } b; 623 } b;
624 } flags; 624 } flags;
625 625
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 7b5856fadd93..8b5c079c7b7d 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -2327,7 +2327,7 @@ irq_retry:
2327 2327
2328 u32 usb_status = readl(hsotg->regs + GOTGCTL); 2328 u32 usb_status = readl(hsotg->regs + GOTGCTL);
2329 2329
2330 dev_info(hsotg->dev, "%s: USBRst\n", __func__); 2330 dev_dbg(hsotg->dev, "%s: USBRst\n", __func__);
2331 dev_dbg(hsotg->dev, "GNPTXSTS=%08x\n", 2331 dev_dbg(hsotg->dev, "GNPTXSTS=%08x\n",
2332 readl(hsotg->regs + GNPTXSTS)); 2332 readl(hsotg->regs + GNPTXSTS));
2333 2333
@@ -2561,8 +2561,10 @@ static int s3c_hsotg_ep_enable(struct usb_ep *ep,
2561 hs_ep->fifo_size = val; 2561 hs_ep->fifo_size = val;
2562 break; 2562 break;
2563 } 2563 }
2564 if (i == 8) 2564 if (i == 8) {
2565 return -ENOMEM; 2565 ret = -ENOMEM;
2566 goto error;
2567 }
2566 } 2568 }
2567 2569
2568 /* for non control endpoints, set PID to D0 */ 2570 /* for non control endpoints, set PID to D0 */
@@ -2579,6 +2581,7 @@ static int s3c_hsotg_ep_enable(struct usb_ep *ep,
2579 /* enable the endpoint interrupt */ 2581 /* enable the endpoint interrupt */
2580 s3c_hsotg_ctrl_epint(hsotg, index, dir_in, 1); 2582 s3c_hsotg_ctrl_epint(hsotg, index, dir_in, 1);
2581 2583
2584error:
2582 spin_unlock_irqrestore(&hsotg->lock, flags); 2585 spin_unlock_irqrestore(&hsotg->lock, flags);
2583 return ret; 2586 return ret;
2584} 2587}
@@ -2934,9 +2937,7 @@ static int s3c_hsotg_udc_stop(struct usb_gadget *gadget,
2934 2937
2935 spin_lock_irqsave(&hsotg->lock, flags); 2938 spin_lock_irqsave(&hsotg->lock, flags);
2936 2939
2937 if (!driver) 2940 hsotg->driver = NULL;
2938 hsotg->driver = NULL;
2939
2940 hsotg->gadget.speed = USB_SPEED_UNKNOWN; 2941 hsotg->gadget.speed = USB_SPEED_UNKNOWN;
2941 2942
2942 spin_unlock_irqrestore(&hsotg->lock, flags); 2943 spin_unlock_irqrestore(&hsotg->lock, flags);
@@ -3567,6 +3568,7 @@ static int s3c_hsotg_probe(struct platform_device *pdev)
3567 s3c_hsotg_initep(hsotg, &hsotg->eps[epnum], epnum); 3568 s3c_hsotg_initep(hsotg, &hsotg->eps[epnum], epnum);
3568 3569
3569 /* disable power and clock */ 3570 /* disable power and clock */
3571 s3c_hsotg_phy_disable(hsotg);
3570 3572
3571 ret = regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies), 3573 ret = regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies),
3572 hsotg->supplies); 3574 hsotg->supplies);
@@ -3575,8 +3577,6 @@ static int s3c_hsotg_probe(struct platform_device *pdev)
3575 goto err_ep_mem; 3577 goto err_ep_mem;
3576 } 3578 }
3577 3579
3578 s3c_hsotg_phy_disable(hsotg);
3579
3580 ret = usb_add_gadget_udc(&pdev->dev, &hsotg->gadget); 3580 ret = usb_add_gadget_udc(&pdev->dev, &hsotg->gadget);
3581 if (ret) 3581 if (ret)
3582 goto err_ep_mem; 3582 goto err_ep_mem;
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index 2f537d588225..a0aa9f3da441 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -597,7 +597,7 @@ static int dwc3_omap_prepare(struct device *dev)
597{ 597{
598 struct dwc3_omap *omap = dev_get_drvdata(dev); 598 struct dwc3_omap *omap = dev_get_drvdata(dev);
599 599
600 dwc3_omap_write_irqmisc_set(omap, 0x00); 600 dwc3_omap_disable_irqs(omap);
601 601
602 return 0; 602 return 0;
603} 603}
@@ -605,19 +605,8 @@ static int dwc3_omap_prepare(struct device *dev)
605static void dwc3_omap_complete(struct device *dev) 605static void dwc3_omap_complete(struct device *dev)
606{ 606{
607 struct dwc3_omap *omap = dev_get_drvdata(dev); 607 struct dwc3_omap *omap = dev_get_drvdata(dev);
608 u32 reg;
609 608
610 reg = (USBOTGSS_IRQMISC_OEVT | 609 dwc3_omap_enable_irqs(omap);
611 USBOTGSS_IRQMISC_DRVVBUS_RISE |
612 USBOTGSS_IRQMISC_CHRGVBUS_RISE |
613 USBOTGSS_IRQMISC_DISCHRGVBUS_RISE |
614 USBOTGSS_IRQMISC_IDPULLUP_RISE |
615 USBOTGSS_IRQMISC_DRVVBUS_FALL |
616 USBOTGSS_IRQMISC_CHRGVBUS_FALL |
617 USBOTGSS_IRQMISC_DISCHRGVBUS_FALL |
618 USBOTGSS_IRQMISC_IDPULLUP_FALL);
619
620 dwc3_omap_write_irqmisc_set(omap, reg);
621} 610}
622 611
623static int dwc3_omap_suspend(struct device *dev) 612static int dwc3_omap_suspend(struct device *dev)
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index 436fb08c40b8..a36cf66302fb 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -30,6 +30,7 @@
30#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 0xabcd 30#define PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3 0xabcd
31#define PCI_DEVICE_ID_INTEL_BYT 0x0f37 31#define PCI_DEVICE_ID_INTEL_BYT 0x0f37
32#define PCI_DEVICE_ID_INTEL_MRFLD 0x119e 32#define PCI_DEVICE_ID_INTEL_MRFLD 0x119e
33#define PCI_DEVICE_ID_INTEL_BSW 0x22B7
33 34
34struct dwc3_pci { 35struct dwc3_pci {
35 struct device *dev; 36 struct device *dev;
@@ -181,6 +182,7 @@ static const struct pci_device_id dwc3_pci_id_table[] = {
181 PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS, 182 PCI_DEVICE(PCI_VENDOR_ID_SYNOPSYS,
182 PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3), 183 PCI_DEVICE_ID_SYNOPSYS_HAPSUSB3),
183 }, 184 },
185 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BSW), },
184 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BYT), }, 186 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BYT), },
185 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MRFLD), }, 187 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_MRFLD), },
186 { } /* Terminating Entry */ 188 { } /* Terminating Entry */
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index b35938777dde..df38e7ef4976 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -256,7 +256,7 @@ static void dwc3_ep0_stall_and_restart(struct dwc3 *dwc)
256 256
257 /* stall is always issued on EP0 */ 257 /* stall is always issued on EP0 */
258 dep = dwc->eps[0]; 258 dep = dwc->eps[0];
259 __dwc3_gadget_ep_set_halt(dep, 1); 259 __dwc3_gadget_ep_set_halt(dep, 1, false);
260 dep->flags = DWC3_EP_ENABLED; 260 dep->flags = DWC3_EP_ENABLED;
261 dwc->delayed_status = false; 261 dwc->delayed_status = false;
262 262
@@ -271,7 +271,7 @@ static void dwc3_ep0_stall_and_restart(struct dwc3 *dwc)
271 dwc3_ep0_out_start(dwc); 271 dwc3_ep0_out_start(dwc);
272} 272}
273 273
274int dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value) 274int __dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value)
275{ 275{
276 struct dwc3_ep *dep = to_dwc3_ep(ep); 276 struct dwc3_ep *dep = to_dwc3_ep(ep);
277 struct dwc3 *dwc = dep->dwc; 277 struct dwc3 *dwc = dep->dwc;
@@ -281,6 +281,20 @@ int dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value)
281 return 0; 281 return 0;
282} 282}
283 283
284int dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value)
285{
286 struct dwc3_ep *dep = to_dwc3_ep(ep);
287 struct dwc3 *dwc = dep->dwc;
288 unsigned long flags;
289 int ret;
290
291 spin_lock_irqsave(&dwc->lock, flags);
292 ret = __dwc3_gadget_ep0_set_halt(ep, value);
293 spin_unlock_irqrestore(&dwc->lock, flags);
294
295 return ret;
296}
297
284void dwc3_ep0_out_start(struct dwc3 *dwc) 298void dwc3_ep0_out_start(struct dwc3 *dwc)
285{ 299{
286 int ret; 300 int ret;
@@ -466,7 +480,7 @@ static int dwc3_ep0_handle_feature(struct dwc3 *dwc,
466 return -EINVAL; 480 return -EINVAL;
467 if (set == 0 && (dep->flags & DWC3_EP_WEDGE)) 481 if (set == 0 && (dep->flags & DWC3_EP_WEDGE))
468 break; 482 break;
469 ret = __dwc3_gadget_ep_set_halt(dep, set); 483 ret = __dwc3_gadget_ep_set_halt(dep, set, true);
470 if (ret) 484 if (ret)
471 return -EINVAL; 485 return -EINVAL;
472 break; 486 break;
@@ -775,11 +789,12 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
775 789
776 dwc->ep0_next_event = DWC3_EP0_NRDY_STATUS; 790 dwc->ep0_next_event = DWC3_EP0_NRDY_STATUS;
777 791
778 r = next_request(&ep0->request_list);
779 ur = &r->request;
780
781 trb = dwc->ep0_trb; 792 trb = dwc->ep0_trb;
782 793
794 r = next_request(&ep0->request_list);
795 if (!r)
796 return;
797
783 status = DWC3_TRB_SIZE_TRBSTS(trb->size); 798 status = DWC3_TRB_SIZE_TRBSTS(trb->size);
784 if (status == DWC3_TRBSTS_SETUP_PENDING) { 799 if (status == DWC3_TRBSTS_SETUP_PENDING) {
785 dwc3_trace(trace_dwc3_ep0, "Setup Pending received"); 800 dwc3_trace(trace_dwc3_ep0, "Setup Pending received");
@@ -790,6 +805,8 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
790 return; 805 return;
791 } 806 }
792 807
808 ur = &r->request;
809
793 length = trb->size & DWC3_TRB_SIZE_MASK; 810 length = trb->size & DWC3_TRB_SIZE_MASK;
794 811
795 if (dwc->ep0_bounced) { 812 if (dwc->ep0_bounced) {
@@ -811,12 +828,19 @@ static void dwc3_ep0_complete_data(struct dwc3 *dwc,
811 828
812 dwc3_ep0_stall_and_restart(dwc); 829 dwc3_ep0_stall_and_restart(dwc);
813 } else { 830 } else {
814 /* 831 dwc3_gadget_giveback(ep0, r, 0);
815 * handle the case where we have to send a zero packet. This 832
816 * seems to be case when req.length > maxpacket. Could it be? 833 if (IS_ALIGNED(ur->length, ep0->endpoint.maxpacket) &&
817 */ 834 ur->length && ur->zero) {
818 if (r) 835 int ret;
819 dwc3_gadget_giveback(ep0, r, 0); 836
837 dwc->ep0_next_event = DWC3_EP0_COMPLETE;
838
839 ret = dwc3_ep0_start_trans(dwc, epnum,
840 dwc->ctrl_req_addr, 0,
841 DWC3_TRBCTL_CONTROL_DATA);
842 WARN_ON(ret < 0);
843 }
820 } 844 }
821} 845}
822 846
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 3818b26bfc05..546ea5431b8c 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -525,12 +525,11 @@ static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
525 if (!usb_endpoint_xfer_isoc(desc)) 525 if (!usb_endpoint_xfer_isoc(desc))
526 return 0; 526 return 0;
527 527
528 memset(&trb_link, 0, sizeof(trb_link));
529
530 /* Link TRB for ISOC. The HWO bit is never reset */ 528 /* Link TRB for ISOC. The HWO bit is never reset */
531 trb_st_hw = &dep->trb_pool[0]; 529 trb_st_hw = &dep->trb_pool[0];
532 530
533 trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1]; 531 trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
532 memset(trb_link, 0, sizeof(*trb_link));
534 533
535 trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw)); 534 trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
536 trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw)); 535 trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
@@ -581,7 +580,7 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
581 580
582 /* make sure HW endpoint isn't stalled */ 581 /* make sure HW endpoint isn't stalled */
583 if (dep->flags & DWC3_EP_STALL) 582 if (dep->flags & DWC3_EP_STALL)
584 __dwc3_gadget_ep_set_halt(dep, 0); 583 __dwc3_gadget_ep_set_halt(dep, 0, false);
585 584
586 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA); 585 reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
587 reg &= ~DWC3_DALEPENA_EP(dep->number); 586 reg &= ~DWC3_DALEPENA_EP(dep->number);
@@ -1202,15 +1201,28 @@ out0:
1202 return ret; 1201 return ret;
1203} 1202}
1204 1203
1205int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value) 1204int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
1206{ 1205{
1207 struct dwc3_gadget_ep_cmd_params params; 1206 struct dwc3_gadget_ep_cmd_params params;
1208 struct dwc3 *dwc = dep->dwc; 1207 struct dwc3 *dwc = dep->dwc;
1209 int ret; 1208 int ret;
1210 1209
1210 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1211 dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
1212 return -EINVAL;
1213 }
1214
1211 memset(&params, 0x00, sizeof(params)); 1215 memset(&params, 0x00, sizeof(params));
1212 1216
1213 if (value) { 1217 if (value) {
1218 if (!protocol && ((dep->direction && dep->flags & DWC3_EP_BUSY) ||
1219 (!list_empty(&dep->req_queued) ||
1220 !list_empty(&dep->request_list)))) {
1221 dev_dbg(dwc->dev, "%s: pending request, cannot halt\n",
1222 dep->name);
1223 return -EAGAIN;
1224 }
1225
1214 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, 1226 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1215 DWC3_DEPCMD_SETSTALL, &params); 1227 DWC3_DEPCMD_SETSTALL, &params);
1216 if (ret) 1228 if (ret)
@@ -1241,15 +1253,7 @@ static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
1241 int ret; 1253 int ret;
1242 1254
1243 spin_lock_irqsave(&dwc->lock, flags); 1255 spin_lock_irqsave(&dwc->lock, flags);
1244 1256 ret = __dwc3_gadget_ep_set_halt(dep, value, false);
1245 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1246 dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
1247 ret = -EINVAL;
1248 goto out;
1249 }
1250
1251 ret = __dwc3_gadget_ep_set_halt(dep, value);
1252out:
1253 spin_unlock_irqrestore(&dwc->lock, flags); 1257 spin_unlock_irqrestore(&dwc->lock, flags);
1254 1258
1255 return ret; 1259 return ret;
@@ -1260,15 +1264,18 @@ static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
1260 struct dwc3_ep *dep = to_dwc3_ep(ep); 1264 struct dwc3_ep *dep = to_dwc3_ep(ep);
1261 struct dwc3 *dwc = dep->dwc; 1265 struct dwc3 *dwc = dep->dwc;
1262 unsigned long flags; 1266 unsigned long flags;
1267 int ret;
1263 1268
1264 spin_lock_irqsave(&dwc->lock, flags); 1269 spin_lock_irqsave(&dwc->lock, flags);
1265 dep->flags |= DWC3_EP_WEDGE; 1270 dep->flags |= DWC3_EP_WEDGE;
1266 spin_unlock_irqrestore(&dwc->lock, flags);
1267 1271
1268 if (dep->number == 0 || dep->number == 1) 1272 if (dep->number == 0 || dep->number == 1)
1269 return dwc3_gadget_ep0_set_halt(ep, 1); 1273 ret = __dwc3_gadget_ep0_set_halt(ep, 1);
1270 else 1274 else
1271 return dwc3_gadget_ep_set_halt(ep, 1); 1275 ret = __dwc3_gadget_ep_set_halt(dep, 1, false);
1276 spin_unlock_irqrestore(&dwc->lock, flags);
1277
1278 return ret;
1272} 1279}
1273 1280
1274/* -------------------------------------------------------------------------- */ 1281/* -------------------------------------------------------------------------- */
diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h
index 178ad8982206..18ae3eaa8b6f 100644
--- a/drivers/usb/dwc3/gadget.h
+++ b/drivers/usb/dwc3/gadget.h
@@ -82,10 +82,11 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
82void dwc3_ep0_interrupt(struct dwc3 *dwc, 82void dwc3_ep0_interrupt(struct dwc3 *dwc,
83 const struct dwc3_event_depevt *event); 83 const struct dwc3_event_depevt *event);
84void dwc3_ep0_out_start(struct dwc3 *dwc); 84void dwc3_ep0_out_start(struct dwc3 *dwc);
85int __dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value);
85int dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value); 86int dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value);
86int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request, 87int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request,
87 gfp_t gfp_flags); 88 gfp_t gfp_flags);
88int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value); 89int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol);
89 90
90/** 91/**
91 * dwc3_gadget_ep_get_transfer_index - Gets transfer index from HW 92 * dwc3_gadget_ep_get_transfer_index - Gets transfer index from HW
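The ep0.c/gadget.c changes above split the halt path into an unlocked __dwc3_gadget_ep0_set_halt() helper plus a public wrapper that takes dwc->lock, so internal callers that already hold the lock can use the helper directly. A generic sketch of that wrapper/helper split, with a pthread mutex in place of the driver's spinlock and invented type names:

#include <pthread.h>

struct ep_state {
	pthread_mutex_t lock;
	int halted;
};

/* Caller must already hold ep->lock. */
static int __ep_set_halt(struct ep_state *ep, int value)
{
	ep->halted = value;
	/* ... program the hardware here ... */
	return 0;
}

/* Public entry point: takes the lock, then defers to the helper. */
static int ep_set_halt(struct ep_state *ep, int value)
{
	int ret;

	pthread_mutex_lock(&ep->lock);
	ret = __ep_set_halt(ep, value);
	pthread_mutex_unlock(&ep->lock);
	return ret;
}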
diff --git a/drivers/usb/dwc3/trace.h b/drivers/usb/dwc3/trace.h
index 78aff1da089a..60b0f41eafc4 100644
--- a/drivers/usb/dwc3/trace.h
+++ b/drivers/usb/dwc3/trace.h
@@ -73,15 +73,23 @@ DECLARE_EVENT_CLASS(dwc3_log_ctrl,
73 TP_PROTO(struct usb_ctrlrequest *ctrl), 73 TP_PROTO(struct usb_ctrlrequest *ctrl),
74 TP_ARGS(ctrl), 74 TP_ARGS(ctrl),
75 TP_STRUCT__entry( 75 TP_STRUCT__entry(
76 __field(struct usb_ctrlrequest *, ctrl) 76 __field(__u8, bRequestType)
77 __field(__u8, bRequest)
78 __field(__le16, wValue)
79 __field(__le16, wIndex)
80 __field(__le16, wLength)
77 ), 81 ),
78 TP_fast_assign( 82 TP_fast_assign(
79 __entry->ctrl = ctrl; 83 __entry->bRequestType = ctrl->bRequestType;
84 __entry->bRequest = ctrl->bRequest;
85 __entry->wValue = ctrl->wValue;
86 __entry->wIndex = ctrl->wIndex;
87 __entry->wLength = ctrl->wLength;
80 ), 88 ),
81 TP_printk("bRequestType %02x bRequest %02x wValue %04x wIndex %04x wLength %d", 89 TP_printk("bRequestType %02x bRequest %02x wValue %04x wIndex %04x wLength %d",
82 __entry->ctrl->bRequestType, __entry->ctrl->bRequest, 90 __entry->bRequestType, __entry->bRequest,
83 le16_to_cpu(__entry->ctrl->wValue), le16_to_cpu(__entry->ctrl->wIndex), 91 le16_to_cpu(__entry->wValue), le16_to_cpu(__entry->wIndex),
84 le16_to_cpu(__entry->ctrl->wLength) 92 le16_to_cpu(__entry->wLength)
85 ) 93 )
86); 94);
87 95
@@ -94,15 +102,22 @@ DECLARE_EVENT_CLASS(dwc3_log_request,
94 TP_PROTO(struct dwc3_request *req), 102 TP_PROTO(struct dwc3_request *req),
95 TP_ARGS(req), 103 TP_ARGS(req),
96 TP_STRUCT__entry( 104 TP_STRUCT__entry(
105 __dynamic_array(char, name, DWC3_MSG_MAX)
97 __field(struct dwc3_request *, req) 106 __field(struct dwc3_request *, req)
107 __field(unsigned, actual)
108 __field(unsigned, length)
109 __field(int, status)
98 ), 110 ),
99 TP_fast_assign( 111 TP_fast_assign(
112 snprintf(__get_str(name), DWC3_MSG_MAX, "%s", req->dep->name);
100 __entry->req = req; 113 __entry->req = req;
114 __entry->actual = req->request.actual;
115 __entry->length = req->request.length;
116 __entry->status = req->request.status;
101 ), 117 ),
102 TP_printk("%s: req %p length %u/%u ==> %d", 118 TP_printk("%s: req %p length %u/%u ==> %d",
103 __entry->req->dep->name, __entry->req, 119 __get_str(name), __entry->req, __entry->actual, __entry->length,
104 __entry->req->request.actual, __entry->req->request.length, 120 __entry->status
105 __entry->req->request.status
106 ) 121 )
107); 122);
108 123
@@ -158,17 +173,17 @@ DECLARE_EVENT_CLASS(dwc3_log_gadget_ep_cmd,
158 struct dwc3_gadget_ep_cmd_params *params), 173 struct dwc3_gadget_ep_cmd_params *params),
159 TP_ARGS(dep, cmd, params), 174 TP_ARGS(dep, cmd, params),
160 TP_STRUCT__entry( 175 TP_STRUCT__entry(
161 __field(struct dwc3_ep *, dep) 176 __dynamic_array(char, name, DWC3_MSG_MAX)
162 __field(unsigned int, cmd) 177 __field(unsigned int, cmd)
163 __field(struct dwc3_gadget_ep_cmd_params *, params) 178 __field(struct dwc3_gadget_ep_cmd_params *, params)
164 ), 179 ),
165 TP_fast_assign( 180 TP_fast_assign(
166 __entry->dep = dep; 181 snprintf(__get_str(name), DWC3_MSG_MAX, "%s", dep->name);
167 __entry->cmd = cmd; 182 __entry->cmd = cmd;
168 __entry->params = params; 183 __entry->params = params;
169 ), 184 ),
170 TP_printk("%s: cmd '%s' [%d] params %08x %08x %08x\n", 185 TP_printk("%s: cmd '%s' [%d] params %08x %08x %08x\n",
171 __entry->dep->name, dwc3_gadget_ep_cmd_string(__entry->cmd), 186 __get_str(name), dwc3_gadget_ep_cmd_string(__entry->cmd),
172 __entry->cmd, __entry->params->param0, 187 __entry->cmd, __entry->params->param0,
173 __entry->params->param1, __entry->params->param2 188 __entry->params->param1, __entry->params->param2
174 ) 189 )
@@ -184,16 +199,24 @@ DECLARE_EVENT_CLASS(dwc3_log_trb,
184 TP_PROTO(struct dwc3_ep *dep, struct dwc3_trb *trb), 199 TP_PROTO(struct dwc3_ep *dep, struct dwc3_trb *trb),
185 TP_ARGS(dep, trb), 200 TP_ARGS(dep, trb),
186 TP_STRUCT__entry( 201 TP_STRUCT__entry(
187 __field(struct dwc3_ep *, dep) 202 __dynamic_array(char, name, DWC3_MSG_MAX)
188 __field(struct dwc3_trb *, trb) 203 __field(struct dwc3_trb *, trb)
204 __field(u32, bpl)
205 __field(u32, bph)
206 __field(u32, size)
207 __field(u32, ctrl)
189 ), 208 ),
190 TP_fast_assign( 209 TP_fast_assign(
191 __entry->dep = dep; 210 snprintf(__get_str(name), DWC3_MSG_MAX, "%s", dep->name);
192 __entry->trb = trb; 211 __entry->trb = trb;
212 __entry->bpl = trb->bpl;
213 __entry->bph = trb->bph;
214 __entry->size = trb->size;
215 __entry->ctrl = trb->ctrl;
193 ), 216 ),
194 TP_printk("%s: trb %p bph %08x bpl %08x size %08x ctrl %08x\n", 217 TP_printk("%s: trb %p bph %08x bpl %08x size %08x ctrl %08x\n",
195 __entry->dep->name, __entry->trb, __entry->trb->bph, 218 __get_str(name), __entry->trb, __entry->bph, __entry->bpl,
196 __entry->trb->bpl, __entry->trb->size, __entry->trb->ctrl 219 __entry->size, __entry->ctrl
197 ) 220 )
198); 221);
199 222
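The trace.h hunks above stop storing raw pointers in the trace ring buffer and instead copy the interesting fields (and snprintf the endpoint name) at the moment the event fires, since the pointed-to objects may be freed before the trace is read. A plain-C sketch of that copy-by-value logging idea; the record layout and log_request() below are illustrative and not the tracepoint API:

#include <stdio.h>

#define NAME_MAX_LEN	32

struct request {
	char *ep_name;
	unsigned int actual, length;
	int status;
};

struct trace_record {
	char name[NAME_MAX_LEN];	/* copied string, not a pointer into the request */
	unsigned int actual, length;
	int status;
};

static void log_request(struct trace_record *rec, const struct request *req)
{
	/* Snapshot everything now; req may be gone when the log is read. */
	snprintf(rec->name, sizeof(rec->name), "%s", req->ep_name);
	rec->actual = req->actual;
	rec->length = req->length;
	rec->status = req->status;
}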
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index a8c18df171c3..f6a51fddd5b5 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -560,7 +560,7 @@ static int bos_desc(struct usb_composite_dev *cdev)
560 usb_ext->bLength = USB_DT_USB_EXT_CAP_SIZE; 560 usb_ext->bLength = USB_DT_USB_EXT_CAP_SIZE;
561 usb_ext->bDescriptorType = USB_DT_DEVICE_CAPABILITY; 561 usb_ext->bDescriptorType = USB_DT_DEVICE_CAPABILITY;
562 usb_ext->bDevCapabilityType = USB_CAP_TYPE_EXT; 562 usb_ext->bDevCapabilityType = USB_CAP_TYPE_EXT;
563 usb_ext->bmAttributes = cpu_to_le32(USB_LPM_SUPPORT); 563 usb_ext->bmAttributes = cpu_to_le32(USB_LPM_SUPPORT | USB_BESL_SUPPORT);
564 564
565 /* 565 /*
566 * The Superspeed USB Capability descriptor shall be implemented by all 566 * The Superspeed USB Capability descriptor shall be implemented by all
diff --git a/drivers/usb/gadget/function/f_acm.c b/drivers/usb/gadget/function/f_acm.c
index 6da4685490ef..aad8165e98ef 100644
--- a/drivers/usb/gadget/function/f_acm.c
+++ b/drivers/usb/gadget/function/f_acm.c
@@ -433,12 +433,12 @@ static int acm_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
433 dev_vdbg(&cdev->gadget->dev, 433 dev_vdbg(&cdev->gadget->dev,
434 "reset acm control interface %d\n", intf); 434 "reset acm control interface %d\n", intf);
435 usb_ep_disable(acm->notify); 435 usb_ep_disable(acm->notify);
436 } else { 436 }
437 dev_vdbg(&cdev->gadget->dev, 437
438 "init acm ctrl interface %d\n", intf); 438 if (!acm->notify->desc)
439 if (config_ep_by_speed(cdev->gadget, f, acm->notify)) 439 if (config_ep_by_speed(cdev->gadget, f, acm->notify))
440 return -EINVAL; 440 return -EINVAL;
441 } 441
442 usb_ep_enable(acm->notify); 442 usb_ep_enable(acm->notify);
443 acm->notify->driver_data = acm; 443 acm->notify->driver_data = acm;
444 444
diff --git a/drivers/usb/gadget/function/f_eem.c b/drivers/usb/gadget/function/f_eem.c
index 4d8b236ea608..c9e90de5bdd9 100644
--- a/drivers/usb/gadget/function/f_eem.c
+++ b/drivers/usb/gadget/function/f_eem.c
@@ -325,7 +325,6 @@ static int eem_bind(struct usb_configuration *c, struct usb_function *f)
325 return 0; 325 return 0;
326 326
327fail: 327fail:
328 usb_free_all_descriptors(f);
329 if (eem->port.out_ep) 328 if (eem->port.out_ep)
330 eem->port.out_ep->driver_data = NULL; 329 eem->port.out_ep->driver_data = NULL;
331 if (eem->port.in_ep) 330 if (eem->port.in_ep)
diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c
index 7c6771d027a2..63314ede7ba6 100644
--- a/drivers/usb/gadget/function/f_fs.c
+++ b/drivers/usb/gadget/function/f_fs.c
@@ -647,15 +647,26 @@ static void ffs_user_copy_worker(struct work_struct *work)
647 if (io_data->read && ret > 0) { 647 if (io_data->read && ret > 0) {
648 int i; 648 int i;
649 size_t pos = 0; 649 size_t pos = 0;
650
651 /*
652 * Since req->length may be bigger than io_data->len (after
653 * being rounded up to maxpacketsize), we may end up with more
654 * data then user space has space for.
655 */
656 ret = min_t(int, ret, io_data->len);
657
650 use_mm(io_data->mm); 658 use_mm(io_data->mm);
651 for (i = 0; i < io_data->nr_segs; i++) { 659 for (i = 0; i < io_data->nr_segs; i++) {
660 size_t len = min_t(size_t, ret - pos,
661 io_data->iovec[i].iov_len);
662 if (!len)
663 break;
652 if (unlikely(copy_to_user(io_data->iovec[i].iov_base, 664 if (unlikely(copy_to_user(io_data->iovec[i].iov_base,
653 &io_data->buf[pos], 665 &io_data->buf[pos], len))) {
654 io_data->iovec[i].iov_len))) {
655 ret = -EFAULT; 666 ret = -EFAULT;
656 break; 667 break;
657 } 668 }
658 pos += io_data->iovec[i].iov_len; 669 pos += len;
659 } 670 }
660 unuse_mm(io_data->mm); 671 unuse_mm(io_data->mm);
661 } 672 }
@@ -687,7 +698,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
687 struct ffs_epfile *epfile = file->private_data; 698 struct ffs_epfile *epfile = file->private_data;
688 struct ffs_ep *ep; 699 struct ffs_ep *ep;
689 char *data = NULL; 700 char *data = NULL;
690 ssize_t ret, data_len; 701 ssize_t ret, data_len = -EINVAL;
691 int halt; 702 int halt;
692 703
693 /* Are we still active? */ 704 /* Are we still active? */
@@ -787,13 +798,30 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
787 /* Fire the request */ 798 /* Fire the request */
788 struct usb_request *req; 799 struct usb_request *req;
789 800
801 /*
802 * Sanity Check: even though data_len can't be used
803 * uninitialized at the time I write this comment, some
804 * compilers complain about this situation.
805 * In order to keep the code clean from warnings, data_len is
806 * being initialized to -EINVAL during its declaration, which
807 * means we can't rely on compiler anymore to warn no future
808 * changes won't result in data_len being used uninitialized.
809 * For such reason, we're adding this redundant sanity check
810 * here.
811 */
812 if (unlikely(data_len == -EINVAL)) {
813 WARN(1, "%s: data_len == -EINVAL\n", __func__);
814 ret = -EINVAL;
815 goto error_lock;
816 }
817
790 if (io_data->aio) { 818 if (io_data->aio) {
791 req = usb_ep_alloc_request(ep->ep, GFP_KERNEL); 819 req = usb_ep_alloc_request(ep->ep, GFP_KERNEL);
792 if (unlikely(!req)) 820 if (unlikely(!req))
793 goto error_lock; 821 goto error_lock;
794 822
795 req->buf = data; 823 req->buf = data;
796 req->length = io_data->len; 824 req->length = data_len;
797 825
798 io_data->buf = data; 826 io_data->buf = data;
799 io_data->ep = ep->ep; 827 io_data->ep = ep->ep;
@@ -815,7 +843,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
815 843
816 req = ep->req; 844 req = ep->req;
817 req->buf = data; 845 req->buf = data;
818 req->length = io_data->len; 846 req->length = data_len;
819 847
820 req->context = &done; 848 req->context = &done;
821 req->complete = ffs_epfile_io_complete; 849 req->complete = ffs_epfile_io_complete;
@@ -2663,8 +2691,6 @@ static inline struct f_fs_opts *ffs_do_functionfs_bind(struct usb_function *f,
2663 func->conf = c; 2691 func->conf = c;
2664 func->gadget = c->cdev->gadget; 2692 func->gadget = c->cdev->gadget;
2665 2693
2666 ffs_data_get(func->ffs);
2667
2668 /* 2694 /*
2669 * in drivers/usb/gadget/configfs.c:configfs_composite_bind() 2695 * in drivers/usb/gadget/configfs.c:configfs_composite_bind()
2670 * configurations are bound in sequence with list_for_each_entry, 2696 * configurations are bound in sequence with list_for_each_entry,
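The ffs_user_copy_worker() hunk above clamps the completed transfer length to what user space asked for and then walks the iovec, copying at most the remaining bytes into each segment. A stand-alone sketch of that clamping loop, with memcpy() standing in for copy_to_user() and a simplified iovec layout (names invented for illustration):

#include <stddef.h>
#include <string.h>

struct iovec_s {
	void *base;
	size_t len;
};

static size_t scatter_copy(struct iovec_s *iov, size_t nr_segs,
			   const char *buf, size_t buf_len, size_t user_len)
{
	size_t total = buf_len < user_len ? buf_len : user_len;	/* min(), as in the hunk */
	size_t pos = 0;
	size_t i;

	for (i = 0; i < nr_segs; i++) {
		size_t len = total - pos < iov[i].len ? total - pos : iov[i].len;

		if (!len)
			break;
		memcpy(iov[i].base, buf + pos, len);	/* copy_to_user() in the driver */
		pos += len;
	}
	return pos;
}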
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index a95290a1289f..59ab62c92b66 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -621,12 +621,14 @@ static int __init hidg_bind(struct usb_configuration *c, struct usb_function *f)
621 dev = MKDEV(major, hidg->minor); 621 dev = MKDEV(major, hidg->minor);
622 status = cdev_add(&hidg->cdev, dev, 1); 622 status = cdev_add(&hidg->cdev, dev, 1);
623 if (status) 623 if (status)
624 goto fail; 624 goto fail_free_descs;
625 625
626 device_create(hidg_class, NULL, dev, NULL, "%s%d", "hidg", hidg->minor); 626 device_create(hidg_class, NULL, dev, NULL, "%s%d", "hidg", hidg->minor);
627 627
628 return 0; 628 return 0;
629 629
630fail_free_descs:
631 usb_free_all_descriptors(f);
630fail: 632fail:
631 ERROR(f->config->cdev, "hidg_bind FAILED\n"); 633 ERROR(f->config->cdev, "hidg_bind FAILED\n");
632 if (hidg->req != NULL) { 634 if (hidg->req != NULL) {
@@ -635,7 +637,6 @@ fail:
635 usb_ep_free_request(hidg->in_ep, hidg->req); 637 usb_ep_free_request(hidg->in_ep, hidg->req);
636 } 638 }
637 639
638 usb_free_all_descriptors(f);
639 return status; 640 return status;
640} 641}
641 642
diff --git a/drivers/usb/gadget/function/f_loopback.c b/drivers/usb/gadget/function/f_loopback.c
index bf04389137e6..298b46112b1a 100644
--- a/drivers/usb/gadget/function/f_loopback.c
+++ b/drivers/usb/gadget/function/f_loopback.c
@@ -253,22 +253,13 @@ static void loopback_complete(struct usb_ep *ep, struct usb_request *req)
253 253
254 case 0: /* normal completion? */ 254 case 0: /* normal completion? */
255 if (ep == loop->out_ep) { 255 if (ep == loop->out_ep) {
256 /* loop this OUT packet back IN to the host */
257 req->zero = (req->actual < req->length); 256 req->zero = (req->actual < req->length);
258 req->length = req->actual; 257 req->length = req->actual;
259 status = usb_ep_queue(loop->in_ep, req, GFP_ATOMIC);
260 if (status == 0)
261 return;
262
263 /* "should never get here" */
264 ERROR(cdev, "can't loop %s to %s: %d\n",
265 ep->name, loop->in_ep->name,
266 status);
267 } 258 }
268 259
269 /* queue the buffer for some later OUT packet */ 260 /* queue the buffer for some later OUT packet */
270 req->length = buflen; 261 req->length = buflen;
271 status = usb_ep_queue(loop->out_ep, req, GFP_ATOMIC); 262 status = usb_ep_queue(ep, req, GFP_ATOMIC);
272 if (status == 0) 263 if (status == 0)
273 return; 264 return;
274 265
@@ -308,60 +299,66 @@ static inline struct usb_request *lb_alloc_ep_req(struct usb_ep *ep, int len)
308 return alloc_ep_req(ep, len, buflen); 299 return alloc_ep_req(ep, len, buflen);
309} 300}
310 301
311static int 302static int enable_endpoint(struct usb_composite_dev *cdev, struct f_loopback *loop,
312enable_loopback(struct usb_composite_dev *cdev, struct f_loopback *loop) 303 struct usb_ep *ep)
313{ 304{
314 int result = 0;
315 struct usb_ep *ep;
316 struct usb_request *req; 305 struct usb_request *req;
317 unsigned i; 306 unsigned i;
307 int result;
318 308
319 /* one endpoint writes data back IN to the host */ 309 /*
320 ep = loop->in_ep; 310 * one endpoint writes data back IN to the host while another endpoint
311 * just reads OUT packets
312 */
321 result = config_ep_by_speed(cdev->gadget, &(loop->function), ep); 313 result = config_ep_by_speed(cdev->gadget, &(loop->function), ep);
322 if (result) 314 if (result)
323 return result; 315 goto fail0;
324 result = usb_ep_enable(ep); 316 result = usb_ep_enable(ep);
325 if (result < 0) 317 if (result < 0)
326 return result;
327 ep->driver_data = loop;
328
329 /* one endpoint just reads OUT packets */
330 ep = loop->out_ep;
331 result = config_ep_by_speed(cdev->gadget, &(loop->function), ep);
332 if (result)
333 goto fail0; 318 goto fail0;
334
335 result = usb_ep_enable(ep);
336 if (result < 0) {
337fail0:
338 ep = loop->in_ep;
339 usb_ep_disable(ep);
340 ep->driver_data = NULL;
341 return result;
342 }
343 ep->driver_data = loop; 319 ep->driver_data = loop;
344 320
345 /* allocate a bunch of read buffers and queue them all at once. 321 /*
322 * allocate a bunch of read buffers and queue them all at once.
346 * we buffer at most 'qlen' transfers; fewer if any need more 323 * we buffer at most 'qlen' transfers; fewer if any need more
347 * than 'buflen' bytes each. 324 * than 'buflen' bytes each.
348 */ 325 */
349 for (i = 0; i < qlen && result == 0; i++) { 326 for (i = 0; i < qlen && result == 0; i++) {
350 req = lb_alloc_ep_req(ep, 0); 327 req = lb_alloc_ep_req(ep, 0);
351 if (req) { 328 if (!req)
352 req->complete = loopback_complete; 329 goto fail1;
353 result = usb_ep_queue(ep, req, GFP_ATOMIC); 330
354 if (result) 331 req->complete = loopback_complete;
355 ERROR(cdev, "%s queue req --> %d\n", 332 result = usb_ep_queue(ep, req, GFP_ATOMIC);
356 ep->name, result); 333 if (result) {
357 } else { 334 ERROR(cdev, "%s queue req --> %d\n",
358 usb_ep_disable(ep); 335 ep->name, result);
359 ep->driver_data = NULL; 336 goto fail1;
360 result = -ENOMEM;
361 goto fail0;
362 } 337 }
363 } 338 }
364 339
340 return 0;
341
342fail1:
343 usb_ep_disable(ep);
344
345fail0:
346 return result;
347}
348
349static int
350enable_loopback(struct usb_composite_dev *cdev, struct f_loopback *loop)
351{
352 int result = 0;
353
354 result = enable_endpoint(cdev, loop, loop->in_ep);
355 if (result)
356 return result;
357
358 result = enable_endpoint(cdev, loop, loop->out_ep);
359 if (result)
360 return result;
361
365 DBG(cdev, "%s enabled\n", loop->function.name); 362 DBG(cdev, "%s enabled\n", loop->function.name);
366 return result; 363 return result;
367} 364}
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index 146f48cc65d7..16361b0a8b46 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -1461,7 +1461,6 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
1461 return 0; 1461 return 0;
1462 1462
1463fail: 1463fail:
1464 usb_free_all_descriptors(f);
1465 if (ncm->notify_req) { 1464 if (ncm->notify_req) {
1466 kfree(ncm->notify_req->buf); 1465 kfree(ncm->notify_req->buf);
1467 usb_ep_free_request(ncm->notify, ncm->notify_req); 1466 usb_ep_free_request(ncm->notify, ncm->notify_req);
diff --git a/drivers/usb/gadget/function/f_obex.c b/drivers/usb/gadget/function/f_obex.c
index 5f40080c92cc..a1b79c53499c 100644
--- a/drivers/usb/gadget/function/f_obex.c
+++ b/drivers/usb/gadget/function/f_obex.c
@@ -35,6 +35,7 @@ struct f_obex {
35 struct gserial port; 35 struct gserial port;
36 u8 ctrl_id; 36 u8 ctrl_id;
37 u8 data_id; 37 u8 data_id;
38 u8 cur_alt;
38 u8 port_num; 39 u8 port_num;
39 u8 can_activate; 40 u8 can_activate;
40}; 41};
@@ -235,6 +236,8 @@ static int obex_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
235 } else 236 } else
236 goto fail; 237 goto fail;
237 238
239 obex->cur_alt = alt;
240
238 return 0; 241 return 0;
239 242
240fail: 243fail:
@@ -245,10 +248,7 @@ static int obex_get_alt(struct usb_function *f, unsigned intf)
245{ 248{
246 struct f_obex *obex = func_to_obex(f); 249 struct f_obex *obex = func_to_obex(f);
247 250
248 if (intf == obex->ctrl_id) 251 return obex->cur_alt;
249 return 0;
250
251 return obex->port.in->driver_data ? 1 : 0;
252} 252}
253 253
254static void obex_disable(struct usb_function *f) 254static void obex_disable(struct usb_function *f)
@@ -397,7 +397,6 @@ static int obex_bind(struct usb_configuration *c, struct usb_function *f)
397 return 0; 397 return 0;
398 398
399fail: 399fail:
400 usb_free_all_descriptors(f);
401 /* we might as well release our claims on endpoints */ 400 /* we might as well release our claims on endpoints */
402 if (obex->port.out) 401 if (obex->port.out)
403 obex->port.out->driver_data = NULL; 402 obex->port.out->driver_data = NULL;
diff --git a/drivers/usb/gadget/function/f_phonet.c b/drivers/usb/gadget/function/f_phonet.c
index b9cfc1571d71..1ec8b7ffdccd 100644
--- a/drivers/usb/gadget/function/f_phonet.c
+++ b/drivers/usb/gadget/function/f_phonet.c
@@ -570,8 +570,8 @@ static int pn_bind(struct usb_configuration *c, struct usb_function *f)
570err_req: 570err_req:
571 for (i = 0; i < phonet_rxq_size && fp->out_reqv[i]; i++) 571 for (i = 0; i < phonet_rxq_size && fp->out_reqv[i]; i++)
572 usb_ep_free_request(fp->out_ep, fp->out_reqv[i]); 572 usb_ep_free_request(fp->out_ep, fp->out_reqv[i]);
573err:
574 usb_free_all_descriptors(f); 573 usb_free_all_descriptors(f);
574err:
575 if (fp->out_ep) 575 if (fp->out_ep)
576 fp->out_ep->driver_data = NULL; 576 fp->out_ep->driver_data = NULL;
577 if (fp->in_ep) 577 if (fp->in_ep)
diff --git a/drivers/usb/gadget/function/f_rndis.c b/drivers/usb/gadget/function/f_rndis.c
index ddb09dc6d1f2..f13fc6a58565 100644
--- a/drivers/usb/gadget/function/f_rndis.c
+++ b/drivers/usb/gadget/function/f_rndis.c
@@ -802,8 +802,10 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
802 802
803 if (rndis->manufacturer && rndis->vendorID && 803 if (rndis->manufacturer && rndis->vendorID &&
804 rndis_set_param_vendor(rndis->config, rndis->vendorID, 804 rndis_set_param_vendor(rndis->config, rndis->vendorID,
805 rndis->manufacturer)) 805 rndis->manufacturer)) {
806 goto fail; 806 status = -EINVAL;
807 goto fail_free_descs;
808 }
807 809
808 /* NOTE: all that is done without knowing or caring about 810 /* NOTE: all that is done without knowing or caring about
809 * the network link ... which is unavailable to this code 811 * the network link ... which is unavailable to this code
@@ -817,10 +819,11 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
817 rndis->notify->name); 819 rndis->notify->name);
818 return 0; 820 return 0;
819 821
822fail_free_descs:
823 usb_free_all_descriptors(f);
820fail: 824fail:
821 kfree(f->os_desc_table); 825 kfree(f->os_desc_table);
822 f->os_desc_n = 0; 826 f->os_desc_n = 0;
823 usb_free_all_descriptors(f);
824 827
825 if (rndis->notify_req) { 828 if (rndis->notify_req) {
826 kfree(rndis->notify_req->buf); 829 kfree(rndis->notify_req->buf);
diff --git a/drivers/usb/gadget/function/f_subset.c b/drivers/usb/gadget/function/f_subset.c
index 1ea8baf33333..e3dfa675ff06 100644
--- a/drivers/usb/gadget/function/f_subset.c
+++ b/drivers/usb/gadget/function/f_subset.c
@@ -380,7 +380,6 @@ geth_bind(struct usb_configuration *c, struct usb_function *f)
380 return 0; 380 return 0;
381 381
382fail: 382fail:
383 usb_free_all_descriptors(f);
384 /* we might as well release our claims on endpoints */ 383 /* we might as well release our claims on endpoints */
385 if (geth->port.out_ep) 384 if (geth->port.out_ep)
386 geth->port.out_ep->driver_data = NULL; 385 geth->port.out_ep->driver_data = NULL;
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index a5a27a504d67..33e16658e5cf 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -512,6 +512,11 @@ static int snd_uac2_remove(struct platform_device *pdev)
512 return 0; 512 return 0;
513} 513}
514 514
515static void snd_uac2_release(struct device *dev)
516{
517 dev_dbg(dev, "releasing '%s'\n", dev_name(dev));
518}
519
515static int alsa_uac2_init(struct audio_dev *agdev) 520static int alsa_uac2_init(struct audio_dev *agdev)
516{ 521{
517 struct snd_uac2_chip *uac2 = &agdev->uac2; 522 struct snd_uac2_chip *uac2 = &agdev->uac2;
@@ -523,6 +528,7 @@ static int alsa_uac2_init(struct audio_dev *agdev)
523 528
524 uac2->pdev.id = 0; 529 uac2->pdev.id = 0;
525 uac2->pdev.name = uac2_name; 530 uac2->pdev.name = uac2_name;
531 uac2->pdev.dev.release = snd_uac2_release;
526 532
527 /* Register snd_uac2 driver */ 533 /* Register snd_uac2 driver */
528 err = platform_driver_register(&uac2->pdrv); 534 err = platform_driver_register(&uac2->pdrv);
@@ -772,6 +778,7 @@ struct usb_endpoint_descriptor fs_epout_desc = {
772 778
773 .bEndpointAddress = USB_DIR_OUT, 779 .bEndpointAddress = USB_DIR_OUT,
774 .bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC, 780 .bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
781 .wMaxPacketSize = cpu_to_le16(1023),
775 .bInterval = 1, 782 .bInterval = 1,
776}; 783};
777 784
@@ -780,6 +787,7 @@ struct usb_endpoint_descriptor hs_epout_desc = {
780 .bDescriptorType = USB_DT_ENDPOINT, 787 .bDescriptorType = USB_DT_ENDPOINT,
781 788
782 .bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC, 789 .bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
790 .wMaxPacketSize = cpu_to_le16(1024),
783 .bInterval = 4, 791 .bInterval = 4,
784}; 792};
785 793
@@ -847,6 +855,7 @@ struct usb_endpoint_descriptor fs_epin_desc = {
847 855
848 .bEndpointAddress = USB_DIR_IN, 856 .bEndpointAddress = USB_DIR_IN,
849 .bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC, 857 .bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
858 .wMaxPacketSize = cpu_to_le16(1023),
850 .bInterval = 1, 859 .bInterval = 1,
851}; 860};
852 861
@@ -855,6 +864,7 @@ struct usb_endpoint_descriptor hs_epin_desc = {
855 .bDescriptorType = USB_DT_ENDPOINT, 864 .bDescriptorType = USB_DT_ENDPOINT,
856 865
857 .bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC, 866 .bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
867 .wMaxPacketSize = cpu_to_le16(1024),
858 .bInterval = 4, 868 .bInterval = 4,
859}; 869};
860 870
@@ -947,6 +957,9 @@ free_ep(struct uac2_rtd_params *prm, struct usb_ep *ep)
947 struct snd_uac2_chip *uac2 = prm->uac2; 957 struct snd_uac2_chip *uac2 = prm->uac2;
948 int i; 958 int i;
949 959
960 if (!prm->ep_enabled)
961 return;
962
950 prm->ep_enabled = false; 963 prm->ep_enabled = false;
951 964
952 for (i = 0; i < USB_XFERS; i++) { 965 for (i = 0; i < USB_XFERS; i++) {
@@ -1071,7 +1084,7 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
1071 prm->rbuf = kzalloc(prm->max_psize * USB_XFERS, GFP_KERNEL); 1084 prm->rbuf = kzalloc(prm->max_psize * USB_XFERS, GFP_KERNEL);
1072 if (!prm->rbuf) { 1085 if (!prm->rbuf) {
1073 prm->max_psize = 0; 1086 prm->max_psize = 0;
1074 goto err; 1087 goto err_free_descs;
1075 } 1088 }
1076 1089
1077 prm = &agdev->uac2.p_prm; 1090 prm = &agdev->uac2.p_prm;
@@ -1079,17 +1092,19 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
1079 prm->rbuf = kzalloc(prm->max_psize * USB_XFERS, GFP_KERNEL); 1092 prm->rbuf = kzalloc(prm->max_psize * USB_XFERS, GFP_KERNEL);
1080 if (!prm->rbuf) { 1093 if (!prm->rbuf) {
1081 prm->max_psize = 0; 1094 prm->max_psize = 0;
1082 goto err; 1095 goto err_free_descs;
1083 } 1096 }
1084 1097
1085 ret = alsa_uac2_init(agdev); 1098 ret = alsa_uac2_init(agdev);
1086 if (ret) 1099 if (ret)
1087 goto err; 1100 goto err_free_descs;
1088 return 0; 1101 return 0;
1102
1103err_free_descs:
1104 usb_free_all_descriptors(fn);
1089err: 1105err:
1090 kfree(agdev->uac2.p_prm.rbuf); 1106 kfree(agdev->uac2.p_prm.rbuf);
1091 kfree(agdev->uac2.c_prm.rbuf); 1107 kfree(agdev->uac2.c_prm.rbuf);
1092 usb_free_all_descriptors(fn);
1093 if (agdev->in_ep) 1108 if (agdev->in_ep)
1094 agdev->in_ep->driver_data = NULL; 1109 agdev->in_ep->driver_data = NULL;
1095 if (agdev->out_ep) 1110 if (agdev->out_ep)
diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c
index e126439e4b65..945b3bd2ca98 100644
--- a/drivers/usb/gadget/function/f_uvc.c
+++ b/drivers/usb/gadget/function/f_uvc.c
@@ -279,27 +279,41 @@ uvc_function_get_alt(struct usb_function *f, unsigned interface)
279 else if (interface != uvc->streaming_intf) 279 else if (interface != uvc->streaming_intf)
280 return -EINVAL; 280 return -EINVAL;
281 else 281 else
282 return uvc->state == UVC_STATE_STREAMING ? 1 : 0; 282 return uvc->video.ep->driver_data ? 1 : 0;
283} 283}
284 284
285static int 285static int
286uvc_function_set_alt(struct usb_function *f, unsigned interface, unsigned alt) 286uvc_function_set_alt(struct usb_function *f, unsigned interface, unsigned alt)
287{ 287{
288 struct uvc_device *uvc = to_uvc(f); 288 struct uvc_device *uvc = to_uvc(f);
289 struct usb_composite_dev *cdev = f->config->cdev;
289 struct v4l2_event v4l2_event; 290 struct v4l2_event v4l2_event;
290 struct uvc_event *uvc_event = (void *)&v4l2_event.u.data; 291 struct uvc_event *uvc_event = (void *)&v4l2_event.u.data;
291 int ret; 292 int ret;
292 293
293 INFO(f->config->cdev, "uvc_function_set_alt(%u, %u)\n", interface, alt); 294 INFO(cdev, "uvc_function_set_alt(%u, %u)\n", interface, alt);
294 295
295 if (interface == uvc->control_intf) { 296 if (interface == uvc->control_intf) {
296 if (alt) 297 if (alt)
297 return -EINVAL; 298 return -EINVAL;
298 299
300 if (uvc->control_ep->driver_data) {
301 INFO(cdev, "reset UVC Control\n");
302 usb_ep_disable(uvc->control_ep);
303 uvc->control_ep->driver_data = NULL;
304 }
305
306 if (!uvc->control_ep->desc)
307 if (config_ep_by_speed(cdev->gadget, f, uvc->control_ep))
308 return -EINVAL;
309
310 usb_ep_enable(uvc->control_ep);
311 uvc->control_ep->driver_data = uvc;
312
299 if (uvc->state == UVC_STATE_DISCONNECTED) { 313 if (uvc->state == UVC_STATE_DISCONNECTED) {
300 memset(&v4l2_event, 0, sizeof(v4l2_event)); 314 memset(&v4l2_event, 0, sizeof(v4l2_event));
301 v4l2_event.type = UVC_EVENT_CONNECT; 315 v4l2_event.type = UVC_EVENT_CONNECT;
302 uvc_event->speed = f->config->cdev->gadget->speed; 316 uvc_event->speed = cdev->gadget->speed;
303 v4l2_event_queue(uvc->vdev, &v4l2_event); 317 v4l2_event_queue(uvc->vdev, &v4l2_event);
304 318
305 uvc->state = UVC_STATE_CONNECTED; 319 uvc->state = UVC_STATE_CONNECTED;
@@ -321,8 +335,10 @@ uvc_function_set_alt(struct usb_function *f, unsigned interface, unsigned alt)
321 if (uvc->state != UVC_STATE_STREAMING) 335 if (uvc->state != UVC_STATE_STREAMING)
322 return 0; 336 return 0;
323 337
324 if (uvc->video.ep) 338 if (uvc->video.ep) {
325 usb_ep_disable(uvc->video.ep); 339 usb_ep_disable(uvc->video.ep);
340 uvc->video.ep->driver_data = NULL;
341 }
326 342
327 memset(&v4l2_event, 0, sizeof(v4l2_event)); 343 memset(&v4l2_event, 0, sizeof(v4l2_event));
328 v4l2_event.type = UVC_EVENT_STREAMOFF; 344 v4l2_event.type = UVC_EVENT_STREAMOFF;
@@ -335,14 +351,22 @@ uvc_function_set_alt(struct usb_function *f, unsigned interface, unsigned alt)
335 if (uvc->state != UVC_STATE_CONNECTED) 351 if (uvc->state != UVC_STATE_CONNECTED)
336 return 0; 352 return 0;
337 353
338 if (uvc->video.ep) { 354 if (!uvc->video.ep)
339 ret = config_ep_by_speed(f->config->cdev->gadget, 355 return -EINVAL;
340 &(uvc->func), uvc->video.ep); 356
341 if (ret) 357 if (uvc->video.ep->driver_data) {
342 return ret; 358 INFO(cdev, "reset UVC\n");
343 usb_ep_enable(uvc->video.ep); 359 usb_ep_disable(uvc->video.ep);
360 uvc->video.ep->driver_data = NULL;
344 } 361 }
345 362
363 ret = config_ep_by_speed(f->config->cdev->gadget,
364 &(uvc->func), uvc->video.ep);
365 if (ret)
366 return ret;
367 usb_ep_enable(uvc->video.ep);
368 uvc->video.ep->driver_data = uvc;
369
346 memset(&v4l2_event, 0, sizeof(v4l2_event)); 370 memset(&v4l2_event, 0, sizeof(v4l2_event));
347 v4l2_event.type = UVC_EVENT_STREAMON; 371 v4l2_event.type = UVC_EVENT_STREAMON;
348 v4l2_event_queue(uvc->vdev, &v4l2_event); 372 v4l2_event_queue(uvc->vdev, &v4l2_event);
@@ -366,6 +390,16 @@ uvc_function_disable(struct usb_function *f)
366 v4l2_event_queue(uvc->vdev, &v4l2_event); 390 v4l2_event_queue(uvc->vdev, &v4l2_event);
367 391
368 uvc->state = UVC_STATE_DISCONNECTED; 392 uvc->state = UVC_STATE_DISCONNECTED;
393
394 if (uvc->video.ep->driver_data) {
395 usb_ep_disable(uvc->video.ep);
396 uvc->video.ep->driver_data = NULL;
397 }
398
399 if (uvc->control_ep->driver_data) {
400 usb_ep_disable(uvc->control_ep);
401 uvc->control_ep->driver_data = NULL;
402 }
369} 403}
370 404
371/* -------------------------------------------------------------------------- 405/* --------------------------------------------------------------------------
diff --git a/drivers/usb/gadget/function/uvc_video.c b/drivers/usb/gadget/function/uvc_video.c
index c3e1f27dbbef..9cb86bc1a9a5 100644
--- a/drivers/usb/gadget/function/uvc_video.c
+++ b/drivers/usb/gadget/function/uvc_video.c
@@ -352,7 +352,8 @@ int uvcg_video_enable(struct uvc_video *video, int enable)
352 352
353 if (!enable) { 353 if (!enable) {
354 for (i = 0; i < UVC_NUM_REQUESTS; ++i) 354 for (i = 0; i < UVC_NUM_REQUESTS; ++i)
355 usb_ep_dequeue(video->ep, video->req[i]); 355 if (video->req[i])
356 usb_ep_dequeue(video->ep, video->req[i]);
356 357
357 uvc_video_free_requests(video); 358 uvc_video_free_requests(video);
358 uvcg_queue_enable(&video->queue, 0); 359 uvcg_queue_enable(&video->queue, 0);
diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig
index 3ea287b0e448..217365d35a25 100644
--- a/drivers/usb/gadget/udc/Kconfig
+++ b/drivers/usb/gadget/udc/Kconfig
@@ -357,6 +357,7 @@ config USB_EG20T
357 357
358config USB_GADGET_XILINX 358config USB_GADGET_XILINX
359 tristate "Xilinx USB Driver" 359 tristate "Xilinx USB Driver"
360 depends on HAS_DMA
360 depends on OF || COMPILE_TEST 361 depends on OF || COMPILE_TEST
361 help 362 help
362 USB peripheral controller driver for Xilinx USB2 device. 363 USB peripheral controller driver for Xilinx USB2 device.
diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c
index f107bb60a5ab..f2054659f25b 100644
--- a/drivers/usb/gadget/udc/udc-core.c
+++ b/drivers/usb/gadget/udc/udc-core.c
@@ -507,6 +507,11 @@ static ssize_t usb_udc_softconn_store(struct device *dev,
507{ 507{
508 struct usb_udc *udc = container_of(dev, struct usb_udc, dev); 508 struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
509 509
510 if (!udc->driver) {
511 dev_err(dev, "soft-connect without a gadget driver\n");
512 return -EOPNOTSUPP;
513 }
514
510 if (sysfs_streq(buf, "connect")) { 515 if (sysfs_streq(buf, "connect")) {
511 usb_gadget_udc_start(udc->gadget, udc->driver); 516 usb_gadget_udc_start(udc->gadget, udc->driver);
512 usb_gadget_connect(udc->gadget); 517 usb_gadget_connect(udc->gadget);
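The udc-core hunk above is a plain guard: a soft-connect request is refused until a gadget driver is bound, instead of calling usb_gadget_udc_start() with a NULL driver. A condensed, hedged restatement (only the connect branch shown; the attribute is normally exposed as soft_connect under /sys/class/udc/<name>/, which is an assumption about the surrounding code rather than part of this hunk):

	/* Hedged sketch of the guarded store path */
	if (!udc->driver) {
		dev_err(dev, "soft-connect without a gadget driver\n");
		return -EOPNOTSUPP;		/* nothing to start or connect yet */
	}

	if (sysfs_streq(buf, "connect")) {
		usb_gadget_udc_start(udc->gadget, udc->driver);
		usb_gadget_connect(udc->gadget);
	}
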
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index a8a30b1d4167..a3ca1375dd52 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -234,7 +234,7 @@ config USB_EHCI_SH
234 234
235config USB_EHCI_EXYNOS 235config USB_EHCI_EXYNOS
236 tristate "EHCI support for Samsung S5P/EXYNOS SoC Series" 236 tristate "EHCI support for Samsung S5P/EXYNOS SoC Series"
237 depends on PLAT_S5P || ARCH_EXYNOS 237 depends on ARCH_S5PV210 || ARCH_EXYNOS
238 help 238 help
239 Enable support for the Samsung Exynos SOC's on-chip EHCI controller. 239 Enable support for the Samsung Exynos SOC's on-chip EHCI controller.
240 240
@@ -550,7 +550,7 @@ config USB_OHCI_SH
550 550
551config USB_OHCI_EXYNOS 551config USB_OHCI_EXYNOS
552 tristate "OHCI support for Samsung S5P/EXYNOS SoC Series" 552 tristate "OHCI support for Samsung S5P/EXYNOS SoC Series"
553 depends on PLAT_S5P || ARCH_EXYNOS 553 depends on ARCH_S5PV210 || ARCH_EXYNOS
554 help 554 help
555 Enable support for the Samsung Exynos SOC's on-chip OHCI controller. 555 Enable support for the Samsung Exynos SOC's on-chip OHCI controller.
556 556
diff --git a/drivers/usb/host/hwa-hc.c b/drivers/usb/host/hwa-hc.c
index d0d8fadf7066..1db0626c8bf4 100644
--- a/drivers/usb/host/hwa-hc.c
+++ b/drivers/usb/host/hwa-hc.c
@@ -607,7 +607,7 @@ found:
607 wa->wa_descr = wa_descr = (struct usb_wa_descriptor *) hdr; 607 wa->wa_descr = wa_descr = (struct usb_wa_descriptor *) hdr;
608 if (le16_to_cpu(wa_descr->bcdWAVersion) > 0x0100) 608 if (le16_to_cpu(wa_descr->bcdWAVersion) > 0x0100)
609 dev_warn(dev, "Wire Adapter v%d.%d newer than groked v1.0\n", 609 dev_warn(dev, "Wire Adapter v%d.%d newer than groked v1.0\n",
610 le16_to_cpu(wa_descr->bcdWAVersion) & 0xff00 >> 8, 610 (le16_to_cpu(wa_descr->bcdWAVersion) & 0xff00) >> 8,
611 le16_to_cpu(wa_descr->bcdWAVersion) & 0x00ff); 611 le16_to_cpu(wa_descr->bcdWAVersion) & 0x00ff);
612 result = 0; 612 result = 0;
613error: 613error:
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 696160d48ae8..388cfd83b6b6 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -22,7 +22,6 @@
22 22
23 23
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/device.h>
26#include <asm/unaligned.h> 25#include <asm/unaligned.h>
27 26
28#include "xhci.h" 27#include "xhci.h"
@@ -1149,9 +1148,7 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
1149 * including the USB 3.0 roothub, but only if CONFIG_PM_RUNTIME 1148 * including the USB 3.0 roothub, but only if CONFIG_PM_RUNTIME
1150 * is enabled, so also enable remote wake here. 1149 * is enabled, so also enable remote wake here.
1151 */ 1150 */
1152 if (hcd->self.root_hub->do_remote_wakeup 1151 if (hcd->self.root_hub->do_remote_wakeup) {
1153 && device_may_wakeup(hcd->self.controller)) {
1154
1155 if (t1 & PORT_CONNECT) { 1152 if (t1 & PORT_CONNECT) {
1156 t2 |= PORT_WKOC_E | PORT_WKDISC_E; 1153 t2 |= PORT_WKOC_E | PORT_WKDISC_E;
1157 t2 &= ~PORT_WKCONN_E; 1154 t2 &= ~PORT_WKCONN_E;
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 280dde93abe5..142b601f9563 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -128,20 +128,6 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
128 xhci->quirks |= XHCI_AVOID_BEI; 128 xhci->quirks |= XHCI_AVOID_BEI;
129 } 129 }
130 if (pdev->vendor == PCI_VENDOR_ID_INTEL && 130 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
131 (pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_XHCI ||
132 pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI)) {
133 /* Workaround for occasional spurious wakeups from S5 (or
134 * any other sleep) on Haswell machines with LPT and LPT-LP
135 * with the new Intel BIOS
136 */
137 /* Limit the quirk to only known vendors, as this triggers
138 * yet another BIOS bug on some other machines
139 * https://bugzilla.kernel.org/show_bug.cgi?id=66171
140 */
141 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)
142 xhci->quirks |= XHCI_SPURIOUS_WAKEUP;
143 }
144 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
145 pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) { 131 pdev->device == PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_XHCI) {
146 xhci->quirks |= XHCI_SPURIOUS_REBOOT; 132 xhci->quirks |= XHCI_SPURIOUS_REBOOT;
147 } 133 }
@@ -162,6 +148,10 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
162 pdev->device == 0x3432) 148 pdev->device == 0x3432)
163 xhci->quirks |= XHCI_BROKEN_STREAMS; 149 xhci->quirks |= XHCI_BROKEN_STREAMS;
164 150
151 if (pdev->vendor == PCI_VENDOR_ID_ASMEDIA &&
152 pdev->device == 0x1042)
153 xhci->quirks |= XHCI_BROKEN_STREAMS;
154
165 if (xhci->quirks & XHCI_RESET_ON_RESUME) 155 if (xhci->quirks & XHCI_RESET_ON_RESUME)
166 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks, 156 xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
167 "QUIRK: Resetting on resume"); 157 "QUIRK: Resetting on resume");
@@ -291,7 +281,7 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
291 if (xhci->quirks & XHCI_COMP_MODE_QUIRK) 281 if (xhci->quirks & XHCI_COMP_MODE_QUIRK)
292 pdev->no_d3cold = true; 282 pdev->no_d3cold = true;
293 283
294 return xhci_suspend(xhci); 284 return xhci_suspend(xhci, do_wakeup);
295} 285}
296 286
297static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated) 287static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 3d78b0cd674b..646300cbe5f7 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -204,7 +204,15 @@ static int xhci_plat_suspend(struct device *dev)
204 struct usb_hcd *hcd = dev_get_drvdata(dev); 204 struct usb_hcd *hcd = dev_get_drvdata(dev);
205 struct xhci_hcd *xhci = hcd_to_xhci(hcd); 205 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
206 206
207 return xhci_suspend(xhci); 207 /*
208 * xhci_suspend() needs `do_wakeup` to know whether host is allowed
209 * to do wakeup during suspend. Since xhci_plat_suspend is currently
210 * only designed for system suspend, device_may_wakeup() is enough
 211 * to determine whether the host is allowed to do wakeup. Need to
 212 * reconsider this when xhci_plat_suspend enlarges its scope, e.g. when
 213 * it also applies to runtime suspend.
214 */
215 return xhci_suspend(xhci, device_may_wakeup(dev));
208} 216}
209 217
210static int xhci_plat_resume(struct device *dev) 218static int xhci_plat_resume(struct device *dev)
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index bc6fcbc16f61..06433aec81d7 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1067,9 +1067,8 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
1067 false); 1067 false);
1068 xhci_ring_cmd_db(xhci); 1068 xhci_ring_cmd_db(xhci);
1069 } else { 1069 } else {
1070 /* Clear our internal halted state and restart the ring(s) */ 1070 /* Clear our internal halted state */
1071 xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED; 1071 xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
1072 ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
1073 } 1072 }
1074} 1073}
1075 1074
@@ -1823,22 +1822,13 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
1823 ep->stopped_td = td; 1822 ep->stopped_td = td;
1824 return 0; 1823 return 0;
1825 } else { 1824 } else {
1826 if (trb_comp_code == COMP_STALL) { 1825 if (trb_comp_code == COMP_STALL ||
1827 /* The transfer is completed from the driver's 1826 xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
1828 * perspective, but we need to issue a set dequeue 1827 trb_comp_code)) {
1829 * command for this stalled endpoint to move the dequeue 1828 /* Issue a reset endpoint command to clear the host side
1830 * pointer past the TD. We can't do that here because 1829 * halt, followed by a set dequeue command to move the
1831 * the halt condition must be cleared first. Let the 1830 * dequeue pointer past the TD.
1832 * USB class driver clear the stall later. 1831 * The class driver clears the device side halt later.
1833 */
1834 ep->stopped_td = td;
1835 ep->stopped_stream = ep_ring->stream_id;
1836 } else if (xhci_requires_manual_halt_cleanup(xhci,
1837 ep_ctx, trb_comp_code)) {
1838 /* Other types of errors halt the endpoint, but the
1839 * class driver doesn't call usb_reset_endpoint() unless
1840 * the error is -EPIPE. Clear the halted status in the
1841 * xHCI hardware manually.
1842 */ 1832 */
1843 xhci_cleanup_halted_endpoint(xhci, 1833 xhci_cleanup_halted_endpoint(xhci,
1844 slot_id, ep_index, ep_ring->stream_id, 1834 slot_id, ep_index, ep_ring->stream_id,
@@ -1958,9 +1948,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
1958 else 1948 else
1959 td->urb->actual_length = 0; 1949 td->urb->actual_length = 0;
1960 1950
1961 xhci_cleanup_halted_endpoint(xhci, 1951 return finish_td(xhci, td, event_trb, event, ep, status, false);
1962 slot_id, ep_index, 0, td, event_trb);
1963 return finish_td(xhci, td, event_trb, event, ep, status, true);
1964 } 1952 }
1965 /* 1953 /*
1966 * Did we transfer any data, despite the errors that might have 1954 * Did we transfer any data, despite the errors that might have
@@ -2519,17 +2507,8 @@ cleanup:
2519 if (ret) { 2507 if (ret) {
2520 urb = td->urb; 2508 urb = td->urb;
2521 urb_priv = urb->hcpriv; 2509 urb_priv = urb->hcpriv;
2522 /* Leave the TD around for the reset endpoint function 2510
2523 * to use(but only if it's not a control endpoint, 2511 xhci_urb_free_priv(xhci, urb_priv);
2524 * since we already queued the Set TR dequeue pointer
2525 * command for stalled control endpoints).
2526 */
2527 if (usb_endpoint_xfer_control(&urb->ep->desc) ||
2528 (trb_comp_code != COMP_STALL &&
2529 trb_comp_code != COMP_BABBLE))
2530 xhci_urb_free_priv(xhci, urb_priv);
2531 else
2532 kfree(urb_priv);
2533 2512
2534 usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb); 2513 usb_hcd_unlink_urb_from_ep(bus_to_hcd(urb->dev->bus), urb);
2535 if ((urb->actual_length != urb->transfer_buffer_length && 2514 if ((urb->actual_length != urb->transfer_buffer_length &&
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 2a5d45b4cb15..033b46c470bd 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -35,6 +35,8 @@
35#define DRIVER_AUTHOR "Sarah Sharp" 35#define DRIVER_AUTHOR "Sarah Sharp"
36#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver" 36#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
37 37
38#define PORT_WAKE_BITS (PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E)
39
38/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */ 40/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
39static int link_quirk; 41static int link_quirk;
40module_param(link_quirk, int, S_IRUGO | S_IWUSR); 42module_param(link_quirk, int, S_IRUGO | S_IWUSR);
@@ -851,13 +853,47 @@ static void xhci_clear_command_ring(struct xhci_hcd *xhci)
851 xhci_set_cmd_ring_deq(xhci); 853 xhci_set_cmd_ring_deq(xhci);
852} 854}
853 855
856static void xhci_disable_port_wake_on_bits(struct xhci_hcd *xhci)
857{
858 int port_index;
859 __le32 __iomem **port_array;
860 unsigned long flags;
861 u32 t1, t2;
862
863 spin_lock_irqsave(&xhci->lock, flags);
864
 865 /* disable usb3 ports wake bits */
866 port_index = xhci->num_usb3_ports;
867 port_array = xhci->usb3_ports;
868 while (port_index--) {
869 t1 = readl(port_array[port_index]);
870 t1 = xhci_port_state_to_neutral(t1);
871 t2 = t1 & ~PORT_WAKE_BITS;
872 if (t1 != t2)
873 writel(t2, port_array[port_index]);
874 }
875
 876 /* disable usb2 ports wake bits */
877 port_index = xhci->num_usb2_ports;
878 port_array = xhci->usb2_ports;
879 while (port_index--) {
880 t1 = readl(port_array[port_index]);
881 t1 = xhci_port_state_to_neutral(t1);
882 t2 = t1 & ~PORT_WAKE_BITS;
883 if (t1 != t2)
884 writel(t2, port_array[port_index]);
885 }
886
887 spin_unlock_irqrestore(&xhci->lock, flags);
888}
889
854/* 890/*
855 * Stop HC (not bus-specific) 891 * Stop HC (not bus-specific)
856 * 892 *
 857 * This is called when the machine transitions into S3/S4 mode. 893 * This is called when the machine transitions into S3/S4 mode.
858 * 894 *
859 */ 895 */
860int xhci_suspend(struct xhci_hcd *xhci) 896int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
861{ 897{
862 int rc = 0; 898 int rc = 0;
863 unsigned int delay = XHCI_MAX_HALT_USEC; 899 unsigned int delay = XHCI_MAX_HALT_USEC;
@@ -868,6 +904,10 @@ int xhci_suspend(struct xhci_hcd *xhci)
868 xhci->shared_hcd->state != HC_STATE_SUSPENDED) 904 xhci->shared_hcd->state != HC_STATE_SUSPENDED)
869 return -EINVAL; 905 return -EINVAL;
870 906
907 /* Clear root port wake on bits if wakeup not allowed. */
908 if (!do_wakeup)
909 xhci_disable_port_wake_on_bits(xhci);
910
871 /* Don't poll the roothubs on bus suspend. */ 911 /* Don't poll the roothubs on bus suspend. */
872 xhci_dbg(xhci, "%s: stopping port polling.\n", __func__); 912 xhci_dbg(xhci, "%s: stopping port polling.\n", __func__);
873 clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); 913 clear_bit(HCD_FLAG_POLL_RH, &hcd->flags);
@@ -2912,68 +2952,33 @@ void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
2912 } 2952 }
2913} 2953}
2914 2954
2915/* Deal with stalled endpoints. The core should have sent the control message 2955/* Called when clearing halted device. The core should have sent the control
2916 * to clear the halt condition. However, we need to make the xHCI hardware 2956 * message to clear the device halt condition. The host side of the halt should
2917 * reset its sequence number, since a device will expect a sequence number of 2957 * already be cleared with a reset endpoint command issued when the STALL tx
2918 * zero after the halt condition is cleared. 2958 * event was received.
2959 *
2919 * Context: in_interrupt 2960 * Context: in_interrupt
2920 */ 2961 */
2962
2921void xhci_endpoint_reset(struct usb_hcd *hcd, 2963void xhci_endpoint_reset(struct usb_hcd *hcd,
2922 struct usb_host_endpoint *ep) 2964 struct usb_host_endpoint *ep)
2923{ 2965{
2924 struct xhci_hcd *xhci; 2966 struct xhci_hcd *xhci;
2925 struct usb_device *udev;
2926 unsigned int ep_index;
2927 unsigned long flags;
2928 int ret;
2929 struct xhci_virt_ep *virt_ep;
2930 struct xhci_command *command;
2931 2967
2932 xhci = hcd_to_xhci(hcd); 2968 xhci = hcd_to_xhci(hcd);
2933 udev = (struct usb_device *) ep->hcpriv;
2934 /* Called with a root hub endpoint (or an endpoint that wasn't added
2935 * with xhci_add_endpoint()
2936 */
2937 if (!ep->hcpriv)
2938 return;
2939 ep_index = xhci_get_endpoint_index(&ep->desc);
2940 virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index];
2941 if (!virt_ep->stopped_td) {
2942 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
2943 "Endpoint 0x%x not halted, refusing to reset.",
2944 ep->desc.bEndpointAddress);
2945 return;
2946 }
2947 if (usb_endpoint_xfer_control(&ep->desc)) {
2948 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
2949 "Control endpoint stall already handled.");
2950 return;
2951 }
2952 2969
2953 command = xhci_alloc_command(xhci, false, false, GFP_ATOMIC);
2954 if (!command)
2955 return;
2956
2957 xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
2958 "Queueing reset endpoint command");
2959 spin_lock_irqsave(&xhci->lock, flags);
2960 ret = xhci_queue_reset_ep(xhci, command, udev->slot_id, ep_index);
2961 /* 2970 /*
2962 * Can't change the ring dequeue pointer until it's transitioned to the 2971 * We might need to implement the config ep cmd in xhci 4.8.1 note:
2963 * stopped state, which is only upon a successful reset endpoint 2972 * The Reset Endpoint Command may only be issued to endpoints in the
 2964 * command. Better hope that last command worked! 2973 * Halted state. If software wishes to reset the Data Toggle or Sequence
2974 * Number of an endpoint that isn't in the Halted state, then software
2975 * may issue a Configure Endpoint Command with the Drop and Add bits set
 2976 * for the target endpoint that is in the Stopped state.
2965 */ 2977 */
2966 if (!ret) {
2967 xhci_cleanup_stalled_ring(xhci, udev, ep_index);
2968 kfree(virt_ep->stopped_td);
2969 xhci_ring_cmd_db(xhci);
2970 }
2971 virt_ep->stopped_td = NULL;
2972 virt_ep->stopped_stream = 0;
2973 spin_unlock_irqrestore(&xhci->lock, flags);
2974 2978
2975 if (ret) 2979 /* For now just print debug to follow the situation */
2976 xhci_warn(xhci, "FIXME allocate a new ring segment\n"); 2980 xhci_dbg(xhci, "Endpoint 0x%x ep reset callback called\n",
2981 ep->desc.bEndpointAddress);
2977} 2982}
2978 2983
2979static int xhci_check_streams_endpoint(struct xhci_hcd *xhci, 2984static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index df76d642e719..d745715a1e2f 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1746,7 +1746,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks);
1746void xhci_init_driver(struct hc_driver *drv, int (*setup_fn)(struct usb_hcd *)); 1746void xhci_init_driver(struct hc_driver *drv, int (*setup_fn)(struct usb_hcd *));
1747 1747
1748#ifdef CONFIG_PM 1748#ifdef CONFIG_PM
1749int xhci_suspend(struct xhci_hcd *xhci); 1749int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup);
1750int xhci_resume(struct xhci_hcd *xhci, bool hibernated); 1750int xhci_resume(struct xhci_hcd *xhci, bool hibernated);
1751#else 1751#else
1752#define xhci_suspend NULL 1752#define xhci_suspend NULL
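Taken together, the xhci hunks change the suspend entry point from xhci_suspend(xhci) to xhci_suspend(xhci, do_wakeup): the PCI glue passes its do_wakeup argument through, the platform glue derives it from device_may_wakeup(), and xhci_suspend() clears the per-port wake bits when wakeup is not allowed. A hedged sketch of a caller under the new signature (the wrapper name is hypothetical):

	#include <linux/pm_wakeup.h>

	/* Hypothetical platform-glue system-suspend callback after the API change */
	static int my_xhci_plat_suspend(struct device *dev)
	{
		struct usb_hcd *hcd = dev_get_drvdata(dev);
		struct xhci_hcd *xhci = hcd_to_xhci(hcd);

		/*
		 * device_may_wakeup() reflects the user's power/wakeup setting;
		 * when it is false, xhci_suspend() now clears PORT_WKOC_E,
		 * PORT_WKDISC_E and PORT_WKCONN_E on every root port before
		 * halting the controller.
		 */
		return xhci_suspend(xhci, device_may_wakeup(dev));
	}
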
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
index acdfb3e68a90..5a9b977fbc19 100644
--- a/drivers/usb/musb/musb_cppi41.c
+++ b/drivers/usb/musb/musb_cppi41.c
@@ -209,7 +209,8 @@ static enum hrtimer_restart cppi41_recheck_tx_req(struct hrtimer *timer)
209 } 209 }
210 } 210 }
211 211
212 if (!list_empty(&controller->early_tx_list)) { 212 if (!list_empty(&controller->early_tx_list) &&
213 !hrtimer_is_queued(&controller->early_tx)) {
213 ret = HRTIMER_RESTART; 214 ret = HRTIMER_RESTART;
214 hrtimer_forward_now(&controller->early_tx, 215 hrtimer_forward_now(&controller->early_tx,
215 ktime_set(0, 20 * NSEC_PER_USEC)); 216 ktime_set(0, 20 * NSEC_PER_USEC));
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index 154bcf1b5dfa..48bc09e7b83b 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -868,9 +868,15 @@ static int dsps_suspend(struct device *dev)
868 struct dsps_glue *glue = dev_get_drvdata(dev); 868 struct dsps_glue *glue = dev_get_drvdata(dev);
869 const struct dsps_musb_wrapper *wrp = glue->wrp; 869 const struct dsps_musb_wrapper *wrp = glue->wrp;
870 struct musb *musb = platform_get_drvdata(glue->musb); 870 struct musb *musb = platform_get_drvdata(glue->musb);
871 void __iomem *mbase = musb->ctrl_base; 871 void __iomem *mbase;
872 872
873 del_timer_sync(&glue->timer); 873 del_timer_sync(&glue->timer);
874
875 if (!musb)
876 /* This can happen if the musb device is in -EPROBE_DEFER */
877 return 0;
878
879 mbase = musb->ctrl_base;
874 glue->context.control = dsps_readl(mbase, wrp->control); 880 glue->context.control = dsps_readl(mbase, wrp->control);
875 glue->context.epintr = dsps_readl(mbase, wrp->epintr_set); 881 glue->context.epintr = dsps_readl(mbase, wrp->epintr_set);
876 glue->context.coreintr = dsps_readl(mbase, wrp->coreintr_set); 882 glue->context.coreintr = dsps_readl(mbase, wrp->coreintr_set);
@@ -887,8 +893,12 @@ static int dsps_resume(struct device *dev)
887 struct dsps_glue *glue = dev_get_drvdata(dev); 893 struct dsps_glue *glue = dev_get_drvdata(dev);
888 const struct dsps_musb_wrapper *wrp = glue->wrp; 894 const struct dsps_musb_wrapper *wrp = glue->wrp;
889 struct musb *musb = platform_get_drvdata(glue->musb); 895 struct musb *musb = platform_get_drvdata(glue->musb);
890 void __iomem *mbase = musb->ctrl_base; 896 void __iomem *mbase;
897
898 if (!musb)
899 return 0;
891 900
901 mbase = musb->ctrl_base;
892 dsps_writel(mbase, wrp->control, glue->context.control); 902 dsps_writel(mbase, wrp->control, glue->context.control);
893 dsps_writel(mbase, wrp->epintr_set, glue->context.epintr); 903 dsps_writel(mbase, wrp->epintr_set, glue->context.epintr);
894 dsps_writel(mbase, wrp->coreintr_set, glue->context.coreintr); 904 dsps_writel(mbase, wrp->coreintr_set, glue->context.coreintr);
@@ -896,7 +906,9 @@ static int dsps_resume(struct device *dev)
896 dsps_writel(mbase, wrp->mode, glue->context.mode); 906 dsps_writel(mbase, wrp->mode, glue->context.mode);
897 dsps_writel(mbase, wrp->tx_mode, glue->context.tx_mode); 907 dsps_writel(mbase, wrp->tx_mode, glue->context.tx_mode);
898 dsps_writel(mbase, wrp->rx_mode, glue->context.rx_mode); 908 dsps_writel(mbase, wrp->rx_mode, glue->context.rx_mode);
899 setup_timer(&glue->timer, otg_timer, (unsigned long) musb); 909 if (musb->xceiv->state == OTG_STATE_B_IDLE &&
910 musb->port_mode == MUSB_PORT_MODE_DUAL_ROLE)
911 mod_timer(&glue->timer, jiffies + wrp->poll_seconds * HZ);
900 912
901 return 0; 913 return 0;
902} 914}
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index eca1747ca8c7..6c4eb3cf5efd 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -120,6 +120,7 @@ static const struct usb_device_id id_table[] = {
120 { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */ 120 { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */
121 { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */ 121 { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */
122 { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */ 122 { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */
123 { USB_DEVICE(0x10C4, 0x8875) }, /* CEL MeshConnect USB Stick */
123 { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */ 124 { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */
124 { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */ 125 { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
125 { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */ 126 { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
@@ -155,6 +156,7 @@ static const struct usb_device_id id_table[] = {
155 { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ 156 { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
156 { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ 157 { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
157 { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */ 158 { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
159 { USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */
158 { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */ 160 { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
159 { USB_DEVICE(0x1D6F, 0x0010) }, /* Seluxit ApS RF Dongle */ 161 { USB_DEVICE(0x1D6F, 0x0010) }, /* Seluxit ApS RF Dongle */
160 { USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */ 162 { USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index dc72b924c399..1ebb351b9e9a 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -140,6 +140,7 @@ static struct ftdi_sio_quirk ftdi_8u2232c_quirk = {
140 * /sys/bus/usb-serial/drivers/ftdi_sio/new_id and send a patch or report. 140 * /sys/bus/usb-serial/drivers/ftdi_sio/new_id and send a patch or report.
141 */ 141 */
142static const struct usb_device_id id_table_combined[] = { 142static const struct usb_device_id id_table_combined[] = {
143 { USB_DEVICE(FTDI_VID, FTDI_BRICK_PID) },
143 { USB_DEVICE(FTDI_VID, FTDI_ZEITCONTROL_TAGTRACE_MIFARE_PID) }, 144 { USB_DEVICE(FTDI_VID, FTDI_ZEITCONTROL_TAGTRACE_MIFARE_PID) },
144 { USB_DEVICE(FTDI_VID, FTDI_CTI_MINI_PID) }, 145 { USB_DEVICE(FTDI_VID, FTDI_CTI_MINI_PID) },
145 { USB_DEVICE(FTDI_VID, FTDI_CTI_NANO_PID) }, 146 { USB_DEVICE(FTDI_VID, FTDI_CTI_NANO_PID) },
@@ -469,6 +470,39 @@ static const struct usb_device_id id_table_combined[] = {
469 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FD_PID) }, 470 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FD_PID) },
470 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FE_PID) }, 471 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FE_PID) },
471 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FF_PID) }, 472 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_01FF_PID) },
473 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_4701_PID) },
474 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9300_PID) },
475 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9301_PID) },
476 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9302_PID) },
477 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9303_PID) },
478 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9304_PID) },
479 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9305_PID) },
480 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9306_PID) },
481 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9307_PID) },
482 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9308_PID) },
483 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9309_PID) },
484 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930A_PID) },
485 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930B_PID) },
486 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930C_PID) },
487 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930D_PID) },
488 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930E_PID) },
489 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_930F_PID) },
490 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9310_PID) },
491 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9311_PID) },
492 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9312_PID) },
493 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9313_PID) },
494 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9314_PID) },
495 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9315_PID) },
496 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9316_PID) },
497 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9317_PID) },
498 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9318_PID) },
499 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_9319_PID) },
500 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931A_PID) },
501 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931B_PID) },
502 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931C_PID) },
503 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931D_PID) },
504 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931E_PID) },
505 { USB_DEVICE(MTXORB_VID, MTXORB_FTDI_RANGE_931F_PID) },
472 { USB_DEVICE(FTDI_VID, FTDI_PERLE_ULTRAPORT_PID) }, 506 { USB_DEVICE(FTDI_VID, FTDI_PERLE_ULTRAPORT_PID) },
473 { USB_DEVICE(FTDI_VID, FTDI_PIEGROUP_PID) }, 507 { USB_DEVICE(FTDI_VID, FTDI_PIEGROUP_PID) },
474 { USB_DEVICE(FTDI_VID, FTDI_TNC_X_PID) }, 508 { USB_DEVICE(FTDI_VID, FTDI_TNC_X_PID) },
@@ -661,6 +695,8 @@ static const struct usb_device_id id_table_combined[] = {
661 { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_5_PID) }, 695 { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_5_PID) },
662 { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_6_PID) }, 696 { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_6_PID) },
663 { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_7_PID) }, 697 { USB_DEVICE(FTDI_VID, XSENS_CONVERTER_7_PID) },
698 { USB_DEVICE(XSENS_VID, XSENS_AWINDA_DONGLE_PID) },
699 { USB_DEVICE(XSENS_VID, XSENS_AWINDA_STATION_PID) },
664 { USB_DEVICE(XSENS_VID, XSENS_CONVERTER_PID) }, 700 { USB_DEVICE(XSENS_VID, XSENS_CONVERTER_PID) },
665 { USB_DEVICE(XSENS_VID, XSENS_MTW_PID) }, 701 { USB_DEVICE(XSENS_VID, XSENS_MTW_PID) },
666 { USB_DEVICE(FTDI_VID, FTDI_OMNI1509) }, 702 { USB_DEVICE(FTDI_VID, FTDI_OMNI1509) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 5937b2d242f2..e52409c9be99 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -30,6 +30,12 @@
30 30
31/*** third-party PIDs (using FTDI_VID) ***/ 31/*** third-party PIDs (using FTDI_VID) ***/
32 32
33/*
34 * Certain versions of the official Windows FTDI driver reprogrammed
35 * counterfeit FTDI devices to PID 0. Support these devices anyway.
36 */
37#define FTDI_BRICK_PID 0x0000
38
33#define FTDI_LUMEL_PD12_PID 0x6002 39#define FTDI_LUMEL_PD12_PID 0x6002
34 40
35/* 41/*
@@ -143,8 +149,12 @@
143 * Xsens Technologies BV products (http://www.xsens.com). 149 * Xsens Technologies BV products (http://www.xsens.com).
144 */ 150 */
145#define XSENS_VID 0x2639 151#define XSENS_VID 0x2639
146#define XSENS_CONVERTER_PID 0xD00D /* Xsens USB-serial converter */ 152#define XSENS_AWINDA_STATION_PID 0x0101
153#define XSENS_AWINDA_DONGLE_PID 0x0102
147#define XSENS_MTW_PID 0x0200 /* Xsens MTw */ 154#define XSENS_MTW_PID 0x0200 /* Xsens MTw */
155#define XSENS_CONVERTER_PID 0xD00D /* Xsens USB-serial converter */
156
157/* Xsens devices using FTDI VID */
148#define XSENS_CONVERTER_0_PID 0xD388 /* Xsens USB converter */ 158#define XSENS_CONVERTER_0_PID 0xD388 /* Xsens USB converter */
149#define XSENS_CONVERTER_1_PID 0xD389 /* Xsens Wireless Receiver */ 159#define XSENS_CONVERTER_1_PID 0xD389 /* Xsens Wireless Receiver */
150#define XSENS_CONVERTER_2_PID 0xD38A 160#define XSENS_CONVERTER_2_PID 0xD38A
@@ -916,8 +926,8 @@
916#define BAYER_CONTOUR_CABLE_PID 0x6001 926#define BAYER_CONTOUR_CABLE_PID 0x6001
917 927
918/* 928/*
919 * The following are the values for the Matrix Orbital FTDI Range 929 * Matrix Orbital Intelligent USB displays.
920 * Anything in this range will use an FT232RL. 930 * http://www.matrixorbital.com
921 */ 931 */
922#define MTXORB_VID 0x1B3D 932#define MTXORB_VID 0x1B3D
923#define MTXORB_FTDI_RANGE_0100_PID 0x0100 933#define MTXORB_FTDI_RANGE_0100_PID 0x0100
@@ -1176,8 +1186,39 @@
1176#define MTXORB_FTDI_RANGE_01FD_PID 0x01FD 1186#define MTXORB_FTDI_RANGE_01FD_PID 0x01FD
1177#define MTXORB_FTDI_RANGE_01FE_PID 0x01FE 1187#define MTXORB_FTDI_RANGE_01FE_PID 0x01FE
1178#define MTXORB_FTDI_RANGE_01FF_PID 0x01FF 1188#define MTXORB_FTDI_RANGE_01FF_PID 0x01FF
1179 1189#define MTXORB_FTDI_RANGE_4701_PID 0x4701
1180 1190#define MTXORB_FTDI_RANGE_9300_PID 0x9300
1191#define MTXORB_FTDI_RANGE_9301_PID 0x9301
1192#define MTXORB_FTDI_RANGE_9302_PID 0x9302
1193#define MTXORB_FTDI_RANGE_9303_PID 0x9303
1194#define MTXORB_FTDI_RANGE_9304_PID 0x9304
1195#define MTXORB_FTDI_RANGE_9305_PID 0x9305
1196#define MTXORB_FTDI_RANGE_9306_PID 0x9306
1197#define MTXORB_FTDI_RANGE_9307_PID 0x9307
1198#define MTXORB_FTDI_RANGE_9308_PID 0x9308
1199#define MTXORB_FTDI_RANGE_9309_PID 0x9309
1200#define MTXORB_FTDI_RANGE_930A_PID 0x930A
1201#define MTXORB_FTDI_RANGE_930B_PID 0x930B
1202#define MTXORB_FTDI_RANGE_930C_PID 0x930C
1203#define MTXORB_FTDI_RANGE_930D_PID 0x930D
1204#define MTXORB_FTDI_RANGE_930E_PID 0x930E
1205#define MTXORB_FTDI_RANGE_930F_PID 0x930F
1206#define MTXORB_FTDI_RANGE_9310_PID 0x9310
1207#define MTXORB_FTDI_RANGE_9311_PID 0x9311
1208#define MTXORB_FTDI_RANGE_9312_PID 0x9312
1209#define MTXORB_FTDI_RANGE_9313_PID 0x9313
1210#define MTXORB_FTDI_RANGE_9314_PID 0x9314
1211#define MTXORB_FTDI_RANGE_9315_PID 0x9315
1212#define MTXORB_FTDI_RANGE_9316_PID 0x9316
1213#define MTXORB_FTDI_RANGE_9317_PID 0x9317
1214#define MTXORB_FTDI_RANGE_9318_PID 0x9318
1215#define MTXORB_FTDI_RANGE_9319_PID 0x9319
1216#define MTXORB_FTDI_RANGE_931A_PID 0x931A
1217#define MTXORB_FTDI_RANGE_931B_PID 0x931B
1218#define MTXORB_FTDI_RANGE_931C_PID 0x931C
1219#define MTXORB_FTDI_RANGE_931D_PID 0x931D
1220#define MTXORB_FTDI_RANGE_931E_PID 0x931E
1221#define MTXORB_FTDI_RANGE_931F_PID 0x931F
1181 1222
1182/* 1223/*
1183 * The Mobility Lab (TML) 1224 * The Mobility Lab (TML)
diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c
index 93cb7cebda62..077c714f1285 100644
--- a/drivers/usb/serial/keyspan.c
+++ b/drivers/usb/serial/keyspan.c
@@ -311,24 +311,30 @@ static void usa26_indat_callback(struct urb *urb)
311 if ((data[0] & 0x80) == 0) { 311 if ((data[0] & 0x80) == 0) {
312 /* no errors on individual bytes, only 312 /* no errors on individual bytes, only
313 possible overrun err */ 313 possible overrun err */
314 if (data[0] & RXERROR_OVERRUN) 314 if (data[0] & RXERROR_OVERRUN) {
315 err = TTY_OVERRUN; 315 tty_insert_flip_char(&port->port, 0,
316 else 316 TTY_OVERRUN);
317 err = 0; 317 }
318 for (i = 1; i < urb->actual_length ; ++i) 318 for (i = 1; i < urb->actual_length ; ++i)
319 tty_insert_flip_char(&port->port, data[i], err); 319 tty_insert_flip_char(&port->port, data[i],
320 TTY_NORMAL);
320 } else { 321 } else {
321 /* some bytes had errors, every byte has status */ 322 /* some bytes had errors, every byte has status */
322 dev_dbg(&port->dev, "%s - RX error!!!!\n", __func__); 323 dev_dbg(&port->dev, "%s - RX error!!!!\n", __func__);
323 for (i = 0; i + 1 < urb->actual_length; i += 2) { 324 for (i = 0; i + 1 < urb->actual_length; i += 2) {
324 int stat = data[i], flag = 0; 325 int stat = data[i];
325 if (stat & RXERROR_OVERRUN) 326 int flag = TTY_NORMAL;
326 flag |= TTY_OVERRUN; 327
327 if (stat & RXERROR_FRAMING) 328 if (stat & RXERROR_OVERRUN) {
328 flag |= TTY_FRAME; 329 tty_insert_flip_char(&port->port, 0,
329 if (stat & RXERROR_PARITY) 330 TTY_OVERRUN);
330 flag |= TTY_PARITY; 331 }
331 /* XXX should handle break (0x10) */ 332 /* XXX should handle break (0x10) */
333 if (stat & RXERROR_PARITY)
334 flag = TTY_PARITY;
335 else if (stat & RXERROR_FRAMING)
336 flag = TTY_FRAME;
337
332 tty_insert_flip_char(&port->port, data[i+1], 338 tty_insert_flip_char(&port->port, data[i+1],
333 flag); 339 flag);
334 } 340 }
@@ -649,14 +655,19 @@ static void usa49_indat_callback(struct urb *urb)
649 } else { 655 } else {
650 /* some bytes had errors, every byte has status */ 656 /* some bytes had errors, every byte has status */
651 for (i = 0; i + 1 < urb->actual_length; i += 2) { 657 for (i = 0; i + 1 < urb->actual_length; i += 2) {
652 int stat = data[i], flag = 0; 658 int stat = data[i];
653 if (stat & RXERROR_OVERRUN) 659 int flag = TTY_NORMAL;
654 flag |= TTY_OVERRUN; 660
655 if (stat & RXERROR_FRAMING) 661 if (stat & RXERROR_OVERRUN) {
656 flag |= TTY_FRAME; 662 tty_insert_flip_char(&port->port, 0,
657 if (stat & RXERROR_PARITY) 663 TTY_OVERRUN);
658 flag |= TTY_PARITY; 664 }
659 /* XXX should handle break (0x10) */ 665 /* XXX should handle break (0x10) */
666 if (stat & RXERROR_PARITY)
667 flag = TTY_PARITY;
668 else if (stat & RXERROR_FRAMING)
669 flag = TTY_FRAME;
670
660 tty_insert_flip_char(&port->port, data[i+1], 671 tty_insert_flip_char(&port->port, data[i+1],
661 flag); 672 flag);
662 } 673 }
@@ -713,15 +724,19 @@ static void usa49wg_indat_callback(struct urb *urb)
713 */ 724 */
714 for (x = 0; x + 1 < len && 725 for (x = 0; x + 1 < len &&
715 i + 1 < urb->actual_length; x += 2) { 726 i + 1 < urb->actual_length; x += 2) {
716 int stat = data[i], flag = 0; 727 int stat = data[i];
728 int flag = TTY_NORMAL;
717 729
718 if (stat & RXERROR_OVERRUN) 730 if (stat & RXERROR_OVERRUN) {
719 flag |= TTY_OVERRUN; 731 tty_insert_flip_char(&port->port, 0,
720 if (stat & RXERROR_FRAMING) 732 TTY_OVERRUN);
721 flag |= TTY_FRAME; 733 }
722 if (stat & RXERROR_PARITY)
723 flag |= TTY_PARITY;
724 /* XXX should handle break (0x10) */ 734 /* XXX should handle break (0x10) */
735 if (stat & RXERROR_PARITY)
736 flag = TTY_PARITY;
737 else if (stat & RXERROR_FRAMING)
738 flag = TTY_FRAME;
739
725 tty_insert_flip_char(&port->port, data[i+1], 740 tty_insert_flip_char(&port->port, data[i+1],
726 flag); 741 flag);
727 i += 2; 742 i += 2;
@@ -773,25 +788,31 @@ static void usa90_indat_callback(struct urb *urb)
773 if ((data[0] & 0x80) == 0) { 788 if ((data[0] & 0x80) == 0) {
774 /* no errors on individual bytes, only 789 /* no errors on individual bytes, only
775 possible overrun err*/ 790 possible overrun err*/
776 if (data[0] & RXERROR_OVERRUN) 791 if (data[0] & RXERROR_OVERRUN) {
777 err = TTY_OVERRUN; 792 tty_insert_flip_char(&port->port, 0,
778 else 793 TTY_OVERRUN);
779 err = 0; 794 }
780 for (i = 1; i < urb->actual_length ; ++i) 795 for (i = 1; i < urb->actual_length ; ++i)
781 tty_insert_flip_char(&port->port, 796 tty_insert_flip_char(&port->port,
782 data[i], err); 797 data[i], TTY_NORMAL);
783 } else { 798 } else {
784 /* some bytes had errors, every byte has status */ 799 /* some bytes had errors, every byte has status */
785 dev_dbg(&port->dev, "%s - RX error!!!!\n", __func__); 800 dev_dbg(&port->dev, "%s - RX error!!!!\n", __func__);
786 for (i = 0; i + 1 < urb->actual_length; i += 2) { 801 for (i = 0; i + 1 < urb->actual_length; i += 2) {
787 int stat = data[i], flag = 0; 802 int stat = data[i];
788 if (stat & RXERROR_OVERRUN) 803 int flag = TTY_NORMAL;
789 flag |= TTY_OVERRUN; 804
790 if (stat & RXERROR_FRAMING) 805 if (stat & RXERROR_OVERRUN) {
791 flag |= TTY_FRAME; 806 tty_insert_flip_char(
792 if (stat & RXERROR_PARITY) 807 &port->port, 0,
793 flag |= TTY_PARITY; 808 TTY_OVERRUN);
809 }
794 /* XXX should handle break (0x10) */ 810 /* XXX should handle break (0x10) */
811 if (stat & RXERROR_PARITY)
812 flag = TTY_PARITY;
813 else if (stat & RXERROR_FRAMING)
814 flag = TTY_FRAME;
815
795 tty_insert_flip_char(&port->port, 816 tty_insert_flip_char(&port->port,
796 data[i+1], flag); 817 data[i+1], flag);
797 } 818 }
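All four keyspan callbacks converge on one reporting convention: an overrun is a line condition rather than an error on the received byte, so it is pushed to the tty layer as a separate NUL character flagged TTY_OVERRUN, while the data byte itself carries at most one flag, with parity taking precedence over framing. A condensed sketch of that convention; the helper name is hypothetical:

	/* Hedged sketch: push one received byte together with its status flags */
	static void my_push_byte(struct usb_serial_port *port, u8 stat, u8 ch)
	{
		int flag = TTY_NORMAL;

		if (stat & RXERROR_OVERRUN)	/* overrun is not tied to this byte */
			tty_insert_flip_char(&port->port, 0, TTY_OVERRUN);

		if (stat & RXERROR_PARITY)
			flag = TTY_PARITY;
		else if (stat & RXERROR_FRAMING)
			flag = TTY_FRAME;

		tty_insert_flip_char(&port->port, ch, flag);
	}
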
diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
index 078f9ed419c8..02c420af251e 100644
--- a/drivers/usb/serial/kobil_sct.c
+++ b/drivers/usb/serial/kobil_sct.c
@@ -335,7 +335,8 @@ static int kobil_write(struct tty_struct *tty, struct usb_serial_port *port,
335 port->interrupt_out_urb->transfer_buffer_length = length; 335 port->interrupt_out_urb->transfer_buffer_length = length;
336 336
337 priv->cur_pos = priv->cur_pos + length; 337 priv->cur_pos = priv->cur_pos + length;
338 result = usb_submit_urb(port->interrupt_out_urb, GFP_NOIO); 338 result = usb_submit_urb(port->interrupt_out_urb,
339 GFP_ATOMIC);
339 dev_dbg(&port->dev, "%s - Send write URB returns: %i\n", __func__, result); 340 dev_dbg(&port->dev, "%s - Send write URB returns: %i\n", __func__, result);
340 todo = priv->filled - priv->cur_pos; 341 todo = priv->filled - priv->cur_pos;
341 342
@@ -350,7 +351,7 @@ static int kobil_write(struct tty_struct *tty, struct usb_serial_port *port,
350 if (priv->device_type == KOBIL_ADAPTER_B_PRODUCT_ID || 351 if (priv->device_type == KOBIL_ADAPTER_B_PRODUCT_ID ||
351 priv->device_type == KOBIL_ADAPTER_K_PRODUCT_ID) { 352 priv->device_type == KOBIL_ADAPTER_K_PRODUCT_ID) {
352 result = usb_submit_urb(port->interrupt_in_urb, 353 result = usb_submit_urb(port->interrupt_in_urb,
353 GFP_NOIO); 354 GFP_ATOMIC);
354 dev_dbg(&port->dev, "%s - Send read URB returns: %i\n", __func__, result); 355 dev_dbg(&port->dev, "%s - Send read URB returns: %i\n", __func__, result);
355 } 356 }
356 } 357 }
@@ -414,8 +415,6 @@ static int kobil_tiocmset(struct tty_struct *tty,
414 int result; 415 int result;
415 int dtr = 0; 416 int dtr = 0;
416 int rts = 0; 417 int rts = 0;
417 unsigned char *transfer_buffer;
418 int transfer_buffer_length = 8;
419 418
420 /* FIXME: locking ? */ 419 /* FIXME: locking ? */
421 priv = usb_get_serial_port_data(port); 420 priv = usb_get_serial_port_data(port);
@@ -425,11 +424,6 @@ static int kobil_tiocmset(struct tty_struct *tty,
425 return -EINVAL; 424 return -EINVAL;
426 } 425 }
427 426
428 /* allocate memory for transfer buffer */
429 transfer_buffer = kzalloc(transfer_buffer_length, GFP_KERNEL);
430 if (!transfer_buffer)
431 return -ENOMEM;
432
433 if (set & TIOCM_RTS) 427 if (set & TIOCM_RTS)
434 rts = 1; 428 rts = 1;
435 if (set & TIOCM_DTR) 429 if (set & TIOCM_DTR)
@@ -469,7 +463,6 @@ static int kobil_tiocmset(struct tty_struct *tty,
469 KOBIL_TIMEOUT); 463 KOBIL_TIMEOUT);
470 } 464 }
471 dev_dbg(dev, "%s - Send set_status_line URB returns: %i\n", __func__, result); 465 dev_dbg(dev, "%s - Send set_status_line URB returns: %i\n", __func__, result);
472 kfree(transfer_buffer);
473 return (result < 0) ? result : 0; 466 return (result < 0) ? result : 0;
474} 467}
475 468
@@ -530,8 +523,6 @@ static int kobil_ioctl(struct tty_struct *tty,
530{ 523{
531 struct usb_serial_port *port = tty->driver_data; 524 struct usb_serial_port *port = tty->driver_data;
532 struct kobil_private *priv = usb_get_serial_port_data(port); 525 struct kobil_private *priv = usb_get_serial_port_data(port);
533 unsigned char *transfer_buffer;
534 int transfer_buffer_length = 8;
535 int result; 526 int result;
536 527
537 if (priv->device_type == KOBIL_USBTWIN_PRODUCT_ID || 528 if (priv->device_type == KOBIL_USBTWIN_PRODUCT_ID ||
@@ -541,10 +532,6 @@ static int kobil_ioctl(struct tty_struct *tty,
541 532
542 switch (cmd) { 533 switch (cmd) {
543 case TCFLSH: 534 case TCFLSH:
544 transfer_buffer = kmalloc(transfer_buffer_length, GFP_KERNEL);
545 if (!transfer_buffer)
546 return -ENOBUFS;
547
548 result = usb_control_msg(port->serial->dev, 535 result = usb_control_msg(port->serial->dev,
549 usb_sndctrlpipe(port->serial->dev, 0), 536 usb_sndctrlpipe(port->serial->dev, 0),
550 SUSBCRequest_Misc, 537 SUSBCRequest_Misc,
@@ -559,7 +546,6 @@ static int kobil_ioctl(struct tty_struct *tty,
559 dev_dbg(&port->dev, 546 dev_dbg(&port->dev,
560 "%s - Send reset_all_queues (FLUSH) URB returns: %i\n", 547 "%s - Send reset_all_queues (FLUSH) URB returns: %i\n",
561 __func__, result); 548 __func__, result);
562 kfree(transfer_buffer);
563 return (result < 0) ? -EIO: 0; 549 return (result < 0) ? -EIO: 0;
564 default: 550 default:
565 return -ENOIOCTLCMD; 551 return -ENOIOCTLCMD;
diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c
index 4856fb7e637e..4b7bfb394a32 100644
--- a/drivers/usb/serial/opticon.c
+++ b/drivers/usb/serial/opticon.c
@@ -215,7 +215,7 @@ static int opticon_write(struct tty_struct *tty, struct usb_serial_port *port,
215 215
216 /* The connected devices do not have a bulk write endpoint, 216 /* The connected devices do not have a bulk write endpoint,
 217 * to transmit data to the barcode device the control endpoint is used */ 217 * to transmit data to the barcode device the control endpoint is used */
218 dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_NOIO); 218 dr = kmalloc(sizeof(struct usb_ctrlrequest), GFP_ATOMIC);
219 if (!dr) { 219 if (!dr) {
220 count = -ENOMEM; 220 count = -ENOMEM;
221 goto error_no_dr; 221 goto error_no_dr;
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index d1a3f6044c8a..7a4c21b4f676 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -269,6 +269,7 @@ static void option_instat_callback(struct urb *urb);
269#define TELIT_PRODUCT_DE910_DUAL 0x1010 269#define TELIT_PRODUCT_DE910_DUAL 0x1010
270#define TELIT_PRODUCT_UE910_V2 0x1012 270#define TELIT_PRODUCT_UE910_V2 0x1012
271#define TELIT_PRODUCT_LE920 0x1200 271#define TELIT_PRODUCT_LE920 0x1200
272#define TELIT_PRODUCT_LE910 0x1201
272 273
273/* ZTE PRODUCTS */ 274/* ZTE PRODUCTS */
274#define ZTE_VENDOR_ID 0x19d2 275#define ZTE_VENDOR_ID 0x19d2
@@ -362,6 +363,7 @@ static void option_instat_callback(struct urb *urb);
362 363
363/* Haier products */ 364/* Haier products */
364#define HAIER_VENDOR_ID 0x201e 365#define HAIER_VENDOR_ID 0x201e
366#define HAIER_PRODUCT_CE81B 0x10f8
365#define HAIER_PRODUCT_CE100 0x2009 367#define HAIER_PRODUCT_CE100 0x2009
366 368
367/* Cinterion (formerly Siemens) products */ 369/* Cinterion (formerly Siemens) products */
@@ -589,6 +591,11 @@ static const struct option_blacklist_info zte_1255_blacklist = {
589 .reserved = BIT(3) | BIT(4), 591 .reserved = BIT(3) | BIT(4),
590}; 592};
591 593
594static const struct option_blacklist_info telit_le910_blacklist = {
595 .sendsetup = BIT(0),
596 .reserved = BIT(1) | BIT(2),
597};
598
592static const struct option_blacklist_info telit_le920_blacklist = { 599static const struct option_blacklist_info telit_le920_blacklist = {
593 .sendsetup = BIT(0), 600 .sendsetup = BIT(0),
594 .reserved = BIT(1) | BIT(5), 601 .reserved = BIT(1) | BIT(5),
@@ -1138,6 +1145,8 @@ static const struct usb_device_id option_ids[] = {
1138 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) }, 1145 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_CC864_SINGLE) },
1139 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) }, 1146 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_DE910_DUAL) },
1140 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) }, 1147 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_UE910_V2) },
1148 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
1149 .driver_info = (kernel_ulong_t)&telit_le910_blacklist },
1141 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920), 1150 { USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE920),
1142 .driver_info = (kernel_ulong_t)&telit_le920_blacklist }, 1151 .driver_info = (kernel_ulong_t)&telit_le920_blacklist },
1143 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */ 1152 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF622, 0xff, 0xff, 0xff) }, /* ZTE WCDMA products */
@@ -1621,6 +1630,7 @@ static const struct usb_device_id option_ids[] = {
1621 { USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) }, 1630 { USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
1622 { USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) }, 1631 { USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
1623 { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) }, 1632 { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
1633 { USB_DEVICE_AND_INTERFACE_INFO(HAIER_VENDOR_ID, HAIER_PRODUCT_CE81B, 0xff, 0xff, 0xff) },
1624 /* Pirelli */ 1634 /* Pirelli */
1625 { USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_1, 0xff) }, 1635 { USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_1, 0xff) },
1626 { USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_2, 0xff) }, 1636 { USB_DEVICE_INTERFACE_CLASS(PIRELLI_VENDOR_ID, PIRELLI_PRODUCT_C100_2, 0xff) },
diff --git a/drivers/usb/serial/ssu100.c b/drivers/usb/serial/ssu100.c
index a7fe664b6b7d..70a098de429f 100644
--- a/drivers/usb/serial/ssu100.c
+++ b/drivers/usb/serial/ssu100.c
@@ -490,10 +490,9 @@ static void ssu100_update_lsr(struct usb_serial_port *port, u8 lsr,
490 if (*tty_flag == TTY_NORMAL) 490 if (*tty_flag == TTY_NORMAL)
491 *tty_flag = TTY_FRAME; 491 *tty_flag = TTY_FRAME;
492 } 492 }
493 if (lsr & UART_LSR_OE){ 493 if (lsr & UART_LSR_OE) {
494 port->icount.overrun++; 494 port->icount.overrun++;
495 if (*tty_flag == TTY_NORMAL) 495 tty_insert_flip_char(&port->port, 0, TTY_OVERRUN);
496 *tty_flag = TTY_OVERRUN;
497 } 496 }
498 } 497 }
499 498
@@ -511,12 +510,8 @@ static void ssu100_process_read_urb(struct urb *urb)
511 if ((len >= 4) && 510 if ((len >= 4) &&
512 (packet[0] == 0x1b) && (packet[1] == 0x1b) && 511 (packet[0] == 0x1b) && (packet[1] == 0x1b) &&
513 ((packet[2] == 0x00) || (packet[2] == 0x01))) { 512 ((packet[2] == 0x00) || (packet[2] == 0x01))) {
514 if (packet[2] == 0x00) { 513 if (packet[2] == 0x00)
515 ssu100_update_lsr(port, packet[3], &flag); 514 ssu100_update_lsr(port, packet[3], &flag);
516 if (flag == TTY_OVERRUN)
517 tty_insert_flip_char(&port->port, 0,
518 TTY_OVERRUN);
519 }
520 if (packet[2] == 0x01) 515 if (packet[2] == 0x01)
521 ssu100_update_msr(port, packet[3]); 516 ssu100_update_msr(port, packet[3]);
522 517
diff --git a/drivers/usb/storage/initializers.c b/drivers/usb/storage/initializers.c
index 4bc2fc98636e..73f125e0cb58 100644
--- a/drivers/usb/storage/initializers.c
+++ b/drivers/usb/storage/initializers.c
@@ -52,7 +52,7 @@ int usb_stor_euscsi_init(struct us_data *us)
52 us->iobuf[0] = 0x1; 52 us->iobuf[0] = 0x1;
53 result = usb_stor_control_msg(us, us->send_ctrl_pipe, 53 result = usb_stor_control_msg(us, us->send_ctrl_pipe,
54 0x0C, USB_RECIP_INTERFACE | USB_TYPE_VENDOR, 54 0x0C, USB_RECIP_INTERFACE | USB_TYPE_VENDOR,
55 0x01, 0x0, us->iobuf, 0x1, USB_CTRL_SET_TIMEOUT); 55 0x01, 0x0, us->iobuf, 0x1, 5 * HZ);
56 usb_stor_dbg(us, "-- result is %d\n", result); 56 usb_stor_dbg(us, "-- result is %d\n", result);
57 57
58 return 0; 58 return 0;
@@ -100,7 +100,7 @@ int usb_stor_huawei_e220_init(struct us_data *us)
100 result = usb_stor_control_msg(us, us->send_ctrl_pipe, 100 result = usb_stor_control_msg(us, us->send_ctrl_pipe,
101 USB_REQ_SET_FEATURE, 101 USB_REQ_SET_FEATURE,
102 USB_TYPE_STANDARD | USB_RECIP_DEVICE, 102 USB_TYPE_STANDARD | USB_RECIP_DEVICE,
103 0x01, 0x0, NULL, 0x0, 1000); 103 0x01, 0x0, NULL, 0x0, 1 * HZ);
104 usb_stor_dbg(us, "Huawei mode set result is %d\n", result); 104 usb_stor_dbg(us, "Huawei mode set result is %d\n", result);
105 return 0; 105 return 0;
106} 106}
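Both initializers.c changes are unit fixes rather than policy changes: usb_stor_control_msg() takes its timeout in jiffies, not milliseconds (it feeds a jiffies-based wait inside usb-storage; that internal detail is stated from memory of the driver rather than from this hunk), so the millisecond-style constants USB_CTRL_SET_TIMEOUT (5000) and 1000 produced far longer waits than intended, e.g. 50 seconds at HZ=100 instead of 5. Expressed directly:

	/* Hedged example: a wall-clock timeout for this helper is written in
	 * jiffies, e.g. one second is 1 * HZ rather than 1000. */
	result = usb_stor_control_msg(us, us->send_ctrl_pipe,
			USB_REQ_SET_FEATURE,
			USB_TYPE_STANDARD | USB_RECIP_DEVICE,
			0x01, 0x0, NULL, 0x0, 1 * HZ);
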
diff --git a/drivers/usb/storage/realtek_cr.c b/drivers/usb/storage/realtek_cr.c
index 8591d89a38e6..27e4a580d2ed 100644
--- a/drivers/usb/storage/realtek_cr.c
+++ b/drivers/usb/storage/realtek_cr.c
@@ -626,6 +626,7 @@ static int config_autodelink_after_power_on(struct us_data *us)
626 return 0; 626 return 0;
627} 627}
628 628
629#ifdef CONFIG_PM
629static int config_autodelink_before_power_down(struct us_data *us) 630static int config_autodelink_before_power_down(struct us_data *us)
630{ 631{
631 struct rts51x_chip *chip = (struct rts51x_chip *)(us->extra); 632 struct rts51x_chip *chip = (struct rts51x_chip *)(us->extra);
@@ -716,6 +717,7 @@ static void fw5895_init(struct us_data *us)
716 } 717 }
717 } 718 }
718} 719}
720#endif
719 721
720#ifdef CONFIG_REALTEK_AUTOPM 722#ifdef CONFIG_REALTEK_AUTOPM
721static void fw5895_set_mmc_wp(struct us_data *us) 723static void fw5895_set_mmc_wp(struct us_data *us)
diff --git a/drivers/usb/storage/transport.c b/drivers/usb/storage/transport.c
index 22c7d4360fa2..b1d815eb6d0b 100644
--- a/drivers/usb/storage/transport.c
+++ b/drivers/usb/storage/transport.c
@@ -1118,6 +1118,31 @@ int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us)
1118 */ 1118 */
1119 if (result == USB_STOR_XFER_LONG) 1119 if (result == USB_STOR_XFER_LONG)
1120 fake_sense = 1; 1120 fake_sense = 1;
1121
1122 /*
1123 * Sometimes a device will mistakenly skip the data phase
1124 * and go directly to the status phase without sending a
1125 * zero-length packet. If we get a 13-byte response here,
1126 * check whether it really is a CSW.
1127 */
1128 if (result == USB_STOR_XFER_SHORT &&
1129 srb->sc_data_direction == DMA_FROM_DEVICE &&
1130 transfer_length - scsi_get_resid(srb) ==
1131 US_BULK_CS_WRAP_LEN) {
1132 struct scatterlist *sg = NULL;
1133 unsigned int offset = 0;
1134
1135 if (usb_stor_access_xfer_buf((unsigned char *) bcs,
1136 US_BULK_CS_WRAP_LEN, srb, &sg,
1137 &offset, FROM_XFER_BUF) ==
1138 US_BULK_CS_WRAP_LEN &&
1139 bcs->Signature ==
1140 cpu_to_le32(US_BULK_CS_SIGN)) {
1141 usb_stor_dbg(us, "Device skipped data phase\n");
1142 scsi_set_resid(srb, transfer_length);
1143 goto skipped_data_phase;
1144 }
1145 }
1121 } 1146 }
1122 1147
1123 /* See flow chart on pg 15 of the Bulk Only Transport spec for 1148 /* See flow chart on pg 15 of the Bulk Only Transport spec for
@@ -1153,6 +1178,7 @@ int usb_stor_Bulk_transport(struct scsi_cmnd *srb, struct us_data *us)
1153 if (result != USB_STOR_XFER_GOOD) 1178 if (result != USB_STOR_XFER_GOOD)
1154 return USB_STOR_TRANSPORT_ERROR; 1179 return USB_STOR_TRANSPORT_ERROR;
1155 1180
1181 skipped_data_phase:
1156 /* check bulk status */ 1182 /* check bulk status */
1157 residue = le32_to_cpu(bcs->Residue); 1183 residue = le32_to_cpu(bcs->Residue);
1158 usb_stor_dbg(us, "Bulk Status S 0x%x T 0x%x R %u Stat 0x%x\n", 1184 usb_stor_dbg(us, "Bulk Status S 0x%x T 0x%x R %u Stat 0x%x\n",
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index 8511b54a65d9..18a283d6de1c 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -54,6 +54,20 @@ UNUSUAL_DEV(0x0bc2, 0x3312, 0x0000, 0x9999,
54 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 54 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
55 US_FL_NO_ATA_1X), 55 US_FL_NO_ATA_1X),
56 56
57/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
58UNUSUAL_DEV(0x0bc2, 0x3320, 0x0000, 0x9999,
59 "Seagate",
60 "Expansion Desk",
61 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
62 US_FL_NO_ATA_1X),
63
64/* Reported-by: Bogdan Mihalcea <bogdan.mihalcea@infim.ro> */
65UNUSUAL_DEV(0x0bc2, 0xa003, 0x0000, 0x9999,
66 "Seagate",
67 "Backup Plus",
68 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
69 US_FL_NO_ATA_1X),
70
57/* https://bbs.archlinux.org/viewtopic.php?id=183190 */ 71/* https://bbs.archlinux.org/viewtopic.php?id=183190 */
58UNUSUAL_DEV(0x0bc2, 0xab20, 0x0000, 0x9999, 72UNUSUAL_DEV(0x0bc2, 0xab20, 0x0000, 0x9999,
59 "Seagate", 73 "Seagate",
@@ -61,6 +75,13 @@ UNUSUAL_DEV(0x0bc2, 0xab20, 0x0000, 0x9999,
61 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 75 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
62 US_FL_NO_ATA_1X), 76 US_FL_NO_ATA_1X),
63 77
78/* https://bbs.archlinux.org/viewtopic.php?id=183190 */
79UNUSUAL_DEV(0x0bc2, 0xab21, 0x0000, 0x9999,
80 "Seagate",
81 "Backup+ BK",
82 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
83 US_FL_NO_ATA_1X),
84
64/* Reported-by: Claudio Bizzarri <claudio.bizzarri@gmail.com> */ 85/* Reported-by: Claudio Bizzarri <claudio.bizzarri@gmail.com> */
65UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999, 86UNUSUAL_DEV(0x152d, 0x0567, 0x0000, 0x9999,
66 "JMicron", 87 "JMicron",
@@ -75,3 +96,17 @@ UNUSUAL_DEV(0x174c, 0x5106, 0x0000, 0x9999,
75 "ASM1051", 96 "ASM1051",
76 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 97 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
77 US_FL_IGNORE_UAS), 98 US_FL_IGNORE_UAS),
99
100/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
101UNUSUAL_DEV(0x2109, 0x0711, 0x0000, 0x9999,
102 "VIA",
103 "VL711",
104 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
105 US_FL_NO_ATA_1X),
106
107/* Reported-by: Hans de Goede <hdegoede@redhat.com> */
108UNUSUAL_DEV(0x4971, 0x1012, 0x0000, 0x9999,
109 "Hitachi",
110 "External HDD",
111 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
112 US_FL_IGNORE_UAS),
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 69906cacd04f..a17f11850669 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1312,6 +1312,7 @@ static int
1312vhost_scsi_set_endpoint(struct vhost_scsi *vs, 1312vhost_scsi_set_endpoint(struct vhost_scsi *vs,
1313 struct vhost_scsi_target *t) 1313 struct vhost_scsi_target *t)
1314{ 1314{
1315 struct se_portal_group *se_tpg;
1315 struct tcm_vhost_tport *tv_tport; 1316 struct tcm_vhost_tport *tv_tport;
1316 struct tcm_vhost_tpg *tpg; 1317 struct tcm_vhost_tpg *tpg;
1317 struct tcm_vhost_tpg **vs_tpg; 1318 struct tcm_vhost_tpg **vs_tpg;
@@ -1359,6 +1360,21 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
1359 ret = -EEXIST; 1360 ret = -EEXIST;
1360 goto out; 1361 goto out;
1361 } 1362 }
1363 /*
1364 * In order to ensure individual vhost-scsi configfs
1365 * groups cannot be removed while in use by vhost ioctl,
1366 * go ahead and take an explicit se_tpg->tpg_group.cg_item
1367 * dependency now.
1368 */
1369 se_tpg = &tpg->se_tpg;
1370 ret = configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys,
1371 &se_tpg->tpg_group.cg_item);
1372 if (ret) {
1373 pr_warn("configfs_depend_item() failed: %d\n", ret);
1374 kfree(vs_tpg);
1375 mutex_unlock(&tpg->tv_tpg_mutex);
1376 goto out;
1377 }
1362 tpg->tv_tpg_vhost_count++; 1378 tpg->tv_tpg_vhost_count++;
1363 tpg->vhost_scsi = vs; 1379 tpg->vhost_scsi = vs;
1364 vs_tpg[tpg->tport_tpgt] = tpg; 1380 vs_tpg[tpg->tport_tpgt] = tpg;
@@ -1401,6 +1417,7 @@ static int
1401vhost_scsi_clear_endpoint(struct vhost_scsi *vs, 1417vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
1402 struct vhost_scsi_target *t) 1418 struct vhost_scsi_target *t)
1403{ 1419{
1420 struct se_portal_group *se_tpg;
1404 struct tcm_vhost_tport *tv_tport; 1421 struct tcm_vhost_tport *tv_tport;
1405 struct tcm_vhost_tpg *tpg; 1422 struct tcm_vhost_tpg *tpg;
1406 struct vhost_virtqueue *vq; 1423 struct vhost_virtqueue *vq;
@@ -1449,6 +1466,13 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
1449 vs->vs_tpg[target] = NULL; 1466 vs->vs_tpg[target] = NULL;
1450 match = true; 1467 match = true;
1451 mutex_unlock(&tpg->tv_tpg_mutex); 1468 mutex_unlock(&tpg->tv_tpg_mutex);
1469 /*
1470 * Release se_tpg->tpg_group.cg_item configfs dependency now
1471 * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur.
1472 */
1473 se_tpg = &tpg->se_tpg;
1474 configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys,
1475 &se_tpg->tpg_group.cg_item);
1452 } 1476 }
1453 if (match) { 1477 if (match) {
1454 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) { 1478 for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
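The two vhost/scsi.c hunks are a matched pair: set_endpoint pins the target portal group's configfs item and clear_endpoint releases it, so the configfs group cannot be removed while a vhost instance still references it. A hedged sketch of the pairing as small helpers (the helper names are hypothetical; the calls themselves are the ones used in the hunks):

	/* Pin the TPG's configfs group while a vhost endpoint uses it */
	static int my_pin_tpg(struct se_portal_group *se_tpg)
	{
		return configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys,
					    &se_tpg->tpg_group.cg_item);
	}

	/* Drop the pin once the endpoint has been cleared */
	static void my_unpin_tpg(struct se_portal_group *se_tpg)
	{
		configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys,
				       &se_tpg->tpg_group.cg_item);
	}
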
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 57b1d44acbfe..eb976ee3a02f 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -448,8 +448,10 @@ static int __init fb_console_setup(char *this_opt)
448 return 1; 448 return 1;
449 449
450 while ((options = strsep(&this_opt, ",")) != NULL) { 450 while ((options = strsep(&this_opt, ",")) != NULL) {
451 if (!strncmp(options, "font:", 5)) 451 if (!strncmp(options, "font:", 5)) {
452 strlcpy(fontname, options + 5, sizeof(fontname)); 452 strlcpy(fontname, options + 5, sizeof(fontname));
453 continue;
454 }
453 455
454 if (!strncmp(options, "scrollback:", 11)) { 456 if (!strncmp(options, "scrollback:", 11)) {
455 options += 11; 457 options += 11;
@@ -457,13 +459,9 @@ static int __init fb_console_setup(char *this_opt)
457 fbcon_softback_size = simple_strtoul(options, &options, 0); 459 fbcon_softback_size = simple_strtoul(options, &options, 0);
458 if (*options == 'k' || *options == 'K') { 460 if (*options == 'k' || *options == 'K') {
459 fbcon_softback_size *= 1024; 461 fbcon_softback_size *= 1024;
460 options++;
461 } 462 }
462 if (*options != ',') 463 }
463 return 1; 464 continue;
464 options++;
465 } else
466 return 1;
467 } 465 }
468 466
469 if (!strncmp(options, "map:", 4)) { 467 if (!strncmp(options, "map:", 4)) {
@@ -478,8 +476,7 @@ static int __init fb_console_setup(char *this_opt)
478 476
479 fbcon_map_override(); 477 fbcon_map_override();
480 } 478 }
481 479 continue;
482 return 1;
483 } 480 }
484 481
485 if (!strncmp(options, "vc:", 3)) { 482 if (!strncmp(options, "vc:", 3)) {
@@ -491,7 +488,8 @@ static int __init fb_console_setup(char *this_opt)
491 if (*options++ == '-') 488 if (*options++ == '-')
492 last_fb_vc = simple_strtoul(options, &options, 10) - 1; 489 last_fb_vc = simple_strtoul(options, &options, 10) - 1;
493 fbcon_is_default = 0; 490 fbcon_is_default = 0;
494 } 491 continue;
492 }
495 493
496 if (!strncmp(options, "rotate:", 7)) { 494 if (!strncmp(options, "rotate:", 7)) {
497 options += 7; 495 options += 7;
@@ -499,6 +497,7 @@ static int __init fb_console_setup(char *this_opt)
499 initial_rotation = simple_strtoul(options, &options, 0); 497 initial_rotation = simple_strtoul(options, &options, 0);
500 if (initial_rotation > 3) 498 if (initial_rotation > 3)
501 initial_rotation = 0; 499 initial_rotation = 0;
500 continue;
502 } 501 }
503 } 502 }
504 return 1; 503 return 1;
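The fbcon rework above replaces the early "return 1" exits with one "continue" per recognized option, so a malformed token no longer discards the remaining comma-separated options. A small user-space sketch of the same strsep()-based loop shape (option names chosen only for illustration):

#define _DEFAULT_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Handle each token, then continue; unknown tokens are simply skipped. */
static void parse_options(char *opts)
{
	char *tok;

	while ((tok = strsep(&opts, ",")) != NULL) {
		if (!strncmp(tok, "font:", 5)) {
			printf("font = %s\n", tok + 5);
			continue;
		}
		if (!strncmp(tok, "rotate:", 7)) {
			printf("rotate = %ld\n", strtol(tok + 7, NULL, 0));
			continue;
		}
	}
}

int main(void)
{
	char buf[] = "font:VGA8x16,bogus,rotate:2";

	parse_options(buf);
	return 0;
}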
diff --git a/drivers/video/console/vgacon.c b/drivers/video/console/vgacon.c
index 6e6aa704fe84..517f565b65d7 100644
--- a/drivers/video/console/vgacon.c
+++ b/drivers/video/console/vgacon.c
@@ -56,7 +56,7 @@ static int cursor_size_lastfrom;
56static int cursor_size_lastto; 56static int cursor_size_lastto;
57static u32 vgacon_xres; 57static u32 vgacon_xres;
58static u32 vgacon_yres; 58static u32 vgacon_yres;
59static struct vgastate state; 59static struct vgastate vgastate;
60 60
61#define BLANK 0x0020 61#define BLANK 0x0020
62 62
@@ -400,7 +400,7 @@ static const char *vgacon_startup(void)
400 400
401 vga_video_num_lines = screen_info.orig_video_lines; 401 vga_video_num_lines = screen_info.orig_video_lines;
402 vga_video_num_columns = screen_info.orig_video_cols; 402 vga_video_num_columns = screen_info.orig_video_cols;
403 state.vgabase = NULL; 403 vgastate.vgabase = NULL;
404 404
405 if (screen_info.orig_video_mode == 7) { 405 if (screen_info.orig_video_mode == 7) {
406 /* Monochrome display */ 406 /* Monochrome display */
@@ -851,12 +851,12 @@ static void vga_set_palette(struct vc_data *vc, unsigned char *table)
851{ 851{
852 int i, j; 852 int i, j;
853 853
854 vga_w(state.vgabase, VGA_PEL_MSK, 0xff); 854 vga_w(vgastate.vgabase, VGA_PEL_MSK, 0xff);
855 for (i = j = 0; i < 16; i++) { 855 for (i = j = 0; i < 16; i++) {
856 vga_w(state.vgabase, VGA_PEL_IW, table[i]); 856 vga_w(vgastate.vgabase, VGA_PEL_IW, table[i]);
857 vga_w(state.vgabase, VGA_PEL_D, vc->vc_palette[j++] >> 2); 857 vga_w(vgastate.vgabase, VGA_PEL_D, vc->vc_palette[j++] >> 2);
858 vga_w(state.vgabase, VGA_PEL_D, vc->vc_palette[j++] >> 2); 858 vga_w(vgastate.vgabase, VGA_PEL_D, vc->vc_palette[j++] >> 2);
859 vga_w(state.vgabase, VGA_PEL_D, vc->vc_palette[j++] >> 2); 859 vga_w(vgastate.vgabase, VGA_PEL_D, vc->vc_palette[j++] >> 2);
860 } 860 }
861} 861}
862 862
@@ -1008,7 +1008,7 @@ static int vgacon_blank(struct vc_data *c, int blank, int mode_switch)
1008 switch (blank) { 1008 switch (blank) {
1009 case 0: /* Unblank */ 1009 case 0: /* Unblank */
1010 if (vga_vesa_blanked) { 1010 if (vga_vesa_blanked) {
1011 vga_vesa_unblank(&state); 1011 vga_vesa_unblank(&vgastate);
1012 vga_vesa_blanked = 0; 1012 vga_vesa_blanked = 0;
1013 } 1013 }
1014 if (vga_palette_blanked) { 1014 if (vga_palette_blanked) {
@@ -1022,7 +1022,7 @@ static int vgacon_blank(struct vc_data *c, int blank, int mode_switch)
1022 case 1: /* Normal blanking */ 1022 case 1: /* Normal blanking */
1023 case -1: /* Obsolete */ 1023 case -1: /* Obsolete */
1024 if (!mode_switch && vga_video_type == VIDEO_TYPE_VGAC) { 1024 if (!mode_switch && vga_video_type == VIDEO_TYPE_VGAC) {
1025 vga_pal_blank(&state); 1025 vga_pal_blank(&vgastate);
1026 vga_palette_blanked = 1; 1026 vga_palette_blanked = 1;
1027 return 0; 1027 return 0;
1028 } 1028 }
@@ -1034,7 +1034,7 @@ static int vgacon_blank(struct vc_data *c, int blank, int mode_switch)
1034 return 1; 1034 return 1;
1035 default: /* VESA blanking */ 1035 default: /* VESA blanking */
1036 if (vga_video_type == VIDEO_TYPE_VGAC) { 1036 if (vga_video_type == VIDEO_TYPE_VGAC) {
1037 vga_vesa_blank(&state, blank - 1); 1037 vga_vesa_blank(&vgastate, blank - 1);
1038 vga_vesa_blanked = blank; 1038 vga_vesa_blanked = blank;
1039 } 1039 }
1040 return 0; 1040 return 0;
@@ -1280,7 +1280,7 @@ static int vgacon_font_set(struct vc_data *c, struct console_font *font, unsigne
1280 (charcount != 256 && charcount != 512)) 1280 (charcount != 256 && charcount != 512))
1281 return -EINVAL; 1281 return -EINVAL;
1282 1282
1283 rc = vgacon_do_font_op(&state, font->data, 1, charcount == 512); 1283 rc = vgacon_do_font_op(&vgastate, font->data, 1, charcount == 512);
1284 if (rc) 1284 if (rc)
1285 return rc; 1285 return rc;
1286 1286
@@ -1299,7 +1299,7 @@ static int vgacon_font_get(struct vc_data *c, struct console_font *font)
1299 font->charcount = vga_512_chars ? 512 : 256; 1299 font->charcount = vga_512_chars ? 512 : 256;
1300 if (!font->data) 1300 if (!font->data)
1301 return 0; 1301 return 0;
1302 return vgacon_do_font_op(&state, font->data, 0, vga_512_chars); 1302 return vgacon_do_font_op(&vgastate, font->data, 0, vga_512_chars);
1303} 1303}
1304 1304
1305#else 1305#else
diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c
index 3bf403150a2d..9ec81d46fc57 100644
--- a/drivers/video/fbdev/atmel_lcdfb.c
+++ b/drivers/video/fbdev/atmel_lcdfb.c
@@ -27,7 +27,6 @@
27#include <linux/regulator/consumer.h> 27#include <linux/regulator/consumer.h>
28#include <video/videomode.h> 28#include <video/videomode.h>
29 29
30#include <mach/cpu.h>
31#include <asm/gpio.h> 30#include <asm/gpio.h>
32 31
33#include <video/atmel_lcdc.h> 32#include <video/atmel_lcdc.h>
diff --git a/drivers/video/fbdev/omap2/displays-new/connector-analog-tv.c b/drivers/video/fbdev/omap2/displays-new/connector-analog-tv.c
index 5ee3b5505f7f..91921665b98b 100644
--- a/drivers/video/fbdev/omap2/displays-new/connector-analog-tv.c
+++ b/drivers/video/fbdev/omap2/displays-new/connector-analog-tv.c
@@ -301,6 +301,8 @@ static const struct of_device_id tvc_of_match[] = {
301 {}, 301 {},
302}; 302};
303 303
304MODULE_DEVICE_TABLE(of, tvc_of_match);
305
304static struct platform_driver tvc_connector_driver = { 306static struct platform_driver tvc_connector_driver = {
305 .probe = tvc_probe, 307 .probe = tvc_probe,
306 .remove = __exit_p(tvc_remove), 308 .remove = __exit_p(tvc_remove),
@@ -308,6 +310,7 @@ static struct platform_driver tvc_connector_driver = {
308 .name = "connector-analog-tv", 310 .name = "connector-analog-tv",
309 .owner = THIS_MODULE, 311 .owner = THIS_MODULE,
310 .of_match_table = tvc_of_match, 312 .of_match_table = tvc_of_match,
313 .suppress_bind_attrs = true,
311 }, 314 },
312}; 315};
313 316
diff --git a/drivers/video/fbdev/omap2/displays-new/connector-dvi.c b/drivers/video/fbdev/omap2/displays-new/connector-dvi.c
index 74de2bc50c4f..2dfb6e5ff0cc 100644
--- a/drivers/video/fbdev/omap2/displays-new/connector-dvi.c
+++ b/drivers/video/fbdev/omap2/displays-new/connector-dvi.c
@@ -391,6 +391,7 @@ static struct platform_driver dvi_connector_driver = {
391 .name = "connector-dvi", 391 .name = "connector-dvi",
392 .owner = THIS_MODULE, 392 .owner = THIS_MODULE,
393 .of_match_table = dvic_of_match, 393 .of_match_table = dvic_of_match,
394 .suppress_bind_attrs = true,
394 }, 395 },
395}; 396};
396 397
diff --git a/drivers/video/fbdev/omap2/displays-new/connector-hdmi.c b/drivers/video/fbdev/omap2/displays-new/connector-hdmi.c
index 131c6e260898..7b25967a91eb 100644
--- a/drivers/video/fbdev/omap2/displays-new/connector-hdmi.c
+++ b/drivers/video/fbdev/omap2/displays-new/connector-hdmi.c
@@ -437,6 +437,7 @@ static struct platform_driver hdmi_connector_driver = {
437 .name = "connector-hdmi", 437 .name = "connector-hdmi",
438 .owner = THIS_MODULE, 438 .owner = THIS_MODULE,
439 .of_match_table = hdmic_of_match, 439 .of_match_table = hdmic_of_match,
440 .suppress_bind_attrs = true,
440 }, 441 },
441}; 442};
442 443
diff --git a/drivers/video/fbdev/omap2/displays-new/encoder-tfp410.c b/drivers/video/fbdev/omap2/displays-new/encoder-tfp410.c
index b4e9a42a79e6..47ee7cdee1c5 100644
--- a/drivers/video/fbdev/omap2/displays-new/encoder-tfp410.c
+++ b/drivers/video/fbdev/omap2/displays-new/encoder-tfp410.c
@@ -298,6 +298,7 @@ static struct platform_driver tfp410_driver = {
298 .name = "tfp410", 298 .name = "tfp410",
299 .owner = THIS_MODULE, 299 .owner = THIS_MODULE,
300 .of_match_table = tfp410_of_match, 300 .of_match_table = tfp410_of_match,
301 .suppress_bind_attrs = true,
301 }, 302 },
302}; 303};
303 304
diff --git a/drivers/video/fbdev/omap2/displays-new/encoder-tpd12s015.c b/drivers/video/fbdev/omap2/displays-new/encoder-tpd12s015.c
index c891d8f84cb2..c4abd56dd846 100644
--- a/drivers/video/fbdev/omap2/displays-new/encoder-tpd12s015.c
+++ b/drivers/video/fbdev/omap2/displays-new/encoder-tpd12s015.c
@@ -461,6 +461,7 @@ static struct platform_driver tpd_driver = {
461 .name = "tpd12s015", 461 .name = "tpd12s015",
462 .owner = THIS_MODULE, 462 .owner = THIS_MODULE,
463 .of_match_table = tpd_of_match, 463 .of_match_table = tpd_of_match,
464 .suppress_bind_attrs = true,
464 }, 465 },
465}; 466};
466 467
diff --git a/drivers/video/fbdev/omap2/displays-new/panel-dpi.c b/drivers/video/fbdev/omap2/displays-new/panel-dpi.c
index 3636b61dc9b4..a9c3dcf0f6b5 100644
--- a/drivers/video/fbdev/omap2/displays-new/panel-dpi.c
+++ b/drivers/video/fbdev/omap2/displays-new/panel-dpi.c
@@ -327,6 +327,7 @@ static struct platform_driver panel_dpi_driver = {
327 .name = "panel-dpi", 327 .name = "panel-dpi",
328 .owner = THIS_MODULE, 328 .owner = THIS_MODULE,
329 .of_match_table = panel_dpi_of_match, 329 .of_match_table = panel_dpi_of_match,
330 .suppress_bind_attrs = true,
330 }, 331 },
331}; 332};
332 333
diff --git a/drivers/video/fbdev/omap2/displays-new/panel-dsi-cm.c b/drivers/video/fbdev/omap2/displays-new/panel-dsi-cm.c
index d6f14e8717e8..899cb1ab523d 100644
--- a/drivers/video/fbdev/omap2/displays-new/panel-dsi-cm.c
+++ b/drivers/video/fbdev/omap2/displays-new/panel-dsi-cm.c
@@ -1378,6 +1378,7 @@ static struct platform_driver dsicm_driver = {
1378 .name = "panel-dsi-cm", 1378 .name = "panel-dsi-cm",
1379 .owner = THIS_MODULE, 1379 .owner = THIS_MODULE,
1380 .of_match_table = dsicm_of_match, 1380 .of_match_table = dsicm_of_match,
1381 .suppress_bind_attrs = true,
1381 }, 1382 },
1382}; 1383};
1383 1384
diff --git a/drivers/video/fbdev/omap2/displays-new/panel-lgphilips-lb035q02.c b/drivers/video/fbdev/omap2/displays-new/panel-lgphilips-lb035q02.c
index cc5b5124e0b4..27d4fcfa1824 100644
--- a/drivers/video/fbdev/omap2/displays-new/panel-lgphilips-lb035q02.c
+++ b/drivers/video/fbdev/omap2/displays-new/panel-lgphilips-lb035q02.c
@@ -394,6 +394,7 @@ static struct spi_driver lb035q02_spi_driver = {
394 .name = "panel_lgphilips_lb035q02", 394 .name = "panel_lgphilips_lb035q02",
395 .owner = THIS_MODULE, 395 .owner = THIS_MODULE,
396 .of_match_table = lb035q02_of_match, 396 .of_match_table = lb035q02_of_match,
397 .suppress_bind_attrs = true,
397 }, 398 },
398}; 399};
399 400
diff --git a/drivers/video/fbdev/omap2/displays-new/panel-nec-nl8048hl11.c b/drivers/video/fbdev/omap2/displays-new/panel-nec-nl8048hl11.c
index 3595f111aa35..ccf3f4f3c703 100644
--- a/drivers/video/fbdev/omap2/displays-new/panel-nec-nl8048hl11.c
+++ b/drivers/video/fbdev/omap2/displays-new/panel-nec-nl8048hl11.c
@@ -424,6 +424,7 @@ static struct spi_driver nec_8048_driver = {
424 .owner = THIS_MODULE, 424 .owner = THIS_MODULE,
425 .pm = NEC_8048_PM_OPS, 425 .pm = NEC_8048_PM_OPS,
426 .of_match_table = nec_8048_of_match, 426 .of_match_table = nec_8048_of_match,
427 .suppress_bind_attrs = true,
427 }, 428 },
428 .probe = nec_8048_probe, 429 .probe = nec_8048_probe,
429 .remove = nec_8048_remove, 430 .remove = nec_8048_remove,
diff --git a/drivers/video/fbdev/omap2/displays-new/panel-sharp-ls037v7dw01.c b/drivers/video/fbdev/omap2/displays-new/panel-sharp-ls037v7dw01.c
index f1f72ce50a17..234142cc3764 100644
--- a/drivers/video/fbdev/omap2/displays-new/panel-sharp-ls037v7dw01.c
+++ b/drivers/video/fbdev/omap2/displays-new/panel-sharp-ls037v7dw01.c
@@ -410,6 +410,7 @@ static struct platform_driver sharp_ls_driver = {
410 .name = "panel-sharp-ls037v7dw01", 410 .name = "panel-sharp-ls037v7dw01",
411 .owner = THIS_MODULE, 411 .owner = THIS_MODULE,
412 .of_match_table = sharp_ls_of_match, 412 .of_match_table = sharp_ls_of_match,
413 .suppress_bind_attrs = true,
413 }, 414 },
414}; 415};
415 416
diff --git a/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c b/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c
index 617f8d2f5127..337ccc5c0f5e 100644
--- a/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c
+++ b/drivers/video/fbdev/omap2/displays-new/panel-sony-acx565akm.c
@@ -904,6 +904,7 @@ static struct spi_driver acx565akm_driver = {
904 .name = "acx565akm", 904 .name = "acx565akm",
905 .owner = THIS_MODULE, 905 .owner = THIS_MODULE,
906 .of_match_table = acx565akm_of_match, 906 .of_match_table = acx565akm_of_match,
907 .suppress_bind_attrs = true,
907 }, 908 },
908 .probe = acx565akm_probe, 909 .probe = acx565akm_probe,
909 .remove = acx565akm_remove, 910 .remove = acx565akm_remove,
diff --git a/drivers/video/fbdev/omap2/displays-new/panel-tpo-td028ttec1.c b/drivers/video/fbdev/omap2/displays-new/panel-tpo-td028ttec1.c
index 728808bcceeb..fbba0b8ca871 100644
--- a/drivers/video/fbdev/omap2/displays-new/panel-tpo-td028ttec1.c
+++ b/drivers/video/fbdev/omap2/displays-new/panel-tpo-td028ttec1.c
@@ -500,6 +500,7 @@ static struct spi_driver td028ttec1_spi_driver = {
500 .name = "panel-tpo-td028ttec1", 500 .name = "panel-tpo-td028ttec1",
501 .owner = THIS_MODULE, 501 .owner = THIS_MODULE,
502 .of_match_table = td028ttec1_of_match, 502 .of_match_table = td028ttec1_of_match,
503 .suppress_bind_attrs = true,
503 }, 504 },
504}; 505};
505 506
diff --git a/drivers/video/fbdev/omap2/displays-new/panel-tpo-td043mtea1.c b/drivers/video/fbdev/omap2/displays-new/panel-tpo-td043mtea1.c
index de78ab0caaa8..5aba76bca25a 100644
--- a/drivers/video/fbdev/omap2/displays-new/panel-tpo-td043mtea1.c
+++ b/drivers/video/fbdev/omap2/displays-new/panel-tpo-td043mtea1.c
@@ -673,6 +673,7 @@ static struct spi_driver tpo_td043_spi_driver = {
673 .owner = THIS_MODULE, 673 .owner = THIS_MODULE,
674 .pm = &tpo_td043_spi_pm, 674 .pm = &tpo_td043_spi_pm,
675 .of_match_table = tpo_td043_of_match, 675 .of_match_table = tpo_td043_of_match,
676 .suppress_bind_attrs = true,
676 }, 677 },
677 .probe = tpo_td043_probe, 678 .probe = tpo_td043_probe,
678 .remove = tpo_td043_remove, 679 .remove = tpo_td043_remove,
diff --git a/drivers/video/fbdev/omap2/dss/apply.c b/drivers/video/fbdev/omap2/dss/apply.c
index 0a0b084ce65d..663ccc3bf4e5 100644
--- a/drivers/video/fbdev/omap2/dss/apply.c
+++ b/drivers/video/fbdev/omap2/dss/apply.c
@@ -1132,6 +1132,8 @@ static void dss_mgr_disable_compat(struct omap_overlay_manager *mgr)
1132 if (!mp->enabled) 1132 if (!mp->enabled)
1133 goto out; 1133 goto out;
1134 1134
1135 wait_pending_extra_info_updates();
1136
1135 if (!mgr_manual_update(mgr)) 1137 if (!mgr_manual_update(mgr))
1136 dispc_mgr_disable_sync(mgr->id); 1138 dispc_mgr_disable_sync(mgr->id);
1137 1139
diff --git a/drivers/video/fbdev/omap2/dss/dispc.c b/drivers/video/fbdev/omap2/dss/dispc.c
index be053aa80880..0e9a74bb9fc2 100644
--- a/drivers/video/fbdev/omap2/dss/dispc.c
+++ b/drivers/video/fbdev/omap2/dss/dispc.c
@@ -3290,8 +3290,11 @@ static void dispc_dump_regs(struct seq_file *s)
3290 DUMPREG(i, DISPC_OVL_FIFO_SIZE_STATUS); 3290 DUMPREG(i, DISPC_OVL_FIFO_SIZE_STATUS);
3291 DUMPREG(i, DISPC_OVL_ROW_INC); 3291 DUMPREG(i, DISPC_OVL_ROW_INC);
3292 DUMPREG(i, DISPC_OVL_PIXEL_INC); 3292 DUMPREG(i, DISPC_OVL_PIXEL_INC);
3293
3293 if (dss_has_feature(FEAT_PRELOAD)) 3294 if (dss_has_feature(FEAT_PRELOAD))
3294 DUMPREG(i, DISPC_OVL_PRELOAD); 3295 DUMPREG(i, DISPC_OVL_PRELOAD);
3296 if (dss_has_feature(FEAT_MFLAG))
3297 DUMPREG(i, DISPC_OVL_MFLAG_THRESHOLD);
3295 3298
3296 if (i == OMAP_DSS_GFX) { 3299 if (i == OMAP_DSS_GFX) {
3297 DUMPREG(i, DISPC_OVL_WINDOW_SKIP); 3300 DUMPREG(i, DISPC_OVL_WINDOW_SKIP);
@@ -3312,10 +3315,6 @@ static void dispc_dump_regs(struct seq_file *s)
3312 } 3315 }
3313 if (dss_has_feature(FEAT_ATTR2)) 3316 if (dss_has_feature(FEAT_ATTR2))
3314 DUMPREG(i, DISPC_OVL_ATTRIBUTES2); 3317 DUMPREG(i, DISPC_OVL_ATTRIBUTES2);
3315 if (dss_has_feature(FEAT_PRELOAD))
3316 DUMPREG(i, DISPC_OVL_PRELOAD);
3317 if (dss_has_feature(FEAT_MFLAG))
3318 DUMPREG(i, DISPC_OVL_MFLAG_THRESHOLD);
3319 } 3318 }
3320 3319
3321#undef DISPC_REG 3320#undef DISPC_REG
@@ -3843,6 +3842,7 @@ static struct platform_driver omap_dispchw_driver = {
3843 .owner = THIS_MODULE, 3842 .owner = THIS_MODULE,
3844 .pm = &dispc_pm_ops, 3843 .pm = &dispc_pm_ops,
3845 .of_match_table = dispc_of_match, 3844 .of_match_table = dispc_of_match,
3845 .suppress_bind_attrs = true,
3846 }, 3846 },
3847}; 3847};
3848 3848
diff --git a/drivers/video/fbdev/omap2/dss/dispc.h b/drivers/video/fbdev/omap2/dss/dispc.h
index 78edb449c763..3043d6e0a5f9 100644
--- a/drivers/video/fbdev/omap2/dss/dispc.h
+++ b/drivers/video/fbdev/omap2/dss/dispc.h
@@ -101,8 +101,7 @@
101 DISPC_FIR_COEF_V2_OFFSET(n, i)) 101 DISPC_FIR_COEF_V2_OFFSET(n, i))
102#define DISPC_OVL_PRELOAD(n) (DISPC_OVL_BASE(n) + \ 102#define DISPC_OVL_PRELOAD(n) (DISPC_OVL_BASE(n) + \
103 DISPC_PRELOAD_OFFSET(n)) 103 DISPC_PRELOAD_OFFSET(n))
104#define DISPC_OVL_MFLAG_THRESHOLD(n) (DISPC_OVL_BASE(n) + \ 104#define DISPC_OVL_MFLAG_THRESHOLD(n) DISPC_MFLAG_THRESHOLD_OFFSET(n)
105 DISPC_MFLAG_THRESHOLD_OFFSET(n))
106 105
107/* DISPC up/downsampling FIR filter coefficient structure */ 106/* DISPC up/downsampling FIR filter coefficient structure */
108struct dispc_coef { 107struct dispc_coef {
diff --git a/drivers/video/fbdev/omap2/dss/dpi.c b/drivers/video/fbdev/omap2/dss/dpi.c
index 9368972d6962..4a3363dae74a 100644
--- a/drivers/video/fbdev/omap2/dss/dpi.c
+++ b/drivers/video/fbdev/omap2/dss/dpi.c
@@ -720,6 +720,7 @@ static struct platform_driver omap_dpi_driver = {
720 .driver = { 720 .driver = {
721 .name = "omapdss_dpi", 721 .name = "omapdss_dpi",
722 .owner = THIS_MODULE, 722 .owner = THIS_MODULE,
723 .suppress_bind_attrs = true,
723 }, 724 },
724}; 725};
725 726
diff --git a/drivers/video/fbdev/omap2/dss/dsi.c b/drivers/video/fbdev/omap2/dss/dsi.c
index b6f6ae1d4664..0793bc67a275 100644
--- a/drivers/video/fbdev/omap2/dss/dsi.c
+++ b/drivers/video/fbdev/omap2/dss/dsi.c
@@ -1603,7 +1603,7 @@ int dsi_pll_set_clock_div(struct platform_device *dsidev,
1603 } else if (dss_has_feature(FEAT_DSI_PLL_SELFREQDCO)) { 1603 } else if (dss_has_feature(FEAT_DSI_PLL_SELFREQDCO)) {
1604 f = cinfo->clkin4ddr < 1000000000 ? 0x2 : 0x4; 1604 f = cinfo->clkin4ddr < 1000000000 ? 0x2 : 0x4;
1605 1605
1606 l = FLD_MOD(l, f, 4, 1); /* PLL_SELFREQDCO */ 1606 l = FLD_MOD(l, f, 3, 1); /* PLL_SELFREQDCO */
1607 } 1607 }
1608 1608
1609 l = FLD_MOD(l, 1, 13, 13); /* DSI_PLL_REFEN */ 1609 l = FLD_MOD(l, 1, 13, 13); /* DSI_PLL_REFEN */
@@ -5754,6 +5754,7 @@ static struct platform_driver omap_dsihw_driver = {
5754 .owner = THIS_MODULE, 5754 .owner = THIS_MODULE,
5755 .pm = &dsi_pm_ops, 5755 .pm = &dsi_pm_ops,
5756 .of_match_table = dsi_of_match, 5756 .of_match_table = dsi_of_match,
5757 .suppress_bind_attrs = true,
5757 }, 5758 },
5758}; 5759};
5759 5760
diff --git a/drivers/video/fbdev/omap2/dss/dss.c b/drivers/video/fbdev/omap2/dss/dss.c
index 6daeb7ed44c6..14bcd6c43f72 100644
--- a/drivers/video/fbdev/omap2/dss/dss.c
+++ b/drivers/video/fbdev/omap2/dss/dss.c
@@ -966,6 +966,7 @@ static struct platform_driver omap_dsshw_driver = {
966 .owner = THIS_MODULE, 966 .owner = THIS_MODULE,
967 .pm = &dss_pm_ops, 967 .pm = &dss_pm_ops,
968 .of_match_table = dss_of_match, 968 .of_match_table = dss_of_match,
969 .suppress_bind_attrs = true,
969 }, 970 },
970}; 971};
971 972
diff --git a/drivers/video/fbdev/omap2/dss/hdmi4.c b/drivers/video/fbdev/omap2/dss/hdmi4.c
index 6a8550cf43e5..9a8713ca090c 100644
--- a/drivers/video/fbdev/omap2/dss/hdmi4.c
+++ b/drivers/video/fbdev/omap2/dss/hdmi4.c
@@ -781,6 +781,7 @@ static struct platform_driver omapdss_hdmihw_driver = {
781 .owner = THIS_MODULE, 781 .owner = THIS_MODULE,
782 .pm = &hdmi_pm_ops, 782 .pm = &hdmi_pm_ops,
783 .of_match_table = hdmi_of_match, 783 .of_match_table = hdmi_of_match,
784 .suppress_bind_attrs = true,
784 }, 785 },
785}; 786};
786 787
diff --git a/drivers/video/fbdev/omap2/dss/hdmi5.c b/drivers/video/fbdev/omap2/dss/hdmi5.c
index 32d02ec34d23..169b764bb9d4 100644
--- a/drivers/video/fbdev/omap2/dss/hdmi5.c
+++ b/drivers/video/fbdev/omap2/dss/hdmi5.c
@@ -806,6 +806,7 @@ static struct platform_driver omapdss_hdmihw_driver = {
806 .owner = THIS_MODULE, 806 .owner = THIS_MODULE,
807 .pm = &hdmi_pm_ops, 807 .pm = &hdmi_pm_ops,
808 .of_match_table = hdmi_of_match, 808 .of_match_table = hdmi_of_match,
809 .suppress_bind_attrs = true,
809 }, 810 },
810}; 811};
811 812
diff --git a/drivers/video/fbdev/omap2/dss/hdmi_pll.c b/drivers/video/fbdev/omap2/dss/hdmi_pll.c
index 54df12a8d744..6d92bb32fe51 100644
--- a/drivers/video/fbdev/omap2/dss/hdmi_pll.c
+++ b/drivers/video/fbdev/omap2/dss/hdmi_pll.c
@@ -124,16 +124,15 @@ static int hdmi_pll_config(struct hdmi_pll_data *pll)
124 r = FLD_MOD(r, 0x0, 14, 14); /* PHY_CLKINEN de-assert during locking */ 124 r = FLD_MOD(r, 0x0, 14, 14); /* PHY_CLKINEN de-assert during locking */
125 r = FLD_MOD(r, fmt->refsel, 22, 21); /* REFSEL */ 125 r = FLD_MOD(r, fmt->refsel, 22, 21); /* REFSEL */
126 126
127 if (fmt->dcofreq) { 127 if (fmt->dcofreq)
128 /* divider programming for frequency beyond 1000Mhz */
129 REG_FLD_MOD(pll->base, PLLCTRL_CFG3, fmt->regsd, 17, 10);
130 r = FLD_MOD(r, 0x4, 3, 1); /* 1000MHz and 2000MHz */ 128 r = FLD_MOD(r, 0x4, 3, 1); /* 1000MHz and 2000MHz */
131 } else { 129 else
132 r = FLD_MOD(r, 0x2, 3, 1); /* 500MHz and 1000MHz */ 130 r = FLD_MOD(r, 0x2, 3, 1); /* 500MHz and 1000MHz */
133 }
134 131
135 hdmi_write_reg(pll->base, PLLCTRL_CFG2, r); 132 hdmi_write_reg(pll->base, PLLCTRL_CFG2, r);
136 133
134 REG_FLD_MOD(pll->base, PLLCTRL_CFG3, fmt->regsd, 17, 10);
135
137 r = hdmi_read_reg(pll->base, PLLCTRL_CFG4); 136 r = hdmi_read_reg(pll->base, PLLCTRL_CFG4);
138 r = FLD_MOD(r, fmt->regm2, 24, 18); 137 r = FLD_MOD(r, fmt->regm2, 24, 18);
139 r = FLD_MOD(r, fmt->regmf, 17, 0); 138 r = FLD_MOD(r, fmt->regmf, 17, 0);
@@ -144,8 +143,8 @@ static int hdmi_pll_config(struct hdmi_pll_data *pll)
144 143
145 /* wait for bit change */ 144 /* wait for bit change */
146 if (hdmi_wait_for_bit_change(pll->base, PLLCTRL_PLL_GO, 145 if (hdmi_wait_for_bit_change(pll->base, PLLCTRL_PLL_GO,
147 0, 0, 1) != 1) { 146 0, 0, 0) != 0) {
148 DSSERR("PLL GO bit not set\n"); 147 DSSERR("PLL GO bit not clearing\n");
149 return -ETIMEDOUT; 148 return -ETIMEDOUT;
150 } 149 }
151 150
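The last hdmi_pll hunk inverts the post-GO wait: the code now polls bit 0 of PLLCTRL_PLL_GO until the hardware clears it, instead of waiting for it to read back as set. A generic sketch of a bounded poll-until-clear loop (helper name and timeout handling are an assumption, not the driver's own helper):

#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/types.h>

/* Poll the given bit until it reads 0, giving up after ~timeout_us. */
static int my_wait_for_bit_clear(void __iomem *base, u32 offset,
				 unsigned int bit, unsigned int timeout_us)
{
	while (timeout_us--) {
		if (!(readl(base + offset) & BIT(bit)))
			return 0;
		udelay(1);
	}

	return -ETIMEDOUT;
}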
diff --git a/drivers/video/fbdev/omap2/dss/rfbi.c b/drivers/video/fbdev/omap2/dss/rfbi.c
index c8a81a2b879c..878273f58839 100644
--- a/drivers/video/fbdev/omap2/dss/rfbi.c
+++ b/drivers/video/fbdev/omap2/dss/rfbi.c
@@ -1044,6 +1044,7 @@ static struct platform_driver omap_rfbihw_driver = {
1044 .name = "omapdss_rfbi", 1044 .name = "omapdss_rfbi",
1045 .owner = THIS_MODULE, 1045 .owner = THIS_MODULE,
1046 .pm = &rfbi_pm_ops, 1046 .pm = &rfbi_pm_ops,
1047 .suppress_bind_attrs = true,
1047 }, 1048 },
1048}; 1049};
1049 1050
diff --git a/drivers/video/fbdev/omap2/dss/sdi.c b/drivers/video/fbdev/omap2/dss/sdi.c
index 911dcc9173a6..4c9c46d4ea60 100644
--- a/drivers/video/fbdev/omap2/dss/sdi.c
+++ b/drivers/video/fbdev/omap2/dss/sdi.c
@@ -377,6 +377,7 @@ static struct platform_driver omap_sdi_driver = {
377 .driver = { 377 .driver = {
378 .name = "omapdss_sdi", 378 .name = "omapdss_sdi",
379 .owner = THIS_MODULE, 379 .owner = THIS_MODULE,
380 .suppress_bind_attrs = true,
380 }, 381 },
381}; 382};
382 383
diff --git a/drivers/video/fbdev/omap2/dss/venc.c b/drivers/video/fbdev/omap2/dss/venc.c
index 21d81113962b..d077d8a75ddc 100644
--- a/drivers/video/fbdev/omap2/dss/venc.c
+++ b/drivers/video/fbdev/omap2/dss/venc.c
@@ -966,6 +966,7 @@ static struct platform_driver omap_venchw_driver = {
966 .owner = THIS_MODULE, 966 .owner = THIS_MODULE,
967 .pm = &venc_pm_ops, 967 .pm = &venc_pm_ops,
968 .of_match_table = venc_of_match, 968 .of_match_table = venc_of_match,
969 .suppress_bind_attrs = true,
969 }, 970 },
970}; 971};
971 972
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
index 15872433e0c6..ce8a70570756 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-main.c
@@ -1833,14 +1833,13 @@ static void omapfb_free_resources(struct omapfb2_device *fbdev)
1833 if (fbdev == NULL) 1833 if (fbdev == NULL)
1834 return; 1834 return;
1835 1835
1836 for (i = 0; i < fbdev->num_fbs; i++) { 1836 for (i = 0; i < fbdev->num_overlays; i++) {
1837 struct omapfb_info *ofbi = FB2OFB(fbdev->fbs[i]); 1837 struct omap_overlay *ovl = fbdev->overlays[i];
1838 int j;
1839 1838
1840 for (j = 0; j < ofbi->num_overlays; j++) { 1839 ovl->disable(ovl);
1841 struct omap_overlay *ovl = ofbi->overlays[j]; 1840
1842 ovl->disable(ovl); 1841 if (ovl->manager)
1843 } 1842 ovl->unset_manager(ovl);
1844 } 1843 }
1845 1844
1846 for (i = 0; i < fbdev->num_fbs; i++) 1845 for (i = 0; i < fbdev->num_fbs; i++)
@@ -2619,7 +2618,7 @@ err0:
2619 return r; 2618 return r;
2620} 2619}
2621 2620
2622static int __exit omapfb_remove(struct platform_device *pdev) 2621static int omapfb_remove(struct platform_device *pdev)
2623{ 2622{
2624 struct omapfb2_device *fbdev = platform_get_drvdata(pdev); 2623 struct omapfb2_device *fbdev = platform_get_drvdata(pdev);
2625 2624
@@ -2636,7 +2635,7 @@ static int __exit omapfb_remove(struct platform_device *pdev)
2636 2635
2637static struct platform_driver omapfb_driver = { 2636static struct platform_driver omapfb_driver = {
2638 .probe = omapfb_probe, 2637 .probe = omapfb_probe,
2639 .remove = __exit_p(omapfb_remove), 2638 .remove = omapfb_remove,
2640 .driver = { 2639 .driver = {
2641 .name = "omapfb", 2640 .name = "omapfb",
2642 .owner = THIS_MODULE, 2641 .owner = THIS_MODULE,
@@ -2651,6 +2650,7 @@ module_param_named(mirror, def_mirror, bool, 0);
2651 2650
2652module_platform_driver(omapfb_driver); 2651module_platform_driver(omapfb_driver);
2653 2652
2653MODULE_ALIAS("platform:omapfb");
2654MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@nokia.com>"); 2654MODULE_AUTHOR("Tomi Valkeinen <tomi.valkeinen@nokia.com>");
2655MODULE_DESCRIPTION("OMAP2/3 Framebuffer"); 2655MODULE_DESCRIPTION("OMAP2/3 Framebuffer");
2656MODULE_LICENSE("GPL v2"); 2656MODULE_LICENSE("GPL v2");
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index e3d5bf0a5021..d0107d424ee4 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -87,6 +87,15 @@ config DA9055_WATCHDOG
87 This driver can also be built as a module. If so, the module 87 This driver can also be built as a module. If so, the module
88 will be called da9055_wdt. 88 will be called da9055_wdt.
89 89
90config DA9063_WATCHDOG
91 tristate "Dialog DA9063 Watchdog"
92 depends on MFD_DA9063
93 select WATCHDOG_CORE
94 help
95 Support for the watchdog in the DA9063 PMIC.
96
97 This driver can be built as a module. The module name is da9063_wdt.
98
90config GPIO_WATCHDOG 99config GPIO_WATCHDOG
91 tristate "Watchdog device controlled through GPIO-line" 100 tristate "Watchdog device controlled through GPIO-line"
92 depends on OF_GPIO 101 depends on OF_GPIO
@@ -123,6 +132,7 @@ config WM8350_WATCHDOG
123 132
124config XILINX_WATCHDOG 133config XILINX_WATCHDOG
125 tristate "Xilinx Watchdog timer" 134 tristate "Xilinx Watchdog timer"
135 depends on HAS_IOMEM
126 select WATCHDOG_CORE 136 select WATCHDOG_CORE
127 help 137 help
128 Watchdog driver for the xps_timebase_wdt ip core. 138 Watchdog driver for the xps_timebase_wdt ip core.
@@ -157,6 +167,14 @@ config AT91SAM9X_WATCHDOG
157 Watchdog timer embedded into AT91SAM9X and AT91CAP9 chips. This will 167 Watchdog timer embedded into AT91SAM9X and AT91CAP9 chips. This will
158 reboot your system when the timeout is reached. 168 reboot your system when the timeout is reached.
159 169
170config CADENCE_WATCHDOG
171 tristate "Cadence Watchdog Timer"
172 depends on ARM
173 select WATCHDOG_CORE
174 help
175 Say Y here if you want to include support for the watchdog
176 timer in the Xilinx Zynq.
177
160config 21285_WATCHDOG 178config 21285_WATCHDOG
161 tristate "DC21285 watchdog" 179 tristate "DC21285 watchdog"
162 depends on FOOTBRIDGE 180 depends on FOOTBRIDGE
@@ -319,6 +337,17 @@ config ORION_WATCHDOG
319 To compile this driver as a module, choose M here: the 337 To compile this driver as a module, choose M here: the
320 module will be called orion_wdt. 338 module will be called orion_wdt.
321 339
340config RN5T618_WATCHDOG
341 tristate "Ricoh RN5T618 watchdog"
342 depends on MFD_RN5T618
343 select WATCHDOG_CORE
344 help
345 If you say yes here you get support for watchdog on the Ricoh
346 RN5T618 PMIC.
347
348 This driver can also be built as a module. If so, the module
349 will be called rn5t618_wdt.
350
322config SUNXI_WATCHDOG 351config SUNXI_WATCHDOG
323 tristate "Allwinner SoCs watchdog support" 352 tristate "Allwinner SoCs watchdog support"
324 depends on ARCH_SUNXI 353 depends on ARCH_SUNXI
@@ -444,7 +473,7 @@ config SIRFSOC_WATCHDOG
444 473
445config TEGRA_WATCHDOG 474config TEGRA_WATCHDOG
446 tristate "Tegra watchdog" 475 tristate "Tegra watchdog"
447 depends on ARCH_TEGRA || COMPILE_TEST 476 depends on (ARCH_TEGRA || COMPILE_TEST) && HAS_IOMEM
448 select WATCHDOG_CORE 477 select WATCHDOG_CORE
449 help 478 help
450 Say Y here to include support for the watchdog timer 479 Say Y here to include support for the watchdog timer
@@ -453,6 +482,29 @@ config TEGRA_WATCHDOG
453 To compile this driver as a module, choose M here: the 482 To compile this driver as a module, choose M here: the
454 module will be called tegra_wdt. 483 module will be called tegra_wdt.
455 484
485config QCOM_WDT
486 tristate "QCOM watchdog"
487 depends on HAS_IOMEM
488 depends on ARCH_QCOM
489 select WATCHDOG_CORE
490 help
 491 Say Y here to include support for the watchdog timer found on
 492 QCOM chipsets. Currently supported targets are the MSM8960,
493 APQ8064, and IPQ8064.
494
495 To compile this driver as a module, choose M here: the
496 module will be called qcom_wdt.
497
498config MESON_WATCHDOG
499 tristate "Amlogic Meson SoCs watchdog support"
500 depends on ARCH_MESON
501 select WATCHDOG_CORE
502 help
503 Say Y here to include support for the watchdog timer
504 in Amlogic Meson SoCs.
505 To compile this driver as a module, choose M here: the
506 module will be called meson_wdt.
507
456# AVR32 Architecture 508# AVR32 Architecture
457 509
458config AT32AP700X_WDT 510config AT32AP700X_WDT
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index de1701470c14..c569ec8f8a76 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -32,6 +32,7 @@ obj-$(CONFIG_USBPCWATCHDOG) += pcwd_usb.o
32obj-$(CONFIG_ARM_SP805_WATCHDOG) += sp805_wdt.o 32obj-$(CONFIG_ARM_SP805_WATCHDOG) += sp805_wdt.o
33obj-$(CONFIG_AT91RM9200_WATCHDOG) += at91rm9200_wdt.o 33obj-$(CONFIG_AT91RM9200_WATCHDOG) += at91rm9200_wdt.o
34obj-$(CONFIG_AT91SAM9X_WATCHDOG) += at91sam9_wdt.o 34obj-$(CONFIG_AT91SAM9X_WATCHDOG) += at91sam9_wdt.o
35obj-$(CONFIG_CADENCE_WATCHDOG) += cadence_wdt.o
35obj-$(CONFIG_OMAP_WATCHDOG) += omap_wdt.o 36obj-$(CONFIG_OMAP_WATCHDOG) += omap_wdt.o
36obj-$(CONFIG_TWL4030_WATCHDOG) += twl4030_wdt.o 37obj-$(CONFIG_TWL4030_WATCHDOG) += twl4030_wdt.o
37obj-$(CONFIG_21285_WATCHDOG) += wdt285.o 38obj-$(CONFIG_21285_WATCHDOG) += wdt285.o
@@ -47,6 +48,7 @@ obj-$(CONFIG_IOP_WATCHDOG) += iop_wdt.o
47obj-$(CONFIG_DAVINCI_WATCHDOG) += davinci_wdt.o 48obj-$(CONFIG_DAVINCI_WATCHDOG) += davinci_wdt.o
48obj-$(CONFIG_ORION_WATCHDOG) += orion_wdt.o 49obj-$(CONFIG_ORION_WATCHDOG) += orion_wdt.o
49obj-$(CONFIG_SUNXI_WATCHDOG) += sunxi_wdt.o 50obj-$(CONFIG_SUNXI_WATCHDOG) += sunxi_wdt.o
51obj-$(CONFIG_RN5T618_WATCHDOG) += rn5t618_wdt.o
50obj-$(CONFIG_COH901327_WATCHDOG) += coh901327_wdt.o 52obj-$(CONFIG_COH901327_WATCHDOG) += coh901327_wdt.o
51obj-$(CONFIG_STMP3XXX_RTC_WATCHDOG) += stmp3xxx_rtc_wdt.o 53obj-$(CONFIG_STMP3XXX_RTC_WATCHDOG) += stmp3xxx_rtc_wdt.o
52obj-$(CONFIG_NUC900_WATCHDOG) += nuc900_wdt.o 54obj-$(CONFIG_NUC900_WATCHDOG) += nuc900_wdt.o
@@ -57,8 +59,10 @@ obj-$(CONFIG_RETU_WATCHDOG) += retu_wdt.o
57obj-$(CONFIG_BCM2835_WDT) += bcm2835_wdt.o 59obj-$(CONFIG_BCM2835_WDT) += bcm2835_wdt.o
58obj-$(CONFIG_MOXART_WDT) += moxart_wdt.o 60obj-$(CONFIG_MOXART_WDT) += moxart_wdt.o
59obj-$(CONFIG_SIRFSOC_WATCHDOG) += sirfsoc_wdt.o 61obj-$(CONFIG_SIRFSOC_WATCHDOG) += sirfsoc_wdt.o
62obj-$(CONFIG_QCOM_WDT) += qcom-wdt.o
60obj-$(CONFIG_BCM_KONA_WDT) += bcm_kona_wdt.o 63obj-$(CONFIG_BCM_KONA_WDT) += bcm_kona_wdt.o
61obj-$(CONFIG_TEGRA_WATCHDOG) += tegra_wdt.o 64obj-$(CONFIG_TEGRA_WATCHDOG) += tegra_wdt.o
65obj-$(CONFIG_MESON_WATCHDOG) += meson_wdt.o
62 66
63# AVR32 Architecture 67# AVR32 Architecture
64obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o 68obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o
@@ -173,6 +177,7 @@ obj-$(CONFIG_XEN_WDT) += xen_wdt.o
173# Architecture Independent 177# Architecture Independent
174obj-$(CONFIG_DA9052_WATCHDOG) += da9052_wdt.o 178obj-$(CONFIG_DA9052_WATCHDOG) += da9052_wdt.o
175obj-$(CONFIG_DA9055_WATCHDOG) += da9055_wdt.o 179obj-$(CONFIG_DA9055_WATCHDOG) += da9055_wdt.o
180obj-$(CONFIG_DA9063_WATCHDOG) += da9063_wdt.o
176obj-$(CONFIG_GPIO_WATCHDOG) += gpio_wdt.o 181obj-$(CONFIG_GPIO_WATCHDOG) += gpio_wdt.o
177obj-$(CONFIG_WM831X_WATCHDOG) += wm831x_wdt.o 182obj-$(CONFIG_WM831X_WATCHDOG) += wm831x_wdt.o
178obj-$(CONFIG_WM8350_WATCHDOG) += wm8350_wdt.o 183obj-$(CONFIG_WM8350_WATCHDOG) += wm8350_wdt.o
diff --git a/drivers/watchdog/booke_wdt.c b/drivers/watchdog/booke_wdt.c
index 08a785398eac..e96b09b135c8 100644
--- a/drivers/watchdog/booke_wdt.c
+++ b/drivers/watchdog/booke_wdt.c
@@ -30,8 +30,6 @@
30 * occur, and the final time the board will reset. 30 * occur, and the final time the board will reset.
31 */ 31 */
32 32
33u32 booke_wdt_enabled;
34u32 booke_wdt_period = CONFIG_BOOKE_WDT_DEFAULT_TIMEOUT;
35 33
36#ifdef CONFIG_PPC_FSL_BOOK3E 34#ifdef CONFIG_PPC_FSL_BOOK3E
37#define WDTP(x) ((((x)&0x3)<<30)|(((x)&0x3c)<<15)) 35#define WDTP(x) ((((x)&0x3)<<30)|(((x)&0x3c)<<15))
@@ -41,27 +39,10 @@ u32 booke_wdt_period = CONFIG_BOOKE_WDT_DEFAULT_TIMEOUT;
41#define WDTP_MASK (TCR_WP_MASK) 39#define WDTP_MASK (TCR_WP_MASK)
42#endif 40#endif
43 41
44/* Checks wdt=x and wdt_period=xx command-line option */ 42static bool booke_wdt_enabled;
45notrace int __init early_parse_wdt(char *p) 43module_param(booke_wdt_enabled, bool, 0);
46{ 44static int booke_wdt_period = CONFIG_BOOKE_WDT_DEFAULT_TIMEOUT;
47 if (p && strncmp(p, "0", 1) != 0) 45module_param(booke_wdt_period, int, 0);
48 booke_wdt_enabled = 1;
49
50 return 0;
51}
52early_param("wdt", early_parse_wdt);
53
54int __init early_parse_wdt_period(char *p)
55{
56 unsigned long ret;
57 if (p) {
58 if (!kstrtol(p, 0, &ret))
59 booke_wdt_period = ret;
60 }
61
62 return 0;
63}
64early_param("wdt_period", early_parse_wdt_period);
65 46
66#ifdef CONFIG_PPC_FSL_BOOK3E 47#ifdef CONFIG_PPC_FSL_BOOK3E
67 48
@@ -259,5 +240,6 @@ static int __init booke_wdt_init(void)
259module_init(booke_wdt_init); 240module_init(booke_wdt_init);
260module_exit(booke_wdt_exit); 241module_exit(booke_wdt_exit);
261 242
243MODULE_ALIAS("booke_wdt");
262MODULE_DESCRIPTION("PowerPC Book-E watchdog driver"); 244MODULE_DESCRIPTION("PowerPC Book-E watchdog driver");
263MODULE_LICENSE("GPL"); 245MODULE_LICENSE("GPL");
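The booke_wdt hunk above drops the hand-rolled wdt=/wdt_period= early_param parsers in favour of ordinary module parameters, which can be set both at modprobe time and on the kernel command line as booke_wdt.booke_wdt_enabled=1. A minimal sketch of that pattern with hypothetical names:

#include <linux/module.h>
#include <linux/moduleparam.h>

static bool my_wdt_enabled;
module_param(my_wdt_enabled, bool, 0);
MODULE_PARM_DESC(my_wdt_enabled, "Start the watchdog at load time");

static int my_wdt_period = 3;
module_param(my_wdt_period, int, 0);
MODULE_PARM_DESC(my_wdt_period, "Watchdog period selector");

MODULE_LICENSE("GPL");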
diff --git a/drivers/watchdog/cadence_wdt.c b/drivers/watchdog/cadence_wdt.c
new file mode 100644
index 000000000000..5927c0a98a74
--- /dev/null
+++ b/drivers/watchdog/cadence_wdt.c
@@ -0,0 +1,516 @@
1/*
2 * Cadence WDT driver - Used by Xilinx Zynq
3 *
4 * Copyright (C) 2010 - 2014 Xilinx, Inc.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/clk.h>
13#include <linux/init.h>
14#include <linux/interrupt.h>
15#include <linux/io.h>
16#include <linux/irq.h>
17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <linux/of.h>
20#include <linux/platform_device.h>
21#include <linux/reboot.h>
22#include <linux/watchdog.h>
23
24#define CDNS_WDT_DEFAULT_TIMEOUT 10
25/* Supports 1 - 516 sec */
26#define CDNS_WDT_MIN_TIMEOUT 1
27#define CDNS_WDT_MAX_TIMEOUT 516
28
29/* Restart key */
30#define CDNS_WDT_RESTART_KEY 0x00001999
31
32/* Counter register access key */
33#define CDNS_WDT_REGISTER_ACCESS_KEY 0x00920000
34
35/* Counter value divisor */
36#define CDNS_WDT_COUNTER_VALUE_DIVISOR 0x1000
37
38/* Clock prescaler value and selection */
39#define CDNS_WDT_PRESCALE_64 64
40#define CDNS_WDT_PRESCALE_512 512
41#define CDNS_WDT_PRESCALE_4096 4096
42#define CDNS_WDT_PRESCALE_SELECT_64 1
43#define CDNS_WDT_PRESCALE_SELECT_512 2
44#define CDNS_WDT_PRESCALE_SELECT_4096 3
45
46/* Input clock frequency */
47#define CDNS_WDT_CLK_10MHZ 10000000
48#define CDNS_WDT_CLK_75MHZ 75000000
49
50/* Counter maximum value */
51#define CDNS_WDT_COUNTER_MAX 0xFFF
52
53static int wdt_timeout = CDNS_WDT_DEFAULT_TIMEOUT;
54static int nowayout = WATCHDOG_NOWAYOUT;
55
56module_param(wdt_timeout, int, 0);
57MODULE_PARM_DESC(wdt_timeout,
58 "Watchdog time in seconds. (default="
59 __MODULE_STRING(CDNS_WDT_DEFAULT_TIMEOUT) ")");
60
61module_param(nowayout, int, 0);
62MODULE_PARM_DESC(nowayout,
63 "Watchdog cannot be stopped once started (default="
64 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
65
66/**
67 * struct cdns_wdt - Watchdog device structure
68 * @regs: baseaddress of device
69 * @rst: reset flag
70 * @clk: struct clk * of a clock source
71 * @prescaler: for saving prescaler value
72 * @ctrl_clksel: counter clock prescaler selection
73 * @io_lock: spinlock for IO register access
74 * @cdns_wdt_device: watchdog device structure
75 * @cdns_wdt_notifier: notifier structure
76 *
77 * Structure containing parameters specific to cadence watchdog.
78 */
79struct cdns_wdt {
80 void __iomem *regs;
81 bool rst;
82 struct clk *clk;
83 u32 prescaler;
84 u32 ctrl_clksel;
85 spinlock_t io_lock;
86 struct watchdog_device cdns_wdt_device;
87 struct notifier_block cdns_wdt_notifier;
88};
89
90/* Write access to Registers */
91static inline void cdns_wdt_writereg(struct cdns_wdt *wdt, u32 offset, u32 val)
92{
93 writel_relaxed(val, wdt->regs + offset);
94}
95
96/*************************Register Map**************************************/
97
98/* Register Offsets for the WDT */
99#define CDNS_WDT_ZMR_OFFSET 0x0 /* Zero Mode Register */
100#define CDNS_WDT_CCR_OFFSET 0x4 /* Counter Control Register */
101#define CDNS_WDT_RESTART_OFFSET 0x8 /* Restart Register */
102#define CDNS_WDT_SR_OFFSET 0xC /* Status Register */
103
104/*
105 * Zero Mode Register - This register controls how the time out is indicated
106 * and also contains the access code to allow writes to the register (0xABC).
107 */
108#define CDNS_WDT_ZMR_WDEN_MASK 0x00000001 /* Enable the WDT */
109#define CDNS_WDT_ZMR_RSTEN_MASK 0x00000002 /* Enable the reset output */
110#define CDNS_WDT_ZMR_IRQEN_MASK 0x00000004 /* Enable IRQ output */
111#define CDNS_WDT_ZMR_RSTLEN_16 0x00000030 /* Reset pulse of 16 pclk cycles */
112#define CDNS_WDT_ZMR_ZKEY_VAL 0x00ABC000 /* Access key, 0xABC << 12 */
113/*
114 * Counter Control register - This register controls how fast the timer runs
115 * and the reset value and also contains the access code to allow writes to
116 * the register.
117 */
118#define CDNS_WDT_CCR_CRV_MASK 0x00003FFC /* Counter reset value */
119
120/**
121 * cdns_wdt_stop - Stop the watchdog.
122 *
123 * @wdd: watchdog device
124 *
 125 * Clear the WDEN bit in the ZMR register, writing the access key
 126 * along with it so that the register write takes effect.
127 *
128 * Return: always 0
129 */
130static int cdns_wdt_stop(struct watchdog_device *wdd)
131{
132 struct cdns_wdt *wdt = watchdog_get_drvdata(wdd);
133
134 spin_lock(&wdt->io_lock);
135 cdns_wdt_writereg(wdt, CDNS_WDT_ZMR_OFFSET,
136 CDNS_WDT_ZMR_ZKEY_VAL & (~CDNS_WDT_ZMR_WDEN_MASK));
137 spin_unlock(&wdt->io_lock);
138
139 return 0;
140}
141
142/**
143 * cdns_wdt_reload - Reload the watchdog timer (i.e. pat the watchdog).
144 *
145 * @wdd: watchdog device
146 *
147 * Write the restart key value (0x00001999) to the restart register.
148 *
149 * Return: always 0
150 */
151static int cdns_wdt_reload(struct watchdog_device *wdd)
152{
153 struct cdns_wdt *wdt = watchdog_get_drvdata(wdd);
154
155 spin_lock(&wdt->io_lock);
156 cdns_wdt_writereg(wdt, CDNS_WDT_RESTART_OFFSET,
157 CDNS_WDT_RESTART_KEY);
158 spin_unlock(&wdt->io_lock);
159
160 return 0;
161}
162
163/**
164 * cdns_wdt_start - Enable and start the watchdog.
165 *
166 * @wdd: watchdog device
167 *
168 * The counter value is calculated according to the formula:
169 * calculated count = (timeout * clock) / prescaler + 1.
170 * The calculated count is divided by 0x1000 to obtain the field value
171 * to write to counter control register.
 172 * Writes the CCR register with the calculated count, the prescaler
 173 * selection chosen at probe time and the counter access key.
 174 * Then writes the ZMR register with the watchdog enable bit (WDEN),
 175 * either the reset output (RSTEN) or the interrupt output (IRQEN)
 176 * depending on the reset-on-timeout property, the reset pulse length
 177 * and the ZMR access key.
178 *
179 * Return: always 0
180 */
181static int cdns_wdt_start(struct watchdog_device *wdd)
182{
183 struct cdns_wdt *wdt = watchdog_get_drvdata(wdd);
184 unsigned int data = 0;
185 unsigned short count;
186 unsigned long clock_f = clk_get_rate(wdt->clk);
187
188 /*
 189 * Scale the timeout by the prescaled clock rate and divide by the
 190 * counter value divisor to obtain the counter reset value.
191 */
192 count = (wdd->timeout * (clock_f / wdt->prescaler)) /
193 CDNS_WDT_COUNTER_VALUE_DIVISOR + 1;
194
195 if (count > CDNS_WDT_COUNTER_MAX)
196 count = CDNS_WDT_COUNTER_MAX;
197
198 spin_lock(&wdt->io_lock);
199 cdns_wdt_writereg(wdt, CDNS_WDT_ZMR_OFFSET,
200 CDNS_WDT_ZMR_ZKEY_VAL);
201
202 count = (count << 2) & CDNS_WDT_CCR_CRV_MASK;
203
204 /* Write counter access key first to be able write to register */
205 data = count | CDNS_WDT_REGISTER_ACCESS_KEY | wdt->ctrl_clksel;
206 cdns_wdt_writereg(wdt, CDNS_WDT_CCR_OFFSET, data);
207 data = CDNS_WDT_ZMR_WDEN_MASK | CDNS_WDT_ZMR_RSTLEN_16 |
208 CDNS_WDT_ZMR_ZKEY_VAL;
209
210 /* Reset on timeout if specified in device tree. */
211 if (wdt->rst) {
212 data |= CDNS_WDT_ZMR_RSTEN_MASK;
213 data &= ~CDNS_WDT_ZMR_IRQEN_MASK;
214 } else {
215 data &= ~CDNS_WDT_ZMR_RSTEN_MASK;
216 data |= CDNS_WDT_ZMR_IRQEN_MASK;
217 }
218 cdns_wdt_writereg(wdt, CDNS_WDT_ZMR_OFFSET, data);
219 cdns_wdt_writereg(wdt, CDNS_WDT_RESTART_OFFSET,
220 CDNS_WDT_RESTART_KEY);
221 spin_unlock(&wdt->io_lock);
222
223 return 0;
224}
225
226/**
227 * cdns_wdt_settimeout - Set a new timeout value for the watchdog device.
228 *
229 * @wdd: watchdog device
230 * @new_time: new timeout value that needs to be set
231 * Return: 0 on success
232 *
233 * Update the watchdog_device timeout with new value which is used when
234 * cdns_wdt_start is called.
235 */
236static int cdns_wdt_settimeout(struct watchdog_device *wdd,
237 unsigned int new_time)
238{
239 wdd->timeout = new_time;
240
241 return cdns_wdt_start(wdd);
242}
243
244/**
245 * cdns_wdt_irq_handler - Notifies of watchdog timeout.
246 *
247 * @irq: interrupt number
248 * @dev_id: pointer to a platform device structure
249 * Return: IRQ_HANDLED
250 *
251 * The handler is invoked when the watchdog times out and a
252 * reset on timeout has not been enabled.
253 */
254static irqreturn_t cdns_wdt_irq_handler(int irq, void *dev_id)
255{
256 struct platform_device *pdev = dev_id;
257
258 dev_info(&pdev->dev,
259 "Watchdog timed out. Internal reset not enabled\n");
260
261 return IRQ_HANDLED;
262}
263
264/*
265 * Info structure used to indicate the features supported by the device
266 * to the upper layers. This is defined in watchdog.h header file.
267 */
268static struct watchdog_info cdns_wdt_info = {
269 .identity = "cdns_wdt watchdog",
270 .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING |
271 WDIOF_MAGICCLOSE,
272};
273
274/* Watchdog Core Ops */
275static struct watchdog_ops cdns_wdt_ops = {
276 .owner = THIS_MODULE,
277 .start = cdns_wdt_start,
278 .stop = cdns_wdt_stop,
279 .ping = cdns_wdt_reload,
280 .set_timeout = cdns_wdt_settimeout,
281};
282
283/**
284 * cdns_wdt_notify_sys - Notifier for reboot or shutdown.
285 *
286 * @this: handle to notifier block
287 * @code: turn off indicator
288 * @unused: unused
289 * Return: NOTIFY_DONE
290 *
 291 * This notifier is invoked on system reboot or shutdown so that the
 292 * watchdog can be disabled before the system goes down; otherwise it
 293 * might reset the board early in the next boot.
294 */
295static int cdns_wdt_notify_sys(struct notifier_block *this, unsigned long code,
296 void *unused)
297{
298 struct cdns_wdt *wdt = container_of(this, struct cdns_wdt,
299 cdns_wdt_notifier);
300 if (code == SYS_DOWN || code == SYS_HALT)
301 cdns_wdt_stop(&wdt->cdns_wdt_device);
302
303 return NOTIFY_DONE;
304}
305
306/************************Platform Operations*****************************/
307/**
308 * cdns_wdt_probe - Probe call for the device.
309 *
310 * @pdev: handle to the platform device structure.
311 * Return: 0 on success, negative error otherwise.
312 *
313 * It does all the memory allocation and registration for the device.
314 */
315static int cdns_wdt_probe(struct platform_device *pdev)
316{
317 struct resource *res;
318 int ret, irq;
319 unsigned long clock_f;
320 struct cdns_wdt *wdt;
321 struct watchdog_device *cdns_wdt_device;
322
323 wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
324 if (!wdt)
325 return -ENOMEM;
326
327 cdns_wdt_device = &wdt->cdns_wdt_device;
328 cdns_wdt_device->info = &cdns_wdt_info;
329 cdns_wdt_device->ops = &cdns_wdt_ops;
330 cdns_wdt_device->timeout = CDNS_WDT_DEFAULT_TIMEOUT;
331 cdns_wdt_device->min_timeout = CDNS_WDT_MIN_TIMEOUT;
332 cdns_wdt_device->max_timeout = CDNS_WDT_MAX_TIMEOUT;
333
334 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
335 wdt->regs = devm_ioremap_resource(&pdev->dev, res);
336 if (IS_ERR(wdt->regs))
337 return PTR_ERR(wdt->regs);
338
339 /* Register the interrupt */
340 wdt->rst = of_property_read_bool(pdev->dev.of_node, "reset-on-timeout");
341 irq = platform_get_irq(pdev, 0);
342 if (!wdt->rst && irq >= 0) {
343 ret = devm_request_irq(&pdev->dev, irq, cdns_wdt_irq_handler, 0,
344 pdev->name, pdev);
345 if (ret) {
346 dev_err(&pdev->dev,
347 "cannot register interrupt handler err=%d\n",
348 ret);
349 return ret;
350 }
351 }
352
353 /* Initialize the members of cdns_wdt structure */
354 cdns_wdt_device->parent = &pdev->dev;
355
356 ret = watchdog_init_timeout(cdns_wdt_device, wdt_timeout, &pdev->dev);
357 if (ret) {
358 dev_err(&pdev->dev, "unable to set timeout value\n");
359 return ret;
360 }
361
362 watchdog_set_nowayout(cdns_wdt_device, nowayout);
363 watchdog_set_drvdata(cdns_wdt_device, wdt);
364
365 wdt->clk = devm_clk_get(&pdev->dev, NULL);
366 if (IS_ERR(wdt->clk)) {
367 dev_err(&pdev->dev, "input clock not found\n");
368 ret = PTR_ERR(wdt->clk);
369 return ret;
370 }
371
372 ret = clk_prepare_enable(wdt->clk);
373 if (ret) {
374 dev_err(&pdev->dev, "unable to enable clock\n");
375 return ret;
376 }
377
378 clock_f = clk_get_rate(wdt->clk);
379 if (clock_f <= CDNS_WDT_CLK_75MHZ) {
380 wdt->prescaler = CDNS_WDT_PRESCALE_512;
381 wdt->ctrl_clksel = CDNS_WDT_PRESCALE_SELECT_512;
382 } else {
383 wdt->prescaler = CDNS_WDT_PRESCALE_4096;
384 wdt->ctrl_clksel = CDNS_WDT_PRESCALE_SELECT_4096;
385 }
386
387 spin_lock_init(&wdt->io_lock);
388
389 wdt->cdns_wdt_notifier.notifier_call = &cdns_wdt_notify_sys;
390 ret = register_reboot_notifier(&wdt->cdns_wdt_notifier);
391 if (ret != 0) {
392 dev_err(&pdev->dev, "cannot register reboot notifier err=%d)\n",
393 ret);
394 goto err_clk_disable;
395 }
396
397 ret = watchdog_register_device(cdns_wdt_device);
398 if (ret) {
399 dev_err(&pdev->dev, "Failed to register wdt device\n");
400 goto err_clk_disable;
401 }
402 platform_set_drvdata(pdev, wdt);
403
404 dev_dbg(&pdev->dev, "Xilinx Watchdog Timer at %p with timeout %ds%s\n",
405 wdt->regs, cdns_wdt_device->timeout,
406 nowayout ? ", nowayout" : "");
407
408 return 0;
409
410err_clk_disable:
411 clk_disable_unprepare(wdt->clk);
412
413 return ret;
414}
415
416/**
 417 * cdns_wdt_remove - Remove call for the device.
418 *
419 * @pdev: handle to the platform device structure.
420 * Return: 0 on success, otherwise negative error.
421 *
422 * Unregister the device after releasing the resources.
423 */
424static int cdns_wdt_remove(struct platform_device *pdev)
425{
426 struct cdns_wdt *wdt = platform_get_drvdata(pdev);
427
428 cdns_wdt_stop(&wdt->cdns_wdt_device);
429 watchdog_unregister_device(&wdt->cdns_wdt_device);
430 unregister_reboot_notifier(&wdt->cdns_wdt_notifier);
431 clk_disable_unprepare(wdt->clk);
432
433 return 0;
434}
435
436/**
437 * cdns_wdt_shutdown - Stop the device.
438 *
439 * @pdev: handle to the platform structure.
440 *
441 */
442static void cdns_wdt_shutdown(struct platform_device *pdev)
443{
444 struct cdns_wdt *wdt = platform_get_drvdata(pdev);
445
446 cdns_wdt_stop(&wdt->cdns_wdt_device);
447 clk_disable_unprepare(wdt->clk);
448}
449
450/**
 451 * cdns_wdt_suspend - Stop the watchdog when the system suspends.
452 *
453 * @dev: handle to the device structure.
454 * Return: 0 always.
455 */
456static int __maybe_unused cdns_wdt_suspend(struct device *dev)
457{
458 struct platform_device *pdev = container_of(dev,
459 struct platform_device, dev);
460 struct cdns_wdt *wdt = platform_get_drvdata(pdev);
461
462 cdns_wdt_stop(&wdt->cdns_wdt_device);
463 clk_disable_unprepare(wdt->clk);
464
465 return 0;
466}
467
468/**
469 * cdns_wdt_resume - Resume the device.
470 *
471 * @dev: handle to the device structure.
472 * Return: 0 on success, errno otherwise.
473 */
474static int __maybe_unused cdns_wdt_resume(struct device *dev)
475{
476 int ret;
477 struct platform_device *pdev = container_of(dev,
478 struct platform_device, dev);
479 struct cdns_wdt *wdt = platform_get_drvdata(pdev);
480
481 ret = clk_prepare_enable(wdt->clk);
482 if (ret) {
483 dev_err(dev, "unable to enable clock\n");
484 return ret;
485 }
486 cdns_wdt_start(&wdt->cdns_wdt_device);
487
488 return 0;
489}
490
491static SIMPLE_DEV_PM_OPS(cdns_wdt_pm_ops, cdns_wdt_suspend, cdns_wdt_resume);
492
493static struct of_device_id cdns_wdt_of_match[] = {
494 { .compatible = "cdns,wdt-r1p2", },
495 { /* end of table */ }
496};
497MODULE_DEVICE_TABLE(of, cdns_wdt_of_match);
498
499/* Driver Structure */
500static struct platform_driver cdns_wdt_driver = {
501 .probe = cdns_wdt_probe,
502 .remove = cdns_wdt_remove,
503 .shutdown = cdns_wdt_shutdown,
504 .driver = {
505 .name = "cdns-wdt",
506 .owner = THIS_MODULE,
507 .of_match_table = cdns_wdt_of_match,
508 .pm = &cdns_wdt_pm_ops,
509 },
510};
511
512module_platform_driver(cdns_wdt_driver);
513
514MODULE_AUTHOR("Xilinx, Inc.");
515MODULE_DESCRIPTION("Watchdog driver for Cadence WDT");
516MODULE_LICENSE("GPL");
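To make the cdns_wdt_start() arithmetic concrete: assuming a 100 MHz pclk (above 75 MHz, so the 4096 prescaler is selected), a 10 second timeout yields count = (10 * (100000000 / 4096)) / 4096 + 1 = 60, which is then shifted left by two and masked into the CCR counter-reset field. A stand-alone check of that calculation:

#include <stdio.h>

#define CDNS_WDT_COUNTER_VALUE_DIVISOR	0x1000
#define CDNS_WDT_COUNTER_MAX		0xFFF
#define CDNS_WDT_CCR_CRV_MASK		0x00003FFC

int main(void)
{
	unsigned long clock_f = 100000000;	/* assumed example pclk */
	unsigned int prescaler = 4096;		/* selected when pclk > 75 MHz */
	unsigned int timeout = 10;		/* seconds */
	unsigned int count;

	count = (timeout * (clock_f / prescaler)) /
		CDNS_WDT_COUNTER_VALUE_DIVISOR + 1;
	if (count > CDNS_WDT_COUNTER_MAX)
		count = CDNS_WDT_COUNTER_MAX;

	/* Prints: count = 60, CCR field = 0xf0 */
	printf("count = %u, CCR field = 0x%x\n",
	       count, (count << 2) & CDNS_WDT_CCR_CRV_MASK);
	return 0;
}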
diff --git a/drivers/watchdog/da9063_wdt.c b/drivers/watchdog/da9063_wdt.c
new file mode 100644
index 000000000000..2cd6b2c2dd2a
--- /dev/null
+++ b/drivers/watchdog/da9063_wdt.c
@@ -0,0 +1,191 @@
1/*
2 * Watchdog driver for DA9063 PMICs.
3 *
4 * Copyright(c) 2012 Dialog Semiconductor Ltd.
5 *
6 * Author: Mariusz Wojtasik <mariusz.wojtasik@diasemi.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 */
13
14#include <linux/kernel.h>
15#include <linux/module.h>
16#include <linux/watchdog.h>
17#include <linux/platform_device.h>
18#include <linux/uaccess.h>
19#include <linux/slab.h>
20#include <linux/delay.h>
21#include <linux/mfd/da9063/registers.h>
22#include <linux/mfd/da9063/core.h>
23#include <linux/regmap.h>
24
25/*
26 * Watchdog selector to timeout in seconds.
27 * 0: WDT disabled;
28 * others: timeout = 2048 ms * 2^(TWDSCALE-1).
29 */
30static const unsigned int wdt_timeout[] = { 0, 2, 4, 8, 16, 32, 65, 131 };
31#define DA9063_TWDSCALE_DISABLE 0
32#define DA9063_TWDSCALE_MIN 1
33#define DA9063_TWDSCALE_MAX (ARRAY_SIZE(wdt_timeout) - 1)
34#define DA9063_WDT_MIN_TIMEOUT wdt_timeout[DA9063_TWDSCALE_MIN]
35#define DA9063_WDT_MAX_TIMEOUT wdt_timeout[DA9063_TWDSCALE_MAX]
36#define DA9063_WDG_TIMEOUT wdt_timeout[3]
37
38struct da9063_watchdog {
39 struct da9063 *da9063;
40 struct watchdog_device wdtdev;
41};
42
43static unsigned int da9063_wdt_timeout_to_sel(unsigned int secs)
44{
45 unsigned int i;
46
47 for (i = DA9063_TWDSCALE_MIN; i <= DA9063_TWDSCALE_MAX; i++) {
48 if (wdt_timeout[i] >= secs)
49 return i;
50 }
51
52 return DA9063_TWDSCALE_MAX;
53}
54
55static int _da9063_wdt_set_timeout(struct da9063 *da9063, unsigned int regval)
56{
57 return regmap_update_bits(da9063->regmap, DA9063_REG_CONTROL_D,
58 DA9063_TWDSCALE_MASK, regval);
59}
60
61static int da9063_wdt_start(struct watchdog_device *wdd)
62{
63 struct da9063_watchdog *wdt = watchdog_get_drvdata(wdd);
64 unsigned int selector;
65 int ret;
66
67 selector = da9063_wdt_timeout_to_sel(wdt->wdtdev.timeout);
68 ret = _da9063_wdt_set_timeout(wdt->da9063, selector);
69 if (ret)
70 dev_err(wdt->da9063->dev, "Watchdog failed to start (err = %d)\n",
71 ret);
72
73 return ret;
74}
75
76static int da9063_wdt_stop(struct watchdog_device *wdd)
77{
78 struct da9063_watchdog *wdt = watchdog_get_drvdata(wdd);
79 int ret;
80
81 ret = regmap_update_bits(wdt->da9063->regmap, DA9063_REG_CONTROL_D,
82 DA9063_TWDSCALE_MASK, DA9063_TWDSCALE_DISABLE);
83 if (ret)
84 dev_alert(wdt->da9063->dev, "Watchdog failed to stop (err = %d)\n",
85 ret);
86
87 return ret;
88}
89
90static int da9063_wdt_ping(struct watchdog_device *wdd)
91{
92 struct da9063_watchdog *wdt = watchdog_get_drvdata(wdd);
93 int ret;
94
95 ret = regmap_write(wdt->da9063->regmap, DA9063_REG_CONTROL_F,
96 DA9063_WATCHDOG);
97 if (ret)
98 dev_alert(wdt->da9063->dev, "Failed to ping the watchdog (err = %d)\n",
99 ret);
100
101 return ret;
102}
103
104static int da9063_wdt_set_timeout(struct watchdog_device *wdd,
105 unsigned int timeout)
106{
107 struct da9063_watchdog *wdt = watchdog_get_drvdata(wdd);
108 unsigned int selector;
109 int ret;
110
111 selector = da9063_wdt_timeout_to_sel(timeout);
112 ret = _da9063_wdt_set_timeout(wdt->da9063, selector);
113 if (ret)
114 dev_err(wdt->da9063->dev, "Failed to set watchdog timeout (err = %d)\n",
115 ret);
116 else
117 wdd->timeout = wdt_timeout[selector];
118
119 return ret;
120}
121
122static const struct watchdog_info da9063_watchdog_info = {
123 .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
124 .identity = "DA9063 Watchdog",
125};
126
127static const struct watchdog_ops da9063_watchdog_ops = {
128 .owner = THIS_MODULE,
129 .start = da9063_wdt_start,
130 .stop = da9063_wdt_stop,
131 .ping = da9063_wdt_ping,
132 .set_timeout = da9063_wdt_set_timeout,
133};
134
135static int da9063_wdt_probe(struct platform_device *pdev)
136{
137 int ret;
138 struct da9063 *da9063;
139 struct da9063_watchdog *wdt;
140
141 if (!pdev->dev.parent)
142 return -EINVAL;
143
144 da9063 = dev_get_drvdata(pdev->dev.parent);
145 if (!da9063)
146 return -EINVAL;
147
148 wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
149 if (!wdt)
150 return -ENOMEM;
151
152 wdt->da9063 = da9063;
153
154 wdt->wdtdev.info = &da9063_watchdog_info;
155 wdt->wdtdev.ops = &da9063_watchdog_ops;
156 wdt->wdtdev.min_timeout = DA9063_WDT_MIN_TIMEOUT;
157 wdt->wdtdev.max_timeout = DA9063_WDT_MAX_TIMEOUT;
158 wdt->wdtdev.timeout = DA9063_WDG_TIMEOUT;
159
160 wdt->wdtdev.status = WATCHDOG_NOWAYOUT_INIT_STATUS;
161
162 watchdog_set_drvdata(&wdt->wdtdev, wdt);
163 dev_set_drvdata(&pdev->dev, wdt);
164
165 ret = watchdog_register_device(&wdt->wdtdev);
166
167 return ret;
168}
169
170static int da9063_wdt_remove(struct platform_device *pdev)
171{
172 struct da9063_watchdog *wdt = dev_get_drvdata(&pdev->dev);
173
174 watchdog_unregister_device(&wdt->wdtdev);
175
176 return 0;
177}
178
179static struct platform_driver da9063_wdt_driver = {
180 .probe = da9063_wdt_probe,
181 .remove = da9063_wdt_remove,
182 .driver = {
183 .name = DA9063_DRVNAME_WATCHDOG,
184 },
185};
186module_platform_driver(da9063_wdt_driver);
187
188MODULE_AUTHOR("Mariusz Wojtasik <mariusz.wojtasik@diasemi.com>");
189MODULE_DESCRIPTION("Watchdog driver for Dialog DA9063");
190MODULE_LICENSE("GPL");
191MODULE_ALIAS("platform:" DA9063_DRVNAME_WATCHDOG);
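For reference, the TWDSCALE arithmetic behind wdt_timeout[] can be checked with a small standalone userspace C sketch (not part of the driver; it only reproduces the table and the selector lookup, with timeouts truncated to whole seconds, which is why 2048 ms * 2^5 shows up as 65 s):

#include <stdio.h>

static const unsigned int wdt_timeout[] = { 0, 2, 4, 8, 16, 32, 65, 131 };
#define N_TIMEOUTS (sizeof(wdt_timeout) / sizeof(wdt_timeout[0]))

static unsigned int timeout_to_sel(unsigned int secs)
{
	unsigned int i;

	/* Smallest selector whose timeout covers the request, else the max. */
	for (i = 1; i < N_TIMEOUTS; i++)
		if (wdt_timeout[i] >= secs)
			return i;
	return N_TIMEOUTS - 1;
}

int main(void)
{
	unsigned int scale, sel;

	/* TWDSCALE > 0 selects 2048 ms * 2^(TWDSCALE-1). */
	for (scale = 1; scale < N_TIMEOUTS; scale++)
		printf("TWDSCALE=%u -> %u ms (~%u s)\n",
		       scale, 2048u << (scale - 1), wdt_timeout[scale]);

	sel = timeout_to_sel(10);
	printf("10 s request -> selector %u (%u s)\n", sel, wdt_timeout[sel]);
	return 0;
}

Running it prints the 2048 ms doubling series and shows that a 10 s request maps to selector 4 (16 s), matching da9063_wdt_timeout_to_sel() above.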
diff --git a/drivers/watchdog/dw_wdt.c b/drivers/watchdog/dw_wdt.c
index 9f210299de24..9e577a64ec9e 100644
--- a/drivers/watchdog/dw_wdt.c
+++ b/drivers/watchdog/dw_wdt.c
@@ -21,6 +21,7 @@
21 21
22#include <linux/bitops.h> 22#include <linux/bitops.h>
23#include <linux/clk.h> 23#include <linux/clk.h>
24#include <linux/delay.h>
24#include <linux/device.h> 25#include <linux/device.h>
25#include <linux/err.h> 26#include <linux/err.h>
26#include <linux/fs.h> 27#include <linux/fs.h>
@@ -29,9 +30,11 @@
29#include <linux/miscdevice.h> 30#include <linux/miscdevice.h>
30#include <linux/module.h> 31#include <linux/module.h>
31#include <linux/moduleparam.h> 32#include <linux/moduleparam.h>
33#include <linux/notifier.h>
32#include <linux/of.h> 34#include <linux/of.h>
33#include <linux/pm.h> 35#include <linux/pm.h>
34#include <linux/platform_device.h> 36#include <linux/platform_device.h>
37#include <linux/reboot.h>
35#include <linux/spinlock.h> 38#include <linux/spinlock.h>
36#include <linux/timer.h> 39#include <linux/timer.h>
37#include <linux/uaccess.h> 40#include <linux/uaccess.h>
@@ -40,6 +43,7 @@
40#define WDOG_CONTROL_REG_OFFSET 0x00 43#define WDOG_CONTROL_REG_OFFSET 0x00
41#define WDOG_CONTROL_REG_WDT_EN_MASK 0x01 44#define WDOG_CONTROL_REG_WDT_EN_MASK 0x01
42#define WDOG_TIMEOUT_RANGE_REG_OFFSET 0x04 45#define WDOG_TIMEOUT_RANGE_REG_OFFSET 0x04
46#define WDOG_TIMEOUT_RANGE_TOPINIT_SHIFT 4
43#define WDOG_CURRENT_COUNT_REG_OFFSET 0x08 47#define WDOG_CURRENT_COUNT_REG_OFFSET 0x08
44#define WDOG_COUNTER_RESTART_REG_OFFSET 0x0c 48#define WDOG_COUNTER_RESTART_REG_OFFSET 0x0c
45#define WDOG_COUNTER_RESTART_KICK_VALUE 0x76 49#define WDOG_COUNTER_RESTART_KICK_VALUE 0x76
@@ -62,6 +66,7 @@ static struct {
62 unsigned long next_heartbeat; 66 unsigned long next_heartbeat;
63 struct timer_list timer; 67 struct timer_list timer;
64 int expect_close; 68 int expect_close;
69 struct notifier_block restart_handler;
65} dw_wdt; 70} dw_wdt;
66 71
67static inline int dw_wdt_is_enabled(void) 72static inline int dw_wdt_is_enabled(void)
@@ -106,7 +111,8 @@ static int dw_wdt_set_top(unsigned top_s)
106 } 111 }
107 112
108 /* Set the new value in the watchdog. */ 113 /* Set the new value in the watchdog. */
109 writel(top_val, dw_wdt.regs + WDOG_TIMEOUT_RANGE_REG_OFFSET); 114 writel(top_val | top_val << WDOG_TIMEOUT_RANGE_TOPINIT_SHIFT,
115 dw_wdt.regs + WDOG_TIMEOUT_RANGE_REG_OFFSET);
110 116
111 dw_wdt_set_next_heartbeat(); 117 dw_wdt_set_next_heartbeat();
112 118
@@ -119,6 +125,26 @@ static void dw_wdt_keepalive(void)
119 WDOG_COUNTER_RESTART_REG_OFFSET); 125 WDOG_COUNTER_RESTART_REG_OFFSET);
120} 126}
121 127
128static int dw_wdt_restart_handle(struct notifier_block *this,
129 unsigned long mode, void *cmd)
130{
131 u32 val;
132
133 writel(0, dw_wdt.regs + WDOG_TIMEOUT_RANGE_REG_OFFSET);
134 val = readl(dw_wdt.regs + WDOG_CONTROL_REG_OFFSET);
135 if (val & WDOG_CONTROL_REG_WDT_EN_MASK)
136 writel(WDOG_COUNTER_RESTART_KICK_VALUE, dw_wdt.regs +
137 WDOG_COUNTER_RESTART_REG_OFFSET);
138 else
139 writel(WDOG_CONTROL_REG_WDT_EN_MASK,
140 dw_wdt.regs + WDOG_CONTROL_REG_OFFSET);
141
142 /* wait for reset to assert... */
143 mdelay(500);
144
145 return NOTIFY_DONE;
146}
147
122static void dw_wdt_ping(unsigned long data) 148static void dw_wdt_ping(unsigned long data)
123{ 149{
124 if (time_before(jiffies, dw_wdt.next_heartbeat) || 150 if (time_before(jiffies, dw_wdt.next_heartbeat) ||
@@ -314,6 +340,12 @@ static int dw_wdt_drv_probe(struct platform_device *pdev)
314 if (ret) 340 if (ret)
315 goto out_disable_clk; 341 goto out_disable_clk;
316 342
343 dw_wdt.restart_handler.notifier_call = dw_wdt_restart_handle;
344 dw_wdt.restart_handler.priority = 128;
345 ret = register_restart_handler(&dw_wdt.restart_handler);
346 if (ret)
347 pr_warn("cannot register restart handler\n");
348
317 dw_wdt_set_next_heartbeat(); 349 dw_wdt_set_next_heartbeat();
318 setup_timer(&dw_wdt.timer, dw_wdt_ping, 0); 350 setup_timer(&dw_wdt.timer, dw_wdt_ping, 0);
319 mod_timer(&dw_wdt.timer, jiffies + WDT_TIMEOUT); 351 mod_timer(&dw_wdt.timer, jiffies + WDT_TIMEOUT);
@@ -328,6 +360,8 @@ out_disable_clk:
328 360
329static int dw_wdt_drv_remove(struct platform_device *pdev) 361static int dw_wdt_drv_remove(struct platform_device *pdev)
330{ 362{
363 unregister_restart_handler(&dw_wdt.restart_handler);
364
331 misc_deregister(&dw_wdt_miscdev); 365 misc_deregister(&dw_wdt_miscdev);
332 366
333 clk_disable_unprepare(dw_wdt.clk); 367 clk_disable_unprepare(dw_wdt.clk);
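The dw_wdt change above programs the same TOP encoding into the TOP_INIT field four bits higher, so the initial timeout period matches the configured one. A tiny userspace sketch of that register composition (illustrative only; the TOP encoding value is an arbitrary example):

#include <stdio.h>

#define WDOG_TIMEOUT_RANGE_TOPINIT_SHIFT 4

int main(void)
{
	unsigned int top_val = 0x9;	/* arbitrary example TOP encoding */
	unsigned int reg = top_val | top_val << WDOG_TIMEOUT_RANGE_TOPINIT_SHIFT;

	/* Same encoding lands in TOP (low nibble) and TOP_INIT (next nibble). */
	printf("WDOG_TIMEOUT_RANGE = 0x%02x\n", reg);	/* prints 0x99 */
	return 0;
}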
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index 68c3d379ffa8..7e12f88bb4a6 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -22,14 +22,17 @@
22 */ 22 */
23 23
24#include <linux/clk.h> 24#include <linux/clk.h>
25#include <linux/delay.h>
25#include <linux/init.h> 26#include <linux/init.h>
26#include <linux/io.h> 27#include <linux/io.h>
27#include <linux/jiffies.h> 28#include <linux/jiffies.h>
28#include <linux/kernel.h> 29#include <linux/kernel.h>
29#include <linux/module.h> 30#include <linux/module.h>
30#include <linux/moduleparam.h> 31#include <linux/moduleparam.h>
32#include <linux/notifier.h>
31#include <linux/of_address.h> 33#include <linux/of_address.h>
32#include <linux/platform_device.h> 34#include <linux/platform_device.h>
35#include <linux/reboot.h>
33#include <linux/regmap.h> 36#include <linux/regmap.h>
34#include <linux/timer.h> 37#include <linux/timer.h>
35#include <linux/watchdog.h> 38#include <linux/watchdog.h>
@@ -59,6 +62,7 @@ struct imx2_wdt_device {
59 struct regmap *regmap; 62 struct regmap *regmap;
60 struct timer_list timer; /* Pings the watchdog when closed */ 63 struct timer_list timer; /* Pings the watchdog when closed */
61 struct watchdog_device wdog; 64 struct watchdog_device wdog;
65 struct notifier_block restart_handler;
62}; 66};
63 67
64static bool nowayout = WATCHDOG_NOWAYOUT; 68static bool nowayout = WATCHDOG_NOWAYOUT;
@@ -77,6 +81,31 @@ static const struct watchdog_info imx2_wdt_info = {
77 .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE, 81 .options = WDIOF_KEEPALIVEPING | WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE,
78}; 82};
79 83
84static int imx2_restart_handler(struct notifier_block *this, unsigned long mode,
85 void *cmd)
86{
87 unsigned int wcr_enable = IMX2_WDT_WCR_WDE;
88 struct imx2_wdt_device *wdev = container_of(this,
89 struct imx2_wdt_device,
90 restart_handler);
91 /* Assert SRS signal */
92 regmap_write(wdev->regmap, 0, wcr_enable);
93 /*
 94	 * Due to imx6q errata ERR004346 (WDOG: the WDOG SRS bit needs to be
 95	 * written twice), we add another two writes to ensure that at least
 96	 * two writes land within the same 32kHz clock period. We skip the
 97	 * SoC-specific check here, since the extra writes are not a real
 98	 * burden on other platforms.
99 */
100 regmap_write(wdev->regmap, 0, wcr_enable);
101 regmap_write(wdev->regmap, 0, wcr_enable);
102
103 /* wait for reset to assert... */
104 mdelay(500);
105
106 return NOTIFY_DONE;
107}
108
80static inline void imx2_wdt_setup(struct watchdog_device *wdog) 109static inline void imx2_wdt_setup(struct watchdog_device *wdog)
81{ 110{
82 struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog); 111 struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog);
@@ -191,12 +220,10 @@ static struct regmap_config imx2_wdt_regmap_config = {
191 220
192static int __init imx2_wdt_probe(struct platform_device *pdev) 221static int __init imx2_wdt_probe(struct platform_device *pdev)
193{ 222{
194 struct device_node *np = pdev->dev.of_node;
195 struct imx2_wdt_device *wdev; 223 struct imx2_wdt_device *wdev;
196 struct watchdog_device *wdog; 224 struct watchdog_device *wdog;
197 struct resource *res; 225 struct resource *res;
198 void __iomem *base; 226 void __iomem *base;
199 bool big_endian;
200 int ret; 227 int ret;
201 u32 val; 228 u32 val;
202 229
@@ -204,10 +231,6 @@ static int __init imx2_wdt_probe(struct platform_device *pdev)
204 if (!wdev) 231 if (!wdev)
205 return -ENOMEM; 232 return -ENOMEM;
206 233
207 big_endian = of_property_read_bool(np, "big-endian");
208 if (big_endian)
209 imx2_wdt_regmap_config.val_format_endian = REGMAP_ENDIAN_BIG;
210
211 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 234 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
212 base = devm_ioremap_resource(&pdev->dev, res); 235 base = devm_ioremap_resource(&pdev->dev, res);
213 if (IS_ERR(base)) 236 if (IS_ERR(base))
@@ -257,6 +280,12 @@ static int __init imx2_wdt_probe(struct platform_device *pdev)
257 return ret; 280 return ret;
258 } 281 }
259 282
283 wdev->restart_handler.notifier_call = imx2_restart_handler;
284 wdev->restart_handler.priority = 128;
285 ret = register_restart_handler(&wdev->restart_handler);
286 if (ret)
287 dev_err(&pdev->dev, "cannot register restart handler\n");
288
260 dev_info(&pdev->dev, "timeout %d sec (nowayout=%d)\n", 289 dev_info(&pdev->dev, "timeout %d sec (nowayout=%d)\n",
261 wdog->timeout, nowayout); 290 wdog->timeout, nowayout);
262 291
@@ -268,6 +297,8 @@ static int __exit imx2_wdt_remove(struct platform_device *pdev)
268 struct watchdog_device *wdog = platform_get_drvdata(pdev); 297 struct watchdog_device *wdog = platform_get_drvdata(pdev);
269 struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog); 298 struct imx2_wdt_device *wdev = watchdog_get_drvdata(wdog);
270 299
300 unregister_restart_handler(&wdev->restart_handler);
301
271 watchdog_unregister_device(wdog); 302 watchdog_unregister_device(wdog);
272 303
273 if (imx2_wdt_is_running(wdev)) { 304 if (imx2_wdt_is_running(wdev)) {
diff --git a/drivers/watchdog/meson_wdt.c b/drivers/watchdog/meson_wdt.c
new file mode 100644
index 000000000000..ef6a298e8c45
--- /dev/null
+++ b/drivers/watchdog/meson_wdt.c
@@ -0,0 +1,236 @@
1/*
2 * Meson Watchdog Driver
3 *
4 * Copyright (c) 2014 Carlo Caione
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <linux/clk.h>
13#include <linux/delay.h>
14#include <linux/err.h>
15#include <linux/init.h>
16#include <linux/io.h>
17#include <linux/kernel.h>
18#include <linux/module.h>
19#include <linux/moduleparam.h>
20#include <linux/notifier.h>
21#include <linux/of.h>
22#include <linux/platform_device.h>
23#include <linux/reboot.h>
24#include <linux/types.h>
25#include <linux/watchdog.h>
26
27#define DRV_NAME "meson_wdt"
28
29#define MESON_WDT_TC 0x00
30#define MESON_WDT_TC_EN BIT(22)
31#define MESON_WDT_TC_TM_MASK 0x3fffff
32#define MESON_WDT_DC_RESET (3 << 24)
33
34#define MESON_WDT_RESET 0x04
35
36#define MESON_WDT_TIMEOUT 30
37#define MESON_WDT_MIN_TIMEOUT 1
38#define MESON_WDT_MAX_TIMEOUT (MESON_WDT_TC_TM_MASK / 100000)
39
40#define MESON_SEC_TO_TC(s) ((s) * 100000)
41
42static bool nowayout = WATCHDOG_NOWAYOUT;
43static unsigned int timeout = MESON_WDT_TIMEOUT;
44
45struct meson_wdt_dev {
46 struct watchdog_device wdt_dev;
47 void __iomem *wdt_base;
48 struct notifier_block restart_handler;
49};
50
51static int meson_restart_handle(struct notifier_block *this, unsigned long mode,
52 void *cmd)
53{
54 u32 tc_reboot = MESON_WDT_DC_RESET | MESON_WDT_TC_EN;
55 struct meson_wdt_dev *meson_wdt = container_of(this,
56 struct meson_wdt_dev,
57 restart_handler);
58
59 while (1) {
60 writel(tc_reboot, meson_wdt->wdt_base + MESON_WDT_TC);
61 mdelay(5);
62 }
63
64 return NOTIFY_DONE;
65}
66
67static int meson_wdt_ping(struct watchdog_device *wdt_dev)
68{
69 struct meson_wdt_dev *meson_wdt = watchdog_get_drvdata(wdt_dev);
70
71 writel(0, meson_wdt->wdt_base + MESON_WDT_RESET);
72
73 return 0;
74}
75
76static void meson_wdt_change_timeout(struct watchdog_device *wdt_dev,
77 unsigned int timeout)
78{
79 struct meson_wdt_dev *meson_wdt = watchdog_get_drvdata(wdt_dev);
80 u32 reg;
81
82 reg = readl(meson_wdt->wdt_base + MESON_WDT_TC);
83 reg &= ~MESON_WDT_TC_TM_MASK;
84 reg |= MESON_SEC_TO_TC(timeout);
85 writel(reg, meson_wdt->wdt_base + MESON_WDT_TC);
86}
87
88static int meson_wdt_set_timeout(struct watchdog_device *wdt_dev,
89 unsigned int timeout)
90{
91 wdt_dev->timeout = timeout;
92
93 meson_wdt_change_timeout(wdt_dev, timeout);
94 meson_wdt_ping(wdt_dev);
95
96 return 0;
97}
98
99static int meson_wdt_stop(struct watchdog_device *wdt_dev)
100{
101 struct meson_wdt_dev *meson_wdt = watchdog_get_drvdata(wdt_dev);
102 u32 reg;
103
104 reg = readl(meson_wdt->wdt_base + MESON_WDT_TC);
105 reg &= ~MESON_WDT_TC_EN;
106 writel(reg, meson_wdt->wdt_base + MESON_WDT_TC);
107
108 return 0;
109}
110
111static int meson_wdt_start(struct watchdog_device *wdt_dev)
112{
113 struct meson_wdt_dev *meson_wdt = watchdog_get_drvdata(wdt_dev);
114 u32 reg;
115
116 meson_wdt_change_timeout(wdt_dev, meson_wdt->wdt_dev.timeout);
117 meson_wdt_ping(wdt_dev);
118
119 reg = readl(meson_wdt->wdt_base + MESON_WDT_TC);
120 reg |= MESON_WDT_TC_EN;
121 writel(reg, meson_wdt->wdt_base + MESON_WDT_TC);
122
123 return 0;
124}
125
126static const struct watchdog_info meson_wdt_info = {
127 .identity = DRV_NAME,
128 .options = WDIOF_SETTIMEOUT |
129 WDIOF_KEEPALIVEPING |
130 WDIOF_MAGICCLOSE,
131};
132
133static const struct watchdog_ops meson_wdt_ops = {
134 .owner = THIS_MODULE,
135 .start = meson_wdt_start,
136 .stop = meson_wdt_stop,
137 .ping = meson_wdt_ping,
138 .set_timeout = meson_wdt_set_timeout,
139};
140
141static int meson_wdt_probe(struct platform_device *pdev)
142{
143 struct resource *res;
144 struct meson_wdt_dev *meson_wdt;
145 int err;
146
147 meson_wdt = devm_kzalloc(&pdev->dev, sizeof(*meson_wdt), GFP_KERNEL);
148 if (!meson_wdt)
149 return -ENOMEM;
150
151 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
152 meson_wdt->wdt_base = devm_ioremap_resource(&pdev->dev, res);
153 if (IS_ERR(meson_wdt->wdt_base))
154 return PTR_ERR(meson_wdt->wdt_base);
155
156 meson_wdt->wdt_dev.parent = &pdev->dev;
157 meson_wdt->wdt_dev.info = &meson_wdt_info;
158 meson_wdt->wdt_dev.ops = &meson_wdt_ops;
159 meson_wdt->wdt_dev.timeout = MESON_WDT_TIMEOUT;
160 meson_wdt->wdt_dev.max_timeout = MESON_WDT_MAX_TIMEOUT;
161 meson_wdt->wdt_dev.min_timeout = MESON_WDT_MIN_TIMEOUT;
162
163 watchdog_set_drvdata(&meson_wdt->wdt_dev, meson_wdt);
164
165 watchdog_init_timeout(&meson_wdt->wdt_dev, timeout, &pdev->dev);
166 watchdog_set_nowayout(&meson_wdt->wdt_dev, nowayout);
167
168 meson_wdt_stop(&meson_wdt->wdt_dev);
169
170 err = watchdog_register_device(&meson_wdt->wdt_dev);
171 if (err)
172 return err;
173
174 platform_set_drvdata(pdev, meson_wdt);
175
176 meson_wdt->restart_handler.notifier_call = meson_restart_handle;
177 meson_wdt->restart_handler.priority = 128;
178 err = register_restart_handler(&meson_wdt->restart_handler);
179 if (err)
180 dev_err(&pdev->dev,
181 "cannot register restart handler (err=%d)\n", err);
182
183 dev_info(&pdev->dev, "Watchdog enabled (timeout=%d sec, nowayout=%d)",
184 meson_wdt->wdt_dev.timeout, nowayout);
185
186 return 0;
187}
188
189static int meson_wdt_remove(struct platform_device *pdev)
190{
191 struct meson_wdt_dev *meson_wdt = platform_get_drvdata(pdev);
192
193 unregister_restart_handler(&meson_wdt->restart_handler);
194
195 watchdog_unregister_device(&meson_wdt->wdt_dev);
196
197 return 0;
198}
199
200static void meson_wdt_shutdown(struct platform_device *pdev)
201{
202 struct meson_wdt_dev *meson_wdt = platform_get_drvdata(pdev);
203
204 meson_wdt_stop(&meson_wdt->wdt_dev);
205}
206
207static const struct of_device_id meson_wdt_dt_ids[] = {
208 { .compatible = "amlogic,meson6-wdt" },
209 { /* sentinel */ }
210};
211MODULE_DEVICE_TABLE(of, meson_wdt_dt_ids);
212
213static struct platform_driver meson_wdt_driver = {
214 .probe = meson_wdt_probe,
215 .remove = meson_wdt_remove,
216 .shutdown = meson_wdt_shutdown,
217 .driver = {
218 .owner = THIS_MODULE,
219 .name = DRV_NAME,
220 .of_match_table = meson_wdt_dt_ids,
221 },
222};
223
224module_platform_driver(meson_wdt_driver);
225
226module_param(timeout, uint, 0);
227MODULE_PARM_DESC(timeout, "Watchdog heartbeat in seconds");
228
229module_param(nowayout, bool, 0);
230MODULE_PARM_DESC(nowayout,
231 "Watchdog cannot be stopped once started (default="
232 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
233
234MODULE_LICENSE("GPL");
235MODULE_AUTHOR("Carlo Caione <carlo@caione.org>");
236MODULE_DESCRIPTION("Meson Watchdog Timer Driver");
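The constants above fix the Meson watchdog timebase at 100 kHz (MESON_SEC_TO_TC) with a 22-bit terminal-count field, which is where the roughly 41-second ceiling comes from. A standalone arithmetic check (plain userspace C, not driver code):

#include <stdio.h>

#define MESON_WDT_TC_TM_MASK	0x3fffff
#define MESON_SEC_TO_TC(s)	((s) * 100000)

int main(void)
{
	unsigned int ticks = MESON_SEC_TO_TC(30);	/* 30 s default timeout */

	printf("counter ticks for 30 s default: %u\n", ticks);
	printf("22-bit field limit: %u ticks -> max timeout %u s\n",
	       MESON_WDT_TC_TM_MASK, MESON_WDT_TC_TM_MASK / 100000);
	return 0;
}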
diff --git a/drivers/watchdog/of_xilinx_wdt.c b/drivers/watchdog/of_xilinx_wdt.c
index 1e6e28df5d7b..b2e1b4cbbdc1 100644
--- a/drivers/watchdog/of_xilinx_wdt.c
+++ b/drivers/watchdog/of_xilinx_wdt.c
@@ -236,7 +236,6 @@ static struct platform_driver xwdt_driver = {
236 .probe = xwdt_probe, 236 .probe = xwdt_probe,
237 .remove = xwdt_remove, 237 .remove = xwdt_remove,
238 .driver = { 238 .driver = {
239 .owner = THIS_MODULE,
240 .name = WATCHDOG_NAME, 239 .name = WATCHDOG_NAME,
241 .of_match_table = xwdt_of_match, 240 .of_match_table = xwdt_of_match,
242 }, 241 },
diff --git a/drivers/watchdog/qcom-wdt.c b/drivers/watchdog/qcom-wdt.c
new file mode 100644
index 000000000000..aa85618c4d03
--- /dev/null
+++ b/drivers/watchdog/qcom-wdt.c
@@ -0,0 +1,224 @@
1/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 *
12 */
13#include <linux/clk.h>
14#include <linux/delay.h>
15#include <linux/io.h>
16#include <linux/kernel.h>
17#include <linux/module.h>
18#include <linux/of.h>
19#include <linux/platform_device.h>
20#include <linux/reboot.h>
21#include <linux/watchdog.h>
22
23#define WDT_RST 0x0
24#define WDT_EN 0x8
25#define WDT_BITE_TIME 0x24
26
27struct qcom_wdt {
28 struct watchdog_device wdd;
29 struct clk *clk;
30 unsigned long rate;
31 struct notifier_block restart_nb;
32 void __iomem *base;
33};
34
35static inline
36struct qcom_wdt *to_qcom_wdt(struct watchdog_device *wdd)
37{
38 return container_of(wdd, struct qcom_wdt, wdd);
39}
40
41static int qcom_wdt_start(struct watchdog_device *wdd)
42{
43 struct qcom_wdt *wdt = to_qcom_wdt(wdd);
44
45 writel(0, wdt->base + WDT_EN);
46 writel(1, wdt->base + WDT_RST);
47 writel(wdd->timeout * wdt->rate, wdt->base + WDT_BITE_TIME);
48 writel(1, wdt->base + WDT_EN);
49 return 0;
50}
51
52static int qcom_wdt_stop(struct watchdog_device *wdd)
53{
54 struct qcom_wdt *wdt = to_qcom_wdt(wdd);
55
56 writel(0, wdt->base + WDT_EN);
57 return 0;
58}
59
60static int qcom_wdt_ping(struct watchdog_device *wdd)
61{
62 struct qcom_wdt *wdt = to_qcom_wdt(wdd);
63
64 writel(1, wdt->base + WDT_RST);
65 return 0;
66}
67
68static int qcom_wdt_set_timeout(struct watchdog_device *wdd,
69 unsigned int timeout)
70{
71 wdd->timeout = timeout;
72 return qcom_wdt_start(wdd);
73}
74
75static const struct watchdog_ops qcom_wdt_ops = {
76 .start = qcom_wdt_start,
77 .stop = qcom_wdt_stop,
78 .ping = qcom_wdt_ping,
79 .set_timeout = qcom_wdt_set_timeout,
80 .owner = THIS_MODULE,
81};
82
83static const struct watchdog_info qcom_wdt_info = {
84 .options = WDIOF_KEEPALIVEPING
85 | WDIOF_MAGICCLOSE
86 | WDIOF_SETTIMEOUT,
87 .identity = KBUILD_MODNAME,
88};
89
90static int qcom_wdt_restart(struct notifier_block *nb, unsigned long action,
91 void *data)
92{
93 struct qcom_wdt *wdt = container_of(nb, struct qcom_wdt, restart_nb);
94 u32 timeout;
95
96 /*
97 * Trigger watchdog bite:
 98	 * Set BITE_TIME to 128ms and enable the WDT.
99 */
100 timeout = 128 * wdt->rate / 1000;
101
102 writel(0, wdt->base + WDT_EN);
103 writel(1, wdt->base + WDT_RST);
104 writel(timeout, wdt->base + WDT_BITE_TIME);
105 writel(1, wdt->base + WDT_EN);
106
107 /*
108 * Actually make sure the above sequence hits hardware before sleeping.
109 */
110 wmb();
111
112 msleep(150);
113 return NOTIFY_DONE;
114}
115
116static int qcom_wdt_probe(struct platform_device *pdev)
117{
118 struct qcom_wdt *wdt;
119 struct resource *res;
120 int ret;
121
122 wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
123 if (!wdt)
124 return -ENOMEM;
125
126 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
127 wdt->base = devm_ioremap_resource(&pdev->dev, res);
128 if (IS_ERR(wdt->base))
129 return PTR_ERR(wdt->base);
130
131 wdt->clk = devm_clk_get(&pdev->dev, NULL);
132 if (IS_ERR(wdt->clk)) {
133 dev_err(&pdev->dev, "failed to get input clock\n");
134 return PTR_ERR(wdt->clk);
135 }
136
137 ret = clk_prepare_enable(wdt->clk);
138 if (ret) {
139 dev_err(&pdev->dev, "failed to setup clock\n");
140 return ret;
141 }
142
143 /*
144 * We use the clock rate to calculate the max timeout, so ensure it's
145 * not zero to avoid a divide-by-zero exception.
146 *
147	 * WATCHDOG_CORE assumes units of seconds; if the WDT is clocked such
148	 * that it would bite before a second elapses, its usefulness is
149	 * limited. Bail if this is the case.
150 */
151 wdt->rate = clk_get_rate(wdt->clk);
152 if (wdt->rate == 0 ||
153 wdt->rate > 0x10000000U) {
154 dev_err(&pdev->dev, "invalid clock rate\n");
155 ret = -EINVAL;
156 goto err_clk_unprepare;
157 }
158
159 wdt->wdd.dev = &pdev->dev;
160 wdt->wdd.info = &qcom_wdt_info;
161 wdt->wdd.ops = &qcom_wdt_ops;
162 wdt->wdd.min_timeout = 1;
163 wdt->wdd.max_timeout = 0x10000000U / wdt->rate;
164
165 /*
166	 * If 'timeout-sec' is unspecified in the devicetree, assume a 30 second
167	 * default, unless the max timeout is less than 30 seconds, in which
168	 * case use the max instead.
169 */
170 wdt->wdd.timeout = min(wdt->wdd.max_timeout, 30U);
171 watchdog_init_timeout(&wdt->wdd, 0, &pdev->dev);
172
173 ret = watchdog_register_device(&wdt->wdd);
174 if (ret) {
175 dev_err(&pdev->dev, "failed to register watchdog\n");
176 goto err_clk_unprepare;
177 }
178
179 /*
180 * WDT restart notifier has priority 0 (use as a last resort)
181 */
182 wdt->restart_nb.notifier_call = qcom_wdt_restart;
183 ret = register_restart_handler(&wdt->restart_nb);
184 if (ret)
185 dev_err(&pdev->dev, "failed to setup restart handler\n");
186
187 platform_set_drvdata(pdev, wdt);
188 return 0;
189
190err_clk_unprepare:
191 clk_disable_unprepare(wdt->clk);
192 return ret;
193}
194
195static int qcom_wdt_remove(struct platform_device *pdev)
196{
197 struct qcom_wdt *wdt = platform_get_drvdata(pdev);
198
199 unregister_restart_handler(&wdt->restart_nb);
200 watchdog_unregister_device(&wdt->wdd);
201 clk_disable_unprepare(wdt->clk);
202 return 0;
203}
204
205static const struct of_device_id qcom_wdt_of_table[] = {
206 { .compatible = "qcom,kpss-wdt-msm8960", },
207 { .compatible = "qcom,kpss-wdt-apq8064", },
208 { .compatible = "qcom,kpss-wdt-ipq8064", },
209 { },
210};
211MODULE_DEVICE_TABLE(of, qcom_wdt_of_table);
212
213static struct platform_driver qcom_watchdog_driver = {
214 .probe = qcom_wdt_probe,
215 .remove = qcom_wdt_remove,
216 .driver = {
217 .name = KBUILD_MODNAME,
218 .of_match_table = qcom_wdt_of_table,
219 },
220};
221module_platform_driver(qcom_watchdog_driver);
222
223MODULE_DESCRIPTION("QCOM KPSS Watchdog Driver");
224MODULE_LICENSE("GPL v2");
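Both the restart path (128 ms bite) and the max_timeout bound above scale with the input clock. A standalone sketch of the arithmetic, assuming a typical 32768 Hz KPSS watchdog clock (the actual rate comes from the clock framework at runtime):

#include <stdio.h>

int main(void)
{
	unsigned long rate = 32768;		/* assumed KPSS WDT clock, Hz */
	unsigned long bite = 128 * rate / 1000;	/* ticks for a 128 ms bite */
	unsigned long max_timeout = 0x10000000UL / rate;

	printf("bite time = %lu ticks, max timeout = %lu s\n",
	       bite, max_timeout);
	return 0;
}

At that rate the bite fires after 4194 ticks and the 28-bit counter limit allows timeouts up to 8192 seconds.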
diff --git a/drivers/watchdog/rn5t618_wdt.c b/drivers/watchdog/rn5t618_wdt.c
new file mode 100644
index 000000000000..d1c12278cb6a
--- /dev/null
+++ b/drivers/watchdog/rn5t618_wdt.c
@@ -0,0 +1,198 @@
1/*
2 * Watchdog driver for Ricoh RN5T618 PMIC
3 *
4 * Copyright (C) 2014 Beniamino Galvani <b.galvani@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 *
10 * You should have received a copy of the GNU General Public License
11 * along with this program. If not, see <http://www.gnu.org/licenses/>.
12 */
13
14#include <linux/device.h>
15#include <linux/mfd/rn5t618.h>
16#include <linux/module.h>
17#include <linux/platform_device.h>
18#include <linux/watchdog.h>
19
20#define DRIVER_NAME "rn5t618-wdt"
21
22static bool nowayout = WATCHDOG_NOWAYOUT;
23static unsigned int timeout;
24
25module_param(timeout, uint, 0);
26MODULE_PARM_DESC(timeout, "Initial watchdog timeout in seconds");
27
28module_param(nowayout, bool, 0);
29MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
30 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");
31
32struct rn5t618_wdt {
33 struct watchdog_device wdt_dev;
34 struct rn5t618 *rn5t618;
35};
36
37/*
 38 * This array encodes the values of the WDOGTIM field for the supported
 39 * watchdog expiration times. If the watchdog is not accessed before
 40 * the timer expires, the PMU generates an interrupt, and if the CPU
 41 * doesn't clear it within one second, the system is restarted.
42 */
43static const struct {
44 u8 reg_val;
45 unsigned int time;
46} rn5t618_wdt_map[] = {
47 { 0, 1 },
48 { 1, 8 },
49 { 2, 32 },
50 { 3, 128 },
51};
52
53static int rn5t618_wdt_set_timeout(struct watchdog_device *wdt_dev,
54 unsigned int t)
55{
56 struct rn5t618_wdt *wdt = watchdog_get_drvdata(wdt_dev);
57 int ret, i;
58
59 for (i = 0; i < ARRAY_SIZE(rn5t618_wdt_map); i++) {
60 if (rn5t618_wdt_map[i].time + 1 >= t)
61 break;
62 }
63
64 if (i == ARRAY_SIZE(rn5t618_wdt_map))
65 return -EINVAL;
66
67 ret = regmap_update_bits(wdt->rn5t618->regmap, RN5T618_WATCHDOG,
68 RN5T618_WATCHDOG_WDOGTIM_M,
69 rn5t618_wdt_map[i].reg_val);
70 if (!ret)
71 wdt_dev->timeout = rn5t618_wdt_map[i].time;
72
73 return ret;
74}
75
76static int rn5t618_wdt_start(struct watchdog_device *wdt_dev)
77{
78 struct rn5t618_wdt *wdt = watchdog_get_drvdata(wdt_dev);
79 int ret;
80
81 ret = rn5t618_wdt_set_timeout(wdt_dev, wdt_dev->timeout);
82 if (ret)
83 return ret;
84
85 /* enable repower-on */
86 ret = regmap_update_bits(wdt->rn5t618->regmap, RN5T618_REPCNT,
87 RN5T618_REPCNT_REPWRON,
88 RN5T618_REPCNT_REPWRON);
89 if (ret)
90 return ret;
91
92 /* enable watchdog */
93 ret = regmap_update_bits(wdt->rn5t618->regmap, RN5T618_WATCHDOG,
94 RN5T618_WATCHDOG_WDOGEN,
95 RN5T618_WATCHDOG_WDOGEN);
96 if (ret)
97 return ret;
98
99 /* enable watchdog interrupt */
100 return regmap_update_bits(wdt->rn5t618->regmap, RN5T618_PWRIREN,
101 RN5T618_PWRIRQ_IR_WDOG,
102 RN5T618_PWRIRQ_IR_WDOG);
103}
104
105static int rn5t618_wdt_stop(struct watchdog_device *wdt_dev)
106{
107 struct rn5t618_wdt *wdt = watchdog_get_drvdata(wdt_dev);
108
109 return regmap_update_bits(wdt->rn5t618->regmap, RN5T618_WATCHDOG,
110 RN5T618_WATCHDOG_WDOGEN, 0);
111}
112
113static int rn5t618_wdt_ping(struct watchdog_device *wdt_dev)
114{
115 struct rn5t618_wdt *wdt = watchdog_get_drvdata(wdt_dev);
116 unsigned int val;
117 int ret;
118
119	/* The counter is restarted after an R/W access to the watchdog register */
120 ret = regmap_read(wdt->rn5t618->regmap, RN5T618_WATCHDOG, &val);
121 if (ret)
122 return ret;
123
124 ret = regmap_write(wdt->rn5t618->regmap, RN5T618_WATCHDOG, val);
125 if (ret)
126 return ret;
127
128 /* Clear pending watchdog interrupt */
129 return regmap_update_bits(wdt->rn5t618->regmap, RN5T618_PWRIRQ,
130 RN5T618_PWRIRQ_IR_WDOG, 0);
131}
132
133static struct watchdog_info rn5t618_wdt_info = {
134 .options = WDIOF_SETTIMEOUT | WDIOF_MAGICCLOSE |
135 WDIOF_KEEPALIVEPING,
136 .identity = DRIVER_NAME,
137};
138
139static struct watchdog_ops rn5t618_wdt_ops = {
140 .owner = THIS_MODULE,
141 .start = rn5t618_wdt_start,
142 .stop = rn5t618_wdt_stop,
143 .ping = rn5t618_wdt_ping,
144 .set_timeout = rn5t618_wdt_set_timeout,
145};
146
147static int rn5t618_wdt_probe(struct platform_device *pdev)
148{
149 struct rn5t618 *rn5t618 = dev_get_drvdata(pdev->dev.parent);
150 struct rn5t618_wdt *wdt;
151 int min_timeout, max_timeout;
152
153 wdt = devm_kzalloc(&pdev->dev, sizeof(struct rn5t618_wdt), GFP_KERNEL);
154 if (!wdt)
155 return -ENOMEM;
156
157 min_timeout = rn5t618_wdt_map[0].time;
158 max_timeout = rn5t618_wdt_map[ARRAY_SIZE(rn5t618_wdt_map) - 1].time;
159
160 wdt->rn5t618 = rn5t618;
161 wdt->wdt_dev.info = &rn5t618_wdt_info;
162 wdt->wdt_dev.ops = &rn5t618_wdt_ops;
163 wdt->wdt_dev.min_timeout = min_timeout;
164 wdt->wdt_dev.max_timeout = max_timeout;
165 wdt->wdt_dev.timeout = max_timeout;
166 wdt->wdt_dev.parent = &pdev->dev;
167
168 watchdog_set_drvdata(&wdt->wdt_dev, wdt);
169 watchdog_init_timeout(&wdt->wdt_dev, timeout, &pdev->dev);
170 watchdog_set_nowayout(&wdt->wdt_dev, nowayout);
171
172 platform_set_drvdata(pdev, wdt);
173
174 return watchdog_register_device(&wdt->wdt_dev);
175}
176
177static int rn5t618_wdt_remove(struct platform_device *pdev)
178{
179 struct rn5t618_wdt *wdt = platform_get_drvdata(pdev);
180
181 watchdog_unregister_device(&wdt->wdt_dev);
182
183 return 0;
184}
185
186static struct platform_driver rn5t618_wdt_driver = {
187 .probe = rn5t618_wdt_probe,
188 .remove = rn5t618_wdt_remove,
189 .driver = {
190 .name = DRIVER_NAME,
191 },
192};
193
194module_platform_driver(rn5t618_wdt_driver);
195
196MODULE_AUTHOR("Beniamino Galvani <b.galvani@gmail.com>");
197MODULE_DESCRIPTION("RN5T618 watchdog driver");
198MODULE_LICENSE("GPL v2");
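The set_timeout loop above picks the first rn5t618_wdt_map[] entry whose expiry, plus the one-second grace period before the PMIC restarts the system, covers the request. A standalone userspace sketch of that selection (not driver code):

#include <stdio.h>

static const struct {
	unsigned int reg_val;
	unsigned int time;
} map[] = {
	{ 0, 1 }, { 1, 8 }, { 2, 32 }, { 3, 128 },
};

int main(void)
{
	unsigned int t = 20, i;

	/* First entry whose expiry (+1 s grace) covers the request. */
	for (i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		if (map[i].time + 1 >= t)
			break;

	if (i == sizeof(map) / sizeof(map[0]))
		printf("request %u s: out of range\n", t);
	else
		printf("request %u s -> WDOGTIM=%u (%u s)\n",
		       t, map[i].reg_val, map[i].time);
	return 0;
}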
diff --git a/drivers/watchdog/s3c2410_wdt.c b/drivers/watchdog/s3c2410_wdt.c
index 7c6ccd071baf..1626dc66e763 100644
--- a/drivers/watchdog/s3c2410_wdt.c
+++ b/drivers/watchdog/s3c2410_wdt.c
@@ -41,6 +41,8 @@
41#include <linux/of.h> 41#include <linux/of.h>
42#include <linux/mfd/syscon.h> 42#include <linux/mfd/syscon.h>
43#include <linux/regmap.h> 43#include <linux/regmap.h>
44#include <linux/reboot.h>
45#include <linux/delay.h>
44 46
45#define S3C2410_WTCON 0x00 47#define S3C2410_WTCON 0x00
46#define S3C2410_WTDAT 0x04 48#define S3C2410_WTDAT 0x04
@@ -128,6 +130,7 @@ struct s3c2410_wdt {
128 unsigned long wtdat_save; 130 unsigned long wtdat_save;
129 struct watchdog_device wdt_device; 131 struct watchdog_device wdt_device;
130 struct notifier_block freq_transition; 132 struct notifier_block freq_transition;
133 struct notifier_block restart_handler;
131 struct s3c2410_wdt_variant *drv_data; 134 struct s3c2410_wdt_variant *drv_data;
132 struct regmap *pmureg; 135 struct regmap *pmureg;
133}; 136};
@@ -155,6 +158,15 @@ static const struct s3c2410_wdt_variant drv_data_exynos5420 = {
155 .quirks = QUIRK_HAS_PMU_CONFIG | QUIRK_HAS_RST_STAT, 158 .quirks = QUIRK_HAS_PMU_CONFIG | QUIRK_HAS_RST_STAT,
156}; 159};
157 160
161static const struct s3c2410_wdt_variant drv_data_exynos7 = {
162 .disable_reg = EXYNOS5_WDT_DISABLE_REG_OFFSET,
163 .mask_reset_reg = EXYNOS5_WDT_MASK_RESET_REG_OFFSET,
164 .mask_bit = 23,
165 .rst_stat_reg = EXYNOS5_RST_STAT_REG_OFFSET,
166 .rst_stat_bit = 23, /* A57 WDTRESET */
167 .quirks = QUIRK_HAS_PMU_CONFIG | QUIRK_HAS_RST_STAT,
168};
169
158static const struct of_device_id s3c2410_wdt_match[] = { 170static const struct of_device_id s3c2410_wdt_match[] = {
159 { .compatible = "samsung,s3c2410-wdt", 171 { .compatible = "samsung,s3c2410-wdt",
160 .data = &drv_data_s3c2410 }, 172 .data = &drv_data_s3c2410 },
@@ -162,6 +174,8 @@ static const struct of_device_id s3c2410_wdt_match[] = {
162 .data = &drv_data_exynos5250 }, 174 .data = &drv_data_exynos5250 },
163 { .compatible = "samsung,exynos5420-wdt", 175 { .compatible = "samsung,exynos5420-wdt",
164 .data = &drv_data_exynos5420 }, 176 .data = &drv_data_exynos5420 },
177 { .compatible = "samsung,exynos7-wdt",
178 .data = &drv_data_exynos7 },
165 {}, 179 {},
166}; 180};
167MODULE_DEVICE_TABLE(of, s3c2410_wdt_match); 181MODULE_DEVICE_TABLE(of, s3c2410_wdt_match);
@@ -438,6 +452,31 @@ static inline void s3c2410wdt_cpufreq_deregister(struct s3c2410_wdt *wdt)
438} 452}
439#endif 453#endif
440 454
455static int s3c2410wdt_restart(struct notifier_block *this,
456 unsigned long mode, void *cmd)
457{
458 struct s3c2410_wdt *wdt = container_of(this, struct s3c2410_wdt,
459 restart_handler);
460 void __iomem *wdt_base = wdt->reg_base;
461
462 /* disable watchdog, to be safe */
463 writel(0, wdt_base + S3C2410_WTCON);
464
465 /* put initial values into count and data */
466 writel(0x80, wdt_base + S3C2410_WTCNT);
467 writel(0x80, wdt_base + S3C2410_WTDAT);
468
469 /* set the watchdog to go and reset... */
470 writel(S3C2410_WTCON_ENABLE | S3C2410_WTCON_DIV16 |
471 S3C2410_WTCON_RSTEN | S3C2410_WTCON_PRESCALE(0x20),
472 wdt_base + S3C2410_WTCON);
473
474 /* wait for reset to assert... */
475 mdelay(500);
476
477 return NOTIFY_DONE;
478}
479
441static inline unsigned int s3c2410wdt_get_bootstatus(struct s3c2410_wdt *wdt) 480static inline unsigned int s3c2410wdt_get_bootstatus(struct s3c2410_wdt *wdt)
442{ 481{
443 unsigned int rst_stat; 482 unsigned int rst_stat;
@@ -592,6 +631,12 @@ static int s3c2410wdt_probe(struct platform_device *pdev)
592 631
593 platform_set_drvdata(pdev, wdt); 632 platform_set_drvdata(pdev, wdt);
594 633
634 wdt->restart_handler.notifier_call = s3c2410wdt_restart;
635 wdt->restart_handler.priority = 128;
636 ret = register_restart_handler(&wdt->restart_handler);
637 if (ret)
638 pr_err("cannot register restart handler, %d\n", ret);
639
595 /* print out a statement of readiness */ 640 /* print out a statement of readiness */
596 641
597 wtcon = readl(wdt->reg_base + S3C2410_WTCON); 642 wtcon = readl(wdt->reg_base + S3C2410_WTCON);
@@ -621,6 +666,8 @@ static int s3c2410wdt_remove(struct platform_device *dev)
621 int ret; 666 int ret;
622 struct s3c2410_wdt *wdt = platform_get_drvdata(dev); 667 struct s3c2410_wdt *wdt = platform_get_drvdata(dev);
623 668
669 unregister_restart_handler(&wdt->restart_handler);
670
624 ret = s3c2410wdt_mask_and_disable_reset(wdt, true); 671 ret = s3c2410wdt_mask_and_disable_reset(wdt, true);
625 if (ret < 0) 672 if (ret < 0)
626 return ret; 673 return ret;
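The restart handler added above loads a count of 0x80 with divider 16 and prescaler 0x20, so the forced reset fires after roughly 0x80 * (0x20 + 1) * 16 PCLK cycles, and the mdelay(500) then simply waits for the reset to take hold. A back-of-the-envelope check (the 66 MHz PCLK is an assumption; the real value depends on the SoC clock tree):

#include <stdio.h>

int main(void)
{
	double pclk = 66000000.0;	/* assumed peripheral clock, Hz */
	double t = 0x80 * (0x20 + 1) * 16 / pclk;

	printf("watchdog fires after about %.3f ms\n", t * 1000.0);
	return 0;
}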
diff --git a/drivers/watchdog/stmp3xxx_rtc_wdt.c b/drivers/watchdog/stmp3xxx_rtc_wdt.c
index 3804d5e9baea..a62b1b6decf4 100644
--- a/drivers/watchdog/stmp3xxx_rtc_wdt.c
+++ b/drivers/watchdog/stmp3xxx_rtc_wdt.c
@@ -94,9 +94,33 @@ static int stmp3xxx_wdt_remove(struct platform_device *pdev)
94 return 0; 94 return 0;
95} 95}
96 96
97static int __maybe_unused stmp3xxx_wdt_suspend(struct device *dev)
98{
99 struct watchdog_device *wdd = &stmp3xxx_wdd;
100
101 if (watchdog_active(wdd))
102 return wdt_stop(wdd);
103
104 return 0;
105}
106
107static int __maybe_unused stmp3xxx_wdt_resume(struct device *dev)
108{
109 struct watchdog_device *wdd = &stmp3xxx_wdd;
110
111 if (watchdog_active(wdd))
112 return wdt_start(wdd);
113
114 return 0;
115}
116
117static SIMPLE_DEV_PM_OPS(stmp3xxx_wdt_pm_ops,
118 stmp3xxx_wdt_suspend, stmp3xxx_wdt_resume);
119
97static struct platform_driver stmp3xxx_wdt_driver = { 120static struct platform_driver stmp3xxx_wdt_driver = {
98 .driver = { 121 .driver = {
99 .name = "stmp3xxx_rtc_wdt", 122 .name = "stmp3xxx_rtc_wdt",
123 .pm = &stmp3xxx_wdt_pm_ops,
100 }, 124 },
101 .probe = stmp3xxx_wdt_probe, 125 .probe = stmp3xxx_wdt_probe,
102 .remove = stmp3xxx_wdt_remove, 126 .remove = stmp3xxx_wdt_remove,
diff --git a/drivers/watchdog/sunxi_wdt.c b/drivers/watchdog/sunxi_wdt.c
index 480bb557f353..b62301e74e5f 100644
--- a/drivers/watchdog/sunxi_wdt.c
+++ b/drivers/watchdog/sunxi_wdt.c
@@ -23,6 +23,7 @@
23#include <linux/moduleparam.h> 23#include <linux/moduleparam.h>
24#include <linux/notifier.h> 24#include <linux/notifier.h>
25#include <linux/of.h> 25#include <linux/of.h>
26#include <linux/of_device.h>
26#include <linux/platform_device.h> 27#include <linux/platform_device.h>
27#include <linux/reboot.h> 28#include <linux/reboot.h>
28#include <linux/types.h> 29#include <linux/types.h>
@@ -30,15 +31,11 @@
30 31
31#define WDT_MAX_TIMEOUT 16 32#define WDT_MAX_TIMEOUT 16
32#define WDT_MIN_TIMEOUT 1 33#define WDT_MIN_TIMEOUT 1
33#define WDT_MODE_TIMEOUT(n) ((n) << 3) 34#define WDT_TIMEOUT_MASK 0x0F
34#define WDT_TIMEOUT_MASK WDT_MODE_TIMEOUT(0x0F)
35 35
36#define WDT_CTRL 0x00
37#define WDT_CTRL_RELOAD ((1 << 0) | (0x0a57 << 1)) 36#define WDT_CTRL_RELOAD ((1 << 0) | (0x0a57 << 1))
38 37
39#define WDT_MODE 0x04
40#define WDT_MODE_EN (1 << 0) 38#define WDT_MODE_EN (1 << 0)
41#define WDT_MODE_RST_EN (1 << 1)
42 39
43#define DRV_NAME "sunxi-wdt" 40#define DRV_NAME "sunxi-wdt"
44#define DRV_VERSION "1.0" 41#define DRV_VERSION "1.0"
@@ -46,15 +43,29 @@
46static bool nowayout = WATCHDOG_NOWAYOUT; 43static bool nowayout = WATCHDOG_NOWAYOUT;
47static unsigned int timeout = WDT_MAX_TIMEOUT; 44static unsigned int timeout = WDT_MAX_TIMEOUT;
48 45
46/*
47 * This structure stores the register offsets for different variants
48 * of Allwinner's watchdog hardware.
49 */
50struct sunxi_wdt_reg {
51 u8 wdt_ctrl;
52 u8 wdt_cfg;
53 u8 wdt_mode;
54 u8 wdt_timeout_shift;
55 u8 wdt_reset_mask;
56 u8 wdt_reset_val;
57};
58
49struct sunxi_wdt_dev { 59struct sunxi_wdt_dev {
50 struct watchdog_device wdt_dev; 60 struct watchdog_device wdt_dev;
51 void __iomem *wdt_base; 61 void __iomem *wdt_base;
62 const struct sunxi_wdt_reg *wdt_regs;
52 struct notifier_block restart_handler; 63 struct notifier_block restart_handler;
53}; 64};
54 65
55/* 66/*
56 * wdt_timeout_map maps the watchdog timer interval value in seconds to 67 * wdt_timeout_map maps the watchdog timer interval value in seconds to
57 * the value of the register WDT_MODE bit 3:6 68 * the value of the register WDT_MODE at bits .wdt_timeout_shift ~ +3
58 * 69 *
59 * [timeout seconds] = register value 70 * [timeout seconds] = register value
60 * 71 *
@@ -82,19 +93,32 @@ static int sunxi_restart_handle(struct notifier_block *this, unsigned long mode,
82 struct sunxi_wdt_dev, 93 struct sunxi_wdt_dev,
83 restart_handler); 94 restart_handler);
84 void __iomem *wdt_base = sunxi_wdt->wdt_base; 95 void __iomem *wdt_base = sunxi_wdt->wdt_base;
96 const struct sunxi_wdt_reg *regs = sunxi_wdt->wdt_regs;
97 u32 val;
98
99 /* Set system reset function */
100 val = readl(wdt_base + regs->wdt_cfg);
101 val &= ~(regs->wdt_reset_mask);
102 val |= regs->wdt_reset_val;
103 writel(val, wdt_base + regs->wdt_cfg);
85 104
86 /* Enable timer and set reset bit in the watchdog */ 105 /* Set lowest timeout and enable watchdog */
87 writel(WDT_MODE_EN | WDT_MODE_RST_EN, wdt_base + WDT_MODE); 106 val = readl(wdt_base + regs->wdt_mode);
107 val &= ~(WDT_TIMEOUT_MASK << regs->wdt_timeout_shift);
108 val |= WDT_MODE_EN;
109 writel(val, wdt_base + regs->wdt_mode);
88 110
89 /* 111 /*
90 * Restart the watchdog. The default (and lowest) interval 112 * Restart the watchdog. The default (and lowest) interval
91 * value for the watchdog is 0.5s. 113 * value for the watchdog is 0.5s.
92 */ 114 */
93 writel(WDT_CTRL_RELOAD, wdt_base + WDT_CTRL); 115 writel(WDT_CTRL_RELOAD, wdt_base + regs->wdt_ctrl);
94 116
95 while (1) { 117 while (1) {
96 mdelay(5); 118 mdelay(5);
97 writel(WDT_MODE_EN | WDT_MODE_RST_EN, wdt_base + WDT_MODE); 119 val = readl(wdt_base + regs->wdt_mode);
120 val |= WDT_MODE_EN;
121 writel(val, wdt_base + regs->wdt_mode);
98 } 122 }
99 return NOTIFY_DONE; 123 return NOTIFY_DONE;
100} 124}
@@ -103,8 +127,9 @@ static int sunxi_wdt_ping(struct watchdog_device *wdt_dev)
103{ 127{
104 struct sunxi_wdt_dev *sunxi_wdt = watchdog_get_drvdata(wdt_dev); 128 struct sunxi_wdt_dev *sunxi_wdt = watchdog_get_drvdata(wdt_dev);
105 void __iomem *wdt_base = sunxi_wdt->wdt_base; 129 void __iomem *wdt_base = sunxi_wdt->wdt_base;
130 const struct sunxi_wdt_reg *regs = sunxi_wdt->wdt_regs;
106 131
107 iowrite32(WDT_CTRL_RELOAD, wdt_base + WDT_CTRL); 132 writel(WDT_CTRL_RELOAD, wdt_base + regs->wdt_ctrl);
108 133
109 return 0; 134 return 0;
110} 135}
@@ -114,6 +139,7 @@ static int sunxi_wdt_set_timeout(struct watchdog_device *wdt_dev,
114{ 139{
115 struct sunxi_wdt_dev *sunxi_wdt = watchdog_get_drvdata(wdt_dev); 140 struct sunxi_wdt_dev *sunxi_wdt = watchdog_get_drvdata(wdt_dev);
116 void __iomem *wdt_base = sunxi_wdt->wdt_base; 141 void __iomem *wdt_base = sunxi_wdt->wdt_base;
142 const struct sunxi_wdt_reg *regs = sunxi_wdt->wdt_regs;
117 u32 reg; 143 u32 reg;
118 144
119 if (wdt_timeout_map[timeout] == 0) 145 if (wdt_timeout_map[timeout] == 0)
@@ -121,10 +147,10 @@ static int sunxi_wdt_set_timeout(struct watchdog_device *wdt_dev,
121 147
122 sunxi_wdt->wdt_dev.timeout = timeout; 148 sunxi_wdt->wdt_dev.timeout = timeout;
123 149
124 reg = ioread32(wdt_base + WDT_MODE); 150 reg = readl(wdt_base + regs->wdt_mode);
125 reg &= ~WDT_TIMEOUT_MASK; 151 reg &= ~(WDT_TIMEOUT_MASK << regs->wdt_timeout_shift);
126 reg |= WDT_MODE_TIMEOUT(wdt_timeout_map[timeout]); 152 reg |= wdt_timeout_map[timeout] << regs->wdt_timeout_shift;
127 iowrite32(reg, wdt_base + WDT_MODE); 153 writel(reg, wdt_base + regs->wdt_mode);
128 154
129 sunxi_wdt_ping(wdt_dev); 155 sunxi_wdt_ping(wdt_dev);
130 156
@@ -135,8 +161,9 @@ static int sunxi_wdt_stop(struct watchdog_device *wdt_dev)
135{ 161{
136 struct sunxi_wdt_dev *sunxi_wdt = watchdog_get_drvdata(wdt_dev); 162 struct sunxi_wdt_dev *sunxi_wdt = watchdog_get_drvdata(wdt_dev);
137 void __iomem *wdt_base = sunxi_wdt->wdt_base; 163 void __iomem *wdt_base = sunxi_wdt->wdt_base;
164 const struct sunxi_wdt_reg *regs = sunxi_wdt->wdt_regs;
138 165
139 iowrite32(0, wdt_base + WDT_MODE); 166 writel(0, wdt_base + regs->wdt_mode);
140 167
141 return 0; 168 return 0;
142} 169}
@@ -146,6 +173,7 @@ static int sunxi_wdt_start(struct watchdog_device *wdt_dev)
146 u32 reg; 173 u32 reg;
147 struct sunxi_wdt_dev *sunxi_wdt = watchdog_get_drvdata(wdt_dev); 174 struct sunxi_wdt_dev *sunxi_wdt = watchdog_get_drvdata(wdt_dev);
148 void __iomem *wdt_base = sunxi_wdt->wdt_base; 175 void __iomem *wdt_base = sunxi_wdt->wdt_base;
176 const struct sunxi_wdt_reg *regs = sunxi_wdt->wdt_regs;
149 int ret; 177 int ret;
150 178
151 ret = sunxi_wdt_set_timeout(&sunxi_wdt->wdt_dev, 179 ret = sunxi_wdt_set_timeout(&sunxi_wdt->wdt_dev,
@@ -153,9 +181,16 @@ static int sunxi_wdt_start(struct watchdog_device *wdt_dev)
153 if (ret < 0) 181 if (ret < 0)
154 return ret; 182 return ret;
155 183
156 reg = ioread32(wdt_base + WDT_MODE); 184 /* Set system reset function */
157 reg |= (WDT_MODE_RST_EN | WDT_MODE_EN); 185 reg = readl(wdt_base + regs->wdt_cfg);
158 iowrite32(reg, wdt_base + WDT_MODE); 186 reg &= ~(regs->wdt_reset_mask);
187 reg |= regs->wdt_reset_val;
188 writel(reg, wdt_base + regs->wdt_cfg);
189
190 /* Enable watchdog */
191 reg = readl(wdt_base + regs->wdt_mode);
192 reg |= WDT_MODE_EN;
193 writel(reg, wdt_base + regs->wdt_mode);
159 194
160 return 0; 195 return 0;
161} 196}
@@ -175,9 +210,35 @@ static const struct watchdog_ops sunxi_wdt_ops = {
175 .set_timeout = sunxi_wdt_set_timeout, 210 .set_timeout = sunxi_wdt_set_timeout,
176}; 211};
177 212
213static const struct sunxi_wdt_reg sun4i_wdt_reg = {
214 .wdt_ctrl = 0x00,
215 .wdt_cfg = 0x04,
216 .wdt_mode = 0x04,
217 .wdt_timeout_shift = 3,
218 .wdt_reset_mask = 0x02,
219 .wdt_reset_val = 0x02,
220};
221
222static const struct sunxi_wdt_reg sun6i_wdt_reg = {
223 .wdt_ctrl = 0x10,
224 .wdt_cfg = 0x14,
225 .wdt_mode = 0x18,
226 .wdt_timeout_shift = 4,
227 .wdt_reset_mask = 0x03,
228 .wdt_reset_val = 0x01,
229};
230
231static const struct of_device_id sunxi_wdt_dt_ids[] = {
232 { .compatible = "allwinner,sun4i-a10-wdt", .data = &sun4i_wdt_reg },
233 { .compatible = "allwinner,sun6i-a31-wdt", .data = &sun6i_wdt_reg },
234 { /* sentinel */ }
235};
236MODULE_DEVICE_TABLE(of, sunxi_wdt_dt_ids);
237
178static int sunxi_wdt_probe(struct platform_device *pdev) 238static int sunxi_wdt_probe(struct platform_device *pdev)
179{ 239{
180 struct sunxi_wdt_dev *sunxi_wdt; 240 struct sunxi_wdt_dev *sunxi_wdt;
241 const struct of_device_id *device;
181 struct resource *res; 242 struct resource *res;
182 int err; 243 int err;
183 244
@@ -187,6 +248,12 @@ static int sunxi_wdt_probe(struct platform_device *pdev)
187 248
188 platform_set_drvdata(pdev, sunxi_wdt); 249 platform_set_drvdata(pdev, sunxi_wdt);
189 250
251 device = of_match_device(sunxi_wdt_dt_ids, &pdev->dev);
252 if (!device)
253 return -ENODEV;
254
255 sunxi_wdt->wdt_regs = device->data;
256
190 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 257 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
191 sunxi_wdt->wdt_base = devm_ioremap_resource(&pdev->dev, res); 258 sunxi_wdt->wdt_base = devm_ioremap_resource(&pdev->dev, res);
192 if (IS_ERR(sunxi_wdt->wdt_base)) 259 if (IS_ERR(sunxi_wdt->wdt_base))
@@ -242,12 +309,6 @@ static void sunxi_wdt_shutdown(struct platform_device *pdev)
242 sunxi_wdt_stop(&sunxi_wdt->wdt_dev); 309 sunxi_wdt_stop(&sunxi_wdt->wdt_dev);
243} 310}
244 311
245static const struct of_device_id sunxi_wdt_dt_ids[] = {
246 { .compatible = "allwinner,sun4i-a10-wdt" },
247 { /* sentinel */ }
248};
249MODULE_DEVICE_TABLE(of, sunxi_wdt_dt_ids);
250
251static struct platform_driver sunxi_wdt_driver = { 312static struct platform_driver sunxi_wdt_driver = {
252 .probe = sunxi_wdt_probe, 313 .probe = sunxi_wdt_probe,
253 .remove = sunxi_wdt_remove, 314 .remove = sunxi_wdt_remove,
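With the per-variant sunxi_wdt_reg description above, the same code path now produces different WDT_MODE values on sun4i (timeout field starting at bit 3) and sun6i (bit 4). A userspace sketch of the composition, assuming the 0xB encoding that the driver's wdt_timeout_map uses for 16 seconds:

#include <stdio.h>

#define WDT_TIMEOUT_MASK 0x0F
#define WDT_MODE_EN	 (1 << 0)

static unsigned int mode_value(unsigned int reg, unsigned int shift,
			       unsigned int enc)
{
	reg &= ~(WDT_TIMEOUT_MASK << shift);	/* clear old timeout field */
	reg |= enc << shift;			/* program new encoding */
	return reg | WDT_MODE_EN;		/* and enable the watchdog */
}

int main(void)
{
	unsigned int enc = 0xB;	/* assumed encoding for 16 s (wdt_timeout_map) */

	printf("sun4i WDT_MODE = 0x%02x\n", mode_value(0, 3, enc));
	printf("sun6i WDT_MODE = 0x%02x\n", mode_value(0, 4, enc));
	return 0;
}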
diff --git a/drivers/watchdog/ts72xx_wdt.c b/drivers/watchdog/ts72xx_wdt.c
index afa9d6ef353a..dee9c6cbe6df 100644
--- a/drivers/watchdog/ts72xx_wdt.c
+++ b/drivers/watchdog/ts72xx_wdt.c
@@ -428,11 +428,7 @@ static int ts72xx_wdt_probe(struct platform_device *pdev)
428 428
429static int ts72xx_wdt_remove(struct platform_device *pdev) 429static int ts72xx_wdt_remove(struct platform_device *pdev)
430{ 430{
431 int error; 431 return misc_deregister(&ts72xx_wdt_miscdev);
432
433 error = misc_deregister(&ts72xx_wdt_miscdev);
434
435 return error;
436} 432}
437 433
438static struct platform_driver ts72xx_wdt_driver = { 434static struct platform_driver ts72xx_wdt_driver = {
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index 1e0a317d3dcd..3860d02729dc 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -167,6 +167,9 @@ static struct page *balloon_next_page(struct page *page)
167 167
168static enum bp_state update_schedule(enum bp_state state) 168static enum bp_state update_schedule(enum bp_state state)
169{ 169{
170 if (state == BP_ECANCELED)
171 return BP_ECANCELED;
172
170 if (state == BP_DONE) { 173 if (state == BP_DONE) {
171 balloon_stats.schedule_delay = 1; 174 balloon_stats.schedule_delay = 1;
172 balloon_stats.retry_count = 1; 175 balloon_stats.retry_count = 1;
diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c
index dd9c249ea311..95ee4302ffb8 100644
--- a/drivers/xen/pci.c
+++ b/drivers/xen/pci.c
@@ -41,24 +41,29 @@ static int xen_add_device(struct device *dev)
41#endif 41#endif
42 42
43 if (pci_seg_supported) { 43 if (pci_seg_supported) {
44 struct physdev_pci_device_add add = { 44 struct {
45 .seg = pci_domain_nr(pci_dev->bus), 45 struct physdev_pci_device_add add;
46 .bus = pci_dev->bus->number, 46 uint32_t pxm;
47 .devfn = pci_dev->devfn 47 } add_ext = {
48 .add.seg = pci_domain_nr(pci_dev->bus),
49 .add.bus = pci_dev->bus->number,
50 .add.devfn = pci_dev->devfn
48 }; 51 };
52 struct physdev_pci_device_add *add = &add_ext.add;
53
49#ifdef CONFIG_ACPI 54#ifdef CONFIG_ACPI
50 acpi_handle handle; 55 acpi_handle handle;
51#endif 56#endif
52 57
53#ifdef CONFIG_PCI_IOV 58#ifdef CONFIG_PCI_IOV
54 if (pci_dev->is_virtfn) { 59 if (pci_dev->is_virtfn) {
55 add.flags = XEN_PCI_DEV_VIRTFN; 60 add->flags = XEN_PCI_DEV_VIRTFN;
56 add.physfn.bus = physfn->bus->number; 61 add->physfn.bus = physfn->bus->number;
57 add.physfn.devfn = physfn->devfn; 62 add->physfn.devfn = physfn->devfn;
58 } else 63 } else
59#endif 64#endif
60 if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn)) 65 if (pci_ari_enabled(pci_dev->bus) && PCI_SLOT(pci_dev->devfn))
61 add.flags = XEN_PCI_DEV_EXTFN; 66 add->flags = XEN_PCI_DEV_EXTFN;
62 67
63#ifdef CONFIG_ACPI 68#ifdef CONFIG_ACPI
64 handle = ACPI_HANDLE(&pci_dev->dev); 69 handle = ACPI_HANDLE(&pci_dev->dev);
@@ -77,8 +82,8 @@ static int xen_add_device(struct device *dev)
77 status = acpi_evaluate_integer(handle, "_PXM", 82 status = acpi_evaluate_integer(handle, "_PXM",
78 NULL, &pxm); 83 NULL, &pxm);
79 if (ACPI_SUCCESS(status)) { 84 if (ACPI_SUCCESS(status)) {
80 add.optarr[0] = pxm; 85 add->optarr[0] = pxm;
81 add.flags |= XEN_PCI_DEV_PXM; 86 add->flags |= XEN_PCI_DEV_PXM;
82 break; 87 break;
83 } 88 }
84 status = acpi_get_parent(handle, &handle); 89 status = acpi_get_parent(handle, &handle);
@@ -86,7 +91,7 @@ static int xen_add_device(struct device *dev)
86 } 91 }
87#endif /* CONFIG_ACPI */ 92#endif /* CONFIG_ACPI */
88 93
89 r = HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_add, &add); 94 r = HYPERVISOR_physdev_op(PHYSDEVOP_pci_device_add, add);
90 if (r != -ENOSYS) 95 if (r != -ENOSYS)
91 return r; 96 return r;
92 pci_seg_supported = false; 97 pci_seg_supported = false;
diff --git a/fs/Kconfig b/fs/Kconfig
index db5dc1598716..664991afe0c0 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -67,6 +67,7 @@ source "fs/quota/Kconfig"
67 67
68source "fs/autofs4/Kconfig" 68source "fs/autofs4/Kconfig"
69source "fs/fuse/Kconfig" 69source "fs/fuse/Kconfig"
70source "fs/overlayfs/Kconfig"
70 71
71menu "Caches" 72menu "Caches"
72 73
diff --git a/fs/Makefile b/fs/Makefile
index 90c88529892b..da0bbb456d3f 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -104,6 +104,7 @@ obj-$(CONFIG_QNX6FS_FS) += qnx6/
104obj-$(CONFIG_AUTOFS4_FS) += autofs4/ 104obj-$(CONFIG_AUTOFS4_FS) += autofs4/
105obj-$(CONFIG_ADFS_FS) += adfs/ 105obj-$(CONFIG_ADFS_FS) += adfs/
106obj-$(CONFIG_FUSE_FS) += fuse/ 106obj-$(CONFIG_FUSE_FS) += fuse/
107obj-$(CONFIG_OVERLAY_FS) += overlayfs/
107obj-$(CONFIG_UDF_FS) += udf/ 108obj-$(CONFIG_UDF_FS) += udf/
108obj-$(CONFIG_SUN_OPENPROMFS) += openpromfs/ 109obj-$(CONFIG_SUN_OPENPROMFS) += openpromfs/
109obj-$(CONFIG_OMFS_FS) += omfs/ 110obj-$(CONFIG_OMFS_FS) += omfs/
diff --git a/fs/aio.c b/fs/aio.c
index 84a751005f5b..14b93159ef83 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -165,6 +165,15 @@ static struct vfsmount *aio_mnt;
165static const struct file_operations aio_ring_fops; 165static const struct file_operations aio_ring_fops;
166static const struct address_space_operations aio_ctx_aops; 166static const struct address_space_operations aio_ctx_aops;
167 167
168/* Backing dev info for aio fs.
169 * -no dirty page accounting or writeback happens
170 */
171static struct backing_dev_info aio_fs_backing_dev_info = {
172 .name = "aiofs",
173 .state = 0,
174 .capabilities = BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_MAP_COPY,
175};
176
168static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages) 177static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
169{ 178{
170 struct qstr this = QSTR_INIT("[aio]", 5); 179 struct qstr this = QSTR_INIT("[aio]", 5);
@@ -176,6 +185,7 @@ static struct file *aio_private_file(struct kioctx *ctx, loff_t nr_pages)
176 185
177 inode->i_mapping->a_ops = &aio_ctx_aops; 186 inode->i_mapping->a_ops = &aio_ctx_aops;
178 inode->i_mapping->private_data = ctx; 187 inode->i_mapping->private_data = ctx;
188 inode->i_mapping->backing_dev_info = &aio_fs_backing_dev_info;
179 inode->i_size = PAGE_SIZE * nr_pages; 189 inode->i_size = PAGE_SIZE * nr_pages;
180 190
181 path.dentry = d_alloc_pseudo(aio_mnt->mnt_sb, &this); 191 path.dentry = d_alloc_pseudo(aio_mnt->mnt_sb, &this);
@@ -220,6 +230,9 @@ static int __init aio_setup(void)
220 if (IS_ERR(aio_mnt)) 230 if (IS_ERR(aio_mnt))
221 panic("Failed to create aio fs mount."); 231 panic("Failed to create aio fs mount.");
222 232
233 if (bdi_init(&aio_fs_backing_dev_info))
234 panic("Failed to init aio fs backing dev info.");
235
223 kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC); 236 kiocb_cachep = KMEM_CACHE(kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
224 kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC); 237 kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC);
225 238
@@ -281,11 +294,6 @@ static const struct file_operations aio_ring_fops = {
281 .mmap = aio_ring_mmap, 294 .mmap = aio_ring_mmap,
282}; 295};
283 296
284static int aio_set_page_dirty(struct page *page)
285{
286 return 0;
287}
288
289#if IS_ENABLED(CONFIG_MIGRATION) 297#if IS_ENABLED(CONFIG_MIGRATION)
290static int aio_migratepage(struct address_space *mapping, struct page *new, 298static int aio_migratepage(struct address_space *mapping, struct page *new,
291 struct page *old, enum migrate_mode mode) 299 struct page *old, enum migrate_mode mode)
@@ -357,7 +365,7 @@ out:
357#endif 365#endif
358 366
359static const struct address_space_operations aio_ctx_aops = { 367static const struct address_space_operations aio_ctx_aops = {
360 .set_page_dirty = aio_set_page_dirty, 368 .set_page_dirty = __set_page_dirty_no_writeback,
361#if IS_ENABLED(CONFIG_MIGRATION) 369#if IS_ENABLED(CONFIG_MIGRATION)
362 .migratepage = aio_migratepage, 370 .migratepage = aio_migratepage,
363#endif 371#endif
@@ -412,7 +420,6 @@ static int aio_setup_ring(struct kioctx *ctx)
412 pr_debug("pid(%d) page[%d]->count=%d\n", 420 pr_debug("pid(%d) page[%d]->count=%d\n",
413 current->pid, i, page_count(page)); 421 current->pid, i, page_count(page));
414 SetPageUptodate(page); 422 SetPageUptodate(page);
415 SetPageDirty(page);
416 unlock_page(page); 423 unlock_page(page);
417 424
418 ctx->ring_pages[i] = page; 425 ctx->ring_pages[i] = page;
diff --git a/fs/block_dev.c b/fs/block_dev.c
index cc9d4114cda0..1d9c9f3754f8 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1585,7 +1585,7 @@ ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from)
1585} 1585}
1586EXPORT_SYMBOL_GPL(blkdev_write_iter); 1586EXPORT_SYMBOL_GPL(blkdev_write_iter);
1587 1587
1588static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to) 1588ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
1589{ 1589{
1590 struct file *file = iocb->ki_filp; 1590 struct file *file = iocb->ki_filp;
1591 struct inode *bd_inode = file->f_mapping->host; 1591 struct inode *bd_inode = file->f_mapping->host;
@@ -1599,6 +1599,7 @@ static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to)
1599 iov_iter_truncate(to, size); 1599 iov_iter_truncate(to, size);
1600 return generic_file_read_iter(iocb, to); 1600 return generic_file_read_iter(iocb, to);
1601} 1601}
1602EXPORT_SYMBOL_GPL(blkdev_read_iter);
1602 1603
1603/* 1604/*
1604 * Try to release a page associated with block device when the system 1605 * Try to release a page associated with block device when the system
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index d3220d31d3cb..dcd9be32ac57 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -1011,8 +1011,6 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
1011 bytes = min(bytes, working_bytes); 1011 bytes = min(bytes, working_bytes);
1012 kaddr = kmap_atomic(page_out); 1012 kaddr = kmap_atomic(page_out);
1013 memcpy(kaddr + *pg_offset, buf + buf_offset, bytes); 1013 memcpy(kaddr + *pg_offset, buf + buf_offset, bytes);
1014 if (*pg_index == (vcnt - 1) && *pg_offset == 0)
1015 memset(kaddr + bytes, 0, PAGE_CACHE_SIZE - bytes);
1016 kunmap_atomic(kaddr); 1014 kunmap_atomic(kaddr);
1017 flush_dcache_page(page_out); 1015 flush_dcache_page(page_out);
1018 1016
@@ -1054,3 +1052,34 @@ int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
1054 1052
1055 return 1; 1053 return 1;
1056} 1054}
1055
1056/*
1057 * When uncompressing data, we need to make sure to zero any parts of
1058 * the biovec that were not filled in by the decompression code. pg_index
1059 * and pg_offset indicate the last page and the last offset of that page
1060 * that have been filled in. This will zero everything remaining in the
1061 * biovec.
1062 */
1063void btrfs_clear_biovec_end(struct bio_vec *bvec, int vcnt,
1064 unsigned long pg_index,
1065 unsigned long pg_offset)
1066{
1067 while (pg_index < vcnt) {
1068 struct page *page = bvec[pg_index].bv_page;
1069 unsigned long off = bvec[pg_index].bv_offset;
1070 unsigned long len = bvec[pg_index].bv_len;
1071
1072 if (pg_offset < off)
1073 pg_offset = off;
1074 if (pg_offset < off + len) {
1075 unsigned long bytes = off + len - pg_offset;
1076 char *kaddr;
1077
1078 kaddr = kmap_atomic(page);
1079 memset(kaddr + pg_offset, 0, bytes);
1080 kunmap_atomic(kaddr);
1081 }
1082 pg_index++;
1083 pg_offset = 0;
1084 }
1085}
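
[Illustrative sketch, not part of the patch] A minimal user-space model of the tail-zeroing behaviour implemented by btrfs_clear_biovec_end() above: plain buffers stand in for the struct bio_vec pages and kmap_atomic(), and the names fake_vec / clear_vec_end are made up for this sketch.

#include <stdio.h>
#include <string.h>

struct fake_vec { unsigned char *buf; unsigned long off; unsigned long len; };

/* zero everything in the vector from (pg_index, pg_offset) to the end */
static void clear_vec_end(struct fake_vec *vec, int vcnt,
                          unsigned long pg_index, unsigned long pg_offset)
{
        while (pg_index < (unsigned long)vcnt) {
                unsigned long off = vec[pg_index].off;
                unsigned long len = vec[pg_index].len;

                if (pg_offset < off)
                        pg_offset = off;
                if (pg_offset < off + len)
                        memset(vec[pg_index].buf + pg_offset, 0,
                               off + len - pg_offset);
                pg_index++;
                pg_offset = 0;          /* later entries are cleared in full */
        }
}

int main(void)
{
        unsigned char a[8], b[8];
        struct fake_vec vec[2] = { { a, 0, sizeof(a) }, { b, 0, sizeof(b) } };
        int i;

        memset(a, 'A', sizeof(a));
        memset(b, 'B', sizeof(b));

        /* pretend decompression filled entry 0 only up to offset 5 */
        clear_vec_end(vec, 2, 0, 5);

        for (i = 0; i < 8; i++)
                putchar(a[i] ? a[i] : '.');     /* AAAAA... */
        putchar(' ');
        for (i = 0; i < 8; i++)
                putchar(b[i] ? b[i] : '.');     /* ........ */
        putchar('\n');
        return 0;
}

The kernel helper performs the same walk over bvec[] with kmap_atomic() around each memset; the lzo.c and zlib.c hunks further down call it only when decompression succeeded (ret == 0).
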
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index 0c803b4fbf93..d181f70caae0 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -45,7 +45,9 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
45 unsigned long nr_pages); 45 unsigned long nr_pages);
46int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio, 46int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
47 int mirror_num, unsigned long bio_flags); 47 int mirror_num, unsigned long bio_flags);
48 48void btrfs_clear_biovec_end(struct bio_vec *bvec, int vcnt,
49 unsigned long pg_index,
50 unsigned long pg_offset);
49struct btrfs_compress_op { 51struct btrfs_compress_op {
50 struct list_head *(*alloc_workspace)(void); 52 struct list_head *(*alloc_workspace)(void);
51 53
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 19bc6162fb8e..150822ee0a0b 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -80,13 +80,6 @@ noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
80{ 80{
81 int i; 81 int i;
82 82
83#ifdef CONFIG_DEBUG_LOCK_ALLOC
84 /* lockdep really cares that we take all of these spinlocks
85 * in the right order. If any of the locks in the path are not
86 * currently blocking, it is going to complain. So, make really
87 * really sure by forcing the path to blocking before we clear
88 * the path blocking.
89 */
90 if (held) { 83 if (held) {
91 btrfs_set_lock_blocking_rw(held, held_rw); 84 btrfs_set_lock_blocking_rw(held, held_rw);
92 if (held_rw == BTRFS_WRITE_LOCK) 85 if (held_rw == BTRFS_WRITE_LOCK)
@@ -95,7 +88,6 @@ noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
95 held_rw = BTRFS_READ_LOCK_BLOCKING; 88 held_rw = BTRFS_READ_LOCK_BLOCKING;
96 } 89 }
97 btrfs_set_path_blocking(p); 90 btrfs_set_path_blocking(p);
98#endif
99 91
100 for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) { 92 for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
101 if (p->nodes[i] && p->locks[i]) { 93 if (p->nodes[i] && p->locks[i]) {
@@ -107,10 +99,8 @@ noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
107 } 99 }
108 } 100 }
109 101
110#ifdef CONFIG_DEBUG_LOCK_ALLOC
111 if (held) 102 if (held)
112 btrfs_clear_lock_blocking_rw(held, held_rw); 103 btrfs_clear_lock_blocking_rw(held, held_rw);
113#endif
114} 104}
115 105
116/* this also releases the path */ 106/* this also releases the path */
@@ -2893,7 +2883,7 @@ cow_done:
2893 } 2883 }
2894 p->locks[level] = BTRFS_WRITE_LOCK; 2884 p->locks[level] = BTRFS_WRITE_LOCK;
2895 } else { 2885 } else {
2896 err = btrfs_try_tree_read_lock(b); 2886 err = btrfs_tree_read_lock_atomic(b);
2897 if (!err) { 2887 if (!err) {
2898 btrfs_set_path_blocking(p); 2888 btrfs_set_path_blocking(p);
2899 btrfs_tree_read_lock(b); 2889 btrfs_tree_read_lock(b);
@@ -3025,7 +3015,7 @@ again:
3025 } 3015 }
3026 3016
3027 level = btrfs_header_level(b); 3017 level = btrfs_header_level(b);
3028 err = btrfs_try_tree_read_lock(b); 3018 err = btrfs_tree_read_lock_atomic(b);
3029 if (!err) { 3019 if (!err) {
3030 btrfs_set_path_blocking(p); 3020 btrfs_set_path_blocking(p);
3031 btrfs_tree_read_lock(b); 3021 btrfs_tree_read_lock(b);
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index d557264ee974..fe69edda11fb 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -3276,7 +3276,7 @@ int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
3276 struct btrfs_root *root, unsigned long count); 3276 struct btrfs_root *root, unsigned long count);
3277int btrfs_async_run_delayed_refs(struct btrfs_root *root, 3277int btrfs_async_run_delayed_refs(struct btrfs_root *root,
3278 unsigned long count, int wait); 3278 unsigned long count, int wait);
3279int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len); 3279int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len);
3280int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans, 3280int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
3281 struct btrfs_root *root, u64 bytenr, 3281 struct btrfs_root *root, u64 bytenr,
3282 u64 offset, int metadata, u64 *refs, u64 *flags); 3282 u64 offset, int metadata, u64 *refs, u64 *flags);
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 1ad0f47ac850..1bf9f897065d 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3817,19 +3817,19 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
3817 struct btrfs_super_block *sb = fs_info->super_copy; 3817 struct btrfs_super_block *sb = fs_info->super_copy;
3818 int ret = 0; 3818 int ret = 0;
3819 3819
3820 if (sb->root_level > BTRFS_MAX_LEVEL) { 3820 if (btrfs_super_root_level(sb) >= BTRFS_MAX_LEVEL) {
3821 printk(KERN_ERR "BTRFS: tree_root level too big: %d > %d\n", 3821 printk(KERN_ERR "BTRFS: tree_root level too big: %d >= %d\n",
3822 sb->root_level, BTRFS_MAX_LEVEL); 3822 btrfs_super_root_level(sb), BTRFS_MAX_LEVEL);
3823 ret = -EINVAL; 3823 ret = -EINVAL;
3824 } 3824 }
3825 if (sb->chunk_root_level > BTRFS_MAX_LEVEL) { 3825 if (btrfs_super_chunk_root_level(sb) >= BTRFS_MAX_LEVEL) {
3826 printk(KERN_ERR "BTRFS: chunk_root level too big: %d > %d\n", 3826 printk(KERN_ERR "BTRFS: chunk_root level too big: %d >= %d\n",
3827 sb->chunk_root_level, BTRFS_MAX_LEVEL); 3827 btrfs_super_chunk_root_level(sb), BTRFS_MAX_LEVEL);
3828 ret = -EINVAL; 3828 ret = -EINVAL;
3829 } 3829 }
3830 if (sb->log_root_level > BTRFS_MAX_LEVEL) { 3830 if (btrfs_super_log_root_level(sb) >= BTRFS_MAX_LEVEL) {
3831 printk(KERN_ERR "BTRFS: log_root level too big: %d > %d\n", 3831 printk(KERN_ERR "BTRFS: log_root level too big: %d >= %d\n",
3832 sb->log_root_level, BTRFS_MAX_LEVEL); 3832 btrfs_super_log_root_level(sb), BTRFS_MAX_LEVEL);
3833 ret = -EINVAL; 3833 ret = -EINVAL;
3834 } 3834 }
3835 3835
@@ -3837,15 +3837,15 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
3837 * The common minimum, we don't know if we can trust the nodesize/sectorsize 3837 * The common minimum, we don't know if we can trust the nodesize/sectorsize
3838 * items yet, they'll be verified later. Issue just a warning. 3838 * items yet, they'll be verified later. Issue just a warning.
3839 */ 3839 */
3840 if (!IS_ALIGNED(sb->root, 4096)) 3840 if (!IS_ALIGNED(btrfs_super_root(sb), 4096))
3841 printk(KERN_WARNING "BTRFS: tree_root block unaligned: %llu\n", 3841 printk(KERN_WARNING "BTRFS: tree_root block unaligned: %llu\n",
3842 sb->root); 3842 sb->root);
3843 if (!IS_ALIGNED(sb->chunk_root, 4096)) 3843 if (!IS_ALIGNED(btrfs_super_chunk_root(sb), 4096))
3844 printk(KERN_WARNING "BTRFS: tree_root block unaligned: %llu\n", 3844 printk(KERN_WARNING "BTRFS: tree_root block unaligned: %llu\n",
3845 sb->chunk_root); 3845 sb->chunk_root);
3846 if (!IS_ALIGNED(sb->log_root, 4096)) 3846 if (!IS_ALIGNED(btrfs_super_log_root(sb), 4096))
3847 printk(KERN_WARNING "BTRFS: tree_root block unaligned: %llu\n", 3847 printk(KERN_WARNING "BTRFS: tree_root block unaligned: %llu\n",
3848 sb->log_root); 3848 btrfs_super_log_root(sb));
3849 3849
3850 if (memcmp(fs_info->fsid, sb->dev_item.fsid, BTRFS_UUID_SIZE) != 0) { 3850 if (memcmp(fs_info->fsid, sb->dev_item.fsid, BTRFS_UUID_SIZE) != 0) {
3851 printk(KERN_ERR "BTRFS: dev_item UUID does not match fsid: %pU != %pU\n", 3851 printk(KERN_ERR "BTRFS: dev_item UUID does not match fsid: %pU != %pU\n",
@@ -3857,13 +3857,13 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
3857 * Hint to catch really bogus numbers, bitflips or so, more exact checks are 3857 * Hint to catch really bogus numbers, bitflips or so, more exact checks are
3858 * done later 3858 * done later
3859 */ 3859 */
3860 if (sb->num_devices > (1UL << 31)) 3860 if (btrfs_super_num_devices(sb) > (1UL << 31))
3861 printk(KERN_WARNING "BTRFS: suspicious number of devices: %llu\n", 3861 printk(KERN_WARNING "BTRFS: suspicious number of devices: %llu\n",
3862 sb->num_devices); 3862 btrfs_super_num_devices(sb));
3863 3863
3864 if (sb->bytenr != BTRFS_SUPER_INFO_OFFSET) { 3864 if (btrfs_super_bytenr(sb) != BTRFS_SUPER_INFO_OFFSET) {
3865 printk(KERN_ERR "BTRFS: super offset mismatch %llu != %u\n", 3865 printk(KERN_ERR "BTRFS: super offset mismatch %llu != %u\n",
3866 sb->bytenr, BTRFS_SUPER_INFO_OFFSET); 3866 btrfs_super_bytenr(sb), BTRFS_SUPER_INFO_OFFSET);
3867 ret = -EINVAL; 3867 ret = -EINVAL;
3868 } 3868 }
3869 3869
@@ -3871,14 +3871,15 @@ static int btrfs_check_super_valid(struct btrfs_fs_info *fs_info,
3871 * The generation is a global counter, we'll trust it more than the others 3871 * The generation is a global counter, we'll trust it more than the others
3872 * but it's still possible that it's the one that's wrong. 3872 * but it's still possible that it's the one that's wrong.
3873 */ 3873 */
3874 if (sb->generation < sb->chunk_root_generation) 3874 if (btrfs_super_generation(sb) < btrfs_super_chunk_root_generation(sb))
3875 printk(KERN_WARNING 3875 printk(KERN_WARNING
3876 "BTRFS: suspicious: generation < chunk_root_generation: %llu < %llu\n", 3876 "BTRFS: suspicious: generation < chunk_root_generation: %llu < %llu\n",
3877 sb->generation, sb->chunk_root_generation); 3877 btrfs_super_generation(sb), btrfs_super_chunk_root_generation(sb));
3878 if (sb->generation < sb->cache_generation && sb->cache_generation != (u64)-1) 3878 if (btrfs_super_generation(sb) < btrfs_super_cache_generation(sb)
3879 && btrfs_super_cache_generation(sb) != (u64)-1)
3879 printk(KERN_WARNING 3880 printk(KERN_WARNING
3880 "BTRFS: suspicious: generation < cache_generation: %llu < %llu\n", 3881 "BTRFS: suspicious: generation < cache_generation: %llu < %llu\n",
3881 sb->generation, sb->cache_generation); 3882 btrfs_super_generation(sb), btrfs_super_cache_generation(sb));
3882 3883
3883 return ret; 3884 return ret;
3884} 3885}
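
[Illustrative sketch, not part of the patch] The switch from '>' to '>=' in the level checks above matters because a tree level is used as an array index: btrfs_path carries BTRFS_MAX_LEVEL (8) node and lock slots, as the loop over p->nodes[i] earlier in this commit shows, so a level equal to BTRFS_MAX_LEVEL is already out of range. The struct and function below only mirror that invariant and are not kernel code; the other change in this hunk, reading fields through the btrfs_super_*() accessors, applies the on-disk little-endian conversion instead of comparing raw struct members.

#define BTRFS_MAX_LEVEL 8

struct btrfs_path_like {
        void *nodes[BTRFS_MAX_LEVEL];   /* valid indices are 0..7 */
        int   locks[BTRFS_MAX_LEVEL];
};

static int level_is_valid(int level)
{
        return level >= 0 && level < BTRFS_MAX_LEVEL;   /* hence ">=" in the check */
}
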
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index d56589571012..47c1ba141082 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -710,8 +710,8 @@ void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
710 rcu_read_unlock(); 710 rcu_read_unlock();
711} 711}
712 712
713/* simple helper to search for an existing extent at a given offset */ 713/* simple helper to search for an existing data extent at a given offset */
714int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len) 714int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len)
715{ 715{
716 int ret; 716 int ret;
717 struct btrfs_key key; 717 struct btrfs_key key;
@@ -726,12 +726,6 @@ int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
726 key.type = BTRFS_EXTENT_ITEM_KEY; 726 key.type = BTRFS_EXTENT_ITEM_KEY;
727 ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path, 727 ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
728 0, 0); 728 0, 0);
729 if (ret > 0) {
730 btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
731 if (key.objectid == start &&
732 key.type == BTRFS_METADATA_ITEM_KEY)
733 ret = 0;
734 }
735 btrfs_free_path(path); 729 btrfs_free_path(path);
736 return ret; 730 return ret;
737} 731}
@@ -786,7 +780,6 @@ search_again:
786 else 780 else
787 key.type = BTRFS_EXTENT_ITEM_KEY; 781 key.type = BTRFS_EXTENT_ITEM_KEY;
788 782
789again:
790 ret = btrfs_search_slot(trans, root->fs_info->extent_root, 783 ret = btrfs_search_slot(trans, root->fs_info->extent_root,
791 &key, path, 0, 0); 784 &key, path, 0, 0);
792 if (ret < 0) 785 if (ret < 0)
@@ -802,13 +795,6 @@ again:
802 key.offset == root->nodesize) 795 key.offset == root->nodesize)
803 ret = 0; 796 ret = 0;
804 } 797 }
805 if (ret) {
806 key.objectid = bytenr;
807 key.type = BTRFS_EXTENT_ITEM_KEY;
808 key.offset = root->nodesize;
809 btrfs_release_path(path);
810 goto again;
811 }
812 } 798 }
813 799
814 if (ret == 0) { 800 if (ret == 0) {
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index 783a94355efd..84a2d1868271 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -413,7 +413,7 @@ int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
413 ret = 0; 413 ret = 0;
414fail: 414fail:
415 while (ret < 0 && !list_empty(&tmplist)) { 415 while (ret < 0 && !list_empty(&tmplist)) {
416 sums = list_entry(&tmplist, struct btrfs_ordered_sum, list); 416 sums = list_entry(tmplist.next, struct btrfs_ordered_sum, list);
417 list_del(&sums->list); 417 list_del(&sums->list);
418 kfree(sums); 418 kfree(sums);
419 } 419 }
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 8d2b76e29d3b..4399f0c3a4ce 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -765,23 +765,6 @@ out:
765 return ret; 765 return ret;
766} 766}
767 767
768/* copy of check_sticky in fs/namei.c()
769* It's inline, so penalty for filesystems that don't use sticky bit is
770* minimal.
771*/
772static inline int btrfs_check_sticky(struct inode *dir, struct inode *inode)
773{
774 kuid_t fsuid = current_fsuid();
775
776 if (!(dir->i_mode & S_ISVTX))
777 return 0;
778 if (uid_eq(inode->i_uid, fsuid))
779 return 0;
780 if (uid_eq(dir->i_uid, fsuid))
781 return 0;
782 return !capable(CAP_FOWNER);
783}
784
785/* copy of may_delete in fs/namei.c() 768/* copy of may_delete in fs/namei.c()
786 * Check whether we can remove a link victim from directory dir, check 769 * Check whether we can remove a link victim from directory dir, check
787 * whether the type of victim is right. 770 * whether the type of victim is right.
@@ -817,8 +800,7 @@ static int btrfs_may_delete(struct inode *dir, struct dentry *victim, int isdir)
817 return error; 800 return error;
818 if (IS_APPEND(dir)) 801 if (IS_APPEND(dir))
819 return -EPERM; 802 return -EPERM;
820 if (btrfs_check_sticky(dir, victim->d_inode)|| 803 if (check_sticky(dir, victim->d_inode) || IS_APPEND(victim->d_inode) ||
821 IS_APPEND(victim->d_inode)||
822 IS_IMMUTABLE(victim->d_inode) || IS_SWAPFILE(victim->d_inode)) 804 IS_IMMUTABLE(victim->d_inode) || IS_SWAPFILE(victim->d_inode))
823 return -EPERM; 805 return -EPERM;
824 if (isdir) { 806 if (isdir) {
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 5665d2149249..f8229ef1b46d 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -128,6 +128,26 @@ again:
128} 128}
129 129
130/* 130/*
131 * take a spinning read lock.
132 * returns 1 if we get the read lock and 0 if we don't
133 * this won't wait for blocking writers
134 */
135int btrfs_tree_read_lock_atomic(struct extent_buffer *eb)
136{
137 if (atomic_read(&eb->blocking_writers))
138 return 0;
139
140 read_lock(&eb->lock);
141 if (atomic_read(&eb->blocking_writers)) {
142 read_unlock(&eb->lock);
143 return 0;
144 }
145 atomic_inc(&eb->read_locks);
146 atomic_inc(&eb->spinning_readers);
147 return 1;
148}
149
150/*
131 * returns 1 if we get the read lock and 0 if we don't 151 * returns 1 if we get the read lock and 0 if we don't
132 * this won't wait for blocking writers 152 * this won't wait for blocking writers
133 */ 153 */
@@ -158,9 +178,7 @@ int btrfs_try_tree_write_lock(struct extent_buffer *eb)
158 atomic_read(&eb->blocking_readers)) 178 atomic_read(&eb->blocking_readers))
159 return 0; 179 return 0;
160 180
161 if (!write_trylock(&eb->lock)) 181 write_lock(&eb->lock);
162 return 0;
163
164 if (atomic_read(&eb->blocking_writers) || 182 if (atomic_read(&eb->blocking_writers) ||
165 atomic_read(&eb->blocking_readers)) { 183 atomic_read(&eb->blocking_readers)) {
166 write_unlock(&eb->lock); 184 write_unlock(&eb->lock);
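
[Illustrative sketch, not part of the patch] btrfs_tree_read_lock_atomic() above is an instance of a check / lock / re-check pattern: peek at the blocking-writer count without holding anything, take the spinning read lock, then re-check and back out if a blocking writer appeared in the window. A minimal user-space model of that pattern, with pthread_rwlock_t and a C11 atomic standing in for eb->lock and eb->blocking_writers (the names below are illustrative only):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

struct node {
        pthread_rwlock_t lock;
        atomic_int blocking_writers;
};

/* returns 1 if the read lock was taken, 0 if a blocking writer is active */
static int read_lock_atomic(struct node *n)
{
        if (atomic_load(&n->blocking_writers))
                return 0;                       /* cheap check, nothing taken */

        pthread_rwlock_rdlock(&n->lock);
        if (atomic_load(&n->blocking_writers)) {
                /* a writer went blocking between the check and the lock */
                pthread_rwlock_unlock(&n->lock);
                return 0;
        }
        return 1;
}

int main(void)
{
        struct node n = { PTHREAD_RWLOCK_INITIALIZER, 0 };

        if (read_lock_atomic(&n)) {
                printf("read lock taken\n");
                pthread_rwlock_unlock(&n.lock);
        }
        return 0;
}

In the ctree.c hunks above, the search path now uses this helper and only falls back to btrfs_set_path_blocking() plus the blocking btrfs_tree_read_lock() when it returns 0.
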
diff --git a/fs/btrfs/locking.h b/fs/btrfs/locking.h
index b81e0e9a4894..c44a9d5f5362 100644
--- a/fs/btrfs/locking.h
+++ b/fs/btrfs/locking.h
@@ -35,6 +35,8 @@ void btrfs_clear_lock_blocking_rw(struct extent_buffer *eb, int rw);
35void btrfs_assert_tree_locked(struct extent_buffer *eb); 35void btrfs_assert_tree_locked(struct extent_buffer *eb);
36int btrfs_try_tree_read_lock(struct extent_buffer *eb); 36int btrfs_try_tree_read_lock(struct extent_buffer *eb);
37int btrfs_try_tree_write_lock(struct extent_buffer *eb); 37int btrfs_try_tree_write_lock(struct extent_buffer *eb);
38int btrfs_tree_read_lock_atomic(struct extent_buffer *eb);
39
38 40
39static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw) 41static inline void btrfs_tree_unlock_rw(struct extent_buffer *eb, int rw)
40{ 42{
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index 78285f30909e..617553cdb7d3 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -373,6 +373,8 @@ cont:
373 } 373 }
374done: 374done:
375 kunmap(pages_in[page_in_index]); 375 kunmap(pages_in[page_in_index]);
376 if (!ret)
377 btrfs_clear_biovec_end(bvec, vcnt, page_out_index, pg_offset);
376 return ret; 378 return ret;
377} 379}
378 380
@@ -410,10 +412,23 @@ static int lzo_decompress(struct list_head *ws, unsigned char *data_in,
410 goto out; 412 goto out;
411 } 413 }
412 414
415 /*
416 * the caller is already checking against PAGE_SIZE, but lets
417 * move this check closer to the memcpy/memset
418 */
419 destlen = min_t(unsigned long, destlen, PAGE_SIZE);
413 bytes = min_t(unsigned long, destlen, out_len - start_byte); 420 bytes = min_t(unsigned long, destlen, out_len - start_byte);
414 421
415 kaddr = kmap_atomic(dest_page); 422 kaddr = kmap_atomic(dest_page);
416 memcpy(kaddr, workspace->buf + start_byte, bytes); 423 memcpy(kaddr, workspace->buf + start_byte, bytes);
424
425 /*
426 * btrfs_getblock is doing a zero on the tail of the page too,
427 * but this will cover anything missing from the decompressed
428 * data.
429 */
430 if (bytes < destlen)
431 memset(kaddr+bytes, 0, destlen-bytes);
417 kunmap_atomic(kaddr); 432 kunmap_atomic(kaddr);
418out: 433out:
419 return ret; 434 return ret;
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index a2b97ef10317..54bd91ece35b 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -2151,6 +2151,7 @@ static void __exit exit_btrfs_fs(void)
2151 extent_map_exit(); 2151 extent_map_exit();
2152 extent_io_exit(); 2152 extent_io_exit();
2153 btrfs_interface_exit(); 2153 btrfs_interface_exit();
2154 btrfs_end_io_wq_exit();
2154 unregister_filesystem(&btrfs_fs_type); 2155 unregister_filesystem(&btrfs_fs_type);
2155 btrfs_exit_sysfs(); 2156 btrfs_exit_sysfs();
2156 btrfs_cleanup_fs_uuids(); 2157 btrfs_cleanup_fs_uuids();
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index 1475979e5718..286213cec861 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -672,7 +672,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans,
672 * is this extent already allocated in the extent 672 * is this extent already allocated in the extent
673 * allocation tree? If so, just add a reference 673 * allocation tree? If so, just add a reference
674 */ 674 */
675 ret = btrfs_lookup_extent(root, ins.objectid, 675 ret = btrfs_lookup_data_extent(root, ins.objectid,
676 ins.offset); 676 ins.offset);
677 if (ret == 0) { 677 if (ret == 0) {
678 ret = btrfs_inc_extent_ref(trans, root, 678 ret = btrfs_inc_extent_ref(trans, root,
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index 759fa4e2de8f..fb22fd8d8fb8 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -299,6 +299,8 @@ done:
299 zlib_inflateEnd(&workspace->strm); 299 zlib_inflateEnd(&workspace->strm);
300 if (data_in) 300 if (data_in)
301 kunmap(pages_in[page_in_index]); 301 kunmap(pages_in[page_in_index]);
302 if (!ret)
303 btrfs_clear_biovec_end(bvec, vcnt, page_out_index, pg_offset);
302 return ret; 304 return ret;
303} 305}
304 306
@@ -310,10 +312,14 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
310 struct workspace *workspace = list_entry(ws, struct workspace, list); 312 struct workspace *workspace = list_entry(ws, struct workspace, list);
311 int ret = 0; 313 int ret = 0;
312 int wbits = MAX_WBITS; 314 int wbits = MAX_WBITS;
313 unsigned long bytes_left = destlen; 315 unsigned long bytes_left;
314 unsigned long total_out = 0; 316 unsigned long total_out = 0;
317 unsigned long pg_offset = 0;
315 char *kaddr; 318 char *kaddr;
316 319
320 destlen = min_t(unsigned long, destlen, PAGE_SIZE);
321 bytes_left = destlen;
322
317 workspace->strm.next_in = data_in; 323 workspace->strm.next_in = data_in;
318 workspace->strm.avail_in = srclen; 324 workspace->strm.avail_in = srclen;
319 workspace->strm.total_in = 0; 325 workspace->strm.total_in = 0;
@@ -341,7 +347,6 @@ static int zlib_decompress(struct list_head *ws, unsigned char *data_in,
341 unsigned long buf_start; 347 unsigned long buf_start;
342 unsigned long buf_offset; 348 unsigned long buf_offset;
343 unsigned long bytes; 349 unsigned long bytes;
344 unsigned long pg_offset = 0;
345 350
346 ret = zlib_inflate(&workspace->strm, Z_NO_FLUSH); 351 ret = zlib_inflate(&workspace->strm, Z_NO_FLUSH);
347 if (ret != Z_OK && ret != Z_STREAM_END) 352 if (ret != Z_OK && ret != Z_STREAM_END)
@@ -384,6 +389,17 @@ next:
384 ret = 0; 389 ret = 0;
385 390
386 zlib_inflateEnd(&workspace->strm); 391 zlib_inflateEnd(&workspace->strm);
392
393 /*
394 * this should only happen if zlib returned fewer bytes than we
395 * expected. btrfs_get_block is responsible for zeroing from the
396 * end of the inline extent (destlen) to the end of the page
397 */
398 if (pg_offset < destlen) {
399 kaddr = kmap_atomic(dest_page);
400 memset(kaddr + pg_offset, 0, destlen - pg_offset);
401 kunmap_atomic(kaddr);
402 }
387 return ret; 403 return ret;
388} 404}
389 405
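
[Illustrative sketch, not part of the patch] The zlib_decompress() change above clamps destlen to one page and zeroes whatever inflate did not produce, so the destination page never carries stale data. A small user-space version of the same flow against the regular zlib library; decompress_one_page() and the buffer names are made up for this sketch, and the kernel uses the zlib_inflate() wrappers rather than inflate() directly.

#include <stdio.h>
#include <string.h>
#include <zlib.h>

#define PAGE_SIZE 4096

/* decompress at most one page worth of src into page, zeroing the tail */
static int decompress_one_page(const unsigned char *src, size_t srclen,
                               unsigned char *page, size_t destlen)
{
        z_stream strm;
        size_t produced;
        int ret;

        if (destlen > PAGE_SIZE)
                destlen = PAGE_SIZE;            /* clamp, as the patch does */

        memset(&strm, 0, sizeof(strm));
        if (inflateInit(&strm) != Z_OK)
                return -1;

        strm.next_in = (Bytef *)src;
        strm.avail_in = srclen;
        strm.next_out = page;
        strm.avail_out = destlen;

        ret = inflate(&strm, Z_FINISH);
        produced = strm.total_out;
        inflateEnd(&strm);

        if (ret != Z_STREAM_END && ret != Z_OK)
                return -1;

        if (produced < destlen)                 /* zero what inflate did not fill */
                memset(page + produced, 0, destlen - produced);
        return 0;
}

int main(void)
{
        unsigned char comp[128], page[PAGE_SIZE];
        uLongf n = sizeof(comp);

        compress(comp, &n, (const Bytef *)"hello", 5);
        decompress_one_page(comp, n, page, PAGE_SIZE);
        printf("%s\n", page);                   /* "hello", rest of page zeroed */
        return 0;
}
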
diff --git a/fs/buffer.c b/fs/buffer.c
index 9614adc7e754..20805db2c987 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -128,21 +128,15 @@ __clear_page_buffers(struct page *page)
128 page_cache_release(page); 128 page_cache_release(page);
129} 129}
130 130
131 131static void buffer_io_error(struct buffer_head *bh, char *msg)
132static int quiet_error(struct buffer_head *bh)
133{
134 if (!test_bit(BH_Quiet, &bh->b_state) && printk_ratelimit())
135 return 0;
136 return 1;
137}
138
139
140static void buffer_io_error(struct buffer_head *bh)
141{ 132{
142 char b[BDEVNAME_SIZE]; 133 char b[BDEVNAME_SIZE];
143 printk(KERN_ERR "Buffer I/O error on device %s, logical block %Lu\n", 134
135 if (!test_bit(BH_Quiet, &bh->b_state))
136 printk_ratelimited(KERN_ERR
137 "Buffer I/O error on dev %s, logical block %llu%s\n",
144 bdevname(bh->b_bdev, b), 138 bdevname(bh->b_bdev, b),
145 (unsigned long long)bh->b_blocknr); 139 (unsigned long long)bh->b_blocknr, msg);
146} 140}
147 141
148/* 142/*
@@ -177,17 +171,10 @@ EXPORT_SYMBOL(end_buffer_read_sync);
177 171
178void end_buffer_write_sync(struct buffer_head *bh, int uptodate) 172void end_buffer_write_sync(struct buffer_head *bh, int uptodate)
179{ 173{
180 char b[BDEVNAME_SIZE];
181
182 if (uptodate) { 174 if (uptodate) {
183 set_buffer_uptodate(bh); 175 set_buffer_uptodate(bh);
184 } else { 176 } else {
185 if (!quiet_error(bh)) { 177 buffer_io_error(bh, ", lost sync page write");
186 buffer_io_error(bh);
187 printk(KERN_WARNING "lost page write due to "
188 "I/O error on %s\n",
189 bdevname(bh->b_bdev, b));
190 }
191 set_buffer_write_io_error(bh); 178 set_buffer_write_io_error(bh);
192 clear_buffer_uptodate(bh); 179 clear_buffer_uptodate(bh);
193 } 180 }
@@ -304,8 +291,7 @@ static void end_buffer_async_read(struct buffer_head *bh, int uptodate)
304 set_buffer_uptodate(bh); 291 set_buffer_uptodate(bh);
305 } else { 292 } else {
306 clear_buffer_uptodate(bh); 293 clear_buffer_uptodate(bh);
307 if (!quiet_error(bh)) 294 buffer_io_error(bh, ", async page read");
308 buffer_io_error(bh);
309 SetPageError(page); 295 SetPageError(page);
310 } 296 }
311 297
@@ -353,7 +339,6 @@ still_busy:
353 */ 339 */
354void end_buffer_async_write(struct buffer_head *bh, int uptodate) 340void end_buffer_async_write(struct buffer_head *bh, int uptodate)
355{ 341{
356 char b[BDEVNAME_SIZE];
357 unsigned long flags; 342 unsigned long flags;
358 struct buffer_head *first; 343 struct buffer_head *first;
359 struct buffer_head *tmp; 344 struct buffer_head *tmp;
@@ -365,12 +350,7 @@ void end_buffer_async_write(struct buffer_head *bh, int uptodate)
365 if (uptodate) { 350 if (uptodate) {
366 set_buffer_uptodate(bh); 351 set_buffer_uptodate(bh);
367 } else { 352 } else {
368 if (!quiet_error(bh)) { 353 buffer_io_error(bh, ", lost async page write");
369 buffer_io_error(bh);
370 printk(KERN_WARNING "lost page write due to "
371 "I/O error on %s\n",
372 bdevname(bh->b_bdev, b));
373 }
374 set_bit(AS_EIO, &page->mapping->flags); 354 set_bit(AS_EIO, &page->mapping->flags);
375 set_buffer_write_io_error(bh); 355 set_buffer_write_io_error(bh);
376 clear_buffer_uptodate(bh); 356 clear_buffer_uptodate(bh);
@@ -993,7 +973,7 @@ init_page_buffers(struct page *page, struct block_device *bdev,
993 */ 973 */
994static int 974static int
995grow_dev_page(struct block_device *bdev, sector_t block, 975grow_dev_page(struct block_device *bdev, sector_t block,
996 pgoff_t index, int size, int sizebits) 976 pgoff_t index, int size, int sizebits, gfp_t gfp)
997{ 977{
998 struct inode *inode = bdev->bd_inode; 978 struct inode *inode = bdev->bd_inode;
999 struct page *page; 979 struct page *page;
@@ -1002,8 +982,8 @@ grow_dev_page(struct block_device *bdev, sector_t block,
1002 int ret = 0; /* Will call free_more_memory() */ 982 int ret = 0; /* Will call free_more_memory() */
1003 gfp_t gfp_mask; 983 gfp_t gfp_mask;
1004 984
1005 gfp_mask = mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS; 985 gfp_mask = (mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS) | gfp;
1006 gfp_mask |= __GFP_MOVABLE; 986
1007 /* 987 /*
1008 * XXX: __getblk_slow() can not really deal with failure and 988 * XXX: __getblk_slow() can not really deal with failure and
1009 * will endlessly loop on improvised global reclaim. Prefer 989 * will endlessly loop on improvised global reclaim. Prefer
@@ -1060,7 +1040,7 @@ failed:
1060 * that page was dirty, the buffers are set dirty also. 1040 * that page was dirty, the buffers are set dirty also.
1061 */ 1041 */
1062static int 1042static int
1063grow_buffers(struct block_device *bdev, sector_t block, int size) 1043grow_buffers(struct block_device *bdev, sector_t block, int size, gfp_t gfp)
1064{ 1044{
1065 pgoff_t index; 1045 pgoff_t index;
1066 int sizebits; 1046 int sizebits;
@@ -1087,11 +1067,12 @@ grow_buffers(struct block_device *bdev, sector_t block, int size)
1087 } 1067 }
1088 1068
1089 /* Create a page with the proper size buffers.. */ 1069 /* Create a page with the proper size buffers.. */
1090 return grow_dev_page(bdev, block, index, size, sizebits); 1070 return grow_dev_page(bdev, block, index, size, sizebits, gfp);
1091} 1071}
1092 1072
1093static struct buffer_head * 1073struct buffer_head *
1094__getblk_slow(struct block_device *bdev, sector_t block, int size) 1074__getblk_slow(struct block_device *bdev, sector_t block,
1075 unsigned size, gfp_t gfp)
1095{ 1076{
1096 /* Size must be multiple of hard sectorsize */ 1077 /* Size must be multiple of hard sectorsize */
1097 if (unlikely(size & (bdev_logical_block_size(bdev)-1) || 1078 if (unlikely(size & (bdev_logical_block_size(bdev)-1) ||
@@ -1113,13 +1094,14 @@ __getblk_slow(struct block_device *bdev, sector_t block, int size)
1113 if (bh) 1094 if (bh)
1114 return bh; 1095 return bh;
1115 1096
1116 ret = grow_buffers(bdev, block, size); 1097 ret = grow_buffers(bdev, block, size, gfp);
1117 if (ret < 0) 1098 if (ret < 0)
1118 return NULL; 1099 return NULL;
1119 if (ret == 0) 1100 if (ret == 0)
1120 free_more_memory(); 1101 free_more_memory();
1121 } 1102 }
1122} 1103}
1104EXPORT_SYMBOL(__getblk_slow);
1123 1105
1124/* 1106/*
1125 * The relationship between dirty buffers and dirty pages: 1107 * The relationship between dirty buffers and dirty pages:
@@ -1373,24 +1355,25 @@ __find_get_block(struct block_device *bdev, sector_t block, unsigned size)
1373EXPORT_SYMBOL(__find_get_block); 1355EXPORT_SYMBOL(__find_get_block);
1374 1356
1375/* 1357/*
1376 * __getblk will locate (and, if necessary, create) the buffer_head 1358 * __getblk_gfp() will locate (and, if necessary, create) the buffer_head
1377 * which corresponds to the passed block_device, block and size. The 1359 * which corresponds to the passed block_device, block and size. The
1378 * returned buffer has its reference count incremented. 1360 * returned buffer has its reference count incremented.
1379 * 1361 *
1380 * __getblk() will lock up the machine if grow_dev_page's try_to_free_buffers() 1362 * __getblk_gfp() will lock up the machine if grow_dev_page's
1381 * attempt is failing. FIXME, perhaps? 1363 * try_to_free_buffers() attempt is failing. FIXME, perhaps?
1382 */ 1364 */
1383struct buffer_head * 1365struct buffer_head *
1384__getblk(struct block_device *bdev, sector_t block, unsigned size) 1366__getblk_gfp(struct block_device *bdev, sector_t block,
1367 unsigned size, gfp_t gfp)
1385{ 1368{
1386 struct buffer_head *bh = __find_get_block(bdev, block, size); 1369 struct buffer_head *bh = __find_get_block(bdev, block, size);
1387 1370
1388 might_sleep(); 1371 might_sleep();
1389 if (bh == NULL) 1372 if (bh == NULL)
1390 bh = __getblk_slow(bdev, block, size); 1373 bh = __getblk_slow(bdev, block, size, gfp);
1391 return bh; 1374 return bh;
1392} 1375}
1393EXPORT_SYMBOL(__getblk); 1376EXPORT_SYMBOL(__getblk_gfp);
1394 1377
1395/* 1378/*
1396 * Do async read-ahead on a buffer.. 1379 * Do async read-ahead on a buffer..
@@ -1406,24 +1389,28 @@ void __breadahead(struct block_device *bdev, sector_t block, unsigned size)
1406EXPORT_SYMBOL(__breadahead); 1389EXPORT_SYMBOL(__breadahead);
1407 1390
1408/** 1391/**
1409 * __bread() - reads a specified block and returns the bh 1392 * __bread_gfp() - reads a specified block and returns the bh
1410 * @bdev: the block_device to read from 1393 * @bdev: the block_device to read from
1411 * @block: number of block 1394 * @block: number of block
1412 * @size: size (in bytes) to read 1395 * @size: size (in bytes) to read
1413 * 1396 * @gfp: page allocation flag
1397 *
1414 * Reads a specified block, and returns buffer head that contains it. 1398 * Reads a specified block, and returns buffer head that contains it.
1399 * The page cache can be allocated from non-movable area
1400 * not to prevent page migration if you set gfp to zero.
1415 * It returns NULL if the block was unreadable. 1401 * It returns NULL if the block was unreadable.
1416 */ 1402 */
1417struct buffer_head * 1403struct buffer_head *
1418__bread(struct block_device *bdev, sector_t block, unsigned size) 1404__bread_gfp(struct block_device *bdev, sector_t block,
1405 unsigned size, gfp_t gfp)
1419{ 1406{
1420 struct buffer_head *bh = __getblk(bdev, block, size); 1407 struct buffer_head *bh = __getblk_gfp(bdev, block, size, gfp);
1421 1408
1422 if (likely(bh) && !buffer_uptodate(bh)) 1409 if (likely(bh) && !buffer_uptodate(bh))
1423 bh = __bread_slow(bh); 1410 bh = __bread_slow(bh);
1424 return bh; 1411 return bh;
1425} 1412}
1426EXPORT_SYMBOL(__bread); 1413EXPORT_SYMBOL(__bread_gfp);
1427 1414
1428/* 1415/*
1429 * invalidate_bh_lrus() is called rarely - but not only at unmount. 1416 * invalidate_bh_lrus() is called rarely - but not only at unmount.
@@ -2082,6 +2069,7 @@ int generic_write_end(struct file *file, struct address_space *mapping,
2082 struct page *page, void *fsdata) 2069 struct page *page, void *fsdata)
2083{ 2070{
2084 struct inode *inode = mapping->host; 2071 struct inode *inode = mapping->host;
2072 loff_t old_size = inode->i_size;
2085 int i_size_changed = 0; 2073 int i_size_changed = 0;
2086 2074
2087 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata); 2075 copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
@@ -2101,6 +2089,8 @@ int generic_write_end(struct file *file, struct address_space *mapping,
2101 unlock_page(page); 2089 unlock_page(page);
2102 page_cache_release(page); 2090 page_cache_release(page);
2103 2091
2092 if (old_size < pos)
2093 pagecache_isize_extended(inode, old_size, pos);
2104 /* 2094 /*
2105 * Don't mark the inode dirty under page lock. First, it unnecessarily 2095 * Don't mark the inode dirty under page lock. First, it unnecessarily
2106 * makes the holding time of page lock longer. Second, it forces lock 2096 * makes the holding time of page lock longer. Second, it forces lock
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 659f2ea9e6f7..cefca661464b 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -2638,7 +2638,7 @@ static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
2638 2638
2639 for (i = 0; i < CEPH_CAP_BITS; i++) 2639 for (i = 0; i < CEPH_CAP_BITS; i++)
2640 if ((dirty & (1 << i)) && 2640 if ((dirty & (1 << i)) &&
2641 flush_tid == ci->i_cap_flush_tid[i]) 2641 (u16)flush_tid == ci->i_cap_flush_tid[i])
2642 cleaned |= 1 << i; 2642 cleaned |= 1 << i;
2643 2643
2644 dout("handle_cap_flush_ack inode %p mds%d seq %d on %s cleaned %s," 2644 dout("handle_cap_flush_ack inode %p mds%d seq %d on %s cleaned %s,"
diff --git a/fs/dcache.c b/fs/dcache.c
index d5a23fd0da90..5bc72b07fde2 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -778,6 +778,7 @@ restart:
778 struct dentry *parent = lock_parent(dentry); 778 struct dentry *parent = lock_parent(dentry);
779 if (likely(!dentry->d_lockref.count)) { 779 if (likely(!dentry->d_lockref.count)) {
780 __dentry_kill(dentry); 780 __dentry_kill(dentry);
781 dput(parent);
781 goto restart; 782 goto restart;
782 } 783 }
783 if (parent) 784 if (parent)
@@ -2673,11 +2674,13 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
2673 if (!IS_ROOT(new)) { 2674 if (!IS_ROOT(new)) {
2674 spin_unlock(&inode->i_lock); 2675 spin_unlock(&inode->i_lock);
2675 dput(new); 2676 dput(new);
2677 iput(inode);
2676 return ERR_PTR(-EIO); 2678 return ERR_PTR(-EIO);
2677 } 2679 }
2678 if (d_ancestor(new, dentry)) { 2680 if (d_ancestor(new, dentry)) {
2679 spin_unlock(&inode->i_lock); 2681 spin_unlock(&inode->i_lock);
2680 dput(new); 2682 dput(new);
2683 iput(inode);
2681 return ERR_PTR(-EIO); 2684 return ERR_PTR(-EIO);
2682 } 2685 }
2683 write_seqlock(&rename_lock); 2686 write_seqlock(&rename_lock);
diff --git a/fs/ecryptfs/main.c b/fs/ecryptfs/main.c
index 1b119d3bf924..c4cd1fd86cc2 100644
--- a/fs/ecryptfs/main.c
+++ b/fs/ecryptfs/main.c
@@ -566,6 +566,13 @@ static struct dentry *ecryptfs_mount(struct file_system_type *fs_type, int flags
566 s->s_maxbytes = path.dentry->d_sb->s_maxbytes; 566 s->s_maxbytes = path.dentry->d_sb->s_maxbytes;
567 s->s_blocksize = path.dentry->d_sb->s_blocksize; 567 s->s_blocksize = path.dentry->d_sb->s_blocksize;
568 s->s_magic = ECRYPTFS_SUPER_MAGIC; 568 s->s_magic = ECRYPTFS_SUPER_MAGIC;
569 s->s_stack_depth = path.dentry->d_sb->s_stack_depth + 1;
570
571 rc = -EINVAL;
572 if (s->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) {
573 pr_err("eCryptfs: maximum fs stacking depth exceeded\n");
574 goto out_free;
575 }
569 576
570 inode = ecryptfs_get_inode(path.dentry->d_inode, s); 577 inode = ecryptfs_get_inode(path.dentry->d_inode, s);
571 rc = PTR_ERR(inode); 578 rc = PTR_ERR(inode);
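
[Illustrative sketch, not part of the patch] The eCryptfs change above records its stacking depth as one more than the filesystem it sits on and refuses the mount once FILESYSTEM_MAX_STACK_DEPTH is exceeded. A toy model of that guard; the limit of 2 is taken from include/linux/fs.h of this era and is an assumption here, and the struct and function names are made up for illustration.

#include <stdio.h>

#define FILESYSTEM_MAX_STACK_DEPTH 2

struct sb { int s_stack_depth; };

static int stack_on_top_of(struct sb *upper, const struct sb *lower)
{
        upper->s_stack_depth = lower->s_stack_depth + 1;
        if (upper->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) {
                fprintf(stderr, "maximum fs stacking depth exceeded\n");
                return -1;
        }
        return 0;
}

int main(void)
{
        struct sb base = { 0 }, lower = { 0 }, middle = { 0 }, top = { 0 };

        stack_on_top_of(&lower, &base);         /* depth 1: allowed */
        stack_on_top_of(&middle, &lower);       /* depth 2: allowed */
        return stack_on_top_of(&top, &middle) ? 1 : 0;  /* depth 3: refused */
}
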
diff --git a/fs/exofs/Kbuild b/fs/exofs/Kbuild
index 389ba8312d5d..b47c7b8dc275 100644
--- a/fs/exofs/Kbuild
+++ b/fs/exofs/Kbuild
@@ -4,7 +4,7 @@
4# Copyright (C) 2008 Panasas Inc. All rights reserved. 4# Copyright (C) 2008 Panasas Inc. All rights reserved.
5# 5#
6# Authors: 6# Authors:
7# Boaz Harrosh <bharrosh@panasas.com> 7# Boaz Harrosh <ooo@electrozaur.com>
8# 8#
9# This program is free software; you can redistribute it and/or modify 9# This program is free software; you can redistribute it and/or modify
10# it under the terms of the GNU General Public License version 2 10# it under the terms of the GNU General Public License version 2
diff --git a/fs/exofs/common.h b/fs/exofs/common.h
index 3bbd46956d77..7d88ef566213 100644
--- a/fs/exofs/common.h
+++ b/fs/exofs/common.h
@@ -4,7 +4,7 @@
4 * Copyright (C) 2005, 2006 4 * Copyright (C) 2005, 2006
5 * Avishay Traeger (avishay@gmail.com) 5 * Avishay Traeger (avishay@gmail.com)
6 * Copyright (C) 2008, 2009 6 * Copyright (C) 2008, 2009
7 * Boaz Harrosh <bharrosh@panasas.com> 7 * Boaz Harrosh <ooo@electrozaur.com>
8 * 8 *
9 * Copyrights for code taken from ext2: 9 * Copyrights for code taken from ext2:
10 * Copyright (C) 1992, 1993, 1994, 1995 10 * Copyright (C) 1992, 1993, 1994, 1995
diff --git a/fs/exofs/dir.c b/fs/exofs/dir.c
index 49f51ab4caac..d7defd557601 100644
--- a/fs/exofs/dir.c
+++ b/fs/exofs/dir.c
@@ -2,7 +2,7 @@
2 * Copyright (C) 2005, 2006 2 * Copyright (C) 2005, 2006
3 * Avishay Traeger (avishay@gmail.com) 3 * Avishay Traeger (avishay@gmail.com)
4 * Copyright (C) 2008, 2009 4 * Copyright (C) 2008, 2009
5 * Boaz Harrosh <bharrosh@panasas.com> 5 * Boaz Harrosh <ooo@electrozaur.com>
6 * 6 *
7 * Copyrights for code taken from ext2: 7 * Copyrights for code taken from ext2:
8 * Copyright (C) 1992, 1993, 1994, 1995 8 * Copyright (C) 1992, 1993, 1994, 1995
diff --git a/fs/exofs/exofs.h b/fs/exofs/exofs.h
index fffe86fd7a42..ad9cac670a47 100644
--- a/fs/exofs/exofs.h
+++ b/fs/exofs/exofs.h
@@ -2,7 +2,7 @@
2 * Copyright (C) 2005, 2006 2 * Copyright (C) 2005, 2006
3 * Avishay Traeger (avishay@gmail.com) 3 * Avishay Traeger (avishay@gmail.com)
4 * Copyright (C) 2008, 2009 4 * Copyright (C) 2008, 2009
5 * Boaz Harrosh <bharrosh@panasas.com> 5 * Boaz Harrosh <ooo@electrozaur.com>
6 * 6 *
7 * Copyrights for code taken from ext2: 7 * Copyrights for code taken from ext2:
8 * Copyright (C) 1992, 1993, 1994, 1995 8 * Copyright (C) 1992, 1993, 1994, 1995
diff --git a/fs/exofs/file.c b/fs/exofs/file.c
index 71bf8e4fb5d4..1a376b42d305 100644
--- a/fs/exofs/file.c
+++ b/fs/exofs/file.c
@@ -2,7 +2,7 @@
2 * Copyright (C) 2005, 2006 2 * Copyright (C) 2005, 2006
3 * Avishay Traeger (avishay@gmail.com) 3 * Avishay Traeger (avishay@gmail.com)
4 * Copyright (C) 2008, 2009 4 * Copyright (C) 2008, 2009
5 * Boaz Harrosh <bharrosh@panasas.com> 5 * Boaz Harrosh <ooo@electrozaur.com>
6 * 6 *
7 * Copyrights for code taken from ext2: 7 * Copyrights for code taken from ext2:
8 * Copyright (C) 1992, 1993, 1994, 1995 8 * Copyright (C) 1992, 1993, 1994, 1995
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index 3f9cafd73931..f1d3d4eb8c4f 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -2,7 +2,7 @@
2 * Copyright (C) 2005, 2006 2 * Copyright (C) 2005, 2006
3 * Avishay Traeger (avishay@gmail.com) 3 * Avishay Traeger (avishay@gmail.com)
4 * Copyright (C) 2008, 2009 4 * Copyright (C) 2008, 2009
5 * Boaz Harrosh <bharrosh@panasas.com> 5 * Boaz Harrosh <ooo@electrozaur.com>
6 * 6 *
7 * Copyrights for code taken from ext2: 7 * Copyrights for code taken from ext2:
8 * Copyright (C) 1992, 1993, 1994, 1995 8 * Copyright (C) 1992, 1993, 1994, 1995
diff --git a/fs/exofs/namei.c b/fs/exofs/namei.c
index 4731fd991efe..28907460e8fa 100644
--- a/fs/exofs/namei.c
+++ b/fs/exofs/namei.c
@@ -2,7 +2,7 @@
2 * Copyright (C) 2005, 2006 2 * Copyright (C) 2005, 2006
3 * Avishay Traeger (avishay@gmail.com) 3 * Avishay Traeger (avishay@gmail.com)
4 * Copyright (C) 2008, 2009 4 * Copyright (C) 2008, 2009
5 * Boaz Harrosh <bharrosh@panasas.com> 5 * Boaz Harrosh <ooo@electrozaur.com>
6 * 6 *
7 * Copyrights for code taken from ext2: 7 * Copyrights for code taken from ext2:
8 * Copyright (C) 1992, 1993, 1994, 1995 8 * Copyright (C) 1992, 1993, 1994, 1995
diff --git a/fs/exofs/ore.c b/fs/exofs/ore.c
index cfc0205d62c4..7bd8ac8dfb28 100644
--- a/fs/exofs/ore.c
+++ b/fs/exofs/ore.c
@@ -2,7 +2,7 @@
2 * Copyright (C) 2005, 2006 2 * Copyright (C) 2005, 2006
3 * Avishay Traeger (avishay@gmail.com) 3 * Avishay Traeger (avishay@gmail.com)
4 * Copyright (C) 2008, 2009 4 * Copyright (C) 2008, 2009
5 * Boaz Harrosh <bharrosh@panasas.com> 5 * Boaz Harrosh <ooo@electrozaur.com>
6 * 6 *
7 * This file is part of exofs. 7 * This file is part of exofs.
8 * 8 *
@@ -29,7 +29,7 @@
29 29
30#include "ore_raid.h" 30#include "ore_raid.h"
31 31
32MODULE_AUTHOR("Boaz Harrosh <bharrosh@panasas.com>"); 32MODULE_AUTHOR("Boaz Harrosh <ooo@electrozaur.com>");
33MODULE_DESCRIPTION("Objects Raid Engine ore.ko"); 33MODULE_DESCRIPTION("Objects Raid Engine ore.ko");
34MODULE_LICENSE("GPL"); 34MODULE_LICENSE("GPL");
35 35
diff --git a/fs/exofs/ore_raid.c b/fs/exofs/ore_raid.c
index 84529b8a331b..27cbdb697649 100644
--- a/fs/exofs/ore_raid.c
+++ b/fs/exofs/ore_raid.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 2011 2 * Copyright (C) 2011
3 * Boaz Harrosh <bharrosh@panasas.com> 3 * Boaz Harrosh <ooo@electrozaur.com>
4 * 4 *
5 * This file is part of the objects raid engine (ore). 5 * This file is part of the objects raid engine (ore).
6 * 6 *
diff --git a/fs/exofs/ore_raid.h b/fs/exofs/ore_raid.h
index cf6375d82129..a6e746775570 100644
--- a/fs/exofs/ore_raid.h
+++ b/fs/exofs/ore_raid.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) from 2011 2 * Copyright (C) from 2011
3 * Boaz Harrosh <bharrosh@panasas.com> 3 * Boaz Harrosh <ooo@electrozaur.com>
4 * 4 *
5 * This file is part of the objects raid engine (ore). 5 * This file is part of the objects raid engine (ore).
6 * 6 *
diff --git a/fs/exofs/super.c b/fs/exofs/super.c
index ed73ed8ebbee..95965503afcb 100644
--- a/fs/exofs/super.c
+++ b/fs/exofs/super.c
@@ -2,7 +2,7 @@
2 * Copyright (C) 2005, 2006 2 * Copyright (C) 2005, 2006
3 * Avishay Traeger (avishay@gmail.com) 3 * Avishay Traeger (avishay@gmail.com)
4 * Copyright (C) 2008, 2009 4 * Copyright (C) 2008, 2009
5 * Boaz Harrosh <bharrosh@panasas.com> 5 * Boaz Harrosh <ooo@electrozaur.com>
6 * 6 *
7 * Copyrights for code taken from ext2: 7 * Copyrights for code taken from ext2:
8 * Copyright (C) 1992, 1993, 1994, 1995 8 * Copyright (C) 1992, 1993, 1994, 1995
diff --git a/fs/exofs/symlink.c b/fs/exofs/symlink.c
index 4dd687c3e747..832e2624b80b 100644
--- a/fs/exofs/symlink.c
+++ b/fs/exofs/symlink.c
@@ -2,7 +2,7 @@
2 * Copyright (C) 2005, 2006 2 * Copyright (C) 2005, 2006
3 * Avishay Traeger (avishay@gmail.com) 3 * Avishay Traeger (avishay@gmail.com)
4 * Copyright (C) 2008, 2009 4 * Copyright (C) 2008, 2009
5 * Boaz Harrosh <bharrosh@panasas.com> 5 * Boaz Harrosh <ooo@electrozaur.com>
6 * 6 *
7 * Copyrights for code taken from ext2: 7 * Copyrights for code taken from ext2:
8 * Copyright (C) 1992, 1993, 1994, 1995 8 * Copyright (C) 1992, 1993, 1994, 1995
diff --git a/fs/exofs/sys.c b/fs/exofs/sys.c
index 1b4f2f95fc37..5e6a2c0a1f0b 100644
--- a/fs/exofs/sys.c
+++ b/fs/exofs/sys.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * Copyright (C) 2012 2 * Copyright (C) 2012
3 * Sachin Bhamare <sbhamare@panasas.com> 3 * Sachin Bhamare <sbhamare@panasas.com>
4 * Boaz Harrosh <bharrosh@panasas.com> 4 * Boaz Harrosh <ooo@electrozaur.com>
5 * 5 *
6 * This file is part of exofs. 6 * This file is part of exofs.
7 * 7 *
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 7015db0bafd1..eb742d0e67ff 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -1354,13 +1354,6 @@ set_qf_format:
1354 "not specified."); 1354 "not specified.");
1355 return 0; 1355 return 0;
1356 } 1356 }
1357 } else {
1358 if (sbi->s_jquota_fmt) {
1359 ext3_msg(sb, KERN_ERR, "error: journaled quota format "
1360 "specified with no journaling "
1361 "enabled.");
1362 return 0;
1363 }
1364 } 1357 }
1365#endif 1358#endif
1366 return 1; 1359 return 1;
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 581ef40fbe90..83a6f497c4e0 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -176,7 +176,7 @@ static unsigned int num_clusters_in_group(struct super_block *sb,
176} 176}
177 177
178/* Initializes an uninitialized block bitmap */ 178/* Initializes an uninitialized block bitmap */
179static void ext4_init_block_bitmap(struct super_block *sb, 179static int ext4_init_block_bitmap(struct super_block *sb,
180 struct buffer_head *bh, 180 struct buffer_head *bh,
181 ext4_group_t block_group, 181 ext4_group_t block_group,
182 struct ext4_group_desc *gdp) 182 struct ext4_group_desc *gdp)
@@ -192,7 +192,6 @@ static void ext4_init_block_bitmap(struct super_block *sb,
192 /* If checksum is bad mark all blocks used to prevent allocation 192 /* If checksum is bad mark all blocks used to prevent allocation
193 * essentially implementing a per-group read-only flag. */ 193 * essentially implementing a per-group read-only flag. */
194 if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) { 194 if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
195 ext4_error(sb, "Checksum bad for group %u", block_group);
196 grp = ext4_get_group_info(sb, block_group); 195 grp = ext4_get_group_info(sb, block_group);
197 if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp)) 196 if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
198 percpu_counter_sub(&sbi->s_freeclusters_counter, 197 percpu_counter_sub(&sbi->s_freeclusters_counter,
@@ -205,7 +204,7 @@ static void ext4_init_block_bitmap(struct super_block *sb,
205 count); 204 count);
206 } 205 }
207 set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state); 206 set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
208 return; 207 return -EIO;
209 } 208 }
210 memset(bh->b_data, 0, sb->s_blocksize); 209 memset(bh->b_data, 0, sb->s_blocksize);
211 210
@@ -243,6 +242,7 @@ static void ext4_init_block_bitmap(struct super_block *sb,
243 sb->s_blocksize * 8, bh->b_data); 242 sb->s_blocksize * 8, bh->b_data);
244 ext4_block_bitmap_csum_set(sb, block_group, gdp, bh); 243 ext4_block_bitmap_csum_set(sb, block_group, gdp, bh);
245 ext4_group_desc_csum_set(sb, block_group, gdp); 244 ext4_group_desc_csum_set(sb, block_group, gdp);
245 return 0;
246} 246}
247 247
248/* Return the number of free blocks in a block group. It is used when 248/* Return the number of free blocks in a block group. It is used when
@@ -438,11 +438,15 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
438 } 438 }
439 ext4_lock_group(sb, block_group); 439 ext4_lock_group(sb, block_group);
440 if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { 440 if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
441 ext4_init_block_bitmap(sb, bh, block_group, desc); 441 int err;
442
443 err = ext4_init_block_bitmap(sb, bh, block_group, desc);
442 set_bitmap_uptodate(bh); 444 set_bitmap_uptodate(bh);
443 set_buffer_uptodate(bh); 445 set_buffer_uptodate(bh);
444 ext4_unlock_group(sb, block_group); 446 ext4_unlock_group(sb, block_group);
445 unlock_buffer(bh); 447 unlock_buffer(bh);
448 if (err)
449 ext4_error(sb, "Checksum bad for grp %u", block_group);
446 return bh; 450 return bh;
447 } 451 }
448 ext4_unlock_group(sb, block_group); 452 ext4_unlock_group(sb, block_group);
@@ -636,8 +640,7 @@ ext4_fsblk_t ext4_new_meta_blocks(handle_t *handle, struct inode *inode,
636 * Account for the allocated meta blocks. We will never 640 * Account for the allocated meta blocks. We will never
637 * fail EDQUOT for metdata, but we do account for it. 641 * fail EDQUOT for metdata, but we do account for it.
638 */ 642 */
639 if (!(*errp) && 643 if (!(*errp) && (flags & EXT4_MB_DELALLOC_RESERVED)) {
640 ext4_test_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED)) {
641 spin_lock(&EXT4_I(inode)->i_block_reservation_lock); 644 spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
642 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock); 645 spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
643 dquot_alloc_block_nofail(inode, 646 dquot_alloc_block_nofail(inode,
diff --git a/fs/ext4/bitmap.c b/fs/ext4/bitmap.c
index 3285aa5a706a..b610779a958c 100644
--- a/fs/ext4/bitmap.c
+++ b/fs/ext4/bitmap.c
@@ -24,8 +24,7 @@ int ext4_inode_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
24 __u32 provided, calculated; 24 __u32 provided, calculated;
25 struct ext4_sb_info *sbi = EXT4_SB(sb); 25 struct ext4_sb_info *sbi = EXT4_SB(sb);
26 26
27 if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, 27 if (!ext4_has_metadata_csum(sb))
28 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
29 return 1; 28 return 1;
30 29
31 provided = le16_to_cpu(gdp->bg_inode_bitmap_csum_lo); 30 provided = le16_to_cpu(gdp->bg_inode_bitmap_csum_lo);
@@ -46,8 +45,7 @@ void ext4_inode_bitmap_csum_set(struct super_block *sb, ext4_group_t group,
46 __u32 csum; 45 __u32 csum;
47 struct ext4_sb_info *sbi = EXT4_SB(sb); 46 struct ext4_sb_info *sbi = EXT4_SB(sb);
48 47
49 if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, 48 if (!ext4_has_metadata_csum(sb))
50 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
51 return; 49 return;
52 50
53 csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz); 51 csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz);
@@ -65,8 +63,7 @@ int ext4_block_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
65 struct ext4_sb_info *sbi = EXT4_SB(sb); 63 struct ext4_sb_info *sbi = EXT4_SB(sb);
66 int sz = EXT4_CLUSTERS_PER_GROUP(sb) / 8; 64 int sz = EXT4_CLUSTERS_PER_GROUP(sb) / 8;
67 65
68 if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, 66 if (!ext4_has_metadata_csum(sb))
69 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
70 return 1; 67 return 1;
71 68
72 provided = le16_to_cpu(gdp->bg_block_bitmap_csum_lo); 69 provided = le16_to_cpu(gdp->bg_block_bitmap_csum_lo);
@@ -91,8 +88,7 @@ void ext4_block_bitmap_csum_set(struct super_block *sb, ext4_group_t group,
91 __u32 csum; 88 __u32 csum;
92 struct ext4_sb_info *sbi = EXT4_SB(sb); 89 struct ext4_sb_info *sbi = EXT4_SB(sb);
93 90
94 if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, 91 if (!ext4_has_metadata_csum(sb))
95 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
96 return; 92 return;
97 93
98 csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz); 94 csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz);
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index 0bb3f9ea0832..c24143ea9c08 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -151,13 +151,11 @@ static int ext4_readdir(struct file *file, struct dir_context *ctx)
151 &file->f_ra, file, 151 &file->f_ra, file,
152 index, 1); 152 index, 1);
153 file->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT; 153 file->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;
154 bh = ext4_bread(NULL, inode, map.m_lblk, 0, &err); 154 bh = ext4_bread(NULL, inode, map.m_lblk, 0);
155 if (IS_ERR(bh))
156 return PTR_ERR(bh);
155 } 157 }
156 158
157 /*
158 * We ignore I/O errors on directories so users have a chance
159 * of recovering data when there's a bad sector
160 */
161 if (!bh) { 159 if (!bh) {
162 if (!dir_has_error) { 160 if (!dir_has_error) {
163 EXT4_ERROR_FILE(file, 0, 161 EXT4_ERROR_FILE(file, 0,
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index b0c225cdb52c..c55a1faaed58 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -572,15 +572,15 @@ enum {
572 572
573/* 573/*
574 * The bit position of these flags must not overlap with any of the 574 * The bit position of these flags must not overlap with any of the
575 * EXT4_GET_BLOCKS_*. They are used by ext4_ext_find_extent(), 575 * EXT4_GET_BLOCKS_*. They are used by ext4_find_extent(),
576 * read_extent_tree_block(), ext4_split_extent_at(), 576 * read_extent_tree_block(), ext4_split_extent_at(),
577 * ext4_ext_insert_extent(), and ext4_ext_create_new_leaf(). 577 * ext4_ext_insert_extent(), and ext4_ext_create_new_leaf().
578 * EXT4_EX_NOCACHE is used to indicate that the we shouldn't be 578 * EXT4_EX_NOCACHE is used to indicate that the we shouldn't be
579 * caching the extents when reading from the extent tree while a 579 * caching the extents when reading from the extent tree while a
580 * truncate or punch hole operation is in progress. 580 * truncate or punch hole operation is in progress.
581 */ 581 */
582#define EXT4_EX_NOCACHE 0x0400 582#define EXT4_EX_NOCACHE 0x40000000
583#define EXT4_EX_FORCE_CACHE 0x0800 583#define EXT4_EX_FORCE_CACHE 0x20000000
584 584
585/* 585/*
586 * Flags used by ext4_free_blocks 586 * Flags used by ext4_free_blocks
@@ -890,6 +890,7 @@ struct ext4_inode_info {
890 struct ext4_es_tree i_es_tree; 890 struct ext4_es_tree i_es_tree;
891 rwlock_t i_es_lock; 891 rwlock_t i_es_lock;
892 struct list_head i_es_lru; 892 struct list_head i_es_lru;
893 unsigned int i_es_all_nr; /* protected by i_es_lock */
893 unsigned int i_es_lru_nr; /* protected by i_es_lock */ 894 unsigned int i_es_lru_nr; /* protected by i_es_lock */
894 unsigned long i_touch_when; /* jiffies of last accessing */ 895 unsigned long i_touch_when; /* jiffies of last accessing */
895 896
@@ -1174,6 +1175,9 @@ struct ext4_super_block {
1174#define EXT4_MF_MNTDIR_SAMPLED 0x0001 1175#define EXT4_MF_MNTDIR_SAMPLED 0x0001
1175#define EXT4_MF_FS_ABORTED 0x0002 /* Fatal error detected */ 1176#define EXT4_MF_FS_ABORTED 0x0002 /* Fatal error detected */
1176 1177
1178/* Number of quota types we support */
1179#define EXT4_MAXQUOTAS 2
1180
1177/* 1181/*
1178 * fourth extended-fs super-block data in memory 1182 * fourth extended-fs super-block data in memory
1179 */ 1183 */
@@ -1237,7 +1241,7 @@ struct ext4_sb_info {
1237 u32 s_min_batch_time; 1241 u32 s_min_batch_time;
1238 struct block_device *journal_bdev; 1242 struct block_device *journal_bdev;
1239#ifdef CONFIG_QUOTA 1243#ifdef CONFIG_QUOTA
1240 char *s_qf_names[MAXQUOTAS]; /* Names of quota files with journalled quota */ 1244 char *s_qf_names[EXT4_MAXQUOTAS]; /* Names of quota files with journalled quota */
1241 int s_jquota_fmt; /* Format of quota to use */ 1245 int s_jquota_fmt; /* Format of quota to use */
1242#endif 1246#endif
1243 unsigned int s_want_extra_isize; /* New inodes should reserve # bytes */ 1247 unsigned int s_want_extra_isize; /* New inodes should reserve # bytes */
@@ -1330,8 +1334,7 @@ struct ext4_sb_info {
1330 /* Reclaim extents from extent status tree */ 1334 /* Reclaim extents from extent status tree */
1331 struct shrinker s_es_shrinker; 1335 struct shrinker s_es_shrinker;
1332 struct list_head s_es_lru; 1336 struct list_head s_es_lru;
1333 unsigned long s_es_last_sorted; 1337 struct ext4_es_stats s_es_stats;
1334 struct percpu_counter s_extent_cache_cnt;
1335 struct mb_cache *s_mb_cache; 1338 struct mb_cache *s_mb_cache;
1336 spinlock_t s_es_lru_lock ____cacheline_aligned_in_smp; 1339 spinlock_t s_es_lru_lock ____cacheline_aligned_in_smp;
1337 1340
@@ -1399,7 +1402,6 @@ enum {
1399 EXT4_STATE_EXT_MIGRATE, /* Inode is migrating */ 1402 EXT4_STATE_EXT_MIGRATE, /* Inode is migrating */
1400 EXT4_STATE_DIO_UNWRITTEN, /* need convert on dio done*/ 1403 EXT4_STATE_DIO_UNWRITTEN, /* need convert on dio done*/
1401 EXT4_STATE_NEWENTRY, /* File just added to dir */ 1404 EXT4_STATE_NEWENTRY, /* File just added to dir */
1402 EXT4_STATE_DELALLOC_RESERVED, /* blks already reserved for delalloc */
1403 EXT4_STATE_DIOREAD_LOCK, /* Disable support for dio read 1405 EXT4_STATE_DIOREAD_LOCK, /* Disable support for dio read
1404 nolocking */ 1406 nolocking */
1405 EXT4_STATE_MAY_INLINE_DATA, /* may have in-inode data */ 1407 EXT4_STATE_MAY_INLINE_DATA, /* may have in-inode data */
@@ -2086,10 +2088,8 @@ extern int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
2086extern int ext4_trim_fs(struct super_block *, struct fstrim_range *); 2088extern int ext4_trim_fs(struct super_block *, struct fstrim_range *);
2087 2089
2088/* inode.c */ 2090/* inode.c */
2089struct buffer_head *ext4_getblk(handle_t *, struct inode *, 2091struct buffer_head *ext4_getblk(handle_t *, struct inode *, ext4_lblk_t, int);
2090 ext4_lblk_t, int, int *); 2092struct buffer_head *ext4_bread(handle_t *, struct inode *, ext4_lblk_t, int);
2091struct buffer_head *ext4_bread(handle_t *, struct inode *,
2092 ext4_lblk_t, int, int *);
2093int ext4_get_block_write(struct inode *inode, sector_t iblock, 2093int ext4_get_block_write(struct inode *inode, sector_t iblock,
2094 struct buffer_head *bh_result, int create); 2094 struct buffer_head *bh_result, int create);
2095int ext4_get_block(struct inode *inode, sector_t iblock, 2095int ext4_get_block(struct inode *inode, sector_t iblock,
@@ -2109,6 +2109,7 @@ int do_journal_get_write_access(handle_t *handle,
2109#define CONVERT_INLINE_DATA 2 2109#define CONVERT_INLINE_DATA 2
2110 2110
2111extern struct inode *ext4_iget(struct super_block *, unsigned long); 2111extern struct inode *ext4_iget(struct super_block *, unsigned long);
2112extern struct inode *ext4_iget_normal(struct super_block *, unsigned long);
2112extern int ext4_write_inode(struct inode *, struct writeback_control *); 2113extern int ext4_write_inode(struct inode *, struct writeback_control *);
2113extern int ext4_setattr(struct dentry *, struct iattr *); 2114extern int ext4_setattr(struct dentry *, struct iattr *);
2114extern int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry, 2115extern int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
@@ -2332,10 +2333,18 @@ extern int ext4_register_li_request(struct super_block *sb,
2332static inline int ext4_has_group_desc_csum(struct super_block *sb) 2333static inline int ext4_has_group_desc_csum(struct super_block *sb)
2333{ 2334{
2334 return EXT4_HAS_RO_COMPAT_FEATURE(sb, 2335 return EXT4_HAS_RO_COMPAT_FEATURE(sb,
2335 EXT4_FEATURE_RO_COMPAT_GDT_CSUM | 2336 EXT4_FEATURE_RO_COMPAT_GDT_CSUM) ||
2336 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM); 2337 (EXT4_SB(sb)->s_chksum_driver != NULL);
2337} 2338}
2338 2339
2340static inline int ext4_has_metadata_csum(struct super_block *sb)
2341{
2342 WARN_ON_ONCE(EXT4_HAS_RO_COMPAT_FEATURE(sb,
2343 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
2344 !EXT4_SB(sb)->s_chksum_driver);
2345
2346 return (EXT4_SB(sb)->s_chksum_driver != NULL);
2347}
2339static inline ext4_fsblk_t ext4_blocks_count(struct ext4_super_block *es) 2348static inline ext4_fsblk_t ext4_blocks_count(struct ext4_super_block *es)
2340{ 2349{
2341 return ((ext4_fsblk_t)le32_to_cpu(es->s_blocks_count_hi) << 32) | 2350 return ((ext4_fsblk_t)le32_to_cpu(es->s_blocks_count_hi) << 32) |
@@ -2731,21 +2740,26 @@ extern int ext4_can_extents_be_merged(struct inode *inode,
2731 struct ext4_extent *ex1, 2740 struct ext4_extent *ex1,
2732 struct ext4_extent *ex2); 2741 struct ext4_extent *ex2);
2733extern int ext4_ext_insert_extent(handle_t *, struct inode *, 2742extern int ext4_ext_insert_extent(handle_t *, struct inode *,
2734 struct ext4_ext_path *, 2743 struct ext4_ext_path **,
2735 struct ext4_extent *, int); 2744 struct ext4_extent *, int);
2736extern struct ext4_ext_path *ext4_ext_find_extent(struct inode *, ext4_lblk_t, 2745extern struct ext4_ext_path *ext4_find_extent(struct inode *, ext4_lblk_t,
2737 struct ext4_ext_path *, 2746 struct ext4_ext_path **,
2738 int flags); 2747 int flags);
2739extern void ext4_ext_drop_refs(struct ext4_ext_path *); 2748extern void ext4_ext_drop_refs(struct ext4_ext_path *);
2740extern int ext4_ext_check_inode(struct inode *inode); 2749extern int ext4_ext_check_inode(struct inode *inode);
2741extern int ext4_find_delalloc_range(struct inode *inode, 2750extern int ext4_find_delalloc_range(struct inode *inode,
2742 ext4_lblk_t lblk_start, 2751 ext4_lblk_t lblk_start,
2743 ext4_lblk_t lblk_end); 2752 ext4_lblk_t lblk_end);
2744extern int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk); 2753extern int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk);
2754extern ext4_lblk_t ext4_ext_next_allocated_block(struct ext4_ext_path *path);
2745extern int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, 2755extern int ext4_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
2746 __u64 start, __u64 len); 2756 __u64 start, __u64 len);
2747extern int ext4_ext_precache(struct inode *inode); 2757extern int ext4_ext_precache(struct inode *inode);
2748extern int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len); 2758extern int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len);
2759extern int ext4_swap_extents(handle_t *handle, struct inode *inode1,
2760 struct inode *inode2, ext4_lblk_t lblk1,
2761 ext4_lblk_t lblk2, ext4_lblk_t count,
2762 int mark_unwritten,int *err);
2749 2763
2750/* move_extent.c */ 2764/* move_extent.c */
2751extern void ext4_double_down_write_data_sem(struct inode *first, 2765extern void ext4_double_down_write_data_sem(struct inode *first,
@@ -2755,8 +2769,6 @@ extern void ext4_double_up_write_data_sem(struct inode *orig_inode,
2755extern int ext4_move_extents(struct file *o_filp, struct file *d_filp, 2769extern int ext4_move_extents(struct file *o_filp, struct file *d_filp,
2756 __u64 start_orig, __u64 start_donor, 2770 __u64 start_orig, __u64 start_donor,
2757 __u64 len, __u64 *moved_len); 2771 __u64 len, __u64 *moved_len);
2758extern int mext_next_extent(struct inode *inode, struct ext4_ext_path *path,
2759 struct ext4_extent **extent);
2760 2772
2761/* page-io.c */ 2773/* page-io.c */
2762extern int __init ext4_init_pageio(void); 2774extern int __init ext4_init_pageio(void);
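
The new ext4_has_metadata_csum() helper (and the relaxed ext4_has_group_desc_csum()) let callers key checksum handling off the crc32c driver that was actually loaded, rather than re-testing the RO_COMPAT_METADATA_CSUM feature bit; the extents.c hunks further down convert their feature checks accordingly. A minimal, hypothetical caller sketch (the function name is illustrative and not part of this patch):

        static int example_csum_enabled(struct super_block *sb)
        {
                /* true iff a crc32c driver was set up for this filesystem;
                 * the helper WARNs once if the feature bit is set but the
                 * driver is missing */
                if (!ext4_has_metadata_csum(sb))
                        return 0;
                return 1;
        }
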
diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h
index a867f5ca9991..3c9381547094 100644
--- a/fs/ext4/ext4_extents.h
+++ b/fs/ext4/ext4_extents.h
@@ -123,6 +123,7 @@ find_ext4_extent_tail(struct ext4_extent_header *eh)
123struct ext4_ext_path { 123struct ext4_ext_path {
124 ext4_fsblk_t p_block; 124 ext4_fsblk_t p_block;
125 __u16 p_depth; 125 __u16 p_depth;
126 __u16 p_maxdepth;
126 struct ext4_extent *p_ext; 127 struct ext4_extent *p_ext;
127 struct ext4_extent_idx *p_idx; 128 struct ext4_extent_idx *p_idx;
128 struct ext4_extent_header *p_hdr; 129 struct ext4_extent_header *p_hdr;
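
The new p_maxdepth member records how many levels the path array was allocated for, which is what allows ext4_find_extent() (next file) to reuse a caller-supplied path and only reallocate when the tree has grown deeper. The standalone sketch below illustrates just that capacity-tracking pattern; it is an analogy, not ext4 code:

        #include <stdlib.h>

        struct path_buf {
                int maxdepth;   /* capacity recorded when the array was allocated */
                /* ... per-level lookup state would live here ... */
        };

        static struct path_buf *get_path(struct path_buf *old, int depth)
        {
                if (old && depth <= old->maxdepth)
                        return old;                     /* still fits: reuse, no allocation */
                free(old);                              /* too shallow: throw it away */
                struct path_buf *p = calloc(depth + 2, sizeof(*p));
                if (p)
                        p->maxdepth = depth + 1;        /* remember the new capacity */
                return p;
        }
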
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
index 0074e0d23d6e..3445035c7e01 100644
--- a/fs/ext4/ext4_jbd2.c
+++ b/fs/ext4/ext4_jbd2.c
@@ -256,8 +256,8 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
256 set_buffer_prio(bh); 256 set_buffer_prio(bh);
257 if (ext4_handle_valid(handle)) { 257 if (ext4_handle_valid(handle)) {
258 err = jbd2_journal_dirty_metadata(handle, bh); 258 err = jbd2_journal_dirty_metadata(handle, bh);
259 /* Errors can only happen if there is a bug */ 259 /* Errors can only happen due to aborted journal or a nasty bug */
260 if (WARN_ON_ONCE(err)) { 260 if (!is_handle_aborted(handle) && WARN_ON_ONCE(err)) {
261 ext4_journal_abort_handle(where, line, __func__, bh, 261 ext4_journal_abort_handle(where, line, __func__, bh,
262 handle, err); 262 handle, err);
263 if (inode == NULL) { 263 if (inode == NULL) {
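
The __ext4_handle_dirty_metadata() hunk above changes the error policy: jbd2_journal_dirty_metadata() failing on an already-aborted handle is expected (the journal is dead), so only a failure on a live handle triggers the one-time warning and the explicit handle abort. An equivalent, slightly expanded form of the new condition:

        err = jbd2_journal_dirty_metadata(handle, bh);
        if (err && is_handle_aborted(handle)) {
                /* journal already aborted: the error is expected, stay quiet */
        } else if (WARN_ON_ONCE(err)) {
                ext4_journal_abort_handle(where, line, __func__, bh, handle, err);
                /* ... error reporting continues as in the hunk above ... */
        }
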
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
index 17c00ff202f2..9c5b49fb281e 100644
--- a/fs/ext4/ext4_jbd2.h
+++ b/fs/ext4/ext4_jbd2.h
@@ -102,9 +102,9 @@
102#define EXT4_QUOTA_INIT_BLOCKS(sb) 0 102#define EXT4_QUOTA_INIT_BLOCKS(sb) 0
103#define EXT4_QUOTA_DEL_BLOCKS(sb) 0 103#define EXT4_QUOTA_DEL_BLOCKS(sb) 0
104#endif 104#endif
105#define EXT4_MAXQUOTAS_TRANS_BLOCKS(sb) (MAXQUOTAS*EXT4_QUOTA_TRANS_BLOCKS(sb)) 105#define EXT4_MAXQUOTAS_TRANS_BLOCKS(sb) (EXT4_MAXQUOTAS*EXT4_QUOTA_TRANS_BLOCKS(sb))
106#define EXT4_MAXQUOTAS_INIT_BLOCKS(sb) (MAXQUOTAS*EXT4_QUOTA_INIT_BLOCKS(sb)) 106#define EXT4_MAXQUOTAS_INIT_BLOCKS(sb) (EXT4_MAXQUOTAS*EXT4_QUOTA_INIT_BLOCKS(sb))
107#define EXT4_MAXQUOTAS_DEL_BLOCKS(sb) (MAXQUOTAS*EXT4_QUOTA_DEL_BLOCKS(sb)) 107#define EXT4_MAXQUOTAS_DEL_BLOCKS(sb) (EXT4_MAXQUOTAS*EXT4_QUOTA_DEL_BLOCKS(sb))
108 108
109static inline int ext4_jbd2_credits_xattr(struct inode *inode) 109static inline int ext4_jbd2_credits_xattr(struct inode *inode)
110{ 110{
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 74292a71b384..0b16fb4c06d3 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -73,8 +73,7 @@ static int ext4_extent_block_csum_verify(struct inode *inode,
73{ 73{
74 struct ext4_extent_tail *et; 74 struct ext4_extent_tail *et;
75 75
76 if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, 76 if (!ext4_has_metadata_csum(inode->i_sb))
77 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
78 return 1; 77 return 1;
79 78
80 et = find_ext4_extent_tail(eh); 79 et = find_ext4_extent_tail(eh);
@@ -88,8 +87,7 @@ static void ext4_extent_block_csum_set(struct inode *inode,
88{ 87{
89 struct ext4_extent_tail *et; 88 struct ext4_extent_tail *et;
90 89
91 if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, 90 if (!ext4_has_metadata_csum(inode->i_sb))
92 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
93 return; 91 return;
94 92
95 et = find_ext4_extent_tail(eh); 93 et = find_ext4_extent_tail(eh);
@@ -98,14 +96,14 @@ static void ext4_extent_block_csum_set(struct inode *inode,
98 96
99static int ext4_split_extent(handle_t *handle, 97static int ext4_split_extent(handle_t *handle,
100 struct inode *inode, 98 struct inode *inode,
101 struct ext4_ext_path *path, 99 struct ext4_ext_path **ppath,
102 struct ext4_map_blocks *map, 100 struct ext4_map_blocks *map,
103 int split_flag, 101 int split_flag,
104 int flags); 102 int flags);
105 103
106static int ext4_split_extent_at(handle_t *handle, 104static int ext4_split_extent_at(handle_t *handle,
107 struct inode *inode, 105 struct inode *inode,
108 struct ext4_ext_path *path, 106 struct ext4_ext_path **ppath,
109 ext4_lblk_t split, 107 ext4_lblk_t split,
110 int split_flag, 108 int split_flag,
111 int flags); 109 int flags);
@@ -291,6 +289,20 @@ static inline int ext4_ext_space_root_idx(struct inode *inode, int check)
291 return size; 289 return size;
292} 290}
293 291
292static inline int
293ext4_force_split_extent_at(handle_t *handle, struct inode *inode,
294 struct ext4_ext_path **ppath, ext4_lblk_t lblk,
295 int nofail)
296{
297 struct ext4_ext_path *path = *ppath;
298 int unwritten = ext4_ext_is_unwritten(path[path->p_depth].p_ext);
299
300 return ext4_split_extent_at(handle, inode, ppath, lblk, unwritten ?
301 EXT4_EXT_MARK_UNWRIT1|EXT4_EXT_MARK_UNWRIT2 : 0,
302 EXT4_EX_NOCACHE | EXT4_GET_BLOCKS_PRE_IO |
303 (nofail ? EXT4_GET_BLOCKS_METADATA_NOFAIL:0));
304}
305
294/* 306/*
295 * Calculate the number of metadata blocks needed 307 * Calculate the number of metadata blocks needed
296 * to allocate @blocks 308 * to allocate @blocks
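
ext4_force_split_extent_at() bundles the unwritten-marking and PRE_IO/NOCACHE flags that call sites used to assemble by hand. Its usage, as adopted later in this same file by the ext4_ext_remove_space() hunk, looks like:

        /* split at end + 1; nofail = 1 allows dipping into reserved metadata */
        err = ext4_force_split_extent_at(handle, inode, &path, end + 1, 1);
        if (err < 0)
                goto out;
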
@@ -695,9 +707,11 @@ static void ext4_ext_show_move(struct inode *inode, struct ext4_ext_path *path,
695 707
696void ext4_ext_drop_refs(struct ext4_ext_path *path) 708void ext4_ext_drop_refs(struct ext4_ext_path *path)
697{ 709{
698 int depth = path->p_depth; 710 int depth, i;
699 int i;
700 711
712 if (!path)
713 return;
714 depth = path->p_depth;
701 for (i = 0; i <= depth; i++, path++) 715 for (i = 0; i <= depth; i++, path++)
702 if (path->p_bh) { 716 if (path->p_bh) {
703 brelse(path->p_bh); 717 brelse(path->p_bh);
@@ -841,24 +855,32 @@ int ext4_ext_tree_init(handle_t *handle, struct inode *inode)
841} 855}
842 856
843struct ext4_ext_path * 857struct ext4_ext_path *
844ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block, 858ext4_find_extent(struct inode *inode, ext4_lblk_t block,
845 struct ext4_ext_path *path, int flags) 859 struct ext4_ext_path **orig_path, int flags)
846{ 860{
847 struct ext4_extent_header *eh; 861 struct ext4_extent_header *eh;
848 struct buffer_head *bh; 862 struct buffer_head *bh;
849 short int depth, i, ppos = 0, alloc = 0; 863 struct ext4_ext_path *path = orig_path ? *orig_path : NULL;
864 short int depth, i, ppos = 0;
850 int ret; 865 int ret;
851 866
852 eh = ext_inode_hdr(inode); 867 eh = ext_inode_hdr(inode);
853 depth = ext_depth(inode); 868 depth = ext_depth(inode);
854 869
855 /* account possible depth increase */ 870 if (path) {
871 ext4_ext_drop_refs(path);
872 if (depth > path[0].p_maxdepth) {
873 kfree(path);
874 *orig_path = path = NULL;
875 }
876 }
856 if (!path) { 877 if (!path) {
878 /* account possible depth increase */
857 path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2), 879 path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 2),
858 GFP_NOFS); 880 GFP_NOFS);
859 if (!path) 881 if (unlikely(!path))
860 return ERR_PTR(-ENOMEM); 882 return ERR_PTR(-ENOMEM);
861 alloc = 1; 883 path[0].p_maxdepth = depth + 1;
862 } 884 }
863 path[0].p_hdr = eh; 885 path[0].p_hdr = eh;
864 path[0].p_bh = NULL; 886 path[0].p_bh = NULL;
@@ -876,7 +898,7 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
876 898
877 bh = read_extent_tree_block(inode, path[ppos].p_block, --i, 899 bh = read_extent_tree_block(inode, path[ppos].p_block, --i,
878 flags); 900 flags);
879 if (IS_ERR(bh)) { 901 if (unlikely(IS_ERR(bh))) {
880 ret = PTR_ERR(bh); 902 ret = PTR_ERR(bh);
881 goto err; 903 goto err;
882 } 904 }
@@ -910,8 +932,9 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
910 932
911err: 933err:
912 ext4_ext_drop_refs(path); 934 ext4_ext_drop_refs(path);
913 if (alloc) 935 kfree(path);
914 kfree(path); 936 if (orig_path)
937 *orig_path = NULL;
915 return ERR_PTR(ret); 938 return ERR_PTR(ret);
916} 939}
917 940
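
ext4_find_extent() (renamed from ext4_ext_find_extent()) now takes an optional struct ext4_ext_path ** and owns the path's lifetime across calls: it reuses the array while the tree depth still fits p_maxdepth, reallocates otherwise, and on failure frees the path and clears *orig_path. The sketch below is a condensed, hedged view of how the loop in ext4_ext_shift_extents() (further down in this patch) relies on that contract; inode, lblk and stop_block stand in for the caller's state:

        struct ext4_ext_path *path = NULL;

        while (lblk < stop_block) {             /* placeholder loop condition */
                path = ext4_find_extent(inode, lblk, &path, 0);
                if (IS_ERR(path))
                        return PTR_ERR(path);   /* helper already freed the old path */
                /* ... inspect or modify path[path->p_depth] ... */
                lblk = ext4_ext_next_allocated_block(path);
        }
        ext4_ext_drop_refs(path);
        kfree(path);
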
@@ -1238,16 +1261,24 @@ cleanup:
1238 * just created block 1261 * just created block
1239 */ 1262 */
1240static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode, 1263static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
1241 unsigned int flags, 1264 unsigned int flags)
1242 struct ext4_extent *newext)
1243{ 1265{
1244 struct ext4_extent_header *neh; 1266 struct ext4_extent_header *neh;
1245 struct buffer_head *bh; 1267 struct buffer_head *bh;
1246 ext4_fsblk_t newblock; 1268 ext4_fsblk_t newblock, goal = 0;
1269 struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
1247 int err = 0; 1270 int err = 0;
1248 1271
1249 newblock = ext4_ext_new_meta_block(handle, inode, NULL, 1272 /* Try to prepend new index to old one */
1250 newext, &err, flags); 1273 if (ext_depth(inode))
1274 goal = ext4_idx_pblock(EXT_FIRST_INDEX(ext_inode_hdr(inode)));
1275 if (goal > le32_to_cpu(es->s_first_data_block)) {
1276 flags |= EXT4_MB_HINT_TRY_GOAL;
1277 goal--;
1278 } else
1279 goal = ext4_inode_to_goal_block(inode);
1280 newblock = ext4_new_meta_blocks(handle, inode, goal, flags,
1281 NULL, &err);
1251 if (newblock == 0) 1282 if (newblock == 0)
1252 return err; 1283 return err;
1253 1284
@@ -1314,9 +1345,10 @@ out:
1314static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode, 1345static int ext4_ext_create_new_leaf(handle_t *handle, struct inode *inode,
1315 unsigned int mb_flags, 1346 unsigned int mb_flags,
1316 unsigned int gb_flags, 1347 unsigned int gb_flags,
1317 struct ext4_ext_path *path, 1348 struct ext4_ext_path **ppath,
1318 struct ext4_extent *newext) 1349 struct ext4_extent *newext)
1319{ 1350{
1351 struct ext4_ext_path *path = *ppath;
1320 struct ext4_ext_path *curp; 1352 struct ext4_ext_path *curp;
1321 int depth, i, err = 0; 1353 int depth, i, err = 0;
1322 1354
@@ -1340,23 +1372,21 @@ repeat:
1340 goto out; 1372 goto out;
1341 1373
1342 /* refill path */ 1374 /* refill path */
1343 ext4_ext_drop_refs(path); 1375 path = ext4_find_extent(inode,
1344 path = ext4_ext_find_extent(inode,
1345 (ext4_lblk_t)le32_to_cpu(newext->ee_block), 1376 (ext4_lblk_t)le32_to_cpu(newext->ee_block),
1346 path, gb_flags); 1377 ppath, gb_flags);
1347 if (IS_ERR(path)) 1378 if (IS_ERR(path))
1348 err = PTR_ERR(path); 1379 err = PTR_ERR(path);
1349 } else { 1380 } else {
1350 /* tree is full, time to grow in depth */ 1381 /* tree is full, time to grow in depth */
1351 err = ext4_ext_grow_indepth(handle, inode, mb_flags, newext); 1382 err = ext4_ext_grow_indepth(handle, inode, mb_flags);
1352 if (err) 1383 if (err)
1353 goto out; 1384 goto out;
1354 1385
1355 /* refill path */ 1386 /* refill path */
1356 ext4_ext_drop_refs(path); 1387 path = ext4_find_extent(inode,
1357 path = ext4_ext_find_extent(inode,
1358 (ext4_lblk_t)le32_to_cpu(newext->ee_block), 1388 (ext4_lblk_t)le32_to_cpu(newext->ee_block),
1359 path, gb_flags); 1389 ppath, gb_flags);
1360 if (IS_ERR(path)) { 1390 if (IS_ERR(path)) {
1361 err = PTR_ERR(path); 1391 err = PTR_ERR(path);
1362 goto out; 1392 goto out;
@@ -1559,7 +1589,7 @@ found_extent:
1559 * allocated block. Thus, index entries have to be consistent 1589 * allocated block. Thus, index entries have to be consistent
1560 * with leaves. 1590 * with leaves.
1561 */ 1591 */
1562static ext4_lblk_t 1592ext4_lblk_t
1563ext4_ext_next_allocated_block(struct ext4_ext_path *path) 1593ext4_ext_next_allocated_block(struct ext4_ext_path *path)
1564{ 1594{
1565 int depth; 1595 int depth;
@@ -1802,6 +1832,7 @@ static void ext4_ext_try_to_merge_up(handle_t *handle,
1802 sizeof(struct ext4_extent_idx); 1832 sizeof(struct ext4_extent_idx);
1803 s += sizeof(struct ext4_extent_header); 1833 s += sizeof(struct ext4_extent_header);
1804 1834
1835 path[1].p_maxdepth = path[0].p_maxdepth;
1805 memcpy(path[0].p_hdr, path[1].p_hdr, s); 1836 memcpy(path[0].p_hdr, path[1].p_hdr, s);
1806 path[0].p_depth = 0; 1837 path[0].p_depth = 0;
1807 path[0].p_ext = EXT_FIRST_EXTENT(path[0].p_hdr) + 1838 path[0].p_ext = EXT_FIRST_EXTENT(path[0].p_hdr) +
@@ -1896,9 +1927,10 @@ out:
1896 * creating new leaf in the no-space case. 1927 * creating new leaf in the no-space case.
1897 */ 1928 */
1898int ext4_ext_insert_extent(handle_t *handle, struct inode *inode, 1929int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
1899 struct ext4_ext_path *path, 1930 struct ext4_ext_path **ppath,
1900 struct ext4_extent *newext, int gb_flags) 1931 struct ext4_extent *newext, int gb_flags)
1901{ 1932{
1933 struct ext4_ext_path *path = *ppath;
1902 struct ext4_extent_header *eh; 1934 struct ext4_extent_header *eh;
1903 struct ext4_extent *ex, *fex; 1935 struct ext4_extent *ex, *fex;
1904 struct ext4_extent *nearex; /* nearest extent */ 1936 struct ext4_extent *nearex; /* nearest extent */
@@ -1907,6 +1939,8 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
1907 ext4_lblk_t next; 1939 ext4_lblk_t next;
1908 int mb_flags = 0, unwritten; 1940 int mb_flags = 0, unwritten;
1909 1941
1942 if (gb_flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
1943 mb_flags |= EXT4_MB_DELALLOC_RESERVED;
1910 if (unlikely(ext4_ext_get_actual_len(newext) == 0)) { 1944 if (unlikely(ext4_ext_get_actual_len(newext) == 0)) {
1911 EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0"); 1945 EXT4_ERROR_INODE(inode, "ext4_ext_get_actual_len(newext) == 0");
1912 return -EIO; 1946 return -EIO;
@@ -1925,7 +1959,7 @@ int ext4_ext_insert_extent(handle_t *handle, struct inode *inode,
1925 /* 1959 /*
1926 * Try to see whether we should rather test the extent on 1960 * Try to see whether we should rather test the extent on
1927 * right from ex, or from the left of ex. This is because 1961 * right from ex, or from the left of ex. This is because
1928 * ext4_ext_find_extent() can return either extent on the 1962 * ext4_find_extent() can return either extent on the
1929 * left, or on the right from the searched position. This 1963 * left, or on the right from the searched position. This
1930 * will make merging more effective. 1964 * will make merging more effective.
1931 */ 1965 */
@@ -2008,7 +2042,7 @@ prepend:
2008 if (next != EXT_MAX_BLOCKS) { 2042 if (next != EXT_MAX_BLOCKS) {
2009 ext_debug("next leaf block - %u\n", next); 2043 ext_debug("next leaf block - %u\n", next);
2010 BUG_ON(npath != NULL); 2044 BUG_ON(npath != NULL);
2011 npath = ext4_ext_find_extent(inode, next, NULL, 0); 2045 npath = ext4_find_extent(inode, next, NULL, 0);
2012 if (IS_ERR(npath)) 2046 if (IS_ERR(npath))
2013 return PTR_ERR(npath); 2047 return PTR_ERR(npath);
2014 BUG_ON(npath->p_depth != path->p_depth); 2048 BUG_ON(npath->p_depth != path->p_depth);
@@ -2028,9 +2062,9 @@ prepend:
2028 * We're gonna add a new leaf in the tree. 2062 * We're gonna add a new leaf in the tree.
2029 */ 2063 */
2030 if (gb_flags & EXT4_GET_BLOCKS_METADATA_NOFAIL) 2064 if (gb_flags & EXT4_GET_BLOCKS_METADATA_NOFAIL)
2031 mb_flags = EXT4_MB_USE_RESERVED; 2065 mb_flags |= EXT4_MB_USE_RESERVED;
2032 err = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags, 2066 err = ext4_ext_create_new_leaf(handle, inode, mb_flags, gb_flags,
2033 path, newext); 2067 ppath, newext);
2034 if (err) 2068 if (err)
2035 goto cleanup; 2069 goto cleanup;
2036 depth = ext_depth(inode); 2070 depth = ext_depth(inode);
@@ -2108,10 +2142,8 @@ merge:
2108 err = ext4_ext_dirty(handle, inode, path + path->p_depth); 2142 err = ext4_ext_dirty(handle, inode, path + path->p_depth);
2109 2143
2110cleanup: 2144cleanup:
2111 if (npath) { 2145 ext4_ext_drop_refs(npath);
2112 ext4_ext_drop_refs(npath); 2146 kfree(npath);
2113 kfree(npath);
2114 }
2115 return err; 2147 return err;
2116} 2148}
2117 2149

@@ -2133,13 +2165,7 @@ static int ext4_fill_fiemap_extents(struct inode *inode,
2133 /* find extent for this block */ 2165 /* find extent for this block */
2134 down_read(&EXT4_I(inode)->i_data_sem); 2166 down_read(&EXT4_I(inode)->i_data_sem);
2135 2167
2136 if (path && ext_depth(inode) != depth) { 2168 path = ext4_find_extent(inode, block, &path, 0);
2137 /* depth was changed. we have to realloc path */
2138 kfree(path);
2139 path = NULL;
2140 }
2141
2142 path = ext4_ext_find_extent(inode, block, path, 0);
2143 if (IS_ERR(path)) { 2169 if (IS_ERR(path)) {
2144 up_read(&EXT4_I(inode)->i_data_sem); 2170 up_read(&EXT4_I(inode)->i_data_sem);
2145 err = PTR_ERR(path); 2171 err = PTR_ERR(path);
@@ -2156,7 +2182,6 @@ static int ext4_fill_fiemap_extents(struct inode *inode,
2156 } 2182 }
2157 ex = path[depth].p_ext; 2183 ex = path[depth].p_ext;
2158 next = ext4_ext_next_allocated_block(path); 2184 next = ext4_ext_next_allocated_block(path);
2159 ext4_ext_drop_refs(path);
2160 2185
2161 flags = 0; 2186 flags = 0;
2162 exists = 0; 2187 exists = 0;
@@ -2266,11 +2291,8 @@ static int ext4_fill_fiemap_extents(struct inode *inode,
2266 block = es.es_lblk + es.es_len; 2291 block = es.es_lblk + es.es_len;
2267 } 2292 }
2268 2293
2269 if (path) { 2294 ext4_ext_drop_refs(path);
2270 ext4_ext_drop_refs(path); 2295 kfree(path);
2271 kfree(path);
2272 }
2273
2274 return err; 2296 return err;
2275} 2297}
2276 2298
@@ -2826,7 +2848,7 @@ again:
2826 ext4_lblk_t ee_block; 2848 ext4_lblk_t ee_block;
2827 2849
2828 /* find extent for this block */ 2850 /* find extent for this block */
2829 path = ext4_ext_find_extent(inode, end, NULL, EXT4_EX_NOCACHE); 2851 path = ext4_find_extent(inode, end, NULL, EXT4_EX_NOCACHE);
2830 if (IS_ERR(path)) { 2852 if (IS_ERR(path)) {
2831 ext4_journal_stop(handle); 2853 ext4_journal_stop(handle);
2832 return PTR_ERR(path); 2854 return PTR_ERR(path);
@@ -2854,24 +2876,14 @@ again:
2854 */ 2876 */
2855 if (end >= ee_block && 2877 if (end >= ee_block &&
2856 end < ee_block + ext4_ext_get_actual_len(ex) - 1) { 2878 end < ee_block + ext4_ext_get_actual_len(ex) - 1) {
2857 int split_flag = 0;
2858
2859 if (ext4_ext_is_unwritten(ex))
2860 split_flag = EXT4_EXT_MARK_UNWRIT1 |
2861 EXT4_EXT_MARK_UNWRIT2;
2862
2863 /* 2879 /*
2864 * Split the extent in two so that 'end' is the last 2880 * Split the extent in two so that 'end' is the last
2865 * block in the first new extent. Also we should not 2881 * block in the first new extent. Also we should not
2866 * fail removing space due to ENOSPC so try to use 2882 * fail removing space due to ENOSPC so try to use
2867 * reserved block if that happens. 2883 * reserved block if that happens.
2868 */ 2884 */
2869 err = ext4_split_extent_at(handle, inode, path, 2885 err = ext4_force_split_extent_at(handle, inode, &path,
2870 end + 1, split_flag, 2886 end + 1, 1);
2871 EXT4_EX_NOCACHE |
2872 EXT4_GET_BLOCKS_PRE_IO |
2873 EXT4_GET_BLOCKS_METADATA_NOFAIL);
2874
2875 if (err < 0) 2887 if (err < 0)
2876 goto out; 2888 goto out;
2877 } 2889 }
@@ -2893,7 +2905,7 @@ again:
2893 ext4_journal_stop(handle); 2905 ext4_journal_stop(handle);
2894 return -ENOMEM; 2906 return -ENOMEM;
2895 } 2907 }
2896 path[0].p_depth = depth; 2908 path[0].p_maxdepth = path[0].p_depth = depth;
2897 path[0].p_hdr = ext_inode_hdr(inode); 2909 path[0].p_hdr = ext_inode_hdr(inode);
2898 i = 0; 2910 i = 0;
2899 2911
@@ -3013,10 +3025,9 @@ again:
3013out: 3025out:
3014 ext4_ext_drop_refs(path); 3026 ext4_ext_drop_refs(path);
3015 kfree(path); 3027 kfree(path);
3016 if (err == -EAGAIN) { 3028 path = NULL;
3017 path = NULL; 3029 if (err == -EAGAIN)
3018 goto again; 3030 goto again;
3019 }
3020 ext4_journal_stop(handle); 3031 ext4_journal_stop(handle);
3021 3032
3022 return err; 3033 return err;
@@ -3130,11 +3141,12 @@ static int ext4_ext_zeroout(struct inode *inode, struct ext4_extent *ex)
3130 */ 3141 */
3131static int ext4_split_extent_at(handle_t *handle, 3142static int ext4_split_extent_at(handle_t *handle,
3132 struct inode *inode, 3143 struct inode *inode,
3133 struct ext4_ext_path *path, 3144 struct ext4_ext_path **ppath,
3134 ext4_lblk_t split, 3145 ext4_lblk_t split,
3135 int split_flag, 3146 int split_flag,
3136 int flags) 3147 int flags)
3137{ 3148{
3149 struct ext4_ext_path *path = *ppath;
3138 ext4_fsblk_t newblock; 3150 ext4_fsblk_t newblock;
3139 ext4_lblk_t ee_block; 3151 ext4_lblk_t ee_block;
3140 struct ext4_extent *ex, newex, orig_ex, zero_ex; 3152 struct ext4_extent *ex, newex, orig_ex, zero_ex;
@@ -3205,7 +3217,7 @@ static int ext4_split_extent_at(handle_t *handle,
3205 if (split_flag & EXT4_EXT_MARK_UNWRIT2) 3217 if (split_flag & EXT4_EXT_MARK_UNWRIT2)
3206 ext4_ext_mark_unwritten(ex2); 3218 ext4_ext_mark_unwritten(ex2);
3207 3219
3208 err = ext4_ext_insert_extent(handle, inode, path, &newex, flags); 3220 err = ext4_ext_insert_extent(handle, inode, ppath, &newex, flags);
3209 if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) { 3221 if (err == -ENOSPC && (EXT4_EXT_MAY_ZEROOUT & split_flag)) {
3210 if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) { 3222 if (split_flag & (EXT4_EXT_DATA_VALID1|EXT4_EXT_DATA_VALID2)) {
3211 if (split_flag & EXT4_EXT_DATA_VALID1) { 3223 if (split_flag & EXT4_EXT_DATA_VALID1) {
@@ -3271,11 +3283,12 @@ fix_extent_len:
3271 */ 3283 */
3272static int ext4_split_extent(handle_t *handle, 3284static int ext4_split_extent(handle_t *handle,
3273 struct inode *inode, 3285 struct inode *inode,
3274 struct ext4_ext_path *path, 3286 struct ext4_ext_path **ppath,
3275 struct ext4_map_blocks *map, 3287 struct ext4_map_blocks *map,
3276 int split_flag, 3288 int split_flag,
3277 int flags) 3289 int flags)
3278{ 3290{
3291 struct ext4_ext_path *path = *ppath;
3279 ext4_lblk_t ee_block; 3292 ext4_lblk_t ee_block;
3280 struct ext4_extent *ex; 3293 struct ext4_extent *ex;
3281 unsigned int ee_len, depth; 3294 unsigned int ee_len, depth;
@@ -3298,7 +3311,7 @@ static int ext4_split_extent(handle_t *handle,
3298 EXT4_EXT_MARK_UNWRIT2; 3311 EXT4_EXT_MARK_UNWRIT2;
3299 if (split_flag & EXT4_EXT_DATA_VALID2) 3312 if (split_flag & EXT4_EXT_DATA_VALID2)
3300 split_flag1 |= EXT4_EXT_DATA_VALID1; 3313 split_flag1 |= EXT4_EXT_DATA_VALID1;
3301 err = ext4_split_extent_at(handle, inode, path, 3314 err = ext4_split_extent_at(handle, inode, ppath,
3302 map->m_lblk + map->m_len, split_flag1, flags1); 3315 map->m_lblk + map->m_len, split_flag1, flags1);
3303 if (err) 3316 if (err)
3304 goto out; 3317 goto out;
@@ -3309,8 +3322,7 @@ static int ext4_split_extent(handle_t *handle,
3309 * Update path is required because previous ext4_split_extent_at() may 3322 * Update path is required because previous ext4_split_extent_at() may
3310 * result in split of original leaf or extent zeroout. 3323 * result in split of original leaf or extent zeroout.
3311 */ 3324 */
3312 ext4_ext_drop_refs(path); 3325 path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
3313 path = ext4_ext_find_extent(inode, map->m_lblk, path, 0);
3314 if (IS_ERR(path)) 3326 if (IS_ERR(path))
3315 return PTR_ERR(path); 3327 return PTR_ERR(path);
3316 depth = ext_depth(inode); 3328 depth = ext_depth(inode);
@@ -3330,7 +3342,7 @@ static int ext4_split_extent(handle_t *handle,
3330 split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT | 3342 split_flag1 |= split_flag & (EXT4_EXT_MAY_ZEROOUT |
3331 EXT4_EXT_MARK_UNWRIT2); 3343 EXT4_EXT_MARK_UNWRIT2);
3332 } 3344 }
3333 err = ext4_split_extent_at(handle, inode, path, 3345 err = ext4_split_extent_at(handle, inode, ppath,
3334 map->m_lblk, split_flag1, flags); 3346 map->m_lblk, split_flag1, flags);
3335 if (err) 3347 if (err)
3336 goto out; 3348 goto out;
@@ -3364,9 +3376,10 @@ out:
3364static int ext4_ext_convert_to_initialized(handle_t *handle, 3376static int ext4_ext_convert_to_initialized(handle_t *handle,
3365 struct inode *inode, 3377 struct inode *inode,
3366 struct ext4_map_blocks *map, 3378 struct ext4_map_blocks *map,
3367 struct ext4_ext_path *path, 3379 struct ext4_ext_path **ppath,
3368 int flags) 3380 int flags)
3369{ 3381{
3382 struct ext4_ext_path *path = *ppath;
3370 struct ext4_sb_info *sbi; 3383 struct ext4_sb_info *sbi;
3371 struct ext4_extent_header *eh; 3384 struct ext4_extent_header *eh;
3372 struct ext4_map_blocks split_map; 3385 struct ext4_map_blocks split_map;
@@ -3590,11 +3603,10 @@ static int ext4_ext_convert_to_initialized(handle_t *handle,
3590 } 3603 }
3591 } 3604 }
3592 3605
3593 allocated = ext4_split_extent(handle, inode, path, 3606 err = ext4_split_extent(handle, inode, ppath, &split_map, split_flag,
3594 &split_map, split_flag, flags); 3607 flags);
3595 if (allocated < 0) 3608 if (err > 0)
3596 err = allocated; 3609 err = 0;
3597
3598out: 3610out:
3599 /* If we have gotten a failure, don't zero out status tree */ 3611 /* If we have gotten a failure, don't zero out status tree */
3600 if (!err) 3612 if (!err)
@@ -3629,9 +3641,10 @@ out:
3629static int ext4_split_convert_extents(handle_t *handle, 3641static int ext4_split_convert_extents(handle_t *handle,
3630 struct inode *inode, 3642 struct inode *inode,
3631 struct ext4_map_blocks *map, 3643 struct ext4_map_blocks *map,
3632 struct ext4_ext_path *path, 3644 struct ext4_ext_path **ppath,
3633 int flags) 3645 int flags)
3634{ 3646{
3647 struct ext4_ext_path *path = *ppath;
3635 ext4_lblk_t eof_block; 3648 ext4_lblk_t eof_block;
3636 ext4_lblk_t ee_block; 3649 ext4_lblk_t ee_block;
3637 struct ext4_extent *ex; 3650 struct ext4_extent *ex;
@@ -3665,74 +3678,15 @@ static int ext4_split_convert_extents(handle_t *handle,
3665 split_flag |= (EXT4_EXT_MARK_UNWRIT2 | EXT4_EXT_DATA_VALID2); 3678 split_flag |= (EXT4_EXT_MARK_UNWRIT2 | EXT4_EXT_DATA_VALID2);
3666 } 3679 }
3667 flags |= EXT4_GET_BLOCKS_PRE_IO; 3680 flags |= EXT4_GET_BLOCKS_PRE_IO;
3668 return ext4_split_extent(handle, inode, path, map, split_flag, flags); 3681 return ext4_split_extent(handle, inode, ppath, map, split_flag, flags);
3669} 3682}
3670 3683
3671static int ext4_convert_initialized_extents(handle_t *handle,
3672 struct inode *inode,
3673 struct ext4_map_blocks *map,
3674 struct ext4_ext_path *path)
3675{
3676 struct ext4_extent *ex;
3677 ext4_lblk_t ee_block;
3678 unsigned int ee_len;
3679 int depth;
3680 int err = 0;
3681
3682 depth = ext_depth(inode);
3683 ex = path[depth].p_ext;
3684 ee_block = le32_to_cpu(ex->ee_block);
3685 ee_len = ext4_ext_get_actual_len(ex);
3686
3687 ext_debug("%s: inode %lu, logical"
3688 "block %llu, max_blocks %u\n", __func__, inode->i_ino,
3689 (unsigned long long)ee_block, ee_len);
3690
3691 if (ee_block != map->m_lblk || ee_len > map->m_len) {
3692 err = ext4_split_convert_extents(handle, inode, map, path,
3693 EXT4_GET_BLOCKS_CONVERT_UNWRITTEN);
3694 if (err < 0)
3695 goto out;
3696 ext4_ext_drop_refs(path);
3697 path = ext4_ext_find_extent(inode, map->m_lblk, path, 0);
3698 if (IS_ERR(path)) {
3699 err = PTR_ERR(path);
3700 goto out;
3701 }
3702 depth = ext_depth(inode);
3703 ex = path[depth].p_ext;
3704 if (!ex) {
3705 EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
3706 (unsigned long) map->m_lblk);
3707 err = -EIO;
3708 goto out;
3709 }
3710 }
3711
3712 err = ext4_ext_get_access(handle, inode, path + depth);
3713 if (err)
3714 goto out;
3715 /* first mark the extent as unwritten */
3716 ext4_ext_mark_unwritten(ex);
3717
3718 /* note: ext4_ext_correct_indexes() isn't needed here because
3719 * borders are not changed
3720 */
3721 ext4_ext_try_to_merge(handle, inode, path, ex);
3722
3723 /* Mark modified extent as dirty */
3724 err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3725out:
3726 ext4_ext_show_leaf(inode, path);
3727 return err;
3728}
3729
3730
3731static int ext4_convert_unwritten_extents_endio(handle_t *handle, 3684static int ext4_convert_unwritten_extents_endio(handle_t *handle,
3732 struct inode *inode, 3685 struct inode *inode,
3733 struct ext4_map_blocks *map, 3686 struct ext4_map_blocks *map,
3734 struct ext4_ext_path *path) 3687 struct ext4_ext_path **ppath)
3735{ 3688{
3689 struct ext4_ext_path *path = *ppath;
3736 struct ext4_extent *ex; 3690 struct ext4_extent *ex;
3737 ext4_lblk_t ee_block; 3691 ext4_lblk_t ee_block;
3738 unsigned int ee_len; 3692 unsigned int ee_len;
@@ -3761,16 +3715,13 @@ static int ext4_convert_unwritten_extents_endio(handle_t *handle,
3761 inode->i_ino, (unsigned long long)ee_block, ee_len, 3715 inode->i_ino, (unsigned long long)ee_block, ee_len,
3762 (unsigned long long)map->m_lblk, map->m_len); 3716 (unsigned long long)map->m_lblk, map->m_len);
3763#endif 3717#endif
3764 err = ext4_split_convert_extents(handle, inode, map, path, 3718 err = ext4_split_convert_extents(handle, inode, map, ppath,
3765 EXT4_GET_BLOCKS_CONVERT); 3719 EXT4_GET_BLOCKS_CONVERT);
3766 if (err < 0) 3720 if (err < 0)
3767 goto out; 3721 return err;
3768 ext4_ext_drop_refs(path); 3722 path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
3769 path = ext4_ext_find_extent(inode, map->m_lblk, path, 0); 3723 if (IS_ERR(path))
3770 if (IS_ERR(path)) { 3724 return PTR_ERR(path);
3771 err = PTR_ERR(path);
3772 goto out;
3773 }
3774 depth = ext_depth(inode); 3725 depth = ext_depth(inode);
3775 ex = path[depth].p_ext; 3726 ex = path[depth].p_ext;
3776 } 3727 }
@@ -3963,12 +3914,16 @@ get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start,
3963} 3914}
3964 3915
3965static int 3916static int
3966ext4_ext_convert_initialized_extent(handle_t *handle, struct inode *inode, 3917convert_initialized_extent(handle_t *handle, struct inode *inode,
3967 struct ext4_map_blocks *map, 3918 struct ext4_map_blocks *map,
3968 struct ext4_ext_path *path, int flags, 3919 struct ext4_ext_path **ppath, int flags,
3969 unsigned int allocated, ext4_fsblk_t newblock) 3920 unsigned int allocated, ext4_fsblk_t newblock)
3970{ 3921{
3971 int ret = 0; 3922 struct ext4_ext_path *path = *ppath;
3923 struct ext4_extent *ex;
3924 ext4_lblk_t ee_block;
3925 unsigned int ee_len;
3926 int depth;
3972 int err = 0; 3927 int err = 0;
3973 3928
3974 /* 3929 /*
@@ -3978,28 +3933,67 @@ ext4_ext_convert_initialized_extent(handle_t *handle, struct inode *inode,
3978 if (map->m_len > EXT_UNWRITTEN_MAX_LEN) 3933 if (map->m_len > EXT_UNWRITTEN_MAX_LEN)
3979 map->m_len = EXT_UNWRITTEN_MAX_LEN / 2; 3934 map->m_len = EXT_UNWRITTEN_MAX_LEN / 2;
3980 3935
3981 ret = ext4_convert_initialized_extents(handle, inode, map, 3936 depth = ext_depth(inode);
3982 path); 3937 ex = path[depth].p_ext;
3983 if (ret >= 0) { 3938 ee_block = le32_to_cpu(ex->ee_block);
3984 ext4_update_inode_fsync_trans(handle, inode, 1); 3939 ee_len = ext4_ext_get_actual_len(ex);
3985 err = check_eofblocks_fl(handle, inode, map->m_lblk, 3940
3986 path, map->m_len); 3941 ext_debug("%s: inode %lu, logical"
3987 } else 3942 "block %llu, max_blocks %u\n", __func__, inode->i_ino,
3988 err = ret; 3943 (unsigned long long)ee_block, ee_len);
3944
3945 if (ee_block != map->m_lblk || ee_len > map->m_len) {
3946 err = ext4_split_convert_extents(handle, inode, map, ppath,
3947 EXT4_GET_BLOCKS_CONVERT_UNWRITTEN);
3948 if (err < 0)
3949 return err;
3950 path = ext4_find_extent(inode, map->m_lblk, ppath, 0);
3951 if (IS_ERR(path))
3952 return PTR_ERR(path);
3953 depth = ext_depth(inode);
3954 ex = path[depth].p_ext;
3955 if (!ex) {
3956 EXT4_ERROR_INODE(inode, "unexpected hole at %lu",
3957 (unsigned long) map->m_lblk);
3958 return -EIO;
3959 }
3960 }
3961
3962 err = ext4_ext_get_access(handle, inode, path + depth);
3963 if (err)
3964 return err;
3965 /* first mark the extent as unwritten */
3966 ext4_ext_mark_unwritten(ex);
3967
3968 /* note: ext4_ext_correct_indexes() isn't needed here because
3969 * borders are not changed
3970 */
3971 ext4_ext_try_to_merge(handle, inode, path, ex);
3972
3973 /* Mark modified extent as dirty */
3974 err = ext4_ext_dirty(handle, inode, path + path->p_depth);
3975 if (err)
3976 return err;
3977 ext4_ext_show_leaf(inode, path);
3978
3979 ext4_update_inode_fsync_trans(handle, inode, 1);
3980 err = check_eofblocks_fl(handle, inode, map->m_lblk, path, map->m_len);
3981 if (err)
3982 return err;
3989 map->m_flags |= EXT4_MAP_UNWRITTEN; 3983 map->m_flags |= EXT4_MAP_UNWRITTEN;
3990 if (allocated > map->m_len) 3984 if (allocated > map->m_len)
3991 allocated = map->m_len; 3985 allocated = map->m_len;
3992 map->m_len = allocated; 3986 map->m_len = allocated;
3993 3987 return allocated;
3994 return err ? err : allocated;
3995} 3988}
3996 3989
3997static int 3990static int
3998ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode, 3991ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
3999 struct ext4_map_blocks *map, 3992 struct ext4_map_blocks *map,
4000 struct ext4_ext_path *path, int flags, 3993 struct ext4_ext_path **ppath, int flags,
4001 unsigned int allocated, ext4_fsblk_t newblock) 3994 unsigned int allocated, ext4_fsblk_t newblock)
4002{ 3995{
3996 struct ext4_ext_path *path = *ppath;
4003 int ret = 0; 3997 int ret = 0;
4004 int err = 0; 3998 int err = 0;
4005 ext4_io_end_t *io = ext4_inode_aio(inode); 3999 ext4_io_end_t *io = ext4_inode_aio(inode);
@@ -4021,8 +4015,8 @@ ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
4021 4015
4022 /* get_block() before submit the IO, split the extent */ 4016 /* get_block() before submit the IO, split the extent */
4023 if (flags & EXT4_GET_BLOCKS_PRE_IO) { 4017 if (flags & EXT4_GET_BLOCKS_PRE_IO) {
4024 ret = ext4_split_convert_extents(handle, inode, map, 4018 ret = ext4_split_convert_extents(handle, inode, map, ppath,
4025 path, flags | EXT4_GET_BLOCKS_CONVERT); 4019 flags | EXT4_GET_BLOCKS_CONVERT);
4026 if (ret <= 0) 4020 if (ret <= 0)
4027 goto out; 4021 goto out;
4028 /* 4022 /*
@@ -4040,7 +4034,7 @@ ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
4040 /* IO end_io complete, convert the filled extent to written */ 4034 /* IO end_io complete, convert the filled extent to written */
4041 if (flags & EXT4_GET_BLOCKS_CONVERT) { 4035 if (flags & EXT4_GET_BLOCKS_CONVERT) {
4042 ret = ext4_convert_unwritten_extents_endio(handle, inode, map, 4036 ret = ext4_convert_unwritten_extents_endio(handle, inode, map,
4043 path); 4037 ppath);
4044 if (ret >= 0) { 4038 if (ret >= 0) {
4045 ext4_update_inode_fsync_trans(handle, inode, 1); 4039 ext4_update_inode_fsync_trans(handle, inode, 1);
4046 err = check_eofblocks_fl(handle, inode, map->m_lblk, 4040 err = check_eofblocks_fl(handle, inode, map->m_lblk,
@@ -4078,7 +4072,7 @@ ext4_ext_handle_unwritten_extents(handle_t *handle, struct inode *inode,
4078 } 4072 }
4079 4073
4080 /* buffered write, writepage time, convert*/ 4074 /* buffered write, writepage time, convert*/
4081 ret = ext4_ext_convert_to_initialized(handle, inode, map, path, flags); 4075 ret = ext4_ext_convert_to_initialized(handle, inode, map, ppath, flags);
4082 if (ret >= 0) 4076 if (ret >= 0)
4083 ext4_update_inode_fsync_trans(handle, inode, 1); 4077 ext4_update_inode_fsync_trans(handle, inode, 1);
4084out: 4078out:
@@ -4279,7 +4273,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
4279 trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags); 4273 trace_ext4_ext_map_blocks_enter(inode, map->m_lblk, map->m_len, flags);
4280 4274
4281 /* find extent for this block */ 4275 /* find extent for this block */
4282 path = ext4_ext_find_extent(inode, map->m_lblk, NULL, 0); 4276 path = ext4_find_extent(inode, map->m_lblk, NULL, 0);
4283 if (IS_ERR(path)) { 4277 if (IS_ERR(path)) {
4284 err = PTR_ERR(path); 4278 err = PTR_ERR(path);
4285 path = NULL; 4279 path = NULL;
@@ -4291,7 +4285,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
4291 /* 4285 /*
4292 * consistent leaf must not be empty; 4286 * consistent leaf must not be empty;
4293 * this situation is possible, though, _during_ tree modification; 4287 * this situation is possible, though, _during_ tree modification;
4294 * this is why assert can't be put in ext4_ext_find_extent() 4288 * this is why assert can't be put in ext4_find_extent()
4295 */ 4289 */
4296 if (unlikely(path[depth].p_ext == NULL && depth != 0)) { 4290 if (unlikely(path[depth].p_ext == NULL && depth != 0)) {
4297 EXT4_ERROR_INODE(inode, "bad extent address " 4291 EXT4_ERROR_INODE(inode, "bad extent address "
@@ -4331,15 +4325,15 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
4331 */ 4325 */
4332 if ((!ext4_ext_is_unwritten(ex)) && 4326 if ((!ext4_ext_is_unwritten(ex)) &&
4333 (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) { 4327 (flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) {
4334 allocated = ext4_ext_convert_initialized_extent( 4328 allocated = convert_initialized_extent(
4335 handle, inode, map, path, flags, 4329 handle, inode, map, &path,
4336 allocated, newblock); 4330 flags, allocated, newblock);
4337 goto out2; 4331 goto out2;
4338 } else if (!ext4_ext_is_unwritten(ex)) 4332 } else if (!ext4_ext_is_unwritten(ex))
4339 goto out; 4333 goto out;
4340 4334
4341 ret = ext4_ext_handle_unwritten_extents( 4335 ret = ext4_ext_handle_unwritten_extents(
4342 handle, inode, map, path, flags, 4336 handle, inode, map, &path, flags,
4343 allocated, newblock); 4337 allocated, newblock);
4344 if (ret < 0) 4338 if (ret < 0)
4345 err = ret; 4339 err = ret;
@@ -4376,7 +4370,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
4376 4370
4377 /* 4371 /*
4378 * If we are doing bigalloc, check to see if the extent returned 4372 * If we are doing bigalloc, check to see if the extent returned
4379 * by ext4_ext_find_extent() implies a cluster we can use. 4373 * by ext4_find_extent() implies a cluster we can use.
4380 */ 4374 */
4381 if (cluster_offset && ex && 4375 if (cluster_offset && ex &&
4382 get_implied_cluster_alloc(inode->i_sb, map, ex, path)) { 4376 get_implied_cluster_alloc(inode->i_sb, map, ex, path)) {
@@ -4451,6 +4445,8 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode,
4451 ar.flags = 0; 4445 ar.flags = 0;
4452 if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE) 4446 if (flags & EXT4_GET_BLOCKS_NO_NORMALIZE)
4453 ar.flags |= EXT4_MB_HINT_NOPREALLOC; 4447 ar.flags |= EXT4_MB_HINT_NOPREALLOC;
4448 if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
4449 ar.flags |= EXT4_MB_DELALLOC_RESERVED;
4454 newblock = ext4_mb_new_blocks(handle, &ar, &err); 4450 newblock = ext4_mb_new_blocks(handle, &ar, &err);
4455 if (!newblock) 4451 if (!newblock)
4456 goto out2; 4452 goto out2;
@@ -4486,7 +4482,7 @@ got_allocated_blocks:
4486 err = check_eofblocks_fl(handle, inode, map->m_lblk, 4482 err = check_eofblocks_fl(handle, inode, map->m_lblk,
4487 path, ar.len); 4483 path, ar.len);
4488 if (!err) 4484 if (!err)
4489 err = ext4_ext_insert_extent(handle, inode, path, 4485 err = ext4_ext_insert_extent(handle, inode, &path,
4490 &newex, flags); 4486 &newex, flags);
4491 4487
4492 if (!err && set_unwritten) { 4488 if (!err && set_unwritten) {
@@ -4619,10 +4615,8 @@ out:
4619 map->m_pblk = newblock; 4615 map->m_pblk = newblock;
4620 map->m_len = allocated; 4616 map->m_len = allocated;
4621out2: 4617out2:
4622 if (path) { 4618 ext4_ext_drop_refs(path);
4623 ext4_ext_drop_refs(path); 4619 kfree(path);
4624 kfree(path);
4625 }
4626 4620
4627 trace_ext4_ext_map_blocks_exit(inode, flags, map, 4621 trace_ext4_ext_map_blocks_exit(inode, flags, map,
4628 err ? err : allocated); 4622 err ? err : allocated);
@@ -4799,7 +4793,8 @@ static long ext4_zero_range(struct file *file, loff_t offset,
4799 max_blocks -= lblk; 4793 max_blocks -= lblk;
4800 4794
4801 flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT | 4795 flags = EXT4_GET_BLOCKS_CREATE_UNWRIT_EXT |
4802 EXT4_GET_BLOCKS_CONVERT_UNWRITTEN; 4796 EXT4_GET_BLOCKS_CONVERT_UNWRITTEN |
4797 EXT4_EX_NOCACHE;
4803 if (mode & FALLOC_FL_KEEP_SIZE) 4798 if (mode & FALLOC_FL_KEEP_SIZE)
4804 flags |= EXT4_GET_BLOCKS_KEEP_SIZE; 4799 flags |= EXT4_GET_BLOCKS_KEEP_SIZE;
4805 4800
@@ -4837,15 +4832,21 @@ static long ext4_zero_range(struct file *file, loff_t offset,
4837 ext4_inode_block_unlocked_dio(inode); 4832 ext4_inode_block_unlocked_dio(inode);
4838 inode_dio_wait(inode); 4833 inode_dio_wait(inode);
4839 4834
4835 ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
4836 flags, mode);
4837 if (ret)
4838 goto out_dio;
4840 /* 4839 /*
4841 * Remove entire range from the extent status tree. 4840 * Remove entire range from the extent status tree.
4841 *
4842 * ext4_es_remove_extent(inode, lblk, max_blocks) is
4843 * NOT sufficient. I'm not sure why this is the case,
4844 * but let's be conservative and remove the extent
4845 * status tree for the entire inode. There should be
4846 * no outstanding delalloc extents thanks to the
4847 * filemap_write_and_wait_range() call above.
4842 */ 4848 */
4843 ret = ext4_es_remove_extent(inode, lblk, max_blocks); 4849 ret = ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
4844 if (ret)
4845 goto out_dio;
4846
4847 ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
4848 flags, mode);
4849 if (ret) 4850 if (ret)
4850 goto out_dio; 4851 goto out_dio;
4851 } 4852 }
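
Net effect of the ext4_zero_range() hunk above: the unwritten-extent allocation now happens first, and only afterwards is the extent status cache dropped, for the whole inode rather than just the zeroed range, as the new comment explains. Condensed, the new sequence is:

        ret = ext4_alloc_file_blocks(file, lblk, max_blocks, new_size,
                                     flags, mode);
        if (ret)
                goto out_dio;
        /* conservative: purge the inode's entire extent status tree */
        ret = ext4_es_remove_extent(inode, 0, EXT_MAX_BLOCKS);
        if (ret)
                goto out_dio;
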
@@ -5304,36 +5305,31 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
5304 struct ext4_ext_path *path; 5305 struct ext4_ext_path *path;
5305 int ret = 0, depth; 5306 int ret = 0, depth;
5306 struct ext4_extent *extent; 5307 struct ext4_extent *extent;
5307 ext4_lblk_t stop_block, current_block; 5308 ext4_lblk_t stop_block;
5308 ext4_lblk_t ex_start, ex_end; 5309 ext4_lblk_t ex_start, ex_end;
5309 5310
5310 /* Let path point to the last extent */ 5311 /* Let path point to the last extent */
5311 path = ext4_ext_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL, 0); 5312 path = ext4_find_extent(inode, EXT_MAX_BLOCKS - 1, NULL, 0);
5312 if (IS_ERR(path)) 5313 if (IS_ERR(path))
5313 return PTR_ERR(path); 5314 return PTR_ERR(path);
5314 5315
5315 depth = path->p_depth; 5316 depth = path->p_depth;
5316 extent = path[depth].p_ext; 5317 extent = path[depth].p_ext;
5317 if (!extent) { 5318 if (!extent)
5318 ext4_ext_drop_refs(path); 5319 goto out;
5319 kfree(path);
5320 return ret;
5321 }
5322 5320
5323 stop_block = le32_to_cpu(extent->ee_block) + 5321 stop_block = le32_to_cpu(extent->ee_block) +
5324 ext4_ext_get_actual_len(extent); 5322 ext4_ext_get_actual_len(extent);
5325 ext4_ext_drop_refs(path);
5326 kfree(path);
5327 5323
5328 /* Nothing to shift, if hole is at the end of file */ 5324 /* Nothing to shift, if hole is at the end of file */
5329 if (start >= stop_block) 5325 if (start >= stop_block)
5330 return ret; 5326 goto out;
5331 5327
5332 /* 5328 /*
5333 * Don't start shifting extents until we make sure the hole is big 5329 * Don't start shifting extents until we make sure the hole is big
 5334 * enough to accommodate the shift. 5330 * enough to accommodate the shift.
5335 */ 5331 */
5336 path = ext4_ext_find_extent(inode, start - 1, NULL, 0); 5332 path = ext4_find_extent(inode, start - 1, &path, 0);
5337 if (IS_ERR(path)) 5333 if (IS_ERR(path))
5338 return PTR_ERR(path); 5334 return PTR_ERR(path);
5339 depth = path->p_depth; 5335 depth = path->p_depth;
@@ -5346,8 +5342,6 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
5346 ex_start = 0; 5342 ex_start = 0;
5347 ex_end = 0; 5343 ex_end = 0;
5348 } 5344 }
5349 ext4_ext_drop_refs(path);
5350 kfree(path);
5351 5345
5352 if ((start == ex_start && shift > ex_start) || 5346 if ((start == ex_start && shift > ex_start) ||
5353 (shift > start - ex_end)) 5347 (shift > start - ex_end))
@@ -5355,7 +5349,7 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
5355 5349
 5356 /* It's safe to start updating extents */ 5350 /* It's safe to start updating extents */
5357 while (start < stop_block) { 5351 while (start < stop_block) {
5358 path = ext4_ext_find_extent(inode, start, NULL, 0); 5352 path = ext4_find_extent(inode, start, &path, 0);
5359 if (IS_ERR(path)) 5353 if (IS_ERR(path))
5360 return PTR_ERR(path); 5354 return PTR_ERR(path);
5361 depth = path->p_depth; 5355 depth = path->p_depth;
@@ -5365,27 +5359,23 @@ ext4_ext_shift_extents(struct inode *inode, handle_t *handle,
5365 (unsigned long) start); 5359 (unsigned long) start);
5366 return -EIO; 5360 return -EIO;
5367 } 5361 }
5368 5362 if (start > le32_to_cpu(extent->ee_block)) {
5369 current_block = le32_to_cpu(extent->ee_block);
5370 if (start > current_block) {
5371 /* Hole, move to the next extent */ 5363 /* Hole, move to the next extent */
5372 ret = mext_next_extent(inode, path, &extent); 5364 if (extent < EXT_LAST_EXTENT(path[depth].p_hdr)) {
5373 if (ret != 0) { 5365 path[depth].p_ext++;
5374 ext4_ext_drop_refs(path); 5366 } else {
5375 kfree(path); 5367 start = ext4_ext_next_allocated_block(path);
5376 if (ret == 1) 5368 continue;
5377 ret = 0;
5378 break;
5379 } 5369 }
5380 } 5370 }
5381 ret = ext4_ext_shift_path_extents(path, shift, inode, 5371 ret = ext4_ext_shift_path_extents(path, shift, inode,
5382 handle, &start); 5372 handle, &start);
5383 ext4_ext_drop_refs(path);
5384 kfree(path);
5385 if (ret) 5373 if (ret)
5386 break; 5374 break;
5387 } 5375 }
5388 5376out:
5377 ext4_ext_drop_refs(path);
5378 kfree(path);
5389 return ret; 5379 return ret;
5390} 5380}
5391 5381
@@ -5508,3 +5498,199 @@ out_mutex:
5508 mutex_unlock(&inode->i_mutex); 5498 mutex_unlock(&inode->i_mutex);
5509 return ret; 5499 return ret;
5510} 5500}
5501
5502/**
5503 * ext4_swap_extents - Swap extents between two inodes
5504 *
5505 * @inode1: First inode
5506 * @inode2: Second inode
5507 * @lblk1: Start block for first inode
5508 * @lblk2: Start block for second inode
5509 * @count: Number of blocks to swap
5510 * @mark_unwritten: Mark second inode's extents as unwritten after swap
5511 * @erp: Pointer to save error value
5512 *
 5513 * This helper routine does exactly what it promises: "swap extents". All other
 5514 * stuff, such as page-cache locking consistency, bh mapping consistency and
 5515 * extent data copying, must be performed by the caller.
5516 * Locking:
5517 * i_mutex is held for both inodes
5518 * i_data_sem is locked for write for both inodes
5519 * Assumptions:
5520 * All pages from requested range are locked for both inodes
5521 */
5522int
5523ext4_swap_extents(handle_t *handle, struct inode *inode1,
5524 struct inode *inode2, ext4_lblk_t lblk1, ext4_lblk_t lblk2,
5525 ext4_lblk_t count, int unwritten, int *erp)
5526{
5527 struct ext4_ext_path *path1 = NULL;
5528 struct ext4_ext_path *path2 = NULL;
5529 int replaced_count = 0;
5530
5531 BUG_ON(!rwsem_is_locked(&EXT4_I(inode1)->i_data_sem));
5532 BUG_ON(!rwsem_is_locked(&EXT4_I(inode2)->i_data_sem));
5533 BUG_ON(!mutex_is_locked(&inode1->i_mutex));
 5534 BUG_ON(!mutex_is_locked(&inode2->i_mutex));
5535
5536 *erp = ext4_es_remove_extent(inode1, lblk1, count);
5537 if (unlikely(*erp))
5538 return 0;
5539 *erp = ext4_es_remove_extent(inode2, lblk2, count);
5540 if (unlikely(*erp))
5541 return 0;
5542
5543 while (count) {
5544 struct ext4_extent *ex1, *ex2, tmp_ex;
5545 ext4_lblk_t e1_blk, e2_blk;
5546 int e1_len, e2_len, len;
5547 int split = 0;
5548
5549 path1 = ext4_find_extent(inode1, lblk1, NULL, EXT4_EX_NOCACHE);
5550 if (unlikely(IS_ERR(path1))) {
5551 *erp = PTR_ERR(path1);
5552 path1 = NULL;
5553 finish:
5554 count = 0;
5555 goto repeat;
5556 }
5557 path2 = ext4_find_extent(inode2, lblk2, NULL, EXT4_EX_NOCACHE);
5558 if (unlikely(IS_ERR(path2))) {
5559 *erp = PTR_ERR(path2);
5560 path2 = NULL;
5561 goto finish;
5562 }
5563 ex1 = path1[path1->p_depth].p_ext;
5564 ex2 = path2[path2->p_depth].p_ext;
 5565 /* Do we have something to swap? */
5566 if (unlikely(!ex2 || !ex1))
5567 goto finish;
5568
5569 e1_blk = le32_to_cpu(ex1->ee_block);
5570 e2_blk = le32_to_cpu(ex2->ee_block);
5571 e1_len = ext4_ext_get_actual_len(ex1);
5572 e2_len = ext4_ext_get_actual_len(ex2);
5573
5574 /* Hole handling */
5575 if (!in_range(lblk1, e1_blk, e1_len) ||
5576 !in_range(lblk2, e2_blk, e2_len)) {
5577 ext4_lblk_t next1, next2;
5578
5579 /* if hole after extent, then go to next extent */
5580 next1 = ext4_ext_next_allocated_block(path1);
5581 next2 = ext4_ext_next_allocated_block(path2);
5582 /* If hole before extent, then shift to that extent */
5583 if (e1_blk > lblk1)
5584 next1 = e1_blk;
5585 if (e2_blk > lblk2)
 5586 next2 = e2_blk;
5587 /* Do we have something to swap */
5588 if (next1 == EXT_MAX_BLOCKS || next2 == EXT_MAX_BLOCKS)
5589 goto finish;
 5590 /* Move to the rightmost boundary */
5591 len = next1 - lblk1;
5592 if (len < next2 - lblk2)
5593 len = next2 - lblk2;
5594 if (len > count)
5595 len = count;
5596 lblk1 += len;
5597 lblk2 += len;
5598 count -= len;
5599 goto repeat;
5600 }
5601
5602 /* Prepare left boundary */
5603 if (e1_blk < lblk1) {
5604 split = 1;
5605 *erp = ext4_force_split_extent_at(handle, inode1,
5606 &path1, lblk1, 0);
5607 if (unlikely(*erp))
5608 goto finish;
5609 }
5610 if (e2_blk < lblk2) {
5611 split = 1;
5612 *erp = ext4_force_split_extent_at(handle, inode2,
5613 &path2, lblk2, 0);
5614 if (unlikely(*erp))
5615 goto finish;
5616 }
 5617 /* ext4_split_extent_at() may result in leaf extent split,
 5618 * path must be revalidated. */
5619 if (split)
5620 goto repeat;
5621
5622 /* Prepare right boundary */
5623 len = count;
5624 if (len > e1_blk + e1_len - lblk1)
5625 len = e1_blk + e1_len - lblk1;
5626 if (len > e2_blk + e2_len - lblk2)
5627 len = e2_blk + e2_len - lblk2;
5628
5629 if (len != e1_len) {
5630 split = 1;
5631 *erp = ext4_force_split_extent_at(handle, inode1,
5632 &path1, lblk1 + len, 0);
5633 if (unlikely(*erp))
5634 goto finish;
5635 }
5636 if (len != e2_len) {
5637 split = 1;
5638 *erp = ext4_force_split_extent_at(handle, inode2,
5639 &path2, lblk2 + len, 0);
5640 if (*erp)
5641 goto finish;
5642 }
 5643 /* ext4_split_extent_at() may result in leaf extent split,
 5644 * path must be revalidated. */
5645 if (split)
5646 goto repeat;
5647
5648 BUG_ON(e2_len != e1_len);
5649 *erp = ext4_ext_get_access(handle, inode1, path1 + path1->p_depth);
5650 if (unlikely(*erp))
5651 goto finish;
5652 *erp = ext4_ext_get_access(handle, inode2, path2 + path2->p_depth);
5653 if (unlikely(*erp))
5654 goto finish;
5655
5656 /* Both extents are fully inside boundaries. Swap it now */
5657 tmp_ex = *ex1;
5658 ext4_ext_store_pblock(ex1, ext4_ext_pblock(ex2));
5659 ext4_ext_store_pblock(ex2, ext4_ext_pblock(&tmp_ex));
5660 ex1->ee_len = cpu_to_le16(e2_len);
5661 ex2->ee_len = cpu_to_le16(e1_len);
5662 if (unwritten)
5663 ext4_ext_mark_unwritten(ex2);
5664 if (ext4_ext_is_unwritten(&tmp_ex))
5665 ext4_ext_mark_unwritten(ex1);
5666
5667 ext4_ext_try_to_merge(handle, inode2, path2, ex2);
5668 ext4_ext_try_to_merge(handle, inode1, path1, ex1);
5669 *erp = ext4_ext_dirty(handle, inode2, path2 +
5670 path2->p_depth);
5671 if (unlikely(*erp))
5672 goto finish;
5673 *erp = ext4_ext_dirty(handle, inode1, path1 +
5674 path1->p_depth);
5675 /*
 5676 * Looks scary, eh? The second inode already points to the new blocks,
 5677 * and it was successfully dirtied. Fortunately, an error here can
 5678 * only come from a journal error, in which case the whole transaction
 5679 * will be aborted anyway.
5680 */
5681 if (unlikely(*erp))
5682 goto finish;
5683 lblk1 += len;
5684 lblk2 += len;
5685 replaced_count += len;
5686 count -= len;
5687
5688 repeat:
5689 ext4_ext_drop_refs(path1);
5690 kfree(path1);
5691 ext4_ext_drop_refs(path2);
5692 kfree(path2);
5693 path1 = path2 = NULL;
5694 }
5695 return replaced_count;
5696}
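
ext4_swap_extents() returns the number of blocks actually swapped and reports failures through *erp, and per its header comment the caller must already hold i_mutex on both inodes, have the affected page range locked, and take both i_data_sem for writing. A hedged caller sketch using the double-lock helpers declared for move_extent.c in this patch (handle, lblk1/lblk2 and count are assumed to be set up by the caller):

        int err = 0;
        int swapped;

        ext4_double_down_write_data_sem(inode1, inode2);
        swapped = ext4_swap_extents(handle, inode1, inode2,
                                    lblk1, lblk2, count,
                                    1 /* mark_unwritten */, &err);
        ext4_double_up_write_data_sem(inode1, inode2);
        if (err)
                return err;
        /* 'swapped' blocks were exchanged; a caller may loop on the rest */
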
diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
index 0b7e28e7eaa4..94e7855ae71b 100644
--- a/fs/ext4/extents_status.c
+++ b/fs/ext4/extents_status.c
@@ -11,6 +11,8 @@
11 */ 11 */
12#include <linux/rbtree.h> 12#include <linux/rbtree.h>
13#include <linux/list_sort.h> 13#include <linux/list_sort.h>
14#include <linux/proc_fs.h>
15#include <linux/seq_file.h>
14#include "ext4.h" 16#include "ext4.h"
15#include "extents_status.h" 17#include "extents_status.h"
16 18
@@ -313,19 +315,27 @@ ext4_es_alloc_extent(struct inode *inode, ext4_lblk_t lblk, ext4_lblk_t len,
313 */ 315 */
314 if (!ext4_es_is_delayed(es)) { 316 if (!ext4_es_is_delayed(es)) {
315 EXT4_I(inode)->i_es_lru_nr++; 317 EXT4_I(inode)->i_es_lru_nr++;
316 percpu_counter_inc(&EXT4_SB(inode->i_sb)->s_extent_cache_cnt); 318 percpu_counter_inc(&EXT4_SB(inode->i_sb)->
319 s_es_stats.es_stats_lru_cnt);
317 } 320 }
318 321
322 EXT4_I(inode)->i_es_all_nr++;
323 percpu_counter_inc(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt);
324
319 return es; 325 return es;
320} 326}
321 327
322static void ext4_es_free_extent(struct inode *inode, struct extent_status *es) 328static void ext4_es_free_extent(struct inode *inode, struct extent_status *es)
323{ 329{
330 EXT4_I(inode)->i_es_all_nr--;
331 percpu_counter_dec(&EXT4_SB(inode->i_sb)->s_es_stats.es_stats_all_cnt);
332
324 /* Decrease the lru counter when this es is not delayed */ 333 /* Decrease the lru counter when this es is not delayed */
325 if (!ext4_es_is_delayed(es)) { 334 if (!ext4_es_is_delayed(es)) {
326 BUG_ON(EXT4_I(inode)->i_es_lru_nr == 0); 335 BUG_ON(EXT4_I(inode)->i_es_lru_nr == 0);
327 EXT4_I(inode)->i_es_lru_nr--; 336 EXT4_I(inode)->i_es_lru_nr--;
328 percpu_counter_dec(&EXT4_SB(inode->i_sb)->s_extent_cache_cnt); 337 percpu_counter_dec(&EXT4_SB(inode->i_sb)->
338 s_es_stats.es_stats_lru_cnt);
329 } 339 }
330 340
331 kmem_cache_free(ext4_es_cachep, es); 341 kmem_cache_free(ext4_es_cachep, es);
@@ -426,7 +436,7 @@ static void ext4_es_insert_extent_ext_check(struct inode *inode,
426 unsigned short ee_len; 436 unsigned short ee_len;
427 int depth, ee_status, es_status; 437 int depth, ee_status, es_status;
428 438
429 path = ext4_ext_find_extent(inode, es->es_lblk, NULL, EXT4_EX_NOCACHE); 439 path = ext4_find_extent(inode, es->es_lblk, NULL, EXT4_EX_NOCACHE);
430 if (IS_ERR(path)) 440 if (IS_ERR(path))
431 return; 441 return;
432 442
@@ -499,10 +509,8 @@ static void ext4_es_insert_extent_ext_check(struct inode *inode,
499 } 509 }
500 } 510 }
501out: 511out:
502 if (path) { 512 ext4_ext_drop_refs(path);
503 ext4_ext_drop_refs(path); 513 kfree(path);
504 kfree(path);
505 }
506} 514}
507 515
508static void ext4_es_insert_extent_ind_check(struct inode *inode, 516static void ext4_es_insert_extent_ind_check(struct inode *inode,
@@ -731,6 +739,7 @@ int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
731 struct extent_status *es) 739 struct extent_status *es)
732{ 740{
733 struct ext4_es_tree *tree; 741 struct ext4_es_tree *tree;
742 struct ext4_es_stats *stats;
734 struct extent_status *es1 = NULL; 743 struct extent_status *es1 = NULL;
735 struct rb_node *node; 744 struct rb_node *node;
736 int found = 0; 745 int found = 0;
@@ -767,11 +776,15 @@ int ext4_es_lookup_extent(struct inode *inode, ext4_lblk_t lblk,
767 } 776 }
768 777
769out: 778out:
779 stats = &EXT4_SB(inode->i_sb)->s_es_stats;
770 if (found) { 780 if (found) {
771 BUG_ON(!es1); 781 BUG_ON(!es1);
772 es->es_lblk = es1->es_lblk; 782 es->es_lblk = es1->es_lblk;
773 es->es_len = es1->es_len; 783 es->es_len = es1->es_len;
774 es->es_pblk = es1->es_pblk; 784 es->es_pblk = es1->es_pblk;
785 stats->es_stats_cache_hits++;
786 } else {
787 stats->es_stats_cache_misses++;
775 } 788 }
776 789
777 read_unlock(&EXT4_I(inode)->i_es_lock); 790 read_unlock(&EXT4_I(inode)->i_es_lock);
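The lookup path now also counts how often the extent-status cache answers a query; the /proc file added further down reports the two tallies. A minimal sketch of the same hit/miss accounting around a lookup (the cache here is hypothetical, not the kernel structures):

    #include <stdbool.h>
    #include <stdio.h>

    struct stats { unsigned long hits, misses; };

    /* Pretend cache: even keys are "cached", odd keys are not. */
    static bool cache_lookup(unsigned key, unsigned *value)
    {
        if (key % 2 == 0) {
            *value = key * 10;
            return true;
        }
        return false;
    }

    static bool lookup_counted(unsigned key, unsigned *value, struct stats *st)
    {
        bool found = cache_lookup(key, value);

        if (found)
            st->hits++;
        else
            st->misses++;
        return found;
    }

    int main(void)
    {
        struct stats st = { 0, 0 };
        unsigned v;

        for (unsigned key = 0; key < 8; key++)
            lookup_counted(key, &v, &st);
        printf("%lu/%lu cache hits/misses\n", st.hits, st.misses);
        return 0;
    }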
@@ -933,11 +946,16 @@ static int __ext4_es_shrink(struct ext4_sb_info *sbi, int nr_to_scan,
933 struct ext4_inode_info *locked_ei) 946 struct ext4_inode_info *locked_ei)
934{ 947{
935 struct ext4_inode_info *ei; 948 struct ext4_inode_info *ei;
949 struct ext4_es_stats *es_stats;
936 struct list_head *cur, *tmp; 950 struct list_head *cur, *tmp;
937 LIST_HEAD(skipped); 951 LIST_HEAD(skipped);
952 ktime_t start_time;
953 u64 scan_time;
938 int nr_shrunk = 0; 954 int nr_shrunk = 0;
939 int retried = 0, skip_precached = 1, nr_skipped = 0; 955 int retried = 0, skip_precached = 1, nr_skipped = 0;
940 956
957 es_stats = &sbi->s_es_stats;
958 start_time = ktime_get();
941 spin_lock(&sbi->s_es_lru_lock); 959 spin_lock(&sbi->s_es_lru_lock);
942 960
943retry: 961retry:
@@ -948,7 +966,8 @@ retry:
948 * If we have already reclaimed all extents from extent 966 * If we have already reclaimed all extents from extent
949 * status tree, just stop the loop immediately. 967 * status tree, just stop the loop immediately.
950 */ 968 */
951 if (percpu_counter_read_positive(&sbi->s_extent_cache_cnt) == 0) 969 if (percpu_counter_read_positive(
970 &es_stats->es_stats_lru_cnt) == 0)
952 break; 971 break;
953 972
954 ei = list_entry(cur, struct ext4_inode_info, i_es_lru); 973 ei = list_entry(cur, struct ext4_inode_info, i_es_lru);
@@ -958,7 +977,7 @@ retry:
958 * time. Normally we try hard to avoid shrinking 977 * time. Normally we try hard to avoid shrinking
959 * precached inodes, but we will as a last resort. 978 * precached inodes, but we will as a last resort.
960 */ 979 */
961 if ((sbi->s_es_last_sorted < ei->i_touch_when) || 980 if ((es_stats->es_stats_last_sorted < ei->i_touch_when) ||
962 (skip_precached && ext4_test_inode_state(&ei->vfs_inode, 981 (skip_precached && ext4_test_inode_state(&ei->vfs_inode,
963 EXT4_STATE_EXT_PRECACHED))) { 982 EXT4_STATE_EXT_PRECACHED))) {
964 nr_skipped++; 983 nr_skipped++;
@@ -992,7 +1011,7 @@ retry:
992 if ((nr_shrunk == 0) && nr_skipped && !retried) { 1011 if ((nr_shrunk == 0) && nr_skipped && !retried) {
993 retried++; 1012 retried++;
994 list_sort(NULL, &sbi->s_es_lru, ext4_inode_touch_time_cmp); 1013 list_sort(NULL, &sbi->s_es_lru, ext4_inode_touch_time_cmp);
995 sbi->s_es_last_sorted = jiffies; 1014 es_stats->es_stats_last_sorted = jiffies;
996 ei = list_first_entry(&sbi->s_es_lru, struct ext4_inode_info, 1015 ei = list_first_entry(&sbi->s_es_lru, struct ext4_inode_info,
997 i_es_lru); 1016 i_es_lru);
998 /* 1017 /*
@@ -1010,6 +1029,22 @@ retry:
1010 if (locked_ei && nr_shrunk == 0) 1029 if (locked_ei && nr_shrunk == 0)
1011 nr_shrunk = __es_try_to_reclaim_extents(locked_ei, nr_to_scan); 1030 nr_shrunk = __es_try_to_reclaim_extents(locked_ei, nr_to_scan);
1012 1031
1032 scan_time = ktime_to_ns(ktime_sub(ktime_get(), start_time));
1033 if (likely(es_stats->es_stats_scan_time))
1034 es_stats->es_stats_scan_time = (scan_time +
1035 es_stats->es_stats_scan_time*3) / 4;
1036 else
1037 es_stats->es_stats_scan_time = scan_time;
1038 if (scan_time > es_stats->es_stats_max_scan_time)
1039 es_stats->es_stats_max_scan_time = scan_time;
1040 if (likely(es_stats->es_stats_shrunk))
1041 es_stats->es_stats_shrunk = (nr_shrunk +
1042 es_stats->es_stats_shrunk*3) / 4;
1043 else
1044 es_stats->es_stats_shrunk = nr_shrunk;
1045
1046 trace_ext4_es_shrink(sbi->s_sb, nr_shrunk, scan_time, skip_precached,
1047 nr_skipped, retried);
1013 return nr_shrunk; 1048 return nr_shrunk;
1014} 1049}
1015 1050
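The shrinker keeps smoothed values rather than raw samples: each pass folds the new scan time and the number of objects shrunk into a 1/4-weight exponential moving average, seeded with the first sample, while the maximum scan time is tracked separately. The integer arithmetic in isolation:

    #include <stdio.h>

    /* avg <- (sample + 3*avg) / 4, seeded with the first sample, mirroring
     * how es_stats_scan_time and es_stats_shrunk are updated above. */
    static unsigned long long ewma4(unsigned long long avg,
                                    unsigned long long sample)
    {
        return avg ? (sample + 3 * avg) / 4 : sample;
    }

    int main(void)
    {
        unsigned long long scan_time_avg = 0;
        unsigned long long samples_ns[] = { 800000, 1200000, 400000, 1000000 };

        for (int i = 0; i < 4; i++) {
            scan_time_avg = ewma4(scan_time_avg, samples_ns[i]);
            printf("sample %llu ns -> avg %llu ns\n",
                   samples_ns[i], scan_time_avg);
        }
        return 0;
    }

Smoothing this way keeps the reported average stable across bursty shrinker activity without storing a history of samples.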
@@ -1020,8 +1055,8 @@ static unsigned long ext4_es_count(struct shrinker *shrink,
1020 struct ext4_sb_info *sbi; 1055 struct ext4_sb_info *sbi;
1021 1056
1022 sbi = container_of(shrink, struct ext4_sb_info, s_es_shrinker); 1057 sbi = container_of(shrink, struct ext4_sb_info, s_es_shrinker);
1023 nr = percpu_counter_read_positive(&sbi->s_extent_cache_cnt); 1058 nr = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_lru_cnt);
1024 trace_ext4_es_shrink_enter(sbi->s_sb, sc->nr_to_scan, nr); 1059 trace_ext4_es_shrink_count(sbi->s_sb, sc->nr_to_scan, nr);
1025 return nr; 1060 return nr;
1026} 1061}
1027 1062
@@ -1033,31 +1068,160 @@ static unsigned long ext4_es_scan(struct shrinker *shrink,
1033 int nr_to_scan = sc->nr_to_scan; 1068 int nr_to_scan = sc->nr_to_scan;
1034 int ret, nr_shrunk; 1069 int ret, nr_shrunk;
1035 1070
1036 ret = percpu_counter_read_positive(&sbi->s_extent_cache_cnt); 1071 ret = percpu_counter_read_positive(&sbi->s_es_stats.es_stats_lru_cnt);
1037 trace_ext4_es_shrink_enter(sbi->s_sb, nr_to_scan, ret); 1072 trace_ext4_es_shrink_scan_enter(sbi->s_sb, nr_to_scan, ret);
1038 1073
1039 if (!nr_to_scan) 1074 if (!nr_to_scan)
1040 return ret; 1075 return ret;
1041 1076
1042 nr_shrunk = __ext4_es_shrink(sbi, nr_to_scan, NULL); 1077 nr_shrunk = __ext4_es_shrink(sbi, nr_to_scan, NULL);
1043 1078
1044 trace_ext4_es_shrink_exit(sbi->s_sb, nr_shrunk, ret); 1079 trace_ext4_es_shrink_scan_exit(sbi->s_sb, nr_shrunk, ret);
1045 return nr_shrunk; 1080 return nr_shrunk;
1046} 1081}
1047 1082
1048void ext4_es_register_shrinker(struct ext4_sb_info *sbi) 1083static void *ext4_es_seq_shrinker_info_start(struct seq_file *seq, loff_t *pos)
1049{ 1084{
1085 return *pos ? NULL : SEQ_START_TOKEN;
1086}
1087
1088static void *
1089ext4_es_seq_shrinker_info_next(struct seq_file *seq, void *v, loff_t *pos)
1090{
1091 return NULL;
1092}
1093
1094static int ext4_es_seq_shrinker_info_show(struct seq_file *seq, void *v)
1095{
1096 struct ext4_sb_info *sbi = seq->private;
1097 struct ext4_es_stats *es_stats = &sbi->s_es_stats;
1098 struct ext4_inode_info *ei, *max = NULL;
1099 unsigned int inode_cnt = 0;
1100
1101 if (v != SEQ_START_TOKEN)
1102 return 0;
1103
1104 /* here we just find an inode that has the max nr. of objects */
1105 spin_lock(&sbi->s_es_lru_lock);
1106 list_for_each_entry(ei, &sbi->s_es_lru, i_es_lru) {
1107 inode_cnt++;
1108 if (max && max->i_es_all_nr < ei->i_es_all_nr)
1109 max = ei;
1110 else if (!max)
1111 max = ei;
1112 }
1113 spin_unlock(&sbi->s_es_lru_lock);
1114
1115 seq_printf(seq, "stats:\n %lld objects\n %lld reclaimable objects\n",
1116 percpu_counter_sum_positive(&es_stats->es_stats_all_cnt),
1117 percpu_counter_sum_positive(&es_stats->es_stats_lru_cnt));
1118 seq_printf(seq, " %lu/%lu cache hits/misses\n",
1119 es_stats->es_stats_cache_hits,
1120 es_stats->es_stats_cache_misses);
1121 if (es_stats->es_stats_last_sorted != 0)
1122 seq_printf(seq, " %u ms last sorted interval\n",
1123 jiffies_to_msecs(jiffies -
1124 es_stats->es_stats_last_sorted));
1125 if (inode_cnt)
1126 seq_printf(seq, " %d inodes on lru list\n", inode_cnt);
1127
1128 seq_printf(seq, "average:\n %llu us scan time\n",
1129 div_u64(es_stats->es_stats_scan_time, 1000));
1130 seq_printf(seq, " %lu shrunk objects\n", es_stats->es_stats_shrunk);
1131 if (inode_cnt)
1132 seq_printf(seq,
1133 "maximum:\n %lu inode (%u objects, %u reclaimable)\n"
1134 " %llu us max scan time\n",
1135 max->vfs_inode.i_ino, max->i_es_all_nr, max->i_es_lru_nr,
1136 div_u64(es_stats->es_stats_max_scan_time, 1000));
1137
1138 return 0;
1139}
1140
1141static void ext4_es_seq_shrinker_info_stop(struct seq_file *seq, void *v)
1142{
1143}
1144
1145static const struct seq_operations ext4_es_seq_shrinker_info_ops = {
1146 .start = ext4_es_seq_shrinker_info_start,
1147 .next = ext4_es_seq_shrinker_info_next,
1148 .stop = ext4_es_seq_shrinker_info_stop,
1149 .show = ext4_es_seq_shrinker_info_show,
1150};
1151
1152static int
1153ext4_es_seq_shrinker_info_open(struct inode *inode, struct file *file)
1154{
1155 int ret;
1156
1157 ret = seq_open(file, &ext4_es_seq_shrinker_info_ops);
1158 if (!ret) {
1159 struct seq_file *m = file->private_data;
1160 m->private = PDE_DATA(inode);
1161 }
1162
1163 return ret;
1164}
1165
1166static int
1167ext4_es_seq_shrinker_info_release(struct inode *inode, struct file *file)
1168{
1169 return seq_release(inode, file);
1170}
1171
1172static const struct file_operations ext4_es_seq_shrinker_info_fops = {
1173 .owner = THIS_MODULE,
1174 .open = ext4_es_seq_shrinker_info_open,
1175 .read = seq_read,
1176 .llseek = seq_lseek,
1177 .release = ext4_es_seq_shrinker_info_release,
1178};
1179
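The new es_shrinker_info file is a one-record seq_file: start() hands back a single SEQ_START_TOKEN, next() ends the iteration, and show() walks the LRU once to find the busiest inode and prints the counters, converting nanosecond times to microseconds with div_u64(..., 1000). A user-space sketch of just the report formatting, with made-up numbers standing in for the real counters (the per-inode "maximum" and LRU lines are printed conditionally in the kernel version and are omitted here):

    #include <stdio.h>

    struct es_stats_sample {        /* illustrative mirror, not the kernel struct */
        long long objects, reclaimable;
        unsigned long hits, misses, shrunk;
        unsigned long long scan_time_ns, max_scan_time_ns;
    };

    int main(void)
    {
        struct es_stats_sample s = {
            .objects = 5120, .reclaimable = 4096,
            .hits = 90000, .misses = 1200, .shrunk = 128,
            .scan_time_ns = 750000, .max_scan_time_ns = 2100000,
        };

        printf("stats:\n  %lld objects\n  %lld reclaimable objects\n",
               s.objects, s.reclaimable);
        printf("  %lu/%lu cache hits/misses\n", s.hits, s.misses);
        printf("average:\n  %llu us scan time\n", s.scan_time_ns / 1000);
        printf("  %lu shrunk objects\n", s.shrunk);
        printf("maximum:\n  %llu us max scan time\n",
               s.max_scan_time_ns / 1000);
        return 0;
    }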
1180int ext4_es_register_shrinker(struct ext4_sb_info *sbi)
1181{
1182 int err;
1183
1050 INIT_LIST_HEAD(&sbi->s_es_lru); 1184 INIT_LIST_HEAD(&sbi->s_es_lru);
1051 spin_lock_init(&sbi->s_es_lru_lock); 1185 spin_lock_init(&sbi->s_es_lru_lock);
1052 sbi->s_es_last_sorted = 0; 1186 sbi->s_es_stats.es_stats_last_sorted = 0;
1187 sbi->s_es_stats.es_stats_shrunk = 0;
1188 sbi->s_es_stats.es_stats_cache_hits = 0;
1189 sbi->s_es_stats.es_stats_cache_misses = 0;
1190 sbi->s_es_stats.es_stats_scan_time = 0;
1191 sbi->s_es_stats.es_stats_max_scan_time = 0;
1192 err = percpu_counter_init(&sbi->s_es_stats.es_stats_all_cnt, 0, GFP_KERNEL);
1193 if (err)
1194 return err;
1195 err = percpu_counter_init(&sbi->s_es_stats.es_stats_lru_cnt, 0, GFP_KERNEL);
1196 if (err)
1197 goto err1;
1198
1053 sbi->s_es_shrinker.scan_objects = ext4_es_scan; 1199 sbi->s_es_shrinker.scan_objects = ext4_es_scan;
1054 sbi->s_es_shrinker.count_objects = ext4_es_count; 1200 sbi->s_es_shrinker.count_objects = ext4_es_count;
1055 sbi->s_es_shrinker.seeks = DEFAULT_SEEKS; 1201 sbi->s_es_shrinker.seeks = DEFAULT_SEEKS;
1056 register_shrinker(&sbi->s_es_shrinker); 1202 err = register_shrinker(&sbi->s_es_shrinker);
1203 if (err)
1204 goto err2;
1205
1206 if (sbi->s_proc)
1207 proc_create_data("es_shrinker_info", S_IRUGO, sbi->s_proc,
1208 &ext4_es_seq_shrinker_info_fops, sbi);
1209
1210 return 0;
1211
1212err2:
1213 percpu_counter_destroy(&sbi->s_es_stats.es_stats_lru_cnt);
1214err1:
1215 percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt);
1216 return err;
1057} 1217}
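ext4_es_register_shrinker() now has three steps that can fail (two percpu counters and the shrinker registration), and the error path tears down only what succeeded, in reverse order, with the usual goto-label pattern. A freestanding sketch of that idiom, with dummy setup/teardown steps:

    #include <stdio.h>

    static int step_a(void) { puts("a: init");  return 0; }
    static int step_b(void) { puts("b: init");  return 0; }
    static int step_c(void) { puts("c: init");  return -1; }  /* fails */
    static void undo_a(void) { puts("a: undo"); }
    static void undo_b(void) { puts("b: undo"); }

    /* Initialize a, then b, then c; on failure unwind only what succeeded,
     * in reverse order -- the same shape as the err1:/err2: labels above. */
    static int setup(void)
    {
        int err;

        err = step_a();
        if (err)
            return err;
        err = step_b();
        if (err)
            goto err_a;
        err = step_c();
        if (err)
            goto err_b;
        return 0;

    err_b:
        undo_b();
    err_a:
        undo_a();
        return err;
    }

    int main(void)
    {
        return setup() ? 1 : 0;
    }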
1058 1218
1059void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi) 1219void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi)
1060{ 1220{
1221 if (sbi->s_proc)
1222 remove_proc_entry("es_shrinker_info", sbi->s_proc);
1223 percpu_counter_destroy(&sbi->s_es_stats.es_stats_all_cnt);
1224 percpu_counter_destroy(&sbi->s_es_stats.es_stats_lru_cnt);
1061 unregister_shrinker(&sbi->s_es_shrinker); 1225 unregister_shrinker(&sbi->s_es_shrinker);
1062} 1226}
1063 1227
diff --git a/fs/ext4/extents_status.h b/fs/ext4/extents_status.h
index f1b62a419920..efd5f970b501 100644
--- a/fs/ext4/extents_status.h
+++ b/fs/ext4/extents_status.h
@@ -64,6 +64,17 @@ struct ext4_es_tree {
64 struct extent_status *cache_es; /* recently accessed extent */ 64 struct extent_status *cache_es; /* recently accessed extent */
65}; 65};
66 66
67struct ext4_es_stats {
68 unsigned long es_stats_last_sorted;
69 unsigned long es_stats_shrunk;
70 unsigned long es_stats_cache_hits;
71 unsigned long es_stats_cache_misses;
72 u64 es_stats_scan_time;
73 u64 es_stats_max_scan_time;
74 struct percpu_counter es_stats_all_cnt;
75 struct percpu_counter es_stats_lru_cnt;
76};
77
67extern int __init ext4_init_es(void); 78extern int __init ext4_init_es(void);
68extern void ext4_exit_es(void); 79extern void ext4_exit_es(void);
69extern void ext4_es_init_tree(struct ext4_es_tree *tree); 80extern void ext4_es_init_tree(struct ext4_es_tree *tree);
@@ -138,7 +149,7 @@ static inline void ext4_es_store_pblock_status(struct extent_status *es,
138 (pb & ~ES_MASK)); 149 (pb & ~ES_MASK));
139} 150}
140 151
141extern void ext4_es_register_shrinker(struct ext4_sb_info *sbi); 152extern int ext4_es_register_shrinker(struct ext4_sb_info *sbi);
142extern void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi); 153extern void ext4_es_unregister_shrinker(struct ext4_sb_info *sbi);
143extern void ext4_es_lru_add(struct inode *inode); 154extern void ext4_es_lru_add(struct inode *inode);
144extern void ext4_es_lru_del(struct inode *inode); 155extern void ext4_es_lru_del(struct inode *inode);
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index aca7b24a4432..8131be8c0af3 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -137,10 +137,10 @@ ext4_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
137 iov_iter_truncate(from, sbi->s_bitmap_maxbytes - pos); 137 iov_iter_truncate(from, sbi->s_bitmap_maxbytes - pos);
138 } 138 }
139 139
140 iocb->private = &overwrite;
140 if (o_direct) { 141 if (o_direct) {
141 blk_start_plug(&plug); 142 blk_start_plug(&plug);
142 143
143 iocb->private = &overwrite;
144 144
145 /* check whether we do a DIO overwrite or not */ 145 /* check whether we do a DIO overwrite or not */
146 if (ext4_should_dioread_nolock(inode) && !aio_mutex && 146 if (ext4_should_dioread_nolock(inode) && !aio_mutex &&
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 5b87fc36aab8..ac644c31ca67 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -887,6 +887,10 @@ got:
887 struct buffer_head *block_bitmap_bh; 887 struct buffer_head *block_bitmap_bh;
888 888
889 block_bitmap_bh = ext4_read_block_bitmap(sb, group); 889 block_bitmap_bh = ext4_read_block_bitmap(sb, group);
890 if (!block_bitmap_bh) {
891 err = -EIO;
892 goto out;
893 }
890 BUFFER_TRACE(block_bitmap_bh, "get block bitmap access"); 894 BUFFER_TRACE(block_bitmap_bh, "get block bitmap access");
891 err = ext4_journal_get_write_access(handle, block_bitmap_bh); 895 err = ext4_journal_get_write_access(handle, block_bitmap_bh);
892 if (err) { 896 if (err) {
@@ -1011,8 +1015,7 @@ got:
1011 spin_unlock(&sbi->s_next_gen_lock); 1015 spin_unlock(&sbi->s_next_gen_lock);
1012 1016
1013 /* Precompute checksum seed for inode metadata */ 1017 /* Precompute checksum seed for inode metadata */
1014 if (EXT4_HAS_RO_COMPAT_FEATURE(sb, 1018 if (ext4_has_metadata_csum(sb)) {
1015 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
1016 __u32 csum; 1019 __u32 csum;
1017 __le32 inum = cpu_to_le32(inode->i_ino); 1020 __le32 inum = cpu_to_le32(inode->i_ino);
1018 __le32 gen = cpu_to_le32(inode->i_generation); 1021 __le32 gen = cpu_to_le32(inode->i_generation);
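Several hunks in this patch replace the open-coded EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) test with an ext4_has_metadata_csum(sb) helper, which keeps every call site to a single line (the helper's definition is not part of these hunks). A generic sketch of wrapping a feature-bit test in a predicate; the flag values and struct below are invented for the example:

    #include <stdbool.h>
    #include <stdio.h>

    /* Invented feature bits for the sketch. */
    #define FEAT_METADATA_CSUM  (1u << 0)
    #define FEAT_LARGE_FILE     (1u << 1)

    struct super { unsigned int ro_compat; };

    /* One-line call sites instead of repeating the flag test everywhere. */
    static bool has_metadata_csum(const struct super *sb)
    {
        return (sb->ro_compat & FEAT_METADATA_CSUM) != 0;
    }

    int main(void)
    {
        struct super sb = { .ro_compat = FEAT_LARGE_FILE };

        if (has_metadata_csum(&sb))
            puts("checksumming inode metadata");
        else
            puts("metadata checksums disabled");
        return 0;
    }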
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
index e75f840000a0..36b369697a13 100644
--- a/fs/ext4/indirect.c
+++ b/fs/ext4/indirect.c
@@ -318,34 +318,24 @@ static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned int blks,
318 * ext4_alloc_block() (normally -ENOSPC). Otherwise we set the chain 318 * ext4_alloc_block() (normally -ENOSPC). Otherwise we set the chain
319 * as described above and return 0. 319 * as described above and return 0.
320 */ 320 */
321static int ext4_alloc_branch(handle_t *handle, struct inode *inode, 321static int ext4_alloc_branch(handle_t *handle,
322 ext4_lblk_t iblock, int indirect_blks, 322 struct ext4_allocation_request *ar,
323 int *blks, ext4_fsblk_t goal, 323 int indirect_blks, ext4_lblk_t *offsets,
324 ext4_lblk_t *offsets, Indirect *branch) 324 Indirect *branch)
325{ 325{
326 struct ext4_allocation_request ar;
327 struct buffer_head * bh; 326 struct buffer_head * bh;
328 ext4_fsblk_t b, new_blocks[4]; 327 ext4_fsblk_t b, new_blocks[4];
329 __le32 *p; 328 __le32 *p;
330 int i, j, err, len = 1; 329 int i, j, err, len = 1;
331 330
332 /*
333 * Set up for the direct block allocation
334 */
335 memset(&ar, 0, sizeof(ar));
336 ar.inode = inode;
337 ar.len = *blks;
338 ar.logical = iblock;
339 if (S_ISREG(inode->i_mode))
340 ar.flags = EXT4_MB_HINT_DATA;
341
342 for (i = 0; i <= indirect_blks; i++) { 331 for (i = 0; i <= indirect_blks; i++) {
343 if (i == indirect_blks) { 332 if (i == indirect_blks) {
344 ar.goal = goal; 333 new_blocks[i] = ext4_mb_new_blocks(handle, ar, &err);
345 new_blocks[i] = ext4_mb_new_blocks(handle, &ar, &err);
346 } else 334 } else
347 goal = new_blocks[i] = ext4_new_meta_blocks(handle, inode, 335 ar->goal = new_blocks[i] = ext4_new_meta_blocks(handle,
348 goal, 0, NULL, &err); 336 ar->inode, ar->goal,
337 ar->flags & EXT4_MB_DELALLOC_RESERVED,
338 NULL, &err);
349 if (err) { 339 if (err) {
350 i--; 340 i--;
351 goto failed; 341 goto failed;
@@ -354,7 +344,7 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
354 if (i == 0) 344 if (i == 0)
355 continue; 345 continue;
356 346
357 bh = branch[i].bh = sb_getblk(inode->i_sb, new_blocks[i-1]); 347 bh = branch[i].bh = sb_getblk(ar->inode->i_sb, new_blocks[i-1]);
358 if (unlikely(!bh)) { 348 if (unlikely(!bh)) {
359 err = -ENOMEM; 349 err = -ENOMEM;
360 goto failed; 350 goto failed;
@@ -372,7 +362,7 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
372 b = new_blocks[i]; 362 b = new_blocks[i];
373 363
374 if (i == indirect_blks) 364 if (i == indirect_blks)
375 len = ar.len; 365 len = ar->len;
376 for (j = 0; j < len; j++) 366 for (j = 0; j < len; j++)
377 *p++ = cpu_to_le32(b++); 367 *p++ = cpu_to_le32(b++);
378 368
@@ -381,11 +371,10 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
381 unlock_buffer(bh); 371 unlock_buffer(bh);
382 372
383 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); 373 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
384 err = ext4_handle_dirty_metadata(handle, inode, bh); 374 err = ext4_handle_dirty_metadata(handle, ar->inode, bh);
385 if (err) 375 if (err)
386 goto failed; 376 goto failed;
387 } 377 }
388 *blks = ar.len;
389 return 0; 378 return 0;
390failed: 379failed:
391 for (; i >= 0; i--) { 380 for (; i >= 0; i--) {
@@ -396,10 +385,10 @@ failed:
396 * existing before ext4_alloc_branch() was called. 385 * existing before ext4_alloc_branch() was called.
397 */ 386 */
398 if (i > 0 && i != indirect_blks && branch[i].bh) 387 if (i > 0 && i != indirect_blks && branch[i].bh)
399 ext4_forget(handle, 1, inode, branch[i].bh, 388 ext4_forget(handle, 1, ar->inode, branch[i].bh,
400 branch[i].bh->b_blocknr); 389 branch[i].bh->b_blocknr);
401 ext4_free_blocks(handle, inode, NULL, new_blocks[i], 390 ext4_free_blocks(handle, ar->inode, NULL, new_blocks[i],
402 (i == indirect_blks) ? ar.len : 1, 0); 391 (i == indirect_blks) ? ar->len : 1, 0);
403 } 392 }
404 return err; 393 return err;
405} 394}
@@ -419,9 +408,9 @@ failed:
419 * inode (->i_blocks, etc.). In case of success we end up with the full 408 * inode (->i_blocks, etc.). In case of success we end up with the full
420 * chain to new block and return 0. 409 * chain to new block and return 0.
421 */ 410 */
422static int ext4_splice_branch(handle_t *handle, struct inode *inode, 411static int ext4_splice_branch(handle_t *handle,
423 ext4_lblk_t block, Indirect *where, int num, 412 struct ext4_allocation_request *ar,
424 int blks) 413 Indirect *where, int num)
425{ 414{
426 int i; 415 int i;
427 int err = 0; 416 int err = 0;
@@ -446,9 +435,9 @@ static int ext4_splice_branch(handle_t *handle, struct inode *inode,
446 * Update the host buffer_head or inode to point to the just-allocated 435 * Update the host buffer_head or inode to point to the just-allocated
447 * direct blocks 436 * direct blocks
448 */ 437 */
449 if (num == 0 && blks > 1) { 438 if (num == 0 && ar->len > 1) {
450 current_block = le32_to_cpu(where->key) + 1; 439 current_block = le32_to_cpu(where->key) + 1;
451 for (i = 1; i < blks; i++) 440 for (i = 1; i < ar->len; i++)
452 *(where->p + i) = cpu_to_le32(current_block++); 441 *(where->p + i) = cpu_to_le32(current_block++);
453 } 442 }
454 443
@@ -465,14 +454,14 @@ static int ext4_splice_branch(handle_t *handle, struct inode *inode,
465 */ 454 */
466 jbd_debug(5, "splicing indirect only\n"); 455 jbd_debug(5, "splicing indirect only\n");
467 BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata"); 456 BUFFER_TRACE(where->bh, "call ext4_handle_dirty_metadata");
468 err = ext4_handle_dirty_metadata(handle, inode, where->bh); 457 err = ext4_handle_dirty_metadata(handle, ar->inode, where->bh);
469 if (err) 458 if (err)
470 goto err_out; 459 goto err_out;
471 } else { 460 } else {
472 /* 461 /*
473 * OK, we spliced it into the inode itself on a direct block. 462 * OK, we spliced it into the inode itself on a direct block.
474 */ 463 */
475 ext4_mark_inode_dirty(handle, inode); 464 ext4_mark_inode_dirty(handle, ar->inode);
476 jbd_debug(5, "splicing direct\n"); 465 jbd_debug(5, "splicing direct\n");
477 } 466 }
478 return err; 467 return err;
@@ -484,11 +473,11 @@ err_out:
484 * need to revoke the block, which is why we don't 473 * need to revoke the block, which is why we don't
485 * need to set EXT4_FREE_BLOCKS_METADATA. 474 * need to set EXT4_FREE_BLOCKS_METADATA.
486 */ 475 */
487 ext4_free_blocks(handle, inode, where[i].bh, 0, 1, 476 ext4_free_blocks(handle, ar->inode, where[i].bh, 0, 1,
488 EXT4_FREE_BLOCKS_FORGET); 477 EXT4_FREE_BLOCKS_FORGET);
489 } 478 }
490 ext4_free_blocks(handle, inode, NULL, le32_to_cpu(where[num].key), 479 ext4_free_blocks(handle, ar->inode, NULL, le32_to_cpu(where[num].key),
491 blks, 0); 480 ar->len, 0);
492 481
493 return err; 482 return err;
494} 483}
@@ -525,11 +514,11 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
525 struct ext4_map_blocks *map, 514 struct ext4_map_blocks *map,
526 int flags) 515 int flags)
527{ 516{
517 struct ext4_allocation_request ar;
528 int err = -EIO; 518 int err = -EIO;
529 ext4_lblk_t offsets[4]; 519 ext4_lblk_t offsets[4];
530 Indirect chain[4]; 520 Indirect chain[4];
531 Indirect *partial; 521 Indirect *partial;
532 ext4_fsblk_t goal;
533 int indirect_blks; 522 int indirect_blks;
534 int blocks_to_boundary = 0; 523 int blocks_to_boundary = 0;
535 int depth; 524 int depth;
@@ -579,7 +568,16 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
579 return -ENOSPC; 568 return -ENOSPC;
580 } 569 }
581 570
582 goal = ext4_find_goal(inode, map->m_lblk, partial); 571 /* Set up for the direct block allocation */
572 memset(&ar, 0, sizeof(ar));
573 ar.inode = inode;
574 ar.logical = map->m_lblk;
575 if (S_ISREG(inode->i_mode))
576 ar.flags = EXT4_MB_HINT_DATA;
577 if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
578 ar.flags |= EXT4_MB_DELALLOC_RESERVED;
579
580 ar.goal = ext4_find_goal(inode, map->m_lblk, partial);
583 581
584 /* the number of blocks need to allocate for [d,t]indirect blocks */ 582 /* the number of blocks need to allocate for [d,t]indirect blocks */
585 indirect_blks = (chain + depth) - partial - 1; 583 indirect_blks = (chain + depth) - partial - 1;
@@ -588,13 +586,13 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
588 * Next look up the indirect map to count the total number of 586 * Next look up the indirect map to count the total number of
589 * direct blocks to allocate for this branch. 587 * direct blocks to allocate for this branch.
590 */ 588 */
591 count = ext4_blks_to_allocate(partial, indirect_blks, 589 ar.len = ext4_blks_to_allocate(partial, indirect_blks,
592 map->m_len, blocks_to_boundary); 590 map->m_len, blocks_to_boundary);
591
593 /* 592 /*
594 * Block out ext4_truncate while we alter the tree 593 * Block out ext4_truncate while we alter the tree
595 */ 594 */
596 err = ext4_alloc_branch(handle, inode, map->m_lblk, indirect_blks, 595 err = ext4_alloc_branch(handle, &ar, indirect_blks,
597 &count, goal,
598 offsets + (partial - chain), partial); 596 offsets + (partial - chain), partial);
599 597
600 /* 598 /*
@@ -605,14 +603,14 @@ int ext4_ind_map_blocks(handle_t *handle, struct inode *inode,
605 * may need to return -EAGAIN upwards in the worst case. --sct 603 * may need to return -EAGAIN upwards in the worst case. --sct
606 */ 604 */
607 if (!err) 605 if (!err)
608 err = ext4_splice_branch(handle, inode, map->m_lblk, 606 err = ext4_splice_branch(handle, &ar, partial, indirect_blks);
609 partial, indirect_blks, count);
610 if (err) 607 if (err)
611 goto cleanup; 608 goto cleanup;
612 609
613 map->m_flags |= EXT4_MAP_NEW; 610 map->m_flags |= EXT4_MAP_NEW;
614 611
615 ext4_update_inode_fsync_trans(handle, inode, 1); 612 ext4_update_inode_fsync_trans(handle, inode, 1);
613 count = ar.len;
616got_it: 614got_it:
617 map->m_flags |= EXT4_MAP_MAPPED; 615 map->m_flags |= EXT4_MAP_MAPPED;
618 map->m_pblk = le32_to_cpu(chain[depth-1].key); 616 map->m_pblk = le32_to_cpu(chain[depth-1].key);
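The indirect-block refactor above threads a single struct ext4_allocation_request through ext4_alloc_branch() and ext4_splice_branch() instead of passing the inode, goal, length and flags separately; ext4_ind_map_blocks() fills the request once, including the EXT4_MB_DELALLOC_RESERVED bit. As a small illustration of that "bundle the parameters in one request struct" refactor (types and names below are invented):

    #include <stdio.h>

    /* Before: each helper took the pieces individually.
     * After:  one request struct carries them, so adding a field (like a
     *         per-request "delalloc reserved" flag) touches only the struct. */
    struct alloc_request {
        const char  *inode;     /* stand-in for struct inode *   */
        unsigned int logical;   /* logical block being mapped    */
        unsigned int goal;      /* preferred physical block      */
        unsigned int len;       /* number of blocks to allocate  */
        unsigned int flags;     /* e.g. a DELALLOC_RESERVED bit  */
    };

    static int alloc_branch(const struct alloc_request *ar, int indirect_blks)
    {
        printf("alloc %u blocks for %s at logical %u (goal %u, %d indirect)\n",
               ar->len, ar->inode, ar->logical, ar->goal, indirect_blks);
        return 0;
    }

    static int splice_branch(const struct alloc_request *ar)
    {
        printf("splice %u blocks into %s\n", ar->len, ar->inode);
        return 0;
    }

    int main(void)
    {
        struct alloc_request ar = {
            .inode = "inode#12", .logical = 42, .goal = 1000,
            .len = 8, .flags = 0,
        };

        if (!alloc_branch(&ar, 2))
            splice_branch(&ar);
        return 0;
    }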
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index bea662bd0ca6..3ea62695abce 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -594,6 +594,7 @@ retry:
594 if (ret) { 594 if (ret) {
595 unlock_page(page); 595 unlock_page(page);
596 page_cache_release(page); 596 page_cache_release(page);
597 page = NULL;
597 ext4_orphan_add(handle, inode); 598 ext4_orphan_add(handle, inode);
598 up_write(&EXT4_I(inode)->xattr_sem); 599 up_write(&EXT4_I(inode)->xattr_sem);
599 sem_held = 0; 600 sem_held = 0;
@@ -613,7 +614,8 @@ retry:
613 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries)) 614 if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
614 goto retry; 615 goto retry;
615 616
616 block_commit_write(page, from, to); 617 if (page)
618 block_commit_write(page, from, to);
617out: 619out:
618 if (page) { 620 if (page) {
619 unlock_page(page); 621 unlock_page(page);
@@ -1126,8 +1128,7 @@ static int ext4_finish_convert_inline_dir(handle_t *handle,
1126 memcpy((void *)de, buf + EXT4_INLINE_DOTDOT_SIZE, 1128 memcpy((void *)de, buf + EXT4_INLINE_DOTDOT_SIZE,
1127 inline_size - EXT4_INLINE_DOTDOT_SIZE); 1129 inline_size - EXT4_INLINE_DOTDOT_SIZE);
1128 1130
1129 if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, 1131 if (ext4_has_metadata_csum(inode->i_sb))
1130 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
1131 csum_size = sizeof(struct ext4_dir_entry_tail); 1132 csum_size = sizeof(struct ext4_dir_entry_tail);
1132 1133
1133 inode->i_size = inode->i_sb->s_blocksize; 1134 inode->i_size = inode->i_sb->s_blocksize;
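The inline-data fix above releases the page on the error path, sets the pointer to NULL, and then makes the later block_commit_write() call conditional on the pointer, so a released page is never touched again. The same defensive shape in a tiny standalone form:

    #include <stdio.h>
    #include <stdlib.h>

    static void release(char **buf)
    {
        free(*buf);
        *buf = NULL;          /* later cleanup can safely test the pointer */
    }

    static int do_work(int fail_early)
    {
        char *buf = malloc(64);

        if (!buf)
            return -1;
        if (fail_early)
            release(&buf);    /* early error path gives the buffer back */

        /* common tail: only touch the buffer if we still own it */
        if (buf) {
            snprintf(buf, 64, "committed");
            puts(buf);
            release(&buf);
        }
        return fail_early ? -1 : 0;
    }

    int main(void)
    {
        do_work(0);
        do_work(1);
        return 0;
    }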
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 3aa26e9117c4..3356ab5395f4 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -83,8 +83,7 @@ static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
83 83
84 if (EXT4_SB(inode->i_sb)->s_es->s_creator_os != 84 if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
85 cpu_to_le32(EXT4_OS_LINUX) || 85 cpu_to_le32(EXT4_OS_LINUX) ||
86 !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, 86 !ext4_has_metadata_csum(inode->i_sb))
87 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
88 return 1; 87 return 1;
89 88
90 provided = le16_to_cpu(raw->i_checksum_lo); 89 provided = le16_to_cpu(raw->i_checksum_lo);
@@ -105,8 +104,7 @@ static void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
105 104
106 if (EXT4_SB(inode->i_sb)->s_es->s_creator_os != 105 if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
107 cpu_to_le32(EXT4_OS_LINUX) || 106 cpu_to_le32(EXT4_OS_LINUX) ||
108 !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, 107 !ext4_has_metadata_csum(inode->i_sb))
109 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
110 return; 108 return;
111 109
112 csum = ext4_inode_csum(inode, raw, ei); 110 csum = ext4_inode_csum(inode, raw, ei);
@@ -224,16 +222,15 @@ void ext4_evict_inode(struct inode *inode)
224 goto no_delete; 222 goto no_delete;
225 } 223 }
226 224
227 if (!is_bad_inode(inode)) 225 if (is_bad_inode(inode))
228 dquot_initialize(inode); 226 goto no_delete;
227 dquot_initialize(inode);
229 228
230 if (ext4_should_order_data(inode)) 229 if (ext4_should_order_data(inode))
231 ext4_begin_ordered_truncate(inode, 0); 230 ext4_begin_ordered_truncate(inode, 0);
232 truncate_inode_pages_final(&inode->i_data); 231 truncate_inode_pages_final(&inode->i_data);
233 232
234 WARN_ON(atomic_read(&EXT4_I(inode)->i_ioend_count)); 233 WARN_ON(atomic_read(&EXT4_I(inode)->i_ioend_count));
235 if (is_bad_inode(inode))
236 goto no_delete;
237 234
238 /* 235 /*
239 * Protect us against freezing - iput() caller didn't have to have any 236 * Protect us against freezing - iput() caller didn't have to have any
@@ -590,20 +587,12 @@ found:
590 /* 587 /*
591 * New blocks allocate and/or writing to unwritten extent 588 * New blocks allocate and/or writing to unwritten extent
592 * will possibly result in updating i_data, so we take 589 * will possibly result in updating i_data, so we take
593 * the write lock of i_data_sem, and call get_blocks() 590 * the write lock of i_data_sem, and call get_block()
594 * with create == 1 flag. 591 * with create == 1 flag.
595 */ 592 */
596 down_write(&EXT4_I(inode)->i_data_sem); 593 down_write(&EXT4_I(inode)->i_data_sem);
597 594
598 /* 595 /*
599 * if the caller is from delayed allocation writeout path
600 * we have already reserved fs blocks for allocation
601 * let the underlying get_block() function know to
602 * avoid double accounting
603 */
604 if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
605 ext4_set_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
606 /*
607 * We need to check for EXT4 here because migrate 596 * We need to check for EXT4 here because migrate
608 * could have changed the inode type in between 597 * could have changed the inode type in between
609 */ 598 */
@@ -631,8 +620,6 @@ found:
631 (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)) 620 (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
632 ext4_da_update_reserve_space(inode, retval, 1); 621 ext4_da_update_reserve_space(inode, retval, 1);
633 } 622 }
634 if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
635 ext4_clear_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
636 623
637 if (retval > 0) { 624 if (retval > 0) {
638 unsigned int status; 625 unsigned int status;
@@ -734,11 +721,11 @@ int ext4_get_block(struct inode *inode, sector_t iblock,
734 * `handle' can be NULL if create is zero 721 * `handle' can be NULL if create is zero
735 */ 722 */
736struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode, 723struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
737 ext4_lblk_t block, int create, int *errp) 724 ext4_lblk_t block, int create)
738{ 725{
739 struct ext4_map_blocks map; 726 struct ext4_map_blocks map;
740 struct buffer_head *bh; 727 struct buffer_head *bh;
741 int fatal = 0, err; 728 int err;
742 729
743 J_ASSERT(handle != NULL || create == 0); 730 J_ASSERT(handle != NULL || create == 0);
744 731
@@ -747,21 +734,14 @@ struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
747 err = ext4_map_blocks(handle, inode, &map, 734 err = ext4_map_blocks(handle, inode, &map,
748 create ? EXT4_GET_BLOCKS_CREATE : 0); 735 create ? EXT4_GET_BLOCKS_CREATE : 0);
749 736
750 /* ensure we send some value back into *errp */ 737 if (err == 0)
751 *errp = 0; 738 return create ? ERR_PTR(-ENOSPC) : NULL;
752
753 if (create && err == 0)
754 err = -ENOSPC; /* should never happen */
755 if (err < 0) 739 if (err < 0)
756 *errp = err; 740 return ERR_PTR(err);
757 if (err <= 0)
758 return NULL;
759 741
760 bh = sb_getblk(inode->i_sb, map.m_pblk); 742 bh = sb_getblk(inode->i_sb, map.m_pblk);
761 if (unlikely(!bh)) { 743 if (unlikely(!bh))
762 *errp = -ENOMEM; 744 return ERR_PTR(-ENOMEM);
763 return NULL;
764 }
765 if (map.m_flags & EXT4_MAP_NEW) { 745 if (map.m_flags & EXT4_MAP_NEW) {
766 J_ASSERT(create != 0); 746 J_ASSERT(create != 0);
767 J_ASSERT(handle != NULL); 747 J_ASSERT(handle != NULL);
@@ -775,44 +755,44 @@ struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
775 */ 755 */
776 lock_buffer(bh); 756 lock_buffer(bh);
777 BUFFER_TRACE(bh, "call get_create_access"); 757 BUFFER_TRACE(bh, "call get_create_access");
778 fatal = ext4_journal_get_create_access(handle, bh); 758 err = ext4_journal_get_create_access(handle, bh);
779 if (!fatal && !buffer_uptodate(bh)) { 759 if (unlikely(err)) {
760 unlock_buffer(bh);
761 goto errout;
762 }
763 if (!buffer_uptodate(bh)) {
780 memset(bh->b_data, 0, inode->i_sb->s_blocksize); 764 memset(bh->b_data, 0, inode->i_sb->s_blocksize);
781 set_buffer_uptodate(bh); 765 set_buffer_uptodate(bh);
782 } 766 }
783 unlock_buffer(bh); 767 unlock_buffer(bh);
784 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); 768 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
785 err = ext4_handle_dirty_metadata(handle, inode, bh); 769 err = ext4_handle_dirty_metadata(handle, inode, bh);
786 if (!fatal) 770 if (unlikely(err))
787 fatal = err; 771 goto errout;
788 } else { 772 } else
789 BUFFER_TRACE(bh, "not a new buffer"); 773 BUFFER_TRACE(bh, "not a new buffer");
790 }
791 if (fatal) {
792 *errp = fatal;
793 brelse(bh);
794 bh = NULL;
795 }
796 return bh; 774 return bh;
775errout:
776 brelse(bh);
777 return ERR_PTR(err);
797} 778}
798 779
799struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode, 780struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
800 ext4_lblk_t block, int create, int *err) 781 ext4_lblk_t block, int create)
801{ 782{
802 struct buffer_head *bh; 783 struct buffer_head *bh;
803 784
804 bh = ext4_getblk(handle, inode, block, create, err); 785 bh = ext4_getblk(handle, inode, block, create);
805 if (!bh) 786 if (IS_ERR(bh))
806 return bh; 787 return bh;
807 if (buffer_uptodate(bh)) 788 if (!bh || buffer_uptodate(bh))
808 return bh; 789 return bh;
809 ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh); 790 ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
810 wait_on_buffer(bh); 791 wait_on_buffer(bh);
811 if (buffer_uptodate(bh)) 792 if (buffer_uptodate(bh))
812 return bh; 793 return bh;
813 put_bh(bh); 794 put_bh(bh);
814 *err = -EIO; 795 return ERR_PTR(-EIO);
815 return NULL;
816} 796}
817 797
818int ext4_walk_page_buffers(handle_t *handle, 798int ext4_walk_page_buffers(handle_t *handle,
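ext4_getblk() and ext4_bread() now report failures through the returned pointer itself (ERR_PTR/IS_ERR/PTR_ERR) instead of a separate int *errp, with NULL reserved for the legitimate "hole, nothing mapped" case. A user-space sketch of that encoding, using the same trick of stashing small negative errno values at the very top of the address range; the helper names here are my own, not the kernel macros:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_ERRNO 4095

    static void *err_ptr(long err)      { return (void *)err; }
    static long  ptr_err(const void *p) { return (long)p; }
    static int   is_err(const void *p)
    {
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    /* create=0 may legitimately return NULL (a hole); real failures come
     * back as err_ptr(-errno), so callers need no extra out-parameter. */
    static char *get_block(int block, int create)
    {
        if (block < 0)
            return err_ptr(-EIO);
        if (block % 2 && !create)
            return NULL;                 /* hole, not an error */
        char *buf = malloc(16);
        if (!buf)
            return err_ptr(-ENOMEM);
        snprintf(buf, 16, "block %d", block);
        return buf;
    }

    int main(void)
    {
        int blocks[] = { 4, 3, -1 };

        for (int i = 0; i < 3; i++) {
            char *bh = get_block(blocks[i], 0);

            if (is_err(bh))
                printf("block %d: error %ld\n", blocks[i], ptr_err(bh));
            else if (!bh)
                printf("block %d: hole\n", blocks[i]);
            else {
                printf("block %d: %s\n", blocks[i], bh);
                free(bh);
            }
        }
        return 0;
    }

Callers can then distinguish all three outcomes (error, hole, buffer) from the return value alone, which is exactly what the reworked ext4_bread() does above.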
@@ -1536,7 +1516,7 @@ out_unlock:
1536} 1516}
1537 1517
1538/* 1518/*
1539 * This is a special get_blocks_t callback which is used by 1519 * This is a special get_block_t callback which is used by
1540 * ext4_da_write_begin(). It will either return mapped block or 1520 * ext4_da_write_begin(). It will either return mapped block or
1541 * reserve space for a single block. 1521 * reserve space for a single block.
1542 * 1522 *
@@ -2011,12 +1991,10 @@ static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
2011 * in data loss. So use reserved blocks to allocate metadata if 1991 * in data loss. So use reserved blocks to allocate metadata if
2012 * possible. 1992 * possible.
2013 * 1993 *
2014 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE if the blocks 1994 * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE if
2015 * in question are delalloc blocks. This affects functions in many 1995 * the blocks in question are delalloc blocks. This indicates
2016 * different parts of the allocation call path. This flag exists 1996 * that the blocks and quotas have already been checked when
2017 * primarily because we don't want to change *many* call functions, so 1997 * the data was copied into the page cache.
2018 * ext4_map_blocks() will set the EXT4_STATE_DELALLOC_RESERVED flag
2019 * once the inode's allocation semaphore is taken.
2020 */ 1998 */
2021 get_blocks_flags = EXT4_GET_BLOCKS_CREATE | 1999 get_blocks_flags = EXT4_GET_BLOCKS_CREATE |
2022 EXT4_GET_BLOCKS_METADATA_NOFAIL; 2000 EXT4_GET_BLOCKS_METADATA_NOFAIL;
@@ -2515,6 +2493,20 @@ static int ext4_nonda_switch(struct super_block *sb)
2515 return 0; 2493 return 0;
2516} 2494}
2517 2495
2496/* We always reserve for an inode update; the superblock could be there too */
2497static int ext4_da_write_credits(struct inode *inode, loff_t pos, unsigned len)
2498{
2499 if (likely(EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
2500 EXT4_FEATURE_RO_COMPAT_LARGE_FILE)))
2501 return 1;
2502
2503 if (pos + len <= 0x7fffffffULL)
2504 return 1;
2505
2506 /* We might need to update the superblock to set LARGE_FILE */
2507 return 2;
2508}
2509
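ext4_da_write_begin() now asks ext4_da_write_credits() how many blocks of journal credits to reserve: one for the inode update, plus one more only when the write could push the file past 2^31-1 bytes on a filesystem that has not yet set the LARGE_FILE feature (because the superblock may need updating too). The decision in isolation:

    #include <stdbool.h>
    #include <stdio.h>

    /* 1 credit for the inode update; 2 if the superblock might also need
     * the LARGE_FILE feature flag set because the file grows past 2^31-1. */
    static int da_write_credits(bool has_large_file, long long pos, unsigned len)
    {
        if (has_large_file)
            return 1;
        if (pos + len <= 0x7fffffffLL)
            return 1;
        return 2;
    }

    int main(void)
    {
        printf("%d\n", da_write_credits(true,  3LL << 30, 4096));  /* 1 */
        printf("%d\n", da_write_credits(false, 4096,      4096));  /* 1 */
        printf("%d\n", da_write_credits(false, 3LL << 30, 4096));  /* 2 */
        return 0;
    }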
2518static int ext4_da_write_begin(struct file *file, struct address_space *mapping, 2510static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
2519 loff_t pos, unsigned len, unsigned flags, 2511 loff_t pos, unsigned len, unsigned flags,
2520 struct page **pagep, void **fsdata) 2512 struct page **pagep, void **fsdata)
@@ -2565,7 +2557,8 @@ retry_grab:
2565 * of file which has an already mapped buffer. 2557 * of file which has an already mapped buffer.
2566 */ 2558 */
2567retry_journal: 2559retry_journal:
2568 handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, 1); 2560 handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
2561 ext4_da_write_credits(inode, pos, len));
2569 if (IS_ERR(handle)) { 2562 if (IS_ERR(handle)) {
2570 page_cache_release(page); 2563 page_cache_release(page);
2571 return PTR_ERR(handle); 2564 return PTR_ERR(handle);
@@ -2658,10 +2651,7 @@ static int ext4_da_write_end(struct file *file,
2658 if (copied && new_i_size > EXT4_I(inode)->i_disksize) { 2651 if (copied && new_i_size > EXT4_I(inode)->i_disksize) {
2659 if (ext4_has_inline_data(inode) || 2652 if (ext4_has_inline_data(inode) ||
2660 ext4_da_should_update_i_disksize(page, end)) { 2653 ext4_da_should_update_i_disksize(page, end)) {
2661 down_write(&EXT4_I(inode)->i_data_sem); 2654 ext4_update_i_disksize(inode, new_i_size);
2662 if (new_i_size > EXT4_I(inode)->i_disksize)
2663 EXT4_I(inode)->i_disksize = new_i_size;
2664 up_write(&EXT4_I(inode)->i_data_sem);
2665 /* We need to mark inode dirty even if 2655 /* We need to mark inode dirty even if
2666 * new_i_size is less than inode->i_size 2656 * new_i_size is less than inode->i_size
2667 * but greater than i_disksize (hint: delalloc) 2657 * but greater than i_disksize (hint: delalloc)
@@ -3936,8 +3926,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
3936 ei->i_extra_isize = 0; 3926 ei->i_extra_isize = 0;
3937 3927
3938 /* Precompute checksum seed for inode metadata */ 3928 /* Precompute checksum seed for inode metadata */
3939 if (EXT4_HAS_RO_COMPAT_FEATURE(sb, 3929 if (ext4_has_metadata_csum(sb)) {
3940 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
3941 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); 3930 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3942 __u32 csum; 3931 __u32 csum;
3943 __le32 inum = cpu_to_le32(inode->i_ino); 3932 __le32 inum = cpu_to_le32(inode->i_ino);
@@ -4127,6 +4116,13 @@ bad_inode:
4127 return ERR_PTR(ret); 4116 return ERR_PTR(ret);
4128} 4117}
4129 4118
4119struct inode *ext4_iget_normal(struct super_block *sb, unsigned long ino)
4120{
4121 if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)
4122 return ERR_PTR(-EIO);
4123 return ext4_iget(sb, ino);
4124}
4125
4130static int ext4_inode_blocks_set(handle_t *handle, 4126static int ext4_inode_blocks_set(handle_t *handle,
4131 struct ext4_inode *raw_inode, 4127 struct ext4_inode *raw_inode,
4132 struct ext4_inode_info *ei) 4128 struct ext4_inode_info *ei)
@@ -4226,7 +4222,8 @@ static int ext4_do_update_inode(handle_t *handle,
4226 EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode); 4222 EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
4227 EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode); 4223 EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
4228 4224
4229 if (ext4_inode_blocks_set(handle, raw_inode, ei)) { 4225 err = ext4_inode_blocks_set(handle, raw_inode, ei);
4226 if (err) {
4230 spin_unlock(&ei->i_raw_lock); 4227 spin_unlock(&ei->i_raw_lock);
4231 goto out_brelse; 4228 goto out_brelse;
4232 } 4229 }
@@ -4536,8 +4533,12 @@ int ext4_setattr(struct dentry *dentry, struct iattr *attr)
4536 ext4_orphan_del(NULL, inode); 4533 ext4_orphan_del(NULL, inode);
4537 goto err_out; 4534 goto err_out;
4538 } 4535 }
4539 } else 4536 } else {
4537 loff_t oldsize = inode->i_size;
4538
4540 i_size_write(inode, attr->ia_size); 4539 i_size_write(inode, attr->ia_size);
4540 pagecache_isize_extended(inode, oldsize, inode->i_size);
4541 }
4541 4542
4542 /* 4543 /*
4543 * Blocks are going to be removed from the inode. Wait 4544 * Blocks are going to be removed from the inode. Wait
@@ -4958,7 +4959,12 @@ int ext4_change_inode_journal_flag(struct inode *inode, int val)
4958 if (val) 4959 if (val)
4959 ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA); 4960 ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
4960 else { 4961 else {
4961 jbd2_journal_flush(journal); 4962 err = jbd2_journal_flush(journal);
4963 if (err < 0) {
4964 jbd2_journal_unlock_updates(journal);
4965 ext4_inode_resume_unlocked_dio(inode);
4966 return err;
4967 }
4962 ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA); 4968 ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
4963 } 4969 }
4964 ext4_set_aops(inode); 4970 ext4_set_aops(inode);
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 0f2252ec274d..bfda18a15592 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -331,8 +331,7 @@ flags_out:
331 if (!inode_owner_or_capable(inode)) 331 if (!inode_owner_or_capable(inode))
332 return -EPERM; 332 return -EPERM;
333 333
334 if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, 334 if (ext4_has_metadata_csum(inode->i_sb)) {
335 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
336 ext4_warning(sb, "Setting inode version is not " 335 ext4_warning(sb, "Setting inode version is not "
337 "supported with metadata_csum enabled."); 336 "supported with metadata_csum enabled.");
338 return -ENOTTY; 337 return -ENOTTY;
@@ -532,9 +531,17 @@ group_add_out:
532 } 531 }
533 532
534 case EXT4_IOC_SWAP_BOOT: 533 case EXT4_IOC_SWAP_BOOT:
534 {
535 int err;
535 if (!(filp->f_mode & FMODE_WRITE)) 536 if (!(filp->f_mode & FMODE_WRITE))
536 return -EBADF; 537 return -EBADF;
537 return swap_inode_boot_loader(sb, inode); 538 err = mnt_want_write_file(filp);
539 if (err)
540 return err;
541 err = swap_inode_boot_loader(sb, inode);
542 mnt_drop_write_file(filp);
543 return err;
544 }
538 545
539 case EXT4_IOC_RESIZE_FS: { 546 case EXT4_IOC_RESIZE_FS: {
540 ext4_fsblk_t n_blocks_count; 547 ext4_fsblk_t n_blocks_count;
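The EXT4_IOC_SWAP_BOOT change takes write access to the mount with mnt_want_write_file() before calling swap_inode_boot_loader() and drops it again on every exit path. A minimal sketch of that acquire/operate/release shape; the helpers below are placeholders, not the VFS API:

    #include <stdio.h>

    static int  want_write(void)    { puts("got write access");     return 0; }
    static void drop_write(void)    { puts("dropped write access"); }
    static int  do_swap_boot(int ok) { return ok ? 0 : -1; }

    /* Acquire first, run the operation, always release -- success or not. */
    static int ioctl_swap_boot(int ok)
    {
        int err = want_write();

        if (err)
            return err;
        err = do_swap_boot(ok);
        drop_write();
        return err;
    }

    int main(void)
    {
        printf("ok run:   %d\n", ioctl_swap_boot(1));
        printf("fail run: %d\n", ioctl_swap_boot(0));
        return 0;
    }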
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 748c9136a60a..dbfe15c2533c 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -3155,9 +3155,8 @@ ext4_mb_normalize_request(struct ext4_allocation_context *ac,
3155 "start %lu, size %lu, fe_logical %lu", 3155 "start %lu, size %lu, fe_logical %lu",
3156 (unsigned long) start, (unsigned long) size, 3156 (unsigned long) start, (unsigned long) size,
3157 (unsigned long) ac->ac_o_ex.fe_logical); 3157 (unsigned long) ac->ac_o_ex.fe_logical);
3158 BUG();
3158 } 3159 }
3159 BUG_ON(start + size <= ac->ac_o_ex.fe_logical &&
3160 start > ac->ac_o_ex.fe_logical);
3161 BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb)); 3160 BUG_ON(size <= 0 || size > EXT4_BLOCKS_PER_GROUP(ac->ac_sb));
3162 3161
3163 /* now prepare goal request */ 3162 /* now prepare goal request */
@@ -4410,14 +4409,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
4410 if (IS_NOQUOTA(ar->inode)) 4409 if (IS_NOQUOTA(ar->inode))
4411 ar->flags |= EXT4_MB_USE_ROOT_BLOCKS; 4410 ar->flags |= EXT4_MB_USE_ROOT_BLOCKS;
4412 4411
4413 /* 4412 if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0) {
4414 * For delayed allocation, we could skip the ENOSPC and
4415 * EDQUOT check, as blocks and quotas have been already
4416 * reserved when data being copied into pagecache.
4417 */
4418 if (ext4_test_inode_state(ar->inode, EXT4_STATE_DELALLOC_RESERVED))
4419 ar->flags |= EXT4_MB_DELALLOC_RESERVED;
4420 else {
4421 /* Without delayed allocation we need to verify 4413 /* Without delayed allocation we need to verify
4422 * there is enough free blocks to do block allocation 4414 * there is enough free blocks to do block allocation
4423 * and verify allocation doesn't exceed the quota limits. 4415 * and verify allocation doesn't exceed the quota limits.
@@ -4528,8 +4520,7 @@ out:
4528 if (inquota && ar->len < inquota) 4520 if (inquota && ar->len < inquota)
4529 dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len)); 4521 dquot_free_block(ar->inode, EXT4_C2B(sbi, inquota - ar->len));
4530 if (!ar->len) { 4522 if (!ar->len) {
4531 if (!ext4_test_inode_state(ar->inode, 4523 if ((ar->flags & EXT4_MB_DELALLOC_RESERVED) == 0)
4532 EXT4_STATE_DELALLOC_RESERVED))
4533 /* release all the reserved blocks if non delalloc */ 4524 /* release all the reserved blocks if non delalloc */
4534 percpu_counter_sub(&sbi->s_dirtyclusters_counter, 4525 percpu_counter_sub(&sbi->s_dirtyclusters_counter,
4535 reserv_clstrs); 4526 reserv_clstrs);
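With the EXT4_STATE_DELALLOC_RESERVED inode state gone, ext4_mb_new_blocks() decides whether to skip the free-space and quota checks purely from the EXT4_MB_DELALLOC_RESERVED bit the caller sets in the allocation request, so the information travels with the call instead of living briefly in shared inode state. A sketch of the flag-in-the-request idea (names invented):

    #include <stdbool.h>
    #include <stdio.h>

    #define REQ_DELALLOC_RESERVED (1u << 0)

    struct request { unsigned int flags; unsigned int len; };

    /* The allocator looks only at the request it was handed; no caller has
     * to set and later clear a flag on a shared object around the call. */
    static int new_blocks(const struct request *req)
    {
        if (!(req->flags & REQ_DELALLOC_RESERVED))
            printf("checking free space and quota for %u blocks\n", req->len);
        else
            printf("space/quota already reserved for %u blocks\n", req->len);
        return 0;
    }

    int main(void)
    {
        struct request ordinary = { .flags = 0,                     .len = 8 };
        struct request delalloc = { .flags = REQ_DELALLOC_RESERVED, .len = 8 };

        new_blocks(&ordinary);
        new_blocks(&delalloc);
        return 0;
    }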
diff --git a/fs/ext4/migrate.c b/fs/ext4/migrate.c
index d3567f27bae7..a432634f2e6a 100644
--- a/fs/ext4/migrate.c
+++ b/fs/ext4/migrate.c
@@ -41,8 +41,7 @@ static int finish_range(handle_t *handle, struct inode *inode,
41 ext4_ext_store_pblock(&newext, lb->first_pblock); 41 ext4_ext_store_pblock(&newext, lb->first_pblock);
42 /* Locking only for convenience since we are operating on a temp inode */ 42 /* Locking only for convenience since we are operating on a temp inode */
43 down_write(&EXT4_I(inode)->i_data_sem); 43 down_write(&EXT4_I(inode)->i_data_sem);
44 path = ext4_ext_find_extent(inode, lb->first_block, NULL, 0); 44 path = ext4_find_extent(inode, lb->first_block, NULL, 0);
45
46 if (IS_ERR(path)) { 45 if (IS_ERR(path)) {
47 retval = PTR_ERR(path); 46 retval = PTR_ERR(path);
48 path = NULL; 47 path = NULL;
@@ -81,13 +80,11 @@ static int finish_range(handle_t *handle, struct inode *inode,
81 goto err_out; 80 goto err_out;
82 } 81 }
83 } 82 }
84 retval = ext4_ext_insert_extent(handle, inode, path, &newext, 0); 83 retval = ext4_ext_insert_extent(handle, inode, &path, &newext, 0);
85err_out: 84err_out:
86 up_write((&EXT4_I(inode)->i_data_sem)); 85 up_write((&EXT4_I(inode)->i_data_sem));
87 if (path) { 86 ext4_ext_drop_refs(path);
88 ext4_ext_drop_refs(path); 87 kfree(path);
89 kfree(path);
90 }
91 lb->first_pblock = 0; 88 lb->first_pblock = 0;
92 return retval; 89 return retval;
93} 90}
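finish_range() can now free the extent path unconditionally because the drop-refs/kfree pair evidently tolerates a NULL path (kfree(NULL) certainly does), which removes one level of if-nesting from the exit path. The same convention in plain C, where free() already accepts NULL and the helper just follows suit:

    #include <stdio.h>
    #include <stdlib.h>

    struct path { int depth; };

    /* Tolerate NULL so callers can clean up unconditionally. */
    static void path_put(struct path *p)
    {
        if (!p)
            return;
        printf("dropping path refs (depth %d)\n", p->depth);
        free(p);
    }

    static int do_range(int fail)
    {
        struct path *p = NULL;

        if (!fail) {
            p = malloc(sizeof(*p));
            if (!p)
                return -1;
            p->depth = 2;
        }
        /* single exit path: no "if (p) { ... }" needed */
        path_put(p);
        return fail ? -1 : 0;
    }

    int main(void)
    {
        do_range(0);
        do_range(1);
        return 0;
    }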
diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
index 32bce844c2e1..8313ca3324ec 100644
--- a/fs/ext4/mmp.c
+++ b/fs/ext4/mmp.c
@@ -20,8 +20,7 @@ static __le32 ext4_mmp_csum(struct super_block *sb, struct mmp_struct *mmp)
20 20
21static int ext4_mmp_csum_verify(struct super_block *sb, struct mmp_struct *mmp) 21static int ext4_mmp_csum_verify(struct super_block *sb, struct mmp_struct *mmp)
22{ 22{
23 if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, 23 if (!ext4_has_metadata_csum(sb))
24 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
25 return 1; 24 return 1;
26 25
27 return mmp->mmp_checksum == ext4_mmp_csum(sb, mmp); 26 return mmp->mmp_checksum == ext4_mmp_csum(sb, mmp);
@@ -29,8 +28,7 @@ static int ext4_mmp_csum_verify(struct super_block *sb, struct mmp_struct *mmp)
29 28
30static void ext4_mmp_csum_set(struct super_block *sb, struct mmp_struct *mmp) 29static void ext4_mmp_csum_set(struct super_block *sb, struct mmp_struct *mmp)
31{ 30{
32 if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, 31 if (!ext4_has_metadata_csum(sb))
33 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
34 return; 32 return;
35 33
36 mmp->mmp_checksum = ext4_mmp_csum(sb, mmp); 34 mmp->mmp_checksum = ext4_mmp_csum(sb, mmp);
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index 671a74b14fd7..9f2311bc9c4f 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -27,120 +27,26 @@
27 * @lblock: logical block number to find an extent path 27 * @lblock: logical block number to find an extent path
28 * @path: pointer to an extent path pointer (for output) 28 * @path: pointer to an extent path pointer (for output)
29 * 29 *
30 * ext4_ext_find_extent wrapper. Return 0 on success, or a negative error value 30 * ext4_find_extent wrapper. Return 0 on success, or a negative error value
31 * on failure. 31 * on failure.
32 */ 32 */
33static inline int 33static inline int
34get_ext_path(struct inode *inode, ext4_lblk_t lblock, 34get_ext_path(struct inode *inode, ext4_lblk_t lblock,
35 struct ext4_ext_path **orig_path) 35 struct ext4_ext_path **ppath)
36{ 36{
37 int ret = 0;
38 struct ext4_ext_path *path; 37 struct ext4_ext_path *path;
39 38
40 path = ext4_ext_find_extent(inode, lblock, *orig_path, EXT4_EX_NOCACHE); 39 path = ext4_find_extent(inode, lblock, ppath, EXT4_EX_NOCACHE);
41 if (IS_ERR(path)) 40 if (IS_ERR(path))
42 ret = PTR_ERR(path); 41 return PTR_ERR(path);
43 else if (path[ext_depth(inode)].p_ext == NULL) 42 if (path[ext_depth(inode)].p_ext == NULL) {
44 ret = -ENODATA; 43 ext4_ext_drop_refs(path);
45 else 44 kfree(path);
46 *orig_path = path; 45 *ppath = NULL;
47 46 return -ENODATA;
48 return ret;
49}
50
51/**
52 * copy_extent_status - Copy the extent's initialization status
53 *
54 * @src: an extent for getting initialize status
55 * @dest: an extent to be set the status
56 */
57static void
58copy_extent_status(struct ext4_extent *src, struct ext4_extent *dest)
59{
60 if (ext4_ext_is_unwritten(src))
61 ext4_ext_mark_unwritten(dest);
62 else
63 dest->ee_len = cpu_to_le16(ext4_ext_get_actual_len(dest));
64}
65
66/**
67 * mext_next_extent - Search for the next extent and set it to "extent"
68 *
69 * @inode: inode which is searched
70 * @path: this will obtain data for the next extent
71 * @extent: pointer to the next extent we have just gotten
72 *
73 * Search the next extent in the array of ext4_ext_path structure (@path)
74 * and set it to ext4_extent structure (@extent). In addition, the member of
75 * @path (->p_ext) also points the next extent. Return 0 on success, 1 if
76 * ext4_ext_path structure refers to the last extent, or a negative error
77 * value on failure.
78 */
79int
80mext_next_extent(struct inode *inode, struct ext4_ext_path *path,
81 struct ext4_extent **extent)
82{
83 struct ext4_extent_header *eh;
84 int ppos, leaf_ppos = path->p_depth;
85
86 ppos = leaf_ppos;
87 if (EXT_LAST_EXTENT(path[ppos].p_hdr) > path[ppos].p_ext) {
88 /* leaf block */
89 *extent = ++path[ppos].p_ext;
90 path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext);
91 return 0;
92 }
93
94 while (--ppos >= 0) {
95 if (EXT_LAST_INDEX(path[ppos].p_hdr) >
96 path[ppos].p_idx) {
97 int cur_ppos = ppos;
98
99 /* index block */
100 path[ppos].p_idx++;
101 path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx);
102 if (path[ppos+1].p_bh)
103 brelse(path[ppos+1].p_bh);
104 path[ppos+1].p_bh =
105 sb_bread(inode->i_sb, path[ppos].p_block);
106 if (!path[ppos+1].p_bh)
107 return -EIO;
108 path[ppos+1].p_hdr =
109 ext_block_hdr(path[ppos+1].p_bh);
110
111 /* Halfway index block */
112 while (++cur_ppos < leaf_ppos) {
113 path[cur_ppos].p_idx =
114 EXT_FIRST_INDEX(path[cur_ppos].p_hdr);
115 path[cur_ppos].p_block =
116 ext4_idx_pblock(path[cur_ppos].p_idx);
117 if (path[cur_ppos+1].p_bh)
118 brelse(path[cur_ppos+1].p_bh);
119 path[cur_ppos+1].p_bh = sb_bread(inode->i_sb,
120 path[cur_ppos].p_block);
121 if (!path[cur_ppos+1].p_bh)
122 return -EIO;
123 path[cur_ppos+1].p_hdr =
124 ext_block_hdr(path[cur_ppos+1].p_bh);
125 }
126
127 path[leaf_ppos].p_ext = *extent = NULL;
128
129 eh = path[leaf_ppos].p_hdr;
130 if (le16_to_cpu(eh->eh_entries) == 0)
131 /* empty leaf is found */
132 return -ENODATA;
133
134 /* leaf block */
135 path[leaf_ppos].p_ext = *extent =
136 EXT_FIRST_EXTENT(path[leaf_ppos].p_hdr);
137 path[leaf_ppos].p_block =
138 ext4_ext_pblock(path[leaf_ppos].p_ext);
139 return 0;
140 }
141 } 47 }
142 /* We found the last extent */ 48 *ppath = path;
143 return 1; 49 return 0;
144} 50}
145 51
146/** 52/**
@@ -178,417 +84,6 @@ ext4_double_up_write_data_sem(struct inode *orig_inode,
178} 84}
179 85
180/** 86/**
181 * mext_insert_across_blocks - Insert extents across leaf block
182 *
183 * @handle: journal handle
184 * @orig_inode: original inode
185 * @o_start: first original extent to be changed
186 * @o_end: last original extent to be changed
187 * @start_ext: first new extent to be inserted
188 * @new_ext: middle of new extent to be inserted
189 * @end_ext: last new extent to be inserted
190 *
191 * Allocate a new leaf block and insert extents into it. Return 0 on success,
192 * or a negative error value on failure.
193 */
194static int
195mext_insert_across_blocks(handle_t *handle, struct inode *orig_inode,
196 struct ext4_extent *o_start, struct ext4_extent *o_end,
197 struct ext4_extent *start_ext, struct ext4_extent *new_ext,
198 struct ext4_extent *end_ext)
199{
200 struct ext4_ext_path *orig_path = NULL;
201 ext4_lblk_t eblock = 0;
202 int new_flag = 0;
203 int end_flag = 0;
204 int err = 0;
205
206 if (start_ext->ee_len && new_ext->ee_len && end_ext->ee_len) {
207 if (o_start == o_end) {
208
209 /* start_ext new_ext end_ext
210 * donor |---------|-----------|--------|
211 * orig |------------------------------|
212 */
213 end_flag = 1;
214 } else {
215
216 /* start_ext new_ext end_ext
217 * donor |---------|----------|---------|
218 * orig |---------------|--------------|
219 */
220 o_end->ee_block = end_ext->ee_block;
221 o_end->ee_len = end_ext->ee_len;
222 ext4_ext_store_pblock(o_end, ext4_ext_pblock(end_ext));
223 }
224
225 o_start->ee_len = start_ext->ee_len;
226 eblock = le32_to_cpu(start_ext->ee_block);
227 new_flag = 1;
228
229 } else if (start_ext->ee_len && new_ext->ee_len &&
230 !end_ext->ee_len && o_start == o_end) {
231
232 /* start_ext new_ext
233 * donor |--------------|---------------|
234 * orig |------------------------------|
235 */
236 o_start->ee_len = start_ext->ee_len;
237 eblock = le32_to_cpu(start_ext->ee_block);
238 new_flag = 1;
239
240 } else if (!start_ext->ee_len && new_ext->ee_len &&
241 end_ext->ee_len && o_start == o_end) {
242
243 /* new_ext end_ext
244 * donor |--------------|---------------|
245 * orig |------------------------------|
246 */
247 o_end->ee_block = end_ext->ee_block;
248 o_end->ee_len = end_ext->ee_len;
249 ext4_ext_store_pblock(o_end, ext4_ext_pblock(end_ext));
250
251 /*
252 * Set 0 to the extent block if new_ext was
253 * the first block.
254 */
255 if (new_ext->ee_block)
256 eblock = le32_to_cpu(new_ext->ee_block);
257
258 new_flag = 1;
259 } else {
260 ext4_debug("ext4 move extent: Unexpected insert case\n");
261 return -EIO;
262 }
263
264 if (new_flag) {
265 err = get_ext_path(orig_inode, eblock, &orig_path);
266 if (err)
267 goto out;
268
269 if (ext4_ext_insert_extent(handle, orig_inode,
270 orig_path, new_ext, 0))
271 goto out;
272 }
273
274 if (end_flag) {
275 err = get_ext_path(orig_inode,
276 le32_to_cpu(end_ext->ee_block) - 1, &orig_path);
277 if (err)
278 goto out;
279
280 if (ext4_ext_insert_extent(handle, orig_inode,
281 orig_path, end_ext, 0))
282 goto out;
283 }
284out:
285 if (orig_path) {
286 ext4_ext_drop_refs(orig_path);
287 kfree(orig_path);
288 }
289
290 return err;
291
292}
293
294/**
295 * mext_insert_inside_block - Insert new extent to the extent block
296 *
297 * @o_start: first original extent to be moved
298 * @o_end: last original extent to be moved
299 * @start_ext: first new extent to be inserted
300 * @new_ext: middle of new extent to be inserted
301 * @end_ext: last new extent to be inserted
302 * @eh: extent header of target leaf block
303 * @range_to_move: used to decide how to insert extent
304 *
305 * Insert extents into the leaf block. The extent (@o_start) is overwritten
306 * by inserted extents.
307 */
308static void
309mext_insert_inside_block(struct ext4_extent *o_start,
310 struct ext4_extent *o_end,
311 struct ext4_extent *start_ext,
312 struct ext4_extent *new_ext,
313 struct ext4_extent *end_ext,
314 struct ext4_extent_header *eh,
315 int range_to_move)
316{
317 int i = 0;
318 unsigned long len;
319
320 /* Move the existing extents */
321 if (range_to_move && o_end < EXT_LAST_EXTENT(eh)) {
322 len = (unsigned long)(EXT_LAST_EXTENT(eh) + 1) -
323 (unsigned long)(o_end + 1);
324 memmove(o_end + 1 + range_to_move, o_end + 1, len);
325 }
326
327 /* Insert start entry */
328 if (start_ext->ee_len)
329 o_start[i++].ee_len = start_ext->ee_len;
330
331 /* Insert new entry */
332 if (new_ext->ee_len) {
333 o_start[i] = *new_ext;
334 ext4_ext_store_pblock(&o_start[i++], ext4_ext_pblock(new_ext));
335 }
336
337 /* Insert end entry */
338 if (end_ext->ee_len)
339 o_start[i] = *end_ext;
340
341 /* Increment the total entries counter on the extent block */
342 le16_add_cpu(&eh->eh_entries, range_to_move);
343}
344
345/**
346 * mext_insert_extents - Insert new extent
347 *
348 * @handle: journal handle
349 * @orig_inode: original inode
350 * @orig_path: path indicates first extent to be changed
351 * @o_start: first original extent to be changed
352 * @o_end: last original extent to be changed
353 * @start_ext: first new extent to be inserted
354 * @new_ext: middle of new extent to be inserted
355 * @end_ext: last new extent to be inserted
356 *
357 * Call the function to insert extents. If we cannot add more extents into
358 * the leaf block, we call mext_insert_across_blocks() to create a
359 * new leaf block. Otherwise call mext_insert_inside_block(). Return 0
360 * on success, or a negative error value on failure.
361 */
362static int
363mext_insert_extents(handle_t *handle, struct inode *orig_inode,
364 struct ext4_ext_path *orig_path,
365 struct ext4_extent *o_start,
366 struct ext4_extent *o_end,
367 struct ext4_extent *start_ext,
368 struct ext4_extent *new_ext,
369 struct ext4_extent *end_ext)
370{
371 struct ext4_extent_header *eh;
372 unsigned long need_slots, slots_range;
373 int range_to_move, depth, ret;
374
375 /*
376 * The extents need to be inserted
377 * start_extent + new_extent + end_extent.
378 */
379 need_slots = (start_ext->ee_len ? 1 : 0) + (end_ext->ee_len ? 1 : 0) +
380 (new_ext->ee_len ? 1 : 0);
381
382 /* The number of slots between start and end */
383 slots_range = ((unsigned long)(o_end + 1) - (unsigned long)o_start + 1)
384 / sizeof(struct ext4_extent);
385
386 /* Range to move the end of extent */
387 range_to_move = need_slots - slots_range;
388 depth = orig_path->p_depth;
389 orig_path += depth;
390 eh = orig_path->p_hdr;
391
392 if (depth) {
393 /* Register to journal */
394 BUFFER_TRACE(orig_path->p_bh, "get_write_access");
395 ret = ext4_journal_get_write_access(handle, orig_path->p_bh);
396 if (ret)
397 return ret;
398 }
399
400 /* Expansion */
401 if (range_to_move > 0 &&
402 (range_to_move > le16_to_cpu(eh->eh_max)
403 - le16_to_cpu(eh->eh_entries))) {
404
405 ret = mext_insert_across_blocks(handle, orig_inode, o_start,
406 o_end, start_ext, new_ext, end_ext);
407 if (ret < 0)
408 return ret;
409 } else
410 mext_insert_inside_block(o_start, o_end, start_ext, new_ext,
411 end_ext, eh, range_to_move);
412
413 return ext4_ext_dirty(handle, orig_inode, orig_path);
414}
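
mext_insert_extents() above decides whether the three pieces still fit in the current leaf by comparing the slots they need with the slots being replaced and with the free room left in the header. A hedged sketch of just that arithmetic; fits_in_leaf() and its plain-integer parameters (eh_entries/eh_max stand in for the real little-endian header fields, span_slots for the o_start..o_end range) are assumptions for illustration.

#include <stdbool.h>
#include <stdio.h>

/*
 * Returns true when the pieces can be inserted in place; false means
 * the caller has to create a new leaf (insert across blocks).
 * 'span_slots' is the number of original slots from o_start to o_end
 * inclusive; a piece counts only if its length is non-zero.
 */
static bool fits_in_leaf(int start_len, int new_len, int end_len,
                         int span_slots, int eh_entries, int eh_max)
{
        int need_slots = (start_len ? 1 : 0) + (new_len ? 1 : 0) +
                         (end_len ? 1 : 0);
        int range_to_move = need_slots - span_slots;

        /* Growth only matters when it exceeds the free slots left. */
        return !(range_to_move > 0 && range_to_move > eh_max - eh_entries);
}

int main(void)
{
        printf("%d\n", fits_in_leaf(4, 6, 10, 1, 340, 340)); /* 0: leaf is full */
        printf("%d\n", fits_in_leaf(4, 6, 10, 1, 300, 340)); /* 1: fits in place */
        return 0;
}
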
415
416/**
417 * mext_leaf_block - Move one leaf extent block into the inode.
418 *
419 * @handle: journal handle
420 * @orig_inode: original inode
421 * @orig_path: path indicates first extent to be changed
422 * @dext: donor extent
423 * @from: start offset on the target file
424 *
425 * In order to insert extents into the leaf block, we must divide the extent
426 * in the leaf block into three extents. The one is located to be inserted
427 * extents, and the others are located around it.
428 *
429 * Therefore, this function creates structures to save extents of the leaf
430 * block, and inserts extents by calling mext_insert_extents() with
431 * created extents. Return 0 on success, or a negative error value on failure.
432 */
433static int
434mext_leaf_block(handle_t *handle, struct inode *orig_inode,
435 struct ext4_ext_path *orig_path, struct ext4_extent *dext,
436 ext4_lblk_t *from)
437{
438 struct ext4_extent *oext, *o_start, *o_end, *prev_ext;
439 struct ext4_extent new_ext, start_ext, end_ext;
440 ext4_lblk_t new_ext_end;
441 int oext_alen, new_ext_alen, end_ext_alen;
442 int depth = ext_depth(orig_inode);
443 int ret;
444
445 start_ext.ee_block = end_ext.ee_block = 0;
446 o_start = o_end = oext = orig_path[depth].p_ext;
447 oext_alen = ext4_ext_get_actual_len(oext);
448 start_ext.ee_len = end_ext.ee_len = 0;
449
450 new_ext.ee_block = cpu_to_le32(*from);
451 ext4_ext_store_pblock(&new_ext, ext4_ext_pblock(dext));
452 new_ext.ee_len = dext->ee_len;
453 new_ext_alen = ext4_ext_get_actual_len(&new_ext);
454 new_ext_end = le32_to_cpu(new_ext.ee_block) + new_ext_alen - 1;
455
456 /*
457 * Case: original extent is first
458 * oext |--------|
459 * new_ext |--|
460 * start_ext |--|
461 */
462 if (le32_to_cpu(oext->ee_block) < le32_to_cpu(new_ext.ee_block) &&
463 le32_to_cpu(new_ext.ee_block) <
464 le32_to_cpu(oext->ee_block) + oext_alen) {
465 start_ext.ee_len = cpu_to_le16(le32_to_cpu(new_ext.ee_block) -
466 le32_to_cpu(oext->ee_block));
467 start_ext.ee_block = oext->ee_block;
468 copy_extent_status(oext, &start_ext);
469 } else if (oext > EXT_FIRST_EXTENT(orig_path[depth].p_hdr)) {
470 prev_ext = oext - 1;
471 /*
472 * We can merge new_ext into previous extent,
473 * if these are contiguous and same extent type.
474 */
475 if (ext4_can_extents_be_merged(orig_inode, prev_ext,
476 &new_ext)) {
477 o_start = prev_ext;
478 start_ext.ee_len = cpu_to_le16(
479 ext4_ext_get_actual_len(prev_ext) +
480 new_ext_alen);
481 start_ext.ee_block = oext->ee_block;
482 copy_extent_status(prev_ext, &start_ext);
483 new_ext.ee_len = 0;
484 }
485 }
486
487 /*
488 * Case: new_ext_end must be less than oext
489 * oext |-----------|
490 * new_ext |-------|
491 */
492 if (le32_to_cpu(oext->ee_block) + oext_alen - 1 < new_ext_end) {
493 EXT4_ERROR_INODE(orig_inode,
494 "new_ext_end(%u) should be less than or equal to "
495 "oext->ee_block(%u) + oext_alen(%d) - 1",
496 new_ext_end, le32_to_cpu(oext->ee_block),
497 oext_alen);
498 ret = -EIO;
499 goto out;
500 }
501
502 /*
503 * Case: new_ext is smaller than original extent
504 * oext |---------------|
505 * new_ext |-----------|
506 * end_ext |---|
507 */
508 if (le32_to_cpu(oext->ee_block) <= new_ext_end &&
509 new_ext_end < le32_to_cpu(oext->ee_block) + oext_alen - 1) {
510 end_ext.ee_len =
511 cpu_to_le16(le32_to_cpu(oext->ee_block) +
512 oext_alen - 1 - new_ext_end);
513 copy_extent_status(oext, &end_ext);
514 end_ext_alen = ext4_ext_get_actual_len(&end_ext);
515 ext4_ext_store_pblock(&end_ext,
516 (ext4_ext_pblock(o_end) + oext_alen - end_ext_alen));
517 end_ext.ee_block =
518 cpu_to_le32(le32_to_cpu(o_end->ee_block) +
519 oext_alen - end_ext_alen);
520 }
521
522 ret = mext_insert_extents(handle, orig_inode, orig_path, o_start,
523 o_end, &start_ext, &new_ext, &end_ext);
524out:
525 return ret;
526}
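
mext_leaf_block() above carves the original extent into a head piece in front of the donor range, the donor-backed middle, and a tail piece behind it (start_ext, new_ext, end_ext). A small sketch of just that logical-block arithmetic; struct range and split_around() are hypothetical, and the physical-block and unwritten-status handling is left out.

#include <stdio.h>

struct range { unsigned start, len; };

/*
 * Split 'orig' around 'mid' (which must lie inside 'orig') into the
 * piece before 'mid' and the piece after it.  Either piece may come
 * back with len == 0, matching the start_ext/end_ext handling above.
 */
static void split_around(struct range orig, struct range mid,
                         struct range *head, struct range *tail)
{
        unsigned orig_end = orig.start + orig.len;      /* exclusive */
        unsigned mid_end = mid.start + mid.len;         /* exclusive */

        head->start = orig.start;
        head->len = mid.start > orig.start ? mid.start - orig.start : 0;

        tail->start = mid_end;
        tail->len = orig_end > mid_end ? orig_end - mid_end : 0;
}

int main(void)
{
        struct range orig = { 100, 50 }, mid = { 120, 10 }, head, tail;

        split_around(orig, mid, &head, &tail);
        printf("head [%u,+%u)  mid [%u,+%u)  tail [%u,+%u)\n",
               head.start, head.len, mid.start, mid.len,
               tail.start, tail.len);
        return 0;
}
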
527
528/**
529 * mext_calc_swap_extents - Calculate extents for extent swapping.
530 *
531 * @tmp_dext: the extent that will belong to the original inode
532 * @tmp_oext: the extent that will belong to the donor inode
533 * @orig_off: block offset of original inode
534 * @donor_off: block offset of donor inode
535 * @max_count: the maximum length of extents
536 *
537 * Return 0 on success, or a negative error value on failure.
538 */
539static int
540mext_calc_swap_extents(struct ext4_extent *tmp_dext,
541 struct ext4_extent *tmp_oext,
542 ext4_lblk_t orig_off, ext4_lblk_t donor_off,
543 ext4_lblk_t max_count)
544{
545 ext4_lblk_t diff, orig_diff;
546 struct ext4_extent dext_old, oext_old;
547
548 BUG_ON(orig_off != donor_off);
549
550 /* original and donor extents have to cover the same block offset */
551 if (orig_off < le32_to_cpu(tmp_oext->ee_block) ||
552 le32_to_cpu(tmp_oext->ee_block) +
553 ext4_ext_get_actual_len(tmp_oext) - 1 < orig_off)
554 return -ENODATA;
555
556 if (orig_off < le32_to_cpu(tmp_dext->ee_block) ||
557 le32_to_cpu(tmp_dext->ee_block) +
558 ext4_ext_get_actual_len(tmp_dext) - 1 < orig_off)
559 return -ENODATA;
560
561 dext_old = *tmp_dext;
562 oext_old = *tmp_oext;
563
564 /* When tmp_dext is too large, pick up the target range. */
565 diff = donor_off - le32_to_cpu(tmp_dext->ee_block);
566
567 ext4_ext_store_pblock(tmp_dext, ext4_ext_pblock(tmp_dext) + diff);
568 le32_add_cpu(&tmp_dext->ee_block, diff);
569 le16_add_cpu(&tmp_dext->ee_len, -diff);
570
571 if (max_count < ext4_ext_get_actual_len(tmp_dext))
572 tmp_dext->ee_len = cpu_to_le16(max_count);
573
574 orig_diff = orig_off - le32_to_cpu(tmp_oext->ee_block);
575 ext4_ext_store_pblock(tmp_oext, ext4_ext_pblock(tmp_oext) + orig_diff);
576
577 /* Adjust extent length if donor extent is larger than orig */
578 if (ext4_ext_get_actual_len(tmp_dext) >
579 ext4_ext_get_actual_len(tmp_oext) - orig_diff)
580 tmp_dext->ee_len = cpu_to_le16(le16_to_cpu(tmp_oext->ee_len) -
581 orig_diff);
582
583 tmp_oext->ee_len = cpu_to_le16(ext4_ext_get_actual_len(tmp_dext));
584
585 copy_extent_status(&oext_old, tmp_dext);
586 copy_extent_status(&dext_old, tmp_oext);
587
588 return 0;
589}
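
mext_calc_swap_extents() above trims the donor and original extents so that both describe the same logical window before they are exchanged. A hedged sketch of the trimming arithmetic only; struct ext and calc_swap_window() are illustrative, and the unwritten-status copying done by copy_extent_status() is omitted.

#include <stdio.h>

struct ext { unsigned block, len, pblk; };

/*
 * Advance both extents so they start at 'off', then cap the window at
 * 'max_count' and at whatever the shorter extent allows, mirroring the
 * diff/orig_diff adjustments above.
 */
static void calc_swap_window(struct ext *d, struct ext *o,
                             unsigned off, unsigned max_count)
{
        unsigned d_diff = off - d->block;
        unsigned o_diff = off - o->block;

        d->pblk += d_diff;  d->block += d_diff;  d->len -= d_diff;
        o->pblk += o_diff;  o->block += o_diff;  o->len -= o_diff;

        if (d->len > max_count)
                d->len = max_count;
        if (d->len > o->len)
                d->len = o->len;
        o->len = d->len;
}

int main(void)
{
        struct ext donor = { 90, 40, 5000 }, orig = { 100, 25, 9000 };

        calc_swap_window(&donor, &orig, 100, 16);
        printf("donor [%u,+%u)@%u  orig [%u,+%u)@%u\n",
               donor.block, donor.len, donor.pblk,
               orig.block, orig.len, orig.pblk);
        return 0;
}
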
590
591/**
592 * mext_check_coverage - Check that all extents in range have the same type 87 * mext_check_coverage - Check that all extents in range have the same type
593 * 88 *
594 * @inode: inode in question 89 * @inode: inode in question
@@ -619,171 +114,25 @@ mext_check_coverage(struct inode *inode, ext4_lblk_t from, ext4_lblk_t count,
619 } 114 }
620 ret = 1; 115 ret = 1;
621out: 116out:
622 if (path) { 117 ext4_ext_drop_refs(path);
623 ext4_ext_drop_refs(path); 118 kfree(path);
624 kfree(path);
625 }
626 return ret; 119 return ret;
627} 120}
628 121
629/** 122/**
630 * mext_replace_branches - Replace original extents with new extents
631 *
632 * @handle: journal handle
633 * @orig_inode: original inode
634 * @donor_inode: donor inode
635 * @from: block offset of orig_inode
636 * @count: block count to be replaced
637 * @err: pointer to save return value
638 *
639 * Replace original inode extents and donor inode extents page by page.
640 * We implement this replacement in the following three steps:
641 * 1. Save the block information of original and donor inodes into
642 * dummy extents.
643 * 2. Change the block information of original inode to point at the
644 * donor inode blocks.
645 * 3. Change the block information of donor inode to point at the saved
646 * original inode blocks in the dummy extents.
647 *
648 * Return replaced block count.
649 */
650static int
651mext_replace_branches(handle_t *handle, struct inode *orig_inode,
652 struct inode *donor_inode, ext4_lblk_t from,
653 ext4_lblk_t count, int *err)
654{
655 struct ext4_ext_path *orig_path = NULL;
656 struct ext4_ext_path *donor_path = NULL;
657 struct ext4_extent *oext, *dext;
658 struct ext4_extent tmp_dext, tmp_oext;
659 ext4_lblk_t orig_off = from, donor_off = from;
660 int depth;
661 int replaced_count = 0;
662 int dext_alen;
663
664 *err = ext4_es_remove_extent(orig_inode, from, count);
665 if (*err)
666 goto out;
667
668 *err = ext4_es_remove_extent(donor_inode, from, count);
669 if (*err)
670 goto out;
671
672 /* Get the original extent for the block "orig_off" */
673 *err = get_ext_path(orig_inode, orig_off, &orig_path);
674 if (*err)
675 goto out;
676
677 /* Get the donor extent for the head */
678 *err = get_ext_path(donor_inode, donor_off, &donor_path);
679 if (*err)
680 goto out;
681 depth = ext_depth(orig_inode);
682 oext = orig_path[depth].p_ext;
683 tmp_oext = *oext;
684
685 depth = ext_depth(donor_inode);
686 dext = donor_path[depth].p_ext;
687 if (unlikely(!dext))
688 goto missing_donor_extent;
689 tmp_dext = *dext;
690
691 *err = mext_calc_swap_extents(&tmp_dext, &tmp_oext, orig_off,
692 donor_off, count);
693 if (*err)
694 goto out;
695
696 /* Loop for the donor extents */
697 while (1) {
698 /* The extent for donor must be found. */
699 if (unlikely(!dext)) {
700 missing_donor_extent:
701 EXT4_ERROR_INODE(donor_inode,
702 "The extent for donor must be found");
703 *err = -EIO;
704 goto out;
705 } else if (donor_off != le32_to_cpu(tmp_dext.ee_block)) {
706 EXT4_ERROR_INODE(donor_inode,
707 "Donor offset(%u) and the first block of donor "
708 "extent(%u) should be equal",
709 donor_off,
710 le32_to_cpu(tmp_dext.ee_block));
711 *err = -EIO;
712 goto out;
713 }
714
715 /* Set donor extent to orig extent */
716 *err = mext_leaf_block(handle, orig_inode,
717 orig_path, &tmp_dext, &orig_off);
718 if (*err)
719 goto out;
720
721 /* Set orig extent to donor extent */
722 *err = mext_leaf_block(handle, donor_inode,
723 donor_path, &tmp_oext, &donor_off);
724 if (*err)
725 goto out;
726
727 dext_alen = ext4_ext_get_actual_len(&tmp_dext);
728 replaced_count += dext_alen;
729 donor_off += dext_alen;
730 orig_off += dext_alen;
731
732 BUG_ON(replaced_count > count);
733 /* Already moved the expected blocks */
734 if (replaced_count >= count)
735 break;
736
737 if (orig_path)
738 ext4_ext_drop_refs(orig_path);
739 *err = get_ext_path(orig_inode, orig_off, &orig_path);
740 if (*err)
741 goto out;
742 depth = ext_depth(orig_inode);
743 oext = orig_path[depth].p_ext;
744 tmp_oext = *oext;
745
746 if (donor_path)
747 ext4_ext_drop_refs(donor_path);
748 *err = get_ext_path(donor_inode, donor_off, &donor_path);
749 if (*err)
750 goto out;
751 depth = ext_depth(donor_inode);
752 dext = donor_path[depth].p_ext;
753 tmp_dext = *dext;
754
755 *err = mext_calc_swap_extents(&tmp_dext, &tmp_oext, orig_off,
756 donor_off, count - replaced_count);
757 if (*err)
758 goto out;
759 }
760
761out:
762 if (orig_path) {
763 ext4_ext_drop_refs(orig_path);
764 kfree(orig_path);
765 }
766 if (donor_path) {
767 ext4_ext_drop_refs(donor_path);
768 kfree(donor_path);
769 }
770
771 return replaced_count;
772}
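
The deleted mext_replace_branches() above is, per its own comment, a swap of physical block runs done through temporary "dummy" copies, which is why the commit can replace it with ext4_swap_extents(). A toy sketch of that three-step exchange; struct ext and swap_mappings() are hypothetical and there is no journaling, path lookup or chunking here.

#include <stdio.h>

struct ext { unsigned lblk, len, pblk; };

/*
 * 1. save both mappings into temporaries, 2. point the original
 * record at the donor blocks, 3. point the donor record at the saved
 * original blocks.
 */
static void swap_mappings(struct ext *orig, struct ext *donor)
{
        struct ext tmp_o = *orig, tmp_d = *donor;

        orig->pblk = tmp_d.pblk;
        donor->pblk = tmp_o.pblk;
}

int main(void)
{
        struct ext o = { 0, 8, 1000 }, d = { 0, 8, 2000 };

        swap_mappings(&o, &d);
        printf("orig -> pblk %u, donor -> pblk %u\n", o.pblk, d.pblk);
        return 0;
}
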
773
774/**
775 * mext_page_double_lock - Grab and lock pages on both @inode1 and @inode2 123 * mext_page_double_lock - Grab and lock pages on both @inode1 and @inode2
776 * 124 *
777 * @inode1: the inode structure 125 * @inode1: the inode structure
778 * @inode2: the inode structure 126 * @inode2: the inode structure
779 * @index: page index 127 * @index1: page index
128 * @index2: page index
780 * @page: result page vector 129 * @page: result page vector
781 * 130 *
782 * Grab two locked pages for the inodes, locking in inode order 131 * Grab two locked pages for the inodes, locking in inode order
783 */ 132 */
784static int 133static int
785mext_page_double_lock(struct inode *inode1, struct inode *inode2, 134mext_page_double_lock(struct inode *inode1, struct inode *inode2,
786 pgoff_t index, struct page *page[2]) 135 pgoff_t index1, pgoff_t index2, struct page *page[2])
787{ 136{
788 struct address_space *mapping[2]; 137 struct address_space *mapping[2];
789 unsigned fl = AOP_FLAG_NOFS; 138 unsigned fl = AOP_FLAG_NOFS;
@@ -793,15 +142,18 @@ mext_page_double_lock(struct inode *inode1, struct inode *inode2,
793 mapping[0] = inode1->i_mapping; 142 mapping[0] = inode1->i_mapping;
794 mapping[1] = inode2->i_mapping; 143 mapping[1] = inode2->i_mapping;
795 } else { 144 } else {
145 pgoff_t tmp = index1;
146 index1 = index2;
147 index2 = tmp;
796 mapping[0] = inode2->i_mapping; 148 mapping[0] = inode2->i_mapping;
797 mapping[1] = inode1->i_mapping; 149 mapping[1] = inode1->i_mapping;
798 } 150 }
799 151
800 page[0] = grab_cache_page_write_begin(mapping[0], index, fl); 152 page[0] = grab_cache_page_write_begin(mapping[0], index1, fl);
801 if (!page[0]) 153 if (!page[0])
802 return -ENOMEM; 154 return -ENOMEM;
803 155
804 page[1] = grab_cache_page_write_begin(mapping[1], index, fl); 156 page[1] = grab_cache_page_write_begin(mapping[1], index2, fl);
805 if (!page[1]) { 157 if (!page[1]) {
806 unlock_page(page[0]); 158 unlock_page(page[0]);
807 page_cache_release(page[0]); 159 page_cache_release(page[0]);
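
mext_page_double_lock() above always takes the page of the lower-ordered inode first, so two tasks moving extents between the same pair of files cannot deadlock on each other's page locks. A hedged userspace sketch of the same ordering rule, with pthread mutexes standing in for page locks; double_lock()/double_unlock() are hypothetical helpers.

#include <pthread.h>
#include <stdio.h>

/*
 * Lock two resources in a fixed (address) order so that concurrent
 * callers taking the same pair cannot ABBA-deadlock; mirrors the
 * inode1 < inode2 swap in the hunk above.
 */
static void double_lock(pthread_mutex_t *a, pthread_mutex_t *b)
{
        if (a > b) {                    /* keep 'a' as the lower address */
                pthread_mutex_t *t = a;
                a = b;
                b = t;
        }
        pthread_mutex_lock(a);
        pthread_mutex_lock(b);
}

static void double_unlock(pthread_mutex_t *a, pthread_mutex_t *b)
{
        pthread_mutex_unlock(a);
        pthread_mutex_unlock(b);
}

int main(void)
{
        pthread_mutex_t m1 = PTHREAD_MUTEX_INITIALIZER;
        pthread_mutex_t m2 = PTHREAD_MUTEX_INITIALIZER;

        double_lock(&m1, &m2);
        puts("both locked, in a stable order");
        double_unlock(&m1, &m2);
        return 0;
}

A fixed global ordering (here by address, in the kernel by inode pointer) is the usual way to take two locks of the same class without a lock hierarchy.
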
@@ -893,25 +245,27 @@ out:
893 * @o_filp: file structure of original file 245 * @o_filp: file structure of original file
894 * @donor_inode: donor inode 246 * @donor_inode: donor inode
895 * @orig_page_offset: page index on original file 247 * @orig_page_offset: page index on original file
248 * @donor_page_offset: page index on donor file
896 * @data_offset_in_page: block index where data swapping starts 249 * @data_offset_in_page: block index where data swapping starts
897 * @block_len_in_page: the number of blocks to be swapped 250 * @block_len_in_page: the number of blocks to be swapped
898 * @unwritten: orig extent is unwritten or not 251 * @unwritten: orig extent is unwritten or not
899 * @err: pointer to save return value 252 * @err: pointer to save return value
900 * 253 *
901 * Save the data in original inode blocks and replace original inode extents 254 * Save the data in original inode blocks and replace original inode extents
902 * with donor inode extents by calling mext_replace_branches(). 255 * with donor inode extents by calling ext4_swap_extents().
903 * Finally, write out the saved data in new original inode blocks. Return 256 * Finally, write out the saved data in new original inode blocks. Return
904 * replaced block count. 257 * replaced block count.
905 */ 258 */
906static int 259static int
907move_extent_per_page(struct file *o_filp, struct inode *donor_inode, 260move_extent_per_page(struct file *o_filp, struct inode *donor_inode,
908 pgoff_t orig_page_offset, int data_offset_in_page, 261 pgoff_t orig_page_offset, pgoff_t donor_page_offset,
909 int block_len_in_page, int unwritten, int *err) 262 int data_offset_in_page,
263 int block_len_in_page, int unwritten, int *err)
910{ 264{
911 struct inode *orig_inode = file_inode(o_filp); 265 struct inode *orig_inode = file_inode(o_filp);
912 struct page *pagep[2] = {NULL, NULL}; 266 struct page *pagep[2] = {NULL, NULL};
913 handle_t *handle; 267 handle_t *handle;
914 ext4_lblk_t orig_blk_offset; 268 ext4_lblk_t orig_blk_offset, donor_blk_offset;
915 unsigned long blocksize = orig_inode->i_sb->s_blocksize; 269 unsigned long blocksize = orig_inode->i_sb->s_blocksize;
916 unsigned int w_flags = 0; 270 unsigned int w_flags = 0;
917 unsigned int tmp_data_size, data_size, replaced_size; 271 unsigned int tmp_data_size, data_size, replaced_size;
@@ -939,6 +293,9 @@ again:
939 orig_blk_offset = orig_page_offset * blocks_per_page + 293 orig_blk_offset = orig_page_offset * blocks_per_page +
940 data_offset_in_page; 294 data_offset_in_page;
941 295
296 donor_blk_offset = donor_page_offset * blocks_per_page +
297 data_offset_in_page;
298
942 /* Calculate data_size */ 299 /* Calculate data_size */
943 if ((orig_blk_offset + block_len_in_page - 1) == 300 if ((orig_blk_offset + block_len_in_page - 1) ==
944 ((orig_inode->i_size - 1) >> orig_inode->i_blkbits)) { 301 ((orig_inode->i_size - 1) >> orig_inode->i_blkbits)) {
@@ -959,7 +316,7 @@ again:
959 replaced_size = data_size; 316 replaced_size = data_size;
960 317
961 *err = mext_page_double_lock(orig_inode, donor_inode, orig_page_offset, 318 *err = mext_page_double_lock(orig_inode, donor_inode, orig_page_offset,
962 pagep); 319 donor_page_offset, pagep);
963 if (unlikely(*err < 0)) 320 if (unlikely(*err < 0))
964 goto stop_journal; 321 goto stop_journal;
965 /* 322 /*
@@ -978,7 +335,7 @@ again:
978 if (*err) 335 if (*err)
979 goto drop_data_sem; 336 goto drop_data_sem;
980 337
981 unwritten &= mext_check_coverage(donor_inode, orig_blk_offset, 338 unwritten &= mext_check_coverage(donor_inode, donor_blk_offset,
982 block_len_in_page, 1, err); 339 block_len_in_page, 1, err);
983 if (*err) 340 if (*err)
984 goto drop_data_sem; 341 goto drop_data_sem;
@@ -994,9 +351,10 @@ again:
994 *err = -EBUSY; 351 *err = -EBUSY;
995 goto drop_data_sem; 352 goto drop_data_sem;
996 } 353 }
997 replaced_count = mext_replace_branches(handle, orig_inode, 354 replaced_count = ext4_swap_extents(handle, orig_inode,
998 donor_inode, orig_blk_offset, 355 donor_inode, orig_blk_offset,
999 block_len_in_page, err); 356 donor_blk_offset,
357 block_len_in_page, 1, err);
1000 drop_data_sem: 358 drop_data_sem:
1001 ext4_double_up_write_data_sem(orig_inode, donor_inode); 359 ext4_double_up_write_data_sem(orig_inode, donor_inode);
1002 goto unlock_pages; 360 goto unlock_pages;
@@ -1014,9 +372,9 @@ data_copy:
1014 goto unlock_pages; 372 goto unlock_pages;
1015 } 373 }
1016 ext4_double_down_write_data_sem(orig_inode, donor_inode); 374 ext4_double_down_write_data_sem(orig_inode, donor_inode);
1017 replaced_count = mext_replace_branches(handle, orig_inode, donor_inode, 375 replaced_count = ext4_swap_extents(handle, orig_inode, donor_inode,
1018 orig_blk_offset, 376 orig_blk_offset, donor_blk_offset,
1019 block_len_in_page, err); 377 block_len_in_page, 1, err);
1020 ext4_double_up_write_data_sem(orig_inode, donor_inode); 378 ext4_double_up_write_data_sem(orig_inode, donor_inode);
1021 if (*err) { 379 if (*err) {
1022 if (replaced_count) { 380 if (replaced_count) {
@@ -1061,9 +419,9 @@ repair_branches:
1061 * Try to swap extents back to their original places 419 * Try to swap extents back to their original places
1062 */ 420 */
1063 ext4_double_down_write_data_sem(orig_inode, donor_inode); 421 ext4_double_down_write_data_sem(orig_inode, donor_inode);
1064 replaced_count = mext_replace_branches(handle, donor_inode, orig_inode, 422 replaced_count = ext4_swap_extents(handle, donor_inode, orig_inode,
1065 orig_blk_offset, 423 orig_blk_offset, donor_blk_offset,
1066 block_len_in_page, &err2); 424 block_len_in_page, 0, &err2);
1067 ext4_double_up_write_data_sem(orig_inode, donor_inode); 425 ext4_double_up_write_data_sem(orig_inode, donor_inode);
1068 if (replaced_count != block_len_in_page) { 426 if (replaced_count != block_len_in_page) {
1069 EXT4_ERROR_INODE_BLOCK(orig_inode, (sector_t)(orig_blk_offset), 427 EXT4_ERROR_INODE_BLOCK(orig_inode, (sector_t)(orig_blk_offset),
@@ -1093,10 +451,14 @@ mext_check_arguments(struct inode *orig_inode,
1093 struct inode *donor_inode, __u64 orig_start, 451 struct inode *donor_inode, __u64 orig_start,
1094 __u64 donor_start, __u64 *len) 452 __u64 donor_start, __u64 *len)
1095{ 453{
1096 ext4_lblk_t orig_blocks, donor_blocks; 454 __u64 orig_eof, donor_eof;
1097 unsigned int blkbits = orig_inode->i_blkbits; 455 unsigned int blkbits = orig_inode->i_blkbits;
1098 unsigned int blocksize = 1 << blkbits; 456 unsigned int blocksize = 1 << blkbits;
1099 457
458 orig_eof = (i_size_read(orig_inode) + blocksize - 1) >> blkbits;
459 donor_eof = (i_size_read(donor_inode) + blocksize - 1) >> blkbits;
460
461
1100 if (donor_inode->i_mode & (S_ISUID|S_ISGID)) { 462 if (donor_inode->i_mode & (S_ISUID|S_ISGID)) {
1101 ext4_debug("ext4 move extent: suid or sgid is set" 463 ext4_debug("ext4 move extent: suid or sgid is set"
1102 " to donor file [ino:orig %lu, donor %lu]\n", 464 " to donor file [ino:orig %lu, donor %lu]\n",
@@ -1112,7 +474,7 @@ mext_check_arguments(struct inode *orig_inode,
1112 ext4_debug("ext4 move extent: The argument files should " 474 ext4_debug("ext4 move extent: The argument files should "
1113 "not be swapfile [ino:orig %lu, donor %lu]\n", 475 "not be swapfile [ino:orig %lu, donor %lu]\n",
1114 orig_inode->i_ino, donor_inode->i_ino); 476 orig_inode->i_ino, donor_inode->i_ino);
1115 return -EINVAL; 477 return -EBUSY;
1116 } 478 }
1117 479
1118 /* Ext4 move extent supports only extent based file */ 480 /* Ext4 move extent supports only extent based file */
@@ -1132,67 +494,28 @@ mext_check_arguments(struct inode *orig_inode,
1132 } 494 }
1133 495
1134 /* Start offset should be same */ 496 /* Start offset should be same */
1135 if (orig_start != donor_start) { 497 if ((orig_start & ~(PAGE_MASK >> orig_inode->i_blkbits)) !=
498 (donor_start & ~(PAGE_MASK >> orig_inode->i_blkbits))) {
1136 ext4_debug("ext4 move extent: orig and donor's start " 499 ext4_debug("ext4 move extent: orig and donor's start "
1137 "offset are not same [ino:orig %lu, donor %lu]\n", 500 "offset are not alligned [ino:orig %lu, donor %lu]\n",
1138 orig_inode->i_ino, donor_inode->i_ino); 501 orig_inode->i_ino, donor_inode->i_ino);
1139 return -EINVAL; 502 return -EINVAL;
1140 } 503 }
1141 504
1142 if ((orig_start >= EXT_MAX_BLOCKS) || 505 if ((orig_start >= EXT_MAX_BLOCKS) ||
506 (donor_start >= EXT_MAX_BLOCKS) ||
1143 (*len > EXT_MAX_BLOCKS) || 507 (*len > EXT_MAX_BLOCKS) ||
508 (donor_start + *len >= EXT_MAX_BLOCKS) ||
1144 (orig_start + *len >= EXT_MAX_BLOCKS)) { 509 (orig_start + *len >= EXT_MAX_BLOCKS)) {
1145 ext4_debug("ext4 move extent: Can't handle over [%u] blocks " 510 ext4_debug("ext4 move extent: Can't handle over [%u] blocks "
1146 "[ino:orig %lu, donor %lu]\n", EXT_MAX_BLOCKS, 511 "[ino:orig %lu, donor %lu]\n", EXT_MAX_BLOCKS,
1147 orig_inode->i_ino, donor_inode->i_ino); 512 orig_inode->i_ino, donor_inode->i_ino);
1148 return -EINVAL; 513 return -EINVAL;
1149 } 514 }
1150 515 if (orig_eof < orig_start + *len - 1)
1151 if (orig_inode->i_size > donor_inode->i_size) { 516 *len = orig_eof - orig_start;
1152 donor_blocks = (donor_inode->i_size + blocksize - 1) >> blkbits; 517 if (donor_eof < donor_start + *len - 1)
1153 /* TODO: eliminate this artificial restriction */ 518 *len = donor_eof - donor_start;
1154 if (orig_start >= donor_blocks) {
1155 ext4_debug("ext4 move extent: orig start offset "
1156 "[%llu] should be less than donor file blocks "
1157 "[%u] [ino:orig %lu, donor %lu]\n",
1158 orig_start, donor_blocks,
1159 orig_inode->i_ino, donor_inode->i_ino);
1160 return -EINVAL;
1161 }
1162
1163 /* TODO: eliminate this artificial restriction */
1164 if (orig_start + *len > donor_blocks) {
1165 ext4_debug("ext4 move extent: End offset [%llu] should "
1166 "be less than donor file blocks [%u]."
1167 "So adjust length from %llu to %llu "
1168 "[ino:orig %lu, donor %lu]\n",
1169 orig_start + *len, donor_blocks,
1170 *len, donor_blocks - orig_start,
1171 orig_inode->i_ino, donor_inode->i_ino);
1172 *len = donor_blocks - orig_start;
1173 }
1174 } else {
1175 orig_blocks = (orig_inode->i_size + blocksize - 1) >> blkbits;
1176 if (orig_start >= orig_blocks) {
1177 ext4_debug("ext4 move extent: start offset [%llu] "
1178 "should be less than original file blocks "
1179 "[%u] [ino:orig %lu, donor %lu]\n",
1180 orig_start, orig_blocks,
1181 orig_inode->i_ino, donor_inode->i_ino);
1182 return -EINVAL;
1183 }
1184
1185 if (orig_start + *len > orig_blocks) {
1186 ext4_debug("ext4 move extent: Adjust length "
1187 "from %llu to %llu. Because it should be "
1188 "less than original file blocks "
1189 "[ino:orig %lu, donor %lu]\n",
1190 *len, orig_blocks - orig_start,
1191 orig_inode->i_ino, donor_inode->i_ino);
1192 *len = orig_blocks - orig_start;
1193 }
1194 }
1195
1196 if (!*len) { 519 if (!*len) {
1197 ext4_debug("ext4 move extent: len should not be 0 " 520 ext4_debug("ext4 move extent: len should not be 0 "
1198 "[ino:orig %lu, donor %lu]\n", orig_inode->i_ino, 521 "[ino:orig %lu, donor %lu]\n", orig_inode->i_ino,
@@ -1208,60 +531,26 @@ mext_check_arguments(struct inode *orig_inode,
1208 * 531 *
1209 * @o_filp: file structure of the original file 532 * @o_filp: file structure of the original file
1210 * @d_filp: file structure of the donor file 533 * @d_filp: file structure of the donor file
1211 * @orig_start: start offset in block for orig 534 * @orig_blk: start offset in block for orig
1212 * @donor_start: start offset in block for donor 535 * @donor_blk: start offset in block for donor
1213 * @len: the number of blocks to be moved 536 * @len: the number of blocks to be moved
1214 * @moved_len: moved block length 537 * @moved_len: moved block length
1215 * 538 *
1216 * This function returns 0 and sets the moved block length in moved_len 539 * This function returns 0 and sets the moved block length in moved_len
1217 * on success; otherwise it returns an error value. 540 * on success; otherwise it returns an error value.
1218 * 541 *
1219 * Note: ext4_move_extents() proceeds the following order.
1220 * 1:ext4_move_extents() calculates the last block number of moving extent
1221 * function by the start block number (orig_start) and the number of blocks
1222 * to be moved (len) specified as arguments.
1223 * If the {orig, donor}_start points a hole, the extent's start offset
1224 * pointed by ext_cur (current extent), holecheck_path, orig_path are set
1225 * after hole behind.
1226 * 2:Continue step 3 to step 5, until the holecheck_path points to last_extent
1227 * or the ext_cur exceeds the block_end which is last logical block number.
1228 * 3:To get the length of continues area, call mext_next_extent()
1229 * specified with the ext_cur (initial value is holecheck_path) re-cursive,
1230 * until find un-continuous extent, the start logical block number exceeds
1231 * the block_end or the extent points to the last extent.
1232 * 4:Exchange the original inode data with donor inode data
1233 * from orig_page_offset to seq_end_page.
1234 * The start indexes of data are specified as arguments.
1235 * That of the original inode is orig_page_offset,
1236 * and the donor inode is also orig_page_offset
1237 * (To easily handle blocksize != pagesize case, the offset for the
1238 * donor inode is block unit).
1239 * 5:Update holecheck_path and orig_path to points a next proceeding extent,
1240 * then returns to step 2.
1241 * 6:Release holecheck_path, orig_path and set the len to moved_len
1242 * which shows the number of moved blocks.
1243 * The moved_len is useful for the command to calculate the file offset
1244 * for starting next move extent ioctl.
1245 * 7:Return 0 on success, or a negative error value on failure.
1246 */ 542 */
1247int 543int
1248ext4_move_extents(struct file *o_filp, struct file *d_filp, 544ext4_move_extents(struct file *o_filp, struct file *d_filp, __u64 orig_blk,
1249 __u64 orig_start, __u64 donor_start, __u64 len, 545 __u64 donor_blk, __u64 len, __u64 *moved_len)
1250 __u64 *moved_len)
1251{ 546{
1252 struct inode *orig_inode = file_inode(o_filp); 547 struct inode *orig_inode = file_inode(o_filp);
1253 struct inode *donor_inode = file_inode(d_filp); 548 struct inode *donor_inode = file_inode(d_filp);
1254 struct ext4_ext_path *orig_path = NULL, *holecheck_path = NULL; 549 struct ext4_ext_path *path = NULL;
1255 struct ext4_extent *ext_prev, *ext_cur, *ext_dummy;
1256 ext4_lblk_t block_start = orig_start;
1257 ext4_lblk_t block_end, seq_start, add_blocks, file_end, seq_blocks = 0;
1258 ext4_lblk_t rest_blocks;
1259 pgoff_t orig_page_offset = 0, seq_end_page;
1260 int ret, depth, last_extent = 0;
1261 int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits; 550 int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits;
1262 int data_offset_in_page; 551 ext4_lblk_t o_end, o_start = orig_blk;
1263 int block_len_in_page; 552 ext4_lblk_t d_start = donor_blk;
1264 int unwritten; 553 int ret;
1265 554
1266 if (orig_inode->i_sb != donor_inode->i_sb) { 555 if (orig_inode->i_sb != donor_inode->i_sb) {
1267 ext4_debug("ext4 move extent: The argument files " 556 ext4_debug("ext4 move extent: The argument files "
@@ -1303,121 +592,58 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
1303 /* Protect extent tree against block allocations via delalloc */ 592 /* Protect extent tree against block allocations via delalloc */
1304 ext4_double_down_write_data_sem(orig_inode, donor_inode); 593 ext4_double_down_write_data_sem(orig_inode, donor_inode);
1305 /* Check the filesystem environment whether move_extent can be done */ 594 /* Check the filesystem environment whether move_extent can be done */
1306 ret = mext_check_arguments(orig_inode, donor_inode, orig_start, 595 ret = mext_check_arguments(orig_inode, donor_inode, orig_blk,
1307 donor_start, &len); 596 donor_blk, &len);
1308 if (ret) 597 if (ret)
1309 goto out; 598 goto out;
599 o_end = o_start + len;
1310 600
1311 file_end = (i_size_read(orig_inode) - 1) >> orig_inode->i_blkbits; 601 while (o_start < o_end) {
1312 block_end = block_start + len - 1; 602 struct ext4_extent *ex;
1313 if (file_end < block_end) 603 ext4_lblk_t cur_blk, next_blk;
1314 len -= block_end - file_end; 604 pgoff_t orig_page_index, donor_page_index;
605 int offset_in_page;
606 int unwritten, cur_len;
1315 607
1316 ret = get_ext_path(orig_inode, block_start, &orig_path); 608 ret = get_ext_path(orig_inode, o_start, &path);
1317 if (ret) 609 if (ret)
1318 goto out;
1319
1320 /* Get path structure to check the hole */
1321 ret = get_ext_path(orig_inode, block_start, &holecheck_path);
1322 if (ret)
1323 goto out;
1324
1325 depth = ext_depth(orig_inode);
1326 ext_cur = holecheck_path[depth].p_ext;
1327
1328 /*
1329 * Get proper starting location of block replacement if block_start was
1330 * within the hole.
1331 */
1332 if (le32_to_cpu(ext_cur->ee_block) +
1333 ext4_ext_get_actual_len(ext_cur) - 1 < block_start) {
1334 /*
1335 * The hole exists between extents or the tail of
1336 * original file.
1337 */
1338 last_extent = mext_next_extent(orig_inode,
1339 holecheck_path, &ext_cur);
1340 if (last_extent < 0) {
1341 ret = last_extent;
1342 goto out;
1343 }
1344 last_extent = mext_next_extent(orig_inode, orig_path,
1345 &ext_dummy);
1346 if (last_extent < 0) {
1347 ret = last_extent;
1348 goto out; 610 goto out;
1349 } 611 ex = path[path->p_depth].p_ext;
1350 seq_start = le32_to_cpu(ext_cur->ee_block); 612 next_blk = ext4_ext_next_allocated_block(path);
1351 } else if (le32_to_cpu(ext_cur->ee_block) > block_start) 613 cur_blk = le32_to_cpu(ex->ee_block);
1352 /* The hole exists at the beginning of original file. */ 614 cur_len = ext4_ext_get_actual_len(ex);
1353 seq_start = le32_to_cpu(ext_cur->ee_block); 615 /* Check hole before the start pos */
1354 else 616 if (cur_blk + cur_len - 1 < o_start) {
1355 seq_start = block_start; 617 if (next_blk == EXT_MAX_BLOCKS) {
1356 618 o_start = o_end;
1357 /* No blocks within the specified range. */ 619 ret = -ENODATA;
1358 if (le32_to_cpu(ext_cur->ee_block) > block_end) { 620 goto out;
1359 ext4_debug("ext4 move extent: The specified range of file " 621 }
1360 "may be the hole\n"); 622 d_start += next_blk - o_start;
1361 ret = -EINVAL; 623 o_start = next_blk;
1362 goto out;
1363 }
1364
1365 /* Adjust start blocks */
1366 add_blocks = min(le32_to_cpu(ext_cur->ee_block) +
1367 ext4_ext_get_actual_len(ext_cur), block_end + 1) -
1368 max(le32_to_cpu(ext_cur->ee_block), block_start);
1369
1370 while (!last_extent && le32_to_cpu(ext_cur->ee_block) <= block_end) {
1371 seq_blocks += add_blocks;
1372
1373 /* Adjust tail blocks */
1374 if (seq_start + seq_blocks - 1 > block_end)
1375 seq_blocks = block_end - seq_start + 1;
1376
1377 ext_prev = ext_cur;
1378 last_extent = mext_next_extent(orig_inode, holecheck_path,
1379 &ext_cur);
1380 if (last_extent < 0) {
1381 ret = last_extent;
1382 break;
1383 }
1384 add_blocks = ext4_ext_get_actual_len(ext_cur);
1385
1386 /*
1387 * Extend the length of contiguous block (seq_blocks)
1388 * if extents are contiguous.
1389 */
1390 if (ext4_can_extents_be_merged(orig_inode,
1391 ext_prev, ext_cur) &&
1392 block_end >= le32_to_cpu(ext_cur->ee_block) &&
1393 !last_extent)
1394 continue; 624 continue;
1395 625 /* Check hole after the start pos */
1396 /* Is original extent is unwritten */ 626 } else if (cur_blk > o_start) {
1397 unwritten = ext4_ext_is_unwritten(ext_prev); 627 /* Skip hole */
1398 628 d_start += cur_blk - o_start;
1399 data_offset_in_page = seq_start % blocks_per_page; 629 o_start = cur_blk;
1400 630 /* Extent inside requested range ?*/
1401 /* 631 if (cur_blk >= o_end)
1402 * Calculate data blocks count that should be swapped 632 goto out;
1403 * at the first page. 633 } else { /* in_range(o_start, o_blk, o_len) */
1404 */ 634 cur_len += cur_blk - o_start;
1405 if (data_offset_in_page + seq_blocks > blocks_per_page) {
1406 /* Swapped blocks are across pages */
1407 block_len_in_page =
1408 blocks_per_page - data_offset_in_page;
1409 } else {
1410 /* Swapped blocks are in a page */
1411 block_len_in_page = seq_blocks;
1412 } 635 }
1413 636 unwritten = ext4_ext_is_unwritten(ex);
1414 orig_page_offset = seq_start >> 637 if (o_end - o_start < cur_len)
1415 (PAGE_CACHE_SHIFT - orig_inode->i_blkbits); 638 cur_len = o_end - o_start;
1416 seq_end_page = (seq_start + seq_blocks - 1) >> 639
1417 (PAGE_CACHE_SHIFT - orig_inode->i_blkbits); 640 orig_page_index = o_start >> (PAGE_CACHE_SHIFT -
1418 seq_start = le32_to_cpu(ext_cur->ee_block); 641 orig_inode->i_blkbits);
1419 rest_blocks = seq_blocks; 642 donor_page_index = d_start >> (PAGE_CACHE_SHIFT -
1420 643 donor_inode->i_blkbits);
644 offset_in_page = o_start % blocks_per_page;
645 if (cur_len > blocks_per_page - offset_in_page)
646 cur_len = blocks_per_page - offset_in_page;
1421 /* 647 /*
1422 * Up semaphore to avoid following problems: 648 * Up semaphore to avoid following problems:
1423 * a. transaction deadlock among ext4_journal_start, 649 * a. transaction deadlock among ext4_journal_start,
@@ -1426,77 +652,29 @@ ext4_move_extents(struct file *o_filp, struct file *d_filp,
1426 * in move_extent_per_page 652 * in move_extent_per_page
1427 */ 653 */
1428 ext4_double_up_write_data_sem(orig_inode, donor_inode); 654 ext4_double_up_write_data_sem(orig_inode, donor_inode);
1429 655 /* Swap original branches with new branches */
1430 while (orig_page_offset <= seq_end_page) { 656 move_extent_per_page(o_filp, donor_inode,
1431 657 orig_page_index, donor_page_index,
1432 /* Swap original branches with new branches */ 658 offset_in_page, cur_len,
1433 block_len_in_page = move_extent_per_page( 659 unwritten, &ret);
1434 o_filp, donor_inode,
1435 orig_page_offset,
1436 data_offset_in_page,
1437 block_len_in_page,
1438 unwritten, &ret);
1439
1440 /* Count how many blocks we have exchanged */
1441 *moved_len += block_len_in_page;
1442 if (ret < 0)
1443 break;
1444 if (*moved_len > len) {
1445 EXT4_ERROR_INODE(orig_inode,
1446 "We replaced blocks too much! "
1447 "sum of replaced: %llu requested: %llu",
1448 *moved_len, len);
1449 ret = -EIO;
1450 break;
1451 }
1452
1453 orig_page_offset++;
1454 data_offset_in_page = 0;
1455 rest_blocks -= block_len_in_page;
1456 if (rest_blocks > blocks_per_page)
1457 block_len_in_page = blocks_per_page;
1458 else
1459 block_len_in_page = rest_blocks;
1460 }
1461
1462 ext4_double_down_write_data_sem(orig_inode, donor_inode); 660 ext4_double_down_write_data_sem(orig_inode, donor_inode);
1463 if (ret < 0) 661 if (ret < 0)
1464 break; 662 break;
1465 663 o_start += cur_len;
1466 /* Decrease buffer counter */ 664 d_start += cur_len;
1467 if (holecheck_path)
1468 ext4_ext_drop_refs(holecheck_path);
1469 ret = get_ext_path(orig_inode, seq_start, &holecheck_path);
1470 if (ret)
1471 break;
1472 depth = holecheck_path->p_depth;
1473
1474 /* Decrease buffer counter */
1475 if (orig_path)
1476 ext4_ext_drop_refs(orig_path);
1477 ret = get_ext_path(orig_inode, seq_start, &orig_path);
1478 if (ret)
1479 break;
1480
1481 ext_cur = holecheck_path[depth].p_ext;
1482 add_blocks = ext4_ext_get_actual_len(ext_cur);
1483 seq_blocks = 0;
1484
1485 } 665 }
666 *moved_len = o_start - orig_blk;
667 if (*moved_len > len)
668 *moved_len = len;
669
1486out: 670out:
1487 if (*moved_len) { 671 if (*moved_len) {
1488 ext4_discard_preallocations(orig_inode); 672 ext4_discard_preallocations(orig_inode);
1489 ext4_discard_preallocations(donor_inode); 673 ext4_discard_preallocations(donor_inode);
1490 } 674 }
1491 675
1492 if (orig_path) { 676 ext4_ext_drop_refs(path);
1493 ext4_ext_drop_refs(orig_path); 677 kfree(path);
1494 kfree(orig_path);
1495 }
1496 if (holecheck_path) {
1497 ext4_ext_drop_refs(holecheck_path);
1498 kfree(holecheck_path);
1499 }
1500 ext4_double_up_write_data_sem(orig_inode, donor_inode); 678 ext4_double_up_write_data_sem(orig_inode, donor_inode);
1501 ext4_inode_resume_unlocked_dio(orig_inode); 679 ext4_inode_resume_unlocked_dio(orig_inode);
1502 ext4_inode_resume_unlocked_dio(donor_inode); 680 ext4_inode_resume_unlocked_dio(donor_inode);
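
The rewritten ext4_move_extents() loop above walks the original tree one extent at a time, skips holes on both the original and donor side, and caps each chunk so it never crosses a page boundary before handing it to move_extent_per_page(). A hedged userspace sketch of just that walking and chunking, using a hypothetical in-memory extent list in place of the real extent tree lookups.

#include <stdio.h>

struct ext { unsigned start, len; };

/*
 * Cap 'cur_len' at the end of the requested range and at the page
 * boundary containing o_start, mirroring the offset_in_page logic in
 * the loop above.
 */
static unsigned chunk_len(unsigned o_start, unsigned o_end,
                          unsigned cur_len, unsigned blocks_per_page)
{
        unsigned offset_in_page = o_start % blocks_per_page;

        if (o_end - o_start < cur_len)
                cur_len = o_end - o_start;
        if (cur_len > blocks_per_page - offset_in_page)
                cur_len = blocks_per_page - offset_in_page;
        return cur_len;
}

int main(void)
{
        const struct ext extents[] = { { 3, 10 }, { 20, 4 } }; /* hole at 13..19 */
        unsigned o_start = 0, o_end = 24, d_start = 100;
        unsigned blocks_per_page = 4;

        for (unsigned i = 0; i < 2 && o_start < o_end; ) {
                const struct ext *ex = &extents[i];

                if (ex->start + ex->len <= o_start) {   /* extent is behind us */
                        i++;
                        continue;
                }
                if (ex->start > o_start) {              /* skip the hole */
                        d_start += ex->start - o_start;
                        o_start = ex->start;
                }
                unsigned cur_len = ex->start + ex->len - o_start;

                cur_len = chunk_len(o_start, o_end, cur_len, blocks_per_page);
                printf("swap orig %u..%u with donor %u..%u\n",
                       o_start, o_start + cur_len - 1,
                       d_start, d_start + cur_len - 1);
                o_start += cur_len;
                d_start += cur_len;
        }
        return 0;
}
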
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 603e4ebbd0ac..426211882f72 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -53,7 +53,7 @@ static struct buffer_head *ext4_append(handle_t *handle,
53 ext4_lblk_t *block) 53 ext4_lblk_t *block)
54{ 54{
55 struct buffer_head *bh; 55 struct buffer_head *bh;
56 int err = 0; 56 int err;
57 57
58 if (unlikely(EXT4_SB(inode->i_sb)->s_max_dir_size_kb && 58 if (unlikely(EXT4_SB(inode->i_sb)->s_max_dir_size_kb &&
59 ((inode->i_size >> 10) >= 59 ((inode->i_size >> 10) >=
@@ -62,9 +62,9 @@ static struct buffer_head *ext4_append(handle_t *handle,
62 62
63 *block = inode->i_size >> inode->i_sb->s_blocksize_bits; 63 *block = inode->i_size >> inode->i_sb->s_blocksize_bits;
64 64
65 bh = ext4_bread(handle, inode, *block, 1, &err); 65 bh = ext4_bread(handle, inode, *block, 1);
66 if (!bh) 66 if (IS_ERR(bh))
67 return ERR_PTR(err); 67 return bh;
68 inode->i_size += inode->i_sb->s_blocksize; 68 inode->i_size += inode->i_sb->s_blocksize;
69 EXT4_I(inode)->i_disksize = inode->i_size; 69 EXT4_I(inode)->i_disksize = inode->i_size;
70 BUFFER_TRACE(bh, "get_write_access"); 70 BUFFER_TRACE(bh, "get_write_access");
@@ -94,20 +94,20 @@ static struct buffer_head *__ext4_read_dirblock(struct inode *inode,
94{ 94{
95 struct buffer_head *bh; 95 struct buffer_head *bh;
96 struct ext4_dir_entry *dirent; 96 struct ext4_dir_entry *dirent;
97 int err = 0, is_dx_block = 0; 97 int is_dx_block = 0;
98 98
99 bh = ext4_bread(NULL, inode, block, 0, &err); 99 bh = ext4_bread(NULL, inode, block, 0);
100 if (!bh) { 100 if (IS_ERR(bh)) {
101 if (err == 0) {
102 ext4_error_inode(inode, __func__, line, block,
103 "Directory hole found");
104 return ERR_PTR(-EIO);
105 }
106 __ext4_warning(inode->i_sb, __func__, line, 101 __ext4_warning(inode->i_sb, __func__, line,
107 "error reading directory block " 102 "error %ld reading directory block "
108 "(ino %lu, block %lu)", inode->i_ino, 103 "(ino %lu, block %lu)", PTR_ERR(bh), inode->i_ino,
109 (unsigned long) block); 104 (unsigned long) block);
110 return ERR_PTR(err); 105
106 return bh;
107 }
108 if (!bh) {
109 ext4_error_inode(inode, __func__, line, block, "Directory hole found");
110 return ERR_PTR(-EIO);
111 } 111 }
112 dirent = (struct ext4_dir_entry *) bh->b_data; 112 dirent = (struct ext4_dir_entry *) bh->b_data;
113 /* Determine whether or not we have an index block */ 113 /* Determine whether or not we have an index block */
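
The __ext4_read_dirblock() hunk above (like the ext4_append() one before it) converts callers of ext4_bread()/ext4_getblk() from the old (pointer, &err) convention to the ERR_PTR() one, so a single return value carries either a buffer, NULL for a directory hole, or an encoded errno. A userspace sketch of that idiom; the ERR_PTR/IS_ERR/PTR_ERR helpers below are simplified stand-ins for the kernel's, and read_dirblock() is hypothetical.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Minimal userspace stand-ins for the kernel's ERR_PTR helpers. */
static inline void *ERR_PTR(long err) { return (void *)err; }
static inline long PTR_ERR(const void *p) { return (long)p; }
static inline int IS_ERR(const void *p)
{
        return (unsigned long)p >= (unsigned long)-4095;
}

/*
 * Either a valid buffer, NULL for "no block here" (a hole), or an
 * encoded errno, matching the three cases the hunk above handles.
 */
static void *read_dirblock(int block)
{
        if (block < 0)
                return ERR_PTR(-EIO);
        if (block == 0)
                return NULL;            /* directory hole */
        return malloc(64);
}

int main(void)
{
        void *bh = read_dirblock(-1);

        if (IS_ERR(bh))
                printf("error %ld reading directory block\n", PTR_ERR(bh));
        else if (!bh)
                printf("directory hole found\n");
        else
                free(bh);
        return 0;
}

Folding the error into the returned pointer is what lets the converted callers drop their local err variables and the extra out-parameter plumbing.
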
@@ -124,8 +124,7 @@ static struct buffer_head *__ext4_read_dirblock(struct inode *inode,
124 "directory leaf block found instead of index block"); 124 "directory leaf block found instead of index block");
125 return ERR_PTR(-EIO); 125 return ERR_PTR(-EIO);
126 } 126 }
127 if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, 127 if (!ext4_has_metadata_csum(inode->i_sb) ||
128 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) ||
129 buffer_verified(bh)) 128 buffer_verified(bh))
130 return bh; 129 return bh;
131 130
@@ -253,8 +252,7 @@ static unsigned dx_node_limit(struct inode *dir);
253static struct dx_frame *dx_probe(const struct qstr *d_name, 252static struct dx_frame *dx_probe(const struct qstr *d_name,
254 struct inode *dir, 253 struct inode *dir,
255 struct dx_hash_info *hinfo, 254 struct dx_hash_info *hinfo,
256 struct dx_frame *frame, 255 struct dx_frame *frame);
257 int *err);
258static void dx_release(struct dx_frame *frames); 256static void dx_release(struct dx_frame *frames);
259static int dx_make_map(struct ext4_dir_entry_2 *de, unsigned blocksize, 257static int dx_make_map(struct ext4_dir_entry_2 *de, unsigned blocksize,
260 struct dx_hash_info *hinfo, struct dx_map_entry map[]); 258 struct dx_hash_info *hinfo, struct dx_map_entry map[]);
@@ -270,8 +268,7 @@ static int ext4_htree_next_block(struct inode *dir, __u32 hash,
270 __u32 *start_hash); 268 __u32 *start_hash);
271static struct buffer_head * ext4_dx_find_entry(struct inode *dir, 269static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
272 const struct qstr *d_name, 270 const struct qstr *d_name,
273 struct ext4_dir_entry_2 **res_dir, 271 struct ext4_dir_entry_2 **res_dir);
274 int *err);
275static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry, 272static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
276 struct inode *inode); 273 struct inode *inode);
277 274
@@ -340,8 +337,7 @@ int ext4_dirent_csum_verify(struct inode *inode, struct ext4_dir_entry *dirent)
340{ 337{
341 struct ext4_dir_entry_tail *t; 338 struct ext4_dir_entry_tail *t;
342 339
343 if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, 340 if (!ext4_has_metadata_csum(inode->i_sb))
344 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
345 return 1; 341 return 1;
346 342
347 t = get_dirent_tail(inode, dirent); 343 t = get_dirent_tail(inode, dirent);
@@ -362,8 +358,7 @@ static void ext4_dirent_csum_set(struct inode *inode,
362{ 358{
363 struct ext4_dir_entry_tail *t; 359 struct ext4_dir_entry_tail *t;
364 360
365 if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, 361 if (!ext4_has_metadata_csum(inode->i_sb))
366 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
367 return; 362 return;
368 363
369 t = get_dirent_tail(inode, dirent); 364 t = get_dirent_tail(inode, dirent);
@@ -438,8 +433,7 @@ static int ext4_dx_csum_verify(struct inode *inode,
438 struct dx_tail *t; 433 struct dx_tail *t;
439 int count_offset, limit, count; 434 int count_offset, limit, count;
440 435
441 if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, 436 if (!ext4_has_metadata_csum(inode->i_sb))
442 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
443 return 1; 437 return 1;
444 438
445 c = get_dx_countlimit(inode, dirent, &count_offset); 439 c = get_dx_countlimit(inode, dirent, &count_offset);
@@ -468,8 +462,7 @@ static void ext4_dx_csum_set(struct inode *inode, struct ext4_dir_entry *dirent)
468 struct dx_tail *t; 462 struct dx_tail *t;
469 int count_offset, limit, count; 463 int count_offset, limit, count;
470 464
471 if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, 465 if (!ext4_has_metadata_csum(inode->i_sb))
472 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
473 return; 466 return;
474 467
475 c = get_dx_countlimit(inode, dirent, &count_offset); 468 c = get_dx_countlimit(inode, dirent, &count_offset);
@@ -557,8 +550,7 @@ static inline unsigned dx_root_limit(struct inode *dir, unsigned infosize)
557 unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(1) - 550 unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(1) -
558 EXT4_DIR_REC_LEN(2) - infosize; 551 EXT4_DIR_REC_LEN(2) - infosize;
559 552
560 if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb, 553 if (ext4_has_metadata_csum(dir->i_sb))
561 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
562 entry_space -= sizeof(struct dx_tail); 554 entry_space -= sizeof(struct dx_tail);
563 return entry_space / sizeof(struct dx_entry); 555 return entry_space / sizeof(struct dx_entry);
564} 556}
@@ -567,8 +559,7 @@ static inline unsigned dx_node_limit(struct inode *dir)
567{ 559{
568 unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(0); 560 unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(0);
569 561
570 if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb, 562 if (ext4_has_metadata_csum(dir->i_sb))
571 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
572 entry_space -= sizeof(struct dx_tail); 563 entry_space -= sizeof(struct dx_tail);
573 return entry_space / sizeof(struct dx_entry); 564 return entry_space / sizeof(struct dx_entry);
574} 565}
@@ -641,7 +632,9 @@ struct stats dx_show_entries(struct dx_hash_info *hinfo, struct inode *dir,
641 u32 range = i < count - 1? (dx_get_hash(entries + 1) - hash): ~hash; 632 u32 range = i < count - 1? (dx_get_hash(entries + 1) - hash): ~hash;
642 struct stats stats; 633 struct stats stats;
643 printk("%s%3u:%03u hash %8x/%8x ",levels?"":" ", i, block, hash, range); 634 printk("%s%3u:%03u hash %8x/%8x ",levels?"":" ", i, block, hash, range);
644 if (!(bh = ext4_bread (NULL,dir, block, 0,&err))) continue; 635 bh = ext4_bread(NULL,dir, block, 0);
636 if (!bh || IS_ERR(bh))
637 continue;
645 stats = levels? 638 stats = levels?
646 dx_show_entries(hinfo, dir, ((struct dx_node *) bh->b_data)->entries, levels - 1): 639 dx_show_entries(hinfo, dir, ((struct dx_node *) bh->b_data)->entries, levels - 1):
647 dx_show_leaf(hinfo, (struct ext4_dir_entry_2 *) bh->b_data, blocksize, 0); 640 dx_show_leaf(hinfo, (struct ext4_dir_entry_2 *) bh->b_data, blocksize, 0);
@@ -669,29 +662,25 @@ struct stats dx_show_entries(struct dx_hash_info *hinfo, struct inode *dir,
669 */ 662 */
670static struct dx_frame * 663static struct dx_frame *
671dx_probe(const struct qstr *d_name, struct inode *dir, 664dx_probe(const struct qstr *d_name, struct inode *dir,
672 struct dx_hash_info *hinfo, struct dx_frame *frame_in, int *err) 665 struct dx_hash_info *hinfo, struct dx_frame *frame_in)
673{ 666{
674 unsigned count, indirect; 667 unsigned count, indirect;
675 struct dx_entry *at, *entries, *p, *q, *m; 668 struct dx_entry *at, *entries, *p, *q, *m;
676 struct dx_root *root; 669 struct dx_root *root;
677 struct buffer_head *bh;
678 struct dx_frame *frame = frame_in; 670 struct dx_frame *frame = frame_in;
671 struct dx_frame *ret_err = ERR_PTR(ERR_BAD_DX_DIR);
679 u32 hash; 672 u32 hash;
680 673
681 frame->bh = NULL; 674 frame->bh = ext4_read_dirblock(dir, 0, INDEX);
682 bh = ext4_read_dirblock(dir, 0, INDEX); 675 if (IS_ERR(frame->bh))
683 if (IS_ERR(bh)) { 676 return (struct dx_frame *) frame->bh;
684 *err = PTR_ERR(bh); 677
685 goto fail; 678 root = (struct dx_root *) frame->bh->b_data;
686 }
687 root = (struct dx_root *) bh->b_data;
688 if (root->info.hash_version != DX_HASH_TEA && 679 if (root->info.hash_version != DX_HASH_TEA &&
689 root->info.hash_version != DX_HASH_HALF_MD4 && 680 root->info.hash_version != DX_HASH_HALF_MD4 &&
690 root->info.hash_version != DX_HASH_LEGACY) { 681 root->info.hash_version != DX_HASH_LEGACY) {
691 ext4_warning(dir->i_sb, "Unrecognised inode hash code %d", 682 ext4_warning(dir->i_sb, "Unrecognised inode hash code %d",
692 root->info.hash_version); 683 root->info.hash_version);
693 brelse(bh);
694 *err = ERR_BAD_DX_DIR;
695 goto fail; 684 goto fail;
696 } 685 }
697 hinfo->hash_version = root->info.hash_version; 686 hinfo->hash_version = root->info.hash_version;
@@ -705,16 +694,12 @@ dx_probe(const struct qstr *d_name, struct inode *dir,
705 if (root->info.unused_flags & 1) { 694 if (root->info.unused_flags & 1) {
706 ext4_warning(dir->i_sb, "Unimplemented inode hash flags: %#06x", 695 ext4_warning(dir->i_sb, "Unimplemented inode hash flags: %#06x",
707 root->info.unused_flags); 696 root->info.unused_flags);
708 brelse(bh);
709 *err = ERR_BAD_DX_DIR;
710 goto fail; 697 goto fail;
711 } 698 }
712 699
713 if ((indirect = root->info.indirect_levels) > 1) { 700 if ((indirect = root->info.indirect_levels) > 1) {
714 ext4_warning(dir->i_sb, "Unimplemented inode hash depth: %#06x", 701 ext4_warning(dir->i_sb, "Unimplemented inode hash depth: %#06x",
715 root->info.indirect_levels); 702 root->info.indirect_levels);
716 brelse(bh);
717 *err = ERR_BAD_DX_DIR;
718 goto fail; 703 goto fail;
719 } 704 }
720 705
@@ -724,27 +709,21 @@ dx_probe(const struct qstr *d_name, struct inode *dir,
724 if (dx_get_limit(entries) != dx_root_limit(dir, 709 if (dx_get_limit(entries) != dx_root_limit(dir,
725 root->info.info_length)) { 710 root->info.info_length)) {
726 ext4_warning(dir->i_sb, "dx entry: limit != root limit"); 711 ext4_warning(dir->i_sb, "dx entry: limit != root limit");
727 brelse(bh);
728 *err = ERR_BAD_DX_DIR;
729 goto fail; 712 goto fail;
730 } 713 }
731 714
732 dxtrace(printk("Look up %x", hash)); 715 dxtrace(printk("Look up %x", hash));
733 while (1) 716 while (1) {
734 {
735 count = dx_get_count(entries); 717 count = dx_get_count(entries);
736 if (!count || count > dx_get_limit(entries)) { 718 if (!count || count > dx_get_limit(entries)) {
737 ext4_warning(dir->i_sb, 719 ext4_warning(dir->i_sb,
738 "dx entry: no count or count > limit"); 720 "dx entry: no count or count > limit");
739 brelse(bh); 721 goto fail;
740 *err = ERR_BAD_DX_DIR;
741 goto fail2;
742 } 722 }
743 723
744 p = entries + 1; 724 p = entries + 1;
745 q = entries + count - 1; 725 q = entries + count - 1;
746 while (p <= q) 726 while (p <= q) {
747 {
748 m = p + (q - p)/2; 727 m = p + (q - p)/2;
749 dxtrace(printk(".")); 728 dxtrace(printk("."));
750 if (dx_get_hash(m) > hash) 729 if (dx_get_hash(m) > hash)
@@ -753,8 +732,7 @@ dx_probe(const struct qstr *d_name, struct inode *dir,
753 p = m + 1; 732 p = m + 1;
754 } 733 }
755 734
756 if (0) // linear search cross check 735 if (0) { // linear search cross check
757 {
758 unsigned n = count - 1; 736 unsigned n = count - 1;
759 at = entries; 737 at = entries;
760 while (n--) 738 while (n--)
@@ -771,38 +749,35 @@ dx_probe(const struct qstr *d_name, struct inode *dir,
771 749
772 at = p - 1; 750 at = p - 1;
773 dxtrace(printk(" %x->%u\n", at == entries? 0: dx_get_hash(at), dx_get_block(at))); 751 dxtrace(printk(" %x->%u\n", at == entries? 0: dx_get_hash(at), dx_get_block(at)));
774 frame->bh = bh;
775 frame->entries = entries; 752 frame->entries = entries;
776 frame->at = at; 753 frame->at = at;
777 if (!indirect--) return frame; 754 if (!indirect--)
778 bh = ext4_read_dirblock(dir, dx_get_block(at), INDEX); 755 return frame;
779 if (IS_ERR(bh)) { 756 frame++;
780 *err = PTR_ERR(bh); 757 frame->bh = ext4_read_dirblock(dir, dx_get_block(at), INDEX);
781 goto fail2; 758 if (IS_ERR(frame->bh)) {
759 ret_err = (struct dx_frame *) frame->bh;
760 frame->bh = NULL;
761 goto fail;
782 } 762 }
783 entries = ((struct dx_node *) bh->b_data)->entries; 763 entries = ((struct dx_node *) frame->bh->b_data)->entries;
784 764
785 if (dx_get_limit(entries) != dx_node_limit (dir)) { 765 if (dx_get_limit(entries) != dx_node_limit (dir)) {
786 ext4_warning(dir->i_sb, 766 ext4_warning(dir->i_sb,
787 "dx entry: limit != node limit"); 767 "dx entry: limit != node limit");
788 brelse(bh); 768 goto fail;
789 *err = ERR_BAD_DX_DIR;
790 goto fail2;
791 } 769 }
792 frame++;
793 frame->bh = NULL;
794 } 770 }
795fail2: 771fail:
796 while (frame >= frame_in) { 772 while (frame >= frame_in) {
797 brelse(frame->bh); 773 brelse(frame->bh);
798 frame--; 774 frame--;
799 } 775 }
800fail: 776 if (ret_err == ERR_PTR(ERR_BAD_DX_DIR))
801 if (*err == ERR_BAD_DX_DIR)
802 ext4_warning(dir->i_sb, 777 ext4_warning(dir->i_sb,
803 "Corrupt dir inode %lu, running e2fsck is " 778 "Corrupt dir inode %lu, running e2fsck is "
804 "recommended.", dir->i_ino); 779 "recommended.", dir->i_ino);
805 return NULL; 780 return ret_err;
806} 781}
807 782
808static void dx_release (struct dx_frame *frames) 783static void dx_release (struct dx_frame *frames)
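
With the conversion above, dx_probe() no longer reports failure through a NULL return plus *err; it unwinds whatever frames it has already filled and hands back an error pointer. A minimal sketch of that unwind-and-return shape; struct frame, probe() and the simplified ERR_PTR helpers are illustrative only, not the ext4 dx structures.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct frame { void *bh; };

static inline void *ERR_PTR(long err) { return (void *)err; }
static inline int IS_ERR(const void *p)
{
        return (unsigned long)p >= (unsigned long)-4095;
}

/*
 * Fill up to 'depth' frames; on any failure release what was taken so
 * far and hand the caller a single encoded error, as dx_probe() now
 * does with ERR_BAD_DX_DIR.
 */
static struct frame *probe(struct frame *frames, int depth, int fail_at)
{
        struct frame *frame = frames;

        for (int level = 0; ; level++, frame++) {
                frame->bh = (level == fail_at) ? NULL : malloc(32);
                if (!frame->bh)
                        goto fail;
                if (level == depth)
                        return frame;           /* deepest frame, success */
        }
fail:
        while (frame >= frames) {
                free(frame->bh);                /* free(NULL) is a no-op */
                frame--;
        }
        return ERR_PTR(-EIO);
}

int main(void)
{
        struct frame frames[3] = { { NULL } };
        struct frame *f = probe(frames, 2, 1);

        if (IS_ERR(f))
                printf("probe failed, all frames released\n");
        else
                while (f >= frames)
                        free((f--)->bh);
        return 0;
}
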
@@ -988,9 +963,9 @@ int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
988 } 963 }
989 hinfo.hash = start_hash; 964 hinfo.hash = start_hash;
990 hinfo.minor_hash = 0; 965 hinfo.minor_hash = 0;
991 frame = dx_probe(NULL, dir, &hinfo, frames, &err); 966 frame = dx_probe(NULL, dir, &hinfo, frames);
992 if (!frame) 967 if (IS_ERR(frame))
993 return err; 968 return PTR_ERR(frame);
994 969
995 /* Add '.' and '..' from the htree header */ 970 /* Add '.' and '..' from the htree header */
996 if (!start_hash && !start_minor_hash) { 971 if (!start_hash && !start_minor_hash) {
@@ -1227,8 +1202,7 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
1227 buffer */ 1202 buffer */
1228 int num = 0; 1203 int num = 0;
1229 ext4_lblk_t nblocks; 1204 ext4_lblk_t nblocks;
1230 int i, err = 0; 1205 int i, namelen;
1231 int namelen;
1232 1206
1233 *res_dir = NULL; 1207 *res_dir = NULL;
1234 sb = dir->i_sb; 1208 sb = dir->i_sb;
@@ -1258,17 +1232,13 @@ static struct buffer_head * ext4_find_entry (struct inode *dir,
1258 goto restart; 1232 goto restart;
1259 } 1233 }
1260 if (is_dx(dir)) { 1234 if (is_dx(dir)) {
1261 bh = ext4_dx_find_entry(dir, d_name, res_dir, &err); 1235 bh = ext4_dx_find_entry(dir, d_name, res_dir);
1262 /* 1236 /*
1263 * On success, or if the error was file not found, 1237 * On success, or if the error was file not found,
1264 * return. Otherwise, fall back to doing a search the 1238 * return. Otherwise, fall back to doing a search the
1265 * old fashioned way. 1239 * old fashioned way.
1266 */ 1240 */
1267 if (err == -ENOENT) 1241 if (!IS_ERR(bh) || PTR_ERR(bh) != ERR_BAD_DX_DIR)
1268 return NULL;
1269 if (err && err != ERR_BAD_DX_DIR)
1270 return ERR_PTR(err);
1271 if (bh)
1272 return bh; 1242 return bh;
1273 dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, " 1243 dxtrace(printk(KERN_DEBUG "ext4_find_entry: dx failed, "
1274 "falling back\n")); 1244 "falling back\n"));
@@ -1298,10 +1268,10 @@ restart:
1298 break; 1268 break;
1299 } 1269 }
1300 num++; 1270 num++;
1301 bh = ext4_getblk(NULL, dir, b++, 0, &err); 1271 bh = ext4_getblk(NULL, dir, b++, 0);
1302 if (unlikely(err)) { 1272 if (unlikely(IS_ERR(bh))) {
1303 if (ra_max == 0) 1273 if (ra_max == 0)
1304 return ERR_PTR(err); 1274 return bh;
1305 break; 1275 break;
1306 } 1276 }
1307 bh_use[ra_max] = bh; 1277 bh_use[ra_max] = bh;
@@ -1366,7 +1336,7 @@ cleanup_and_exit:
1366} 1336}
1367 1337
1368static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct qstr *d_name, 1338static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct qstr *d_name,
1369 struct ext4_dir_entry_2 **res_dir, int *err) 1339 struct ext4_dir_entry_2 **res_dir)
1370{ 1340{
1371 struct super_block * sb = dir->i_sb; 1341 struct super_block * sb = dir->i_sb;
1372 struct dx_hash_info hinfo; 1342 struct dx_hash_info hinfo;
@@ -1375,25 +1345,23 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct q
1375 ext4_lblk_t block; 1345 ext4_lblk_t block;
1376 int retval; 1346 int retval;
1377 1347
1378 if (!(frame = dx_probe(d_name, dir, &hinfo, frames, err))) 1348 frame = dx_probe(d_name, dir, &hinfo, frames);
1379 return NULL; 1349 if (IS_ERR(frame))
1350 return (struct buffer_head *) frame;
1380 do { 1351 do {
1381 block = dx_get_block(frame->at); 1352 block = dx_get_block(frame->at);
1382 bh = ext4_read_dirblock(dir, block, DIRENT); 1353 bh = ext4_read_dirblock(dir, block, DIRENT);
1383 if (IS_ERR(bh)) { 1354 if (IS_ERR(bh))
1384 *err = PTR_ERR(bh);
1385 goto errout; 1355 goto errout;
1386 } 1356
1387 retval = search_dirblock(bh, dir, d_name, 1357 retval = search_dirblock(bh, dir, d_name,
1388 block << EXT4_BLOCK_SIZE_BITS(sb), 1358 block << EXT4_BLOCK_SIZE_BITS(sb),
1389 res_dir); 1359 res_dir);
1390 if (retval == 1) { /* Success! */ 1360 if (retval == 1)
1391 dx_release(frames); 1361 goto success;
1392 return bh;
1393 }
1394 brelse(bh); 1362 brelse(bh);
1395 if (retval == -1) { 1363 if (retval == -1) {
1396 *err = ERR_BAD_DX_DIR; 1364 bh = ERR_PTR(ERR_BAD_DX_DIR);
1397 goto errout; 1365 goto errout;
1398 } 1366 }
1399 1367
@@ -1402,18 +1370,19 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct q
1402 frames, NULL); 1370 frames, NULL);
1403 if (retval < 0) { 1371 if (retval < 0) {
1404 ext4_warning(sb, 1372 ext4_warning(sb,
1405 "error reading index page in directory #%lu", 1373 "error %d reading index page in directory #%lu",
1406 dir->i_ino); 1374 retval, dir->i_ino);
1407 *err = retval; 1375 bh = ERR_PTR(retval);
1408 goto errout; 1376 goto errout;
1409 } 1377 }
1410 } while (retval == 1); 1378 } while (retval == 1);
1411 1379
1412 *err = -ENOENT; 1380 bh = NULL;
1413errout: 1381errout:
1414 dxtrace(printk(KERN_DEBUG "%s not found\n", d_name->name)); 1382 dxtrace(printk(KERN_DEBUG "%s not found\n", d_name->name));
1415 dx_release (frames); 1383success:
1416 return NULL; 1384 dx_release(frames);
1385 return bh;
1417} 1386}
1418 1387
1419static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags) 1388static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsigned int flags)
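Several hunks in this patch convert ext4's directory lookup helpers (dx_probe, ext4_dx_find_entry, ext4_getblk, ext4_bread) from an int *err out-parameter to returning the error encoded in the pointer itself, checked with IS_ERR()/PTR_ERR(). A minimal stand-alone sketch of that convention; the real helpers live in include/linux/err.h, and the 4095 bound and the find_block() function here are illustrative only:

    #include <errno.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_ERRNO 4095  /* same idea as the kernel: the top 4095 addresses encode errors */

    static inline void *ERR_PTR(long error) { return (void *)error; }
    static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
    static inline int IS_ERR(const void *ptr)
    {
            return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    /* Hypothetical lookup: returns a buffer on success, an encoded errno on failure. */
    static void *find_block(int fail)
    {
            if (fail)
                    return ERR_PTR(-ENOENT);
            return malloc(64);
    }

    int main(void)
    {
            void *bh = find_block(1);

            if (IS_ERR(bh))
                    printf("lookup failed: %ld\n", PTR_ERR(bh));  /* -2 == -ENOENT */
            else
                    free(bh);
            return 0;
    }

With the error carried in the returned pointer, the fallback test in ext4_find_entry() collapses to the single condition seen above: return unless the lookup failed specifically with ERR_BAD_DX_DIR.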
@@ -1441,7 +1410,7 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi
1441 dentry); 1410 dentry);
1442 return ERR_PTR(-EIO); 1411 return ERR_PTR(-EIO);
1443 } 1412 }
1444 inode = ext4_iget(dir->i_sb, ino); 1413 inode = ext4_iget_normal(dir->i_sb, ino);
1445 if (inode == ERR_PTR(-ESTALE)) { 1414 if (inode == ERR_PTR(-ESTALE)) {
1446 EXT4_ERROR_INODE(dir, 1415 EXT4_ERROR_INODE(dir,
1447 "deleted inode referenced: %u", 1416 "deleted inode referenced: %u",
@@ -1474,7 +1443,7 @@ struct dentry *ext4_get_parent(struct dentry *child)
1474 return ERR_PTR(-EIO); 1443 return ERR_PTR(-EIO);
1475 } 1444 }
1476 1445
1477 return d_obtain_alias(ext4_iget(child->d_inode->i_sb, ino)); 1446 return d_obtain_alias(ext4_iget_normal(child->d_inode->i_sb, ino));
1478} 1447}
1479 1448
1480/* 1449/*
@@ -1533,7 +1502,7 @@ static struct ext4_dir_entry_2* dx_pack_dirents(char *base, unsigned blocksize)
1533 */ 1502 */
1534static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir, 1503static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
1535 struct buffer_head **bh,struct dx_frame *frame, 1504 struct buffer_head **bh,struct dx_frame *frame,
1536 struct dx_hash_info *hinfo, int *error) 1505 struct dx_hash_info *hinfo)
1537{ 1506{
1538 unsigned blocksize = dir->i_sb->s_blocksize; 1507 unsigned blocksize = dir->i_sb->s_blocksize;
1539 unsigned count, continued; 1508 unsigned count, continued;
@@ -1548,16 +1517,14 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
1548 int csum_size = 0; 1517 int csum_size = 0;
1549 int err = 0, i; 1518 int err = 0, i;
1550 1519
1551 if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb, 1520 if (ext4_has_metadata_csum(dir->i_sb))
1552 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
1553 csum_size = sizeof(struct ext4_dir_entry_tail); 1521 csum_size = sizeof(struct ext4_dir_entry_tail);
1554 1522
1555 bh2 = ext4_append(handle, dir, &newblock); 1523 bh2 = ext4_append(handle, dir, &newblock);
1556 if (IS_ERR(bh2)) { 1524 if (IS_ERR(bh2)) {
1557 brelse(*bh); 1525 brelse(*bh);
1558 *bh = NULL; 1526 *bh = NULL;
1559 *error = PTR_ERR(bh2); 1527 return (struct ext4_dir_entry_2 *) bh2;
1560 return NULL;
1561 } 1528 }
1562 1529
1563 BUFFER_TRACE(*bh, "get_write_access"); 1530 BUFFER_TRACE(*bh, "get_write_access");
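The open-coded EXT4_HAS_RO_COMPAT_FEATURE(..., EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) tests that this patch replaces all funnel through one ext4_has_metadata_csum() helper (which, in the kernel, also accounts for the loaded checksum driver state, not modelled here). The shape of that refactor as a stand-alone sketch; the structure, flag value and names below are invented for the demo and are not the ext4 definitions:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct demo_sb { unsigned int ro_compat_features; };

    #define DEMO_RO_COMPAT_METADATA_CSUM 0x0400U  /* arbitrary bit chosen for the demo */

    /* One inline helper instead of an open-coded two-line feature test at every call site. */
    static inline bool demo_has_metadata_csum(const struct demo_sb *sb)
    {
            return (sb->ro_compat_features & DEMO_RO_COMPAT_METADATA_CSUM) != 0;
    }

    int main(void)
    {
            struct demo_sb sb = { .ro_compat_features = DEMO_RO_COMPAT_METADATA_CSUM };
            size_t csum_size = demo_has_metadata_csum(&sb) ? 4 : 0;

            printf("csum_size=%zu\n", csum_size);
            return 0;
    }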
@@ -1617,8 +1584,7 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
1617 dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data2, blocksize, 1)); 1584 dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data2, blocksize, 1));
1618 1585
1619 /* Which block gets the new entry? */ 1586 /* Which block gets the new entry? */
1620 if (hinfo->hash >= hash2) 1587 if (hinfo->hash >= hash2) {
1621 {
1622 swap(*bh, bh2); 1588 swap(*bh, bh2);
1623 de = de2; 1589 de = de2;
1624 } 1590 }
@@ -1638,8 +1604,7 @@ journal_error:
1638 brelse(bh2); 1604 brelse(bh2);
1639 *bh = NULL; 1605 *bh = NULL;
1640 ext4_std_error(dir->i_sb, err); 1606 ext4_std_error(dir->i_sb, err);
1641 *error = err; 1607 return ERR_PTR(err);
1642 return NULL;
1643} 1608}
1644 1609
1645int ext4_find_dest_de(struct inode *dir, struct inode *inode, 1610int ext4_find_dest_de(struct inode *dir, struct inode *inode,
@@ -1718,8 +1683,7 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
1718 int csum_size = 0; 1683 int csum_size = 0;
1719 int err; 1684 int err;
1720 1685
1721 if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, 1686 if (ext4_has_metadata_csum(inode->i_sb))
1722 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
1723 csum_size = sizeof(struct ext4_dir_entry_tail); 1687 csum_size = sizeof(struct ext4_dir_entry_tail);
1724 1688
1725 if (!de) { 1689 if (!de) {
@@ -1786,8 +1750,7 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
1786 struct fake_dirent *fde; 1750 struct fake_dirent *fde;
1787 int csum_size = 0; 1751 int csum_size = 0;
1788 1752
1789 if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, 1753 if (ext4_has_metadata_csum(inode->i_sb))
1790 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
1791 csum_size = sizeof(struct ext4_dir_entry_tail); 1754 csum_size = sizeof(struct ext4_dir_entry_tail);
1792 1755
1793 blocksize = dir->i_sb->s_blocksize; 1756 blocksize = dir->i_sb->s_blocksize;
@@ -1853,31 +1816,39 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
1853 hinfo.hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned; 1816 hinfo.hash_version += EXT4_SB(dir->i_sb)->s_hash_unsigned;
1854 hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed; 1817 hinfo.seed = EXT4_SB(dir->i_sb)->s_hash_seed;
1855 ext4fs_dirhash(name, namelen, &hinfo); 1818 ext4fs_dirhash(name, namelen, &hinfo);
1819 memset(frames, 0, sizeof(frames));
1856 frame = frames; 1820 frame = frames;
1857 frame->entries = entries; 1821 frame->entries = entries;
1858 frame->at = entries; 1822 frame->at = entries;
1859 frame->bh = bh; 1823 frame->bh = bh;
1860 bh = bh2; 1824 bh = bh2;
1861 1825
1862 ext4_handle_dirty_dx_node(handle, dir, frame->bh); 1826 retval = ext4_handle_dirty_dx_node(handle, dir, frame->bh);
1863 ext4_handle_dirty_dirent_node(handle, dir, bh); 1827 if (retval)
1828 goto out_frames;
1829 retval = ext4_handle_dirty_dirent_node(handle, dir, bh);
1830 if (retval)
1831 goto out_frames;
1864 1832
1865 de = do_split(handle,dir, &bh, frame, &hinfo, &retval); 1833 de = do_split(handle,dir, &bh, frame, &hinfo);
1866 if (!de) { 1834 if (IS_ERR(de)) {
1867 /* 1835 retval = PTR_ERR(de);
1868 * Even if the block split failed, we have to properly write 1836 goto out_frames;
1869 * out all the changes we did so far. Otherwise we can end up
1870 * with corrupted filesystem.
1871 */
1872 ext4_mark_inode_dirty(handle, dir);
1873 dx_release(frames);
1874 return retval;
1875 } 1837 }
1876 dx_release(frames); 1838 dx_release(frames);
1877 1839
1878 retval = add_dirent_to_buf(handle, dentry, inode, de, bh); 1840 retval = add_dirent_to_buf(handle, dentry, inode, de, bh);
1879 brelse(bh); 1841 brelse(bh);
1880 return retval; 1842 return retval;
1843out_frames:
1844 /*
1845 * Even if the block split failed, we have to properly write
1846 * out all the changes we did so far. Otherwise we can end up
1847 * with corrupted filesystem.
1848 */
1849 ext4_mark_inode_dirty(handle, dir);
1850 dx_release(frames);
1851 return retval;
1881} 1852}
1882 1853
1883/* 1854/*
@@ -1904,8 +1875,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
1904 ext4_lblk_t block, blocks; 1875 ext4_lblk_t block, blocks;
1905 int csum_size = 0; 1876 int csum_size = 0;
1906 1877
1907 if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, 1878 if (ext4_has_metadata_csum(inode->i_sb))
1908 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
1909 csum_size = sizeof(struct ext4_dir_entry_tail); 1879 csum_size = sizeof(struct ext4_dir_entry_tail);
1910 1880
1911 sb = dir->i_sb; 1881 sb = dir->i_sb;
@@ -1982,9 +1952,9 @@ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
1982 struct ext4_dir_entry_2 *de; 1952 struct ext4_dir_entry_2 *de;
1983 int err; 1953 int err;
1984 1954
1985 frame = dx_probe(&dentry->d_name, dir, &hinfo, frames, &err); 1955 frame = dx_probe(&dentry->d_name, dir, &hinfo, frames);
1986 if (!frame) 1956 if (IS_ERR(frame))
1987 return err; 1957 return PTR_ERR(frame);
1988 entries = frame->entries; 1958 entries = frame->entries;
1989 at = frame->at; 1959 at = frame->at;
1990 bh = ext4_read_dirblock(dir, dx_get_block(frame->at), DIRENT); 1960 bh = ext4_read_dirblock(dir, dx_get_block(frame->at), DIRENT);
@@ -2095,9 +2065,11 @@ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
2095 goto cleanup; 2065 goto cleanup;
2096 } 2066 }
2097 } 2067 }
2098 de = do_split(handle, dir, &bh, frame, &hinfo, &err); 2068 de = do_split(handle, dir, &bh, frame, &hinfo);
2099 if (!de) 2069 if (IS_ERR(de)) {
2070 err = PTR_ERR(de);
2100 goto cleanup; 2071 goto cleanup;
2072 }
2101 err = add_dirent_to_buf(handle, dentry, inode, de, bh); 2073 err = add_dirent_to_buf(handle, dentry, inode, de, bh);
2102 goto cleanup; 2074 goto cleanup;
2103 2075
@@ -2167,8 +2139,7 @@ static int ext4_delete_entry(handle_t *handle,
2167 return err; 2139 return err;
2168 } 2140 }
2169 2141
2170 if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb, 2142 if (ext4_has_metadata_csum(dir->i_sb))
2171 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
2172 csum_size = sizeof(struct ext4_dir_entry_tail); 2143 csum_size = sizeof(struct ext4_dir_entry_tail);
2173 2144
2174 BUFFER_TRACE(bh, "get_write_access"); 2145 BUFFER_TRACE(bh, "get_write_access");
@@ -2387,8 +2358,7 @@ static int ext4_init_new_dir(handle_t *handle, struct inode *dir,
2387 int csum_size = 0; 2358 int csum_size = 0;
2388 int err; 2359 int err;
2389 2360
2390 if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb, 2361 if (ext4_has_metadata_csum(dir->i_sb))
2391 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
2392 csum_size = sizeof(struct ext4_dir_entry_tail); 2362 csum_size = sizeof(struct ext4_dir_entry_tail);
2393 2363
2394 if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) { 2364 if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
@@ -2403,10 +2373,6 @@ static int ext4_init_new_dir(handle_t *handle, struct inode *dir,
2403 dir_block = ext4_append(handle, inode, &block); 2373 dir_block = ext4_append(handle, inode, &block);
2404 if (IS_ERR(dir_block)) 2374 if (IS_ERR(dir_block))
2405 return PTR_ERR(dir_block); 2375 return PTR_ERR(dir_block);
2406 BUFFER_TRACE(dir_block, "get_write_access");
2407 err = ext4_journal_get_write_access(handle, dir_block);
2408 if (err)
2409 goto out;
2410 de = (struct ext4_dir_entry_2 *)dir_block->b_data; 2376 de = (struct ext4_dir_entry_2 *)dir_block->b_data;
2411 ext4_init_dot_dotdot(inode, de, blocksize, csum_size, dir->i_ino, 0); 2377 ext4_init_dot_dotdot(inode, de, blocksize, csum_size, dir->i_ino, 0);
2412 set_nlink(inode, 2); 2378 set_nlink(inode, 2);
@@ -2573,7 +2539,7 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode)
2573 int err = 0, rc; 2539 int err = 0, rc;
2574 bool dirty = false; 2540 bool dirty = false;
2575 2541
2576 if (!sbi->s_journal) 2542 if (!sbi->s_journal || is_bad_inode(inode))
2577 return 0; 2543 return 0;
2578 2544
2579 WARN_ON_ONCE(!(inode->i_state & (I_NEW | I_FREEING)) && 2545 WARN_ON_ONCE(!(inode->i_state & (I_NEW | I_FREEING)) &&
@@ -3190,6 +3156,39 @@ static void ext4_update_dir_count(handle_t *handle, struct ext4_renament *ent)
3190 } 3156 }
3191} 3157}
3192 3158
3159static struct inode *ext4_whiteout_for_rename(struct ext4_renament *ent,
3160 int credits, handle_t **h)
3161{
3162 struct inode *wh;
3163 handle_t *handle;
3164 int retries = 0;
3165
3166 /*
3167 * for inode block, sb block, group summaries,
3168 * and inode bitmap
3169 */
3170 credits += (EXT4_MAXQUOTAS_TRANS_BLOCKS(ent->dir->i_sb) +
3171 EXT4_XATTR_TRANS_BLOCKS + 4);
3172retry:
3173 wh = ext4_new_inode_start_handle(ent->dir, S_IFCHR | WHITEOUT_MODE,
3174 &ent->dentry->d_name, 0, NULL,
3175 EXT4_HT_DIR, credits);
3176
3177 handle = ext4_journal_current_handle();
3178 if (IS_ERR(wh)) {
3179 if (handle)
3180 ext4_journal_stop(handle);
3181 if (PTR_ERR(wh) == -ENOSPC &&
3182 ext4_should_retry_alloc(ent->dir->i_sb, &retries))
3183 goto retry;
3184 } else {
3185 *h = handle;
3186 init_special_inode(wh, wh->i_mode, WHITEOUT_DEV);
3187 wh->i_op = &ext4_special_inode_operations;
3188 }
3189 return wh;
3190}
3191
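ext4_whiteout_for_rename() allocates the object a RENAME_WHITEOUT rename leaves behind in the old directory entry: a character-device inode with device number 0:0 (WHITEOUT_DEV). The equivalent object can be created by hand from user space, which makes the new code easy to inspect; the path below is arbitrary, and mknod() of a device node normally requires CAP_MKNOD:

    #include <stdio.h>
    #include <sys/stat.h>
    #include <sys/sysmacros.h>
    #include <sys/types.h>

    int main(void)
    {
            /* A whiteout is simply a character device node with device number 0:0. */
            if (mknod("demo-whiteout", S_IFCHR | 0000, makedev(0, 0)) != 0) {
                    perror("mknod");
                    return 1;
            }
            puts("created demo-whiteout (char dev 0:0)");
            return 0;
    }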
3193/* 3192/*
3194 * Anybody can rename anything with this: the permission checks are left to the 3193 * Anybody can rename anything with this: the permission checks are left to the
3195 * higher-level routines. 3194 * higher-level routines.
@@ -3199,7 +3198,8 @@ static void ext4_update_dir_count(handle_t *handle, struct ext4_renament *ent)
3199 * This comes from rename(const char *oldpath, const char *newpath) 3198 * This comes from rename(const char *oldpath, const char *newpath)
3200 */ 3199 */
3201static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry, 3200static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
3202 struct inode *new_dir, struct dentry *new_dentry) 3201 struct inode *new_dir, struct dentry *new_dentry,
3202 unsigned int flags)
3203{ 3203{
3204 handle_t *handle = NULL; 3204 handle_t *handle = NULL;
3205 struct ext4_renament old = { 3205 struct ext4_renament old = {
@@ -3214,6 +3214,9 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
3214 }; 3214 };
3215 int force_reread; 3215 int force_reread;
3216 int retval; 3216 int retval;
3217 struct inode *whiteout = NULL;
3218 int credits;
3219 u8 old_file_type;
3217 3220
3218 dquot_initialize(old.dir); 3221 dquot_initialize(old.dir);
3219 dquot_initialize(new.dir); 3222 dquot_initialize(new.dir);
@@ -3252,11 +3255,17 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
3252 if (new.inode && !test_opt(new.dir->i_sb, NO_AUTO_DA_ALLOC)) 3255 if (new.inode && !test_opt(new.dir->i_sb, NO_AUTO_DA_ALLOC))
3253 ext4_alloc_da_blocks(old.inode); 3256 ext4_alloc_da_blocks(old.inode);
3254 3257
3255 handle = ext4_journal_start(old.dir, EXT4_HT_DIR, 3258 credits = (2 * EXT4_DATA_TRANS_BLOCKS(old.dir->i_sb) +
3256 (2 * EXT4_DATA_TRANS_BLOCKS(old.dir->i_sb) + 3259 EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2);
3257 EXT4_INDEX_EXTRA_TRANS_BLOCKS + 2)); 3260 if (!(flags & RENAME_WHITEOUT)) {
3258 if (IS_ERR(handle)) 3261 handle = ext4_journal_start(old.dir, EXT4_HT_DIR, credits);
3259 return PTR_ERR(handle); 3262 if (IS_ERR(handle))
3263 return PTR_ERR(handle);
3264 } else {
3265 whiteout = ext4_whiteout_for_rename(&old, credits, &handle);
3266 if (IS_ERR(whiteout))
3267 return PTR_ERR(whiteout);
3268 }
3260 3269
3261 if (IS_DIRSYNC(old.dir) || IS_DIRSYNC(new.dir)) 3270 if (IS_DIRSYNC(old.dir) || IS_DIRSYNC(new.dir))
3262 ext4_handle_sync(handle); 3271 ext4_handle_sync(handle);
@@ -3284,13 +3293,26 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
3284 */ 3293 */
3285 force_reread = (new.dir->i_ino == old.dir->i_ino && 3294 force_reread = (new.dir->i_ino == old.dir->i_ino &&
3286 ext4_test_inode_flag(new.dir, EXT4_INODE_INLINE_DATA)); 3295 ext4_test_inode_flag(new.dir, EXT4_INODE_INLINE_DATA));
3296
3297 old_file_type = old.de->file_type;
3298 if (whiteout) {
3299 /*
3300 * Do this before adding a new entry, so the old entry is sure
3301 * to be still pointing to the valid old entry.
3302 */
3303 retval = ext4_setent(handle, &old, whiteout->i_ino,
3304 EXT4_FT_CHRDEV);
3305 if (retval)
3306 goto end_rename;
3307 ext4_mark_inode_dirty(handle, whiteout);
3308 }
3287 if (!new.bh) { 3309 if (!new.bh) {
3288 retval = ext4_add_entry(handle, new.dentry, old.inode); 3310 retval = ext4_add_entry(handle, new.dentry, old.inode);
3289 if (retval) 3311 if (retval)
3290 goto end_rename; 3312 goto end_rename;
3291 } else { 3313 } else {
3292 retval = ext4_setent(handle, &new, 3314 retval = ext4_setent(handle, &new,
3293 old.inode->i_ino, old.de->file_type); 3315 old.inode->i_ino, old_file_type);
3294 if (retval) 3316 if (retval)
3295 goto end_rename; 3317 goto end_rename;
3296 } 3318 }
@@ -3305,10 +3327,12 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
3305 old.inode->i_ctime = ext4_current_time(old.inode); 3327 old.inode->i_ctime = ext4_current_time(old.inode);
3306 ext4_mark_inode_dirty(handle, old.inode); 3328 ext4_mark_inode_dirty(handle, old.inode);
3307 3329
3308 /* 3330 if (!whiteout) {
3309 * ok, that's it 3331 /*
3310 */ 3332 * ok, that's it
3311 ext4_rename_delete(handle, &old, force_reread); 3333 */
3334 ext4_rename_delete(handle, &old, force_reread);
3335 }
3312 3336
3313 if (new.inode) { 3337 if (new.inode) {
3314 ext4_dec_count(handle, new.inode); 3338 ext4_dec_count(handle, new.inode);
@@ -3344,6 +3368,12 @@ end_rename:
3344 brelse(old.dir_bh); 3368 brelse(old.dir_bh);
3345 brelse(old.bh); 3369 brelse(old.bh);
3346 brelse(new.bh); 3370 brelse(new.bh);
3371 if (whiteout) {
3372 if (retval)
3373 drop_nlink(whiteout);
3374 unlock_new_inode(whiteout);
3375 iput(whiteout);
3376 }
3347 if (handle) 3377 if (handle)
3348 ext4_journal_stop(handle); 3378 ext4_journal_stop(handle);
3349 return retval; 3379 return retval;
@@ -3476,18 +3506,15 @@ static int ext4_rename2(struct inode *old_dir, struct dentry *old_dentry,
3476 struct inode *new_dir, struct dentry *new_dentry, 3506 struct inode *new_dir, struct dentry *new_dentry,
3477 unsigned int flags) 3507 unsigned int flags)
3478{ 3508{
3479 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE)) 3509 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
3480 return -EINVAL; 3510 return -EINVAL;
3481 3511
3482 if (flags & RENAME_EXCHANGE) { 3512 if (flags & RENAME_EXCHANGE) {
3483 return ext4_cross_rename(old_dir, old_dentry, 3513 return ext4_cross_rename(old_dir, old_dentry,
3484 new_dir, new_dentry); 3514 new_dir, new_dentry);
3485 } 3515 }
3486 /* 3516
3487 * Existence checking was done by the VFS, otherwise "RENAME_NOREPLACE" 3517 return ext4_rename(old_dir, old_dentry, new_dir, new_dentry, flags);
3488 * is equivalent to regular rename.
3489 */
3490 return ext4_rename(old_dir, old_dentry, new_dir, new_dentry);
3491} 3518}
3492 3519
3493/* 3520/*
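With ext4_rename2() now accepting RENAME_WHITEOUT, the flag can be exercised through renameat2(2). A hedged usage sketch: the glibc of this era ships no wrapper, so the raw syscall is used, the flag value matches the uapi definition, and the file names are placeholders:

    #define _GNU_SOURCE
    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #ifndef RENAME_WHITEOUT
    #define RENAME_WHITEOUT (1 << 2)  /* matches include/uapi/linux/fs.h */
    #endif

    int main(void)
    {
            /* Move "old" to "new" and leave a whiteout where "old" used to be. */
            if (syscall(SYS_renameat2, AT_FDCWD, "old", AT_FDCWD, "new",
                        RENAME_WHITEOUT) != 0) {
                    perror("renameat2");
                    return 1;
            }
            return 0;
    }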
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 1e43b905ff98..ca4588388fc3 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -1081,7 +1081,7 @@ static void update_backups(struct super_block *sb, int blk_off, char *data,
1081 break; 1081 break;
1082 1082
1083 if (meta_bg == 0) 1083 if (meta_bg == 0)
1084 backup_block = group * bpg + blk_off; 1084 backup_block = ((ext4_fsblk_t)group) * bpg + blk_off;
1085 else 1085 else
1086 backup_block = (ext4_group_first_block_no(sb, group) + 1086 backup_block = (ext4_group_first_block_no(sb, group) +
1087 ext4_bg_has_super(sb, group)); 1087 ext4_bg_has_super(sb, group));
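The update_backups() change casts the group number to ext4_fsblk_t before the multiply because group * blocks-per-group is otherwise computed in 32 bits and wraps on large filesystems, even though backup_block itself is 64-bit. A small demonstration; the group count and blocks-per-group values are made up, chosen only so the product crosses 2^32:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t group = 200000, bpg = 32768;        /* made-up values, product > 2^32 */

            uint64_t wrapped = group * bpg;              /* multiply performed in 32 bits, wraps */
            uint64_t correct = (uint64_t)group * bpg;    /* widened first, as the fix does */

            printf("wrapped=%" PRIu64 " correct=%" PRIu64 "\n", wrapped, correct);
            return 0;
    }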
@@ -1212,8 +1212,7 @@ static int ext4_set_bitmap_checksums(struct super_block *sb,
1212{ 1212{
1213 struct buffer_head *bh; 1213 struct buffer_head *bh;
1214 1214
1215 if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, 1215 if (!ext4_has_metadata_csum(sb))
1216 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
1217 return 0; 1216 return 0;
1218 1217
1219 bh = ext4_get_bitmap(sb, group_data->inode_bitmap); 1218 bh = ext4_get_bitmap(sb, group_data->inode_bitmap);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 05c159218bc2..2c9e6864abd9 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -70,7 +70,6 @@ static void ext4_mark_recovery_complete(struct super_block *sb,
70static void ext4_clear_journal_err(struct super_block *sb, 70static void ext4_clear_journal_err(struct super_block *sb,
71 struct ext4_super_block *es); 71 struct ext4_super_block *es);
72static int ext4_sync_fs(struct super_block *sb, int wait); 72static int ext4_sync_fs(struct super_block *sb, int wait);
73static int ext4_sync_fs_nojournal(struct super_block *sb, int wait);
74static int ext4_remount(struct super_block *sb, int *flags, char *data); 73static int ext4_remount(struct super_block *sb, int *flags, char *data);
75static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf); 74static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf);
76static int ext4_unfreeze(struct super_block *sb); 75static int ext4_unfreeze(struct super_block *sb);
@@ -141,8 +140,7 @@ static __le32 ext4_superblock_csum(struct super_block *sb,
141static int ext4_superblock_csum_verify(struct super_block *sb, 140static int ext4_superblock_csum_verify(struct super_block *sb,
142 struct ext4_super_block *es) 141 struct ext4_super_block *es)
143{ 142{
144 if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, 143 if (!ext4_has_metadata_csum(sb))
145 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
146 return 1; 144 return 1;
147 145
148 return es->s_checksum == ext4_superblock_csum(sb, es); 146 return es->s_checksum == ext4_superblock_csum(sb, es);
@@ -152,8 +150,7 @@ void ext4_superblock_csum_set(struct super_block *sb)
152{ 150{
153 struct ext4_super_block *es = EXT4_SB(sb)->s_es; 151 struct ext4_super_block *es = EXT4_SB(sb)->s_es;
154 152
155 if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, 153 if (!ext4_has_metadata_csum(sb))
156 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
157 return; 154 return;
158 155
159 es->s_checksum = ext4_superblock_csum(sb, es); 156 es->s_checksum = ext4_superblock_csum(sb, es);
@@ -820,10 +817,9 @@ static void ext4_put_super(struct super_block *sb)
820 percpu_counter_destroy(&sbi->s_freeinodes_counter); 817 percpu_counter_destroy(&sbi->s_freeinodes_counter);
821 percpu_counter_destroy(&sbi->s_dirs_counter); 818 percpu_counter_destroy(&sbi->s_dirs_counter);
822 percpu_counter_destroy(&sbi->s_dirtyclusters_counter); 819 percpu_counter_destroy(&sbi->s_dirtyclusters_counter);
823 percpu_counter_destroy(&sbi->s_extent_cache_cnt);
824 brelse(sbi->s_sbh); 820 brelse(sbi->s_sbh);
825#ifdef CONFIG_QUOTA 821#ifdef CONFIG_QUOTA
826 for (i = 0; i < MAXQUOTAS; i++) 822 for (i = 0; i < EXT4_MAXQUOTAS; i++)
827 kfree(sbi->s_qf_names[i]); 823 kfree(sbi->s_qf_names[i]);
828#endif 824#endif
829 825
@@ -885,6 +881,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
885 ext4_es_init_tree(&ei->i_es_tree); 881 ext4_es_init_tree(&ei->i_es_tree);
886 rwlock_init(&ei->i_es_lock); 882 rwlock_init(&ei->i_es_lock);
887 INIT_LIST_HEAD(&ei->i_es_lru); 883 INIT_LIST_HEAD(&ei->i_es_lru);
884 ei->i_es_all_nr = 0;
888 ei->i_es_lru_nr = 0; 885 ei->i_es_lru_nr = 0;
889 ei->i_touch_when = 0; 886 ei->i_touch_when = 0;
890 ei->i_reserved_data_blocks = 0; 887 ei->i_reserved_data_blocks = 0;
@@ -1002,7 +999,7 @@ static struct inode *ext4_nfs_get_inode(struct super_block *sb,
1002 * Currently we don't know the generation for parent directory, so 999 * Currently we don't know the generation for parent directory, so
1003 * a generation of 0 means "accept any" 1000 * a generation of 0 means "accept any"
1004 */ 1001 */
1005 inode = ext4_iget(sb, ino); 1002 inode = ext4_iget_normal(sb, ino);
1006 if (IS_ERR(inode)) 1003 if (IS_ERR(inode))
1007 return ERR_CAST(inode); 1004 return ERR_CAST(inode);
1008 if (generation && inode->i_generation != generation) { 1005 if (generation && inode->i_generation != generation) {
@@ -1124,25 +1121,6 @@ static const struct super_operations ext4_sops = {
1124 .bdev_try_to_free_page = bdev_try_to_free_page, 1121 .bdev_try_to_free_page = bdev_try_to_free_page,
1125}; 1122};
1126 1123
1127static const struct super_operations ext4_nojournal_sops = {
1128 .alloc_inode = ext4_alloc_inode,
1129 .destroy_inode = ext4_destroy_inode,
1130 .write_inode = ext4_write_inode,
1131 .dirty_inode = ext4_dirty_inode,
1132 .drop_inode = ext4_drop_inode,
1133 .evict_inode = ext4_evict_inode,
1134 .sync_fs = ext4_sync_fs_nojournal,
1135 .put_super = ext4_put_super,
1136 .statfs = ext4_statfs,
1137 .remount_fs = ext4_remount,
1138 .show_options = ext4_show_options,
1139#ifdef CONFIG_QUOTA
1140 .quota_read = ext4_quota_read,
1141 .quota_write = ext4_quota_write,
1142#endif
1143 .bdev_try_to_free_page = bdev_try_to_free_page,
1144};
1145
1146static const struct export_operations ext4_export_ops = { 1124static const struct export_operations ext4_export_ops = {
1147 .fh_to_dentry = ext4_fh_to_dentry, 1125 .fh_to_dentry = ext4_fh_to_dentry,
1148 .fh_to_parent = ext4_fh_to_parent, 1126 .fh_to_parent = ext4_fh_to_parent,
@@ -1712,13 +1690,6 @@ static int parse_options(char *options, struct super_block *sb,
1712 "not specified"); 1690 "not specified");
1713 return 0; 1691 return 0;
1714 } 1692 }
1715 } else {
1716 if (sbi->s_jquota_fmt) {
1717 ext4_msg(sb, KERN_ERR, "journaled quota format "
1718 "specified with no journaling "
1719 "enabled");
1720 return 0;
1721 }
1722 } 1693 }
1723#endif 1694#endif
1724 if (test_opt(sb, DIOREAD_NOLOCK)) { 1695 if (test_opt(sb, DIOREAD_NOLOCK)) {
@@ -2016,8 +1987,7 @@ static __le16 ext4_group_desc_csum(struct ext4_sb_info *sbi, __u32 block_group,
2016 __u16 crc = 0; 1987 __u16 crc = 0;
2017 __le32 le_group = cpu_to_le32(block_group); 1988 __le32 le_group = cpu_to_le32(block_group);
2018 1989
2019 if ((sbi->s_es->s_feature_ro_compat & 1990 if (ext4_has_metadata_csum(sbi->s_sb)) {
2020 cpu_to_le32(EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))) {
2021 /* Use new metadata_csum algorithm */ 1991 /* Use new metadata_csum algorithm */
2022 __le16 save_csum; 1992 __le16 save_csum;
2023 __u32 csum32; 1993 __u32 csum32;
@@ -2035,6 +2005,10 @@ static __le16 ext4_group_desc_csum(struct ext4_sb_info *sbi, __u32 block_group,
2035 } 2005 }
2036 2006
2037 /* old crc16 code */ 2007 /* old crc16 code */
2008 if (!(sbi->s_es->s_feature_ro_compat &
2009 cpu_to_le32(EXT4_FEATURE_RO_COMPAT_GDT_CSUM)))
2010 return 0;
2011
2038 offset = offsetof(struct ext4_group_desc, bg_checksum); 2012 offset = offsetof(struct ext4_group_desc, bg_checksum);
2039 2013
2040 crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid)); 2014 crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid));
@@ -2191,7 +2165,7 @@ static void ext4_orphan_cleanup(struct super_block *sb,
2191 if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) { 2165 if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
2192 /* don't clear list on RO mount w/ errors */ 2166 /* don't clear list on RO mount w/ errors */
2193 if (es->s_last_orphan && !(s_flags & MS_RDONLY)) { 2167 if (es->s_last_orphan && !(s_flags & MS_RDONLY)) {
2194 jbd_debug(1, "Errors on filesystem, " 2168 ext4_msg(sb, KERN_INFO, "Errors on filesystem, "
2195 "clearing orphan list.\n"); 2169 "clearing orphan list.\n");
2196 es->s_last_orphan = 0; 2170 es->s_last_orphan = 0;
2197 } 2171 }
@@ -2207,7 +2181,7 @@ static void ext4_orphan_cleanup(struct super_block *sb,
2207 /* Needed for iput() to work correctly and not trash data */ 2181 /* Needed for iput() to work correctly and not trash data */
2208 sb->s_flags |= MS_ACTIVE; 2182 sb->s_flags |= MS_ACTIVE;
2209 /* Turn on quotas so that they are updated correctly */ 2183 /* Turn on quotas so that they are updated correctly */
2210 for (i = 0; i < MAXQUOTAS; i++) { 2184 for (i = 0; i < EXT4_MAXQUOTAS; i++) {
2211 if (EXT4_SB(sb)->s_qf_names[i]) { 2185 if (EXT4_SB(sb)->s_qf_names[i]) {
2212 int ret = ext4_quota_on_mount(sb, i); 2186 int ret = ext4_quota_on_mount(sb, i);
2213 if (ret < 0) 2187 if (ret < 0)
@@ -2263,7 +2237,7 @@ static void ext4_orphan_cleanup(struct super_block *sb,
2263 PLURAL(nr_truncates)); 2237 PLURAL(nr_truncates));
2264#ifdef CONFIG_QUOTA 2238#ifdef CONFIG_QUOTA
2265 /* Turn quotas off */ 2239 /* Turn quotas off */
2266 for (i = 0; i < MAXQUOTAS; i++) { 2240 for (i = 0; i < EXT4_MAXQUOTAS; i++) {
2267 if (sb_dqopt(sb)->files[i]) 2241 if (sb_dqopt(sb)->files[i])
2268 dquot_quota_off(sb, i); 2242 dquot_quota_off(sb, i);
2269 } 2243 }
@@ -2548,6 +2522,16 @@ static ssize_t sbi_ui_store(struct ext4_attr *a,
2548 return count; 2522 return count;
2549} 2523}
2550 2524
2525static ssize_t es_ui_show(struct ext4_attr *a,
2526 struct ext4_sb_info *sbi, char *buf)
2527{
2528
2529 unsigned int *ui = (unsigned int *) (((char *) sbi->s_es) +
2530 a->u.offset);
2531
2532 return snprintf(buf, PAGE_SIZE, "%u\n", *ui);
2533}
2534
2551static ssize_t reserved_clusters_show(struct ext4_attr *a, 2535static ssize_t reserved_clusters_show(struct ext4_attr *a,
2552 struct ext4_sb_info *sbi, char *buf) 2536 struct ext4_sb_info *sbi, char *buf)
2553{ 2537{
@@ -2601,14 +2585,29 @@ static struct ext4_attr ext4_attr_##_name = { \
2601 .offset = offsetof(struct ext4_sb_info, _elname),\ 2585 .offset = offsetof(struct ext4_sb_info, _elname),\
2602 }, \ 2586 }, \
2603} 2587}
2588
2589#define EXT4_ATTR_OFFSET_ES(_name,_mode,_show,_store,_elname) \
2590static struct ext4_attr ext4_attr_##_name = { \
2591 .attr = {.name = __stringify(_name), .mode = _mode }, \
2592 .show = _show, \
2593 .store = _store, \
2594 .u = { \
2595 .offset = offsetof(struct ext4_super_block, _elname), \
2596 }, \
2597}
2598
2604#define EXT4_ATTR(name, mode, show, store) \ 2599#define EXT4_ATTR(name, mode, show, store) \
2605static struct ext4_attr ext4_attr_##name = __ATTR(name, mode, show, store) 2600static struct ext4_attr ext4_attr_##name = __ATTR(name, mode, show, store)
2606 2601
2607#define EXT4_INFO_ATTR(name) EXT4_ATTR(name, 0444, NULL, NULL) 2602#define EXT4_INFO_ATTR(name) EXT4_ATTR(name, 0444, NULL, NULL)
2608#define EXT4_RO_ATTR(name) EXT4_ATTR(name, 0444, name##_show, NULL) 2603#define EXT4_RO_ATTR(name) EXT4_ATTR(name, 0444, name##_show, NULL)
2609#define EXT4_RW_ATTR(name) EXT4_ATTR(name, 0644, name##_show, name##_store) 2604#define EXT4_RW_ATTR(name) EXT4_ATTR(name, 0644, name##_show, name##_store)
2605
2606#define EXT4_RO_ATTR_ES_UI(name, elname) \
2607 EXT4_ATTR_OFFSET_ES(name, 0444, es_ui_show, NULL, elname)
2610#define EXT4_RW_ATTR_SBI_UI(name, elname) \ 2608#define EXT4_RW_ATTR_SBI_UI(name, elname) \
2611 EXT4_ATTR_OFFSET(name, 0644, sbi_ui_show, sbi_ui_store, elname) 2609 EXT4_ATTR_OFFSET(name, 0644, sbi_ui_show, sbi_ui_store, elname)
2610
2612#define ATTR_LIST(name) &ext4_attr_##name.attr 2611#define ATTR_LIST(name) &ext4_attr_##name.attr
2613#define EXT4_DEPRECATED_ATTR(_name, _val) \ 2612#define EXT4_DEPRECATED_ATTR(_name, _val) \
2614static struct ext4_attr ext4_attr_##_name = { \ 2613static struct ext4_attr ext4_attr_##_name = { \
@@ -2641,6 +2640,9 @@ EXT4_RW_ATTR_SBI_UI(warning_ratelimit_interval_ms, s_warning_ratelimit_state.int
2641EXT4_RW_ATTR_SBI_UI(warning_ratelimit_burst, s_warning_ratelimit_state.burst); 2640EXT4_RW_ATTR_SBI_UI(warning_ratelimit_burst, s_warning_ratelimit_state.burst);
2642EXT4_RW_ATTR_SBI_UI(msg_ratelimit_interval_ms, s_msg_ratelimit_state.interval); 2641EXT4_RW_ATTR_SBI_UI(msg_ratelimit_interval_ms, s_msg_ratelimit_state.interval);
2643EXT4_RW_ATTR_SBI_UI(msg_ratelimit_burst, s_msg_ratelimit_state.burst); 2642EXT4_RW_ATTR_SBI_UI(msg_ratelimit_burst, s_msg_ratelimit_state.burst);
2643EXT4_RO_ATTR_ES_UI(errors_count, s_error_count);
2644EXT4_RO_ATTR_ES_UI(first_error_time, s_first_error_time);
2645EXT4_RO_ATTR_ES_UI(last_error_time, s_last_error_time);
2644 2646
2645static struct attribute *ext4_attrs[] = { 2647static struct attribute *ext4_attrs[] = {
2646 ATTR_LIST(delayed_allocation_blocks), 2648 ATTR_LIST(delayed_allocation_blocks),
@@ -2664,6 +2666,9 @@ static struct attribute *ext4_attrs[] = {
2664 ATTR_LIST(warning_ratelimit_burst), 2666 ATTR_LIST(warning_ratelimit_burst),
2665 ATTR_LIST(msg_ratelimit_interval_ms), 2667 ATTR_LIST(msg_ratelimit_interval_ms),
2666 ATTR_LIST(msg_ratelimit_burst), 2668 ATTR_LIST(msg_ratelimit_burst),
2669 ATTR_LIST(errors_count),
2670 ATTR_LIST(first_error_time),
2671 ATTR_LIST(last_error_time),
2667 NULL, 2672 NULL,
2668}; 2673};
2669 2674
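The three new read-only attributes export s_error_count and the first/last error timestamps straight from the superblock, under each filesystem's directory in /sys/fs/ext4. A short reader, assuming that layout; the device name sda1 is a placeholder:

    #include <stdio.h>

    static void show(const char *path)
    {
            char buf[64];
            FILE *f = fopen(path, "r");

            if (f && fgets(buf, sizeof(buf), f))
                    printf("%s: %s", path, buf);
            if (f)
                    fclose(f);
    }

    int main(void)
    {
            /* Replace "sda1" with the device the filesystem is actually mounted from. */
            show("/sys/fs/ext4/sda1/errors_count");
            show("/sys/fs/ext4/sda1/first_error_time");
            show("/sys/fs/ext4/sda1/last_error_time");
            return 0;
    }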
@@ -2723,9 +2728,25 @@ static void ext4_feat_release(struct kobject *kobj)
2723 complete(&ext4_feat->f_kobj_unregister); 2728 complete(&ext4_feat->f_kobj_unregister);
2724} 2729}
2725 2730
2731static ssize_t ext4_feat_show(struct kobject *kobj,
2732 struct attribute *attr, char *buf)
2733{
2734 return snprintf(buf, PAGE_SIZE, "supported\n");
2735}
2736
2737/*
2738 * We can not use ext4_attr_show/store because it relies on the kobject
2739 * being embedded in the ext4_sb_info structure which is definitely not
2740 * true in this case.
2741 */
2742static const struct sysfs_ops ext4_feat_ops = {
2743 .show = ext4_feat_show,
2744 .store = NULL,
2745};
2746
2726static struct kobj_type ext4_feat_ktype = { 2747static struct kobj_type ext4_feat_ktype = {
2727 .default_attrs = ext4_feat_attrs, 2748 .default_attrs = ext4_feat_attrs,
2728 .sysfs_ops = &ext4_attr_ops, 2749 .sysfs_ops = &ext4_feat_ops,
2729 .release = ext4_feat_release, 2750 .release = ext4_feat_release,
2730}; 2751};
2731 2752
@@ -3179,8 +3200,7 @@ static int set_journal_csum_feature_set(struct super_block *sb)
3179 int compat, incompat; 3200 int compat, incompat;
3180 struct ext4_sb_info *sbi = EXT4_SB(sb); 3201 struct ext4_sb_info *sbi = EXT4_SB(sb);
3181 3202
3182 if (EXT4_HAS_RO_COMPAT_FEATURE(sb, 3203 if (ext4_has_metadata_csum(sb)) {
3183 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
3184 /* journal checksum v3 */ 3204 /* journal checksum v3 */
3185 compat = 0; 3205 compat = 0;
3186 incompat = JBD2_FEATURE_INCOMPAT_CSUM_V3; 3206 incompat = JBD2_FEATURE_INCOMPAT_CSUM_V3;
@@ -3190,6 +3210,10 @@ static int set_journal_csum_feature_set(struct super_block *sb)
3190 incompat = 0; 3210 incompat = 0;
3191 } 3211 }
3192 3212
3213 jbd2_journal_clear_features(sbi->s_journal,
3214 JBD2_FEATURE_COMPAT_CHECKSUM, 0,
3215 JBD2_FEATURE_INCOMPAT_CSUM_V3 |
3216 JBD2_FEATURE_INCOMPAT_CSUM_V2);
3193 if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) { 3217 if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
3194 ret = jbd2_journal_set_features(sbi->s_journal, 3218 ret = jbd2_journal_set_features(sbi->s_journal,
3195 compat, 0, 3219 compat, 0,
@@ -3202,11 +3226,8 @@ static int set_journal_csum_feature_set(struct super_block *sb)
3202 jbd2_journal_clear_features(sbi->s_journal, 0, 0, 3226 jbd2_journal_clear_features(sbi->s_journal, 0, 0,
3203 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT); 3227 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
3204 } else { 3228 } else {
3205 jbd2_journal_clear_features(sbi->s_journal, 3229 jbd2_journal_clear_features(sbi->s_journal, 0, 0,
3206 JBD2_FEATURE_COMPAT_CHECKSUM, 0, 3230 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
3207 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT |
3208 JBD2_FEATURE_INCOMPAT_CSUM_V3 |
3209 JBD2_FEATURE_INCOMPAT_CSUM_V2);
3210 } 3231 }
3211 3232
3212 return ret; 3233 return ret;
@@ -3436,7 +3457,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3436 logical_sb_block = sb_block; 3457 logical_sb_block = sb_block;
3437 } 3458 }
3438 3459
3439 if (!(bh = sb_bread(sb, logical_sb_block))) { 3460 if (!(bh = sb_bread_unmovable(sb, logical_sb_block))) {
3440 ext4_msg(sb, KERN_ERR, "unable to read superblock"); 3461 ext4_msg(sb, KERN_ERR, "unable to read superblock");
3441 goto out_fail; 3462 goto out_fail;
3442 } 3463 }
@@ -3487,8 +3508,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3487 } 3508 }
3488 3509
3489 /* Precompute checksum seed for all metadata */ 3510 /* Precompute checksum seed for all metadata */
3490 if (EXT4_HAS_RO_COMPAT_FEATURE(sb, 3511 if (ext4_has_metadata_csum(sb))
3491 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
3492 sbi->s_csum_seed = ext4_chksum(sbi, ~0, es->s_uuid, 3512 sbi->s_csum_seed = ext4_chksum(sbi, ~0, es->s_uuid,
3493 sizeof(es->s_uuid)); 3513 sizeof(es->s_uuid));
3494 3514
@@ -3506,6 +3526,10 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3506#ifdef CONFIG_EXT4_FS_POSIX_ACL 3526#ifdef CONFIG_EXT4_FS_POSIX_ACL
3507 set_opt(sb, POSIX_ACL); 3527 set_opt(sb, POSIX_ACL);
3508#endif 3528#endif
3529 /* don't forget to enable journal_csum when metadata_csum is enabled. */
3530 if (ext4_has_metadata_csum(sb))
3531 set_opt(sb, JOURNAL_CHECKSUM);
3532
3509 if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA) 3533 if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
3510 set_opt(sb, JOURNAL_DATA); 3534 set_opt(sb, JOURNAL_DATA);
3511 else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED) 3535 else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED)
@@ -3519,8 +3543,8 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3519 set_opt(sb, ERRORS_CONT); 3543 set_opt(sb, ERRORS_CONT);
3520 else 3544 else
3521 set_opt(sb, ERRORS_RO); 3545 set_opt(sb, ERRORS_RO);
3522 if (def_mount_opts & EXT4_DEFM_BLOCK_VALIDITY) 3546 /* block_validity enabled by default; disable with noblock_validity */
3523 set_opt(sb, BLOCK_VALIDITY); 3547 set_opt(sb, BLOCK_VALIDITY);
3524 if (def_mount_opts & EXT4_DEFM_DISCARD) 3548 if (def_mount_opts & EXT4_DEFM_DISCARD)
3525 set_opt(sb, DISCARD); 3549 set_opt(sb, DISCARD);
3526 3550
@@ -3646,7 +3670,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3646 brelse(bh); 3670 brelse(bh);
3647 logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE; 3671 logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
3648 offset = do_div(logical_sb_block, blocksize); 3672 offset = do_div(logical_sb_block, blocksize);
3649 bh = sb_bread(sb, logical_sb_block); 3673 bh = sb_bread_unmovable(sb, logical_sb_block);
3650 if (!bh) { 3674 if (!bh) {
3651 ext4_msg(sb, KERN_ERR, 3675 ext4_msg(sb, KERN_ERR,
3652 "Can't read superblock on 2nd try"); 3676 "Can't read superblock on 2nd try");
@@ -3868,7 +3892,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3868 3892
3869 for (i = 0; i < db_count; i++) { 3893 for (i = 0; i < db_count; i++) {
3870 block = descriptor_loc(sb, logical_sb_block, i); 3894 block = descriptor_loc(sb, logical_sb_block, i);
3871 sbi->s_group_desc[i] = sb_bread(sb, block); 3895 sbi->s_group_desc[i] = sb_bread_unmovable(sb, block);
3872 if (!sbi->s_group_desc[i]) { 3896 if (!sbi->s_group_desc[i]) {
3873 ext4_msg(sb, KERN_ERR, 3897 ext4_msg(sb, KERN_ERR,
3874 "can't read group descriptor %d", i); 3898 "can't read group descriptor %d", i);
@@ -3890,13 +3914,8 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3890 sbi->s_err_report.data = (unsigned long) sb; 3914 sbi->s_err_report.data = (unsigned long) sb;
3891 3915
3892 /* Register extent status tree shrinker */ 3916 /* Register extent status tree shrinker */
3893 ext4_es_register_shrinker(sbi); 3917 if (ext4_es_register_shrinker(sbi))
3894
3895 err = percpu_counter_init(&sbi->s_extent_cache_cnt, 0, GFP_KERNEL);
3896 if (err) {
3897 ext4_msg(sb, KERN_ERR, "insufficient memory");
3898 goto failed_mount3; 3918 goto failed_mount3;
3899 }
3900 3919
3901 sbi->s_stripe = ext4_get_stripe_size(sbi); 3920 sbi->s_stripe = ext4_get_stripe_size(sbi);
3902 sbi->s_extent_max_zeroout_kb = 32; 3921 sbi->s_extent_max_zeroout_kb = 32;
@@ -3904,11 +3923,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3904 /* 3923 /*
3905 * set up enough so that it can read an inode 3924 * set up enough so that it can read an inode
3906 */ 3925 */
3907 if (!test_opt(sb, NOLOAD) && 3926 sb->s_op = &ext4_sops;
3908 EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL))
3909 sb->s_op = &ext4_sops;
3910 else
3911 sb->s_op = &ext4_nojournal_sops;
3912 sb->s_export_op = &ext4_export_ops; 3927 sb->s_export_op = &ext4_export_ops;
3913 sb->s_xattr = ext4_xattr_handlers; 3928 sb->s_xattr = ext4_xattr_handlers;
3914#ifdef CONFIG_QUOTA 3929#ifdef CONFIG_QUOTA
@@ -3932,7 +3947,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3932 if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_MMP) && 3947 if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_MMP) &&
3933 !(sb->s_flags & MS_RDONLY)) 3948 !(sb->s_flags & MS_RDONLY))
3934 if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block))) 3949 if (ext4_multi_mount_protect(sb, le64_to_cpu(es->s_mmp_block)))
3935 goto failed_mount3; 3950 goto failed_mount3a;
3936 3951
3937 /* 3952 /*
3938 * The first inode we look at is the journal inode. Don't try 3953 * The first inode we look at is the journal inode. Don't try
@@ -3941,7 +3956,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3941 if (!test_opt(sb, NOLOAD) && 3956 if (!test_opt(sb, NOLOAD) &&
3942 EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL)) { 3957 EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL)) {
3943 if (ext4_load_journal(sb, es, journal_devnum)) 3958 if (ext4_load_journal(sb, es, journal_devnum))
3944 goto failed_mount3; 3959 goto failed_mount3a;
3945 } else if (test_opt(sb, NOLOAD) && !(sb->s_flags & MS_RDONLY) && 3960 } else if (test_opt(sb, NOLOAD) && !(sb->s_flags & MS_RDONLY) &&
3946 EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER)) { 3961 EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER)) {
3947 ext4_msg(sb, KERN_ERR, "required journal recovery " 3962 ext4_msg(sb, KERN_ERR, "required journal recovery "
@@ -4229,10 +4244,10 @@ failed_mount_wq:
4229 jbd2_journal_destroy(sbi->s_journal); 4244 jbd2_journal_destroy(sbi->s_journal);
4230 sbi->s_journal = NULL; 4245 sbi->s_journal = NULL;
4231 } 4246 }
4232failed_mount3: 4247failed_mount3a:
4233 ext4_es_unregister_shrinker(sbi); 4248 ext4_es_unregister_shrinker(sbi);
4249failed_mount3:
4234 del_timer_sync(&sbi->s_err_report); 4250 del_timer_sync(&sbi->s_err_report);
4235 percpu_counter_destroy(&sbi->s_extent_cache_cnt);
4236 if (sbi->s_mmp_tsk) 4251 if (sbi->s_mmp_tsk)
4237 kthread_stop(sbi->s_mmp_tsk); 4252 kthread_stop(sbi->s_mmp_tsk);
4238failed_mount2: 4253failed_mount2:
@@ -4247,7 +4262,7 @@ failed_mount:
4247 remove_proc_entry(sb->s_id, ext4_proc_root); 4262 remove_proc_entry(sb->s_id, ext4_proc_root);
4248 } 4263 }
4249#ifdef CONFIG_QUOTA 4264#ifdef CONFIG_QUOTA
4250 for (i = 0; i < MAXQUOTAS; i++) 4265 for (i = 0; i < EXT4_MAXQUOTAS; i++)
4251 kfree(sbi->s_qf_names[i]); 4266 kfree(sbi->s_qf_names[i]);
4252#endif 4267#endif
4253 ext4_blkdev_remove(sbi); 4268 ext4_blkdev_remove(sbi);
@@ -4375,6 +4390,15 @@ static journal_t *ext4_get_dev_journal(struct super_block *sb,
4375 goto out_bdev; 4390 goto out_bdev;
4376 } 4391 }
4377 4392
4393 if ((le32_to_cpu(es->s_feature_ro_compat) &
4394 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
4395 es->s_checksum != ext4_superblock_csum(sb, es)) {
4396 ext4_msg(sb, KERN_ERR, "external journal has "
4397 "corrupt superblock");
4398 brelse(bh);
4399 goto out_bdev;
4400 }
4401
4378 if (memcmp(EXT4_SB(sb)->s_es->s_journal_uuid, es->s_uuid, 16)) { 4402 if (memcmp(EXT4_SB(sb)->s_es->s_journal_uuid, es->s_uuid, 16)) {
4379 ext4_msg(sb, KERN_ERR, "journal UUID does not match"); 4403 ext4_msg(sb, KERN_ERR, "journal UUID does not match");
4380 brelse(bh); 4404 brelse(bh);
@@ -4677,15 +4701,19 @@ static int ext4_sync_fs(struct super_block *sb, int wait)
4677 * being sent at the end of the function. But we can skip it if 4701 * being sent at the end of the function. But we can skip it if
4678 * transaction_commit will do it for us. 4702 * transaction_commit will do it for us.
4679 */ 4703 */
4680 target = jbd2_get_latest_transaction(sbi->s_journal); 4704 if (sbi->s_journal) {
4681 if (wait && sbi->s_journal->j_flags & JBD2_BARRIER && 4705 target = jbd2_get_latest_transaction(sbi->s_journal);
4682 !jbd2_trans_will_send_data_barrier(sbi->s_journal, target)) 4706 if (wait && sbi->s_journal->j_flags & JBD2_BARRIER &&
4707 !jbd2_trans_will_send_data_barrier(sbi->s_journal, target))
4708 needs_barrier = true;
4709
4710 if (jbd2_journal_start_commit(sbi->s_journal, &target)) {
4711 if (wait)
4712 ret = jbd2_log_wait_commit(sbi->s_journal,
4713 target);
4714 }
4715 } else if (wait && test_opt(sb, BARRIER))
4683 needs_barrier = true; 4716 needs_barrier = true;
4684
4685 if (jbd2_journal_start_commit(sbi->s_journal, &target)) {
4686 if (wait)
4687 ret = jbd2_log_wait_commit(sbi->s_journal, target);
4688 }
4689 if (needs_barrier) { 4717 if (needs_barrier) {
4690 int err; 4718 int err;
4691 err = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL); 4719 err = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL);
@@ -4696,19 +4724,6 @@ static int ext4_sync_fs(struct super_block *sb, int wait)
4696 return ret; 4724 return ret;
4697} 4725}
4698 4726
4699static int ext4_sync_fs_nojournal(struct super_block *sb, int wait)
4700{
4701 int ret = 0;
4702
4703 trace_ext4_sync_fs(sb, wait);
4704 flush_workqueue(EXT4_SB(sb)->rsv_conversion_wq);
4705 dquot_writeback_dquots(sb, -1);
4706 if (wait && test_opt(sb, BARRIER))
4707 ret = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL);
4708
4709 return ret;
4710}
4711
4712/* 4727/*
4713 * LVM calls this function before a (read-only) snapshot is created. This 4728 * LVM calls this function before a (read-only) snapshot is created. This
4714 * gives us a chance to flush the journal completely and mark the fs clean. 4729 * gives us a chance to flush the journal completely and mark the fs clean.
@@ -4727,23 +4742,26 @@ static int ext4_freeze(struct super_block *sb)
4727 4742
4728 journal = EXT4_SB(sb)->s_journal; 4743 journal = EXT4_SB(sb)->s_journal;
4729 4744
4730 /* Now we set up the journal barrier. */ 4745 if (journal) {
4731 jbd2_journal_lock_updates(journal); 4746 /* Now we set up the journal barrier. */
4747 jbd2_journal_lock_updates(journal);
4732 4748
4733 /* 4749 /*
4734 * Don't clear the needs_recovery flag if we failed to flush 4750 * Don't clear the needs_recovery flag if we failed to
4735 * the journal. 4751 * flush the journal.
4736 */ 4752 */
4737 error = jbd2_journal_flush(journal); 4753 error = jbd2_journal_flush(journal);
4738 if (error < 0) 4754 if (error < 0)
4739 goto out; 4755 goto out;
4756 }
4740 4757
4741 /* Journal blocked and flushed, clear needs_recovery flag. */ 4758 /* Journal blocked and flushed, clear needs_recovery flag. */
4742 EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER); 4759 EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
4743 error = ext4_commit_super(sb, 1); 4760 error = ext4_commit_super(sb, 1);
4744out: 4761out:
4745 /* we rely on upper layer to stop further updates */ 4762 if (journal)
4746 jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal); 4763 /* we rely on upper layer to stop further updates */
4764 jbd2_journal_unlock_updates(journal);
4747 return error; 4765 return error;
4748} 4766}
4749 4767
@@ -4774,7 +4792,7 @@ struct ext4_mount_options {
4774 u32 s_min_batch_time, s_max_batch_time; 4792 u32 s_min_batch_time, s_max_batch_time;
4775#ifdef CONFIG_QUOTA 4793#ifdef CONFIG_QUOTA
4776 int s_jquota_fmt; 4794 int s_jquota_fmt;
4777 char *s_qf_names[MAXQUOTAS]; 4795 char *s_qf_names[EXT4_MAXQUOTAS];
4778#endif 4796#endif
4779}; 4797};
4780 4798
@@ -4804,7 +4822,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
4804 old_opts.s_max_batch_time = sbi->s_max_batch_time; 4822 old_opts.s_max_batch_time = sbi->s_max_batch_time;
4805#ifdef CONFIG_QUOTA 4823#ifdef CONFIG_QUOTA
4806 old_opts.s_jquota_fmt = sbi->s_jquota_fmt; 4824 old_opts.s_jquota_fmt = sbi->s_jquota_fmt;
4807 for (i = 0; i < MAXQUOTAS; i++) 4825 for (i = 0; i < EXT4_MAXQUOTAS; i++)
4808 if (sbi->s_qf_names[i]) { 4826 if (sbi->s_qf_names[i]) {
4809 old_opts.s_qf_names[i] = kstrdup(sbi->s_qf_names[i], 4827 old_opts.s_qf_names[i] = kstrdup(sbi->s_qf_names[i],
4810 GFP_KERNEL); 4828 GFP_KERNEL);
@@ -4828,6 +4846,14 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
4828 goto restore_opts; 4846 goto restore_opts;
4829 } 4847 }
4830 4848
4849 if ((old_opts.s_mount_opt & EXT4_MOUNT_JOURNAL_CHECKSUM) ^
4850 test_opt(sb, JOURNAL_CHECKSUM)) {
4851 ext4_msg(sb, KERN_ERR, "changing journal_checksum "
4852 "during remount not supported");
4853 err = -EINVAL;
4854 goto restore_opts;
4855 }
4856
4831 if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) { 4857 if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
4832 if (test_opt2(sb, EXPLICIT_DELALLOC)) { 4858 if (test_opt2(sb, EXPLICIT_DELALLOC)) {
4833 ext4_msg(sb, KERN_ERR, "can't mount with " 4859 ext4_msg(sb, KERN_ERR, "can't mount with "
@@ -4965,7 +4991,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
4965 4991
4966#ifdef CONFIG_QUOTA 4992#ifdef CONFIG_QUOTA
4967 /* Release old quota file names */ 4993 /* Release old quota file names */
4968 for (i = 0; i < MAXQUOTAS; i++) 4994 for (i = 0; i < EXT4_MAXQUOTAS; i++)
4969 kfree(old_opts.s_qf_names[i]); 4995 kfree(old_opts.s_qf_names[i]);
4970 if (enable_quota) { 4996 if (enable_quota) {
4971 if (sb_any_quota_suspended(sb)) 4997 if (sb_any_quota_suspended(sb))
@@ -4994,7 +5020,7 @@ restore_opts:
4994 sbi->s_max_batch_time = old_opts.s_max_batch_time; 5020 sbi->s_max_batch_time = old_opts.s_max_batch_time;
4995#ifdef CONFIG_QUOTA 5021#ifdef CONFIG_QUOTA
4996 sbi->s_jquota_fmt = old_opts.s_jquota_fmt; 5022 sbi->s_jquota_fmt = old_opts.s_jquota_fmt;
4997 for (i = 0; i < MAXQUOTAS; i++) { 5023 for (i = 0; i < EXT4_MAXQUOTAS; i++) {
4998 kfree(sbi->s_qf_names[i]); 5024 kfree(sbi->s_qf_names[i]);
4999 sbi->s_qf_names[i] = old_opts.s_qf_names[i]; 5025 sbi->s_qf_names[i] = old_opts.s_qf_names[i];
5000 } 5026 }
@@ -5197,7 +5223,7 @@ static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
5197{ 5223{
5198 int err; 5224 int err;
5199 struct inode *qf_inode; 5225 struct inode *qf_inode;
5200 unsigned long qf_inums[MAXQUOTAS] = { 5226 unsigned long qf_inums[EXT4_MAXQUOTAS] = {
5201 le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum), 5227 le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
5202 le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum) 5228 le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum)
5203 }; 5229 };
@@ -5225,13 +5251,13 @@ static int ext4_quota_enable(struct super_block *sb, int type, int format_id,
5225static int ext4_enable_quotas(struct super_block *sb) 5251static int ext4_enable_quotas(struct super_block *sb)
5226{ 5252{
5227 int type, err = 0; 5253 int type, err = 0;
5228 unsigned long qf_inums[MAXQUOTAS] = { 5254 unsigned long qf_inums[EXT4_MAXQUOTAS] = {
5229 le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum), 5255 le32_to_cpu(EXT4_SB(sb)->s_es->s_usr_quota_inum),
5230 le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum) 5256 le32_to_cpu(EXT4_SB(sb)->s_es->s_grp_quota_inum)
5231 }; 5257 };
5232 5258
5233 sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE; 5259 sb_dqopt(sb)->flags |= DQUOT_QUOTA_SYS_FILE;
5234 for (type = 0; type < MAXQUOTAS; type++) { 5260 for (type = 0; type < EXT4_MAXQUOTAS; type++) {
5235 if (qf_inums[type]) { 5261 if (qf_inums[type]) {
5236 err = ext4_quota_enable(sb, type, QFMT_VFS_V1, 5262 err = ext4_quota_enable(sb, type, QFMT_VFS_V1,
5237 DQUOT_USAGE_ENABLED); 5263 DQUOT_USAGE_ENABLED);
@@ -5309,7 +5335,6 @@ static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
5309{ 5335{
5310 struct inode *inode = sb_dqopt(sb)->files[type]; 5336 struct inode *inode = sb_dqopt(sb)->files[type];
5311 ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb); 5337 ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
5312 int err = 0;
5313 int offset = off & (sb->s_blocksize - 1); 5338 int offset = off & (sb->s_blocksize - 1);
5314 int tocopy; 5339 int tocopy;
5315 size_t toread; 5340 size_t toread;
@@ -5324,9 +5349,9 @@ static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
5324 while (toread > 0) { 5349 while (toread > 0) {
5325 tocopy = sb->s_blocksize - offset < toread ? 5350 tocopy = sb->s_blocksize - offset < toread ?
5326 sb->s_blocksize - offset : toread; 5351 sb->s_blocksize - offset : toread;
5327 bh = ext4_bread(NULL, inode, blk, 0, &err); 5352 bh = ext4_bread(NULL, inode, blk, 0);
5328 if (err) 5353 if (IS_ERR(bh))
5329 return err; 5354 return PTR_ERR(bh);
5330 if (!bh) /* A hole? */ 5355 if (!bh) /* A hole? */
5331 memset(data, 0, tocopy); 5356 memset(data, 0, tocopy);
5332 else 5357 else
@@ -5347,8 +5372,7 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type,
5347{ 5372{
5348 struct inode *inode = sb_dqopt(sb)->files[type]; 5373 struct inode *inode = sb_dqopt(sb)->files[type];
5349 ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb); 5374 ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
5350 int err = 0; 5375 int err, offset = off & (sb->s_blocksize - 1);
5351 int offset = off & (sb->s_blocksize - 1);
5352 struct buffer_head *bh; 5376 struct buffer_head *bh;
5353 handle_t *handle = journal_current_handle(); 5377 handle_t *handle = journal_current_handle();
5354 5378
@@ -5369,14 +5393,16 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type,
5369 return -EIO; 5393 return -EIO;
5370 } 5394 }
5371 5395
5372 bh = ext4_bread(handle, inode, blk, 1, &err); 5396 bh = ext4_bread(handle, inode, blk, 1);
5397 if (IS_ERR(bh))
5398 return PTR_ERR(bh);
5373 if (!bh) 5399 if (!bh)
5374 goto out; 5400 goto out;
5375 BUFFER_TRACE(bh, "get write access"); 5401 BUFFER_TRACE(bh, "get write access");
5376 err = ext4_journal_get_write_access(handle, bh); 5402 err = ext4_journal_get_write_access(handle, bh);
5377 if (err) { 5403 if (err) {
5378 brelse(bh); 5404 brelse(bh);
5379 goto out; 5405 return err;
5380 } 5406 }
5381 lock_buffer(bh); 5407 lock_buffer(bh);
5382 memcpy(bh->b_data+offset, data, len); 5408 memcpy(bh->b_data+offset, data, len);
@@ -5385,8 +5411,6 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type,
5385 err = ext4_handle_dirty_metadata(handle, NULL, bh); 5411 err = ext4_handle_dirty_metadata(handle, NULL, bh);
5386 brelse(bh); 5412 brelse(bh);
5387out: 5413out:
5388 if (err)
5389 return err;
5390 if (inode->i_size < off + len) { 5414 if (inode->i_size < off + len) {
5391 i_size_write(inode, off + len); 5415 i_size_write(inode, off + len);
5392 EXT4_I(inode)->i_disksize = inode->i_size; 5416 EXT4_I(inode)->i_disksize = inode->i_size;
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index e7387337060c..1e09fc77395c 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -142,8 +142,7 @@ static int ext4_xattr_block_csum_verify(struct inode *inode,
142 sector_t block_nr, 142 sector_t block_nr,
143 struct ext4_xattr_header *hdr) 143 struct ext4_xattr_header *hdr)
144{ 144{
145 if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, 145 if (ext4_has_metadata_csum(inode->i_sb) &&
146 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
147 (hdr->h_checksum != ext4_xattr_block_csum(inode, block_nr, hdr))) 146 (hdr->h_checksum != ext4_xattr_block_csum(inode, block_nr, hdr)))
148 return 0; 147 return 0;
149 return 1; 148 return 1;
@@ -153,8 +152,7 @@ static void ext4_xattr_block_csum_set(struct inode *inode,
153 sector_t block_nr, 152 sector_t block_nr,
154 struct ext4_xattr_header *hdr) 153 struct ext4_xattr_header *hdr)
155{ 154{
156 if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, 155 if (!ext4_has_metadata_csum(inode->i_sb))
157 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
158 return; 156 return;
159 157
160 hdr->h_checksum = ext4_xattr_block_csum(inode, block_nr, hdr); 158 hdr->h_checksum = ext4_xattr_block_csum(inode, block_nr, hdr);
@@ -190,14 +188,28 @@ ext4_listxattr(struct dentry *dentry, char *buffer, size_t size)
190} 188}
191 189
192static int 190static int
193ext4_xattr_check_names(struct ext4_xattr_entry *entry, void *end) 191ext4_xattr_check_names(struct ext4_xattr_entry *entry, void *end,
192 void *value_start)
194{ 193{
195 while (!IS_LAST_ENTRY(entry)) { 194 struct ext4_xattr_entry *e = entry;
196 struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(entry); 195
196 while (!IS_LAST_ENTRY(e)) {
197 struct ext4_xattr_entry *next = EXT4_XATTR_NEXT(e);
197 if ((void *)next >= end) 198 if ((void *)next >= end)
198 return -EIO; 199 return -EIO;
199 entry = next; 200 e = next;
200 } 201 }
202
203 while (!IS_LAST_ENTRY(entry)) {
204 if (entry->e_value_size != 0 &&
205 (value_start + le16_to_cpu(entry->e_value_offs) <
206 (void *)e + sizeof(__u32) ||
207 value_start + le16_to_cpu(entry->e_value_offs) +
208 le32_to_cpu(entry->e_value_size) > end))
209 return -EIO;
210 entry = EXT4_XATTR_NEXT(entry);
211 }
212
201 return 0; 213 return 0;
202} 214}
203 215
@@ -214,7 +226,8 @@ ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh)
214 return -EIO; 226 return -EIO;
215 if (!ext4_xattr_block_csum_verify(inode, bh->b_blocknr, BHDR(bh))) 227 if (!ext4_xattr_block_csum_verify(inode, bh->b_blocknr, BHDR(bh)))
216 return -EIO; 228 return -EIO;
217 error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size); 229 error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size,
230 bh->b_data);
218 if (!error) 231 if (!error)
219 set_buffer_verified(bh); 232 set_buffer_verified(bh);
220 return error; 233 return error;
@@ -331,7 +344,7 @@ ext4_xattr_ibody_get(struct inode *inode, int name_index, const char *name,
331 header = IHDR(inode, raw_inode); 344 header = IHDR(inode, raw_inode);
332 entry = IFIRST(header); 345 entry = IFIRST(header);
333 end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size; 346 end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
334 error = ext4_xattr_check_names(entry, end); 347 error = ext4_xattr_check_names(entry, end, entry);
335 if (error) 348 if (error)
336 goto cleanup; 349 goto cleanup;
337 error = ext4_xattr_find_entry(&entry, name_index, name, 350 error = ext4_xattr_find_entry(&entry, name_index, name,
@@ -463,7 +476,7 @@ ext4_xattr_ibody_list(struct dentry *dentry, char *buffer, size_t buffer_size)
463 raw_inode = ext4_raw_inode(&iloc); 476 raw_inode = ext4_raw_inode(&iloc);
464 header = IHDR(inode, raw_inode); 477 header = IHDR(inode, raw_inode);
465 end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size; 478 end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
466 error = ext4_xattr_check_names(IFIRST(header), end); 479 error = ext4_xattr_check_names(IFIRST(header), end, IFIRST(header));
467 if (error) 480 if (error)
468 goto cleanup; 481 goto cleanup;
469 error = ext4_xattr_list_entries(dentry, IFIRST(header), 482 error = ext4_xattr_list_entries(dentry, IFIRST(header),
@@ -899,14 +912,8 @@ inserted:
899 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) 912 if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
900 goal = goal & EXT4_MAX_BLOCK_FILE_PHYS; 913 goal = goal & EXT4_MAX_BLOCK_FILE_PHYS;
901 914
902 /*
903 * take i_data_sem because we will test
904 * i_delalloc_reserved_flag in ext4_mb_new_blocks
905 */
906 down_read(&EXT4_I(inode)->i_data_sem);
907 block = ext4_new_meta_blocks(handle, inode, goal, 0, 915 block = ext4_new_meta_blocks(handle, inode, goal, 0,
908 NULL, &error); 916 NULL, &error);
909 up_read((&EXT4_I(inode)->i_data_sem));
910 if (error) 917 if (error)
911 goto cleanup; 918 goto cleanup;
912 919
@@ -986,7 +993,8 @@ int ext4_xattr_ibody_find(struct inode *inode, struct ext4_xattr_info *i,
986 is->s.here = is->s.first; 993 is->s.here = is->s.first;
987 is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size; 994 is->s.end = (void *)raw_inode + EXT4_SB(inode->i_sb)->s_inode_size;
988 if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) { 995 if (ext4_test_inode_state(inode, EXT4_STATE_XATTR)) {
989 error = ext4_xattr_check_names(IFIRST(header), is->s.end); 996 error = ext4_xattr_check_names(IFIRST(header), is->s.end,
997 IFIRST(header));
990 if (error) 998 if (error)
991 return error; 999 return error;
992 /* Find the named attribute. */ 1000 /* Find the named attribute. */
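The extra value_start argument added to ext4_xattr_check_names() above lets the checker reject entries whose value region falls outside the xattr block (or inode body) being parsed, instead of only walking the entry list. A rough userspace sketch of that bounds check, with simplified, hypothetical field names rather than the real on-disk ext4 structures:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for struct ext4_xattr_entry; only the two fields
 * the range check needs, already in host byte order. */
struct xattr_entry {
	uint16_t e_value_offs;	/* value offset, relative to value_start */
	uint32_t e_value_size;	/* value length in bytes */
};

/* Return 0 if every non-empty value lies entirely in [names_end, end).
 * Mirrors the intent of the new loop in ext4_xattr_check_names(): values
 * must start past the entry/name table and must not run beyond the buffer. */
static int check_value_ranges(const struct xattr_entry *e, size_t n,
			      const char *value_start,
			      const char *names_end, const char *end)
{
	for (size_t i = 0; i < n; i++) {
		const char *val = value_start + e[i].e_value_offs;

		if (e[i].e_value_size == 0)
			continue;
		if (val < names_end || val + e[i].e_value_size > end)
			return -1;	/* the kernel returns -EIO here */
	}
	return 0;
}

int main(void)
{
	char block[128];
	struct xattr_entry ok  = { .e_value_offs = 64,  .e_value_size = 16 };
	struct xattr_entry bad = { .e_value_offs = 120, .e_value_size = 32 };

	printf("ok:  %d\n", check_value_ranges(&ok, 1, block, block + 16,
					       block + sizeof(block)));
	printf("bad: %d\n", check_value_ranges(&bad, 1, block, block + 16,
					       block + sizeof(block)));
	return 0;
}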
diff --git a/fs/fat/namei_vfat.c b/fs/fat/namei_vfat.c
index 6df8d3d885e5..b8b92c2f9683 100644
--- a/fs/fat/namei_vfat.c
+++ b/fs/fat/namei_vfat.c
@@ -736,7 +736,12 @@ static struct dentry *vfat_lookup(struct inode *dir, struct dentry *dentry,
736 } 736 }
737 737
738 alias = d_find_alias(inode); 738 alias = d_find_alias(inode);
739 if (alias && !vfat_d_anon_disconn(alias)) { 739 /*
740 * Checking "alias->d_parent == dentry->d_parent" to make sure
741 * FS is not corrupted (especially double linked dir).
742 */
743 if (alias && alias->d_parent == dentry->d_parent &&
744 !vfat_d_anon_disconn(alias)) {
740 /* 745 /*
741 * This inode has non anonymous-DCACHE_DISCONNECTED 746 * This inode has non anonymous-DCACHE_DISCONNECTED
742 * dentry. This means, the user did ->lookup() by an 747 * dentry. This means, the user did ->lookup() by an
@@ -755,12 +760,9 @@ static struct dentry *vfat_lookup(struct inode *dir, struct dentry *dentry,
755 760
756out: 761out:
757 mutex_unlock(&MSDOS_SB(sb)->s_lock); 762 mutex_unlock(&MSDOS_SB(sb)->s_lock);
758 dentry->d_time = dentry->d_parent->d_inode->i_version; 763 if (!inode)
759 dentry = d_splice_alias(inode, dentry); 764 dentry->d_time = dir->i_version;
760 if (dentry) 765 return d_splice_alias(inode, dentry);
761 dentry->d_time = dentry->d_parent->d_inode->i_version;
762 return dentry;
763
764error: 766error:
765 mutex_unlock(&MSDOS_SB(sb)->s_lock); 767 mutex_unlock(&MSDOS_SB(sb)->s_lock);
766 return ERR_PTR(err); 768 return ERR_PTR(err);
@@ -793,7 +795,6 @@ static int vfat_create(struct inode *dir, struct dentry *dentry, umode_t mode,
793 inode->i_mtime = inode->i_atime = inode->i_ctime = ts; 795 inode->i_mtime = inode->i_atime = inode->i_ctime = ts;
794 /* timestamp is already written, so mark_inode_dirty() is unneeded. */ 796 /* timestamp is already written, so mark_inode_dirty() is unneeded. */
795 797
796 dentry->d_time = dentry->d_parent->d_inode->i_version;
797 d_instantiate(dentry, inode); 798 d_instantiate(dentry, inode);
798out: 799out:
799 mutex_unlock(&MSDOS_SB(sb)->s_lock); 800 mutex_unlock(&MSDOS_SB(sb)->s_lock);
@@ -824,6 +825,7 @@ static int vfat_rmdir(struct inode *dir, struct dentry *dentry)
824 clear_nlink(inode); 825 clear_nlink(inode);
825 inode->i_mtime = inode->i_atime = CURRENT_TIME_SEC; 826 inode->i_mtime = inode->i_atime = CURRENT_TIME_SEC;
826 fat_detach(inode); 827 fat_detach(inode);
828 dentry->d_time = dir->i_version;
827out: 829out:
828 mutex_unlock(&MSDOS_SB(sb)->s_lock); 830 mutex_unlock(&MSDOS_SB(sb)->s_lock);
829 831
@@ -849,6 +851,7 @@ static int vfat_unlink(struct inode *dir, struct dentry *dentry)
849 clear_nlink(inode); 851 clear_nlink(inode);
850 inode->i_mtime = inode->i_atime = CURRENT_TIME_SEC; 852 inode->i_mtime = inode->i_atime = CURRENT_TIME_SEC;
851 fat_detach(inode); 853 fat_detach(inode);
854 dentry->d_time = dir->i_version;
852out: 855out:
853 mutex_unlock(&MSDOS_SB(sb)->s_lock); 856 mutex_unlock(&MSDOS_SB(sb)->s_lock);
854 857
@@ -889,7 +892,6 @@ static int vfat_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
889 inode->i_mtime = inode->i_atime = inode->i_ctime = ts; 892 inode->i_mtime = inode->i_atime = inode->i_ctime = ts;
890 /* timestamp is already written, so mark_inode_dirty() is unneeded. */ 893 /* timestamp is already written, so mark_inode_dirty() is unneeded. */
891 894
892 dentry->d_time = dentry->d_parent->d_inode->i_version;
893 d_instantiate(dentry, inode); 895 d_instantiate(dentry, inode);
894 896
895 mutex_unlock(&MSDOS_SB(sb)->s_lock); 897 mutex_unlock(&MSDOS_SB(sb)->s_lock);
diff --git a/fs/internal.h b/fs/internal.h
index 9477f8f6aefc..757ba2abf21e 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -47,7 +47,6 @@ extern void __init chrdev_init(void);
47/* 47/*
48 * namei.c 48 * namei.c
49 */ 49 */
50extern int __inode_permission(struct inode *, int);
51extern int user_path_mountpoint_at(int, const char __user *, unsigned int, struct path *); 50extern int user_path_mountpoint_at(int, const char __user *, unsigned int, struct path *);
52extern int vfs_path_lookup(struct dentry *, struct vfsmount *, 51extern int vfs_path_lookup(struct dentry *, struct vfsmount *,
53 const char *, unsigned int, struct path *); 52 const char *, unsigned int, struct path *);
@@ -139,12 +138,6 @@ extern long prune_dcache_sb(struct super_block *sb, unsigned long nr_to_scan,
139extern int rw_verify_area(int, struct file *, const loff_t *, size_t); 138extern int rw_verify_area(int, struct file *, const loff_t *, size_t);
140 139
141/* 140/*
142 * splice.c
143 */
144extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
145 loff_t *opos, size_t len, unsigned int flags);
146
147/*
148 * pipe.c 141 * pipe.c
149 */ 142 */
150extern const struct file_operations pipefifo_fops; 143extern const struct file_operations pipefifo_fops;
diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c
index 881b3bd0143f..d67a16f2a45d 100644
--- a/fs/isofs/inode.c
+++ b/fs/isofs/inode.c
@@ -29,13 +29,9 @@
29#define BEQUIET 29#define BEQUIET
30 30
31static int isofs_hashi(const struct dentry *parent, struct qstr *qstr); 31static int isofs_hashi(const struct dentry *parent, struct qstr *qstr);
32static int isofs_hash(const struct dentry *parent, struct qstr *qstr);
33static int isofs_dentry_cmpi(const struct dentry *parent, 32static int isofs_dentry_cmpi(const struct dentry *parent,
34 const struct dentry *dentry, 33 const struct dentry *dentry,
35 unsigned int len, const char *str, const struct qstr *name); 34 unsigned int len, const char *str, const struct qstr *name);
36static int isofs_dentry_cmp(const struct dentry *parent,
37 const struct dentry *dentry,
38 unsigned int len, const char *str, const struct qstr *name);
39 35
40#ifdef CONFIG_JOLIET 36#ifdef CONFIG_JOLIET
41static int isofs_hashi_ms(const struct dentry *parent, struct qstr *qstr); 37static int isofs_hashi_ms(const struct dentry *parent, struct qstr *qstr);
@@ -135,10 +131,6 @@ static const struct super_operations isofs_sops = {
135 131
136static const struct dentry_operations isofs_dentry_ops[] = { 132static const struct dentry_operations isofs_dentry_ops[] = {
137 { 133 {
138 .d_hash = isofs_hash,
139 .d_compare = isofs_dentry_cmp,
140 },
141 {
142 .d_hash = isofs_hashi, 134 .d_hash = isofs_hashi,
143 .d_compare = isofs_dentry_cmpi, 135 .d_compare = isofs_dentry_cmpi,
144 }, 136 },
@@ -182,27 +174,6 @@ struct iso9660_options{
182 * Compute the hash for the isofs name corresponding to the dentry. 174 * Compute the hash for the isofs name corresponding to the dentry.
183 */ 175 */
184static int 176static int
185isofs_hash_common(struct qstr *qstr, int ms)
186{
187 const char *name;
188 int len;
189
190 len = qstr->len;
191 name = qstr->name;
192 if (ms) {
193 while (len && name[len-1] == '.')
194 len--;
195 }
196
197 qstr->hash = full_name_hash(name, len);
198
199 return 0;
200}
201
202/*
203 * Compute the hash for the isofs name corresponding to the dentry.
204 */
205static int
206isofs_hashi_common(struct qstr *qstr, int ms) 177isofs_hashi_common(struct qstr *qstr, int ms)
207{ 178{
208 const char *name; 179 const char *name;
@@ -258,32 +229,40 @@ static int isofs_dentry_cmp_common(
258} 229}
259 230
260static int 231static int
261isofs_hash(const struct dentry *dentry, struct qstr *qstr)
262{
263 return isofs_hash_common(qstr, 0);
264}
265
266static int
267isofs_hashi(const struct dentry *dentry, struct qstr *qstr) 232isofs_hashi(const struct dentry *dentry, struct qstr *qstr)
268{ 233{
269 return isofs_hashi_common(qstr, 0); 234 return isofs_hashi_common(qstr, 0);
270} 235}
271 236
272static int 237static int
273isofs_dentry_cmp(const struct dentry *parent, const struct dentry *dentry, 238isofs_dentry_cmpi(const struct dentry *parent, const struct dentry *dentry,
274 unsigned int len, const char *str, const struct qstr *name) 239 unsigned int len, const char *str, const struct qstr *name)
275{ 240{
276 return isofs_dentry_cmp_common(len, str, name, 0, 0); 241 return isofs_dentry_cmp_common(len, str, name, 0, 1);
277} 242}
278 243
244#ifdef CONFIG_JOLIET
245/*
246 * Compute the hash for the isofs name corresponding to the dentry.
247 */
279static int 248static int
280isofs_dentry_cmpi(const struct dentry *parent, const struct dentry *dentry, 249isofs_hash_common(struct qstr *qstr, int ms)
281 unsigned int len, const char *str, const struct qstr *name)
282{ 250{
283 return isofs_dentry_cmp_common(len, str, name, 0, 1); 251 const char *name;
252 int len;
253
254 len = qstr->len;
255 name = qstr->name;
256 if (ms) {
257 while (len && name[len-1] == '.')
258 len--;
259 }
260
261 qstr->hash = full_name_hash(name, len);
262
263 return 0;
284} 264}
285 265
286#ifdef CONFIG_JOLIET
287static int 266static int
288isofs_hash_ms(const struct dentry *dentry, struct qstr *qstr) 267isofs_hash_ms(const struct dentry *dentry, struct qstr *qstr)
289{ 268{
@@ -930,7 +909,8 @@ root_found:
930 if (opt.check == 'r') 909 if (opt.check == 'r')
931 table++; 910 table++;
932 911
933 s->s_d_op = &isofs_dentry_ops[table]; 912 if (table)
913 s->s_d_op = &isofs_dentry_ops[table - 1];
934 914
935 /* get the root dentry */ 915 /* get the root dentry */
936 s->s_root = d_make_root(inode); 916 s->s_root = d_make_root(inode);
diff --git a/fs/isofs/namei.c b/fs/isofs/namei.c
index 95295640d9c8..7b543e6b6526 100644
--- a/fs/isofs/namei.c
+++ b/fs/isofs/namei.c
@@ -18,25 +18,10 @@ static int
18isofs_cmp(struct dentry *dentry, const char *compare, int dlen) 18isofs_cmp(struct dentry *dentry, const char *compare, int dlen)
19{ 19{
20 struct qstr qstr; 20 struct qstr qstr;
21
22 if (!compare)
23 return 1;
24
25 /* check special "." and ".." files */
26 if (dlen == 1) {
27 /* "." */
28 if (compare[0] == 0) {
29 if (!dentry->d_name.len)
30 return 0;
31 compare = ".";
32 } else if (compare[0] == 1) {
33 compare = "..";
34 dlen = 2;
35 }
36 }
37
38 qstr.name = compare; 21 qstr.name = compare;
39 qstr.len = dlen; 22 qstr.len = dlen;
23 if (likely(!dentry->d_op))
24 return dentry->d_name.len != dlen || memcmp(dentry->d_name.name, compare, dlen);
40 return dentry->d_op->d_compare(NULL, NULL, dentry->d_name.len, dentry->d_name.name, &qstr); 25 return dentry->d_op->d_compare(NULL, NULL, dentry->d_name.len, dentry->d_name.name, &qstr);
41} 26}
42 27
@@ -146,7 +131,8 @@ isofs_find_entry(struct inode *dir, struct dentry *dentry,
146 (!(de->flags[-sbi->s_high_sierra] & 1))) && 131 (!(de->flags[-sbi->s_high_sierra] & 1))) &&
147 (sbi->s_showassoc || 132 (sbi->s_showassoc ||
148 (!(de->flags[-sbi->s_high_sierra] & 4)))) { 133 (!(de->flags[-sbi->s_high_sierra] & 4)))) {
149 match = (isofs_cmp(dentry, dpnt, dlen) == 0); 134 if (dpnt && (dlen > 1 || dpnt[0] > 1))
135 match = (isofs_cmp(dentry, dpnt, dlen) == 0);
150 } 136 }
151 if (match) { 137 if (match) {
152 isofs_normalize_block_and_offset(de, 138 isofs_normalize_block_and_offset(de,
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index 06fe11e0abfa..aab8549591e7 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -886,7 +886,7 @@ journal_t * journal_init_inode (struct inode *inode)
886 goto out_err; 886 goto out_err;
887 } 887 }
888 888
889 bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize); 889 bh = getblk_unmovable(journal->j_dev, blocknr, journal->j_blocksize);
890 if (!bh) { 890 if (!bh) {
891 printk(KERN_ERR 891 printk(KERN_ERR
892 "%s: Cannot get buffer for journal superblock\n", 892 "%s: Cannot get buffer for journal superblock\n",
diff --git a/fs/jbd/revoke.c b/fs/jbd/revoke.c
index 8898bbd2b61e..dcead636c33b 100644
--- a/fs/jbd/revoke.c
+++ b/fs/jbd/revoke.c
@@ -93,6 +93,7 @@
93#include <linux/bio.h> 93#include <linux/bio.h>
94#endif 94#endif
95#include <linux/log2.h> 95#include <linux/log2.h>
96#include <linux/hash.h>
96 97
97static struct kmem_cache *revoke_record_cache; 98static struct kmem_cache *revoke_record_cache;
98static struct kmem_cache *revoke_table_cache; 99static struct kmem_cache *revoke_table_cache;
@@ -129,15 +130,11 @@ static void flush_descriptor(journal_t *, struct journal_head *, int, int);
129 130
130/* Utility functions to maintain the revoke table */ 131/* Utility functions to maintain the revoke table */
131 132
132/* Borrowed from buffer.c: this is a tried and tested block hash function */
133static inline int hash(journal_t *journal, unsigned int block) 133static inline int hash(journal_t *journal, unsigned int block)
134{ 134{
135 struct jbd_revoke_table_s *table = journal->j_revoke; 135 struct jbd_revoke_table_s *table = journal->j_revoke;
136 int hash_shift = table->hash_shift;
137 136
138 return ((block << (hash_shift - 6)) ^ 137 return hash_32(block, table->hash_shift);
139 (block >> 13) ^
140 (block << (hash_shift - 12))) & (table->hash_size - 1);
141} 138}
142 139
143static int insert_revoke_hash(journal_t *journal, unsigned int blocknr, 140static int insert_revoke_hash(journal_t *journal, unsigned int blocknr,
diff --git a/fs/jbd2/checkpoint.c b/fs/jbd2/checkpoint.c
index 7f34f4716165..988b32ed4c87 100644
--- a/fs/jbd2/checkpoint.c
+++ b/fs/jbd2/checkpoint.c
@@ -96,15 +96,8 @@ static int __try_to_free_cp_buf(struct journal_head *jh)
96 96
97 if (jh->b_transaction == NULL && !buffer_locked(bh) && 97 if (jh->b_transaction == NULL && !buffer_locked(bh) &&
98 !buffer_dirty(bh) && !buffer_write_io_error(bh)) { 98 !buffer_dirty(bh) && !buffer_write_io_error(bh)) {
99 /*
100 * Get our reference so that bh cannot be freed before
101 * we unlock it
102 */
103 get_bh(bh);
104 JBUFFER_TRACE(jh, "remove from checkpoint list"); 99 JBUFFER_TRACE(jh, "remove from checkpoint list");
105 ret = __jbd2_journal_remove_checkpoint(jh) + 1; 100 ret = __jbd2_journal_remove_checkpoint(jh) + 1;
106 BUFFER_TRACE(bh, "release");
107 __brelse(bh);
108 } 101 }
109 return ret; 102 return ret;
110} 103}
@@ -122,8 +115,6 @@ void __jbd2_log_wait_for_space(journal_t *journal)
122 115
123 nblocks = jbd2_space_needed(journal); 116 nblocks = jbd2_space_needed(journal);
124 while (jbd2_log_space_left(journal) < nblocks) { 117 while (jbd2_log_space_left(journal) < nblocks) {
125 if (journal->j_flags & JBD2_ABORT)
126 return;
127 write_unlock(&journal->j_state_lock); 118 write_unlock(&journal->j_state_lock);
128 mutex_lock(&journal->j_checkpoint_mutex); 119 mutex_lock(&journal->j_checkpoint_mutex);
129 120
@@ -139,6 +130,10 @@ void __jbd2_log_wait_for_space(journal_t *journal)
139 * trace for forensic evidence. 130 * trace for forensic evidence.
140 */ 131 */
141 write_lock(&journal->j_state_lock); 132 write_lock(&journal->j_state_lock);
133 if (journal->j_flags & JBD2_ABORT) {
134 mutex_unlock(&journal->j_checkpoint_mutex);
135 return;
136 }
142 spin_lock(&journal->j_list_lock); 137 spin_lock(&journal->j_list_lock);
143 nblocks = jbd2_space_needed(journal); 138 nblocks = jbd2_space_needed(journal);
144 space_left = jbd2_log_space_left(journal); 139 space_left = jbd2_log_space_left(journal);
@@ -183,58 +178,6 @@ void __jbd2_log_wait_for_space(journal_t *journal)
183 } 178 }
184} 179}
185 180
186/*
187 * Clean up transaction's list of buffers submitted for io.
188 * We wait for any pending IO to complete and remove any clean
189 * buffers. Note that we take the buffers in the opposite ordering
190 * from the one in which they were submitted for IO.
191 *
192 * Return 0 on success, and return <0 if some buffers have failed
193 * to be written out.
194 *
195 * Called with j_list_lock held.
196 */
197static int __wait_cp_io(journal_t *journal, transaction_t *transaction)
198{
199 struct journal_head *jh;
200 struct buffer_head *bh;
201 tid_t this_tid;
202 int released = 0;
203 int ret = 0;
204
205 this_tid = transaction->t_tid;
206restart:
207 /* Did somebody clean up the transaction in the meanwhile? */
208 if (journal->j_checkpoint_transactions != transaction ||
209 transaction->t_tid != this_tid)
210 return ret;
211 while (!released && transaction->t_checkpoint_io_list) {
212 jh = transaction->t_checkpoint_io_list;
213 bh = jh2bh(jh);
214 get_bh(bh);
215 if (buffer_locked(bh)) {
216 spin_unlock(&journal->j_list_lock);
217 wait_on_buffer(bh);
218 /* the journal_head may have gone by now */
219 BUFFER_TRACE(bh, "brelse");
220 __brelse(bh);
221 spin_lock(&journal->j_list_lock);
222 goto restart;
223 }
224 if (unlikely(buffer_write_io_error(bh)))
225 ret = -EIO;
226
227 /*
228 * Now in whatever state the buffer currently is, we know that
229 * it has been written out and so we can drop it from the list
230 */
231 released = __jbd2_journal_remove_checkpoint(jh);
232 __brelse(bh);
233 }
234
235 return ret;
236}
237
238static void 181static void
239__flush_batch(journal_t *journal, int *batch_count) 182__flush_batch(journal_t *journal, int *batch_count)
240{ 183{
@@ -255,81 +198,6 @@ __flush_batch(journal_t *journal, int *batch_count)
255} 198}
256 199
257/* 200/*
258 * Try to flush one buffer from the checkpoint list to disk.
259 *
260 * Return 1 if something happened which requires us to abort the current
261 * scan of the checkpoint list. Return <0 if the buffer has failed to
262 * be written out.
263 *
264 * Called with j_list_lock held and drops it if 1 is returned
265 */
266static int __process_buffer(journal_t *journal, struct journal_head *jh,
267 int *batch_count, transaction_t *transaction)
268{
269 struct buffer_head *bh = jh2bh(jh);
270 int ret = 0;
271
272 if (buffer_locked(bh)) {
273 get_bh(bh);
274 spin_unlock(&journal->j_list_lock);
275 wait_on_buffer(bh);
276 /* the journal_head may have gone by now */
277 BUFFER_TRACE(bh, "brelse");
278 __brelse(bh);
279 ret = 1;
280 } else if (jh->b_transaction != NULL) {
281 transaction_t *t = jh->b_transaction;
282 tid_t tid = t->t_tid;
283
284 transaction->t_chp_stats.cs_forced_to_close++;
285 spin_unlock(&journal->j_list_lock);
286 if (unlikely(journal->j_flags & JBD2_UNMOUNT))
287 /*
288 * The journal thread is dead; so starting and
289 * waiting for a commit to finish will cause
290 * us to wait for a _very_ long time.
291 */
292 printk(KERN_ERR "JBD2: %s: "
293 "Waiting for Godot: block %llu\n",
294 journal->j_devname,
295 (unsigned long long) bh->b_blocknr);
296 jbd2_log_start_commit(journal, tid);
297 jbd2_log_wait_commit(journal, tid);
298 ret = 1;
299 } else if (!buffer_dirty(bh)) {
300 ret = 1;
301 if (unlikely(buffer_write_io_error(bh)))
302 ret = -EIO;
303 get_bh(bh);
304 BUFFER_TRACE(bh, "remove from checkpoint");
305 __jbd2_journal_remove_checkpoint(jh);
306 spin_unlock(&journal->j_list_lock);
307 __brelse(bh);
308 } else {
309 /*
310 * Important: we are about to write the buffer, and
311 * possibly block, while still holding the journal lock.
312 * We cannot afford to let the transaction logic start
313 * messing around with this buffer before we write it to
314 * disk, as that would break recoverability.
315 */
316 BUFFER_TRACE(bh, "queue");
317 get_bh(bh);
318 J_ASSERT_BH(bh, !buffer_jwrite(bh));
319 journal->j_chkpt_bhs[*batch_count] = bh;
320 __buffer_relink_io(jh);
321 transaction->t_chp_stats.cs_written++;
322 (*batch_count)++;
323 if (*batch_count == JBD2_NR_BATCH) {
324 spin_unlock(&journal->j_list_lock);
325 __flush_batch(journal, batch_count);
326 ret = 1;
327 }
328 }
329 return ret;
330}
331
332/*
333 * Perform an actual checkpoint. We take the first transaction on the 201 * Perform an actual checkpoint. We take the first transaction on the
334 * list of transactions to be checkpointed and send all its buffers 202 * list of transactions to be checkpointed and send all its buffers
335 * to disk. We submit larger chunks of data at once. 203 * to disk. We submit larger chunks of data at once.
@@ -339,9 +207,11 @@ static int __process_buffer(journal_t *journal, struct journal_head *jh,
339 */ 207 */
340int jbd2_log_do_checkpoint(journal_t *journal) 208int jbd2_log_do_checkpoint(journal_t *journal)
341{ 209{
342 transaction_t *transaction; 210 struct journal_head *jh;
343 tid_t this_tid; 211 struct buffer_head *bh;
344 int result; 212 transaction_t *transaction;
213 tid_t this_tid;
214 int result, batch_count = 0;
345 215
346 jbd_debug(1, "Start checkpoint\n"); 216 jbd_debug(1, "Start checkpoint\n");
347 217
@@ -374,45 +244,117 @@ restart:
374 * done (maybe it's a new transaction, but it fell at the same 244 * done (maybe it's a new transaction, but it fell at the same
375 * address). 245 * address).
376 */ 246 */
377 if (journal->j_checkpoint_transactions == transaction && 247 if (journal->j_checkpoint_transactions != transaction ||
378 transaction->t_tid == this_tid) { 248 transaction->t_tid != this_tid)
379 int batch_count = 0; 249 goto out;
380 struct journal_head *jh; 250
381 int retry = 0, err; 251 /* checkpoint all of the transaction's buffers */
382 252 while (transaction->t_checkpoint_list) {
383 while (!retry && transaction->t_checkpoint_list) { 253 jh = transaction->t_checkpoint_list;
384 jh = transaction->t_checkpoint_list; 254 bh = jh2bh(jh);
385 retry = __process_buffer(journal, jh, &batch_count, 255
386 transaction); 256 if (buffer_locked(bh)) {
387 if (retry < 0 && !result) 257 spin_unlock(&journal->j_list_lock);
388 result = retry; 258 get_bh(bh);
389 if (!retry && (need_resched() || 259 wait_on_buffer(bh);
390 spin_needbreak(&journal->j_list_lock))) { 260 /* the journal_head may have gone by now */
391 spin_unlock(&journal->j_list_lock); 261 BUFFER_TRACE(bh, "brelse");
392 retry = 1; 262 __brelse(bh);
393 break; 263 goto retry;
394 }
395 } 264 }
265 if (jh->b_transaction != NULL) {
266 transaction_t *t = jh->b_transaction;
267 tid_t tid = t->t_tid;
396 268
397 if (batch_count) { 269 transaction->t_chp_stats.cs_forced_to_close++;
398 if (!retry) { 270 spin_unlock(&journal->j_list_lock);
399 spin_unlock(&journal->j_list_lock); 271 if (unlikely(journal->j_flags & JBD2_UNMOUNT))
400 retry = 1; 272 /*
401 } 273 * The journal thread is dead; so
402 __flush_batch(journal, &batch_count); 274 * starting and waiting for a commit
275 * to finish will cause us to wait for
276 * a _very_ long time.
277 */
278 printk(KERN_ERR
279 "JBD2: %s: Waiting for Godot: block %llu\n",
280 journal->j_devname, (unsigned long long) bh->b_blocknr);
281
282 jbd2_log_start_commit(journal, tid);
283 jbd2_log_wait_commit(journal, tid);
284 goto retry;
285 }
286 if (!buffer_dirty(bh)) {
287 if (unlikely(buffer_write_io_error(bh)) && !result)
288 result = -EIO;
289 BUFFER_TRACE(bh, "remove from checkpoint");
290 if (__jbd2_journal_remove_checkpoint(jh))
291 /* The transaction was released; we're done */
292 goto out;
293 continue;
403 } 294 }
295 /*
296 * Important: we are about to write the buffer, and
297 * possibly block, while still holding the journal
298 * lock. We cannot afford to let the transaction
299 * logic start messing around with this buffer before
300 * we write it to disk, as that would break
301 * recoverability.
302 */
303 BUFFER_TRACE(bh, "queue");
304 get_bh(bh);
305 J_ASSERT_BH(bh, !buffer_jwrite(bh));
306 journal->j_chkpt_bhs[batch_count++] = bh;
307 __buffer_relink_io(jh);
308 transaction->t_chp_stats.cs_written++;
309 if ((batch_count == JBD2_NR_BATCH) ||
310 need_resched() ||
311 spin_needbreak(&journal->j_list_lock))
312 goto unlock_and_flush;
313 }
404 314
405 if (retry) { 315 if (batch_count) {
316 unlock_and_flush:
317 spin_unlock(&journal->j_list_lock);
318 retry:
319 if (batch_count)
320 __flush_batch(journal, &batch_count);
406 spin_lock(&journal->j_list_lock); 321 spin_lock(&journal->j_list_lock);
407 goto restart; 322 goto restart;
323 }
324
325 /*
326 * Now we issued all of the transaction's buffers, let's deal
327 * with the buffers that are out for I/O.
328 */
329restart2:
330 /* Did somebody clean up the transaction in the meanwhile? */
331 if (journal->j_checkpoint_transactions != transaction ||
332 transaction->t_tid != this_tid)
333 goto out;
334
335 while (transaction->t_checkpoint_io_list) {
336 jh = transaction->t_checkpoint_io_list;
337 bh = jh2bh(jh);
338 if (buffer_locked(bh)) {
339 spin_unlock(&journal->j_list_lock);
340 get_bh(bh);
341 wait_on_buffer(bh);
342 /* the journal_head may have gone by now */
343 BUFFER_TRACE(bh, "brelse");
344 __brelse(bh);
345 spin_lock(&journal->j_list_lock);
346 goto restart2;
408 } 347 }
348 if (unlikely(buffer_write_io_error(bh)) && !result)
349 result = -EIO;
350
409 /* 351 /*
410 * Now we have cleaned up the first transaction's checkpoint 352 * Now in whatever state the buffer currently is, we
411 * list. Let's clean up the second one 353 * know that it has been written out and so we can
354 * drop it from the list
412 */ 355 */
413 err = __wait_cp_io(journal, transaction); 356 if (__jbd2_journal_remove_checkpoint(jh))
414 if (!result) 357 break;
415 result = err;
416 } 358 }
417out: 359out:
418 spin_unlock(&journal->j_list_lock); 360 spin_unlock(&journal->j_list_lock);
@@ -478,18 +420,16 @@ int jbd2_cleanup_journal_tail(journal_t *journal)
478 * Find all the written-back checkpoint buffers in the given list and 420 * Find all the written-back checkpoint buffers in the given list and
479 * release them. 421 * release them.
480 * 422 *
481 * Called with the journal locked.
482 * Called with j_list_lock held. 423 * Called with j_list_lock held.
483 * Returns number of buffers reaped (for debug) 424 * Returns 1 if we freed the transaction, 0 otherwise.
484 */ 425 */
485 426static int journal_clean_one_cp_list(struct journal_head *jh)
486static int journal_clean_one_cp_list(struct journal_head *jh, int *released)
487{ 427{
488 struct journal_head *last_jh; 428 struct journal_head *last_jh;
489 struct journal_head *next_jh = jh; 429 struct journal_head *next_jh = jh;
490 int ret, freed = 0; 430 int ret;
431 int freed = 0;
491 432
492 *released = 0;
493 if (!jh) 433 if (!jh)
494 return 0; 434 return 0;
495 435
@@ -498,13 +438,11 @@ static int journal_clean_one_cp_list(struct journal_head *jh, int *released)
498 jh = next_jh; 438 jh = next_jh;
499 next_jh = jh->b_cpnext; 439 next_jh = jh->b_cpnext;
500 ret = __try_to_free_cp_buf(jh); 440 ret = __try_to_free_cp_buf(jh);
501 if (ret) { 441 if (!ret)
502 freed++; 442 return freed;
503 if (ret == 2) { 443 if (ret == 2)
504 *released = 1; 444 return 1;
505 return freed; 445 freed = 1;
506 }
507 }
508 /* 446 /*
509 * This function only frees up some memory 447 * This function only frees up some memory
510 * if possible so we dont have an obligation 448 * if possible so we dont have an obligation
@@ -523,49 +461,49 @@ static int journal_clean_one_cp_list(struct journal_head *jh, int *released)
523 * 461 *
524 * Find all the written-back checkpoint buffers in the journal and release them. 462 * Find all the written-back checkpoint buffers in the journal and release them.
525 * 463 *
526 * Called with the journal locked.
527 * Called with j_list_lock held. 464 * Called with j_list_lock held.
528 * Returns number of buffers reaped (for debug)
529 */ 465 */
530 466void __jbd2_journal_clean_checkpoint_list(journal_t *journal)
531int __jbd2_journal_clean_checkpoint_list(journal_t *journal)
532{ 467{
533 transaction_t *transaction, *last_transaction, *next_transaction; 468 transaction_t *transaction, *last_transaction, *next_transaction;
534 int ret = 0; 469 int ret;
535 int released;
536 470
537 transaction = journal->j_checkpoint_transactions; 471 transaction = journal->j_checkpoint_transactions;
538 if (!transaction) 472 if (!transaction)
539 goto out; 473 return;
540 474
541 last_transaction = transaction->t_cpprev; 475 last_transaction = transaction->t_cpprev;
542 next_transaction = transaction; 476 next_transaction = transaction;
543 do { 477 do {
544 transaction = next_transaction; 478 transaction = next_transaction;
545 next_transaction = transaction->t_cpnext; 479 next_transaction = transaction->t_cpnext;
546 ret += journal_clean_one_cp_list(transaction-> 480 ret = journal_clean_one_cp_list(transaction->t_checkpoint_list);
547 t_checkpoint_list, &released);
548 /* 481 /*
549 * This function only frees up some memory if possible so we 482 * This function only frees up some memory if possible so we
550 * dont have an obligation to finish processing. Bail out if 483 * dont have an obligation to finish processing. Bail out if
551 * preemption requested: 484 * preemption requested:
552 */ 485 */
553 if (need_resched()) 486 if (need_resched())
554 goto out; 487 return;
555 if (released) 488 if (ret)
556 continue; 489 continue;
557 /* 490 /*
558 * It is essential that we are as careful as in the case of 491 * It is essential that we are as careful as in the case of
559 * t_checkpoint_list with removing the buffer from the list as 492 * t_checkpoint_list with removing the buffer from the list as
560 * we can possibly see not yet submitted buffers on io_list 493 * we can possibly see not yet submitted buffers on io_list
561 */ 494 */
562 ret += journal_clean_one_cp_list(transaction-> 495 ret = journal_clean_one_cp_list(transaction->
563 t_checkpoint_io_list, &released); 496 t_checkpoint_io_list);
564 if (need_resched()) 497 if (need_resched())
565 goto out; 498 return;
499 /*
500 * Stop scanning if we couldn't free the transaction. This
501 * avoids pointless scanning of transactions which still
502 * weren't checkpointed.
503 */
504 if (!ret)
505 return;
566 } while (transaction != last_transaction); 506 } while (transaction != last_transaction);
567out:
568 return ret;
569} 507}
570 508
571/* 509/*
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 19d74d86d99c..1df94fabe4eb 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -1237,7 +1237,7 @@ journal_t * jbd2_journal_init_inode (struct inode *inode)
1237 goto out_err; 1237 goto out_err;
1238 } 1238 }
1239 1239
1240 bh = __getblk(journal->j_dev, blocknr, journal->j_blocksize); 1240 bh = getblk_unmovable(journal->j_dev, blocknr, journal->j_blocksize);
1241 if (!bh) { 1241 if (!bh) {
1242 printk(KERN_ERR 1242 printk(KERN_ERR
1243 "%s: Cannot get buffer for journal superblock\n", 1243 "%s: Cannot get buffer for journal superblock\n",
@@ -1522,14 +1522,6 @@ static int journal_get_superblock(journal_t *journal)
1522 goto out; 1522 goto out;
1523 } 1523 }
1524 1524
1525 if (jbd2_journal_has_csum_v2or3(journal) &&
1526 JBD2_HAS_COMPAT_FEATURE(journal, JBD2_FEATURE_COMPAT_CHECKSUM)) {
1527 /* Can't have checksum v1 and v2 on at the same time! */
1528 printk(KERN_ERR "JBD2: Can't enable checksumming v1 and v2 "
1529 "at the same time!\n");
1530 goto out;
1531 }
1532
1533 if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2) && 1525 if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2) &&
1534 JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V3)) { 1526 JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V3)) {
1535 /* Can't have checksum v2 and v3 at the same time! */ 1527 /* Can't have checksum v2 and v3 at the same time! */
@@ -1538,6 +1530,14 @@ static int journal_get_superblock(journal_t *journal)
1538 goto out; 1530 goto out;
1539 } 1531 }
1540 1532
1533 if (jbd2_journal_has_csum_v2or3(journal) &&
1534 JBD2_HAS_COMPAT_FEATURE(journal, JBD2_FEATURE_COMPAT_CHECKSUM)) {
1535 /* Can't have checksum v1 and v2 on at the same time! */
1536 printk(KERN_ERR "JBD2: Can't enable checksumming v1 and v2/3 "
1537 "at the same time!\n");
1538 goto out;
1539 }
1540
1541 if (!jbd2_verify_csum_type(journal, sb)) { 1541 if (!jbd2_verify_csum_type(journal, sb)) {
1542 printk(KERN_ERR "JBD2: Unknown checksum type\n"); 1542 printk(KERN_ERR "JBD2: Unknown checksum type\n");
1543 goto out; 1543 goto out;
@@ -1853,13 +1853,12 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
1853 journal->j_chksum_driver = NULL; 1853 journal->j_chksum_driver = NULL;
1854 return 0; 1854 return 0;
1855 } 1855 }
1856 }
1857 1856
1858 /* Precompute checksum seed for all metadata */ 1857 /* Precompute checksum seed for all metadata */
1859 if (jbd2_journal_has_csum_v2or3(journal))
1860 journal->j_csum_seed = jbd2_chksum(journal, ~0, 1858 journal->j_csum_seed = jbd2_chksum(journal, ~0,
1861 sb->s_uuid, 1859 sb->s_uuid,
1862 sizeof(sb->s_uuid)); 1860 sizeof(sb->s_uuid));
1861 }
1863 } 1862 }
1864 1863
1865 /* If enabling v1 checksums, downgrade superblock */ 1864 /* If enabling v1 checksums, downgrade superblock */
diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
index 9b329b55ffe3..bcbef08a4d8f 100644
--- a/fs/jbd2/recovery.c
+++ b/fs/jbd2/recovery.c
@@ -525,6 +525,7 @@ static int do_one_pass(journal_t *journal,
525 !jbd2_descr_block_csum_verify(journal, 525 !jbd2_descr_block_csum_verify(journal,
526 bh->b_data)) { 526 bh->b_data)) {
527 err = -EIO; 527 err = -EIO;
528 brelse(bh);
528 goto failed; 529 goto failed;
529 } 530 }
530 531
diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c
index d5e95a175c92..c6cbaef2bda1 100644
--- a/fs/jbd2/revoke.c
+++ b/fs/jbd2/revoke.c
@@ -92,6 +92,7 @@
92#include <linux/init.h> 92#include <linux/init.h>
93#include <linux/bio.h> 93#include <linux/bio.h>
94#include <linux/log2.h> 94#include <linux/log2.h>
95#include <linux/hash.h>
95#endif 96#endif
96 97
97static struct kmem_cache *jbd2_revoke_record_cache; 98static struct kmem_cache *jbd2_revoke_record_cache;
@@ -130,16 +131,9 @@ static void flush_descriptor(journal_t *, struct buffer_head *, int, int);
130 131
131/* Utility functions to maintain the revoke table */ 132/* Utility functions to maintain the revoke table */
132 133
133/* Borrowed from buffer.c: this is a tried and tested block hash function */
134static inline int hash(journal_t *journal, unsigned long long block) 134static inline int hash(journal_t *journal, unsigned long long block)
135{ 135{
136 struct jbd2_revoke_table_s *table = journal->j_revoke; 136 return hash_64(block, journal->j_revoke->hash_shift);
137 int hash_shift = table->hash_shift;
138 int hash = (int)block ^ (int)((block >> 31) >> 1);
139
140 return ((hash << (hash_shift - 6)) ^
141 (hash >> 13) ^
142 (hash << (hash_shift - 12))) & (table->hash_size - 1);
143} 137}
144 138
145static int insert_revoke_hash(journal_t *journal, unsigned long long blocknr, 139static int insert_revoke_hash(journal_t *journal, unsigned long long blocknr,
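Both revoke.c changes above replace the open-coded shift-and-xor block hash with the generic hash_32()/hash_64() helpers from <linux/hash.h>. Those are multiplicative hashes: multiply by a golden-ratio-derived constant and keep the top hash_shift bits, which directly yields a bucket index for a table of 1 << hash_shift entries. A userspace sketch of the idea; the multiplier below is the 64-bit golden-ratio prime of this kernel era, but treat it as illustrative since later kernels changed the constants:

#include <stdint.h>
#include <stdio.h>

/* Multiplicative hash in the style of the kernel's hash_64(): multiply by a
 * golden-ratio-derived constant and keep the top 'bits' bits (bits < 64). */
static inline uint64_t toy_hash_64(uint64_t val, unsigned int bits)
{
	return (val * 0x9e37fffffffc0001ULL) >> (64 - bits);
}

int main(void)
{
	unsigned int hash_shift = 8;		/* table->hash_shift: 256 buckets */
	unsigned long long blocknr = 123456789ULL;

	/* Bucket index, analogous to hash(journal, blocknr) after the patch. */
	printf("bucket = %llu\n",
	       (unsigned long long)toy_hash_64(blocknr, hash_shift));
	return 0;
}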
diff --git a/fs/namei.c b/fs/namei.c
index 43927d14db67..db5fe86319e6 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -416,6 +416,7 @@ int __inode_permission(struct inode *inode, int mask)
416 416
417 return security_inode_permission(inode, mask); 417 return security_inode_permission(inode, mask);
418} 418}
419EXPORT_SYMBOL(__inode_permission);
419 420
420/** 421/**
421 * sb_permission - Check superblock-level permissions 422 * sb_permission - Check superblock-level permissions
@@ -2383,22 +2384,17 @@ kern_path_mountpoint(int dfd, const char *name, struct path *path,
2383} 2384}
2384EXPORT_SYMBOL(kern_path_mountpoint); 2385EXPORT_SYMBOL(kern_path_mountpoint);
2385 2386
2386/* 2387int __check_sticky(struct inode *dir, struct inode *inode)
2387 * It's inline, so penalty for filesystems that don't use sticky bit is
2388 * minimal.
2389 */
2390static inline int check_sticky(struct inode *dir, struct inode *inode)
2391{ 2388{
2392 kuid_t fsuid = current_fsuid(); 2389 kuid_t fsuid = current_fsuid();
2393 2390
2394 if (!(dir->i_mode & S_ISVTX))
2395 return 0;
2396 if (uid_eq(inode->i_uid, fsuid)) 2391 if (uid_eq(inode->i_uid, fsuid))
2397 return 0; 2392 return 0;
2398 if (uid_eq(dir->i_uid, fsuid)) 2393 if (uid_eq(dir->i_uid, fsuid))
2399 return 0; 2394 return 0;
2400 return !capable_wrt_inode_uidgid(inode, CAP_FOWNER); 2395 return !capable_wrt_inode_uidgid(inode, CAP_FOWNER);
2401} 2396}
2397EXPORT_SYMBOL(__check_sticky);
2402 2398
2403/* 2399/*
2404 * Check whether we can remove a link victim from directory dir, check 2400 * Check whether we can remove a link victim from directory dir, check
@@ -2501,7 +2497,7 @@ struct dentry *lock_rename(struct dentry *p1, struct dentry *p2)
2501 } 2497 }
2502 2498
2503 mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT); 2499 mutex_lock_nested(&p1->d_inode->i_mutex, I_MUTEX_PARENT);
2504 mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_CHILD); 2500 mutex_lock_nested(&p2->d_inode->i_mutex, I_MUTEX_PARENT2);
2505 return NULL; 2501 return NULL;
2506} 2502}
2507EXPORT_SYMBOL(lock_rename); 2503EXPORT_SYMBOL(lock_rename);
@@ -3064,9 +3060,12 @@ finish_open_created:
3064 error = may_open(&nd->path, acc_mode, open_flag); 3060 error = may_open(&nd->path, acc_mode, open_flag);
3065 if (error) 3061 if (error)
3066 goto out; 3062 goto out;
3067 file->f_path.mnt = nd->path.mnt; 3063
3068 error = finish_open(file, nd->path.dentry, NULL, opened); 3064 BUG_ON(*opened & FILE_OPENED); /* once it's opened, it's opened */
3069 if (error) { 3065 error = vfs_open(&nd->path, file, current_cred());
3066 if (!error) {
3067 *opened |= FILE_OPENED;
3068 } else {
3070 if (error == -EOPENSTALE) 3069 if (error == -EOPENSTALE)
3071 goto stale_open; 3070 goto stale_open;
3072 goto out; 3071 goto out;
@@ -3155,7 +3154,8 @@ static int do_tmpfile(int dfd, struct filename *pathname,
3155 if (error) 3154 if (error)
3156 goto out2; 3155 goto out2;
3157 audit_inode(pathname, nd->path.dentry, 0); 3156 audit_inode(pathname, nd->path.dentry, 0);
3158 error = may_open(&nd->path, op->acc_mode, op->open_flag); 3157 /* Don't check for other permissions, the inode was just created */
3158 error = may_open(&nd->path, MAY_OPEN, op->open_flag);
3159 if (error) 3159 if (error)
3160 goto out2; 3160 goto out2;
3161 file->f_path.mnt = nd->path.mnt; 3161 file->f_path.mnt = nd->path.mnt;
@@ -4210,12 +4210,16 @@ SYSCALL_DEFINE5(renameat2, int, olddfd, const char __user *, oldname,
4210 bool should_retry = false; 4210 bool should_retry = false;
4211 int error; 4211 int error;
4212 4212
4213 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE)) 4213 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
4214 return -EINVAL; 4214 return -EINVAL;
4215 4215
4216 if ((flags & RENAME_NOREPLACE) && (flags & RENAME_EXCHANGE)) 4216 if ((flags & (RENAME_NOREPLACE | RENAME_WHITEOUT)) &&
4217 (flags & RENAME_EXCHANGE))
4217 return -EINVAL; 4218 return -EINVAL;
4218 4219
4220 if ((flags & RENAME_WHITEOUT) && !capable(CAP_MKNOD))
4221 return -EPERM;
4222
4219retry: 4223retry:
4220 from = user_path_parent(olddfd, oldname, &oldnd, lookup_flags); 4224 from = user_path_parent(olddfd, oldname, &oldnd, lookup_flags);
4221 if (IS_ERR(from)) { 4225 if (IS_ERR(from)) {
@@ -4347,6 +4351,20 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
4347 return sys_renameat2(AT_FDCWD, oldname, AT_FDCWD, newname, 0); 4351 return sys_renameat2(AT_FDCWD, oldname, AT_FDCWD, newname, 0);
4348} 4352}
4349 4353
4354int vfs_whiteout(struct inode *dir, struct dentry *dentry)
4355{
4356 int error = may_create(dir, dentry);
4357 if (error)
4358 return error;
4359
4360 if (!dir->i_op->mknod)
4361 return -EPERM;
4362
4363 return dir->i_op->mknod(dir, dentry,
4364 S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
4365}
4366EXPORT_SYMBOL(vfs_whiteout);
4367
4350int readlink_copy(char __user *buffer, int buflen, const char *link) 4368int readlink_copy(char __user *buffer, int buflen, const char *link)
4351{ 4369{
4352 int len = PTR_ERR(link); 4370 int len = PTR_ERR(link);
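The namei.c hunks above add RENAME_WHITEOUT to renameat2() — rejected in combination with RENAME_EXCHANGE and gated on CAP_MKNOD — and introduce vfs_whiteout(), which mknod()s a (0,0) character device in place of the old name; this is the primitive overlayfs uses to record deletions on its upper layer. A hedged userspace sketch of exercising the flag, assuming a kernel with this series applied, headers that define SYS_renameat2, and a filesystem whose ->rename2 supports whiteouts:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef RENAME_WHITEOUT
#define RENAME_WHITEOUT (1 << 2)	/* from linux/fs.h in kernels with this patch */
#endif

int main(int argc, char **argv)
{
	if (argc != 3) {
		fprintf(stderr, "usage: %s <old> <new>\n", argv[0]);
		return 1;
	}

	/* Rename old -> new and leave a whiteout (0,0 char device) at old.
	 * Needs CAP_MKNOD and a filesystem that implements the flag. */
	if (syscall(SYS_renameat2, AT_FDCWD, argv[1], AT_FDCWD, argv[2],
		    RENAME_WHITEOUT) == -1) {
		perror("renameat2(RENAME_WHITEOUT)");
		return 1;
	}
	return 0;
}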
diff --git a/fs/namespace.c b/fs/namespace.c
index fbba8b17330d..5b66b2b3624d 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1686,6 +1686,33 @@ void drop_collected_mounts(struct vfsmount *mnt)
1686 namespace_unlock(); 1686 namespace_unlock();
1687} 1687}
1688 1688
1689/**
1690 * clone_private_mount - create a private clone of a path
1691 *
1692 * This creates a new vfsmount, which will be the clone of @path. The new will
1693 * not be attached anywhere in the namespace and will be private (i.e. changes
1694 * to the originating mount won't be propagated into this).
1695 *
1696 * Release with mntput().
1697 */
1698struct vfsmount *clone_private_mount(struct path *path)
1699{
1700 struct mount *old_mnt = real_mount(path->mnt);
1701 struct mount *new_mnt;
1702
1703 if (IS_MNT_UNBINDABLE(old_mnt))
1704 return ERR_PTR(-EINVAL);
1705
1706 down_read(&namespace_sem);
1707 new_mnt = clone_mnt(old_mnt, path->dentry, CL_PRIVATE);
1708 up_read(&namespace_sem);
1709 if (IS_ERR(new_mnt))
1710 return ERR_CAST(new_mnt);
1711
1712 return &new_mnt->mnt;
1713}
1714EXPORT_SYMBOL_GPL(clone_private_mount);
1715
1689int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg, 1716int iterate_mounts(int (*f)(struct vfsmount *, void *), void *arg,
1690 struct vfsmount *root) 1717 struct vfsmount *root)
1691{ 1718{
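clone_private_mount(), newly exported above, gives a stacking filesystem its own detached, non-propagating copy of a lower mount. A kernel-context sketch (not standalone userspace code) of how a caller such as overlayfs might use it, assuming the declaration is reachable via <linux/mount.h> as in this series; error handling trimmed:

#include <linux/err.h>
#include <linux/mount.h>
#include <linux/path.h>

static struct vfsmount *lower_mnt;

/* Take a private clone of @lower_path: later changes to the source mount
 * tree will not propagate into lower_mnt. */
static int example_attach_layer(struct path *lower_path)
{
	lower_mnt = clone_private_mount(lower_path);
	if (IS_ERR(lower_mnt))
		return PTR_ERR(lower_mnt);	/* e.g. -EINVAL for MNT_UNBINDABLE sources */
	return 0;
}

/* Release the clone with mntput() when the filesystem is torn down. */
static void example_detach_layer(void)
{
	if (lower_mnt && !IS_ERR(lower_mnt))
		mntput(lower_mnt);
	lower_mnt = NULL;
}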
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
index 5228f201d3d5..4f46f7a05289 100644
--- a/fs/nfs/blocklayout/blocklayout.c
+++ b/fs/nfs/blocklayout/blocklayout.c
@@ -378,7 +378,7 @@ bl_write_pagelist(struct nfs_pgio_header *header, int sync)
378 loff_t offset = header->args.offset; 378 loff_t offset = header->args.offset;
379 size_t count = header->args.count; 379 size_t count = header->args.count;
380 struct page **pages = header->args.pages; 380 struct page **pages = header->args.pages;
381 int pg_index = pg_index = header->args.pgbase >> PAGE_CACHE_SHIFT; 381 int pg_index = header->args.pgbase >> PAGE_CACHE_SHIFT;
382 unsigned int pg_len; 382 unsigned int pg_len;
383 struct blk_plug plug; 383 struct blk_plug plug;
384 int i; 384 int i;
diff --git a/fs/nfs/blocklayout/rpc_pipefs.c b/fs/nfs/blocklayout/rpc_pipefs.c
index e966c023b1b7..acbf9ca4018c 100644
--- a/fs/nfs/blocklayout/rpc_pipefs.c
+++ b/fs/nfs/blocklayout/rpc_pipefs.c
@@ -65,17 +65,18 @@ bl_resolve_deviceid(struct nfs_server *server, struct pnfs_block_volume *b,
65 65
66 dprintk("%s CREATING PIPEFS MESSAGE\n", __func__); 66 dprintk("%s CREATING PIPEFS MESSAGE\n", __func__);
67 67
68 mutex_lock(&nn->bl_mutex);
68 bl_pipe_msg.bl_wq = &nn->bl_wq; 69 bl_pipe_msg.bl_wq = &nn->bl_wq;
69 70
70 b->simple.len += 4; /* single volume */ 71 b->simple.len += 4; /* single volume */
71 if (b->simple.len > PAGE_SIZE) 72 if (b->simple.len > PAGE_SIZE)
72 return -EIO; 73 goto out_unlock;
73 74
74 memset(msg, 0, sizeof(*msg)); 75 memset(msg, 0, sizeof(*msg));
75 msg->len = sizeof(*bl_msg) + b->simple.len; 76 msg->len = sizeof(*bl_msg) + b->simple.len;
76 msg->data = kzalloc(msg->len, gfp_mask); 77 msg->data = kzalloc(msg->len, gfp_mask);
77 if (!msg->data) 78 if (!msg->data)
78 goto out; 79 goto out_free_data;
79 80
80 bl_msg = msg->data; 81 bl_msg = msg->data;
81 bl_msg->type = BL_DEVICE_MOUNT, 82 bl_msg->type = BL_DEVICE_MOUNT,
@@ -87,7 +88,7 @@ bl_resolve_deviceid(struct nfs_server *server, struct pnfs_block_volume *b,
87 rc = rpc_queue_upcall(nn->bl_device_pipe, msg); 88 rc = rpc_queue_upcall(nn->bl_device_pipe, msg);
88 if (rc < 0) { 89 if (rc < 0) {
89 remove_wait_queue(&nn->bl_wq, &wq); 90 remove_wait_queue(&nn->bl_wq, &wq);
90 goto out; 91 goto out_free_data;
91 } 92 }
92 93
93 set_current_state(TASK_UNINTERRUPTIBLE); 94 set_current_state(TASK_UNINTERRUPTIBLE);
@@ -97,12 +98,14 @@ bl_resolve_deviceid(struct nfs_server *server, struct pnfs_block_volume *b,
97 if (reply->status != BL_DEVICE_REQUEST_PROC) { 98 if (reply->status != BL_DEVICE_REQUEST_PROC) {
98 printk(KERN_WARNING "%s failed to decode device: %d\n", 99 printk(KERN_WARNING "%s failed to decode device: %d\n",
99 __func__, reply->status); 100 __func__, reply->status);
100 goto out; 101 goto out_free_data;
101 } 102 }
102 103
103 dev = MKDEV(reply->major, reply->minor); 104 dev = MKDEV(reply->major, reply->minor);
104out: 105out_free_data:
105 kfree(msg->data); 106 kfree(msg->data);
107out_unlock:
108 mutex_unlock(&nn->bl_mutex);
106 return dev; 109 return dev;
107} 110}
108 111
@@ -232,6 +235,7 @@ static int nfs4blocklayout_net_init(struct net *net)
232 struct nfs_net *nn = net_generic(net, nfs_net_id); 235 struct nfs_net *nn = net_generic(net, nfs_net_id);
233 struct dentry *dentry; 236 struct dentry *dentry;
234 237
238 mutex_init(&nn->bl_mutex);
235 init_waitqueue_head(&nn->bl_wq); 239 init_waitqueue_head(&nn->bl_wq);
236 nn->bl_device_pipe = rpc_mkpipe_data(&bl_upcall_ops, 0); 240 nn->bl_device_pipe = rpc_mkpipe_data(&bl_upcall_ops, 0);
237 if (IS_ERR(nn->bl_device_pipe)) 241 if (IS_ERR(nn->bl_device_pipe))
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 5853f53db732..7f3f60641344 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -125,6 +125,8 @@ again:
125 continue; 125 continue;
126 if (!test_bit(NFS_DELEGATED_STATE, &state->flags)) 126 if (!test_bit(NFS_DELEGATED_STATE, &state->flags))
127 continue; 127 continue;
128 if (!nfs4_valid_open_stateid(state))
129 continue;
128 if (!nfs4_stateid_match(&state->stateid, stateid)) 130 if (!nfs4_stateid_match(&state->stateid, stateid))
129 continue; 131 continue;
130 get_nfs_open_context(ctx); 132 get_nfs_open_context(ctx);
@@ -193,7 +195,11 @@ static int nfs_do_return_delegation(struct inode *inode, struct nfs_delegation *
193{ 195{
194 int res = 0; 196 int res = 0;
195 197
196 res = nfs4_proc_delegreturn(inode, delegation->cred, &delegation->stateid, issync); 198 if (!test_bit(NFS_DELEGATION_REVOKED, &delegation->flags))
199 res = nfs4_proc_delegreturn(inode,
200 delegation->cred,
201 &delegation->stateid,
202 issync);
197 nfs_free_delegation(delegation); 203 nfs_free_delegation(delegation);
198 return res; 204 return res;
199} 205}
@@ -380,11 +386,13 @@ static int nfs_end_delegation_return(struct inode *inode, struct nfs_delegation
380{ 386{
381 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client; 387 struct nfs_client *clp = NFS_SERVER(inode)->nfs_client;
382 struct nfs_inode *nfsi = NFS_I(inode); 388 struct nfs_inode *nfsi = NFS_I(inode);
383 int err; 389 int err = 0;
384 390
385 if (delegation == NULL) 391 if (delegation == NULL)
386 return 0; 392 return 0;
387 do { 393 do {
394 if (test_bit(NFS_DELEGATION_REVOKED, &delegation->flags))
395 break;
388 err = nfs_delegation_claim_opens(inode, &delegation->stateid); 396 err = nfs_delegation_claim_opens(inode, &delegation->stateid);
389 if (!issync || err != -EAGAIN) 397 if (!issync || err != -EAGAIN)
390 break; 398 break;
@@ -605,10 +613,23 @@ static void nfs_client_mark_return_unused_delegation_types(struct nfs_client *cl
605 rcu_read_unlock(); 613 rcu_read_unlock();
606} 614}
607 615
616static void nfs_revoke_delegation(struct inode *inode)
617{
618 struct nfs_delegation *delegation;
619 rcu_read_lock();
620 delegation = rcu_dereference(NFS_I(inode)->delegation);
621 if (delegation != NULL) {
622 set_bit(NFS_DELEGATION_REVOKED, &delegation->flags);
623 nfs_mark_return_delegation(NFS_SERVER(inode), delegation);
624 }
625 rcu_read_unlock();
626}
627
608void nfs_remove_bad_delegation(struct inode *inode) 628void nfs_remove_bad_delegation(struct inode *inode)
609{ 629{
610 struct nfs_delegation *delegation; 630 struct nfs_delegation *delegation;
611 631
632 nfs_revoke_delegation(inode);
612 delegation = nfs_inode_detach_delegation(inode); 633 delegation = nfs_inode_detach_delegation(inode);
613 if (delegation) { 634 if (delegation) {
614 nfs_inode_find_state_and_recover(inode, &delegation->stateid); 635 nfs_inode_find_state_and_recover(inode, &delegation->stateid);
diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
index 5c1cce39297f..e3c20a3ccc93 100644
--- a/fs/nfs/delegation.h
+++ b/fs/nfs/delegation.h
@@ -31,6 +31,7 @@ enum {
31 NFS_DELEGATION_RETURN_IF_CLOSED, 31 NFS_DELEGATION_RETURN_IF_CLOSED,
32 NFS_DELEGATION_REFERENCED, 32 NFS_DELEGATION_REFERENCED,
33 NFS_DELEGATION_RETURNING, 33 NFS_DELEGATION_RETURNING,
34 NFS_DELEGATION_REVOKED,
34}; 35};
35 36
36int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res); 37int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct nfs_openres *res);
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 06e8cfcbb670..6e62155abf26 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1527,6 +1527,7 @@ int nfs_atomic_open(struct inode *dir, struct dentry *dentry,
1527 case -ENOENT: 1527 case -ENOENT:
1528 d_drop(dentry); 1528 d_drop(dentry);
1529 d_add(dentry, NULL); 1529 d_add(dentry, NULL);
1530 nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
1530 break; 1531 break;
1531 case -EISDIR: 1532 case -EISDIR:
1532 case -ENOTDIR: 1533 case -ENOTDIR:
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 20cffc830468..10bf07280f4a 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -266,6 +266,7 @@ static void nfs_direct_req_free(struct kref *kref)
266{ 266{
267 struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref); 267 struct nfs_direct_req *dreq = container_of(kref, struct nfs_direct_req, kref);
268 268
269 nfs_free_pnfs_ds_cinfo(&dreq->ds_cinfo);
269 if (dreq->l_ctx != NULL) 270 if (dreq->l_ctx != NULL)
270 nfs_put_lock_context(dreq->l_ctx); 271 nfs_put_lock_context(dreq->l_ctx);
271 if (dreq->ctx != NULL) 272 if (dreq->ctx != NULL)
diff --git a/fs/nfs/filelayout/filelayout.c b/fs/nfs/filelayout/filelayout.c
index 46fab1cb455a..7afb52f6a25a 100644
--- a/fs/nfs/filelayout/filelayout.c
+++ b/fs/nfs/filelayout/filelayout.c
@@ -145,9 +145,6 @@ static int filelayout_async_handle_error(struct rpc_task *task,
145 case -NFS4ERR_DELEG_REVOKED: 145 case -NFS4ERR_DELEG_REVOKED:
146 case -NFS4ERR_ADMIN_REVOKED: 146 case -NFS4ERR_ADMIN_REVOKED:
147 case -NFS4ERR_BAD_STATEID: 147 case -NFS4ERR_BAD_STATEID:
148 if (state == NULL)
149 break;
150 nfs_remove_bad_delegation(state->inode);
151 case -NFS4ERR_OPENMODE: 148 case -NFS4ERR_OPENMODE:
152 if (state == NULL) 149 if (state == NULL)
153 break; 150 break;
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 6388a59f2add..00689a8a85e4 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -626,7 +626,7 @@ int nfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
626{ 626{
627 struct inode *inode = dentry->d_inode; 627 struct inode *inode = dentry->d_inode;
628 int need_atime = NFS_I(inode)->cache_validity & NFS_INO_INVALID_ATIME; 628 int need_atime = NFS_I(inode)->cache_validity & NFS_INO_INVALID_ATIME;
629 int err; 629 int err = 0;
630 630
631 trace_nfs_getattr_enter(inode); 631 trace_nfs_getattr_enter(inode);
632 /* Flush out writes to the server in order to update c/mtime. */ 632 /* Flush out writes to the server in order to update c/mtime. */
diff --git a/fs/nfs/netns.h b/fs/nfs/netns.h
index ef221fb8a183..f0e06e4acbef 100644
--- a/fs/nfs/netns.h
+++ b/fs/nfs/netns.h
@@ -19,6 +19,7 @@ struct nfs_net {
19 struct rpc_pipe *bl_device_pipe; 19 struct rpc_pipe *bl_device_pipe;
20 struct bl_dev_msg bl_mount_reply; 20 struct bl_dev_msg bl_mount_reply;
21 wait_queue_head_t bl_wq; 21 wait_queue_head_t bl_wq;
22 struct mutex bl_mutex;
22 struct list_head nfs_client_list; 23 struct list_head nfs_client_list;
23 struct list_head nfs_volume_list; 24 struct list_head nfs_volume_list;
24#if IS_ENABLED(CONFIG_NFS_V4) 25#if IS_ENABLED(CONFIG_NFS_V4)
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 405bd95c1f58..69dc20a743f9 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -370,11 +370,6 @@ static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struc
370 case -NFS4ERR_DELEG_REVOKED: 370 case -NFS4ERR_DELEG_REVOKED:
371 case -NFS4ERR_ADMIN_REVOKED: 371 case -NFS4ERR_ADMIN_REVOKED:
372 case -NFS4ERR_BAD_STATEID: 372 case -NFS4ERR_BAD_STATEID:
373 if (inode != NULL && nfs4_have_delegation(inode, FMODE_READ)) {
374 nfs_remove_bad_delegation(inode);
375 exception->retry = 1;
376 break;
377 }
378 if (state == NULL) 373 if (state == NULL)
379 break; 374 break;
380 ret = nfs4_schedule_stateid_recovery(server, state); 375 ret = nfs4_schedule_stateid_recovery(server, state);
@@ -1654,7 +1649,7 @@ static int nfs4_handle_delegation_recall_error(struct nfs_server *server, struct
1654 nfs_inode_find_state_and_recover(state->inode, 1649 nfs_inode_find_state_and_recover(state->inode,
1655 stateid); 1650 stateid);
1656 nfs4_schedule_stateid_recovery(server, state); 1651 nfs4_schedule_stateid_recovery(server, state);
1657 return 0; 1652 return -EAGAIN;
1658 case -NFS4ERR_DELAY: 1653 case -NFS4ERR_DELAY:
1659 case -NFS4ERR_GRACE: 1654 case -NFS4ERR_GRACE:
1660 set_bit(NFS_DELEGATED_STATE, &state->flags); 1655 set_bit(NFS_DELEGATED_STATE, &state->flags);
@@ -2109,46 +2104,60 @@ static int nfs4_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *sta
2109 return ret; 2104 return ret;
2110} 2105}
2111 2106
2107static void nfs_finish_clear_delegation_stateid(struct nfs4_state *state)
2108{
2109 nfs_remove_bad_delegation(state->inode);
2110 write_seqlock(&state->seqlock);
2111 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
2112 write_sequnlock(&state->seqlock);
2113 clear_bit(NFS_DELEGATED_STATE, &state->flags);
2114}
2115
2116static void nfs40_clear_delegation_stateid(struct nfs4_state *state)
2117{
2118 if (rcu_access_pointer(NFS_I(state->inode)->delegation) != NULL)
2119 nfs_finish_clear_delegation_stateid(state);
2120}
2121
2122static int nfs40_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *state)
2123{
2124 /* NFSv4.0 doesn't allow for delegation recovery on open expire */
2125 nfs40_clear_delegation_stateid(state);
2126 return nfs4_open_expired(sp, state);
2127}
2128
2112#if defined(CONFIG_NFS_V4_1) 2129#if defined(CONFIG_NFS_V4_1)
2113static void nfs41_clear_delegation_stateid(struct nfs4_state *state) 2130static void nfs41_check_delegation_stateid(struct nfs4_state *state)
2114{ 2131{
2115 struct nfs_server *server = NFS_SERVER(state->inode); 2132 struct nfs_server *server = NFS_SERVER(state->inode);
2116 nfs4_stateid *stateid = &state->stateid; 2133 nfs4_stateid stateid;
2117 struct nfs_delegation *delegation; 2134 struct nfs_delegation *delegation;
2118 struct rpc_cred *cred = NULL; 2135 struct rpc_cred *cred;
2119 int status = -NFS4ERR_BAD_STATEID; 2136 int status;
2120
2121 /* If a state reset has been done, test_stateid is unneeded */
2122 if (test_bit(NFS_DELEGATED_STATE, &state->flags) == 0)
2123 return;
2124 2137
2125 /* Get the delegation credential for use by test/free_stateid */ 2138 /* Get the delegation credential for use by test/free_stateid */
2126 rcu_read_lock(); 2139 rcu_read_lock();
2127 delegation = rcu_dereference(NFS_I(state->inode)->delegation); 2140 delegation = rcu_dereference(NFS_I(state->inode)->delegation);
2128 if (delegation != NULL && 2141 if (delegation == NULL) {
2129 nfs4_stateid_match(&delegation->stateid, stateid)) {
2130 cred = get_rpccred(delegation->cred);
2131 rcu_read_unlock();
2132 status = nfs41_test_stateid(server, stateid, cred);
2133 trace_nfs4_test_delegation_stateid(state, NULL, status);
2134 } else
2135 rcu_read_unlock(); 2142 rcu_read_unlock();
2143 return;
2144 }
2145
2146 nfs4_stateid_copy(&stateid, &delegation->stateid);
2147 cred = get_rpccred(delegation->cred);
2148 rcu_read_unlock();
2149 status = nfs41_test_stateid(server, &stateid, cred);
2150 trace_nfs4_test_delegation_stateid(state, NULL, status);
2136 2151
2137 if (status != NFS_OK) { 2152 if (status != NFS_OK) {
2138 /* Free the stateid unless the server explicitly 2153 /* Free the stateid unless the server explicitly
2139 * informs us the stateid is unrecognized. */ 2154 * informs us the stateid is unrecognized. */
2140 if (status != -NFS4ERR_BAD_STATEID) 2155 if (status != -NFS4ERR_BAD_STATEID)
2141 nfs41_free_stateid(server, stateid, cred); 2156 nfs41_free_stateid(server, &stateid, cred);
2142 nfs_remove_bad_delegation(state->inode); 2157 nfs_finish_clear_delegation_stateid(state);
2143
2144 write_seqlock(&state->seqlock);
2145 nfs4_stateid_copy(&state->stateid, &state->open_stateid);
2146 write_sequnlock(&state->seqlock);
2147 clear_bit(NFS_DELEGATED_STATE, &state->flags);
2148 } 2158 }
2149 2159
2150 if (cred != NULL) 2160 put_rpccred(cred);
2151 put_rpccred(cred);
2152} 2161}
2153 2162
2154/** 2163/**
@@ -2192,7 +2201,7 @@ static int nfs41_open_expired(struct nfs4_state_owner *sp, struct nfs4_state *st
2192{ 2201{
2193 int status; 2202 int status;
2194 2203
2195 nfs41_clear_delegation_stateid(state); 2204 nfs41_check_delegation_stateid(state);
2196 status = nfs41_check_open_stateid(state); 2205 status = nfs41_check_open_stateid(state);
2197 if (status != NFS_OK) 2206 if (status != NFS_OK)
2198 status = nfs4_open_expired(sp, state); 2207 status = nfs4_open_expired(sp, state);
@@ -2231,19 +2240,8 @@ static int _nfs4_open_and_get_state(struct nfs4_opendata *opendata,
2231 seq = raw_seqcount_begin(&sp->so_reclaim_seqcount); 2240 seq = raw_seqcount_begin(&sp->so_reclaim_seqcount);
2232 2241
2233 ret = _nfs4_proc_open(opendata); 2242 ret = _nfs4_proc_open(opendata);
2234 if (ret != 0) { 2243 if (ret != 0)
2235 if (ret == -ENOENT) {
2236 dentry = opendata->dentry;
2237 if (dentry->d_inode)
2238 d_delete(dentry);
2239 else if (d_unhashed(dentry))
2240 d_add(dentry, NULL);
2241
2242 nfs_set_verifier(dentry,
2243 nfs_save_change_attribute(opendata->dir->d_inode));
2244 }
2245 goto out; 2244 goto out;
2246 }
2247 2245
2248 state = nfs4_opendata_to_nfs4_state(opendata); 2246 state = nfs4_opendata_to_nfs4_state(opendata);
2249 ret = PTR_ERR(state); 2247 ret = PTR_ERR(state);
@@ -4841,9 +4839,6 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
4841 case -NFS4ERR_DELEG_REVOKED: 4839 case -NFS4ERR_DELEG_REVOKED:
4842 case -NFS4ERR_ADMIN_REVOKED: 4840 case -NFS4ERR_ADMIN_REVOKED:
4843 case -NFS4ERR_BAD_STATEID: 4841 case -NFS4ERR_BAD_STATEID:
4844 if (state == NULL)
4845 break;
4846 nfs_remove_bad_delegation(state->inode);
4847 case -NFS4ERR_OPENMODE: 4842 case -NFS4ERR_OPENMODE:
4848 if (state == NULL) 4843 if (state == NULL)
4849 break; 4844 break;
@@ -8341,7 +8336,7 @@ static const struct nfs4_state_recovery_ops nfs41_reboot_recovery_ops = {
8341static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = { 8336static const struct nfs4_state_recovery_ops nfs40_nograce_recovery_ops = {
8342 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE, 8337 .owner_flag_bit = NFS_OWNER_RECLAIM_NOGRACE,
8343 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE, 8338 .state_flag_bit = NFS_STATE_RECLAIM_NOGRACE,
8344 .recover_open = nfs4_open_expired, 8339 .recover_open = nfs40_open_expired,
8345 .recover_lock = nfs4_lock_expired, 8340 .recover_lock = nfs4_lock_expired,
8346 .establish_clid = nfs4_init_clientid, 8341 .establish_clid = nfs4_init_clientid,
8347}; 8342};
@@ -8408,8 +8403,7 @@ static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
8408 | NFS_CAP_CHANGE_ATTR 8403 | NFS_CAP_CHANGE_ATTR
8409 | NFS_CAP_POSIX_LOCK 8404 | NFS_CAP_POSIX_LOCK
8410 | NFS_CAP_STATEID_NFSV41 8405 | NFS_CAP_STATEID_NFSV41
8411 | NFS_CAP_ATOMIC_OPEN_V1 8406 | NFS_CAP_ATOMIC_OPEN_V1,
8412 | NFS_CAP_SEEK,
8413 .init_client = nfs41_init_client, 8407 .init_client = nfs41_init_client,
8414 .shutdown_client = nfs41_shutdown_client, 8408 .shutdown_client = nfs41_shutdown_client,
8415 .match_stateid = nfs41_match_stateid, 8409 .match_stateid = nfs41_match_stateid,
@@ -8431,7 +8425,8 @@ static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = {
8431 | NFS_CAP_CHANGE_ATTR 8425 | NFS_CAP_CHANGE_ATTR
8432 | NFS_CAP_POSIX_LOCK 8426 | NFS_CAP_POSIX_LOCK
8433 | NFS_CAP_STATEID_NFSV41 8427 | NFS_CAP_STATEID_NFSV41
8434 | NFS_CAP_ATOMIC_OPEN_V1, 8428 | NFS_CAP_ATOMIC_OPEN_V1
8429 | NFS_CAP_SEEK,
8435 .init_client = nfs41_init_client, 8430 .init_client = nfs41_init_client,
8436 .shutdown_client = nfs41_shutdown_client, 8431 .shutdown_client = nfs41_shutdown_client,
8437 .match_stateid = nfs41_match_stateid, 8432 .match_stateid = nfs41_match_stateid,
diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c
index c6e4bda63000..9e5bc42180e4 100644
--- a/fs/nfs/objlayout/objio_osd.c
+++ b/fs/nfs/objlayout/objio_osd.c
@@ -5,7 +5,7 @@
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Benny Halevy <bhalevy@panasas.com> 7 * Benny Halevy <bhalevy@panasas.com>
8 * Boaz Harrosh <bharrosh@panasas.com> 8 * Boaz Harrosh <ooo@electrozaur.com>
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 11 * it under the terms of the GNU General Public License version 2
diff --git a/fs/nfs/objlayout/objlayout.c b/fs/nfs/objlayout/objlayout.c
index c89357c7a914..919efd4a1a23 100644
--- a/fs/nfs/objlayout/objlayout.c
+++ b/fs/nfs/objlayout/objlayout.c
@@ -5,7 +5,7 @@
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Benny Halevy <bhalevy@panasas.com> 7 * Benny Halevy <bhalevy@panasas.com>
8 * Boaz Harrosh <bharrosh@panasas.com> 8 * Boaz Harrosh <ooo@electrozaur.com>
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 11 * it under the terms of the GNU General Public License version 2
diff --git a/fs/nfs/objlayout/objlayout.h b/fs/nfs/objlayout/objlayout.h
index 3a0828d57339..2641dbad345c 100644
--- a/fs/nfs/objlayout/objlayout.h
+++ b/fs/nfs/objlayout/objlayout.h
@@ -6,7 +6,7 @@
6 * All rights reserved. 6 * All rights reserved.
7 * 7 *
8 * Benny Halevy <bhalevy@panasas.com> 8 * Benny Halevy <bhalevy@panasas.com>
9 * Boaz Harrosh <bharrosh@panasas.com> 9 * Boaz Harrosh <ooo@electrozaur.com>
10 * 10 *
11 * This program is free software; you can redistribute it and/or modify 11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 12 * it under the terms of the GNU General Public License version 2
diff --git a/fs/nfs/objlayout/pnfs_osd_xdr_cli.c b/fs/nfs/objlayout/pnfs_osd_xdr_cli.c
index b3918f7ac34d..f093c7ec983b 100644
--- a/fs/nfs/objlayout/pnfs_osd_xdr_cli.c
+++ b/fs/nfs/objlayout/pnfs_osd_xdr_cli.c
@@ -5,7 +5,7 @@
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Benny Halevy <bhalevy@panasas.com> 7 * Benny Halevy <bhalevy@panasas.com>
8 * Boaz Harrosh <bharrosh@panasas.com> 8 * Boaz Harrosh <ooo@electrozaur.com>
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 11 * it under the terms of the GNU General Public License version 2
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 12493846a2d3..f83b02dc9166 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -715,8 +715,6 @@ static void nfs_inode_remove_request(struct nfs_page *req)
715 715
716 if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags)) 716 if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags))
717 nfs_release_request(req); 717 nfs_release_request(req);
718 else
719 WARN_ON_ONCE(1);
720} 718}
721 719
722static void 720static void
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index ed2b1151b171..7cbdf1b2e4ab 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -774,8 +774,12 @@ static bool nfsd41_cb_get_slot(struct nfs4_client *clp, struct rpc_task *task)
774{ 774{
775 if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) { 775 if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) {
776 rpc_sleep_on(&clp->cl_cb_waitq, task, NULL); 776 rpc_sleep_on(&clp->cl_cb_waitq, task, NULL);
777 dprintk("%s slot is busy\n", __func__); 777 /* Race breaker */
778 return false; 778 if (test_and_set_bit(0, &clp->cl_cb_slot_busy) != 0) {
779 dprintk("%s slot is busy\n", __func__);
780 return false;
781 }
782 rpc_wake_up_queued_task(&clp->cl_cb_waitq, task);
779 } 783 }
780 return true; 784 return true;
781} 785}
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index cdeb3cfd6f32..0beb023f25ac 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -1272,7 +1272,8 @@ static bool need_wrongsec_check(struct svc_rqst *rqstp)
1272 */ 1272 */
1273 if (argp->opcnt == resp->opcnt) 1273 if (argp->opcnt == resp->opcnt)
1274 return false; 1274 return false;
1275 1275 if (next->opnum == OP_ILLEGAL)
1276 return false;
1276 nextd = OPDESC(next); 1277 nextd = OPDESC(next);
1277 /* 1278 /*
1278 * Rest of 2.6.3.1.1: certain operations will return WRONGSEC 1279 * Rest of 2.6.3.1.1: certain operations will return WRONGSEC
@@ -1589,7 +1590,8 @@ static inline u32 nfsd4_rename_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op
1589static inline u32 nfsd4_sequence_rsize(struct svc_rqst *rqstp, 1590static inline u32 nfsd4_sequence_rsize(struct svc_rqst *rqstp,
1590 struct nfsd4_op *op) 1591 struct nfsd4_op *op)
1591{ 1592{
1592 return NFS4_MAX_SESSIONID_LEN + 20; 1593 return (op_encode_hdr_size
1594 + XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + 5) * sizeof(__be32);
1593} 1595}
1594 1596
1595static inline u32 nfsd4_setattr_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op) 1597static inline u32 nfsd4_setattr_rsize(struct svc_rqst *rqstp, struct nfsd4_op *op)
@@ -1893,6 +1895,7 @@ static struct nfsd4_operation nfsd4_ops[] = {
1893 .op_func = (nfsd4op_func)nfsd4_sequence, 1895 .op_func = (nfsd4op_func)nfsd4_sequence,
1894 .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP, 1896 .op_flags = ALLOWED_WITHOUT_FH | ALLOWED_AS_FIRST_OP,
1895 .op_name = "OP_SEQUENCE", 1897 .op_name = "OP_SEQUENCE",
1898 .op_rsize_bop = (nfsd4op_rsize)nfsd4_sequence_rsize,
1896 }, 1899 },
1897 [OP_DESTROY_CLIENTID] = { 1900 [OP_DESTROY_CLIENTID] = {
1898 .op_func = (nfsd4op_func)nfsd4_destroy_clientid, 1901 .op_func = (nfsd4op_func)nfsd4_destroy_clientid,
diff --git a/fs/nfsd/nfsd.h b/fs/nfsd/nfsd.h
index 747f3b95bd11..33a46a8dfaf7 100644
--- a/fs/nfsd/nfsd.h
+++ b/fs/nfsd/nfsd.h
@@ -335,12 +335,15 @@ void nfsd_lockd_shutdown(void);
335 (NFSD4_SUPPORTED_ATTRS_WORD2 | FATTR4_WORD2_SUPPATTR_EXCLCREAT) 335 (NFSD4_SUPPORTED_ATTRS_WORD2 | FATTR4_WORD2_SUPPATTR_EXCLCREAT)
336 336
337#ifdef CONFIG_NFSD_V4_SECURITY_LABEL 337#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
338#define NFSD4_2_SUPPORTED_ATTRS_WORD2 \ 338#define NFSD4_2_SECURITY_ATTRS FATTR4_WORD2_SECURITY_LABEL
339 (NFSD4_1_SUPPORTED_ATTRS_WORD2 | FATTR4_WORD2_SECURITY_LABEL)
340#else 339#else
341#define NFSD4_2_SUPPORTED_ATTRS_WORD2 0 340#define NFSD4_2_SECURITY_ATTRS 0
342#endif 341#endif
343 342
343#define NFSD4_2_SUPPORTED_ATTRS_WORD2 \
344 (NFSD4_1_SUPPORTED_ATTRS_WORD2 | \
345 NFSD4_2_SECURITY_ATTRS)
346
344static inline u32 nfsd_suppattrs0(u32 minorversion) 347static inline u32 nfsd_suppattrs0(u32 minorversion)
345{ 348{
346 return minorversion ? NFSD4_1_SUPPORTED_ATTRS_WORD0 349 return minorversion ? NFSD4_1_SUPPORTED_ATTRS_WORD0
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
index 9d3e9c50066a..89326acd4561 100644
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -229,8 +229,16 @@ int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
229 &fsnotify_mark_srcu); 229 &fsnotify_mark_srcu);
230 } 230 }
231 231
232 /*
233 * We need to merge inode & vfsmount mark lists so that inode mark
234 * ignore masks are properly reflected for mount mark notifications.
235 * That's why this traversal is so complicated...
236 */
232 while (inode_node || vfsmount_node) { 237 while (inode_node || vfsmount_node) {
233 inode_group = vfsmount_group = NULL; 238 inode_group = NULL;
239 inode_mark = NULL;
240 vfsmount_group = NULL;
241 vfsmount_mark = NULL;
234 242
235 if (inode_node) { 243 if (inode_node) {
236 inode_mark = hlist_entry(srcu_dereference(inode_node, &fsnotify_mark_srcu), 244 inode_mark = hlist_entry(srcu_dereference(inode_node, &fsnotify_mark_srcu),
@@ -244,21 +252,19 @@ int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
244 vfsmount_group = vfsmount_mark->group; 252 vfsmount_group = vfsmount_mark->group;
245 } 253 }
246 254
247 if (inode_group > vfsmount_group) { 255 if (inode_group && vfsmount_group) {
248 /* handle inode */ 256 int cmp = fsnotify_compare_groups(inode_group,
249 ret = send_to_group(to_tell, inode_mark, NULL, mask, 257 vfsmount_group);
250 data, data_is, cookie, file_name); 258 if (cmp > 0) {
251 /* we didn't use the vfsmount_mark */ 259 inode_group = NULL;
252 vfsmount_group = NULL; 260 inode_mark = NULL;
253 } else if (vfsmount_group > inode_group) { 261 } else if (cmp < 0) {
254 ret = send_to_group(to_tell, NULL, vfsmount_mark, mask, 262 vfsmount_group = NULL;
255 data, data_is, cookie, file_name); 263 vfsmount_mark = NULL;
256 inode_group = NULL; 264 }
257 } else {
258 ret = send_to_group(to_tell, inode_mark, vfsmount_mark,
259 mask, data, data_is, cookie,
260 file_name);
261 } 265 }
266 ret = send_to_group(to_tell, inode_mark, vfsmount_mark, mask,
267 data, data_is, cookie, file_name);
262 268
263 if (ret && (mask & ALL_FSNOTIFY_PERM_EVENTS)) 269 if (ret && (mask & ALL_FSNOTIFY_PERM_EVENTS))
264 goto out; 270 goto out;
diff --git a/fs/notify/fsnotify.h b/fs/notify/fsnotify.h
index 9c0898c4cfe1..3b68b0ae0a97 100644
--- a/fs/notify/fsnotify.h
+++ b/fs/notify/fsnotify.h
@@ -12,6 +12,10 @@ extern void fsnotify_flush_notify(struct fsnotify_group *group);
12/* protects reads of inode and vfsmount marks list */ 12/* protects reads of inode and vfsmount marks list */
13extern struct srcu_struct fsnotify_mark_srcu; 13extern struct srcu_struct fsnotify_mark_srcu;
14 14
15/* compare two groups for sorting of marks lists */
16extern int fsnotify_compare_groups(struct fsnotify_group *a,
17 struct fsnotify_group *b);
18
15extern void fsnotify_set_inode_mark_mask_locked(struct fsnotify_mark *fsn_mark, 19extern void fsnotify_set_inode_mark_mask_locked(struct fsnotify_mark *fsn_mark,
16 __u32 mask); 20 __u32 mask);
17/* add a mark to an inode */ 21/* add a mark to an inode */
diff --git a/fs/notify/inode_mark.c b/fs/notify/inode_mark.c
index 9ce062218de9..dfbf5447eea4 100644
--- a/fs/notify/inode_mark.c
+++ b/fs/notify/inode_mark.c
@@ -194,6 +194,7 @@ int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
194{ 194{
195 struct fsnotify_mark *lmark, *last = NULL; 195 struct fsnotify_mark *lmark, *last = NULL;
196 int ret = 0; 196 int ret = 0;
197 int cmp;
197 198
198 mark->flags |= FSNOTIFY_MARK_FLAG_INODE; 199 mark->flags |= FSNOTIFY_MARK_FLAG_INODE;
199 200
@@ -219,11 +220,8 @@ int fsnotify_add_inode_mark(struct fsnotify_mark *mark,
219 goto out; 220 goto out;
220 } 221 }
221 222
222 if (mark->group->priority < lmark->group->priority) 223 cmp = fsnotify_compare_groups(lmark->group, mark->group);
223 continue; 224 if (cmp < 0)
224
225 if ((mark->group->priority == lmark->group->priority) &&
226 (mark->group < lmark->group))
227 continue; 225 continue;
228 226
229 hlist_add_before_rcu(&mark->i.i_list, &lmark->i.i_list); 227 hlist_add_before_rcu(&mark->i.i_list, &lmark->i.i_list);
@@ -288,20 +286,25 @@ void fsnotify_unmount_inodes(struct list_head *list)
288 spin_unlock(&inode->i_lock); 286 spin_unlock(&inode->i_lock);
289 287
290 /* In case the dropping of a reference would nuke next_i. */ 288 /* In case the dropping of a reference would nuke next_i. */
291 if ((&next_i->i_sb_list != list) && 289 while (&next_i->i_sb_list != list) {
292 atomic_read(&next_i->i_count)) {
293 spin_lock(&next_i->i_lock); 290 spin_lock(&next_i->i_lock);
294 if (!(next_i->i_state & (I_FREEING | I_WILL_FREE))) { 291 if (!(next_i->i_state & (I_FREEING | I_WILL_FREE)) &&
292 atomic_read(&next_i->i_count)) {
295 __iget(next_i); 293 __iget(next_i);
296 need_iput = next_i; 294 need_iput = next_i;
295 spin_unlock(&next_i->i_lock);
296 break;
297 } 297 }
298 spin_unlock(&next_i->i_lock); 298 spin_unlock(&next_i->i_lock);
299 next_i = list_entry(next_i->i_sb_list.next,
300 struct inode, i_sb_list);
299 } 301 }
300 302
301 /* 303 /*
302 * We can safely drop inode_sb_list_lock here because we hold 304 * We can safely drop inode_sb_list_lock here because either
303 * references on both inode and next_i. Also no new inodes 305 * we actually hold references on both inode and next_i or
304 * will be added since the umount has begun. 306 * end of list. Also no new inodes will be added since the
307 * umount has begun.
305 */ 308 */
306 spin_unlock(&inode_sb_list_lock); 309 spin_unlock(&inode_sb_list_lock);
307 310
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index d90deaa08e78..34c38fabf514 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -210,6 +210,42 @@ void fsnotify_set_mark_ignored_mask_locked(struct fsnotify_mark *mark, __u32 mas
210} 210}
211 211
212/* 212/*
213 * Sorting function for lists of fsnotify marks.
214 *
215 * Fanotify supports different notification classes (reflected as priority of
216 * notification group). Events shall be passed to notification groups in
217 * decreasing priority order. To achieve this marks in notification lists for
218 * inodes and vfsmounts are sorted so that priorities of corresponding groups
219 * are descending.
220 *
221 * Furthermore correct handling of the ignore mask requires processing inode
222 * and vfsmount marks of each group together. Using the group address as
223 * further sort criterion provides a unique sorting order and thus we can
224 * merge inode and vfsmount lists of marks in linear time and find groups
225 * present in both lists.
226 *
227 * A return value of 1 signifies that b has priority over a.
228 * A return value of 0 signifies that the two marks have to be handled together.
229 * A return value of -1 signifies that a has priority over b.
230 */
231int fsnotify_compare_groups(struct fsnotify_group *a, struct fsnotify_group *b)
232{
233 if (a == b)
234 return 0;
235 if (!a)
236 return 1;
237 if (!b)
238 return -1;
239 if (a->priority < b->priority)
240 return 1;
241 if (a->priority > b->priority)
242 return -1;
243 if (a < b)
244 return 1;
245 return -1;
246}
247
248/*
213 * Attach an initialized mark to a given group and fs object. 249 * Attach an initialized mark to a given group and fs object.
214 * These marks may be used for the fsnotify backend to determine which 250 * These marks may be used for the fsnotify backend to determine which
215 * event types should be delivered to which group. 251 * event types should be delivered to which group.
diff --git a/fs/notify/vfsmount_mark.c b/fs/notify/vfsmount_mark.c
index ac851e8376b1..faefa72a11eb 100644
--- a/fs/notify/vfsmount_mark.c
+++ b/fs/notify/vfsmount_mark.c
@@ -153,6 +153,7 @@ int fsnotify_add_vfsmount_mark(struct fsnotify_mark *mark,
153 struct mount *m = real_mount(mnt); 153 struct mount *m = real_mount(mnt);
154 struct fsnotify_mark *lmark, *last = NULL; 154 struct fsnotify_mark *lmark, *last = NULL;
155 int ret = 0; 155 int ret = 0;
156 int cmp;
156 157
157 mark->flags |= FSNOTIFY_MARK_FLAG_VFSMOUNT; 158 mark->flags |= FSNOTIFY_MARK_FLAG_VFSMOUNT;
158 159
@@ -178,11 +179,8 @@ int fsnotify_add_vfsmount_mark(struct fsnotify_mark *mark,
178 goto out; 179 goto out;
179 } 180 }
180 181
181 if (mark->group->priority < lmark->group->priority) 182 cmp = fsnotify_compare_groups(lmark->group, mark->group);
182 continue; 183 if (cmp < 0)
183
184 if ((mark->group->priority == lmark->group->priority) &&
185 (mark->group < lmark->group))
186 continue; 184 continue;
187 185
188 hlist_add_before_rcu(&mark->m.m_list, &lmark->m.m_list); 186 hlist_add_before_rcu(&mark->m.m_list, &lmark->m.m_list);
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index 97de0fbd9f78..a96044004064 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -925,7 +925,7 @@ static int o2net_send_tcp_msg(struct socket *sock, struct kvec *vec,
925 size_t veclen, size_t total) 925 size_t veclen, size_t total)
926{ 926{
927 int ret; 927 int ret;
928 struct msghdr msg; 928 struct msghdr msg = {.msg_flags = 0,};
929 929
930 if (sock == NULL) { 930 if (sock == NULL) {
931 ret = -EINVAL; 931 ret = -EINVAL;
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index 8add6f1030d7..b931e04e3388 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -158,7 +158,7 @@ bail_add:
158 * NOTE: This dentry already has ->d_op set from 158 * NOTE: This dentry already has ->d_op set from
159 * ocfs2_get_parent() and ocfs2_get_dentry() 159 * ocfs2_get_parent() and ocfs2_get_dentry()
160 */ 160 */
161 if (ret) 161 if (!IS_ERR_OR_NULL(ret))
162 dentry = ret; 162 dentry = ret;
163 163
164 status = ocfs2_dentry_attach_lock(dentry, inode, 164 status = ocfs2_dentry_attach_lock(dentry, inode,
diff --git a/fs/open.c b/fs/open.c
index d6fd3acde134..de92c13b58be 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -823,8 +823,7 @@ struct file *dentry_open(const struct path *path, int flags,
823 f = get_empty_filp(); 823 f = get_empty_filp();
824 if (!IS_ERR(f)) { 824 if (!IS_ERR(f)) {
825 f->f_flags = flags; 825 f->f_flags = flags;
826 f->f_path = *path; 826 error = vfs_open(path, f, cred);
827 error = do_dentry_open(f, NULL, cred);
828 if (!error) { 827 if (!error) {
829 /* from now on we need fput() to dispose of f */ 828 /* from now on we need fput() to dispose of f */
830 error = open_check_o_direct(f); 829 error = open_check_o_direct(f);
@@ -841,6 +840,26 @@ struct file *dentry_open(const struct path *path, int flags,
841} 840}
842EXPORT_SYMBOL(dentry_open); 841EXPORT_SYMBOL(dentry_open);
843 842
843/**
844 * vfs_open - open the file at the given path
845 * @path: path to open
846 * @filp: newly allocated file with f_flag initialized
847 * @cred: credentials to use
848 */
849int vfs_open(const struct path *path, struct file *filp,
850 const struct cred *cred)
851{
852 struct inode *inode = path->dentry->d_inode;
853
854 if (inode->i_op->dentry_open)
855 return inode->i_op->dentry_open(path->dentry, filp, cred);
856 else {
857 filp->f_path = *path;
858 return do_dentry_open(filp, NULL, cred);
859 }
860}
861EXPORT_SYMBOL(vfs_open);
862
844static inline int build_open_flags(int flags, umode_t mode, struct open_flags *op) 863static inline int build_open_flags(int flags, umode_t mode, struct open_flags *op)
845{ 864{
846 int lookup_flags = 0; 865 int lookup_flags = 0;
diff --git a/fs/overlayfs/Kconfig b/fs/overlayfs/Kconfig
new file mode 100644
index 000000000000..34355818a2e0
--- /dev/null
+++ b/fs/overlayfs/Kconfig
@@ -0,0 +1,10 @@
1config OVERLAY_FS
2 tristate "Overlay filesystem support"
3 help
4 An overlay filesystem combines two filesystems - an 'upper' filesystem
5 and a 'lower' filesystem. When a name exists in both filesystems, the
6 object in the 'upper' filesystem is visible while the object in the
7 'lower' filesystem is either hidden or, in the case of directories,
8 merged with the 'upper' object.
9
10 For more information see Documentation/filesystems/overlayfs.txt
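The Kconfig help above describes the overlay model (writable 'upper' tree stacked on a read-only 'lower' tree, with upper names taking precedence) but not how such a mount is assembled. As a rough sketch only — not part of this patch, using made-up paths /lower, /upper, /work and /merged — the filesystem type registered by this series ("overlay") is normally mounted with lowerdir, upperdir and workdir options, for example via mount(2):

    #include <stdio.h>
    #include <sys/mount.h>

    int main(void)
    {
            /* Illustrative paths only: the lower tree may be read-only, while
             * upperdir and workdir must live on the same writable filesystem. */
            const char *opts = "lowerdir=/lower,upperdir=/upper,workdir=/work";

            if (mount("overlay", "/merged", "overlay", 0, opts) != 0) {
                    perror("mount overlay");
                    return 1;
            }
            return 0;
    }

The equivalent mount(8) invocation would be "mount -t overlay overlay -olowerdir=/lower,upperdir=/upper,workdir=/work /merged"; Documentation/filesystems/overlayfs.txt, added elsewhere in this series, covers the options in detail.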
diff --git a/fs/overlayfs/Makefile b/fs/overlayfs/Makefile
new file mode 100644
index 000000000000..900daed3e91d
--- /dev/null
+++ b/fs/overlayfs/Makefile
@@ -0,0 +1,7 @@
1#
2# Makefile for the overlay filesystem.
3#
4
5obj-$(CONFIG_OVERLAY_FS) += overlay.o
6
7overlay-objs := super.o inode.o dir.o readdir.o copy_up.o
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
new file mode 100644
index 000000000000..ea10a8719107
--- /dev/null
+++ b/fs/overlayfs/copy_up.c
@@ -0,0 +1,414 @@
1/*
2 *
3 * Copyright (C) 2011 Novell Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 */
9
10#include <linux/fs.h>
11#include <linux/slab.h>
12#include <linux/file.h>
13#include <linux/splice.h>
14#include <linux/xattr.h>
15#include <linux/security.h>
16#include <linux/uaccess.h>
17#include <linux/sched.h>
18#include <linux/namei.h>
19#include "overlayfs.h"
20
21#define OVL_COPY_UP_CHUNK_SIZE (1 << 20)
22
23int ovl_copy_xattr(struct dentry *old, struct dentry *new)
24{
25 ssize_t list_size, size;
26 char *buf, *name, *value;
27 int error;
28
29 if (!old->d_inode->i_op->getxattr ||
30 !new->d_inode->i_op->getxattr)
31 return 0;
32
33 list_size = vfs_listxattr(old, NULL, 0);
34 if (list_size <= 0) {
35 if (list_size == -EOPNOTSUPP)
36 return 0;
37 return list_size;
38 }
39
40 buf = kzalloc(list_size, GFP_KERNEL);
41 if (!buf)
42 return -ENOMEM;
43
44 error = -ENOMEM;
45 value = kmalloc(XATTR_SIZE_MAX, GFP_KERNEL);
46 if (!value)
47 goto out;
48
49 list_size = vfs_listxattr(old, buf, list_size);
50 if (list_size <= 0) {
51 error = list_size;
52 goto out_free_value;
53 }
54
55 for (name = buf; name < (buf + list_size); name += strlen(name) + 1) {
56 size = vfs_getxattr(old, name, value, XATTR_SIZE_MAX);
57 if (size <= 0) {
58 error = size;
59 goto out_free_value;
60 }
61 error = vfs_setxattr(new, name, value, size, 0);
62 if (error)
63 goto out_free_value;
64 }
65
66out_free_value:
67 kfree(value);
68out:
69 kfree(buf);
70 return error;
71}
72
73static int ovl_copy_up_data(struct path *old, struct path *new, loff_t len)
74{
75 struct file *old_file;
76 struct file *new_file;
77 loff_t old_pos = 0;
78 loff_t new_pos = 0;
79 int error = 0;
80
81 if (len == 0)
82 return 0;
83
84 old_file = ovl_path_open(old, O_RDONLY);
85 if (IS_ERR(old_file))
86 return PTR_ERR(old_file);
87
88 new_file = ovl_path_open(new, O_WRONLY);
89 if (IS_ERR(new_file)) {
90 error = PTR_ERR(new_file);
91 goto out_fput;
92 }
93
94 /* FIXME: copy up sparse files efficiently */
95 while (len) {
96 size_t this_len = OVL_COPY_UP_CHUNK_SIZE;
97 long bytes;
98
99 if (len < this_len)
100 this_len = len;
101
102 if (signal_pending_state(TASK_KILLABLE, current)) {
103 error = -EINTR;
104 break;
105 }
106
107 bytes = do_splice_direct(old_file, &old_pos,
108 new_file, &new_pos,
109 this_len, SPLICE_F_MOVE);
110 if (bytes <= 0) {
111 error = bytes;
112 break;
113 }
114 WARN_ON(old_pos != new_pos);
115
116 len -= bytes;
117 }
118
119 fput(new_file);
120out_fput:
121 fput(old_file);
122 return error;
123}
124
125static char *ovl_read_symlink(struct dentry *realdentry)
126{
127 int res;
128 char *buf;
129 struct inode *inode = realdentry->d_inode;
130 mm_segment_t old_fs;
131
132 res = -EINVAL;
133 if (!inode->i_op->readlink)
134 goto err;
135
136 res = -ENOMEM;
137 buf = (char *) __get_free_page(GFP_KERNEL);
138 if (!buf)
139 goto err;
140
141 old_fs = get_fs();
142 set_fs(get_ds());
143 /* The cast to a user pointer is valid due to the set_fs() */
144 res = inode->i_op->readlink(realdentry,
145 (char __user *)buf, PAGE_SIZE - 1);
146 set_fs(old_fs);
147 if (res < 0) {
148 free_page((unsigned long) buf);
149 goto err;
150 }
151 buf[res] = '\0';
152
153 return buf;
154
155err:
156 return ERR_PTR(res);
157}
158
159static int ovl_set_timestamps(struct dentry *upperdentry, struct kstat *stat)
160{
161 struct iattr attr = {
162 .ia_valid =
163 ATTR_ATIME | ATTR_MTIME | ATTR_ATIME_SET | ATTR_MTIME_SET,
164 .ia_atime = stat->atime,
165 .ia_mtime = stat->mtime,
166 };
167
168 return notify_change(upperdentry, &attr, NULL);
169}
170
171int ovl_set_attr(struct dentry *upperdentry, struct kstat *stat)
172{
173 int err = 0;
174
175 if (!S_ISLNK(stat->mode)) {
176 struct iattr attr = {
177 .ia_valid = ATTR_MODE,
178 .ia_mode = stat->mode,
179 };
180 err = notify_change(upperdentry, &attr, NULL);
181 }
182 if (!err) {
183 struct iattr attr = {
184 .ia_valid = ATTR_UID | ATTR_GID,
185 .ia_uid = stat->uid,
186 .ia_gid = stat->gid,
187 };
188 err = notify_change(upperdentry, &attr, NULL);
189 }
190 if (!err)
191 ovl_set_timestamps(upperdentry, stat);
192
193 return err;
194
195}
196
197static int ovl_copy_up_locked(struct dentry *workdir, struct dentry *upperdir,
198 struct dentry *dentry, struct path *lowerpath,
199 struct kstat *stat, struct iattr *attr,
200 const char *link)
201{
202 struct inode *wdir = workdir->d_inode;
203 struct inode *udir = upperdir->d_inode;
204 struct dentry *newdentry = NULL;
205 struct dentry *upper = NULL;
206 umode_t mode = stat->mode;
207 int err;
208
209 newdentry = ovl_lookup_temp(workdir, dentry);
210 err = PTR_ERR(newdentry);
211 if (IS_ERR(newdentry))
212 goto out;
213
214 upper = lookup_one_len(dentry->d_name.name, upperdir,
215 dentry->d_name.len);
216 err = PTR_ERR(upper);
217 if (IS_ERR(upper))
218 goto out1;
219
220 /* Can't properly set mode on creation because of the umask */
221 stat->mode &= S_IFMT;
222 err = ovl_create_real(wdir, newdentry, stat, link, NULL, true);
223 stat->mode = mode;
224 if (err)
225 goto out2;
226
227 if (S_ISREG(stat->mode)) {
228 struct path upperpath;
229 ovl_path_upper(dentry, &upperpath);
230 BUG_ON(upperpath.dentry != NULL);
231 upperpath.dentry = newdentry;
232
233 err = ovl_copy_up_data(lowerpath, &upperpath, stat->size);
234 if (err)
235 goto out_cleanup;
236 }
237
238 err = ovl_copy_xattr(lowerpath->dentry, newdentry);
239 if (err)
240 goto out_cleanup;
241
242 mutex_lock(&newdentry->d_inode->i_mutex);
243 err = ovl_set_attr(newdentry, stat);
244 if (!err && attr)
245 err = notify_change(newdentry, attr, NULL);
246 mutex_unlock(&newdentry->d_inode->i_mutex);
247 if (err)
248 goto out_cleanup;
249
250 err = ovl_do_rename(wdir, newdentry, udir, upper, 0);
251 if (err)
252 goto out_cleanup;
253
254 ovl_dentry_update(dentry, newdentry);
255 newdentry = NULL;
256
257 /*
 258 * Non-directories become opaque when copied up.
259 */
260 if (!S_ISDIR(stat->mode))
261 ovl_dentry_set_opaque(dentry, true);
262out2:
263 dput(upper);
264out1:
265 dput(newdentry);
266out:
267 return err;
268
269out_cleanup:
270 ovl_cleanup(wdir, newdentry);
271 goto out;
272}
273
274/*
275 * Copy up a single dentry
276 *
277 * Directory renames only allowed on "pure upper" (already created on
278 * upper filesystem, never copied up). Directories which are on lower or
279 * are merged may not be renamed. For these -EXDEV is returned and
280 * userspace has to deal with it. This means, when copying up a
281 * directory we can rely on it and ancestors being stable.
282 *
283 * Non-directory renames start with copy up of source if necessary. The
284 * actual rename will only proceed once the copy up was successful. Copy
285 * up uses upper parent i_mutex for exclusion. Since rename can change
286 * d_parent it is possible that the copy up will lock the old parent. At
287 * that point the file will have already been copied up anyway.
288 */
289int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
290 struct path *lowerpath, struct kstat *stat,
291 struct iattr *attr)
292{
293 struct dentry *workdir = ovl_workdir(dentry);
294 int err;
295 struct kstat pstat;
296 struct path parentpath;
297 struct dentry *upperdir;
298 struct dentry *upperdentry;
299 const struct cred *old_cred;
300 struct cred *override_cred;
301 char *link = NULL;
302
303 ovl_path_upper(parent, &parentpath);
304 upperdir = parentpath.dentry;
305
306 err = vfs_getattr(&parentpath, &pstat);
307 if (err)
308 return err;
309
310 if (S_ISLNK(stat->mode)) {
311 link = ovl_read_symlink(lowerpath->dentry);
312 if (IS_ERR(link))
313 return PTR_ERR(link);
314 }
315
316 err = -ENOMEM;
317 override_cred = prepare_creds();
318 if (!override_cred)
319 goto out_free_link;
320
321 override_cred->fsuid = stat->uid;
322 override_cred->fsgid = stat->gid;
323 /*
324 * CAP_SYS_ADMIN for copying up extended attributes
325 * CAP_DAC_OVERRIDE for create
326 * CAP_FOWNER for chmod, timestamp update
327 * CAP_FSETID for chmod
328 * CAP_CHOWN for chown
329 * CAP_MKNOD for mknod
330 */
331 cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
332 cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
333 cap_raise(override_cred->cap_effective, CAP_FOWNER);
334 cap_raise(override_cred->cap_effective, CAP_FSETID);
335 cap_raise(override_cred->cap_effective, CAP_CHOWN);
336 cap_raise(override_cred->cap_effective, CAP_MKNOD);
337 old_cred = override_creds(override_cred);
338
339 err = -EIO;
340 if (lock_rename(workdir, upperdir) != NULL) {
341 pr_err("overlayfs: failed to lock workdir+upperdir\n");
342 goto out_unlock;
343 }
344 upperdentry = ovl_dentry_upper(dentry);
345 if (upperdentry) {
346 unlock_rename(workdir, upperdir);
347 err = 0;
348 /* Raced with another copy-up? Do the setattr here */
349 if (attr) {
350 mutex_lock(&upperdentry->d_inode->i_mutex);
351 err = notify_change(upperdentry, attr, NULL);
352 mutex_unlock(&upperdentry->d_inode->i_mutex);
353 }
354 goto out_put_cred;
355 }
356
357 err = ovl_copy_up_locked(workdir, upperdir, dentry, lowerpath,
358 stat, attr, link);
359 if (!err) {
360 /* Restore timestamps on parent (best effort) */
361 ovl_set_timestamps(upperdir, &pstat);
362 }
363out_unlock:
364 unlock_rename(workdir, upperdir);
365out_put_cred:
366 revert_creds(old_cred);
367 put_cred(override_cred);
368
369out_free_link:
370 if (link)
371 free_page((unsigned long) link);
372
373 return err;
374}
375
376int ovl_copy_up(struct dentry *dentry)
377{
378 int err;
379
380 err = 0;
381 while (!err) {
382 struct dentry *next;
383 struct dentry *parent;
384 struct path lowerpath;
385 struct kstat stat;
386 enum ovl_path_type type = ovl_path_type(dentry);
387
388 if (type != OVL_PATH_LOWER)
389 break;
390
391 next = dget(dentry);
392 /* find the topmost dentry not yet copied up */
393 for (;;) {
394 parent = dget_parent(next);
395
396 type = ovl_path_type(parent);
397 if (type != OVL_PATH_LOWER)
398 break;
399
400 dput(next);
401 next = parent;
402 }
403
404 ovl_path_lower(next, &lowerpath);
405 err = vfs_getattr(&lowerpath, &stat);
406 if (!err)
407 err = ovl_copy_up_one(parent, next, &lowerpath, &stat, NULL);
408
409 dput(parent);
410 dput(next);
411 }
412
413 return err;
414}
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
new file mode 100644
index 000000000000..8ffc4b980f1b
--- /dev/null
+++ b/fs/overlayfs/dir.c
@@ -0,0 +1,928 @@
1/*
2 *
3 * Copyright (C) 2011 Novell Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 */
9
10#include <linux/fs.h>
11#include <linux/namei.h>
12#include <linux/xattr.h>
13#include <linux/security.h>
14#include <linux/cred.h>
15#include "overlayfs.h"
16
17void ovl_cleanup(struct inode *wdir, struct dentry *wdentry)
18{
19 int err;
20
21 dget(wdentry);
22 if (S_ISDIR(wdentry->d_inode->i_mode))
23 err = ovl_do_rmdir(wdir, wdentry);
24 else
25 err = ovl_do_unlink(wdir, wdentry);
26 dput(wdentry);
27
28 if (err) {
29 pr_err("overlayfs: cleanup of '%pd2' failed (%i)\n",
30 wdentry, err);
31 }
32}
33
34struct dentry *ovl_lookup_temp(struct dentry *workdir, struct dentry *dentry)
35{
36 struct dentry *temp;
37 char name[20];
38
39 snprintf(name, sizeof(name), "#%lx", (unsigned long) dentry);
40
41 temp = lookup_one_len(name, workdir, strlen(name));
42 if (!IS_ERR(temp) && temp->d_inode) {
43 pr_err("overlayfs: workdir/%s already exists\n", name);
44 dput(temp);
45 temp = ERR_PTR(-EIO);
46 }
47
48 return temp;
49}
50
51/* caller holds i_mutex on workdir */
52static struct dentry *ovl_whiteout(struct dentry *workdir,
53 struct dentry *dentry)
54{
55 int err;
56 struct dentry *whiteout;
57 struct inode *wdir = workdir->d_inode;
58
59 whiteout = ovl_lookup_temp(workdir, dentry);
60 if (IS_ERR(whiteout))
61 return whiteout;
62
63 err = ovl_do_whiteout(wdir, whiteout);
64 if (err) {
65 dput(whiteout);
66 whiteout = ERR_PTR(err);
67 }
68
69 return whiteout;
70}
71
72int ovl_create_real(struct inode *dir, struct dentry *newdentry,
73 struct kstat *stat, const char *link,
74 struct dentry *hardlink, bool debug)
75{
76 int err;
77
78 if (newdentry->d_inode)
79 return -ESTALE;
80
81 if (hardlink) {
82 err = ovl_do_link(hardlink, dir, newdentry, debug);
83 } else {
84 switch (stat->mode & S_IFMT) {
85 case S_IFREG:
86 err = ovl_do_create(dir, newdentry, stat->mode, debug);
87 break;
88
89 case S_IFDIR:
90 err = ovl_do_mkdir(dir, newdentry, stat->mode, debug);
91 break;
92
93 case S_IFCHR:
94 case S_IFBLK:
95 case S_IFIFO:
96 case S_IFSOCK:
97 err = ovl_do_mknod(dir, newdentry,
98 stat->mode, stat->rdev, debug);
99 break;
100
101 case S_IFLNK:
102 err = ovl_do_symlink(dir, newdentry, link, debug);
103 break;
104
105 default:
106 err = -EPERM;
107 }
108 }
109 if (!err && WARN_ON(!newdentry->d_inode)) {
110 /*
111 * Not quite sure if non-instantiated dentry is legal or not.
112 * VFS doesn't seem to care so check and warn here.
113 */
114 err = -ENOENT;
115 }
116 return err;
117}
118
119static int ovl_set_opaque(struct dentry *upperdentry)
120{
121 return ovl_do_setxattr(upperdentry, ovl_opaque_xattr, "y", 1, 0);
122}
123
124static void ovl_remove_opaque(struct dentry *upperdentry)
125{
126 int err;
127
128 err = ovl_do_removexattr(upperdentry, ovl_opaque_xattr);
129 if (err) {
130 pr_warn("overlayfs: failed to remove opaque from '%s' (%i)\n",
131 upperdentry->d_name.name, err);
132 }
133}
134
135static int ovl_dir_getattr(struct vfsmount *mnt, struct dentry *dentry,
136 struct kstat *stat)
137{
138 int err;
139 enum ovl_path_type type;
140 struct path realpath;
141
142 type = ovl_path_real(dentry, &realpath);
143 err = vfs_getattr(&realpath, stat);
144 if (err)
145 return err;
146
147 stat->dev = dentry->d_sb->s_dev;
148 stat->ino = dentry->d_inode->i_ino;
149
150 /*
151 * It's probably not worth it to count subdirs to get the
152 * correct link count. nlink=1 seems to pacify 'find' and
153 * other utilities.
154 */
155 if (type == OVL_PATH_MERGE)
156 stat->nlink = 1;
157
158 return 0;
159}
160
161static int ovl_create_upper(struct dentry *dentry, struct inode *inode,
162 struct kstat *stat, const char *link,
163 struct dentry *hardlink)
164{
165 struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent);
166 struct inode *udir = upperdir->d_inode;
167 struct dentry *newdentry;
168 int err;
169
170 mutex_lock_nested(&udir->i_mutex, I_MUTEX_PARENT);
171 newdentry = lookup_one_len(dentry->d_name.name, upperdir,
172 dentry->d_name.len);
173 err = PTR_ERR(newdentry);
174 if (IS_ERR(newdentry))
175 goto out_unlock;
176 err = ovl_create_real(udir, newdentry, stat, link, hardlink, false);
177 if (err)
178 goto out_dput;
179
180 ovl_dentry_version_inc(dentry->d_parent);
181 ovl_dentry_update(dentry, newdentry);
182 ovl_copyattr(newdentry->d_inode, inode);
183 d_instantiate(dentry, inode);
184 newdentry = NULL;
185out_dput:
186 dput(newdentry);
187out_unlock:
188 mutex_unlock(&udir->i_mutex);
189 return err;
190}
191
192static int ovl_lock_rename_workdir(struct dentry *workdir,
193 struct dentry *upperdir)
194{
195 /* Workdir should not be the same as upperdir */
196 if (workdir == upperdir)
197 goto err;
198
199 /* Workdir should not be subdir of upperdir and vice versa */
200 if (lock_rename(workdir, upperdir) != NULL)
201 goto err_unlock;
202
203 return 0;
204
205err_unlock:
206 unlock_rename(workdir, upperdir);
207err:
208 pr_err("overlayfs: failed to lock workdir+upperdir\n");
209 return -EIO;
210}
211
212static struct dentry *ovl_clear_empty(struct dentry *dentry,
213 struct list_head *list)
214{
215 struct dentry *workdir = ovl_workdir(dentry);
216 struct inode *wdir = workdir->d_inode;
217 struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent);
218 struct inode *udir = upperdir->d_inode;
219 struct path upperpath;
220 struct dentry *upper;
221 struct dentry *opaquedir;
222 struct kstat stat;
223 int err;
224
225 err = ovl_lock_rename_workdir(workdir, upperdir);
226 if (err)
227 goto out;
228
229 ovl_path_upper(dentry, &upperpath);
230 err = vfs_getattr(&upperpath, &stat);
231 if (err)
232 goto out_unlock;
233
234 err = -ESTALE;
235 if (!S_ISDIR(stat.mode))
236 goto out_unlock;
237 upper = upperpath.dentry;
238 if (upper->d_parent->d_inode != udir)
239 goto out_unlock;
240
241 opaquedir = ovl_lookup_temp(workdir, dentry);
242 err = PTR_ERR(opaquedir);
243 if (IS_ERR(opaquedir))
244 goto out_unlock;
245
246 err = ovl_create_real(wdir, opaquedir, &stat, NULL, NULL, true);
247 if (err)
248 goto out_dput;
249
250 err = ovl_copy_xattr(upper, opaquedir);
251 if (err)
252 goto out_cleanup;
253
254 err = ovl_set_opaque(opaquedir);
255 if (err)
256 goto out_cleanup;
257
258 mutex_lock(&opaquedir->d_inode->i_mutex);
259 err = ovl_set_attr(opaquedir, &stat);
260 mutex_unlock(&opaquedir->d_inode->i_mutex);
261 if (err)
262 goto out_cleanup;
263
264 err = ovl_do_rename(wdir, opaquedir, udir, upper, RENAME_EXCHANGE);
265 if (err)
266 goto out_cleanup;
267
268 ovl_cleanup_whiteouts(upper, list);
269 ovl_cleanup(wdir, upper);
270 unlock_rename(workdir, upperdir);
271
272 /* dentry's upper doesn't match now, get rid of it */
273 d_drop(dentry);
274
275 return opaquedir;
276
277out_cleanup:
278 ovl_cleanup(wdir, opaquedir);
279out_dput:
280 dput(opaquedir);
281out_unlock:
282 unlock_rename(workdir, upperdir);
283out:
284 return ERR_PTR(err);
285}
286
287static struct dentry *ovl_check_empty_and_clear(struct dentry *dentry)
288{
289 int err;
290 struct dentry *ret = NULL;
291 LIST_HEAD(list);
292
293 err = ovl_check_empty_dir(dentry, &list);
294 if (err)
295 ret = ERR_PTR(err);
296 else {
297 /*
298 * If no upperdentry then skip clearing whiteouts.
299 *
300 * Can race with copy-up, since we don't hold the upperdir
301 * mutex. Doesn't matter, since copy-up can't create a
302 * non-empty directory from an empty one.
303 */
304 if (ovl_dentry_upper(dentry))
305 ret = ovl_clear_empty(dentry, &list);
306 }
307
308 ovl_cache_free(&list);
309
310 return ret;
311}
312
313static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
314 struct kstat *stat, const char *link,
315 struct dentry *hardlink)
316{
317 struct dentry *workdir = ovl_workdir(dentry);
318 struct inode *wdir = workdir->d_inode;
319 struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent);
320 struct inode *udir = upperdir->d_inode;
321 struct dentry *upper;
322 struct dentry *newdentry;
323 int err;
324
325 err = ovl_lock_rename_workdir(workdir, upperdir);
326 if (err)
327 goto out;
328
329 newdentry = ovl_lookup_temp(workdir, dentry);
330 err = PTR_ERR(newdentry);
331 if (IS_ERR(newdentry))
332 goto out_unlock;
333
334 upper = lookup_one_len(dentry->d_name.name, upperdir,
335 dentry->d_name.len);
336 err = PTR_ERR(upper);
337 if (IS_ERR(upper))
338 goto out_dput;
339
340 err = ovl_create_real(wdir, newdentry, stat, link, hardlink, true);
341 if (err)
342 goto out_dput2;
343
344 if (S_ISDIR(stat->mode)) {
345 err = ovl_set_opaque(newdentry);
346 if (err)
347 goto out_cleanup;
348
349 err = ovl_do_rename(wdir, newdentry, udir, upper,
350 RENAME_EXCHANGE);
351 if (err)
352 goto out_cleanup;
353
354 ovl_cleanup(wdir, upper);
355 } else {
356 err = ovl_do_rename(wdir, newdentry, udir, upper, 0);
357 if (err)
358 goto out_cleanup;
359 }
360 ovl_dentry_version_inc(dentry->d_parent);
361 ovl_dentry_update(dentry, newdentry);
362 ovl_copyattr(newdentry->d_inode, inode);
363 d_instantiate(dentry, inode);
364 newdentry = NULL;
365out_dput2:
366 dput(upper);
367out_dput:
368 dput(newdentry);
369out_unlock:
370 unlock_rename(workdir, upperdir);
371out:
372 return err;
373
374out_cleanup:
375 ovl_cleanup(wdir, newdentry);
376 goto out_dput2;
377}
378
379static int ovl_create_or_link(struct dentry *dentry, int mode, dev_t rdev,
380 const char *link, struct dentry *hardlink)
381{
382 int err;
383 struct inode *inode;
384 struct kstat stat = {
385 .mode = mode,
386 .rdev = rdev,
387 };
388
389 err = -ENOMEM;
390 inode = ovl_new_inode(dentry->d_sb, mode, dentry->d_fsdata);
391 if (!inode)
392 goto out;
393
394 err = ovl_copy_up(dentry->d_parent);
395 if (err)
396 goto out_iput;
397
398 if (!ovl_dentry_is_opaque(dentry)) {
399 err = ovl_create_upper(dentry, inode, &stat, link, hardlink);
400 } else {
401 const struct cred *old_cred;
402 struct cred *override_cred;
403
404 err = -ENOMEM;
405 override_cred = prepare_creds();
406 if (!override_cred)
407 goto out_iput;
408
409 /*
410 * CAP_SYS_ADMIN for setting opaque xattr
411 * CAP_DAC_OVERRIDE for create in workdir, rename
412 * CAP_FOWNER for removing whiteout from sticky dir
413 */
414 cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
415 cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
416 cap_raise(override_cred->cap_effective, CAP_FOWNER);
417 old_cred = override_creds(override_cred);
418
419 err = ovl_create_over_whiteout(dentry, inode, &stat, link,
420 hardlink);
421
422 revert_creds(old_cred);
423 put_cred(override_cred);
424 }
425
426 if (!err)
427 inode = NULL;
428out_iput:
429 iput(inode);
430out:
431 return err;
432}
433
434static int ovl_create_object(struct dentry *dentry, int mode, dev_t rdev,
435 const char *link)
436{
437 int err;
438
439 err = ovl_want_write(dentry);
440 if (!err) {
441 err = ovl_create_or_link(dentry, mode, rdev, link, NULL);
442 ovl_drop_write(dentry);
443 }
444
445 return err;
446}
447
448static int ovl_create(struct inode *dir, struct dentry *dentry, umode_t mode,
449 bool excl)
450{
451 return ovl_create_object(dentry, (mode & 07777) | S_IFREG, 0, NULL);
452}
453
454static int ovl_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
455{
456 return ovl_create_object(dentry, (mode & 07777) | S_IFDIR, 0, NULL);
457}
458
459static int ovl_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
460 dev_t rdev)
461{
462 /* Don't allow creation of "whiteout" on overlay */
463 if (S_ISCHR(mode) && rdev == WHITEOUT_DEV)
464 return -EPERM;
465
466 return ovl_create_object(dentry, mode, rdev, NULL);
467}
468
469static int ovl_symlink(struct inode *dir, struct dentry *dentry,
470 const char *link)
471{
472 return ovl_create_object(dentry, S_IFLNK, 0, link);
473}
474
475static int ovl_link(struct dentry *old, struct inode *newdir,
476 struct dentry *new)
477{
478 int err;
479 struct dentry *upper;
480
481 err = ovl_want_write(old);
482 if (err)
483 goto out;
484
485 err = ovl_copy_up(old);
486 if (err)
487 goto out_drop_write;
488
489 upper = ovl_dentry_upper(old);
490 err = ovl_create_or_link(new, upper->d_inode->i_mode, 0, NULL, upper);
491
492out_drop_write:
493 ovl_drop_write(old);
494out:
495 return err;
496}
497
498static int ovl_remove_and_whiteout(struct dentry *dentry, bool is_dir)
499{
500 struct dentry *workdir = ovl_workdir(dentry);
501 struct inode *wdir = workdir->d_inode;
502 struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent);
503 struct inode *udir = upperdir->d_inode;
504 struct dentry *whiteout;
505 struct dentry *upper;
506 struct dentry *opaquedir = NULL;
507 int err;
508
509 if (is_dir) {
510 opaquedir = ovl_check_empty_and_clear(dentry);
511 err = PTR_ERR(opaquedir);
512 if (IS_ERR(opaquedir))
513 goto out;
514 }
515
516 err = ovl_lock_rename_workdir(workdir, upperdir);
517 if (err)
518 goto out_dput;
519
520 whiteout = ovl_whiteout(workdir, dentry);
521 err = PTR_ERR(whiteout);
522 if (IS_ERR(whiteout))
523 goto out_unlock;
524
525 upper = ovl_dentry_upper(dentry);
526 if (!upper) {
527 upper = lookup_one_len(dentry->d_name.name, upperdir,
528 dentry->d_name.len);
529 err = PTR_ERR(upper);
530 if (IS_ERR(upper))
531 goto kill_whiteout;
532
533 err = ovl_do_rename(wdir, whiteout, udir, upper, 0);
534 dput(upper);
535 if (err)
536 goto kill_whiteout;
537 } else {
538 int flags = 0;
539
540 if (opaquedir)
541 upper = opaquedir;
542 err = -ESTALE;
543 if (upper->d_parent != upperdir)
544 goto kill_whiteout;
545
546 if (is_dir)
547 flags |= RENAME_EXCHANGE;
548
549 err = ovl_do_rename(wdir, whiteout, udir, upper, flags);
550 if (err)
551 goto kill_whiteout;
552
553 if (is_dir)
554 ovl_cleanup(wdir, upper);
555 }
556 ovl_dentry_version_inc(dentry->d_parent);
557out_d_drop:
558 d_drop(dentry);
559 dput(whiteout);
560out_unlock:
561 unlock_rename(workdir, upperdir);
562out_dput:
563 dput(opaquedir);
564out:
565 return err;
566
567kill_whiteout:
568 ovl_cleanup(wdir, whiteout);
569 goto out_d_drop;
570}
571
572static int ovl_remove_upper(struct dentry *dentry, bool is_dir)
573{
574 struct dentry *upperdir = ovl_dentry_upper(dentry->d_parent);
575 struct inode *dir = upperdir->d_inode;
576 struct dentry *upper = ovl_dentry_upper(dentry);
577 int err;
578
579 mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
580 err = -ESTALE;
581 if (upper->d_parent == upperdir) {
582 /* Don't let d_delete() think it can reset d_inode */
583 dget(upper);
584 if (is_dir)
585 err = vfs_rmdir(dir, upper);
586 else
587 err = vfs_unlink(dir, upper, NULL);
588 dput(upper);
589 ovl_dentry_version_inc(dentry->d_parent);
590 }
591
592 /*
593 * Keeping this dentry hashed would mean having to release
594 * upperpath/lowerpath, which could only be done if we are the
595 * sole user of this dentry. Too tricky... Just unhash for
596 * now.
597 */
598 d_drop(dentry);
599 mutex_unlock(&dir->i_mutex);
600
601 return err;
602}
603
604static inline int ovl_check_sticky(struct dentry *dentry)
605{
606 struct inode *dir = ovl_dentry_real(dentry->d_parent)->d_inode;
607 struct inode *inode = ovl_dentry_real(dentry)->d_inode;
608
609 if (check_sticky(dir, inode))
610 return -EPERM;
611
612 return 0;
613}
614
615static int ovl_do_remove(struct dentry *dentry, bool is_dir)
616{
617 enum ovl_path_type type;
618 int err;
619
620 err = ovl_check_sticky(dentry);
621 if (err)
622 goto out;
623
624 err = ovl_want_write(dentry);
625 if (err)
626 goto out;
627
628 err = ovl_copy_up(dentry->d_parent);
629 if (err)
630 goto out_drop_write;
631
632 type = ovl_path_type(dentry);
633 if (type == OVL_PATH_PURE_UPPER) {
634 err = ovl_remove_upper(dentry, is_dir);
635 } else {
636 const struct cred *old_cred;
637 struct cred *override_cred;
638
639 err = -ENOMEM;
640 override_cred = prepare_creds();
641 if (!override_cred)
642 goto out_drop_write;
643
644 /*
645 * CAP_SYS_ADMIN for setting xattr on whiteout, opaque dir
646 * CAP_DAC_OVERRIDE for create in workdir, rename
647 * CAP_FOWNER for removing whiteout from sticky dir
648 * CAP_FSETID for chmod of opaque dir
649 * CAP_CHOWN for chown of opaque dir
650 */
651 cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
652 cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
653 cap_raise(override_cred->cap_effective, CAP_FOWNER);
654 cap_raise(override_cred->cap_effective, CAP_FSETID);
655 cap_raise(override_cred->cap_effective, CAP_CHOWN);
656 old_cred = override_creds(override_cred);
657
658 err = ovl_remove_and_whiteout(dentry, is_dir);
659
660 revert_creds(old_cred);
661 put_cred(override_cred);
662 }
663out_drop_write:
664 ovl_drop_write(dentry);
665out:
666 return err;
667}
668
669static int ovl_unlink(struct inode *dir, struct dentry *dentry)
670{
671 return ovl_do_remove(dentry, false);
672}
673
674static int ovl_rmdir(struct inode *dir, struct dentry *dentry)
675{
676 return ovl_do_remove(dentry, true);
677}
678
679static int ovl_rename2(struct inode *olddir, struct dentry *old,
680 struct inode *newdir, struct dentry *new,
681 unsigned int flags)
682{
683 int err;
684 enum ovl_path_type old_type;
685 enum ovl_path_type new_type;
686 struct dentry *old_upperdir;
687 struct dentry *new_upperdir;
688 struct dentry *olddentry;
689 struct dentry *newdentry;
690 struct dentry *trap;
691 bool old_opaque;
692 bool new_opaque;
693 bool new_create = false;
694 bool cleanup_whiteout = false;
695 bool overwrite = !(flags & RENAME_EXCHANGE);
696 bool is_dir = S_ISDIR(old->d_inode->i_mode);
697 bool new_is_dir = false;
698 struct dentry *opaquedir = NULL;
699 const struct cred *old_cred = NULL;
700 struct cred *override_cred = NULL;
701
702 err = -EINVAL;
703 if (flags & ~(RENAME_EXCHANGE | RENAME_NOREPLACE))
704 goto out;
705
706 flags &= ~RENAME_NOREPLACE;
707
708 err = ovl_check_sticky(old);
709 if (err)
710 goto out;
711
712 /* Don't copy up directory trees */
713 old_type = ovl_path_type(old);
714 err = -EXDEV;
715 if ((old_type == OVL_PATH_LOWER || old_type == OVL_PATH_MERGE) && is_dir)
716 goto out;
717
718 if (new->d_inode) {
719 err = ovl_check_sticky(new);
720 if (err)
721 goto out;
722
723 if (S_ISDIR(new->d_inode->i_mode))
724 new_is_dir = true;
725
726 new_type = ovl_path_type(new);
727 err = -EXDEV;
728 if (!overwrite && (new_type == OVL_PATH_LOWER || new_type == OVL_PATH_MERGE) && new_is_dir)
729 goto out;
730
731 err = 0;
732 if (new_type == OVL_PATH_LOWER && old_type == OVL_PATH_LOWER) {
733 if (ovl_dentry_lower(old)->d_inode ==
734 ovl_dentry_lower(new)->d_inode)
735 goto out;
736 }
737 if (new_type != OVL_PATH_LOWER && old_type != OVL_PATH_LOWER) {
738 if (ovl_dentry_upper(old)->d_inode ==
739 ovl_dentry_upper(new)->d_inode)
740 goto out;
741 }
742 } else {
743 if (ovl_dentry_is_opaque(new))
744 new_type = OVL_PATH_UPPER;
745 else
746 new_type = OVL_PATH_PURE_UPPER;
747 }
748
749 err = ovl_want_write(old);
750 if (err)
751 goto out;
752
753 err = ovl_copy_up(old);
754 if (err)
755 goto out_drop_write;
756
757 err = ovl_copy_up(new->d_parent);
758 if (err)
759 goto out_drop_write;
760 if (!overwrite) {
761 err = ovl_copy_up(new);
762 if (err)
763 goto out_drop_write;
764 }
765
766 old_opaque = old_type != OVL_PATH_PURE_UPPER;
767 new_opaque = new_type != OVL_PATH_PURE_UPPER;
768
769 if (old_opaque || new_opaque) {
770 err = -ENOMEM;
771 override_cred = prepare_creds();
772 if (!override_cred)
773 goto out_drop_write;
774
775 /*
776 * CAP_SYS_ADMIN for setting xattr on whiteout, opaque dir
777 * CAP_DAC_OVERRIDE for create in workdir
778 * CAP_FOWNER for removing whiteout from sticky dir
779 * CAP_FSETID for chmod of opaque dir
780 * CAP_CHOWN for chown of opaque dir
781 */
782 cap_raise(override_cred->cap_effective, CAP_SYS_ADMIN);
783 cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
784 cap_raise(override_cred->cap_effective, CAP_FOWNER);
785 cap_raise(override_cred->cap_effective, CAP_FSETID);
786 cap_raise(override_cred->cap_effective, CAP_CHOWN);
787 old_cred = override_creds(override_cred);
788 }
789
790 if (overwrite && (new_type == OVL_PATH_LOWER || new_type == OVL_PATH_MERGE) && new_is_dir) {
791 opaquedir = ovl_check_empty_and_clear(new);
792 err = PTR_ERR(opaquedir);
793 if (IS_ERR(opaquedir)) {
794 opaquedir = NULL;
795 goto out_revert_creds;
796 }
797 }
798
799 if (overwrite) {
800 if (old_opaque) {
801 if (new->d_inode || !new_opaque) {
802 /* Whiteout source */
803 flags |= RENAME_WHITEOUT;
804 } else {
805 /* Switch whiteouts */
806 flags |= RENAME_EXCHANGE;
807 }
808 } else if (is_dir && !new->d_inode && new_opaque) {
809 flags |= RENAME_EXCHANGE;
810 cleanup_whiteout = true;
811 }
812 }
813
814 old_upperdir = ovl_dentry_upper(old->d_parent);
815 new_upperdir = ovl_dentry_upper(new->d_parent);
816
817 trap = lock_rename(new_upperdir, old_upperdir);
818
819 olddentry = ovl_dentry_upper(old);
820 newdentry = ovl_dentry_upper(new);
821 if (newdentry) {
822 if (opaquedir) {
823 newdentry = opaquedir;
824 opaquedir = NULL;
825 } else {
826 dget(newdentry);
827 }
828 } else {
829 new_create = true;
830 newdentry = lookup_one_len(new->d_name.name, new_upperdir,
831 new->d_name.len);
832 err = PTR_ERR(newdentry);
833 if (IS_ERR(newdentry))
834 goto out_unlock;
835 }
836
837 err = -ESTALE;
838 if (olddentry->d_parent != old_upperdir)
839 goto out_dput;
840 if (newdentry->d_parent != new_upperdir)
841 goto out_dput;
842 if (olddentry == trap)
843 goto out_dput;
844 if (newdentry == trap)
845 goto out_dput;
846
847 if (is_dir && !old_opaque && new_opaque) {
848 err = ovl_set_opaque(olddentry);
849 if (err)
850 goto out_dput;
851 }
852 if (!overwrite && new_is_dir && old_opaque && !new_opaque) {
853 err = ovl_set_opaque(newdentry);
854 if (err)
855 goto out_dput;
856 }
857
858 if (old_opaque || new_opaque) {
859 err = ovl_do_rename(old_upperdir->d_inode, olddentry,
860 new_upperdir->d_inode, newdentry,
861 flags);
862 } else {
863 /* No debug for the plain case */
864 BUG_ON(flags & ~RENAME_EXCHANGE);
865 err = vfs_rename(old_upperdir->d_inode, olddentry,
866 new_upperdir->d_inode, newdentry,
867 NULL, flags);
868 }
869
870 if (err) {
871 if (is_dir && !old_opaque && new_opaque)
872 ovl_remove_opaque(olddentry);
873 if (!overwrite && new_is_dir && old_opaque && !new_opaque)
874 ovl_remove_opaque(newdentry);
875 goto out_dput;
876 }
877
878 if (is_dir && old_opaque && !new_opaque)
879 ovl_remove_opaque(olddentry);
880 if (!overwrite && new_is_dir && !old_opaque && new_opaque)
881 ovl_remove_opaque(newdentry);
882
883 if (old_opaque != new_opaque) {
884 ovl_dentry_set_opaque(old, new_opaque);
885 if (!overwrite)
886 ovl_dentry_set_opaque(new, old_opaque);
887 }
888
889 if (cleanup_whiteout)
890 ovl_cleanup(old_upperdir->d_inode, newdentry);
891
892 ovl_dentry_version_inc(old->d_parent);
893 ovl_dentry_version_inc(new->d_parent);
894
895out_dput:
896 dput(newdentry);
897out_unlock:
898 unlock_rename(new_upperdir, old_upperdir);
899out_revert_creds:
900 if (old_opaque || new_opaque) {
901 revert_creds(old_cred);
902 put_cred(override_cred);
903 }
904out_drop_write:
905 ovl_drop_write(old);
906out:
907 dput(opaquedir);
908 return err;
909}
910
911const struct inode_operations ovl_dir_inode_operations = {
912 .lookup = ovl_lookup,
913 .mkdir = ovl_mkdir,
914 .symlink = ovl_symlink,
915 .unlink = ovl_unlink,
916 .rmdir = ovl_rmdir,
917 .rename2 = ovl_rename2,
918 .link = ovl_link,
919 .setattr = ovl_setattr,
920 .create = ovl_create,
921 .mknod = ovl_mknod,
922 .permission = ovl_permission,
923 .getattr = ovl_dir_getattr,
924 .setxattr = ovl_setxattr,
925 .getxattr = ovl_getxattr,
926 .listxattr = ovl_listxattr,
927 .removexattr = ovl_removexattr,
928};
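
For illustration: the RENAME_WHITEOUT and RENAME_EXCHANGE flags that ovl_rename2() passes down to vfs_rename() above are the same flags exposed to userspace by renameat2(2). A minimal sketch follows; it assumes hypothetical file names, kernel headers that define SYS_renameat2, and an underlying filesystem that supports whiteouts (otherwise the call fails with EINVAL). It is not part of the patch.

/*
 * Userspace sketch: request a whiteout on rename, the same flag
 * ovl_rename2() uses internally on the upper filesystem.  Older glibc
 * has no renameat2() wrapper, so the raw syscall is used.
 */
#define _GNU_SOURCE
#include <fcntl.h>		/* AT_FDCWD */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/fs.h>		/* RENAME_WHITEOUT, RENAME_EXCHANGE */

int main(void)
{
	/* "oldname"/"newname" are hypothetical paths in the current dir */
	if (syscall(SYS_renameat2, AT_FDCWD, "oldname",
		    AT_FDCWD, "newname", RENAME_WHITEOUT) == -1) {
		perror("renameat2");
		return 1;
	}
	return 0;
}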
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
new file mode 100644
index 000000000000..07d74b24913b
--- /dev/null
+++ b/fs/overlayfs/inode.c
@@ -0,0 +1,434 @@
1/*
2 *
3 * Copyright (C) 2011 Novell Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 */
9
10#include <linux/fs.h>
11#include <linux/slab.h>
12#include <linux/xattr.h>
13#include "overlayfs.h"
14
15static int ovl_copy_up_last(struct dentry *dentry, struct iattr *attr,
16 bool no_data)
17{
18 int err;
19 struct dentry *parent;
20 struct kstat stat;
21 struct path lowerpath;
22
23 parent = dget_parent(dentry);
24 err = ovl_copy_up(parent);
25 if (err)
26 goto out_dput_parent;
27
28 ovl_path_lower(dentry, &lowerpath);
29 err = vfs_getattr(&lowerpath, &stat);
30 if (err)
31 goto out_dput_parent;
32
33 if (no_data)
34 stat.size = 0;
35
36 err = ovl_copy_up_one(parent, dentry, &lowerpath, &stat, attr);
37
38out_dput_parent:
39 dput(parent);
40 return err;
41}
42
43int ovl_setattr(struct dentry *dentry, struct iattr *attr)
44{
45 int err;
46 struct dentry *upperdentry;
47
48 err = ovl_want_write(dentry);
49 if (err)
50 goto out;
51
52 upperdentry = ovl_dentry_upper(dentry);
53 if (upperdentry) {
54 mutex_lock(&upperdentry->d_inode->i_mutex);
55 err = notify_change(upperdentry, attr, NULL);
56 mutex_unlock(&upperdentry->d_inode->i_mutex);
57 } else {
58 err = ovl_copy_up_last(dentry, attr, false);
59 }
60 ovl_drop_write(dentry);
61out:
62 return err;
63}
64
65static int ovl_getattr(struct vfsmount *mnt, struct dentry *dentry,
66 struct kstat *stat)
67{
68 struct path realpath;
69
70 ovl_path_real(dentry, &realpath);
71 return vfs_getattr(&realpath, stat);
72}
73
74int ovl_permission(struct inode *inode, int mask)
75{
76 struct ovl_entry *oe;
77 struct dentry *alias = NULL;
78 struct inode *realinode;
79 struct dentry *realdentry;
80 bool is_upper;
81 int err;
82
83 if (S_ISDIR(inode->i_mode)) {
84 oe = inode->i_private;
85 } else if (mask & MAY_NOT_BLOCK) {
86 return -ECHILD;
87 } else {
88 /*
89 * For non-directories find an alias and get the info
90 * from there.
91 */
92 alias = d_find_any_alias(inode);
93 if (WARN_ON(!alias))
94 return -ENOENT;
95
96 oe = alias->d_fsdata;
97 }
98
99 realdentry = ovl_entry_real(oe, &is_upper);
100
101 /* Careful in RCU walk mode */
102 realinode = ACCESS_ONCE(realdentry->d_inode);
103 if (!realinode) {
104 WARN_ON(!(mask & MAY_NOT_BLOCK));
105 err = -ENOENT;
106 goto out_dput;
107 }
108
109 if (mask & MAY_WRITE) {
110 umode_t mode = realinode->i_mode;
111
112 /*
113 * Writes will always be redirected to upper layer, so
114 * ignore lower layer being read-only.
115 *
116 * If the overlay itself is read-only then proceed
117 * with the permission check, don't return EROFS.
118 * This will only happen if this is the lower layer of
119 * another overlayfs.
120 *
121 * If upper fs becomes read-only after the overlay was
122 * constructed return EROFS to prevent modification of
123 * upper layer.
124 */
125 err = -EROFS;
126 if (is_upper && !IS_RDONLY(inode) && IS_RDONLY(realinode) &&
127 (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)))
128 goto out_dput;
129 }
130
131 err = __inode_permission(realinode, mask);
132out_dput:
133 dput(alias);
134 return err;
135}
136
137
138struct ovl_link_data {
139 struct dentry *realdentry;
140 void *cookie;
141};
142
143static void *ovl_follow_link(struct dentry *dentry, struct nameidata *nd)
144{
145 void *ret;
146 struct dentry *realdentry;
147 struct inode *realinode;
148
149 realdentry = ovl_dentry_real(dentry);
150 realinode = realdentry->d_inode;
151
152 if (WARN_ON(!realinode->i_op->follow_link))
153 return ERR_PTR(-EPERM);
154
155 ret = realinode->i_op->follow_link(realdentry, nd);
156 if (IS_ERR(ret))
157 return ret;
158
159 if (realinode->i_op->put_link) {
160 struct ovl_link_data *data;
161
162 data = kmalloc(sizeof(struct ovl_link_data), GFP_KERNEL);
163 if (!data) {
164 realinode->i_op->put_link(realdentry, nd, ret);
165 return ERR_PTR(-ENOMEM);
166 }
167 data->realdentry = realdentry;
168 data->cookie = ret;
169
170 return data;
171 } else {
172 return NULL;
173 }
174}
175
176static void ovl_put_link(struct dentry *dentry, struct nameidata *nd, void *c)
177{
178 struct inode *realinode;
179 struct ovl_link_data *data = c;
180
181 if (!data)
182 return;
183
184 realinode = data->realdentry->d_inode;
185 realinode->i_op->put_link(data->realdentry, nd, data->cookie);
186 kfree(data);
187}
188
189static int ovl_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
190{
191 struct path realpath;
192 struct inode *realinode;
193
194 ovl_path_real(dentry, &realpath);
195 realinode = realpath.dentry->d_inode;
196
197 if (!realinode->i_op->readlink)
198 return -EINVAL;
199
200 touch_atime(&realpath);
201
202 return realinode->i_op->readlink(realpath.dentry, buf, bufsiz);
203}
204
205
206static bool ovl_is_private_xattr(const char *name)
207{
208	return strncmp(name, "trusted.overlay.", 16) == 0;
209}
210
211int ovl_setxattr(struct dentry *dentry, const char *name,
212 const void *value, size_t size, int flags)
213{
214 int err;
215 struct dentry *upperdentry;
216
217 err = ovl_want_write(dentry);
218 if (err)
219 goto out;
220
221 err = -EPERM;
222 if (ovl_is_private_xattr(name))
223 goto out_drop_write;
224
225 err = ovl_copy_up(dentry);
226 if (err)
227 goto out_drop_write;
228
229 upperdentry = ovl_dentry_upper(dentry);
230 err = vfs_setxattr(upperdentry, name, value, size, flags);
231
232out_drop_write:
233 ovl_drop_write(dentry);
234out:
235 return err;
236}
237
238static bool ovl_need_xattr_filter(struct dentry *dentry,
239 enum ovl_path_type type)
240{
241 return type == OVL_PATH_UPPER && S_ISDIR(dentry->d_inode->i_mode);
242}
243
244ssize_t ovl_getxattr(struct dentry *dentry, const char *name,
245 void *value, size_t size)
246{
247 struct path realpath;
248 enum ovl_path_type type = ovl_path_real(dentry, &realpath);
249
250 if (ovl_need_xattr_filter(dentry, type) && ovl_is_private_xattr(name))
251 return -ENODATA;
252
253 return vfs_getxattr(realpath.dentry, name, value, size);
254}
255
256ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size)
257{
258 struct path realpath;
259 enum ovl_path_type type = ovl_path_real(dentry, &realpath);
260 ssize_t res;
261 int off;
262
263 res = vfs_listxattr(realpath.dentry, list, size);
264 if (res <= 0 || size == 0)
265 return res;
266
267 if (!ovl_need_xattr_filter(dentry, type))
268 return res;
269
270 /* filter out private xattrs */
271 for (off = 0; off < res;) {
272 char *s = list + off;
273 size_t slen = strlen(s) + 1;
274
275 BUG_ON(off + slen > res);
276
277 if (ovl_is_private_xattr(s)) {
278 res -= slen;
279 memmove(s, s + slen, res - off);
280 } else {
281 off += slen;
282 }
283 }
284
285 return res;
286}
287
288int ovl_removexattr(struct dentry *dentry, const char *name)
289{
290 int err;
291 struct path realpath;
292 enum ovl_path_type type = ovl_path_real(dentry, &realpath);
293
294 err = ovl_want_write(dentry);
295 if (err)
296 goto out;
297
298 err = -ENODATA;
299 if (ovl_need_xattr_filter(dentry, type) && ovl_is_private_xattr(name))
300 goto out_drop_write;
301
302 if (type == OVL_PATH_LOWER) {
303 err = vfs_getxattr(realpath.dentry, name, NULL, 0);
304 if (err < 0)
305 goto out_drop_write;
306
307 err = ovl_copy_up(dentry);
308 if (err)
309 goto out_drop_write;
310
311 ovl_path_upper(dentry, &realpath);
312 }
313
314 err = vfs_removexattr(realpath.dentry, name);
315out_drop_write:
316 ovl_drop_write(dentry);
317out:
318 return err;
319}
320
321static bool ovl_open_need_copy_up(int flags, enum ovl_path_type type,
322 struct dentry *realdentry)
323{
324 if (type != OVL_PATH_LOWER)
325 return false;
326
327 if (special_file(realdentry->d_inode->i_mode))
328 return false;
329
330 if (!(OPEN_FMODE(flags) & FMODE_WRITE) && !(flags & O_TRUNC))
331 return false;
332
333 return true;
334}
335
336static int ovl_dentry_open(struct dentry *dentry, struct file *file,
337 const struct cred *cred)
338{
339 int err;
340 struct path realpath;
341 enum ovl_path_type type;
342 bool want_write = false;
343
344 type = ovl_path_real(dentry, &realpath);
345 if (ovl_open_need_copy_up(file->f_flags, type, realpath.dentry)) {
346 want_write = true;
347 err = ovl_want_write(dentry);
348 if (err)
349 goto out;
350
351 if (file->f_flags & O_TRUNC)
352 err = ovl_copy_up_last(dentry, NULL, true);
353 else
354 err = ovl_copy_up(dentry);
355 if (err)
356 goto out_drop_write;
357
358 ovl_path_upper(dentry, &realpath);
359 }
360
361 err = vfs_open(&realpath, file, cred);
362out_drop_write:
363 if (want_write)
364 ovl_drop_write(dentry);
365out:
366 return err;
367}
368
369static const struct inode_operations ovl_file_inode_operations = {
370 .setattr = ovl_setattr,
371 .permission = ovl_permission,
372 .getattr = ovl_getattr,
373 .setxattr = ovl_setxattr,
374 .getxattr = ovl_getxattr,
375 .listxattr = ovl_listxattr,
376 .removexattr = ovl_removexattr,
377 .dentry_open = ovl_dentry_open,
378};
379
380static const struct inode_operations ovl_symlink_inode_operations = {
381 .setattr = ovl_setattr,
382 .follow_link = ovl_follow_link,
383 .put_link = ovl_put_link,
384 .readlink = ovl_readlink,
385 .getattr = ovl_getattr,
386 .setxattr = ovl_setxattr,
387 .getxattr = ovl_getxattr,
388 .listxattr = ovl_listxattr,
389 .removexattr = ovl_removexattr,
390};
391
392struct inode *ovl_new_inode(struct super_block *sb, umode_t mode,
393 struct ovl_entry *oe)
394{
395 struct inode *inode;
396
397 inode = new_inode(sb);
398 if (!inode)
399 return NULL;
400
401 mode &= S_IFMT;
402
403 inode->i_ino = get_next_ino();
404 inode->i_mode = mode;
405 inode->i_flags |= S_NOATIME | S_NOCMTIME;
406
407 switch (mode) {
408 case S_IFDIR:
409 inode->i_private = oe;
410 inode->i_op = &ovl_dir_inode_operations;
411 inode->i_fop = &ovl_dir_operations;
412 break;
413
414 case S_IFLNK:
415 inode->i_op = &ovl_symlink_inode_operations;
416 break;
417
418 case S_IFREG:
419 case S_IFSOCK:
420 case S_IFBLK:
421 case S_IFCHR:
422 case S_IFIFO:
423 inode->i_op = &ovl_file_inode_operations;
424 break;
425
426 default:
427 WARN(1, "illegal file type: %i\n", mode);
428 iput(inode);
429 inode = NULL;
430 }
431
432 return inode;
433
434}
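
For illustration: ovl_setxattr()/ovl_getxattr()/ovl_listxattr() above hide the private "trusted.overlay." namespace from directories covered by ovl_need_xattr_filter(), but the marker can still be read directly on the upper layer. A minimal sketch follows; the path is hypothetical, reading the "trusted." namespace needs CAP_SYS_ADMIN, and the 'y' value matches the check in ovl_is_opaquedir() later in this patch. It is not part of the patch.

/*
 * Userspace sketch: probe the opaque marker directly on the upper
 * layer.  Through the overlay mount itself, ovl_getxattr() above
 * filters this name for directories subject to the xattr filter and
 * returns -ENODATA.
 */
#include <stdio.h>
#include <sys/xattr.h>

int main(void)
{
	char val = 0;
	/* "/upper/somedir" is a hypothetical upper-layer directory */
	ssize_t n = getxattr("/upper/somedir", "trusted.overlay.opaque",
			     &val, sizeof(val));

	if (n == 1 && val == 'y')
		printf("opaque directory\n");
	else
		printf("not opaque (n=%zd)\n", n);
	return 0;
}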
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
new file mode 100644
index 000000000000..814bed33dd07
--- /dev/null
+++ b/fs/overlayfs/overlayfs.h
@@ -0,0 +1,191 @@
1/*
2 *
3 * Copyright (C) 2011 Novell Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 */
9
10#include <linux/kernel.h>
11
12struct ovl_entry;
13
14enum ovl_path_type {
15 OVL_PATH_PURE_UPPER,
16 OVL_PATH_UPPER,
17 OVL_PATH_MERGE,
18 OVL_PATH_LOWER,
19};
20
21extern const char *ovl_opaque_xattr;
22
23static inline int ovl_do_rmdir(struct inode *dir, struct dentry *dentry)
24{
25 int err = vfs_rmdir(dir, dentry);
26 pr_debug("rmdir(%pd2) = %i\n", dentry, err);
27 return err;
28}
29
30static inline int ovl_do_unlink(struct inode *dir, struct dentry *dentry)
31{
32 int err = vfs_unlink(dir, dentry, NULL);
33 pr_debug("unlink(%pd2) = %i\n", dentry, err);
34 return err;
35}
36
37static inline int ovl_do_link(struct dentry *old_dentry, struct inode *dir,
38 struct dentry *new_dentry, bool debug)
39{
40 int err = vfs_link(old_dentry, dir, new_dentry, NULL);
41 if (debug) {
42 pr_debug("link(%pd2, %pd2) = %i\n",
43 old_dentry, new_dentry, err);
44 }
45 return err;
46}
47
48static inline int ovl_do_create(struct inode *dir, struct dentry *dentry,
49 umode_t mode, bool debug)
50{
51 int err = vfs_create(dir, dentry, mode, true);
52 if (debug)
53 pr_debug("create(%pd2, 0%o) = %i\n", dentry, mode, err);
54 return err;
55}
56
57static inline int ovl_do_mkdir(struct inode *dir, struct dentry *dentry,
58 umode_t mode, bool debug)
59{
60 int err = vfs_mkdir(dir, dentry, mode);
61 if (debug)
62 pr_debug("mkdir(%pd2, 0%o) = %i\n", dentry, mode, err);
63 return err;
64}
65
66static inline int ovl_do_mknod(struct inode *dir, struct dentry *dentry,
67 umode_t mode, dev_t dev, bool debug)
68{
69 int err = vfs_mknod(dir, dentry, mode, dev);
70 if (debug) {
71 pr_debug("mknod(%pd2, 0%o, 0%o) = %i\n",
72 dentry, mode, dev, err);
73 }
74 return err;
75}
76
77static inline int ovl_do_symlink(struct inode *dir, struct dentry *dentry,
78 const char *oldname, bool debug)
79{
80 int err = vfs_symlink(dir, dentry, oldname);
81 if (debug)
82 pr_debug("symlink(\"%s\", %pd2) = %i\n", oldname, dentry, err);
83 return err;
84}
85
86static inline int ovl_do_setxattr(struct dentry *dentry, const char *name,
87 const void *value, size_t size, int flags)
88{
89 int err = vfs_setxattr(dentry, name, value, size, flags);
90 pr_debug("setxattr(%pd2, \"%s\", \"%*s\", 0x%x) = %i\n",
91 dentry, name, (int) size, (char *) value, flags, err);
92 return err;
93}
94
95static inline int ovl_do_removexattr(struct dentry *dentry, const char *name)
96{
97 int err = vfs_removexattr(dentry, name);
98 pr_debug("removexattr(%pd2, \"%s\") = %i\n", dentry, name, err);
99 return err;
100}
101
102static inline int ovl_do_rename(struct inode *olddir, struct dentry *olddentry,
103 struct inode *newdir, struct dentry *newdentry,
104 unsigned int flags)
105{
106 int err;
107
108 pr_debug("rename2(%pd2, %pd2, 0x%x)\n",
109 olddentry, newdentry, flags);
110
111 err = vfs_rename(olddir, olddentry, newdir, newdentry, NULL, flags);
112
113 if (err) {
114 pr_debug("...rename2(%pd2, %pd2, ...) = %i\n",
115 olddentry, newdentry, err);
116 }
117 return err;
118}
119
120static inline int ovl_do_whiteout(struct inode *dir, struct dentry *dentry)
121{
122 int err = vfs_whiteout(dir, dentry);
123 pr_debug("whiteout(%pd2) = %i\n", dentry, err);
124 return err;
125}
126
127enum ovl_path_type ovl_path_type(struct dentry *dentry);
128u64 ovl_dentry_version_get(struct dentry *dentry);
129void ovl_dentry_version_inc(struct dentry *dentry);
130void ovl_path_upper(struct dentry *dentry, struct path *path);
131void ovl_path_lower(struct dentry *dentry, struct path *path);
132enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path);
133struct dentry *ovl_dentry_upper(struct dentry *dentry);
134struct dentry *ovl_dentry_lower(struct dentry *dentry);
135struct dentry *ovl_dentry_real(struct dentry *dentry);
136struct dentry *ovl_entry_real(struct ovl_entry *oe, bool *is_upper);
137struct ovl_dir_cache *ovl_dir_cache(struct dentry *dentry);
138void ovl_set_dir_cache(struct dentry *dentry, struct ovl_dir_cache *cache);
139struct dentry *ovl_workdir(struct dentry *dentry);
140int ovl_want_write(struct dentry *dentry);
141void ovl_drop_write(struct dentry *dentry);
142bool ovl_dentry_is_opaque(struct dentry *dentry);
143void ovl_dentry_set_opaque(struct dentry *dentry, bool opaque);
144bool ovl_is_whiteout(struct dentry *dentry);
145void ovl_dentry_update(struct dentry *dentry, struct dentry *upperdentry);
146struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
147 unsigned int flags);
148struct file *ovl_path_open(struct path *path, int flags);
149
150struct dentry *ovl_upper_create(struct dentry *upperdir, struct dentry *dentry,
151 struct kstat *stat, const char *link);
152
153/* readdir.c */
154extern const struct file_operations ovl_dir_operations;
155int ovl_check_empty_dir(struct dentry *dentry, struct list_head *list);
156void ovl_cleanup_whiteouts(struct dentry *upper, struct list_head *list);
157void ovl_cache_free(struct list_head *list);
158
159/* inode.c */
160int ovl_setattr(struct dentry *dentry, struct iattr *attr);
161int ovl_permission(struct inode *inode, int mask);
162int ovl_setxattr(struct dentry *dentry, const char *name,
163 const void *value, size_t size, int flags);
164ssize_t ovl_getxattr(struct dentry *dentry, const char *name,
165 void *value, size_t size);
166ssize_t ovl_listxattr(struct dentry *dentry, char *list, size_t size);
167int ovl_removexattr(struct dentry *dentry, const char *name);
168
169struct inode *ovl_new_inode(struct super_block *sb, umode_t mode,
170 struct ovl_entry *oe);
171static inline void ovl_copyattr(struct inode *from, struct inode *to)
172{
173 to->i_uid = from->i_uid;
174 to->i_gid = from->i_gid;
175}
176
177/* dir.c */
178extern const struct inode_operations ovl_dir_inode_operations;
179struct dentry *ovl_lookup_temp(struct dentry *workdir, struct dentry *dentry);
180int ovl_create_real(struct inode *dir, struct dentry *newdentry,
181 struct kstat *stat, const char *link,
182 struct dentry *hardlink, bool debug);
183void ovl_cleanup(struct inode *dir, struct dentry *dentry);
184
185/* copy_up.c */
186int ovl_copy_up(struct dentry *dentry);
187int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
188 struct path *lowerpath, struct kstat *stat,
189 struct iattr *attr);
190int ovl_copy_xattr(struct dentry *old, struct dentry *new);
191int ovl_set_attr(struct dentry *upper, struct kstat *stat);
diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
new file mode 100644
index 000000000000..ab1e3dcbed95
--- /dev/null
+++ b/fs/overlayfs/readdir.c
@@ -0,0 +1,586 @@
1/*
2 *
3 * Copyright (C) 2011 Novell Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 */
9
10#include <linux/fs.h>
11#include <linux/slab.h>
12#include <linux/namei.h>
13#include <linux/file.h>
14#include <linux/xattr.h>
15#include <linux/rbtree.h>
16#include <linux/security.h>
17#include <linux/cred.h>
18#include "overlayfs.h"
19
20struct ovl_cache_entry {
21 unsigned int len;
22 unsigned int type;
23 u64 ino;
24 struct list_head l_node;
25 struct rb_node node;
26 bool is_whiteout;
27 bool is_cursor;
28 char name[];
29};
30
31struct ovl_dir_cache {
32 long refcount;
33 u64 version;
34 struct list_head entries;
35};
36
37struct ovl_readdir_data {
38 struct dir_context ctx;
39 bool is_merge;
40 struct rb_root root;
41 struct list_head *list;
42 struct list_head middle;
43 int count;
44 int err;
45};
46
47struct ovl_dir_file {
48 bool is_real;
49 bool is_upper;
50 struct ovl_dir_cache *cache;
51 struct ovl_cache_entry cursor;
52 struct file *realfile;
53 struct file *upperfile;
54};
55
56static struct ovl_cache_entry *ovl_cache_entry_from_node(struct rb_node *n)
57{
58 return container_of(n, struct ovl_cache_entry, node);
59}
60
61static struct ovl_cache_entry *ovl_cache_entry_find(struct rb_root *root,
62 const char *name, int len)
63{
64 struct rb_node *node = root->rb_node;
65 int cmp;
66
67 while (node) {
68 struct ovl_cache_entry *p = ovl_cache_entry_from_node(node);
69
70 cmp = strncmp(name, p->name, len);
71 if (cmp > 0)
72 node = p->node.rb_right;
73 else if (cmp < 0 || len < p->len)
74 node = p->node.rb_left;
75 else
76 return p;
77 }
78
79 return NULL;
80}
81
82static struct ovl_cache_entry *ovl_cache_entry_new(const char *name, int len,
83 u64 ino, unsigned int d_type)
84{
85 struct ovl_cache_entry *p;
86 size_t size = offsetof(struct ovl_cache_entry, name[len + 1]);
87
88 p = kmalloc(size, GFP_KERNEL);
89 if (p) {
90 memcpy(p->name, name, len);
91 p->name[len] = '\0';
92 p->len = len;
93 p->type = d_type;
94 p->ino = ino;
95 p->is_whiteout = false;
96 p->is_cursor = false;
97 }
98
99 return p;
100}
101
102static int ovl_cache_entry_add_rb(struct ovl_readdir_data *rdd,
103 const char *name, int len, u64 ino,
104 unsigned int d_type)
105{
106 struct rb_node **newp = &rdd->root.rb_node;
107 struct rb_node *parent = NULL;
108 struct ovl_cache_entry *p;
109
110 while (*newp) {
111 int cmp;
112 struct ovl_cache_entry *tmp;
113
114 parent = *newp;
115 tmp = ovl_cache_entry_from_node(*newp);
116 cmp = strncmp(name, tmp->name, len);
117 if (cmp > 0)
118 newp = &tmp->node.rb_right;
119 else if (cmp < 0 || len < tmp->len)
120 newp = &tmp->node.rb_left;
121 else
122 return 0;
123 }
124
125 p = ovl_cache_entry_new(name, len, ino, d_type);
126 if (p == NULL)
127 return -ENOMEM;
128
129 list_add_tail(&p->l_node, rdd->list);
130 rb_link_node(&p->node, parent, newp);
131 rb_insert_color(&p->node, &rdd->root);
132
133 return 0;
134}
135
136static int ovl_fill_lower(struct ovl_readdir_data *rdd,
137 const char *name, int namelen,
138 loff_t offset, u64 ino, unsigned int d_type)
139{
140 struct ovl_cache_entry *p;
141
142 p = ovl_cache_entry_find(&rdd->root, name, namelen);
143 if (p) {
144 list_move_tail(&p->l_node, &rdd->middle);
145 } else {
146 p = ovl_cache_entry_new(name, namelen, ino, d_type);
147 if (p == NULL)
148 rdd->err = -ENOMEM;
149 else
150 list_add_tail(&p->l_node, &rdd->middle);
151 }
152
153 return rdd->err;
154}
155
156void ovl_cache_free(struct list_head *list)
157{
158 struct ovl_cache_entry *p;
159 struct ovl_cache_entry *n;
160
161 list_for_each_entry_safe(p, n, list, l_node)
162 kfree(p);
163
164 INIT_LIST_HEAD(list);
165}
166
167static void ovl_cache_put(struct ovl_dir_file *od, struct dentry *dentry)
168{
169 struct ovl_dir_cache *cache = od->cache;
170
171 list_del_init(&od->cursor.l_node);
172 WARN_ON(cache->refcount <= 0);
173 cache->refcount--;
174 if (!cache->refcount) {
175 if (ovl_dir_cache(dentry) == cache)
176 ovl_set_dir_cache(dentry, NULL);
177
178 ovl_cache_free(&cache->entries);
179 kfree(cache);
180 }
181}
182
183static int ovl_fill_merge(void *buf, const char *name, int namelen,
184 loff_t offset, u64 ino, unsigned int d_type)
185{
186 struct ovl_readdir_data *rdd = buf;
187
188 rdd->count++;
189 if (!rdd->is_merge)
190 return ovl_cache_entry_add_rb(rdd, name, namelen, ino, d_type);
191 else
192 return ovl_fill_lower(rdd, name, namelen, offset, ino, d_type);
193}
194
195static inline int ovl_dir_read(struct path *realpath,
196 struct ovl_readdir_data *rdd)
197{
198 struct file *realfile;
199 int err;
200
201 realfile = ovl_path_open(realpath, O_RDONLY | O_DIRECTORY);
202 if (IS_ERR(realfile))
203 return PTR_ERR(realfile);
204
205 rdd->ctx.pos = 0;
206 do {
207 rdd->count = 0;
208 rdd->err = 0;
209 err = iterate_dir(realfile, &rdd->ctx);
210 if (err >= 0)
211 err = rdd->err;
212 } while (!err && rdd->count);
213 fput(realfile);
214
215 return err;
216}
217
218static void ovl_dir_reset(struct file *file)
219{
220 struct ovl_dir_file *od = file->private_data;
221 struct ovl_dir_cache *cache = od->cache;
222 struct dentry *dentry = file->f_path.dentry;
223 enum ovl_path_type type = ovl_path_type(dentry);
224
225 if (cache && ovl_dentry_version_get(dentry) != cache->version) {
226 ovl_cache_put(od, dentry);
227 od->cache = NULL;
228 }
229 WARN_ON(!od->is_real && type != OVL_PATH_MERGE);
230 if (od->is_real && type == OVL_PATH_MERGE)
231 od->is_real = false;
232}
233
234static int ovl_dir_mark_whiteouts(struct dentry *dir,
235 struct ovl_readdir_data *rdd)
236{
237 struct ovl_cache_entry *p;
238 struct dentry *dentry;
239 const struct cred *old_cred;
240 struct cred *override_cred;
241
242 override_cred = prepare_creds();
243 if (!override_cred) {
244 ovl_cache_free(rdd->list);
245 return -ENOMEM;
246 }
247
248 /*
249 * CAP_DAC_OVERRIDE for lookup
250 */
251 cap_raise(override_cred->cap_effective, CAP_DAC_OVERRIDE);
252 old_cred = override_creds(override_cred);
253
254 mutex_lock(&dir->d_inode->i_mutex);
255 list_for_each_entry(p, rdd->list, l_node) {
256 if (p->is_cursor)
257 continue;
258
259 if (p->type != DT_CHR)
260 continue;
261
262 dentry = lookup_one_len(p->name, dir, p->len);
263 if (IS_ERR(dentry))
264 continue;
265
266 p->is_whiteout = ovl_is_whiteout(dentry);
267 dput(dentry);
268 }
269 mutex_unlock(&dir->d_inode->i_mutex);
270
271 revert_creds(old_cred);
272 put_cred(override_cred);
273
274 return 0;
275}
276
277static int ovl_dir_read_merged(struct dentry *dentry, struct list_head *list)
278{
279 int err;
280 struct path lowerpath;
281 struct path upperpath;
282 struct ovl_readdir_data rdd = {
283 .ctx.actor = ovl_fill_merge,
284 .list = list,
285 .root = RB_ROOT,
286 .is_merge = false,
287 };
288
289 ovl_path_lower(dentry, &lowerpath);
290 ovl_path_upper(dentry, &upperpath);
291
292 if (upperpath.dentry) {
293 err = ovl_dir_read(&upperpath, &rdd);
294 if (err)
295 goto out;
296
297 if (lowerpath.dentry) {
298 err = ovl_dir_mark_whiteouts(upperpath.dentry, &rdd);
299 if (err)
300 goto out;
301 }
302 }
303 if (lowerpath.dentry) {
304 /*
305		 * Insert lowerpath entries before upperpath ones; this allows
306 * offsets to be reasonably constant
307 */
308 list_add(&rdd.middle, rdd.list);
309 rdd.is_merge = true;
310 err = ovl_dir_read(&lowerpath, &rdd);
311 list_del(&rdd.middle);
312 }
313out:
314 return err;
315}
316
317static void ovl_seek_cursor(struct ovl_dir_file *od, loff_t pos)
318{
319 struct ovl_cache_entry *p;
320 loff_t off = 0;
321
322 list_for_each_entry(p, &od->cache->entries, l_node) {
323 if (p->is_cursor)
324 continue;
325 if (off >= pos)
326 break;
327 off++;
328 }
329 list_move_tail(&od->cursor.l_node, &p->l_node);
330}
331
332static struct ovl_dir_cache *ovl_cache_get(struct dentry *dentry)
333{
334 int res;
335 struct ovl_dir_cache *cache;
336
337 cache = ovl_dir_cache(dentry);
338 if (cache && ovl_dentry_version_get(dentry) == cache->version) {
339 cache->refcount++;
340 return cache;
341 }
342 ovl_set_dir_cache(dentry, NULL);
343
344 cache = kzalloc(sizeof(struct ovl_dir_cache), GFP_KERNEL);
345 if (!cache)
346 return ERR_PTR(-ENOMEM);
347
348 cache->refcount = 1;
349 INIT_LIST_HEAD(&cache->entries);
350
351 res = ovl_dir_read_merged(dentry, &cache->entries);
352 if (res) {
353 ovl_cache_free(&cache->entries);
354 kfree(cache);
355 return ERR_PTR(res);
356 }
357
358 cache->version = ovl_dentry_version_get(dentry);
359 ovl_set_dir_cache(dentry, cache);
360
361 return cache;
362}
363
364static int ovl_iterate(struct file *file, struct dir_context *ctx)
365{
366 struct ovl_dir_file *od = file->private_data;
367 struct dentry *dentry = file->f_path.dentry;
368
369 if (!ctx->pos)
370 ovl_dir_reset(file);
371
372 if (od->is_real)
373 return iterate_dir(od->realfile, ctx);
374
375 if (!od->cache) {
376 struct ovl_dir_cache *cache;
377
378 cache = ovl_cache_get(dentry);
379 if (IS_ERR(cache))
380 return PTR_ERR(cache);
381
382 od->cache = cache;
383 ovl_seek_cursor(od, ctx->pos);
384 }
385
386 while (od->cursor.l_node.next != &od->cache->entries) {
387 struct ovl_cache_entry *p;
388
389 p = list_entry(od->cursor.l_node.next, struct ovl_cache_entry, l_node);
390 /* Skip cursors */
391 if (!p->is_cursor) {
392 if (!p->is_whiteout) {
393 if (!dir_emit(ctx, p->name, p->len, p->ino, p->type))
394 break;
395 }
396 ctx->pos++;
397 }
398 list_move(&od->cursor.l_node, &p->l_node);
399 }
400 return 0;
401}
402
403static loff_t ovl_dir_llseek(struct file *file, loff_t offset, int origin)
404{
405 loff_t res;
406 struct ovl_dir_file *od = file->private_data;
407
408 mutex_lock(&file_inode(file)->i_mutex);
409 if (!file->f_pos)
410 ovl_dir_reset(file);
411
412 if (od->is_real) {
413 res = vfs_llseek(od->realfile, offset, origin);
414 file->f_pos = od->realfile->f_pos;
415 } else {
416 res = -EINVAL;
417
418 switch (origin) {
419 case SEEK_CUR:
420 offset += file->f_pos;
421 break;
422 case SEEK_SET:
423 break;
424 default:
425 goto out_unlock;
426 }
427 if (offset < 0)
428 goto out_unlock;
429
430 if (offset != file->f_pos) {
431 file->f_pos = offset;
432 if (od->cache)
433 ovl_seek_cursor(od, offset);
434 }
435 res = offset;
436 }
437out_unlock:
438 mutex_unlock(&file_inode(file)->i_mutex);
439
440 return res;
441}
442
443static int ovl_dir_fsync(struct file *file, loff_t start, loff_t end,
444 int datasync)
445{
446 struct ovl_dir_file *od = file->private_data;
447 struct dentry *dentry = file->f_path.dentry;
448 struct file *realfile = od->realfile;
449
450 /*
451 * Need to check if we started out being a lower dir, but got copied up
452 */
453 if (!od->is_upper && ovl_path_type(dentry) != OVL_PATH_LOWER) {
454 struct inode *inode = file_inode(file);
455
456 realfile = lockless_dereference(od->upperfile);
457 if (!realfile) {
458 struct path upperpath;
459
460 ovl_path_upper(dentry, &upperpath);
461 realfile = ovl_path_open(&upperpath, O_RDONLY);
462 smp_mb__before_spinlock();
463 mutex_lock(&inode->i_mutex);
464 if (!od->upperfile) {
465 if (IS_ERR(realfile)) {
466 mutex_unlock(&inode->i_mutex);
467 return PTR_ERR(realfile);
468 }
469 od->upperfile = realfile;
470 } else {
471 /* somebody has beaten us to it */
472 if (!IS_ERR(realfile))
473 fput(realfile);
474 realfile = od->upperfile;
475 }
476 mutex_unlock(&inode->i_mutex);
477 }
478 }
479
480 return vfs_fsync_range(realfile, start, end, datasync);
481}
482
483static int ovl_dir_release(struct inode *inode, struct file *file)
484{
485 struct ovl_dir_file *od = file->private_data;
486
487 if (od->cache) {
488 mutex_lock(&inode->i_mutex);
489 ovl_cache_put(od, file->f_path.dentry);
490 mutex_unlock(&inode->i_mutex);
491 }
492 fput(od->realfile);
493 if (od->upperfile)
494 fput(od->upperfile);
495 kfree(od);
496
497 return 0;
498}
499
500static int ovl_dir_open(struct inode *inode, struct file *file)
501{
502 struct path realpath;
503 struct file *realfile;
504 struct ovl_dir_file *od;
505 enum ovl_path_type type;
506
507 od = kzalloc(sizeof(struct ovl_dir_file), GFP_KERNEL);
508 if (!od)
509 return -ENOMEM;
510
511 type = ovl_path_real(file->f_path.dentry, &realpath);
512 realfile = ovl_path_open(&realpath, file->f_flags);
513 if (IS_ERR(realfile)) {
514 kfree(od);
515 return PTR_ERR(realfile);
516 }
517 INIT_LIST_HEAD(&od->cursor.l_node);
518 od->realfile = realfile;
519 od->is_real = (type != OVL_PATH_MERGE);
520 od->is_upper = (type != OVL_PATH_LOWER);
521 od->cursor.is_cursor = true;
522 file->private_data = od;
523
524 return 0;
525}
526
527const struct file_operations ovl_dir_operations = {
528 .read = generic_read_dir,
529 .open = ovl_dir_open,
530 .iterate = ovl_iterate,
531 .llseek = ovl_dir_llseek,
532 .fsync = ovl_dir_fsync,
533 .release = ovl_dir_release,
534};
535
536int ovl_check_empty_dir(struct dentry *dentry, struct list_head *list)
537{
538 int err;
539 struct ovl_cache_entry *p;
540
541 err = ovl_dir_read_merged(dentry, list);
542 if (err)
543 return err;
544
545 err = 0;
546
547 list_for_each_entry(p, list, l_node) {
548 if (p->is_whiteout)
549 continue;
550
551 if (p->name[0] == '.') {
552 if (p->len == 1)
553 continue;
554 if (p->len == 2 && p->name[1] == '.')
555 continue;
556 }
557 err = -ENOTEMPTY;
558 break;
559 }
560
561 return err;
562}
563
564void ovl_cleanup_whiteouts(struct dentry *upper, struct list_head *list)
565{
566 struct ovl_cache_entry *p;
567
568 mutex_lock_nested(&upper->d_inode->i_mutex, I_MUTEX_CHILD);
569 list_for_each_entry(p, list, l_node) {
570 struct dentry *dentry;
571
572 if (!p->is_whiteout)
573 continue;
574
575 dentry = lookup_one_len(p->name, upper, p->len);
576 if (IS_ERR(dentry)) {
577 pr_err("overlayfs: lookup '%s/%.*s' failed (%i)\n",
578 upper->d_name.name, p->len, p->name,
579 (int) PTR_ERR(dentry));
580 continue;
581 }
582 ovl_cleanup(upper->d_inode, dentry);
583 dput(dentry);
584 }
585 mutex_unlock(&upper->d_inode->i_mutex);
586}
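
For illustration: ovl_dir_mark_whiteouts() above only probes DT_CHR entries, and ovl_is_whiteout() relies on the kernel's whiteout convention. Assuming the usual representation of a whiteout as a 0/0 character device, a whiteout left on the upper layer can be recognised from userspace as sketched below; the path is hypothetical and this is not part of the patch.

/*
 * Userspace sketch: check whether an upper-layer entry is an overlayfs
 * whiteout, i.e. a character device with device number 0/0 (the entry
 * is simply hidden when listing through the overlay mount).
 */
#include <stdio.h>
#include <sys/stat.h>
#include <sys/sysmacros.h>

int main(void)
{
	struct stat st;

	/* "/upper/dir/deleted_name" is a hypothetical upper-layer path */
	if (lstat("/upper/dir/deleted_name", &st) == -1) {
		perror("lstat");
		return 1;
	}
	if (S_ISCHR(st.st_mode) && major(st.st_rdev) == 0 &&
	    minor(st.st_rdev) == 0)
		printf("whiteout\n");
	else
		printf("ordinary upper entry\n");
	return 0;
}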
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
new file mode 100644
index 000000000000..f16d318b71f8
--- /dev/null
+++ b/fs/overlayfs/super.c
@@ -0,0 +1,833 @@
1/*
2 *
3 * Copyright (C) 2011 Novell Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 */
9
10#include <linux/fs.h>
11#include <linux/namei.h>
12#include <linux/xattr.h>
13#include <linux/security.h>
14#include <linux/mount.h>
15#include <linux/slab.h>
16#include <linux/parser.h>
17#include <linux/module.h>
18#include <linux/sched.h>
19#include <linux/statfs.h>
20#include <linux/seq_file.h>
21#include "overlayfs.h"
22
23MODULE_AUTHOR("Miklos Szeredi <miklos@szeredi.hu>");
24MODULE_DESCRIPTION("Overlay filesystem");
25MODULE_LICENSE("GPL");
26
27#define OVERLAYFS_SUPER_MAGIC 0x794c7630
28
29struct ovl_config {
30 char *lowerdir;
31 char *upperdir;
32 char *workdir;
33};
34
35/* private information held for overlayfs's superblock */
36struct ovl_fs {
37 struct vfsmount *upper_mnt;
38 struct vfsmount *lower_mnt;
39 struct dentry *workdir;
40 long lower_namelen;
41 /* pathnames of lower and upper dirs, for show_options */
42 struct ovl_config config;
43};
44
45struct ovl_dir_cache;
46
47/* private information held for every overlayfs dentry */
48struct ovl_entry {
49 struct dentry *__upperdentry;
50 struct dentry *lowerdentry;
51 struct ovl_dir_cache *cache;
52 union {
53 struct {
54 u64 version;
55 bool opaque;
56 };
57 struct rcu_head rcu;
58 };
59};
60
61const char *ovl_opaque_xattr = "trusted.overlay.opaque";
62
63
64enum ovl_path_type ovl_path_type(struct dentry *dentry)
65{
66 struct ovl_entry *oe = dentry->d_fsdata;
67
68 if (oe->__upperdentry) {
69 if (oe->lowerdentry) {
70 if (S_ISDIR(dentry->d_inode->i_mode))
71 return OVL_PATH_MERGE;
72 else
73 return OVL_PATH_UPPER;
74 } else {
75 if (oe->opaque)
76 return OVL_PATH_UPPER;
77 else
78 return OVL_PATH_PURE_UPPER;
79 }
80 } else {
81 return OVL_PATH_LOWER;
82 }
83}
84
85static struct dentry *ovl_upperdentry_dereference(struct ovl_entry *oe)
86{
87 return lockless_dereference(oe->__upperdentry);
88}
89
90void ovl_path_upper(struct dentry *dentry, struct path *path)
91{
92 struct ovl_fs *ofs = dentry->d_sb->s_fs_info;
93 struct ovl_entry *oe = dentry->d_fsdata;
94
95 path->mnt = ofs->upper_mnt;
96 path->dentry = ovl_upperdentry_dereference(oe);
97}
98
99enum ovl_path_type ovl_path_real(struct dentry *dentry, struct path *path)
100{
101
102 enum ovl_path_type type = ovl_path_type(dentry);
103
104 if (type == OVL_PATH_LOWER)
105 ovl_path_lower(dentry, path);
106 else
107 ovl_path_upper(dentry, path);
108
109 return type;
110}
111
112struct dentry *ovl_dentry_upper(struct dentry *dentry)
113{
114 struct ovl_entry *oe = dentry->d_fsdata;
115
116 return ovl_upperdentry_dereference(oe);
117}
118
119struct dentry *ovl_dentry_lower(struct dentry *dentry)
120{
121 struct ovl_entry *oe = dentry->d_fsdata;
122
123 return oe->lowerdentry;
124}
125
126struct dentry *ovl_dentry_real(struct dentry *dentry)
127{
128 struct ovl_entry *oe = dentry->d_fsdata;
129 struct dentry *realdentry;
130
131 realdentry = ovl_upperdentry_dereference(oe);
132 if (!realdentry)
133 realdentry = oe->lowerdentry;
134
135 return realdentry;
136}
137
138struct dentry *ovl_entry_real(struct ovl_entry *oe, bool *is_upper)
139{
140 struct dentry *realdentry;
141
142 realdentry = ovl_upperdentry_dereference(oe);
143 if (realdentry) {
144 *is_upper = true;
145 } else {
146 realdentry = oe->lowerdentry;
147 *is_upper = false;
148 }
149 return realdentry;
150}
151
152struct ovl_dir_cache *ovl_dir_cache(struct dentry *dentry)
153{
154 struct ovl_entry *oe = dentry->d_fsdata;
155
156 return oe->cache;
157}
158
159void ovl_set_dir_cache(struct dentry *dentry, struct ovl_dir_cache *cache)
160{
161 struct ovl_entry *oe = dentry->d_fsdata;
162
163 oe->cache = cache;
164}
165
166void ovl_path_lower(struct dentry *dentry, struct path *path)
167{
168 struct ovl_fs *ofs = dentry->d_sb->s_fs_info;
169 struct ovl_entry *oe = dentry->d_fsdata;
170
171 path->mnt = ofs->lower_mnt;
172 path->dentry = oe->lowerdentry;
173}
174
175int ovl_want_write(struct dentry *dentry)
176{
177 struct ovl_fs *ofs = dentry->d_sb->s_fs_info;
178 return mnt_want_write(ofs->upper_mnt);
179}
180
181void ovl_drop_write(struct dentry *dentry)
182{
183 struct ovl_fs *ofs = dentry->d_sb->s_fs_info;
184 mnt_drop_write(ofs->upper_mnt);
185}
186
187struct dentry *ovl_workdir(struct dentry *dentry)
188{
189 struct ovl_fs *ofs = dentry->d_sb->s_fs_info;
190 return ofs->workdir;
191}
192
193bool ovl_dentry_is_opaque(struct dentry *dentry)
194{
195 struct ovl_entry *oe = dentry->d_fsdata;
196 return oe->opaque;
197}
198
199void ovl_dentry_set_opaque(struct dentry *dentry, bool opaque)
200{
201 struct ovl_entry *oe = dentry->d_fsdata;
202 oe->opaque = opaque;
203}
204
205void ovl_dentry_update(struct dentry *dentry, struct dentry *upperdentry)
206{
207 struct ovl_entry *oe = dentry->d_fsdata;
208
209 WARN_ON(!mutex_is_locked(&upperdentry->d_parent->d_inode->i_mutex));
210 WARN_ON(oe->__upperdentry);
211 BUG_ON(!upperdentry->d_inode);
212 /*
213 * Make sure upperdentry is consistent before making it visible to
214 * ovl_upperdentry_dereference().
215 */
216 smp_wmb();
217 oe->__upperdentry = upperdentry;
218}
219
220void ovl_dentry_version_inc(struct dentry *dentry)
221{
222 struct ovl_entry *oe = dentry->d_fsdata;
223
224 WARN_ON(!mutex_is_locked(&dentry->d_inode->i_mutex));
225 oe->version++;
226}
227
228u64 ovl_dentry_version_get(struct dentry *dentry)
229{
230 struct ovl_entry *oe = dentry->d_fsdata;
231
232 WARN_ON(!mutex_is_locked(&dentry->d_inode->i_mutex));
233 return oe->version;
234}
235
236bool ovl_is_whiteout(struct dentry *dentry)
237{
238 struct inode *inode = dentry->d_inode;
239
240 return inode && IS_WHITEOUT(inode);
241}
242
243static bool ovl_is_opaquedir(struct dentry *dentry)
244{
245 int res;
246 char val;
247 struct inode *inode = dentry->d_inode;
248
249 if (!S_ISDIR(inode->i_mode) || !inode->i_op->getxattr)
250 return false;
251
252 res = inode->i_op->getxattr(dentry, ovl_opaque_xattr, &val, 1);
253 if (res == 1 && val == 'y')
254 return true;
255
256 return false;
257}
258
259static void ovl_dentry_release(struct dentry *dentry)
260{
261 struct ovl_entry *oe = dentry->d_fsdata;
262
263 if (oe) {
264 dput(oe->__upperdentry);
265 dput(oe->lowerdentry);
266 kfree_rcu(oe, rcu);
267 }
268}
269
270static const struct dentry_operations ovl_dentry_operations = {
271 .d_release = ovl_dentry_release,
272};
273
274static struct ovl_entry *ovl_alloc_entry(void)
275{
276 return kzalloc(sizeof(struct ovl_entry), GFP_KERNEL);
277}
278
279static inline struct dentry *ovl_lookup_real(struct dentry *dir,
280 struct qstr *name)
281{
282 struct dentry *dentry;
283
284 mutex_lock(&dir->d_inode->i_mutex);
285 dentry = lookup_one_len(name->name, dir, name->len);
286 mutex_unlock(&dir->d_inode->i_mutex);
287
288 if (IS_ERR(dentry)) {
289 if (PTR_ERR(dentry) == -ENOENT)
290 dentry = NULL;
291 } else if (!dentry->d_inode) {
292 dput(dentry);
293 dentry = NULL;
294 }
295 return dentry;
296}
297
298struct dentry *ovl_lookup(struct inode *dir, struct dentry *dentry,
299 unsigned int flags)
300{
301 struct ovl_entry *oe;
302 struct dentry *upperdir;
303 struct dentry *lowerdir;
304 struct dentry *upperdentry = NULL;
305 struct dentry *lowerdentry = NULL;
306 struct inode *inode = NULL;
307 int err;
308
309 err = -ENOMEM;
310 oe = ovl_alloc_entry();
311 if (!oe)
312 goto out;
313
314 upperdir = ovl_dentry_upper(dentry->d_parent);
315 lowerdir = ovl_dentry_lower(dentry->d_parent);
316
317 if (upperdir) {
318 upperdentry = ovl_lookup_real(upperdir, &dentry->d_name);
319 err = PTR_ERR(upperdentry);
320 if (IS_ERR(upperdentry))
321 goto out_put_dir;
322
323 if (lowerdir && upperdentry) {
324 if (ovl_is_whiteout(upperdentry)) {
325 dput(upperdentry);
326 upperdentry = NULL;
327 oe->opaque = true;
328 } else if (ovl_is_opaquedir(upperdentry)) {
329 oe->opaque = true;
330 }
331 }
332 }
333 if (lowerdir && !oe->opaque) {
334 lowerdentry = ovl_lookup_real(lowerdir, &dentry->d_name);
335 err = PTR_ERR(lowerdentry);
336 if (IS_ERR(lowerdentry))
337 goto out_dput_upper;
338 }
339
340 if (lowerdentry && upperdentry &&
341 (!S_ISDIR(upperdentry->d_inode->i_mode) ||
342 !S_ISDIR(lowerdentry->d_inode->i_mode))) {
343 dput(lowerdentry);
344 lowerdentry = NULL;
345 oe->opaque = true;
346 }
347
348 if (lowerdentry || upperdentry) {
349 struct dentry *realdentry;
350
351 realdentry = upperdentry ? upperdentry : lowerdentry;
352 err = -ENOMEM;
353 inode = ovl_new_inode(dentry->d_sb, realdentry->d_inode->i_mode,
354 oe);
355 if (!inode)
356 goto out_dput;
357 ovl_copyattr(realdentry->d_inode, inode);
358 }
359
360 oe->__upperdentry = upperdentry;
361 oe->lowerdentry = lowerdentry;
362
363 dentry->d_fsdata = oe;
364 d_add(dentry, inode);
365
366 return NULL;
367
368out_dput:
369 dput(lowerdentry);
370out_dput_upper:
371 dput(upperdentry);
372out_put_dir:
373 kfree(oe);
374out:
375 return ERR_PTR(err);
376}
377
378struct file *ovl_path_open(struct path *path, int flags)
379{
380 return dentry_open(path, flags, current_cred());
381}
382
383static void ovl_put_super(struct super_block *sb)
384{
385 struct ovl_fs *ufs = sb->s_fs_info;
386
387 dput(ufs->workdir);
388 mntput(ufs->upper_mnt);
389 mntput(ufs->lower_mnt);
390
391 kfree(ufs->config.lowerdir);
392 kfree(ufs->config.upperdir);
393 kfree(ufs->config.workdir);
394 kfree(ufs);
395}
396
397/**
398 * ovl_statfs
399 * @sb: The overlayfs super block
400 * @buf: The struct kstatfs to fill in with stats
401 *
402 * Get the filesystem statistics. As writes always target the upper layer
403 * filesystem, pass the statfs to the same filesystem.
404 */
405static int ovl_statfs(struct dentry *dentry, struct kstatfs *buf)
406{
407 struct ovl_fs *ofs = dentry->d_sb->s_fs_info;
408 struct dentry *root_dentry = dentry->d_sb->s_root;
409 struct path path;
410 int err;
411
412 ovl_path_upper(root_dentry, &path);
413
414 err = vfs_statfs(&path, buf);
415 if (!err) {
416 buf->f_namelen = max(buf->f_namelen, ofs->lower_namelen);
417 buf->f_type = OVERLAYFS_SUPER_MAGIC;
418 }
419
420 return err;
421}
422
423/**
424 * ovl_show_options
425 *
426 * Prints the mount options for a given superblock.
427 * Returns zero; does not fail.
428 */
429static int ovl_show_options(struct seq_file *m, struct dentry *dentry)
430{
431 struct super_block *sb = dentry->d_sb;
432 struct ovl_fs *ufs = sb->s_fs_info;
433
434 seq_printf(m, ",lowerdir=%s", ufs->config.lowerdir);
435 seq_printf(m, ",upperdir=%s", ufs->config.upperdir);
436 seq_printf(m, ",workdir=%s", ufs->config.workdir);
437 return 0;
438}
439
440static const struct super_operations ovl_super_operations = {
441 .put_super = ovl_put_super,
442 .statfs = ovl_statfs,
443 .show_options = ovl_show_options,
444};
445
446enum {
447 OPT_LOWERDIR,
448 OPT_UPPERDIR,
449 OPT_WORKDIR,
450 OPT_ERR,
451};
452
453static const match_table_t ovl_tokens = {
454 {OPT_LOWERDIR, "lowerdir=%s"},
455 {OPT_UPPERDIR, "upperdir=%s"},
456 {OPT_WORKDIR, "workdir=%s"},
457 {OPT_ERR, NULL}
458};
459
460static char *ovl_next_opt(char **s)
461{
462 char *sbegin = *s;
463 char *p;
464
465 if (sbegin == NULL)
466 return NULL;
467
468 for (p = sbegin; *p; p++) {
469 if (*p == '\\') {
470 p++;
471 if (!*p)
472 break;
473 } else if (*p == ',') {
474 *p = '\0';
475 *s = p + 1;
476 return sbegin;
477 }
478 }
479 *s = NULL;
480 return sbegin;
481}
482
483static int ovl_parse_opt(char *opt, struct ovl_config *config)
484{
485 char *p;
486
487 while ((p = ovl_next_opt(&opt)) != NULL) {
488 int token;
489 substring_t args[MAX_OPT_ARGS];
490
491 if (!*p)
492 continue;
493
494 token = match_token(p, ovl_tokens, args);
495 switch (token) {
496 case OPT_UPPERDIR:
497 kfree(config->upperdir);
498 config->upperdir = match_strdup(&args[0]);
499 if (!config->upperdir)
500 return -ENOMEM;
501 break;
502
503 case OPT_LOWERDIR:
504 kfree(config->lowerdir);
505 config->lowerdir = match_strdup(&args[0]);
506 if (!config->lowerdir)
507 return -ENOMEM;
508 break;
509
510 case OPT_WORKDIR:
511 kfree(config->workdir);
512 config->workdir = match_strdup(&args[0]);
513 if (!config->workdir)
514 return -ENOMEM;
515 break;
516
517 default:
518 return -EINVAL;
519 }
520 }
521 return 0;
522}
523
524#define OVL_WORKDIR_NAME "work"
525
526static struct dentry *ovl_workdir_create(struct vfsmount *mnt,
527 struct dentry *dentry)
528{
529 struct inode *dir = dentry->d_inode;
530 struct dentry *work;
531 int err;
532 bool retried = false;
533
534 err = mnt_want_write(mnt);
535 if (err)
536 return ERR_PTR(err);
537
538 mutex_lock_nested(&dir->i_mutex, I_MUTEX_PARENT);
539retry:
540 work = lookup_one_len(OVL_WORKDIR_NAME, dentry,
541 strlen(OVL_WORKDIR_NAME));
542
543 if (!IS_ERR(work)) {
544 struct kstat stat = {
545 .mode = S_IFDIR | 0,
546 };
547
548 if (work->d_inode) {
549 err = -EEXIST;
550 if (retried)
551 goto out_dput;
552
553 retried = true;
554 ovl_cleanup(dir, work);
555 dput(work);
556 goto retry;
557 }
558
559 err = ovl_create_real(dir, work, &stat, NULL, NULL, true);
560 if (err)
561 goto out_dput;
562 }
563out_unlock:
564 mutex_unlock(&dir->i_mutex);
565 mnt_drop_write(mnt);
566
567 return work;
568
569out_dput:
570 dput(work);
571 work = ERR_PTR(err);
572 goto out_unlock;
573}
574
575static void ovl_unescape(char *s)
576{
577 char *d = s;
578
579 for (;; s++, d++) {
580 if (*s == '\\')
581 s++;
582 *d = *s;
583 if (!*s)
584 break;
585 }
586}
587
588static int ovl_mount_dir(const char *name, struct path *path)
589{
590 int err;
591 char *tmp = kstrdup(name, GFP_KERNEL);
592
593 if (!tmp)
594 return -ENOMEM;
595
596 ovl_unescape(tmp);
597 err = kern_path(tmp, LOOKUP_FOLLOW, path);
598 if (err) {
599 pr_err("overlayfs: failed to resolve '%s': %i\n", tmp, err);
600 err = -EINVAL;
601 }
602 kfree(tmp);
603 return err;
604}
605
606static bool ovl_is_allowed_fs_type(struct dentry *root)
607{
608 const struct dentry_operations *dop = root->d_op;
609
610 /*
611 * We don't support:
612 * - automount filesystems
613 * - filesystems with revalidate (FIXME for lower layer)
614 * - filesystems with case insensitive names
615 */
616 if (dop &&
617 (dop->d_manage || dop->d_automount ||
618 dop->d_revalidate || dop->d_weak_revalidate ||
619 dop->d_compare || dop->d_hash)) {
620 return false;
621 }
622 return true;
623}
624
625/* Workdir should not be subdir of upperdir and vice versa */
626static bool ovl_workdir_ok(struct dentry *workdir, struct dentry *upperdir)
627{
628 bool ok = false;
629
630 if (workdir != upperdir) {
631 ok = (lock_rename(workdir, upperdir) == NULL);
632 unlock_rename(workdir, upperdir);
633 }
634 return ok;
635}
636
637static int ovl_fill_super(struct super_block *sb, void *data, int silent)
638{
639 struct path lowerpath;
640 struct path upperpath;
641 struct path workpath;
642 struct inode *root_inode;
643 struct dentry *root_dentry;
644 struct ovl_entry *oe;
645 struct ovl_fs *ufs;
646 struct kstatfs statfs;
647 int err;
648
649 err = -ENOMEM;
650 ufs = kzalloc(sizeof(struct ovl_fs), GFP_KERNEL);
651 if (!ufs)
652 goto out;
653
654 err = ovl_parse_opt((char *) data, &ufs->config);
655 if (err)
656 goto out_free_config;
657
658 /* FIXME: workdir is not needed for a R/O mount */
659 err = -EINVAL;
660 if (!ufs->config.upperdir || !ufs->config.lowerdir ||
661 !ufs->config.workdir) {
662 pr_err("overlayfs: missing upperdir or lowerdir or workdir\n");
663 goto out_free_config;
664 }
665
666 err = -ENOMEM;
667 oe = ovl_alloc_entry();
668 if (oe == NULL)
669 goto out_free_config;
670
671 err = ovl_mount_dir(ufs->config.upperdir, &upperpath);
672 if (err)
673 goto out_free_oe;
674
675 err = ovl_mount_dir(ufs->config.lowerdir, &lowerpath);
676 if (err)
677 goto out_put_upperpath;
678
679 err = ovl_mount_dir(ufs->config.workdir, &workpath);
680 if (err)
681 goto out_put_lowerpath;
682
683 err = -EINVAL;
684 if (!S_ISDIR(upperpath.dentry->d_inode->i_mode) ||
685 !S_ISDIR(lowerpath.dentry->d_inode->i_mode) ||
686 !S_ISDIR(workpath.dentry->d_inode->i_mode)) {
687 pr_err("overlayfs: upperdir or lowerdir or workdir not a directory\n");
688 goto out_put_workpath;
689 }
690
691 if (upperpath.mnt != workpath.mnt) {
692 pr_err("overlayfs: workdir and upperdir must reside under the same mount\n");
693 goto out_put_workpath;
694 }
695 if (!ovl_workdir_ok(workpath.dentry, upperpath.dentry)) {
696 pr_err("overlayfs: workdir and upperdir must be separate subtrees\n");
697 goto out_put_workpath;
698 }
699
700 if (!ovl_is_allowed_fs_type(upperpath.dentry)) {
701 pr_err("overlayfs: filesystem of upperdir is not supported\n");
702 goto out_put_workpath;
703 }
704
705 if (!ovl_is_allowed_fs_type(lowerpath.dentry)) {
706 pr_err("overlayfs: filesystem of lowerdir is not supported\n");
707 goto out_put_workpath;
708 }
709
710 err = vfs_statfs(&lowerpath, &statfs);
711 if (err) {
712 pr_err("overlayfs: statfs failed on lowerpath\n");
713 goto out_put_workpath;
714 }
715 ufs->lower_namelen = statfs.f_namelen;
716
717 sb->s_stack_depth = max(upperpath.mnt->mnt_sb->s_stack_depth,
718 lowerpath.mnt->mnt_sb->s_stack_depth) + 1;
719
720 err = -EINVAL;
721 if (sb->s_stack_depth > FILESYSTEM_MAX_STACK_DEPTH) {
722 pr_err("overlayfs: maximum fs stacking depth exceeded\n");
723 goto out_put_workpath;
724 }
725
726 ufs->upper_mnt = clone_private_mount(&upperpath);
727 err = PTR_ERR(ufs->upper_mnt);
728 if (IS_ERR(ufs->upper_mnt)) {
729 pr_err("overlayfs: failed to clone upperpath\n");
730 goto out_put_workpath;
731 }
732
733 ufs->lower_mnt = clone_private_mount(&lowerpath);
734 err = PTR_ERR(ufs->lower_mnt);
735 if (IS_ERR(ufs->lower_mnt)) {
736 pr_err("overlayfs: failed to clone lowerpath\n");
737 goto out_put_upper_mnt;
738 }
739
740 ufs->workdir = ovl_workdir_create(ufs->upper_mnt, workpath.dentry);
741 err = PTR_ERR(ufs->workdir);
742 if (IS_ERR(ufs->workdir)) {
743 pr_err("overlayfs: failed to create directory %s/%s\n",
744 ufs->config.workdir, OVL_WORKDIR_NAME);
745 goto out_put_lower_mnt;
746 }
747
748 /*
749 * Make lower_mnt R/O. That way fchmod/fchown on lower file
750 * will fail instead of modifying lower fs.
751 */
752 ufs->lower_mnt->mnt_flags |= MNT_READONLY;
753
754 /* If the upper fs is r/o, we mark overlayfs r/o too */
755 if (ufs->upper_mnt->mnt_sb->s_flags & MS_RDONLY)
756 sb->s_flags |= MS_RDONLY;
757
758 sb->s_d_op = &ovl_dentry_operations;
759
760 err = -ENOMEM;
761 root_inode = ovl_new_inode(sb, S_IFDIR, oe);
762 if (!root_inode)
763 goto out_put_workdir;
764
765 root_dentry = d_make_root(root_inode);
766 if (!root_dentry)
767 goto out_put_workdir;
768
769 mntput(upperpath.mnt);
770 mntput(lowerpath.mnt);
771 path_put(&workpath);
772
773 oe->__upperdentry = upperpath.dentry;
774 oe->lowerdentry = lowerpath.dentry;
775
776 root_dentry->d_fsdata = oe;
777
778 sb->s_magic = OVERLAYFS_SUPER_MAGIC;
779 sb->s_op = &ovl_super_operations;
780 sb->s_root = root_dentry;
781 sb->s_fs_info = ufs;
782
783 return 0;
784
785out_put_workdir:
786 dput(ufs->workdir);
787out_put_lower_mnt:
788 mntput(ufs->lower_mnt);
789out_put_upper_mnt:
790 mntput(ufs->upper_mnt);
791out_put_workpath:
792 path_put(&workpath);
793out_put_lowerpath:
794 path_put(&lowerpath);
795out_put_upperpath:
796 path_put(&upperpath);
797out_free_oe:
798 kfree(oe);
799out_free_config:
800 kfree(ufs->config.lowerdir);
801 kfree(ufs->config.upperdir);
802 kfree(ufs->config.workdir);
803 kfree(ufs);
804out:
805 return err;
806}
807
808static struct dentry *ovl_mount(struct file_system_type *fs_type, int flags,
809 const char *dev_name, void *raw_data)
810{
811 return mount_nodev(fs_type, flags, raw_data, ovl_fill_super);
812}
813
814static struct file_system_type ovl_fs_type = {
815 .owner = THIS_MODULE,
816 .name = "overlay",
817 .mount = ovl_mount,
818 .kill_sb = kill_anon_super,
819};
820MODULE_ALIAS_FS("overlay");
821
822static int __init ovl_init(void)
823{
824 return register_filesystem(&ovl_fs_type);
825}
826
827static void __exit ovl_exit(void)
828{
829 unregister_filesystem(&ovl_fs_type);
830}
831
832module_init(ovl_init);
833module_exit(ovl_exit);
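
For illustration: the option names parsed by ovl_parse_opt() (lowerdir, upperdir, workdir) and the "overlay" name registered in ovl_fs_type correspond to a plain mount(2) call from userspace. A minimal sketch follows; the directories are hypothetical, must already exist, and workdir must live on the same writable filesystem as upperdir, as enforced by ovl_fill_super() above. It is not part of the patch.

/*
 * Userspace sketch: mount an overlay using the option names parsed by
 * ovl_parse_opt() and the "overlay" filesystem name registered above.
 */
#include <stdio.h>
#include <sys/mount.h>

int main(void)
{
	/* all four paths are hypothetical examples */
	const char *opts = "lowerdir=/lower,upperdir=/upper,workdir=/work";

	if (mount("overlay", "/merged", "overlay", 0, opts) == -1) {
		perror("mount");
		return 1;
	}
	return 0;
}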
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 8b663b2d9562..6b4527216a7f 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -634,7 +634,7 @@ int dquot_writeback_dquots(struct super_block *sb, int type)
634 dqstats_inc(DQST_LOOKUPS); 634 dqstats_inc(DQST_LOOKUPS);
635 err = sb->dq_op->write_dquot(dquot); 635 err = sb->dq_op->write_dquot(dquot);
636 if (!ret && err) 636 if (!ret && err)
637 err = ret; 637 ret = err;
638 dqput(dquot); 638 dqput(dquot);
639 spin_lock(&dq_list_lock); 639 spin_lock(&dq_list_lock);
640 } 640 }
diff --git a/fs/splice.c b/fs/splice.c
index f5cb9ba84510..75c6058eabf2 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -1330,6 +1330,7 @@ long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
1330 1330
1331 return ret; 1331 return ret;
1332} 1332}
1333EXPORT_SYMBOL(do_splice_direct);
1333 1334
1334static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe, 1335static int splice_pipe_to_pipe(struct pipe_inode_info *ipipe,
1335 struct pipe_inode_info *opipe, 1336 struct pipe_inode_info *opipe,
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 92e8f99a5857..281002689d64 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -1338,7 +1338,10 @@ xfs_free_file_space(
1338 goto out; 1338 goto out;
1339} 1339}
1340 1340
1341 1341/*
1342 * Preallocate and zero a range of a file. This mechanism has the allocation
1343 * semantics of fallocate and in addition converts data in the range to zeroes.
1344 */
1342int 1345int
1343xfs_zero_file_space( 1346xfs_zero_file_space(
1344 struct xfs_inode *ip, 1347 struct xfs_inode *ip,
@@ -1346,65 +1349,30 @@ xfs_zero_file_space(
1346 xfs_off_t len) 1349 xfs_off_t len)
1347{ 1350{
1348 struct xfs_mount *mp = ip->i_mount; 1351 struct xfs_mount *mp = ip->i_mount;
1349 uint granularity; 1352 uint blksize;
1350 xfs_off_t start_boundary;
1351 xfs_off_t end_boundary;
1352 int error; 1353 int error;
1353 1354
1354 trace_xfs_zero_file_space(ip); 1355 trace_xfs_zero_file_space(ip);
1355 1356
1356 granularity = max_t(uint, 1 << mp->m_sb.sb_blocklog, PAGE_CACHE_SIZE); 1357 blksize = 1 << mp->m_sb.sb_blocklog;
1357 1358
1358 /* 1359 /*
1359 * Round the range of extents we are going to convert inwards. If the 1360 * Punch a hole and prealloc the range. We use hole punch rather than
1360 * offset is aligned, then it doesn't get changed so we zero from the 1361 * unwritten extent conversion for two reasons:
1361 * start of the block offset points to. 1362 *
1363 * 1.) Hole punch handles partial block zeroing for us.
1364 *
1365 * 2.) If prealloc returns ENOSPC, the file range is still zero-valued
1366 * by virtue of the hole punch.
1362 */ 1367 */
1363 start_boundary = round_up(offset, granularity); 1368 error = xfs_free_file_space(ip, offset, len);
1364 end_boundary = round_down(offset + len, granularity); 1369 if (error)
1365 1370 goto out;
1366 ASSERT(start_boundary >= offset);
1367 ASSERT(end_boundary <= offset + len);
1368
1369 if (start_boundary < end_boundary - 1) {
1370 /*
1371 * Writeback the range to ensure any inode size updates due to
1372 * appending writes make it to disk (otherwise we could just
1373 * punch out the delalloc blocks).
1374 */
1375 error = filemap_write_and_wait_range(VFS_I(ip)->i_mapping,
1376 start_boundary, end_boundary - 1);
1377 if (error)
1378 goto out;
1379 truncate_pagecache_range(VFS_I(ip), start_boundary,
1380 end_boundary - 1);
1381
1382 /* convert the blocks */
1383 error = xfs_alloc_file_space(ip, start_boundary,
1384 end_boundary - start_boundary - 1,
1385 XFS_BMAPI_PREALLOC | XFS_BMAPI_CONVERT);
1386 if (error)
1387 goto out;
1388
1389 /* We've handled the interior of the range, now for the edges */
1390 if (start_boundary != offset) {
1391 error = xfs_iozero(ip, offset, start_boundary - offset);
1392 if (error)
1393 goto out;
1394 }
1395
1396 if (end_boundary != offset + len)
1397 error = xfs_iozero(ip, end_boundary,
1398 offset + len - end_boundary);
1399
1400 } else {
1401 /*
1402 * It's either a sub-granularity range or the range spanned lies
1403 * partially across two adjacent blocks.
1404 */
1405 error = xfs_iozero(ip, offset, len);
1406 }
1407 1371
1372 error = xfs_alloc_file_space(ip, round_down(offset, blksize),
1373 round_up(offset + len, blksize) -
1374 round_down(offset, blksize),
1375 XFS_BMAPI_PREALLOC);
1408out: 1376out:
1409 return error; 1377 return error;
1410 1378
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index f1deb961a296..894924a5129b 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -236,8 +236,10 @@ xfs_bulkstat_grab_ichunk(
236 XFS_WANT_CORRUPTED_RETURN(stat == 1); 236 XFS_WANT_CORRUPTED_RETURN(stat == 1);
237 237
238 /* Check if the record contains the inode in request */ 238 /* Check if the record contains the inode in request */
239 if (irec->ir_startino + XFS_INODES_PER_CHUNK <= agino) 239 if (irec->ir_startino + XFS_INODES_PER_CHUNK <= agino) {
240 return -EINVAL; 240 *icount = 0;
241 return 0;
242 }
241 243
242 idx = agino - irec->ir_startino + 1; 244 idx = agino - irec->ir_startino + 1;
243 if (idx < XFS_INODES_PER_CHUNK && 245 if (idx < XFS_INODES_PER_CHUNK &&
@@ -262,75 +264,76 @@ xfs_bulkstat_grab_ichunk(
262 264
263#define XFS_BULKSTAT_UBLEFT(ubleft) ((ubleft) >= statstruct_size) 265#define XFS_BULKSTAT_UBLEFT(ubleft) ((ubleft) >= statstruct_size)
264 266
267struct xfs_bulkstat_agichunk {
268 char __user **ac_ubuffer;/* pointer into user's buffer */
269 int ac_ubleft; /* bytes left in user's buffer */
270 int ac_ubelem; /* spaces used in user's buffer */
271};
272
265/* 273/*
266 * Process inodes in chunk with a pointer to a formatter function 274 * Process inodes in chunk with a pointer to a formatter function
267 * that will iget the inode and fill in the appropriate structure. 275 * that will iget the inode and fill in the appropriate structure.
268 */ 276 */
269int 277static int
270xfs_bulkstat_ag_ichunk( 278xfs_bulkstat_ag_ichunk(
271 struct xfs_mount *mp, 279 struct xfs_mount *mp,
272 xfs_agnumber_t agno, 280 xfs_agnumber_t agno,
273 struct xfs_inobt_rec_incore *irbp, 281 struct xfs_inobt_rec_incore *irbp,
274 bulkstat_one_pf formatter, 282 bulkstat_one_pf formatter,
275 size_t statstruct_size, 283 size_t statstruct_size,
276 struct xfs_bulkstat_agichunk *acp) 284 struct xfs_bulkstat_agichunk *acp,
285 xfs_agino_t *last_agino)
277{ 286{
278 xfs_ino_t lastino = acp->ac_lastino;
279 char __user **ubufp = acp->ac_ubuffer; 287 char __user **ubufp = acp->ac_ubuffer;
280 int ubleft = acp->ac_ubleft; 288 int chunkidx;
281 int ubelem = acp->ac_ubelem;
282 int chunkidx, clustidx;
283 int error = 0; 289 int error = 0;
284 xfs_agino_t agino; 290 xfs_agino_t agino = irbp->ir_startino;
285 291
286 for (agino = irbp->ir_startino, chunkidx = clustidx = 0; 292 for (chunkidx = 0; chunkidx < XFS_INODES_PER_CHUNK;
287 XFS_BULKSTAT_UBLEFT(ubleft) && 293 chunkidx++, agino++) {
288 irbp->ir_freecount < XFS_INODES_PER_CHUNK; 294 int fmterror;
289 chunkidx++, clustidx++, agino++) {
290 int fmterror; /* bulkstat formatter result */
291 int ubused; 295 int ubused;
292 xfs_ino_t ino = XFS_AGINO_TO_INO(mp, agno, agino);
293 296
294 ASSERT(chunkidx < XFS_INODES_PER_CHUNK); 297 /* inode won't fit in buffer, we are done */
298 if (acp->ac_ubleft < statstruct_size)
299 break;
295 300
296 /* Skip if this inode is free */ 301 /* Skip if this inode is free */
297 if (XFS_INOBT_MASK(chunkidx) & irbp->ir_free) { 302 if (XFS_INOBT_MASK(chunkidx) & irbp->ir_free)
298 lastino = ino;
299 continue; 303 continue;
300 }
301
302 /*
303 * Count used inodes as free so we can tell when the
304 * chunk is used up.
305 */
306 irbp->ir_freecount++;
307 304
308 /* Get the inode and fill in a single buffer */ 305 /* Get the inode and fill in a single buffer */
309 ubused = statstruct_size; 306 ubused = statstruct_size;
310 error = formatter(mp, ino, *ubufp, ubleft, &ubused, &fmterror); 307 error = formatter(mp, XFS_AGINO_TO_INO(mp, agno, agino),
311 if (fmterror == BULKSTAT_RV_NOTHING) { 308 *ubufp, acp->ac_ubleft, &ubused, &fmterror);
312 if (error && error != -ENOENT && error != -EINVAL) { 309
313 ubleft = 0; 310 if (fmterror == BULKSTAT_RV_GIVEUP ||
314 break; 311 (error && error != -ENOENT && error != -EINVAL)) {
315 } 312 acp->ac_ubleft = 0;
316 lastino = ino;
317 continue;
318 }
319 if (fmterror == BULKSTAT_RV_GIVEUP) {
320 ubleft = 0;
321 ASSERT(error); 313 ASSERT(error);
322 break; 314 break;
323 } 315 }
324 if (*ubufp) 316
325 *ubufp += ubused; 317 /* be careful not to leak error if at end of chunk */
326 ubleft -= ubused; 318 if (fmterror == BULKSTAT_RV_NOTHING || error) {
327 ubelem++; 319 error = 0;
328 lastino = ino; 320 continue;
321 }
322
323 *ubufp += ubused;
324 acp->ac_ubleft -= ubused;
325 acp->ac_ubelem++;
329 } 326 }
330 327
331 acp->ac_lastino = lastino; 328 /*
332 acp->ac_ubleft = ubleft; 329 * Post-update *last_agino. At this point, agino will always point one
333 acp->ac_ubelem = ubelem; 330 * inode past the last inode we processed successfully. Hence we
 331 * subtract that inode when setting the *last_agino cursor so that we
332 * return the correct cookie to userspace. On the next bulkstat call,
333 * the inode under the lastino cookie will be skipped as we have already
334 * processed it here.
335 */
336 *last_agino = agino - 1;
334 337
335 return error; 338 return error;
336} 339}
@@ -353,45 +356,33 @@ xfs_bulkstat(
353 xfs_agino_t agino; /* inode # in allocation group */ 356 xfs_agino_t agino; /* inode # in allocation group */
354 xfs_agnumber_t agno; /* allocation group number */ 357 xfs_agnumber_t agno; /* allocation group number */
355 xfs_btree_cur_t *cur; /* btree cursor for ialloc btree */ 358 xfs_btree_cur_t *cur; /* btree cursor for ialloc btree */
356 int end_of_ag; /* set if we've seen the ag end */
357 int error; /* error code */
358 int fmterror;/* bulkstat formatter result */
359 int i; /* loop index */
360 int icount; /* count of inodes good in irbuf */
361 size_t irbsize; /* size of irec buffer in bytes */ 359 size_t irbsize; /* size of irec buffer in bytes */
362 xfs_ino_t ino; /* inode number (filesystem) */
363 xfs_inobt_rec_incore_t *irbp; /* current irec buffer pointer */
364 xfs_inobt_rec_incore_t *irbuf; /* start of irec buffer */ 360 xfs_inobt_rec_incore_t *irbuf; /* start of irec buffer */
365 xfs_inobt_rec_incore_t *irbufend; /* end of good irec buffer entries */
366 xfs_ino_t lastino; /* last inode number returned */
367 int nirbuf; /* size of irbuf */ 361 int nirbuf; /* size of irbuf */
368 int rval; /* return value error code */
369 int tmp; /* result value from btree calls */
370 int ubcount; /* size of user's buffer */ 362 int ubcount; /* size of user's buffer */
371 int ubleft; /* bytes left in user's buffer */ 363 struct xfs_bulkstat_agichunk ac;
372 char __user *ubufp; /* pointer into user's buffer */ 364 int error = 0;
373 int ubelem; /* spaces used in user's buffer */
374 365
375 /* 366 /*
376 * Get the last inode value, see if there's nothing to do. 367 * Get the last inode value, see if there's nothing to do.
377 */ 368 */
378 ino = (xfs_ino_t)*lastinop; 369 agno = XFS_INO_TO_AGNO(mp, *lastinop);
379 lastino = ino; 370 agino = XFS_INO_TO_AGINO(mp, *lastinop);
380 agno = XFS_INO_TO_AGNO(mp, ino);
381 agino = XFS_INO_TO_AGINO(mp, ino);
382 if (agno >= mp->m_sb.sb_agcount || 371 if (agno >= mp->m_sb.sb_agcount ||
383 ino != XFS_AGINO_TO_INO(mp, agno, agino)) { 372 *lastinop != XFS_AGINO_TO_INO(mp, agno, agino)) {
384 *done = 1; 373 *done = 1;
385 *ubcountp = 0; 374 *ubcountp = 0;
386 return 0; 375 return 0;
387 } 376 }
388 377
389 ubcount = *ubcountp; /* statstruct's */ 378 ubcount = *ubcountp; /* statstruct's */
390 ubleft = ubcount * statstruct_size; /* bytes */ 379 ac.ac_ubuffer = &ubuffer;
391 *ubcountp = ubelem = 0; 380 ac.ac_ubleft = ubcount * statstruct_size; /* bytes */;
381 ac.ac_ubelem = 0;
382
383 *ubcountp = 0;
392 *done = 0; 384 *done = 0;
393 fmterror = 0; 385
394 ubufp = ubuffer;
395 irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4); 386 irbuf = kmem_zalloc_greedy(&irbsize, PAGE_SIZE, PAGE_SIZE * 4);
396 if (!irbuf) 387 if (!irbuf)
397 return -ENOMEM; 388 return -ENOMEM;
@@ -402,9 +393,13 @@ xfs_bulkstat(
402 * Loop over the allocation groups, starting from the last 393 * Loop over the allocation groups, starting from the last
403 * inode returned; 0 means start of the allocation group. 394 * inode returned; 0 means start of the allocation group.
404 */ 395 */
405 rval = 0; 396 while (agno < mp->m_sb.sb_agcount) {
406 while (XFS_BULKSTAT_UBLEFT(ubleft) && agno < mp->m_sb.sb_agcount) { 397 struct xfs_inobt_rec_incore *irbp = irbuf;
407 cond_resched(); 398 struct xfs_inobt_rec_incore *irbufend = irbuf + nirbuf;
399 bool end_of_ag = false;
400 int icount = 0;
401 int stat;
402
408 error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp); 403 error = xfs_ialloc_read_agi(mp, NULL, agno, &agbp);
409 if (error) 404 if (error)
410 break; 405 break;
@@ -414,10 +409,6 @@ xfs_bulkstat(
414 */ 409 */
415 cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno, 410 cur = xfs_inobt_init_cursor(mp, NULL, agbp, agno,
416 XFS_BTNUM_INO); 411 XFS_BTNUM_INO);
417 irbp = irbuf;
418 irbufend = irbuf + nirbuf;
419 end_of_ag = 0;
420 icount = 0;
421 if (agino > 0) { 412 if (agino > 0) {
422 /* 413 /*
423 * In the middle of an allocation group, we need to get 414 * In the middle of an allocation group, we need to get
@@ -427,22 +418,23 @@ xfs_bulkstat(
427 418
428 error = xfs_bulkstat_grab_ichunk(cur, agino, &icount, &r); 419 error = xfs_bulkstat_grab_ichunk(cur, agino, &icount, &r);
429 if (error) 420 if (error)
430 break; 421 goto del_cursor;
431 if (icount) { 422 if (icount) {
432 irbp->ir_startino = r.ir_startino; 423 irbp->ir_startino = r.ir_startino;
433 irbp->ir_freecount = r.ir_freecount; 424 irbp->ir_freecount = r.ir_freecount;
434 irbp->ir_free = r.ir_free; 425 irbp->ir_free = r.ir_free;
435 irbp++; 426 irbp++;
436 agino = r.ir_startino + XFS_INODES_PER_CHUNK;
437 } 427 }
438 /* Increment to the next record */ 428 /* Increment to the next record */
439 error = xfs_btree_increment(cur, 0, &tmp); 429 error = xfs_btree_increment(cur, 0, &stat);
440 } else { 430 } else {
441 /* Start of ag. Lookup the first inode chunk */ 431 /* Start of ag. Lookup the first inode chunk */
442 error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &tmp); 432 error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &stat);
433 }
434 if (error || stat == 0) {
435 end_of_ag = true;
436 goto del_cursor;
443 } 437 }
444 if (error)
445 break;
446 438
447 /* 439 /*
448 * Loop through inode btree records in this ag, 440 * Loop through inode btree records in this ag,
@@ -451,10 +443,10 @@ xfs_bulkstat(
451 while (irbp < irbufend && icount < ubcount) { 443 while (irbp < irbufend && icount < ubcount) {
452 struct xfs_inobt_rec_incore r; 444 struct xfs_inobt_rec_incore r;
453 445
454 error = xfs_inobt_get_rec(cur, &r, &i); 446 error = xfs_inobt_get_rec(cur, &r, &stat);
455 if (error || i == 0) { 447 if (error || stat == 0) {
456 end_of_ag = 1; 448 end_of_ag = true;
457 break; 449 goto del_cursor;
458 } 450 }
459 451
460 /* 452 /*
@@ -469,77 +461,79 @@ xfs_bulkstat(
469 irbp++; 461 irbp++;
470 icount += XFS_INODES_PER_CHUNK - r.ir_freecount; 462 icount += XFS_INODES_PER_CHUNK - r.ir_freecount;
471 } 463 }
472 /* 464 error = xfs_btree_increment(cur, 0, &stat);
473 * Set agino to after this chunk and bump the cursor. 465 if (error || stat == 0) {
474 */ 466 end_of_ag = true;
475 agino = r.ir_startino + XFS_INODES_PER_CHUNK; 467 goto del_cursor;
476 error = xfs_btree_increment(cur, 0, &tmp); 468 }
477 cond_resched(); 469 cond_resched();
478 } 470 }
471
479 /* 472 /*
480 * Drop the btree buffers and the agi buffer. 473 * Drop the btree buffers and the agi buffer as we can't hold any
481 * We can't hold any of the locks these represent 474 * of the locks these represent when calling iget. If there is a
482 * when calling iget. 475 * pending error, then we are done.
483 */ 476 */
477del_cursor:
484 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR); 478 xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
485 xfs_buf_relse(agbp); 479 xfs_buf_relse(agbp);
480 if (error)
481 break;
486 /* 482 /*
487 * Now format all the good inodes into the user's buffer. 483 * Now format all the good inodes into the user's buffer. The
484 * call to xfs_bulkstat_ag_ichunk() sets up the agino pointer
485 * for the next loop iteration.
488 */ 486 */
489 irbufend = irbp; 487 irbufend = irbp;
490 for (irbp = irbuf; 488 for (irbp = irbuf;
491 irbp < irbufend && XFS_BULKSTAT_UBLEFT(ubleft); irbp++) { 489 irbp < irbufend && ac.ac_ubleft >= statstruct_size;
492 struct xfs_bulkstat_agichunk ac; 490 irbp++) {
493
494 ac.ac_lastino = lastino;
495 ac.ac_ubuffer = &ubuffer;
496 ac.ac_ubleft = ubleft;
497 ac.ac_ubelem = ubelem;
498 error = xfs_bulkstat_ag_ichunk(mp, agno, irbp, 491 error = xfs_bulkstat_ag_ichunk(mp, agno, irbp,
499 formatter, statstruct_size, &ac); 492 formatter, statstruct_size, &ac,
493 &agino);
500 if (error) 494 if (error)
501 rval = error; 495 break;
502
503 lastino = ac.ac_lastino;
504 ubleft = ac.ac_ubleft;
505 ubelem = ac.ac_ubelem;
506 496
507 cond_resched(); 497 cond_resched();
508 } 498 }
499
509 /* 500 /*
510 * Set up for the next loop iteration. 501 * If we've run out of space or had a formatting error, we
502 * are now done
511 */ 503 */
512 if (XFS_BULKSTAT_UBLEFT(ubleft)) { 504 if (ac.ac_ubleft < statstruct_size || error)
513 if (end_of_ag) {
514 agno++;
515 agino = 0;
516 } else
517 agino = XFS_INO_TO_AGINO(mp, lastino);
518 } else
519 break; 505 break;
506
507 if (end_of_ag) {
508 agno++;
509 agino = 0;
510 }
520 } 511 }
521 /* 512 /*
522 * Done, we're either out of filesystem or space to put the data. 513 * Done, we're either out of filesystem or space to put the data.
523 */ 514 */
524 kmem_free(irbuf); 515 kmem_free(irbuf);
525 *ubcountp = ubelem; 516 *ubcountp = ac.ac_ubelem;
517
526 /* 518 /*
527 * Found some inodes, return them now and return the error next time. 519 * We found some inodes, so clear the error status and return them.
520 * The lastino pointer will point directly at the inode that triggered
521 * any error that occurred, so on the next call the error will be
522 * triggered again and propagated to userspace as there will be no
523 * formatted inodes in the buffer.
528 */ 524 */
529 if (ubelem) 525 if (ac.ac_ubelem)
530 rval = 0; 526 error = 0;
531 if (agno >= mp->m_sb.sb_agcount) { 527
532 /* 528 /*
533 * If we ran out of filesystem, mark lastino as off 529 * If we ran out of filesystem, lastino will point off the end of
534 * the end of the filesystem, so the next call 530 * the filesystem so the next call will return immediately.
535 * will return immediately. 531 */
536 */ 532 *lastinop = XFS_AGINO_TO_INO(mp, agno, agino);
537 *lastinop = (xfs_ino_t)XFS_AGINO_TO_INO(mp, agno, 0); 533 if (agno >= mp->m_sb.sb_agcount)
538 *done = 1; 534 *done = 1;
539 } else
540 *lastinop = (xfs_ino_t)lastino;
541 535
542 return rval; 536 return error;
543} 537}
544 538
545int 539int
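
Editor's note: the xfs_itable.c changes above turn *lastinop into a cursor that points at the last inode actually formatted, so a repeat call resumes one inode later. A hedged user-space sketch of consuming that cursor via XFS_IOC_FSBULKSTAT is below; the xfsprogs header path and the chunk size of 64 are assumptions, not part of this patch.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include <xfs/xfs.h>		/* XFS_IOC_FSBULKSTAT, struct xfs_fsop_bulkreq (xfsprogs) */

int main(int argc, char **argv)
{
	struct xfs_bstat buf[64];	/* assumed chunk size */
	struct xfs_fsop_bulkreq req;
	__u64 lastino = 0;		/* 0 = start of the filesystem */
	__s32 count = 0;
	int fd = open(argc > 1 ? argv[1] : "/", O_RDONLY);

	if (fd < 0)
		return 1;

	req.lastip = &lastino;		/* in/out cursor cookie */
	req.icount = 64;
	req.ubuffer = buf;
	req.ocount = &count;

	/* Each call picks up where the previous one left off; the kernel
	 * rewrites lastino to the last inode it managed to format. */
	while (ioctl(fd, XFS_IOC_FSBULKSTAT, &req) == 0 && count > 0)
		printf("got %d inodes, cursor now %llu\n",
		       count, (unsigned long long)lastino);

	close(fd);
	return 0;
}
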
diff --git a/fs/xfs/xfs_itable.h b/fs/xfs/xfs_itable.h
index aaed08022eb9..6ea8b3912fa4 100644
--- a/fs/xfs/xfs_itable.h
+++ b/fs/xfs/xfs_itable.h
@@ -30,22 +30,6 @@ typedef int (*bulkstat_one_pf)(struct xfs_mount *mp,
30 int *ubused, 30 int *ubused,
31 int *stat); 31 int *stat);
32 32
33struct xfs_bulkstat_agichunk {
34 xfs_ino_t ac_lastino; /* last inode returned */
35 char __user **ac_ubuffer;/* pointer into user's buffer */
36 int ac_ubleft; /* bytes left in user's buffer */
37 int ac_ubelem; /* spaces used in user's buffer */
38};
39
40int
41xfs_bulkstat_ag_ichunk(
42 struct xfs_mount *mp,
43 xfs_agnumber_t agno,
44 struct xfs_inobt_rec_incore *irbp,
45 bulkstat_one_pf formatter,
46 size_t statstruct_size,
47 struct xfs_bulkstat_agichunk *acp);
48
49/* 33/*
50 * Values for stat return value. 34 * Values for stat return value.
51 */ 35 */
diff --git a/include/acpi/acnames.h b/include/acpi/acnames.h
index f97804bdf1ff..7461327e14e4 100644
--- a/include/acpi/acnames.h
+++ b/include/acpi/acnames.h
@@ -52,6 +52,7 @@
52#define METHOD_NAME__CBA "_CBA" 52#define METHOD_NAME__CBA "_CBA"
53#define METHOD_NAME__CID "_CID" 53#define METHOD_NAME__CID "_CID"
54#define METHOD_NAME__CRS "_CRS" 54#define METHOD_NAME__CRS "_CRS"
55#define METHOD_NAME__DDN "_DDN"
55#define METHOD_NAME__HID "_HID" 56#define METHOD_NAME__HID "_HID"
56#define METHOD_NAME__INI "_INI" 57#define METHOD_NAME__INI "_INI"
57#define METHOD_NAME__PLD "_PLD" 58#define METHOD_NAME__PLD "_PLD"
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index 57ee0528aacb..f34a0835aa4f 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -433,6 +433,7 @@ int acpi_device_set_power(struct acpi_device *device, int state);
433int acpi_bus_init_power(struct acpi_device *device); 433int acpi_bus_init_power(struct acpi_device *device);
434int acpi_device_fix_up_power(struct acpi_device *device); 434int acpi_device_fix_up_power(struct acpi_device *device);
435int acpi_bus_update_power(acpi_handle handle, int *state_p); 435int acpi_bus_update_power(acpi_handle handle, int *state_p);
436int acpi_device_update_power(struct acpi_device *device, int *state_p);
436bool acpi_bus_power_manageable(acpi_handle handle); 437bool acpi_bus_power_manageable(acpi_handle handle);
437 438
438#ifdef CONFIG_PM 439#ifdef CONFIG_PM
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 9fc1d71c82bc..ab2acf629a64 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -46,7 +46,7 @@
46 46
47/* Current ACPICA subsystem version in YYYYMMDD format */ 47/* Current ACPICA subsystem version in YYYYMMDD format */
48 48
49#define ACPI_CA_VERSION 0x20140828 49#define ACPI_CA_VERSION 0x20140926
50 50
51#include <acpi/acconfig.h> 51#include <acpi/acconfig.h>
52#include <acpi/actypes.h> 52#include <acpi/actypes.h>
diff --git a/include/acpi/actypes.h b/include/acpi/actypes.h
index ac03ec81d342..7000e66f768e 100644
--- a/include/acpi/actypes.h
+++ b/include/acpi/actypes.h
@@ -721,7 +721,7 @@ typedef u32 acpi_event_type;
721 * | | | +--- Enabled for wake? 721 * | | | +--- Enabled for wake?
722 * | | +----- Set? 722 * | | +----- Set?
723 * | +------- Has a handler? 723 * | +------- Has a handler?
724 * +----------- <Reserved> 724 * +------------- <Reserved>
725 */ 725 */
726typedef u32 acpi_event_status; 726typedef u32 acpi_event_status;
727 727
@@ -729,7 +729,7 @@ typedef u32 acpi_event_status;
729#define ACPI_EVENT_FLAG_ENABLED (acpi_event_status) 0x01 729#define ACPI_EVENT_FLAG_ENABLED (acpi_event_status) 0x01
730#define ACPI_EVENT_FLAG_WAKE_ENABLED (acpi_event_status) 0x02 730#define ACPI_EVENT_FLAG_WAKE_ENABLED (acpi_event_status) 0x02
731#define ACPI_EVENT_FLAG_SET (acpi_event_status) 0x04 731#define ACPI_EVENT_FLAG_SET (acpi_event_status) 0x04
732#define ACPI_EVENT_FLAG_HANDLE (acpi_event_status) 0x08 732#define ACPI_EVENT_FLAG_HAS_HANDLER (acpi_event_status) 0x08
733 733
734/* Actions for acpi_set_gpe, acpi_gpe_wakeup, acpi_hw_low_set_gpe */ 734/* Actions for acpi_set_gpe, acpi_gpe_wakeup, acpi_hw_low_set_gpe */
735 735
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index e973540cd15b..2dd405c9be78 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -74,7 +74,6 @@
74 {0x1002, 0x4C64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \ 74 {0x1002, 0x4C64, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
75 {0x1002, 0x4C66, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \ 75 {0x1002, 0x4C66, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
76 {0x1002, 0x4C67, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \ 76 {0x1002, 0x4C67, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV250|RADEON_IS_MOBILITY}, \
77 {0x1002, 0x4C6E, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV280|RADEON_IS_MOBILITY}, \
78 {0x1002, 0x4E44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ 77 {0x1002, 0x4E44, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
79 {0x1002, 0x4E45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ 78 {0x1002, 0x4E45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
80 {0x1002, 0x4E46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \ 79 {0x1002, 0x4E46, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_R300}, \
diff --git a/include/dt-bindings/clock/imx6qdl-clock.h b/include/dt-bindings/clock/imx6qdl-clock.h
index ddaef8620b2c..b690cdba163b 100644
--- a/include/dt-bindings/clock/imx6qdl-clock.h
+++ b/include/dt-bindings/clock/imx6qdl-clock.h
@@ -62,8 +62,8 @@
62#define IMX6QDL_CLK_USDHC3_SEL 50 62#define IMX6QDL_CLK_USDHC3_SEL 50
63#define IMX6QDL_CLK_USDHC4_SEL 51 63#define IMX6QDL_CLK_USDHC4_SEL 51
64#define IMX6QDL_CLK_ENFC_SEL 52 64#define IMX6QDL_CLK_ENFC_SEL 52
65#define IMX6QDL_CLK_EMI_SEL 53 65#define IMX6QDL_CLK_EIM_SEL 53
66#define IMX6QDL_CLK_EMI_SLOW_SEL 54 66#define IMX6QDL_CLK_EIM_SLOW_SEL 54
67#define IMX6QDL_CLK_VDO_AXI_SEL 55 67#define IMX6QDL_CLK_VDO_AXI_SEL 55
68#define IMX6QDL_CLK_VPU_AXI_SEL 56 68#define IMX6QDL_CLK_VPU_AXI_SEL 56
69#define IMX6QDL_CLK_CKO1_SEL 57 69#define IMX6QDL_CLK_CKO1_SEL 57
@@ -106,8 +106,8 @@
106#define IMX6QDL_CLK_USDHC4_PODF 94 106#define IMX6QDL_CLK_USDHC4_PODF 94
107#define IMX6QDL_CLK_ENFC_PRED 95 107#define IMX6QDL_CLK_ENFC_PRED 95
108#define IMX6QDL_CLK_ENFC_PODF 96 108#define IMX6QDL_CLK_ENFC_PODF 96
109#define IMX6QDL_CLK_EMI_PODF 97 109#define IMX6QDL_CLK_EIM_PODF 97
110#define IMX6QDL_CLK_EMI_SLOW_PODF 98 110#define IMX6QDL_CLK_EIM_SLOW_PODF 98
111#define IMX6QDL_CLK_VPU_AXI_PODF 99 111#define IMX6QDL_CLK_VPU_AXI_PODF 99
112#define IMX6QDL_CLK_CKO1_PODF 100 112#define IMX6QDL_CLK_CKO1_PODF 100
113#define IMX6QDL_CLK_AXI 101 113#define IMX6QDL_CLK_AXI 101
diff --git a/include/dt-bindings/clock/qcom,mmcc-apq8084.h b/include/dt-bindings/clock/qcom,mmcc-apq8084.h
index a929f86d0ddd..d72b5b35f15e 100644
--- a/include/dt-bindings/clock/qcom,mmcc-apq8084.h
+++ b/include/dt-bindings/clock/qcom,mmcc-apq8084.h
@@ -60,7 +60,7 @@
60#define ESC1_CLK_SRC 43 60#define ESC1_CLK_SRC 43
61#define HDMI_CLK_SRC 44 61#define HDMI_CLK_SRC 44
62#define VSYNC_CLK_SRC 45 62#define VSYNC_CLK_SRC 45
63#define RBCPR_CLK_SRC 46 63#define MMSS_RBCPR_CLK_SRC 46
64#define RBBMTIMER_CLK_SRC 47 64#define RBBMTIMER_CLK_SRC 47
65#define MAPLE_CLK_SRC 48 65#define MAPLE_CLK_SRC 48
66#define VDP_CLK_SRC 49 66#define VDP_CLK_SRC 49
diff --git a/include/dt-bindings/clock/vf610-clock.h b/include/dt-bindings/clock/vf610-clock.h
index d6b56b21539b..801c0ac50c47 100644
--- a/include/dt-bindings/clock/vf610-clock.h
+++ b/include/dt-bindings/clock/vf610-clock.h
@@ -21,24 +21,24 @@
21#define VF610_CLK_FASK_CLK_SEL 8 21#define VF610_CLK_FASK_CLK_SEL 8
22#define VF610_CLK_AUDIO_EXT 9 22#define VF610_CLK_AUDIO_EXT 9
23#define VF610_CLK_ENET_EXT 10 23#define VF610_CLK_ENET_EXT 10
24#define VF610_CLK_PLL1_MAIN 11 24#define VF610_CLK_PLL1_SYS 11
25#define VF610_CLK_PLL1_PFD1 12 25#define VF610_CLK_PLL1_PFD1 12
26#define VF610_CLK_PLL1_PFD2 13 26#define VF610_CLK_PLL1_PFD2 13
27#define VF610_CLK_PLL1_PFD3 14 27#define VF610_CLK_PLL1_PFD3 14
28#define VF610_CLK_PLL1_PFD4 15 28#define VF610_CLK_PLL1_PFD4 15
29#define VF610_CLK_PLL2_MAIN 16 29#define VF610_CLK_PLL2_BUS 16
30#define VF610_CLK_PLL2_PFD1 17 30#define VF610_CLK_PLL2_PFD1 17
31#define VF610_CLK_PLL2_PFD2 18 31#define VF610_CLK_PLL2_PFD2 18
32#define VF610_CLK_PLL2_PFD3 19 32#define VF610_CLK_PLL2_PFD3 19
33#define VF610_CLK_PLL2_PFD4 20 33#define VF610_CLK_PLL2_PFD4 20
34#define VF610_CLK_PLL3_MAIN 21 34#define VF610_CLK_PLL3_USB_OTG 21
35#define VF610_CLK_PLL3_PFD1 22 35#define VF610_CLK_PLL3_PFD1 22
36#define VF610_CLK_PLL3_PFD2 23 36#define VF610_CLK_PLL3_PFD2 23
37#define VF610_CLK_PLL3_PFD3 24 37#define VF610_CLK_PLL3_PFD3 24
38#define VF610_CLK_PLL3_PFD4 25 38#define VF610_CLK_PLL3_PFD4 25
39#define VF610_CLK_PLL4_MAIN 26 39#define VF610_CLK_PLL4_AUDIO 26
40#define VF610_CLK_PLL5_MAIN 27 40#define VF610_CLK_PLL5_ENET 27
41#define VF610_CLK_PLL6_MAIN 28 41#define VF610_CLK_PLL6_VIDEO 28
42#define VF610_CLK_PLL3_MAIN_DIV 29 42#define VF610_CLK_PLL3_MAIN_DIV 29
43#define VF610_CLK_PLL4_MAIN_DIV 30 43#define VF610_CLK_PLL4_MAIN_DIV 30
44#define VF610_CLK_PLL6_MAIN_DIV 31 44#define VF610_CLK_PLL6_MAIN_DIV 31
@@ -166,9 +166,32 @@
166#define VF610_CLK_DMAMUX3 153 166#define VF610_CLK_DMAMUX3 153
167#define VF610_CLK_FLEXCAN0_EN 154 167#define VF610_CLK_FLEXCAN0_EN 154
168#define VF610_CLK_FLEXCAN1_EN 155 168#define VF610_CLK_FLEXCAN1_EN 155
169#define VF610_CLK_PLL7_MAIN 156 169#define VF610_CLK_PLL7_USB_HOST 156
170#define VF610_CLK_USBPHY0 157 170#define VF610_CLK_USBPHY0 157
171#define VF610_CLK_USBPHY1 158 171#define VF610_CLK_USBPHY1 158
172#define VF610_CLK_END 159 172#define VF610_CLK_LVDS1_IN 159
173#define VF610_CLK_ANACLK1 160
174#define VF610_CLK_PLL1_BYPASS_SRC 161
175#define VF610_CLK_PLL2_BYPASS_SRC 162
176#define VF610_CLK_PLL3_BYPASS_SRC 163
177#define VF610_CLK_PLL4_BYPASS_SRC 164
178#define VF610_CLK_PLL5_BYPASS_SRC 165
179#define VF610_CLK_PLL6_BYPASS_SRC 166
180#define VF610_CLK_PLL7_BYPASS_SRC 167
181#define VF610_CLK_PLL1 168
182#define VF610_CLK_PLL2 169
183#define VF610_CLK_PLL3 170
184#define VF610_CLK_PLL4 171
185#define VF610_CLK_PLL5 172
186#define VF610_CLK_PLL6 173
187#define VF610_CLK_PLL7 174
188#define VF610_PLL1_BYPASS 175
189#define VF610_PLL2_BYPASS 176
190#define VF610_PLL3_BYPASS 177
191#define VF610_PLL4_BYPASS 178
192#define VF610_PLL5_BYPASS 179
193#define VF610_PLL6_BYPASS 180
194#define VF610_PLL7_BYPASS 181
195#define VF610_CLK_END 182
173 196
174#endif /* __DT_BINDINGS_CLOCK_VF610_H */ 197#endif /* __DT_BINDINGS_CLOCK_VF610_H */
diff --git a/include/dt-bindings/pinctrl/dra.h b/include/dt-bindings/pinctrl/dra.h
index 3d33794e4f3e..7448edff4723 100644
--- a/include/dt-bindings/pinctrl/dra.h
+++ b/include/dt-bindings/pinctrl/dra.h
@@ -40,8 +40,8 @@
40 40
41/* Active pin states */ 41/* Active pin states */
42#define PIN_OUTPUT (0 | PULL_DIS) 42#define PIN_OUTPUT (0 | PULL_DIS)
43#define PIN_OUTPUT_PULLUP (PIN_OUTPUT | PULL_ENA | PULL_UP) 43#define PIN_OUTPUT_PULLUP (PULL_UP)
44#define PIN_OUTPUT_PULLDOWN (PIN_OUTPUT | PULL_ENA) 44#define PIN_OUTPUT_PULLDOWN (0)
45#define PIN_INPUT (INPUT_EN | PULL_DIS) 45#define PIN_INPUT (INPUT_EN | PULL_DIS)
46#define PIN_INPUT_SLEW (INPUT_EN | SLEWCONTROL) 46#define PIN_INPUT_SLEW (INPUT_EN | SLEWCONTROL)
47#define PIN_INPUT_PULLUP (PULL_ENA | INPUT_EN | PULL_UP) 47#define PIN_INPUT_PULLUP (PULL_ENA | INPUT_EN | PULL_UP)
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index b7926bb9b444..407a12f663eb 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -432,6 +432,7 @@ static inline bool acpi_driver_match_device(struct device *dev,
432int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *); 432int acpi_device_uevent_modalias(struct device *, struct kobj_uevent_env *);
433int acpi_device_modalias(struct device *, char *, int); 433int acpi_device_modalias(struct device *, char *, int);
434 434
435struct platform_device *acpi_create_platform_device(struct acpi_device *);
435#define ACPI_PTR(_ptr) (_ptr) 436#define ACPI_PTR(_ptr) (_ptr)
436 437
437#else /* !CONFIG_ACPI */ 438#else /* !CONFIG_ACPI */
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 36dffeccebdb..e58fe7df8b9c 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -90,7 +90,7 @@ extern unsigned compat_dir_class[];
90extern unsigned compat_chattr_class[]; 90extern unsigned compat_chattr_class[];
91extern unsigned compat_signal_class[]; 91extern unsigned compat_signal_class[];
92 92
93extern int __weak audit_classify_compat_syscall(int abi, unsigned syscall); 93extern int audit_classify_compat_syscall(int abi, unsigned syscall);
94 94
95/* audit_names->type values */ 95/* audit_names->type values */
96#define AUDIT_TYPE_UNKNOWN 0 /* we don't know yet */ 96#define AUDIT_TYPE_UNKNOWN 0 /* we don't know yet */
diff --git a/include/linux/bitops.h b/include/linux/bitops.h
index be5fd38bd5a0..5d858e02997f 100644
--- a/include/linux/bitops.h
+++ b/include/linux/bitops.h
@@ -18,8 +18,11 @@
18 * position @h. For example 18 * position @h. For example
19 * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000. 19 * GENMASK_ULL(39, 21) gives us the 64bit vector 0x000000ffffe00000.
20 */ 20 */
21#define GENMASK(h, l) (((U32_C(1) << ((h) - (l) + 1)) - 1) << (l)) 21#define GENMASK(h, l) \
22#define GENMASK_ULL(h, l) (((U64_C(1) << ((h) - (l) + 1)) - 1) << (l)) 22 (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))
23
24#define GENMASK_ULL(h, l) \
25 (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h))))
23 26
24extern unsigned int __sw_hweight8(unsigned int w); 27extern unsigned int __sw_hweight8(unsigned int w);
25extern unsigned int __sw_hweight16(unsigned int w); 28extern unsigned int __sw_hweight16(unsigned int w);
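
Editor's note: the new GENMASK()/GENMASK_ULL() above build the mask from two shifts of an all-ones value instead of "(1 << width) - 1", which keeps full-width masks well defined instead of shifting a 1 past the top bit. A minimal user-space check of the new form, assuming a 64-bit unsigned long:

#include <stdio.h>

#define BITS_PER_LONG	(8 * (int)sizeof(unsigned long))
#define GENMASK(h, l) \
	(((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h))))

int main(void)
{
	/* Matches the example in the header comment. */
	printf("GENMASK(39, 21) = %#lx\n", GENMASK(39, 21)); /* 0xffffe00000 */
	/* Full-width mask: the old form would shift 1 by the type width. */
	printf("GENMASK(63, 0)  = %#lx\n", GENMASK(63, 0));  /* 0xffffffffffffffff */
	return 0;
}
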
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 0207a78a8d82..aac0f9ea952a 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1136,8 +1136,7 @@ static inline bool blk_needs_flush_plug(struct task_struct *tsk)
1136/* 1136/*
1137 * tag stuff 1137 * tag stuff
1138 */ 1138 */
1139#define blk_rq_tagged(rq) \ 1139#define blk_rq_tagged(rq) ((rq)->cmd_flags & REQ_QUEUED)
1140 ((rq)->mq_ctx || ((rq)->cmd_flags & REQ_QUEUED))
1141extern int blk_queue_start_tag(struct request_queue *, struct request *); 1140extern int blk_queue_start_tag(struct request_queue *, struct request *);
1142extern struct request *blk_queue_find_tag(struct request_queue *, int); 1141extern struct request *blk_queue_find_tag(struct request_queue *, int);
1143extern void blk_queue_end_tag(struct request_queue *, struct request *); 1142extern void blk_queue_end_tag(struct request_queue *, struct request *);
@@ -1583,13 +1582,13 @@ static inline bool blk_integrity_merge_rq(struct request_queue *rq,
1583 struct request *r1, 1582 struct request *r1,
1584 struct request *r2) 1583 struct request *r2)
1585{ 1584{
1586 return 0; 1585 return true;
1587} 1586}
1588static inline bool blk_integrity_merge_bio(struct request_queue *rq, 1587static inline bool blk_integrity_merge_bio(struct request_queue *rq,
1589 struct request *r, 1588 struct request *r,
1590 struct bio *b) 1589 struct bio *b)
1591{ 1590{
1592 return 0; 1591 return true;
1593} 1592}
1594static inline bool blk_integrity_is_initialized(struct gendisk *g) 1593static inline bool blk_integrity_is_initialized(struct gendisk *g)
1595{ 1594{
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 4e2bd4c95b66..0995c2de8162 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -46,6 +46,7 @@ extern unsigned long init_bootmem_node(pg_data_t *pgdat,
46extern unsigned long init_bootmem(unsigned long addr, unsigned long memend); 46extern unsigned long init_bootmem(unsigned long addr, unsigned long memend);
47 47
48extern unsigned long free_all_bootmem(void); 48extern unsigned long free_all_bootmem(void);
49extern void reset_node_managed_pages(pg_data_t *pgdat);
49extern void reset_all_zones_managed_pages(void); 50extern void reset_all_zones_managed_pages(void);
50 51
51extern void free_bootmem_node(pg_data_t *pgdat, 52extern void free_bootmem_node(pg_data_t *pgdat,
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 324329ceea1e..73b45225a7ca 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -175,12 +175,13 @@ void __wait_on_buffer(struct buffer_head *);
175wait_queue_head_t *bh_waitq_head(struct buffer_head *bh); 175wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
176struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block, 176struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
177 unsigned size); 177 unsigned size);
178struct buffer_head *__getblk(struct block_device *bdev, sector_t block, 178struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block,
179 unsigned size); 179 unsigned size, gfp_t gfp);
180void __brelse(struct buffer_head *); 180void __brelse(struct buffer_head *);
181void __bforget(struct buffer_head *); 181void __bforget(struct buffer_head *);
182void __breadahead(struct block_device *, sector_t block, unsigned int size); 182void __breadahead(struct block_device *, sector_t block, unsigned int size);
183struct buffer_head *__bread(struct block_device *, sector_t block, unsigned size); 183struct buffer_head *__bread_gfp(struct block_device *,
184 sector_t block, unsigned size, gfp_t gfp);
184void invalidate_bh_lrus(void); 185void invalidate_bh_lrus(void);
185struct buffer_head *alloc_buffer_head(gfp_t gfp_flags); 186struct buffer_head *alloc_buffer_head(gfp_t gfp_flags);
186void free_buffer_head(struct buffer_head * bh); 187void free_buffer_head(struct buffer_head * bh);
@@ -295,7 +296,13 @@ static inline void bforget(struct buffer_head *bh)
295static inline struct buffer_head * 296static inline struct buffer_head *
296sb_bread(struct super_block *sb, sector_t block) 297sb_bread(struct super_block *sb, sector_t block)
297{ 298{
298 return __bread(sb->s_bdev, block, sb->s_blocksize); 299 return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
300}
301
302static inline struct buffer_head *
303sb_bread_unmovable(struct super_block *sb, sector_t block)
304{
305 return __bread_gfp(sb->s_bdev, block, sb->s_blocksize, 0);
299} 306}
300 307
301static inline void 308static inline void
@@ -307,7 +314,7 @@ sb_breadahead(struct super_block *sb, sector_t block)
307static inline struct buffer_head * 314static inline struct buffer_head *
308sb_getblk(struct super_block *sb, sector_t block) 315sb_getblk(struct super_block *sb, sector_t block)
309{ 316{
310 return __getblk(sb->s_bdev, block, sb->s_blocksize); 317 return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
311} 318}
312 319
313static inline struct buffer_head * 320static inline struct buffer_head *
@@ -344,6 +351,36 @@ static inline void lock_buffer(struct buffer_head *bh)
344 __lock_buffer(bh); 351 __lock_buffer(bh);
345} 352}
346 353
354static inline struct buffer_head *getblk_unmovable(struct block_device *bdev,
355 sector_t block,
356 unsigned size)
357{
358 return __getblk_gfp(bdev, block, size, 0);
359}
360
361static inline struct buffer_head *__getblk(struct block_device *bdev,
362 sector_t block,
363 unsigned size)
364{
365 return __getblk_gfp(bdev, block, size, __GFP_MOVABLE);
366}
367
368/**
369 * __bread() - reads a specified block and returns the bh
370 * @bdev: the block_device to read from
371 * @block: number of block
372 * @size: size (in bytes) to read
373 *
374 * Reads a specified block, and returns buffer head that contains it.
375 * The page cache is allocated from movable area so that it can be migrated.
376 * It returns NULL if the block was unreadable.
377 */
378static inline struct buffer_head *
379__bread(struct block_device *bdev, sector_t block, unsigned size)
380{
381 return __bread_gfp(bdev, block, size, __GFP_MOVABLE);
382}
383
347extern int __set_page_dirty_buffers(struct page *page); 384extern int __set_page_dirty_buffers(struct page *page);
348 385
349#else /* CONFIG_BLOCK */ 386#else /* CONFIG_BLOCK */
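
Editor's note: the buffer_head.h changes above route reads through __getblk_gfp()/__bread_gfp(); plain sb_bread()/sb_getblk()/__bread() now request __GFP_MOVABLE page cache, while sb_bread_unmovable() and getblk_unmovable() opt out for buffers that stay pinned. A hedged kernel-style sketch of the intended split; the fill_super function and block numbers are invented for illustration.

#include <linux/fs.h>
#include <linux/buffer_head.h>

static int example_fill_super(struct super_block *sb)
{
	struct buffer_head *bh;

	/* Ordinary, short-lived metadata read: movable page cache is fine. */
	bh = sb_bread(sb, 0);
	if (!bh)
		return -EIO;
	brelse(bh);

	/* A buffer held for the whole mount: keep it unmovable so page
	 * migration and CMA allocations never have to wait on it. */
	bh = sb_bread_unmovable(sb, 1);
	if (!bh)
		return -EIO;
	/* ... normally stashed in the sb private info and released at unmount ... */
	brelse(bh);
	return 0;
}
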
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 6992afc6ba7f..b37ea95bc348 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -99,6 +99,12 @@ inval_skb:
99 return 1; 99 return 1;
100} 100}
101 101
102static inline bool can_is_canfd_skb(const struct sk_buff *skb)
103{
104 /* the CAN specific type of skb is identified by its data length */
105 return skb->len == CANFD_MTU;
106}
107
102/* get data length from can_dlc with sanitized can_dlc */ 108/* get data length from can_dlc with sanitized can_dlc */
103u8 can_dlc2len(u8 can_dlc); 109u8 can_dlc2len(u8 can_dlc);
104 110
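
Editor's note: can_is_canfd_skb() above tells classic CAN and CAN FD skbs apart purely by skb length (CAN_MTU vs CANFD_MTU). A hedged driver-side sketch of using it; the function name and debug prints are illustrative only.

#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/netdevice.h>

static void example_log_frame(struct net_device *dev, struct sk_buff *skb)
{
	if (can_is_canfd_skb(skb)) {
		struct canfd_frame *cfd = (struct canfd_frame *)skb->data;

		netdev_dbg(dev, "CAN FD frame, %u data bytes\n", cfd->len);
	} else {
		struct can_frame *cf = (struct can_frame *)skb->data;

		netdev_dbg(dev, "classic CAN frame, dlc %u\n", cf->can_dlc);
	}
}
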
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index be21af149f11..2839c639f092 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -352,7 +352,6 @@ struct clk_divider {
352#define CLK_DIVIDER_READ_ONLY BIT(5) 352#define CLK_DIVIDER_READ_ONLY BIT(5)
353 353
354extern const struct clk_ops clk_divider_ops; 354extern const struct clk_ops clk_divider_ops;
355extern const struct clk_ops clk_divider_ro_ops;
356struct clk *clk_register_divider(struct device *dev, const char *name, 355struct clk *clk_register_divider(struct device *dev, const char *name,
357 const char *parent_name, unsigned long flags, 356 const char *parent_name, unsigned long flags,
358 void __iomem *reg, u8 shift, u8 width, 357 void __iomem *reg, u8 shift, u8 width,
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index 653f0e2b6ca9..abcafaa20b86 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -287,7 +287,7 @@ extern struct clocksource* clocksource_get_next(void);
287extern void clocksource_change_rating(struct clocksource *cs, int rating); 287extern void clocksource_change_rating(struct clocksource *cs, int rating);
288extern void clocksource_suspend(void); 288extern void clocksource_suspend(void);
289extern void clocksource_resume(void); 289extern void clocksource_resume(void);
290extern struct clocksource * __init __weak clocksource_default_clock(void); 290extern struct clocksource * __init clocksource_default_clock(void);
291extern void clocksource_mark_unstable(struct clocksource *cs); 291extern void clocksource_mark_unstable(struct clocksource *cs);
292 292
293extern u64 293extern u64
diff --git a/include/linux/cma.h b/include/linux/cma.h
index 0430ed05d3b9..a93438beb33c 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -18,12 +18,12 @@ struct cma;
18extern phys_addr_t cma_get_base(struct cma *cma); 18extern phys_addr_t cma_get_base(struct cma *cma);
19extern unsigned long cma_get_size(struct cma *cma); 19extern unsigned long cma_get_size(struct cma *cma);
20 20
21extern int __init cma_declare_contiguous(phys_addr_t size, 21extern int __init cma_declare_contiguous(phys_addr_t base,
22 phys_addr_t base, phys_addr_t limit, 22 phys_addr_t size, phys_addr_t limit,
23 phys_addr_t alignment, unsigned int order_per_bit, 23 phys_addr_t alignment, unsigned int order_per_bit,
24 bool fixed, struct cma **res_cma); 24 bool fixed, struct cma **res_cma);
25extern int cma_init_reserved_mem(phys_addr_t size, 25extern int cma_init_reserved_mem(phys_addr_t base,
26 phys_addr_t base, int order_per_bit, 26 phys_addr_t size, int order_per_bit,
27 struct cma **res_cma); 27 struct cma **res_cma);
28extern struct page *cma_alloc(struct cma *cma, int count, unsigned int align); 28extern struct page *cma_alloc(struct cma *cma, int count, unsigned int align);
29extern bool cma_release(struct cma *cma, struct page *pages, int count); 29extern bool cma_release(struct cma *cma, struct page *pages, int count);
diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
index 2507fd2a1eb4..d1a558239b1a 100644
--- a/include/linux/compiler-gcc4.h
+++ b/include/linux/compiler-gcc4.h
@@ -71,7 +71,6 @@
71 * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670 71 * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
72 * 72 *
73 * Work it around via a compiler barrier quirk suggested by Jakub Jelinek. 73 * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
74 * Fixed in GCC 4.8.2 and later versions.
75 * 74 *
76 * (asm goto is automatically volatile - the naming reflects this.) 75 * (asm goto is automatically volatile - the naming reflects this.)
77 */ 76 */
diff --git a/include/linux/compiler-gcc5.h b/include/linux/compiler-gcc5.h
index cdd1cc202d51..c8c565952548 100644
--- a/include/linux/compiler-gcc5.h
+++ b/include/linux/compiler-gcc5.h
@@ -53,7 +53,6 @@
53 * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670 53 * http://gcc.gnu.org/bugzilla/show_bug.cgi?id=58670
54 * 54 *
55 * Work it around via a compiler barrier quirk suggested by Jakub Jelinek. 55 * Work it around via a compiler barrier quirk suggested by Jakub Jelinek.
56 * Fixed in GCC 4.8.2 and later versions.
57 * 56 *
58 * (asm goto is automatically volatile - the naming reflects this.) 57 * (asm goto is automatically volatile - the naming reflects this.)
59 */ 58 */
diff --git a/include/linux/cpufreq-dt.h b/include/linux/cpufreq-dt.h
new file mode 100644
index 000000000000..0414009e2c30
--- /dev/null
+++ b/include/linux/cpufreq-dt.h
@@ -0,0 +1,22 @@
1/*
2 * Copyright (C) 2014 Marvell
3 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#ifndef __CPUFREQ_DT_H__
11#define __CPUFREQ_DT_H__
12
13struct cpufreq_dt_platform_data {
14 /*
15 * True when each CPU has its own clock to control its
16 * frequency, false when all CPUs are controlled by a single
17 * clock.
18 */
19 bool independent_clocks;
20};
21
22#endif /* __CPUFREQ_DT_H__ */
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 138336b6bb04..503b085b7832 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -219,6 +219,7 @@ __ATTR(_name, 0644, show_##_name, store_##_name)
219struct cpufreq_driver { 219struct cpufreq_driver {
220 char name[CPUFREQ_NAME_LEN]; 220 char name[CPUFREQ_NAME_LEN];
221 u8 flags; 221 u8 flags;
222 void *driver_data;
222 223
223 /* needed by all drivers */ 224 /* needed by all drivers */
224 int (*init) (struct cpufreq_policy *policy); 225 int (*init) (struct cpufreq_policy *policy);
@@ -312,6 +313,7 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data);
312int cpufreq_unregister_driver(struct cpufreq_driver *driver_data); 313int cpufreq_unregister_driver(struct cpufreq_driver *driver_data);
313 314
314const char *cpufreq_get_current_driver(void); 315const char *cpufreq_get_current_driver(void);
316void *cpufreq_get_driver_data(void);
315 317
316static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy, 318static inline void cpufreq_verify_within_limits(struct cpufreq_policy *policy,
317 unsigned int min, unsigned int max) 319 unsigned int min, unsigned int max)
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index 72ab536ad3de..3849fce7ecfe 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -14,14 +14,13 @@
14extern unsigned long long elfcorehdr_addr; 14extern unsigned long long elfcorehdr_addr;
15extern unsigned long long elfcorehdr_size; 15extern unsigned long long elfcorehdr_size;
16 16
17extern int __weak elfcorehdr_alloc(unsigned long long *addr, 17extern int elfcorehdr_alloc(unsigned long long *addr, unsigned long long *size);
18 unsigned long long *size); 18extern void elfcorehdr_free(unsigned long long addr);
19extern void __weak elfcorehdr_free(unsigned long long addr); 19extern ssize_t elfcorehdr_read(char *buf, size_t count, u64 *ppos);
20extern ssize_t __weak elfcorehdr_read(char *buf, size_t count, u64 *ppos); 20extern ssize_t elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos);
21extern ssize_t __weak elfcorehdr_read_notes(char *buf, size_t count, u64 *ppos); 21extern int remap_oldmem_pfn_range(struct vm_area_struct *vma,
22extern int __weak remap_oldmem_pfn_range(struct vm_area_struct *vma, 22 unsigned long from, unsigned long pfn,
23 unsigned long from, unsigned long pfn, 23 unsigned long size, pgprot_t prot);
24 unsigned long size, pgprot_t prot);
25 24
26extern ssize_t copy_oldmem_page(unsigned long, char *, size_t, 25extern ssize_t copy_oldmem_page(unsigned long, char *, size_t,
27 unsigned long, int); 26 unsigned long, int);
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 45cb4ffdea62..0949f9c7e872 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -92,6 +92,7 @@ typedef struct {
92#define EFI_MEMORY_WC ((u64)0x0000000000000002ULL) /* write-coalescing */ 92#define EFI_MEMORY_WC ((u64)0x0000000000000002ULL) /* write-coalescing */
93#define EFI_MEMORY_WT ((u64)0x0000000000000004ULL) /* write-through */ 93#define EFI_MEMORY_WT ((u64)0x0000000000000004ULL) /* write-through */
94#define EFI_MEMORY_WB ((u64)0x0000000000000008ULL) /* write-back */ 94#define EFI_MEMORY_WB ((u64)0x0000000000000008ULL) /* write-back */
95#define EFI_MEMORY_UCE ((u64)0x0000000000000010ULL) /* uncached, exported */
95#define EFI_MEMORY_WP ((u64)0x0000000000001000ULL) /* write-protect */ 96#define EFI_MEMORY_WP ((u64)0x0000000000001000ULL) /* write-protect */
96#define EFI_MEMORY_RP ((u64)0x0000000000002000ULL) /* read-protect */ 97#define EFI_MEMORY_RP ((u64)0x0000000000002000ULL) /* read-protect */
97#define EFI_MEMORY_XP ((u64)0x0000000000004000ULL) /* execute-protect */ 98#define EFI_MEMORY_XP ((u64)0x0000000000004000ULL) /* execute-protect */
@@ -502,6 +503,10 @@ typedef efi_status_t efi_get_next_variable_t (unsigned long *name_size, efi_char
502typedef efi_status_t efi_set_variable_t (efi_char16_t *name, efi_guid_t *vendor, 503typedef efi_status_t efi_set_variable_t (efi_char16_t *name, efi_guid_t *vendor,
503 u32 attr, unsigned long data_size, 504 u32 attr, unsigned long data_size,
504 void *data); 505 void *data);
506typedef efi_status_t
507efi_set_variable_nonblocking_t(efi_char16_t *name, efi_guid_t *vendor,
508 u32 attr, unsigned long data_size, void *data);
509
505typedef efi_status_t efi_get_next_high_mono_count_t (u32 *count); 510typedef efi_status_t efi_get_next_high_mono_count_t (u32 *count);
506typedef void efi_reset_system_t (int reset_type, efi_status_t status, 511typedef void efi_reset_system_t (int reset_type, efi_status_t status,
507 unsigned long data_size, efi_char16_t *data); 512 unsigned long data_size, efi_char16_t *data);
@@ -821,6 +826,7 @@ extern struct efi {
821 efi_get_variable_t *get_variable; 826 efi_get_variable_t *get_variable;
822 efi_get_next_variable_t *get_next_variable; 827 efi_get_next_variable_t *get_next_variable;
823 efi_set_variable_t *set_variable; 828 efi_set_variable_t *set_variable;
829 efi_set_variable_nonblocking_t *set_variable_nonblocking;
824 efi_query_variable_info_t *query_variable_info; 830 efi_query_variable_info_t *query_variable_info;
825 efi_update_capsule_t *update_capsule; 831 efi_update_capsule_t *update_capsule;
826 efi_query_capsule_caps_t *query_capsule_caps; 832 efi_query_capsule_caps_t *query_capsule_caps;
@@ -886,6 +892,13 @@ extern bool efi_poweroff_required(void);
886 (md) <= (efi_memory_desc_t *)((m)->map_end - (m)->desc_size); \ 892 (md) <= (efi_memory_desc_t *)((m)->map_end - (m)->desc_size); \
887 (md) = (void *)(md) + (m)->desc_size) 893 (md) = (void *)(md) + (m)->desc_size)
888 894
895/*
896 * Format an EFI memory descriptor's type and attributes to a user-provided
897 * character buffer, as per snprintf(), and return the buffer.
898 */
899char * __init efi_md_typeattr_format(char *buf, size_t size,
900 const efi_memory_desc_t *md);
901
889/** 902/**
890 * efi_range_is_wc - check the WC bit on an address range 903 * efi_range_is_wc - check the WC bit on an address range
891 * @start: starting kvirt address 904 * @start: starting kvirt address
@@ -1034,6 +1047,7 @@ struct efivar_operations {
1034 efi_get_variable_t *get_variable; 1047 efi_get_variable_t *get_variable;
1035 efi_get_next_variable_t *get_next_variable; 1048 efi_get_next_variable_t *get_next_variable;
1036 efi_set_variable_t *set_variable; 1049 efi_set_variable_t *set_variable;
1050 efi_set_variable_nonblocking_t *set_variable_nonblocking;
1037 efi_query_variable_store_t *query_variable_store; 1051 efi_query_variable_store_t *query_variable_store;
1038}; 1052};
1039 1053
@@ -1227,4 +1241,7 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
1227 unsigned long *load_addr, 1241 unsigned long *load_addr,
1228 unsigned long *load_size); 1242 unsigned long *load_size);
1229 1243
1244efi_status_t efi_parse_options(char *cmdline);
1245
1246bool efi_runtime_disabled(void);
1230#endif /* _LINUX_EFI_H */ 1247#endif /* _LINUX_EFI_H */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index a957d4366c24..9ab779e8a63c 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -223,6 +223,13 @@ typedef void (dio_iodone_t)(struct kiocb *iocb, loff_t offset,
223#define ATTR_TIMES_SET (1 << 16) 223#define ATTR_TIMES_SET (1 << 16)
224 224
225/* 225/*
226 * Whiteout is represented by a char device. The following constants define the
227 * mode and device number to use.
228 */
229#define WHITEOUT_MODE 0
230#define WHITEOUT_DEV 0
231
232/*
226 * This is the Inode Attributes structure, used for notify_change(). It 233 * This is the Inode Attributes structure, used for notify_change(). It
227 * uses the above definitions as flags, to know which values have changed. 234 * uses the above definitions as flags, to know which values have changed.
228 * Also, in this manner, a Filesystem can look at only the values it cares 235 * Also, in this manner, a Filesystem can look at only the values it cares
@@ -254,6 +261,12 @@ struct iattr {
254 */ 261 */
255#include <linux/quota.h> 262#include <linux/quota.h>
256 263
264/*
265 * Maximum number of layers of fs stack. Needs to be limited to
266 * prevent kernel stack overflow
267 */
268#define FILESYSTEM_MAX_STACK_DEPTH 2
269
257/** 270/**
258 * enum positive_aop_returns - aop return codes with specific semantics 271 * enum positive_aop_returns - aop return codes with specific semantics
259 * 272 *
@@ -626,11 +639,13 @@ static inline int inode_unhashed(struct inode *inode)
626 * 2: child/target 639 * 2: child/target
627 * 3: xattr 640 * 3: xattr
628 * 4: second non-directory 641 * 4: second non-directory
629 * The last is for certain operations (such as rename) which lock two 642 * 5: second parent (when locking independent directories in rename)
643 *
644 * I_MUTEX_NONDIR2 is for certain operations (such as rename) which lock two
630 * non-directories at once. 645 * non-directories at once.
631 * 646 *
632 * The locking order between these classes is 647 * The locking order between these classes is
633 * parent -> child -> normal -> xattr -> second non-directory 648 * parent[2] -> child -> grandchild -> normal -> xattr -> second non-directory
634 */ 649 */
635enum inode_i_mutex_lock_class 650enum inode_i_mutex_lock_class
636{ 651{
@@ -638,7 +653,8 @@ enum inode_i_mutex_lock_class
638 I_MUTEX_PARENT, 653 I_MUTEX_PARENT,
639 I_MUTEX_CHILD, 654 I_MUTEX_CHILD,
640 I_MUTEX_XATTR, 655 I_MUTEX_XATTR,
641 I_MUTEX_NONDIR2 656 I_MUTEX_NONDIR2,
657 I_MUTEX_PARENT2,
642}; 658};
643 659
644void lock_two_nondirectories(struct inode *, struct inode*); 660void lock_two_nondirectories(struct inode *, struct inode*);
@@ -1266,6 +1282,11 @@ struct super_block {
1266 struct list_lru s_dentry_lru ____cacheline_aligned_in_smp; 1282 struct list_lru s_dentry_lru ____cacheline_aligned_in_smp;
1267 struct list_lru s_inode_lru ____cacheline_aligned_in_smp; 1283 struct list_lru s_inode_lru ____cacheline_aligned_in_smp;
1268 struct rcu_head rcu; 1284 struct rcu_head rcu;
1285
1286 /*
1287 * Indicates how deep in a filesystem stack this SB is
1288 */
1289 int s_stack_depth;
1269}; 1290};
1270 1291
1271extern struct timespec current_fs_time(struct super_block *sb); 1292extern struct timespec current_fs_time(struct super_block *sb);
@@ -1398,6 +1419,7 @@ extern int vfs_link(struct dentry *, struct inode *, struct dentry *, struct ino
1398extern int vfs_rmdir(struct inode *, struct dentry *); 1419extern int vfs_rmdir(struct inode *, struct dentry *);
1399extern int vfs_unlink(struct inode *, struct dentry *, struct inode **); 1420extern int vfs_unlink(struct inode *, struct dentry *, struct inode **);
1400extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *, struct inode **, unsigned int); 1421extern int vfs_rename(struct inode *, struct dentry *, struct inode *, struct dentry *, struct inode **, unsigned int);
1422extern int vfs_whiteout(struct inode *, struct dentry *);
1401 1423
1402/* 1424/*
1403 * VFS dentry helper functions. 1425 * VFS dentry helper functions.
@@ -1528,6 +1550,9 @@ struct inode_operations {
1528 umode_t create_mode, int *opened); 1550 umode_t create_mode, int *opened);
1529 int (*tmpfile) (struct inode *, struct dentry *, umode_t); 1551 int (*tmpfile) (struct inode *, struct dentry *, umode_t);
1530 int (*set_acl)(struct inode *, struct posix_acl *, int); 1552 int (*set_acl)(struct inode *, struct posix_acl *, int);
1553
1554 /* WARNING: probably going away soon, do not use! */
1555 int (*dentry_open)(struct dentry *, struct file *, const struct cred *);
1531} ____cacheline_aligned; 1556} ____cacheline_aligned;
1532 1557
1533ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector, 1558ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
@@ -1625,6 +1650,9 @@ struct super_operations {
1625#define IS_AUTOMOUNT(inode) ((inode)->i_flags & S_AUTOMOUNT) 1650#define IS_AUTOMOUNT(inode) ((inode)->i_flags & S_AUTOMOUNT)
1626#define IS_NOSEC(inode) ((inode)->i_flags & S_NOSEC) 1651#define IS_NOSEC(inode) ((inode)->i_flags & S_NOSEC)
1627 1652
1653#define IS_WHITEOUT(inode) (S_ISCHR(inode->i_mode) && \
1654 (inode)->i_rdev == WHITEOUT_DEV)
1655
1628/* 1656/*
1629 * Inode state bits. Protected by inode->i_lock 1657 * Inode state bits. Protected by inode->i_lock
1630 * 1658 *
@@ -2040,6 +2068,7 @@ extern struct file *file_open_name(struct filename *, int, umode_t);
2040extern struct file *filp_open(const char *, int, umode_t); 2068extern struct file *filp_open(const char *, int, umode_t);
2041extern struct file *file_open_root(struct dentry *, struct vfsmount *, 2069extern struct file *file_open_root(struct dentry *, struct vfsmount *,
2042 const char *, int); 2070 const char *, int);
2071extern int vfs_open(const struct path *, struct file *, const struct cred *);
2043extern struct file * dentry_open(const struct path *, int, const struct cred *); 2072extern struct file * dentry_open(const struct path *, int, const struct cred *);
2044extern int filp_close(struct file *, fl_owner_t id); 2073extern int filp_close(struct file *, fl_owner_t id);
2045 2074
@@ -2253,7 +2282,9 @@ extern sector_t bmap(struct inode *, sector_t);
2253#endif 2282#endif
2254extern int notify_change(struct dentry *, struct iattr *, struct inode **); 2283extern int notify_change(struct dentry *, struct iattr *, struct inode **);
2255extern int inode_permission(struct inode *, int); 2284extern int inode_permission(struct inode *, int);
2285extern int __inode_permission(struct inode *, int);
2256extern int generic_permission(struct inode *, int); 2286extern int generic_permission(struct inode *, int);
2287extern int __check_sticky(struct inode *dir, struct inode *inode);
2257 2288
2258static inline bool execute_ok(struct inode *inode) 2289static inline bool execute_ok(struct inode *inode)
2259{ 2290{
@@ -2438,6 +2469,7 @@ extern ssize_t new_sync_read(struct file *filp, char __user *buf, size_t len, lo
2438extern ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos); 2469extern ssize_t new_sync_write(struct file *filp, const char __user *buf, size_t len, loff_t *ppos);
2439 2470
2440/* fs/block_dev.c */ 2471/* fs/block_dev.c */
2472extern ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to);
2441extern ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from); 2473extern ssize_t blkdev_write_iter(struct kiocb *iocb, struct iov_iter *from);
2442extern int blkdev_fsync(struct file *filp, loff_t start, loff_t end, 2474extern int blkdev_fsync(struct file *filp, loff_t start, loff_t end,
2443 int datasync); 2475 int datasync);
@@ -2452,6 +2484,9 @@ extern ssize_t iter_file_splice_write(struct pipe_inode_info *,
2452 struct file *, loff_t *, size_t, unsigned int); 2484 struct file *, loff_t *, size_t, unsigned int);
2453extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe, 2485extern ssize_t generic_splice_sendpage(struct pipe_inode_info *pipe,
2454 struct file *out, loff_t *, size_t len, unsigned int flags); 2486 struct file *out, loff_t *, size_t len, unsigned int flags);
2487extern long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
2488 loff_t *opos, size_t len, unsigned int flags);
2489
2455 2490
2456extern void 2491extern void
2457file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping); 2492file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping);
@@ -2737,6 +2772,14 @@ static inline int is_sxid(umode_t mode)
2737 return (mode & S_ISUID) || ((mode & S_ISGID) && (mode & S_IXGRP)); 2772 return (mode & S_ISUID) || ((mode & S_ISGID) && (mode & S_IXGRP));
2738} 2773}
2739 2774
2775static inline int check_sticky(struct inode *dir, struct inode *inode)
2776{
2777 if (!(dir->i_mode & S_ISVTX))
2778 return 0;
2779
2780 return __check_sticky(dir, inode);
2781}
2782
2740static inline void inode_has_no_xattr(struct inode *inode) 2783static inline void inode_has_no_xattr(struct inode *inode)
2741{ 2784{
2742 if (!is_sxid(inode->i_mode) && (inode->i_sb->s_flags & MS_NOSEC)) 2785 if (!is_sxid(inode->i_mode) && (inode->i_sb->s_flags & MS_NOSEC))
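
Editor's note: the fs.h hunks above add the whiteout plumbing (WHITEOUT_MODE/WHITEOUT_DEV, IS_WHITEOUT(), vfs_whiteout()) and the check_sticky() fast path used by overlay/union-style stacking. A hedged sketch of how an upper layer might test for a whiteout; the helper name is made up.

#include <linux/fs.h>

/* A whiteout is a zero-mode character device node with device number
 * WHITEOUT_DEV; IS_WHITEOUT() is exactly that test. An upper-layer entry
 * that is a whiteout hides the matching lower-layer entry. */
static bool example_upper_hides_lower(struct inode *upper)
{
	return upper && IS_WHITEOUT(upper);
}
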
diff --git a/include/linux/iio/events.h b/include/linux/iio/events.h
index 8bbd7bc1043d..03fa332ad2a8 100644
--- a/include/linux/iio/events.h
+++ b/include/linux/iio/events.h
@@ -72,7 +72,7 @@ struct iio_event_data {
72 72
73#define IIO_EVENT_CODE_EXTRACT_TYPE(mask) ((mask >> 56) & 0xFF) 73#define IIO_EVENT_CODE_EXTRACT_TYPE(mask) ((mask >> 56) & 0xFF)
74 74
75#define IIO_EVENT_CODE_EXTRACT_DIR(mask) ((mask >> 48) & 0xCF) 75#define IIO_EVENT_CODE_EXTRACT_DIR(mask) ((mask >> 48) & 0x7F)
76 76
77#define IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(mask) ((mask >> 32) & 0xFF) 77#define IIO_EVENT_CODE_EXTRACT_CHAN_TYPE(mask) ((mask >> 32) & 0xFF)
78 78
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index 0068708161ff..0a21fbefdfbe 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -242,7 +242,7 @@ static inline void in_dev_put(struct in_device *idev)
242static __inline__ __be32 inet_make_mask(int logmask) 242static __inline__ __be32 inet_make_mask(int logmask)
243{ 243{
244 if (logmask) 244 if (logmask)
245 return htonl(~((1<<(32-logmask))-1)); 245 return htonl(~((1U<<(32-logmask))-1));
246 return 0; 246 return 0;
247} 247}
248 248
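For reference, a worked expansion of the fixed helper (not part of the patch): inet_make_mask(24) computes 1U << (32 - 24) = 0x100, subtracts 1 to get 0xff, and inverts to 0xffffff00, so the result is htonl(0xffffff00), i.e. the /24 netmask 255.255.255.0. The switch from 1 to 1U matters for logmask = 1, where the old code shifted a signed 1 into the sign bit (1 << 31), which is undefined behaviour in C; the unsigned constant keeps the shift well defined.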
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 0dae71e9971c..704b9a599b26 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -1042,7 +1042,7 @@ void jbd2_update_log_tail(journal_t *journal, tid_t tid, unsigned long block);
1042extern void jbd2_journal_commit_transaction(journal_t *); 1042extern void jbd2_journal_commit_transaction(journal_t *);
1043 1043
1044/* Checkpoint list management */ 1044/* Checkpoint list management */
1045int __jbd2_journal_clean_checkpoint_list(journal_t *journal); 1045void __jbd2_journal_clean_checkpoint_list(journal_t *journal);
1046int __jbd2_journal_remove_checkpoint(struct journal_head *); 1046int __jbd2_journal_remove_checkpoint(struct journal_head *);
1047void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *); 1047void __jbd2_journal_insert_checkpoint(struct journal_head *, transaction_t *);
1048 1048
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 40728cf1c452..3d770f5564b8 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -403,6 +403,7 @@ int vsscanf(const char *, const char *, va_list);
403extern int get_option(char **str, int *pint); 403extern int get_option(char **str, int *pint);
404extern char *get_options(const char *str, int nints, int *ints); 404extern char *get_options(const char *str, int nints, int *ints);
405extern unsigned long long memparse(const char *ptr, char **retptr); 405extern unsigned long long memparse(const char *ptr, char **retptr);
406extern bool parse_option_str(const char *str, const char *option);
406 407
407extern int core_kernel_text(unsigned long addr); 408extern int core_kernel_text(unsigned long addr);
408extern int core_kernel_data(unsigned long addr); 409extern int core_kernel_data(unsigned long addr);
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index 8422b4ed6882..b9376cd5a187 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -77,11 +77,6 @@ static inline unsigned int kstat_cpu_irqs_sum(unsigned int cpu)
77 return kstat_cpu(cpu).irqs_sum; 77 return kstat_cpu(cpu).irqs_sum;
78} 78}
79 79
80/*
81 * Lock/unlock the current runqueue - to extract task statistics:
82 */
83extern unsigned long long task_delta_exec(struct task_struct *);
84
85extern void account_user_time(struct task_struct *, cputime_t, cputime_t); 80extern void account_user_time(struct task_struct *, cputime_t, cputime_t);
86extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t); 81extern void account_system_time(struct task_struct *, int, cputime_t, cputime_t);
87extern void account_steal_time(cputime_t); 82extern void account_steal_time(cputime_t);
diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
index 6b06d378f3df..e465bb15912d 100644
--- a/include/linux/kgdb.h
+++ b/include/linux/kgdb.h
@@ -283,7 +283,7 @@ struct kgdb_io {
283 283
284extern struct kgdb_arch arch_kgdb_ops; 284extern struct kgdb_arch arch_kgdb_ops;
285 285
286extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs); 286extern unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs);
287 287
288#ifdef CONFIG_SERIAL_KGDB_NMI 288#ifdef CONFIG_SERIAL_KGDB_NMI
289extern int kgdb_register_nmi_console(void); 289extern int kgdb_register_nmi_console(void);
diff --git a/include/linux/khugepaged.h b/include/linux/khugepaged.h
index 6b394f0b5148..eeb307985715 100644
--- a/include/linux/khugepaged.h
+++ b/include/linux/khugepaged.h
@@ -6,7 +6,8 @@
6#ifdef CONFIG_TRANSPARENT_HUGEPAGE 6#ifdef CONFIG_TRANSPARENT_HUGEPAGE
7extern int __khugepaged_enter(struct mm_struct *mm); 7extern int __khugepaged_enter(struct mm_struct *mm);
8extern void __khugepaged_exit(struct mm_struct *mm); 8extern void __khugepaged_exit(struct mm_struct *mm);
9extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma); 9extern int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
10 unsigned long vm_flags);
10 11
11#define khugepaged_enabled() \ 12#define khugepaged_enabled() \
12 (transparent_hugepage_flags & \ 13 (transparent_hugepage_flags & \
@@ -35,13 +36,13 @@ static inline void khugepaged_exit(struct mm_struct *mm)
35 __khugepaged_exit(mm); 36 __khugepaged_exit(mm);
36} 37}
37 38
38static inline int khugepaged_enter(struct vm_area_struct *vma) 39static inline int khugepaged_enter(struct vm_area_struct *vma,
40 unsigned long vm_flags)
39{ 41{
40 if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags)) 42 if (!test_bit(MMF_VM_HUGEPAGE, &vma->vm_mm->flags))
41 if ((khugepaged_always() || 43 if ((khugepaged_always() ||
42 (khugepaged_req_madv() && 44 (khugepaged_req_madv() && (vm_flags & VM_HUGEPAGE))) &&
43 vma->vm_flags & VM_HUGEPAGE)) && 45 !(vm_flags & VM_NOHUGEPAGE))
44 !(vma->vm_flags & VM_NOHUGEPAGE))
45 if (__khugepaged_enter(vma->vm_mm)) 46 if (__khugepaged_enter(vma->vm_mm))
46 return -ENOMEM; 47 return -ENOMEM;
47 return 0; 48 return 0;
@@ -54,11 +55,13 @@ static inline int khugepaged_fork(struct mm_struct *mm, struct mm_struct *oldmm)
54static inline void khugepaged_exit(struct mm_struct *mm) 55static inline void khugepaged_exit(struct mm_struct *mm)
55{ 56{
56} 57}
57static inline int khugepaged_enter(struct vm_area_struct *vma) 58static inline int khugepaged_enter(struct vm_area_struct *vma,
59 unsigned long vm_flags)
58{ 60{
59 return 0; 61 return 0;
60} 62}
61static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma) 63static inline int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
64 unsigned long vm_flags)
62{ 65{
63 return 0; 66 return 0;
64} 67}
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 28be31f49250..a6059bdf7b03 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -703,7 +703,7 @@ void kvm_arch_sync_events(struct kvm *kvm);
703int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu); 703int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
704void kvm_vcpu_kick(struct kvm_vcpu *vcpu); 704void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
705 705
706bool kvm_is_mmio_pfn(pfn_t pfn); 706bool kvm_is_reserved_pfn(pfn_t pfn);
707 707
708struct kvm_irq_ack_notifier { 708struct kvm_irq_ack_notifier {
709 struct hlist_node link; 709 struct hlist_node link;
@@ -1080,6 +1080,7 @@ void kvm_device_get(struct kvm_device *dev);
1080void kvm_device_put(struct kvm_device *dev); 1080void kvm_device_put(struct kvm_device *dev);
1081struct kvm_device *kvm_device_from_filp(struct file *filp); 1081struct kvm_device *kvm_device_from_filp(struct file *filp);
1082int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type); 1082int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type);
1083void kvm_unregister_device_ops(u32 type);
1083 1084
1084extern struct kvm_device_ops kvm_mpic_ops; 1085extern struct kvm_device_ops kvm_mpic_ops;
1085extern struct kvm_device_ops kvm_xics_ops; 1086extern struct kvm_device_ops kvm_xics_ops;
diff --git a/include/linux/leds.h b/include/linux/leds.h
index e43686472197..a57611d0c94e 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -13,8 +13,8 @@
13#define __LINUX_LEDS_H_INCLUDED 13#define __LINUX_LEDS_H_INCLUDED
14 14
15#include <linux/list.h> 15#include <linux/list.h>
16#include <linux/spinlock.h>
17#include <linux/rwsem.h> 16#include <linux/rwsem.h>
17#include <linux/spinlock.h>
18#include <linux/timer.h> 18#include <linux/timer.h>
19#include <linux/workqueue.h> 19#include <linux/workqueue.h>
20 20
@@ -31,8 +31,8 @@ enum led_brightness {
31 31
32struct led_classdev { 32struct led_classdev {
33 const char *name; 33 const char *name;
34 int brightness; 34 enum led_brightness brightness;
35 int max_brightness; 35 enum led_brightness max_brightness;
36 int flags; 36 int flags;
37 37
38 /* Lower 16 bits reflect status */ 38 /* Lower 16 bits reflect status */
@@ -140,6 +140,16 @@ extern void led_blink_set_oneshot(struct led_classdev *led_cdev,
140 */ 140 */
141extern void led_set_brightness(struct led_classdev *led_cdev, 141extern void led_set_brightness(struct led_classdev *led_cdev,
142 enum led_brightness brightness); 142 enum led_brightness brightness);
143/**
144 * led_update_brightness - update LED brightness
145 * @led_cdev: the LED to query
146 *
147 * Get an LED's current brightness and update led_cdev->brightness
148 * member with the obtained value.
149 *
150 * Returns: 0 on success or negative error value on failure
151 */
152extern int led_update_brightness(struct led_classdev *led_cdev);
143 153
144/* 154/*
145 * LED Triggers 155 * LED Triggers
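A minimal sketch of how a caller might use the new helper, consistent with the kernel-doc above; demo_get_brightness() is an invented name, not part of the patch:

#include <linux/leds.h>

/* Hypothetical helper: refresh and return a LED's current brightness. */
static int demo_get_brightness(struct led_classdev *led_cdev)
{
	int ret = led_update_brightness(led_cdev);

	if (ret < 0)
		return ret;		/* driver could not report a value */

	return led_cdev->brightness;	/* now holds the freshly obtained value */
}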
diff --git a/include/linux/mailbox_client.h b/include/linux/mailbox_client.h
new file mode 100644
index 000000000000..307d9cab2026
--- /dev/null
+++ b/include/linux/mailbox_client.h
@@ -0,0 +1,46 @@
1/*
2 * Copyright (C) 2013-2014 Linaro Ltd.
3 * Author: Jassi Brar <jassisinghbrar@gmail.com>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#ifndef __MAILBOX_CLIENT_H
11#define __MAILBOX_CLIENT_H
12
13#include <linux/of.h>
14#include <linux/device.h>
15
16struct mbox_chan;
17
18/**
19 * struct mbox_client - User of a mailbox
20 * @dev: The client device
21 * @tx_block: If the mbox_send_message should block until data is
22 * transmitted.
 23 * @tx_tout: Max block period in ms before TX is assumed to have failed
24 * @knows_txdone: If the client could run the TX state machine. Usually
25 * if the client receives some ACK packet for transmission.
26 * Unused if the controller already has TX_Done/RTR IRQ.
27 * @rx_callback: Atomic callback to provide client the data received
28 * @tx_done: Atomic callback to tell client of data transmission
29 */
30struct mbox_client {
31 struct device *dev;
32 bool tx_block;
33 unsigned long tx_tout;
34 bool knows_txdone;
35
36 void (*rx_callback)(struct mbox_client *cl, void *mssg);
37 void (*tx_done)(struct mbox_client *cl, void *mssg, int r);
38};
39
40struct mbox_chan *mbox_request_channel(struct mbox_client *cl, int index);
41int mbox_send_message(struct mbox_chan *chan, void *mssg);
42void mbox_client_txdone(struct mbox_chan *chan, int r); /* atomic */
43bool mbox_client_peek_data(struct mbox_chan *chan); /* atomic */
44void mbox_free_channel(struct mbox_chan *chan); /* may sleep */
45
46#endif /* __MAILBOX_CLIENT_H */
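A minimal client-side sketch of the API declared above, assuming a device whose DT node carries an "mboxes" phandle; the demo_* names and the blocking parameters are illustrative only:

#include <linux/err.h>
#include <linux/device.h>
#include <linux/mailbox_client.h>

static void demo_rx_callback(struct mbox_client *cl, void *mssg)
{
	dev_info(cl->dev, "reply received\n");	/* runs in atomic context */
}

static int demo_send(struct device *dev, void *msg)
{
	struct mbox_client cl = {
		.dev		= dev,
		.tx_block	= true,		/* mbox_send_message() waits for TX done */
		.tx_tout	= 500,		/* ...but gives up after 500 ms */
		.knows_txdone	= false,	/* controller signals completion itself */
		.rx_callback	= demo_rx_callback,
	};
	struct mbox_chan *chan;
	int ret;

	chan = mbox_request_channel(&cl, 0);	/* first mailbox listed for this device */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = mbox_send_message(chan, msg);	/* queued, then sent in order */
	mbox_free_channel(chan);

	return ret < 0 ? ret : 0;
}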
diff --git a/include/linux/mailbox_controller.h b/include/linux/mailbox_controller.h
new file mode 100644
index 000000000000..d4cf96f07cfc
--- /dev/null
+++ b/include/linux/mailbox_controller.h
@@ -0,0 +1,133 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License version 2 as
4 * published by the Free Software Foundation.
5 */
6
7#ifndef __MAILBOX_CONTROLLER_H
8#define __MAILBOX_CONTROLLER_H
9
10#include <linux/of.h>
11#include <linux/types.h>
12#include <linux/timer.h>
13#include <linux/device.h>
14#include <linux/completion.h>
15
16struct mbox_chan;
17
18/**
19 * struct mbox_chan_ops - methods to control mailbox channels
20 * @send_data: The API asks the MBOX controller driver, in atomic
 21 * context, to try to transmit a message on the bus. Returns 0 if
22 * data is accepted for transmission, -EBUSY while rejecting
23 * if the remote hasn't yet read the last data sent. Actual
24 * transmission of data is reported by the controller via
25 * mbox_chan_txdone (if it has some TX ACK irq). It must not
26 * sleep.
27 * @startup: Called when a client requests the chan. The controller
28 * could ask clients for additional parameters of communication
29 * to be provided via client's chan_data. This call may
30 * block. After this call the Controller must forward any
31 * data received on the chan by calling mbox_chan_received_data.
 32 * The controller may do stuff that needs to sleep.
33 * @shutdown: Called when a client relinquishes control of a chan.
34 * This call may block too. The controller must not forward
35 * any received data anymore.
 36 * The controller may do stuff that needs to sleep.
37 * @last_tx_done: If the controller sets 'txdone_poll', the API calls
38 * this to poll status of last TX. The controller must
39 * give priority to IRQ method over polling and never
40 * set both txdone_poll and txdone_irq. Only in polling
41 * mode 'send_data' is expected to return -EBUSY.
 42 * The controller may do stuff that needs to sleep/block.
43 * Used only if txdone_poll:=true && txdone_irq:=false
44 * @peek_data: Atomic check for any received data. Return true if controller
45 * has some data to push to the client. False otherwise.
46 */
47struct mbox_chan_ops {
48 int (*send_data)(struct mbox_chan *chan, void *data);
49 int (*startup)(struct mbox_chan *chan);
50 void (*shutdown)(struct mbox_chan *chan);
51 bool (*last_tx_done)(struct mbox_chan *chan);
52 bool (*peek_data)(struct mbox_chan *chan);
53};
54
55/**
56 * struct mbox_controller - Controller of a class of communication channels
57 * @dev: Device backing this controller
58 * @ops: Operators that work on each communication chan
59 * @chans: Array of channels
60 * @num_chans: Number of channels in the 'chans' array.
61 * @txdone_irq: Indicates if the controller can report to API when
62 * the last transmitted data was read by the remote.
63 * Eg, if it has some TX ACK irq.
64 * @txdone_poll: If the controller can read but not report the TX
65 * done. Ex, some register shows the TX status but
66 * no interrupt rises. Ignored if 'txdone_irq' is set.
67 * @txpoll_period: If 'txdone_poll' is in effect, the API polls for
68 * last TX's status after these many millisecs
69 * @of_xlate: Controller driver specific mapping of channel via DT
70 * @poll: API private. Used to poll for TXDONE on all channels.
71 * @node: API private. To hook into list of controllers.
72 */
73struct mbox_controller {
74 struct device *dev;
75 struct mbox_chan_ops *ops;
76 struct mbox_chan *chans;
77 int num_chans;
78 bool txdone_irq;
79 bool txdone_poll;
80 unsigned txpoll_period;
81 struct mbox_chan *(*of_xlate)(struct mbox_controller *mbox,
82 const struct of_phandle_args *sp);
83 /* Internal to API */
84 struct timer_list poll;
85 struct list_head node;
86};
87
88/*
89 * The length of circular buffer for queuing messages from a client.
90 * 'msg_count' tracks the number of buffered messages while 'msg_free'
91 * is the index where the next message would be buffered.
92 * We shouldn't need it too big because every transfer is interrupt
93 * triggered and if we have lots of data to transfer, the interrupt
94 * latencies are going to be the bottleneck, not the buffer length.
95 * Besides, mbox_send_message could be called from atomic context and
96 * the client could also queue another message from the notifier 'tx_done'
97 * of the last transfer done.
98 * REVISIT: If too many platforms see the "Try increasing MBOX_TX_QUEUE_LEN"
99 * print, it needs to be taken from config option or somesuch.
100 */
101#define MBOX_TX_QUEUE_LEN 20
102
103/**
104 * struct mbox_chan - s/w representation of a communication chan
105 * @mbox: Pointer to the parent/provider of this channel
106 * @txdone_method: Way to detect TXDone chosen by the API
107 * @cl: Pointer to the current owner of this channel
108 * @tx_complete: Transmission completion
109 * @active_req: Currently active request hook
110 * @msg_count: No. of mssg currently queued
111 * @msg_free: Index of next available mssg slot
112 * @msg_data: Hook for data packet
113 * @lock: Serialise access to the channel
114 * @con_priv: Hook for controller driver to attach private data
115 */
116struct mbox_chan {
117 struct mbox_controller *mbox;
118 unsigned txdone_method;
119 struct mbox_client *cl;
120 struct completion tx_complete;
121 void *active_req;
122 unsigned msg_count, msg_free;
123 void *msg_data[MBOX_TX_QUEUE_LEN];
124 spinlock_t lock; /* Serialise access to the channel */
125 void *con_priv;
126};
127
128int mbox_controller_register(struct mbox_controller *mbox); /* can sleep */
129void mbox_controller_unregister(struct mbox_controller *mbox); /* can sleep */
130void mbox_chan_received_data(struct mbox_chan *chan, void *data); /* atomic */
131void mbox_chan_txdone(struct mbox_chan *chan, int r); /* atomic */
132
133#endif /* __MAILBOX_CONTROLLER_H */
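And a skeleton of the provider side, showing how a single-channel, IRQ-driven controller driver wires the ops and the completion hooks together; the register accesses are elided and all demo_* identifiers are invented for illustration:

#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/mailbox_controller.h>

static struct mbox_chan demo_chans[1];

static int demo_send_data(struct mbox_chan *chan, void *data)
{
	/* write the message to the (elided) doorbell register; must not sleep */
	return 0;			/* accepted for transmission */
}

static int demo_startup(struct mbox_chan *chan)
{
	/* enable the ACK/RX interrupt for this channel; may sleep */
	return 0;
}

static void demo_shutdown(struct mbox_chan *chan)
{
	/* disable interrupts; no mbox_chan_received_data() after this point */
}

static struct mbox_chan_ops demo_ops = {
	.send_data	= demo_send_data,
	.startup	= demo_startup,
	.shutdown	= demo_shutdown,
};

static struct mbox_controller demo_mbox = {
	.ops		= &demo_ops,
	.chans		= demo_chans,
	.num_chans	= ARRAY_SIZE(demo_chans),
	.txdone_irq	= true,		/* completion reported from the IRQ below */
};

static irqreturn_t demo_irq(int irq, void *arg)
{
	struct mbox_chan *chan = &demo_chans[0];

	mbox_chan_received_data(chan, NULL);	/* push the (elided) RX payload up */
	mbox_chan_txdone(chan, 0);		/* last TX was read by the remote */
	return IRQ_HANDLED;
}

static int demo_probe(struct platform_device *pdev)
{
	demo_mbox.dev = &pdev->dev;
	return mbox_controller_register(&demo_mbox);	/* unregister on remove */
}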
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 19df5d857411..6b75640ef5ab 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -139,48 +139,23 @@ static inline bool mem_cgroup_disabled(void)
139 return false; 139 return false;
140} 140}
141 141
142void __mem_cgroup_begin_update_page_stat(struct page *page, bool *locked, 142struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page, bool *locked,
143 unsigned long *flags); 143 unsigned long *flags);
144 144void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool locked,
145extern atomic_t memcg_moving; 145 unsigned long flags);
146 146void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
147static inline void mem_cgroup_begin_update_page_stat(struct page *page, 147 enum mem_cgroup_stat_index idx, int val);
148 bool *locked, unsigned long *flags) 148
149{ 149static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
150 if (mem_cgroup_disabled())
151 return;
152 rcu_read_lock();
153 *locked = false;
154 if (atomic_read(&memcg_moving))
155 __mem_cgroup_begin_update_page_stat(page, locked, flags);
156}
157
158void __mem_cgroup_end_update_page_stat(struct page *page,
159 unsigned long *flags);
160static inline void mem_cgroup_end_update_page_stat(struct page *page,
161 bool *locked, unsigned long *flags)
162{
163 if (mem_cgroup_disabled())
164 return;
165 if (*locked)
166 __mem_cgroup_end_update_page_stat(page, flags);
167 rcu_read_unlock();
168}
169
170void mem_cgroup_update_page_stat(struct page *page,
171 enum mem_cgroup_stat_index idx,
172 int val);
173
174static inline void mem_cgroup_inc_page_stat(struct page *page,
175 enum mem_cgroup_stat_index idx) 150 enum mem_cgroup_stat_index idx)
176{ 151{
177 mem_cgroup_update_page_stat(page, idx, 1); 152 mem_cgroup_update_page_stat(memcg, idx, 1);
178} 153}
179 154
180static inline void mem_cgroup_dec_page_stat(struct page *page, 155static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg,
181 enum mem_cgroup_stat_index idx) 156 enum mem_cgroup_stat_index idx)
182{ 157{
183 mem_cgroup_update_page_stat(page, idx, -1); 158 mem_cgroup_update_page_stat(memcg, idx, -1);
184} 159}
185 160
186unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order, 161unsigned long mem_cgroup_soft_limit_reclaim(struct zone *zone, int order,
@@ -315,13 +290,14 @@ mem_cgroup_print_oom_info(struct mem_cgroup *memcg, struct task_struct *p)
315{ 290{
316} 291}
317 292
318static inline void mem_cgroup_begin_update_page_stat(struct page *page, 293static inline struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page,
319 bool *locked, unsigned long *flags) 294 bool *locked, unsigned long *flags)
320{ 295{
296 return NULL;
321} 297}
322 298
323static inline void mem_cgroup_end_update_page_stat(struct page *page, 299static inline void mem_cgroup_end_page_stat(struct mem_cgroup *memcg,
324 bool *locked, unsigned long *flags) 300 bool locked, unsigned long flags)
325{ 301{
326} 302}
327 303
@@ -343,12 +319,12 @@ static inline bool mem_cgroup_oom_synchronize(bool wait)
343 return false; 319 return false;
344} 320}
345 321
346static inline void mem_cgroup_inc_page_stat(struct page *page, 322static inline void mem_cgroup_inc_page_stat(struct mem_cgroup *memcg,
347 enum mem_cgroup_stat_index idx) 323 enum mem_cgroup_stat_index idx)
348{ 324{
349} 325}
350 326
351static inline void mem_cgroup_dec_page_stat(struct page *page, 327static inline void mem_cgroup_dec_page_stat(struct mem_cgroup *memcg,
352 enum mem_cgroup_stat_index idx) 328 enum mem_cgroup_stat_index idx)
353{ 329{
354} 330}
diff --git a/include/linux/memory.h b/include/linux/memory.h
index bb7384e3c3d8..8b8d8d12348e 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -35,7 +35,7 @@ struct memory_block {
35}; 35};
36 36
37int arch_get_memory_phys_device(unsigned long start_pfn); 37int arch_get_memory_phys_device(unsigned long start_pfn);
38unsigned long __weak memory_block_size_bytes(void); 38unsigned long memory_block_size_bytes(void);
39 39
40/* These states are exposed to userspace as text strings in sysfs */ 40/* These states are exposed to userspace as text strings in sysfs */
41#define MEM_ONLINE (1<<0) /* exposed to userspace */ 41#define MEM_ONLINE (1<<0) /* exposed to userspace */
diff --git a/include/linux/mfd/arizona/core.h b/include/linux/mfd/arizona/core.h
index f34723f7663c..910e3aa1e965 100644
--- a/include/linux/mfd/arizona/core.h
+++ b/include/linux/mfd/arizona/core.h
@@ -141,6 +141,7 @@ struct arizona {
141 141
142 uint16_t dac_comp_coeff; 142 uint16_t dac_comp_coeff;
143 uint8_t dac_comp_enabled; 143 uint8_t dac_comp_enabled;
144 struct mutex dac_comp_lock;
144}; 145};
145 146
146int arizona_clk32k_enable(struct arizona *arizona); 147int arizona_clk32k_enable(struct arizona *arizona);
diff --git a/include/linux/mfd/davinci_voicecodec.h b/include/linux/mfd/davinci_voicecodec.h
index cb01496bfa49..8e1cdbef3dad 100644
--- a/include/linux/mfd/davinci_voicecodec.h
+++ b/include/linux/mfd/davinci_voicecodec.h
@@ -99,12 +99,6 @@ struct davinci_vcif {
99 dma_addr_t dma_rx_addr; 99 dma_addr_t dma_rx_addr;
100}; 100};
101 101
102struct cq93vc {
103 struct platform_device *pdev;
104 struct snd_soc_codec *codec;
105 u32 sysclk;
106};
107
108struct davinci_vc; 102struct davinci_vc;
109 103
110struct davinci_vc { 104struct davinci_vc {
@@ -122,7 +116,6 @@ struct davinci_vc {
122 116
123 /* Client devices */ 117 /* Client devices */
124 struct davinci_vcif davinci_vcif; 118 struct davinci_vcif davinci_vcif;
125 struct cq93vc cq93vc;
126}; 119};
127 120
128#endif 121#endif
diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h
index fc17d56581b2..582e67f34054 100644
--- a/include/linux/mfd/max77693-private.h
+++ b/include/linux/mfd/max77693-private.h
@@ -330,6 +330,13 @@ enum max77693_irq_source {
330 MAX77693_IRQ_GROUP_NR, 330 MAX77693_IRQ_GROUP_NR,
331}; 331};
332 332
333#define SRC_IRQ_CHARGER BIT(0)
334#define SRC_IRQ_TOP BIT(1)
335#define SRC_IRQ_FLASH BIT(2)
336#define SRC_IRQ_MUIC BIT(3)
337#define SRC_IRQ_ALL (SRC_IRQ_CHARGER | SRC_IRQ_TOP \
338 | SRC_IRQ_FLASH | SRC_IRQ_MUIC)
339
333#define LED_IRQ_FLED2_OPEN BIT(0) 340#define LED_IRQ_FLED2_OPEN BIT(0)
334#define LED_IRQ_FLED2_SHORT BIT(1) 341#define LED_IRQ_FLED2_SHORT BIT(1)
335#define LED_IRQ_FLED1_OPEN BIT(2) 342#define LED_IRQ_FLED1_OPEN BIT(2)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 02d11ee7f19d..b46461116cd2 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1176,6 +1176,7 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping,
1176 1176
1177extern void truncate_pagecache(struct inode *inode, loff_t new); 1177extern void truncate_pagecache(struct inode *inode, loff_t new);
1178extern void truncate_setsize(struct inode *inode, loff_t newsize); 1178extern void truncate_setsize(struct inode *inode, loff_t newsize);
1179void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to);
1179void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end); 1180void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
1180int truncate_inode_page(struct address_space *mapping, struct page *page); 1181int truncate_inode_page(struct address_space *mapping, struct page *page);
1181int generic_error_remove_page(struct address_space *mapping, struct page *page); 1182int generic_error_remove_page(struct address_space *mapping, struct page *page);
@@ -1234,7 +1235,6 @@ int __set_page_dirty_no_writeback(struct page *page);
1234int redirty_page_for_writepage(struct writeback_control *wbc, 1235int redirty_page_for_writepage(struct writeback_control *wbc,
1235 struct page *page); 1236 struct page *page);
1236void account_page_dirtied(struct page *page, struct address_space *mapping); 1237void account_page_dirtied(struct page *page, struct address_space *mapping);
1237void account_page_writeback(struct page *page);
1238int set_page_dirty(struct page *page); 1238int set_page_dirty(struct page *page);
1239int set_page_dirty_lock(struct page *page); 1239int set_page_dirty_lock(struct page *page);
1240int clear_page_dirty_for_io(struct page *page); 1240int clear_page_dirty_for_io(struct page *page);
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 48bf12ef6620..ffe66e381c04 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -431,6 +431,15 @@ struct zone {
431 */ 431 */
432 int nr_migrate_reserve_block; 432 int nr_migrate_reserve_block;
433 433
434#ifdef CONFIG_MEMORY_ISOLATION
435 /*
436 * Number of isolated pageblock. It is used to solve incorrect
437 * freepage counting problem due to racy retrieving migratetype
438 * of pageblock. Protected by zone->lock.
439 */
440 unsigned long nr_isolate_pageblock;
441#endif
442
434#ifdef CONFIG_MEMORY_HOTPLUG 443#ifdef CONFIG_MEMORY_HOTPLUG
435 /* see spanned/present_pages for more description */ 444 /* see spanned/present_pages for more description */
436 seqlock_t span_seqlock; 445 seqlock_t span_seqlock;
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 9262e4bf0cc3..c2c561dc0114 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -81,6 +81,9 @@ extern struct vfsmount *mntget(struct vfsmount *mnt);
81extern struct vfsmount *mnt_clone_internal(struct path *path); 81extern struct vfsmount *mnt_clone_internal(struct path *path);
82extern int __mnt_is_readonly(struct vfsmount *mnt); 82extern int __mnt_is_readonly(struct vfsmount *mnt);
83 83
84struct path;
85extern struct vfsmount *clone_private_mount(struct path *path);
86
84struct file_system_type; 87struct file_system_type;
85extern struct vfsmount *vfs_kern_mount(struct file_system_type *type, 88extern struct vfsmount *vfs_kern_mount(struct file_system_type *type,
86 int flags, const char *name, 89 int flags, const char *name,
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index 9e6294f32ba8..046a0a2e4c4e 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -187,32 +187,17 @@ struct spi_nor {
187/** 187/**
188 * spi_nor_scan() - scan the SPI NOR 188 * spi_nor_scan() - scan the SPI NOR
189 * @nor: the spi_nor structure 189 * @nor: the spi_nor structure
190 * @id: the spi_device_id provided by the driver 190 * @name: the chip type name
191 * @mode: the read mode supported by the driver 191 * @mode: the read mode supported by the driver
192 * 192 *
 193 * The drivers can use this function to scan the SPI NOR. 193 * The drivers can use this function to scan the SPI NOR.
194 * In the scanning, it will try to get all the necessary information to 194 * In the scanning, it will try to get all the necessary information to
195 * fill the mtd_info{} and the spi_nor{}. 195 * fill the mtd_info{} and the spi_nor{}.
196 * 196 *
197 * The board may assigns a spi_device_id with @id which be used to compared with 197 * The chip type name can be provided through the @name parameter.
198 * the spi_device_id detected by the scanning.
199 * 198 *
200 * Return: 0 for success, others for failure. 199 * Return: 0 for success, others for failure.
201 */ 200 */
202int spi_nor_scan(struct spi_nor *nor, const struct spi_device_id *id, 201int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode);
203 enum read_mode mode);
204extern const struct spi_device_id spi_nor_ids[];
205
206/**
207 * spi_nor_match_id() - find the spi_device_id by the name
208 * @name: the name of the spi_device_id
209 *
210 * The drivers use this function to find the spi_device_id
211 * specified by the @name.
212 *
213 * Return: returns the right spi_device_id pointer on success,
214 * and returns NULL on failure.
215 */
216const struct spi_device_id *spi_nor_match_id(char *name);
217 202
218#endif 203#endif
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 983876f24aed..47ebb4fafd87 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1224,11 +1224,22 @@ struct nfs41_free_stateid_res {
1224 unsigned int status; 1224 unsigned int status;
1225}; 1225};
1226 1226
1227static inline void
1228nfs_free_pnfs_ds_cinfo(struct pnfs_ds_commit_info *cinfo)
1229{
1230 kfree(cinfo->buckets);
1231}
1232
1227#else 1233#else
1228 1234
1229struct pnfs_ds_commit_info { 1235struct pnfs_ds_commit_info {
1230}; 1236};
1231 1237
1238static inline void
1239nfs_free_pnfs_ds_cinfo(struct pnfs_ds_commit_info *cinfo)
1240{
1241}
1242
1232#endif /* CONFIG_NFS_V4_1 */ 1243#endif /* CONFIG_NFS_V4_1 */
1233 1244
1234#ifdef CONFIG_NFS_V4_2 1245#ifdef CONFIG_NFS_V4_2
diff --git a/include/linux/of.h b/include/linux/of.h
index 6545e7aec7bb..29f0adc5f3e4 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -267,14 +267,12 @@ extern int of_property_read_u64(const struct device_node *np,
267extern int of_property_read_string(struct device_node *np, 267extern int of_property_read_string(struct device_node *np,
268 const char *propname, 268 const char *propname,
269 const char **out_string); 269 const char **out_string);
270extern int of_property_read_string_index(struct device_node *np,
271 const char *propname,
272 int index, const char **output);
273extern int of_property_match_string(struct device_node *np, 270extern int of_property_match_string(struct device_node *np,
274 const char *propname, 271 const char *propname,
275 const char *string); 272 const char *string);
276extern int of_property_count_strings(struct device_node *np, 273extern int of_property_read_string_helper(struct device_node *np,
277 const char *propname); 274 const char *propname,
275 const char **out_strs, size_t sz, int index);
278extern int of_device_is_compatible(const struct device_node *device, 276extern int of_device_is_compatible(const struct device_node *device,
279 const char *); 277 const char *);
280extern int of_device_is_available(const struct device_node *device); 278extern int of_device_is_available(const struct device_node *device);
@@ -486,15 +484,9 @@ static inline int of_property_read_string(struct device_node *np,
486 return -ENOSYS; 484 return -ENOSYS;
487} 485}
488 486
489static inline int of_property_read_string_index(struct device_node *np, 487static inline int of_property_read_string_helper(struct device_node *np,
490 const char *propname, int index, 488 const char *propname,
491 const char **out_string) 489 const char **out_strs, size_t sz, int index)
492{
493 return -ENOSYS;
494}
495
496static inline int of_property_count_strings(struct device_node *np,
497 const char *propname)
498{ 490{
499 return -ENOSYS; 491 return -ENOSYS;
500} 492}
@@ -668,6 +660,70 @@ static inline int of_property_count_u64_elems(const struct device_node *np,
668} 660}
669 661
670/** 662/**
663 * of_property_read_string_array() - Read an array of strings from a multiple
664 * strings property.
665 * @np: device node from which the property value is to be read.
666 * @propname: name of the property to be searched.
667 * @out_strs: output array of string pointers.
668 * @sz: number of array elements to read.
669 *
670 * Search for a property in a device tree node and retrieve a list of
671 * terminated string values (pointer to data, not a copy) in that property.
672 *
673 * If @out_strs is NULL, the number of strings in the property is returned.
674 */
675static inline int of_property_read_string_array(struct device_node *np,
676 const char *propname, const char **out_strs,
677 size_t sz)
678{
679 return of_property_read_string_helper(np, propname, out_strs, sz, 0);
680}
681
682/**
683 * of_property_count_strings() - Find and return the number of strings from a
684 * multiple strings property.
685 * @np: device node from which the property value is to be read.
686 * @propname: name of the property to be searched.
687 *
688 * Search for a property in a device tree node and retrieve the number of null
 689 * terminated strings contained in it. Returns the number of strings on
690 * success, -EINVAL if the property does not exist, -ENODATA if property
691 * does not have a value, and -EILSEQ if the string is not null-terminated
692 * within the length of the property data.
693 */
694static inline int of_property_count_strings(struct device_node *np,
695 const char *propname)
696{
697 return of_property_read_string_helper(np, propname, NULL, 0, 0);
698}
699
700/**
701 * of_property_read_string_index() - Find and read a string from a multiple
702 * strings property.
703 * @np: device node from which the property value is to be read.
704 * @propname: name of the property to be searched.
705 * @index: index of the string in the list of strings
706 * @out_string: pointer to null terminated return string, modified only if
707 * return value is 0.
708 *
709 * Search for a property in a device tree node and retrieve a null
710 * terminated string value (pointer to data, not a copy) in the list of strings
711 * contained in that property.
712 * Returns 0 on success, -EINVAL if the property does not exist, -ENODATA if
713 * property does not have a value, and -EILSEQ if the string is not
714 * null-terminated within the length of the property data.
715 *
716 * The out_string pointer is modified only if a valid string can be decoded.
717 */
718static inline int of_property_read_string_index(struct device_node *np,
719 const char *propname,
720 int index, const char **output)
721{
722 int rc = of_property_read_string_helper(np, propname, output, 1, index);
723 return rc < 0 ? rc : 0;
724}
725
726/**
 671 * of_property_read_bool - Find a property 727 * of_property_read_bool - Find a property
672 * @np: device node from which the property value is to be read. 728 * @np: device node from which the property value is to be read.
673 * @propname: name of the property to be searched. 729 * @propname: name of the property to be searched.
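A short sketch of the reworked string accessors, assuming a node with a hypothetical property such as labels = "red", "green", "blue"; both the property name and the demo_ function are invented:

#include <linux/of.h>
#include <linux/printk.h>

static void demo_print_labels(struct device_node *np)
{
	const char *label;
	int i, count;

	/* 3 for the example node, or -EINVAL/-ENODATA/-EILSEQ on error */
	count = of_property_count_strings(np, "labels");

	for (i = 0; i < count; i++)
		if (!of_property_read_string_index(np, "labels", i, &label))
			pr_info("label %d: %s\n", i, label);
}

of_property_read_string_array() covers the case of filling a fixed-size array of const char * pointers in one call; per its kernel-doc above, passing a NULL array just returns the number of strings in the property.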
diff --git a/include/linux/of_reserved_mem.h b/include/linux/of_reserved_mem.h
index 5b5efae09135..ad2f67054372 100644
--- a/include/linux/of_reserved_mem.h
+++ b/include/linux/of_reserved_mem.h
@@ -16,7 +16,7 @@ struct reserved_mem {
16}; 16};
17 17
18struct reserved_mem_ops { 18struct reserved_mem_ops {
19 void (*device_init)(struct reserved_mem *rmem, 19 int (*device_init)(struct reserved_mem *rmem,
20 struct device *dev); 20 struct device *dev);
21 void (*device_release)(struct reserved_mem *rmem, 21 void (*device_release)(struct reserved_mem *rmem,
22 struct device *dev); 22 struct device *dev);
@@ -28,14 +28,17 @@ typedef int (*reservedmem_of_init_fn)(struct reserved_mem *rmem);
28 _OF_DECLARE(reservedmem, name, compat, init, reservedmem_of_init_fn) 28 _OF_DECLARE(reservedmem, name, compat, init, reservedmem_of_init_fn)
29 29
30#ifdef CONFIG_OF_RESERVED_MEM 30#ifdef CONFIG_OF_RESERVED_MEM
31void of_reserved_mem_device_init(struct device *dev); 31int of_reserved_mem_device_init(struct device *dev);
32void of_reserved_mem_device_release(struct device *dev); 32void of_reserved_mem_device_release(struct device *dev);
33 33
34void fdt_init_reserved_mem(void); 34void fdt_init_reserved_mem(void);
35void fdt_reserved_mem_save_node(unsigned long node, const char *uname, 35void fdt_reserved_mem_save_node(unsigned long node, const char *uname,
36 phys_addr_t base, phys_addr_t size); 36 phys_addr_t base, phys_addr_t size);
37#else 37#else
38static inline void of_reserved_mem_device_init(struct device *dev) { } 38static inline int of_reserved_mem_device_init(struct device *dev)
39{
40 return -ENOSYS;
41}
39static inline void of_reserved_mem_device_release(struct device *pdev) { } 42static inline void of_reserved_mem_device_release(struct device *pdev) { }
40 43
41static inline void fdt_init_reserved_mem(void) { } 44static inline void fdt_init_reserved_mem(void) { }
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 647395a1a550..e8d6e1058723 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -50,6 +50,9 @@ static inline bool oom_task_origin(const struct task_struct *p)
50extern unsigned long oom_badness(struct task_struct *p, 50extern unsigned long oom_badness(struct task_struct *p,
51 struct mem_cgroup *memcg, const nodemask_t *nodemask, 51 struct mem_cgroup *memcg, const nodemask_t *nodemask,
52 unsigned long totalpages); 52 unsigned long totalpages);
53
54extern int oom_kills_count(void);
55extern void note_oom_kill(void);
53extern void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order, 56extern void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
54 unsigned int points, unsigned long totalpages, 57 unsigned int points, unsigned long totalpages,
55 struct mem_cgroup *memcg, nodemask_t *nodemask, 58 struct mem_cgroup *memcg, nodemask_t *nodemask,
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 3fff8e774067..2dc1e1697b45 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -2,6 +2,10 @@
2#define __LINUX_PAGEISOLATION_H 2#define __LINUX_PAGEISOLATION_H
3 3
4#ifdef CONFIG_MEMORY_ISOLATION 4#ifdef CONFIG_MEMORY_ISOLATION
5static inline bool has_isolate_pageblock(struct zone *zone)
6{
7 return zone->nr_isolate_pageblock;
8}
5static inline bool is_migrate_isolate_page(struct page *page) 9static inline bool is_migrate_isolate_page(struct page *page)
6{ 10{
7 return get_pageblock_migratetype(page) == MIGRATE_ISOLATE; 11 return get_pageblock_migratetype(page) == MIGRATE_ISOLATE;
@@ -11,6 +15,10 @@ static inline bool is_migrate_isolate(int migratetype)
11 return migratetype == MIGRATE_ISOLATE; 15 return migratetype == MIGRATE_ISOLATE;
12} 16}
13#else 17#else
18static inline bool has_isolate_pageblock(struct zone *zone)
19{
20 return false;
21}
14static inline bool is_migrate_isolate_page(struct page *page) 22static inline bool is_migrate_isolate_page(struct page *page)
15{ 23{
16 return false; 24 return false;
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h
index 64dacb7288a6..24c7728ca681 100644
--- a/include/linux/pci-acpi.h
+++ b/include/linux/pci-acpi.h
@@ -41,8 +41,13 @@ static inline acpi_handle acpi_pci_get_bridge_handle(struct pci_bus *pbus)
41 41
42 if (pci_is_root_bus(pbus)) 42 if (pci_is_root_bus(pbus))
43 dev = pbus->bridge; 43 dev = pbus->bridge;
44 else 44 else {
45 /* If pbus is a virtual bus, there is no bridge to it */
46 if (!pbus->self)
47 return NULL;
48
45 dev = &pbus->self->dev; 49 dev = &pbus->self->dev;
50 }
46 51
47 return ACPI_HANDLE(dev); 52 return ACPI_HANDLE(dev);
48} 53}
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 5be8db45e368..4c8ac5fcc224 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -331,6 +331,7 @@ struct pci_dev {
331 unsigned int is_added:1; 331 unsigned int is_added:1;
332 unsigned int is_busmaster:1; /* device is busmaster */ 332 unsigned int is_busmaster:1; /* device is busmaster */
333 unsigned int no_msi:1; /* device may not use msi */ 333 unsigned int no_msi:1; /* device may not use msi */
334 unsigned int no_64bit_msi:1; /* device may only use 32-bit MSIs */
334 unsigned int block_cfg_access:1; /* config space access is blocked */ 335 unsigned int block_cfg_access:1; /* config space access is blocked */
335 unsigned int broken_parity_status:1; /* Device generates false positive parity */ 336 unsigned int broken_parity_status:1; /* Device generates false positive parity */
336 unsigned int irq_reroute_variant:2; /* device needs IRQ rerouting variant */ 337 unsigned int irq_reroute_variant:2; /* device needs IRQ rerouting variant */
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index d5c89e0dd0e6..51ce60c35f4c 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -133,7 +133,13 @@ static inline bool __ref_is_percpu(struct percpu_ref *ref,
133 /* paired with smp_store_release() in percpu_ref_reinit() */ 133 /* paired with smp_store_release() in percpu_ref_reinit() */
134 smp_read_barrier_depends(); 134 smp_read_barrier_depends();
135 135
136 if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC)) 136 /*
137 * Theoretically, the following could test just ATOMIC; however,
138 * then we'd have to mask off DEAD separately as DEAD may be
139 * visible without ATOMIC if we race with percpu_ref_kill(). DEAD
140 * implies ATOMIC anyway. Test them together.
141 */
142 if (unlikely(percpu_ptr & __PERCPU_REF_ATOMIC_DEAD))
137 return false; 143 return false;
138 144
139 *percpu_countp = (unsigned long __percpu *)percpu_ptr; 145 *percpu_countp = (unsigned long __percpu *)percpu_ptr;
diff --git a/include/linux/mailbox.h b/include/linux/pl320-ipc.h
index 5161f63ec1c8..5161f63ec1c8 100644
--- a/include/linux/mailbox.h
+++ b/include/linux/pl320-ipc.h
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 73e938b7e937..2e0e06daf8c0 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -72,8 +72,10 @@ struct generic_pm_domain {
72 bool max_off_time_changed; 72 bool max_off_time_changed;
73 bool cached_power_down_ok; 73 bool cached_power_down_ok;
74 struct gpd_cpuidle_data *cpuidle_data; 74 struct gpd_cpuidle_data *cpuidle_data;
75 void (*attach_dev)(struct device *dev); 75 int (*attach_dev)(struct generic_pm_domain *domain,
76 void (*detach_dev)(struct device *dev); 76 struct device *dev);
77 void (*detach_dev)(struct generic_pm_domain *domain,
78 struct device *dev);
77}; 79};
78 80
79static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd) 81static inline struct generic_pm_domain *pd_to_genpd(struct dev_pm_domain *pd)
@@ -104,7 +106,7 @@ struct generic_pm_domain_data {
104 struct notifier_block nb; 106 struct notifier_block nb;
105 struct mutex lock; 107 struct mutex lock;
106 unsigned int refcount; 108 unsigned int refcount;
107 bool need_restore; 109 int need_restore;
108}; 110};
109 111
110#ifdef CONFIG_PM_GENERIC_DOMAINS 112#ifdef CONFIG_PM_GENERIC_DOMAINS
diff --git a/include/linux/pm_qos.h b/include/linux/pm_qos.h
index 9ab4bf7c4646..636e82834506 100644
--- a/include/linux/pm_qos.h
+++ b/include/linux/pm_qos.h
@@ -15,6 +15,7 @@ enum {
15 PM_QOS_CPU_DMA_LATENCY, 15 PM_QOS_CPU_DMA_LATENCY,
16 PM_QOS_NETWORK_LATENCY, 16 PM_QOS_NETWORK_LATENCY,
17 PM_QOS_NETWORK_THROUGHPUT, 17 PM_QOS_NETWORK_THROUGHPUT,
18 PM_QOS_MEMORY_BANDWIDTH,
18 19
19 /* insert new class ID */ 20 /* insert new class ID */
20 PM_QOS_NUM_CLASSES, 21 PM_QOS_NUM_CLASSES,
@@ -32,6 +33,7 @@ enum pm_qos_flags_status {
32#define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC) 33#define PM_QOS_CPU_DMA_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC)
33#define PM_QOS_NETWORK_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC) 34#define PM_QOS_NETWORK_LAT_DEFAULT_VALUE (2000 * USEC_PER_SEC)
34#define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE 0 35#define PM_QOS_NETWORK_THROUGHPUT_DEFAULT_VALUE 0
36#define PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE 0
35#define PM_QOS_RESUME_LATENCY_DEFAULT_VALUE 0 37#define PM_QOS_RESUME_LATENCY_DEFAULT_VALUE 0
36#define PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE 0 38#define PM_QOS_LATENCY_TOLERANCE_DEFAULT_VALUE 0
37#define PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT (-1) 39#define PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT (-1)
@@ -69,7 +71,8 @@ struct dev_pm_qos_request {
69enum pm_qos_type { 71enum pm_qos_type {
70 PM_QOS_UNITIALIZED, 72 PM_QOS_UNITIALIZED,
71 PM_QOS_MAX, /* return the largest value */ 73 PM_QOS_MAX, /* return the largest value */
72 PM_QOS_MIN /* return the smallest value */ 74 PM_QOS_MIN, /* return the smallest value */
75 PM_QOS_SUM /* return the sum */
73}; 76};
74 77
75/* 78/*
diff --git a/include/linux/pnfs_osd_xdr.h b/include/linux/pnfs_osd_xdr.h
index fe25876c1a5d..17d7d0d20eca 100644
--- a/include/linux/pnfs_osd_xdr.h
+++ b/include/linux/pnfs_osd_xdr.h
@@ -5,7 +5,7 @@
5 * All rights reserved. 5 * All rights reserved.
6 * 6 *
7 * Benny Halevy <bhalevy@panasas.com> 7 * Benny Halevy <bhalevy@panasas.com>
8 * Boaz Harrosh <bharrosh@panasas.com> 8 * Boaz Harrosh <ooo@electrozaur.com>
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 11 * it under the terms of the GNU General Public License version 2
diff --git a/include/linux/power/charger-manager.h b/include/linux/power/charger-manager.h
index 07e7945a1ff2..e97fc656a058 100644
--- a/include/linux/power/charger-manager.h
+++ b/include/linux/power/charger-manager.h
@@ -253,9 +253,6 @@ struct charger_manager {
253 struct device *dev; 253 struct device *dev;
254 struct charger_desc *desc; 254 struct charger_desc *desc;
255 255
256 struct power_supply *fuel_gauge;
257 struct power_supply **charger_stat;
258
259#ifdef CONFIG_THERMAL 256#ifdef CONFIG_THERMAL
260 struct thermal_zone_device *tzd_batt; 257 struct thermal_zone_device *tzd_batt;
261#endif 258#endif
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 3ed049673022..096dbced02ac 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -200,6 +200,12 @@ struct power_supply {
200 void (*external_power_changed)(struct power_supply *psy); 200 void (*external_power_changed)(struct power_supply *psy);
201 void (*set_charged)(struct power_supply *psy); 201 void (*set_charged)(struct power_supply *psy);
202 202
203 /*
204 * Set if thermal zone should not be created for this power supply.
205 * For example for virtual supplies forwarding calls to actual
206 * sensors or other supplies.
207 */
208 bool no_thermal;
203 /* For APM emulation, think legacy userspace. */ 209 /* For APM emulation, think legacy userspace. */
204 int use_for_apm; 210 int use_for_apm;
205 211
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index a4a819ffb2d1..53ff1a752d7e 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -617,6 +617,21 @@ static inline void rcu_preempt_sleep_check(void)
617#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v) 617#define RCU_INITIALIZER(v) (typeof(*(v)) __force __rcu *)(v)
618 618
619/** 619/**
620 * lockless_dereference() - safely load a pointer for later dereference
621 * @p: The pointer to load
622 *
623 * Similar to rcu_dereference(), but for situations where the pointed-to
624 * object's lifetime is managed by something other than RCU. That
625 * "something other" might be reference counting or simple immortality.
626 */
627#define lockless_dereference(p) \
628({ \
629 typeof(p) _________p1 = ACCESS_ONCE(p); \
630 smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
631 (_________p1); \
632})
633
634/**
620 * rcu_assign_pointer() - assign to RCU-protected pointer 635 * rcu_assign_pointer() - assign to RCU-protected pointer
621 * @p: pointer to assign to 636 * @p: pointer to assign to
622 * @v: value to assign (publish) 637 * @v: value to assign (publish)
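A small usage sketch for the new macro, using an invented configuration pointer that is published once and never freed, i.e. the "simple immortality" case the comment mentions:

#include <linux/rcupdate.h>

struct demo_cfg {
	int threshold;
};

/* Published once with smp_store_release()/rcu_assign_pointer(), never freed. */
static struct demo_cfg *demo_cfg;

static int demo_read_threshold(void)
{
	struct demo_cfg *cfg = lockless_dereference(demo_cfg);

	/* The macro's read barrier orders the pointer load before this dereference. */
	return cfg ? cfg->threshold : 0;
}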
diff --git a/include/linux/regmap.h b/include/linux/regmap.h
index c5ed83f49c4e..4419b99d8d6e 100644
--- a/include/linux/regmap.h
+++ b/include/linux/regmap.h
@@ -27,6 +27,7 @@ struct spmi_device;
27struct regmap; 27struct regmap;
28struct regmap_range_cfg; 28struct regmap_range_cfg;
29struct regmap_field; 29struct regmap_field;
30struct snd_ac97;
30 31
31/* An enum of all the supported cache types */ 32/* An enum of all the supported cache types */
32enum regcache_type { 33enum regcache_type {
@@ -340,6 +341,8 @@ struct regmap *regmap_init_spmi_ext(struct spmi_device *dev,
340struct regmap *regmap_init_mmio_clk(struct device *dev, const char *clk_id, 341struct regmap *regmap_init_mmio_clk(struct device *dev, const char *clk_id,
341 void __iomem *regs, 342 void __iomem *regs,
342 const struct regmap_config *config); 343 const struct regmap_config *config);
344struct regmap *regmap_init_ac97(struct snd_ac97 *ac97,
345 const struct regmap_config *config);
343 346
344struct regmap *devm_regmap_init(struct device *dev, 347struct regmap *devm_regmap_init(struct device *dev,
345 const struct regmap_bus *bus, 348 const struct regmap_bus *bus,
@@ -356,6 +359,10 @@ struct regmap *devm_regmap_init_spmi_ext(struct spmi_device *dev,
356struct regmap *devm_regmap_init_mmio_clk(struct device *dev, const char *clk_id, 359struct regmap *devm_regmap_init_mmio_clk(struct device *dev, const char *clk_id,
357 void __iomem *regs, 360 void __iomem *regs,
358 const struct regmap_config *config); 361 const struct regmap_config *config);
362struct regmap *devm_regmap_init_ac97(struct snd_ac97 *ac97,
363 const struct regmap_config *config);
364
365bool regmap_ac97_default_volatile(struct device *dev, unsigned int reg);
359 366
360/** 367/**
361 * regmap_init_mmio(): Initialise register map 368 * regmap_init_mmio(): Initialise register map
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index d347c805f923..f540b1496e2f 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -35,6 +35,8 @@
35#ifndef __LINUX_REGULATOR_CONSUMER_H_ 35#ifndef __LINUX_REGULATOR_CONSUMER_H_
36#define __LINUX_REGULATOR_CONSUMER_H_ 36#define __LINUX_REGULATOR_CONSUMER_H_
37 37
38#include <linux/err.h>
39
38struct device; 40struct device;
39struct notifier_block; 41struct notifier_block;
40struct regmap; 42struct regmap;
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index 49a4d6f59108..e2c13cd863bd 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -97,7 +97,7 @@ __ring_buffer_alloc(unsigned long size, unsigned flags, struct lock_class_key *k
97 __ring_buffer_alloc((size), (flags), &__key); \ 97 __ring_buffer_alloc((size), (flags), &__key); \
98}) 98})
99 99
100int ring_buffer_wait(struct ring_buffer *buffer, int cpu); 100int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full);
101int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu, 101int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
102 struct file *filp, poll_table *poll_table); 102 struct file *filp, poll_table *poll_table);
103 103
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index a59d9343c25b..6c8b6f604e76 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -557,7 +557,9 @@ struct sk_buff {
557 /* fields enclosed in headers_start/headers_end are copied 557 /* fields enclosed in headers_start/headers_end are copied
558 * using a single memcpy() in __copy_skb_header() 558 * using a single memcpy() in __copy_skb_header()
559 */ 559 */
560 /* private: */
560 __u32 headers_start[0]; 561 __u32 headers_start[0];
562 /* public: */
561 563
562/* if you move pkt_type around you also must adapt those constants */ 564/* if you move pkt_type around you also must adapt those constants */
563#ifdef __BIG_ENDIAN_BITFIELD 565#ifdef __BIG_ENDIAN_BITFIELD
@@ -642,7 +644,9 @@ struct sk_buff {
642 __u16 network_header; 644 __u16 network_header;
643 __u16 mac_header; 645 __u16 mac_header;
644 646
647 /* private: */
645 __u32 headers_end[0]; 648 __u32 headers_end[0];
649 /* public: */
646 650
647 /* These elements must be at the end, see alloc_skb() for details. */ 651 /* These elements must be at the end, see alloc_skb() for details. */
648 sk_buff_data_t tail; 652 sk_buff_data_t tail;
@@ -795,15 +799,19 @@ struct sk_buff_fclones {
795 * @skb: buffer 799 * @skb: buffer
796 * 800 *
 797 * Returns true if skb is a fast clone, and its clone is not freed. 801 * Returns true if skb is a fast clone, and its clone is not freed.
802 * Some drivers call skb_orphan() in their ndo_start_xmit(),
 803 * so we also check that this didn't happen.
798 */ 804 */
799static inline bool skb_fclone_busy(const struct sk_buff *skb) 805static inline bool skb_fclone_busy(const struct sock *sk,
806 const struct sk_buff *skb)
800{ 807{
801 const struct sk_buff_fclones *fclones; 808 const struct sk_buff_fclones *fclones;
802 809
803 fclones = container_of(skb, struct sk_buff_fclones, skb1); 810 fclones = container_of(skb, struct sk_buff_fclones, skb1);
804 811
805 return skb->fclone == SKB_FCLONE_ORIG && 812 return skb->fclone == SKB_FCLONE_ORIG &&
806 fclones->skb2.fclone == SKB_FCLONE_CLONE; 813 fclones->skb2.fclone == SKB_FCLONE_CLONE &&
814 fclones->skb2.sk == sk;
807} 815}
808 816
809static inline struct sk_buff *alloc_skb_fclone(unsigned int size, 817static inline struct sk_buff *alloc_skb_fclone(unsigned int size,
diff --git a/include/linux/socket.h b/include/linux/socket.h
index ec538fc287a6..bb9b83640070 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -256,7 +256,7 @@ struct ucred {
256#define MSG_EOF MSG_FIN 256#define MSG_EOF MSG_FIN
257 257
258#define MSG_FASTOPEN 0x20000000 /* Send data in TCP SYN */ 258#define MSG_FASTOPEN 0x20000000 /* Send data in TCP SYN */
259#define MSG_CMSG_CLOEXEC 0x40000000 /* Set close_on_exit for file 259#define MSG_CMSG_CLOEXEC 0x40000000 /* Set close_on_exec for file
260 descriptor received through 260 descriptor received through
261 SCM_RIGHTS */ 261 SCM_RIGHTS */
262#if defined(CONFIG_COMPAT) 262#if defined(CONFIG_COMPAT)
diff --git a/include/linux/string.h b/include/linux/string.h
index e6edfe51575a..2e22a2e58f3a 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -132,7 +132,7 @@ int bprintf(u32 *bin_buf, size_t size, const char *fmt, ...) __printf(3, 4);
132#endif 132#endif
133 133
134extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos, 134extern ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
135 const void *from, size_t available); 135 const void *from, size_t available);
136 136
137/** 137/**
138 * strstarts - does @str start with @prefix? 138 * strstarts - does @str start with @prefix?
@@ -144,7 +144,8 @@ static inline bool strstarts(const char *str, const char *prefix)
144 return strncmp(str, prefix, strlen(prefix)) == 0; 144 return strncmp(str, prefix, strlen(prefix)) == 0;
145} 145}
146 146
147extern size_t memweight(const void *ptr, size_t bytes); 147size_t memweight(const void *ptr, size_t bytes);
148void memzero_explicit(void *s, size_t count);
148 149
149/** 150/**
150 * kbasename - return the last part of a pathname. 151 * kbasename - return the last part of a pathname.
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index 0305cde21a74..ef90838b36a0 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -44,6 +44,10 @@
44#define KELVIN_TO_CELSIUS(t) (long)(((long)t-2732 >= 0) ? \ 44#define KELVIN_TO_CELSIUS(t) (long)(((long)t-2732 >= 0) ? \
45 ((long)t-2732+5)/10 : ((long)t-2732-5)/10) 45 ((long)t-2732+5)/10 : ((long)t-2732-5)/10)
46#define CELSIUS_TO_KELVIN(t) ((t)*10+2732) 46#define CELSIUS_TO_KELVIN(t) ((t)*10+2732)
47#define DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(t, off) (((t) - (off)) * 100)
48#define DECI_KELVIN_TO_MILLICELSIUS(t) DECI_KELVIN_TO_MILLICELSIUS_WITH_OFFSET(t, 2732)
49#define MILLICELSIUS_TO_DECI_KELVIN_WITH_OFFSET(t, off) (((t) / 100) + (off))
50#define MILLICELSIUS_TO_DECI_KELVIN(t) MILLICELSIUS_TO_DECI_KELVIN_WITH_OFFSET(t, 2732)
47 51
48/* Adding event notification support elements */ 52/* Adding event notification support elements */
49#define THERMAL_GENL_FAMILY_NAME "thermal_event" 53#define THERMAL_GENL_FAMILY_NAME "thermal_event"
diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h
index 1ad4724458de..baa81718d985 100644
--- a/include/linux/uio_driver.h
+++ b/include/linux/uio_driver.h
@@ -63,7 +63,17 @@ struct uio_port {
63 63
64#define MAX_UIO_PORT_REGIONS 5 64#define MAX_UIO_PORT_REGIONS 5
65 65
66struct uio_device; 66struct uio_device {
67 struct module *owner;
68 struct device *dev;
69 int minor;
70 atomic_t event;
71 struct fasync_struct *async_queue;
72 wait_queue_head_t wait;
73 struct uio_info *info;
74 struct kobject *map_dir;
75 struct kobject *portio_dir;
76};
67 77
68/** 78/**
69 * struct uio_info - UIO device capabilities 79 * struct uio_info - UIO device capabilities
diff --git a/include/linux/uprobes.h b/include/linux/uprobes.h
index 4f844c6b03ee..60beb5dc7977 100644
--- a/include/linux/uprobes.h
+++ b/include/linux/uprobes.h
@@ -98,11 +98,11 @@ struct uprobes_state {
98 struct xol_area *xol_area; 98 struct xol_area *xol_area;
99}; 99};
100 100
101extern int __weak set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr); 101extern int set_swbp(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
102extern int __weak set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr); 102extern int set_orig_insn(struct arch_uprobe *aup, struct mm_struct *mm, unsigned long vaddr);
103extern bool __weak is_swbp_insn(uprobe_opcode_t *insn); 103extern bool is_swbp_insn(uprobe_opcode_t *insn);
104extern bool __weak is_trap_insn(uprobe_opcode_t *insn); 104extern bool is_trap_insn(uprobe_opcode_t *insn);
105extern unsigned long __weak uprobe_get_swbp_addr(struct pt_regs *regs); 105extern unsigned long uprobe_get_swbp_addr(struct pt_regs *regs);
106extern unsigned long uprobe_get_trap_addr(struct pt_regs *regs); 106extern unsigned long uprobe_get_trap_addr(struct pt_regs *regs);
107extern int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t); 107extern int uprobe_write_opcode(struct mm_struct *mm, unsigned long vaddr, uprobe_opcode_t);
108extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc); 108extern int uprobe_register(struct inode *inode, loff_t offset, struct uprobe_consumer *uc);
@@ -128,8 +128,8 @@ extern bool arch_uprobe_xol_was_trapped(struct task_struct *tsk);
128extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data); 128extern int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val, void *data);
129extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs); 129extern void arch_uprobe_abort_xol(struct arch_uprobe *aup, struct pt_regs *regs);
130extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs); 130extern unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs *regs);
131extern bool __weak arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs); 131extern bool arch_uprobe_ignore(struct arch_uprobe *aup, struct pt_regs *regs);
132extern void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr, 132extern void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
133 void *src, unsigned long len); 133 void *src, unsigned long len);
134#else /* !CONFIG_UPROBES */ 134#else /* !CONFIG_UPROBES */
135struct uprobes_state { 135struct uprobes_state {
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index 26088feb6608..d9a4905e01d0 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -78,6 +78,7 @@ struct usbnet {
78# define EVENT_NO_RUNTIME_PM 9 78# define EVENT_NO_RUNTIME_PM 9
79# define EVENT_RX_KILL 10 79# define EVENT_RX_KILL 10
80# define EVENT_LINK_CHANGE 11 80# define EVENT_LINK_CHANGE 11
81# define EVENT_SET_RX_MODE 12
81}; 82};
82 83
83static inline struct usb_driver *driver_of(struct usb_interface *intf) 84static inline struct usb_driver *driver_of(struct usb_interface *intf)
@@ -159,6 +160,9 @@ struct driver_info {
159 /* called by minidriver when receiving indication */ 160 /* called by minidriver when receiving indication */
160 void (*indication)(struct usbnet *dev, void *ind, int indlen); 161 void (*indication)(struct usbnet *dev, void *ind, int indlen);
161 162
163 /* rx mode change (device changes address list filtering) */
164 void (*set_rx_mode)(struct usbnet *dev);
165
162 /* for new devices, use the descriptor-reading code instead */ 166 /* for new devices, use the descriptor-reading code instead */
163 int in; /* rx endpoint */ 167 int in; /* rx endpoint */
164 int out; /* tx endpoint */ 168 int out; /* tx endpoint */
diff --git a/include/linux/watchdog.h b/include/linux/watchdog.h
index 2a3038ee17a3..395b70e0eccf 100644
--- a/include/linux/watchdog.h
+++ b/include/linux/watchdog.h
@@ -97,13 +97,8 @@ struct watchdog_device {
97#define WDOG_UNREGISTERED 4 /* Has the device been unregistered */ 97#define WDOG_UNREGISTERED 4 /* Has the device been unregistered */
98}; 98};
99 99
100#ifdef CONFIG_WATCHDOG_NOWAYOUT 100#define WATCHDOG_NOWAYOUT IS_BUILTIN(CONFIG_WATCHDOG_NOWAYOUT)
101#define WATCHDOG_NOWAYOUT 1 101#define WATCHDOG_NOWAYOUT_INIT_STATUS (WATCHDOG_NOWAYOUT << WDOG_NO_WAY_OUT)
102#define WATCHDOG_NOWAYOUT_INIT_STATUS (1 << WDOG_NO_WAY_OUT)
103#else
104#define WATCHDOG_NOWAYOUT 0
105#define WATCHDOG_NOWAYOUT_INIT_STATUS 0
106#endif
107 102
108/* Use the following function to check whether or not the watchdog is active */ 103/* Use the following function to check whether or not the watchdog is active */
109static inline bool watchdog_active(struct watchdog_device *wdd) 104static inline bool watchdog_active(struct watchdog_device *wdd)
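With IS_BUILTIN(), WATCHDOG_NOWAYOUT and WATCHDOG_NOWAYOUT_INIT_STATUS still evaluate to compile-time constants, just without the #ifdef block, so the usual driver pattern is unchanged. A sketch of that pattern for a hypothetical my_wdt driver:

	static bool nowayout = WATCHDOG_NOWAYOUT;
	module_param(nowayout, bool, 0);
	MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started (default="
			 __MODULE_STRING(WATCHDOG_NOWAYOUT) ")");

	/* later, in the hypothetical my_wdt probe, the flag is handed to the core: */
	/* watchdog_set_nowayout(&my_wdt_dev, nowayout); */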
diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
index d9fa68f26c41..2a25dec30211 100644
--- a/include/net/9p/transport.h
+++ b/include/net/9p/transport.h
@@ -34,7 +34,6 @@
34 * @list: used to maintain a list of currently available transports 34 * @list: used to maintain a list of currently available transports
35 * @name: the human-readable name of the transport 35 * @name: the human-readable name of the transport
36 * @maxsize: transport provided maximum packet size 36 * @maxsize: transport provided maximum packet size
37 * @pref: Preferences of this transport
38 * @def: set if this transport should be considered the default 37 * @def: set if this transport should be considered the default
39 * @create: member function to create a new connection on this transport 38 * @create: member function to create a new connection on this transport
40 * @close: member function to discard a connection on this transport 39 * @close: member function to discard a connection on this transport
diff --git a/include/net/inet_common.h b/include/net/inet_common.h
index fe7994c48b75..b2828a06a5a6 100644
--- a/include/net/inet_common.h
+++ b/include/net/inet_common.h
@@ -37,6 +37,8 @@ int inet_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg);
37int inet_ctl_sock_create(struct sock **sk, unsigned short family, 37int inet_ctl_sock_create(struct sock **sk, unsigned short family,
38 unsigned short type, unsigned char protocol, 38 unsigned short type, unsigned char protocol,
39 struct net *net); 39 struct net *net);
40int inet_recv_error(struct sock *sk, struct msghdr *msg, int len,
41 int *addr_len);
40 42
41static inline void inet_ctl_sock_destroy(struct sock *sk) 43static inline void inet_ctl_sock_destroy(struct sock *sk)
42{ 44{
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 97f472012438..4292929392b0 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -671,6 +671,8 @@ static inline int ipv6_addr_diff(const struct in6_addr *a1, const struct in6_add
671 return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr)); 671 return __ipv6_addr_diff(a1, a2, sizeof(struct in6_addr));
672} 672}
673 673
674void ipv6_proxy_select_ident(struct sk_buff *skb);
675
674int ip6_dst_hoplimit(struct dst_entry *dst); 676int ip6_dst_hoplimit(struct dst_entry *dst);
675 677
676static inline int ip6_sk_dst_hoplimit(struct ipv6_pinfo *np, struct flowi6 *fl6, 678static inline int ip6_sk_dst_hoplimit(struct ipv6_pinfo *np, struct flowi6 *fl6,
diff --git a/include/net/netfilter/ipv4/nf_reject.h b/include/net/netfilter/ipv4/nf_reject.h
index e8427193c777..03e928a55229 100644
--- a/include/net/netfilter/ipv4/nf_reject.h
+++ b/include/net/netfilter/ipv4/nf_reject.h
@@ -1,6 +1,8 @@
1#ifndef _IPV4_NF_REJECT_H 1#ifndef _IPV4_NF_REJECT_H
2#define _IPV4_NF_REJECT_H 2#define _IPV4_NF_REJECT_H
3 3
4#include <linux/skbuff.h>
5#include <net/ip.h>
4#include <net/icmp.h> 6#include <net/icmp.h>
5 7
6static inline void nf_send_unreach(struct sk_buff *skb_in, int code) 8static inline void nf_send_unreach(struct sk_buff *skb_in, int code)
@@ -10,4 +12,12 @@ static inline void nf_send_unreach(struct sk_buff *skb_in, int code)
10 12
11void nf_send_reset(struct sk_buff *oldskb, int hook); 13void nf_send_reset(struct sk_buff *oldskb, int hook);
12 14
15const struct tcphdr *nf_reject_ip_tcphdr_get(struct sk_buff *oldskb,
16 struct tcphdr *_oth, int hook);
17struct iphdr *nf_reject_iphdr_put(struct sk_buff *nskb,
18 const struct sk_buff *oldskb,
19 __be16 protocol, int ttl);
20void nf_reject_ip_tcphdr_put(struct sk_buff *nskb, const struct sk_buff *oldskb,
21 const struct tcphdr *oth);
22
13#endif /* _IPV4_NF_REJECT_H */ 23#endif /* _IPV4_NF_REJECT_H */
diff --git a/include/net/netfilter/ipv6/nf_reject.h b/include/net/netfilter/ipv6/nf_reject.h
index 48e18810a9be..23216d48abf9 100644
--- a/include/net/netfilter/ipv6/nf_reject.h
+++ b/include/net/netfilter/ipv6/nf_reject.h
@@ -15,4 +15,14 @@ nf_send_unreach6(struct net *net, struct sk_buff *skb_in, unsigned char code,
15 15
16void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook); 16void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook);
17 17
18const struct tcphdr *nf_reject_ip6_tcphdr_get(struct sk_buff *oldskb,
19 struct tcphdr *otcph,
20 unsigned int *otcplen, int hook);
21struct ipv6hdr *nf_reject_ip6hdr_put(struct sk_buff *nskb,
22 const struct sk_buff *oldskb,
23 __be16 protocol, int hoplimit);
24void nf_reject_ip6_tcphdr_put(struct sk_buff *nskb,
25 const struct sk_buff *oldskb,
26 const struct tcphdr *oth, unsigned int otcplen);
27
18#endif /* _IPV6_NF_REJECT_H */ 28#endif /* _IPV6_NF_REJECT_H */
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 3d7292392fac..3ae969e3acf0 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -396,14 +396,12 @@ struct nft_rule {
396/** 396/**
397 * struct nft_trans - nf_tables object update in transaction 397 * struct nft_trans - nf_tables object update in transaction
398 * 398 *
399 * @rcu_head: rcu head to defer release of transaction data
400 * @list: used internally 399 * @list: used internally
401 * @msg_type: message type 400 * @msg_type: message type
402 * @ctx: transaction context 401 * @ctx: transaction context
403 * @data: internal information related to the transaction 402 * @data: internal information related to the transaction
404 */ 403 */
405struct nft_trans { 404struct nft_trans {
406 struct rcu_head rcu_head;
407 struct list_head list; 405 struct list_head list;
408 int msg_type; 406 int msg_type;
409 struct nft_ctx ctx; 407 struct nft_ctx ctx;
@@ -530,6 +528,9 @@ enum nft_chain_type {
530 NFT_CHAIN_T_MAX 528 NFT_CHAIN_T_MAX
531}; 529};
532 530
531int nft_chain_validate_dependency(const struct nft_chain *chain,
532 enum nft_chain_type type);
533
533struct nft_stats { 534struct nft_stats {
534 u64 bytes; 535 u64 bytes;
535 u64 pkts; 536 u64 pkts;
diff --git a/include/net/netfilter/nft_masq.h b/include/net/netfilter/nft_masq.h
index c72729f954f4..e2a518b60e19 100644
--- a/include/net/netfilter/nft_masq.h
+++ b/include/net/netfilter/nft_masq.h
@@ -13,4 +13,7 @@ int nft_masq_init(const struct nft_ctx *ctx,
13 13
14int nft_masq_dump(struct sk_buff *skb, const struct nft_expr *expr); 14int nft_masq_dump(struct sk_buff *skb, const struct nft_expr *expr);
15 15
16int nft_masq_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
17 const struct nft_data **data);
18
16#endif /* _NFT_MASQ_H_ */ 19#endif /* _NFT_MASQ_H_ */
diff --git a/include/net/udp_tunnel.h b/include/net/udp_tunnel.h
index a47790bcaa38..2a50a70ef587 100644
--- a/include/net/udp_tunnel.h
+++ b/include/net/udp_tunnel.h
@@ -100,6 +100,15 @@ static inline struct sk_buff *udp_tunnel_handle_offloads(struct sk_buff *skb,
100 return iptunnel_handle_offloads(skb, udp_csum, type); 100 return iptunnel_handle_offloads(skb, udp_csum, type);
101} 101}
102 102
103static inline void udp_tunnel_gro_complete(struct sk_buff *skb, int nhoff)
104{
105 struct udphdr *uh;
106
107 uh = (struct udphdr *)(skb->data + nhoff - sizeof(struct udphdr));
108 skb_shinfo(skb)->gso_type |= uh->check ?
109 SKB_GSO_UDP_TUNNEL_CSUM : SKB_GSO_UDP_TUNNEL;
110}
111
103static inline void udp_tunnel_encap_enable(struct socket *sock) 112static inline void udp_tunnel_encap_enable(struct socket *sock)
104{ 113{
105#if IS_ENABLED(CONFIG_IPV6) 114#if IS_ENABLED(CONFIG_IPV6)
diff --git a/include/net/vxlan.h b/include/net/vxlan.h
index d5f59f3fc35d..57cccd0052e5 100644
--- a/include/net/vxlan.h
+++ b/include/net/vxlan.h
@@ -8,6 +8,12 @@
8#define VNI_HASH_BITS 10 8#define VNI_HASH_BITS 10
9#define VNI_HASH_SIZE (1<<VNI_HASH_BITS) 9#define VNI_HASH_SIZE (1<<VNI_HASH_BITS)
10 10
11/* VXLAN protocol header */
12struct vxlanhdr {
13 __be32 vx_flags;
14 __be32 vx_vni;
15};
16
11struct vxlan_sock; 17struct vxlan_sock;
12typedef void (vxlan_rcv_t)(struct vxlan_sock *vh, struct sk_buff *skb, __be32 key); 18typedef void (vxlan_rcv_t)(struct vxlan_sock *vh, struct sk_buff *skb, __be32 key);
13 19
@@ -45,6 +51,18 @@ int vxlan_xmit_skb(struct vxlan_sock *vs,
45 __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df, 51 __be32 src, __be32 dst, __u8 tos, __u8 ttl, __be16 df,
46 __be16 src_port, __be16 dst_port, __be32 vni, bool xnet); 52 __be16 src_port, __be16 dst_port, __be32 vni, bool xnet);
47 53
54static inline bool vxlan_gso_check(struct sk_buff *skb)
55{
56 if ((skb_shinfo(skb)->gso_type & SKB_GSO_UDP_TUNNEL) &&
57 (skb->inner_protocol_type != ENCAP_TYPE_ETHER ||
58 skb->inner_protocol != htons(ETH_P_TEB) ||
59 (skb_inner_mac_header(skb) - skb_transport_header(skb) !=
60 sizeof(struct udphdr) + sizeof(struct vxlanhdr))))
61 return false;
62
63 return true;
64}
65
48/* IP header + UDP + VXLAN + Ethernet header */ 66/* IP header + UDP + VXLAN + Ethernet header */
49#define VXLAN_HEADROOM (20 + 8 + 8 + 14) 67#define VXLAN_HEADROOM (20 + 8 + 8 + 14)
50/* IPv6 header + UDP + VXLAN + Ethernet header */ 68/* IPv6 header + UDP + VXLAN + Ethernet header */
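vxlan_gso_check() gives NICs that advertise VXLAN offload a common test for "does this GSO frame really have the UDP + VXLAN + inner Ethernet layout I can offload". A hedged sketch of driver-side use; the .ndo_gso_check callback named here is an assumption about the netdev hook of this period, not something defined in this header:

	static bool mydrv_gso_check(struct sk_buff *skb, struct net_device *dev)
	{
		/* only claim offload for frames matching the VXLAN layout */
		return vxlan_gso_check(skb);
	}

	static const struct net_device_ops mydrv_netdev_ops = {
		/* other ndo_* callbacks omitted in this sketch */
		.ndo_gso_check = mydrv_gso_check,
	};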
diff --git a/include/scsi/osd_initiator.h b/include/scsi/osd_initiator.h
index b2e85fdd2ae0..a09cca829082 100644
--- a/include/scsi/osd_initiator.h
+++ b/include/scsi/osd_initiator.h
@@ -4,7 +4,7 @@
4 * Copyright (C) 2008 Panasas Inc. All rights reserved. 4 * Copyright (C) 2008 Panasas Inc. All rights reserved.
5 * 5 *
6 * Authors: 6 * Authors:
7 * Boaz Harrosh <bharrosh@panasas.com> 7 * Boaz Harrosh <ooo@electrozaur.com>
8 * Benny Halevy <bhalevy@panasas.com> 8 * Benny Halevy <bhalevy@panasas.com>
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
diff --git a/include/scsi/osd_ore.h b/include/scsi/osd_ore.h
index 6ca3265a4dca..7a8d2cd30328 100644
--- a/include/scsi/osd_ore.h
+++ b/include/scsi/osd_ore.h
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 2011 2 * Copyright (C) 2011
3 * Boaz Harrosh <bharrosh@panasas.com> 3 * Boaz Harrosh <ooo@electrozaur.com>
4 * 4 *
5 * Public Declarations of the ORE API 5 * Public Declarations of the ORE API
6 * 6 *
diff --git a/include/scsi/osd_protocol.h b/include/scsi/osd_protocol.h
index a2594afe05c7..e0ca835e7bf7 100644
--- a/include/scsi/osd_protocol.h
+++ b/include/scsi/osd_protocol.h
@@ -4,7 +4,7 @@
4 * Copyright (C) 2008 Panasas Inc. All rights reserved. 4 * Copyright (C) 2008 Panasas Inc. All rights reserved.
5 * 5 *
6 * Authors: 6 * Authors:
7 * Boaz Harrosh <bharrosh@panasas.com> 7 * Boaz Harrosh <ooo@electrozaur.com>
8 * Benny Halevy <bhalevy@panasas.com> 8 * Benny Halevy <bhalevy@panasas.com>
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
@@ -496,7 +496,7 @@ struct osd_timestamp {
496 */ 496 */
497 497
498struct osd_key_identifier { 498struct osd_key_identifier {
499 u8 id[7]; /* if you know why 7 please email bharrosh@panasas.com */ 499 u8 id[7]; /* if you know why 7 please email ooo@electrozaur.com */
500} __packed; 500} __packed;
501 501
502/* for osd_capability.format */ 502/* for osd_capability.format */
diff --git a/include/scsi/osd_sec.h b/include/scsi/osd_sec.h
index f96151c9c9e8..7abeb0f0db30 100644
--- a/include/scsi/osd_sec.h
+++ b/include/scsi/osd_sec.h
@@ -4,7 +4,7 @@
4 * Copyright (C) 2008 Panasas Inc. All rights reserved. 4 * Copyright (C) 2008 Panasas Inc. All rights reserved.
5 * 5 *
6 * Authors: 6 * Authors:
7 * Boaz Harrosh <bharrosh@panasas.com> 7 * Boaz Harrosh <ooo@electrozaur.com>
8 * Benny Halevy <bhalevy@panasas.com> 8 * Benny Halevy <bhalevy@panasas.com>
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
diff --git a/include/scsi/osd_sense.h b/include/scsi/osd_sense.h
index 91db543a5502..d52aa93a0b2d 100644
--- a/include/scsi/osd_sense.h
+++ b/include/scsi/osd_sense.h
@@ -4,7 +4,7 @@
4 * Copyright (C) 2008 Panasas Inc. All rights reserved. 4 * Copyright (C) 2008 Panasas Inc. All rights reserved.
5 * 5 *
6 * Authors: 6 * Authors:
7 * Boaz Harrosh <bharrosh@panasas.com> 7 * Boaz Harrosh <ooo@electrozaur.com>
8 * Benny Halevy <bhalevy@panasas.com> 8 * Benny Halevy <bhalevy@panasas.com>
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
diff --git a/include/scsi/osd_types.h b/include/scsi/osd_types.h
index bd0be7ed4bcf..48e8a165e136 100644
--- a/include/scsi/osd_types.h
+++ b/include/scsi/osd_types.h
@@ -4,7 +4,7 @@
4 * Copyright (C) 2008 Panasas Inc. All rights reserved. 4 * Copyright (C) 2008 Panasas Inc. All rights reserved.
5 * 5 *
6 * Authors: 6 * Authors:
7 * Boaz Harrosh <bharrosh@panasas.com> 7 * Boaz Harrosh <ooo@electrozaur.com>
8 * Benny Halevy <bhalevy@panasas.com> 8 * Benny Halevy <bhalevy@panasas.com>
9 * 9 *
10 * This program is free software; you can redistribute it and/or modify 10 * This program is free software; you can redistribute it and/or modify
diff --git a/include/scsi/scsi_tcq.h b/include/scsi/scsi_tcq.h
index e64583560701..56ed843969ca 100644
--- a/include/scsi/scsi_tcq.h
+++ b/include/scsi/scsi_tcq.h
@@ -67,8 +67,9 @@ static inline void scsi_activate_tcq(struct scsi_device *sdev, int depth)
67 if (!sdev->tagged_supported) 67 if (!sdev->tagged_supported)
68 return; 68 return;
69 69
70 if (!shost_use_blk_mq(sdev->host) && 70 if (shost_use_blk_mq(sdev->host))
71 !blk_queue_tagged(sdev->request_queue)) 71 queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, sdev->request_queue);
72 else if (!blk_queue_tagged(sdev->request_queue))
72 blk_queue_init_tags(sdev->request_queue, depth, 73 blk_queue_init_tags(sdev->request_queue, depth,
73 sdev->host->bqt); 74 sdev->host->bqt);
74 75
@@ -81,8 +82,7 @@ static inline void scsi_activate_tcq(struct scsi_device *sdev, int depth)
81 **/ 82 **/
82static inline void scsi_deactivate_tcq(struct scsi_device *sdev, int depth) 83static inline void scsi_deactivate_tcq(struct scsi_device *sdev, int depth)
83{ 84{
84 if (!shost_use_blk_mq(sdev->host) && 85 if (blk_queue_tagged(sdev->request_queue))
85 blk_queue_tagged(sdev->request_queue))
86 blk_queue_free_tags(sdev->request_queue); 86 blk_queue_free_tags(sdev->request_queue);
87 scsi_adjust_queue_depth(sdev, 0, depth); 87 scsi_adjust_queue_depth(sdev, 0, depth);
88} 88}
diff --git a/include/sound/pcm.h b/include/sound/pcm.h
index e862497f7556..8bb00a27e219 100644
--- a/include/sound/pcm.h
+++ b/include/sound/pcm.h
@@ -184,6 +184,8 @@ struct snd_pcm_ops {
184#define SNDRV_PCM_FMTBIT_DSD_U8 _SNDRV_PCM_FMTBIT(DSD_U8) 184#define SNDRV_PCM_FMTBIT_DSD_U8 _SNDRV_PCM_FMTBIT(DSD_U8)
185#define SNDRV_PCM_FMTBIT_DSD_U16_LE _SNDRV_PCM_FMTBIT(DSD_U16_LE) 185#define SNDRV_PCM_FMTBIT_DSD_U16_LE _SNDRV_PCM_FMTBIT(DSD_U16_LE)
186#define SNDRV_PCM_FMTBIT_DSD_U32_LE _SNDRV_PCM_FMTBIT(DSD_U32_LE) 186#define SNDRV_PCM_FMTBIT_DSD_U32_LE _SNDRV_PCM_FMTBIT(DSD_U32_LE)
187#define SNDRV_PCM_FMTBIT_DSD_U16_BE _SNDRV_PCM_FMTBIT(DSD_U16_BE)
188#define SNDRV_PCM_FMTBIT_DSD_U32_BE _SNDRV_PCM_FMTBIT(DSD_U32_BE)
187 189
188#ifdef SNDRV_LITTLE_ENDIAN 190#ifdef SNDRV_LITTLE_ENDIAN
189#define SNDRV_PCM_FMTBIT_S16 SNDRV_PCM_FMTBIT_S16_LE 191#define SNDRV_PCM_FMTBIT_S16 SNDRV_PCM_FMTBIT_S16_LE
diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
index e8b3080d196a..2df96b1384c7 100644
--- a/include/sound/soc-dai.h
+++ b/include/sound/soc-dai.h
@@ -206,7 +206,6 @@ struct snd_soc_dai_driver {
206 /* DAI description */ 206 /* DAI description */
207 const char *name; 207 const char *name;
208 unsigned int id; 208 unsigned int id;
209 int ac97_control;
210 unsigned int base; 209 unsigned int base;
211 210
212 /* DAI driver callbacks */ 211 /* DAI driver callbacks */
@@ -216,6 +215,8 @@ struct snd_soc_dai_driver {
216 int (*resume)(struct snd_soc_dai *dai); 215 int (*resume)(struct snd_soc_dai *dai);
217 /* compress dai */ 216 /* compress dai */
218 bool compress_dai; 217 bool compress_dai;
218 /* DAI is also used for the control bus */
219 bool bus_control;
219 220
220 /* ops */ 221 /* ops */
221 const struct snd_soc_dai_ops *ops; 222 const struct snd_soc_dai_ops *ops;
@@ -241,7 +242,6 @@ struct snd_soc_dai {
241 const char *name; 242 const char *name;
242 int id; 243 int id;
243 struct device *dev; 244 struct device *dev;
244 void *ac97_pdata; /* platform_data for the ac97 codec */
245 245
246 /* driver ops */ 246 /* driver ops */
247 struct snd_soc_dai_driver *driver; 247 struct snd_soc_dai_driver *driver;
@@ -268,7 +268,6 @@ struct snd_soc_dai {
268 unsigned int sample_bits; 268 unsigned int sample_bits;
269 269
270 /* parent platform/codec */ 270 /* parent platform/codec */
271 struct snd_soc_platform *platform;
272 struct snd_soc_codec *codec; 271 struct snd_soc_codec *codec;
273 struct snd_soc_component *component; 272 struct snd_soc_component *component;
274 273
@@ -276,8 +275,6 @@ struct snd_soc_dai {
276 unsigned int tx_mask; 275 unsigned int tx_mask;
277 unsigned int rx_mask; 276 unsigned int rx_mask;
278 277
279 struct snd_soc_card *card;
280
281 struct list_head list; 278 struct list_head list;
282}; 279};
283 280
diff --git a/include/sound/soc-dapm.h b/include/sound/soc-dapm.h
index 3a4d7da67b8d..89823cfe6f04 100644
--- a/include/sound/soc-dapm.h
+++ b/include/sound/soc-dapm.h
@@ -435,7 +435,7 @@ void snd_soc_dapm_auto_nc_pins(struct snd_soc_card *card);
435unsigned int dapm_kcontrol_get_value(const struct snd_kcontrol *kcontrol); 435unsigned int dapm_kcontrol_get_value(const struct snd_kcontrol *kcontrol);
436 436
437/* Mostly internal - should not normally be used */ 437/* Mostly internal - should not normally be used */
438void dapm_mark_io_dirty(struct snd_soc_dapm_context *dapm); 438void dapm_mark_endpoints_dirty(struct snd_soc_card *card);
439 439
440/* dapm path query */ 440/* dapm path query */
441int snd_soc_dapm_dai_get_connected_widgets(struct snd_soc_dai *dai, int stream, 441int snd_soc_dapm_dai_get_connected_widgets(struct snd_soc_dai *dai, int stream,
@@ -508,9 +508,9 @@ struct snd_soc_dapm_path {
508 508
509 /* status */ 509 /* status */
510 u32 connect:1; /* source and sink widgets are connected */ 510 u32 connect:1; /* source and sink widgets are connected */
511 u32 walked:1; /* path has been walked */
512 u32 walking:1; /* path is in the process of being walked */ 511 u32 walking:1; /* path is in the process of being walked */
513 u32 weak:1; /* path ignored for power management */ 512 u32 weak:1; /* path ignored for power management */
513 u32 is_supply:1; /* At least one of the connected widgets is a supply */
514 514
515 int (*connected)(struct snd_soc_dapm_widget *source, 515 int (*connected)(struct snd_soc_dapm_widget *source,
516 struct snd_soc_dapm_widget *sink); 516 struct snd_soc_dapm_widget *sink);
@@ -544,11 +544,13 @@ struct snd_soc_dapm_widget {
544 unsigned char active:1; /* active stream on DAC, ADC's */ 544 unsigned char active:1; /* active stream on DAC, ADC's */
545 unsigned char connected:1; /* connected codec pin */ 545 unsigned char connected:1; /* connected codec pin */
546 unsigned char new:1; /* cnew complete */ 546 unsigned char new:1; /* cnew complete */
547 unsigned char ext:1; /* has external widgets */
548 unsigned char force:1; /* force state */ 547 unsigned char force:1; /* force state */
549 unsigned char ignore_suspend:1; /* kept enabled over suspend */ 548 unsigned char ignore_suspend:1; /* kept enabled over suspend */
550 unsigned char new_power:1; /* power from this run */ 549 unsigned char new_power:1; /* power from this run */
551 unsigned char power_checked:1; /* power checked this run */ 550 unsigned char power_checked:1; /* power checked this run */
551 unsigned char is_supply:1; /* Widget is a supply type widget */
552 unsigned char is_sink:1; /* Widget is a sink type widget */
553 unsigned char is_source:1; /* Widget is a source type widget */
552 int subseq; /* sort within widget type */ 554 int subseq; /* sort within widget type */
553 555
554 int (*power_check)(struct snd_soc_dapm_widget *w); 556 int (*power_check)(struct snd_soc_dapm_widget *w);
@@ -567,6 +569,7 @@ struct snd_soc_dapm_widget {
567 struct list_head sinks; 569 struct list_head sinks;
568 570
569 /* used during DAPM updates */ 571 /* used during DAPM updates */
572 struct list_head work_list;
570 struct list_head power_list; 573 struct list_head power_list;
571 struct list_head dirty; 574 struct list_head dirty;
572 int inputs; 575 int inputs;
diff --git a/include/sound/soc-dpcm.h b/include/sound/soc-dpcm.h
index 2883a7a6f9f3..98f2ade0266e 100644
--- a/include/sound/soc-dpcm.h
+++ b/include/sound/soc-dpcm.h
@@ -102,6 +102,8 @@ struct snd_soc_dpcm_runtime {
102 /* state and update */ 102 /* state and update */
103 enum snd_soc_dpcm_update runtime_update; 103 enum snd_soc_dpcm_update runtime_update;
104 enum snd_soc_dpcm_state state; 104 enum snd_soc_dpcm_state state;
105
106 int trigger_pending; /* trigger cmd + 1 if pending, 0 if not */
105}; 107};
106 108
107/* can this BE stop and free */ 109/* can this BE stop and free */
diff --git a/include/sound/soc.h b/include/sound/soc.h
index 7ba7130037a0..29a52909ddef 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -36,6 +36,11 @@
36 {.reg = xreg, .rreg = xreg, .shift = shift_left, \ 36 {.reg = xreg, .rreg = xreg, .shift = shift_left, \
37 .rshift = shift_right, .max = xmax, .platform_max = xmax, \ 37 .rshift = shift_right, .max = xmax, .platform_max = xmax, \
38 .invert = xinvert, .autodisable = xautodisable}) 38 .invert = xinvert, .autodisable = xautodisable})
39#define SOC_DOUBLE_S_VALUE(xreg, shift_left, shift_right, xmin, xmax, xsign_bit, xinvert, xautodisable) \
40 ((unsigned long)&(struct soc_mixer_control) \
41 {.reg = xreg, .rreg = xreg, .shift = shift_left, \
42 .rshift = shift_right, .min = xmin, .max = xmax, .platform_max = xmax, \
43 .sign_bit = xsign_bit, .invert = xinvert, .autodisable = xautodisable})
39#define SOC_SINGLE_VALUE(xreg, xshift, xmax, xinvert, xautodisable) \ 44#define SOC_SINGLE_VALUE(xreg, xshift, xmax, xinvert, xautodisable) \
40 SOC_DOUBLE_VALUE(xreg, xshift, xshift, xmax, xinvert, xautodisable) 45 SOC_DOUBLE_VALUE(xreg, xshift, xshift, xmax, xinvert, xautodisable)
41#define SOC_SINGLE_VALUE_EXT(xreg, xmax, xinvert) \ 46#define SOC_SINGLE_VALUE_EXT(xreg, xmax, xinvert) \
@@ -171,11 +176,9 @@
171 .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \ 176 .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ | \
172 SNDRV_CTL_ELEM_ACCESS_READWRITE, \ 177 SNDRV_CTL_ELEM_ACCESS_READWRITE, \
173 .tlv.p = (tlv_array), \ 178 .tlv.p = (tlv_array), \
174 .info = snd_soc_info_volsw_s8, .get = snd_soc_get_volsw_s8, \ 179 .info = snd_soc_info_volsw, .get = snd_soc_get_volsw,\
175 .put = snd_soc_put_volsw_s8, \ 180 .put = snd_soc_put_volsw, \
176 .private_value = (unsigned long)&(struct soc_mixer_control) \ 181 .private_value = SOC_DOUBLE_S_VALUE(xreg, 0, 8, xmin, xmax, 7, 0, 0) }
177 {.reg = xreg, .min = xmin, .max = xmax, \
178 .platform_max = xmax} }
179#define SOC_ENUM_DOUBLE(xreg, xshift_l, xshift_r, xitems, xtexts) \ 182#define SOC_ENUM_DOUBLE(xreg, xshift_l, xshift_r, xitems, xtexts) \
180{ .reg = xreg, .shift_l = xshift_l, .shift_r = xshift_r, \ 183{ .reg = xreg, .shift_l = xshift_l, .shift_r = xshift_r, \
181 .items = xitems, .texts = xtexts, \ 184 .items = xitems, .texts = xtexts, \
@@ -366,8 +369,6 @@ struct snd_soc_jack_gpio;
366 369
367typedef int (*hw_write_t)(void *,const char* ,int); 370typedef int (*hw_write_t)(void *,const char* ,int);
368 371
369extern struct snd_ac97_bus_ops *soc_ac97_ops;
370
371enum snd_soc_pcm_subclass { 372enum snd_soc_pcm_subclass {
372 SND_SOC_PCM_CLASS_PCM = 0, 373 SND_SOC_PCM_CLASS_PCM = 0,
373 SND_SOC_PCM_CLASS_BE = 1, 374 SND_SOC_PCM_CLASS_BE = 1,
@@ -409,13 +410,9 @@ int devm_snd_soc_register_component(struct device *dev,
409 const struct snd_soc_component_driver *cmpnt_drv, 410 const struct snd_soc_component_driver *cmpnt_drv,
410 struct snd_soc_dai_driver *dai_drv, int num_dai); 411 struct snd_soc_dai_driver *dai_drv, int num_dai);
411void snd_soc_unregister_component(struct device *dev); 412void snd_soc_unregister_component(struct device *dev);
412int snd_soc_cache_sync(struct snd_soc_codec *codec);
413int snd_soc_cache_init(struct snd_soc_codec *codec); 413int snd_soc_cache_init(struct snd_soc_codec *codec);
414int snd_soc_cache_exit(struct snd_soc_codec *codec); 414int snd_soc_cache_exit(struct snd_soc_codec *codec);
415int snd_soc_cache_write(struct snd_soc_codec *codec, 415
416 unsigned int reg, unsigned int value);
417int snd_soc_cache_read(struct snd_soc_codec *codec,
418 unsigned int reg, unsigned int *value);
419int snd_soc_platform_read(struct snd_soc_platform *platform, 416int snd_soc_platform_read(struct snd_soc_platform *platform,
420 unsigned int reg); 417 unsigned int reg);
421int snd_soc_platform_write(struct snd_soc_platform *platform, 418int snd_soc_platform_write(struct snd_soc_platform *platform,
@@ -500,14 +497,28 @@ int snd_soc_update_bits_locked(struct snd_soc_codec *codec,
500int snd_soc_test_bits(struct snd_soc_codec *codec, unsigned int reg, 497int snd_soc_test_bits(struct snd_soc_codec *codec, unsigned int reg,
501 unsigned int mask, unsigned int value); 498 unsigned int mask, unsigned int value);
502 499
503int snd_soc_new_ac97_codec(struct snd_soc_codec *codec, 500#ifdef CONFIG_SND_SOC_AC97_BUS
504 struct snd_ac97_bus_ops *ops, int num); 501struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec);
505void snd_soc_free_ac97_codec(struct snd_soc_codec *codec); 502void snd_soc_free_ac97_codec(struct snd_ac97 *ac97);
506 503
507int snd_soc_set_ac97_ops(struct snd_ac97_bus_ops *ops); 504int snd_soc_set_ac97_ops(struct snd_ac97_bus_ops *ops);
508int snd_soc_set_ac97_ops_of_reset(struct snd_ac97_bus_ops *ops, 505int snd_soc_set_ac97_ops_of_reset(struct snd_ac97_bus_ops *ops,
509 struct platform_device *pdev); 506 struct platform_device *pdev);
510 507
508extern struct snd_ac97_bus_ops *soc_ac97_ops;
509#else
510static inline int snd_soc_set_ac97_ops_of_reset(struct snd_ac97_bus_ops *ops,
511 struct platform_device *pdev)
512{
513 return 0;
514}
515
516static inline int snd_soc_set_ac97_ops(struct snd_ac97_bus_ops *ops)
517{
518 return 0;
519}
520#endif
521
511/* 522/*
512 *Controls 523 *Controls
513 */ 524 */
@@ -545,12 +556,6 @@ int snd_soc_get_volsw_sx(struct snd_kcontrol *kcontrol,
545 struct snd_ctl_elem_value *ucontrol); 556 struct snd_ctl_elem_value *ucontrol);
546int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol, 557int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol,
547 struct snd_ctl_elem_value *ucontrol); 558 struct snd_ctl_elem_value *ucontrol);
548int snd_soc_info_volsw_s8(struct snd_kcontrol *kcontrol,
549 struct snd_ctl_elem_info *uinfo);
550int snd_soc_get_volsw_s8(struct snd_kcontrol *kcontrol,
551 struct snd_ctl_elem_value *ucontrol);
552int snd_soc_put_volsw_s8(struct snd_kcontrol *kcontrol,
553 struct snd_ctl_elem_value *ucontrol);
554int snd_soc_info_volsw_range(struct snd_kcontrol *kcontrol, 559int snd_soc_info_volsw_range(struct snd_kcontrol *kcontrol,
555 struct snd_ctl_elem_info *uinfo); 560 struct snd_ctl_elem_info *uinfo);
556int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol, 561int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol,
@@ -780,24 +785,18 @@ struct snd_soc_codec {
780 struct device *dev; 785 struct device *dev;
781 const struct snd_soc_codec_driver *driver; 786 const struct snd_soc_codec_driver *driver;
782 787
783 struct mutex mutex;
784 struct list_head list; 788 struct list_head list;
785 struct list_head card_list; 789 struct list_head card_list;
786 790
787 /* runtime */ 791 /* runtime */
788 struct snd_ac97 *ac97; /* for ad-hoc ac97 devices */
789 unsigned int cache_bypass:1; /* Suppress access to the cache */ 792 unsigned int cache_bypass:1; /* Suppress access to the cache */
790 unsigned int suspended:1; /* Codec is in suspend PM state */ 793 unsigned int suspended:1; /* Codec is in suspend PM state */
791 unsigned int ac97_registered:1; /* Codec has been AC97 registered */
792 unsigned int ac97_created:1; /* Codec has been created by SoC */
793 unsigned int cache_init:1; /* codec cache has been initialized */ 794 unsigned int cache_init:1; /* codec cache has been initialized */
794 u32 cache_sync; /* Cache needs to be synced to hardware */
795 795
796 /* codec IO */ 796 /* codec IO */
797 void *control_data; /* codec control (i2c/3wire) data */ 797 void *control_data; /* codec control (i2c/3wire) data */
798 hw_write_t hw_write; 798 hw_write_t hw_write;
799 void *reg_cache; 799 void *reg_cache;
800 struct mutex cache_rw_mutex;
801 800
802 /* component */ 801 /* component */
803 struct snd_soc_component component; 802 struct snd_soc_component component;
@@ -860,8 +859,6 @@ struct snd_soc_platform_driver {
860 859
861 int (*probe)(struct snd_soc_platform *); 860 int (*probe)(struct snd_soc_platform *);
862 int (*remove)(struct snd_soc_platform *); 861 int (*remove)(struct snd_soc_platform *);
863 int (*suspend)(struct snd_soc_dai *dai);
864 int (*resume)(struct snd_soc_dai *dai);
865 struct snd_soc_component_driver component_driver; 862 struct snd_soc_component_driver component_driver;
866 863
867 /* pcm creation and destruction */ 864 /* pcm creation and destruction */
@@ -886,7 +883,7 @@ struct snd_soc_platform_driver {
886 883
887struct snd_soc_dai_link_component { 884struct snd_soc_dai_link_component {
888 const char *name; 885 const char *name;
889 const struct device_node *of_node; 886 struct device_node *of_node;
890 const char *dai_name; 887 const char *dai_name;
891}; 888};
892 889
@@ -894,8 +891,6 @@ struct snd_soc_platform {
894 struct device *dev; 891 struct device *dev;
895 const struct snd_soc_platform_driver *driver; 892 const struct snd_soc_platform_driver *driver;
896 893
897 unsigned int suspended:1; /* platform is suspended */
898
899 struct list_head list; 894 struct list_head list;
900 895
901 struct snd_soc_component component; 896 struct snd_soc_component component;
@@ -990,7 +985,7 @@ struct snd_soc_codec_conf {
990 * DT/OF node, but not both. 985 * DT/OF node, but not both.
991 */ 986 */
992 const char *dev_name; 987 const char *dev_name;
993 const struct device_node *of_node; 988 struct device_node *of_node;
994 989
995 /* 990 /*
996 * optional map of kcontrol, widget and path name prefixes that are 991 * optional map of kcontrol, widget and path name prefixes that are
@@ -1007,7 +1002,7 @@ struct snd_soc_aux_dev {
1007 * DT/OF node, but not both. 1002 * DT/OF node, but not both.
1008 */ 1003 */
1009 const char *codec_name; 1004 const char *codec_name;
1010 const struct device_node *codec_of_node; 1005 struct device_node *codec_of_node;
1011 1006
1012 /* codec/machine specific init - e.g. add machine controls */ 1007 /* codec/machine specific init - e.g. add machine controls */
1013 int (*init)(struct snd_soc_component *component); 1008 int (*init)(struct snd_soc_component *component);
@@ -1264,6 +1259,17 @@ unsigned int snd_soc_read(struct snd_soc_codec *codec, unsigned int reg);
1264int snd_soc_write(struct snd_soc_codec *codec, unsigned int reg, 1259int snd_soc_write(struct snd_soc_codec *codec, unsigned int reg,
1265 unsigned int val); 1260 unsigned int val);
1266 1261
1262/**
1263 * snd_soc_cache_sync() - Sync the register cache with the hardware
1264 * @codec: CODEC to sync
1265 *
1266 * Note: This function will call regcache_sync()
1267 */
1268static inline int snd_soc_cache_sync(struct snd_soc_codec *codec)
1269{
1270 return regcache_sync(codec->component.regmap);
1271}
1272
1267/* component IO */ 1273/* component IO */
1268int snd_soc_component_read(struct snd_soc_component *component, 1274int snd_soc_component_read(struct snd_soc_component *component,
1269 unsigned int reg, unsigned int *val); 1275 unsigned int reg, unsigned int *val);
@@ -1277,6 +1283,45 @@ void snd_soc_component_async_complete(struct snd_soc_component *component);
1277int snd_soc_component_test_bits(struct snd_soc_component *component, 1283int snd_soc_component_test_bits(struct snd_soc_component *component,
1278 unsigned int reg, unsigned int mask, unsigned int value); 1284 unsigned int reg, unsigned int mask, unsigned int value);
1279 1285
1286#ifdef CONFIG_REGMAP
1287
1288void snd_soc_component_init_regmap(struct snd_soc_component *component,
1289 struct regmap *regmap);
1290void snd_soc_component_exit_regmap(struct snd_soc_component *component);
1291
1292/**
1293 * snd_soc_codec_init_regmap() - Initialize regmap instance for the CODEC
1294 * @codec: The CODEC for which to initialize the regmap instance
1295 * @regmap: The regmap instance that should be used by the CODEC
1296 *
1297 * This function allows deferred assignment of the regmap instance that is
1298 * associated with the CODEC. Only use this if the regmap instance is not yet
1299 * ready when the CODEC is registered. The function must also be called before
1300 * the first IO attempt of the CODEC.
1301 */
1302static inline void snd_soc_codec_init_regmap(struct snd_soc_codec *codec,
1303 struct regmap *regmap)
1304{
1305 snd_soc_component_init_regmap(&codec->component, regmap);
1306}
1307
1308/**
1309 * snd_soc_codec_exit_regmap() - De-initialize regmap instance for the CODEC
1310 * @codec: The CODEC for which to de-initialize the regmap instance
1311 *
1312 * Calls regmap_exit() on the regmap instance associated to the CODEC and
1313 * removes the regmap instance from the CODEC.
1314 *
1315 * This function should only be used if snd_soc_codec_init_regmap() was used to
1316 * initialize the regmap instance.
1317 */
1318static inline void snd_soc_codec_exit_regmap(struct snd_soc_codec *codec)
1319{
1320 snd_soc_component_exit_regmap(&codec->component);
1321}
1322
1323#endif
1324
1280/* device driver data */ 1325/* device driver data */
1281 1326
1282static inline void snd_soc_card_set_drvdata(struct snd_soc_card *card, 1327static inline void snd_soc_card_set_drvdata(struct snd_soc_card *card,
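The snd_soc_codec_init_regmap()/snd_soc_codec_exit_regmap() wrappers above cover CODECs whose regmap only becomes available after registration. A hedged sketch of deferred assignment from a hypothetical codec probe, assuming an I2C-backed regmap (my_codec_priv and my_regmap_config are illustrative):

	static int my_codec_probe(struct snd_soc_codec *codec)
	{
		struct my_codec_priv *priv = snd_soc_codec_get_drvdata(codec);

		/* regmap created late, e.g. only once firmware has been loaded */
		priv->regmap = devm_regmap_init_i2c(priv->i2c, &my_regmap_config);
		if (IS_ERR(priv->regmap))
			return PTR_ERR(priv->regmap);

		snd_soc_codec_init_regmap(codec, priv->regmap);
		return 0;
	}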
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 9ec9864ecf38..23c518a0340c 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -108,6 +108,8 @@
108#define DA_EMULATE_ALUA 0 108#define DA_EMULATE_ALUA 0
109/* Enforce SCSI Initiator Port TransportID with 'ISID' for PR */ 109/* Enforce SCSI Initiator Port TransportID with 'ISID' for PR */
110#define DA_ENFORCE_PR_ISIDS 1 110#define DA_ENFORCE_PR_ISIDS 1
111/* Force SPC-3 PR Activate Persistence across Target Power Loss */
112#define DA_FORCE_PR_APTPL 0
111#define DA_STATUS_MAX_SECTORS_MIN 16 113#define DA_STATUS_MAX_SECTORS_MIN 16
112#define DA_STATUS_MAX_SECTORS_MAX 8192 114#define DA_STATUS_MAX_SECTORS_MAX 8192
113/* By default don't report non-rotating (solid state) medium */ 115/* By default don't report non-rotating (solid state) medium */
@@ -680,6 +682,7 @@ struct se_dev_attrib {
680 enum target_prot_type pi_prot_type; 682 enum target_prot_type pi_prot_type;
681 enum target_prot_type hw_pi_prot_type; 683 enum target_prot_type hw_pi_prot_type;
682 int enforce_pr_isids; 684 int enforce_pr_isids;
685 int force_pr_aptpl;
683 int is_nonrot; 686 int is_nonrot;
684 int emulate_rest_reord; 687 int emulate_rest_reord;
685 u32 hw_block_size; 688 u32 hw_block_size;
@@ -903,4 +906,18 @@ struct se_wwn {
903 struct config_group fabric_stat_group; 906 struct config_group fabric_stat_group;
904}; 907};
905 908
909static inline void atomic_inc_mb(atomic_t *v)
910{
911 smp_mb__before_atomic();
912 atomic_inc(v);
913 smp_mb__after_atomic();
914}
915
916static inline void atomic_dec_mb(atomic_t *v)
917{
918 smp_mb__before_atomic();
919 atomic_dec(v);
920 smp_mb__after_atomic();
921}
922
906#endif /* TARGET_CORE_BASE_H */ 923#endif /* TARGET_CORE_BASE_H */
diff --git a/include/trace/events/asoc.h b/include/trace/events/asoc.h
index b04ee7e5a466..88cf39d96d0f 100644
--- a/include/trace/events/asoc.h
+++ b/include/trace/events/asoc.h
@@ -288,31 +288,6 @@ TRACE_EVENT(snd_soc_jack_notify,
288 TP_printk("jack=%s %x", __get_str(name), (int)__entry->val) 288 TP_printk("jack=%s %x", __get_str(name), (int)__entry->val)
289); 289);
290 290
291TRACE_EVENT(snd_soc_cache_sync,
292
293 TP_PROTO(struct snd_soc_codec *codec, const char *type,
294 const char *status),
295
296 TP_ARGS(codec, type, status),
297
298 TP_STRUCT__entry(
299 __string( name, codec->component.name)
300 __string( status, status )
301 __string( type, type )
302 __field( int, id )
303 ),
304
305 TP_fast_assign(
306 __assign_str(name, codec->component.name);
307 __assign_str(status, status);
308 __assign_str(type, type);
309 __entry->id = codec->component.id;
310 ),
311
312 TP_printk("codec=%s.%d type=%s status=%s", __get_str(name),
313 (int)__entry->id, __get_str(type), __get_str(status))
314);
315
316#endif /* _TRACE_ASOC_H */ 291#endif /* _TRACE_ASOC_H */
317 292
318/* This part must be outside protection */ 293/* This part must be outside protection */
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index d4f70a7fe876..ff4bd1b35246 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -2369,7 +2369,7 @@ TRACE_EVENT(ext4_es_lookup_extent_exit,
2369 show_extent_status(__entry->found ? __entry->status : 0)) 2369 show_extent_status(__entry->found ? __entry->status : 0))
2370); 2370);
2371 2371
2372TRACE_EVENT(ext4_es_shrink_enter, 2372DECLARE_EVENT_CLASS(ext4__es_shrink_enter,
2373 TP_PROTO(struct super_block *sb, int nr_to_scan, int cache_cnt), 2373 TP_PROTO(struct super_block *sb, int nr_to_scan, int cache_cnt),
2374 2374
2375 TP_ARGS(sb, nr_to_scan, cache_cnt), 2375 TP_ARGS(sb, nr_to_scan, cache_cnt),
@@ -2391,26 +2391,38 @@ TRACE_EVENT(ext4_es_shrink_enter,
2391 __entry->nr_to_scan, __entry->cache_cnt) 2391 __entry->nr_to_scan, __entry->cache_cnt)
2392); 2392);
2393 2393
2394TRACE_EVENT(ext4_es_shrink_exit, 2394DEFINE_EVENT(ext4__es_shrink_enter, ext4_es_shrink_count,
2395 TP_PROTO(struct super_block *sb, int shrunk_nr, int cache_cnt), 2395 TP_PROTO(struct super_block *sb, int nr_to_scan, int cache_cnt),
2396 2396
2397 TP_ARGS(sb, shrunk_nr, cache_cnt), 2397 TP_ARGS(sb, nr_to_scan, cache_cnt)
2398);
2399
2400DEFINE_EVENT(ext4__es_shrink_enter, ext4_es_shrink_scan_enter,
2401 TP_PROTO(struct super_block *sb, int nr_to_scan, int cache_cnt),
2402
2403 TP_ARGS(sb, nr_to_scan, cache_cnt)
2404);
2405
2406TRACE_EVENT(ext4_es_shrink_scan_exit,
2407 TP_PROTO(struct super_block *sb, int nr_shrunk, int cache_cnt),
2408
2409 TP_ARGS(sb, nr_shrunk, cache_cnt),
2398 2410
2399 TP_STRUCT__entry( 2411 TP_STRUCT__entry(
2400 __field( dev_t, dev ) 2412 __field( dev_t, dev )
2401 __field( int, shrunk_nr ) 2413 __field( int, nr_shrunk )
2402 __field( int, cache_cnt ) 2414 __field( int, cache_cnt )
2403 ), 2415 ),
2404 2416
2405 TP_fast_assign( 2417 TP_fast_assign(
2406 __entry->dev = sb->s_dev; 2418 __entry->dev = sb->s_dev;
2407 __entry->shrunk_nr = shrunk_nr; 2419 __entry->nr_shrunk = nr_shrunk;
2408 __entry->cache_cnt = cache_cnt; 2420 __entry->cache_cnt = cache_cnt;
2409 ), 2421 ),
2410 2422
2411 TP_printk("dev %d,%d shrunk_nr %d cache_cnt %d", 2423 TP_printk("dev %d,%d nr_shrunk %d cache_cnt %d",
2412 MAJOR(__entry->dev), MINOR(__entry->dev), 2424 MAJOR(__entry->dev), MINOR(__entry->dev),
2413 __entry->shrunk_nr, __entry->cache_cnt) 2425 __entry->nr_shrunk, __entry->cache_cnt)
2414); 2426);
2415 2427
2416TRACE_EVENT(ext4_collapse_range, 2428TRACE_EVENT(ext4_collapse_range,
@@ -2438,6 +2450,37 @@ TRACE_EVENT(ext4_collapse_range,
2438 __entry->offset, __entry->len) 2450 __entry->offset, __entry->len)
2439); 2451);
2440 2452
2453TRACE_EVENT(ext4_es_shrink,
2454 TP_PROTO(struct super_block *sb, int nr_shrunk, u64 scan_time,
2455 int skip_precached, int nr_skipped, int retried),
2456
2457 TP_ARGS(sb, nr_shrunk, scan_time, skip_precached, nr_skipped, retried),
2458
2459 TP_STRUCT__entry(
2460 __field( dev_t, dev )
2461 __field( int, nr_shrunk )
2462 __field( unsigned long long, scan_time )
2463 __field( int, skip_precached )
2464 __field( int, nr_skipped )
2465 __field( int, retried )
2466 ),
2467
2468 TP_fast_assign(
2469 __entry->dev = sb->s_dev;
2470 __entry->nr_shrunk = nr_shrunk;
2471 __entry->scan_time = div_u64(scan_time, 1000);
2472 __entry->skip_precached = skip_precached;
2473 __entry->nr_skipped = nr_skipped;
2474 __entry->retried = retried;
2475 ),
2476
2477 TP_printk("dev %d,%d nr_shrunk %d, scan_time %llu skip_precached %d "
2478 "nr_skipped %d retried %d",
2479 MAJOR(__entry->dev), MINOR(__entry->dev), __entry->nr_shrunk,
2480 __entry->scan_time, __entry->skip_precached,
2481 __entry->nr_skipped, __entry->retried)
2482);
2483
2441#endif /* _TRACE_EXT4_H */ 2484#endif /* _TRACE_EXT4_H */
2442 2485
2443/* This part must be outside protection */ 2486/* This part must be outside protection */
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index 9b56f37148cf..e335e7d8c6c2 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -660,18 +660,18 @@ TRACE_EVENT(rcu_torture_read,
660/* 660/*
661 * Tracepoint for _rcu_barrier() execution. The string "s" describes 661 * Tracepoint for _rcu_barrier() execution. The string "s" describes
662 * the _rcu_barrier phase: 662 * the _rcu_barrier phase:
663 * "Begin": rcu_barrier_callback() started. 663 * "Begin": _rcu_barrier() started.
664 * "Check": rcu_barrier_callback() checking for piggybacking. 664 * "Check": _rcu_barrier() checking for piggybacking.
665 * "EarlyExit": rcu_barrier_callback() piggybacked, thus early exit. 665 * "EarlyExit": _rcu_barrier() piggybacked, thus early exit.
666 * "Inc1": rcu_barrier_callback() piggyback check counter incremented. 666 * "Inc1": _rcu_barrier() piggyback check counter incremented.
667 * "Offline": rcu_barrier_callback() found offline CPU 667 * "OfflineNoCB": _rcu_barrier() found callback on never-online CPU
668 * "OnlineNoCB": rcu_barrier_callback() found online no-CBs CPU. 668 * "OnlineNoCB": _rcu_barrier() found online no-CBs CPU.
669 * "OnlineQ": rcu_barrier_callback() found online CPU with callbacks. 669 * "OnlineQ": _rcu_barrier() found online CPU with callbacks.
670 * "OnlineNQ": rcu_barrier_callback() found online CPU, no callbacks. 670 * "OnlineNQ": _rcu_barrier() found online CPU, no callbacks.
671 * "IRQ": An rcu_barrier_callback() callback posted on remote CPU. 671 * "IRQ": An rcu_barrier_callback() callback posted on remote CPU.
672 * "CB": An rcu_barrier_callback() invoked a callback, not the last. 672 * "CB": An rcu_barrier_callback() invoked a callback, not the last.
673 * "LastCB": An rcu_barrier_callback() invoked the last callback. 673 * "LastCB": An rcu_barrier_callback() invoked the last callback.
674 * "Inc2": rcu_barrier_callback() piggyback check counter incremented. 674 * "Inc2": _rcu_barrier() piggyback check counter incremented.
675 * The "cpu" argument is the CPU or -1 if meaningless, the "cnt" argument 675 * The "cpu" argument is the CPU or -1 if meaningless, the "cnt" argument
676 * is the count of remaining callbacks, and "done" is the piggybacking count. 676 * is the count of remaining callbacks, and "done" is the piggybacking count.
677 */ 677 */
diff --git a/include/trace/events/thermal.h b/include/trace/events/thermal.h
new file mode 100644
index 000000000000..0f4f95d63c03
--- /dev/null
+++ b/include/trace/events/thermal.h
@@ -0,0 +1,83 @@
1#undef TRACE_SYSTEM
2#define TRACE_SYSTEM thermal
3
4#if !defined(_TRACE_THERMAL_H) || defined(TRACE_HEADER_MULTI_READ)
5#define _TRACE_THERMAL_H
6
7#include <linux/thermal.h>
8#include <linux/tracepoint.h>
9
10TRACE_EVENT(thermal_temperature,
11
12 TP_PROTO(struct thermal_zone_device *tz),
13
14 TP_ARGS(tz),
15
16 TP_STRUCT__entry(
17 __string(thermal_zone, tz->type)
18 __field(int, id)
19 __field(int, temp_prev)
20 __field(int, temp)
21 ),
22
23 TP_fast_assign(
24 __assign_str(thermal_zone, tz->type);
25 __entry->id = tz->id;
26 __entry->temp_prev = tz->last_temperature;
27 __entry->temp = tz->temperature;
28 ),
29
30 TP_printk("thermal_zone=%s id=%d temp_prev=%d temp=%d",
31 __get_str(thermal_zone), __entry->id, __entry->temp_prev,
32 __entry->temp)
33);
34
35TRACE_EVENT(cdev_update,
36
37 TP_PROTO(struct thermal_cooling_device *cdev, unsigned long target),
38
39 TP_ARGS(cdev, target),
40
41 TP_STRUCT__entry(
42 __string(type, cdev->type)
43 __field(unsigned long, target)
44 ),
45
46 TP_fast_assign(
47 __assign_str(type, cdev->type);
48 __entry->target = target;
49 ),
50
51 TP_printk("type=%s target=%lu", __get_str(type), __entry->target)
52);
53
54TRACE_EVENT(thermal_zone_trip,
55
56 TP_PROTO(struct thermal_zone_device *tz, int trip,
57 enum thermal_trip_type trip_type),
58
59 TP_ARGS(tz, trip, trip_type),
60
61 TP_STRUCT__entry(
62 __string(thermal_zone, tz->type)
63 __field(int, id)
64 __field(int, trip)
65 __field(enum thermal_trip_type, trip_type)
66 ),
67
68 TP_fast_assign(
69 __assign_str(thermal_zone, tz->type);
70 __entry->id = tz->id;
71 __entry->trip = trip;
72 __entry->trip_type = trip_type;
73 ),
74
75 TP_printk("thermal_zone=%s id=%d trip=%d trip_type=%d",
76 __get_str(thermal_zone), __entry->id, __entry->trip,
77 __entry->trip_type)
78);
79
80#endif /* _TRACE_THERMAL_H */
81
82/* This part must be outside protection */
83#include <trace/define_trace.h>
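Each TRACE_EVENT() above generates a trace_<name>() call for the thermal core to emit. A hedged sketch of where the first two would typically fire; the exact call sites live outside this header and are illustrative here:

	/* after a zone's temperature has been re-read */
	trace_thermal_temperature(tz);

	/* after a cooling device has been set to a new target state */
	trace_cdev_update(cdev, target);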
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 6cad97485bad..8523f9bb72f2 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -125,6 +125,7 @@ header-y += filter.h
125header-y += firewire-cdev.h 125header-y += firewire-cdev.h
126header-y += firewire-constants.h 126header-y += firewire-constants.h
127header-y += flat.h 127header-y += flat.h
128header-y += fou.h
128header-y += fs.h 129header-y += fs.h
129header-y += fsl_hypervisor.h 130header-y += fsl_hypervisor.h
130header-y += fuse.h 131header-y += fuse.h
@@ -141,6 +142,7 @@ header-y += hid.h
141header-y += hiddev.h 142header-y += hiddev.h
142header-y += hidraw.h 143header-y += hidraw.h
143header-y += hpet.h 144header-y += hpet.h
145header-y += hsr_netlink.h
144header-y += hyperv.h 146header-y += hyperv.h
145header-y += hysdn_if.h 147header-y += hysdn_if.h
146header-y += i2c-dev.h 148header-y += i2c-dev.h
@@ -251,6 +253,7 @@ header-y += mii.h
251header-y += minix_fs.h 253header-y += minix_fs.h
252header-y += mman.h 254header-y += mman.h
253header-y += mmtimer.h 255header-y += mmtimer.h
256header-y += mpls.h
254header-y += mqueue.h 257header-y += mqueue.h
255header-y += mroute.h 258header-y += mroute.h
256header-y += mroute6.h 259header-y += mroute6.h
@@ -374,6 +377,7 @@ header-y += swab.h
374header-y += synclink.h 377header-y += synclink.h
375header-y += sysctl.h 378header-y += sysctl.h
376header-y += sysinfo.h 379header-y += sysinfo.h
380header-y += target_core_user.h
377header-y += taskstats.h 381header-y += taskstats.h
378header-y += tcp.h 382header-y += tcp.h
379header-y += tcp_metrics.h 383header-y += tcp_metrics.h
@@ -423,6 +427,7 @@ header-y += virtio_net.h
423header-y += virtio_pci.h 427header-y += virtio_pci.h
424header-y += virtio_ring.h 428header-y += virtio_ring.h
425header-y += virtio_rng.h 429header-y += virtio_rng.h
430header-y += vm_sockets.h
426header-y += vt.h 431header-y += vt.h
427header-y += wait.h 432header-y += wait.h
428header-y += wanrouter.h 433header-y += wanrouter.h
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index ca1a11bb4443..3735fa0a6784 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -37,6 +37,7 @@
37 37
38#define RENAME_NOREPLACE (1 << 0) /* Don't overwrite target */ 38#define RENAME_NOREPLACE (1 << 0) /* Don't overwrite target */
39#define RENAME_EXCHANGE (1 << 1) /* Exchange source and dest */ 39#define RENAME_EXCHANGE (1 << 1) /* Exchange source and dest */
40#define RENAME_WHITEOUT (1 << 2) /* Whiteout source */
40 41
41struct fstrim_range { 42struct fstrim_range {
42 __u64 start; 43 __u64 start;
diff --git a/include/uapi/linux/if_bridge.h b/include/uapi/linux/if_bridge.h
index 39f621a9fe82..da17e456908d 100644
--- a/include/uapi/linux/if_bridge.h
+++ b/include/uapi/linux/if_bridge.h
@@ -15,6 +15,7 @@
15 15
16#include <linux/types.h> 16#include <linux/types.h>
17#include <linux/if_ether.h> 17#include <linux/if_ether.h>
18#include <linux/in6.h>
18 19
19#define SYSFS_BRIDGE_ATTR "bridge" 20#define SYSFS_BRIDGE_ATTR "bridge"
20#define SYSFS_BRIDGE_FDB "brforward" 21#define SYSFS_BRIDGE_FDB "brforward"
diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h
index 1874ebe9ac1e..a1d7e931ab72 100644
--- a/include/uapi/linux/input.h
+++ b/include/uapi/linux/input.h
@@ -739,6 +739,13 @@ struct input_keymap_entry {
739#define KEY_BRIGHTNESS_MIN 0x250 /* Set Brightness to Minimum */ 739#define KEY_BRIGHTNESS_MIN 0x250 /* Set Brightness to Minimum */
740#define KEY_BRIGHTNESS_MAX 0x251 /* Set Brightness to Maximum */ 740#define KEY_BRIGHTNESS_MAX 0x251 /* Set Brightness to Maximum */
741 741
742#define KEY_KBDINPUTASSIST_PREV 0x260
743#define KEY_KBDINPUTASSIST_NEXT 0x261
744#define KEY_KBDINPUTASSIST_PREVGROUP 0x262
745#define KEY_KBDINPUTASSIST_NEXTGROUP 0x263
746#define KEY_KBDINPUTASSIST_ACCEPT 0x264
747#define KEY_KBDINPUTASSIST_CANCEL 0x265
748
742#define BTN_TRIGGER_HAPPY 0x2c0 749#define BTN_TRIGGER_HAPPY 0x2c0
743#define BTN_TRIGGER_HAPPY1 0x2c0 750#define BTN_TRIGGER_HAPPY1 0x2c0
744#define BTN_TRIGGER_HAPPY2 0x2c1 751#define BTN_TRIGGER_HAPPY2 0x2c1
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index 9269de254874..9d845404d875 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -364,7 +364,7 @@ struct perf_event_mmap_page {
364 /* 364 /*
365 * Bits needed to read the hw events in user-space. 365 * Bits needed to read the hw events in user-space.
366 * 366 *
367 * u32 seq, time_mult, time_shift, idx, width; 367 * u32 seq, time_mult, time_shift, index, width;
368 * u64 count, enabled, running; 368 * u64 count, enabled, running;
369 * u64 cyc, time_offset; 369 * u64 cyc, time_offset;
370 * s64 pmc = 0; 370 * s64 pmc = 0;
@@ -383,11 +383,11 @@ struct perf_event_mmap_page {
383 * time_shift = pc->time_shift; 383 * time_shift = pc->time_shift;
384 * } 384 * }
385 * 385 *
386 * idx = pc->index; 386 * index = pc->index;
387 * count = pc->offset; 387 * count = pc->offset;
388 * if (pc->cap_usr_rdpmc && idx) { 388 * if (pc->cap_user_rdpmc && index) {
389 * width = pc->pmc_width; 389 * width = pc->pmc_width;
390 * pmc = rdpmc(idx - 1); 390 * pmc = rdpmc(index - 1);
391 * } 391 * }
392 * 392 *
393 * barrier(); 393 * barrier();
@@ -415,7 +415,7 @@ struct perf_event_mmap_page {
415 }; 415 };
416 416
417 /* 417 /*
418 * If cap_usr_rdpmc this field provides the bit-width of the value 418 * If cap_user_rdpmc this field provides the bit-width of the value
419 * read using the rdpmc() or equivalent instruction. This can be used 419 * read using the rdpmc() or equivalent instruction. This can be used
420 * to sign extend the result like: 420 * to sign extend the result like:
421 * 421 *
@@ -439,10 +439,10 @@ struct perf_event_mmap_page {
439 * 439 *
440 * Where time_offset,time_mult,time_shift and cyc are read in the 440 * Where time_offset,time_mult,time_shift and cyc are read in the
441 * seqcount loop described above. This delta can then be added to 441 * seqcount loop described above. This delta can then be added to
442 * enabled and possible running (if idx), improving the scaling: 442 * enabled and possible running (if index), improving the scaling:
443 * 443 *
444 * enabled += delta; 444 * enabled += delta;
445 * if (idx) 445 * if (index)
446 * running += delta; 446 * running += delta;
447 * 447 *
448 * quot = count / running; 448 * quot = count / running;
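The comment block above documents the user-space self-monitoring read sequence. A hedged, stand-alone x86 sketch of that seqlock-style loop, using only the fields documented in this header (rdpmc() is open-coded; time scaling is omitted for brevity):

	#include <stdint.h>
	#include <linux/perf_event.h>

	static inline uint64_t rdpmc(uint32_t counter)
	{
		uint32_t lo, hi;
		__asm__ __volatile__("rdpmc" : "=a"(lo), "=d"(hi) : "c"(counter));
		return lo | ((uint64_t)hi << 32);
	}

	/* pc points at the mmap()ed first page of a perf event fd */
	static uint64_t read_count(volatile struct perf_event_mmap_page *pc)
	{
		uint32_t seq, index, width;
		uint64_t count;
		int64_t pmc;

		do {
			seq = pc->lock;
			__sync_synchronize();			/* barrier() */

			index = pc->index;
			count = pc->offset;
			if (pc->cap_user_rdpmc && index) {
				width = pc->pmc_width;
				pmc = rdpmc(index - 1);
				pmc <<= 64 - width;		/* sign-extend as the */
				pmc >>= 64 - width;		/* header describes   */
				count += pmc;
			}

			__sync_synchronize();
		} while (pc->lock != seq);

		return count;
	}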
diff --git a/include/uapi/linux/sched.h b/include/uapi/linux/sched.h
index 34f9d7387d13..b932be9f5c5b 100644
--- a/include/uapi/linux/sched.h
+++ b/include/uapi/linux/sched.h
@@ -13,7 +13,7 @@
13#define CLONE_VFORK 0x00004000 /* set if the parent wants the child to wake it up on mm_release */ 13#define CLONE_VFORK 0x00004000 /* set if the parent wants the child to wake it up on mm_release */
14#define CLONE_PARENT 0x00008000 /* set if we want to have the same parent as the cloner */ 14#define CLONE_PARENT 0x00008000 /* set if we want to have the same parent as the cloner */
15#define CLONE_THREAD 0x00010000 /* Same thread group? */ 15#define CLONE_THREAD 0x00010000 /* Same thread group? */
16#define CLONE_NEWNS 0x00020000 /* New namespace group? */ 16#define CLONE_NEWNS 0x00020000 /* New mount namespace group */
17#define CLONE_SYSVSEM 0x00040000 /* share system V SEM_UNDO semantics */ 17#define CLONE_SYSVSEM 0x00040000 /* share system V SEM_UNDO semantics */
18#define CLONE_SETTLS 0x00080000 /* create a new TLS for the child */ 18#define CLONE_SETTLS 0x00080000 /* create a new TLS for the child */
19#define CLONE_PARENT_SETTID 0x00100000 /* set the TID in the parent */ 19#define CLONE_PARENT_SETTID 0x00100000 /* set the TID in the parent */
diff --git a/include/uapi/linux/target_core_user.h b/include/uapi/linux/target_core_user.h
new file mode 100644
index 000000000000..7dcfbe6771b1
--- /dev/null
+++ b/include/uapi/linux/target_core_user.h
@@ -0,0 +1,142 @@
1#ifndef __TARGET_CORE_USER_H
2#define __TARGET_CORE_USER_H
3
 4/* This header will be used by applications too */
5
6#include <linux/types.h>
7#include <linux/uio.h>
8
9#ifndef __packed
10#define __packed __attribute__((packed))
11#endif
12
13#define TCMU_VERSION "1.0"
14
15/*
16 * Ring Design
17 * -----------
18 *
19 * The mmaped area is divided into three parts:
20 * 1) The mailbox (struct tcmu_mailbox, below)
21 * 2) The command ring
22 * 3) Everything beyond the command ring (data)
23 *
24 * The mailbox tells userspace the offset of the command ring from the
25 * start of the shared memory region, and how big the command ring is.
26 *
27 * The kernel passes SCSI commands to userspace by putting a struct
28 * tcmu_cmd_entry in the ring, updating mailbox->cmd_head, and poking
29 * userspace via uio's interrupt mechanism.
30 *
31 * tcmu_cmd_entry contains a header. If the header type is PAD,
32 * userspace should skip hdr->length bytes (mod cmdr_size) to find the
33 * next cmd_entry.
34 *
35 * Otherwise, the entry will contain offsets into the mmaped area that
36 * contain the cdb and data buffers -- the latter accessible via the
37 * iov array. iov addresses are also offsets into the shared area.
38 *
 39 * When userspace has completed handling the command, set
40 * entry->rsp.scsi_status, fill in rsp.sense_buffer if appropriate,
41 * and also set mailbox->cmd_tail equal to the old cmd_tail plus
42 * hdr->length, mod cmdr_size. If cmd_tail doesn't equal cmd_head, it
43 * should process the next packet the same way, and so on.
44 */
45
46#define TCMU_MAILBOX_VERSION 1
47#define ALIGN_SIZE 64 /* Should be enough for most CPUs */
48
49struct tcmu_mailbox {
50 __u16 version;
51 __u16 flags;
52 __u32 cmdr_off;
53 __u32 cmdr_size;
54
55 __u32 cmd_head;
56
57 /* Updated by user. On its own cacheline */
58 __u32 cmd_tail __attribute__((__aligned__(ALIGN_SIZE)));
59
60} __packed;
61
62enum tcmu_opcode {
63 TCMU_OP_PAD = 0,
64 TCMU_OP_CMD,
65};
66
67/*
68 * Only a few opcodes, and length is 8-byte aligned, so use low bits for opcode.
69 */
70struct tcmu_cmd_entry_hdr {
71 __u32 len_op;
72} __packed;
73
74#define TCMU_OP_MASK 0x7
75
76static inline enum tcmu_opcode tcmu_hdr_get_op(struct tcmu_cmd_entry_hdr *hdr)
77{
78 return hdr->len_op & TCMU_OP_MASK;
79}
80
81static inline void tcmu_hdr_set_op(struct tcmu_cmd_entry_hdr *hdr, enum tcmu_opcode op)
82{
83 hdr->len_op &= ~TCMU_OP_MASK;
84 hdr->len_op |= (op & TCMU_OP_MASK);
85}
86
87static inline __u32 tcmu_hdr_get_len(struct tcmu_cmd_entry_hdr *hdr)
88{
89 return hdr->len_op & ~TCMU_OP_MASK;
90}
91
92static inline void tcmu_hdr_set_len(struct tcmu_cmd_entry_hdr *hdr, __u32 len)
93{
94 hdr->len_op &= TCMU_OP_MASK;
95 hdr->len_op |= len;
96}
97
98/* Currently the same as SCSI_SENSE_BUFFERSIZE */
99#define TCMU_SENSE_BUFFERSIZE 96
100
101struct tcmu_cmd_entry {
102 struct tcmu_cmd_entry_hdr hdr;
103
104 uint16_t cmd_id;
105 uint16_t __pad1;
106
107 union {
108 struct {
109 uint64_t cdb_off;
110 uint64_t iov_cnt;
111 struct iovec iov[0];
112 } req;
113 struct {
114 uint8_t scsi_status;
115 uint8_t __pad1;
116 uint16_t __pad2;
117 uint32_t __pad3;
118 char sense_buffer[TCMU_SENSE_BUFFERSIZE];
119 } rsp;
120 };
121
122} __packed;
123
124#define TCMU_OP_ALIGN_SIZE sizeof(uint64_t)
125
126enum tcmu_genl_cmd {
127 TCMU_CMD_UNSPEC,
128 TCMU_CMD_ADDED_DEVICE,
129 TCMU_CMD_REMOVED_DEVICE,
130 __TCMU_CMD_MAX,
131};
132#define TCMU_CMD_MAX (__TCMU_CMD_MAX - 1)
133
134enum tcmu_genl_attr {
135 TCMU_ATTR_UNSPEC,
136 TCMU_ATTR_DEVICE,
137 TCMU_ATTR_MINOR,
138 __TCMU_ATTR_MAX,
139};
140#define TCMU_ATTR_MAX (__TCMU_ATTR_MAX - 1)
141
142#endif
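
A rough userspace sketch of the ring-processing loop the "Ring Design" comment describes. mmap_base (the uio-mapped shared region) and handle_scsi_cmd() are assumptions of this sketch, not part of the header; a real consumer would also copy the request fields out before filling in the response, since req and rsp share the same storage.

#include <linux/target_core_user.h>

/* Hypothetical application callback; returns a SCSI status byte. */
extern __u8 handle_scsi_cmd(void *cdb, struct iovec *iov, __u64 iov_cnt,
                            void *base);

/* Walk the command ring once, as described in the Ring Design comment. */
static void process_ring(void *mmap_base)
{
        struct tcmu_mailbox *mb = mmap_base;
        char *cmdr = (char *)mmap_base + mb->cmdr_off;

        while (mb->cmd_tail != mb->cmd_head) {
                struct tcmu_cmd_entry *ent = (void *)(cmdr + mb->cmd_tail);
                __u32 len = tcmu_hdr_get_len(&ent->hdr);

                if (tcmu_hdr_get_op(&ent->hdr) == TCMU_OP_CMD) {
                        /* cdb and iov entries are offsets into mmap_base */
                        void *cdb = (char *)mmap_base + ent->req.cdb_off;

                        ent->rsp.scsi_status = handle_scsi_cmd(cdb,
                                        ent->req.iov, ent->req.iov_cnt,
                                        mmap_base);
                }
                /* TCMU_OP_PAD entries are simply skipped over */

                mb->cmd_tail = (mb->cmd_tail + len) % mb->cmdr_size;
        }
        /* The driver also expects a write to the uio fd to signal
         * completion back to the kernel (omitted here). */
}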
diff --git a/include/uapi/linux/v4l2-dv-timings.h b/include/uapi/linux/v4l2-dv-timings.h
index 6a0764c89fcb..6c8f159e416e 100644
--- a/include/uapi/linux/v4l2-dv-timings.h
+++ b/include/uapi/linux/v4l2-dv-timings.h
@@ -21,8 +21,17 @@
21#ifndef _V4L2_DV_TIMINGS_H 21#ifndef _V4L2_DV_TIMINGS_H
22#define _V4L2_DV_TIMINGS_H 22#define _V4L2_DV_TIMINGS_H
23 23
24#if __GNUC__ < 4 || (__GNUC__ == 4 && (__GNUC_MINOR__ < 6))
25/* Sadly gcc versions older than 4.6 have a bug in how they initialize
26 anonymous unions where they require additional curly brackets.
27 This violates the C1x standard. This workaround adds the curly brackets
28 if needed. */
24#define V4L2_INIT_BT_TIMINGS(_width, args...) \ 29#define V4L2_INIT_BT_TIMINGS(_width, args...) \
25 { .bt = { _width , ## args } } 30 { .bt = { _width , ## args } }
31#else
32#define V4L2_INIT_BT_TIMINGS(_width, args...) \
33 .bt = { _width , ## args }
34#endif
26 35
27/* CEA-861-E timings (i.e. standard HDTV timings) */ 36/* CEA-861-E timings (i.e. standard HDTV timings) */
28 37
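
An illustration of what the two expansions look like in practice, using a made-up timing preset in the style of the CEA-861 macros defined later in this header; only the placement of the extra braces differs.

/* Illustrative only: how V4L2_INIT_BT_TIMINGS expands inside a preset. */
#define EXAMPLE_TIMING { \
        .type = V4L2_DV_BT_656_1120, \
        V4L2_INIT_BT_TIMINGS(1920, 1080, 0 /* , ... remaining bt fields */) \
}

/* gcc >= 4.6:          { .type = ..., .bt = { 1920, 1080, 0, ... } }     */
/* gcc < 4.6 workaround: { .type = ..., { .bt = { 1920, 1080, 0, ... } } } */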
diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h
index 6ee586728df9..941d32f007dc 100644
--- a/include/uapi/sound/asound.h
+++ b/include/uapi/sound/asound.h
@@ -220,7 +220,9 @@ typedef int __bitwise snd_pcm_format_t;
220#define SNDRV_PCM_FORMAT_DSD_U8 ((__force snd_pcm_format_t) 48) /* DSD, 1-byte samples DSD (x8) */ 220#define SNDRV_PCM_FORMAT_DSD_U8 ((__force snd_pcm_format_t) 48) /* DSD, 1-byte samples DSD (x8) */
221#define SNDRV_PCM_FORMAT_DSD_U16_LE ((__force snd_pcm_format_t) 49) /* DSD, 2-byte samples DSD (x16), little endian */ 221#define SNDRV_PCM_FORMAT_DSD_U16_LE ((__force snd_pcm_format_t) 49) /* DSD, 2-byte samples DSD (x16), little endian */
222#define SNDRV_PCM_FORMAT_DSD_U32_LE ((__force snd_pcm_format_t) 50) /* DSD, 4-byte samples DSD (x32), little endian */ 222#define SNDRV_PCM_FORMAT_DSD_U32_LE ((__force snd_pcm_format_t) 50) /* DSD, 4-byte samples DSD (x32), little endian */
223#define SNDRV_PCM_FORMAT_LAST SNDRV_PCM_FORMAT_DSD_U32_LE 223#define SNDRV_PCM_FORMAT_DSD_U16_BE ((__force snd_pcm_format_t) 51) /* DSD, 2-byte samples DSD (x16), big endian */
224#define SNDRV_PCM_FORMAT_DSD_U32_BE ((__force snd_pcm_format_t) 52) /* DSD, 4-byte samples DSD (x32), big endian */
225#define SNDRV_PCM_FORMAT_LAST SNDRV_PCM_FORMAT_DSD_U32_BE
224 226
225#ifdef SNDRV_LITTLE_ENDIAN 227#ifdef SNDRV_LITTLE_ENDIAN
226#define SNDRV_PCM_FORMAT_S16 SNDRV_PCM_FORMAT_S16_LE 228#define SNDRV_PCM_FORMAT_S16 SNDRV_PCM_FORMAT_S16_LE
diff --git a/init/Kconfig b/init/Kconfig
index 3ee28ae02cc8..2081a4d3d917 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1341,6 +1341,10 @@ config SYSCTL_ARCH_UNALIGN_ALLOW
1341config HAVE_PCSPKR_PLATFORM 1341config HAVE_PCSPKR_PLATFORM
1342 bool 1342 bool
1343 1343
1344# interpreter that classic socket filters depend on
1345config BPF
1346 bool
1347
1344menuconfig EXPERT 1348menuconfig EXPERT
1345 bool "Configure standard kernel features (expert users)" 1349 bool "Configure standard kernel features (expert users)"
1346 # Unhide debug options, to make the on-by-default options visible 1350 # Unhide debug options, to make the on-by-default options visible
@@ -1521,6 +1525,16 @@ config EVENTFD
1521 1525
1522 If unsure, say Y. 1526 If unsure, say Y.
1523 1527
1528# syscall, maps, verifier
1529config BPF_SYSCALL
1530 bool "Enable bpf() system call" if EXPERT
1531 select ANON_INODES
1532 select BPF
1533 default n
1534 help
1535 Enable the bpf() system call that allows to manipulate eBPF
1536 programs and maps via file descriptors.
1537
1524config SHMEM 1538config SHMEM
1525 bool "Use full shmem filesystem" if EXPERT 1539 bool "Use full shmem filesystem" if EXPERT
1526 default y 1540 default y
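
A minimal sketch of exercising the new bpf() system call from userspace via syscall(2). The map type used is an assumption (concrete map types such as BPF_MAP_TYPE_HASH land in related patches), so treat this as a shape of the call rather than something guaranteed to succeed on every kernel with BPF_SYSCALL enabled.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

int main(void)
{
        union bpf_attr attr;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.map_type    = BPF_MAP_TYPE_HASH;   /* assumed available */
        attr.key_size    = sizeof(int);
        attr.value_size  = sizeof(long long);
        attr.max_entries = 64;

        /* BPF_MAP_CREATE returns a new map file descriptor on success */
        fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
        if (fd < 0) {
                perror("bpf(BPF_MAP_CREATE)");
                return 1;
        }
        printf("map fd = %d\n", fd);
        close(fd);
        return 0;
}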
diff --git a/init/main.c b/init/main.c
index 800a0daede7e..321d0ceb26d3 100644
--- a/init/main.c
+++ b/init/main.c
@@ -544,7 +544,7 @@ asmlinkage __visible void __init start_kernel(void)
544 static_command_line, __start___param, 544 static_command_line, __start___param,
545 __stop___param - __start___param, 545 __stop___param - __start___param,
546 -1, -1, &unknown_bootoption); 546 -1, -1, &unknown_bootoption);
547 if (after_dashes) 547 if (!IS_ERR_OR_NULL(after_dashes))
548 parse_args("Setting init args", after_dashes, NULL, 0, -1, -1, 548 parse_args("Setting init args", after_dashes, NULL, 0, -1, -1,
549 set_init_arg); 549 set_init_arg);
550 550
diff --git a/ipc/sem.c b/ipc/sem.c
index 454f6c6020a8..53c3310f41c6 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -507,13 +507,6 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
507 return retval; 507 return retval;
508 } 508 }
509 509
510 id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
511 if (id < 0) {
512 ipc_rcu_putref(sma, sem_rcu_free);
513 return id;
514 }
515 ns->used_sems += nsems;
516
517 sma->sem_base = (struct sem *) &sma[1]; 510 sma->sem_base = (struct sem *) &sma[1];
518 511
519 for (i = 0; i < nsems; i++) { 512 for (i = 0; i < nsems; i++) {
@@ -528,6 +521,14 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
528 INIT_LIST_HEAD(&sma->list_id); 521 INIT_LIST_HEAD(&sma->list_id);
529 sma->sem_nsems = nsems; 522 sma->sem_nsems = nsems;
530 sma->sem_ctime = get_seconds(); 523 sma->sem_ctime = get_seconds();
524
525 id = ipc_addid(&sem_ids(ns), &sma->sem_perm, ns->sc_semmni);
526 if (id < 0) {
527 ipc_rcu_putref(sma, sem_rcu_free);
528 return id;
529 }
530 ns->used_sems += nsems;
531
531 sem_unlock(sma, -1); 532 sem_unlock(sma, -1);
532 rcu_read_unlock(); 533 rcu_read_unlock();
533 534
diff --git a/kernel/Makefile b/kernel/Makefile
index dc5c77544fd6..17ea6d4a9a24 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -86,7 +86,7 @@ obj-$(CONFIG_RING_BUFFER) += trace/
86obj-$(CONFIG_TRACEPOINTS) += trace/ 86obj-$(CONFIG_TRACEPOINTS) += trace/
87obj-$(CONFIG_IRQ_WORK) += irq_work.o 87obj-$(CONFIG_IRQ_WORK) += irq_work.o
88obj-$(CONFIG_CPU_PM) += cpu_pm.o 88obj-$(CONFIG_CPU_PM) += cpu_pm.o
89obj-$(CONFIG_NET) += bpf/ 89obj-$(CONFIG_BPF) += bpf/
90 90
91obj-$(CONFIG_PERF_EVENTS) += events/ 91obj-$(CONFIG_PERF_EVENTS) += events/
92 92
diff --git a/kernel/audit.c b/kernel/audit.c
index 80983df92cd4..cebb11db4d34 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -739,7 +739,7 @@ static void audit_log_feature_change(int which, u32 old_feature, u32 new_feature
739 739
740 ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_FEATURE_CHANGE); 740 ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_FEATURE_CHANGE);
741 audit_log_task_info(ab, current); 741 audit_log_task_info(ab, current);
742 audit_log_format(ab, "feature=%s old=%u new=%u old_lock=%u new_lock=%u res=%d", 742 audit_log_format(ab, " feature=%s old=%u new=%u old_lock=%u new_lock=%u res=%d",
743 audit_feature_names[which], !!old_feature, !!new_feature, 743 audit_feature_names[which], !!old_feature, !!new_feature,
744 !!old_lock, !!new_lock, res); 744 !!old_lock, !!new_lock, res);
745 audit_log_end(ab); 745 audit_log_end(ab);
diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c
index e242e3a9864a..80f29e015570 100644
--- a/kernel/audit_tree.c
+++ b/kernel/audit_tree.c
@@ -154,6 +154,7 @@ static struct audit_chunk *alloc_chunk(int count)
154 chunk->owners[i].index = i; 154 chunk->owners[i].index = i;
155 } 155 }
156 fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch); 156 fsnotify_init_mark(&chunk->mark, audit_tree_destroy_watch);
157 chunk->mark.mask = FS_IN_IGNORED;
157 return chunk; 158 return chunk;
158} 159}
159 160
diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
index 45427239f375..0daf7f6ae7df 100644
--- a/kernel/bpf/Makefile
+++ b/kernel/bpf/Makefile
@@ -1,5 +1,5 @@
1obj-y := core.o syscall.o verifier.o 1obj-y := core.o
2 2obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o
3ifdef CONFIG_TEST_BPF 3ifdef CONFIG_TEST_BPF
4obj-y += test_stub.o 4obj-$(CONFIG_BPF_SYSCALL) += test_stub.o
5endif 5endif
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index f0c30c59b317..d6594e457a25 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -655,3 +655,12 @@ void bpf_prog_free(struct bpf_prog *fp)
655 schedule_work(&aux->work); 655 schedule_work(&aux->work);
656} 656}
657EXPORT_SYMBOL_GPL(bpf_prog_free); 657EXPORT_SYMBOL_GPL(bpf_prog_free);
658
659/* To execute LD_ABS/LD_IND instructions __bpf_prog_run() may call
660 * skb_copy_bits(), so provide a weak definition of it for NET-less config.
661 */
662int __weak skb_copy_bits(const struct sk_buff *skb, int offset, void *to,
663 int len)
664{
665 return -EFAULT;
666}
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 801f5f3b9307..9f81818f2941 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -1409,7 +1409,8 @@ static bool states_equal(struct verifier_state *old, struct verifier_state *cur)
1409 if (memcmp(&old->regs[i], &cur->regs[i], 1409 if (memcmp(&old->regs[i], &cur->regs[i],
1410 sizeof(old->regs[0])) != 0) { 1410 sizeof(old->regs[0])) != 0) {
1411 if (old->regs[i].type == NOT_INIT || 1411 if (old->regs[i].type == NOT_INIT ||
1412 old->regs[i].type == UNKNOWN_VALUE) 1412 (old->regs[i].type == UNKNOWN_VALUE &&
1413 cur->regs[i].type != NOT_INIT))
1413 continue; 1414 continue;
1414 return false; 1415 return false;
1415 } 1416 }
diff --git a/kernel/context_tracking.c b/kernel/context_tracking.c
index 5664985c46a0..937ecdfdf258 100644
--- a/kernel/context_tracking.c
+++ b/kernel/context_tracking.c
@@ -107,46 +107,6 @@ void context_tracking_user_enter(void)
107} 107}
108NOKPROBE_SYMBOL(context_tracking_user_enter); 108NOKPROBE_SYMBOL(context_tracking_user_enter);
109 109
110#ifdef CONFIG_PREEMPT
111/**
112 * preempt_schedule_context - preempt_schedule called by tracing
113 *
114 * The tracing infrastructure uses preempt_enable_notrace to prevent
115 * recursion and tracing preempt enabling caused by the tracing
116 * infrastructure itself. But as tracing can happen in areas coming
117 * from userspace or just about to enter userspace, a preempt enable
118 * can occur before user_exit() is called. This will cause the scheduler
119 * to be called when the system is still in usermode.
120 *
121 * To prevent this, the preempt_enable_notrace will use this function
122 * instead of preempt_schedule() to exit user context if needed before
123 * calling the scheduler.
124 */
125asmlinkage __visible void __sched notrace preempt_schedule_context(void)
126{
127 enum ctx_state prev_ctx;
128
129 if (likely(!preemptible()))
130 return;
131
132 /*
133 * Need to disable preemption in case user_exit() is traced
134 * and the tracer calls preempt_enable_notrace() causing
135 * an infinite recursion.
136 */
137 preempt_disable_notrace();
138 prev_ctx = exception_enter();
139 preempt_enable_no_resched_notrace();
140
141 preempt_schedule();
142
143 preempt_disable_notrace();
144 exception_exit(prev_ctx);
145 preempt_enable_notrace();
146}
147EXPORT_SYMBOL_GPL(preempt_schedule_context);
148#endif /* CONFIG_PREEMPT */
149
150/** 110/**
151 * context_tracking_user_exit - Inform the context tracking that the CPU is 111 * context_tracking_user_exit - Inform the context tracking that the CPU is
152 * exiting userspace mode and entering the kernel. 112 * exiting userspace mode and entering the kernel.
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 356450f09c1f..90a3d017b90c 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -64,6 +64,8 @@ static struct {
64 * an ongoing cpu hotplug operation. 64 * an ongoing cpu hotplug operation.
65 */ 65 */
66 int refcount; 66 int refcount;
67 /* And allows lockless put_online_cpus(). */
68 atomic_t puts_pending;
67 69
68#ifdef CONFIG_DEBUG_LOCK_ALLOC 70#ifdef CONFIG_DEBUG_LOCK_ALLOC
69 struct lockdep_map dep_map; 71 struct lockdep_map dep_map;
@@ -113,7 +115,11 @@ void put_online_cpus(void)
113{ 115{
114 if (cpu_hotplug.active_writer == current) 116 if (cpu_hotplug.active_writer == current)
115 return; 117 return;
116 mutex_lock(&cpu_hotplug.lock); 118 if (!mutex_trylock(&cpu_hotplug.lock)) {
119 atomic_inc(&cpu_hotplug.puts_pending);
120 cpuhp_lock_release();
121 return;
122 }
117 123
118 if (WARN_ON(!cpu_hotplug.refcount)) 124 if (WARN_ON(!cpu_hotplug.refcount))
119 cpu_hotplug.refcount++; /* try to fix things up */ 125 cpu_hotplug.refcount++; /* try to fix things up */
@@ -155,6 +161,12 @@ void cpu_hotplug_begin(void)
155 cpuhp_lock_acquire(); 161 cpuhp_lock_acquire();
156 for (;;) { 162 for (;;) {
157 mutex_lock(&cpu_hotplug.lock); 163 mutex_lock(&cpu_hotplug.lock);
164 if (atomic_read(&cpu_hotplug.puts_pending)) {
165 int delta;
166
167 delta = atomic_xchg(&cpu_hotplug.puts_pending, 0);
168 cpu_hotplug.refcount -= delta;
169 }
158 if (likely(!cpu_hotplug.refcount)) 170 if (likely(!cpu_hotplug.refcount))
159 break; 171 break;
160 __set_current_state(TASK_UNINTERRUPTIBLE); 172 __set_current_state(TASK_UNINTERRUPTIBLE);
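
A reduced sketch of the pattern the cpu.c hunks above introduce: a reader that cannot take the lock records its "put" in an atomic counter instead of blocking, and the writer folds the deferred puts back into the refcount under the lock. All names here are made up for illustration.

#include <linux/mutex.h>
#include <linux/atomic.h>

struct ref_gate {
        struct mutex lock;
        int refcount;              /* protected by lock */
        atomic_t puts_pending;     /* lockless deferred decrements */
};

static void gate_put(struct ref_gate *g)
{
        if (!mutex_trylock(&g->lock)) {
                atomic_inc(&g->puts_pending);   /* defer, avoid blocking */
                return;
        }
        g->refcount--;
        mutex_unlock(&g->lock);
}

static void gate_writer_sync(struct ref_gate *g)
{
        mutex_lock(&g->lock);
        g->refcount -= atomic_xchg(&g->puts_pending, 0);
        /* ... then wait for refcount to reach zero, as cpu_hotplug_begin()
         * does in the hunk above ... */
        mutex_unlock(&g->lock);
}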
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 1425d07018de..1cd5eef1fcdd 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1562,8 +1562,10 @@ static void perf_remove_from_context(struct perf_event *event, bool detach_group
1562 1562
1563 if (!task) { 1563 if (!task) {
1564 /* 1564 /*
1565 * Per cpu events are removed via an smp call and 1565 * Per cpu events are removed via an smp call. The removal can
1566 * the removal is always successful. 1566 * fail if the CPU is currently offline, but in that case we
1567 * already called __perf_remove_from_context from
1568 * perf_event_exit_cpu.
1567 */ 1569 */
1568 cpu_function_call(event->cpu, __perf_remove_from_context, &re); 1570 cpu_function_call(event->cpu, __perf_remove_from_context, &re);
1569 return; 1571 return;
@@ -6071,11 +6073,6 @@ static int perf_swevent_init(struct perf_event *event)
6071 return 0; 6073 return 0;
6072} 6074}
6073 6075
6074static int perf_swevent_event_idx(struct perf_event *event)
6075{
6076 return 0;
6077}
6078
6079static struct pmu perf_swevent = { 6076static struct pmu perf_swevent = {
6080 .task_ctx_nr = perf_sw_context, 6077 .task_ctx_nr = perf_sw_context,
6081 6078
@@ -6085,8 +6082,6 @@ static struct pmu perf_swevent = {
6085 .start = perf_swevent_start, 6082 .start = perf_swevent_start,
6086 .stop = perf_swevent_stop, 6083 .stop = perf_swevent_stop,
6087 .read = perf_swevent_read, 6084 .read = perf_swevent_read,
6088
6089 .event_idx = perf_swevent_event_idx,
6090}; 6085};
6091 6086
6092#ifdef CONFIG_EVENT_TRACING 6087#ifdef CONFIG_EVENT_TRACING
@@ -6204,8 +6199,6 @@ static struct pmu perf_tracepoint = {
6204 .start = perf_swevent_start, 6199 .start = perf_swevent_start,
6205 .stop = perf_swevent_stop, 6200 .stop = perf_swevent_stop,
6206 .read = perf_swevent_read, 6201 .read = perf_swevent_read,
6207
6208 .event_idx = perf_swevent_event_idx,
6209}; 6202};
6210 6203
6211static inline void perf_tp_register(void) 6204static inline void perf_tp_register(void)
@@ -6431,8 +6424,6 @@ static struct pmu perf_cpu_clock = {
6431 .start = cpu_clock_event_start, 6424 .start = cpu_clock_event_start,
6432 .stop = cpu_clock_event_stop, 6425 .stop = cpu_clock_event_stop,
6433 .read = cpu_clock_event_read, 6426 .read = cpu_clock_event_read,
6434
6435 .event_idx = perf_swevent_event_idx,
6436}; 6427};
6437 6428
6438/* 6429/*
@@ -6511,8 +6502,6 @@ static struct pmu perf_task_clock = {
6511 .start = task_clock_event_start, 6502 .start = task_clock_event_start,
6512 .stop = task_clock_event_stop, 6503 .stop = task_clock_event_stop,
6513 .read = task_clock_event_read, 6504 .read = task_clock_event_read,
6514
6515 .event_idx = perf_swevent_event_idx,
6516}; 6505};
6517 6506
6518static void perf_pmu_nop_void(struct pmu *pmu) 6507static void perf_pmu_nop_void(struct pmu *pmu)
@@ -6542,7 +6531,7 @@ static void perf_pmu_cancel_txn(struct pmu *pmu)
6542 6531
6543static int perf_event_idx_default(struct perf_event *event) 6532static int perf_event_idx_default(struct perf_event *event)
6544{ 6533{
6545 return event->hw.idx + 1; 6534 return 0;
6546} 6535}
6547 6536
6548/* 6537/*
@@ -8130,7 +8119,7 @@ static void perf_pmu_rotate_stop(struct pmu *pmu)
8130 8119
8131static void __perf_event_exit_context(void *__info) 8120static void __perf_event_exit_context(void *__info)
8132{ 8121{
8133 struct remove_event re = { .detach_group = false }; 8122 struct remove_event re = { .detach_group = true };
8134 struct perf_event_context *ctx = __info; 8123 struct perf_event_context *ctx = __info;
8135 8124
8136 perf_pmu_rotate_stop(ctx->pmu); 8125 perf_pmu_rotate_stop(ctx->pmu);
diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c
index 1559fb0b9296..9803a6600d49 100644
--- a/kernel/events/hw_breakpoint.c
+++ b/kernel/events/hw_breakpoint.c
@@ -605,11 +605,6 @@ static void hw_breakpoint_stop(struct perf_event *bp, int flags)
605 bp->hw.state = PERF_HES_STOPPED; 605 bp->hw.state = PERF_HES_STOPPED;
606} 606}
607 607
608static int hw_breakpoint_event_idx(struct perf_event *bp)
609{
610 return 0;
611}
612
613static struct pmu perf_breakpoint = { 608static struct pmu perf_breakpoint = {
614 .task_ctx_nr = perf_sw_context, /* could eventually get its own */ 609 .task_ctx_nr = perf_sw_context, /* could eventually get its own */
615 610
@@ -619,8 +614,6 @@ static struct pmu perf_breakpoint = {
619 .start = hw_breakpoint_start, 614 .start = hw_breakpoint_start,
620 .stop = hw_breakpoint_stop, 615 .stop = hw_breakpoint_stop,
621 .read = hw_breakpoint_pmu_read, 616 .read = hw_breakpoint_pmu_read,
622
623 .event_idx = hw_breakpoint_event_idx,
624}; 617};
625 618
626int __init init_hw_breakpoint(void) 619int __init init_hw_breakpoint(void)
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 1d0af8a2c646..ed8f2cde34c5 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -1640,7 +1640,6 @@ bool uprobe_deny_signal(void)
1640 if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) { 1640 if (__fatal_signal_pending(t) || arch_uprobe_xol_was_trapped(t)) {
1641 utask->state = UTASK_SSTEP_TRAPPED; 1641 utask->state = UTASK_SSTEP_TRAPPED;
1642 set_tsk_thread_flag(t, TIF_UPROBE); 1642 set_tsk_thread_flag(t, TIF_UPROBE);
1643 set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
1644 } 1643 }
1645 } 1644 }
1646 1645
diff --git a/kernel/freezer.c b/kernel/freezer.c
index aa6a8aadb911..a8900a3bc27a 100644
--- a/kernel/freezer.c
+++ b/kernel/freezer.c
@@ -42,6 +42,9 @@ bool freezing_slow_path(struct task_struct *p)
42 if (p->flags & (PF_NOFREEZE | PF_SUSPEND_TASK)) 42 if (p->flags & (PF_NOFREEZE | PF_SUSPEND_TASK))
43 return false; 43 return false;
44 44
45 if (test_thread_flag(TIF_MEMDIE))
46 return false;
47
45 if (pm_nosig_freezing || cgroup_freezing(p)) 48 if (pm_nosig_freezing || cgroup_freezing(p))
46 return true; 49 return true;
47 50
@@ -147,12 +150,6 @@ void __thaw_task(struct task_struct *p)
147{ 150{
148 unsigned long flags; 151 unsigned long flags;
149 152
150 /*
151 * Clear freezing and kick @p if FROZEN. Clearing is guaranteed to
152 * be visible to @p as waking up implies wmb. Waking up inside
153 * freezer_lock also prevents wakeups from leaking outside
154 * refrigerator.
155 */
156 spin_lock_irqsave(&freezer_lock, flags); 153 spin_lock_irqsave(&freezer_lock, flags);
157 if (frozen(p)) 154 if (frozen(p))
158 wake_up_process(p); 155 wake_up_process(p);
diff --git a/kernel/futex.c b/kernel/futex.c
index f3a3a071283c..63678b573d61 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -143,9 +143,8 @@
143 * 143 *
144 * Where (A) orders the waiters increment and the futex value read through 144 * Where (A) orders the waiters increment and the futex value read through
145 * atomic operations (see hb_waiters_inc) and where (B) orders the write 145 * atomic operations (see hb_waiters_inc) and where (B) orders the write
146 * to futex and the waiters read -- this is done by the barriers in 146 * to futex and the waiters read -- this is done by the barriers for both
147 * get_futex_key_refs(), through either ihold or atomic_inc, depending on the 147 * shared and private futexes in get_futex_key_refs().
148 * futex type.
149 * 148 *
150 * This yields the following case (where X:=waiters, Y:=futex): 149 * This yields the following case (where X:=waiters, Y:=futex):
151 * 150 *
@@ -344,13 +343,20 @@ static void get_futex_key_refs(union futex_key *key)
344 futex_get_mm(key); /* implies MB (B) */ 343 futex_get_mm(key); /* implies MB (B) */
345 break; 344 break;
346 default: 345 default:
346 /*
347 * Private futexes do not hold reference on an inode or
348 * mm, therefore the only purpose of calling get_futex_key_refs
349 * is because we need the barrier for the lockless waiter check.
350 */
347 smp_mb(); /* explicit MB (B) */ 351 smp_mb(); /* explicit MB (B) */
348 } 352 }
349} 353}
350 354
351/* 355/*
352 * Drop a reference to the resource addressed by a key. 356 * Drop a reference to the resource addressed by a key.
353 * The hash bucket spinlock must not be held. 357 * The hash bucket spinlock must not be held. This is
358 * a no-op for private futexes, see comment in the get
359 * counterpart.
354 */ 360 */
355static void drop_futex_key_refs(union futex_key *key) 361static void drop_futex_key_refs(union futex_key *key)
356{ 362{
@@ -641,8 +647,14 @@ static struct futex_pi_state * alloc_pi_state(void)
641 return pi_state; 647 return pi_state;
642} 648}
643 649
650/*
651 * Must be called with the hb lock held.
652 */
644static void free_pi_state(struct futex_pi_state *pi_state) 653static void free_pi_state(struct futex_pi_state *pi_state)
645{ 654{
655 if (!pi_state)
656 return;
657
646 if (!atomic_dec_and_test(&pi_state->refcount)) 658 if (!atomic_dec_and_test(&pi_state->refcount))
647 return; 659 return;
648 660
@@ -1521,15 +1533,6 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
1521 } 1533 }
1522 1534
1523retry: 1535retry:
1524 if (pi_state != NULL) {
1525 /*
1526 * We will have to lookup the pi_state again, so free this one
1527 * to keep the accounting correct.
1528 */
1529 free_pi_state(pi_state);
1530 pi_state = NULL;
1531 }
1532
1533 ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ); 1536 ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
1534 if (unlikely(ret != 0)) 1537 if (unlikely(ret != 0))
1535 goto out; 1538 goto out;
@@ -1619,6 +1622,8 @@ retry_private:
1619 case 0: 1622 case 0:
1620 break; 1623 break;
1621 case -EFAULT: 1624 case -EFAULT:
1625 free_pi_state(pi_state);
1626 pi_state = NULL;
1622 double_unlock_hb(hb1, hb2); 1627 double_unlock_hb(hb1, hb2);
1623 hb_waiters_dec(hb2); 1628 hb_waiters_dec(hb2);
1624 put_futex_key(&key2); 1629 put_futex_key(&key2);
@@ -1634,6 +1639,8 @@ retry_private:
1634 * exit to complete. 1639 * exit to complete.
1635 * - The user space value changed. 1640 * - The user space value changed.
1636 */ 1641 */
1642 free_pi_state(pi_state);
1643 pi_state = NULL;
1637 double_unlock_hb(hb1, hb2); 1644 double_unlock_hb(hb1, hb2);
1638 hb_waiters_dec(hb2); 1645 hb_waiters_dec(hb2);
1639 put_futex_key(&key2); 1646 put_futex_key(&key2);
@@ -1710,6 +1717,7 @@ retry_private:
1710 } 1717 }
1711 1718
1712out_unlock: 1719out_unlock:
1720 free_pi_state(pi_state);
1713 double_unlock_hb(hb1, hb2); 1721 double_unlock_hb(hb1, hb2);
1714 hb_waiters_dec(hb2); 1722 hb_waiters_dec(hb2);
1715 1723
@@ -1727,8 +1735,6 @@ out_put_keys:
1727out_put_key1: 1735out_put_key1:
1728 put_futex_key(&key1); 1736 put_futex_key(&key1);
1729out: 1737out:
1730 if (pi_state != NULL)
1731 free_pi_state(pi_state);
1732 return ret ? ret : task_count; 1738 return ret ? ret : task_count;
1733} 1739}
1734 1740
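
The futex_requeue() hunks above rely on making free_pi_state() tolerate NULL so the shared out_unlock path can call it unconditionally. A tiny generic illustration of that idiom, with made-up names:

#include <linux/atomic.h>
#include <linux/slab.h>

struct thing {
        atomic_t refcount;
};

/* Accepts NULL so every exit path can call it unconditionally, the way
 * free_pi_state() is now called from out_unlock in futex_requeue(). */
static void release_thing(struct thing *t)
{
        if (!t)
                return;
        if (!atomic_dec_and_test(&t->refcount))
                return;
        kfree(t);
}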
diff --git a/kernel/gcov/Kconfig b/kernel/gcov/Kconfig
index cf66c5c8458e..3b7408759bdf 100644
--- a/kernel/gcov/Kconfig
+++ b/kernel/gcov/Kconfig
@@ -35,7 +35,7 @@ config GCOV_KERNEL
35config GCOV_PROFILE_ALL 35config GCOV_PROFILE_ALL
36 bool "Profile entire Kernel" 36 bool "Profile entire Kernel"
37 depends on GCOV_KERNEL 37 depends on GCOV_KERNEL
38 depends on SUPERH || S390 || X86 || PPC || MICROBLAZE || ARM 38 depends on SUPERH || S390 || X86 || PPC || MICROBLAZE || ARM || ARM64
39 default n 39 default n
40 ---help--- 40 ---help---
41 This options activates profiling for the entire kernel. 41 This options activates profiling for the entire kernel.
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 8637e041a247..80f7a6d00519 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -196,12 +196,34 @@ int __request_module(bool wait, const char *fmt, ...)
196EXPORT_SYMBOL(__request_module); 196EXPORT_SYMBOL(__request_module);
197#endif /* CONFIG_MODULES */ 197#endif /* CONFIG_MODULES */
198 198
199static void call_usermodehelper_freeinfo(struct subprocess_info *info)
200{
201 if (info->cleanup)
202 (*info->cleanup)(info);
203 kfree(info);
204}
205
206static void umh_complete(struct subprocess_info *sub_info)
207{
208 struct completion *comp = xchg(&sub_info->complete, NULL);
209 /*
210 * See call_usermodehelper_exec(). If xchg() returns NULL
211 * we own sub_info, the UMH_KILLABLE caller has gone away
212 * or the caller used UMH_NO_WAIT.
213 */
214 if (comp)
215 complete(comp);
216 else
217 call_usermodehelper_freeinfo(sub_info);
218}
219
199/* 220/*
200 * This is the task which runs the usermode application 221 * This is the task which runs the usermode application
201 */ 222 */
202static int ____call_usermodehelper(void *data) 223static int ____call_usermodehelper(void *data)
203{ 224{
204 struct subprocess_info *sub_info = data; 225 struct subprocess_info *sub_info = data;
226 int wait = sub_info->wait & ~UMH_KILLABLE;
205 struct cred *new; 227 struct cred *new;
206 int retval; 228 int retval;
207 229
@@ -221,7 +243,7 @@ static int ____call_usermodehelper(void *data)
221 retval = -ENOMEM; 243 retval = -ENOMEM;
222 new = prepare_kernel_cred(current); 244 new = prepare_kernel_cred(current);
223 if (!new) 245 if (!new)
224 goto fail; 246 goto out;
225 247
226 spin_lock(&umh_sysctl_lock); 248 spin_lock(&umh_sysctl_lock);
227 new->cap_bset = cap_intersect(usermodehelper_bset, new->cap_bset); 249 new->cap_bset = cap_intersect(usermodehelper_bset, new->cap_bset);
@@ -233,7 +255,7 @@ static int ____call_usermodehelper(void *data)
233 retval = sub_info->init(sub_info, new); 255 retval = sub_info->init(sub_info, new);
234 if (retval) { 256 if (retval) {
235 abort_creds(new); 257 abort_creds(new);
236 goto fail; 258 goto out;
237 } 259 }
238 } 260 }
239 261
@@ -242,12 +264,13 @@ static int ____call_usermodehelper(void *data)
242 retval = do_execve(getname_kernel(sub_info->path), 264 retval = do_execve(getname_kernel(sub_info->path),
243 (const char __user *const __user *)sub_info->argv, 265 (const char __user *const __user *)sub_info->argv,
244 (const char __user *const __user *)sub_info->envp); 266 (const char __user *const __user *)sub_info->envp);
267out:
268 sub_info->retval = retval;
 269 /* wait_for_helper() will call umh_complete if UMH_WAIT_PROC. */
270 if (wait != UMH_WAIT_PROC)
271 umh_complete(sub_info);
245 if (!retval) 272 if (!retval)
246 return 0; 273 return 0;
247
248 /* Exec failed? */
249fail:
250 sub_info->retval = retval;
251 do_exit(0); 274 do_exit(0);
252} 275}
253 276
@@ -258,26 +281,6 @@ static int call_helper(void *data)
258 return ____call_usermodehelper(data); 281 return ____call_usermodehelper(data);
259} 282}
260 283
261static void call_usermodehelper_freeinfo(struct subprocess_info *info)
262{
263 if (info->cleanup)
264 (*info->cleanup)(info);
265 kfree(info);
266}
267
268static void umh_complete(struct subprocess_info *sub_info)
269{
270 struct completion *comp = xchg(&sub_info->complete, NULL);
271 /*
272 * See call_usermodehelper_exec(). If xchg() returns NULL
273 * we own sub_info, the UMH_KILLABLE caller has gone away.
274 */
275 if (comp)
276 complete(comp);
277 else
278 call_usermodehelper_freeinfo(sub_info);
279}
280
281/* Keventd can't block, but this (a child) can. */ 284/* Keventd can't block, but this (a child) can. */
282static int wait_for_helper(void *data) 285static int wait_for_helper(void *data)
283{ 286{
@@ -336,18 +339,8 @@ static void __call_usermodehelper(struct work_struct *work)
336 kmod_thread_locker = NULL; 339 kmod_thread_locker = NULL;
337 } 340 }
338 341
339 switch (wait) { 342 if (pid < 0) {
340 case UMH_NO_WAIT: 343 sub_info->retval = pid;
341 call_usermodehelper_freeinfo(sub_info);
342 break;
343
344 case UMH_WAIT_PROC:
345 if (pid > 0)
346 break;
347 /* FALLTHROUGH */
348 case UMH_WAIT_EXEC:
349 if (pid < 0)
350 sub_info->retval = pid;
351 umh_complete(sub_info); 344 umh_complete(sub_info);
352 } 345 }
353} 346}
@@ -588,7 +581,12 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
588 goto out; 581 goto out;
589 } 582 }
590 583
591 sub_info->complete = &done; 584 /*
585 * Set the completion pointer only if there is a waiter.
586 * This makes it possible to use umh_complete to free
587 * the data structure in case of UMH_NO_WAIT.
588 */
589 sub_info->complete = (wait == UMH_NO_WAIT) ? NULL : &done;
592 sub_info->wait = wait; 590 sub_info->wait = wait;
593 591
594 queue_work(khelper_wq, &sub_info->work); 592 queue_work(khelper_wq, &sub_info->work);
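
The xchg() in umh_complete() above implements a handoff: whichever side NULLs sub_info->complete first owns the final step. A sketch of the waiter side of that handoff, simplified from the shape of call_usermodehelper_exec() (names and the exact error path are abbreviated):

static int wait_for_helper_result(struct subprocess_info *sub_info,
                                  struct completion *done, int wait)
{
        int retval;

        if (wait & UMH_KILLABLE) {
                retval = wait_for_completion_killable(done);
                if (!retval)
                        goto wait_done;

                /* Killed: try to steal the completion back.  If xchg()
                 * returns non-NULL we won the race; umh_complete() will
                 * see NULL and free sub_info, so just bail out. */
                if (xchg(&sub_info->complete, NULL))
                        return -EINTR;
                /* Lost the race: complete() is already running, wait for
                 * it so 'done' is not touched after we return. */
        }
        wait_for_completion(done);
wait_done:
        retval = sub_info->retval;
        call_usermodehelper_freeinfo(sub_info);
        return retval;
}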
diff --git a/kernel/panic.c b/kernel/panic.c
index d09dc5c32c67..cf80672b7924 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -244,6 +244,7 @@ static const struct tnt tnts[] = {
244 * 'I' - Working around severe firmware bug. 244 * 'I' - Working around severe firmware bug.
245 * 'O' - Out-of-tree module has been loaded. 245 * 'O' - Out-of-tree module has been loaded.
246 * 'E' - Unsigned module has been loaded. 246 * 'E' - Unsigned module has been loaded.
247 * 'L' - A soft lockup has previously occurred.
247 * 248 *
248 * The string is overwritten by the next call to print_tainted(). 249 * The string is overwritten by the next call to print_tainted().
249 */ 250 */
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index a9dfa79b6bab..1f35a3478f3c 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -502,8 +502,14 @@ int hibernation_restore(int platform_mode)
502 error = dpm_suspend_start(PMSG_QUIESCE); 502 error = dpm_suspend_start(PMSG_QUIESCE);
503 if (!error) { 503 if (!error) {
504 error = resume_target_kernel(platform_mode); 504 error = resume_target_kernel(platform_mode);
505 dpm_resume_end(PMSG_RECOVER); 505 /*
506 * The above should either succeed and jump to the new kernel,
507 * or return with an error. Otherwise things are just
508 * undefined, so let's be paranoid.
509 */
510 BUG_ON(!error);
506 } 511 }
512 dpm_resume_end(PMSG_RECOVER);
507 pm_restore_gfp_mask(); 513 pm_restore_gfp_mask();
508 resume_console(); 514 resume_console();
509 pm_restore_console(); 515 pm_restore_console();
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 7b323221b9ee..5a6ec8678b9a 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -46,13 +46,13 @@ static int try_to_freeze_tasks(bool user_only)
46 while (true) { 46 while (true) {
47 todo = 0; 47 todo = 0;
48 read_lock(&tasklist_lock); 48 read_lock(&tasklist_lock);
49 do_each_thread(g, p) { 49 for_each_process_thread(g, p) {
50 if (p == current || !freeze_task(p)) 50 if (p == current || !freeze_task(p))
51 continue; 51 continue;
52 52
53 if (!freezer_should_skip(p)) 53 if (!freezer_should_skip(p))
54 todo++; 54 todo++;
55 } while_each_thread(g, p); 55 }
56 read_unlock(&tasklist_lock); 56 read_unlock(&tasklist_lock);
57 57
58 if (!user_only) { 58 if (!user_only) {
@@ -93,11 +93,11 @@ static int try_to_freeze_tasks(bool user_only)
93 93
94 if (!wakeup) { 94 if (!wakeup) {
95 read_lock(&tasklist_lock); 95 read_lock(&tasklist_lock);
96 do_each_thread(g, p) { 96 for_each_process_thread(g, p) {
97 if (p != current && !freezer_should_skip(p) 97 if (p != current && !freezer_should_skip(p)
98 && freezing(p) && !frozen(p)) 98 && freezing(p) && !frozen(p))
99 sched_show_task(p); 99 sched_show_task(p);
100 } while_each_thread(g, p); 100 }
101 read_unlock(&tasklist_lock); 101 read_unlock(&tasklist_lock);
102 } 102 }
103 } else { 103 } else {
@@ -108,6 +108,30 @@ static int try_to_freeze_tasks(bool user_only)
108 return todo ? -EBUSY : 0; 108 return todo ? -EBUSY : 0;
109} 109}
110 110
111static bool __check_frozen_processes(void)
112{
113 struct task_struct *g, *p;
114
115 for_each_process_thread(g, p)
116 if (p != current && !freezer_should_skip(p) && !frozen(p))
117 return false;
118
119 return true;
120}
121
122/*
123 * Returns true if all freezable tasks (except for current) are frozen already
124 */
125static bool check_frozen_processes(void)
126{
127 bool ret;
128
129 read_lock(&tasklist_lock);
130 ret = __check_frozen_processes();
131 read_unlock(&tasklist_lock);
132 return ret;
133}
134
111/** 135/**
112 * freeze_processes - Signal user space processes to enter the refrigerator. 136 * freeze_processes - Signal user space processes to enter the refrigerator.
113 * The current thread will not be frozen. The same process that calls 137 * The current thread will not be frozen. The same process that calls
@@ -118,6 +142,7 @@ static int try_to_freeze_tasks(bool user_only)
118int freeze_processes(void) 142int freeze_processes(void)
119{ 143{
120 int error; 144 int error;
145 int oom_kills_saved;
121 146
122 error = __usermodehelper_disable(UMH_FREEZING); 147 error = __usermodehelper_disable(UMH_FREEZING);
123 if (error) 148 if (error)
@@ -132,11 +157,25 @@ int freeze_processes(void)
132 pm_wakeup_clear(); 157 pm_wakeup_clear();
133 printk("Freezing user space processes ... "); 158 printk("Freezing user space processes ... ");
134 pm_freezing = true; 159 pm_freezing = true;
160 oom_kills_saved = oom_kills_count();
135 error = try_to_freeze_tasks(true); 161 error = try_to_freeze_tasks(true);
136 if (!error) { 162 if (!error) {
137 printk("done.");
138 __usermodehelper_set_disable_depth(UMH_DISABLED); 163 __usermodehelper_set_disable_depth(UMH_DISABLED);
139 oom_killer_disable(); 164 oom_killer_disable();
165
166 /*
167 * There might have been an OOM kill while we were
168 * freezing tasks and the killed task might be still
169 * on the way out so we have to double check for race.
170 */
171 if (oom_kills_count() != oom_kills_saved &&
172 !check_frozen_processes()) {
173 __usermodehelper_set_disable_depth(UMH_ENABLED);
174 printk("OOM in progress.");
175 error = -EBUSY;
176 } else {
177 printk("done.");
178 }
140 } 179 }
141 printk("\n"); 180 printk("\n");
142 BUG_ON(in_atomic()); 181 BUG_ON(in_atomic());
@@ -191,11 +230,11 @@ void thaw_processes(void)
191 thaw_workqueues(); 230 thaw_workqueues();
192 231
193 read_lock(&tasklist_lock); 232 read_lock(&tasklist_lock);
194 do_each_thread(g, p) { 233 for_each_process_thread(g, p) {
195 /* No other threads should have PF_SUSPEND_TASK set */ 234 /* No other threads should have PF_SUSPEND_TASK set */
196 WARN_ON((p != curr) && (p->flags & PF_SUSPEND_TASK)); 235 WARN_ON((p != curr) && (p->flags & PF_SUSPEND_TASK));
197 __thaw_task(p); 236 __thaw_task(p);
198 } while_each_thread(g, p); 237 }
199 read_unlock(&tasklist_lock); 238 read_unlock(&tasklist_lock);
200 239
201 WARN_ON(!(curr->flags & PF_SUSPEND_TASK)); 240 WARN_ON(!(curr->flags & PF_SUSPEND_TASK));
@@ -218,10 +257,10 @@ void thaw_kernel_threads(void)
218 thaw_workqueues(); 257 thaw_workqueues();
219 258
220 read_lock(&tasklist_lock); 259 read_lock(&tasklist_lock);
221 do_each_thread(g, p) { 260 for_each_process_thread(g, p) {
222 if (p->flags & (PF_KTHREAD | PF_WQ_WORKER)) 261 if (p->flags & (PF_KTHREAD | PF_WQ_WORKER))
223 __thaw_task(p); 262 __thaw_task(p);
224 } while_each_thread(g, p); 263 }
225 read_unlock(&tasklist_lock); 264 read_unlock(&tasklist_lock);
226 265
227 schedule(); 266 schedule();
diff --git a/kernel/power/qos.c b/kernel/power/qos.c
index 884b77058864..5f4c006c4b1e 100644
--- a/kernel/power/qos.c
+++ b/kernel/power/qos.c
@@ -105,11 +105,27 @@ static struct pm_qos_object network_throughput_pm_qos = {
105}; 105};
106 106
107 107
108static BLOCKING_NOTIFIER_HEAD(memory_bandwidth_notifier);
109static struct pm_qos_constraints memory_bw_constraints = {
110 .list = PLIST_HEAD_INIT(memory_bw_constraints.list),
111 .target_value = PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE,
112 .default_value = PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE,
113 .no_constraint_value = PM_QOS_MEMORY_BANDWIDTH_DEFAULT_VALUE,
114 .type = PM_QOS_SUM,
115 .notifiers = &memory_bandwidth_notifier,
116};
117static struct pm_qos_object memory_bandwidth_pm_qos = {
118 .constraints = &memory_bw_constraints,
119 .name = "memory_bandwidth",
120};
121
122
108static struct pm_qos_object *pm_qos_array[] = { 123static struct pm_qos_object *pm_qos_array[] = {
109 &null_pm_qos, 124 &null_pm_qos,
110 &cpu_dma_pm_qos, 125 &cpu_dma_pm_qos,
111 &network_lat_pm_qos, 126 &network_lat_pm_qos,
112 &network_throughput_pm_qos 127 &network_throughput_pm_qos,
128 &memory_bandwidth_pm_qos,
113}; 129};
114 130
115static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf, 131static ssize_t pm_qos_power_write(struct file *filp, const char __user *buf,
@@ -130,6 +146,9 @@ static const struct file_operations pm_qos_power_fops = {
130/* unlocked internal variant */ 146/* unlocked internal variant */
131static inline int pm_qos_get_value(struct pm_qos_constraints *c) 147static inline int pm_qos_get_value(struct pm_qos_constraints *c)
132{ 148{
149 struct plist_node *node;
150 int total_value = 0;
151
133 if (plist_head_empty(&c->list)) 152 if (plist_head_empty(&c->list))
134 return c->no_constraint_value; 153 return c->no_constraint_value;
135 154
@@ -140,6 +159,12 @@ static inline int pm_qos_get_value(struct pm_qos_constraints *c)
140 case PM_QOS_MAX: 159 case PM_QOS_MAX:
141 return plist_last(&c->list)->prio; 160 return plist_last(&c->list)->prio;
142 161
162 case PM_QOS_SUM:
163 plist_for_each(node, &c->list)
164 total_value += node->prio;
165
166 return total_value;
167
143 default: 168 default:
144 /* runtime check for not using enum */ 169 /* runtime check for not using enum */
145 BUG(); 170 BUG();
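
A driver-side sketch of using the new constraint class: with PM_QOS_SUM the effective value is the sum of all outstanding requests rather than a min or max. This assumes the PM_QOS_MEMORY_BANDWIDTH class id added to pm_qos.h alongside this change; the bandwidth units are whatever the platform defines.

#include <linux/pm_qos.h>

static struct pm_qos_request membw_req;

static void example_start_streaming(void)
{
        /* Each active requester adds its own demand to the total. */
        pm_qos_add_request(&membw_req, PM_QOS_MEMORY_BANDWIDTH, 200000);
}

static void example_stop_streaming(void)
{
        pm_qos_remove_request(&membw_req);
}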
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 4ca9a33ff620..c347e3ce3a55 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -146,7 +146,7 @@ static int platform_suspend_prepare(suspend_state_t state)
146 146
147static int platform_suspend_prepare_late(suspend_state_t state) 147static int platform_suspend_prepare_late(suspend_state_t state)
148{ 148{
149 return state == PM_SUSPEND_FREEZE && freeze_ops->prepare ? 149 return state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->prepare ?
150 freeze_ops->prepare() : 0; 150 freeze_ops->prepare() : 0;
151} 151}
152 152
@@ -164,7 +164,7 @@ static void platform_resume_noirq(suspend_state_t state)
164 164
165static void platform_resume_early(suspend_state_t state) 165static void platform_resume_early(suspend_state_t state)
166{ 166{
167 if (state == PM_SUSPEND_FREEZE && freeze_ops->restore) 167 if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->restore)
168 freeze_ops->restore(); 168 freeze_ops->restore();
169} 169}
170 170
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 133e47223095..9815447d22e0 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -3299,11 +3299,16 @@ static void _rcu_barrier(struct rcu_state *rsp)
3299 continue; 3299 continue;
3300 rdp = per_cpu_ptr(rsp->rda, cpu); 3300 rdp = per_cpu_ptr(rsp->rda, cpu);
3301 if (rcu_is_nocb_cpu(cpu)) { 3301 if (rcu_is_nocb_cpu(cpu)) {
3302 _rcu_barrier_trace(rsp, "OnlineNoCB", cpu, 3302 if (!rcu_nocb_cpu_needs_barrier(rsp, cpu)) {
3303 rsp->n_barrier_done); 3303 _rcu_barrier_trace(rsp, "OfflineNoCB", cpu,
3304 atomic_inc(&rsp->barrier_cpu_count); 3304 rsp->n_barrier_done);
3305 __call_rcu(&rdp->barrier_head, rcu_barrier_callback, 3305 } else {
3306 rsp, cpu, 0); 3306 _rcu_barrier_trace(rsp, "OnlineNoCB", cpu,
3307 rsp->n_barrier_done);
3308 atomic_inc(&rsp->barrier_cpu_count);
3309 __call_rcu(&rdp->barrier_head,
3310 rcu_barrier_callback, rsp, cpu, 0);
3311 }
3307 } else if (ACCESS_ONCE(rdp->qlen)) { 3312 } else if (ACCESS_ONCE(rdp->qlen)) {
3308 _rcu_barrier_trace(rsp, "OnlineQ", cpu, 3313 _rcu_barrier_trace(rsp, "OnlineQ", cpu,
3309 rsp->n_barrier_done); 3314 rsp->n_barrier_done);
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index d03764652d91..bbdc45d8d74f 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -587,6 +587,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu);
587static void print_cpu_stall_info_end(void); 587static void print_cpu_stall_info_end(void);
588static void zero_cpu_stall_ticks(struct rcu_data *rdp); 588static void zero_cpu_stall_ticks(struct rcu_data *rdp);
589static void increment_cpu_stall_ticks(void); 589static void increment_cpu_stall_ticks(void);
590static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu);
590static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq); 591static void rcu_nocb_gp_set(struct rcu_node *rnp, int nrq);
591static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp); 592static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp);
592static void rcu_init_one_nocb(struct rcu_node *rnp); 593static void rcu_init_one_nocb(struct rcu_node *rnp);
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 387dd4599344..c1d7f27bd38f 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -2050,6 +2050,33 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force)
2050} 2050}
2051 2051
2052/* 2052/*
2053 * Does the specified CPU need an RCU callback for the specified flavor
2054 * of rcu_barrier()?
2055 */
2056static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
2057{
2058 struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
2059 struct rcu_head *rhp;
2060
2061 /* No-CBs CPUs might have callbacks on any of three lists. */
2062 rhp = ACCESS_ONCE(rdp->nocb_head);
2063 if (!rhp)
2064 rhp = ACCESS_ONCE(rdp->nocb_gp_head);
2065 if (!rhp)
2066 rhp = ACCESS_ONCE(rdp->nocb_follower_head);
2067
2068 /* Having no rcuo kthread but CBs after scheduler starts is bad! */
2069 if (!ACCESS_ONCE(rdp->nocb_kthread) && rhp) {
2070 /* RCU callback enqueued before CPU first came online??? */
2071 pr_err("RCU: Never-onlined no-CBs CPU %d has CB %p\n",
2072 cpu, rhp->func);
2073 WARN_ON_ONCE(1);
2074 }
2075
2076 return !!rhp;
2077}
2078
2079/*
2053 * Enqueue the specified string of rcu_head structures onto the specified 2080 * Enqueue the specified string of rcu_head structures onto the specified
2054 * CPU's no-CBs lists. The CPU is specified by rdp, the head of the 2081 * CPU's no-CBs lists. The CPU is specified by rdp, the head of the
2055 * string by rhp, and the tail of the string by rhtp. The non-lazy/lazy 2082 * string by rhp, and the tail of the string by rhtp. The non-lazy/lazy
@@ -2642,6 +2669,12 @@ static bool init_nocb_callback_list(struct rcu_data *rdp)
2642 2669
2643#else /* #ifdef CONFIG_RCU_NOCB_CPU */ 2670#else /* #ifdef CONFIG_RCU_NOCB_CPU */
2644 2671
2672static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
2673{
2674 WARN_ON_ONCE(1); /* Should be dead code. */
2675 return false;
2676}
2677
2645static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp) 2678static void rcu_nocb_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
2646{ 2679{
2647} 2680}
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 44999505e1bf..89e7283015a6 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -2475,44 +2475,6 @@ EXPORT_PER_CPU_SYMBOL(kstat);
2475EXPORT_PER_CPU_SYMBOL(kernel_cpustat); 2475EXPORT_PER_CPU_SYMBOL(kernel_cpustat);
2476 2476
2477/* 2477/*
2478 * Return any ns on the sched_clock that have not yet been accounted in
2479 * @p in case that task is currently running.
2480 *
2481 * Called with task_rq_lock() held on @rq.
2482 */
2483static u64 do_task_delta_exec(struct task_struct *p, struct rq *rq)
2484{
2485 u64 ns = 0;
2486
2487 /*
2488 * Must be ->curr _and_ ->on_rq. If dequeued, we would
2489 * project cycles that may never be accounted to this
2490 * thread, breaking clock_gettime().
2491 */
2492 if (task_current(rq, p) && task_on_rq_queued(p)) {
2493 update_rq_clock(rq);
2494 ns = rq_clock_task(rq) - p->se.exec_start;
2495 if ((s64)ns < 0)
2496 ns = 0;
2497 }
2498
2499 return ns;
2500}
2501
2502unsigned long long task_delta_exec(struct task_struct *p)
2503{
2504 unsigned long flags;
2505 struct rq *rq;
2506 u64 ns = 0;
2507
2508 rq = task_rq_lock(p, &flags);
2509 ns = do_task_delta_exec(p, rq);
2510 task_rq_unlock(rq, p, &flags);
2511
2512 return ns;
2513}
2514
2515/*
2516 * Return accounted runtime for the task. 2478 * Return accounted runtime for the task.
2517 * In case the task is currently running, return the runtime plus current's 2479 * In case the task is currently running, return the runtime plus current's
2518 * pending runtime that have not been accounted yet. 2480 * pending runtime that have not been accounted yet.
@@ -2521,7 +2483,7 @@ unsigned long long task_sched_runtime(struct task_struct *p)
2521{ 2483{
2522 unsigned long flags; 2484 unsigned long flags;
2523 struct rq *rq; 2485 struct rq *rq;
2524 u64 ns = 0; 2486 u64 ns;
2525 2487
2526#if defined(CONFIG_64BIT) && defined(CONFIG_SMP) 2488#if defined(CONFIG_64BIT) && defined(CONFIG_SMP)
2527 /* 2489 /*
@@ -2540,7 +2502,16 @@ unsigned long long task_sched_runtime(struct task_struct *p)
2540#endif 2502#endif
2541 2503
2542 rq = task_rq_lock(p, &flags); 2504 rq = task_rq_lock(p, &flags);
2543 ns = p->se.sum_exec_runtime + do_task_delta_exec(p, rq); 2505 /*
2506 * Must be ->curr _and_ ->on_rq. If dequeued, we would
2507 * project cycles that may never be accounted to this
2508 * thread, breaking clock_gettime().
2509 */
2510 if (task_current(rq, p) && task_on_rq_queued(p)) {
2511 update_rq_clock(rq);
2512 p->sched_class->update_curr(rq);
2513 }
2514 ns = p->se.sum_exec_runtime;
2544 task_rq_unlock(rq, p, &flags); 2515 task_rq_unlock(rq, p, &flags);
2545 2516
2546 return ns; 2517 return ns;
@@ -2903,10 +2874,14 @@ asmlinkage __visible void __sched schedule_user(void)
2903 * or we have been woken up remotely but the IPI has not yet arrived, 2874 * or we have been woken up remotely but the IPI has not yet arrived,
2904 * we haven't yet exited the RCU idle mode. Do it here manually until 2875 * we haven't yet exited the RCU idle mode. Do it here manually until
2905 * we find a better solution. 2876 * we find a better solution.
2877 *
2878 * NB: There are buggy callers of this function. Ideally we
2879 * should warn if prev_state != IN_USER, but that will trigger
2880 * too frequently to make sense yet.
2906 */ 2881 */
2907 user_exit(); 2882 enum ctx_state prev_state = exception_enter();
2908 schedule(); 2883 schedule();
2909 user_enter(); 2884 exception_exit(prev_state);
2910} 2885}
2911#endif 2886#endif
2912 2887
@@ -2951,6 +2926,47 @@ asmlinkage __visible void __sched notrace preempt_schedule(void)
2951} 2926}
2952NOKPROBE_SYMBOL(preempt_schedule); 2927NOKPROBE_SYMBOL(preempt_schedule);
2953EXPORT_SYMBOL(preempt_schedule); 2928EXPORT_SYMBOL(preempt_schedule);
2929
2930#ifdef CONFIG_CONTEXT_TRACKING
2931/**
2932 * preempt_schedule_context - preempt_schedule called by tracing
2933 *
2934 * The tracing infrastructure uses preempt_enable_notrace to prevent
2935 * recursion and tracing preempt enabling caused by the tracing
2936 * infrastructure itself. But as tracing can happen in areas coming
2937 * from userspace or just about to enter userspace, a preempt enable
2938 * can occur before user_exit() is called. This will cause the scheduler
2939 * to be called when the system is still in usermode.
2940 *
2941 * To prevent this, the preempt_enable_notrace will use this function
2942 * instead of preempt_schedule() to exit user context if needed before
2943 * calling the scheduler.
2944 */
2945asmlinkage __visible void __sched notrace preempt_schedule_context(void)
2946{
2947 enum ctx_state prev_ctx;
2948
2949 if (likely(!preemptible()))
2950 return;
2951
2952 do {
2953 __preempt_count_add(PREEMPT_ACTIVE);
2954 /*
2955 * Needs preempt disabled in case user_exit() is traced
2956 * and the tracer calls preempt_enable_notrace() causing
2957 * an infinite recursion.
2958 */
2959 prev_ctx = exception_enter();
2960 __schedule();
2961 exception_exit(prev_ctx);
2962
2963 __preempt_count_sub(PREEMPT_ACTIVE);
2964 barrier();
2965 } while (need_resched());
2966}
2967EXPORT_SYMBOL_GPL(preempt_schedule_context);
2968#endif /* CONFIG_CONTEXT_TRACKING */
2969
2954#endif /* CONFIG_PREEMPT */ 2970#endif /* CONFIG_PREEMPT */
2955 2971
2956/* 2972/*
@@ -6327,6 +6343,10 @@ static void sched_init_numa(void)
6327 if (!sched_debug()) 6343 if (!sched_debug())
6328 break; 6344 break;
6329 } 6345 }
6346
6347 if (!level)
6348 return;
6349
6330 /* 6350 /*
6331 * 'level' contains the number of unique distances, excluding the 6351 * 'level' contains the number of unique distances, excluding the
6332 * identity distance node_distance(i,i). 6352 * identity distance node_distance(i,i).
@@ -7403,8 +7423,12 @@ void sched_move_task(struct task_struct *tsk)
7403 if (unlikely(running)) 7423 if (unlikely(running))
7404 put_prev_task(rq, tsk); 7424 put_prev_task(rq, tsk);
7405 7425
7406 tg = container_of(task_css_check(tsk, cpu_cgrp_id, 7426 /*
7407 lockdep_is_held(&tsk->sighand->siglock)), 7427 * All callers are synchronized by task_rq_lock(); we do not use RCU
7428 * which is pointless here. Thus, we pass "true" to task_css_check()
7429 * to prevent lockdep warnings.
7430 */
7431 tg = container_of(task_css_check(tsk, cpu_cgrp_id, true),
7408 struct task_group, css); 7432 struct task_group, css);
7409 tg = autogroup_task_group(tsk, tg); 7433 tg = autogroup_task_group(tsk, tg);
7410 tsk->sched_task_group = tg; 7434 tsk->sched_task_group = tg;
@@ -7833,6 +7857,11 @@ static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
7833 sched_offline_group(tg); 7857 sched_offline_group(tg);
7834} 7858}
7835 7859
7860static void cpu_cgroup_fork(struct task_struct *task)
7861{
7862 sched_move_task(task);
7863}
7864
7836static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css, 7865static int cpu_cgroup_can_attach(struct cgroup_subsys_state *css,
7837 struct cgroup_taskset *tset) 7866 struct cgroup_taskset *tset)
7838{ 7867{
@@ -8205,6 +8234,7 @@ struct cgroup_subsys cpu_cgrp_subsys = {
8205 .css_free = cpu_cgroup_css_free, 8234 .css_free = cpu_cgroup_css_free,
8206 .css_online = cpu_cgroup_css_online, 8235 .css_online = cpu_cgroup_css_online,
8207 .css_offline = cpu_cgroup_css_offline, 8236 .css_offline = cpu_cgroup_css_offline,
8237 .fork = cpu_cgroup_fork,
8208 .can_attach = cpu_cgroup_can_attach, 8238 .can_attach = cpu_cgroup_can_attach,
8209 .attach = cpu_cgroup_attach, 8239 .attach = cpu_cgroup_attach,
8210 .exit = cpu_cgroup_exit, 8240 .exit = cpu_cgroup_exit,
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index 256e577faf1b..28fa9d9e9201 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -518,12 +518,20 @@ again:
518 } 518 }
519 519
520 /* 520 /*
521 * We need to take care of a possible races here. In fact, the 521 * We need to take care of several possible races here:
522 * task might have changed its scheduling policy to something 522 *
523 * different from SCHED_DEADLINE or changed its reservation 523 * - the task might have changed its scheduling policy
524 * parameters (through sched_setattr()). 524 * to something different than SCHED_DEADLINE
525 * - the task might have changed its reservation parameters
526 * (through sched_setattr())
527 * - the task might have been boosted by someone else and
528 * might be in the boosting/deboosting path
529 *
530 * In all this cases we bail out, as the task is already
531 * in the runqueue or is going to be enqueued back anyway.
525 */ 532 */
526 if (!dl_task(p) || dl_se->dl_new) 533 if (!dl_task(p) || dl_se->dl_new ||
534 dl_se->dl_boosted || !dl_se->dl_throttled)
527 goto unlock; 535 goto unlock;
528 536
529 sched_clock_tick(); 537 sched_clock_tick();
@@ -532,7 +540,7 @@ again:
532 dl_se->dl_yielded = 0; 540 dl_se->dl_yielded = 0;
533 if (task_on_rq_queued(p)) { 541 if (task_on_rq_queued(p)) {
534 enqueue_task_dl(rq, p, ENQUEUE_REPLENISH); 542 enqueue_task_dl(rq, p, ENQUEUE_REPLENISH);
535 if (task_has_dl_policy(rq->curr)) 543 if (dl_task(rq->curr))
536 check_preempt_curr_dl(rq, p, 0); 544 check_preempt_curr_dl(rq, p, 0);
537 else 545 else
538 resched_curr(rq); 546 resched_curr(rq);
@@ -847,8 +855,19 @@ static void enqueue_task_dl(struct rq *rq, struct task_struct *p, int flags)
847 * smaller than our one... OTW we keep our runtime and 855 * smaller than our one... OTW we keep our runtime and
848 * deadline. 856 * deadline.
849 */ 857 */
850 if (pi_task && p->dl.dl_boosted && dl_prio(pi_task->normal_prio)) 858 if (pi_task && p->dl.dl_boosted && dl_prio(pi_task->normal_prio)) {
851 pi_se = &pi_task->dl; 859 pi_se = &pi_task->dl;
860 } else if (!dl_prio(p->normal_prio)) {
861 /*
862 * Special case in which we have a !SCHED_DEADLINE task
 863 * that is going to be deboosted, but exceeds its
 864 * runtime while doing so. No point in replenishing
 865 * it, as it's going to return to its original
866 * scheduling class after this.
867 */
868 BUG_ON(!p->dl.dl_boosted || flags != ENQUEUE_REPLENISH);
869 return;
870 }
852 871
853 /* 872 /*
854 * If p is throttled, we do nothing. In fact, if it exhausted 873 * If p is throttled, we do nothing. In fact, if it exhausted
@@ -1607,8 +1626,12 @@ static void switched_to_dl(struct rq *rq, struct task_struct *p)
1607 /* Only reschedule if pushing failed */ 1626 /* Only reschedule if pushing failed */
1608 check_resched = 0; 1627 check_resched = 0;
1609#endif /* CONFIG_SMP */ 1628#endif /* CONFIG_SMP */
1610 if (check_resched && task_has_dl_policy(rq->curr)) 1629 if (check_resched) {
1611 check_preempt_curr_dl(rq, p, 0); 1630 if (dl_task(rq->curr))
1631 check_preempt_curr_dl(rq, p, 0);
1632 else
1633 resched_curr(rq);
1634 }
1612 } 1635 }
1613} 1636}
1614 1637
@@ -1678,4 +1701,6 @@ const struct sched_class dl_sched_class = {
1678 .prio_changed = prio_changed_dl, 1701 .prio_changed = prio_changed_dl,
1679 .switched_from = switched_from_dl, 1702 .switched_from = switched_from_dl,
1680 .switched_to = switched_to_dl, 1703 .switched_to = switched_to_dl,
1704
1705 .update_curr = update_curr_dl,
1681}; 1706};
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0b069bf3e708..ef2b104b254c 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -726,6 +726,11 @@ static void update_curr(struct cfs_rq *cfs_rq)
726 account_cfs_rq_runtime(cfs_rq, delta_exec); 726 account_cfs_rq_runtime(cfs_rq, delta_exec);
727} 727}
728 728
729static void update_curr_fair(struct rq *rq)
730{
731 update_curr(cfs_rq_of(&rq->curr->se));
732}
733
729static inline void 734static inline void
730update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se) 735update_stats_wait_start(struct cfs_rq *cfs_rq, struct sched_entity *se)
731{ 736{
@@ -828,11 +833,12 @@ static unsigned int task_nr_scan_windows(struct task_struct *p)
828 833
829static unsigned int task_scan_min(struct task_struct *p) 834static unsigned int task_scan_min(struct task_struct *p)
830{ 835{
836 unsigned int scan_size = ACCESS_ONCE(sysctl_numa_balancing_scan_size);
831 unsigned int scan, floor; 837 unsigned int scan, floor;
832 unsigned int windows = 1; 838 unsigned int windows = 1;
833 839
834 if (sysctl_numa_balancing_scan_size < MAX_SCAN_WINDOW) 840 if (scan_size < MAX_SCAN_WINDOW)
835 windows = MAX_SCAN_WINDOW / sysctl_numa_balancing_scan_size; 841 windows = MAX_SCAN_WINDOW / scan_size;
836 floor = 1000 / windows; 842 floor = 1000 / windows;
837 843
838 scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p); 844 scan = sysctl_numa_balancing_scan_period_min / task_nr_scan_windows(p);
@@ -1164,9 +1170,26 @@ static void task_numa_compare(struct task_numa_env *env,
1164 long moveimp = imp; 1170 long moveimp = imp;
1165 1171
1166 rcu_read_lock(); 1172 rcu_read_lock();
1167 cur = ACCESS_ONCE(dst_rq->curr); 1173
1168 if (cur->pid == 0) /* idle */ 1174 raw_spin_lock_irq(&dst_rq->lock);
1175 cur = dst_rq->curr;
1176 /*
1177 * No need to move the exiting task, and this ensures that ->curr
1178 * wasn't reaped and thus get_task_struct() in task_numa_assign()
1179 * is safe under RCU read lock.
1180 * Note that rcu_read_lock() itself can't protect from the final
1181 * put_task_struct() after the last schedule().
1182 */
1183 if ((cur->flags & PF_EXITING) || is_idle_task(cur))
1169 cur = NULL; 1184 cur = NULL;
1185 raw_spin_unlock_irq(&dst_rq->lock);
1186
1187 /*
1188 * Because we have preemption enabled we can get migrated around and
 1189 * end up trying to select ourselves (current == env->p) as a swap candidate.
1190 */
1191 if (cur == env->p)
1192 goto unlock;
1170 1193
1171 /* 1194 /*
1172 * "imp" is the fault differential for the source task between the 1195 * "imp" is the fault differential for the source task between the
@@ -1520,7 +1543,7 @@ static void update_task_scan_period(struct task_struct *p,
1520 * scanning faster if shared accesses dominate as it may 1543 * scanning faster if shared accesses dominate as it may
1521 * simply bounce migrations uselessly 1544 * simply bounce migrations uselessly
1522 */ 1545 */
1523 ratio = DIV_ROUND_UP(private * NUMA_PERIOD_SLOTS, (private + shared)); 1546 ratio = DIV_ROUND_UP(private * NUMA_PERIOD_SLOTS, (private + shared + 1));
1524 diff = (diff * ratio) / NUMA_PERIOD_SLOTS; 1547 diff = (diff * ratio) / NUMA_PERIOD_SLOTS;
1525 } 1548 }
1526 1549
@@ -7938,6 +7961,8 @@ const struct sched_class fair_sched_class = {
7938 7961
7939 .get_rr_interval = get_rr_interval_fair, 7962 .get_rr_interval = get_rr_interval_fair,
7940 7963
7964 .update_curr = update_curr_fair,
7965
7941#ifdef CONFIG_FAIR_GROUP_SCHED 7966#ifdef CONFIG_FAIR_GROUP_SCHED
7942 .task_move_group = task_move_group_fair, 7967 .task_move_group = task_move_group_fair,
7943#endif 7968#endif
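
The task_scan_min() hunk above reads sysctl_numa_balancing_scan_size exactly once
through ACCESS_ONCE(), so the bounds check and the division use the same value even
if the sysctl is rewritten concurrently; the kernel/sysctl.c hunk further down pairs
this with a minimum value of 1. A minimal userspace sketch of the read-once pattern
(MAX_SCAN_WINDOW and the volatile tunable are illustrative stand-ins, not the kernel
definitions):

    #include <stdio.h>

    /* written by another thread, like a sysctl written from userspace */
    static volatile unsigned int tunable = 256;

    #define MAX_SCAN_WINDOW 2560U   /* illustrative constant */

    static unsigned int windows_for(void)
    {
            /* single read: the check and the divide see the same snapshot */
            unsigned int scan_size = tunable;
            unsigned int windows = 1;

            /* the zero guard mirrors what the sysctl minimum enforces */
            if (scan_size && scan_size < MAX_SCAN_WINDOW)
                    windows = MAX_SCAN_WINDOW / scan_size;
            return windows;
    }

    int main(void)
    {
            printf("windows = %u\n", windows_for());
            return 0;
    }
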
diff --git a/kernel/sched/idle_task.c b/kernel/sched/idle_task.c
index 67ad4e7f506a..c65dac8c97cd 100644
--- a/kernel/sched/idle_task.c
+++ b/kernel/sched/idle_task.c
@@ -75,6 +75,10 @@ static unsigned int get_rr_interval_idle(struct rq *rq, struct task_struct *task
75 return 0; 75 return 0;
76} 76}
77 77
78static void update_curr_idle(struct rq *rq)
79{
80}
81
78/* 82/*
79 * Simple, special scheduling class for the per-CPU idle tasks: 83 * Simple, special scheduling class for the per-CPU idle tasks:
80 */ 84 */
@@ -101,4 +105,5 @@ const struct sched_class idle_sched_class = {
101 105
102 .prio_changed = prio_changed_idle, 106 .prio_changed = prio_changed_idle,
103 .switched_to = switched_to_idle, 107 .switched_to = switched_to_idle,
108 .update_curr = update_curr_idle,
104}; 109};
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index d024e6ce30ba..20bca398084a 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -2128,6 +2128,8 @@ const struct sched_class rt_sched_class = {
2128 2128
2129 .prio_changed = prio_changed_rt, 2129 .prio_changed = prio_changed_rt,
2130 .switched_to = switched_to_rt, 2130 .switched_to = switched_to_rt,
2131
2132 .update_curr = update_curr_rt,
2131}; 2133};
2132 2134
2133#ifdef CONFIG_SCHED_DEBUG 2135#ifdef CONFIG_SCHED_DEBUG
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 24156c8434d1..2df8ef067cc5 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1135,6 +1135,8 @@ struct sched_class {
1135 unsigned int (*get_rr_interval) (struct rq *rq, 1135 unsigned int (*get_rr_interval) (struct rq *rq,
1136 struct task_struct *task); 1136 struct task_struct *task);
1137 1137
1138 void (*update_curr) (struct rq *rq);
1139
1138#ifdef CONFIG_FAIR_GROUP_SCHED 1140#ifdef CONFIG_FAIR_GROUP_SCHED
1139 void (*task_move_group) (struct task_struct *p, int on_rq); 1141 void (*task_move_group) (struct task_struct *p, int on_rq);
1140#endif 1142#endif
diff --git a/kernel/sched/stop_task.c b/kernel/sched/stop_task.c
index 67426e529f59..79ffec45a6ac 100644
--- a/kernel/sched/stop_task.c
+++ b/kernel/sched/stop_task.c
@@ -102,6 +102,10 @@ get_rr_interval_stop(struct rq *rq, struct task_struct *task)
102 return 0; 102 return 0;
103} 103}
104 104
105static void update_curr_stop(struct rq *rq)
106{
107}
108
105/* 109/*
106 * Simple, special scheduling class for the per-CPU stop tasks: 110 * Simple, special scheduling class for the per-CPU stop tasks:
107 */ 111 */
@@ -128,4 +132,5 @@ const struct sched_class stop_sched_class = {
128 132
129 .prio_changed = prio_changed_stop, 133 .prio_changed = prio_changed_stop,
130 .switched_to = switched_to_stop, 134 .switched_to = switched_to_stop,
135 .update_curr = update_curr_stop,
131}; 136};
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 4aada6d9fe74..15f2511a1b7c 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -387,7 +387,8 @@ static struct ctl_table kern_table[] = {
387 .data = &sysctl_numa_balancing_scan_size, 387 .data = &sysctl_numa_balancing_scan_size,
388 .maxlen = sizeof(unsigned int), 388 .maxlen = sizeof(unsigned int),
389 .mode = 0644, 389 .mode = 0644,
390 .proc_handler = proc_dointvec, 390 .proc_handler = proc_dointvec_minmax,
391 .extra1 = &one,
391 }, 392 },
392 { 393 {
393 .procname = "numa_balancing", 394 .procname = "numa_balancing",
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 9c94c19f1305..55449909f114 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -72,7 +72,7 @@ static u64 cev_delta2ns(unsigned long latch, struct clock_event_device *evt,
72 * Also omit the add if it would overflow the u64 boundary. 72 * Also omit the add if it would overflow the u64 boundary.
73 */ 73 */
74 if ((~0ULL - clc > rnd) && 74 if ((~0ULL - clc > rnd) &&
75 (!ismax || evt->mult <= (1U << evt->shift))) 75 (!ismax || evt->mult <= (1ULL << evt->shift)))
76 clc += rnd; 76 clc += rnd;
77 77
78 do_div(clc, evt->mult); 78 do_div(clc, evt->mult);
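
The cev_delta2ns() change matters because a shift count equal to the width of
unsigned int is undefined in C, so "1U << evt->shift" cannot be relied on once the
clockevent shift reaches 32; promoting the constant to unsigned long long keeps the
comparison well defined. A small standalone sketch with made-up values:

    #include <stdio.h>

    int main(void)
    {
            unsigned int shift = 32;              /* plausible clockevent shift */
            unsigned long long mult = 5ULL << 32; /* illustrative multiplier */

            /* 64-bit form: defined for any shift up to 63 */
            if (mult <= (1ULL << shift))
                    printf("mult fits below 1ULL << %u\n", shift);
            else
                    printf("mult exceeds 1ULL << %u\n", shift);

            /* the old "(1U << shift)" would be undefined behaviour here */
            return 0;
    }
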
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 492b986195d5..a16b67859e2a 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -553,7 +553,7 @@ static int cpu_timer_sample_group(const clockid_t which_clock,
553 *sample = cputime_to_expires(cputime.utime); 553 *sample = cputime_to_expires(cputime.utime);
554 break; 554 break;
555 case CPUCLOCK_SCHED: 555 case CPUCLOCK_SCHED:
556 *sample = cputime.sum_exec_runtime + task_delta_exec(p); 556 *sample = cputime.sum_exec_runtime;
557 break; 557 break;
558 } 558 }
559 return 0; 559 return 0;
diff --git a/kernel/time/posix-timers.c b/kernel/time/posix-timers.c
index 42b463ad90f2..31ea01f42e1f 100644
--- a/kernel/time/posix-timers.c
+++ b/kernel/time/posix-timers.c
@@ -636,6 +636,7 @@ SYSCALL_DEFINE3(timer_create, const clockid_t, which_clock,
636 goto out; 636 goto out;
637 } 637 }
638 } else { 638 } else {
639 memset(&event.sigev_value, 0, sizeof(event.sigev_value));
639 event.sigev_notify = SIGEV_SIGNAL; 640 event.sigev_notify = SIGEV_SIGNAL;
640 event.sigev_signo = SIGALRM; 641 event.sigev_signo = SIGALRM;
641 event.sigev_value.sival_int = new_timer->it_id; 642 event.sigev_value.sival_int = new_timer->it_id;
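
The timer_create() hunk zeroes sigev_value before filling in only sival_int; without
the memset, the remaining bytes of the union (the pointer member on 64-bit) keep
stale stack contents. A hedged userspace sketch of the same zero-then-assign pattern,
using a stand-in union rather than the real sigval type:

    #include <stdio.h>
    #include <string.h>

    union my_sigval {
            int   sival_int;
            void *sival_ptr;
    };

    int main(void)
    {
            union my_sigval v;

            memset(&v, 0, sizeof(v));   /* clear every byte of the union first */
            v.sival_int = 42;           /* then set only the member we need */

            /* on a little-endian machine the pointer view shows 0x2a,
             * with no leftover stack bytes in the upper half */
            printf("int = %d, ptr = %p\n", v.sival_int, v.sival_ptr);
            return 0;
    }
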
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index fb186b9ddf51..31c90fec4158 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -1925,8 +1925,16 @@ ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
1925 * when we are adding another op to the rec or removing the 1925 * when we are adding another op to the rec or removing the
1926 * current one. Thus, if the op is being added, we can 1926 * current one. Thus, if the op is being added, we can
1927 * ignore it because it hasn't attached itself to the rec 1927 * ignore it because it hasn't attached itself to the rec
1928 * yet. That means we just need to find the op that has a 1928 * yet.
1929 * trampoline and is not beeing added. 1929 *
1930 * If an ops is being modified (hooking to different functions)
1931 * then we don't care about the new functions that are being
1932 * added, just the old ones (that are probably being removed).
1933 *
 1934 * If we are adding an ops to a function that is already using
 1935 * a trampoline, that trampoline needs to be removed (trampolines
 1936 * only serve a single attached ops), so an ops that is not being
 1937 * modified also needs to be checked.
1930 */ 1938 */
1931 do_for_each_ftrace_op(op, ftrace_ops_list) { 1939 do_for_each_ftrace_op(op, ftrace_ops_list) {
1932 1940
@@ -1940,17 +1948,23 @@ ftrace_find_tramp_ops_curr(struct dyn_ftrace *rec)
1940 if (op->flags & FTRACE_OPS_FL_ADDING) 1948 if (op->flags & FTRACE_OPS_FL_ADDING)
1941 continue; 1949 continue;
1942 1950
1951
1943 /* 1952 /*
1944 * If the ops is not being added and has a trampoline, 1953 * If the ops is being modified and is in the old
1945 * then it must be the one that we want! 1954 * hash, then it is probably being removed from this
1955 * function.
1946 */ 1956 */
1947 if (hash_contains_ip(ip, op->func_hash))
1948 return op;
1949
1950 /* If the ops is being modified, it may be in the old hash. */
1951 if ((op->flags & FTRACE_OPS_FL_MODIFYING) && 1957 if ((op->flags & FTRACE_OPS_FL_MODIFYING) &&
1952 hash_contains_ip(ip, &op->old_hash)) 1958 hash_contains_ip(ip, &op->old_hash))
1953 return op; 1959 return op;
1960 /*
1961 * If the ops is not being added or modified, and it's
1962 * in its normal filter hash, then this must be the one
1963 * we want!
1964 */
1965 if (!(op->flags & FTRACE_OPS_FL_MODIFYING) &&
1966 hash_contains_ip(ip, op->func_hash))
1967 return op;
1954 1968
1955 } while_for_each_ftrace_op(op); 1969 } while_for_each_ftrace_op(op);
1956 1970
@@ -2293,10 +2307,13 @@ static void ftrace_run_update_code(int command)
2293 FTRACE_WARN_ON(ret); 2307 FTRACE_WARN_ON(ret);
2294} 2308}
2295 2309
2296static void ftrace_run_modify_code(struct ftrace_ops *ops, int command) 2310static void ftrace_run_modify_code(struct ftrace_ops *ops, int command,
2311 struct ftrace_hash *old_hash)
2297{ 2312{
2298 ops->flags |= FTRACE_OPS_FL_MODIFYING; 2313 ops->flags |= FTRACE_OPS_FL_MODIFYING;
2314 ops->old_hash.filter_hash = old_hash;
2299 ftrace_run_update_code(command); 2315 ftrace_run_update_code(command);
2316 ops->old_hash.filter_hash = NULL;
2300 ops->flags &= ~FTRACE_OPS_FL_MODIFYING; 2317 ops->flags &= ~FTRACE_OPS_FL_MODIFYING;
2301} 2318}
2302 2319
@@ -3340,7 +3357,7 @@ static struct ftrace_ops trace_probe_ops __read_mostly =
3340 3357
3341static int ftrace_probe_registered; 3358static int ftrace_probe_registered;
3342 3359
3343static void __enable_ftrace_function_probe(void) 3360static void __enable_ftrace_function_probe(struct ftrace_hash *old_hash)
3344{ 3361{
3345 int ret; 3362 int ret;
3346 int i; 3363 int i;
@@ -3348,7 +3365,8 @@ static void __enable_ftrace_function_probe(void)
3348 if (ftrace_probe_registered) { 3365 if (ftrace_probe_registered) {
3349 /* still need to update the function call sites */ 3366 /* still need to update the function call sites */
3350 if (ftrace_enabled) 3367 if (ftrace_enabled)
3351 ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS); 3368 ftrace_run_modify_code(&trace_probe_ops, FTRACE_UPDATE_CALLS,
3369 old_hash);
3352 return; 3370 return;
3353 } 3371 }
3354 3372
@@ -3477,13 +3495,14 @@ register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
3477 } while_for_each_ftrace_rec(); 3495 } while_for_each_ftrace_rec();
3478 3496
3479 ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash); 3497 ret = ftrace_hash_move(&trace_probe_ops, 1, orig_hash, hash);
3498
3499 __enable_ftrace_function_probe(old_hash);
3500
3480 if (!ret) 3501 if (!ret)
3481 free_ftrace_hash_rcu(old_hash); 3502 free_ftrace_hash_rcu(old_hash);
3482 else 3503 else
3483 count = ret; 3504 count = ret;
3484 3505
3485 __enable_ftrace_function_probe();
3486
3487 out_unlock: 3506 out_unlock:
3488 mutex_unlock(&ftrace_lock); 3507 mutex_unlock(&ftrace_lock);
3489 out: 3508 out:
@@ -3764,10 +3783,11 @@ ftrace_match_addr(struct ftrace_hash *hash, unsigned long ip, int remove)
3764 return add_hash_entry(hash, ip); 3783 return add_hash_entry(hash, ip);
3765} 3784}
3766 3785
3767static void ftrace_ops_update_code(struct ftrace_ops *ops) 3786static void ftrace_ops_update_code(struct ftrace_ops *ops,
3787 struct ftrace_hash *old_hash)
3768{ 3788{
3769 if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled) 3789 if (ops->flags & FTRACE_OPS_FL_ENABLED && ftrace_enabled)
3770 ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS); 3790 ftrace_run_modify_code(ops, FTRACE_UPDATE_CALLS, old_hash);
3771} 3791}
3772 3792
3773static int 3793static int
@@ -3813,7 +3833,7 @@ ftrace_set_hash(struct ftrace_ops *ops, unsigned char *buf, int len,
3813 old_hash = *orig_hash; 3833 old_hash = *orig_hash;
3814 ret = ftrace_hash_move(ops, enable, orig_hash, hash); 3834 ret = ftrace_hash_move(ops, enable, orig_hash, hash);
3815 if (!ret) { 3835 if (!ret) {
3816 ftrace_ops_update_code(ops); 3836 ftrace_ops_update_code(ops, old_hash);
3817 free_ftrace_hash_rcu(old_hash); 3837 free_ftrace_hash_rcu(old_hash);
3818 } 3838 }
3819 mutex_unlock(&ftrace_lock); 3839 mutex_unlock(&ftrace_lock);
@@ -4058,7 +4078,7 @@ int ftrace_regex_release(struct inode *inode, struct file *file)
4058 ret = ftrace_hash_move(iter->ops, filter_hash, 4078 ret = ftrace_hash_move(iter->ops, filter_hash,
4059 orig_hash, iter->hash); 4079 orig_hash, iter->hash);
4060 if (!ret) { 4080 if (!ret) {
4061 ftrace_ops_update_code(iter->ops); 4081 ftrace_ops_update_code(iter->ops, old_hash);
4062 free_ftrace_hash_rcu(old_hash); 4082 free_ftrace_hash_rcu(old_hash);
4063 } 4083 }
4064 mutex_unlock(&ftrace_lock); 4084 mutex_unlock(&ftrace_lock);
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 2d75c94ae87d..a56e07c8d15b 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -538,16 +538,18 @@ static void rb_wake_up_waiters(struct irq_work *work)
538 * ring_buffer_wait - wait for input to the ring buffer 538 * ring_buffer_wait - wait for input to the ring buffer
539 * @buffer: buffer to wait on 539 * @buffer: buffer to wait on
540 * @cpu: the cpu buffer to wait on 540 * @cpu: the cpu buffer to wait on
541 * @full: wait until a full page is available, if @cpu != RING_BUFFER_ALL_CPUS
541 * 542 *
542 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon 543 * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon
543 * as data is added to any of the @buffer's cpu buffers. Otherwise 544 * as data is added to any of the @buffer's cpu buffers. Otherwise
544 * it will wait for data to be added to a specific cpu buffer. 545 * it will wait for data to be added to a specific cpu buffer.
545 */ 546 */
546int ring_buffer_wait(struct ring_buffer *buffer, int cpu) 547int ring_buffer_wait(struct ring_buffer *buffer, int cpu, bool full)
547{ 548{
548 struct ring_buffer_per_cpu *cpu_buffer; 549 struct ring_buffer_per_cpu *uninitialized_var(cpu_buffer);
549 DEFINE_WAIT(wait); 550 DEFINE_WAIT(wait);
550 struct rb_irq_work *work; 551 struct rb_irq_work *work;
552 int ret = 0;
551 553
552 /* 554 /*
553 * Depending on what the caller is waiting for, either any 555 * Depending on what the caller is waiting for, either any
@@ -564,36 +566,61 @@ int ring_buffer_wait(struct ring_buffer *buffer, int cpu)
564 } 566 }
565 567
566 568
567 prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE); 569 while (true) {
570 prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE);
568 571
569 /* 572 /*
570 * The events can happen in critical sections where 573 * The events can happen in critical sections where
571 * checking a work queue can cause deadlocks. 574 * checking a work queue can cause deadlocks.
572 * After adding a task to the queue, this flag is set 575 * After adding a task to the queue, this flag is set
573 * only to notify events to try to wake up the queue 576 * only to notify events to try to wake up the queue
574 * using irq_work. 577 * using irq_work.
575 * 578 *
576 * We don't clear it even if the buffer is no longer 579 * We don't clear it even if the buffer is no longer
577 * empty. The flag only causes the next event to run 580 * empty. The flag only causes the next event to run
578 * irq_work to do the work queue wake up. The worse 581 * irq_work to do the work queue wake up. The worse
579 * that can happen if we race with !trace_empty() is that 582 * that can happen if we race with !trace_empty() is that
580 * an event will cause an irq_work to try to wake up 583 * an event will cause an irq_work to try to wake up
581 * an empty queue. 584 * an empty queue.
582 * 585 *
583 * There's no reason to protect this flag either, as 586 * There's no reason to protect this flag either, as
584 * the work queue and irq_work logic will do the necessary 587 * the work queue and irq_work logic will do the necessary
585 * synchronization for the wake ups. The only thing 588 * synchronization for the wake ups. The only thing
586 * that is necessary is that the wake up happens after 589 * that is necessary is that the wake up happens after
587 * a task has been queued. It's OK for spurious wake ups. 590 * a task has been queued. It's OK for spurious wake ups.
588 */ 591 */
589 work->waiters_pending = true; 592 work->waiters_pending = true;
593
594 if (signal_pending(current)) {
595 ret = -EINTR;
596 break;
597 }
598
599 if (cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer))
600 break;
601
602 if (cpu != RING_BUFFER_ALL_CPUS &&
603 !ring_buffer_empty_cpu(buffer, cpu)) {
604 unsigned long flags;
605 bool pagebusy;
606
607 if (!full)
608 break;
609
610 raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
611 pagebusy = cpu_buffer->reader_page == cpu_buffer->commit_page;
612 raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
613
614 if (!pagebusy)
615 break;
616 }
590 617
591 if ((cpu == RING_BUFFER_ALL_CPUS && ring_buffer_empty(buffer)) ||
592 (cpu != RING_BUFFER_ALL_CPUS && ring_buffer_empty_cpu(buffer, cpu)))
593 schedule(); 618 schedule();
619 }
594 620
595 finish_wait(&work->waiters, &wait); 621 finish_wait(&work->waiters, &wait);
596 return 0; 622
623 return ret;
597} 624}
598 625
599/** 626/**
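
ring_buffer_wait() is now a loop: prepare to wait, give up with -EINTR on a pending
signal, re-check the wake-up condition, and only then schedule, so spurious wake-ups
and the new "full page" condition are handled in one place. A rough userspace
analogue of that loop shape, using a pthread condition variable in place of the
waitqueue and an interrupted flag in place of signal_pending() (an analogy only, not
the kernel mechanism):

    #include <errno.h>
    #include <pthread.h>
    #include <stdbool.h>

    struct waiter {
            pthread_mutex_t lock;
            pthread_cond_t  cond;
            bool            data_ready;   /* the condition being waited for */
            bool            interrupted;  /* stands in for signal_pending() */
    };

    static int wait_for_data(struct waiter *w)
    {
            int ret = 0;

            pthread_mutex_lock(&w->lock);
            while (!w->data_ready) {
                    if (w->interrupted) {          /* bail out like -EINTR */
                            ret = -EINTR;
                            break;
                    }
                    /* sleeps and re-checks the condition on every wake-up */
                    pthread_cond_wait(&w->cond, &w->lock);
            }
            pthread_mutex_unlock(&w->lock);
            return ret;
    }

    int main(void)
    {
            struct waiter w = {
                    .lock       = PTHREAD_MUTEX_INITIALIZER,
                    .cond       = PTHREAD_COND_INITIALIZER,
                    .data_ready = true,   /* already satisfied: returns 0 at once */
            };
            return wait_for_data(&w) ? 1 : 0;
    }
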
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 8a528392b1f4..92f4a6cee172 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1076,13 +1076,14 @@ update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
1076} 1076}
1077#endif /* CONFIG_TRACER_MAX_TRACE */ 1077#endif /* CONFIG_TRACER_MAX_TRACE */
1078 1078
1079static int wait_on_pipe(struct trace_iterator *iter) 1079static int wait_on_pipe(struct trace_iterator *iter, bool full)
1080{ 1080{
1081 /* Iterators are static, they should be filled or empty */ 1081 /* Iterators are static, they should be filled or empty */
1082 if (trace_buffer_iter(iter, iter->cpu_file)) 1082 if (trace_buffer_iter(iter, iter->cpu_file))
1083 return 0; 1083 return 0;
1084 1084
1085 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file); 1085 return ring_buffer_wait(iter->trace_buffer->buffer, iter->cpu_file,
1086 full);
1086} 1087}
1087 1088
1088#ifdef CONFIG_FTRACE_STARTUP_TEST 1089#ifdef CONFIG_FTRACE_STARTUP_TEST
@@ -4434,15 +4435,12 @@ static int tracing_wait_pipe(struct file *filp)
4434 4435
4435 mutex_unlock(&iter->mutex); 4436 mutex_unlock(&iter->mutex);
4436 4437
4437 ret = wait_on_pipe(iter); 4438 ret = wait_on_pipe(iter, false);
4438 4439
4439 mutex_lock(&iter->mutex); 4440 mutex_lock(&iter->mutex);
4440 4441
4441 if (ret) 4442 if (ret)
4442 return ret; 4443 return ret;
4443
4444 if (signal_pending(current))
4445 return -EINTR;
4446 } 4444 }
4447 4445
4448 return 1; 4446 return 1;
@@ -5372,16 +5370,12 @@ tracing_buffers_read(struct file *filp, char __user *ubuf,
5372 goto out_unlock; 5370 goto out_unlock;
5373 } 5371 }
5374 mutex_unlock(&trace_types_lock); 5372 mutex_unlock(&trace_types_lock);
5375 ret = wait_on_pipe(iter); 5373 ret = wait_on_pipe(iter, false);
5376 mutex_lock(&trace_types_lock); 5374 mutex_lock(&trace_types_lock);
5377 if (ret) { 5375 if (ret) {
5378 size = ret; 5376 size = ret;
5379 goto out_unlock; 5377 goto out_unlock;
5380 } 5378 }
5381 if (signal_pending(current)) {
5382 size = -EINTR;
5383 goto out_unlock;
5384 }
5385 goto again; 5379 goto again;
5386 } 5380 }
5387 size = 0; 5381 size = 0;
@@ -5500,7 +5494,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5500 }; 5494 };
5501 struct buffer_ref *ref; 5495 struct buffer_ref *ref;
5502 int entries, size, i; 5496 int entries, size, i;
5503 ssize_t ret; 5497 ssize_t ret = 0;
5504 5498
5505 mutex_lock(&trace_types_lock); 5499 mutex_lock(&trace_types_lock);
5506 5500
@@ -5538,13 +5532,16 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5538 int r; 5532 int r;
5539 5533
5540 ref = kzalloc(sizeof(*ref), GFP_KERNEL); 5534 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
5541 if (!ref) 5535 if (!ref) {
5536 ret = -ENOMEM;
5542 break; 5537 break;
5538 }
5543 5539
5544 ref->ref = 1; 5540 ref->ref = 1;
5545 ref->buffer = iter->trace_buffer->buffer; 5541 ref->buffer = iter->trace_buffer->buffer;
5546 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file); 5542 ref->page = ring_buffer_alloc_read_page(ref->buffer, iter->cpu_file);
5547 if (!ref->page) { 5543 if (!ref->page) {
5544 ret = -ENOMEM;
5548 kfree(ref); 5545 kfree(ref);
5549 break; 5546 break;
5550 } 5547 }
@@ -5582,19 +5579,19 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos,
5582 5579
5583 /* did we read anything? */ 5580 /* did we read anything? */
5584 if (!spd.nr_pages) { 5581 if (!spd.nr_pages) {
5582 if (ret)
5583 goto out;
5584
5585 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) { 5585 if ((file->f_flags & O_NONBLOCK) || (flags & SPLICE_F_NONBLOCK)) {
5586 ret = -EAGAIN; 5586 ret = -EAGAIN;
5587 goto out; 5587 goto out;
5588 } 5588 }
5589 mutex_unlock(&trace_types_lock); 5589 mutex_unlock(&trace_types_lock);
5590 ret = wait_on_pipe(iter); 5590 ret = wait_on_pipe(iter, true);
5591 mutex_lock(&trace_types_lock); 5591 mutex_lock(&trace_types_lock);
5592 if (ret) 5592 if (ret)
5593 goto out; 5593 goto out;
5594 if (signal_pending(current)) { 5594
5595 ret = -EINTR;
5596 goto out;
5597 }
5598 goto again; 5595 goto again;
5599 } 5596 }
5600 5597
diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c
index 4dc8b79c5f75..29228c4d5696 100644
--- a/kernel/trace/trace_syscalls.c
+++ b/kernel/trace/trace_syscalls.c
@@ -313,7 +313,7 @@ static void ftrace_syscall_enter(void *data, struct pt_regs *regs, long id)
313 int size; 313 int size;
314 314
315 syscall_nr = trace_get_syscall_nr(current, regs); 315 syscall_nr = trace_get_syscall_nr(current, regs);
316 if (syscall_nr < 0) 316 if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
317 return; 317 return;
318 318
319 /* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE) */ 319 /* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE) */
@@ -360,7 +360,7 @@ static void ftrace_syscall_exit(void *data, struct pt_regs *regs, long ret)
360 int syscall_nr; 360 int syscall_nr;
361 361
362 syscall_nr = trace_get_syscall_nr(current, regs); 362 syscall_nr = trace_get_syscall_nr(current, regs);
363 if (syscall_nr < 0) 363 if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
364 return; 364 return;
365 365
366 /* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE()) */ 366 /* Here we're inside tp handler's rcu_read_lock_sched (__DO_TRACE()) */
@@ -567,7 +567,7 @@ static void perf_syscall_enter(void *ignore, struct pt_regs *regs, long id)
567 int size; 567 int size;
568 568
569 syscall_nr = trace_get_syscall_nr(current, regs); 569 syscall_nr = trace_get_syscall_nr(current, regs);
570 if (syscall_nr < 0) 570 if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
571 return; 571 return;
572 if (!test_bit(syscall_nr, enabled_perf_enter_syscalls)) 572 if (!test_bit(syscall_nr, enabled_perf_enter_syscalls))
573 return; 573 return;
@@ -641,7 +641,7 @@ static void perf_syscall_exit(void *ignore, struct pt_regs *regs, long ret)
641 int size; 641 int size;
642 642
643 syscall_nr = trace_get_syscall_nr(current, regs); 643 syscall_nr = trace_get_syscall_nr(current, regs);
644 if (syscall_nr < 0) 644 if (syscall_nr < 0 || syscall_nr >= NR_syscalls)
645 return; 645 return;
646 if (!test_bit(syscall_nr, enabled_perf_exit_syscalls)) 646 if (!test_bit(syscall_nr, enabled_perf_exit_syscalls))
647 return; 647 return;
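
Each handler now rejects syscall numbers at or above NR_syscalls as well as negative
ones before using them to index the per-syscall metadata and enable bitmaps. The
pattern in isolation, with an ordinary array and invented names:

    #include <stdio.h>

    #define NR_ENTRIES 16
    static const char *table[NR_ENTRIES] = { [0] = "read", [1] = "write" };

    static const char *lookup(int nr)
    {
            if (nr < 0 || nr >= NR_ENTRIES)   /* reject unknown/compat numbers */
                    return NULL;
            return table[nr];
    }

    int main(void)
    {
            const char *a = lookup(1);
            const char *b = lookup(99);

            printf("%s\n", a ? a : "(out of range)");
            printf("%s\n", b ? b : "(out of range)");
            return 0;
    }
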
diff --git a/lib/Makefile b/lib/Makefile
index 7512dc978f18..0211d2bd5e17 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -10,7 +10,7 @@ endif
10lib-y := ctype.o string.o vsprintf.o cmdline.o \ 10lib-y := ctype.o string.o vsprintf.o cmdline.o \
11 rbtree.o radix-tree.o dump_stack.o timerqueue.o\ 11 rbtree.o radix-tree.o dump_stack.o timerqueue.o\
12 idr.o int_sqrt.o extable.o \ 12 idr.o int_sqrt.o extable.o \
13 sha1.o md5.o irq_regs.o reciprocal_div.o argv_split.o \ 13 sha1.o md5.o irq_regs.o argv_split.o \
14 proportions.o flex_proportions.o ratelimit.o show_mem.o \ 14 proportions.o flex_proportions.o ratelimit.o show_mem.o \
15 is_single_threaded.o plist.o decompress.o kobject_uevent.o \ 15 is_single_threaded.o plist.o decompress.o kobject_uevent.o \
16 earlycpio.o 16 earlycpio.o
@@ -26,7 +26,7 @@ obj-y += bcd.o div64.o sort.o parser.o halfmd4.o debug_locks.o random32.o \
26 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \ 26 bust_spinlocks.o hexdump.o kasprintf.o bitmap.o scatterlist.o \
27 gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \ 27 gcd.o lcm.o list_sort.o uuid.o flex_array.o iovec.o clz_ctz.o \
28 bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \ 28 bsearch.o find_last_bit.o find_next_bit.o llist.o memweight.o kfifo.o \
29 percpu-refcount.o percpu_ida.o hash.o rhashtable.o 29 percpu-refcount.o percpu_ida.o hash.o rhashtable.o reciprocal_div.o
30obj-y += string_helpers.o 30obj-y += string_helpers.o
31obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o 31obj-$(CONFIG_TEST_STRING_HELPERS) += test-string_helpers.o
32obj-y += kstrtox.o 32obj-y += kstrtox.o
diff --git a/lib/bitmap.c b/lib/bitmap.c
index cd250a2e14cb..b499ab6ada29 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -131,7 +131,9 @@ void __bitmap_shift_right(unsigned long *dst,
131 lower = src[off + k]; 131 lower = src[off + k];
132 if (left && off + k == lim - 1) 132 if (left && off + k == lim - 1)
133 lower &= mask; 133 lower &= mask;
134 dst[k] = upper << (BITS_PER_LONG - rem) | lower >> rem; 134 dst[k] = lower >> rem;
135 if (rem)
136 dst[k] |= upper << (BITS_PER_LONG - rem);
135 if (left && k == lim - 1) 137 if (left && k == lim - 1)
136 dst[k] &= mask; 138 dst[k] &= mask;
137 } 139 }
@@ -172,7 +174,9 @@ void __bitmap_shift_left(unsigned long *dst,
172 upper = src[k]; 174 upper = src[k];
173 if (left && k == lim - 1) 175 if (left && k == lim - 1)
174 upper &= (1UL << left) - 1; 176 upper &= (1UL << left) - 1;
175 dst[k + off] = lower >> (BITS_PER_LONG - rem) | upper << rem; 177 dst[k + off] = upper << rem;
178 if (rem)
179 dst[k + off] |= lower >> (BITS_PER_LONG - rem);
176 if (left && k + off == lim - 1) 180 if (left && k + off == lim - 1)
177 dst[k + off] &= (1UL << left) - 1; 181 dst[k + off] &= (1UL << left) - 1;
178 } 182 }
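
Both bitmap shift helpers now add the cross-word contribution only when rem is
non-zero, because shifting an unsigned long by BITS_PER_LONG is undefined and
previously corrupted shifts that are exact multiples of the word size. A
self-contained sketch of the guarded combine (the first printed value assumes a
64-bit long):

    #include <stdio.h>

    #define BITS_PER_LONG (8 * (int)sizeof(unsigned long))

    static unsigned long shift_right_pair(unsigned long upper, unsigned long lower,
                                          unsigned int rem)
    {
            unsigned long dst = lower >> rem;

            if (rem)    /* avoid an undefined shift by BITS_PER_LONG when rem == 0 */
                    dst |= upper << (BITS_PER_LONG - rem);
            return dst;
    }

    int main(void)
    {
            printf("%lx\n", shift_right_pair(0x1UL, 0xf0UL, 4)); /* 100000000000000f */
            printf("%lx\n", shift_right_pair(0x1UL, 0xf0UL, 0)); /* f0: upper untouched */
            return 0;
    }
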
diff --git a/lib/cmdline.c b/lib/cmdline.c
index 76a712e6e20e..8f13cf73c2ec 100644
--- a/lib/cmdline.c
+++ b/lib/cmdline.c
@@ -160,3 +160,32 @@ unsigned long long memparse(const char *ptr, char **retptr)
160 return ret; 160 return ret;
161} 161}
162EXPORT_SYMBOL(memparse); 162EXPORT_SYMBOL(memparse);
163
164/**
 165 * parse_option_str - Parse a string and check whether an option is set
166 * @str: String to be parsed
167 * @option: option name
168 *
169 * This function parses a string containing a comma-separated list of
170 * strings like a=b,c.
171 *
 172 * Return true if the option is present in the string, false otherwise.
173 */
174bool parse_option_str(const char *str, const char *option)
175{
176 while (*str) {
177 if (!strncmp(str, option, strlen(option))) {
178 str += strlen(option);
179 if (!*str || *str == ',')
180 return true;
181 }
182
183 while (*str && *str != ',')
184 str++;
185
186 if (*str == ',')
187 str++;
188 }
189
190 return false;
191}
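
A standalone copy of the new parse_option_str() together with an example caller; the
command-line strings are invented for illustration:

    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    static bool parse_option_str(const char *str, const char *option)
    {
            while (*str) {
                    if (!strncmp(str, option, strlen(option))) {
                            str += strlen(option);
                            if (!*str || *str == ',')
                                    return true;
                    }

                    while (*str && *str != ',')
                            str++;

                    if (*str == ',')
                            str++;
            }

            return false;
    }

    int main(void)
    {
            /* exact option match, possibly followed by a comma */
            printf("%d\n", parse_option_str("acpi=off,noirq", "noirq"));      /* 1 */
            /* prefix of a longer token does not count as a match */
            printf("%d\n", parse_option_str("acpi=off,noirqdebug", "noirq")); /* 0 */
            return 0;
    }
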
diff --git a/lib/genalloc.c b/lib/genalloc.c
index cce4dd68c40d..2e65d206b01c 100644
--- a/lib/genalloc.c
+++ b/lib/genalloc.c
@@ -598,6 +598,7 @@ struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
598 598
599 return pool; 599 return pool;
600} 600}
601EXPORT_SYMBOL(devm_gen_pool_create);
601 602
602/** 603/**
603 * dev_get_gen_pool - Obtain the gen_pool (if any) for a device 604 * dev_get_gen_pool - Obtain the gen_pool (if any) for a device
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 081be3ba9ea8..624a0b7c05ef 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -230,7 +230,7 @@ int rhashtable_expand(struct rhashtable *ht, gfp_t flags)
230 ht->shift++; 230 ht->shift++;
231 231
232 /* For each new bucket, search the corresponding old bucket 232 /* For each new bucket, search the corresponding old bucket
233 * for the rst entry that hashes to the new bucket, and 233 * for the first entry that hashes to the new bucket, and
234 * link the new bucket to that entry. Since all the entries 234 * link the new bucket to that entry. Since all the entries
235 * which will end up in the new bucket appear in the same 235 * which will end up in the new bucket appear in the same
236 * old bucket, this constructs an entirely valid new hash 236 * old bucket, this constructs an entirely valid new hash
@@ -248,8 +248,8 @@ int rhashtable_expand(struct rhashtable *ht, gfp_t flags)
248 } 248 }
249 249
250 /* Publish the new table pointer. Lookups may now traverse 250 /* Publish the new table pointer. Lookups may now traverse
251 * the new table, but they will not benet from any 251 * the new table, but they will not benefit from any
252 * additional efciency until later steps unzip the buckets. 252 * additional efficiency until later steps unzip the buckets.
253 */ 253 */
254 rcu_assign_pointer(ht->tbl, new_tbl); 254 rcu_assign_pointer(ht->tbl, new_tbl);
255 255
@@ -306,14 +306,14 @@ int rhashtable_shrink(struct rhashtable *ht, gfp_t flags)
306 306
307 ht->shift--; 307 ht->shift--;
308 308
309 /* Link each bucket in the new table to the rst bucket 309 /* Link each bucket in the new table to the first bucket
310 * in the old table that contains entries which will hash 310 * in the old table that contains entries which will hash
311 * to the new bucket. 311 * to the new bucket.
312 */ 312 */
313 for (i = 0; i < ntbl->size; i++) { 313 for (i = 0; i < ntbl->size; i++) {
314 ntbl->buckets[i] = tbl->buckets[i]; 314 ntbl->buckets[i] = tbl->buckets[i];
315 315
316 /* Link each bucket in the new table to the rst bucket 316 /* Link each bucket in the new table to the first bucket
317 * in the old table that contains entries which will hash 317 * in the old table that contains entries which will hash
318 * to the new bucket. 318 * to the new bucket.
319 */ 319 */
diff --git a/lib/scatterlist.c b/lib/scatterlist.c
index 9cdf62f8accd..c9f2e8c6ccc9 100644
--- a/lib/scatterlist.c
+++ b/lib/scatterlist.c
@@ -203,10 +203,10 @@ void __sg_free_table(struct sg_table *table, unsigned int max_ents,
203 } 203 }
204 204
205 table->orig_nents -= sg_size; 205 table->orig_nents -= sg_size;
206 if (!skip_first_chunk) { 206 if (skip_first_chunk)
207 free_fn(sgl, alloc_size);
208 skip_first_chunk = false; 207 skip_first_chunk = false;
209 } 208 else
209 free_fn(sgl, alloc_size);
210 sgl = next; 210 sgl = next;
211 } 211 }
212 212
diff --git a/lib/show_mem.c b/lib/show_mem.c
index 09225796991a..5e256271b47b 100644
--- a/lib/show_mem.c
+++ b/lib/show_mem.c
@@ -28,7 +28,7 @@ void show_mem(unsigned int filter)
28 continue; 28 continue;
29 29
30 total += zone->present_pages; 30 total += zone->present_pages;
31 reserved = zone->present_pages - zone->managed_pages; 31 reserved += zone->present_pages - zone->managed_pages;
32 32
33 if (is_highmem_idx(zoneid)) 33 if (is_highmem_idx(zoneid))
34 highmem += zone->present_pages; 34 highmem += zone->present_pages;
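
The show_mem() fix turns a per-zone assignment into an accumulation, so the reported
reserved count covers every zone instead of only the last one scanned. The bug class
in miniature, with invented zone sizes:

    #include <stdio.h>

    int main(void)
    {
            unsigned long present[] = {   4096, 262144, 1048576 };
            unsigned long managed[] = {   4000, 260000, 1040000 };
            unsigned long reserved = 0;

            for (int i = 0; i < 3; i++)
                    reserved += present[i] - managed[i]; /* '=' here kept only the last zone */

            printf("reserved pages: %lu\n", reserved);   /* 96 + 2144 + 8576 = 10816 */
            return 0;
    }
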
diff --git a/lib/string.c b/lib/string.c
index 2fc20aa06f84..10063300b830 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -598,6 +598,22 @@ void *memset(void *s, int c, size_t count)
598EXPORT_SYMBOL(memset); 598EXPORT_SYMBOL(memset);
599#endif 599#endif
600 600
601/**
602 * memzero_explicit - Fill a region of memory (e.g. sensitive
603 * keying data) with 0s.
604 * @s: Pointer to the start of the area.
605 * @count: The size of the area.
606 *
607 * memzero_explicit() doesn't need an arch-specific version as
608 * it just invokes the one of memset() implicitly.
609 */
610void memzero_explicit(void *s, size_t count)
611{
612 memset(s, 0, count);
613 OPTIMIZER_HIDE_VAR(s);
614}
615EXPORT_SYMBOL(memzero_explicit);
616
601#ifndef __HAVE_ARCH_MEMCPY 617#ifndef __HAVE_ARCH_MEMCPY
602/** 618/**
603 * memcpy - Copy one area of memory to another 619 * memcpy - Copy one area of memory to another
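
memzero_explicit() exists because a memset() of data that is never read again is a
dead store the compiler is allowed to drop; OPTIMIZER_HIDE_VAR() makes the pointer
escape so the clear survives. A hedged userspace sketch of the same idea, using a
GCC/Clang-style empty asm as the barrier (the barrier syntax is an assumption, not
the kernel macro):

    #include <stddef.h>
    #include <string.h>

    static void explicit_clear(void *s, size_t n)
    {
            memset(s, 0, n);
            /* pretend the memory is still observed, so the store is not elided */
            __asm__ __volatile__("" : : "r"(s) : "memory");
    }

    static void handle_secret(void)
    {
            char key[32];

            /* ... derive and use the key ... */

            /* a bare memset(key, 0, sizeof(key)) here could be optimized away */
            explicit_clear(key, sizeof(key));
    }

    int main(void)
    {
            handle_secret();
            return 0;
    }
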
diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c
index b3cbe19f71b5..fcad8322ef36 100644
--- a/mm/balloon_compaction.c
+++ b/mm/balloon_compaction.c
@@ -68,11 +68,13 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
68 * to be released by the balloon driver. 68 * to be released by the balloon driver.
69 */ 69 */
70 if (trylock_page(page)) { 70 if (trylock_page(page)) {
71#ifdef CONFIG_BALLOON_COMPACTION
71 if (!PagePrivate(page)) { 72 if (!PagePrivate(page)) {
72 /* raced with isolation */ 73 /* raced with isolation */
73 unlock_page(page); 74 unlock_page(page);
74 continue; 75 continue;
75 } 76 }
77#endif
76 spin_lock_irqsave(&b_dev_info->pages_lock, flags); 78 spin_lock_irqsave(&b_dev_info->pages_lock, flags);
77 balloon_page_delete(page); 79 balloon_page_delete(page);
78 __count_vm_event(BALLOON_DEFLATE); 80 __count_vm_event(BALLOON_DEFLATE);
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 8a000cebb0d7..477be696511d 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -243,13 +243,10 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
243 243
244static int reset_managed_pages_done __initdata; 244static int reset_managed_pages_done __initdata;
245 245
246static inline void __init reset_node_managed_pages(pg_data_t *pgdat) 246void reset_node_managed_pages(pg_data_t *pgdat)
247{ 247{
248 struct zone *z; 248 struct zone *z;
249 249
250 if (reset_managed_pages_done)
251 return;
252
253 for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) 250 for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
254 z->managed_pages = 0; 251 z->managed_pages = 0;
255} 252}
@@ -258,8 +255,12 @@ void __init reset_all_zones_managed_pages(void)
258{ 255{
259 struct pglist_data *pgdat; 256 struct pglist_data *pgdat;
260 257
258 if (reset_managed_pages_done)
259 return;
260
261 for_each_online_pgdat(pgdat) 261 for_each_online_pgdat(pgdat)
262 reset_node_managed_pages(pgdat); 262 reset_node_managed_pages(pgdat);
263
263 reset_managed_pages_done = 1; 264 reset_managed_pages_done = 1;
264} 265}
265 266
diff --git a/mm/cma.c b/mm/cma.c
index 963bc4add9af..fde706e1284f 100644
--- a/mm/cma.c
+++ b/mm/cma.c
@@ -124,6 +124,7 @@ static int __init cma_activate_area(struct cma *cma)
124 124
125err: 125err:
126 kfree(cma->bitmap); 126 kfree(cma->bitmap);
127 cma->count = 0;
127 return -EINVAL; 128 return -EINVAL;
128} 129}
129 130
@@ -217,9 +218,8 @@ int __init cma_declare_contiguous(phys_addr_t base,
217 phys_addr_t highmem_start = __pa(high_memory); 218 phys_addr_t highmem_start = __pa(high_memory);
218 int ret = 0; 219 int ret = 0;
219 220
220 pr_debug("%s(size %lx, base %08lx, limit %08lx alignment %08lx)\n", 221 pr_debug("%s(size %pa, base %pa, limit %pa alignment %pa)\n",
221 __func__, (unsigned long)size, (unsigned long)base, 222 __func__, &size, &base, &limit, &alignment);
222 (unsigned long)limit, (unsigned long)alignment);
223 223
224 if (cma_area_count == ARRAY_SIZE(cma_areas)) { 224 if (cma_area_count == ARRAY_SIZE(cma_areas)) {
225 pr_err("Not enough slots for CMA reserved regions!\n"); 225 pr_err("Not enough slots for CMA reserved regions!\n");
@@ -244,52 +244,72 @@ int __init cma_declare_contiguous(phys_addr_t base,
244 size = ALIGN(size, alignment); 244 size = ALIGN(size, alignment);
245 limit &= ~(alignment - 1); 245 limit &= ~(alignment - 1);
246 246
247 if (!base)
248 fixed = false;
249
247 /* size should be aligned with order_per_bit */ 250 /* size should be aligned with order_per_bit */
248 if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit)) 251 if (!IS_ALIGNED(size >> PAGE_SHIFT, 1 << order_per_bit))
249 return -EINVAL; 252 return -EINVAL;
250 253
251 /* 254 /*
252 * adjust limit to avoid crossing low/high memory boundary for 255 * If allocating at a fixed base the request region must not cross the
253 * automatically allocated regions 256 * low/high memory boundary.
254 */ 257 */
255 if (((limit == 0 || limit > memblock_end) && 258 if (fixed && base < highmem_start && base + size > highmem_start) {
256 (memblock_end - size < highmem_start &&
257 memblock_end > highmem_start)) ||
258 (!fixed && limit > highmem_start && limit - size < highmem_start)) {
259 limit = highmem_start;
260 }
261
262 if (fixed && base < highmem_start && base+size > highmem_start) {
263 ret = -EINVAL; 259 ret = -EINVAL;
264 pr_err("Region at %08lx defined on low/high memory boundary (%08lx)\n", 260 pr_err("Region at %pa defined on low/high memory boundary (%pa)\n",
265 (unsigned long)base, (unsigned long)highmem_start); 261 &base, &highmem_start);
266 goto err; 262 goto err;
267 } 263 }
268 264
265 /*
266 * If the limit is unspecified or above the memblock end, its effective
267 * value will be the memblock end. Set it explicitly to simplify further
268 * checks.
269 */
270 if (limit == 0 || limit > memblock_end)
271 limit = memblock_end;
272
269 /* Reserve memory */ 273 /* Reserve memory */
270 if (base && fixed) { 274 if (fixed) {
271 if (memblock_is_region_reserved(base, size) || 275 if (memblock_is_region_reserved(base, size) ||
272 memblock_reserve(base, size) < 0) { 276 memblock_reserve(base, size) < 0) {
273 ret = -EBUSY; 277 ret = -EBUSY;
274 goto err; 278 goto err;
275 } 279 }
276 } else { 280 } else {
277 phys_addr_t addr = memblock_alloc_range(size, alignment, base, 281 phys_addr_t addr = 0;
278 limit); 282
283 /*
284 * All pages in the reserved area must come from the same zone.
285 * If the requested region crosses the low/high memory boundary,
286 * try allocating from high memory first and fall back to low
287 * memory in case of failure.
288 */
289 if (base < highmem_start && limit > highmem_start) {
290 addr = memblock_alloc_range(size, alignment,
291 highmem_start, limit);
292 limit = highmem_start;
293 }
294
279 if (!addr) { 295 if (!addr) {
280 ret = -ENOMEM; 296 addr = memblock_alloc_range(size, alignment, base,
281 goto err; 297 limit);
282 } else { 298 if (!addr) {
283 base = addr; 299 ret = -ENOMEM;
300 goto err;
301 }
284 } 302 }
303
304 base = addr;
285 } 305 }
286 306
287 ret = cma_init_reserved_mem(base, size, order_per_bit, res_cma); 307 ret = cma_init_reserved_mem(base, size, order_per_bit, res_cma);
288 if (ret) 308 if (ret)
289 goto err; 309 goto err;
290 310
291 pr_info("Reserved %ld MiB at %08lx\n", (unsigned long)size / SZ_1M, 311 pr_info("Reserved %ld MiB at %pa\n", (unsigned long)size / SZ_1M,
292 (unsigned long)base); 312 &base);
293 return 0; 313 return 0;
294 314
295err: 315err:
diff --git a/mm/compaction.c b/mm/compaction.c
index edba18aed173..f9792ba3537c 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -479,6 +479,16 @@ isolate_freepages_range(struct compact_control *cc,
479 479
480 block_end_pfn = min(block_end_pfn, end_pfn); 480 block_end_pfn = min(block_end_pfn, end_pfn);
481 481
482 /*
 483 * pfn could pass block_end_pfn if the isolated free page
 484 * is larger than pageblock order. In this case, we adjust
 485 * the scanning range to the correct block.
486 */
487 if (pfn >= block_end_pfn) {
488 block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
489 block_end_pfn = min(block_end_pfn, end_pfn);
490 }
491
482 if (!pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone)) 492 if (!pageblock_pfn_to_page(pfn, block_end_pfn, cc->zone))
483 break; 493 break;
484 494
@@ -784,6 +794,9 @@ isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
784 cc->nr_migratepages = 0; 794 cc->nr_migratepages = 0;
785 break; 795 break;
786 } 796 }
797
798 if (cc->nr_migratepages == COMPACT_CLUSTER_MAX)
799 break;
787 } 800 }
788 acct_isolated(cc->zone, cc); 801 acct_isolated(cc->zone, cc);
789 802
@@ -1026,8 +1039,12 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
1026 } 1039 }
1027 1040
1028 acct_isolated(zone, cc); 1041 acct_isolated(zone, cc);
1029 /* Record where migration scanner will be restarted */ 1042 /*
1030 cc->migrate_pfn = low_pfn; 1043 * Record where migration scanner will be restarted. If we end up in
1044 * the same pageblock as the free scanner, make the scanners fully
1045 * meet so that compact_finished() terminates compaction.
1046 */
1047 cc->migrate_pfn = (end_pfn <= cc->free_pfn) ? low_pfn : cc->free_pfn;
1031 1048
1032 return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE; 1049 return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
1033} 1050}
diff --git a/mm/frontswap.c b/mm/frontswap.c
index c30eec536f03..f2a3571c6e22 100644
--- a/mm/frontswap.c
+++ b/mm/frontswap.c
@@ -244,8 +244,10 @@ int __frontswap_store(struct page *page)
244 the (older) page from frontswap 244 the (older) page from frontswap
245 */ 245 */
246 inc_frontswap_failed_stores(); 246 inc_frontswap_failed_stores();
247 if (dup) 247 if (dup) {
248 __frontswap_clear(sis, offset); 248 __frontswap_clear(sis, offset);
249 frontswap_ops->invalidate_page(type, offset);
250 }
249 } 251 }
250 if (frontswap_writethrough_enabled) 252 if (frontswap_writethrough_enabled)
251 /* report failure so swap also writes to swap device */ 253 /* report failure so swap also writes to swap device */
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 74c78aa8bc2f..de984159cf0b 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -200,7 +200,7 @@ retry:
200 preempt_disable(); 200 preempt_disable();
201 if (cmpxchg(&huge_zero_page, NULL, zero_page)) { 201 if (cmpxchg(&huge_zero_page, NULL, zero_page)) {
202 preempt_enable(); 202 preempt_enable();
203 __free_page(zero_page); 203 __free_pages(zero_page, compound_order(zero_page));
204 goto retry; 204 goto retry;
205 } 205 }
206 206
@@ -232,7 +232,7 @@ static unsigned long shrink_huge_zero_page_scan(struct shrinker *shrink,
232 if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) { 232 if (atomic_cmpxchg(&huge_zero_refcount, 1, 0) == 1) {
233 struct page *zero_page = xchg(&huge_zero_page, NULL); 233 struct page *zero_page = xchg(&huge_zero_page, NULL);
234 BUG_ON(zero_page == NULL); 234 BUG_ON(zero_page == NULL);
235 __free_page(zero_page); 235 __free_pages(zero_page, compound_order(zero_page));
236 return HPAGE_PMD_NR; 236 return HPAGE_PMD_NR;
237 } 237 }
238 238
@@ -803,7 +803,7 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
803 return VM_FAULT_FALLBACK; 803 return VM_FAULT_FALLBACK;
804 if (unlikely(anon_vma_prepare(vma))) 804 if (unlikely(anon_vma_prepare(vma)))
805 return VM_FAULT_OOM; 805 return VM_FAULT_OOM;
806 if (unlikely(khugepaged_enter(vma))) 806 if (unlikely(khugepaged_enter(vma, vma->vm_flags)))
807 return VM_FAULT_OOM; 807 return VM_FAULT_OOM;
808 if (!(flags & FAULT_FLAG_WRITE) && 808 if (!(flags & FAULT_FLAG_WRITE) &&
809 transparent_hugepage_use_zero_page()) { 809 transparent_hugepage_use_zero_page()) {
@@ -1970,7 +1970,7 @@ int hugepage_madvise(struct vm_area_struct *vma,
1970 * register it here without waiting a page fault that 1970 * register it here without waiting a page fault that
1971 * may not happen any time soon. 1971 * may not happen any time soon.
1972 */ 1972 */
1973 if (unlikely(khugepaged_enter_vma_merge(vma))) 1973 if (unlikely(khugepaged_enter_vma_merge(vma, *vm_flags)))
1974 return -ENOMEM; 1974 return -ENOMEM;
1975 break; 1975 break;
1976 case MADV_NOHUGEPAGE: 1976 case MADV_NOHUGEPAGE:
@@ -2071,7 +2071,8 @@ int __khugepaged_enter(struct mm_struct *mm)
2071 return 0; 2071 return 0;
2072} 2072}
2073 2073
2074int khugepaged_enter_vma_merge(struct vm_area_struct *vma) 2074int khugepaged_enter_vma_merge(struct vm_area_struct *vma,
2075 unsigned long vm_flags)
2075{ 2076{
2076 unsigned long hstart, hend; 2077 unsigned long hstart, hend;
2077 if (!vma->anon_vma) 2078 if (!vma->anon_vma)
@@ -2083,11 +2084,11 @@ int khugepaged_enter_vma_merge(struct vm_area_struct *vma)
2083 if (vma->vm_ops) 2084 if (vma->vm_ops)
2084 /* khugepaged not yet working on file or special mappings */ 2085 /* khugepaged not yet working on file or special mappings */
2085 return 0; 2086 return 0;
2086 VM_BUG_ON_VMA(vma->vm_flags & VM_NO_THP, vma); 2087 VM_BUG_ON_VMA(vm_flags & VM_NO_THP, vma);
2087 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; 2088 hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
2088 hend = vma->vm_end & HPAGE_PMD_MASK; 2089 hend = vma->vm_end & HPAGE_PMD_MASK;
2089 if (hstart < hend) 2090 if (hstart < hend)
2090 return khugepaged_enter(vma); 2091 return khugepaged_enter(vma, vm_flags);
2091 return 0; 2092 return 0;
2092} 2093}
2093 2094
diff --git a/mm/internal.h b/mm/internal.h
index 829304090b90..a4f90ba7068e 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -108,6 +108,31 @@ extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
108/* 108/*
109 * in mm/page_alloc.c 109 * in mm/page_alloc.c
110 */ 110 */
111
112/*
113 * Locate the struct page for both the matching buddy in our
114 * pair (buddy1) and the combined O(n+1) page they form (page).
115 *
116 * 1) Any buddy B1 will have an order O twin B2 which satisfies
117 * the following equation:
118 * B2 = B1 ^ (1 << O)
119 * For example, if the starting buddy (buddy2) is #8 its order
120 * 1 buddy is #10:
121 * B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
122 *
123 * 2) Any buddy B will have an order O+1 parent P which
124 * satisfies the following equation:
125 * P = B & ~(1 << O)
126 *
127 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
128 */
129static inline unsigned long
130__find_buddy_index(unsigned long page_idx, unsigned int order)
131{
132 return page_idx ^ (1 << order);
133}
134
135extern int __isolate_free_page(struct page *page, unsigned int order);
111extern void __free_pages_bootmem(struct page *page, unsigned int order); 136extern void __free_pages_bootmem(struct page *page, unsigned int order);
112extern void prep_compound_page(struct page *page, unsigned long order); 137extern void prep_compound_page(struct page *page, unsigned long order);
113#ifdef CONFIG_MEMORY_FAILURE 138#ifdef CONFIG_MEMORY_FAILURE
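
The comment moved into mm/internal.h documents the buddy relations
B2 = B1 ^ (1 << O) and P = B & ~(1 << O). A tiny standalone check of the worked
example it gives (page 8, order 1, buddy 10):

    #include <stdio.h>

    static unsigned long find_buddy_index(unsigned long page_idx, unsigned int order)
    {
            return page_idx ^ (1UL << order);
    }

    static unsigned long find_parent_index(unsigned long page_idx, unsigned int order)
    {
            return page_idx & ~(1UL << order);
    }

    int main(void)
    {
            printf("buddy(8, 1)   = %lu\n", find_buddy_index(8, 1));    /* 10 */
            printf("buddy(10, 1)  = %lu\n", find_buddy_index(10, 1));   /*  8 */
            printf("parent(10, 1) = %lu\n", find_parent_index(10, 1));  /*  8 */
            return 0;
    }
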
diff --git a/mm/iov_iter.c b/mm/iov_iter.c
index eafcf60f6b83..e34a3cb6aad6 100644
--- a/mm/iov_iter.c
+++ b/mm/iov_iter.c
@@ -911,9 +911,9 @@ size_t iov_iter_single_seg_count(const struct iov_iter *i)
911 if (i->nr_segs == 1) 911 if (i->nr_segs == 1)
912 return i->count; 912 return i->count;
913 else if (i->type & ITER_BVEC) 913 else if (i->type & ITER_BVEC)
914 return min(i->count, i->iov->iov_len - i->iov_offset);
915 else
916 return min(i->count, i->bvec->bv_len - i->iov_offset); 914 return min(i->count, i->bvec->bv_len - i->iov_offset);
915 else
916 return min(i->count, i->iov->iov_len - i->iov_offset);
917} 917}
918EXPORT_SYMBOL(iov_iter_single_seg_count); 918EXPORT_SYMBOL(iov_iter_single_seg_count);
919 919
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index 23976fd885fd..d6ac0e33e150 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1536,12 +1536,8 @@ int mem_cgroup_swappiness(struct mem_cgroup *memcg)
1536 * start move here. 1536 * start move here.
1537 */ 1537 */
1538 1538
1539/* for quick checking without looking up memcg */
1540atomic_t memcg_moving __read_mostly;
1541
1542static void mem_cgroup_start_move(struct mem_cgroup *memcg) 1539static void mem_cgroup_start_move(struct mem_cgroup *memcg)
1543{ 1540{
1544 atomic_inc(&memcg_moving);
1545 atomic_inc(&memcg->moving_account); 1541 atomic_inc(&memcg->moving_account);
1546 synchronize_rcu(); 1542 synchronize_rcu();
1547} 1543}
@@ -1552,10 +1548,8 @@ static void mem_cgroup_end_move(struct mem_cgroup *memcg)
1552 * Now, mem_cgroup_clear_mc() may call this function with NULL. 1548 * Now, mem_cgroup_clear_mc() may call this function with NULL.
1553 * We check NULL in callee rather than caller. 1549 * We check NULL in callee rather than caller.
1554 */ 1550 */
1555 if (memcg) { 1551 if (memcg)
1556 atomic_dec(&memcg_moving);
1557 atomic_dec(&memcg->moving_account); 1552 atomic_dec(&memcg->moving_account);
1558 }
1559} 1553}
1560 1554
1561/* 1555/*
@@ -2204,41 +2198,52 @@ cleanup:
2204 return true; 2198 return true;
2205} 2199}
2206 2200
2207/* 2201/**
2208 * Used to update mapped file or writeback or other statistics. 2202 * mem_cgroup_begin_page_stat - begin a page state statistics transaction
2203 * @page: page that is going to change accounted state
2204 * @locked: &memcg->move_lock slowpath was taken
2205 * @flags: IRQ-state flags for &memcg->move_lock
2209 * 2206 *
2210 * Notes: Race condition 2207 * This function must mark the beginning of an accounted page state
2208 * change to prevent double accounting when the page is concurrently
2209 * being moved to another memcg:
2211 * 2210 *
2212 * Charging occurs during page instantiation, while the page is 2211 * memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
2213 * unmapped and locked in page migration, or while the page table is 2212 * if (TestClearPageState(page))
2214 * locked in THP migration. No race is possible. 2213 * mem_cgroup_update_page_stat(memcg, state, -1);
2214 * mem_cgroup_end_page_stat(memcg, locked, flags);
2215 * 2215 *
2216 * Uncharge happens to pages with zero references, no race possible. 2216 * The RCU lock is held throughout the transaction. The fast path can
2217 * get away without acquiring the memcg->move_lock (@locked is false)
2218 * because page moving starts with an RCU grace period.
2217 * 2219 *
2218 * Charge moving between groups is protected by checking mm->moving 2220 * The RCU lock also protects the memcg from being freed when the page
2219 * account and taking the move_lock in the slowpath. 2221 * state that is going to change is the only thing preventing the page
2222 * from being uncharged. E.g. end-writeback clearing PageWriteback(),
2223 * which allows migration to go ahead and uncharge the page before the
2224 * account transaction might be complete.
2220 */ 2225 */
2221 2226struct mem_cgroup *mem_cgroup_begin_page_stat(struct page *page,
2222void __mem_cgroup_begin_update_page_stat(struct page *page, 2227 bool *locked,
2223 bool *locked, unsigned long *flags) 2228 unsigned long *flags)
2224{ 2229{
2225 struct mem_cgroup *memcg; 2230 struct mem_cgroup *memcg;
2226 struct page_cgroup *pc; 2231 struct page_cgroup *pc;
2227 2232
2233 rcu_read_lock();
2234
2235 if (mem_cgroup_disabled())
2236 return NULL;
2237
2228 pc = lookup_page_cgroup(page); 2238 pc = lookup_page_cgroup(page);
2229again: 2239again:
2230 memcg = pc->mem_cgroup; 2240 memcg = pc->mem_cgroup;
2231 if (unlikely(!memcg || !PageCgroupUsed(pc))) 2241 if (unlikely(!memcg || !PageCgroupUsed(pc)))
2232 return; 2242 return NULL;
2233 /* 2243
2234 * If this memory cgroup is not under account moving, we don't 2244 *locked = false;
2235 * need to take move_lock_mem_cgroup(). Because we already hold
2236 * rcu_read_lock(), any calls to move_account will be delayed until
2237 * rcu_read_unlock().
2238 */
2239 VM_BUG_ON(!rcu_read_lock_held());
2240 if (atomic_read(&memcg->moving_account) <= 0) 2245 if (atomic_read(&memcg->moving_account) <= 0)
2241 return; 2246 return memcg;
2242 2247
2243 move_lock_mem_cgroup(memcg, flags); 2248 move_lock_mem_cgroup(memcg, flags);
2244 if (memcg != pc->mem_cgroup || !PageCgroupUsed(pc)) { 2249 if (memcg != pc->mem_cgroup || !PageCgroupUsed(pc)) {
@@ -2246,36 +2251,40 @@ again:
2246 goto again; 2251 goto again;
2247 } 2252 }
2248 *locked = true; 2253 *locked = true;
2254
2255 return memcg;
2249} 2256}
2250 2257
2251void __mem_cgroup_end_update_page_stat(struct page *page, unsigned long *flags) 2258/**
2259 * mem_cgroup_end_page_stat - finish a page state statistics transaction
2260 * @memcg: the memcg that was accounted against
2261 * @locked: value received from mem_cgroup_begin_page_stat()
2262 * @flags: value received from mem_cgroup_begin_page_stat()
2263 */
2264void mem_cgroup_end_page_stat(struct mem_cgroup *memcg, bool locked,
2265 unsigned long flags)
2252{ 2266{
2253 struct page_cgroup *pc = lookup_page_cgroup(page); 2267 if (memcg && locked)
2268 move_unlock_mem_cgroup(memcg, &flags);
2254 2269
2255 /* 2270 rcu_read_unlock();
2256 * It's guaranteed that pc->mem_cgroup never changes while
2257 * lock is held because a routine modifies pc->mem_cgroup
2258 * should take move_lock_mem_cgroup().
2259 */
2260 move_unlock_mem_cgroup(pc->mem_cgroup, flags);
2261} 2271}
2262 2272
2263void mem_cgroup_update_page_stat(struct page *page, 2273/**
2274 * mem_cgroup_update_page_stat - update page state statistics
2275 * @memcg: memcg to account against
2276 * @idx: page state item to account
2277 * @val: number of pages (positive or negative)
2278 *
2279 * See mem_cgroup_begin_page_stat() for locking requirements.
2280 */
2281void mem_cgroup_update_page_stat(struct mem_cgroup *memcg,
2264 enum mem_cgroup_stat_index idx, int val) 2282 enum mem_cgroup_stat_index idx, int val)
2265{ 2283{
2266 struct mem_cgroup *memcg;
2267 struct page_cgroup *pc = lookup_page_cgroup(page);
2268 unsigned long uninitialized_var(flags);
2269
2270 if (mem_cgroup_disabled())
2271 return;
2272
2273 VM_BUG_ON(!rcu_read_lock_held()); 2284 VM_BUG_ON(!rcu_read_lock_held());
2274 memcg = pc->mem_cgroup;
2275 if (unlikely(!memcg || !PageCgroupUsed(pc)))
2276 return;
2277 2285
2278 this_cpu_add(memcg->stat->count[idx], val); 2286 if (memcg)
2287 this_cpu_add(memcg->stat->count[idx], val);
2279} 2288}
2280 2289
2281/* 2290/*
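
The memcontrol.c hunks above fold the old __mem_cgroup_begin_update_page_stat()/__mem_cgroup_end_update_page_stat() pair into mem_cgroup_begin_page_stat()/mem_cgroup_end_page_stat(), which take rcu_read_lock() themselves and hand the resolved memcg back to the caller. A minimal caller sketch, modeled on the page-writeback.c and rmap.c hunks later in this diff; some_page_state_change() is illustrative, not part of the patch:

/*
 * Sketch of the new page-stat transaction, following the pattern used by
 * test_clear_page_writeback() below.  memcg may be NULL (memcg disabled or
 * page not charged); the stat helpers accept that.
 */
static void some_page_state_change(struct page *page)
{
	struct mem_cgroup *memcg;
	unsigned long flags;
	bool locked;

	/* Takes rcu_read_lock(); may also take the memcg move_lock. */
	memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);

	if (TestClearPageWriteback(page))
		mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);

	/* Drops move_lock if it was taken, then rcu_read_unlock(). */
	mem_cgroup_end_page_stat(memcg, locked, flags);
}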
diff --git a/mm/memory.c b/mm/memory.c
index 1cc6bfbd872e..d5f2ae9c4a23 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -815,20 +815,20 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm,
815 if (!pte_file(pte)) { 815 if (!pte_file(pte)) {
816 swp_entry_t entry = pte_to_swp_entry(pte); 816 swp_entry_t entry = pte_to_swp_entry(pte);
817 817
818 if (swap_duplicate(entry) < 0) 818 if (likely(!non_swap_entry(entry))) {
819 return entry.val; 819 if (swap_duplicate(entry) < 0)
820 820 return entry.val;
821 /* make sure dst_mm is on swapoff's mmlist. */ 821
822 if (unlikely(list_empty(&dst_mm->mmlist))) { 822 /* make sure dst_mm is on swapoff's mmlist. */
823 spin_lock(&mmlist_lock); 823 if (unlikely(list_empty(&dst_mm->mmlist))) {
824 if (list_empty(&dst_mm->mmlist)) 824 spin_lock(&mmlist_lock);
825 list_add(&dst_mm->mmlist, 825 if (list_empty(&dst_mm->mmlist))
826 &src_mm->mmlist); 826 list_add(&dst_mm->mmlist,
827 spin_unlock(&mmlist_lock); 827 &src_mm->mmlist);
828 } 828 spin_unlock(&mmlist_lock);
829 if (likely(!non_swap_entry(entry))) 829 }
830 rss[MM_SWAPENTS]++; 830 rss[MM_SWAPENTS]++;
831 else if (is_migration_entry(entry)) { 831 } else if (is_migration_entry(entry)) {
832 page = migration_entry_to_page(entry); 832 page = migration_entry_to_page(entry);
833 833
834 if (PageAnon(page)) 834 if (PageAnon(page))
@@ -1147,6 +1147,7 @@ again:
1147 print_bad_pte(vma, addr, ptent, page); 1147 print_bad_pte(vma, addr, ptent, page);
1148 if (unlikely(!__tlb_remove_page(tlb, page))) { 1148 if (unlikely(!__tlb_remove_page(tlb, page))) {
1149 force_flush = 1; 1149 force_flush = 1;
1150 addr += PAGE_SIZE;
1150 break; 1151 break;
1151 } 1152 }
1152 continue; 1153 continue;
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 29d8693d0c61..1bf4807cb21e 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -31,6 +31,7 @@
31#include <linux/stop_machine.h> 31#include <linux/stop_machine.h>
32#include <linux/hugetlb.h> 32#include <linux/hugetlb.h>
33#include <linux/memblock.h> 33#include <linux/memblock.h>
34#include <linux/bootmem.h>
34 35
35#include <asm/tlbflush.h> 36#include <asm/tlbflush.h>
36 37
@@ -1066,6 +1067,16 @@ out:
1066} 1067}
1067#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */ 1068#endif /* CONFIG_MEMORY_HOTPLUG_SPARSE */
1068 1069
1070static void reset_node_present_pages(pg_data_t *pgdat)
1071{
1072 struct zone *z;
1073
1074 for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
1075 z->present_pages = 0;
1076
1077 pgdat->node_present_pages = 0;
1078}
1079
1069/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */ 1080/* we are OK calling __meminit stuff here - we have CONFIG_MEMORY_HOTPLUG */
1070static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start) 1081static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
1071{ 1082{
@@ -1096,6 +1107,21 @@ static pg_data_t __ref *hotadd_new_pgdat(int nid, u64 start)
1096 build_all_zonelists(pgdat, NULL); 1107 build_all_zonelists(pgdat, NULL);
1097 mutex_unlock(&zonelists_mutex); 1108 mutex_unlock(&zonelists_mutex);
1098 1109
1110 /*
1111 * zone->managed_pages is set to an approximate value in
1112 * free_area_init_core(), which will cause
1113 * /sys/device/system/node/nodeX/meminfo has wrong data.
1114 * So reset it to 0 before any memory is onlined.
1115 */
1116 reset_node_managed_pages(pgdat);
1117
1118 /*
1119 * When memory is hot-added, all the memory is in offline state. So
1120 * clear all zones' present_pages because they will be updated in
1121 * online_pages() and offline_pages().
1122 */
1123 reset_node_present_pages(pgdat);
1124
1099 return pgdat; 1125 return pgdat;
1100} 1126}
1101 1127
@@ -1912,7 +1938,6 @@ void try_offline_node(int nid)
1912 unsigned long start_pfn = pgdat->node_start_pfn; 1938 unsigned long start_pfn = pgdat->node_start_pfn;
1913 unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages; 1939 unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
1914 unsigned long pfn; 1940 unsigned long pfn;
1915 struct page *pgdat_page = virt_to_page(pgdat);
1916 int i; 1941 int i;
1917 1942
1918 for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) { 1943 for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
@@ -1941,10 +1966,6 @@ void try_offline_node(int nid)
1941 node_set_offline(nid); 1966 node_set_offline(nid);
1942 unregister_one_node(nid); 1967 unregister_one_node(nid);
1943 1968
1944 if (!PageSlab(pgdat_page) && !PageCompound(pgdat_page))
1945 /* node data is allocated from boot memory */
1946 return;
1947
1948 /* free waittable in each zone */ 1969 /* free waittable in each zone */
1949 for (i = 0; i < MAX_NR_ZONES; i++) { 1970 for (i = 0; i < MAX_NR_ZONES; i++) {
1950 struct zone *zone = pgdat->node_zones + i; 1971 struct zone *zone = pgdat->node_zones + i;
diff --git a/mm/mmap.c b/mm/mmap.c
index 7f855206e7fb..ae919891a087 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -776,8 +776,11 @@ again: remove_next = 1 + (end > next->vm_end);
776 * shrinking vma had, to cover any anon pages imported. 776 * shrinking vma had, to cover any anon pages imported.
777 */ 777 */
778 if (exporter && exporter->anon_vma && !importer->anon_vma) { 778 if (exporter && exporter->anon_vma && !importer->anon_vma) {
779 if (anon_vma_clone(importer, exporter)) 779 int error;
780 return -ENOMEM; 780
781 error = anon_vma_clone(importer, exporter);
782 if (error)
783 return error;
781 importer->anon_vma = exporter->anon_vma; 784 importer->anon_vma = exporter->anon_vma;
782 } 785 }
783 } 786 }
@@ -1080,7 +1083,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
1080 end, prev->vm_pgoff, NULL); 1083 end, prev->vm_pgoff, NULL);
1081 if (err) 1084 if (err)
1082 return NULL; 1085 return NULL;
1083 khugepaged_enter_vma_merge(prev); 1086 khugepaged_enter_vma_merge(prev, vm_flags);
1084 return prev; 1087 return prev;
1085 } 1088 }
1086 1089
@@ -1099,7 +1102,7 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
1099 next->vm_pgoff - pglen, NULL); 1102 next->vm_pgoff - pglen, NULL);
1100 if (err) 1103 if (err)
1101 return NULL; 1104 return NULL;
1102 khugepaged_enter_vma_merge(area); 1105 khugepaged_enter_vma_merge(area, vm_flags);
1103 return area; 1106 return area;
1104 } 1107 }
1105 1108
@@ -2208,7 +2211,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
2208 } 2211 }
2209 } 2212 }
2210 vma_unlock_anon_vma(vma); 2213 vma_unlock_anon_vma(vma);
2211 khugepaged_enter_vma_merge(vma); 2214 khugepaged_enter_vma_merge(vma, vma->vm_flags);
2212 validate_mm(vma->vm_mm); 2215 validate_mm(vma->vm_mm);
2213 return error; 2216 return error;
2214} 2217}
@@ -2277,7 +2280,7 @@ int expand_downwards(struct vm_area_struct *vma,
2277 } 2280 }
2278 } 2281 }
2279 vma_unlock_anon_vma(vma); 2282 vma_unlock_anon_vma(vma);
2280 khugepaged_enter_vma_merge(vma); 2283 khugepaged_enter_vma_merge(vma, vma->vm_flags);
2281 validate_mm(vma->vm_mm); 2284 validate_mm(vma->vm_mm);
2282 return error; 2285 return error;
2283} 2286}
@@ -2469,7 +2472,8 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
2469 if (err) 2472 if (err)
2470 goto out_free_vma; 2473 goto out_free_vma;
2471 2474
2472 if (anon_vma_clone(new, vma)) 2475 err = anon_vma_clone(new, vma);
2476 if (err)
2473 goto out_free_mpol; 2477 goto out_free_mpol;
2474 2478
2475 if (new->vm_file) 2479 if (new->vm_file)
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
index 7c7ab32ee503..90b50468333e 100644
--- a/mm/nobootmem.c
+++ b/mm/nobootmem.c
@@ -145,12 +145,10 @@ static unsigned long __init free_low_memory_core_early(void)
145 145
146static int reset_managed_pages_done __initdata; 146static int reset_managed_pages_done __initdata;
147 147
148static inline void __init reset_node_managed_pages(pg_data_t *pgdat) 148void reset_node_managed_pages(pg_data_t *pgdat)
149{ 149{
150 struct zone *z; 150 struct zone *z;
151 151
152 if (reset_managed_pages_done)
153 return;
154 for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++) 152 for (z = pgdat->node_zones; z < pgdat->node_zones + MAX_NR_ZONES; z++)
155 z->managed_pages = 0; 153 z->managed_pages = 0;
156} 154}
@@ -159,8 +157,12 @@ void __init reset_all_zones_managed_pages(void)
159{ 157{
160 struct pglist_data *pgdat; 158 struct pglist_data *pgdat;
161 159
160 if (reset_managed_pages_done)
161 return;
162
162 for_each_online_pgdat(pgdat) 163 for_each_online_pgdat(pgdat)
163 reset_node_managed_pages(pgdat); 164 reset_node_managed_pages(pgdat);
165
164 reset_managed_pages_done = 1; 166 reset_managed_pages_done = 1;
165} 167}
166 168
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index bbf405a3a18f..5340f6b91312 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -404,6 +404,23 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
404 dump_tasks(memcg, nodemask); 404 dump_tasks(memcg, nodemask);
405} 405}
406 406
407/*
408 * Number of OOM killer invocations (including memcg OOM killer).
409 * Primarily used by PM freezer to check for potential races with
410 * OOM killed frozen task.
411 */
412static atomic_t oom_kills = ATOMIC_INIT(0);
413
414int oom_kills_count(void)
415{
416 return atomic_read(&oom_kills);
417}
418
419void note_oom_kill(void)
420{
421 atomic_inc(&oom_kills);
422}
423
407#define K(x) ((x) << (PAGE_SHIFT-10)) 424#define K(x) ((x) << (PAGE_SHIFT-10))
408/* 425/*
409 * Must be called while holding a reference to p, which will be released upon 426 * Must be called while holding a reference to p, which will be released upon
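
The new oom_kills counter above is exported for the PM freezer, which, per the comment, wants to notice an OOM kill that races with freezing. A hedged sketch of that usage pattern follows; it is not the actual kernel/power/process.c change (which is outside this excerpt), only an illustration of how the counter could be consumed:

static int freeze_processes_sketch(void)
{
	int saved = oom_kills_count();

	/* ... freeze userspace tasks here ... */

	if (oom_kills_count() != saved) {
		/* An OOM victim may have been frozen mid-kill: thaw and retry. */
		return -EBUSY;
	}
	return 0;
}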
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index ff24c9d83112..19ceae87522d 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2116,23 +2116,6 @@ void account_page_dirtied(struct page *page, struct address_space *mapping)
2116EXPORT_SYMBOL(account_page_dirtied); 2116EXPORT_SYMBOL(account_page_dirtied);
2117 2117
2118/* 2118/*
2119 * Helper function for set_page_writeback family.
2120 *
2121 * The caller must hold mem_cgroup_begin/end_update_page_stat() lock
2122 * while calling this function.
2123 * See test_set_page_writeback for example.
2124 *
2125 * NOTE: Unlike account_page_dirtied this does not rely on being atomic
2126 * wrt interrupts.
2127 */
2128void account_page_writeback(struct page *page)
2129{
2130 mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_WRITEBACK);
2131 inc_zone_page_state(page, NR_WRITEBACK);
2132}
2133EXPORT_SYMBOL(account_page_writeback);
2134
2135/*
2136 * For address_spaces which do not use buffers. Just tag the page as dirty in 2119 * For address_spaces which do not use buffers. Just tag the page as dirty in
2137 * its radix tree. 2120 * its radix tree.
2138 * 2121 *
@@ -2344,11 +2327,12 @@ EXPORT_SYMBOL(clear_page_dirty_for_io);
2344int test_clear_page_writeback(struct page *page) 2327int test_clear_page_writeback(struct page *page)
2345{ 2328{
2346 struct address_space *mapping = page_mapping(page); 2329 struct address_space *mapping = page_mapping(page);
2347 int ret;
2348 bool locked;
2349 unsigned long memcg_flags; 2330 unsigned long memcg_flags;
2331 struct mem_cgroup *memcg;
2332 bool locked;
2333 int ret;
2350 2334
2351 mem_cgroup_begin_update_page_stat(page, &locked, &memcg_flags); 2335 memcg = mem_cgroup_begin_page_stat(page, &locked, &memcg_flags);
2352 if (mapping) { 2336 if (mapping) {
2353 struct backing_dev_info *bdi = mapping->backing_dev_info; 2337 struct backing_dev_info *bdi = mapping->backing_dev_info;
2354 unsigned long flags; 2338 unsigned long flags;
@@ -2369,22 +2353,23 @@ int test_clear_page_writeback(struct page *page)
2369 ret = TestClearPageWriteback(page); 2353 ret = TestClearPageWriteback(page);
2370 } 2354 }
2371 if (ret) { 2355 if (ret) {
2372 mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_WRITEBACK); 2356 mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);
2373 dec_zone_page_state(page, NR_WRITEBACK); 2357 dec_zone_page_state(page, NR_WRITEBACK);
2374 inc_zone_page_state(page, NR_WRITTEN); 2358 inc_zone_page_state(page, NR_WRITTEN);
2375 } 2359 }
2376 mem_cgroup_end_update_page_stat(page, &locked, &memcg_flags); 2360 mem_cgroup_end_page_stat(memcg, locked, memcg_flags);
2377 return ret; 2361 return ret;
2378} 2362}
2379 2363
2380int __test_set_page_writeback(struct page *page, bool keep_write) 2364int __test_set_page_writeback(struct page *page, bool keep_write)
2381{ 2365{
2382 struct address_space *mapping = page_mapping(page); 2366 struct address_space *mapping = page_mapping(page);
2383 int ret;
2384 bool locked;
2385 unsigned long memcg_flags; 2367 unsigned long memcg_flags;
2368 struct mem_cgroup *memcg;
2369 bool locked;
2370 int ret;
2386 2371
2387 mem_cgroup_begin_update_page_stat(page, &locked, &memcg_flags); 2372 memcg = mem_cgroup_begin_page_stat(page, &locked, &memcg_flags);
2388 if (mapping) { 2373 if (mapping) {
2389 struct backing_dev_info *bdi = mapping->backing_dev_info; 2374 struct backing_dev_info *bdi = mapping->backing_dev_info;
2390 unsigned long flags; 2375 unsigned long flags;
@@ -2410,9 +2395,11 @@ int __test_set_page_writeback(struct page *page, bool keep_write)
2410 } else { 2395 } else {
2411 ret = TestSetPageWriteback(page); 2396 ret = TestSetPageWriteback(page);
2412 } 2397 }
2413 if (!ret) 2398 if (!ret) {
2414 account_page_writeback(page); 2399 mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_WRITEBACK);
2415 mem_cgroup_end_update_page_stat(page, &locked, &memcg_flags); 2400 inc_zone_page_state(page, NR_WRITEBACK);
2401 }
2402 mem_cgroup_end_page_stat(memcg, locked, memcg_flags);
2416 return ret; 2403 return ret;
2417 2404
2418} 2405}
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 736d8e1b6381..616a2c956b4b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -467,29 +467,6 @@ static inline void rmv_page_order(struct page *page)
467} 467}
468 468
469/* 469/*
470 * Locate the struct page for both the matching buddy in our
471 * pair (buddy1) and the combined O(n+1) page they form (page).
472 *
473 * 1) Any buddy B1 will have an order O twin B2 which satisfies
474 * the following equation:
475 * B2 = B1 ^ (1 << O)
476 * For example, if the starting buddy (buddy2) is #8 its order
477 * 1 buddy is #10:
478 * B2 = 8 ^ (1 << 1) = 8 ^ 2 = 10
479 *
480 * 2) Any buddy B will have an order O+1 parent P which
481 * satisfies the following equation:
482 * P = B & ~(1 << O)
483 *
484 * Assumption: *_mem_map is contiguous at least up to MAX_ORDER
485 */
486static inline unsigned long
487__find_buddy_index(unsigned long page_idx, unsigned int order)
488{
489 return page_idx ^ (1 << order);
490}
491
492/*
493 * This function checks whether a page is free && is the buddy 470 * This function checks whether a page is free && is the buddy
494 * we can do coalesce a page and its buddy if 471 * we can do coalesce a page and its buddy if
495 * (a) the buddy is not in a hole && 472 * (a) the buddy is not in a hole &&
@@ -569,6 +546,7 @@ static inline void __free_one_page(struct page *page,
569 unsigned long combined_idx; 546 unsigned long combined_idx;
570 unsigned long uninitialized_var(buddy_idx); 547 unsigned long uninitialized_var(buddy_idx);
571 struct page *buddy; 548 struct page *buddy;
549 int max_order = MAX_ORDER;
572 550
573 VM_BUG_ON(!zone_is_initialized(zone)); 551 VM_BUG_ON(!zone_is_initialized(zone));
574 552
@@ -577,13 +555,24 @@ static inline void __free_one_page(struct page *page,
577 return; 555 return;
578 556
579 VM_BUG_ON(migratetype == -1); 557 VM_BUG_ON(migratetype == -1);
558 if (is_migrate_isolate(migratetype)) {
559 /*
560 * We restrict max order of merging to prevent merge
561 * between freepages on isolate pageblock and normal
562 * pageblock. Without this, pageblock isolation
563 * could cause incorrect freepage accounting.
564 */
565 max_order = min(MAX_ORDER, pageblock_order + 1);
566 } else {
567 __mod_zone_freepage_state(zone, 1 << order, migratetype);
568 }
580 569
581 page_idx = pfn & ((1 << MAX_ORDER) - 1); 570 page_idx = pfn & ((1 << max_order) - 1);
582 571
583 VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page); 572 VM_BUG_ON_PAGE(page_idx & ((1 << order) - 1), page);
584 VM_BUG_ON_PAGE(bad_range(zone, page), page); 573 VM_BUG_ON_PAGE(bad_range(zone, page), page);
585 574
586 while (order < MAX_ORDER-1) { 575 while (order < max_order - 1) {
587 buddy_idx = __find_buddy_index(page_idx, order); 576 buddy_idx = __find_buddy_index(page_idx, order);
588 buddy = page + (buddy_idx - page_idx); 577 buddy = page + (buddy_idx - page_idx);
589 if (!page_is_buddy(page, buddy, order)) 578 if (!page_is_buddy(page, buddy, order))
@@ -594,9 +583,11 @@ static inline void __free_one_page(struct page *page,
594 */ 583 */
595 if (page_is_guard(buddy)) { 584 if (page_is_guard(buddy)) {
596 clear_page_guard_flag(buddy); 585 clear_page_guard_flag(buddy);
597 set_page_private(page, 0); 586 set_page_private(buddy, 0);
598 __mod_zone_freepage_state(zone, 1 << order, 587 if (!is_migrate_isolate(migratetype)) {
599 migratetype); 588 __mod_zone_freepage_state(zone, 1 << order,
589 migratetype);
590 }
600 } else { 591 } else {
601 list_del(&buddy->lru); 592 list_del(&buddy->lru);
602 zone->free_area[order].nr_free--; 593 zone->free_area[order].nr_free--;
@@ -715,14 +706,12 @@ static void free_pcppages_bulk(struct zone *zone, int count,
715 /* must delete as __free_one_page list manipulates */ 706 /* must delete as __free_one_page list manipulates */
716 list_del(&page->lru); 707 list_del(&page->lru);
717 mt = get_freepage_migratetype(page); 708 mt = get_freepage_migratetype(page);
709 if (unlikely(has_isolate_pageblock(zone)))
710 mt = get_pageblock_migratetype(page);
711
718 /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */ 712 /* MIGRATE_MOVABLE list may include MIGRATE_RESERVEs */
719 __free_one_page(page, page_to_pfn(page), zone, 0, mt); 713 __free_one_page(page, page_to_pfn(page), zone, 0, mt);
720 trace_mm_page_pcpu_drain(page, 0, mt); 714 trace_mm_page_pcpu_drain(page, 0, mt);
721 if (likely(!is_migrate_isolate_page(page))) {
722 __mod_zone_page_state(zone, NR_FREE_PAGES, 1);
723 if (is_migrate_cma(mt))
724 __mod_zone_page_state(zone, NR_FREE_CMA_PAGES, 1);
725 }
726 } while (--to_free && --batch_free && !list_empty(list)); 715 } while (--to_free && --batch_free && !list_empty(list));
727 } 716 }
728 spin_unlock(&zone->lock); 717 spin_unlock(&zone->lock);
@@ -739,9 +728,11 @@ static void free_one_page(struct zone *zone,
739 if (nr_scanned) 728 if (nr_scanned)
740 __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned); 729 __mod_zone_page_state(zone, NR_PAGES_SCANNED, -nr_scanned);
741 730
731 if (unlikely(has_isolate_pageblock(zone) ||
732 is_migrate_isolate(migratetype))) {
733 migratetype = get_pfnblock_migratetype(page, pfn);
734 }
742 __free_one_page(page, pfn, zone, order, migratetype); 735 __free_one_page(page, pfn, zone, order, migratetype);
743 if (unlikely(!is_migrate_isolate(migratetype)))
744 __mod_zone_freepage_state(zone, 1 << order, migratetype);
745 spin_unlock(&zone->lock); 736 spin_unlock(&zone->lock);
746} 737}
747 738
@@ -1484,7 +1475,7 @@ void split_page(struct page *page, unsigned int order)
1484} 1475}
1485EXPORT_SYMBOL_GPL(split_page); 1476EXPORT_SYMBOL_GPL(split_page);
1486 1477
1487static int __isolate_free_page(struct page *page, unsigned int order) 1478int __isolate_free_page(struct page *page, unsigned int order)
1488{ 1479{
1489 unsigned long watermark; 1480 unsigned long watermark;
1490 struct zone *zone; 1481 struct zone *zone;
@@ -2252,6 +2243,14 @@ __alloc_pages_may_oom(gfp_t gfp_mask, unsigned int order,
2252 } 2243 }
2253 2244
2254 /* 2245 /*
2246 * PM-freezer should be notified that there might be an OOM killer on
2247 * its way to kill and wake somebody up. This is too early and we might
2248 * end up not killing anything but false positives are acceptable.
2249 * See freeze_processes.
2250 */
2251 note_oom_kill();
2252
2253 /*
2255 * Go through the zonelist yet one more time, keep very high watermark 2254 * Go through the zonelist yet one more time, keep very high watermark
2256 * here, this is only to catch a parallel oom killing, we must fail if 2255 * here, this is only to catch a parallel oom killing, we must fail if
2257 * we're still under heavy pressure. 2256 * we're still under heavy pressure.
@@ -6400,13 +6399,12 @@ int alloc_contig_range(unsigned long start, unsigned long end,
6400 6399
6401 /* Make sure the range is really isolated. */ 6400 /* Make sure the range is really isolated. */
6402 if (test_pages_isolated(outer_start, end, false)) { 6401 if (test_pages_isolated(outer_start, end, false)) {
6403 pr_warn("alloc_contig_range test_pages_isolated(%lx, %lx) failed\n", 6402 pr_info("%s: [%lx, %lx) PFNs busy\n",
6404 outer_start, end); 6403 __func__, outer_start, end);
6405 ret = -EBUSY; 6404 ret = -EBUSY;
6406 goto done; 6405 goto done;
6407 } 6406 }
6408 6407
6409
6410 /* Grab isolated pages from freelists. */ 6408 /* Grab isolated pages from freelists. */
6411 outer_end = isolate_freepages_range(&cc, outer_start, end); 6409 outer_end = isolate_freepages_range(&cc, outer_start, end);
6412 if (!outer_end) { 6410 if (!outer_end) {
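
For reference, the buddy-index comment removed from page_alloc.c above describes the arithmetic that __free_one_page() still relies on and that the page_isolation.c hunk further down also calls into via __find_buddy_index(); the helper presumably moves to a shared header, which is not shown in this excerpt. A small worked restatement (find_buddy_index() here is local to the sketch):

static inline unsigned long find_buddy_index(unsigned long page_idx,
					     unsigned int order)
{
	/* B2 = B1 ^ (1 << O), as in the removed comment. */
	return page_idx ^ (1UL << order);
}

/*
 * Order-1 buddy of page index 8 is 10, and their order-2 parent is 8:
 *   find_buddy_index(8, 1) == 8 ^ 2 == 10
 *   combined_idx = buddy_idx & page_idx == 10 & 8 == 8
 * which matches P = B & ~(1 << O) from the removed comment.
 */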
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index 3708264d2833..5331c2bd85a2 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -171,6 +171,7 @@ static void free_page_cgroup(void *addr)
171 sizeof(struct page_cgroup) * PAGES_PER_SECTION; 171 sizeof(struct page_cgroup) * PAGES_PER_SECTION;
172 172
173 BUG_ON(PageReserved(page)); 173 BUG_ON(PageReserved(page));
174 kmemleak_free(addr);
174 free_pages_exact(addr, table_size); 175 free_pages_exact(addr, table_size);
175 } 176 }
176} 177}
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index d1473b2e9481..c8778f7e208e 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -60,6 +60,7 @@ out:
60 int migratetype = get_pageblock_migratetype(page); 60 int migratetype = get_pageblock_migratetype(page);
61 61
62 set_pageblock_migratetype(page, MIGRATE_ISOLATE); 62 set_pageblock_migratetype(page, MIGRATE_ISOLATE);
63 zone->nr_isolate_pageblock++;
63 nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE); 64 nr_pages = move_freepages_block(zone, page, MIGRATE_ISOLATE);
64 65
65 __mod_zone_freepage_state(zone, -nr_pages, migratetype); 66 __mod_zone_freepage_state(zone, -nr_pages, migratetype);
@@ -75,16 +76,54 @@ void unset_migratetype_isolate(struct page *page, unsigned migratetype)
75{ 76{
76 struct zone *zone; 77 struct zone *zone;
77 unsigned long flags, nr_pages; 78 unsigned long flags, nr_pages;
79 struct page *isolated_page = NULL;
80 unsigned int order;
81 unsigned long page_idx, buddy_idx;
82 struct page *buddy;
78 83
79 zone = page_zone(page); 84 zone = page_zone(page);
80 spin_lock_irqsave(&zone->lock, flags); 85 spin_lock_irqsave(&zone->lock, flags);
81 if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE) 86 if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
82 goto out; 87 goto out;
83 nr_pages = move_freepages_block(zone, page, migratetype); 88
84 __mod_zone_freepage_state(zone, nr_pages, migratetype); 89 /*
90 * Because freepage with more than pageblock_order on isolated
91 * pageblock is restricted to merge due to freepage counting problem,
92 * it is possible that there is free buddy page.
93 * move_freepages_block() doesn't care of merge so we need other
94 * approach in order to merge them. Isolation and free will make
95 * these pages to be merged.
96 */
97 if (PageBuddy(page)) {
98 order = page_order(page);
99 if (order >= pageblock_order) {
100 page_idx = page_to_pfn(page) & ((1 << MAX_ORDER) - 1);
101 buddy_idx = __find_buddy_index(page_idx, order);
102 buddy = page + (buddy_idx - page_idx);
103
104 if (!is_migrate_isolate_page(buddy)) {
105 __isolate_free_page(page, order);
106 set_page_refcounted(page);
107 isolated_page = page;
108 }
109 }
110 }
111
112 /*
113 * If we isolate freepage with more than pageblock_order, there
114 * should be no freepage in the range, so we could avoid costly
115 * pageblock scanning for freepage moving.
116 */
117 if (!isolated_page) {
118 nr_pages = move_freepages_block(zone, page, migratetype);
119 __mod_zone_freepage_state(zone, nr_pages, migratetype);
120 }
85 set_pageblock_migratetype(page, migratetype); 121 set_pageblock_migratetype(page, migratetype);
122 zone->nr_isolate_pageblock--;
86out: 123out:
87 spin_unlock_irqrestore(&zone->lock, flags); 124 spin_unlock_irqrestore(&zone->lock, flags);
125 if (isolated_page)
126 __free_pages(isolated_page, order);
88} 127}
89 128
90static inline struct page * 129static inline struct page *
diff --git a/mm/rmap.c b/mm/rmap.c
index 116a5053415b..3e4c7213210c 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -274,6 +274,7 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
274{ 274{
275 struct anon_vma_chain *avc; 275 struct anon_vma_chain *avc;
276 struct anon_vma *anon_vma; 276 struct anon_vma *anon_vma;
277 int error;
277 278
278 /* Don't bother if the parent process has no anon_vma here. */ 279 /* Don't bother if the parent process has no anon_vma here. */
279 if (!pvma->anon_vma) 280 if (!pvma->anon_vma)
@@ -283,8 +284,9 @@ int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
283 * First, attach the new VMA to the parent VMA's anon_vmas, 284 * First, attach the new VMA to the parent VMA's anon_vmas,
284 * so rmap can find non-COWed pages in child processes. 285 * so rmap can find non-COWed pages in child processes.
285 */ 286 */
286 if (anon_vma_clone(vma, pvma)) 287 error = anon_vma_clone(vma, pvma);
287 return -ENOMEM; 288 if (error)
289 return error;
288 290
289 /* Then add our own anon_vma. */ 291 /* Then add our own anon_vma. */
290 anon_vma = anon_vma_alloc(); 292 anon_vma = anon_vma_alloc();
@@ -1042,15 +1044,46 @@ void page_add_new_anon_rmap(struct page *page,
1042 */ 1044 */
1043void page_add_file_rmap(struct page *page) 1045void page_add_file_rmap(struct page *page)
1044{ 1046{
1045 bool locked; 1047 struct mem_cgroup *memcg;
1046 unsigned long flags; 1048 unsigned long flags;
1049 bool locked;
1047 1050
1048 mem_cgroup_begin_update_page_stat(page, &locked, &flags); 1051 memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
1049 if (atomic_inc_and_test(&page->_mapcount)) { 1052 if (atomic_inc_and_test(&page->_mapcount)) {
1050 __inc_zone_page_state(page, NR_FILE_MAPPED); 1053 __inc_zone_page_state(page, NR_FILE_MAPPED);
1051 mem_cgroup_inc_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED); 1054 mem_cgroup_inc_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
1052 } 1055 }
1053 mem_cgroup_end_update_page_stat(page, &locked, &flags); 1056 mem_cgroup_end_page_stat(memcg, locked, flags);
1057}
1058
1059static void page_remove_file_rmap(struct page *page)
1060{
1061 struct mem_cgroup *memcg;
1062 unsigned long flags;
1063 bool locked;
1064
1065 memcg = mem_cgroup_begin_page_stat(page, &locked, &flags);
1066
1067 /* page still mapped by someone else? */
1068 if (!atomic_add_negative(-1, &page->_mapcount))
1069 goto out;
1070
1071 /* Hugepages are not counted in NR_FILE_MAPPED for now. */
1072 if (unlikely(PageHuge(page)))
1073 goto out;
1074
1075 /*
1076 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
1077 * these counters are not modified in interrupt context, and
1078 * pte lock(a spinlock) is held, which implies preemption disabled.
1079 */
1080 __dec_zone_page_state(page, NR_FILE_MAPPED);
1081 mem_cgroup_dec_page_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
1082
1083 if (unlikely(PageMlocked(page)))
1084 clear_page_mlock(page);
1085out:
1086 mem_cgroup_end_page_stat(memcg, locked, flags);
1054} 1087}
1055 1088
1056/** 1089/**
@@ -1061,46 +1094,33 @@ void page_add_file_rmap(struct page *page)
1061 */ 1094 */
1062void page_remove_rmap(struct page *page) 1095void page_remove_rmap(struct page *page)
1063{ 1096{
1064 bool anon = PageAnon(page); 1097 if (!PageAnon(page)) {
1065 bool locked; 1098 page_remove_file_rmap(page);
1066 unsigned long flags; 1099 return;
1067 1100 }
1068 /*
1069 * The anon case has no mem_cgroup page_stat to update; but may
1070 * uncharge_page() below, where the lock ordering can deadlock if
1071 * we hold the lock against page_stat move: so avoid it on anon.
1072 */
1073 if (!anon)
1074 mem_cgroup_begin_update_page_stat(page, &locked, &flags);
1075 1101
1076 /* page still mapped by someone else? */ 1102 /* page still mapped by someone else? */
1077 if (!atomic_add_negative(-1, &page->_mapcount)) 1103 if (!atomic_add_negative(-1, &page->_mapcount))
1078 goto out; 1104 return;
1105
1106 /* Hugepages are not counted in NR_ANON_PAGES for now. */
1107 if (unlikely(PageHuge(page)))
1108 return;
1079 1109
1080 /* 1110 /*
1081 * Hugepages are not counted in NR_ANON_PAGES nor NR_FILE_MAPPED
1082 * and not charged by memcg for now.
1083 *
1084 * We use the irq-unsafe __{inc|mod}_zone_page_stat because 1111 * We use the irq-unsafe __{inc|mod}_zone_page_stat because
1085 * these counters are not modified in interrupt context, and 1112 * these counters are not modified in interrupt context, and
1086 * these counters are not modified in interrupt context, and
1087 * pte lock(a spinlock) is held, which implies preemption disabled. 1113 * pte lock(a spinlock) is held, which implies preemption disabled.
1088 */ 1114 */
1089 if (unlikely(PageHuge(page))) 1115 if (PageTransHuge(page))
1090 goto out; 1116 __dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES);
1091 if (anon) { 1117
1092 if (PageTransHuge(page)) 1118 __mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
1093 __dec_zone_page_state(page, 1119 -hpage_nr_pages(page));
1094 NR_ANON_TRANSPARENT_HUGEPAGES); 1120
1095 __mod_zone_page_state(page_zone(page), NR_ANON_PAGES,
1096 -hpage_nr_pages(page));
1097 } else {
1098 __dec_zone_page_state(page, NR_FILE_MAPPED);
1099 mem_cgroup_dec_page_stat(page, MEM_CGROUP_STAT_FILE_MAPPED);
1100 mem_cgroup_end_update_page_stat(page, &locked, &flags);
1101 }
1102 if (unlikely(PageMlocked(page))) 1121 if (unlikely(PageMlocked(page)))
1103 clear_page_mlock(page); 1122 clear_page_mlock(page);
1123
1104 /* 1124 /*
1105 * It would be tidy to reset the PageAnon mapping here, 1125 * It would be tidy to reset the PageAnon mapping here,
1106 * but that might overwrite a racing page_add_anon_rmap 1126 * but that might overwrite a racing page_add_anon_rmap
@@ -1110,10 +1130,6 @@ void page_remove_rmap(struct page *page)
1110 * Leaving it set also helps swapoff to reinstate ptes 1130 * Leaving it set also helps swapoff to reinstate ptes
1111 * faster for those pages still in swapcache. 1131 * faster for those pages still in swapcache.
1112 */ 1132 */
1113 return;
1114out:
1115 if (!anon)
1116 mem_cgroup_end_update_page_stat(page, &locked, &flags);
1117} 1133}
1118 1134
1119/* 1135/*
diff --git a/mm/shmem.c b/mm/shmem.c
index cd6fc7590e54..185836ba53ef 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2345,6 +2345,32 @@ static int shmem_exchange(struct inode *old_dir, struct dentry *old_dentry, stru
2345 return 0; 2345 return 0;
2346} 2346}
2347 2347
2348static int shmem_whiteout(struct inode *old_dir, struct dentry *old_dentry)
2349{
2350 struct dentry *whiteout;
2351 int error;
2352
2353 whiteout = d_alloc(old_dentry->d_parent, &old_dentry->d_name);
2354 if (!whiteout)
2355 return -ENOMEM;
2356
2357 error = shmem_mknod(old_dir, whiteout,
2358 S_IFCHR | WHITEOUT_MODE, WHITEOUT_DEV);
2359 dput(whiteout);
2360 if (error)
2361 return error;
2362
2363 /*
2364 * Cheat and hash the whiteout while the old dentry is still in
2365 * place, instead of playing games with FS_RENAME_DOES_D_MOVE.
2366 *
2367 * d_lookup() will consistently find one of them at this point,
2368 * not sure which one, but that isn't even important.
2369 */
2370 d_rehash(whiteout);
2371 return 0;
2372}
2373
2348/* 2374/*
2349 * The VFS layer already does all the dentry stuff for rename, 2375 * The VFS layer already does all the dentry stuff for rename,
2350 * we just have to decrement the usage count for the target if 2376 * we just have to decrement the usage count for the target if
@@ -2356,7 +2382,7 @@ static int shmem_rename2(struct inode *old_dir, struct dentry *old_dentry, struc
2356 struct inode *inode = old_dentry->d_inode; 2382 struct inode *inode = old_dentry->d_inode;
2357 int they_are_dirs = S_ISDIR(inode->i_mode); 2383 int they_are_dirs = S_ISDIR(inode->i_mode);
2358 2384
2359 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE)) 2385 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE | RENAME_WHITEOUT))
2360 return -EINVAL; 2386 return -EINVAL;
2361 2387
2362 if (flags & RENAME_EXCHANGE) 2388 if (flags & RENAME_EXCHANGE)
@@ -2365,6 +2391,14 @@ static int shmem_rename2(struct inode *old_dir, struct dentry *old_dentry, struc
2365 if (!simple_empty(new_dentry)) 2391 if (!simple_empty(new_dentry))
2366 return -ENOTEMPTY; 2392 return -ENOTEMPTY;
2367 2393
2394 if (flags & RENAME_WHITEOUT) {
2395 int error;
2396
2397 error = shmem_whiteout(old_dir, old_dentry);
2398 if (error)
2399 return error;
2400 }
2401
2368 if (new_dentry->d_inode) { 2402 if (new_dentry->d_inode) {
2369 (void) shmem_unlink(new_dir, new_dentry); 2403 (void) shmem_unlink(new_dir, new_dentry);
2370 if (they_are_dirs) { 2404 if (they_are_dirs) {
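
The shmem hunks above teach shmem_rename2() to honor RENAME_WHITEOUT by creating a 0:0 character-device whiteout at the old name, the marker union/overlay filesystems use to mask a lower-layer entry. A userspace sketch of the flag in action, assuming a kernel with this support and a tmpfs mount under /tmp; the raw syscall is used because contemporary glibc has no renameat2() wrapper, and the fallback syscall number is the x86_64 one:

#include <fcntl.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

#ifndef RENAME_WHITEOUT
#define RENAME_WHITEOUT (1 << 2)	/* from include/uapi/linux/fs.h */
#endif
#ifndef SYS_renameat2
#define SYS_renameat2 316		/* x86_64; adjust for other arches */
#endif

int main(void)
{
	/* Move "old" to "new" and leave a whiteout (0:0 char dev) at "old". */
	if (syscall(SYS_renameat2, AT_FDCWD, "/tmp/old",
		    AT_FDCWD, "/tmp/new", RENAME_WHITEOUT) < 0) {
		perror("renameat2");
		return 1;
	}
	return 0;
}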
diff --git a/mm/slab.c b/mm/slab.c
index eb2b2ea30130..f34e053ec46e 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3076,7 +3076,7 @@ static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
3076 void *obj; 3076 void *obj;
3077 int x; 3077 int x;
3078 3078
3079 VM_BUG_ON(nodeid > num_online_nodes()); 3079 VM_BUG_ON(nodeid < 0 || nodeid >= MAX_NUMNODES);
3080 n = get_node(cachep, nodeid); 3080 n = get_node(cachep, nodeid);
3081 BUG_ON(!n); 3081 BUG_ON(!n);
3082 3082
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 3a6e0cfdf03a..dcdab81bd240 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -93,16 +93,6 @@ static int kmem_cache_sanity_check(const char *name, size_t size)
93 s->object_size); 93 s->object_size);
94 continue; 94 continue;
95 } 95 }
96
97#if !defined(CONFIG_SLUB)
98 if (!strcmp(s->name, name)) {
99 pr_err("%s (%s): Cache name already exists.\n",
100 __func__, name);
101 dump_stack();
102 s = NULL;
103 return -EINVAL;
104 }
105#endif
106 } 96 }
107 97
108 WARN_ON(strchr(name, ' ')); /* It confuses parsers */ 98 WARN_ON(strchr(name, ' ')); /* It confuses parsers */
@@ -269,6 +259,10 @@ struct kmem_cache *find_mergeable(size_t size, size_t align,
269 if (s->size - size >= sizeof(void *)) 259 if (s->size - size >= sizeof(void *))
270 continue; 260 continue;
271 261
262 if (IS_ENABLED(CONFIG_SLAB) && align &&
263 (align > s->align || s->align % align))
264 continue;
265
272 return s; 266 return s;
273 } 267 }
274 return NULL; 268 return NULL;
diff --git a/mm/truncate.c b/mm/truncate.c
index 96d167372d89..f1e4d6052369 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -20,6 +20,7 @@
20#include <linux/buffer_head.h> /* grr. try_to_release_page, 20#include <linux/buffer_head.h> /* grr. try_to_release_page,
21 do_invalidatepage */ 21 do_invalidatepage */
22#include <linux/cleancache.h> 22#include <linux/cleancache.h>
23#include <linux/rmap.h>
23#include "internal.h" 24#include "internal.h"
24 25
25static void clear_exceptional_entry(struct address_space *mapping, 26static void clear_exceptional_entry(struct address_space *mapping,
@@ -714,17 +715,73 @@ EXPORT_SYMBOL(truncate_pagecache);
714 * necessary) to @newsize. It will be typically be called from the filesystem's 715 * necessary) to @newsize. It will be typically be called from the filesystem's
715 * setattr function when ATTR_SIZE is passed in. 716 * setattr function when ATTR_SIZE is passed in.
716 * 717 *
717 * Must be called with inode_mutex held and before all filesystem specific 718 * Must be called with a lock serializing truncates and writes (generally
718 * block truncation has been performed. 719 * i_mutex but e.g. xfs uses a different lock) and before all filesystem
720 * specific block truncation has been performed.
719 */ 721 */
720void truncate_setsize(struct inode *inode, loff_t newsize) 722void truncate_setsize(struct inode *inode, loff_t newsize)
721{ 723{
724 loff_t oldsize = inode->i_size;
725
722 i_size_write(inode, newsize); 726 i_size_write(inode, newsize);
727 if (newsize > oldsize)
728 pagecache_isize_extended(inode, oldsize, newsize);
723 truncate_pagecache(inode, newsize); 729 truncate_pagecache(inode, newsize);
724} 730}
725EXPORT_SYMBOL(truncate_setsize); 731EXPORT_SYMBOL(truncate_setsize);
726 732
727/** 733/**
734 * pagecache_isize_extended - update pagecache after extension of i_size
735 * @inode: inode for which i_size was extended
736 * @from: original inode size
737 * @to: new inode size
738 *
739 * Handle extension of inode size either caused by extending truncate or by
740 * write starting after current i_size. We mark the page straddling current
741 * i_size RO so that page_mkwrite() is called on the nearest write access to
742 * the page. This way filesystem can be sure that page_mkwrite() is called on
743 * the page before user writes to the page via mmap after the i_size has been
744 * changed.
745 *
746 * The function must be called after i_size is updated so that page fault
747 * coming after we unlock the page will already see the new i_size.
748 * The function must be called while we still hold i_mutex - this not only
749 * makes sure i_size is stable but also that userspace cannot observe new
750 * i_size value before we are prepared to store mmap writes at new inode size.
751 */
752void pagecache_isize_extended(struct inode *inode, loff_t from, loff_t to)
753{
754 int bsize = 1 << inode->i_blkbits;
755 loff_t rounded_from;
756 struct page *page;
757 pgoff_t index;
758
759 WARN_ON(to > inode->i_size);
760
761 if (from >= to || bsize == PAGE_CACHE_SIZE)
762 return;
763 /* Page straddling @from will not have any hole block created? */
764 rounded_from = round_up(from, bsize);
765 if (to <= rounded_from || !(rounded_from & (PAGE_CACHE_SIZE - 1)))
766 return;
767
768 index = from >> PAGE_CACHE_SHIFT;
769 page = find_lock_page(inode->i_mapping, index);
770 /* Page not cached? Nothing to do */
771 if (!page)
772 return;
773 /*
774 * See clear_page_dirty_for_io() for details why set_page_dirty()
775 * is needed.
776 */
777 if (page_mkclean(page))
778 set_page_dirty(page);
779 unlock_page(page);
780 page_cache_release(page);
781}
782EXPORT_SYMBOL(pagecache_isize_extended);
783
784/**
728 * truncate_pagecache_range - unmap and remove pagecache that is hole-punched 785 * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
729 * @inode: inode 786 * @inode: inode
730 * @lstart: offset of beginning of hole 787 * @lstart: offset of beginning of hole
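
pagecache_isize_extended() above decides whether the page straddling the old i_size must be write-protected, so that ->page_mkwrite() runs before userspace can dirty that page through an existing mmap at the new size. A self-contained restatement of that test with illustrative numbers (1k blocks, 4k pages, chosen only for the example):

static int isize_page_needs_mkclean(loff_t from, loff_t to, int bsize,
				    unsigned long page_size)
{
	loff_t rounded_from = round_up(from, bsize);

	if (from >= to || bsize == page_size)
		return 0;
	/* Page straddling @from will not have any hole block created? */
	if (to <= rounded_from || !(rounded_from & (page_size - 1)))
		return 0;
	return 1;
}

/*
 * from = 2500, to = 5000, bsize = 1024, page_size = 4096:
 * rounded_from = 3072; 5000 > 3072 and 3072 & 4095 != 0, so the page at
 * index 0 (bytes 0-4095) straddles the old i_size and is page_mkclean()ed,
 * forcing ->page_mkwrite() on the next mmap write to it.
 */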
diff --git a/mm/vmpressure.c b/mm/vmpressure.c
index d4042e75f7c7..c5afd573d7da 100644
--- a/mm/vmpressure.c
+++ b/mm/vmpressure.c
@@ -165,6 +165,7 @@ static void vmpressure_work_fn(struct work_struct *work)
165 unsigned long scanned; 165 unsigned long scanned;
166 unsigned long reclaimed; 166 unsigned long reclaimed;
167 167
168 spin_lock(&vmpr->sr_lock);
168 /* 169 /*
169 * Several contexts might be calling vmpressure(), so it is 170 * Several contexts might be calling vmpressure(), so it is
170 * possible that the work was rescheduled again before the old 171 * possible that the work was rescheduled again before the old
@@ -173,11 +174,12 @@ static void vmpressure_work_fn(struct work_struct *work)
173 * here. No need for any locks here since we don't care if 174 * here. No need for any locks here since we don't care if
174 * vmpr->reclaimed is in sync. 175 * vmpr->reclaimed is in sync.
175 */ 176 */
176 if (!vmpr->scanned) 177 scanned = vmpr->scanned;
178 if (!scanned) {
179 spin_unlock(&vmpr->sr_lock);
177 return; 180 return;
181 }
178 182
179 spin_lock(&vmpr->sr_lock);
180 scanned = vmpr->scanned;
181 reclaimed = vmpr->reclaimed; 183 reclaimed = vmpr->reclaimed;
182 vmpr->scanned = 0; 184 vmpr->scanned = 0;
183 vmpr->reclaimed = 0; 185 vmpr->reclaimed = 0;
diff --git a/net/Kconfig b/net/Kconfig
index 6272420a721b..99815b5454bf 100644
--- a/net/Kconfig
+++ b/net/Kconfig
@@ -6,7 +6,7 @@ menuconfig NET
6 bool "Networking support" 6 bool "Networking support"
7 select NLATTR 7 select NLATTR
8 select GENERIC_NET_UTILS 8 select GENERIC_NET_UTILS
9 select ANON_INODES 9 select BPF
10 ---help--- 10 ---help---
11 Unless you really know what you are doing, you should say Y here. 11 Unless you really know what you are doing, you should say Y here.
12 The reason is that some programs need kernel networking support even 12 The reason is that some programs need kernel networking support even
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 992ec49a96aa..44cb786b925a 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -112,6 +112,7 @@ void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
112 112
113 kfree_skb(skb); 113 kfree_skb(skb);
114} 114}
115EXPORT_SYMBOL_GPL(br_deliver);
115 116
116/* called with rcu_read_lock */ 117/* called with rcu_read_lock */
117void br_forward(const struct net_bridge_port *to, struct sk_buff *skb, struct sk_buff *skb0) 118void br_forward(const struct net_bridge_port *to, struct sk_buff *skb, struct sk_buff *skb0)
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 648d79ccf462..c465876c7861 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -813,10 +813,9 @@ static void __br_multicast_send_query(struct net_bridge *br,
813 return; 813 return;
814 814
815 if (port) { 815 if (port) {
816 __skb_push(skb, sizeof(struct ethhdr));
817 skb->dev = port->dev; 816 skb->dev = port->dev;
818 NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev, 817 NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
819 dev_queue_xmit); 818 br_dev_queue_push_xmit);
820 } else { 819 } else {
821 br_multicast_select_own_querier(br, ip, skb); 820 br_multicast_select_own_querier(br, ip, skb);
822 netif_rx(skb); 821 netif_rx(skb);
diff --git a/net/bridge/br_netfilter.c b/net/bridge/br_netfilter.c
index 1bada53bb195..1a4f32c09ad5 100644
--- a/net/bridge/br_netfilter.c
+++ b/net/bridge/br_netfilter.c
@@ -192,7 +192,6 @@ static inline void nf_bridge_save_header(struct sk_buff *skb)
192 192
193static int br_parse_ip_options(struct sk_buff *skb) 193static int br_parse_ip_options(struct sk_buff *skb)
194{ 194{
195 struct ip_options *opt;
196 const struct iphdr *iph; 195 const struct iphdr *iph;
197 struct net_device *dev = skb->dev; 196 struct net_device *dev = skb->dev;
198 u32 len; 197 u32 len;
@@ -201,7 +200,6 @@ static int br_parse_ip_options(struct sk_buff *skb)
201 goto inhdr_error; 200 goto inhdr_error;
202 201
203 iph = ip_hdr(skb); 202 iph = ip_hdr(skb);
204 opt = &(IPCB(skb)->opt);
205 203
206 /* Basic sanity checks */ 204 /* Basic sanity checks */
207 if (iph->ihl < 5 || iph->version != 4) 205 if (iph->ihl < 5 || iph->version != 4)
@@ -227,23 +225,11 @@ static int br_parse_ip_options(struct sk_buff *skb)
227 } 225 }
228 226
229 memset(IPCB(skb), 0, sizeof(struct inet_skb_parm)); 227 memset(IPCB(skb), 0, sizeof(struct inet_skb_parm));
230 if (iph->ihl == 5) 228 /* We should really parse IP options here but until
231 return 0; 229 * somebody who actually uses IP options complains to
232 230 * us we'll just silently ignore the options because
233 opt->optlen = iph->ihl*4 - sizeof(struct iphdr); 231 * we're lazy!
234 if (ip_options_compile(dev_net(dev), opt, skb)) 232 */
235 goto inhdr_error;
236
237 /* Check correct handling of SRR option */
238 if (unlikely(opt->srr)) {
239 struct in_device *in_dev = __in_dev_get_rcu(dev);
240 if (in_dev && !IN_DEV_SOURCE_ROUTE(in_dev))
241 goto drop;
242
243 if (ip_options_rcv_srr(skb))
244 goto drop;
245 }
246
247 return 0; 233 return 0;
248 234
249inhdr_error: 235inhdr_error:
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 2ff9706647f2..e5ec470b851f 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -280,6 +280,7 @@ static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = {
280 [IFLA_BRPORT_MODE] = { .type = NLA_U8 }, 280 [IFLA_BRPORT_MODE] = { .type = NLA_U8 },
281 [IFLA_BRPORT_GUARD] = { .type = NLA_U8 }, 281 [IFLA_BRPORT_GUARD] = { .type = NLA_U8 },
282 [IFLA_BRPORT_PROTECT] = { .type = NLA_U8 }, 282 [IFLA_BRPORT_PROTECT] = { .type = NLA_U8 },
283 [IFLA_BRPORT_FAST_LEAVE]= { .type = NLA_U8 },
283 [IFLA_BRPORT_LEARNING] = { .type = NLA_U8 }, 284 [IFLA_BRPORT_LEARNING] = { .type = NLA_U8 },
284 [IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 }, 285 [IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
285}; 286};
diff --git a/net/bridge/netfilter/nf_tables_bridge.c b/net/bridge/netfilter/nf_tables_bridge.c
index da17a5eab8b4..074c557ab505 100644
--- a/net/bridge/netfilter/nf_tables_bridge.c
+++ b/net/bridge/netfilter/nf_tables_bridge.c
@@ -75,9 +75,11 @@ static const struct nf_chain_type filter_bridge = {
75 .type = NFT_CHAIN_T_DEFAULT, 75 .type = NFT_CHAIN_T_DEFAULT,
76 .family = NFPROTO_BRIDGE, 76 .family = NFPROTO_BRIDGE,
77 .owner = THIS_MODULE, 77 .owner = THIS_MODULE,
78 .hook_mask = (1 << NF_BR_LOCAL_IN) | 78 .hook_mask = (1 << NF_BR_PRE_ROUTING) |
79 (1 << NF_BR_LOCAL_IN) |
79 (1 << NF_BR_FORWARD) | 80 (1 << NF_BR_FORWARD) |
80 (1 << NF_BR_LOCAL_OUT), 81 (1 << NF_BR_LOCAL_OUT) |
82 (1 << NF_BR_POST_ROUTING),
81}; 83};
82 84
83static int __init nf_tables_bridge_init(void) 85static int __init nf_tables_bridge_init(void)
diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c
index a76479535df2..48da2c54a69e 100644
--- a/net/bridge/netfilter/nft_reject_bridge.c
+++ b/net/bridge/netfilter/nft_reject_bridge.c
@@ -16,6 +16,239 @@
16#include <net/netfilter/nft_reject.h> 16#include <net/netfilter/nft_reject.h>
17#include <net/netfilter/ipv4/nf_reject.h> 17#include <net/netfilter/ipv4/nf_reject.h>
18#include <net/netfilter/ipv6/nf_reject.h> 18#include <net/netfilter/ipv6/nf_reject.h>
19#include <linux/ip.h>
20#include <net/ip.h>
21#include <net/ip6_checksum.h>
22#include <linux/netfilter_bridge.h>
23#include "../br_private.h"
24
25static void nft_reject_br_push_etherhdr(struct sk_buff *oldskb,
26 struct sk_buff *nskb)
27{
28 struct ethhdr *eth;
29
30 eth = (struct ethhdr *)skb_push(nskb, ETH_HLEN);
31 skb_reset_mac_header(nskb);
32 ether_addr_copy(eth->h_source, eth_hdr(oldskb)->h_dest);
33 ether_addr_copy(eth->h_dest, eth_hdr(oldskb)->h_source);
34 eth->h_proto = eth_hdr(oldskb)->h_proto;
35 skb_pull(nskb, ETH_HLEN);
36}
37
38static int nft_reject_iphdr_validate(struct sk_buff *oldskb)
39{
40 struct iphdr *iph;
41 u32 len;
42
43 if (!pskb_may_pull(oldskb, sizeof(struct iphdr)))
44 return 0;
45
46 iph = ip_hdr(oldskb);
47 if (iph->ihl < 5 || iph->version != 4)
48 return 0;
49
50 len = ntohs(iph->tot_len);
51 if (oldskb->len < len)
52 return 0;
53 else if (len < (iph->ihl*4))
54 return 0;
55
56 if (!pskb_may_pull(oldskb, iph->ihl*4))
57 return 0;
58
59 return 1;
60}
61
62static void nft_reject_br_send_v4_tcp_reset(struct sk_buff *oldskb, int hook)
63{
64 struct sk_buff *nskb;
65 struct iphdr *niph;
66 const struct tcphdr *oth;
67 struct tcphdr _oth;
68
69 if (!nft_reject_iphdr_validate(oldskb))
70 return;
71
72 oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook);
73 if (!oth)
74 return;
75
76 nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
77 LL_MAX_HEADER, GFP_ATOMIC);
78 if (!nskb)
79 return;
80
81 skb_reserve(nskb, LL_MAX_HEADER);
82 niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
83 sysctl_ip_default_ttl);
84 nf_reject_ip_tcphdr_put(nskb, oldskb, oth);
85 niph->ttl = sysctl_ip_default_ttl;
86 niph->tot_len = htons(nskb->len);
87 ip_send_check(niph);
88
89 nft_reject_br_push_etherhdr(oldskb, nskb);
90
91 br_deliver(br_port_get_rcu(oldskb->dev), nskb);
92}
93
94static void nft_reject_br_send_v4_unreach(struct sk_buff *oldskb, int hook,
95 u8 code)
96{
97 struct sk_buff *nskb;
98 struct iphdr *niph;
99 struct icmphdr *icmph;
100 unsigned int len;
101 void *payload;
102 __wsum csum;
103
104 if (!nft_reject_iphdr_validate(oldskb))
105 return;
106
107 /* IP header checks: fragment. */
108 if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
109 return;
110
111 /* RFC says return as much as we can without exceeding 576 bytes. */
112 len = min_t(unsigned int, 536, oldskb->len);
113
114 if (!pskb_may_pull(oldskb, len))
115 return;
116
117 if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), 0))
118 return;
119
120 nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmphdr) +
121 LL_MAX_HEADER + len, GFP_ATOMIC);
122 if (!nskb)
123 return;
124
125 skb_reserve(nskb, LL_MAX_HEADER);
126 niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_ICMP,
127 sysctl_ip_default_ttl);
128
129 skb_reset_transport_header(nskb);
130 icmph = (struct icmphdr *)skb_put(nskb, sizeof(struct icmphdr));
131 memset(icmph, 0, sizeof(*icmph));
132 icmph->type = ICMP_DEST_UNREACH;
133 icmph->code = code;
134
135 payload = skb_put(nskb, len);
136 memcpy(payload, skb_network_header(oldskb), len);
137
138 csum = csum_partial((void *)icmph, len + sizeof(struct icmphdr), 0);
139 icmph->checksum = csum_fold(csum);
140
141 niph->tot_len = htons(nskb->len);
142 ip_send_check(niph);
143
144 nft_reject_br_push_etherhdr(oldskb, nskb);
145
146 br_deliver(br_port_get_rcu(oldskb->dev), nskb);
147}
148
149static int nft_reject_ip6hdr_validate(struct sk_buff *oldskb)
150{
151 struct ipv6hdr *hdr;
152 u32 pkt_len;
153
154 if (!pskb_may_pull(oldskb, sizeof(struct ipv6hdr)))
155 return 0;
156
157 hdr = ipv6_hdr(oldskb);
158 if (hdr->version != 6)
159 return 0;
160
161 pkt_len = ntohs(hdr->payload_len);
162 if (pkt_len + sizeof(struct ipv6hdr) > oldskb->len)
163 return 0;
164
165 return 1;
166}
167
168static void nft_reject_br_send_v6_tcp_reset(struct net *net,
169 struct sk_buff *oldskb, int hook)
170{
171 struct sk_buff *nskb;
172 const struct tcphdr *oth;
173 struct tcphdr _oth;
174 unsigned int otcplen;
175 struct ipv6hdr *nip6h;
176
177 if (!nft_reject_ip6hdr_validate(oldskb))
178 return;
179
180 oth = nf_reject_ip6_tcphdr_get(oldskb, &_oth, &otcplen, hook);
181 if (!oth)
182 return;
183
184 nskb = alloc_skb(sizeof(struct ipv6hdr) + sizeof(struct tcphdr) +
185 LL_MAX_HEADER, GFP_ATOMIC);
186 if (!nskb)
187 return;
188
189 skb_reserve(nskb, LL_MAX_HEADER);
190 nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
191 net->ipv6.devconf_all->hop_limit);
192 nf_reject_ip6_tcphdr_put(nskb, oldskb, oth, otcplen);
193 nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));
194
195 nft_reject_br_push_etherhdr(oldskb, nskb);
196
197 br_deliver(br_port_get_rcu(oldskb->dev), nskb);
198}
199
200static void nft_reject_br_send_v6_unreach(struct net *net,
201 struct sk_buff *oldskb, int hook,
202 u8 code)
203{
204 struct sk_buff *nskb;
205 struct ipv6hdr *nip6h;
206 struct icmp6hdr *icmp6h;
207 unsigned int len;
208 void *payload;
209
210 if (!nft_reject_ip6hdr_validate(oldskb))
211 return;
212
213 /* Include "As much of invoking packet as possible without the ICMPv6
214 * packet exceeding the minimum IPv6 MTU" in the ICMP payload.
215 */
216 len = min_t(unsigned int, 1220, oldskb->len);
217
218 if (!pskb_may_pull(oldskb, len))
219 return;
220
221 nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct icmp6hdr) +
222 LL_MAX_HEADER + len, GFP_ATOMIC);
223 if (!nskb)
224 return;
225
226 skb_reserve(nskb, LL_MAX_HEADER);
227 nip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_ICMPV6,
228 net->ipv6.devconf_all->hop_limit);
229
230 skb_reset_transport_header(nskb);
231 icmp6h = (struct icmp6hdr *)skb_put(nskb, sizeof(struct icmp6hdr));
232 memset(icmp6h, 0, sizeof(*icmp6h));
233 icmp6h->icmp6_type = ICMPV6_DEST_UNREACH;
234 icmp6h->icmp6_code = code;
235
236 payload = skb_put(nskb, len);
237 memcpy(payload, skb_network_header(oldskb), len);
238 nip6h->payload_len = htons(nskb->len - sizeof(struct ipv6hdr));
239
240 icmp6h->icmp6_cksum =
241 csum_ipv6_magic(&nip6h->saddr, &nip6h->daddr,
242 nskb->len - sizeof(struct ipv6hdr),
243 IPPROTO_ICMPV6,
244 csum_partial(icmp6h,
245 nskb->len - sizeof(struct ipv6hdr),
246 0));
247
248 nft_reject_br_push_etherhdr(oldskb, nskb);
249
250 br_deliver(br_port_get_rcu(oldskb->dev), nskb);
251}
19 252
20static void nft_reject_bridge_eval(const struct nft_expr *expr, 253static void nft_reject_bridge_eval(const struct nft_expr *expr,
21 struct nft_data data[NFT_REG_MAX + 1], 254 struct nft_data data[NFT_REG_MAX + 1],
@@ -23,35 +256,46 @@ static void nft_reject_bridge_eval(const struct nft_expr *expr,
23{ 256{
24 struct nft_reject *priv = nft_expr_priv(expr); 257 struct nft_reject *priv = nft_expr_priv(expr);
25 struct net *net = dev_net((pkt->in != NULL) ? pkt->in : pkt->out); 258 struct net *net = dev_net((pkt->in != NULL) ? pkt->in : pkt->out);
259 const unsigned char *dest = eth_hdr(pkt->skb)->h_dest;
260
261 if (is_broadcast_ether_addr(dest) ||
262 is_multicast_ether_addr(dest))
263 goto out;
26 264
27 switch (eth_hdr(pkt->skb)->h_proto) { 265 switch (eth_hdr(pkt->skb)->h_proto) {
28 case htons(ETH_P_IP): 266 case htons(ETH_P_IP):
29 switch (priv->type) { 267 switch (priv->type) {
30 case NFT_REJECT_ICMP_UNREACH: 268 case NFT_REJECT_ICMP_UNREACH:
31 nf_send_unreach(pkt->skb, priv->icmp_code); 269 nft_reject_br_send_v4_unreach(pkt->skb,
270 pkt->ops->hooknum,
271 priv->icmp_code);
32 break; 272 break;
33 case NFT_REJECT_TCP_RST: 273 case NFT_REJECT_TCP_RST:
34 nf_send_reset(pkt->skb, pkt->ops->hooknum); 274 nft_reject_br_send_v4_tcp_reset(pkt->skb,
275 pkt->ops->hooknum);
35 break; 276 break;
36 case NFT_REJECT_ICMPX_UNREACH: 277 case NFT_REJECT_ICMPX_UNREACH:
37 nf_send_unreach(pkt->skb, 278 nft_reject_br_send_v4_unreach(pkt->skb,
38 nft_reject_icmp_code(priv->icmp_code)); 279 pkt->ops->hooknum,
280 nft_reject_icmp_code(priv->icmp_code));
39 break; 281 break;
40 } 282 }
41 break; 283 break;
42 case htons(ETH_P_IPV6): 284 case htons(ETH_P_IPV6):
43 switch (priv->type) { 285 switch (priv->type) {
44 case NFT_REJECT_ICMP_UNREACH: 286 case NFT_REJECT_ICMP_UNREACH:
45 nf_send_unreach6(net, pkt->skb, priv->icmp_code, 287 nft_reject_br_send_v6_unreach(net, pkt->skb,
46 pkt->ops->hooknum); 288 pkt->ops->hooknum,
289 priv->icmp_code);
47 break; 290 break;
48 case NFT_REJECT_TCP_RST: 291 case NFT_REJECT_TCP_RST:
49 nf_send_reset6(net, pkt->skb, pkt->ops->hooknum); 292 nft_reject_br_send_v6_tcp_reset(net, pkt->skb,
293 pkt->ops->hooknum);
50 break; 294 break;
51 case NFT_REJECT_ICMPX_UNREACH: 295 case NFT_REJECT_ICMPX_UNREACH:
52 nf_send_unreach6(net, pkt->skb, 296 nft_reject_br_send_v6_unreach(net, pkt->skb,
53 nft_reject_icmpv6_code(priv->icmp_code), 297 pkt->ops->hooknum,
54 pkt->ops->hooknum); 298 nft_reject_icmpv6_code(priv->icmp_code));
55 break; 299 break;
56 } 300 }
57 break; 301 break;
@@ -59,15 +303,38 @@ static void nft_reject_bridge_eval(const struct nft_expr *expr,
59 /* No explicit way to reject this protocol, drop it. */ 303 /* No explicit way to reject this protocol, drop it. */
60 break; 304 break;
61 } 305 }
306out:
62 data[NFT_REG_VERDICT].verdict = NF_DROP; 307 data[NFT_REG_VERDICT].verdict = NF_DROP;
63} 308}
64 309
310static int nft_reject_bridge_validate_hooks(const struct nft_chain *chain)
311{
312 struct nft_base_chain *basechain;
313
314 if (chain->flags & NFT_BASE_CHAIN) {
315 basechain = nft_base_chain(chain);
316
317 switch (basechain->ops[0].hooknum) {
318 case NF_BR_PRE_ROUTING:
319 case NF_BR_LOCAL_IN:
320 break;
321 default:
322 return -EOPNOTSUPP;
323 }
324 }
325 return 0;
326}
327
65static int nft_reject_bridge_init(const struct nft_ctx *ctx, 328static int nft_reject_bridge_init(const struct nft_ctx *ctx,
66 const struct nft_expr *expr, 329 const struct nft_expr *expr,
67 const struct nlattr * const tb[]) 330 const struct nlattr * const tb[])
68{ 331{
69 struct nft_reject *priv = nft_expr_priv(expr); 332 struct nft_reject *priv = nft_expr_priv(expr);
70 int icmp_code; 333 int icmp_code, err;
334
335 err = nft_reject_bridge_validate_hooks(ctx->chain);
336 if (err < 0)
337 return err;
71 338
72 if (tb[NFTA_REJECT_TYPE] == NULL) 339 if (tb[NFTA_REJECT_TYPE] == NULL)
73 return -EINVAL; 340 return -EINVAL;
@@ -116,6 +383,13 @@ nla_put_failure:
116 return -1; 383 return -1;
117} 384}
118 385
386static int nft_reject_bridge_validate(const struct nft_ctx *ctx,
387 const struct nft_expr *expr,
388 const struct nft_data **data)
389{
390 return nft_reject_bridge_validate_hooks(ctx->chain);
391}
392
119static struct nft_expr_type nft_reject_bridge_type; 393static struct nft_expr_type nft_reject_bridge_type;
120static const struct nft_expr_ops nft_reject_bridge_ops = { 394static const struct nft_expr_ops nft_reject_bridge_ops = {
121 .type = &nft_reject_bridge_type, 395 .type = &nft_reject_bridge_type,
@@ -123,6 +397,7 @@ static const struct nft_expr_ops nft_reject_bridge_ops = {
123 .eval = nft_reject_bridge_eval, 397 .eval = nft_reject_bridge_eval,
124 .init = nft_reject_bridge_init, 398 .init = nft_reject_bridge_init,
125 .dump = nft_reject_bridge_dump, 399 .dump = nft_reject_bridge_dump,
400 .validate = nft_reject_bridge_validate,
126}; 401};
127 402
128static struct nft_expr_type nft_reject_bridge_type __read_mostly = { 403static struct nft_expr_type nft_reject_bridge_type __read_mostly = {
diff --git a/net/ceph/auth_x.c b/net/ceph/auth_x.c
index de6662b14e1f..7e38b729696a 100644
--- a/net/ceph/auth_x.c
+++ b/net/ceph/auth_x.c
@@ -149,6 +149,7 @@ static int process_one_ticket(struct ceph_auth_client *ac,
149 struct ceph_crypto_key old_key; 149 struct ceph_crypto_key old_key;
150 void *ticket_buf = NULL; 150 void *ticket_buf = NULL;
151 void *tp, *tpend; 151 void *tp, *tpend;
152 void **ptp;
152 struct ceph_timespec new_validity; 153 struct ceph_timespec new_validity;
153 struct ceph_crypto_key new_session_key; 154 struct ceph_crypto_key new_session_key;
154 struct ceph_buffer *new_ticket_blob; 155 struct ceph_buffer *new_ticket_blob;
@@ -208,25 +209,19 @@ static int process_one_ticket(struct ceph_auth_client *ac,
208 goto out; 209 goto out;
209 } 210 }
210 tp = ticket_buf; 211 tp = ticket_buf;
211 dlen = ceph_decode_32(&tp); 212 ptp = &tp;
213 tpend = *ptp + dlen;
212 } else { 214 } else {
213 /* unencrypted */ 215 /* unencrypted */
214 ceph_decode_32_safe(p, end, dlen, bad); 216 ptp = p;
215 ticket_buf = kmalloc(dlen, GFP_NOFS); 217 tpend = end;
216 if (!ticket_buf) {
217 ret = -ENOMEM;
218 goto out;
219 }
220 tp = ticket_buf;
221 ceph_decode_need(p, end, dlen, bad);
222 ceph_decode_copy(p, ticket_buf, dlen);
223 } 218 }
224 tpend = tp + dlen; 219 ceph_decode_32_safe(ptp, tpend, dlen, bad);
225 dout(" ticket blob is %d bytes\n", dlen); 220 dout(" ticket blob is %d bytes\n", dlen);
226 ceph_decode_need(&tp, tpend, 1 + sizeof(u64), bad); 221 ceph_decode_need(ptp, tpend, 1 + sizeof(u64), bad);
227 blob_struct_v = ceph_decode_8(&tp); 222 blob_struct_v = ceph_decode_8(ptp);
228 new_secret_id = ceph_decode_64(&tp); 223 new_secret_id = ceph_decode_64(ptp);
229 ret = ceph_decode_buffer(&new_ticket_blob, &tp, tpend); 224 ret = ceph_decode_buffer(&new_ticket_blob, ptp, tpend);
230 if (ret) 225 if (ret)
231 goto out; 226 goto out;
232 227
diff --git a/net/ceph/crypto.c b/net/ceph/crypto.c
index 62fc5e7a9acf..790fe89d90c0 100644
--- a/net/ceph/crypto.c
+++ b/net/ceph/crypto.c
@@ -90,11 +90,82 @@ static struct crypto_blkcipher *ceph_crypto_alloc_cipher(void)
90 90
91static const u8 *aes_iv = (u8 *)CEPH_AES_IV; 91static const u8 *aes_iv = (u8 *)CEPH_AES_IV;
92 92
93/*
94 * Should be used for buffers allocated with ceph_kvmalloc().
95 * Currently these are encrypt out-buffer (ceph_buffer) and decrypt
96 * in-buffer (msg front).
97 *
98 * Dispose of @sgt with teardown_sgtable().
99 *
100 * @prealloc_sg is to avoid memory allocation inside sg_alloc_table()
101 * in cases where a single sg is sufficient. No attempt to reduce the
102 * number of sgs by squeezing physically contiguous pages together is
103 * made though, for simplicity.
104 */
105static int setup_sgtable(struct sg_table *sgt, struct scatterlist *prealloc_sg,
106 const void *buf, unsigned int buf_len)
107{
108 struct scatterlist *sg;
109 const bool is_vmalloc = is_vmalloc_addr(buf);
110 unsigned int off = offset_in_page(buf);
111 unsigned int chunk_cnt = 1;
112 unsigned int chunk_len = PAGE_ALIGN(off + buf_len);
113 int i;
114 int ret;
115
116 if (buf_len == 0) {
117 memset(sgt, 0, sizeof(*sgt));
118 return -EINVAL;
119 }
120
121 if (is_vmalloc) {
122 chunk_cnt = chunk_len >> PAGE_SHIFT;
123 chunk_len = PAGE_SIZE;
124 }
125
126 if (chunk_cnt > 1) {
127 ret = sg_alloc_table(sgt, chunk_cnt, GFP_NOFS);
128 if (ret)
129 return ret;
130 } else {
131 WARN_ON(chunk_cnt != 1);
132 sg_init_table(prealloc_sg, 1);
133 sgt->sgl = prealloc_sg;
134 sgt->nents = sgt->orig_nents = 1;
135 }
136
137 for_each_sg(sgt->sgl, sg, sgt->orig_nents, i) {
138 struct page *page;
139 unsigned int len = min(chunk_len - off, buf_len);
140
141 if (is_vmalloc)
142 page = vmalloc_to_page(buf);
143 else
144 page = virt_to_page(buf);
145
146 sg_set_page(sg, page, len, off);
147
148 off = 0;
149 buf += len;
150 buf_len -= len;
151 }
152 WARN_ON(buf_len != 0);
153
154 return 0;
155}
156
157static void teardown_sgtable(struct sg_table *sgt)
158{
159 if (sgt->orig_nents > 1)
160 sg_free_table(sgt);
161}
162
93static int ceph_aes_encrypt(const void *key, int key_len, 163static int ceph_aes_encrypt(const void *key, int key_len,
94 void *dst, size_t *dst_len, 164 void *dst, size_t *dst_len,
95 const void *src, size_t src_len) 165 const void *src, size_t src_len)
96{ 166{
97 struct scatterlist sg_in[2], sg_out[1]; 167 struct scatterlist sg_in[2], prealloc_sg;
168 struct sg_table sg_out;
98 struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher(); 169 struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
99 struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 }; 170 struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
100 int ret; 171 int ret;
@@ -110,16 +181,18 @@ static int ceph_aes_encrypt(const void *key, int key_len,
110 181
111 *dst_len = src_len + zero_padding; 182 *dst_len = src_len + zero_padding;
112 183
113 crypto_blkcipher_setkey((void *)tfm, key, key_len);
114 sg_init_table(sg_in, 2); 184 sg_init_table(sg_in, 2);
115 sg_set_buf(&sg_in[0], src, src_len); 185 sg_set_buf(&sg_in[0], src, src_len);
116 sg_set_buf(&sg_in[1], pad, zero_padding); 186 sg_set_buf(&sg_in[1], pad, zero_padding);
117 sg_init_table(sg_out, 1); 187 ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len);
118 sg_set_buf(sg_out, dst, *dst_len); 188 if (ret)
189 goto out_tfm;
190
191 crypto_blkcipher_setkey((void *)tfm, key, key_len);
119 iv = crypto_blkcipher_crt(tfm)->iv; 192 iv = crypto_blkcipher_crt(tfm)->iv;
120 ivsize = crypto_blkcipher_ivsize(tfm); 193 ivsize = crypto_blkcipher_ivsize(tfm);
121
122 memcpy(iv, aes_iv, ivsize); 194 memcpy(iv, aes_iv, ivsize);
195
123 /* 196 /*
124 print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1, 197 print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1,
125 key, key_len, 1); 198 key, key_len, 1);
@@ -128,16 +201,22 @@ static int ceph_aes_encrypt(const void *key, int key_len,
128 print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1, 201 print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1,
129 pad, zero_padding, 1); 202 pad, zero_padding, 1);
130 */ 203 */
131 ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in, 204 ret = crypto_blkcipher_encrypt(&desc, sg_out.sgl, sg_in,
132 src_len + zero_padding); 205 src_len + zero_padding);
133 crypto_free_blkcipher(tfm); 206 if (ret < 0) {
134 if (ret < 0)
135 pr_err("ceph_aes_crypt failed %d\n", ret); 207 pr_err("ceph_aes_crypt failed %d\n", ret);
208 goto out_sg;
209 }
136 /* 210 /*
137 print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1, 211 print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1,
138 dst, *dst_len, 1); 212 dst, *dst_len, 1);
139 */ 213 */
140 return 0; 214
215out_sg:
216 teardown_sgtable(&sg_out);
217out_tfm:
218 crypto_free_blkcipher(tfm);
219 return ret;
141} 220}
142 221
143static int ceph_aes_encrypt2(const void *key, int key_len, void *dst, 222static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
@@ -145,7 +224,8 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
145 const void *src1, size_t src1_len, 224 const void *src1, size_t src1_len,
146 const void *src2, size_t src2_len) 225 const void *src2, size_t src2_len)
147{ 226{
148 struct scatterlist sg_in[3], sg_out[1]; 227 struct scatterlist sg_in[3], prealloc_sg;
228 struct sg_table sg_out;
149 struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher(); 229 struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
150 struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 }; 230 struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
151 int ret; 231 int ret;
@@ -161,17 +241,19 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
161 241
162 *dst_len = src1_len + src2_len + zero_padding; 242 *dst_len = src1_len + src2_len + zero_padding;
163 243
164 crypto_blkcipher_setkey((void *)tfm, key, key_len);
165 sg_init_table(sg_in, 3); 244 sg_init_table(sg_in, 3);
166 sg_set_buf(&sg_in[0], src1, src1_len); 245 sg_set_buf(&sg_in[0], src1, src1_len);
167 sg_set_buf(&sg_in[1], src2, src2_len); 246 sg_set_buf(&sg_in[1], src2, src2_len);
168 sg_set_buf(&sg_in[2], pad, zero_padding); 247 sg_set_buf(&sg_in[2], pad, zero_padding);
169 sg_init_table(sg_out, 1); 248 ret = setup_sgtable(&sg_out, &prealloc_sg, dst, *dst_len);
170 sg_set_buf(sg_out, dst, *dst_len); 249 if (ret)
250 goto out_tfm;
251
252 crypto_blkcipher_setkey((void *)tfm, key, key_len);
171 iv = crypto_blkcipher_crt(tfm)->iv; 253 iv = crypto_blkcipher_crt(tfm)->iv;
172 ivsize = crypto_blkcipher_ivsize(tfm); 254 ivsize = crypto_blkcipher_ivsize(tfm);
173
174 memcpy(iv, aes_iv, ivsize); 255 memcpy(iv, aes_iv, ivsize);
256
175 /* 257 /*
176 print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1, 258 print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1,
177 key, key_len, 1); 259 key, key_len, 1);
@@ -182,23 +264,30 @@ static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
182 print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1, 264 print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1,
183 pad, zero_padding, 1); 265 pad, zero_padding, 1);
184 */ 266 */
185 ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in, 267 ret = crypto_blkcipher_encrypt(&desc, sg_out.sgl, sg_in,
186 src1_len + src2_len + zero_padding); 268 src1_len + src2_len + zero_padding);
187 crypto_free_blkcipher(tfm); 269 if (ret < 0) {
188 if (ret < 0)
189 pr_err("ceph_aes_crypt2 failed %d\n", ret); 270 pr_err("ceph_aes_crypt2 failed %d\n", ret);
271 goto out_sg;
272 }
190 /* 273 /*
191 print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1, 274 print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1,
192 dst, *dst_len, 1); 275 dst, *dst_len, 1);
193 */ 276 */
194 return 0; 277
278out_sg:
279 teardown_sgtable(&sg_out);
280out_tfm:
281 crypto_free_blkcipher(tfm);
282 return ret;
195} 283}
196 284
197static int ceph_aes_decrypt(const void *key, int key_len, 285static int ceph_aes_decrypt(const void *key, int key_len,
198 void *dst, size_t *dst_len, 286 void *dst, size_t *dst_len,
199 const void *src, size_t src_len) 287 const void *src, size_t src_len)
200{ 288{
201 struct scatterlist sg_in[1], sg_out[2]; 289 struct sg_table sg_in;
290 struct scatterlist sg_out[2], prealloc_sg;
202 struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher(); 291 struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
203 struct blkcipher_desc desc = { .tfm = tfm }; 292 struct blkcipher_desc desc = { .tfm = tfm };
204 char pad[16]; 293 char pad[16];
@@ -210,16 +299,16 @@ static int ceph_aes_decrypt(const void *key, int key_len,
210 if (IS_ERR(tfm)) 299 if (IS_ERR(tfm))
211 return PTR_ERR(tfm); 300 return PTR_ERR(tfm);
212 301
213 crypto_blkcipher_setkey((void *)tfm, key, key_len);
214 sg_init_table(sg_in, 1);
215 sg_init_table(sg_out, 2); 302 sg_init_table(sg_out, 2);
216 sg_set_buf(sg_in, src, src_len);
217 sg_set_buf(&sg_out[0], dst, *dst_len); 303 sg_set_buf(&sg_out[0], dst, *dst_len);
218 sg_set_buf(&sg_out[1], pad, sizeof(pad)); 304 sg_set_buf(&sg_out[1], pad, sizeof(pad));
305 ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len);
306 if (ret)
307 goto out_tfm;
219 308
309 crypto_blkcipher_setkey((void *)tfm, key, key_len);
220 iv = crypto_blkcipher_crt(tfm)->iv; 310 iv = crypto_blkcipher_crt(tfm)->iv;
221 ivsize = crypto_blkcipher_ivsize(tfm); 311 ivsize = crypto_blkcipher_ivsize(tfm);
222
223 memcpy(iv, aes_iv, ivsize); 312 memcpy(iv, aes_iv, ivsize);
224 313
225 /* 314 /*
@@ -228,12 +317,10 @@ static int ceph_aes_decrypt(const void *key, int key_len,
228 print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1, 317 print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1,
229 src, src_len, 1); 318 src, src_len, 1);
230 */ 319 */
231 320 ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in.sgl, src_len);
232 ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len);
233 crypto_free_blkcipher(tfm);
234 if (ret < 0) { 321 if (ret < 0) {
235 pr_err("ceph_aes_decrypt failed %d\n", ret); 322 pr_err("ceph_aes_decrypt failed %d\n", ret);
236 return ret; 323 goto out_sg;
237 } 324 }
238 325
239 if (src_len <= *dst_len) 326 if (src_len <= *dst_len)
@@ -251,7 +338,12 @@ static int ceph_aes_decrypt(const void *key, int key_len,
251 print_hex_dump(KERN_ERR, "dec out: ", DUMP_PREFIX_NONE, 16, 1, 338 print_hex_dump(KERN_ERR, "dec out: ", DUMP_PREFIX_NONE, 16, 1,
252 dst, *dst_len, 1); 339 dst, *dst_len, 1);
253 */ 340 */
254 return 0; 341
342out_sg:
343 teardown_sgtable(&sg_in);
344out_tfm:
345 crypto_free_blkcipher(tfm);
346 return ret;
255} 347}
256 348
257static int ceph_aes_decrypt2(const void *key, int key_len, 349static int ceph_aes_decrypt2(const void *key, int key_len,
@@ -259,7 +351,8 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
259 void *dst2, size_t *dst2_len, 351 void *dst2, size_t *dst2_len,
260 const void *src, size_t src_len) 352 const void *src, size_t src_len)
261{ 353{
262 struct scatterlist sg_in[1], sg_out[3]; 354 struct sg_table sg_in;
355 struct scatterlist sg_out[3], prealloc_sg;
263 struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher(); 356 struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
264 struct blkcipher_desc desc = { .tfm = tfm }; 357 struct blkcipher_desc desc = { .tfm = tfm };
265 char pad[16]; 358 char pad[16];
@@ -271,17 +364,17 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
271 if (IS_ERR(tfm)) 364 if (IS_ERR(tfm))
272 return PTR_ERR(tfm); 365 return PTR_ERR(tfm);
273 366
274 sg_init_table(sg_in, 1);
275 sg_set_buf(sg_in, src, src_len);
276 sg_init_table(sg_out, 3); 367 sg_init_table(sg_out, 3);
277 sg_set_buf(&sg_out[0], dst1, *dst1_len); 368 sg_set_buf(&sg_out[0], dst1, *dst1_len);
278 sg_set_buf(&sg_out[1], dst2, *dst2_len); 369 sg_set_buf(&sg_out[1], dst2, *dst2_len);
279 sg_set_buf(&sg_out[2], pad, sizeof(pad)); 370 sg_set_buf(&sg_out[2], pad, sizeof(pad));
371 ret = setup_sgtable(&sg_in, &prealloc_sg, src, src_len);
372 if (ret)
373 goto out_tfm;
280 374
281 crypto_blkcipher_setkey((void *)tfm, key, key_len); 375 crypto_blkcipher_setkey((void *)tfm, key, key_len);
282 iv = crypto_blkcipher_crt(tfm)->iv; 376 iv = crypto_blkcipher_crt(tfm)->iv;
283 ivsize = crypto_blkcipher_ivsize(tfm); 377 ivsize = crypto_blkcipher_ivsize(tfm);
284
285 memcpy(iv, aes_iv, ivsize); 378 memcpy(iv, aes_iv, ivsize);
286 379
287 /* 380 /*
@@ -290,12 +383,10 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
290 print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1, 383 print_hex_dump(KERN_ERR, "dec in: ", DUMP_PREFIX_NONE, 16, 1,
291 src, src_len, 1); 384 src, src_len, 1);
292 */ 385 */
293 386 ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in.sgl, src_len);
294 ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len);
295 crypto_free_blkcipher(tfm);
296 if (ret < 0) { 387 if (ret < 0) {
297 pr_err("ceph_aes_decrypt failed %d\n", ret); 388 pr_err("ceph_aes_decrypt failed %d\n", ret);
298 return ret; 389 goto out_sg;
299 } 390 }
300 391
301 if (src_len <= *dst1_len) 392 if (src_len <= *dst1_len)
@@ -325,7 +416,11 @@ static int ceph_aes_decrypt2(const void *key, int key_len,
325 dst2, *dst2_len, 1); 416 dst2, *dst2_len, 1);
326 */ 417 */
327 418
328 return 0; 419out_sg:
420 teardown_sgtable(&sg_in);
421out_tfm:
422 crypto_free_blkcipher(tfm);
423 return ret;
329} 424}
330 425
331 426
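Editor's note (illustrative sketch, not part of the patch): the net/ceph/crypto.c hunks above replace the fixed single-entry destination/source scatterlists with an sg_table built by setup_sgtable(), because the buffers may come from ceph_kvmalloc() and therefore be vmalloc'ed: virtually contiguous but spread over arbitrary physical pages, which a single sg_set_buf() entry cannot describe. A condensed version of the encrypt-side call pattern, assuming it sits next to the helpers in net/ceph/crypto.c and uses the pre-4.x blkcipher API (padding and key setup omitted for brevity):

#include <linux/crypto.h>
#include <linux/scatterlist.h>

static int example_encrypt(struct crypto_blkcipher *tfm,
			   void *dst, size_t dst_len,
			   const void *src, size_t src_len)
{
	struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
	struct scatterlist sg_in[1], prealloc_sg;
	struct sg_table sg_out;
	int ret;

	sg_init_table(sg_in, 1);
	sg_set_buf(&sg_in[0], src, src_len);	/* kmalloc'ed source: one entry suffices */

	/* dst may be vmalloc'ed, so describe it page by page */
	ret = setup_sgtable(&sg_out, &prealloc_sg, dst, dst_len);
	if (ret)
		return ret;

	ret = crypto_blkcipher_encrypt(&desc, sg_out.sgl, sg_in, src_len);

	teardown_sgtable(&sg_out);	/* frees the table only if sg_alloc_table() was used */
	return ret;
}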
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 559c9f619c20..8d1653caffdb 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -484,7 +484,7 @@ static int ceph_tcp_connect(struct ceph_connection *con)
484 IPPROTO_TCP, &sock); 484 IPPROTO_TCP, &sock);
485 if (ret) 485 if (ret)
486 return ret; 486 return ret;
487 sock->sk->sk_allocation = GFP_NOFS; 487 sock->sk->sk_allocation = GFP_NOFS | __GFP_MEMALLOC;
488 488
489#ifdef CONFIG_LOCKDEP 489#ifdef CONFIG_LOCKDEP
490 lockdep_set_class(&sock->sk->sk_lock, &socket_class); 490 lockdep_set_class(&sock->sk->sk_lock, &socket_class);
@@ -509,6 +509,9 @@ static int ceph_tcp_connect(struct ceph_connection *con)
509 509
510 return ret; 510 return ret;
511 } 511 }
512
513 sk_set_memalloc(sock->sk);
514
512 con->sock = sock; 515 con->sock = sock;
513 return 0; 516 return 0;
514} 517}
@@ -2769,8 +2772,11 @@ static void con_work(struct work_struct *work)
2769{ 2772{
2770 struct ceph_connection *con = container_of(work, struct ceph_connection, 2773 struct ceph_connection *con = container_of(work, struct ceph_connection,
2771 work.work); 2774 work.work);
2775 unsigned long pflags = current->flags;
2772 bool fault; 2776 bool fault;
2773 2777
2778 current->flags |= PF_MEMALLOC;
2779
2774 mutex_lock(&con->mutex); 2780 mutex_lock(&con->mutex);
2775 while (true) { 2781 while (true) {
2776 int ret; 2782 int ret;
@@ -2824,6 +2830,8 @@ static void con_work(struct work_struct *work)
2824 con_fault_finish(con); 2830 con_fault_finish(con);
2825 2831
2826 con->ops->put(con); 2832 con->ops->put(con);
2833
2834 tsk_restore_flags(current, pflags, PF_MEMALLOC);
2827} 2835}
2828 2836
2829/* 2837/*
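Editor's note (illustrative sketch, not part of the patch): the messenger.c changes above let the ceph transport keep making progress under memory pressure by marking the socket with sk_set_memalloc()/__GFP_MEMALLOC and by running the connection worker with PF_MEMALLOC set, so its allocations may dip into the emergency reserves. The save/set/restore idiom used in con_work(), reduced to its bones (memalloc_worker is a made-up name):

#include <linux/sched.h>
#include <linux/workqueue.h>

static void memalloc_worker(struct work_struct *work)
{
	unsigned long pflags = current->flags;	/* remember whether PF_MEMALLOC was already set */

	current->flags |= PF_MEMALLOC;		/* allocations may now use the reserves */

	/* ... do the work that must not deadlock on memory reclaim ... */

	tsk_restore_flags(current, pflags, PF_MEMALLOC);	/* put the bit back as it was */
}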
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index f3fc54eac09d..6f164289bde8 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -1007,8 +1007,8 @@ static void put_osd(struct ceph_osd *osd)
1007static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd) 1007static void __remove_osd(struct ceph_osd_client *osdc, struct ceph_osd *osd)
1008{ 1008{
1009 dout("__remove_osd %p\n", osd); 1009 dout("__remove_osd %p\n", osd);
1010 BUG_ON(!list_empty(&osd->o_requests)); 1010 WARN_ON(!list_empty(&osd->o_requests));
1011 BUG_ON(!list_empty(&osd->o_linger_requests)); 1011 WARN_ON(!list_empty(&osd->o_linger_requests));
1012 1012
1013 rb_erase(&osd->o_node, &osdc->osds); 1013 rb_erase(&osd->o_node, &osdc->osds);
1014 list_del_init(&osd->o_osd_lru); 1014 list_del_init(&osd->o_osd_lru);
@@ -1254,6 +1254,8 @@ static void __unregister_linger_request(struct ceph_osd_client *osdc,
1254 if (list_empty(&req->r_osd_item)) 1254 if (list_empty(&req->r_osd_item))
1255 req->r_osd = NULL; 1255 req->r_osd = NULL;
1256 } 1256 }
1257
1258 list_del_init(&req->r_req_lru_item); /* can be on notarget */
1257 ceph_osdc_put_request(req); 1259 ceph_osdc_put_request(req);
1258} 1260}
1259 1261
@@ -1395,6 +1397,7 @@ static int __map_request(struct ceph_osd_client *osdc,
1395 if (req->r_osd) { 1397 if (req->r_osd) {
1396 __cancel_request(req); 1398 __cancel_request(req);
1397 list_del_init(&req->r_osd_item); 1399 list_del_init(&req->r_osd_item);
1400 list_del_init(&req->r_linger_osd_item);
1398 req->r_osd = NULL; 1401 req->r_osd = NULL;
1399 } 1402 }
1400 1403
diff --git a/net/core/dev.c b/net/core/dev.c
index b793e3521a36..945bbd001359 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -4157,6 +4157,10 @@ EXPORT_SYMBOL(napi_gro_receive);
4157 4157
4158static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb) 4158static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
4159{ 4159{
4160 if (unlikely(skb->pfmemalloc)) {
4161 consume_skb(skb);
4162 return;
4163 }
4160 __skb_pull(skb, skb_headlen(skb)); 4164 __skb_pull(skb, skb_headlen(skb));
4161 /* restore the reserve we had after netdev_alloc_skb_ip_align() */ 4165 /* restore the reserve we had after netdev_alloc_skb_ip_align() */
4162 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb)); 4166 skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
diff --git a/net/core/ethtool.c b/net/core/ethtool.c
index 1600aa24d36b..06dfb293e5aa 100644
--- a/net/core/ethtool.c
+++ b/net/core/ethtool.c
@@ -1036,7 +1036,8 @@ static int ethtool_get_eeprom(struct net_device *dev, void __user *useraddr)
1036{ 1036{
1037 const struct ethtool_ops *ops = dev->ethtool_ops; 1037 const struct ethtool_ops *ops = dev->ethtool_ops;
1038 1038
1039 if (!ops->get_eeprom || !ops->get_eeprom_len) 1039 if (!ops->get_eeprom || !ops->get_eeprom_len ||
1040 !ops->get_eeprom_len(dev))
1040 return -EOPNOTSUPP; 1041 return -EOPNOTSUPP;
1041 1042
1042 return ethtool_get_any_eeprom(dev, useraddr, ops->get_eeprom, 1043 return ethtool_get_any_eeprom(dev, useraddr, ops->get_eeprom,
@@ -1052,7 +1053,8 @@ static int ethtool_set_eeprom(struct net_device *dev, void __user *useraddr)
1052 u8 *data; 1053 u8 *data;
1053 int ret = 0; 1054 int ret = 0;
1054 1055
1055 if (!ops->set_eeprom || !ops->get_eeprom_len) 1056 if (!ops->set_eeprom || !ops->get_eeprom_len ||
1057 !ops->get_eeprom_len(dev))
1056 return -EOPNOTSUPP; 1058 return -EOPNOTSUPP;
1057 1059
1058 if (copy_from_user(&eeprom, useraddr, sizeof(eeprom))) 1060 if (copy_from_user(&eeprom, useraddr, sizeof(eeprom)))
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index a6882686ca3a..76321ea442c3 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1498,6 +1498,7 @@ static int do_setlink(const struct sk_buff *skb,
1498 goto errout; 1498 goto errout;
1499 } 1499 }
1500 if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) { 1500 if (!netlink_ns_capable(skb, net->user_ns, CAP_NET_ADMIN)) {
1501 put_net(net);
1501 err = -EPERM; 1502 err = -EPERM;
1502 goto errout; 1503 goto errout;
1503 } 1504 }
@@ -2685,13 +2686,20 @@ static int rtnl_bridge_getlink(struct sk_buff *skb, struct netlink_callback *cb)
2685 int idx = 0; 2686 int idx = 0;
2686 u32 portid = NETLINK_CB(cb->skb).portid; 2687 u32 portid = NETLINK_CB(cb->skb).portid;
2687 u32 seq = cb->nlh->nlmsg_seq; 2688 u32 seq = cb->nlh->nlmsg_seq;
2688 struct nlattr *extfilt;
2689 u32 filter_mask = 0; 2689 u32 filter_mask = 0;
2690 2690
2691 extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg), 2691 if (nlmsg_len(cb->nlh) > sizeof(struct ifinfomsg)) {
2692 IFLA_EXT_MASK); 2692 struct nlattr *extfilt;
2693 if (extfilt) 2693
2694 filter_mask = nla_get_u32(extfilt); 2694 extfilt = nlmsg_find_attr(cb->nlh, sizeof(struct ifinfomsg),
2695 IFLA_EXT_MASK);
2696 if (extfilt) {
2697 if (nla_len(extfilt) < sizeof(filter_mask))
2698 return -EINVAL;
2699
2700 filter_mask = nla_get_u32(extfilt);
2701 }
2702 }
2695 2703
2696 rcu_read_lock(); 2704 rcu_read_lock();
2697 for_each_netdev_rcu(net, dev) { 2705 for_each_netdev_rcu(net, dev) {
@@ -2798,6 +2806,9 @@ static int rtnl_bridge_setlink(struct sk_buff *skb, struct nlmsghdr *nlh)
2798 if (br_spec) { 2806 if (br_spec) {
2799 nla_for_each_nested(attr, br_spec, rem) { 2807 nla_for_each_nested(attr, br_spec, rem) {
2800 if (nla_type(attr) == IFLA_BRIDGE_FLAGS) { 2808 if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
2809 if (nla_len(attr) < sizeof(flags))
2810 return -EINVAL;
2811
2801 have_flags = true; 2812 have_flags = true;
2802 flags = nla_get_u16(attr); 2813 flags = nla_get_u16(attr);
2803 break; 2814 break;
@@ -2868,6 +2879,9 @@ static int rtnl_bridge_dellink(struct sk_buff *skb, struct nlmsghdr *nlh)
2868 if (br_spec) { 2879 if (br_spec) {
2869 nla_for_each_nested(attr, br_spec, rem) { 2880 nla_for_each_nested(attr, br_spec, rem) {
2870 if (nla_type(attr) == IFLA_BRIDGE_FLAGS) { 2881 if (nla_type(attr) == IFLA_BRIDGE_FLAGS) {
2882 if (nla_len(attr) < sizeof(flags))
2883 return -EINVAL;
2884
2871 have_flags = true; 2885 have_flags = true;
2872 flags = nla_get_u16(attr); 2886 flags = nla_get_u16(attr);
2873 break; 2887 break;
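Editor's note (illustrative sketch, not part of the patch): the rtnetlink.c hunks above add the same defensive check in three places: an attribute's payload must be length-checked with nla_len() before nla_get_u16()/nla_get_u32() reads a fixed-size value from it, otherwise a truncated attribute from userspace causes an out-of-bounds read. The pattern in isolation (read_flags_attr is a made-up helper):

#include <net/netlink.h>

/* Read a u16 flags value from @attr, rejecting attributes that are too short. */
static int read_flags_attr(const struct nlattr *attr, u16 *flags)
{
	if (nla_len(attr) < sizeof(*flags))
		return -EINVAL;

	*flags = nla_get_u16(attr);
	return 0;
}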
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 61059a05ec95..32e31c299631 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -552,20 +552,13 @@ static void kfree_skbmem(struct sk_buff *skb)
552 case SKB_FCLONE_CLONE: 552 case SKB_FCLONE_CLONE:
553 fclones = container_of(skb, struct sk_buff_fclones, skb2); 553 fclones = container_of(skb, struct sk_buff_fclones, skb2);
554 554
555 /* Warning : We must perform the atomic_dec_and_test() before 555 /* The clone portion is available for
556 * setting skb->fclone back to SKB_FCLONE_FREE, otherwise 556 * fast-cloning again.
557 * skb_clone() could set clone_ref to 2 before our decrement.
558 * Anyway, if we are going to free the structure, no need to
559 * rewrite skb->fclone.
560 */ 557 */
561 if (atomic_dec_and_test(&fclones->fclone_ref)) { 558 skb->fclone = SKB_FCLONE_FREE;
559
560 if (atomic_dec_and_test(&fclones->fclone_ref))
562 kmem_cache_free(skbuff_fclone_cache, fclones); 561 kmem_cache_free(skbuff_fclone_cache, fclones);
563 } else {
564 /* The clone portion is available for
565 * fast-cloning again.
566 */
567 skb->fclone = SKB_FCLONE_FREE;
568 }
569 break; 562 break;
570 } 563 }
571} 564}
@@ -887,11 +880,7 @@ struct sk_buff *skb_clone(struct sk_buff *skb, gfp_t gfp_mask)
887 if (skb->fclone == SKB_FCLONE_ORIG && 880 if (skb->fclone == SKB_FCLONE_ORIG &&
888 n->fclone == SKB_FCLONE_FREE) { 881 n->fclone == SKB_FCLONE_FREE) {
889 n->fclone = SKB_FCLONE_CLONE; 882 n->fclone = SKB_FCLONE_CLONE;
890 /* As our fastclone was free, clone_ref must be 1 at this point. 883 atomic_inc(&fclones->fclone_ref);
891 * We could use atomic_inc() here, but it is faster
892 * to set the final value.
893 */
894 atomic_set(&fclones->fclone_ref, 2);
895 } else { 884 } else {
896 if (skb_pfmemalloc(skb)) 885 if (skb_pfmemalloc(skb))
897 gfp_mask |= __GFP_MEMALLOC; 886 gfp_mask |= __GFP_MEMALLOC;
@@ -4070,15 +4059,22 @@ EXPORT_SYMBOL_GPL(skb_scrub_packet);
4070unsigned int skb_gso_transport_seglen(const struct sk_buff *skb) 4059unsigned int skb_gso_transport_seglen(const struct sk_buff *skb)
4071{ 4060{
4072 const struct skb_shared_info *shinfo = skb_shinfo(skb); 4061 const struct skb_shared_info *shinfo = skb_shinfo(skb);
4062 unsigned int thlen = 0;
4073 4063
4074 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) 4064 if (skb->encapsulation) {
4075 return tcp_hdrlen(skb) + shinfo->gso_size; 4065 thlen = skb_inner_transport_header(skb) -
4066 skb_transport_header(skb);
4076 4067
4068 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
4069 thlen += inner_tcp_hdrlen(skb);
4070 } else if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))) {
4071 thlen = tcp_hdrlen(skb);
4072 }
4077 /* UFO sets gso_size to the size of the fragmentation 4073 /* UFO sets gso_size to the size of the fragmentation
4078 * payload, i.e. the size of the L4 (UDP) header is already 4074 * payload, i.e. the size of the L4 (UDP) header is already
4079 * accounted for. 4075 * accounted for.
4080 */ 4076 */
4081 return shinfo->gso_size; 4077 return thlen + shinfo->gso_size;
4082} 4078}
4083EXPORT_SYMBOL_GPL(skb_gso_transport_seglen); 4079EXPORT_SYMBOL_GPL(skb_gso_transport_seglen);
4084 4080
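Editor's note (worked example, not part of the patch): with this change skb_gso_transport_seglen() also counts the headers sitting between the outer and the inner transport header when the skb is encapsulated. Assuming a VXLAN-encapsulated TCP segment with no TCP options, thlen is roughly outer UDP (8) + VXLAN (8) + inner Ethernet (14) + inner IPv4 (20) + inner TCP (20) = 70 bytes, so the function returns 70 + gso_size; the old code derived the header length from the outer transport header only, which is wrong for tunnelled traffic.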
diff --git a/net/core/tso.c b/net/core/tso.c
index 8c3203c585b0..630b30b4fb53 100644
--- a/net/core/tso.c
+++ b/net/core/tso.c
@@ -1,6 +1,7 @@
1#include <linux/export.h> 1#include <linux/export.h>
2#include <net/ip.h> 2#include <net/ip.h>
3#include <net/tso.h> 3#include <net/tso.h>
4#include <asm/unaligned.h>
4 5
5/* Calculate expected number of TX descriptors */ 6/* Calculate expected number of TX descriptors */
6int tso_count_descs(struct sk_buff *skb) 7int tso_count_descs(struct sk_buff *skb)
@@ -23,7 +24,7 @@ void tso_build_hdr(struct sk_buff *skb, char *hdr, struct tso_t *tso,
23 iph->id = htons(tso->ip_id); 24 iph->id = htons(tso->ip_id);
24 iph->tot_len = htons(size + hdr_len - mac_hdr_len); 25 iph->tot_len = htons(size + hdr_len - mac_hdr_len);
25 tcph = (struct tcphdr *)(hdr + skb_transport_offset(skb)); 26 tcph = (struct tcphdr *)(hdr + skb_transport_offset(skb));
26 tcph->seq = htonl(tso->tcp_seq); 27 put_unaligned_be32(tso->tcp_seq, &tcph->seq);
27 tso->ip_id++; 28 tso->ip_id++;
28 29
29 if (!is_last) { 30 if (!is_last) {
diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c
index ca11d283bbeb..93ea80196f0e 100644
--- a/net/dcb/dcbnl.c
+++ b/net/dcb/dcbnl.c
@@ -1080,13 +1080,13 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
1080 if (!app) 1080 if (!app)
1081 return -EMSGSIZE; 1081 return -EMSGSIZE;
1082 1082
1083 spin_lock(&dcb_lock); 1083 spin_lock_bh(&dcb_lock);
1084 list_for_each_entry(itr, &dcb_app_list, list) { 1084 list_for_each_entry(itr, &dcb_app_list, list) {
1085 if (itr->ifindex == netdev->ifindex) { 1085 if (itr->ifindex == netdev->ifindex) {
1086 err = nla_put(skb, DCB_ATTR_IEEE_APP, sizeof(itr->app), 1086 err = nla_put(skb, DCB_ATTR_IEEE_APP, sizeof(itr->app),
1087 &itr->app); 1087 &itr->app);
1088 if (err) { 1088 if (err) {
1089 spin_unlock(&dcb_lock); 1089 spin_unlock_bh(&dcb_lock);
1090 return -EMSGSIZE; 1090 return -EMSGSIZE;
1091 } 1091 }
1092 } 1092 }
@@ -1097,7 +1097,7 @@ static int dcbnl_ieee_fill(struct sk_buff *skb, struct net_device *netdev)
1097 else 1097 else
1098 dcbx = -EOPNOTSUPP; 1098 dcbx = -EOPNOTSUPP;
1099 1099
1100 spin_unlock(&dcb_lock); 1100 spin_unlock_bh(&dcb_lock);
1101 nla_nest_end(skb, app); 1101 nla_nest_end(skb, app);
1102 1102
1103 /* get peer info if available */ 1103 /* get peer info if available */
@@ -1234,7 +1234,7 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
1234 } 1234 }
1235 1235
1236 /* local app */ 1236 /* local app */
1237 spin_lock(&dcb_lock); 1237 spin_lock_bh(&dcb_lock);
1238 app = nla_nest_start(skb, DCB_ATTR_CEE_APP_TABLE); 1238 app = nla_nest_start(skb, DCB_ATTR_CEE_APP_TABLE);
1239 if (!app) 1239 if (!app)
1240 goto dcb_unlock; 1240 goto dcb_unlock;
@@ -1271,7 +1271,7 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
1271 else 1271 else
1272 dcbx = -EOPNOTSUPP; 1272 dcbx = -EOPNOTSUPP;
1273 1273
1274 spin_unlock(&dcb_lock); 1274 spin_unlock_bh(&dcb_lock);
1275 1275
1276 /* features flags */ 1276 /* features flags */
1277 if (ops->getfeatcfg) { 1277 if (ops->getfeatcfg) {
@@ -1326,7 +1326,7 @@ static int dcbnl_cee_fill(struct sk_buff *skb, struct net_device *netdev)
1326 return 0; 1326 return 0;
1327 1327
1328dcb_unlock: 1328dcb_unlock:
1329 spin_unlock(&dcb_lock); 1329 spin_unlock_bh(&dcb_lock);
1330nla_put_failure: 1330nla_put_failure:
1331 return err; 1331 return err;
1332} 1332}
@@ -1762,10 +1762,10 @@ u8 dcb_getapp(struct net_device *dev, struct dcb_app *app)
1762 struct dcb_app_type *itr; 1762 struct dcb_app_type *itr;
1763 u8 prio = 0; 1763 u8 prio = 0;
1764 1764
1765 spin_lock(&dcb_lock); 1765 spin_lock_bh(&dcb_lock);
1766 if ((itr = dcb_app_lookup(app, dev->ifindex, 0))) 1766 if ((itr = dcb_app_lookup(app, dev->ifindex, 0)))
1767 prio = itr->app.priority; 1767 prio = itr->app.priority;
1768 spin_unlock(&dcb_lock); 1768 spin_unlock_bh(&dcb_lock);
1769 1769
1770 return prio; 1770 return prio;
1771} 1771}
@@ -1789,7 +1789,7 @@ int dcb_setapp(struct net_device *dev, struct dcb_app *new)
1789 if (dev->dcbnl_ops->getdcbx) 1789 if (dev->dcbnl_ops->getdcbx)
1790 event.dcbx = dev->dcbnl_ops->getdcbx(dev); 1790 event.dcbx = dev->dcbnl_ops->getdcbx(dev);
1791 1791
1792 spin_lock(&dcb_lock); 1792 spin_lock_bh(&dcb_lock);
1793 /* Search for existing match and replace */ 1793 /* Search for existing match and replace */
1794 if ((itr = dcb_app_lookup(new, dev->ifindex, 0))) { 1794 if ((itr = dcb_app_lookup(new, dev->ifindex, 0))) {
1795 if (new->priority) 1795 if (new->priority)
@@ -1804,7 +1804,7 @@ int dcb_setapp(struct net_device *dev, struct dcb_app *new)
1804 if (new->priority) 1804 if (new->priority)
1805 err = dcb_app_add(new, dev->ifindex); 1805 err = dcb_app_add(new, dev->ifindex);
1806out: 1806out:
1807 spin_unlock(&dcb_lock); 1807 spin_unlock_bh(&dcb_lock);
1808 if (!err) 1808 if (!err)
1809 call_dcbevent_notifiers(DCB_APP_EVENT, &event); 1809 call_dcbevent_notifiers(DCB_APP_EVENT, &event);
1810 return err; 1810 return err;
@@ -1823,10 +1823,10 @@ u8 dcb_ieee_getapp_mask(struct net_device *dev, struct dcb_app *app)
1823 struct dcb_app_type *itr; 1823 struct dcb_app_type *itr;
1824 u8 prio = 0; 1824 u8 prio = 0;
1825 1825
1826 spin_lock(&dcb_lock); 1826 spin_lock_bh(&dcb_lock);
1827 if ((itr = dcb_app_lookup(app, dev->ifindex, 0))) 1827 if ((itr = dcb_app_lookup(app, dev->ifindex, 0)))
1828 prio |= 1 << itr->app.priority; 1828 prio |= 1 << itr->app.priority;
1829 spin_unlock(&dcb_lock); 1829 spin_unlock_bh(&dcb_lock);
1830 1830
1831 return prio; 1831 return prio;
1832} 1832}
@@ -1850,7 +1850,7 @@ int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new)
1850 if (dev->dcbnl_ops->getdcbx) 1850 if (dev->dcbnl_ops->getdcbx)
1851 event.dcbx = dev->dcbnl_ops->getdcbx(dev); 1851 event.dcbx = dev->dcbnl_ops->getdcbx(dev);
1852 1852
1853 spin_lock(&dcb_lock); 1853 spin_lock_bh(&dcb_lock);
1854 /* Search for existing match and abort if found */ 1854 /* Search for existing match and abort if found */
1855 if (dcb_app_lookup(new, dev->ifindex, new->priority)) { 1855 if (dcb_app_lookup(new, dev->ifindex, new->priority)) {
1856 err = -EEXIST; 1856 err = -EEXIST;
@@ -1859,7 +1859,7 @@ int dcb_ieee_setapp(struct net_device *dev, struct dcb_app *new)
1859 1859
1860 err = dcb_app_add(new, dev->ifindex); 1860 err = dcb_app_add(new, dev->ifindex);
1861out: 1861out:
1862 spin_unlock(&dcb_lock); 1862 spin_unlock_bh(&dcb_lock);
1863 if (!err) 1863 if (!err)
1864 call_dcbevent_notifiers(DCB_APP_EVENT, &event); 1864 call_dcbevent_notifiers(DCB_APP_EVENT, &event);
1865 return err; 1865 return err;
@@ -1882,7 +1882,7 @@ int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del)
1882 if (dev->dcbnl_ops->getdcbx) 1882 if (dev->dcbnl_ops->getdcbx)
1883 event.dcbx = dev->dcbnl_ops->getdcbx(dev); 1883 event.dcbx = dev->dcbnl_ops->getdcbx(dev);
1884 1884
1885 spin_lock(&dcb_lock); 1885 spin_lock_bh(&dcb_lock);
1886 /* Search for existing match and remove it. */ 1886 /* Search for existing match and remove it. */
1887 if ((itr = dcb_app_lookup(del, dev->ifindex, del->priority))) { 1887 if ((itr = dcb_app_lookup(del, dev->ifindex, del->priority))) {
1888 list_del(&itr->list); 1888 list_del(&itr->list);
@@ -1890,7 +1890,7 @@ int dcb_ieee_delapp(struct net_device *dev, struct dcb_app *del)
1890 err = 0; 1890 err = 0;
1891 } 1891 }
1892 1892
1893 spin_unlock(&dcb_lock); 1893 spin_unlock_bh(&dcb_lock);
1894 if (!err) 1894 if (!err)
1895 call_dcbevent_notifiers(DCB_APP_EVENT, &event); 1895 call_dcbevent_notifiers(DCB_APP_EVENT, &event);
1896 return err; 1896 return err;
@@ -1902,12 +1902,12 @@ static void dcb_flushapp(void)
1902 struct dcb_app_type *app; 1902 struct dcb_app_type *app;
1903 struct dcb_app_type *tmp; 1903 struct dcb_app_type *tmp;
1904 1904
1905 spin_lock(&dcb_lock); 1905 spin_lock_bh(&dcb_lock);
1906 list_for_each_entry_safe(app, tmp, &dcb_app_list, list) { 1906 list_for_each_entry_safe(app, tmp, &dcb_app_list, list) {
1907 list_del(&app->list); 1907 list_del(&app->list);
1908 kfree(app); 1908 kfree(app);
1909 } 1909 }
1910 spin_unlock(&dcb_lock); 1910 spin_unlock_bh(&dcb_lock);
1911} 1911}
1912 1912
1913static int __init dcbnl_init(void) 1913static int __init dcbnl_init(void)
diff --git a/net/dsa/dsa.c b/net/dsa/dsa.c
index 22f34cf4cb27..6317b41c99b0 100644
--- a/net/dsa/dsa.c
+++ b/net/dsa/dsa.c
@@ -174,8 +174,11 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
174 dst->rcv = brcm_netdev_ops.rcv; 174 dst->rcv = brcm_netdev_ops.rcv;
175 break; 175 break;
176#endif 176#endif
177 default: 177 case DSA_TAG_PROTO_NONE:
178 break; 178 break;
179 default:
180 ret = -ENOPROTOOPT;
181 goto out;
179 } 182 }
180 183
181 dst->tag_protocol = drv->tag_protocol; 184 dst->tag_protocol = drv->tag_protocol;
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 6d1817449c36..ab03e00ffe8f 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -489,11 +489,14 @@ static void dsa_slave_phy_setup(struct dsa_slave_priv *p,
489 /* We could not connect to a designated PHY, so use the switch internal 489 /* We could not connect to a designated PHY, so use the switch internal
490 * MDIO bus instead 490 * MDIO bus instead
491 */ 491 */
492 if (!p->phy) 492 if (!p->phy) {
493 p->phy = ds->slave_mii_bus->phy_map[p->port]; 493 p->phy = ds->slave_mii_bus->phy_map[p->port];
494 else 494 phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
495 p->phy_interface);
496 } else {
495 pr_info("attached PHY at address %d [%s]\n", 497 pr_info("attached PHY at address %d [%s]\n",
496 p->phy->addr, p->phy->drv->name); 498 p->phy->addr, p->phy->drv->name);
499 }
497} 500}
498 501
499int dsa_slave_suspend(struct net_device *slave_dev) 502int dsa_slave_suspend(struct net_device *slave_dev)
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 92db7a69f2b9..e67da4e6c324 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1246,7 +1246,7 @@ static struct sk_buff *inet_gso_segment(struct sk_buff *skb,
1246 1246
1247 encap = SKB_GSO_CB(skb)->encap_level > 0; 1247 encap = SKB_GSO_CB(skb)->encap_level > 0;
1248 if (encap) 1248 if (encap)
1249 features = skb->dev->hw_enc_features & netif_skb_features(skb); 1249 features &= skb->dev->hw_enc_features;
1250 SKB_GSO_CB(skb)->encap_level += ihl; 1250 SKB_GSO_CB(skb)->encap_level += ihl;
1251 1251
1252 skb_reset_transport_header(skb); 1252 skb_reset_transport_header(skb);
@@ -1386,6 +1386,17 @@ out:
1386 return pp; 1386 return pp;
1387} 1387}
1388 1388
1389int inet_recv_error(struct sock *sk, struct msghdr *msg, int len, int *addr_len)
1390{
1391 if (sk->sk_family == AF_INET)
1392 return ip_recv_error(sk, msg, len, addr_len);
1393#if IS_ENABLED(CONFIG_IPV6)
1394 if (sk->sk_family == AF_INET6)
1395 return pingv6_ops.ipv6_recv_error(sk, msg, len, addr_len);
1396#endif
1397 return -EINVAL;
1398}
1399
1389static int inet_gro_complete(struct sk_buff *skb, int nhoff) 1400static int inet_gro_complete(struct sk_buff *skb, int nhoff)
1390{ 1401{
1391 __be16 newlen = htons(skb->len - nhoff); 1402 __be16 newlen = htons(skb->len - nhoff);
diff --git a/net/ipv4/fib_rules.c b/net/ipv4/fib_rules.c
index f2e15738534d..8f7bd56955b0 100644
--- a/net/ipv4/fib_rules.c
+++ b/net/ipv4/fib_rules.c
@@ -62,6 +62,10 @@ int __fib_lookup(struct net *net, struct flowi4 *flp, struct fib_result *res)
62 else 62 else
63 res->tclassid = 0; 63 res->tclassid = 0;
64#endif 64#endif
65
66 if (err == -ESRCH)
67 err = -ENETUNREACH;
68
65 return err; 69 return err;
66} 70}
67EXPORT_SYMBOL_GPL(__fib_lookup); 71EXPORT_SYMBOL_GPL(__fib_lookup);
diff --git a/net/ipv4/fou.c b/net/ipv4/fou.c
index 32e78924e246..606c520ffd5a 100644
--- a/net/ipv4/fou.c
+++ b/net/ipv4/fou.c
@@ -133,6 +133,8 @@ static int fou_gro_complete(struct sk_buff *skb, int nhoff)
133 int err = -ENOSYS; 133 int err = -ENOSYS;
134 const struct net_offload **offloads; 134 const struct net_offload **offloads;
135 135
136 udp_tunnel_gro_complete(skb, nhoff);
137
136 rcu_read_lock(); 138 rcu_read_lock();
137 offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads; 139 offloads = NAPI_GRO_CB(skb)->is_ipv6 ? inet6_offloads : inet_offloads;
138 ops = rcu_dereference(offloads[proto]); 140 ops = rcu_dereference(offloads[proto]);
diff --git a/net/ipv4/geneve.c b/net/ipv4/geneve.c
index 065cd94c640c..dedb21e99914 100644
--- a/net/ipv4/geneve.c
+++ b/net/ipv4/geneve.c
@@ -144,6 +144,8 @@ int geneve_xmit_skb(struct geneve_sock *gs, struct rtable *rt,
144 gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len); 144 gnvh = (struct genevehdr *)__skb_push(skb, sizeof(*gnvh) + opt_len);
145 geneve_build_header(gnvh, tun_flags, vni, opt_len, opt); 145 geneve_build_header(gnvh, tun_flags, vni, opt_len, opt);
146 146
147 skb_set_inner_protocol(skb, htons(ETH_P_TEB));
148
147 return udp_tunnel_xmit_skb(gs->sock, rt, skb, src, dst, 149 return udp_tunnel_xmit_skb(gs->sock, rt, skb, src, dst,
148 tos, ttl, df, src_port, dst_port, xnet); 150 tos, ttl, df, src_port, dst_port, xnet);
149} 151}
@@ -364,6 +366,7 @@ late_initcall(geneve_init_module);
364static void __exit geneve_cleanup_module(void) 366static void __exit geneve_cleanup_module(void)
365{ 367{
366 destroy_workqueue(geneve_wq); 368 destroy_workqueue(geneve_wq);
369 unregister_pernet_subsys(&geneve_net_ops);
367} 370}
368module_exit(geneve_cleanup_module); 371module_exit(geneve_cleanup_module);
369 372
diff --git a/net/ipv4/gre_offload.c b/net/ipv4/gre_offload.c
index ccda09628de7..bb5947b0ce2d 100644
--- a/net/ipv4/gre_offload.c
+++ b/net/ipv4/gre_offload.c
@@ -47,7 +47,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
47 47
48 greh = (struct gre_base_hdr *)skb_transport_header(skb); 48 greh = (struct gre_base_hdr *)skb_transport_header(skb);
49 49
50 ghl = skb_inner_network_header(skb) - skb_transport_header(skb); 50 ghl = skb_inner_mac_header(skb) - skb_transport_header(skb);
51 if (unlikely(ghl < sizeof(*greh))) 51 if (unlikely(ghl < sizeof(*greh)))
52 goto out; 52 goto out;
53 53
@@ -68,7 +68,7 @@ static struct sk_buff *gre_gso_segment(struct sk_buff *skb,
68 skb->mac_len = skb_inner_network_offset(skb); 68 skb->mac_len = skb_inner_network_offset(skb);
69 69
70 /* segment inner packet. */ 70 /* segment inner packet. */
71 enc_features = skb->dev->hw_enc_features & netif_skb_features(skb); 71 enc_features = skb->dev->hw_enc_features & features;
72 segs = skb_mac_gso_segment(skb, enc_features); 72 segs = skb_mac_gso_segment(skb, enc_features);
73 if (IS_ERR_OR_NULL(segs)) { 73 if (IS_ERR_OR_NULL(segs)) {
74 skb_gso_error_unwind(skb, protocol, ghl, mac_offset, mac_len); 74 skb_gso_error_unwind(skb, protocol, ghl, mac_offset, mac_len);
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index fb70e3ecc3e4..bb15d0e03d4f 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -318,9 +318,7 @@ igmp_scount(struct ip_mc_list *pmc, int type, int gdeleted, int sdeleted)
318 return scount; 318 return scount;
319} 319}
320 320
321#define igmp_skb_size(skb) (*(unsigned int *)((skb)->cb)) 321static struct sk_buff *igmpv3_newpack(struct net_device *dev, unsigned int mtu)
322
323static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
324{ 322{
325 struct sk_buff *skb; 323 struct sk_buff *skb;
326 struct rtable *rt; 324 struct rtable *rt;
@@ -330,6 +328,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
330 struct flowi4 fl4; 328 struct flowi4 fl4;
331 int hlen = LL_RESERVED_SPACE(dev); 329 int hlen = LL_RESERVED_SPACE(dev);
332 int tlen = dev->needed_tailroom; 330 int tlen = dev->needed_tailroom;
331 unsigned int size = mtu;
333 332
334 while (1) { 333 while (1) {
335 skb = alloc_skb(size + hlen + tlen, 334 skb = alloc_skb(size + hlen + tlen,
@@ -341,7 +340,6 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
341 return NULL; 340 return NULL;
342 } 341 }
343 skb->priority = TC_PRIO_CONTROL; 342 skb->priority = TC_PRIO_CONTROL;
344 igmp_skb_size(skb) = size;
345 343
346 rt = ip_route_output_ports(net, &fl4, NULL, IGMPV3_ALL_MCR, 0, 344 rt = ip_route_output_ports(net, &fl4, NULL, IGMPV3_ALL_MCR, 0,
347 0, 0, 345 0, 0,
@@ -354,6 +352,8 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
354 skb_dst_set(skb, &rt->dst); 352 skb_dst_set(skb, &rt->dst);
355 skb->dev = dev; 353 skb->dev = dev;
356 354
355 skb->reserved_tailroom = skb_end_offset(skb) -
356 min(mtu, skb_end_offset(skb));
357 skb_reserve(skb, hlen); 357 skb_reserve(skb, hlen);
358 358
359 skb_reset_network_header(skb); 359 skb_reset_network_header(skb);
@@ -423,8 +423,7 @@ static struct sk_buff *add_grhead(struct sk_buff *skb, struct ip_mc_list *pmc,
423 return skb; 423 return skb;
424} 424}
425 425
426#define AVAILABLE(skb) ((skb) ? ((skb)->dev ? igmp_skb_size(skb) - (skb)->len : \ 426#define AVAILABLE(skb) ((skb) ? skb_availroom(skb) : 0)
427 skb_tailroom(skb)) : 0)
428 427
429static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc, 428static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
430 int type, int gdeleted, int sdeleted) 429 int type, int gdeleted, int sdeleted)
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 9eb89f3f0ee4..19419b60cb37 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -146,7 +146,6 @@ evict_again:
146 atomic_inc(&fq->refcnt); 146 atomic_inc(&fq->refcnt);
147 spin_unlock(&hb->chain_lock); 147 spin_unlock(&hb->chain_lock);
148 del_timer_sync(&fq->timer); 148 del_timer_sync(&fq->timer);
149 WARN_ON(atomic_read(&fq->refcnt) != 1);
150 inet_frag_put(fq, f); 149 inet_frag_put(fq, f);
151 goto evict_again; 150 goto evict_again;
152 } 151 }
@@ -285,7 +284,8 @@ static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
285 struct inet_frag_bucket *hb; 284 struct inet_frag_bucket *hb;
286 285
287 hb = get_frag_bucket_locked(fq, f); 286 hb = get_frag_bucket_locked(fq, f);
288 hlist_del(&fq->list); 287 if (!(fq->flags & INET_FRAG_EVICTED))
288 hlist_del(&fq->list);
289 spin_unlock(&hb->chain_lock); 289 spin_unlock(&hb->chain_lock);
290} 290}
291 291
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index 88e5ef2c7f51..bc6471d4abcd 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -231,7 +231,7 @@ static int ip_finish_output_gso(struct sk_buff *skb)
231 */ 231 */
232 features = netif_skb_features(skb); 232 features = netif_skb_features(skb);
233 segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK); 233 segs = skb_gso_segment(skb, features & ~NETIF_F_GSO_MASK);
234 if (IS_ERR(segs)) { 234 if (IS_ERR_OR_NULL(segs)) {
235 kfree_skb(skb); 235 kfree_skb(skb);
236 return -ENOMEM; 236 return -ENOMEM;
237 } 237 }
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index c373a9ad4555..9daf2177dc00 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -195,7 +195,7 @@ int ip_cmsg_send(struct net *net, struct msghdr *msg, struct ipcm_cookie *ipc,
195 for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) { 195 for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) {
196 if (!CMSG_OK(msg, cmsg)) 196 if (!CMSG_OK(msg, cmsg))
197 return -EINVAL; 197 return -EINVAL;
198#if defined(CONFIG_IPV6) 198#if IS_ENABLED(CONFIG_IPV6)
199 if (allow_ipv6 && 199 if (allow_ipv6 &&
200 cmsg->cmsg_level == SOL_IPV6 && 200 cmsg->cmsg_level == SOL_IPV6 &&
201 cmsg->cmsg_type == IPV6_PKTINFO) { 201 cmsg->cmsg_type == IPV6_PKTINFO) {
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 3e861011e4a3..1a7e979e80ba 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -528,6 +528,7 @@ static struct rtnl_link_ops vti_link_ops __read_mostly = {
528 .validate = vti_tunnel_validate, 528 .validate = vti_tunnel_validate,
529 .newlink = vti_newlink, 529 .newlink = vti_newlink,
530 .changelink = vti_changelink, 530 .changelink = vti_changelink,
531 .dellink = ip_tunnel_dellink,
531 .get_size = vti_get_size, 532 .get_size = vti_get_size,
532 .fill_info = vti_fill_info, 533 .fill_info = vti_fill_info,
533}; 534};
diff --git a/net/ipv4/netfilter/nf_reject_ipv4.c b/net/ipv4/netfilter/nf_reject_ipv4.c
index b023b4eb1a96..1baaa83dfe5c 100644
--- a/net/ipv4/netfilter/nf_reject_ipv4.c
+++ b/net/ipv4/netfilter/nf_reject_ipv4.c
@@ -6,48 +6,45 @@
6 * published by the Free Software Foundation. 6 * published by the Free Software Foundation.
7 */ 7 */
8 8
9#include <linux/module.h>
9#include <net/ip.h> 10#include <net/ip.h>
10#include <net/tcp.h> 11#include <net/tcp.h>
11#include <net/route.h> 12#include <net/route.h>
12#include <net/dst.h> 13#include <net/dst.h>
13#include <linux/netfilter_ipv4.h> 14#include <linux/netfilter_ipv4.h>
15#include <net/netfilter/ipv4/nf_reject.h>
14 16
15/* Send RST reply */ 17const struct tcphdr *nf_reject_ip_tcphdr_get(struct sk_buff *oldskb,
16void nf_send_reset(struct sk_buff *oldskb, int hook) 18 struct tcphdr *_oth, int hook)
17{ 19{
18 struct sk_buff *nskb;
19 const struct iphdr *oiph;
20 struct iphdr *niph;
21 const struct tcphdr *oth; 20 const struct tcphdr *oth;
22 struct tcphdr _otcph, *tcph;
23 21
24 /* IP header checks: fragment. */ 22 /* IP header checks: fragment. */
25 if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET)) 23 if (ip_hdr(oldskb)->frag_off & htons(IP_OFFSET))
26 return; 24 return NULL;
27 25
28 oth = skb_header_pointer(oldskb, ip_hdrlen(oldskb), 26 oth = skb_header_pointer(oldskb, ip_hdrlen(oldskb),
29 sizeof(_otcph), &_otcph); 27 sizeof(struct tcphdr), _oth);
30 if (oth == NULL) 28 if (oth == NULL)
31 return; 29 return NULL;
32 30
33 /* No RST for RST. */ 31 /* No RST for RST. */
34 if (oth->rst) 32 if (oth->rst)
35 return; 33 return NULL;
36
37 if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
38 return;
39 34
40 /* Check checksum */ 35 /* Check checksum */
41 if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), IPPROTO_TCP)) 36 if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), IPPROTO_TCP))
42 return; 37 return NULL;
43 oiph = ip_hdr(oldskb);
44 38
45 nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) + 39 return oth;
46 LL_MAX_HEADER, GFP_ATOMIC); 40}
47 if (!nskb) 41EXPORT_SYMBOL_GPL(nf_reject_ip_tcphdr_get);
48 return;
49 42
50 skb_reserve(nskb, LL_MAX_HEADER); 43struct iphdr *nf_reject_iphdr_put(struct sk_buff *nskb,
44 const struct sk_buff *oldskb,
45 __be16 protocol, int ttl)
46{
47 struct iphdr *niph, *oiph = ip_hdr(oldskb);
51 48
52 skb_reset_network_header(nskb); 49 skb_reset_network_header(nskb);
53 niph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr)); 50 niph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr));
@@ -56,10 +53,23 @@ void nf_send_reset(struct sk_buff *oldskb, int hook)
56 niph->tos = 0; 53 niph->tos = 0;
57 niph->id = 0; 54 niph->id = 0;
58 niph->frag_off = htons(IP_DF); 55 niph->frag_off = htons(IP_DF);
59 niph->protocol = IPPROTO_TCP; 56 niph->protocol = protocol;
60 niph->check = 0; 57 niph->check = 0;
61 niph->saddr = oiph->daddr; 58 niph->saddr = oiph->daddr;
62 niph->daddr = oiph->saddr; 59 niph->daddr = oiph->saddr;
60 niph->ttl = ttl;
61
62 nskb->protocol = htons(ETH_P_IP);
63
64 return niph;
65}
66EXPORT_SYMBOL_GPL(nf_reject_iphdr_put);
67
68void nf_reject_ip_tcphdr_put(struct sk_buff *nskb, const struct sk_buff *oldskb,
69 const struct tcphdr *oth)
70{
71 struct iphdr *niph = ip_hdr(nskb);
72 struct tcphdr *tcph;
63 73
64 skb_reset_transport_header(nskb); 74 skb_reset_transport_header(nskb);
65 tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr)); 75 tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr));
@@ -68,9 +78,9 @@ void nf_send_reset(struct sk_buff *oldskb, int hook)
68 tcph->dest = oth->source; 78 tcph->dest = oth->source;
69 tcph->doff = sizeof(struct tcphdr) / 4; 79 tcph->doff = sizeof(struct tcphdr) / 4;
70 80
71 if (oth->ack) 81 if (oth->ack) {
72 tcph->seq = oth->ack_seq; 82 tcph->seq = oth->ack_seq;
73 else { 83 } else {
74 tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin + 84 tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin +
75 oldskb->len - ip_hdrlen(oldskb) - 85 oldskb->len - ip_hdrlen(oldskb) -
76 (oth->doff << 2)); 86 (oth->doff << 2));
@@ -83,16 +93,43 @@ void nf_send_reset(struct sk_buff *oldskb, int hook)
83 nskb->ip_summed = CHECKSUM_PARTIAL; 93 nskb->ip_summed = CHECKSUM_PARTIAL;
84 nskb->csum_start = (unsigned char *)tcph - nskb->head; 94 nskb->csum_start = (unsigned char *)tcph - nskb->head;
85 nskb->csum_offset = offsetof(struct tcphdr, check); 95 nskb->csum_offset = offsetof(struct tcphdr, check);
96}
97EXPORT_SYMBOL_GPL(nf_reject_ip_tcphdr_put);
98
99/* Send RST reply */
100void nf_send_reset(struct sk_buff *oldskb, int hook)
101{
102 struct sk_buff *nskb;
103 const struct iphdr *oiph;
104 struct iphdr *niph;
105 const struct tcphdr *oth;
106 struct tcphdr _oth;
107
108 oth = nf_reject_ip_tcphdr_get(oldskb, &_oth, hook);
109 if (!oth)
110 return;
111
112 if (skb_rtable(oldskb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
113 return;
114
115 oiph = ip_hdr(oldskb);
116
117 nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
118 LL_MAX_HEADER, GFP_ATOMIC);
119 if (!nskb)
120 return;
86 121
87 /* ip_route_me_harder expects skb->dst to be set */ 122 /* ip_route_me_harder expects skb->dst to be set */
88 skb_dst_set_noref(nskb, skb_dst(oldskb)); 123 skb_dst_set_noref(nskb, skb_dst(oldskb));
89 124
90 nskb->protocol = htons(ETH_P_IP); 125 skb_reserve(nskb, LL_MAX_HEADER);
126 niph = nf_reject_iphdr_put(nskb, oldskb, IPPROTO_TCP,
127 ip4_dst_hoplimit(skb_dst(nskb)));
128 nf_reject_ip_tcphdr_put(nskb, oldskb, oth);
129
91 if (ip_route_me_harder(nskb, RTN_UNSPEC)) 130 if (ip_route_me_harder(nskb, RTN_UNSPEC))
92 goto free_nskb; 131 goto free_nskb;
93 132
94 niph->ttl = ip4_dst_hoplimit(skb_dst(nskb));
95
96 /* "Never happens" */ 133 /* "Never happens" */
97 if (nskb->len > dst_mtu(skb_dst(nskb))) 134 if (nskb->len > dst_mtu(skb_dst(nskb)))
98 goto free_nskb; 135 goto free_nskb;
@@ -125,3 +162,5 @@ void nf_send_reset(struct sk_buff *oldskb, int hook)
125 kfree_skb(nskb); 162 kfree_skb(nskb);
126} 163}
127EXPORT_SYMBOL_GPL(nf_send_reset); 164EXPORT_SYMBOL_GPL(nf_send_reset);
165
166MODULE_LICENSE("GPL");
diff --git a/net/ipv4/netfilter/nft_masq_ipv4.c b/net/ipv4/netfilter/nft_masq_ipv4.c
index 1c636d6b5b50..665de06561cd 100644
--- a/net/ipv4/netfilter/nft_masq_ipv4.c
+++ b/net/ipv4/netfilter/nft_masq_ipv4.c
@@ -24,6 +24,7 @@ static void nft_masq_ipv4_eval(const struct nft_expr *expr,
24 struct nf_nat_range range; 24 struct nf_nat_range range;
25 unsigned int verdict; 25 unsigned int verdict;
26 26
27 memset(&range, 0, sizeof(range));
27 range.flags = priv->flags; 28 range.flags = priv->flags;
28 29
29 verdict = nf_nat_masquerade_ipv4(pkt->skb, pkt->ops->hooknum, 30 verdict = nf_nat_masquerade_ipv4(pkt->skb, pkt->ops->hooknum,
@@ -39,6 +40,7 @@ static const struct nft_expr_ops nft_masq_ipv4_ops = {
39 .eval = nft_masq_ipv4_eval, 40 .eval = nft_masq_ipv4_eval,
40 .init = nft_masq_init, 41 .init = nft_masq_init,
41 .dump = nft_masq_dump, 42 .dump = nft_masq_dump,
43 .validate = nft_masq_validate,
42}; 44};
43 45
44static struct nft_expr_type nft_masq_ipv4_type __read_mostly = { 46static struct nft_expr_type nft_masq_ipv4_type __read_mostly = {
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 57f7c9804139..5d740cccf69e 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -217,6 +217,8 @@ static struct sock *ping_lookup(struct net *net, struct sk_buff *skb, u16 ident)
217 &ipv6_hdr(skb)->daddr)) 217 &ipv6_hdr(skb)->daddr))
218 continue; 218 continue;
219#endif 219#endif
220 } else {
221 continue;
220 } 222 }
221 223
222 if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif) 224 if (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif)
@@ -853,16 +855,8 @@ int ping_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
853 if (flags & MSG_OOB) 855 if (flags & MSG_OOB)
854 goto out; 856 goto out;
855 857
856 if (flags & MSG_ERRQUEUE) { 858 if (flags & MSG_ERRQUEUE)
857 if (family == AF_INET) { 859 return inet_recv_error(sk, msg, len, addr_len);
858 return ip_recv_error(sk, msg, len, addr_len);
859#if IS_ENABLED(CONFIG_IPV6)
860 } else if (family == AF_INET6) {
861 return pingv6_ops.ipv6_recv_error(sk, msg, len,
862 addr_len);
863#endif
864 }
865 }
866 860
867 skb = skb_recv_datagram(sk, flags, noblock, &err); 861 skb = skb_recv_datagram(sk, flags, noblock, &err);
868 if (!skb) 862 if (!skb)
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 2d4ae469b471..6a2155b02602 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1798,6 +1798,7 @@ local_input:
1798no_route: 1798no_route:
1799 RT_CACHE_STAT_INC(in_no_route); 1799 RT_CACHE_STAT_INC(in_no_route);
1800 res.type = RTN_UNREACHABLE; 1800 res.type = RTN_UNREACHABLE;
1801 res.fi = NULL;
1801 goto local_input; 1802 goto local_input;
1802 1803
1803 /* 1804 /*
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 1bec4e76d88c..38c2bcb8dd5d 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1598,7 +1598,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1598 u32 urg_hole = 0; 1598 u32 urg_hole = 0;
1599 1599
1600 if (unlikely(flags & MSG_ERRQUEUE)) 1600 if (unlikely(flags & MSG_ERRQUEUE))
1601 return ip_recv_error(sk, msg, len, addr_len); 1601 return inet_recv_error(sk, msg, len, addr_len);
1602 1602
1603 if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue) && 1603 if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue) &&
1604 (sk->sk_state == TCP_ESTABLISHED)) 1604 (sk->sk_state == TCP_ESTABLISHED))
@@ -2868,61 +2868,42 @@ EXPORT_SYMBOL(compat_tcp_getsockopt);
2868#endif 2868#endif
2869 2869
2870#ifdef CONFIG_TCP_MD5SIG 2870#ifdef CONFIG_TCP_MD5SIG
2871static struct tcp_md5sig_pool __percpu *tcp_md5sig_pool __read_mostly; 2871static DEFINE_PER_CPU(struct tcp_md5sig_pool, tcp_md5sig_pool);
2872static DEFINE_MUTEX(tcp_md5sig_mutex); 2872static DEFINE_MUTEX(tcp_md5sig_mutex);
2873 2873static bool tcp_md5sig_pool_populated = false;
2874static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool __percpu *pool)
2875{
2876 int cpu;
2877
2878 for_each_possible_cpu(cpu) {
2879 struct tcp_md5sig_pool *p = per_cpu_ptr(pool, cpu);
2880
2881 if (p->md5_desc.tfm)
2882 crypto_free_hash(p->md5_desc.tfm);
2883 }
2884 free_percpu(pool);
2885}
2886 2874
2887static void __tcp_alloc_md5sig_pool(void) 2875static void __tcp_alloc_md5sig_pool(void)
2888{ 2876{
2889 int cpu; 2877 int cpu;
2890 struct tcp_md5sig_pool __percpu *pool;
2891
2892 pool = alloc_percpu(struct tcp_md5sig_pool);
2893 if (!pool)
2894 return;
2895 2878
2896 for_each_possible_cpu(cpu) { 2879 for_each_possible_cpu(cpu) {
2897 struct crypto_hash *hash; 2880 if (!per_cpu(tcp_md5sig_pool, cpu).md5_desc.tfm) {
2898 2881 struct crypto_hash *hash;
2899 hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
2900 if (IS_ERR_OR_NULL(hash))
2901 goto out_free;
2902 2882
2903 per_cpu_ptr(pool, cpu)->md5_desc.tfm = hash; 2883 hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
2884 if (IS_ERR_OR_NULL(hash))
2885 return;
2886 per_cpu(tcp_md5sig_pool, cpu).md5_desc.tfm = hash;
2887 }
2904 } 2888 }
2905 /* before setting tcp_md5sig_pool, we must commit all writes 2889 /* before setting tcp_md5sig_pool_populated, we must commit all writes
2906 * to memory. See ACCESS_ONCE() in tcp_get_md5sig_pool() 2890 * to memory. See smp_rmb() in tcp_get_md5sig_pool()
2907 */ 2891 */
2908 smp_wmb(); 2892 smp_wmb();
2909 tcp_md5sig_pool = pool; 2893 tcp_md5sig_pool_populated = true;
2910 return;
2911out_free:
2912 __tcp_free_md5sig_pool(pool);
2913} 2894}
2914 2895
2915bool tcp_alloc_md5sig_pool(void) 2896bool tcp_alloc_md5sig_pool(void)
2916{ 2897{
2917 if (unlikely(!tcp_md5sig_pool)) { 2898 if (unlikely(!tcp_md5sig_pool_populated)) {
2918 mutex_lock(&tcp_md5sig_mutex); 2899 mutex_lock(&tcp_md5sig_mutex);
2919 2900
2920 if (!tcp_md5sig_pool) 2901 if (!tcp_md5sig_pool_populated)
2921 __tcp_alloc_md5sig_pool(); 2902 __tcp_alloc_md5sig_pool();
2922 2903
2923 mutex_unlock(&tcp_md5sig_mutex); 2904 mutex_unlock(&tcp_md5sig_mutex);
2924 } 2905 }
2925 return tcp_md5sig_pool != NULL; 2906 return tcp_md5sig_pool_populated;
2926} 2907}
2927EXPORT_SYMBOL(tcp_alloc_md5sig_pool); 2908EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
2928 2909
@@ -2936,13 +2917,13 @@ EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
2936 */ 2917 */
2937struct tcp_md5sig_pool *tcp_get_md5sig_pool(void) 2918struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
2938{ 2919{
2939 struct tcp_md5sig_pool __percpu *p;
2940
2941 local_bh_disable(); 2920 local_bh_disable();
2942 p = ACCESS_ONCE(tcp_md5sig_pool);
2943 if (p)
2944 return raw_cpu_ptr(p);
2945 2921
2922 if (tcp_md5sig_pool_populated) {
2923 /* coupled with smp_wmb() in __tcp_alloc_md5sig_pool() */
2924 smp_rmb();
2925 return this_cpu_ptr(&tcp_md5sig_pool);
2926 }
2946 local_bh_enable(); 2927 local_bh_enable();
2947 return NULL; 2928 return NULL;
2948} 2929}
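The tcp.c hunk above publishes the per-cpu MD5 pools through a boolean flag ordered with smp_wmb()/smp_rmb(). A minimal sketch of that publish/consume pattern, using C11 release/acquire atomics as a user-space stand-in for the kernel barriers (illustrative names only, not the kernel API):

        #include <stdatomic.h>
        #include <stdbool.h>
        #include <stdio.h>

        static int pool_data;                        /* stands in for the per-cpu md5 pools */
        static atomic_bool pool_populated;           /* stands in for tcp_md5sig_pool_populated */

        static void alloc_pool(void)
        {
                pool_data = 42;                      /* fill the pool first ...              */
                atomic_store_explicit(&pool_populated, true,
                                      memory_order_release);  /* ... then set the flag (~ smp_wmb()) */
        }

        static int get_pool(void)
        {
                if (atomic_load_explicit(&pool_populated,
                                         memory_order_acquire)) /* flag test + ~ smp_rmb() */
                        return pool_data;            /* guaranteed to observe the filled pool */
                return -1;
        }

        int main(void)
        {
                alloc_pool();
                printf("%d\n", get_pool());          /* prints 42 */
                return 0;
        }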
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index a12b455928e5..d107ee246a1d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2315,6 +2315,35 @@ static inline bool tcp_packet_delayed(const struct tcp_sock *tp)
2315 2315
2316/* Undo procedures. */ 2316/* Undo procedures. */
2317 2317
2318/* We can clear retrans_stamp when there are no retransmissions in the
2319 * window. It would seem that it is trivially available for us in
2320 * tp->retrans_out, however, that kind of assumptions doesn't consider
2321 * what will happen if errors occur when sending retransmission for the
2322 * second time. ...It could the that such segment has only
2323 * TCPCB_EVER_RETRANS set at the present time. It seems that checking
2324 * the head skb is enough except for some reneging corner cases that
2325 * are not worth the effort.
2326 *
2327 * Main reason for all this complexity is the fact that connection dying
2328 * time now depends on the validity of the retrans_stamp, in particular,
2329 * that successive retransmissions of a segment must not advance
2330 * retrans_stamp under any conditions.
2331 */
2332static bool tcp_any_retrans_done(const struct sock *sk)
2333{
2334 const struct tcp_sock *tp = tcp_sk(sk);
2335 struct sk_buff *skb;
2336
2337 if (tp->retrans_out)
2338 return true;
2339
2340 skb = tcp_write_queue_head(sk);
2341 if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS))
2342 return true;
2343
2344 return false;
2345}
2346
2318#if FASTRETRANS_DEBUG > 1 2347#if FASTRETRANS_DEBUG > 1
2319static void DBGUNDO(struct sock *sk, const char *msg) 2348static void DBGUNDO(struct sock *sk, const char *msg)
2320{ 2349{
@@ -2410,6 +2439,8 @@ static bool tcp_try_undo_recovery(struct sock *sk)
2410 * is ACKed. For Reno it is MUST to prevent false 2439 * is ACKed. For Reno it is MUST to prevent false
2411 * fast retransmits (RFC2582). SACK TCP is safe. */ 2440 * fast retransmits (RFC2582). SACK TCP is safe. */
2412 tcp_moderate_cwnd(tp); 2441 tcp_moderate_cwnd(tp);
2442 if (!tcp_any_retrans_done(sk))
2443 tp->retrans_stamp = 0;
2413 return true; 2444 return true;
2414 } 2445 }
2415 tcp_set_ca_state(sk, TCP_CA_Open); 2446 tcp_set_ca_state(sk, TCP_CA_Open);
@@ -2430,35 +2461,6 @@ static bool tcp_try_undo_dsack(struct sock *sk)
2430 return false; 2461 return false;
2431} 2462}
2432 2463
2433/* We can clear retrans_stamp when there are no retransmissions in the
2434 * window. It would seem that it is trivially available for us in
2435 * tp->retrans_out, however, that kind of assumptions doesn't consider
2436 * what will happen if errors occur when sending retransmission for the
2437 * second time. ...It could the that such segment has only
2438 * TCPCB_EVER_RETRANS set at the present time. It seems that checking
2439 * the head skb is enough except for some reneging corner cases that
2440 * are not worth the effort.
2441 *
2442 * Main reason for all this complexity is the fact that connection dying
2443 * time now depends on the validity of the retrans_stamp, in particular,
2444 * that successive retransmissions of a segment must not advance
2445 * retrans_stamp under any conditions.
2446 */
2447static bool tcp_any_retrans_done(const struct sock *sk)
2448{
2449 const struct tcp_sock *tp = tcp_sk(sk);
2450 struct sk_buff *skb;
2451
2452 if (tp->retrans_out)
2453 return true;
2454
2455 skb = tcp_write_queue_head(sk);
2456 if (unlikely(skb && TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS))
2457 return true;
2458
2459 return false;
2460}
2461
2462/* Undo during loss recovery after partial ACK or using F-RTO. */ 2464/* Undo during loss recovery after partial ACK or using F-RTO. */
2463static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo) 2465static bool tcp_try_undo_loss(struct sock *sk, bool frto_undo)
2464{ 2466{
@@ -5229,7 +5231,7 @@ slow_path:
5229 if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb)) 5231 if (len < (th->doff << 2) || tcp_checksum_complete_user(sk, skb))
5230 goto csum_error; 5232 goto csum_error;
5231 5233
5232 if (!th->ack && !th->rst) 5234 if (!th->ack && !th->rst && !th->syn)
5233 goto discard; 5235 goto discard;
5234 5236
5235 /* 5237 /*
@@ -5648,7 +5650,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
5648 goto discard; 5650 goto discard;
5649 } 5651 }
5650 5652
5651 if (!th->ack && !th->rst) 5653 if (!th->ack && !th->rst && !th->syn)
5652 goto discard; 5654 goto discard;
5653 5655
5654 if (!tcp_validate_incoming(sk, skb, th, 0)) 5656 if (!tcp_validate_incoming(sk, skb, th, 0))
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 94d1a7757ff7..147be2024290 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -206,8 +206,6 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
206 inet->inet_dport = usin->sin_port; 206 inet->inet_dport = usin->sin_port;
207 inet->inet_daddr = daddr; 207 inet->inet_daddr = daddr;
208 208
209 inet_set_txhash(sk);
210
211 inet_csk(sk)->icsk_ext_hdr_len = 0; 209 inet_csk(sk)->icsk_ext_hdr_len = 0;
212 if (inet_opt) 210 if (inet_opt)
213 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen; 211 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
@@ -224,6 +222,8 @@ int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
224 if (err) 222 if (err)
225 goto failure; 223 goto failure;
226 224
225 inet_set_txhash(sk);
226
227 rt = ip_route_newports(fl4, rt, orig_sport, orig_dport, 227 rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
228 inet->inet_sport, inet->inet_dport, sk); 228 inet->inet_sport, inet->inet_dport, sk);
229 if (IS_ERR(rt)) { 229 if (IS_ERR(rt)) {
@@ -598,7 +598,10 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb)
598 if (th->rst) 598 if (th->rst)
599 return; 599 return;
600 600
601 if (skb_rtable(skb)->rt_type != RTN_LOCAL) 601 /* If sk not NULL, it means we did a successful lookup and incoming
602 * route had to be correct. prequeue might have dropped our dst.
603 */
604 if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
602 return; 605 return;
603 606
604 /* Swap the send and the receive. */ 607 /* Swap the send and the receive. */
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index 3af21296d967..a3d453b94747 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2126,7 +2126,7 @@ bool tcp_schedule_loss_probe(struct sock *sk)
2126static bool skb_still_in_host_queue(const struct sock *sk, 2126static bool skb_still_in_host_queue(const struct sock *sk,
2127 const struct sk_buff *skb) 2127 const struct sk_buff *skb)
2128{ 2128{
2129 if (unlikely(skb_fclone_busy(skb))) { 2129 if (unlikely(skb_fclone_busy(sk, skb))) {
2130 NET_INC_STATS_BH(sock_net(sk), 2130 NET_INC_STATS_BH(sock_net(sk),
2131 LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES); 2131 LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES);
2132 return true; 2132 return true;
diff --git a/net/ipv4/udp_offload.c b/net/ipv4/udp_offload.c
index 507310ef4b56..6480cea7aa53 100644
--- a/net/ipv4/udp_offload.c
+++ b/net/ipv4/udp_offload.c
@@ -58,7 +58,7 @@ static struct sk_buff *__skb_udp_tunnel_segment(struct sk_buff *skb,
58 skb->encap_hdr_csum = 1; 58 skb->encap_hdr_csum = 1;
59 59
60 /* segment inner packet. */ 60 /* segment inner packet. */
61 enc_features = skb->dev->hw_enc_features & netif_skb_features(skb); 61 enc_features = skb->dev->hw_enc_features & features;
62 segs = gso_inner_segment(skb, enc_features); 62 segs = gso_inner_segment(skb, enc_features);
63 if (IS_ERR_OR_NULL(segs)) { 63 if (IS_ERR_OR_NULL(segs)) {
64 skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset, 64 skb_gso_error_unwind(skb, protocol, tnl_hlen, mac_offset,
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 725c763270a0..0169ccf5aa4f 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -4531,6 +4531,7 @@ static int inet6_set_iftoken(struct inet6_dev *idev, struct in6_addr *token)
4531 } 4531 }
4532 4532
4533 write_unlock_bh(&idev->lock); 4533 write_unlock_bh(&idev->lock);
4534 inet6_ifinfo_notify(RTM_NEWLINK, idev);
4534 addrconf_verify_rtnl(); 4535 addrconf_verify_rtnl();
4535 return 0; 4536 return 0;
4536} 4537}
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index 12c3c8ef3849..0e32d2e1bdbf 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -502,11 +502,11 @@ static int ip6gre_rcv(struct sk_buff *skb)
502 502
503 skb->protocol = gre_proto; 503 skb->protocol = gre_proto;
504 /* WCCP version 1 and 2 protocol decoding. 504 /* WCCP version 1 and 2 protocol decoding.
505 * - Change protocol to IP 505 * - Change protocol to IPv6
506 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header 506 * - When dealing with WCCPv2, Skip extra 4 bytes in GRE header
507 */ 507 */
508 if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) { 508 if (flags == 0 && gre_proto == htons(ETH_P_WCCP)) {
509 skb->protocol = htons(ETH_P_IP); 509 skb->protocol = htons(ETH_P_IPV6);
510 if ((*(h + offset) & 0xF0) != 0x40) 510 if ((*(h + offset) & 0xF0) != 0x40)
511 offset += 4; 511 offset += 4;
512 } 512 }
@@ -961,8 +961,6 @@ static void ip6gre_tnl_link_config(struct ip6_tnl *t, int set_mtu)
961 else 961 else
962 dev->flags &= ~IFF_POINTOPOINT; 962 dev->flags &= ~IFF_POINTOPOINT;
963 963
964 dev->iflink = p->link;
965
966 /* Precalculate GRE options length */ 964 /* Precalculate GRE options length */
967 if (t->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) { 965 if (t->parms.o_flags&(GRE_CSUM|GRE_KEY|GRE_SEQ)) {
968 if (t->parms.o_flags&GRE_CSUM) 966 if (t->parms.o_flags&GRE_CSUM)
@@ -1272,6 +1270,7 @@ static int ip6gre_tunnel_init(struct net_device *dev)
1272 u64_stats_init(&ip6gre_tunnel_stats->syncp); 1270 u64_stats_init(&ip6gre_tunnel_stats->syncp);
1273 } 1271 }
1274 1272
1273 dev->iflink = tunnel->parms.link;
1275 1274
1276 return 0; 1275 return 0;
1277} 1276}
@@ -1481,6 +1480,8 @@ static int ip6gre_tap_init(struct net_device *dev)
1481 if (!dev->tstats) 1480 if (!dev->tstats)
1482 return -ENOMEM; 1481 return -ENOMEM;
1483 1482
1483 dev->iflink = tunnel->parms.link;
1484
1484 return 0; 1485 return 0;
1485} 1486}
1486 1487
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 91014d32488d..01e12d0d8fcc 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -69,7 +69,8 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
69 int nhoff; 69 int nhoff;
70 70
71 if (unlikely(skb_shinfo(skb)->gso_type & 71 if (unlikely(skb_shinfo(skb)->gso_type &
72 ~(SKB_GSO_UDP | 72 ~(SKB_GSO_TCPV4 |
73 SKB_GSO_UDP |
73 SKB_GSO_DODGY | 74 SKB_GSO_DODGY |
74 SKB_GSO_TCP_ECN | 75 SKB_GSO_TCP_ECN |
75 SKB_GSO_GRE | 76 SKB_GSO_GRE |
@@ -90,7 +91,7 @@ static struct sk_buff *ipv6_gso_segment(struct sk_buff *skb,
90 91
91 encap = SKB_GSO_CB(skb)->encap_level > 0; 92 encap = SKB_GSO_CB(skb)->encap_level > 0;
92 if (encap) 93 if (encap)
93 features = skb->dev->hw_enc_features & netif_skb_features(skb); 94 features &= skb->dev->hw_enc_features;
94 SKB_GSO_CB(skb)->encap_level += sizeof(*ipv6h); 95 SKB_GSO_CB(skb)->encap_level += sizeof(*ipv6h);
95 96
96 ipv6h = ipv6_hdr(skb); 97 ipv6h = ipv6_hdr(skb);
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 9409887fb664..9cb94cfa0ae7 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -272,9 +272,6 @@ static int ip6_tnl_create2(struct net_device *dev)
272 int err; 272 int err;
273 273
274 t = netdev_priv(dev); 274 t = netdev_priv(dev);
275 err = ip6_tnl_dev_init(dev);
276 if (err < 0)
277 goto out;
278 275
279 err = register_netdevice(dev); 276 err = register_netdevice(dev);
280 if (err < 0) 277 if (err < 0)
@@ -1462,6 +1459,7 @@ ip6_tnl_change_mtu(struct net_device *dev, int new_mtu)
1462 1459
1463 1460
1464static const struct net_device_ops ip6_tnl_netdev_ops = { 1461static const struct net_device_ops ip6_tnl_netdev_ops = {
1462 .ndo_init = ip6_tnl_dev_init,
1465 .ndo_uninit = ip6_tnl_dev_uninit, 1463 .ndo_uninit = ip6_tnl_dev_uninit,
1466 .ndo_start_xmit = ip6_tnl_xmit, 1464 .ndo_start_xmit = ip6_tnl_xmit,
1467 .ndo_do_ioctl = ip6_tnl_ioctl, 1465 .ndo_do_ioctl = ip6_tnl_ioctl,
@@ -1546,16 +1544,10 @@ static int __net_init ip6_fb_tnl_dev_init(struct net_device *dev)
1546 struct ip6_tnl *t = netdev_priv(dev); 1544 struct ip6_tnl *t = netdev_priv(dev);
1547 struct net *net = dev_net(dev); 1545 struct net *net = dev_net(dev);
1548 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id); 1546 struct ip6_tnl_net *ip6n = net_generic(net, ip6_tnl_net_id);
1549 int err = ip6_tnl_dev_init_gen(dev);
1550
1551 if (err)
1552 return err;
1553 1547
1554 t->parms.proto = IPPROTO_IPV6; 1548 t->parms.proto = IPPROTO_IPV6;
1555 dev_hold(dev); 1549 dev_hold(dev);
1556 1550
1557 ip6_tnl_link_config(t);
1558
1559 rcu_assign_pointer(ip6n->tnls_wc[0], t); 1551 rcu_assign_pointer(ip6n->tnls_wc[0], t);
1560 return 0; 1552 return 0;
1561} 1553}
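The ip6_tunnel.c change above (and the matching ip6_vti.c and sit.c hunks below) moves device initialization into .ndo_init so register_netdevice() runs it for every creation path. A minimal sketch of that callback pattern with made-up stand-in types, not the real netdevice structures:

        #include <stdio.h>

        struct net_device;

        struct net_device_ops {
                int (*ndo_init)(struct net_device *dev);   /* invoked by register_netdevice() */
        };

        struct net_device {
                const struct net_device_ops *netdev_ops;
                int ready;
        };

        /* stand-in for register_netdevice(): running ->ndo_init here means every
         * creation path (ioctl, netlink, fallback device) gets the same ordering */
        static int register_netdevice_stub(struct net_device *dev)
        {
                if (dev->netdev_ops->ndo_init)
                        return dev->netdev_ops->ndo_init(dev);
                return 0;
        }

        static int tnl_dev_init_stub(struct net_device *dev)
        {
                dev->ready = 1;
                return 0;
        }

        static const struct net_device_ops tnl_ops = { .ndo_init = tnl_dev_init_stub };

        int main(void)
        {
                struct net_device dev = { .netdev_ops = &tnl_ops };
                int rc = register_netdevice_stub(&dev);   /* ->ndo_init runs here */

                printf("%d %d\n", rc, dev.ready);          /* prints 0 1 */
                return 0;
        }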
diff --git a/net/ipv6/ip6_udp_tunnel.c b/net/ipv6/ip6_udp_tunnel.c
index b04ed72c4542..8db6c98fe218 100644
--- a/net/ipv6/ip6_udp_tunnel.c
+++ b/net/ipv6/ip6_udp_tunnel.c
@@ -79,15 +79,13 @@ int udp_tunnel6_xmit_skb(struct socket *sock, struct dst_entry *dst,
79 uh->source = src_port; 79 uh->source = src_port;
80 80
81 uh->len = htons(skb->len); 81 uh->len = htons(skb->len);
82 uh->check = 0;
83 82
84 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); 83 memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
85 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED 84 IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED
86 | IPSKB_REROUTED); 85 | IPSKB_REROUTED);
87 skb_dst_set(skb, dst); 86 skb_dst_set(skb, dst);
88 87
89 udp6_set_csum(udp_get_no_check6_tx(sk), skb, &inet6_sk(sk)->saddr, 88 udp6_set_csum(udp_get_no_check6_tx(sk), skb, saddr, daddr, skb->len);
90 &sk->sk_v6_daddr, skb->len);
91 89
92 __skb_push(skb, sizeof(*ip6h)); 90 __skb_push(skb, sizeof(*ip6h));
93 skb_reset_network_header(skb); 91 skb_reset_network_header(skb);
diff --git a/net/ipv6/ip6_vti.c b/net/ipv6/ip6_vti.c
index d440bb585524..bcda14de7f84 100644
--- a/net/ipv6/ip6_vti.c
+++ b/net/ipv6/ip6_vti.c
@@ -172,10 +172,6 @@ static int vti6_tnl_create2(struct net_device *dev)
172 struct vti6_net *ip6n = net_generic(net, vti6_net_id); 172 struct vti6_net *ip6n = net_generic(net, vti6_net_id);
173 int err; 173 int err;
174 174
175 err = vti6_dev_init(dev);
176 if (err < 0)
177 goto out;
178
179 err = register_netdevice(dev); 175 err = register_netdevice(dev);
180 if (err < 0) 176 if (err < 0)
181 goto out; 177 goto out;
@@ -783,6 +779,7 @@ static int vti6_change_mtu(struct net_device *dev, int new_mtu)
783} 779}
784 780
785static const struct net_device_ops vti6_netdev_ops = { 781static const struct net_device_ops vti6_netdev_ops = {
782 .ndo_init = vti6_dev_init,
786 .ndo_uninit = vti6_dev_uninit, 783 .ndo_uninit = vti6_dev_uninit,
787 .ndo_start_xmit = vti6_tnl_xmit, 784 .ndo_start_xmit = vti6_tnl_xmit,
788 .ndo_do_ioctl = vti6_ioctl, 785 .ndo_do_ioctl = vti6_ioctl,
@@ -852,16 +849,10 @@ static int __net_init vti6_fb_tnl_dev_init(struct net_device *dev)
852 struct ip6_tnl *t = netdev_priv(dev); 849 struct ip6_tnl *t = netdev_priv(dev);
853 struct net *net = dev_net(dev); 850 struct net *net = dev_net(dev);
854 struct vti6_net *ip6n = net_generic(net, vti6_net_id); 851 struct vti6_net *ip6n = net_generic(net, vti6_net_id);
855 int err = vti6_dev_init_gen(dev);
856
857 if (err)
858 return err;
859 852
860 t->parms.proto = IPPROTO_IPV6; 853 t->parms.proto = IPPROTO_IPV6;
861 dev_hold(dev); 854 dev_hold(dev);
862 855
863 vti6_link_config(t);
864
865 rcu_assign_pointer(ip6n->tnls_wc[0], t); 856 rcu_assign_pointer(ip6n->tnls_wc[0], t);
866 return 0; 857 return 0;
867} 858}
@@ -914,6 +905,15 @@ static int vti6_newlink(struct net *src_net, struct net_device *dev,
914 return vti6_tnl_create2(dev); 905 return vti6_tnl_create2(dev);
915} 906}
916 907
908static void vti6_dellink(struct net_device *dev, struct list_head *head)
909{
910 struct net *net = dev_net(dev);
911 struct vti6_net *ip6n = net_generic(net, vti6_net_id);
912
913 if (dev != ip6n->fb_tnl_dev)
914 unregister_netdevice_queue(dev, head);
915}
916
917static int vti6_changelink(struct net_device *dev, struct nlattr *tb[], 917static int vti6_changelink(struct net_device *dev, struct nlattr *tb[],
918 struct nlattr *data[]) 918 struct nlattr *data[])
919{ 919{
@@ -989,6 +989,7 @@ static struct rtnl_link_ops vti6_link_ops __read_mostly = {
989 .setup = vti6_dev_setup, 989 .setup = vti6_dev_setup,
990 .validate = vti6_validate, 990 .validate = vti6_validate,
991 .newlink = vti6_newlink, 991 .newlink = vti6_newlink,
992 .dellink = vti6_dellink,
992 .changelink = vti6_changelink, 993 .changelink = vti6_changelink,
993 .get_size = vti6_get_size, 994 .get_size = vti6_get_size,
994 .fill_info = vti6_fill_info, 995 .fill_info = vti6_fill_info,
@@ -1029,6 +1030,7 @@ static int __net_init vti6_init_net(struct net *net)
1029 if (!ip6n->fb_tnl_dev) 1030 if (!ip6n->fb_tnl_dev)
1030 goto err_alloc_dev; 1031 goto err_alloc_dev;
1031 dev_net_set(ip6n->fb_tnl_dev, net); 1032 dev_net_set(ip6n->fb_tnl_dev, net);
1033 ip6n->fb_tnl_dev->rtnl_link_ops = &vti6_link_ops;
1032 1034
1033 err = vti6_fb_tnl_dev_init(ip6n->fb_tnl_dev); 1035 err = vti6_fb_tnl_dev_init(ip6n->fb_tnl_dev);
1034 if (err < 0) 1036 if (err < 0)
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 0171f08325c3..1a01d79b8698 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -1439,6 +1439,10 @@ reg_pernet_fail:
1439 1439
1440void ip6_mr_cleanup(void) 1440void ip6_mr_cleanup(void)
1441{ 1441{
1442 rtnl_unregister(RTNL_FAMILY_IP6MR, RTM_GETROUTE);
1443#ifdef CONFIG_IPV6_PIMSM_V2
1444 inet6_del_protocol(&pim6_protocol, IPPROTO_PIM);
1445#endif
1442 unregister_netdevice_notifier(&ip6_mr_notifier); 1446 unregister_netdevice_notifier(&ip6_mr_notifier);
1443 unregister_pernet_subsys(&ip6mr_net_ops); 1447 unregister_pernet_subsys(&ip6mr_net_ops);
1444 kmem_cache_destroy(mrt_cachep); 1448 kmem_cache_destroy(mrt_cachep);
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 9648de2b6745..ed2c4e400b46 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1550,7 +1550,7 @@ static void ip6_mc_hdr(struct sock *sk, struct sk_buff *skb,
1550 hdr->daddr = *daddr; 1550 hdr->daddr = *daddr;
1551} 1551}
1552 1552
1553static struct sk_buff *mld_newpack(struct inet6_dev *idev, int size) 1553static struct sk_buff *mld_newpack(struct inet6_dev *idev, unsigned int mtu)
1554{ 1554{
1555 struct net_device *dev = idev->dev; 1555 struct net_device *dev = idev->dev;
1556 struct net *net = dev_net(dev); 1556 struct net *net = dev_net(dev);
@@ -1561,13 +1561,13 @@ static struct sk_buff *mld_newpack(struct inet6_dev *idev, int size)
1561 const struct in6_addr *saddr; 1561 const struct in6_addr *saddr;
1562 int hlen = LL_RESERVED_SPACE(dev); 1562 int hlen = LL_RESERVED_SPACE(dev);
1563 int tlen = dev->needed_tailroom; 1563 int tlen = dev->needed_tailroom;
1564 unsigned int size = mtu + hlen + tlen;
1564 int err; 1565 int err;
1565 u8 ra[8] = { IPPROTO_ICMPV6, 0, 1566 u8 ra[8] = { IPPROTO_ICMPV6, 0,
1566 IPV6_TLV_ROUTERALERT, 2, 0, 0, 1567 IPV6_TLV_ROUTERALERT, 2, 0, 0,
1567 IPV6_TLV_PADN, 0 }; 1568 IPV6_TLV_PADN, 0 };
1568 1569
1569 /* we assume size > sizeof(ra) here */ 1570 /* we assume size > sizeof(ra) here */
1570 size += hlen + tlen;
1571 /* limit our allocations to order-0 page */ 1571 /* limit our allocations to order-0 page */
1572 size = min_t(int, size, SKB_MAX_ORDER(0, 0)); 1572 size = min_t(int, size, SKB_MAX_ORDER(0, 0));
1573 skb = sock_alloc_send_skb(sk, size, 1, &err); 1573 skb = sock_alloc_send_skb(sk, size, 1, &err);
@@ -1576,6 +1576,8 @@ static struct sk_buff *mld_newpack(struct inet6_dev *idev, int size)
1576 return NULL; 1576 return NULL;
1577 1577
1578 skb->priority = TC_PRIO_CONTROL; 1578 skb->priority = TC_PRIO_CONTROL;
1579 skb->reserved_tailroom = skb_end_offset(skb) -
1580 min(mtu, skb_end_offset(skb));
1579 skb_reserve(skb, hlen); 1581 skb_reserve(skb, hlen);
1580 1582
1581 if (__ipv6_get_lladdr(idev, &addr_buf, IFA_F_TENTATIVE)) { 1583 if (__ipv6_get_lladdr(idev, &addr_buf, IFA_F_TENTATIVE)) {
@@ -1690,8 +1692,7 @@ static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc,
1690 return skb; 1692 return skb;
1691} 1693}
1692 1694
1693#define AVAILABLE(skb) ((skb) ? ((skb)->dev ? (skb)->dev->mtu - (skb)->len : \ 1695#define AVAILABLE(skb) ((skb) ? skb_availroom(skb) : 0)
1694 skb_tailroom(skb)) : 0)
1695 1696
1696static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc, 1697static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc,
1697 int type, int gdeleted, int sdeleted, int crsend) 1698 int type, int gdeleted, int sdeleted, int crsend)
diff --git a/net/ipv6/netfilter/nf_reject_ipv6.c b/net/ipv6/netfilter/nf_reject_ipv6.c
index 5f5f0438d74d..015eb8a80766 100644
--- a/net/ipv6/netfilter/nf_reject_ipv6.c
+++ b/net/ipv6/netfilter/nf_reject_ipv6.c
@@ -5,121 +5,109 @@
5 * it under the terms of the GNU General Public License version 2 as 5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation. 6 * published by the Free Software Foundation.
7 */ 7 */
8
9#include <linux/module.h>
8#include <net/ipv6.h> 10#include <net/ipv6.h>
9#include <net/ip6_route.h> 11#include <net/ip6_route.h>
10#include <net/ip6_fib.h> 12#include <net/ip6_fib.h>
11#include <net/ip6_checksum.h> 13#include <net/ip6_checksum.h>
12#include <linux/netfilter_ipv6.h> 14#include <linux/netfilter_ipv6.h>
15#include <net/netfilter/ipv6/nf_reject.h>
13 16
14void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook) 17const struct tcphdr *nf_reject_ip6_tcphdr_get(struct sk_buff *oldskb,
18 struct tcphdr *otcph,
19 unsigned int *otcplen, int hook)
15{ 20{
16 struct sk_buff *nskb;
17 struct tcphdr otcph, *tcph;
18 unsigned int otcplen, hh_len;
19 int tcphoff, needs_ack;
20 const struct ipv6hdr *oip6h = ipv6_hdr(oldskb); 21 const struct ipv6hdr *oip6h = ipv6_hdr(oldskb);
21 struct ipv6hdr *ip6h;
22#define DEFAULT_TOS_VALUE 0x0U
23 const __u8 tclass = DEFAULT_TOS_VALUE;
24 struct dst_entry *dst = NULL;
25 u8 proto; 22 u8 proto;
26 __be16 frag_off; 23 __be16 frag_off;
27 struct flowi6 fl6; 24 int tcphoff;
28
29 if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) ||
30 (!(ipv6_addr_type(&oip6h->daddr) & IPV6_ADDR_UNICAST))) {
31 pr_debug("addr is not unicast.\n");
32 return;
33 }
34 25
35 proto = oip6h->nexthdr; 26 proto = oip6h->nexthdr;
36 tcphoff = ipv6_skip_exthdr(oldskb, ((u8*)(oip6h+1) - oldskb->data), &proto, &frag_off); 27 tcphoff = ipv6_skip_exthdr(oldskb, ((u8*)(oip6h+1) - oldskb->data),
28 &proto, &frag_off);
37 29
38 if ((tcphoff < 0) || (tcphoff > oldskb->len)) { 30 if ((tcphoff < 0) || (tcphoff > oldskb->len)) {
39 pr_debug("Cannot get TCP header.\n"); 31 pr_debug("Cannot get TCP header.\n");
40 return; 32 return NULL;
41 } 33 }
42 34
43 otcplen = oldskb->len - tcphoff; 35 *otcplen = oldskb->len - tcphoff;
44 36
45 /* IP header checks: fragment, too short. */ 37 /* IP header checks: fragment, too short. */
46 if (proto != IPPROTO_TCP || otcplen < sizeof(struct tcphdr)) { 38 if (proto != IPPROTO_TCP || *otcplen < sizeof(struct tcphdr)) {
47 pr_debug("proto(%d) != IPPROTO_TCP, " 39 pr_debug("proto(%d) != IPPROTO_TCP or too short (len = %d)\n",
48 "or too short. otcplen = %d\n", 40 proto, *otcplen);
49 proto, otcplen); 41 return NULL;
50 return;
51 } 42 }
52 43
53 if (skb_copy_bits(oldskb, tcphoff, &otcph, sizeof(struct tcphdr))) 44 otcph = skb_header_pointer(oldskb, tcphoff, sizeof(struct tcphdr),
54 BUG(); 45 otcph);
46 if (otcph == NULL)
47 return NULL;
55 48
56 /* No RST for RST. */ 49 /* No RST for RST. */
57 if (otcph.rst) { 50 if (otcph->rst) {
58 pr_debug("RST is set\n"); 51 pr_debug("RST is set\n");
59 return; 52 return NULL;
60 } 53 }
61 54
62 /* Check checksum. */ 55 /* Check checksum. */
63 if (nf_ip6_checksum(oldskb, hook, tcphoff, IPPROTO_TCP)) { 56 if (nf_ip6_checksum(oldskb, hook, tcphoff, IPPROTO_TCP)) {
64 pr_debug("TCP checksum is invalid\n"); 57 pr_debug("TCP checksum is invalid\n");
65 return; 58 return NULL;
66 } 59 }
67 60
68 memset(&fl6, 0, sizeof(fl6)); 61 return otcph;
69 fl6.flowi6_proto = IPPROTO_TCP; 62}
70 fl6.saddr = oip6h->daddr; 63EXPORT_SYMBOL_GPL(nf_reject_ip6_tcphdr_get);
71 fl6.daddr = oip6h->saddr;
72 fl6.fl6_sport = otcph.dest;
73 fl6.fl6_dport = otcph.source;
74 security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
75 dst = ip6_route_output(net, NULL, &fl6);
76 if (dst == NULL || dst->error) {
77 dst_release(dst);
78 return;
79 }
80 dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
81 if (IS_ERR(dst))
82 return;
83
84 hh_len = (dst->dev->hard_header_len + 15)&~15;
85 nskb = alloc_skb(hh_len + 15 + dst->header_len + sizeof(struct ipv6hdr)
86 + sizeof(struct tcphdr) + dst->trailer_len,
87 GFP_ATOMIC);
88
89 if (!nskb) {
90 net_dbg_ratelimited("cannot alloc skb\n");
91 dst_release(dst);
92 return;
93 }
94
95 skb_dst_set(nskb, dst);
96 64
97 skb_reserve(nskb, hh_len + dst->header_len); 65struct ipv6hdr *nf_reject_ip6hdr_put(struct sk_buff *nskb,
66 const struct sk_buff *oldskb,
67 __be16 protocol, int hoplimit)
68{
69 struct ipv6hdr *ip6h;
70 const struct ipv6hdr *oip6h = ipv6_hdr(oldskb);
71#define DEFAULT_TOS_VALUE 0x0U
72 const __u8 tclass = DEFAULT_TOS_VALUE;
98 73
99 skb_put(nskb, sizeof(struct ipv6hdr)); 74 skb_put(nskb, sizeof(struct ipv6hdr));
100 skb_reset_network_header(nskb); 75 skb_reset_network_header(nskb);
101 ip6h = ipv6_hdr(nskb); 76 ip6h = ipv6_hdr(nskb);
102 ip6_flow_hdr(ip6h, tclass, 0); 77 ip6_flow_hdr(ip6h, tclass, 0);
103 ip6h->hop_limit = ip6_dst_hoplimit(dst); 78 ip6h->hop_limit = hoplimit;
104 ip6h->nexthdr = IPPROTO_TCP; 79 ip6h->nexthdr = protocol;
105 ip6h->saddr = oip6h->daddr; 80 ip6h->saddr = oip6h->daddr;
106 ip6h->daddr = oip6h->saddr; 81 ip6h->daddr = oip6h->saddr;
107 82
83 nskb->protocol = htons(ETH_P_IPV6);
84
85 return ip6h;
86}
87EXPORT_SYMBOL_GPL(nf_reject_ip6hdr_put);
88
89void nf_reject_ip6_tcphdr_put(struct sk_buff *nskb,
90 const struct sk_buff *oldskb,
91 const struct tcphdr *oth, unsigned int otcplen)
92{
93 struct tcphdr *tcph;
94 int needs_ack;
95
108 skb_reset_transport_header(nskb); 96 skb_reset_transport_header(nskb);
109 tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr)); 97 tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr));
110 /* Truncate to length (no data) */ 98 /* Truncate to length (no data) */
111 tcph->doff = sizeof(struct tcphdr)/4; 99 tcph->doff = sizeof(struct tcphdr)/4;
112 tcph->source = otcph.dest; 100 tcph->source = oth->dest;
113 tcph->dest = otcph.source; 101 tcph->dest = oth->source;
114 102
115 if (otcph.ack) { 103 if (oth->ack) {
116 needs_ack = 0; 104 needs_ack = 0;
117 tcph->seq = otcph.ack_seq; 105 tcph->seq = oth->ack_seq;
118 tcph->ack_seq = 0; 106 tcph->ack_seq = 0;
119 } else { 107 } else {
120 needs_ack = 1; 108 needs_ack = 1;
121 tcph->ack_seq = htonl(ntohl(otcph.seq) + otcph.syn + otcph.fin 109 tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin +
122 + otcplen - (otcph.doff<<2)); 110 otcplen - (oth->doff<<2));
123 tcph->seq = 0; 111 tcph->seq = 0;
124 } 112 }
125 113
@@ -137,6 +125,63 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
137 sizeof(struct tcphdr), IPPROTO_TCP, 125 sizeof(struct tcphdr), IPPROTO_TCP,
138 csum_partial(tcph, 126 csum_partial(tcph,
139 sizeof(struct tcphdr), 0)); 127 sizeof(struct tcphdr), 0));
128}
129EXPORT_SYMBOL_GPL(nf_reject_ip6_tcphdr_put);
130
131void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
132{
133 struct sk_buff *nskb;
134 struct tcphdr _otcph;
135 const struct tcphdr *otcph;
136 unsigned int otcplen, hh_len;
137 const struct ipv6hdr *oip6h = ipv6_hdr(oldskb);
138 struct ipv6hdr *ip6h;
139 struct dst_entry *dst = NULL;
140 struct flowi6 fl6;
141
142 if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) ||
143 (!(ipv6_addr_type(&oip6h->daddr) & IPV6_ADDR_UNICAST))) {
144 pr_debug("addr is not unicast.\n");
145 return;
146 }
147
148 otcph = nf_reject_ip6_tcphdr_get(oldskb, &_otcph, &otcplen, hook);
149 if (!otcph)
150 return;
151
152 memset(&fl6, 0, sizeof(fl6));
153 fl6.flowi6_proto = IPPROTO_TCP;
154 fl6.saddr = oip6h->daddr;
155 fl6.daddr = oip6h->saddr;
156 fl6.fl6_sport = otcph->dest;
157 fl6.fl6_dport = otcph->source;
158 security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
159 dst = ip6_route_output(net, NULL, &fl6);
160 if (dst == NULL || dst->error) {
161 dst_release(dst);
162 return;
163 }
164 dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
165 if (IS_ERR(dst))
166 return;
167
168 hh_len = (dst->dev->hard_header_len + 15)&~15;
169 nskb = alloc_skb(hh_len + 15 + dst->header_len + sizeof(struct ipv6hdr)
170 + sizeof(struct tcphdr) + dst->trailer_len,
171 GFP_ATOMIC);
172
173 if (!nskb) {
174 net_dbg_ratelimited("cannot alloc skb\n");
175 dst_release(dst);
176 return;
177 }
178
179 skb_dst_set(nskb, dst);
180
181 skb_reserve(nskb, hh_len + dst->header_len);
182 ip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
183 ip6_dst_hoplimit(dst));
184 nf_reject_ip6_tcphdr_put(nskb, oldskb, otcph, otcplen);
140 185
141 nf_ct_attach(nskb, oldskb); 186 nf_ct_attach(nskb, oldskb);
142 187
@@ -161,3 +206,5 @@ void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
161 ip6_local_out(nskb); 206 ip6_local_out(nskb);
162} 207}
163EXPORT_SYMBOL_GPL(nf_send_reset6); 208EXPORT_SYMBOL_GPL(nf_send_reset6);
209
210MODULE_LICENSE("GPL");
diff --git a/net/ipv6/netfilter/nft_masq_ipv6.c b/net/ipv6/netfilter/nft_masq_ipv6.c
index 556262f40761..529c119cbb14 100644
--- a/net/ipv6/netfilter/nft_masq_ipv6.c
+++ b/net/ipv6/netfilter/nft_masq_ipv6.c
@@ -25,6 +25,7 @@ static void nft_masq_ipv6_eval(const struct nft_expr *expr,
25 struct nf_nat_range range; 25 struct nf_nat_range range;
26 unsigned int verdict; 26 unsigned int verdict;
27 27
28 memset(&range, 0, sizeof(range));
28 range.flags = priv->flags; 29 range.flags = priv->flags;
29 30
30 verdict = nf_nat_masquerade_ipv6(pkt->skb, &range, pkt->out); 31 verdict = nf_nat_masquerade_ipv6(pkt->skb, &range, pkt->out);
@@ -39,6 +40,7 @@ static const struct nft_expr_ops nft_masq_ipv6_ops = {
39 .eval = nft_masq_ipv6_eval, 40 .eval = nft_masq_ipv6_eval,
40 .init = nft_masq_init, 41 .init = nft_masq_init,
41 .dump = nft_masq_dump, 42 .dump = nft_masq_dump,
43 .validate = nft_masq_validate,
42}; 44};
43 45
44static struct nft_expr_type nft_masq_ipv6_type __read_mostly = { 46static struct nft_expr_type nft_masq_ipv6_type __read_mostly = {
diff --git a/net/ipv6/output_core.c b/net/ipv6/output_core.c
index fc24c390af05..97f41a3e68d9 100644
--- a/net/ipv6/output_core.c
+++ b/net/ipv6/output_core.c
@@ -3,11 +3,45 @@
3 * not configured or static. These functions are needed by GSO/GRO implementation. 3 * not configured or static. These functions are needed by GSO/GRO implementation.
4 */ 4 */
5#include <linux/export.h> 5#include <linux/export.h>
6#include <net/ip.h>
6#include <net/ipv6.h> 7#include <net/ipv6.h>
7#include <net/ip6_fib.h> 8#include <net/ip6_fib.h>
8#include <net/addrconf.h> 9#include <net/addrconf.h>
9#include <net/secure_seq.h> 10#include <net/secure_seq.h>
10 11
12/* This function exists only for tap drivers that must support broken
13 * clients requesting UFO without specifying an IPv6 fragment ID.
14 *
15 * This is similar to ipv6_select_ident() but we use an independent hash
16 * seed to limit information leakage.
17 *
18 * The network header must be set before calling this.
19 */
20void ipv6_proxy_select_ident(struct sk_buff *skb)
21{
22 static u32 ip6_proxy_idents_hashrnd __read_mostly;
23 struct in6_addr buf[2];
24 struct in6_addr *addrs;
25 u32 hash, id;
26
27 addrs = skb_header_pointer(skb,
28 skb_network_offset(skb) +
29 offsetof(struct ipv6hdr, saddr),
30 sizeof(buf), buf);
31 if (!addrs)
32 return;
33
34 net_get_random_once(&ip6_proxy_idents_hashrnd,
35 sizeof(ip6_proxy_idents_hashrnd));
36
37 hash = __ipv6_addr_jhash(&addrs[1], ip6_proxy_idents_hashrnd);
38 hash = __ipv6_addr_jhash(&addrs[0], hash);
39
40 id = ip_idents_reserve(hash, 1);
41 skb_shinfo(skb)->ip6_frag_id = htonl(id);
42}
43EXPORT_SYMBOL_GPL(ipv6_proxy_select_ident);
44
11int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr) 45int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
12{ 46{
13 u16 offset = sizeof(struct ipv6hdr); 47 u16 offset = sizeof(struct ipv6hdr);
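ipv6_proxy_select_ident() above derives a fragment ID by hashing both addresses with a private random seed, so the generated ID stream does not leak the seed used by ipv6_select_ident(). A rough user-space sketch of the idea, substituting FNV-1a for the kernel's jhash and a fixed constant for net_get_random_once() (all names here are illustrative):

        #include <stdint.h>
        #include <stdio.h>

        /* FNV-1a stand-in for the kernel's jhash */
        static uint32_t hash32(const void *data, unsigned int len, uint32_t seed)
        {
                const uint8_t *p = data;
                uint32_t h = 2166136261u ^ seed;

                while (len--) {
                        h ^= *p++;
                        h *= 16777619u;
                }
                return h;
        }

        int main(void)
        {
                uint8_t saddr[16] = { 0x20, 0x01, 0x0d, 0xb8, 1 };
                uint8_t daddr[16] = { 0x20, 0x01, 0x0d, 0xb8, 2 };
                uint32_t proxy_seed = 0x12345678;   /* ~ ip6_proxy_idents_hashrnd */
                uint32_t hash, id;

                hash = hash32(daddr, sizeof(daddr), proxy_seed);
                hash = hash32(saddr, sizeof(saddr), hash);
                id = hash;                          /* kernel then reserves via ip_idents_reserve() */

                printf("frag id: %08x\n", id);
                return 0;
        }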
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 58e5b4710127..a24557a1c1d8 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -195,10 +195,8 @@ static int ipip6_tunnel_create(struct net_device *dev)
195 struct sit_net *sitn = net_generic(net, sit_net_id); 195 struct sit_net *sitn = net_generic(net, sit_net_id);
196 int err; 196 int err;
197 197
198 err = ipip6_tunnel_init(dev); 198 memcpy(dev->dev_addr, &t->parms.iph.saddr, 4);
199 if (err < 0) 199 memcpy(dev->broadcast, &t->parms.iph.daddr, 4);
200 goto out;
201 ipip6_tunnel_clone_6rd(dev, sitn);
202 200
203 if ((__force u16)t->parms.i_flags & SIT_ISATAP) 201 if ((__force u16)t->parms.i_flags & SIT_ISATAP)
204 dev->priv_flags |= IFF_ISATAP; 202 dev->priv_flags |= IFF_ISATAP;
@@ -207,7 +205,8 @@ static int ipip6_tunnel_create(struct net_device *dev)
207 if (err < 0) 205 if (err < 0)
208 goto out; 206 goto out;
209 207
210 strcpy(t->parms.name, dev->name); 208 ipip6_tunnel_clone_6rd(dev, sitn);
209
211 dev->rtnl_link_ops = &sit_link_ops; 210 dev->rtnl_link_ops = &sit_link_ops;
212 211
213 dev_hold(dev); 212 dev_hold(dev);
@@ -1330,6 +1329,7 @@ static int ipip6_tunnel_change_mtu(struct net_device *dev, int new_mtu)
1330} 1329}
1331 1330
1332static const struct net_device_ops ipip6_netdev_ops = { 1331static const struct net_device_ops ipip6_netdev_ops = {
1332 .ndo_init = ipip6_tunnel_init,
1333 .ndo_uninit = ipip6_tunnel_uninit, 1333 .ndo_uninit = ipip6_tunnel_uninit,
1334 .ndo_start_xmit = sit_tunnel_xmit, 1334 .ndo_start_xmit = sit_tunnel_xmit,
1335 .ndo_do_ioctl = ipip6_tunnel_ioctl, 1335 .ndo_do_ioctl = ipip6_tunnel_ioctl,
@@ -1378,9 +1378,7 @@ static int ipip6_tunnel_init(struct net_device *dev)
1378 1378
1379 tunnel->dev = dev; 1379 tunnel->dev = dev;
1380 tunnel->net = dev_net(dev); 1380 tunnel->net = dev_net(dev);
1381 1381 strcpy(tunnel->parms.name, dev->name);
1382 memcpy(dev->dev_addr, &tunnel->parms.iph.saddr, 4);
1383 memcpy(dev->broadcast, &tunnel->parms.iph.daddr, 4);
1384 1382
1385 ipip6_tunnel_bind_dev(dev); 1383 ipip6_tunnel_bind_dev(dev);
1386 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); 1384 dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
@@ -1405,7 +1403,6 @@ static int __net_init ipip6_fb_tunnel_init(struct net_device *dev)
1405 1403
1406 tunnel->dev = dev; 1404 tunnel->dev = dev;
1407 tunnel->net = dev_net(dev); 1405 tunnel->net = dev_net(dev);
1408 strcpy(tunnel->parms.name, dev->name);
1409 1406
1410 iph->version = 4; 1407 iph->version = 4;
1411 iph->protocol = IPPROTO_IPV6; 1408 iph->protocol = IPPROTO_IPV6;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 831495529b82..dc495ae2ead0 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -200,8 +200,6 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
200 sk->sk_v6_daddr = usin->sin6_addr; 200 sk->sk_v6_daddr = usin->sin6_addr;
201 np->flow_label = fl6.flowlabel; 201 np->flow_label = fl6.flowlabel;
202 202
203 ip6_set_txhash(sk);
204
205 /* 203 /*
206 * TCP over IPv4 204 * TCP over IPv4
207 */ 205 */
@@ -297,6 +295,8 @@ static int tcp_v6_connect(struct sock *sk, struct sockaddr *uaddr,
297 if (err) 295 if (err)
298 goto late_failure; 296 goto late_failure;
299 297
298 ip6_set_txhash(sk);
299
300 if (!tp->write_seq && likely(!tp->repair)) 300 if (!tp->write_seq && likely(!tp->repair))
301 tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32, 301 tp->write_seq = secure_tcpv6_sequence_number(np->saddr.s6_addr32,
302 sk->sk_v6_daddr.s6_addr32, 302 sk->sk_v6_daddr.s6_addr32,
@@ -903,7 +903,10 @@ static void tcp_v6_send_reset(struct sock *sk, struct sk_buff *skb)
903 if (th->rst) 903 if (th->rst)
904 return; 904 return;
905 905
906 if (!ipv6_unicast_destination(skb)) 906 /* If sk not NULL, it means we did a successful lookup and incoming
907 * route had to be correct. prequeue might have dropped our dst.
908 */
909 if (!sk && !ipv6_unicast_destination(skb))
907 return; 910 return;
908 911
909#ifdef CONFIG_TCP_MD5SIG 912#ifdef CONFIG_TCP_MD5SIG
diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
index ac49f84fe2c3..5f983644373a 100644
--- a/net/ipv6/xfrm6_policy.c
+++ b/net/ipv6/xfrm6_policy.c
@@ -170,8 +170,10 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
170 case IPPROTO_DCCP: 170 case IPPROTO_DCCP:
171 if (!onlyproto && (nh + offset + 4 < skb->data || 171 if (!onlyproto && (nh + offset + 4 < skb->data ||
172 pskb_may_pull(skb, nh + offset + 4 - skb->data))) { 172 pskb_may_pull(skb, nh + offset + 4 - skb->data))) {
173 __be16 *ports = (__be16 *)exthdr; 173 __be16 *ports;
174 174
175 nh = skb_network_header(skb);
176 ports = (__be16 *)(nh + offset);
175 fl6->fl6_sport = ports[!!reverse]; 177 fl6->fl6_sport = ports[!!reverse];
176 fl6->fl6_dport = ports[!reverse]; 178 fl6->fl6_dport = ports[!reverse];
177 } 179 }
@@ -180,8 +182,10 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
180 182
181 case IPPROTO_ICMPV6: 183 case IPPROTO_ICMPV6:
182 if (!onlyproto && pskb_may_pull(skb, nh + offset + 2 - skb->data)) { 184 if (!onlyproto && pskb_may_pull(skb, nh + offset + 2 - skb->data)) {
183 u8 *icmp = (u8 *)exthdr; 185 u8 *icmp;
184 186
187 nh = skb_network_header(skb);
188 icmp = (u8 *)(nh + offset);
185 fl6->fl6_icmp_type = icmp[0]; 189 fl6->fl6_icmp_type = icmp[0];
186 fl6->fl6_icmp_code = icmp[1]; 190 fl6->fl6_icmp_code = icmp[1];
187 } 191 }
@@ -192,8 +196,9 @@ _decode_session6(struct sk_buff *skb, struct flowi *fl, int reverse)
192 case IPPROTO_MH: 196 case IPPROTO_MH:
193 if (!onlyproto && pskb_may_pull(skb, nh + offset + 3 - skb->data)) { 197 if (!onlyproto && pskb_may_pull(skb, nh + offset + 3 - skb->data)) {
194 struct ip6_mh *mh; 198 struct ip6_mh *mh;
195 mh = (struct ip6_mh *)exthdr;
196 199
200 nh = skb_network_header(skb);
201 mh = (struct ip6_mh *)(nh + offset);
197 fl6->fl6_mh_type = mh->ip6mh_type; 202 fl6->fl6_mh_type = mh->ip6mh_type;
198 } 203 }
199 fl6->flowi6_proto = nexthdr; 204 fl6->flowi6_proto = nexthdr;
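The _decode_session6() changes above re-read skb_network_header() after pskb_may_pull(), because pulling may reallocate the skb data and leave the old exthdr pointer dangling. The same hazard in plain C, with realloc() standing in for the pull:

        #include <stdio.h>
        #include <stdlib.h>
        #include <string.h>

        int main(void)
        {
                char *buf = malloc(8);
                char *grown, *p;

                strcpy(buf, "abcdefg");
                p = buf + 4;                        /* pointer derived before a possible reallocation */

                grown = realloc(buf, 1 << 20);      /* may move the buffer, like pskb_may_pull() */
                if (!grown) {
                        free(buf);
                        return 1;
                }
                buf = grown;

                /* 'p' may now dangle; recompute it from the (possibly new) base, as the
                 * patch does by re-reading skb_network_header(skb) after the pull */
                p = buf + 4;
                printf("%c\n", *p);                 /* prints 'e' */
                free(buf);
                return 0;
        }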
diff --git a/net/ipx/af_ipx.c b/net/ipx/af_ipx.c
index 91729b807c7d..1b095ca37aa4 100644
--- a/net/ipx/af_ipx.c
+++ b/net/ipx/af_ipx.c
@@ -1764,6 +1764,7 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock,
1764 struct ipxhdr *ipx = NULL; 1764 struct ipxhdr *ipx = NULL;
1765 struct sk_buff *skb; 1765 struct sk_buff *skb;
1766 int copied, rc; 1766 int copied, rc;
1767 bool locked = true;
1767 1768
1768 lock_sock(sk); 1769 lock_sock(sk);
1769 /* put the autobinding in */ 1770 /* put the autobinding in */
@@ -1790,6 +1791,8 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock,
1790 if (sock_flag(sk, SOCK_ZAPPED)) 1791 if (sock_flag(sk, SOCK_ZAPPED))
1791 goto out; 1792 goto out;
1792 1793
1794 release_sock(sk);
1795 locked = false;
1793 skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, 1796 skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT,
1794 flags & MSG_DONTWAIT, &rc); 1797 flags & MSG_DONTWAIT, &rc);
1795 if (!skb) { 1798 if (!skb) {
@@ -1826,7 +1829,8 @@ static int ipx_recvmsg(struct kiocb *iocb, struct socket *sock,
1826out_free: 1829out_free:
1827 skb_free_datagram(sk, skb); 1830 skb_free_datagram(sk, skb);
1828out: 1831out:
1829 release_sock(sk); 1832 if (locked)
1833 release_sock(sk);
1830 return rc; 1834 return rc;
1831} 1835}
1832 1836
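The af_ipx.c hunk above drops the socket lock before the blocking skb_recv_datagram() call and tracks whether it is still held, so the shared exit path only unlocks when needed. A minimal pthread sketch of that shape (the bound check and the sleep are placeholders):

        #include <pthread.h>
        #include <stdbool.h>
        #include <stdio.h>
        #include <unistd.h>

        static pthread_mutex_t sk_lock = PTHREAD_MUTEX_INITIALIZER;

        static int recv_one(bool bound)
        {
                bool locked = true;
                int rc = 0;

                pthread_mutex_lock(&sk_lock);
                if (!bound) {                       /* early error path, still holding the lock */
                        rc = -1;
                        goto out;
                }

                pthread_mutex_unlock(&sk_lock);     /* drop the lock before the blocking wait */
                locked = false;
                usleep(1000);                       /* stands in for skb_recv_datagram() */

        out:
                if (locked)                         /* as in the patch: unlock only if still held */
                        pthread_mutex_unlock(&sk_lock);
                return rc;
        }

        int main(void)
        {
                printf("%d %d\n", recv_one(true), recv_one(false));   /* prints 0 -1 */
                return 0;
        }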
diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c
index 92fafd485deb..3f3a6cbdceb7 100644
--- a/net/irda/af_irda.c
+++ b/net/irda/af_irda.c
@@ -1064,8 +1064,6 @@ static int irda_connect(struct socket *sock, struct sockaddr *uaddr,
1064 1064
1065 if (sk->sk_state != TCP_ESTABLISHED) { 1065 if (sk->sk_state != TCP_ESTABLISHED) {
1066 sock->state = SS_UNCONNECTED; 1066 sock->state = SS_UNCONNECTED;
1067 if (sk->sk_prot->disconnect(sk, flags))
1068 sock->state = SS_DISCONNECTING;
1069 err = sock_error(sk); 1067 err = sock_error(sk);
1070 if (!err) 1068 if (!err)
1071 err = -ECONNRESET; 1069 err = -ECONNRESET;
diff --git a/net/mac80211/aes_ccm.c b/net/mac80211/aes_ccm.c
index ec24378caaaf..09d9caaec591 100644
--- a/net/mac80211/aes_ccm.c
+++ b/net/mac80211/aes_ccm.c
@@ -53,6 +53,9 @@ int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
53 __aligned(__alignof__(struct aead_request)); 53 __aligned(__alignof__(struct aead_request));
54 struct aead_request *aead_req = (void *) aead_req_data; 54 struct aead_request *aead_req = (void *) aead_req_data;
55 55
56 if (data_len == 0)
57 return -EINVAL;
58
56 memset(aead_req, 0, sizeof(aead_req_data)); 59 memset(aead_req, 0, sizeof(aead_req_data));
57 60
58 sg_init_one(&pt, data, data_len); 61 sg_init_one(&pt, data, data_len);
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index fb6a1502b6df..343da1e35025 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -3458,7 +3458,7 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
3458 rcu_read_lock(); 3458 rcu_read_lock();
3459 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf); 3459 chanctx_conf = rcu_dereference(sdata->vif.chanctx_conf);
3460 if (chanctx_conf) { 3460 if (chanctx_conf) {
3461 *chandef = chanctx_conf->def; 3461 *chandef = sdata->vif.bss_conf.chandef;
3462 ret = 0; 3462 ret = 0;
3463 } else if (local->open_count > 0 && 3463 } else if (local->open_count > 0 &&
3464 local->open_count == local->monitors && 3464 local->open_count == local->monitors &&
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index 56b53571c807..509bc157ce55 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -805,7 +805,7 @@ ieee80211_ibss_process_chanswitch(struct ieee80211_sub_if_data *sdata,
805 805
806 memset(&params, 0, sizeof(params)); 806 memset(&params, 0, sizeof(params));
807 memset(&csa_ie, 0, sizeof(csa_ie)); 807 memset(&csa_ie, 0, sizeof(csa_ie));
808 err = ieee80211_parse_ch_switch_ie(sdata, elems, beacon, 808 err = ieee80211_parse_ch_switch_ie(sdata, elems,
809 ifibss->chandef.chan->band, 809 ifibss->chandef.chan->band,
810 sta_flags, ifibss->bssid, &csa_ie); 810 sta_flags, ifibss->bssid, &csa_ie);
811 /* can't switch to destination channel, fail */ 811 /* can't switch to destination channel, fail */
diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
index c2aaec4dfcf0..8c68da30595d 100644
--- a/net/mac80211/ieee80211_i.h
+++ b/net/mac80211/ieee80211_i.h
@@ -1642,7 +1642,6 @@ void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
1642 * ieee80211_parse_ch_switch_ie - parses channel switch IEs 1642 * ieee80211_parse_ch_switch_ie - parses channel switch IEs
1643 * @sdata: the sdata of the interface which has received the frame 1643 * @sdata: the sdata of the interface which has received the frame
1644 * @elems: parsed 802.11 elements received with the frame 1644 * @elems: parsed 802.11 elements received with the frame
1645 * @beacon: indicates if the frame was a beacon or probe response
1646 * @current_band: indicates the current band 1645 * @current_band: indicates the current band
1647 * @sta_flags: contains information about own capabilities and restrictions 1646 * @sta_flags: contains information about own capabilities and restrictions
1648 * to decide which channel switch announcements can be accepted. Only the 1647 * to decide which channel switch announcements can be accepted. Only the
@@ -1656,7 +1655,7 @@ void ieee80211_process_measurement_req(struct ieee80211_sub_if_data *sdata,
1656 * Return: 0 on success, <0 on error and >0 if there is nothing to parse. 1655 * Return: 0 on success, <0 on error and >0 if there is nothing to parse.
1657 */ 1656 */
1658int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata, 1657int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
1659 struct ieee802_11_elems *elems, bool beacon, 1658 struct ieee802_11_elems *elems,
1660 enum ieee80211_band current_band, 1659 enum ieee80211_band current_band,
1661 u32 sta_flags, u8 *bssid, 1660 u32 sta_flags, u8 *bssid,
1662 struct ieee80211_csa_ie *csa_ie); 1661 struct ieee80211_csa_ie *csa_ie);
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index af237223a8cd..653f5eb07a27 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -766,10 +766,12 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
766 int i, flushed; 766 int i, flushed;
767 struct ps_data *ps; 767 struct ps_data *ps;
768 struct cfg80211_chan_def chandef; 768 struct cfg80211_chan_def chandef;
769 bool cancel_scan;
769 770
770 clear_bit(SDATA_STATE_RUNNING, &sdata->state); 771 clear_bit(SDATA_STATE_RUNNING, &sdata->state);
771 772
772 if (rcu_access_pointer(local->scan_sdata) == sdata) 773 cancel_scan = rcu_access_pointer(local->scan_sdata) == sdata;
774 if (cancel_scan)
773 ieee80211_scan_cancel(local); 775 ieee80211_scan_cancel(local);
774 776
775 /* 777 /*
@@ -898,6 +900,8 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
898 list_del(&sdata->u.vlan.list); 900 list_del(&sdata->u.vlan.list);
899 mutex_unlock(&local->mtx); 901 mutex_unlock(&local->mtx);
900 RCU_INIT_POINTER(sdata->vif.chanctx_conf, NULL); 902 RCU_INIT_POINTER(sdata->vif.chanctx_conf, NULL);
903 /* see comment in the default case below */
904 ieee80211_free_keys(sdata, true);
901 /* no need to tell driver */ 905 /* no need to tell driver */
902 break; 906 break;
903 case NL80211_IFTYPE_MONITOR: 907 case NL80211_IFTYPE_MONITOR:
@@ -923,17 +927,16 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
923 /* 927 /*
924 * When we get here, the interface is marked down. 928 * When we get here, the interface is marked down.
925 * Free the remaining keys, if there are any 929 * Free the remaining keys, if there are any
926 * (shouldn't be, except maybe in WDS mode?) 930 * (which can happen in AP mode if userspace sets
931 * keys before the interface is operating, and maybe
932 * also in WDS mode)
927 * 933 *
928 * Force the key freeing to always synchronize_net() 934 * Force the key freeing to always synchronize_net()
929 * to wait for the RX path in case it is using this 935 * to wait for the RX path in case it is using this
930 * interface enqueuing frames * at this very time on 936 * interface enqueuing frames at this very time on
931 * another CPU. 937 * another CPU.
932 */ 938 */
933 ieee80211_free_keys(sdata, true); 939 ieee80211_free_keys(sdata, true);
934
935 /* fall through */
936 case NL80211_IFTYPE_AP:
937 skb_queue_purge(&sdata->skb_queue); 940 skb_queue_purge(&sdata->skb_queue);
938 } 941 }
939 942
@@ -991,6 +994,9 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
991 994
992 ieee80211_recalc_ps(local, -1); 995 ieee80211_recalc_ps(local, -1);
993 996
997 if (cancel_scan)
998 flush_delayed_work(&local->scan_work);
999
994 if (local->open_count == 0) { 1000 if (local->open_count == 0) {
995 ieee80211_stop_device(local); 1001 ieee80211_stop_device(local);
996 1002
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index e9f99c1e3fad..0c8b2a77d312 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -874,7 +874,7 @@ ieee80211_mesh_process_chnswitch(struct ieee80211_sub_if_data *sdata,
874 874
875 memset(&params, 0, sizeof(params)); 875 memset(&params, 0, sizeof(params));
876 memset(&csa_ie, 0, sizeof(csa_ie)); 876 memset(&csa_ie, 0, sizeof(csa_ie));
877 err = ieee80211_parse_ch_switch_ie(sdata, elems, beacon, band, 877 err = ieee80211_parse_ch_switch_ie(sdata, elems, band,
878 sta_flags, sdata->vif.addr, 878 sta_flags, sdata->vif.addr,
879 &csa_ie); 879 &csa_ie);
880 if (err < 0) 880 if (err < 0)
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 2de88704278b..93af0f1c9d99 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1072,7 +1072,7 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
1072 1072
1073 current_band = cbss->channel->band; 1073 current_band = cbss->channel->band;
1074 memset(&csa_ie, 0, sizeof(csa_ie)); 1074 memset(&csa_ie, 0, sizeof(csa_ie));
1075 res = ieee80211_parse_ch_switch_ie(sdata, elems, beacon, current_band, 1075 res = ieee80211_parse_ch_switch_ie(sdata, elems, current_band,
1076 ifmgd->flags, 1076 ifmgd->flags,
1077 ifmgd->associated->bssid, &csa_ie); 1077 ifmgd->associated->bssid, &csa_ie);
1078 if (res < 0) 1078 if (res < 0)
@@ -1168,7 +1168,8 @@ ieee80211_sta_process_chanswitch(struct ieee80211_sub_if_data *sdata,
1168 ieee80211_queue_work(&local->hw, &ifmgd->chswitch_work); 1168 ieee80211_queue_work(&local->hw, &ifmgd->chswitch_work);
1169 else 1169 else
1170 mod_timer(&ifmgd->chswitch_timer, 1170 mod_timer(&ifmgd->chswitch_timer,
1171 TU_TO_EXP_TIME(csa_ie.count * cbss->beacon_interval)); 1171 TU_TO_EXP_TIME((csa_ie.count - 1) *
1172 cbss->beacon_interval));
1172} 1173}
1173 1174
1174static bool 1175static bool
diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
index 8fdadfd94ba8..6081329784dd 100644
--- a/net/mac80211/rate.c
+++ b/net/mac80211/rate.c
@@ -448,7 +448,7 @@ static void rate_fixup_ratelist(struct ieee80211_vif *vif,
448 */ 448 */
449 if (!(rates[0].flags & IEEE80211_TX_RC_MCS)) { 449 if (!(rates[0].flags & IEEE80211_TX_RC_MCS)) {
450 u32 basic_rates = vif->bss_conf.basic_rates; 450 u32 basic_rates = vif->bss_conf.basic_rates;
451 s8 baserate = basic_rates ? ffs(basic_rates - 1) : 0; 451 s8 baserate = basic_rates ? ffs(basic_rates) - 1 : 0;
452 452
453 rate = &sband->bitrates[rates[0].idx]; 453 rate = &sband->bitrates[rates[0].idx];
454 454
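The one-character rate.c fix above matters because ffs() is 1-based: ffs(x) - 1 yields the index of the lowest set bit, while the old ffs(x - 1) hashes a different bitmap entirely. A tiny demonstration:

        #include <stdio.h>
        #include <strings.h>   /* ffs() */

        int main(void)
        {
                unsigned int basic_rates = 0x34;    /* example bitmap: bits 2, 4, 5 set */
                int wrong = basic_rates ? ffs(basic_rates - 1) : 0; /* old code: looks at 0x33 */
                int right = basic_rates ? ffs(basic_rates) - 1 : 0; /* fixed: lowest set bit index */

                printf("wrong=%d right=%d\n", wrong, right);        /* wrong=1 right=2 */
                return 0;
        }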
diff --git a/net/mac80211/rc80211_minstrel_debugfs.c b/net/mac80211/rc80211_minstrel_debugfs.c
index edde723f9f00..2acab1bcaa4b 100644
--- a/net/mac80211/rc80211_minstrel_debugfs.c
+++ b/net/mac80211/rc80211_minstrel_debugfs.c
@@ -62,14 +62,14 @@ minstrel_stats_open(struct inode *inode, struct file *file)
62 unsigned int i, tp, prob, eprob; 62 unsigned int i, tp, prob, eprob;
63 char *p; 63 char *p;
64 64
65 ms = kmalloc(sizeof(*ms) + 4096, GFP_KERNEL); 65 ms = kmalloc(2048, GFP_KERNEL);
66 if (!ms) 66 if (!ms)
67 return -ENOMEM; 67 return -ENOMEM;
68 68
69 file->private_data = ms; 69 file->private_data = ms;
70 p = ms->buf; 70 p = ms->buf;
71 p += sprintf(p, "rate throughput ewma prob this prob " 71 p += sprintf(p, "rate tpt eprob *prob"
72 "this succ/attempt success attempts\n"); 72 " *ok(*cum) ok( cum)\n");
73 for (i = 0; i < mi->n_rates; i++) { 73 for (i = 0; i < mi->n_rates; i++) {
74 struct minstrel_rate *mr = &mi->r[i]; 74 struct minstrel_rate *mr = &mi->r[i];
75 struct minstrel_rate_stats *mrs = &mi->r[i].stats; 75 struct minstrel_rate_stats *mrs = &mi->r[i].stats;
@@ -86,8 +86,8 @@ minstrel_stats_open(struct inode *inode, struct file *file)
86 prob = MINSTREL_TRUNC(mrs->cur_prob * 1000); 86 prob = MINSTREL_TRUNC(mrs->cur_prob * 1000);
87 eprob = MINSTREL_TRUNC(mrs->probability * 1000); 87 eprob = MINSTREL_TRUNC(mrs->probability * 1000);
88 88
89 p += sprintf(p, " %6u.%1u %6u.%1u %6u.%1u " 89 p += sprintf(p, " %4u.%1u %3u.%1u %3u.%1u"
90 " %3u(%3u) %8llu %8llu\n", 90 " %4u(%4u) %9llu(%9llu)\n",
91 tp / 10, tp % 10, 91 tp / 10, tp % 10,
92 eprob / 10, eprob % 10, 92 eprob / 10, eprob % 10,
93 prob / 10, prob % 10, 93 prob / 10, prob % 10,
@@ -102,6 +102,8 @@ minstrel_stats_open(struct inode *inode, struct file *file)
102 mi->sample_packets); 102 mi->sample_packets);
103 ms->len = p - ms->buf; 103 ms->len = p - ms->buf;
104 104
105 WARN_ON(ms->len + sizeof(*ms) > 2048);
106
105 return 0; 107 return 0;
106} 108}
107 109
diff --git a/net/mac80211/rc80211_minstrel_ht.c b/net/mac80211/rc80211_minstrel_ht.c
index df90ce2db00c..408fd8ab4eef 100644
--- a/net/mac80211/rc80211_minstrel_ht.c
+++ b/net/mac80211/rc80211_minstrel_ht.c
@@ -252,19 +252,16 @@ minstrel_ht_sort_best_tp_rates(struct minstrel_ht_sta *mi, u8 index,
252 cur_thr = mi->groups[cur_group].rates[cur_idx].cur_tp; 252 cur_thr = mi->groups[cur_group].rates[cur_idx].cur_tp;
253 cur_prob = mi->groups[cur_group].rates[cur_idx].probability; 253 cur_prob = mi->groups[cur_group].rates[cur_idx].probability;
254 254
255 tmp_group = tp_list[j - 1] / MCS_GROUP_RATES; 255 do {
256 tmp_idx = tp_list[j - 1] % MCS_GROUP_RATES;
257 tmp_thr = mi->groups[tmp_group].rates[tmp_idx].cur_tp;
258 tmp_prob = mi->groups[tmp_group].rates[tmp_idx].probability;
259
260 while (j > 0 && (cur_thr > tmp_thr ||
261 (cur_thr == tmp_thr && cur_prob > tmp_prob))) {
262 j--;
263 tmp_group = tp_list[j - 1] / MCS_GROUP_RATES; 256 tmp_group = tp_list[j - 1] / MCS_GROUP_RATES;
264 tmp_idx = tp_list[j - 1] % MCS_GROUP_RATES; 257 tmp_idx = tp_list[j - 1] % MCS_GROUP_RATES;
265 tmp_thr = mi->groups[tmp_group].rates[tmp_idx].cur_tp; 258 tmp_thr = mi->groups[tmp_group].rates[tmp_idx].cur_tp;
266 tmp_prob = mi->groups[tmp_group].rates[tmp_idx].probability; 259 tmp_prob = mi->groups[tmp_group].rates[tmp_idx].probability;
267 } 260 if (cur_thr < tmp_thr ||
261 (cur_thr == tmp_thr && cur_prob <= tmp_prob))
262 break;
263 j--;
264 } while (j > 0);
268 265
269 if (j < MAX_THR_RATES - 1) { 266 if (j < MAX_THR_RATES - 1) {
270 memmove(&tp_list[j + 1], &tp_list[j], (sizeof(*tp_list) * 267 memmove(&tp_list[j + 1], &tp_list[j], (sizeof(*tp_list) *
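minstrel_ht_sort_best_tp_rates() above restructures the insertion search as a do/while so the slot above is examined before j is decremented. A stand-alone sketch of the same loop shape, inserting into a descending list (the values are arbitrary, only the control flow mirrors the patch):

        #include <stdio.h>
        #include <string.h>

        int main(void)
        {
                int list[6] = { 90, 70, 50, 30, 10, 0 };    /* descending; last slot is free */
                int n = 5, cur = 60, j = n;

                /* walk left while the new value beats the entry above, same shape as the patch */
                do {
                        if (cur <= list[j - 1])
                                break;
                        j--;
                } while (j > 0);

                memmove(&list[j + 1], &list[j], (n - j) * sizeof(list[0]));
                list[j] = cur;

                for (int i = 0; i <= n; i++)
                        printf("%d ", list[i]);
                printf("\n");                               /* 90 70 60 50 30 10 */
                return 0;
        }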
diff --git a/net/mac80211/rc80211_minstrel_ht_debugfs.c b/net/mac80211/rc80211_minstrel_ht_debugfs.c
index a72ad46f2a04..d537bec93754 100644
--- a/net/mac80211/rc80211_minstrel_ht_debugfs.c
+++ b/net/mac80211/rc80211_minstrel_ht_debugfs.c
@@ -63,8 +63,8 @@ minstrel_ht_stats_dump(struct minstrel_ht_sta *mi, int i, char *p)
63 prob = MINSTREL_TRUNC(mr->cur_prob * 1000); 63 prob = MINSTREL_TRUNC(mr->cur_prob * 1000);
64 eprob = MINSTREL_TRUNC(mr->probability * 1000); 64 eprob = MINSTREL_TRUNC(mr->probability * 1000);
65 65
66 p += sprintf(p, " %6u.%1u %6u.%1u %6u.%1u " 66 p += sprintf(p, " %4u.%1u %3u.%1u %3u.%1u "
67 "%3u %3u(%3u) %8llu %8llu\n", 67 "%3u %4u(%4u) %9llu(%9llu)\n",
68 tp / 10, tp % 10, 68 tp / 10, tp % 10,
69 eprob / 10, eprob % 10, 69 eprob / 10, eprob % 10,
70 prob / 10, prob % 10, 70 prob / 10, prob % 10,
@@ -96,14 +96,15 @@ minstrel_ht_stats_open(struct inode *inode, struct file *file)
96 return ret; 96 return ret;
97 } 97 }
98 98
99 ms = kmalloc(sizeof(*ms) + 8192, GFP_KERNEL); 99 ms = kmalloc(8192, GFP_KERNEL);
100 if (!ms) 100 if (!ms)
101 return -ENOMEM; 101 return -ENOMEM;
102 102
103 file->private_data = ms; 103 file->private_data = ms;
104 p = ms->buf; 104 p = ms->buf;
105 p += sprintf(p, "type rate throughput ewma prob " 105 p += sprintf(p, "type rate tpt eprob *prob "
106 "this prob retry this succ/attempt success attempts\n"); 106 "ret *ok(*cum) ok( cum)\n");
107
107 108
108 p = minstrel_ht_stats_dump(mi, max_mcs, p); 109 p = minstrel_ht_stats_dump(mi, max_mcs, p);
109 for (i = 0; i < max_mcs; i++) 110 for (i = 0; i < max_mcs; i++)
@@ -118,6 +119,8 @@ minstrel_ht_stats_open(struct inode *inode, struct file *file)
118 MINSTREL_TRUNC(mi->avg_ampdu_len * 10) % 10); 119 MINSTREL_TRUNC(mi->avg_ampdu_len * 10) % 10);
119 ms->len = p - ms->buf; 120 ms->len = p - ms->buf;
120 121
122 WARN_ON(ms->len + sizeof(*ms) > 8192);
123
121 return nonseekable_open(inode, file); 124 return nonseekable_open(inode, file);
122} 125}
123 126
diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c
index b04ca4049c95..a37f9af634cb 100644
--- a/net/mac80211/rx.c
+++ b/net/mac80211/rx.c
@@ -1678,11 +1678,14 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
1678 sc = le16_to_cpu(hdr->seq_ctrl); 1678 sc = le16_to_cpu(hdr->seq_ctrl);
1679 frag = sc & IEEE80211_SCTL_FRAG; 1679 frag = sc & IEEE80211_SCTL_FRAG;
1680 1680
1681 if (likely((!ieee80211_has_morefrags(fc) && frag == 0) || 1681 if (likely(!ieee80211_has_morefrags(fc) && frag == 0))
1682 is_multicast_ether_addr(hdr->addr1))) { 1682 goto out;
1683 /* not fragmented */ 1683
1684 if (is_multicast_ether_addr(hdr->addr1)) {
1685 rx->local->dot11MulticastReceivedFrameCount++;
1684 goto out; 1686 goto out;
1685 } 1687 }
1688
1686 I802_DEBUG_INC(rx->local->rx_handlers_fragments); 1689 I802_DEBUG_INC(rx->local->rx_handlers_fragments);
1687 1690
1688 if (skb_linearize(rx->skb)) 1691 if (skb_linearize(rx->skb))
@@ -1775,10 +1778,7 @@ ieee80211_rx_h_defragment(struct ieee80211_rx_data *rx)
1775 out: 1778 out:
1776 if (rx->sta) 1779 if (rx->sta)
1777 rx->sta->rx_packets++; 1780 rx->sta->rx_packets++;
1778 if (is_multicast_ether_addr(hdr->addr1)) 1781 ieee80211_led_rx(rx->local);
1779 rx->local->dot11MulticastReceivedFrameCount++;
1780 else
1781 ieee80211_led_rx(rx->local);
1782 return RX_CONTINUE; 1782 return RX_CONTINUE;
1783} 1783}
1784 1784
diff --git a/net/mac80211/spectmgmt.c b/net/mac80211/spectmgmt.c
index 6ab009070084..efeba56c913b 100644
--- a/net/mac80211/spectmgmt.c
+++ b/net/mac80211/spectmgmt.c
@@ -22,7 +22,7 @@
22#include "wme.h" 22#include "wme.h"
23 23
24int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata, 24int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
25 struct ieee802_11_elems *elems, bool beacon, 25 struct ieee802_11_elems *elems,
26 enum ieee80211_band current_band, 26 enum ieee80211_band current_band,
27 u32 sta_flags, u8 *bssid, 27 u32 sta_flags, u8 *bssid,
28 struct ieee80211_csa_ie *csa_ie) 28 struct ieee80211_csa_ie *csa_ie)
@@ -91,19 +91,13 @@ int ieee80211_parse_ch_switch_ie(struct ieee80211_sub_if_data *sdata,
91 return -EINVAL; 91 return -EINVAL;
92 } 92 }
93 93
94 if (!beacon && sec_chan_offs) { 94 if (sec_chan_offs) {
95 secondary_channel_offset = sec_chan_offs->sec_chan_offs; 95 secondary_channel_offset = sec_chan_offs->sec_chan_offs;
96 } else if (beacon && ht_oper) {
97 secondary_channel_offset =
98 ht_oper->ht_param & IEEE80211_HT_PARAM_CHA_SEC_OFFSET;
99 } else if (!(sta_flags & IEEE80211_STA_DISABLE_HT)) { 96 } else if (!(sta_flags & IEEE80211_STA_DISABLE_HT)) {
100 /* If it's not a beacon, HT is enabled and the IE not present, 97 /* If the secondary channel offset IE is not present,
101 * it's 20 MHz, 802.11-2012 8.5.2.6: 98 * we can't know what's the post-CSA offset, so the
102 * This element [the Secondary Channel Offset Element] is 99 * best we can do is use 20MHz.
103 * present when switching to a 40 MHz channel. It may be 100 */
104 * present when switching to a 20 MHz channel (in which
105 * case the secondary channel offset is set to SCN).
106 */
107 secondary_channel_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE; 101 secondary_channel_offset = IEEE80211_HT_PARAM_CHA_SEC_NONE;
108 } 102 }
109 103
diff --git a/net/mac80211/sta_info.h b/net/mac80211/sta_info.h
index 42f68cb8957e..bcda2ac7d844 100644
--- a/net/mac80211/sta_info.h
+++ b/net/mac80211/sta_info.h
@@ -336,6 +336,7 @@ struct ieee80211_tx_latency_stat {
336 * @known_smps_mode: the smps_mode the client thinks we are in. Relevant for 336 * @known_smps_mode: the smps_mode the client thinks we are in. Relevant for
337 * AP only. 337 * AP only.
338 * @cipher_scheme: optional cipher scheme for this station 338 * @cipher_scheme: optional cipher scheme for this station
339 * @last_tdls_pkt_time: holds the time in jiffies of last TDLS pkt ACKed
339 */ 340 */
340struct sta_info { 341struct sta_info {
341 /* General information, mostly static */ 342 /* General information, mostly static */
diff --git a/net/mpls/Makefile b/net/mpls/Makefile
index 0a3c171be537..6dec088c2d0f 100644
--- a/net/mpls/Makefile
+++ b/net/mpls/Makefile
@@ -1,4 +1,4 @@
1# 1#
2# Makefile for MPLS. 2# Makefile for MPLS.
3# 3#
4obj-y += mpls_gso.o 4obj-$(CONFIG_NET_MPLS_GSO) += mpls_gso.o
diff --git a/net/mpls/mpls_gso.c b/net/mpls/mpls_gso.c
index e28ed2ef5b06..e3545f21a099 100644
--- a/net/mpls/mpls_gso.c
+++ b/net/mpls/mpls_gso.c
@@ -48,7 +48,7 @@ static struct sk_buff *mpls_gso_segment(struct sk_buff *skb,
48 __skb_push(skb, skb->mac_len); 48 __skb_push(skb, skb->mac_len);
49 49
50 /* Segment inner packet. */ 50 /* Segment inner packet. */
51 mpls_features = skb->dev->mpls_features & netif_skb_features(skb); 51 mpls_features = skb->dev->mpls_features & features;
52 segs = skb_mac_gso_segment(skb, mpls_features); 52 segs = skb_mac_gso_segment(skb, mpls_features);
53 53
54 54
@@ -59,8 +59,7 @@ static struct sk_buff *mpls_gso_segment(struct sk_buff *skb,
59 * above pulled. It will be re-pushed after returning 59 * above pulled. It will be re-pushed after returning
60 * skb_mac_gso_segment(), an indirect caller of this function. 60 * skb_mac_gso_segment(), an indirect caller of this function.
61 */ 61 */
62 __skb_push(skb, skb->data - skb_mac_header(skb)); 62 __skb_pull(skb, skb->data - skb_mac_header(skb));
63
64out: 63out:
65 return segs; 64 return segs;
66} 65}
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 912e5a05b79d..d259da3ce67a 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -659,7 +659,7 @@ ip_set_nfnl_get_byindex(struct net *net, ip_set_id_t index)
659 struct ip_set *set; 659 struct ip_set *set;
660 struct ip_set_net *inst = ip_set_pernet(net); 660 struct ip_set_net *inst = ip_set_pernet(net);
661 661
662 if (index > inst->ip_set_max) 662 if (index >= inst->ip_set_max)
663 return IPSET_INVALID_ID; 663 return IPSET_INVALID_ID;
664 664
665 nfnl_lock(NFNL_SUBSYS_IPSET); 665 nfnl_lock(NFNL_SUBSYS_IPSET);
@@ -1863,6 +1863,12 @@ ip_set_sockfn_get(struct sock *sk, int optval, void __user *user, int *len)
1863 if (*op < IP_SET_OP_VERSION) { 1863 if (*op < IP_SET_OP_VERSION) {
1864 /* Check the version at the beginning of operations */ 1864 /* Check the version at the beginning of operations */
1865 struct ip_set_req_version *req_version = data; 1865 struct ip_set_req_version *req_version = data;
1866
1867 if (*len < sizeof(struct ip_set_req_version)) {
1868 ret = -EINVAL;
1869 goto done;
1870 }
1871
1866 if (req_version->version != IPSET_PROTOCOL) { 1872 if (req_version->version != IPSET_PROTOCOL) {
1867 ret = -EPROTO; 1873 ret = -EPROTO;
1868 goto done; 1874 goto done;
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index 91f17c1eb8a2..bd90bf8107da 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -316,7 +316,7 @@ __ip_vs_get_out_rt(int skb_af, struct sk_buff *skb, struct ip_vs_dest *dest,
316 if (unlikely(crosses_local_route_boundary(skb_af, skb, rt_mode, 316 if (unlikely(crosses_local_route_boundary(skb_af, skb, rt_mode,
317 local))) { 317 local))) {
318 IP_VS_DBG_RL("We are crossing local and non-local addresses" 318 IP_VS_DBG_RL("We are crossing local and non-local addresses"
319 " daddr=%pI4\n", &dest->addr.ip); 319 " daddr=%pI4\n", &daddr);
320 goto err_put; 320 goto err_put;
321 } 321 }
322 322
@@ -458,7 +458,7 @@ __ip_vs_get_out_rt_v6(int skb_af, struct sk_buff *skb, struct ip_vs_dest *dest,
458 if (unlikely(crosses_local_route_boundary(skb_af, skb, rt_mode, 458 if (unlikely(crosses_local_route_boundary(skb_af, skb, rt_mode,
459 local))) { 459 local))) {
460 IP_VS_DBG_RL("We are crossing local and non-local addresses" 460 IP_VS_DBG_RL("We are crossing local and non-local addresses"
461 " daddr=%pI6\n", &dest->addr.in6); 461 " daddr=%pI6\n", daddr);
462 goto err_put; 462 goto err_put;
463 } 463 }
464 464
@@ -846,6 +846,8 @@ ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af,
846 new_skb = skb_realloc_headroom(skb, max_headroom); 846 new_skb = skb_realloc_headroom(skb, max_headroom);
847 if (!new_skb) 847 if (!new_skb)
848 goto error; 848 goto error;
849 if (skb->sk)
850 skb_set_owner_w(new_skb, skb->sk);
849 consume_skb(skb); 851 consume_skb(skb);
850 skb = new_skb; 852 skb = new_skb;
851 } 853 }
diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c
index 44d1ea32570a..d87b6423ffb2 100644
--- a/net/netfilter/nf_conntrack_proto_tcp.c
+++ b/net/netfilter/nf_conntrack_proto_tcp.c
@@ -213,7 +213,7 @@ static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
213 { 213 {
214/* REPLY */ 214/* REPLY */
215/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */ 215/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
216/*syn*/ { sIV, sS2, sIV, sIV, sIV, sIV, sIV, sIV, sIV, sS2 }, 216/*syn*/ { sIV, sS2, sIV, sIV, sIV, sIV, sIV, sSS, sIV, sS2 },
217/* 217/*
218 * sNO -> sIV Never reached. 218 * sNO -> sIV Never reached.
219 * sSS -> sS2 Simultaneous open 219 * sSS -> sS2 Simultaneous open
@@ -223,7 +223,7 @@ static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
223 * sFW -> sIV 223 * sFW -> sIV
224 * sCW -> sIV 224 * sCW -> sIV
225 * sLA -> sIV 225 * sLA -> sIV
226 * sTW -> sIV Reopened connection, but server may not do it. 226 * sTW -> sSS Reopened connection, but server may have switched role
227 * sCL -> sIV 227 * sCL -> sIV
228 */ 228 */
229/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */ 229/* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 556a0dfa4abc..66e8425dbfe7 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -1328,10 +1328,10 @@ static int nf_tables_newchain(struct sock *nlsk, struct sk_buff *skb,
1328 basechain->stats = stats; 1328 basechain->stats = stats;
1329 } else { 1329 } else {
1330 stats = netdev_alloc_pcpu_stats(struct nft_stats); 1330 stats = netdev_alloc_pcpu_stats(struct nft_stats);
1331 if (IS_ERR(stats)) { 1331 if (stats == NULL) {
1332 module_put(type->owner); 1332 module_put(type->owner);
1333 kfree(basechain); 1333 kfree(basechain);
1334 return PTR_ERR(stats); 1334 return -ENOMEM;
1335 } 1335 }
1336 rcu_assign_pointer(basechain->stats, stats); 1336 rcu_assign_pointer(basechain->stats, stats);
1337 } 1337 }
@@ -3484,13 +3484,8 @@ static void nft_chain_commit_update(struct nft_trans *trans)
3484 } 3484 }
3485} 3485}
3486 3486
3487/* Schedule objects for release via rcu to make sure no packets are accessing 3487static void nf_tables_commit_release(struct nft_trans *trans)
3488 * removed rules.
3489 */
3490static void nf_tables_commit_release_rcu(struct rcu_head *rt)
3491{ 3488{
3492 struct nft_trans *trans = container_of(rt, struct nft_trans, rcu_head);
3493
3494 switch (trans->msg_type) { 3489 switch (trans->msg_type) {
3495 case NFT_MSG_DELTABLE: 3490 case NFT_MSG_DELTABLE:
3496 nf_tables_table_destroy(&trans->ctx); 3491 nf_tables_table_destroy(&trans->ctx);
@@ -3612,10 +3607,11 @@ static int nf_tables_commit(struct sk_buff *skb)
3612 } 3607 }
3613 } 3608 }
3614 3609
3610 synchronize_rcu();
3611
3615 list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) { 3612 list_for_each_entry_safe(trans, next, &net->nft.commit_list, list) {
3616 list_del(&trans->list); 3613 list_del(&trans->list);
3617 trans->ctx.nla = NULL; 3614 nf_tables_commit_release(trans);
3618 call_rcu(&trans->rcu_head, nf_tables_commit_release_rcu);
3619 } 3615 }
3620 3616
3621 nf_tables_gen_notify(net, skb, NFT_MSG_NEWGEN); 3617 nf_tables_gen_notify(net, skb, NFT_MSG_NEWGEN);
@@ -3623,13 +3619,8 @@ static int nf_tables_commit(struct sk_buff *skb)
3623 return 0; 3619 return 0;
3624} 3620}
3625 3621
3626/* Schedule objects for release via rcu to make sure no packets are accessing 3622static void nf_tables_abort_release(struct nft_trans *trans)
3627 * aborted rules.
3628 */
3629static void nf_tables_abort_release_rcu(struct rcu_head *rt)
3630{ 3623{
3631 struct nft_trans *trans = container_of(rt, struct nft_trans, rcu_head);
3632
3633 switch (trans->msg_type) { 3624 switch (trans->msg_type) {
3634 case NFT_MSG_NEWTABLE: 3625 case NFT_MSG_NEWTABLE:
3635 nf_tables_table_destroy(&trans->ctx); 3626 nf_tables_table_destroy(&trans->ctx);
@@ -3725,11 +3716,12 @@ static int nf_tables_abort(struct sk_buff *skb)
3725 } 3716 }
3726 } 3717 }
3727 3718
3719 synchronize_rcu();
3720
3728 list_for_each_entry_safe_reverse(trans, next, 3721 list_for_each_entry_safe_reverse(trans, next,
3729 &net->nft.commit_list, list) { 3722 &net->nft.commit_list, list) {
3730 list_del(&trans->list); 3723 list_del(&trans->list);
3731 trans->ctx.nla = NULL; 3724 nf_tables_abort_release(trans);
3732 call_rcu(&trans->rcu_head, nf_tables_abort_release_rcu);
3733 } 3725 }
3734 3726
3735 return 0; 3727 return 0;
@@ -3744,6 +3736,20 @@ static const struct nfnetlink_subsystem nf_tables_subsys = {
3744 .abort = nf_tables_abort, 3736 .abort = nf_tables_abort,
3745}; 3737};
3746 3738
3739int nft_chain_validate_dependency(const struct nft_chain *chain,
3740 enum nft_chain_type type)
3741{
3742 const struct nft_base_chain *basechain;
3743
3744 if (chain->flags & NFT_BASE_CHAIN) {
3745 basechain = nft_base_chain(chain);
3746 if (basechain->type->type != type)
3747 return -EOPNOTSUPP;
3748 }
3749 return 0;
3750}
3751EXPORT_SYMBOL_GPL(nft_chain_validate_dependency);
3752
3747/* 3753/*
3748 * Loop detection - walk through the ruleset beginning at the destination chain 3754 * Loop detection - walk through the ruleset beginning at the destination chain
3749 * of a new jump until either the source chain is reached (loop) or all 3755 * of a new jump until either the source chain is reached (loop) or all
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index 6c5a915cfa75..13c2e17bbe27 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -47,6 +47,8 @@ static const int nfnl_group2type[NFNLGRP_MAX+1] = {
47 [NFNLGRP_CONNTRACK_EXP_NEW] = NFNL_SUBSYS_CTNETLINK_EXP, 47 [NFNLGRP_CONNTRACK_EXP_NEW] = NFNL_SUBSYS_CTNETLINK_EXP,
48 [NFNLGRP_CONNTRACK_EXP_UPDATE] = NFNL_SUBSYS_CTNETLINK_EXP, 48 [NFNLGRP_CONNTRACK_EXP_UPDATE] = NFNL_SUBSYS_CTNETLINK_EXP,
49 [NFNLGRP_CONNTRACK_EXP_DESTROY] = NFNL_SUBSYS_CTNETLINK_EXP, 49 [NFNLGRP_CONNTRACK_EXP_DESTROY] = NFNL_SUBSYS_CTNETLINK_EXP,
50 [NFNLGRP_NFTABLES] = NFNL_SUBSYS_NFTABLES,
51 [NFNLGRP_ACCT_QUOTA] = NFNL_SUBSYS_ACCT,
50}; 52};
51 53
52void nfnl_lock(__u8 subsys_id) 54void nfnl_lock(__u8 subsys_id)
@@ -464,7 +466,12 @@ static void nfnetlink_rcv(struct sk_buff *skb)
464static int nfnetlink_bind(int group) 466static int nfnetlink_bind(int group)
465{ 467{
466 const struct nfnetlink_subsystem *ss; 468 const struct nfnetlink_subsystem *ss;
467 int type = nfnl_group2type[group]; 469 int type;
470
471 if (group <= NFNLGRP_NONE || group > NFNLGRP_MAX)
472 return -EINVAL;
473
474 type = nfnl_group2type[group];
468 475
469 rcu_read_lock(); 476 rcu_read_lock();
470 ss = nfnetlink_get_subsys(type); 477 ss = nfnetlink_get_subsys(type);
@@ -514,6 +521,9 @@ static int __init nfnetlink_init(void)
514{ 521{
515 int i; 522 int i;
516 523
524 for (i = NFNLGRP_NONE + 1; i <= NFNLGRP_MAX; i++)
525 BUG_ON(nfnl_group2type[i] == NFNL_SUBSYS_NONE);
526
517 for (i=0; i<NFNL_SUBSYS_COUNT; i++) 527 for (i=0; i<NFNL_SUBSYS_COUNT; i++)
518 mutex_init(&table[i].mutex); 528 mutex_init(&table[i].mutex);
519 529
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index b1e3a0579416..5f1be5ba3559 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -43,7 +43,8 @@
43#define NFULNL_NLBUFSIZ_DEFAULT NLMSG_GOODSIZE 43#define NFULNL_NLBUFSIZ_DEFAULT NLMSG_GOODSIZE
44#define NFULNL_TIMEOUT_DEFAULT 100 /* every second */ 44#define NFULNL_TIMEOUT_DEFAULT 100 /* every second */
45#define NFULNL_QTHRESH_DEFAULT 100 /* 100 packets */ 45#define NFULNL_QTHRESH_DEFAULT 100 /* 100 packets */
46#define NFULNL_COPY_RANGE_MAX 0xFFFF /* max packet size is limited by 16-bit struct nfattr nfa_len field */ 46/* max packet size is limited by 16-bit struct nfattr nfa_len field */
47#define NFULNL_COPY_RANGE_MAX (0xFFFF - NLA_HDRLEN)
47 48
48#define PRINTR(x, args...) do { if (net_ratelimit()) \ 49#define PRINTR(x, args...) do { if (net_ratelimit()) \
49 printk(x, ## args); } while (0); 50 printk(x, ## args); } while (0);
@@ -252,6 +253,8 @@ nfulnl_set_mode(struct nfulnl_instance *inst, u_int8_t mode,
252 253
253 case NFULNL_COPY_PACKET: 254 case NFULNL_COPY_PACKET:
254 inst->copy_mode = mode; 255 inst->copy_mode = mode;
256 if (range == 0)
257 range = NFULNL_COPY_RANGE_MAX;
255 inst->copy_range = min_t(unsigned int, 258 inst->copy_range = min_t(unsigned int,
256 range, NFULNL_COPY_RANGE_MAX); 259 range, NFULNL_COPY_RANGE_MAX);
257 break; 260 break;
@@ -343,26 +346,25 @@ nfulnl_alloc_skb(struct net *net, u32 peer_portid, unsigned int inst_size,
343 return skb; 346 return skb;
344} 347}
345 348
346static int 349static void
347__nfulnl_send(struct nfulnl_instance *inst) 350__nfulnl_send(struct nfulnl_instance *inst)
348{ 351{
349 int status = -1;
350
351 if (inst->qlen > 1) { 352 if (inst->qlen > 1) {
352 struct nlmsghdr *nlh = nlmsg_put(inst->skb, 0, 0, 353 struct nlmsghdr *nlh = nlmsg_put(inst->skb, 0, 0,
353 NLMSG_DONE, 354 NLMSG_DONE,
354 sizeof(struct nfgenmsg), 355 sizeof(struct nfgenmsg),
355 0); 356 0);
356 if (!nlh) 357 if (WARN_ONCE(!nlh, "bad nlskb size: %u, tailroom %d\n",
358 inst->skb->len, skb_tailroom(inst->skb))) {
359 kfree_skb(inst->skb);
357 goto out; 360 goto out;
361 }
358 } 362 }
359 status = nfnetlink_unicast(inst->skb, inst->net, inst->peer_portid, 363 nfnetlink_unicast(inst->skb, inst->net, inst->peer_portid,
360 MSG_DONTWAIT); 364 MSG_DONTWAIT);
361 365out:
362 inst->qlen = 0; 366 inst->qlen = 0;
363 inst->skb = NULL; 367 inst->skb = NULL;
364out:
365 return status;
366} 368}
367 369
368static void 370static void
@@ -649,7 +651,8 @@ nfulnl_log_packet(struct net *net,
649 + nla_total_size(sizeof(u_int32_t)) /* gid */ 651 + nla_total_size(sizeof(u_int32_t)) /* gid */
650 + nla_total_size(plen) /* prefix */ 652 + nla_total_size(plen) /* prefix */
651 + nla_total_size(sizeof(struct nfulnl_msg_packet_hw)) 653 + nla_total_size(sizeof(struct nfulnl_msg_packet_hw))
652 + nla_total_size(sizeof(struct nfulnl_msg_packet_timestamp)); 654 + nla_total_size(sizeof(struct nfulnl_msg_packet_timestamp))
655 + nla_total_size(sizeof(struct nfgenmsg)); /* NLMSG_DONE */
653 656
654 if (in && skb_mac_header_was_set(skb)) { 657 if (in && skb_mac_header_was_set(skb)) {
655 size += nla_total_size(skb->dev->hard_header_len) 658 size += nla_total_size(skb->dev->hard_header_len)
@@ -678,8 +681,7 @@ nfulnl_log_packet(struct net *net,
678 break; 681 break;
679 682
680 case NFULNL_COPY_PACKET: 683 case NFULNL_COPY_PACKET:
681 if (inst->copy_range == 0 684 if (inst->copy_range > skb->len)
682 || inst->copy_range > skb->len)
683 data_len = skb->len; 685 data_len = skb->len;
684 else 686 else
685 data_len = inst->copy_range; 687 data_len = inst->copy_range;
@@ -692,8 +694,7 @@ nfulnl_log_packet(struct net *net,
692 goto unlock_and_release; 694 goto unlock_and_release;
693 } 695 }
694 696
695 if (inst->skb && 697 if (inst->skb && size > skb_tailroom(inst->skb)) {
696 size > skb_tailroom(inst->skb) - sizeof(struct nfgenmsg)) {
697 /* either the queue len is too high or we don't have 698 /* either the queue len is too high or we don't have
698 * enough room in the skb left. flush to userspace. */ 699 * enough room in the skb left. flush to userspace. */
699 __nfulnl_flush(inst); 700 __nfulnl_flush(inst);
diff --git a/net/netfilter/nfnetlink_queue_core.c b/net/netfilter/nfnetlink_queue_core.c
index a82077d9f59b..7c60ccd61a3e 100644
--- a/net/netfilter/nfnetlink_queue_core.c
+++ b/net/netfilter/nfnetlink_queue_core.c
@@ -665,7 +665,7 @@ nfqnl_enqueue_packet(struct nf_queue_entry *entry, unsigned int queuenum)
665 * returned by nf_queue. For instance, callers rely on -ECANCELED to 665 * returned by nf_queue. For instance, callers rely on -ECANCELED to
666 * mean 'ignore this hook'. 666 * mean 'ignore this hook'.
667 */ 667 */
668 if (IS_ERR(segs)) 668 if (IS_ERR_OR_NULL(segs))
669 goto out_err; 669 goto out_err;
670 queued = 0; 670 queued = 0;
671 err = 0; 671 err = 0;
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index 7e2683c8a44a..265e190f2218 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -19,9 +19,24 @@
19#include <linux/netfilter/x_tables.h> 19#include <linux/netfilter/x_tables.h>
20#include <linux/netfilter_ipv4/ip_tables.h> 20#include <linux/netfilter_ipv4/ip_tables.h>
21#include <linux/netfilter_ipv6/ip6_tables.h> 21#include <linux/netfilter_ipv6/ip6_tables.h>
22#include <asm/uaccess.h> /* for set_fs */
23#include <net/netfilter/nf_tables.h> 22#include <net/netfilter/nf_tables.h>
24 23
24static int nft_compat_chain_validate_dependency(const char *tablename,
25 const struct nft_chain *chain)
26{
27 const struct nft_base_chain *basechain;
28
29 if (!tablename || !(chain->flags & NFT_BASE_CHAIN))
30 return 0;
31
32 basechain = nft_base_chain(chain);
33 if (strcmp(tablename, "nat") == 0 &&
34 basechain->type->type != NFT_CHAIN_T_NAT)
35 return -EINVAL;
36
37 return 0;
38}
39
25union nft_entry { 40union nft_entry {
26 struct ipt_entry e4; 41 struct ipt_entry e4;
27 struct ip6t_entry e6; 42 struct ip6t_entry e6;
@@ -74,7 +89,7 @@ nft_target_set_tgchk_param(struct xt_tgchk_param *par,
74 struct xt_target *target, void *info, 89 struct xt_target *target, void *info,
75 union nft_entry *entry, u8 proto, bool inv) 90 union nft_entry *entry, u8 proto, bool inv)
76{ 91{
77 par->net = &init_net; 92 par->net = ctx->net;
78 par->table = ctx->table->name; 93 par->table = ctx->table->name;
79 switch (ctx->afi->family) { 94 switch (ctx->afi->family) {
80 case AF_INET: 95 case AF_INET:
@@ -95,6 +110,8 @@ nft_target_set_tgchk_param(struct xt_tgchk_param *par,
95 const struct nf_hook_ops *ops = &basechain->ops[0]; 110 const struct nf_hook_ops *ops = &basechain->ops[0];
96 111
97 par->hook_mask = 1 << ops->hooknum; 112 par->hook_mask = 1 << ops->hooknum;
113 } else {
114 par->hook_mask = 0;
98 } 115 }
99 par->family = ctx->afi->family; 116 par->family = ctx->afi->family;
100} 117}
@@ -151,6 +168,10 @@ nft_target_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
151 union nft_entry e = {}; 168 union nft_entry e = {};
152 int ret; 169 int ret;
153 170
171 ret = nft_compat_chain_validate_dependency(target->table, ctx->chain);
172 if (ret < 0)
173 goto err;
174
154 target_compat_from_user(target, nla_data(tb[NFTA_TARGET_INFO]), info); 175 target_compat_from_user(target, nla_data(tb[NFTA_TARGET_INFO]), info);
155 176
156 if (ctx->nla[NFTA_RULE_COMPAT]) { 177 if (ctx->nla[NFTA_RULE_COMPAT]) {
@@ -216,6 +237,7 @@ static int nft_target_validate(const struct nft_ctx *ctx,
216{ 237{
217 struct xt_target *target = expr->ops->data; 238 struct xt_target *target = expr->ops->data;
218 unsigned int hook_mask = 0; 239 unsigned int hook_mask = 0;
240 int ret;
219 241
220 if (ctx->chain->flags & NFT_BASE_CHAIN) { 242 if (ctx->chain->flags & NFT_BASE_CHAIN) {
221 const struct nft_base_chain *basechain = 243 const struct nft_base_chain *basechain =
@@ -223,11 +245,13 @@ static int nft_target_validate(const struct nft_ctx *ctx,
223 const struct nf_hook_ops *ops = &basechain->ops[0]; 245 const struct nf_hook_ops *ops = &basechain->ops[0];
224 246
225 hook_mask = 1 << ops->hooknum; 247 hook_mask = 1 << ops->hooknum;
226 if (hook_mask & target->hooks) 248 if (!(hook_mask & target->hooks))
227 return 0; 249 return -EINVAL;
228 250
229 /* This target is being called from an invalid chain */ 251 ret = nft_compat_chain_validate_dependency(target->table,
230 return -EINVAL; 252 ctx->chain);
253 if (ret < 0)
254 return ret;
231 } 255 }
232 return 0; 256 return 0;
233} 257}
@@ -272,7 +296,7 @@ nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx,
272 struct xt_match *match, void *info, 296 struct xt_match *match, void *info,
273 union nft_entry *entry, u8 proto, bool inv) 297 union nft_entry *entry, u8 proto, bool inv)
274{ 298{
275 par->net = &init_net; 299 par->net = ctx->net;
276 par->table = ctx->table->name; 300 par->table = ctx->table->name;
277 switch (ctx->afi->family) { 301 switch (ctx->afi->family) {
278 case AF_INET: 302 case AF_INET:
@@ -293,6 +317,8 @@ nft_match_set_mtchk_param(struct xt_mtchk_param *par, const struct nft_ctx *ctx,
293 const struct nf_hook_ops *ops = &basechain->ops[0]; 317 const struct nf_hook_ops *ops = &basechain->ops[0];
294 318
295 par->hook_mask = 1 << ops->hooknum; 319 par->hook_mask = 1 << ops->hooknum;
320 } else {
321 par->hook_mask = 0;
296 } 322 }
297 par->family = ctx->afi->family; 323 par->family = ctx->afi->family;
298} 324}
@@ -320,6 +346,10 @@ nft_match_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
320 union nft_entry e = {}; 346 union nft_entry e = {};
321 int ret; 347 int ret;
322 348
349 ret = nft_compat_chain_validate_dependency(match->table, ctx->chain);
350 if (ret < 0)
351 goto err;
352
323 match_compat_from_user(match, nla_data(tb[NFTA_MATCH_INFO]), info); 353 match_compat_from_user(match, nla_data(tb[NFTA_MATCH_INFO]), info);
324 354
325 if (ctx->nla[NFTA_RULE_COMPAT]) { 355 if (ctx->nla[NFTA_RULE_COMPAT]) {
@@ -379,6 +409,7 @@ static int nft_match_validate(const struct nft_ctx *ctx,
379{ 409{
380 struct xt_match *match = expr->ops->data; 410 struct xt_match *match = expr->ops->data;
381 unsigned int hook_mask = 0; 411 unsigned int hook_mask = 0;
412 int ret;
382 413
383 if (ctx->chain->flags & NFT_BASE_CHAIN) { 414 if (ctx->chain->flags & NFT_BASE_CHAIN) {
384 const struct nft_base_chain *basechain = 415 const struct nft_base_chain *basechain =
@@ -386,11 +417,13 @@ static int nft_match_validate(const struct nft_ctx *ctx,
386 const struct nf_hook_ops *ops = &basechain->ops[0]; 417 const struct nf_hook_ops *ops = &basechain->ops[0];
387 418
388 hook_mask = 1 << ops->hooknum; 419 hook_mask = 1 << ops->hooknum;
389 if (hook_mask & match->hooks) 420 if (!(hook_mask & match->hooks))
390 return 0; 421 return -EINVAL;
391 422
392 /* This match is being called from an invalid chain */ 423 ret = nft_compat_chain_validate_dependency(match->table,
393 return -EINVAL; 424 ctx->chain);
425 if (ret < 0)
426 return ret;
394 } 427 }
395 return 0; 428 return 0;
396} 429}
@@ -611,7 +644,7 @@ nft_target_select_ops(const struct nft_ctx *ctx,
611 family = ctx->afi->family; 644 family = ctx->afi->family;
612 645
613 /* Re-use the existing target if it's already loaded. */ 646 /* Re-use the existing target if it's already loaded. */
614 list_for_each_entry(nft_target, &nft_match_list, head) { 647 list_for_each_entry(nft_target, &nft_target_list, head) {
615 struct xt_target *target = nft_target->ops.data; 648 struct xt_target *target = nft_target->ops.data;
616 649
617 if (strcmp(target->name, tg_name) == 0 && 650 if (strcmp(target->name, tg_name) == 0 &&
diff --git a/net/netfilter/nft_masq.c b/net/netfilter/nft_masq.c
index 6637bab00567..d1ffd5eb3a9b 100644
--- a/net/netfilter/nft_masq.c
+++ b/net/netfilter/nft_masq.c
@@ -26,6 +26,11 @@ int nft_masq_init(const struct nft_ctx *ctx,
26 const struct nlattr * const tb[]) 26 const struct nlattr * const tb[])
27{ 27{
28 struct nft_masq *priv = nft_expr_priv(expr); 28 struct nft_masq *priv = nft_expr_priv(expr);
29 int err;
30
31 err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
32 if (err < 0)
33 return err;
29 34
30 if (tb[NFTA_MASQ_FLAGS] == NULL) 35 if (tb[NFTA_MASQ_FLAGS] == NULL)
31 return 0; 36 return 0;
@@ -55,5 +60,12 @@ nla_put_failure:
55} 60}
56EXPORT_SYMBOL_GPL(nft_masq_dump); 61EXPORT_SYMBOL_GPL(nft_masq_dump);
57 62
63int nft_masq_validate(const struct nft_ctx *ctx, const struct nft_expr *expr,
64 const struct nft_data **data)
65{
66 return nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
67}
68EXPORT_SYMBOL_GPL(nft_masq_validate);
69
58MODULE_LICENSE("GPL"); 70MODULE_LICENSE("GPL");
59MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo.borrero.glez@gmail.com>"); 71MODULE_AUTHOR("Arturo Borrero Gonzalez <arturo.borrero.glez@gmail.com>");
diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c
index 799550b476fb..afe2b0b45ec4 100644
--- a/net/netfilter/nft_nat.c
+++ b/net/netfilter/nft_nat.c
@@ -95,7 +95,13 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
95 u32 family; 95 u32 family;
96 int err; 96 int err;
97 97
98 if (tb[NFTA_NAT_TYPE] == NULL) 98 err = nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
99 if (err < 0)
100 return err;
101
102 if (tb[NFTA_NAT_TYPE] == NULL ||
103 (tb[NFTA_NAT_REG_ADDR_MIN] == NULL &&
104 tb[NFTA_NAT_REG_PROTO_MIN] == NULL))
99 return -EINVAL; 105 return -EINVAL;
100 106
101 switch (ntohl(nla_get_be32(tb[NFTA_NAT_TYPE]))) { 107 switch (ntohl(nla_get_be32(tb[NFTA_NAT_TYPE]))) {
@@ -120,38 +126,44 @@ static int nft_nat_init(const struct nft_ctx *ctx, const struct nft_expr *expr,
120 priv->family = family; 126 priv->family = family;
121 127
122 if (tb[NFTA_NAT_REG_ADDR_MIN]) { 128 if (tb[NFTA_NAT_REG_ADDR_MIN]) {
123 priv->sreg_addr_min = ntohl(nla_get_be32( 129 priv->sreg_addr_min =
124 tb[NFTA_NAT_REG_ADDR_MIN])); 130 ntohl(nla_get_be32(tb[NFTA_NAT_REG_ADDR_MIN]));
131
125 err = nft_validate_input_register(priv->sreg_addr_min); 132 err = nft_validate_input_register(priv->sreg_addr_min);
126 if (err < 0) 133 if (err < 0)
127 return err; 134 return err;
128 }
129 135
130 if (tb[NFTA_NAT_REG_ADDR_MAX]) { 136 if (tb[NFTA_NAT_REG_ADDR_MAX]) {
131 priv->sreg_addr_max = ntohl(nla_get_be32( 137 priv->sreg_addr_max =
132 tb[NFTA_NAT_REG_ADDR_MAX])); 138 ntohl(nla_get_be32(tb[NFTA_NAT_REG_ADDR_MAX]));
133 err = nft_validate_input_register(priv->sreg_addr_max); 139
134 if (err < 0) 140 err = nft_validate_input_register(priv->sreg_addr_max);
135 return err; 141 if (err < 0)
136 } else 142 return err;
137 priv->sreg_addr_max = priv->sreg_addr_min; 143 } else {
144 priv->sreg_addr_max = priv->sreg_addr_min;
145 }
146 }
138 147
139 if (tb[NFTA_NAT_REG_PROTO_MIN]) { 148 if (tb[NFTA_NAT_REG_PROTO_MIN]) {
140 priv->sreg_proto_min = ntohl(nla_get_be32( 149 priv->sreg_proto_min =
141 tb[NFTA_NAT_REG_PROTO_MIN])); 150 ntohl(nla_get_be32(tb[NFTA_NAT_REG_PROTO_MIN]));
151
142 err = nft_validate_input_register(priv->sreg_proto_min); 152 err = nft_validate_input_register(priv->sreg_proto_min);
143 if (err < 0) 153 if (err < 0)
144 return err; 154 return err;
145 }
146 155
147 if (tb[NFTA_NAT_REG_PROTO_MAX]) { 156 if (tb[NFTA_NAT_REG_PROTO_MAX]) {
148 priv->sreg_proto_max = ntohl(nla_get_be32( 157 priv->sreg_proto_max =
149 tb[NFTA_NAT_REG_PROTO_MAX])); 158 ntohl(nla_get_be32(tb[NFTA_NAT_REG_PROTO_MAX]));
150 err = nft_validate_input_register(priv->sreg_proto_max); 159
151 if (err < 0) 160 err = nft_validate_input_register(priv->sreg_proto_max);
152 return err; 161 if (err < 0)
153 } else 162 return err;
154 priv->sreg_proto_max = priv->sreg_proto_min; 163 } else {
164 priv->sreg_proto_max = priv->sreg_proto_min;
165 }
166 }
155 167
156 if (tb[NFTA_NAT_FLAGS]) { 168 if (tb[NFTA_NAT_FLAGS]) {
157 priv->flags = ntohl(nla_get_be32(tb[NFTA_NAT_FLAGS])); 169 priv->flags = ntohl(nla_get_be32(tb[NFTA_NAT_FLAGS]));
@@ -179,17 +191,19 @@ static int nft_nat_dump(struct sk_buff *skb, const struct nft_expr *expr)
179 191
180 if (nla_put_be32(skb, NFTA_NAT_FAMILY, htonl(priv->family))) 192 if (nla_put_be32(skb, NFTA_NAT_FAMILY, htonl(priv->family)))
181 goto nla_put_failure; 193 goto nla_put_failure;
182 if (nla_put_be32(skb, 194
183 NFTA_NAT_REG_ADDR_MIN, htonl(priv->sreg_addr_min))) 195 if (priv->sreg_addr_min) {
184 goto nla_put_failure; 196 if (nla_put_be32(skb, NFTA_NAT_REG_ADDR_MIN,
185 if (nla_put_be32(skb, 197 htonl(priv->sreg_addr_min)) ||
186 NFTA_NAT_REG_ADDR_MAX, htonl(priv->sreg_addr_max))) 198 nla_put_be32(skb, NFTA_NAT_REG_ADDR_MAX,
187 goto nla_put_failure; 199 htonl(priv->sreg_addr_max)))
200 goto nla_put_failure;
201 }
202
188 if (priv->sreg_proto_min) { 203 if (priv->sreg_proto_min) {
189 if (nla_put_be32(skb, NFTA_NAT_REG_PROTO_MIN, 204 if (nla_put_be32(skb, NFTA_NAT_REG_PROTO_MIN,
190 htonl(priv->sreg_proto_min))) 205 htonl(priv->sreg_proto_min)) ||
191 goto nla_put_failure; 206 nla_put_be32(skb, NFTA_NAT_REG_PROTO_MAX,
192 if (nla_put_be32(skb, NFTA_NAT_REG_PROTO_MAX,
193 htonl(priv->sreg_proto_max))) 207 htonl(priv->sreg_proto_max)))
194 goto nla_put_failure; 208 goto nla_put_failure;
195 } 209 }
@@ -205,6 +219,13 @@ nla_put_failure:
205 return -1; 219 return -1;
206} 220}
207 221
222static int nft_nat_validate(const struct nft_ctx *ctx,
223 const struct nft_expr *expr,
224 const struct nft_data **data)
225{
226 return nft_chain_validate_dependency(ctx->chain, NFT_CHAIN_T_NAT);
227}
228
208static struct nft_expr_type nft_nat_type; 229static struct nft_expr_type nft_nat_type;
209static const struct nft_expr_ops nft_nat_ops = { 230static const struct nft_expr_ops nft_nat_ops = {
210 .type = &nft_nat_type, 231 .type = &nft_nat_type,
@@ -212,6 +233,7 @@ static const struct nft_expr_ops nft_nat_ops = {
212 .eval = nft_nat_eval, 233 .eval = nft_nat_eval,
213 .init = nft_nat_init, 234 .init = nft_nat_init,
214 .dump = nft_nat_dump, 235 .dump = nft_nat_dump,
236 .validate = nft_nat_validate,
215}; 237};
216 238
217static struct nft_expr_type nft_nat_type __read_mostly = { 239static struct nft_expr_type nft_nat_type __read_mostly = {
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 7a186e74b1b3..0007b8180397 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -96,6 +96,14 @@ static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
96static int netlink_dump(struct sock *sk); 96static int netlink_dump(struct sock *sk);
97static void netlink_skb_destructor(struct sk_buff *skb); 97static void netlink_skb_destructor(struct sk_buff *skb);
98 98
99/* nl_table locking explained:
100 * Lookup and traversal are protected with nl_sk_hash_lock or nl_table_lock
101 * combined with an RCU read-side lock. Insertion and removal are protected
102 * with nl_sk_hash_lock while using RCU list modification primitives and may
103 * run in parallel to nl_table_lock protected lookups. Destruction of the
104 * Netlink socket may only occur *after* nl_table_lock has been acquired
105 * either during or after the socket has been removed from the list.
106 */
99DEFINE_RWLOCK(nl_table_lock); 107DEFINE_RWLOCK(nl_table_lock);
100EXPORT_SYMBOL_GPL(nl_table_lock); 108EXPORT_SYMBOL_GPL(nl_table_lock);
101static atomic_t nl_table_users = ATOMIC_INIT(0); 109static atomic_t nl_table_users = ATOMIC_INIT(0);
@@ -109,10 +117,10 @@ EXPORT_SYMBOL_GPL(nl_sk_hash_lock);
109static int lockdep_nl_sk_hash_is_held(void) 117static int lockdep_nl_sk_hash_is_held(void)
110{ 118{
111#ifdef CONFIG_LOCKDEP 119#ifdef CONFIG_LOCKDEP
112 return (debug_locks) ? lockdep_is_held(&nl_sk_hash_lock) : 1; 120 if (debug_locks)
113#else 121 return lockdep_is_held(&nl_sk_hash_lock) || lockdep_is_held(&nl_table_lock);
114 return 1;
115#endif 122#endif
123 return 1;
116} 124}
117 125
118static ATOMIC_NOTIFIER_HEAD(netlink_chain); 126static ATOMIC_NOTIFIER_HEAD(netlink_chain);
@@ -1028,11 +1036,13 @@ static struct sock *netlink_lookup(struct net *net, int protocol, u32 portid)
1028 struct netlink_table *table = &nl_table[protocol]; 1036 struct netlink_table *table = &nl_table[protocol];
1029 struct sock *sk; 1037 struct sock *sk;
1030 1038
1039 read_lock(&nl_table_lock);
1031 rcu_read_lock(); 1040 rcu_read_lock();
1032 sk = __netlink_lookup(table, portid, net); 1041 sk = __netlink_lookup(table, portid, net);
1033 if (sk) 1042 if (sk)
1034 sock_hold(sk); 1043 sock_hold(sk);
1035 rcu_read_unlock(); 1044 rcu_read_unlock();
1045 read_unlock(&nl_table_lock);
1036 1046
1037 return sk; 1047 return sk;
1038} 1048}
@@ -1257,9 +1267,6 @@ static int netlink_release(struct socket *sock)
1257 } 1267 }
1258 netlink_table_ungrab(); 1268 netlink_table_ungrab();
1259 1269
1260 /* Wait for readers to complete */
1261 synchronize_net();
1262
1263 kfree(nlk->groups); 1270 kfree(nlk->groups);
1264 nlk->groups = NULL; 1271 nlk->groups = NULL;
1265 1272
@@ -1281,6 +1288,7 @@ static int netlink_autobind(struct socket *sock)
1281 1288
1282retry: 1289retry:
1283 cond_resched(); 1290 cond_resched();
1291 netlink_table_grab();
1284 rcu_read_lock(); 1292 rcu_read_lock();
1285 if (__netlink_lookup(table, portid, net)) { 1293 if (__netlink_lookup(table, portid, net)) {
1286 /* Bind collision, search negative portid values. */ 1294 /* Bind collision, search negative portid values. */
@@ -1288,9 +1296,11 @@ retry:
1288 if (rover > -4097) 1296 if (rover > -4097)
1289 rover = -4097; 1297 rover = -4097;
1290 rcu_read_unlock(); 1298 rcu_read_unlock();
1299 netlink_table_ungrab();
1291 goto retry; 1300 goto retry;
1292 } 1301 }
1293 rcu_read_unlock(); 1302 rcu_read_unlock();
1303 netlink_table_ungrab();
1294 1304
1295 err = netlink_insert(sk, net, portid); 1305 err = netlink_insert(sk, net, portid);
1296 if (err == -EADDRINUSE) 1306 if (err == -EADDRINUSE)
@@ -1430,7 +1440,7 @@ static void netlink_unbind(int group, long unsigned int groups,
1430 return; 1440 return;
1431 1441
1432 for (undo = 0; undo < group; undo++) 1442 for (undo = 0; undo < group; undo++)
1433 if (test_bit(group, &groups)) 1443 if (test_bit(undo, &groups))
1434 nlk->netlink_unbind(undo); 1444 nlk->netlink_unbind(undo);
1435} 1445}
1436 1446
@@ -1482,7 +1492,7 @@ static int netlink_bind(struct socket *sock, struct sockaddr *addr,
1482 netlink_insert(sk, net, nladdr->nl_pid) : 1492 netlink_insert(sk, net, nladdr->nl_pid) :
1483 netlink_autobind(sock); 1493 netlink_autobind(sock);
1484 if (err) { 1494 if (err) {
1485 netlink_unbind(nlk->ngroups - 1, groups, nlk); 1495 netlink_unbind(nlk->ngroups, groups, nlk);
1486 return err; 1496 return err;
1487 } 1497 }
1488 } 1498 }
@@ -2499,6 +2509,7 @@ __netlink_kernel_create(struct net *net, int unit, struct module *module,
2499 nl_table[unit].module = module; 2509 nl_table[unit].module = module;
2500 if (cfg) { 2510 if (cfg) {
2501 nl_table[unit].bind = cfg->bind; 2511 nl_table[unit].bind = cfg->bind;
2512 nl_table[unit].unbind = cfg->unbind;
2502 nl_table[unit].flags = cfg->flags; 2513 nl_table[unit].flags = cfg->flags;
2503 if (cfg->compare) 2514 if (cfg->compare)
2504 nl_table[unit].compare = cfg->compare; 2515 nl_table[unit].compare = cfg->compare;
@@ -2921,14 +2932,16 @@ static struct sock *netlink_seq_socket_idx(struct seq_file *seq, loff_t pos)
2921} 2932}
2922 2933
2923static void *netlink_seq_start(struct seq_file *seq, loff_t *pos) 2934static void *netlink_seq_start(struct seq_file *seq, loff_t *pos)
2924 __acquires(RCU) 2935 __acquires(nl_table_lock) __acquires(RCU)
2925{ 2936{
2937 read_lock(&nl_table_lock);
2926 rcu_read_lock(); 2938 rcu_read_lock();
2927 return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN; 2939 return *pos ? netlink_seq_socket_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2928} 2940}
2929 2941
2930static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos) 2942static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2931{ 2943{
2944 struct rhashtable *ht;
2932 struct netlink_sock *nlk; 2945 struct netlink_sock *nlk;
2933 struct nl_seq_iter *iter; 2946 struct nl_seq_iter *iter;
2934 struct net *net; 2947 struct net *net;
@@ -2943,19 +2956,19 @@ static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2943 iter = seq->private; 2956 iter = seq->private;
2944 nlk = v; 2957 nlk = v;
2945 2958
2946 rht_for_each_entry_rcu(nlk, nlk->node.next, node) 2959 i = iter->link;
2960 ht = &nl_table[i].hash;
2961 rht_for_each_entry(nlk, nlk->node.next, ht, node)
2947 if (net_eq(sock_net((struct sock *)nlk), net)) 2962 if (net_eq(sock_net((struct sock *)nlk), net))
2948 return nlk; 2963 return nlk;
2949 2964
2950 i = iter->link;
2951 j = iter->hash_idx + 1; 2965 j = iter->hash_idx + 1;
2952 2966
2953 do { 2967 do {
2954 struct rhashtable *ht = &nl_table[i].hash;
2955 const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht); 2968 const struct bucket_table *tbl = rht_dereference_rcu(ht->tbl, ht);
2956 2969
2957 for (; j < tbl->size; j++) { 2970 for (; j < tbl->size; j++) {
2958 rht_for_each_entry_rcu(nlk, tbl->buckets[j], node) { 2971 rht_for_each_entry(nlk, tbl->buckets[j], ht, node) {
2959 if (net_eq(sock_net((struct sock *)nlk), net)) { 2972 if (net_eq(sock_net((struct sock *)nlk), net)) {
2960 iter->link = i; 2973 iter->link = i;
2961 iter->hash_idx = j; 2974 iter->hash_idx = j;
@@ -2971,9 +2984,10 @@ static void *netlink_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2971} 2984}
2972 2985
2973static void netlink_seq_stop(struct seq_file *seq, void *v) 2986static void netlink_seq_stop(struct seq_file *seq, void *v)
2974 __releases(RCU) 2987 __releases(RCU) __releases(nl_table_lock)
2975{ 2988{
2976 rcu_read_unlock(); 2989 rcu_read_unlock();
2990 read_unlock(&nl_table_lock);
2977} 2991}
2978 2992
2979 2993
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 006886dbee36..8c4229b11c34 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -246,11 +246,11 @@ static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
246{ 246{
247 int transport_len = skb->len - skb_transport_offset(skb); 247 int transport_len = skb->len - skb_transport_offset(skb);
248 248
249 if (l4_proto == IPPROTO_TCP) { 249 if (l4_proto == NEXTHDR_TCP) {
250 if (likely(transport_len >= sizeof(struct tcphdr))) 250 if (likely(transport_len >= sizeof(struct tcphdr)))
251 inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb, 251 inet_proto_csum_replace16(&tcp_hdr(skb)->check, skb,
252 addr, new_addr, 1); 252 addr, new_addr, 1);
253 } else if (l4_proto == IPPROTO_UDP) { 253 } else if (l4_proto == NEXTHDR_UDP) {
254 if (likely(transport_len >= sizeof(struct udphdr))) { 254 if (likely(transport_len >= sizeof(struct udphdr))) {
255 struct udphdr *uh = udp_hdr(skb); 255 struct udphdr *uh = udp_hdr(skb);
256 256
@@ -261,6 +261,10 @@ static void update_ipv6_checksum(struct sk_buff *skb, u8 l4_proto,
261 uh->check = CSUM_MANGLED_0; 261 uh->check = CSUM_MANGLED_0;
262 } 262 }
263 } 263 }
264 } else if (l4_proto == NEXTHDR_ICMP) {
265 if (likely(transport_len >= sizeof(struct icmp6hdr)))
266 inet_proto_csum_replace16(&icmp6_hdr(skb)->icmp6_cksum,
267 skb, addr, new_addr, 1);
264 } 268 }
265} 269}
266 270
@@ -722,8 +726,6 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
722 726
723 case OVS_ACTION_ATTR_SAMPLE: 727 case OVS_ACTION_ATTR_SAMPLE:
724 err = sample(dp, skb, key, a); 728 err = sample(dp, skb, key, a);
725 if (unlikely(err)) /* skb already freed. */
726 return err;
727 break; 729 break;
728 } 730 }
729 731
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 2e31d9e7f4dc..f9e556b56086 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -324,6 +324,8 @@ static int queue_gso_packets(struct datapath *dp, struct sk_buff *skb,
324 segs = __skb_gso_segment(skb, NETIF_F_SG, false); 324 segs = __skb_gso_segment(skb, NETIF_F_SG, false);
325 if (IS_ERR(segs)) 325 if (IS_ERR(segs))
326 return PTR_ERR(segs); 326 return PTR_ERR(segs);
327 if (segs == NULL)
328 return -EINVAL;
327 329
328 /* Queue all of the segments. */ 330 /* Queue all of the segments. */
329 skb = segs; 331 skb = segs;
@@ -1263,7 +1265,7 @@ static size_t ovs_dp_cmd_msg_size(void)
1263 return msgsize; 1265 return msgsize;
1264} 1266}
1265 1267
1266/* Called with ovs_mutex or RCU read lock. */ 1268/* Called with ovs_mutex. */
1267static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb, 1269static int ovs_dp_cmd_fill_info(struct datapath *dp, struct sk_buff *skb,
1268 u32 portid, u32 seq, u32 flags, u8 cmd) 1270 u32 portid, u32 seq, u32 flags, u8 cmd)
1269{ 1271{
@@ -1553,7 +1555,7 @@ static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
1553 if (!reply) 1555 if (!reply)
1554 return -ENOMEM; 1556 return -ENOMEM;
1555 1557
1556 rcu_read_lock(); 1558 ovs_lock();
1557 dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs); 1559 dp = lookup_datapath(sock_net(skb->sk), info->userhdr, info->attrs);
1558 if (IS_ERR(dp)) { 1560 if (IS_ERR(dp)) {
1559 err = PTR_ERR(dp); 1561 err = PTR_ERR(dp);
@@ -1562,12 +1564,12 @@ static int ovs_dp_cmd_get(struct sk_buff *skb, struct genl_info *info)
1562 err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid, 1564 err = ovs_dp_cmd_fill_info(dp, reply, info->snd_portid,
1563 info->snd_seq, 0, OVS_DP_CMD_NEW); 1565 info->snd_seq, 0, OVS_DP_CMD_NEW);
1564 BUG_ON(err < 0); 1566 BUG_ON(err < 0);
1565 rcu_read_unlock(); 1567 ovs_unlock();
1566 1568
1567 return genlmsg_reply(reply, info); 1569 return genlmsg_reply(reply, info);
1568 1570
1569err_unlock_free: 1571err_unlock_free:
1570 rcu_read_unlock(); 1572 ovs_unlock();
1571 kfree_skb(reply); 1573 kfree_skb(reply);
1572 return err; 1574 return err;
1573} 1575}
@@ -1579,8 +1581,8 @@ static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1579 int skip = cb->args[0]; 1581 int skip = cb->args[0];
1580 int i = 0; 1582 int i = 0;
1581 1583
1582 rcu_read_lock(); 1584 ovs_lock();
1583 list_for_each_entry_rcu(dp, &ovs_net->dps, list_node) { 1585 list_for_each_entry(dp, &ovs_net->dps, list_node) {
1584 if (i >= skip && 1586 if (i >= skip &&
1585 ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid, 1587 ovs_dp_cmd_fill_info(dp, skb, NETLINK_CB(cb->skb).portid,
1586 cb->nlh->nlmsg_seq, NLM_F_MULTI, 1588 cb->nlh->nlmsg_seq, NLM_F_MULTI,
@@ -1588,7 +1590,7 @@ static int ovs_dp_cmd_dump(struct sk_buff *skb, struct netlink_callback *cb)
1588 break; 1590 break;
1589 i++; 1591 i++;
1590 } 1592 }
1591 rcu_read_unlock(); 1593 ovs_unlock();
1592 1594
1593 cb->args[0] = i; 1595 cb->args[0] = i;
1594 1596
diff --git a/net/openvswitch/flow_netlink.c b/net/openvswitch/flow_netlink.c
index 939bcb32100f..089b195c064a 100644
--- a/net/openvswitch/flow_netlink.c
+++ b/net/openvswitch/flow_netlink.c
@@ -145,7 +145,7 @@ static bool match_validate(const struct sw_flow_match *match,
145 if (match->key->eth.type == htons(ETH_P_ARP) 145 if (match->key->eth.type == htons(ETH_P_ARP)
146 || match->key->eth.type == htons(ETH_P_RARP)) { 146 || match->key->eth.type == htons(ETH_P_RARP)) {
147 key_expected |= 1 << OVS_KEY_ATTR_ARP; 147 key_expected |= 1 << OVS_KEY_ATTR_ARP;
148 if (match->mask && (match->mask->key.eth.type == htons(0xffff))) 148 if (match->mask && (match->mask->key.tp.src == htons(0xff)))
149 mask_allowed |= 1 << OVS_KEY_ATTR_ARP; 149 mask_allowed |= 1 << OVS_KEY_ATTR_ARP;
150 } 150 }
151 151
@@ -689,6 +689,13 @@ static int ovs_key_from_nlattrs(struct sw_flow_match *match, u64 attrs,
689 ipv6_key->ipv6_frag, OVS_FRAG_TYPE_MAX); 689 ipv6_key->ipv6_frag, OVS_FRAG_TYPE_MAX);
690 return -EINVAL; 690 return -EINVAL;
691 } 691 }
692
693 if (!is_mask && ipv6_key->ipv6_label & htonl(0xFFF00000)) {
694 OVS_NLERR("IPv6 flow label %x is out of range (max=%x).\n",
695 ntohl(ipv6_key->ipv6_label), (1 << 20) - 1);
696 return -EINVAL;
697 }
698
692 SW_FLOW_KEY_PUT(match, ipv6.label, 699 SW_FLOW_KEY_PUT(match, ipv6.label,
693 ipv6_key->ipv6_label, is_mask); 700 ipv6_key->ipv6_label, is_mask);
694 SW_FLOW_KEY_PUT(match, ip.proto, 701 SW_FLOW_KEY_PUT(match, ip.proto,
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 87d20f48ff06..07c04a841ba0 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -378,7 +378,7 @@ static void unregister_prot_hook(struct sock *sk, bool sync)
378 __unregister_prot_hook(sk, sync); 378 __unregister_prot_hook(sk, sync);
379} 379}
380 380
381static inline __pure struct page *pgv_to_page(void *addr) 381static inline struct page * __pure pgv_to_page(void *addr)
382{ 382{
383 if (is_vmalloc_addr(addr)) 383 if (is_vmalloc_addr(addr))
384 return vmalloc_to_page(addr); 384 return vmalloc_to_page(addr);
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index 2cf61b3e633c..76f402e05bd6 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -947,7 +947,7 @@ qdisc_create(struct net_device *dev, struct netdev_queue *dev_queue,
947 if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) { 947 if (!ops->init || (err = ops->init(sch, tca[TCA_OPTIONS])) == 0) {
948 if (qdisc_is_percpu_stats(sch)) { 948 if (qdisc_is_percpu_stats(sch)) {
949 sch->cpu_bstats = 949 sch->cpu_bstats =
950 alloc_percpu(struct gnet_stats_basic_cpu); 950 netdev_alloc_pcpu_stats(struct gnet_stats_basic_cpu);
951 if (!sch->cpu_bstats) 951 if (!sch->cpu_bstats)
952 goto err_out4; 952 goto err_out4;
953 953
diff --git a/net/sched/sch_pie.c b/net/sched/sch_pie.c
index 33d7a98a7a97..b783a446d884 100644
--- a/net/sched/sch_pie.c
+++ b/net/sched/sch_pie.c
@@ -445,7 +445,6 @@ static int pie_init(struct Qdisc *sch, struct nlattr *opt)
445 sch->limit = q->params.limit; 445 sch->limit = q->params.limit;
446 446
447 setup_timer(&q->adapt_timer, pie_timer, (unsigned long)sch); 447 setup_timer(&q->adapt_timer, pie_timer, (unsigned long)sch);
448 mod_timer(&q->adapt_timer, jiffies + HZ / 2);
449 448
450 if (opt) { 449 if (opt) {
451 int err = pie_change(sch, opt); 450 int err = pie_change(sch, opt);
@@ -454,6 +453,7 @@ static int pie_init(struct Qdisc *sch, struct nlattr *opt)
454 return err; 453 return err;
455 } 454 }
456 455
456 mod_timer(&q->adapt_timer, jiffies + HZ / 2);
457 return 0; 457 return 0;
458} 458}
459 459
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index 0e8529113dc5..fb7976aee61c 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -862,8 +862,6 @@ int sctp_auth_set_key(struct sctp_endpoint *ep,
862 list_add(&cur_key->key_list, sh_keys); 862 list_add(&cur_key->key_list, sh_keys);
863 863
864 cur_key->key = key; 864 cur_key->key = key;
865 sctp_auth_key_hold(key);
866
867 return 0; 865 return 0;
868nomem: 866nomem:
869 if (!replace) 867 if (!replace)
diff --git a/net/sctp/sm_make_chunk.c b/net/sctp/sm_make_chunk.c
index ab734be8cb20..9f32741abb1c 100644
--- a/net/sctp/sm_make_chunk.c
+++ b/net/sctp/sm_make_chunk.c
@@ -2609,6 +2609,9 @@ do_addr_param:
2609 addr_param = param.v + sizeof(sctp_addip_param_t); 2609 addr_param = param.v + sizeof(sctp_addip_param_t);
2610 2610
2611 af = sctp_get_af_specific(param_type2af(param.p->type)); 2611 af = sctp_get_af_specific(param_type2af(param.p->type));
2612 if (af == NULL)
2613 break;
2614
2612 af->from_addr_param(&addr, addr_param, 2615 af->from_addr_param(&addr, addr_param,
2613 htons(asoc->peer.port), 0); 2616 htons(asoc->peer.port), 0);
2614 2617
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index afb292cd797d..53ed8d3f8897 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -1353,6 +1353,7 @@ gss_stringify_acceptor(struct rpc_cred *cred)
1353 char *string = NULL; 1353 char *string = NULL;
1354 struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base); 1354 struct gss_cred *gss_cred = container_of(cred, struct gss_cred, gc_base);
1355 struct gss_cl_ctx *ctx; 1355 struct gss_cl_ctx *ctx;
1356 unsigned int len;
1356 struct xdr_netobj *acceptor; 1357 struct xdr_netobj *acceptor;
1357 1358
1358 rcu_read_lock(); 1359 rcu_read_lock();
@@ -1360,15 +1361,39 @@ gss_stringify_acceptor(struct rpc_cred *cred)
1360 if (!ctx) 1361 if (!ctx)
1361 goto out; 1362 goto out;
1362 1363
1363 acceptor = &ctx->gc_acceptor; 1364 len = ctx->gc_acceptor.len;
1365 rcu_read_unlock();
1364 1366
1365 /* no point if there's no string */ 1367 /* no point if there's no string */
1366 if (!acceptor->len) 1368 if (!len)
1367 goto out; 1369 return NULL;
1368 1370realloc:
1369 string = kmalloc(acceptor->len + 1, GFP_KERNEL); 1371 string = kmalloc(len + 1, GFP_KERNEL);
1370 if (!string) 1372 if (!string)
1373 return NULL;
1374
1375 rcu_read_lock();
1376 ctx = rcu_dereference(gss_cred->gc_ctx);
1377
1378 /* did the ctx disappear or was it replaced by one with no acceptor? */
1379 if (!ctx || !ctx->gc_acceptor.len) {
1380 kfree(string);
1381 string = NULL;
1371 goto out; 1382 goto out;
1383 }
1384
1385 acceptor = &ctx->gc_acceptor;
1386
1387 /*
1388 * Did we find a new acceptor that's longer than the original? Allocate
1389 * a longer buffer and try again.
1390 */
1391 if (len < acceptor->len) {
1392 len = acceptor->len;
1393 rcu_read_unlock();
1394 kfree(string);
1395 goto realloc;
1396 }
1372 1397
1373 memcpy(string, acceptor->data, acceptor->len); 1398 memcpy(string, acceptor->data, acceptor->len);
1374 string[acceptor->len] = '\0'; 1399 string[acceptor->len] = '\0';
diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c
index 3f959c681885..f9c052d508f0 100644
--- a/net/sunrpc/svcsock.c
+++ b/net/sunrpc/svcsock.c
@@ -1019,17 +1019,12 @@ static int receive_cb_reply(struct svc_sock *svsk, struct svc_rqst *rqstp)
1019 xid = *p++; 1019 xid = *p++;
1020 calldir = *p; 1020 calldir = *p;
1021 1021
1022 if (bc_xprt) 1022 if (!bc_xprt)
1023 req = xprt_lookup_rqst(bc_xprt, xid);
1024
1025 if (!req) {
1026 printk(KERN_NOTICE
1027 "%s: Got unrecognized reply: "
1028 "calldir 0x%x xpt_bc_xprt %p xid %08x\n",
1029 __func__, ntohl(calldir),
1030 bc_xprt, ntohl(xid));
1031 return -EAGAIN; 1023 return -EAGAIN;
1032 } 1024 spin_lock_bh(&bc_xprt->transport_lock);
1025 req = xprt_lookup_rqst(bc_xprt, xid);
1026 if (!req)
1027 goto unlock_notfound;
1033 1028
1034 memcpy(&req->rq_private_buf, &req->rq_rcv_buf, sizeof(struct xdr_buf)); 1029 memcpy(&req->rq_private_buf, &req->rq_rcv_buf, sizeof(struct xdr_buf));
1035 /* 1030 /*
@@ -1040,11 +1035,21 @@ static int receive_cb_reply(struct svc_sock *svsk, struct svc_rqst *rqstp)
1040 dst = &req->rq_private_buf.head[0]; 1035 dst = &req->rq_private_buf.head[0];
1041 src = &rqstp->rq_arg.head[0]; 1036 src = &rqstp->rq_arg.head[0];
1042 if (dst->iov_len < src->iov_len) 1037 if (dst->iov_len < src->iov_len)
1043 return -EAGAIN; /* whatever; just giving up. */ 1038 goto unlock_eagain; /* whatever; just giving up. */
1044 memcpy(dst->iov_base, src->iov_base, src->iov_len); 1039 memcpy(dst->iov_base, src->iov_base, src->iov_len);
1045 xprt_complete_rqst(req->rq_task, rqstp->rq_arg.len); 1040 xprt_complete_rqst(req->rq_task, rqstp->rq_arg.len);
1046 rqstp->rq_arg.len = 0; 1041 rqstp->rq_arg.len = 0;
1042 spin_unlock_bh(&bc_xprt->transport_lock);
1047 return 0; 1043 return 0;
1044unlock_notfound:
1045 printk(KERN_NOTICE
1046 "%s: Got unrecognized reply: "
1047 "calldir 0x%x xpt_bc_xprt %p xid %08x\n",
1048 __func__, ntohl(calldir),
1049 bc_xprt, ntohl(xid));
1050unlock_eagain:
1051 spin_unlock_bh(&bc_xprt->transport_lock);
1052 return -EAGAIN;
1048} 1053}
1049 1054
1050static int copy_pages_to_kvecs(struct kvec *vec, struct page **pages, int len) 1055static int copy_pages_to_kvecs(struct kvec *vec, struct page **pages, int len)
diff --git a/net/tipc/node.c b/net/tipc/node.c
index 90cee4a6fce4..5781634e957d 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -219,11 +219,11 @@ void tipc_node_abort_sock_conns(struct list_head *conns)
219void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr) 219void tipc_node_link_up(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
220{ 220{
221 struct tipc_link **active = &n_ptr->active_links[0]; 221 struct tipc_link **active = &n_ptr->active_links[0];
222 u32 addr = n_ptr->addr;
223 222
224 n_ptr->working_links++; 223 n_ptr->working_links++;
225 tipc_nametbl_publish(TIPC_LINK_STATE, addr, addr, TIPC_NODE_SCOPE, 224 n_ptr->action_flags |= TIPC_NOTIFY_LINK_UP;
226 l_ptr->bearer_id, addr); 225 n_ptr->link_id = l_ptr->peer_bearer_id << 16 | l_ptr->bearer_id;
226
227 pr_info("Established link <%s> on network plane %c\n", 227 pr_info("Established link <%s> on network plane %c\n",
228 l_ptr->name, l_ptr->net_plane); 228 l_ptr->name, l_ptr->net_plane);
229 229
@@ -284,10 +284,10 @@ static void node_select_active_links(struct tipc_node *n_ptr)
284void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr) 284void tipc_node_link_down(struct tipc_node *n_ptr, struct tipc_link *l_ptr)
285{ 285{
286 struct tipc_link **active; 286 struct tipc_link **active;
287 u32 addr = n_ptr->addr;
288 287
289 n_ptr->working_links--; 288 n_ptr->working_links--;
290 tipc_nametbl_withdraw(TIPC_LINK_STATE, addr, l_ptr->bearer_id, addr); 289 n_ptr->action_flags |= TIPC_NOTIFY_LINK_DOWN;
290 n_ptr->link_id = l_ptr->peer_bearer_id << 16 | l_ptr->bearer_id;
291 291
292 if (!tipc_link_is_active(l_ptr)) { 292 if (!tipc_link_is_active(l_ptr)) {
293 pr_info("Lost standby link <%s> on network plane %c\n", 293 pr_info("Lost standby link <%s> on network plane %c\n",
@@ -552,28 +552,30 @@ void tipc_node_unlock(struct tipc_node *node)
552 LIST_HEAD(conn_sks); 552 LIST_HEAD(conn_sks);
553 struct sk_buff_head waiting_sks; 553 struct sk_buff_head waiting_sks;
554 u32 addr = 0; 554 u32 addr = 0;
555 unsigned int flags = node->action_flags; 555 int flags = node->action_flags;
556 u32 link_id = 0;
556 557
557 if (likely(!node->action_flags)) { 558 if (likely(!flags)) {
558 spin_unlock_bh(&node->lock); 559 spin_unlock_bh(&node->lock);
559 return; 560 return;
560 } 561 }
561 562
563 addr = node->addr;
564 link_id = node->link_id;
562 __skb_queue_head_init(&waiting_sks); 565 __skb_queue_head_init(&waiting_sks);
563 if (node->action_flags & TIPC_WAKEUP_USERS) { 566
567 if (flags & TIPC_WAKEUP_USERS)
564 skb_queue_splice_init(&node->waiting_sks, &waiting_sks); 568 skb_queue_splice_init(&node->waiting_sks, &waiting_sks);
565 node->action_flags &= ~TIPC_WAKEUP_USERS; 569
566 } 570 if (flags & TIPC_NOTIFY_NODE_DOWN) {
567 if (node->action_flags & TIPC_NOTIFY_NODE_DOWN) {
568 list_replace_init(&node->nsub, &nsub_list); 571 list_replace_init(&node->nsub, &nsub_list);
569 list_replace_init(&node->conn_sks, &conn_sks); 572 list_replace_init(&node->conn_sks, &conn_sks);
570 node->action_flags &= ~TIPC_NOTIFY_NODE_DOWN;
571 } 573 }
572 if (node->action_flags & TIPC_NOTIFY_NODE_UP) { 574 node->action_flags &= ~(TIPC_WAKEUP_USERS | TIPC_NOTIFY_NODE_DOWN |
573 node->action_flags &= ~TIPC_NOTIFY_NODE_UP; 575 TIPC_NOTIFY_NODE_UP | TIPC_NOTIFY_LINK_UP |
574 addr = node->addr; 576 TIPC_NOTIFY_LINK_DOWN |
575 } 577 TIPC_WAKEUP_BCAST_USERS);
576 node->action_flags &= ~TIPC_WAKEUP_BCAST_USERS; 578
577 spin_unlock_bh(&node->lock); 579 spin_unlock_bh(&node->lock);
578 580
579 while (!skb_queue_empty(&waiting_sks)) 581 while (!skb_queue_empty(&waiting_sks))
@@ -588,6 +590,14 @@ void tipc_node_unlock(struct tipc_node *node)
588 if (flags & TIPC_WAKEUP_BCAST_USERS) 590 if (flags & TIPC_WAKEUP_BCAST_USERS)
589 tipc_bclink_wakeup_users(); 591 tipc_bclink_wakeup_users();
590 592
591 if (addr) 593 if (flags & TIPC_NOTIFY_NODE_UP)
592 tipc_named_node_up(addr); 594 tipc_named_node_up(addr);
595
596 if (flags & TIPC_NOTIFY_LINK_UP)
597 tipc_nametbl_publish(TIPC_LINK_STATE, addr, addr,
598 TIPC_NODE_SCOPE, link_id, addr);
599
600 if (flags & TIPC_NOTIFY_LINK_DOWN)
601 tipc_nametbl_withdraw(TIPC_LINK_STATE, addr,
602 link_id, addr);
593} 603}
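The node.c changes stop calling the name-table publish/withdraw helpers while the node lock is held: the link up/down handlers only record TIPC_NOTIFY_LINK_UP/DOWN plus the packed link_id, and tipc_node_unlock() snapshots those flags, drops the lock, and then acts on the snapshot. A sketch of that defer-until-unlock pattern with pthreads and hypothetical flag names:

#include <pthread.h>
#include <stdio.h>

enum {
        NOTIFY_LINK_UP   = 1 << 0,
        NOTIFY_LINK_DOWN = 1 << 1,
};

struct node {
        pthread_mutex_t lock;
        unsigned int action_flags;
        unsigned int link_id;
};

/* Helpers that must NOT be called with node->lock held. */
static void publish_link(unsigned int link_id)  { printf("publish %u\n", link_id); }
static void withdraw_link(unsigned int link_id) { printf("withdraw %u\n", link_id); }

static void node_unlock(struct node *n)
{
        unsigned int flags, link_id;

        /* Snapshot and clear the pending work while still under the lock. */
        flags = n->action_flags;
        link_id = n->link_id;
        n->action_flags &= ~(NOTIFY_LINK_UP | NOTIFY_LINK_DOWN);
        pthread_mutex_unlock(&n->lock);

        /* Act on the snapshot outside the lock. */
        if (flags & NOTIFY_LINK_UP)
                publish_link(link_id);
        if (flags & NOTIFY_LINK_DOWN)
                withdraw_link(link_id);
}

int main(void)
{
        static struct node n = { PTHREAD_MUTEX_INITIALIZER, 0, 0 };

        pthread_mutex_lock(&n.lock);
        n.action_flags |= NOTIFY_LINK_UP;       /* what the link-up handler now records */
        n.link_id = 0x00020001;
        node_unlock(&n);
        return 0;
}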
diff --git a/net/tipc/node.h b/net/tipc/node.h
index 67513c3c852c..04e91458bb29 100644
--- a/net/tipc/node.h
+++ b/net/tipc/node.h
@@ -53,6 +53,7 @@
53 * TIPC_WAIT_OWN_LINKS_DOWN: wait until peer node is declared down 53 * TIPC_WAIT_OWN_LINKS_DOWN: wait until peer node is declared down
54 * TIPC_NOTIFY_NODE_DOWN: notify node is down 54 * TIPC_NOTIFY_NODE_DOWN: notify node is down
55 * TIPC_NOTIFY_NODE_UP: notify node is up 55 * TIPC_NOTIFY_NODE_UP: notify node is up
56 * TIPC_DISTRIBUTE_NAME: publish or withdraw link state name type
56 */ 57 */
57enum { 58enum {
58 TIPC_WAIT_PEER_LINKS_DOWN = (1 << 1), 59 TIPC_WAIT_PEER_LINKS_DOWN = (1 << 1),
@@ -60,7 +61,9 @@ enum {
60 TIPC_NOTIFY_NODE_DOWN = (1 << 3), 61 TIPC_NOTIFY_NODE_DOWN = (1 << 3),
61 TIPC_NOTIFY_NODE_UP = (1 << 4), 62 TIPC_NOTIFY_NODE_UP = (1 << 4),
62 TIPC_WAKEUP_USERS = (1 << 5), 63 TIPC_WAKEUP_USERS = (1 << 5),
63 TIPC_WAKEUP_BCAST_USERS = (1 << 6) 64 TIPC_WAKEUP_BCAST_USERS = (1 << 6),
65 TIPC_NOTIFY_LINK_UP = (1 << 7),
66 TIPC_NOTIFY_LINK_DOWN = (1 << 8)
64}; 67};
65 68
66/** 69/**
@@ -100,6 +103,7 @@ struct tipc_node_bclink {
100 * @working_links: number of working links to node (both active and standby) 103 * @working_links: number of working links to node (both active and standby)
101 * @link_cnt: number of links to node 104 * @link_cnt: number of links to node
102 * @signature: node instance identifier 105 * @signature: node instance identifier
106 * @link_id: local and remote bearer ids of changing link, if any
103 * @nsub: list of "node down" subscriptions monitoring node 107 * @nsub: list of "node down" subscriptions monitoring node
104 * @rcu: rcu struct for tipc_node 108 * @rcu: rcu struct for tipc_node
105 */ 109 */
@@ -116,6 +120,7 @@ struct tipc_node {
116 int link_cnt; 120 int link_cnt;
117 int working_links; 121 int working_links;
118 u32 signature; 122 u32 signature;
123 u32 link_id;
119 struct list_head nsub; 124 struct list_head nsub;
120 struct sk_buff_head waiting_sks; 125 struct sk_buff_head waiting_sks;
121 struct list_head conn_sks; 126 struct list_head conn_sks;
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index 75275c5cf929..51bddc236a15 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -1776,7 +1776,7 @@ int tipc_sk_rcv(struct sk_buff *buf)
1776 sk = &tsk->sk; 1776 sk = &tsk->sk;
1777 1777
1778 /* Queue message */ 1778 /* Queue message */
1779 bh_lock_sock(sk); 1779 spin_lock_bh(&sk->sk_lock.slock);
1780 1780
1781 if (!sock_owned_by_user(sk)) { 1781 if (!sock_owned_by_user(sk)) {
1782 rc = filter_rcv(sk, buf); 1782 rc = filter_rcv(sk, buf);
@@ -1787,7 +1787,7 @@ int tipc_sk_rcv(struct sk_buff *buf)
1787 if (sk_add_backlog(sk, buf, limit)) 1787 if (sk_add_backlog(sk, buf, limit))
1788 rc = -TIPC_ERR_OVERLOAD; 1788 rc = -TIPC_ERR_OVERLOAD;
1789 } 1789 }
1790 bh_unlock_sock(sk); 1790 spin_unlock_bh(&sk->sk_lock.slock);
1791 tipc_sk_put(tsk); 1791 tipc_sk_put(tsk);
1792 if (likely(!rc)) 1792 if (likely(!rc))
1793 return 0; 1793 return 0;
@@ -2673,7 +2673,7 @@ static int tipc_ioctl(struct socket *sk, unsigned int cmd, unsigned long arg)
2673 case SIOCGETLINKNAME: 2673 case SIOCGETLINKNAME:
2674 if (copy_from_user(&lnr, argp, sizeof(lnr))) 2674 if (copy_from_user(&lnr, argp, sizeof(lnr)))
2675 return -EFAULT; 2675 return -EFAULT;
2676 if (!tipc_node_get_linkname(lnr.bearer_id, lnr.peer, 2676 if (!tipc_node_get_linkname(lnr.bearer_id & 0xffff, lnr.peer,
2677 lnr.linkname, TIPC_MAX_LINK_NAME)) { 2677 lnr.linkname, TIPC_MAX_LINK_NAME)) {
2678 if (copy_to_user(argp, &lnr, sizeof(lnr))) 2678 if (copy_to_user(argp, &lnr, sizeof(lnr)))
2679 return -EFAULT; 2679 return -EFAULT;
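Two of the hunks above rely on the same packing convention: node.c stores link_id = peer_bearer_id << 16 | bearer_id, and the SIOCGETLINKNAME path masks with & 0xffff when it only needs the low half. A tiny sketch of that pack/unpack arithmetic (pack_link_id is a hypothetical helper, not a TIPC API):

#include <stdint.h>
#include <stdio.h>

/* Pack peer (high 16 bits) and local (low 16 bits) bearer ids into one u32. */
static uint32_t pack_link_id(uint16_t peer_bearer, uint16_t local_bearer)
{
        return ((uint32_t)peer_bearer << 16) | local_bearer;
}

int main(void)
{
        uint32_t link_id = pack_link_id(2, 1);

        printf("peer=%u local=%u\n",
               link_id >> 16,           /* peer bearer id */
               link_id & 0xffff);       /* local bearer id, as in the ioctl hunk */
        return 0;
}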
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index cb9f5a44ffad..5839c85075f1 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -5927,6 +5927,7 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
5927 int err; 5927 int err;
5928 bool need_new_beacon = false; 5928 bool need_new_beacon = false;
5929 int len, i; 5929 int len, i;
5930 u32 cs_count;
5930 5931
5931 if (!rdev->ops->channel_switch || 5932 if (!rdev->ops->channel_switch ||
5932 !(rdev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH)) 5933 !(rdev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH))
@@ -5963,7 +5964,14 @@ static int nl80211_channel_switch(struct sk_buff *skb, struct genl_info *info)
5963 if (need_new_beacon && !info->attrs[NL80211_ATTR_CSA_IES]) 5964 if (need_new_beacon && !info->attrs[NL80211_ATTR_CSA_IES])
5964 return -EINVAL; 5965 return -EINVAL;
5965 5966
5966 params.count = nla_get_u32(info->attrs[NL80211_ATTR_CH_SWITCH_COUNT]); 5967 /* Even though the attribute is u32, the specification says
5968 * u8, so let's make sure we don't overflow.
5969 */
5970 cs_count = nla_get_u32(info->attrs[NL80211_ATTR_CH_SWITCH_COUNT]);
5971 if (cs_count > 255)
5972 return -EINVAL;
5973
5974 params.count = cs_count;
5967 5975
5968 if (!need_new_beacon) 5976 if (!need_new_beacon)
5969 goto skip_beacons; 5977 goto skip_beacons;
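The nl80211 hunk bounds a u32 netlink attribute to the u8 range the specification allows before storing it in params.count. The same check in standalone form (set_cs_count is a hypothetical helper, not a cfg80211 API):

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

/* Reject out-of-range values before narrowing a u32 attribute into a u8 field. */
static int set_cs_count(uint8_t *dst, uint32_t attr)
{
        if (attr > 255)
                return -EINVAL;         /* would silently truncate otherwise */
        *dst = (uint8_t)attr;
        return 0;
}

int main(void)
{
        uint8_t count;

        printf("%d\n", set_cs_count(&count, 10));       /* 0 */
        printf("%d\n", set_cs_count(&count, 300));      /* -EINVAL */
        return 0;
}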
diff --git a/net/xfrm/xfrm_output.c b/net/xfrm/xfrm_output.c
index 499d6c18a8ce..7c532856b398 100644
--- a/net/xfrm/xfrm_output.c
+++ b/net/xfrm/xfrm_output.c
@@ -157,6 +157,8 @@ static int xfrm_output_gso(struct sk_buff *skb)
157 kfree_skb(skb); 157 kfree_skb(skb);
158 if (IS_ERR(segs)) 158 if (IS_ERR(segs))
159 return PTR_ERR(segs); 159 return PTR_ERR(segs);
160 if (segs == NULL)
161 return -EINVAL;
160 162
161 do { 163 do {
162 struct sk_buff *nskb = segs->next; 164 struct sk_buff *nskb = segs->next;
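The xfrm_output_gso() hunk adds a NULL check next to the existing IS_ERR() check, since the segmentation call can fail either way. A userspace sketch of rejecting both failure shapes, with simplified stand-ins for the kernel's ERR_PTR()/IS_ERR() helpers (not the real <linux/err.h> definitions) and a hypothetical segment() function:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>

#define ERR_PTR(err)    ((void *)(intptr_t)(err))
#define IS_ERR(ptr)     ((uintptr_t)(ptr) >= (uintptr_t)-4095)
#define PTR_ERR(ptr)    ((long)(intptr_t)(ptr))

/* Hypothetical segmenter: may return a result, an error pointer, or NULL. */
static void *segment(int mode)
{
        static int dummy_segment;

        if (mode == 0)
                return &dummy_segment;          /* success */
        if (mode == 1)
                return ERR_PTR(-ENOMEM);        /* hard error */
        return NULL;                            /* nothing produced: also a failure here */
}

static int output(int mode)
{
        void *segs = segment(mode);

        if (IS_ERR(segs))
                return (int)PTR_ERR(segs);
        if (segs == NULL)                       /* the check the hunk adds */
                return -EINVAL;
        return 0;
}

int main(void)
{
        printf("%d %d %d\n", output(0), output(1), output(2));
        return 0;
}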
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index 4c4e457e7888..88bf289abdc9 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1962,7 +1962,7 @@ static int xdst_queue_output(struct sock *sk, struct sk_buff *skb)
1962 struct xfrm_policy *pol = xdst->pols[0]; 1962 struct xfrm_policy *pol = xdst->pols[0];
1963 struct xfrm_policy_queue *pq = &pol->polq; 1963 struct xfrm_policy_queue *pq = &pol->polq;
1964 1964
1965 if (unlikely(skb_fclone_busy(skb))) { 1965 if (unlikely(skb_fclone_busy(sk, skb))) {
1966 kfree_skb(skb); 1966 kfree_skb(skb);
1967 return 0; 1967 return 0;
1968 } 1968 }
diff --git a/samples/bpf/test_verifier.c b/samples/bpf/test_verifier.c
index f44ef11f65a7..eb4bec0ad8af 100644
--- a/samples/bpf/test_verifier.c
+++ b/samples/bpf/test_verifier.c
@@ -209,6 +209,17 @@ static struct bpf_test tests[] = {
209 .result = REJECT, 209 .result = REJECT,
210 }, 210 },
211 { 211 {
212 "program doesn't init R0 before exit in all branches",
213 .insns = {
214 BPF_JMP_IMM(BPF_JGE, BPF_REG_1, 0, 2),
215 BPF_MOV64_IMM(BPF_REG_0, 1),
216 BPF_ALU64_IMM(BPF_ADD, BPF_REG_0, 2),
217 BPF_EXIT_INSN(),
218 },
219 .errstr = "R0 !read_ok",
220 .result = REJECT,
221 },
222 {
212 "stack out of bounds", 223 "stack out of bounds",
213 .insns = { 224 .insns = {
214 BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0), 225 BPF_ST_MEM(BPF_DW, BPF_REG_10, 8, 0),
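The new verifier test encodes a program in which one branch can reach BPF_EXIT without R0 ever being written, which the verifier must reject with "R0 !read_ok". The same class of bug in plain C (which branch initializes the value is flipped relative to the BPF sequence, but the shape is the same):

#include <stdio.h>

/* One branch skips the assignment, so the return value is
 * indeterminate on that path -- the C analogue of the program
 * the new verifier test expects to be rejected. */
static int bad_branch(int x)
{
        int ret;                /* deliberately not initialized */

        if (x >= 0) {
                ret = 1;
                ret += 2;
        }                       /* x < 0: falls through with ret unset */
        return ret;
}

int main(void)
{
        printf("%d\n", bad_branch(1));  /* fine: prints 3 */
        /* bad_branch(-1) would read an indeterminate value */
        return 0;
}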
diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
index 9685af330de5..c5ee1a7c5e8a 100644
--- a/security/integrity/evm/evm_main.c
+++ b/security/integrity/evm/evm_main.c
@@ -319,9 +319,12 @@ int evm_inode_setxattr(struct dentry *dentry, const char *xattr_name,
319{ 319{
320 const struct evm_ima_xattr_data *xattr_data = xattr_value; 320 const struct evm_ima_xattr_data *xattr_data = xattr_value;
321 321
322 if ((strcmp(xattr_name, XATTR_NAME_EVM) == 0) 322 if (strcmp(xattr_name, XATTR_NAME_EVM) == 0) {
323 && (xattr_data->type == EVM_XATTR_HMAC)) 323 if (!xattr_value_len)
324 return -EPERM; 324 return -EINVAL;
325 if (xattr_data->type != EVM_IMA_XATTR_DIGSIG)
326 return -EPERM;
327 }
325 return evm_protect_xattr(dentry, xattr_name, xattr_value, 328 return evm_protect_xattr(dentry, xattr_name, xattr_value,
326 xattr_value_len); 329 xattr_value_len);
327} 330}
diff --git a/security/integrity/ima/ima_appraise.c b/security/integrity/ima/ima_appraise.c
index 922685483bd3..7c8f41e618b6 100644
--- a/security/integrity/ima/ima_appraise.c
+++ b/security/integrity/ima/ima_appraise.c
@@ -378,6 +378,8 @@ int ima_inode_setxattr(struct dentry *dentry, const char *xattr_name,
378 result = ima_protect_xattr(dentry, xattr_name, xattr_value, 378 result = ima_protect_xattr(dentry, xattr_name, xattr_value,
379 xattr_value_len); 379 xattr_value_len);
380 if (result == 1) { 380 if (result == 1) {
381 if (!xattr_value_len || (xvalue->type >= IMA_XATTR_LAST))
382 return -EINVAL;
381 ima_reset_appraise_flags(dentry->d_inode, 383 ima_reset_appraise_flags(dentry->d_inode,
382 (xvalue->type == EVM_IMA_XATTR_DIGSIG) ? 1 : 0); 384 (xvalue->type == EVM_IMA_XATTR_DIGSIG) ? 1 : 0);
383 result = 0; 385 result = 0;
diff --git a/security/integrity/integrity.h b/security/integrity/integrity.h
index c0379d13dbe1..9d1c2ebfe12a 100644
--- a/security/integrity/integrity.h
+++ b/security/integrity/integrity.h
@@ -61,6 +61,7 @@ enum evm_ima_xattr_type {
61 EVM_XATTR_HMAC, 61 EVM_XATTR_HMAC,
62 EVM_IMA_XATTR_DIGSIG, 62 EVM_IMA_XATTR_DIGSIG,
63 IMA_XATTR_DIGEST_NG, 63 IMA_XATTR_DIGEST_NG,
64 IMA_XATTR_LAST
64}; 65};
65 66
66struct evm_ima_xattr_data { 67struct evm_ima_xattr_data {
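Taken together, the EVM/IMA hunks refuse zero-length xattr values and use the new IMA_XATTR_LAST sentinel so any type byte at or past the end of the enum is rejected before it is trusted. A sketch of that sentinel-bounded validation with hypothetical type names:

#include <errno.h>
#include <stddef.h>
#include <stdio.h>

enum xattr_type {
        XATTR_DIGEST,
        XATTR_HMAC,
        XATTR_DIGSIG,
        XATTR_LAST              /* sentinel: one past the highest valid type */
};

struct xattr_value {
        unsigned char type;     /* first byte of the blob */
        unsigned char data[];
};

static int validate_xattr(const void *value, size_t len)
{
        const struct xattr_value *x = value;

        if (!len)                       /* never dereference an empty blob */
                return -EINVAL;
        if (x->type >= XATTR_LAST)      /* unknown/future type */
                return -EINVAL;
        return 0;
}

int main(void)
{
        unsigned char good[] = { XATTR_DIGSIG, 0xaa };
        unsigned char bad[]  = { 0x7f, 0xaa };

        printf("%d %d %d\n",
               validate_xattr(good, sizeof(good)),
               validate_xattr(bad, sizeof(bad)),
               validate_xattr(good, 0));
        return 0;
}

Keeping the sentinel as the final enumerator means new valid types added before it are accepted automatically, while garbage values keep failing the range check.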
diff --git a/security/keys/internal.h b/security/keys/internal.h
index b8960c4959a5..200e37867336 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -117,6 +117,7 @@ struct keyring_search_context {
117#define KEYRING_SEARCH_NO_UPDATE_TIME 0x0004 /* Don't update times */ 117#define KEYRING_SEARCH_NO_UPDATE_TIME 0x0004 /* Don't update times */
118#define KEYRING_SEARCH_NO_CHECK_PERM 0x0008 /* Don't check permissions */ 118#define KEYRING_SEARCH_NO_CHECK_PERM 0x0008 /* Don't check permissions */
119#define KEYRING_SEARCH_DETECT_TOO_DEEP 0x0010 /* Give an error on excessive depth */ 119#define KEYRING_SEARCH_DETECT_TOO_DEEP 0x0010 /* Give an error on excessive depth */
120#define KEYRING_SEARCH_SKIP_EXPIRED 0x0020 /* Ignore expired keys (intention to replace) */
120 121
121 int (*iterator)(const void *object, void *iterator_data); 122 int (*iterator)(const void *object, void *iterator_data);
122 123
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index eff88a5f5d40..4743d71e4aa6 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -26,6 +26,8 @@
26#include <asm/uaccess.h> 26#include <asm/uaccess.h>
27#include "internal.h" 27#include "internal.h"
28 28
29#define KEY_MAX_DESC_SIZE 4096
30
29static int key_get_type_from_user(char *type, 31static int key_get_type_from_user(char *type,
30 const char __user *_type, 32 const char __user *_type,
31 unsigned len) 33 unsigned len)
@@ -78,7 +80,7 @@ SYSCALL_DEFINE5(add_key, const char __user *, _type,
78 80
79 description = NULL; 81 description = NULL;
80 if (_description) { 82 if (_description) {
81 description = strndup_user(_description, PAGE_SIZE); 83 description = strndup_user(_description, KEY_MAX_DESC_SIZE);
82 if (IS_ERR(description)) { 84 if (IS_ERR(description)) {
83 ret = PTR_ERR(description); 85 ret = PTR_ERR(description);
84 goto error; 86 goto error;
@@ -177,7 +179,7 @@ SYSCALL_DEFINE4(request_key, const char __user *, _type,
177 goto error; 179 goto error;
178 180
179 /* pull the description into kernel space */ 181 /* pull the description into kernel space */
180 description = strndup_user(_description, PAGE_SIZE); 182 description = strndup_user(_description, KEY_MAX_DESC_SIZE);
181 if (IS_ERR(description)) { 183 if (IS_ERR(description)) {
182 ret = PTR_ERR(description); 184 ret = PTR_ERR(description);
183 goto error; 185 goto error;
@@ -287,7 +289,7 @@ long keyctl_join_session_keyring(const char __user *_name)
287 /* fetch the name from userspace */ 289 /* fetch the name from userspace */
288 name = NULL; 290 name = NULL;
289 if (_name) { 291 if (_name) {
290 name = strndup_user(_name, PAGE_SIZE); 292 name = strndup_user(_name, KEY_MAX_DESC_SIZE);
291 if (IS_ERR(name)) { 293 if (IS_ERR(name)) {
292 ret = PTR_ERR(name); 294 ret = PTR_ERR(name);
293 goto error; 295 goto error;
@@ -562,8 +564,9 @@ long keyctl_describe_key(key_serial_t keyid,
562{ 564{
563 struct key *key, *instkey; 565 struct key *key, *instkey;
564 key_ref_t key_ref; 566 key_ref_t key_ref;
565 char *tmpbuf; 567 char *infobuf;
566 long ret; 568 long ret;
569 int desclen, infolen;
567 570
568 key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, KEY_NEED_VIEW); 571 key_ref = lookup_user_key(keyid, KEY_LOOKUP_PARTIAL, KEY_NEED_VIEW);
569 if (IS_ERR(key_ref)) { 572 if (IS_ERR(key_ref)) {
@@ -586,38 +589,31 @@ long keyctl_describe_key(key_serial_t keyid,
586 } 589 }
587 590
588okay: 591okay:
589 /* calculate how much description we're going to return */
590 ret = -ENOMEM;
591 tmpbuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
592 if (!tmpbuf)
593 goto error2;
594
595 key = key_ref_to_ptr(key_ref); 592 key = key_ref_to_ptr(key_ref);
593 desclen = strlen(key->description);
596 594
597 ret = snprintf(tmpbuf, PAGE_SIZE - 1, 595 /* calculate how much information we're going to return */
598 "%s;%d;%d;%08x;%s", 596 ret = -ENOMEM;
599 key->type->name, 597 infobuf = kasprintf(GFP_KERNEL,
600 from_kuid_munged(current_user_ns(), key->uid), 598 "%s;%d;%d;%08x;",
601 from_kgid_munged(current_user_ns(), key->gid), 599 key->type->name,
602 key->perm, 600 from_kuid_munged(current_user_ns(), key->uid),
603 key->description ?: ""); 601 from_kgid_munged(current_user_ns(), key->gid),
604 602 key->perm);
605 /* include a NUL char at the end of the data */ 603 if (!infobuf)
606 if (ret > PAGE_SIZE - 1) 604 goto error2;
607 ret = PAGE_SIZE - 1; 605 infolen = strlen(infobuf);
608 tmpbuf[ret] = 0; 606 ret = infolen + desclen + 1;
609 ret++;
610 607
611 /* consider returning the data */ 608 /* consider returning the data */
612 if (buffer && buflen > 0) { 609 if (buffer && buflen >= ret) {
613 if (buflen > ret) 610 if (copy_to_user(buffer, infobuf, infolen) != 0 ||
614 buflen = ret; 611 copy_to_user(buffer + infolen, key->description,
615 612 desclen + 1) != 0)
616 if (copy_to_user(buffer, tmpbuf, buflen) != 0)
617 ret = -EFAULT; 613 ret = -EFAULT;
618 } 614 }
619 615
620 kfree(tmpbuf); 616 kfree(infobuf);
621error2: 617error2:
622 key_ref_put(key_ref); 618 key_ref_put(key_ref);
623error: 619error:
@@ -649,7 +645,7 @@ long keyctl_keyring_search(key_serial_t ringid,
649 if (ret < 0) 645 if (ret < 0)
650 goto error; 646 goto error;
651 647
652 description = strndup_user(_description, PAGE_SIZE); 648 description = strndup_user(_description, KEY_MAX_DESC_SIZE);
653 if (IS_ERR(description)) { 649 if (IS_ERR(description)) {
654 ret = PTR_ERR(description); 650 ret = PTR_ERR(description);
655 goto error; 651 goto error;
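The reworked keyctl_describe_key() formats the fixed "type;uid;gid;perm;" prefix with kasprintf(), computes the full size including the description and its trailing NUL, and only copies out when the caller's buffer can hold all of it. A userspace sketch of the same size-then-copy logic, with snprintf() standing in for kasprintf() and hypothetical parameters:

#include <stdio.h>
#include <string.h>

/* Build "type;uid;gid;perm;description\0": format the fixed fields first,
 * then append the description only if the caller's buffer is big enough. */
static long describe(char *buffer, size_t buflen,
                     const char *type, int uid, int gid,
                     unsigned int perm, const char *desc)
{
        char info[64];
        int infolen;
        size_t desclen = strlen(desc);
        long ret;

        infolen = snprintf(info, sizeof(info), "%s;%d;%d;%08x;",
                           type, uid, gid, perm);
        if (infolen < 0 || (size_t)infolen >= sizeof(info))
                return -1;

        ret = infolen + (long)desclen + 1;      /* total size incl. trailing NUL */
        if (buffer && buflen >= (size_t)ret) {
                memcpy(buffer, info, infolen);
                memcpy(buffer + infolen, desc, desclen + 1);
        }
        return ret;                             /* size needed, even if not copied */
}

int main(void)
{
        char buf[128];
        long n = describe(buf, sizeof(buf), "user", 1000, 1000, 0x3f3f0000, "example");

        printf("%ld \"%s\"\n", n, buf);
        return 0;
}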
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index 8177010174f7..e72548b5897e 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -546,7 +546,8 @@ static int keyring_search_iterator(const void *object, void *iterator_data)
546 } 546 }
547 547
548 if (key->expiry && ctx->now.tv_sec >= key->expiry) { 548 if (key->expiry && ctx->now.tv_sec >= key->expiry) {
549 ctx->result = ERR_PTR(-EKEYEXPIRED); 549 if (!(ctx->flags & KEYRING_SEARCH_SKIP_EXPIRED))
550 ctx->result = ERR_PTR(-EKEYEXPIRED);
550 kleave(" = %d [expire]", ctx->skipped_ret); 551 kleave(" = %d [expire]", ctx->skipped_ret);
551 goto skipped; 552 goto skipped;
552 } 553 }
@@ -628,6 +629,10 @@ static bool search_nested_keyrings(struct key *keyring,
628 ctx->index_key.type->name, 629 ctx->index_key.type->name,
629 ctx->index_key.description); 630 ctx->index_key.description);
630 631
632#define STATE_CHECKS (KEYRING_SEARCH_NO_STATE_CHECK | KEYRING_SEARCH_DO_STATE_CHECK)
633 BUG_ON((ctx->flags & STATE_CHECKS) == 0 ||
634 (ctx->flags & STATE_CHECKS) == STATE_CHECKS);
635
631 if (ctx->index_key.description) 636 if (ctx->index_key.description)
632 ctx->index_key.desc_len = strlen(ctx->index_key.description); 637 ctx->index_key.desc_len = strlen(ctx->index_key.description);
633 638
@@ -637,7 +642,6 @@ static bool search_nested_keyrings(struct key *keyring,
637 if (ctx->match_data.lookup_type == KEYRING_SEARCH_LOOKUP_ITERATE || 642 if (ctx->match_data.lookup_type == KEYRING_SEARCH_LOOKUP_ITERATE ||
638 keyring_compare_object(keyring, &ctx->index_key)) { 643 keyring_compare_object(keyring, &ctx->index_key)) {
639 ctx->skipped_ret = 2; 644 ctx->skipped_ret = 2;
640 ctx->flags |= KEYRING_SEARCH_DO_STATE_CHECK;
641 switch (ctx->iterator(keyring_key_to_ptr(keyring), ctx)) { 645 switch (ctx->iterator(keyring_key_to_ptr(keyring), ctx)) {
642 case 1: 646 case 1:
643 goto found; 647 goto found;
@@ -649,8 +653,6 @@ static bool search_nested_keyrings(struct key *keyring,
649 } 653 }
650 654
651 ctx->skipped_ret = 0; 655 ctx->skipped_ret = 0;
652 if (ctx->flags & KEYRING_SEARCH_NO_STATE_CHECK)
653 ctx->flags &= ~KEYRING_SEARCH_DO_STATE_CHECK;
654 656
655 /* Start processing a new keyring */ 657 /* Start processing a new keyring */
656descend_to_keyring: 658descend_to_keyring:
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index bb4337c7ae1b..0c7aea4dea54 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -516,6 +516,8 @@ struct key *request_key_and_link(struct key_type *type,
516 .match_data.cmp = key_default_cmp, 516 .match_data.cmp = key_default_cmp,
517 .match_data.raw_data = description, 517 .match_data.raw_data = description,
518 .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT, 518 .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
519 .flags = (KEYRING_SEARCH_DO_STATE_CHECK |
520 KEYRING_SEARCH_SKIP_EXPIRED),
519 }; 521 };
520 struct key *key; 522 struct key *key;
521 key_ref_t key_ref; 523 key_ref_t key_ref;
diff --git a/security/keys/request_key_auth.c b/security/keys/request_key_auth.c
index 6639e2cb8853..5d672f7580dd 100644
--- a/security/keys/request_key_auth.c
+++ b/security/keys/request_key_auth.c
@@ -249,6 +249,7 @@ struct key *key_get_instantiation_authkey(key_serial_t target_id)
249 .match_data.cmp = key_default_cmp, 249 .match_data.cmp = key_default_cmp,
250 .match_data.raw_data = description, 250 .match_data.raw_data = description,
251 .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT, 251 .match_data.lookup_type = KEYRING_SEARCH_LOOKUP_DIRECT,
252 .flags = KEYRING_SEARCH_DO_STATE_CHECK,
252 }; 253 };
253 struct key *authkey; 254 struct key *authkey;
254 key_ref_t authkey_ref; 255 key_ref_t authkey_ref;
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index e66314138b38..c603b20356ad 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -4725,9 +4725,10 @@ static int selinux_nlmsg_perm(struct sock *sk, struct sk_buff *skb)
4725 err = selinux_nlmsg_lookup(sksec->sclass, nlh->nlmsg_type, &perm); 4725 err = selinux_nlmsg_lookup(sksec->sclass, nlh->nlmsg_type, &perm);
4726 if (err) { 4726 if (err) {
4727 if (err == -EINVAL) { 4727 if (err == -EINVAL) {
4728 WARN_ONCE(1, "selinux_nlmsg_perm: unrecognized netlink message:" 4728 printk(KERN_WARNING
4729 " protocol=%hu nlmsg_type=%hu sclass=%hu\n", 4729 "SELinux: unrecognized netlink message:"
4730 sk->sk_protocol, nlh->nlmsg_type, sksec->sclass); 4730 " protocol=%hu nlmsg_type=%hu sclass=%hu\n",
4731 sk->sk_protocol, nlh->nlmsg_type, sksec->sclass);
4731 if (!selinux_enforcing || security_get_allow_unknown()) 4732 if (!selinux_enforcing || security_get_allow_unknown())
4732 err = 0; 4733 err = 0;
4733 } 4734 }
diff --git a/sound/core/pcm.c b/sound/core/pcm.c
index 42ded997b223..c6ff94ab1ad6 100644
--- a/sound/core/pcm.c
+++ b/sound/core/pcm.c
@@ -216,6 +216,8 @@ static char *snd_pcm_format_names[] = {
216 FORMAT(DSD_U8), 216 FORMAT(DSD_U8),
217 FORMAT(DSD_U16_LE), 217 FORMAT(DSD_U16_LE),
218 FORMAT(DSD_U32_LE), 218 FORMAT(DSD_U32_LE),
219 FORMAT(DSD_U16_BE),
220 FORMAT(DSD_U32_BE),
219}; 221};
220 222
221const char *snd_pcm_format_name(snd_pcm_format_t format) 223const char *snd_pcm_format_name(snd_pcm_format_t format)
diff --git a/sound/core/pcm_compat.c b/sound/core/pcm_compat.c
index 102e8fd1d450..2d957ba63557 100644
--- a/sound/core/pcm_compat.c
+++ b/sound/core/pcm_compat.c
@@ -210,6 +210,8 @@ static int snd_pcm_status_user_compat(struct snd_pcm_substream *substream,
210 if (err < 0) 210 if (err < 0)
211 return err; 211 return err;
212 212
213 if (clear_user(src, sizeof(*src)))
214 return -EFAULT;
213 if (put_user(status.state, &src->state) || 215 if (put_user(status.state, &src->state) ||
214 compat_put_timespec(&status.trigger_tstamp, &src->trigger_tstamp) || 216 compat_put_timespec(&status.trigger_tstamp, &src->trigger_tstamp) ||
215 compat_put_timespec(&status.tstamp, &src->tstamp) || 217 compat_put_timespec(&status.tstamp, &src->tstamp) ||
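The pcm_compat.c hunk clears the whole user-space status structure before writing the individual fields, so padding and fields the copy-out path never touches cannot leak stale data. A userspace analogue with memset() standing in for clear_user() and a made-up compat_status layout:

#include <stdio.h>
#include <string.h>

struct compat_status {
        int state;
        int pad;                /* field the copy-out path never writes */
        long long delay;
};

/* Zero the whole destination first, then fill the fields we know about,
 * so anything skipped (or padding) cannot carry stale bytes to the caller. */
static void fill_status(struct compat_status *dst, int state, long long delay)
{
        memset(dst, 0, sizeof(*dst));   /* the clear_user() analogue */
        dst->state = state;
        dst->delay = delay;
}

int main(void)
{
        struct compat_status s = { .pad = 0x41414141 }; /* pretend it held garbage */

        fill_status(&s, 3, 42);
        printf("state=%d pad=%d delay=%lld\n", s.state, s.pad, s.delay);
        return 0;
}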
diff --git a/sound/core/pcm_misc.c b/sound/core/pcm_misc.c
index ae7a0feb3b76..ebe8444de6c6 100644
--- a/sound/core/pcm_misc.c
+++ b/sound/core/pcm_misc.c
@@ -152,6 +152,14 @@ static struct pcm_format_data pcm_formats[(INT)SNDRV_PCM_FORMAT_LAST+1] = {
152 .width = 32, .phys = 32, .le = 1, .signd = 0, 152 .width = 32, .phys = 32, .le = 1, .signd = 0,
153 .silence = { 0x69, 0x69, 0x69, 0x69 }, 153 .silence = { 0x69, 0x69, 0x69, 0x69 },
154 }, 154 },
155 [SNDRV_PCM_FORMAT_DSD_U16_BE] = {
156 .width = 16, .phys = 16, .le = 0, .signd = 0,
157 .silence = { 0x69, 0x69 },
158 },
159 [SNDRV_PCM_FORMAT_DSD_U32_BE] = {
160 .width = 32, .phys = 32, .le = 0, .signd = 0,
161 .silence = { 0x69, 0x69, 0x69, 0x69 },
162 },
155 /* FIXME: the following three formats are not defined properly yet */ 163 /* FIXME: the following three formats are not defined properly yet */
156 [SNDRV_PCM_FORMAT_MPEG] = { 164 [SNDRV_PCM_FORMAT_MPEG] = {
157 .le = -1, .signd = -1, 165 .le = -1, .signd = -1,
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index bfe1cf6b492f..166d59cdc86b 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -781,16 +781,15 @@ static int snd_pcm_action_group(struct action_ops *ops,
781{ 781{
782 struct snd_pcm_substream *s = NULL; 782 struct snd_pcm_substream *s = NULL;
783 struct snd_pcm_substream *s1; 783 struct snd_pcm_substream *s1;
784 int res = 0; 784 int res = 0, depth = 1;
785 785
786 snd_pcm_group_for_each_entry(s, substream) { 786 snd_pcm_group_for_each_entry(s, substream) {
787 if (do_lock && s != substream) { 787 if (do_lock && s != substream) {
788 if (s->pcm->nonatomic) 788 if (s->pcm->nonatomic)
789 mutex_lock_nested(&s->self_group.mutex, 789 mutex_lock_nested(&s->self_group.mutex, depth);
790 SINGLE_DEPTH_NESTING);
791 else 790 else
792 spin_lock_nested(&s->self_group.lock, 791 spin_lock_nested(&s->self_group.lock, depth);
793 SINGLE_DEPTH_NESTING); 792 depth++;
794 } 793 }
795 res = ops->pre_action(s, state); 794 res = ops->pre_action(s, state);
796 if (res < 0) 795 if (res < 0)
@@ -906,8 +905,7 @@ static int snd_pcm_action_lock_mutex(struct action_ops *ops,
906 down_read(&snd_pcm_link_rwsem); 905 down_read(&snd_pcm_link_rwsem);
907 if (snd_pcm_stream_linked(substream)) { 906 if (snd_pcm_stream_linked(substream)) {
908 mutex_lock(&substream->group->mutex); 907 mutex_lock(&substream->group->mutex);
909 mutex_lock_nested(&substream->self_group.mutex, 908 mutex_lock(&substream->self_group.mutex);
910 SINGLE_DEPTH_NESTING);
911 res = snd_pcm_action_group(ops, substream, state, 1); 909 res = snd_pcm_action_group(ops, substream, state, 1);
912 mutex_unlock(&substream->self_group.mutex); 910 mutex_unlock(&substream->self_group.mutex);
913 mutex_unlock(&substream->group->mutex); 911 mutex_unlock(&substream->group->mutex);
@@ -3311,7 +3309,7 @@ static const struct vm_operations_struct snd_pcm_vm_ops_data_fault = {
3311 3309
3312#ifndef ARCH_HAS_DMA_MMAP_COHERENT 3310#ifndef ARCH_HAS_DMA_MMAP_COHERENT
3313/* This should be defined / handled globally! */ 3311/* This should be defined / handled globally! */
3314#ifdef CONFIG_ARM 3312#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
3315#define ARCH_HAS_DMA_MMAP_COHERENT 3313#define ARCH_HAS_DMA_MMAP_COHERENT
3316#endif 3314#endif
3317#endif 3315#endif
diff --git a/sound/firewire/bebob/bebob_focusrite.c b/sound/firewire/bebob/bebob_focusrite.c
index 45a0eed6d5b1..3b052ed0fbf5 100644
--- a/sound/firewire/bebob/bebob_focusrite.c
+++ b/sound/firewire/bebob/bebob_focusrite.c
@@ -27,12 +27,14 @@
27#define SAFFIRE_CLOCK_SOURCE_INTERNAL 0 27#define SAFFIRE_CLOCK_SOURCE_INTERNAL 0
28#define SAFFIRE_CLOCK_SOURCE_SPDIF 1 28#define SAFFIRE_CLOCK_SOURCE_SPDIF 1
29 29
30/* '1' is absent, why... */ 30/* clock sources as returned from register of Saffire Pro 10 and 26 */
31#define SAFFIREPRO_CLOCK_SOURCE_INTERNAL 0 31#define SAFFIREPRO_CLOCK_SOURCE_INTERNAL 0
32#define SAFFIREPRO_CLOCK_SOURCE_SKIP 1 /* never used on hardware */
32#define SAFFIREPRO_CLOCK_SOURCE_SPDIF 2 33#define SAFFIREPRO_CLOCK_SOURCE_SPDIF 2
33#define SAFFIREPRO_CLOCK_SOURCE_ADAT1 3 34#define SAFFIREPRO_CLOCK_SOURCE_ADAT1 3 /* not used on s.pro. 10 */
34#define SAFFIREPRO_CLOCK_SOURCE_ADAT2 4 35#define SAFFIREPRO_CLOCK_SOURCE_ADAT2 4 /* not used on s.pro. 10 */
35#define SAFFIREPRO_CLOCK_SOURCE_WORDCLOCK 5 36#define SAFFIREPRO_CLOCK_SOURCE_WORDCLOCK 5
37#define SAFFIREPRO_CLOCK_SOURCE_COUNT 6
36 38
37/* S/PDIF, ADAT1, ADAT2 is enabled or not. three quadlets */ 39/* S/PDIF, ADAT1, ADAT2 is enabled or not. three quadlets */
38#define SAFFIREPRO_ENABLE_DIG_IFACES 0x01a4 40#define SAFFIREPRO_ENABLE_DIG_IFACES 0x01a4
@@ -101,13 +103,34 @@ saffire_write_quad(struct snd_bebob *bebob, u64 offset, u32 value)
101 &data, sizeof(__be32), 0); 103 &data, sizeof(__be32), 0);
102} 104}
103 105
106static char *const saffirepro_10_clk_src_labels[] = {
107 SND_BEBOB_CLOCK_INTERNAL, "S/PDIF", "Word Clock"
108};
104static char *const saffirepro_26_clk_src_labels[] = { 109static char *const saffirepro_26_clk_src_labels[] = {
105 SND_BEBOB_CLOCK_INTERNAL, "S/PDIF", "ADAT1", "ADAT2", "Word Clock" 110 SND_BEBOB_CLOCK_INTERNAL, "S/PDIF", "ADAT1", "ADAT2", "Word Clock"
106}; 111};
107 112/* Value maps between registers and labels for SaffirePro 10/26. */
108static char *const saffirepro_10_clk_src_labels[] = { 113static const signed char saffirepro_clk_maps[][SAFFIREPRO_CLOCK_SOURCE_COUNT] = {
109 SND_BEBOB_CLOCK_INTERNAL, "S/PDIF", "Word Clock" 114 /* SaffirePro 10 */
115 [0] = {
116 [SAFFIREPRO_CLOCK_SOURCE_INTERNAL] = 0,
117 [SAFFIREPRO_CLOCK_SOURCE_SKIP] = -1, /* not supported */
118 [SAFFIREPRO_CLOCK_SOURCE_SPDIF] = 1,
119 [SAFFIREPRO_CLOCK_SOURCE_ADAT1] = -1, /* not supported */
120 [SAFFIREPRO_CLOCK_SOURCE_ADAT2] = -1, /* not supported */
121 [SAFFIREPRO_CLOCK_SOURCE_WORDCLOCK] = 2,
122 },
123 /* SaffirePro 26 */
124 [1] = {
125 [SAFFIREPRO_CLOCK_SOURCE_INTERNAL] = 0,
126 [SAFFIREPRO_CLOCK_SOURCE_SKIP] = -1, /* not supported */
127 [SAFFIREPRO_CLOCK_SOURCE_SPDIF] = 1,
128 [SAFFIREPRO_CLOCK_SOURCE_ADAT1] = 2,
129 [SAFFIREPRO_CLOCK_SOURCE_ADAT2] = 3,
130 [SAFFIREPRO_CLOCK_SOURCE_WORDCLOCK] = 4,
131 }
110}; 132};
133
111static int 134static int
112saffirepro_both_clk_freq_get(struct snd_bebob *bebob, unsigned int *rate) 135saffirepro_both_clk_freq_get(struct snd_bebob *bebob, unsigned int *rate)
113{ 136{
@@ -138,24 +161,35 @@ saffirepro_both_clk_freq_set(struct snd_bebob *bebob, unsigned int rate)
138 161
139 return saffire_write_quad(bebob, SAFFIREPRO_RATE_NOREBOOT, id); 162 return saffire_write_quad(bebob, SAFFIREPRO_RATE_NOREBOOT, id);
140} 163}
164
165/*
166 * query hardware for current clock source, return our internally
167 * used clock index in *id, depending on hardware.
168 */
141static int 169static int
142saffirepro_both_clk_src_get(struct snd_bebob *bebob, unsigned int *id) 170saffirepro_both_clk_src_get(struct snd_bebob *bebob, unsigned int *id)
143{ 171{
144 int err; 172 int err;
145 u32 value; 173 u32 value; /* clock source read from hw register */
174 const signed char *map;
146 175
147 err = saffire_read_quad(bebob, SAFFIREPRO_OFFSET_CLOCK_SOURCE, &value); 176 err = saffire_read_quad(bebob, SAFFIREPRO_OFFSET_CLOCK_SOURCE, &value);
148 if (err < 0) 177 if (err < 0)
149 goto end; 178 goto end;
150 179
151 if (bebob->spec->clock->labels == saffirepro_10_clk_src_labels) { 180 /* depending on hardware, use a different mapping */
152 if (value == SAFFIREPRO_CLOCK_SOURCE_WORDCLOCK) 181 if (bebob->spec->clock->labels == saffirepro_10_clk_src_labels)
153 *id = 2; 182 map = saffirepro_clk_maps[0];
154 else if (value == SAFFIREPRO_CLOCK_SOURCE_SPDIF) 183 else
155 *id = 1; 184 map = saffirepro_clk_maps[1];
156 } else if (value > 1) { 185
157 *id = value - 1; 186 /* In a case that this driver cannot handle the value of register. */
187 if (value >= SAFFIREPRO_CLOCK_SOURCE_COUNT || map[value] < 0) {
188 err = -EIO;
189 goto end;
158 } 190 }
191
192 *id = (unsigned int)map[value];
159end: 193end:
160 return err; 194 return err;
161} 195}
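The bebob_focusrite rework replaces the ad-hoc if/else with per-model lookup tables in which -1 marks register values the model cannot report, plus a bounds check before indexing. A standalone sketch of that table-driven validation (the map contents mirror the tables in the hunk above, but reg_to_index is a hypothetical helper, not the driver function):

#include <errno.h>
#include <stdio.h>

#define SRC_COUNT 6

/* -1 marks register values the model does not expose as a selectable source. */
static const signed char pro10_map[SRC_COUNT] = { 0, -1, 1, -1, -1, 2 };
static const signed char pro26_map[SRC_COUNT] = { 0, -1, 1,  2,  3, 4 };

static int reg_to_index(const signed char *map, unsigned int reg_value,
                        unsigned int *index)
{
        if (reg_value >= SRC_COUNT || map[reg_value] < 0)
                return -EIO;            /* register value we cannot handle */
        *index = (unsigned int)map[reg_value];
        return 0;
}

int main(void)
{
        unsigned int idx;

        printf("%d", reg_to_index(pro10_map, 5, &idx)); /* word clock -> index 2 */
        printf(" idx=%u", idx);
        printf(" %d", reg_to_index(pro10_map, 3, &idx));/* ADAT1 unsupported: -EIO */
        printf(" %d\n", reg_to_index(pro26_map, 9, &idx));/* out of range: -EIO */
        return 0;
}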
diff --git a/sound/firewire/bebob/bebob_stream.c b/sound/firewire/bebob/bebob_stream.c
index ef4d0c9f6578..1aab0a32870c 100644
--- a/sound/firewire/bebob/bebob_stream.c
+++ b/sound/firewire/bebob/bebob_stream.c
@@ -129,12 +129,24 @@ snd_bebob_stream_check_internal_clock(struct snd_bebob *bebob, bool *internal)
129 /* 1.The device has its own operation to switch source of clock */ 129 /* 1.The device has its own operation to switch source of clock */
130 if (clk_spec) { 130 if (clk_spec) {
131 err = clk_spec->get(bebob, &id); 131 err = clk_spec->get(bebob, &id);
132 if (err < 0) 132 if (err < 0) {
133 dev_err(&bebob->unit->device, 133 dev_err(&bebob->unit->device,
134 "fail to get clock source: %d\n", err); 134 "fail to get clock source: %d\n", err);
135 else if (strncmp(clk_spec->labels[id], SND_BEBOB_CLOCK_INTERNAL, 135 goto end;
136 strlen(SND_BEBOB_CLOCK_INTERNAL)) == 0) 136 }
137
138 if (id >= clk_spec->num) {
139 dev_err(&bebob->unit->device,
140 "clock source %d out of range 0..%d\n",
141 id, clk_spec->num - 1);
142 err = -EIO;
143 goto end;
144 }
145
146 if (strncmp(clk_spec->labels[id], SND_BEBOB_CLOCK_INTERNAL,
147 strlen(SND_BEBOB_CLOCK_INTERNAL)) == 0)
137 *internal = true; 148 *internal = true;
149
138 goto end; 150 goto end;
139 } 151 }
140 152
diff --git a/sound/firewire/bebob/bebob_terratec.c b/sound/firewire/bebob/bebob_terratec.c
index 0e4c0bfc463b..9940611f2e1b 100644
--- a/sound/firewire/bebob/bebob_terratec.c
+++ b/sound/firewire/bebob/bebob_terratec.c
@@ -24,7 +24,12 @@ phase88_rack_clk_src_get(struct snd_bebob *bebob, unsigned int *id)
24 if (err < 0) 24 if (err < 0)
25 goto end; 25 goto end;
26 26
27 *id = (enable_ext & 0x01) | ((enable_word & 0x01) << 1); 27 if (enable_ext == 0)
28 *id = 0;
29 else if (enable_word == 0)
30 *id = 1;
31 else
32 *id = 2;
28end: 33end:
29 return err; 34 return err;
30} 35}
diff --git a/sound/pci/ad1889.c b/sound/pci/ad1889.c
index 7bfdf9c51416..1610c38337af 100644
--- a/sound/pci/ad1889.c
+++ b/sound/pci/ad1889.c
@@ -681,7 +681,7 @@ snd_ad1889_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffe
681 681
682 /* WARQ is at offset 12 */ 682 /* WARQ is at offset 12 */
683 tmp = (reg & AD_DS_WSMC_WARQ) ? 683 tmp = (reg & AD_DS_WSMC_WARQ) ?
684 (((reg & AD_DS_WSMC_WARQ >> 12) & 0x01) ? 12 : 18) : 4; 684 ((((reg & AD_DS_WSMC_WARQ) >> 12) & 0x01) ? 12 : 18) : 4;
685 tmp /= (reg & AD_DS_WSMC_WAST) ? 2 : 1; 685 tmp /= (reg & AD_DS_WSMC_WAST) ? 2 : 1;
686 686
687 snd_iprintf(buffer, "Wave FIFO: %d %s words\n\n", tmp, 687 snd_iprintf(buffer, "Wave FIFO: %d %s words\n\n", tmp,
@@ -693,7 +693,7 @@ snd_ad1889_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffe
693 693
694 /* SYRQ is at offset 4 */ 694 /* SYRQ is at offset 4 */
695 tmp = (reg & AD_DS_WSMC_SYRQ) ? 695 tmp = (reg & AD_DS_WSMC_SYRQ) ?
696 (((reg & AD_DS_WSMC_SYRQ >> 4) & 0x01) ? 12 : 18) : 4; 696 ((((reg & AD_DS_WSMC_SYRQ) >> 4) & 0x01) ? 12 : 18) : 4;
697 tmp /= (reg & AD_DS_WSMC_WAST) ? 2 : 1; 697 tmp /= (reg & AD_DS_WSMC_WAST) ? 2 : 1;
698 698
699 snd_iprintf(buffer, "Synthesis FIFO: %d %s words\n\n", tmp, 699 snd_iprintf(buffer, "Synthesis FIFO: %d %s words\n\n", tmp,
@@ -709,7 +709,7 @@ snd_ad1889_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffe
709 709
710 /* ACRQ is at offset 4 */ 710 /* ACRQ is at offset 4 */
711 tmp = (reg & AD_DS_RAMC_ACRQ) ? 711 tmp = (reg & AD_DS_RAMC_ACRQ) ?
712 (((reg & AD_DS_RAMC_ACRQ >> 4) & 0x01) ? 12 : 18) : 4; 712 ((((reg & AD_DS_RAMC_ACRQ) >> 4) & 0x01) ? 12 : 18) : 4;
713 tmp /= (reg & AD_DS_RAMC_ADST) ? 2 : 1; 713 tmp /= (reg & AD_DS_RAMC_ADST) ? 2 : 1;
714 714
715 snd_iprintf(buffer, "ADC FIFO: %d %s words\n\n", tmp, 715 snd_iprintf(buffer, "ADC FIFO: %d %s words\n\n", tmp,
@@ -720,7 +720,7 @@ snd_ad1889_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffe
720 720
721 /* RERQ is at offset 12 */ 721 /* RERQ is at offset 12 */
722 tmp = (reg & AD_DS_RAMC_RERQ) ? 722 tmp = (reg & AD_DS_RAMC_RERQ) ?
723 (((reg & AD_DS_RAMC_RERQ >> 12) & 0x01) ? 12 : 18) : 4; 723 ((((reg & AD_DS_RAMC_RERQ) >> 12) & 0x01) ? 12 : 18) : 4;
724 tmp /= (reg & AD_DS_RAMC_ADST) ? 2 : 1; 724 tmp /= (reg & AD_DS_RAMC_ADST) ? 2 : 1;
725 725
726 snd_iprintf(buffer, "Resampler FIFO: %d %s words\n\n", tmp, 726 snd_iprintf(buffer, "Resampler FIFO: %d %s words\n\n", tmp,
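The ad1889.c changes are a pure operator-precedence fix: in C, '>>' binds tighter than '&', so reg & AD_DS_WSMC_WARQ >> 12 shifted the mask instead of the masked register. A short demonstration with a hypothetical MASK value:

#include <stdio.h>

#define MASK 0x3000u    /* hypothetical two-bit field at bits 12-13 */

int main(void)
{
        unsigned int reg = 0x1000;

        /* '>>' binds tighter than '&': the left expression shifts MASK, not reg. */
        printf("buggy: %u  fixed: %u\n",
               (reg & MASK >> 12) & 0x01,       /* reg & (MASK >> 12) = 0x1000 & 3 = 0 */
               ((reg & MASK) >> 12) & 0x01);    /* (0x1000) >> 12 = 1 */
        return 0;
}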
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index cfcca4c30d4d..48b6c5a3884f 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -219,6 +219,7 @@ MODULE_SUPPORTED_DEVICE("{{Intel, ICH6},"
219 "{Intel, LPT_LP}," 219 "{Intel, LPT_LP},"
220 "{Intel, WPT_LP}," 220 "{Intel, WPT_LP},"
221 "{Intel, SPT}," 221 "{Intel, SPT},"
222 "{Intel, SPT_LP},"
222 "{Intel, HPT}," 223 "{Intel, HPT},"
223 "{Intel, PBG}," 224 "{Intel, PBG},"
224 "{Intel, SCH}," 225 "{Intel, SCH},"
@@ -297,7 +298,8 @@ enum {
297 298
298/* quirks for ATI/AMD HDMI */ 299/* quirks for ATI/AMD HDMI */
299#define AZX_DCAPS_PRESET_ATI_HDMI \ 300#define AZX_DCAPS_PRESET_ATI_HDMI \
300 (AZX_DCAPS_NO_TCSEL | AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_POSFIX_LPIB) 301 (AZX_DCAPS_NO_TCSEL | AZX_DCAPS_SYNC_WRITE | AZX_DCAPS_POSFIX_LPIB|\
302 AZX_DCAPS_NO_MSI64)
301 303
302/* quirks for Nvidia */ 304/* quirks for Nvidia */
303#define AZX_DCAPS_PRESET_NVIDIA \ 305#define AZX_DCAPS_PRESET_NVIDIA \
@@ -374,6 +376,8 @@ static void __mark_pages_wc(struct azx *chip, struct snd_dma_buffer *dmab, bool
374#ifdef CONFIG_SND_DMA_SGBUF 376#ifdef CONFIG_SND_DMA_SGBUF
375 if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_SG) { 377 if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_SG) {
376 struct snd_sg_buf *sgbuf = dmab->private_data; 378 struct snd_sg_buf *sgbuf = dmab->private_data;
379 if (chip->driver_type == AZX_DRIVER_CMEDIA)
380 return; /* deal with only CORB/RIRB buffers */
377 if (on) 381 if (on)
378 set_pages_array_wc(sgbuf->page_table, sgbuf->pages); 382 set_pages_array_wc(sgbuf->page_table, sgbuf->pages);
379 else 383 else
@@ -1483,6 +1487,7 @@ static int azx_first_init(struct azx *chip)
1483 struct snd_card *card = chip->card; 1487 struct snd_card *card = chip->card;
1484 int err; 1488 int err;
1485 unsigned short gcap; 1489 unsigned short gcap;
1490 unsigned int dma_bits = 64;
1486 1491
1487#if BITS_PER_LONG != 64 1492#if BITS_PER_LONG != 64
1488 /* Fix up base address on ULI M5461 */ 1493 /* Fix up base address on ULI M5461 */
@@ -1506,9 +1511,14 @@ static int azx_first_init(struct azx *chip)
1506 return -ENXIO; 1511 return -ENXIO;
1507 } 1512 }
1508 1513
1509 if (chip->msi) 1514 if (chip->msi) {
1515 if (chip->driver_caps & AZX_DCAPS_NO_MSI64) {
1516 dev_dbg(card->dev, "Disabling 64bit MSI\n");
1517 pci->no_64bit_msi = true;
1518 }
1510 if (pci_enable_msi(pci) < 0) 1519 if (pci_enable_msi(pci) < 0)
1511 chip->msi = 0; 1520 chip->msi = 0;
1521 }
1512 1522
1513 if (azx_acquire_irq(chip, 0) < 0) 1523 if (azx_acquire_irq(chip, 0) < 0)
1514 return -EBUSY; 1524 return -EBUSY;
@@ -1519,9 +1529,14 @@ static int azx_first_init(struct azx *chip)
1519 gcap = azx_readw(chip, GCAP); 1529 gcap = azx_readw(chip, GCAP);
1520 dev_dbg(card->dev, "chipset global capabilities = 0x%x\n", gcap); 1530 dev_dbg(card->dev, "chipset global capabilities = 0x%x\n", gcap);
1521 1531
1532 /* AMD devices support 40 or 48bit DMA, take the safe one */
1533 if (chip->pci->vendor == PCI_VENDOR_ID_AMD)
1534 dma_bits = 40;
1535
1522 /* disable SB600 64bit support for safety */ 1536 /* disable SB600 64bit support for safety */
1523 if (chip->pci->vendor == PCI_VENDOR_ID_ATI) { 1537 if (chip->pci->vendor == PCI_VENDOR_ID_ATI) {
1524 struct pci_dev *p_smbus; 1538 struct pci_dev *p_smbus;
1539 dma_bits = 40;
1525 p_smbus = pci_get_device(PCI_VENDOR_ID_ATI, 1540 p_smbus = pci_get_device(PCI_VENDOR_ID_ATI,
1526 PCI_DEVICE_ID_ATI_SBX00_SMBUS, 1541 PCI_DEVICE_ID_ATI_SBX00_SMBUS,
1527 NULL); 1542 NULL);
@@ -1551,9 +1566,11 @@ static int azx_first_init(struct azx *chip)
1551 } 1566 }
1552 1567
1553 /* allow 64bit DMA address if supported by H/W */ 1568 /* allow 64bit DMA address if supported by H/W */
1554 if ((gcap & AZX_GCAP_64OK) && !pci_set_dma_mask(pci, DMA_BIT_MASK(64))) 1569 if (!(gcap & AZX_GCAP_64OK))
1555 pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(64)); 1570 dma_bits = 32;
1556 else { 1571 if (!pci_set_dma_mask(pci, DMA_BIT_MASK(dma_bits))) {
1572 pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(dma_bits));
1573 } else {
1557 pci_set_dma_mask(pci, DMA_BIT_MASK(32)); 1574 pci_set_dma_mask(pci, DMA_BIT_MASK(32));
1558 pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(32)); 1575 pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(32));
1559 } 1576 }
@@ -1769,7 +1786,7 @@ static void pcm_mmap_prepare(struct snd_pcm_substream *substream,
1769#ifdef CONFIG_X86 1786#ifdef CONFIG_X86
1770 struct azx_pcm *apcm = snd_pcm_substream_chip(substream); 1787 struct azx_pcm *apcm = snd_pcm_substream_chip(substream);
1771 struct azx *chip = apcm->chip; 1788 struct azx *chip = apcm->chip;
1772 if (!azx_snoop(chip)) 1789 if (!azx_snoop(chip) && chip->driver_type != AZX_DRIVER_CMEDIA)
1773 area->vm_page_prot = pgprot_writecombine(area->vm_page_prot); 1790 area->vm_page_prot = pgprot_writecombine(area->vm_page_prot);
1774#endif 1791#endif
1775} 1792}
@@ -2002,6 +2019,9 @@ static const struct pci_device_id azx_ids[] = {
2002 /* Sunrise Point */ 2019 /* Sunrise Point */
2003 { PCI_DEVICE(0x8086, 0xa170), 2020 { PCI_DEVICE(0x8086, 0xa170),
2004 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH }, 2021 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
2022 /* Sunrise Point-LP */
2023 { PCI_DEVICE(0x8086, 0x9d70),
2024 .driver_data = AZX_DRIVER_PCH | AZX_DCAPS_INTEL_PCH },
2005 /* Haswell */ 2025 /* Haswell */
2006 { PCI_DEVICE(0x8086, 0x0a0c), 2026 { PCI_DEVICE(0x8086, 0x0a0c),
2007 .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL }, 2027 .driver_data = AZX_DRIVER_HDMI | AZX_DCAPS_INTEL_HASWELL },
diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h
index 7eb44e78e141..62658f2f8c9f 100644
--- a/sound/pci/hda/hda_local.h
+++ b/sound/pci/hda/hda_local.h
@@ -419,7 +419,7 @@ struct snd_hda_pin_quirk {
419 .subvendor = _subvendor,\ 419 .subvendor = _subvendor,\
420 .name = _name,\ 420 .name = _name,\
421 .value = _value,\ 421 .value = _value,\
422 .pins = (const struct hda_pintbl[]) { _pins } \ 422 .pins = (const struct hda_pintbl[]) { _pins, {0, 0}} \
423 } 423 }
424#else 424#else
425 425
@@ -427,7 +427,7 @@ struct snd_hda_pin_quirk {
427 { .codec = _codec,\ 427 { .codec = _codec,\
428 .subvendor = _subvendor,\ 428 .subvendor = _subvendor,\
429 .value = _value,\ 429 .value = _value,\
430 .pins = (const struct hda_pintbl[]) { _pins } \ 430 .pins = (const struct hda_pintbl[]) { _pins, {0, 0}} \
431 } 431 }
432 432
433#endif 433#endif
diff --git a/sound/pci/hda/hda_priv.h b/sound/pci/hda/hda_priv.h
index 949cd437eeb2..5016014e57f2 100644
--- a/sound/pci/hda/hda_priv.h
+++ b/sound/pci/hda/hda_priv.h
@@ -171,6 +171,7 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
171#define AZX_DCAPS_PM_RUNTIME (1 << 26) /* runtime PM support */ 171#define AZX_DCAPS_PM_RUNTIME (1 << 26) /* runtime PM support */
172#define AZX_DCAPS_I915_POWERWELL (1 << 27) /* HSW i915 powerwell support */ 172#define AZX_DCAPS_I915_POWERWELL (1 << 27) /* HSW i915 powerwell support */
173#define AZX_DCAPS_CORBRP_SELF_CLEAR (1 << 28) /* CORBRP clears itself after reset */ 173#define AZX_DCAPS_CORBRP_SELF_CLEAR (1 << 28) /* CORBRP clears itself after reset */
174#define AZX_DCAPS_NO_MSI64 (1 << 29) /* Stick to 32-bit MSIs */
174 175
175/* HD Audio class code */ 176/* HD Audio class code */
176#define PCI_CLASS_MULTIMEDIA_HD_AUDIO 0x0403 177#define PCI_CLASS_MULTIMEDIA_HD_AUDIO 0x0403
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 71e4bad06345..e9ebc7bd752c 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -43,6 +43,7 @@ struct conexant_spec {
43 unsigned int num_eapds; 43 unsigned int num_eapds;
44 hda_nid_t eapds[4]; 44 hda_nid_t eapds[4];
45 bool dynamic_eapd; 45 bool dynamic_eapd;
46 hda_nid_t mute_led_eapd;
46 47
47 unsigned int parse_flags; /* flag for snd_hda_parse_pin_defcfg() */ 48 unsigned int parse_flags; /* flag for snd_hda_parse_pin_defcfg() */
48 49
@@ -163,6 +164,17 @@ static void cx_auto_vmaster_hook(void *private_data, int enabled)
163 cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, enabled); 164 cx_auto_turn_eapd(codec, spec->num_eapds, spec->eapds, enabled);
164} 165}
165 166
167/* turn on/off EAPD according to Master switch (inversely!) for mute LED */
168static void cx_auto_vmaster_hook_mute_led(void *private_data, int enabled)
169{
170 struct hda_codec *codec = private_data;
171 struct conexant_spec *spec = codec->spec;
172
173 snd_hda_codec_write(codec, spec->mute_led_eapd, 0,
174 AC_VERB_SET_EAPD_BTLENABLE,
175 enabled ? 0x00 : 0x02);
176}
177
166static int cx_auto_build_controls(struct hda_codec *codec) 178static int cx_auto_build_controls(struct hda_codec *codec)
167{ 179{
168 int err; 180 int err;
@@ -223,6 +235,7 @@ enum {
223 CXT_FIXUP_TOSHIBA_P105, 235 CXT_FIXUP_TOSHIBA_P105,
224 CXT_FIXUP_HP_530, 236 CXT_FIXUP_HP_530,
225 CXT_FIXUP_CAP_MIX_AMP_5047, 237 CXT_FIXUP_CAP_MIX_AMP_5047,
238 CXT_FIXUP_MUTE_LED_EAPD,
226}; 239};
227 240
228/* for hda_fixup_thinkpad_acpi() */ 241/* for hda_fixup_thinkpad_acpi() */
@@ -557,6 +570,18 @@ static void cxt_fixup_olpc_xo(struct hda_codec *codec,
557 } 570 }
558} 571}
559 572
573static void cxt_fixup_mute_led_eapd(struct hda_codec *codec,
574 const struct hda_fixup *fix, int action)
575{
576 struct conexant_spec *spec = codec->spec;
577
578 if (action == HDA_FIXUP_ACT_PRE_PROBE) {
579 spec->mute_led_eapd = 0x1b;
580 spec->dynamic_eapd = 1;
581 spec->gen.vmaster_mute.hook = cx_auto_vmaster_hook_mute_led;
582 }
583}
584
560/* 585/*
561 * Fix max input level on mixer widget to 0dB 586 * Fix max input level on mixer widget to 0dB
562 * (originally it has 0x2b steps with 0dB offset 0x14) 587 * (originally it has 0x2b steps with 0dB offset 0x14)
@@ -705,6 +730,10 @@ static const struct hda_fixup cxt_fixups[] = {
705 .type = HDA_FIXUP_FUNC, 730 .type = HDA_FIXUP_FUNC,
706 .v.func = cxt_fixup_cap_mix_amp_5047, 731 .v.func = cxt_fixup_cap_mix_amp_5047,
707 }, 732 },
733 [CXT_FIXUP_MUTE_LED_EAPD] = {
734 .type = HDA_FIXUP_FUNC,
735 .v.func = cxt_fixup_mute_led_eapd,
736 },
708}; 737};
709 738
710static const struct snd_pci_quirk cxt5045_fixups[] = { 739static const struct snd_pci_quirk cxt5045_fixups[] = {
@@ -762,6 +791,7 @@ static const struct snd_pci_quirk cxt5066_fixups[] = {
762 SND_PCI_QUIRK(0x17aa, 0x21cf, "Lenovo T520", CXT_PINCFG_LENOVO_TP410), 791 SND_PCI_QUIRK(0x17aa, 0x21cf, "Lenovo T520", CXT_PINCFG_LENOVO_TP410),
763 SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT_PINCFG_LENOVO_TP410), 792 SND_PCI_QUIRK(0x17aa, 0x21da, "Lenovo X220", CXT_PINCFG_LENOVO_TP410),
764 SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT_PINCFG_LENOVO_TP410), 793 SND_PCI_QUIRK(0x17aa, 0x21db, "Lenovo X220-tablet", CXT_PINCFG_LENOVO_TP410),
794 SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo IdeaPad Z560", CXT_FIXUP_MUTE_LED_EAPD),
765 SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC), 795 SND_PCI_QUIRK(0x17aa, 0x3975, "Lenovo U300s", CXT_FIXUP_STEREO_DMIC),
766 SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC), 796 SND_PCI_QUIRK(0x17aa, 0x3977, "Lenovo IdeaPad U310", CXT_FIXUP_STEREO_DMIC),
767 SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC), 797 SND_PCI_QUIRK(0x17aa, 0x397b, "Lenovo S205", CXT_FIXUP_STEREO_DMIC),
@@ -780,6 +810,7 @@ static const struct hda_model_fixup cxt5066_fixup_models[] = {
780 { .id = CXT_PINCFG_LEMOTE_A1004, .name = "lemote-a1004" }, 810 { .id = CXT_PINCFG_LEMOTE_A1004, .name = "lemote-a1004" },
781 { .id = CXT_PINCFG_LEMOTE_A1205, .name = "lemote-a1205" }, 811 { .id = CXT_PINCFG_LEMOTE_A1205, .name = "lemote-a1205" },
782 { .id = CXT_FIXUP_OLPC_XO, .name = "olpc-xo" }, 812 { .id = CXT_FIXUP_OLPC_XO, .name = "olpc-xo" },
813 { .id = CXT_FIXUP_MUTE_LED_EAPD, .name = "mute-led-eapd" },
783 {} 814 {}
784}; 815};
785 816
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 39862e98551c..9dc9cf8c90e9 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -1583,19 +1583,22 @@ static bool hdmi_present_sense(struct hdmi_spec_per_pin *per_pin, int repoll)
1583 } 1583 }
1584 } 1584 }
1585 1585
1586 if (pin_eld->eld_valid && !eld->eld_valid) { 1586 if (pin_eld->eld_valid != eld->eld_valid)
1587 update_eld = true;
1588 eld_changed = true; 1587 eld_changed = true;
1589 } 1588
1589 if (pin_eld->eld_valid && !eld->eld_valid)
1590 update_eld = true;
1591
1590 if (update_eld) { 1592 if (update_eld) {
1591 bool old_eld_valid = pin_eld->eld_valid; 1593 bool old_eld_valid = pin_eld->eld_valid;
1592 pin_eld->eld_valid = eld->eld_valid; 1594 pin_eld->eld_valid = eld->eld_valid;
1593 eld_changed = pin_eld->eld_size != eld->eld_size || 1595 if (pin_eld->eld_size != eld->eld_size ||
1594 memcmp(pin_eld->eld_buffer, eld->eld_buffer, 1596 memcmp(pin_eld->eld_buffer, eld->eld_buffer,
1595 eld->eld_size) != 0; 1597 eld->eld_size) != 0) {
1596 if (eld_changed)
1597 memcpy(pin_eld->eld_buffer, eld->eld_buffer, 1598 memcpy(pin_eld->eld_buffer, eld->eld_buffer,
1598 eld->eld_size); 1599 eld->eld_size);
1600 eld_changed = true;
1601 }
1599 pin_eld->eld_size = eld->eld_size; 1602 pin_eld->eld_size = eld->eld_size;
1600 pin_eld->info = eld->info; 1603 pin_eld->info = eld->info;
1601 1604
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index bc86c36b4bfa..b118a5be18df 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -288,21 +288,91 @@ static void alc880_unsol_event(struct hda_codec *codec, unsigned int res)
288 snd_hda_jack_unsol_event(codec, res >> 2); 288 snd_hda_jack_unsol_event(codec, res >> 2);
289} 289}
290 290
291/* additional initialization for ALC888 variants */ 291/* Change EAPD to verb control */
292static void alc888_coef_init(struct hda_codec *codec) 292static void alc_fill_eapd_coef(struct hda_codec *codec)
293{ 293{
294 if (alc_get_coef0(codec) == 0x20) 294 int coef;
295 /* alc888S-VC */ 295
296 alc_write_coef_idx(codec, 7, 0x830); 296 coef = alc_get_coef0(codec);
297 else 297
298 /* alc888-VB */ 298 switch (codec->vendor_id) {
299 alc_write_coef_idx(codec, 7, 0x3030); 299 case 0x10ec0262:
300 alc_update_coef_idx(codec, 0x7, 0, 1<<5);
301 break;
302 case 0x10ec0267:
303 case 0x10ec0268:
304 alc_update_coef_idx(codec, 0x7, 0, 1<<13);
305 break;
306 case 0x10ec0269:
307 if ((coef & 0x00f0) == 0x0010)
308 alc_update_coef_idx(codec, 0xd, 0, 1<<14);
309 if ((coef & 0x00f0) == 0x0020)
310 alc_update_coef_idx(codec, 0x4, 1<<15, 0);
311 if ((coef & 0x00f0) == 0x0030)
312 alc_update_coef_idx(codec, 0x10, 1<<9, 0);
313 break;
314 case 0x10ec0280:
315 case 0x10ec0284:
316 case 0x10ec0290:
317 case 0x10ec0292:
318 alc_update_coef_idx(codec, 0x4, 1<<15, 0);
319 break;
320 case 0x10ec0233:
321 case 0x10ec0255:
322 case 0x10ec0282:
323 case 0x10ec0283:
324 case 0x10ec0286:
325 case 0x10ec0288:
326 alc_update_coef_idx(codec, 0x10, 1<<9, 0);
327 break;
328 case 0x10ec0285:
329 case 0x10ec0293:
330 alc_update_coef_idx(codec, 0xa, 1<<13, 0);
331 break;
332 case 0x10ec0662:
333 if ((coef & 0x00f0) == 0x0030)
334 alc_update_coef_idx(codec, 0x4, 1<<10, 0); /* EAPD Ctrl */
335 break;
336 case 0x10ec0272:
337 case 0x10ec0273:
338 case 0x10ec0663:
339 case 0x10ec0665:
340 case 0x10ec0670:
341 case 0x10ec0671:
342 case 0x10ec0672:
343 alc_update_coef_idx(codec, 0xd, 0, 1<<14); /* EAPD Ctrl */
344 break;
345 case 0x10ec0668:
346 alc_update_coef_idx(codec, 0x7, 3<<13, 0);
347 break;
348 case 0x10ec0867:
349 alc_update_coef_idx(codec, 0x4, 1<<10, 0);
350 break;
351 case 0x10ec0888:
352 if ((coef & 0x00f0) == 0x0020 || (coef & 0x00f0) == 0x0030)
353 alc_update_coef_idx(codec, 0x7, 1<<5, 0);
354 break;
355 case 0x10ec0892:
356 alc_update_coef_idx(codec, 0x7, 1<<5, 0);
357 break;
358 case 0x10ec0899:
359 case 0x10ec0900:
360 alc_update_coef_idx(codec, 0x7, 1<<1, 0);
361 break;
362 }
300} 363}
301 364
302/* additional initialization for ALC889 variants */ 365/* additional initialization for ALC888 variants */
303static void alc889_coef_init(struct hda_codec *codec) 366static void alc888_coef_init(struct hda_codec *codec)
304{ 367{
305 alc_update_coef_idx(codec, 7, 0, 0x2010); 368 switch (alc_get_coef0(codec) & 0x00f0) {
369 /* alc888-VA */
370 case 0x00:
371 /* alc888-VB */
372 case 0x10:
373 alc_update_coef_idx(codec, 7, 0, 0x2030); /* Turn EAPD to High */
374 break;
375 }
306} 376}
307 377
308/* turn on/off EAPD control (only if available) */ 378/* turn on/off EAPD control (only if available) */
@@ -343,6 +413,7 @@ static void alc_eapd_shutup(struct hda_codec *codec)
343/* generic EAPD initialization */ 413/* generic EAPD initialization */
344static void alc_auto_init_amp(struct hda_codec *codec, int type) 414static void alc_auto_init_amp(struct hda_codec *codec, int type)
345{ 415{
416 alc_fill_eapd_coef(codec);
346 alc_auto_setup_eapd(codec, true); 417 alc_auto_setup_eapd(codec, true);
347 switch (type) { 418 switch (type) {
348 case ALC_INIT_GPIO1: 419 case ALC_INIT_GPIO1:
@@ -359,25 +430,15 @@ static void alc_auto_init_amp(struct hda_codec *codec, int type)
359 case 0x10ec0260: 430 case 0x10ec0260:
360 alc_update_coefex_idx(codec, 0x1a, 7, 0, 0x2010); 431 alc_update_coefex_idx(codec, 0x1a, 7, 0, 0x2010);
361 break; 432 break;
362 case 0x10ec0262:
363 case 0x10ec0880: 433 case 0x10ec0880:
364 case 0x10ec0882: 434 case 0x10ec0882:
365 case 0x10ec0883: 435 case 0x10ec0883:
366 case 0x10ec0885: 436 case 0x10ec0885:
367 case 0x10ec0887: 437 alc_update_coef_idx(codec, 7, 0, 0x2030);
368 /*case 0x10ec0889:*/ /* this causes an SPDIF problem */
369 case 0x10ec0900:
370 alc889_coef_init(codec);
371 break; 438 break;
372 case 0x10ec0888: 439 case 0x10ec0888:
373 alc888_coef_init(codec); 440 alc888_coef_init(codec);
374 break; 441 break;
375#if 0 /* XXX: This may cause the silent output on speaker on some machines */
376 case 0x10ec0267:
377 case 0x10ec0268:
378 alc_update_coef_idx(codec, 7, 0, 0x3000);
379 break;
380#endif /* XXX */
381 } 442 }
382 break; 443 break;
383 } 444 }
@@ -1710,7 +1771,7 @@ static void alc889_fixup_coef(struct hda_codec *codec,
1710{ 1771{
1711 if (action != HDA_FIXUP_ACT_INIT) 1772 if (action != HDA_FIXUP_ACT_INIT)
1712 return; 1773 return;
1713 alc889_coef_init(codec); 1774 alc_update_coef_idx(codec, 7, 0, 0x2030);
1714} 1775}
1715 1776
1716/* toggle speaker-output according to the hp-jack state */ 1777/* toggle speaker-output according to the hp-jack state */
@@ -2675,7 +2736,7 @@ static void alc269_shutup(struct hda_codec *codec)
2675 2736
2676static struct coef_fw alc282_coefs[] = { 2737static struct coef_fw alc282_coefs[] = {
2677 WRITE_COEF(0x03, 0x0002), /* Power Down Control */ 2738 WRITE_COEF(0x03, 0x0002), /* Power Down Control */
2678 WRITE_COEF(0x05, 0x0700), /* FIFO and filter clock */ 2739 UPDATE_COEF(0x05, 0xff3f, 0x0700), /* FIFO and filter clock */
2679 WRITE_COEF(0x07, 0x0200), /* DMIC control */ 2740 WRITE_COEF(0x07, 0x0200), /* DMIC control */
2680 UPDATE_COEF(0x06, 0x00f0, 0), /* Analog clock */ 2741 UPDATE_COEF(0x06, 0x00f0, 0), /* Analog clock */
2681 UPDATE_COEF(0x08, 0xfffc, 0x0c2c), /* JD */ 2742 UPDATE_COEF(0x08, 0xfffc, 0x0c2c), /* JD */
@@ -2786,7 +2847,7 @@ static void alc282_shutup(struct hda_codec *codec)
2786 2847
2787static struct coef_fw alc283_coefs[] = { 2848static struct coef_fw alc283_coefs[] = {
2788 WRITE_COEF(0x03, 0x0002), /* Power Down Control */ 2849 WRITE_COEF(0x03, 0x0002), /* Power Down Control */
2789 WRITE_COEF(0x05, 0x0700), /* FIFO and filter clock */ 2850 UPDATE_COEF(0x05, 0xff3f, 0x0700), /* FIFO and filter clock */
2790 WRITE_COEF(0x07, 0x0200), /* DMIC control */ 2851 WRITE_COEF(0x07, 0x0200), /* DMIC control */
2791 UPDATE_COEF(0x06, 0x00f0, 0), /* Analog clock */ 2852 UPDATE_COEF(0x06, 0x00f0, 0), /* Analog clock */
2792 UPDATE_COEF(0x08, 0xfffc, 0x0c2c), /* JD */ 2853 UPDATE_COEF(0x08, 0xfffc, 0x0c2c), /* JD */
@@ -2817,6 +2878,7 @@ static struct coef_fw alc283_coefs[] = {
2817 UPDATE_COEF(0x40, 0xf800, 0x9800), /* Class D DC enable */ 2878 UPDATE_COEF(0x40, 0xf800, 0x9800), /* Class D DC enable */
2818 UPDATE_COEF(0x42, 0xf000, 0x2000), /* DC offset */ 2879 UPDATE_COEF(0x42, 0xf000, 0x2000), /* DC offset */
2819 WRITE_COEF(0x37, 0xfc06), /* Class D amp control */ 2880 WRITE_COEF(0x37, 0xfc06), /* Class D amp control */
2881 UPDATE_COEF(0x1b, 0x8000, 0), /* HP JD control */
2820 {} 2882 {}
2821}; 2883};
2822 2884
@@ -2884,6 +2946,9 @@ static void alc283_shutup(struct hda_codec *codec)
2884 2946
2885 alc_write_coef_idx(codec, 0x43, 0x9004); 2947 alc_write_coef_idx(codec, 0x43, 0x9004);
2886 2948
2949 /*depop hp during suspend*/
2950 alc_write_coef_idx(codec, 0x06, 0x2100);
2951
2887 snd_hda_codec_write(codec, hp_pin, 0, 2952 snd_hda_codec_write(codec, hp_pin, 0,
2888 AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE); 2953 AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_MUTE);
2889 2954
@@ -3346,6 +3411,27 @@ static void alc269_fixup_hp_gpio_mic1_led(struct hda_codec *codec,
3346 } 3411 }
3347} 3412}
3348 3413
3414static void alc280_fixup_hp_gpio4(struct hda_codec *codec,
3415 const struct hda_fixup *fix, int action)
3416{
3417 /* Like hp_gpio_mic1_led, but also needs GPIO4 low to enable headphone amp */
3418 struct alc_spec *spec = codec->spec;
3419 static const struct hda_verb gpio_init[] = {
3420 { 0x01, AC_VERB_SET_GPIO_MASK, 0x18 },
3421 { 0x01, AC_VERB_SET_GPIO_DIRECTION, 0x18 },
3422 {}
3423 };
3424
3425 if (action == HDA_FIXUP_ACT_PRE_PROBE) {
3426 spec->gen.vmaster_mute.hook = alc269_fixup_hp_gpio_mute_hook;
3427 spec->gen.cap_sync_hook = alc269_fixup_hp_cap_mic_mute_hook;
3428 spec->gpio_led = 0;
3429 spec->cap_mute_led_nid = 0x18;
3430 snd_hda_add_verbs(codec, gpio_init);
3431 codec->power_filter = led_power_filter;
3432 }
3433}
3434
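The gpio_init verbs in alc280_fixup_hp_gpio4() above claim two GPIO lines (mask 0x18, i.e. bits 3 and 4) and drive them as outputs; per the function's own comment, keeping GPIO4 low is what enables the headphone amp. A hedged restatement of that verb sequence, with an explicit data write added only for illustration (the fixup itself relies on the data bits defaulting to 0):

/* Editorial sketch, not part of the patch; bit-to-GPIO naming is assumed. */
static const struct hda_verb gpio4_low_sketch[] = {
        { 0x01, AC_VERB_SET_GPIO_MASK,      0x18 }, /* bits 3+4 become GPIOs */
        { 0x01, AC_VERB_SET_GPIO_DIRECTION, 0x18 }, /* both driven as outputs */
        { 0x01, AC_VERB_SET_GPIO_DATA,      0x00 }, /* GPIO4 held low -> HP amp enabled (assumed) */
        {}
};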
3349static void alc269_fixup_hp_line1_mic1_led(struct hda_codec *codec, 3435static void alc269_fixup_hp_line1_mic1_led(struct hda_codec *codec,
3350 const struct hda_fixup *fix, int action) 3436 const struct hda_fixup *fix, int action)
3351{ 3437{
@@ -4213,6 +4299,7 @@ enum {
4213 ALC283_FIXUP_BXBT2807_MIC, 4299 ALC283_FIXUP_BXBT2807_MIC,
4214 ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED, 4300 ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED,
4215 ALC282_FIXUP_ASPIRE_V5_PINS, 4301 ALC282_FIXUP_ASPIRE_V5_PINS,
4302 ALC280_FIXUP_HP_GPIO4,
4216}; 4303};
4217 4304
4218static const struct hda_fixup alc269_fixups[] = { 4305static const struct hda_fixup alc269_fixups[] = {
@@ -4433,6 +4520,8 @@ static const struct hda_fixup alc269_fixups[] = {
4433 [ALC269_FIXUP_HEADSET_MODE] = { 4520 [ALC269_FIXUP_HEADSET_MODE] = {
4434 .type = HDA_FIXUP_FUNC, 4521 .type = HDA_FIXUP_FUNC,
4435 .v.func = alc_fixup_headset_mode, 4522 .v.func = alc_fixup_headset_mode,
4523 .chained = true,
4524 .chain_id = ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED
4436 }, 4525 },
4437 [ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC] = { 4526 [ALC269_FIXUP_HEADSET_MODE_NO_HP_MIC] = {
4438 .type = HDA_FIXUP_FUNC, 4527 .type = HDA_FIXUP_FUNC,
@@ -4622,6 +4711,8 @@ static const struct hda_fixup alc269_fixups[] = {
4622 [ALC255_FIXUP_HEADSET_MODE] = { 4711 [ALC255_FIXUP_HEADSET_MODE] = {
4623 .type = HDA_FIXUP_FUNC, 4712 .type = HDA_FIXUP_FUNC,
4624 .v.func = alc_fixup_headset_mode_alc255, 4713 .v.func = alc_fixup_headset_mode_alc255,
4714 .chained = true,
4715 .chain_id = ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED
4625 }, 4716 },
4626 [ALC255_FIXUP_HEADSET_MODE_NO_HP_MIC] = { 4717 [ALC255_FIXUP_HEADSET_MODE_NO_HP_MIC] = {
4627 .type = HDA_FIXUP_FUNC, 4718 .type = HDA_FIXUP_FUNC,
@@ -4657,8 +4748,6 @@ static const struct hda_fixup alc269_fixups[] = {
4657 [ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED] = { 4748 [ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED] = {
4658 .type = HDA_FIXUP_FUNC, 4749 .type = HDA_FIXUP_FUNC,
4659 .v.func = alc_fixup_dell_wmi, 4750 .v.func = alc_fixup_dell_wmi,
4660 .chained_before = true,
4661 .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
4662 }, 4751 },
4663 [ALC282_FIXUP_ASPIRE_V5_PINS] = { 4752 [ALC282_FIXUP_ASPIRE_V5_PINS] = {
4664 .type = HDA_FIXUP_PINS, 4753 .type = HDA_FIXUP_PINS,
@@ -4676,7 +4765,10 @@ static const struct hda_fixup alc269_fixups[] = {
4676 { }, 4765 { },
4677 }, 4766 },
4678 }, 4767 },
4679 4768 [ALC280_FIXUP_HP_GPIO4] = {
4769 .type = HDA_FIXUP_FUNC,
4770 .v.func = alc280_fixup_hp_gpio4,
4771 },
4680}; 4772};
4681 4773
4682static const struct snd_pci_quirk alc269_fixup_tbl[] = { 4774static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -4693,13 +4785,13 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4693 SND_PCI_QUIRK(0x1028, 0x05f4, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), 4785 SND_PCI_QUIRK(0x1028, 0x05f4, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
4694 SND_PCI_QUIRK(0x1028, 0x05f5, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), 4786 SND_PCI_QUIRK(0x1028, 0x05f5, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
4695 SND_PCI_QUIRK(0x1028, 0x05f6, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE), 4787 SND_PCI_QUIRK(0x1028, 0x05f6, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
4696 SND_PCI_QUIRK(0x1028, 0x0610, "Dell", ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED),
4697 SND_PCI_QUIRK(0x1028, 0x0615, "Dell Vostro 5470", ALC290_FIXUP_SUBWOOFER_HSJACK), 4788 SND_PCI_QUIRK(0x1028, 0x0615, "Dell Vostro 5470", ALC290_FIXUP_SUBWOOFER_HSJACK),
4698 SND_PCI_QUIRK(0x1028, 0x0616, "Dell Vostro 5470", ALC290_FIXUP_SUBWOOFER_HSJACK), 4789 SND_PCI_QUIRK(0x1028, 0x0616, "Dell Vostro 5470", ALC290_FIXUP_SUBWOOFER_HSJACK),
4699 SND_PCI_QUIRK(0x1028, 0x061f, "Dell", ALC255_FIXUP_DELL_WMI_MIC_MUTE_LED),
4700 SND_PCI_QUIRK(0x1028, 0x0638, "Dell Inspiron 5439", ALC290_FIXUP_MONO_SPEAKERS_HSJACK), 4790 SND_PCI_QUIRK(0x1028, 0x0638, "Dell Inspiron 5439", ALC290_FIXUP_MONO_SPEAKERS_HSJACK),
4701 SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 4791 SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
4702 SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 4792 SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
4793 SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
4794 SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
4703 SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 4795 SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
4704 SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 4796 SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
4705 SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), 4797 SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -4724,21 +4816,15 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4724 SND_PCI_QUIRK(0x103c, 0x22cf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4816 SND_PCI_QUIRK(0x103c, 0x22cf, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
4725 SND_PCI_QUIRK(0x103c, 0x22dc, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), 4817 SND_PCI_QUIRK(0x103c, 0x22dc, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
4726 SND_PCI_QUIRK(0x103c, 0x22fb, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), 4818 SND_PCI_QUIRK(0x103c, 0x22fb, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
4727 SND_PCI_QUIRK(0x103c, 0x8004, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
4728 /* ALC290 */ 4819 /* ALC290 */
4729 SND_PCI_QUIRK(0x103c, 0x221b, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), 4820 SND_PCI_QUIRK(0x103c, 0x221b, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
4730 SND_PCI_QUIRK(0x103c, 0x2221, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), 4821 SND_PCI_QUIRK(0x103c, 0x2221, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
4731 SND_PCI_QUIRK(0x103c, 0x2225, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), 4822 SND_PCI_QUIRK(0x103c, 0x2225, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
4732 SND_PCI_QUIRK(0x103c, 0x2246, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
4733 SND_PCI_QUIRK(0x103c, 0x2247, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
4734 SND_PCI_QUIRK(0x103c, 0x2248, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
4735 SND_PCI_QUIRK(0x103c, 0x2249, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
4736 SND_PCI_QUIRK(0x103c, 0x2253, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), 4823 SND_PCI_QUIRK(0x103c, 0x2253, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
4737 SND_PCI_QUIRK(0x103c, 0x2254, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), 4824 SND_PCI_QUIRK(0x103c, 0x2254, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
4738 SND_PCI_QUIRK(0x103c, 0x2255, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), 4825 SND_PCI_QUIRK(0x103c, 0x2255, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
4739 SND_PCI_QUIRK(0x103c, 0x2256, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), 4826 SND_PCI_QUIRK(0x103c, 0x2256, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
4740 SND_PCI_QUIRK(0x103c, 0x2257, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), 4827 SND_PCI_QUIRK(0x103c, 0x2257, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
4741 SND_PCI_QUIRK(0x103c, 0x2258, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
4742 SND_PCI_QUIRK(0x103c, 0x2259, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), 4828 SND_PCI_QUIRK(0x103c, 0x2259, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
4743 SND_PCI_QUIRK(0x103c, 0x225a, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), 4829 SND_PCI_QUIRK(0x103c, 0x225a, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
4744 SND_PCI_QUIRK(0x103c, 0x2260, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4830 SND_PCI_QUIRK(0x103c, 0x2260, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
@@ -4747,7 +4833,6 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4747 SND_PCI_QUIRK(0x103c, 0x2265, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4833 SND_PCI_QUIRK(0x103c, 0x2265, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
4748 SND_PCI_QUIRK(0x103c, 0x2272, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), 4834 SND_PCI_QUIRK(0x103c, 0x2272, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
4749 SND_PCI_QUIRK(0x103c, 0x2273, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), 4835 SND_PCI_QUIRK(0x103c, 0x2273, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
4750 SND_PCI_QUIRK(0x103c, 0x2277, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
4751 SND_PCI_QUIRK(0x103c, 0x2278, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED), 4836 SND_PCI_QUIRK(0x103c, 0x2278, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED),
4752 SND_PCI_QUIRK(0x103c, 0x227f, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4837 SND_PCI_QUIRK(0x103c, 0x227f, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
4753 SND_PCI_QUIRK(0x103c, 0x2282, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1), 4838 SND_PCI_QUIRK(0x103c, 0x2282, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
@@ -4800,7 +4885,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4800 SND_PCI_QUIRK(0x17aa, 0x220e, "Thinkpad T440p", ALC292_FIXUP_TPT440_DOCK), 4885 SND_PCI_QUIRK(0x17aa, 0x220e, "Thinkpad T440p", ALC292_FIXUP_TPT440_DOCK),
4801 SND_PCI_QUIRK(0x17aa, 0x2210, "Thinkpad T540p", ALC292_FIXUP_TPT440_DOCK), 4886 SND_PCI_QUIRK(0x17aa, 0x2210, "Thinkpad T540p", ALC292_FIXUP_TPT440_DOCK),
4802 SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad T440", ALC292_FIXUP_TPT440_DOCK), 4887 SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad T440", ALC292_FIXUP_TPT440_DOCK),
4803 SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 4888 SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad X240", ALC292_FIXUP_TPT440_DOCK),
4804 SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 4889 SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
4805 SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP), 4890 SND_PCI_QUIRK(0x17aa, 0x3978, "IdeaPad Y410P", ALC269_FIXUP_NO_SHUTUP),
4806 SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 4891 SND_PCI_QUIRK(0x17aa, 0x5013, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -4980,6 +5065,19 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
4980 {0x17, 0x40000000}, 5065 {0x17, 0x40000000},
4981 {0x1d, 0x40700001}, 5066 {0x1d, 0x40700001},
4982 {0x21, 0x02211040}), 5067 {0x21, 0x02211040}),
5068 SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
5069 {0x12, 0x90a60130},
5070 {0x13, 0x40000000},
5071 {0x14, 0x90170110},
5072 {0x15, 0x0421101f},
5073 {0x16, 0x411111f0},
5074 {0x17, 0x411111f0},
5075 {0x18, 0x411111f0},
5076 {0x19, 0x411111f0},
5077 {0x1a, 0x04a11020},
5078 {0x1b, 0x411111f0},
5079 {0x1d, 0x40748605},
5080 {0x1e, 0x411111f0}),
4983 SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED, 5081 SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC269_FIXUP_HP_GPIO_MIC1_LED,
4984 {0x12, 0x90a60140}, 5082 {0x12, 0x90a60140},
4985 {0x13, 0x40000000}, 5083 {0x13, 0x40000000},
@@ -5190,9 +5288,6 @@ static void alc269_fill_coef(struct hda_codec *codec)
5190 } 5288 }
5191 } 5289 }
5192 5290
5193 /* Class D */
5194 alc_update_coef_idx(codec, 0xd, 0, 1<<14);
5195
5196 /* HP */ 5291 /* HP */
5197 alc_update_coef_idx(codec, 0x4, 0, 1<<11); 5292 alc_update_coef_idx(codec, 0x4, 0, 1<<11);
5198} 5293}
@@ -5610,9 +5705,9 @@ static void alc662_led_gpio1_mute_hook(void *private_data, int enabled)
5610 unsigned int oldval = spec->gpio_led; 5705 unsigned int oldval = spec->gpio_led;
5611 5706
5612 if (enabled) 5707 if (enabled)
5613 spec->gpio_led &= ~0x01;
5614 else
5615 spec->gpio_led |= 0x01; 5708 spec->gpio_led |= 0x01;
5709 else
5710 spec->gpio_led &= ~0x01;
5616 if (spec->gpio_led != oldval) 5711 if (spec->gpio_led != oldval)
5617 snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA, 5712 snd_hda_codec_write(codec, 0x01, 0, AC_VERB_SET_GPIO_DATA,
5618 spec->gpio_led); 5713 spec->gpio_led);
@@ -5647,6 +5742,35 @@ static void alc662_fixup_led_gpio1(struct hda_codec *codec,
5647 } 5742 }
5648} 5743}
5649 5744
5745static struct coef_fw alc668_coefs[] = {
5746 WRITE_COEF(0x01, 0xbebe), WRITE_COEF(0x02, 0xaaaa), WRITE_COEF(0x03, 0x0),
5747 WRITE_COEF(0x04, 0x0180), WRITE_COEF(0x06, 0x0), WRITE_COEF(0x07, 0x0f80),
5748 WRITE_COEF(0x08, 0x0031), WRITE_COEF(0x0a, 0x0060), WRITE_COEF(0x0b, 0x0),
5749 WRITE_COEF(0x0c, 0x7cf7), WRITE_COEF(0x0d, 0x1080), WRITE_COEF(0x0e, 0x7f7f),
5750 WRITE_COEF(0x0f, 0xcccc), WRITE_COEF(0x10, 0xddcc), WRITE_COEF(0x11, 0x0001),
5751 WRITE_COEF(0x13, 0x0), WRITE_COEF(0x14, 0x2aa0), WRITE_COEF(0x17, 0xa940),
5752 WRITE_COEF(0x19, 0x0), WRITE_COEF(0x1a, 0x0), WRITE_COEF(0x1b, 0x0),
5753 WRITE_COEF(0x1c, 0x0), WRITE_COEF(0x1d, 0x0), WRITE_COEF(0x1e, 0x7418),
5754 WRITE_COEF(0x1f, 0x0804), WRITE_COEF(0x20, 0x4200), WRITE_COEF(0x21, 0x0468),
5755 WRITE_COEF(0x22, 0x8ccc), WRITE_COEF(0x23, 0x0250), WRITE_COEF(0x24, 0x7418),
5756 WRITE_COEF(0x27, 0x0), WRITE_COEF(0x28, 0x8ccc), WRITE_COEF(0x2a, 0xff00),
5757 WRITE_COEF(0x2b, 0x8000), WRITE_COEF(0xa7, 0xff00), WRITE_COEF(0xa8, 0x8000),
5758 WRITE_COEF(0xaa, 0x2e17), WRITE_COEF(0xab, 0xa0c0), WRITE_COEF(0xac, 0x0),
5759 WRITE_COEF(0xad, 0x0), WRITE_COEF(0xae, 0x2ac6), WRITE_COEF(0xaf, 0xa480),
5760 WRITE_COEF(0xb0, 0x0), WRITE_COEF(0xb1, 0x0), WRITE_COEF(0xb2, 0x0),
5761 WRITE_COEF(0xb3, 0x0), WRITE_COEF(0xb4, 0x0), WRITE_COEF(0xb5, 0x1040),
5762 WRITE_COEF(0xb6, 0xd697), WRITE_COEF(0xb7, 0x902b), WRITE_COEF(0xb8, 0xd697),
5763 WRITE_COEF(0xb9, 0x902b), WRITE_COEF(0xba, 0xb8ba), WRITE_COEF(0xbb, 0xaaab),
5764 WRITE_COEF(0xbc, 0xaaaf), WRITE_COEF(0xbd, 0x6aaa), WRITE_COEF(0xbe, 0x1c02),
5765 WRITE_COEF(0xc0, 0x00ff), WRITE_COEF(0xc1, 0x0fa6),
5766 {}
5767};
5768
5769static void alc668_restore_default_value(struct hda_codec *codec)
5770{
5771 alc_process_coef_fw(codec, alc668_coefs);
5772}
5773
5650enum { 5774enum {
5651 ALC662_FIXUP_ASPIRE, 5775 ALC662_FIXUP_ASPIRE,
5652 ALC662_FIXUP_LED_GPIO1, 5776 ALC662_FIXUP_LED_GPIO1,
@@ -5919,6 +6043,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
5919 SND_PCI_QUIRK(0x1028, 0x0626, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), 6043 SND_PCI_QUIRK(0x1028, 0x0626, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
5920 SND_PCI_QUIRK(0x1028, 0x0696, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), 6044 SND_PCI_QUIRK(0x1028, 0x0696, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
5921 SND_PCI_QUIRK(0x1028, 0x0698, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE), 6045 SND_PCI_QUIRK(0x1028, 0x0698, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
6046 SND_PCI_QUIRK(0x1028, 0x069f, "Dell", ALC668_FIXUP_DELL_MIC_NO_PRESENCE),
5922 SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800), 6047 SND_PCI_QUIRK(0x103c, 0x1632, "HP RP5800", ALC662_FIXUP_HP_RP5800),
5923 SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_BASS_1A), 6048 SND_PCI_QUIRK(0x1043, 0x11cd, "Asus N550", ALC662_FIXUP_BASS_1A),
5924 SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_MODE4_CHMAP), 6049 SND_PCI_QUIRK(0x1043, 0x1477, "ASUS N56VZ", ALC662_FIXUP_BASS_MODE4_CHMAP),
@@ -6072,29 +6197,6 @@ static const struct snd_hda_pin_quirk alc662_pin_fixup_tbl[] = {
6072 {} 6197 {}
6073}; 6198};
6074 6199
6075static void alc662_fill_coef(struct hda_codec *codec)
6076{
6077 int coef;
6078
6079 coef = alc_get_coef0(codec);
6080
6081 switch (codec->vendor_id) {
6082 case 0x10ec0662:
6083 if ((coef & 0x00f0) == 0x0030)
6084 alc_update_coef_idx(codec, 0x4, 1<<10, 0); /* EAPD Ctrl */
6085 break;
6086 case 0x10ec0272:
6087 case 0x10ec0273:
6088 case 0x10ec0663:
6089 case 0x10ec0665:
6090 case 0x10ec0670:
6091 case 0x10ec0671:
6092 case 0x10ec0672:
6093 alc_update_coef_idx(codec, 0xd, 0, 1<<14); /* EAPD Ctrl */
6094 break;
6095 }
6096}
6097
6098/* 6200/*
6099 */ 6201 */
6100static int patch_alc662(struct hda_codec *codec) 6202static int patch_alc662(struct hda_codec *codec)
@@ -6113,8 +6215,11 @@ static int patch_alc662(struct hda_codec *codec)
6113 6215
6114 alc_fix_pll_init(codec, 0x20, 0x04, 15); 6216 alc_fix_pll_init(codec, 0x20, 0x04, 15);
6115 6217
6116 spec->init_hook = alc662_fill_coef; 6218 switch (codec->vendor_id) {
6117 alc662_fill_coef(codec); 6219 case 0x10ec0668:
6220 spec->init_hook = alc668_restore_default_value;
6221 break;
6222 }
6118 6223
6119 snd_hda_pick_fixup(codec, alc662_fixup_models, 6224 snd_hda_pick_fixup(codec, alc662_fixup_models,
6120 alc662_fixup_tbl, alc662_fixups); 6225 alc662_fixup_tbl, alc662_fixups);
diff --git a/sound/soc/Kconfig b/sound/soc/Kconfig
index 0e9623368ab0..7d5d6444a837 100644
--- a/sound/soc/Kconfig
+++ b/sound/soc/Kconfig
@@ -49,7 +49,6 @@ source "sound/soc/mxs/Kconfig"
49source "sound/soc/pxa/Kconfig" 49source "sound/soc/pxa/Kconfig"
50source "sound/soc/rockchip/Kconfig" 50source "sound/soc/rockchip/Kconfig"
51source "sound/soc/samsung/Kconfig" 51source "sound/soc/samsung/Kconfig"
52source "sound/soc/s6000/Kconfig"
53source "sound/soc/sh/Kconfig" 52source "sound/soc/sh/Kconfig"
54source "sound/soc/sirf/Kconfig" 53source "sound/soc/sirf/Kconfig"
55source "sound/soc/spear/Kconfig" 54source "sound/soc/spear/Kconfig"
diff --git a/sound/soc/Makefile b/sound/soc/Makefile
index 534714a1ca44..865e090c8061 100644
--- a/sound/soc/Makefile
+++ b/sound/soc/Makefile
@@ -1,10 +1,14 @@
1snd-soc-core-objs := soc-core.o soc-dapm.o soc-jack.o soc-cache.o soc-utils.o 1snd-soc-core-objs := soc-core.o soc-dapm.o soc-jack.o soc-cache.o soc-utils.o
2snd-soc-core-objs += soc-pcm.o soc-compress.o soc-io.o soc-devres.o 2snd-soc-core-objs += soc-pcm.o soc-compress.o soc-io.o soc-devres.o soc-ops.o
3 3
4ifneq ($(CONFIG_SND_SOC_GENERIC_DMAENGINE_PCM),) 4ifneq ($(CONFIG_SND_SOC_GENERIC_DMAENGINE_PCM),)
5snd-soc-core-objs += soc-generic-dmaengine-pcm.o 5snd-soc-core-objs += soc-generic-dmaengine-pcm.o
6endif 6endif
7 7
8ifneq ($(CONFIG_SND_SOC_AC97_BUS),)
9snd-soc-core-objs += soc-ac97.o
10endif
11
8obj-$(CONFIG_SND_SOC) += snd-soc-core.o 12obj-$(CONFIG_SND_SOC) += snd-soc-core.o
9obj-$(CONFIG_SND_SOC) += codecs/ 13obj-$(CONFIG_SND_SOC) += codecs/
10obj-$(CONFIG_SND_SOC) += generic/ 14obj-$(CONFIG_SND_SOC) += generic/
@@ -26,7 +30,6 @@ obj-$(CONFIG_SND_SOC) += kirkwood/
26obj-$(CONFIG_SND_SOC) += pxa/ 30obj-$(CONFIG_SND_SOC) += pxa/
27obj-$(CONFIG_SND_SOC) += rockchip/ 31obj-$(CONFIG_SND_SOC) += rockchip/
28obj-$(CONFIG_SND_SOC) += samsung/ 32obj-$(CONFIG_SND_SOC) += samsung/
29obj-$(CONFIG_SND_SOC) += s6000/
30obj-$(CONFIG_SND_SOC) += sh/ 33obj-$(CONFIG_SND_SOC) += sh/
31obj-$(CONFIG_SND_SOC) += sirf/ 34obj-$(CONFIG_SND_SOC) += sirf/
32obj-$(CONFIG_SND_SOC) += spear/ 35obj-$(CONFIG_SND_SOC) += spear/
diff --git a/sound/soc/atmel/Kconfig b/sound/soc/atmel/Kconfig
index 27e3fc4a536b..fb3878312bf8 100644
--- a/sound/soc/atmel/Kconfig
+++ b/sound/soc/atmel/Kconfig
@@ -52,12 +52,3 @@ config SND_AT91_SOC_SAM9X5_WM8731
52 help 52 help
53 Say Y if you want to add support for audio SoC on an 53 Say Y if you want to add support for audio SoC on an
54 at91sam9x5 based board that is using WM8731 codec. 54 at91sam9x5 based board that is using WM8731 codec.
55
56config SND_AT91_SOC_AFEB9260
57 tristate "SoC Audio support for AFEB9260 board"
58 depends on ARCH_AT91 && ATMEL_SSC && ARCH_AT91 && MACH_AFEB9260 && SND_ATMEL_SOC
59 select SND_ATMEL_SOC_PDC
60 select SND_ATMEL_SOC_SSC
61 select SND_SOC_TLV320AIC23_I2C
62 help
63 Say Y here to support sound on AFEB9260 board.
diff --git a/sound/soc/atmel/Makefile b/sound/soc/atmel/Makefile
index 5baabc8bde3a..466a821da98c 100644
--- a/sound/soc/atmel/Makefile
+++ b/sound/soc/atmel/Makefile
@@ -17,4 +17,3 @@ snd-soc-sam9x5-wm8731-objs := sam9x5_wm8731.o
17obj-$(CONFIG_SND_AT91_SOC_SAM9G20_WM8731) += snd-soc-sam9g20-wm8731.o 17obj-$(CONFIG_SND_AT91_SOC_SAM9G20_WM8731) += snd-soc-sam9g20-wm8731.o
18obj-$(CONFIG_SND_ATMEL_SOC_WM8904) += snd-atmel-soc-wm8904.o 18obj-$(CONFIG_SND_ATMEL_SOC_WM8904) += snd-atmel-soc-wm8904.o
19obj-$(CONFIG_SND_AT91_SOC_SAM9X5_WM8731) += snd-soc-sam9x5-wm8731.o 19obj-$(CONFIG_SND_AT91_SOC_SAM9X5_WM8731) += snd-soc-sam9x5-wm8731.o
20obj-$(CONFIG_SND_AT91_SOC_AFEB9260) += snd-soc-afeb9260.o
diff --git a/sound/soc/atmel/atmel_ssc_dai.c b/sound/soc/atmel/atmel_ssc_dai.c
index f403f399808a..b1cc2a4a7fc0 100644
--- a/sound/soc/atmel/atmel_ssc_dai.c
+++ b/sound/soc/atmel/atmel_ssc_dai.c
@@ -310,7 +310,10 @@ static int atmel_ssc_set_dai_clkdiv(struct snd_soc_dai *cpu_dai,
310 * transmit and receive, so if a value has already 310 * transmit and receive, so if a value has already
311 * been set, it must match this value. 311 * been set, it must match this value.
312 */ 312 */
313 if (ssc_p->cmr_div == 0) 313 if (ssc_p->dir_mask !=
314 (SSC_DIR_MASK_PLAYBACK | SSC_DIR_MASK_CAPTURE))
315 ssc_p->cmr_div = div;
316 else if (ssc_p->cmr_div == 0)
314 ssc_p->cmr_div = div; 317 ssc_p->cmr_div = div;
315 else 318 else
316 if (div != ssc_p->cmr_div) 319 if (div != ssc_p->cmr_div)
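The flattened rendering makes this clkdiv hunk hard to follow, so here is the resulting check inside atmel_ssc_set_dai_clkdiv() restated in one piece (reconstructed only from the lines shown above; the branch taken on a mismatching divider lies outside the visible context):

if (ssc_p->dir_mask != (SSC_DIR_MASK_PLAYBACK | SSC_DIR_MASK_CAPTURE))
        ssc_p->cmr_div = div;           /* not both directions active: accept the new divider */
else if (ssc_p->cmr_div == 0)
        ssc_p->cmr_div = div;           /* first configuration of the shared divider */
else if (div != ssc_p->cmr_div)
        ;                               /* dividers disagree; handling lies past the shown context */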
diff --git a/sound/soc/atmel/snd-soc-afeb9260.c b/sound/soc/atmel/snd-soc-afeb9260.c
deleted file mode 100644
index 9579799ace54..000000000000
--- a/sound/soc/atmel/snd-soc-afeb9260.c
+++ /dev/null
@@ -1,151 +0,0 @@
1/*
2 * afeb9260.c -- SoC audio for AFEB9260
3 *
4 * Copyright (C) 2009 Sergey Lapin <slapin@ossfans.org>
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * version 2 as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
18 * 02110-1301 USA
19 *
20 */
21
22#include <linux/module.h>
23#include <linux/moduleparam.h>
24#include <linux/kernel.h>
25#include <linux/clk.h>
26#include <linux/platform_device.h>
27
28#include <linux/atmel-ssc.h>
29#include <sound/core.h>
30#include <sound/pcm.h>
31#include <sound/pcm_params.h>
32#include <sound/soc.h>
33
34#include <asm/mach-types.h>
35#include <mach/hardware.h>
36#include <linux/gpio.h>
37
38#include "../codecs/tlv320aic23.h"
39#include "atmel-pcm.h"
40#include "atmel_ssc_dai.h"
41
42#define CODEC_CLOCK 12000000
43
44static int afeb9260_hw_params(struct snd_pcm_substream *substream,
45 struct snd_pcm_hw_params *params)
46{
47 struct snd_soc_pcm_runtime *rtd = substream->private_data;
48 struct snd_soc_dai *codec_dai = rtd->codec_dai;
49 int err;
50
51 /* Set the codec system clock for DAC and ADC */
52 err =
53 snd_soc_dai_set_sysclk(codec_dai, 0, CODEC_CLOCK, SND_SOC_CLOCK_IN);
54
55 if (err < 0) {
56 printk(KERN_ERR "can't set codec system clock\n");
57 return err;
58 }
59
60 return err;
61}
62
63static struct snd_soc_ops afeb9260_ops = {
64 .hw_params = afeb9260_hw_params,
65};
66
67static const struct snd_soc_dapm_widget tlv320aic23_dapm_widgets[] = {
68 SND_SOC_DAPM_HP("Headphone Jack", NULL),
69 SND_SOC_DAPM_LINE("Line In", NULL),
70 SND_SOC_DAPM_MIC("Mic Jack", NULL),
71};
72
73static const struct snd_soc_dapm_route afeb9260_audio_map[] = {
74 {"Headphone Jack", NULL, "LHPOUT"},
75 {"Headphone Jack", NULL, "RHPOUT"},
76
77 {"LLINEIN", NULL, "Line In"},
78 {"RLINEIN", NULL, "Line In"},
79
80 {"MICIN", NULL, "Mic Jack"},
81};
82
83
84/* Digital audio interface glue - connects codec <--> CPU */
85static struct snd_soc_dai_link afeb9260_dai = {
86 .name = "TLV320AIC23",
87 .stream_name = "AIC23",
88 .cpu_dai_name = "atmel-ssc-dai.0",
89 .codec_dai_name = "tlv320aic23-hifi",
90 .platform_name = "atmel_pcm-audio",
91 .codec_name = "tlv320aic23-codec.0-001a",
92 .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_IF |
93 SND_SOC_DAIFMT_CBM_CFM,
94 .ops = &afeb9260_ops,
95};
96
97/* Audio machine driver */
98static struct snd_soc_card snd_soc_machine_afeb9260 = {
99 .name = "AFEB9260",
100 .owner = THIS_MODULE,
101 .dai_link = &afeb9260_dai,
102 .num_links = 1,
103
104 .dapm_widgets = tlv320aic23_dapm_widgets,
105 .num_dapm_widgets = ARRAY_SIZE(tlv320aic23_dapm_widgets),
106 .dapm_routes = afeb9260_audio_map,
107 .num_dapm_routes = ARRAY_SIZE(afeb9260_audio_map),
108};
109
110static struct platform_device *afeb9260_snd_device;
111
112static int __init afeb9260_soc_init(void)
113{
114 int err;
115 struct device *dev;
116
117 if (!(machine_is_afeb9260()))
118 return -ENODEV;
119
120
121 afeb9260_snd_device = platform_device_alloc("soc-audio", -1);
122 if (!afeb9260_snd_device) {
123 printk(KERN_ERR "ASoC: Platform device allocation failed\n");
124 return -ENOMEM;
125 }
126
127 platform_set_drvdata(afeb9260_snd_device, &snd_soc_machine_afeb9260);
128 err = platform_device_add(afeb9260_snd_device);
129 if (err)
130 goto err1;
131
132 dev = &afeb9260_snd_device->dev;
133
134 return 0;
135err1:
136 platform_device_put(afeb9260_snd_device);
137 return err;
138}
139
140static void __exit afeb9260_soc_exit(void)
141{
142 platform_device_unregister(afeb9260_snd_device);
143}
144
145module_init(afeb9260_soc_init);
146module_exit(afeb9260_soc_exit);
147
148MODULE_AUTHOR("Sergey Lapin <slapin@ossfans.org>");
149MODULE_DESCRIPTION("ALSA SoC for AFEB9260");
150MODULE_LICENSE("GPL");
151
diff --git a/sound/soc/au1x/ac97c.c b/sound/soc/au1x/ac97c.c
index c8a2de103c5f..5159a50a45a6 100644
--- a/sound/soc/au1x/ac97c.c
+++ b/sound/soc/au1x/ac97c.c
@@ -205,7 +205,7 @@ static int au1xac97c_dai_probe(struct snd_soc_dai *dai)
205 205
206static struct snd_soc_dai_driver au1xac97c_dai_driver = { 206static struct snd_soc_dai_driver au1xac97c_dai_driver = {
207 .name = "alchemy-ac97c", 207 .name = "alchemy-ac97c",
208 .ac97_control = 1, 208 .bus_control = true,
209 .probe = au1xac97c_dai_probe, 209 .probe = au1xac97c_dai_probe,
210 .playback = { 210 .playback = {
211 .rates = AC97_RATES, 211 .rates = AC97_RATES,
diff --git a/sound/soc/au1x/psc-ac97.c b/sound/soc/au1x/psc-ac97.c
index 84f31e1f9d24..c6daec98ff89 100644
--- a/sound/soc/au1x/psc-ac97.c
+++ b/sound/soc/au1x/psc-ac97.c
@@ -343,7 +343,7 @@ static const struct snd_soc_dai_ops au1xpsc_ac97_dai_ops = {
343}; 343};
344 344
345static const struct snd_soc_dai_driver au1xpsc_ac97_dai_template = { 345static const struct snd_soc_dai_driver au1xpsc_ac97_dai_template = {
346 .ac97_control = 1, 346 .bus_control = true,
347 .probe = au1xpsc_ac97_probe, 347 .probe = au1xpsc_ac97_probe,
348 .playback = { 348 .playback = {
349 .rates = AC97_RATES, 349 .rates = AC97_RATES,
diff --git a/sound/soc/blackfin/bf5xx-ac97.c b/sound/soc/blackfin/bf5xx-ac97.c
index e82eb373a731..6bf21a6c02e4 100644
--- a/sound/soc/blackfin/bf5xx-ac97.c
+++ b/sound/soc/blackfin/bf5xx-ac97.c
@@ -260,7 +260,7 @@ static int bf5xx_ac97_resume(struct snd_soc_dai *dai)
260#endif 260#endif
261 261
262static struct snd_soc_dai_driver bfin_ac97_dai = { 262static struct snd_soc_dai_driver bfin_ac97_dai = {
263 .ac97_control = 1, 263 .bus_control = true,
264 .suspend = bf5xx_ac97_suspend, 264 .suspend = bf5xx_ac97_suspend,
265 .resume = bf5xx_ac97_resume, 265 .resume = bf5xx_ac97_resume,
266 .playback = { 266 .playback = {
diff --git a/sound/soc/blackfin/bf5xx-ad1980.c b/sound/soc/blackfin/bf5xx-ad1980.c
index 3450e8f9080d..0fa81a523b8a 100644
--- a/sound/soc/blackfin/bf5xx-ad1980.c
+++ b/sound/soc/blackfin/bf5xx-ad1980.c
@@ -46,8 +46,6 @@
46#include <linux/gpio.h> 46#include <linux/gpio.h>
47#include <asm/portmux.h> 47#include <asm/portmux.h>
48 48
49#include "../codecs/ad1980.h"
50
51#include "bf5xx-ac97.h" 49#include "bf5xx-ac97.h"
52 50
53static struct snd_soc_card bf5xx_board; 51static struct snd_soc_card bf5xx_board;
diff --git a/sound/soc/cirrus/Kconfig b/sound/soc/cirrus/Kconfig
index 5477c5475923..7b7fbcd49e5e 100644
--- a/sound/soc/cirrus/Kconfig
+++ b/sound/soc/cirrus/Kconfig
@@ -36,7 +36,8 @@ config SND_EP93XX_SOC_EDB93XX
36 tristate "SoC Audio support for Cirrus Logic EDB93xx boards" 36 tristate "SoC Audio support for Cirrus Logic EDB93xx boards"
37 depends on SND_EP93XX_SOC && (MACH_EDB9301 || MACH_EDB9302 || MACH_EDB9302A || MACH_EDB9307A || MACH_EDB9315A) 37 depends on SND_EP93XX_SOC && (MACH_EDB9301 || MACH_EDB9302 || MACH_EDB9302A || MACH_EDB9307A || MACH_EDB9315A)
38 select SND_EP93XX_SOC_I2S 38 select SND_EP93XX_SOC_I2S
39 select SND_SOC_CS4271 39 select SND_SOC_CS4271_I2C if I2C
40 select SND_SOC_CS4271_SPI if SPI_MASTER
40 help 41 help
41 Say Y or M here if you want to add support for I2S audio on the 42 Say Y or M here if you want to add support for I2S audio on the
42 Cirrus Logic EDB93xx boards. 43 Cirrus Logic EDB93xx boards.
diff --git a/sound/soc/cirrus/ep93xx-ac97.c b/sound/soc/cirrus/ep93xx-ac97.c
index f30dadf85b99..6b8a366b0211 100644
--- a/sound/soc/cirrus/ep93xx-ac97.c
+++ b/sound/soc/cirrus/ep93xx-ac97.c
@@ -338,7 +338,7 @@ static const struct snd_soc_dai_ops ep93xx_ac97_dai_ops = {
338static struct snd_soc_dai_driver ep93xx_ac97_dai = { 338static struct snd_soc_dai_driver ep93xx_ac97_dai = {
339 .name = "ep93xx-ac97", 339 .name = "ep93xx-ac97",
340 .id = 0, 340 .id = 0,
341 .ac97_control = 1, 341 .bus_control = true,
342 .probe = ep93xx_ac97_dai_probe, 342 .probe = ep93xx_ac97_dai_probe,
343 .playback = { 343 .playback = {
344 .stream_name = "AC97 Playback", 344 .stream_name = "AC97 Playback",
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index a68d1731a8fd..1362edd380e5 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -50,7 +50,8 @@ config SND_SOC_ALL_CODECS
50 select SND_SOC_CS42L73 if I2C 50 select SND_SOC_CS42L73 if I2C
51 select SND_SOC_CS4265 if I2C 51 select SND_SOC_CS4265 if I2C
52 select SND_SOC_CS4270 if I2C 52 select SND_SOC_CS4270 if I2C
53 select SND_SOC_CS4271 if SND_SOC_I2C_AND_SPI 53 select SND_SOC_CS4271_I2C if I2C
54 select SND_SOC_CS4271_SPI if SPI_MASTER
54 select SND_SOC_CS42XX8_I2C if I2C 55 select SND_SOC_CS42XX8_I2C if I2C
55 select SND_SOC_CX20442 if TTY 56 select SND_SOC_CX20442 if TTY
56 select SND_SOC_DA7210 if I2C 57 select SND_SOC_DA7210 if I2C
@@ -223,6 +224,7 @@ config SND_SOC_AD193X_I2C
223 select SND_SOC_AD193X 224 select SND_SOC_AD193X
224 225
225config SND_SOC_AD1980 226config SND_SOC_AD1980
227 select REGMAP_AC97
226 tristate 228 tristate
227 229
228config SND_SOC_AD73311 230config SND_SOC_AD73311
@@ -336,7 +338,8 @@ config SND_SOC_CS42L51
336 tristate 338 tristate
337 339
338config SND_SOC_CS42L51_I2C 340config SND_SOC_CS42L51_I2C
339 tristate 341 tristate "Cirrus Logic CS42L51 CODEC (I2C)"
342 depends on I2C
340 select SND_SOC_CS42L51 343 select SND_SOC_CS42L51
341 344
342config SND_SOC_CS42L52 345config SND_SOC_CS42L52
@@ -370,8 +373,19 @@ config SND_SOC_CS4270_VD33_ERRATA
370 depends on SND_SOC_CS4270 373 depends on SND_SOC_CS4270
371 374
372config SND_SOC_CS4271 375config SND_SOC_CS4271
373 tristate "Cirrus Logic CS4271 CODEC" 376 tristate
374 depends on SND_SOC_I2C_AND_SPI 377
378config SND_SOC_CS4271_I2C
379 tristate "Cirrus Logic CS4271 CODEC (I2C)"
380 depends on I2C
381 select SND_SOC_CS4271
382 select REGMAP_I2C
383
384config SND_SOC_CS4271_SPI
385 tristate "Cirrus Logic CS4271 CODEC (SPI)"
386 depends on SPI_MASTER
387 select SND_SOC_CS4271
388 select REGMAP_SPI
375 389
376config SND_SOC_CS42XX8 390config SND_SOC_CS42XX8
377 tristate 391 tristate
diff --git a/sound/soc/codecs/Makefile b/sound/soc/codecs/Makefile
index 5dce451661e4..ac7ec31f8cbe 100644
--- a/sound/soc/codecs/Makefile
+++ b/sound/soc/codecs/Makefile
@@ -41,6 +41,8 @@ snd-soc-cs42l73-objs := cs42l73.o
41snd-soc-cs4265-objs := cs4265.o 41snd-soc-cs4265-objs := cs4265.o
42snd-soc-cs4270-objs := cs4270.o 42snd-soc-cs4270-objs := cs4270.o
43snd-soc-cs4271-objs := cs4271.o 43snd-soc-cs4271-objs := cs4271.o
44snd-soc-cs4271-i2c-objs := cs4271-i2c.o
45snd-soc-cs4271-spi-objs := cs4271-spi.o
44snd-soc-cs42xx8-objs := cs42xx8.o 46snd-soc-cs42xx8-objs := cs42xx8.o
45snd-soc-cs42xx8-i2c-objs := cs42xx8-i2c.o 47snd-soc-cs42xx8-i2c-objs := cs42xx8-i2c.o
46snd-soc-cx20442-objs := cx20442.o 48snd-soc-cx20442-objs := cx20442.o
@@ -217,6 +219,8 @@ obj-$(CONFIG_SND_SOC_CS42L73) += snd-soc-cs42l73.o
217obj-$(CONFIG_SND_SOC_CS4265) += snd-soc-cs4265.o 219obj-$(CONFIG_SND_SOC_CS4265) += snd-soc-cs4265.o
218obj-$(CONFIG_SND_SOC_CS4270) += snd-soc-cs4270.o 220obj-$(CONFIG_SND_SOC_CS4270) += snd-soc-cs4270.o
219obj-$(CONFIG_SND_SOC_CS4271) += snd-soc-cs4271.o 221obj-$(CONFIG_SND_SOC_CS4271) += snd-soc-cs4271.o
222obj-$(CONFIG_SND_SOC_CS4271_I2C) += snd-soc-cs4271-i2c.o
223obj-$(CONFIG_SND_SOC_CS4271_SPI) += snd-soc-cs4271-spi.o
220obj-$(CONFIG_SND_SOC_CS42XX8) += snd-soc-cs42xx8.o 224obj-$(CONFIG_SND_SOC_CS42XX8) += snd-soc-cs42xx8.o
221obj-$(CONFIG_SND_SOC_CS42XX8_I2C) += snd-soc-cs42xx8-i2c.o 225obj-$(CONFIG_SND_SOC_CS42XX8_I2C) += snd-soc-cs42xx8-i2c.o
222obj-$(CONFIG_SND_SOC_CX20442) += snd-soc-cx20442.o 226obj-$(CONFIG_SND_SOC_CX20442) += snd-soc-cx20442.o
diff --git a/sound/soc/codecs/ab8500-codec.c b/sound/soc/codecs/ab8500-codec.c
index fd43827bb856..7dfbc9921e91 100644
--- a/sound/soc/codecs/ab8500-codec.c
+++ b/sound/soc/codecs/ab8500-codec.c
@@ -126,13 +126,13 @@ struct ab8500_codec_drvdata_dbg {
126/* Private data for AB8500 device-driver */ 126/* Private data for AB8500 device-driver */
127struct ab8500_codec_drvdata { 127struct ab8500_codec_drvdata {
128 struct regmap *regmap; 128 struct regmap *regmap;
129 struct mutex ctrl_lock;
129 130
130 /* Sidetone */ 131 /* Sidetone */
131 long *sid_fir_values; 132 long *sid_fir_values;
132 enum sid_state sid_status; 133 enum sid_state sid_status;
133 134
134 /* ANC */ 135 /* ANC */
135 struct mutex anc_lock;
136 long *anc_fir_values; 136 long *anc_fir_values;
137 long *anc_iir_values; 137 long *anc_iir_values;
138 enum anc_state anc_status; 138 enum anc_state anc_status;
@@ -1129,9 +1129,9 @@ static int sid_status_control_get(struct snd_kcontrol *kcontrol,
1129 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 1129 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
1130 struct ab8500_codec_drvdata *drvdata = dev_get_drvdata(codec->dev); 1130 struct ab8500_codec_drvdata *drvdata = dev_get_drvdata(codec->dev);
1131 1131
1132 mutex_lock(&codec->mutex); 1132 mutex_lock(&drvdata->ctrl_lock);
1133 ucontrol->value.integer.value[0] = drvdata->sid_status; 1133 ucontrol->value.integer.value[0] = drvdata->sid_status;
1134 mutex_unlock(&codec->mutex); 1134 mutex_unlock(&drvdata->ctrl_lock);
1135 1135
1136 return 0; 1136 return 0;
1137} 1137}
@@ -1154,7 +1154,7 @@ static int sid_status_control_put(struct snd_kcontrol *kcontrol,
1154 return -EIO; 1154 return -EIO;
1155 } 1155 }
1156 1156
1157 mutex_lock(&codec->mutex); 1157 mutex_lock(&drvdata->ctrl_lock);
1158 1158
1159 sidconf = snd_soc_read(codec, AB8500_SIDFIRCONF); 1159 sidconf = snd_soc_read(codec, AB8500_SIDFIRCONF);
1160 if (((sidconf & BIT(AB8500_SIDFIRCONF_FIRSIDBUSY)) != 0)) { 1160 if (((sidconf & BIT(AB8500_SIDFIRCONF_FIRSIDBUSY)) != 0)) {
@@ -1185,7 +1185,7 @@ static int sid_status_control_put(struct snd_kcontrol *kcontrol,
1185 drvdata->sid_status = SID_FIR_CONFIGURED; 1185 drvdata->sid_status = SID_FIR_CONFIGURED;
1186 1186
1187out: 1187out:
1188 mutex_unlock(&codec->mutex); 1188 mutex_unlock(&drvdata->ctrl_lock);
1189 1189
1190 dev_dbg(codec->dev, "%s: Exit\n", __func__); 1190 dev_dbg(codec->dev, "%s: Exit\n", __func__);
1191 1191
@@ -1198,9 +1198,9 @@ static int anc_status_control_get(struct snd_kcontrol *kcontrol,
1198 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 1198 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
1199 struct ab8500_codec_drvdata *drvdata = dev_get_drvdata(codec->dev); 1199 struct ab8500_codec_drvdata *drvdata = dev_get_drvdata(codec->dev);
1200 1200
1201 mutex_lock(&codec->mutex); 1201 mutex_lock(&drvdata->ctrl_lock);
1202 ucontrol->value.integer.value[0] = drvdata->anc_status; 1202 ucontrol->value.integer.value[0] = drvdata->anc_status;
1203 mutex_unlock(&codec->mutex); 1203 mutex_unlock(&drvdata->ctrl_lock);
1204 1204
1205 return 0; 1205 return 0;
1206} 1206}
@@ -1217,7 +1217,7 @@ static int anc_status_control_put(struct snd_kcontrol *kcontrol,
1217 1217
1218 dev_dbg(dev, "%s: Enter.\n", __func__); 1218 dev_dbg(dev, "%s: Enter.\n", __func__);
1219 1219
1220 mutex_lock(&drvdata->anc_lock); 1220 mutex_lock(&drvdata->ctrl_lock);
1221 1221
1222 req = ucontrol->value.integer.value[0]; 1222 req = ucontrol->value.integer.value[0];
1223 if (req >= ARRAY_SIZE(enum_anc_state)) { 1223 if (req >= ARRAY_SIZE(enum_anc_state)) {
@@ -1244,9 +1244,7 @@ static int anc_status_control_put(struct snd_kcontrol *kcontrol,
1244 } 1244 }
1245 snd_soc_dapm_sync(&codec->dapm); 1245 snd_soc_dapm_sync(&codec->dapm);
1246 1246
1247 mutex_lock(&codec->mutex);
1248 anc_configure(codec, apply_fir, apply_iir); 1247 anc_configure(codec, apply_fir, apply_iir);
1249 mutex_unlock(&codec->mutex);
1250 1248
1251 if (apply_fir) { 1249 if (apply_fir) {
1252 if (drvdata->anc_status == ANC_IIR_CONFIGURED) 1250 if (drvdata->anc_status == ANC_IIR_CONFIGURED)
@@ -1265,7 +1263,7 @@ static int anc_status_control_put(struct snd_kcontrol *kcontrol,
1265 snd_soc_dapm_sync(&codec->dapm); 1263 snd_soc_dapm_sync(&codec->dapm);
1266 1264
1267cleanup: 1265cleanup:
1268 mutex_unlock(&drvdata->anc_lock); 1266 mutex_unlock(&drvdata->ctrl_lock);
1269 1267
1270 if (status < 0) 1268 if (status < 0)
1271 dev_err(dev, "%s: Unable to configure ANC! (status = %d)\n", 1269 dev_err(dev, "%s: Unable to configure ANC! (status = %d)\n",
@@ -1294,14 +1292,15 @@ static int filter_control_get(struct snd_kcontrol *kcontrol,
1294 struct snd_ctl_elem_value *ucontrol) 1292 struct snd_ctl_elem_value *ucontrol)
1295{ 1293{
1296 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 1294 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
1295 struct ab8500_codec_drvdata *drvdata = snd_soc_codec_get_drvdata(codec);
1297 struct filter_control *fc = 1296 struct filter_control *fc =
1298 (struct filter_control *)kcontrol->private_value; 1297 (struct filter_control *)kcontrol->private_value;
1299 unsigned int i; 1298 unsigned int i;
1300 1299
1301 mutex_lock(&codec->mutex); 1300 mutex_lock(&drvdata->ctrl_lock);
1302 for (i = 0; i < fc->count; i++) 1301 for (i = 0; i < fc->count; i++)
1303 ucontrol->value.integer.value[i] = fc->value[i]; 1302 ucontrol->value.integer.value[i] = fc->value[i];
1304 mutex_unlock(&codec->mutex); 1303 mutex_unlock(&drvdata->ctrl_lock);
1305 1304
1306 return 0; 1305 return 0;
1307} 1306}
@@ -1310,14 +1309,15 @@ static int filter_control_put(struct snd_kcontrol *kcontrol,
1310 struct snd_ctl_elem_value *ucontrol) 1309 struct snd_ctl_elem_value *ucontrol)
1311{ 1310{
1312 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 1311 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
1312 struct ab8500_codec_drvdata *drvdata = snd_soc_codec_get_drvdata(codec);
1313 struct filter_control *fc = 1313 struct filter_control *fc =
1314 (struct filter_control *)kcontrol->private_value; 1314 (struct filter_control *)kcontrol->private_value;
1315 unsigned int i; 1315 unsigned int i;
1316 1316
1317 mutex_lock(&codec->mutex); 1317 mutex_lock(&drvdata->ctrl_lock);
1318 for (i = 0; i < fc->count; i++) 1318 for (i = 0; i < fc->count; i++)
1319 fc->value[i] = ucontrol->value.integer.value[i]; 1319 fc->value[i] = ucontrol->value.integer.value[i];
1320 mutex_unlock(&codec->mutex); 1320 mutex_unlock(&drvdata->ctrl_lock);
1321 1321
1322 return 0; 1322 return 0;
1323} 1323}
@@ -2545,7 +2545,7 @@ static int ab8500_codec_probe(struct snd_soc_codec *codec)
2545 2545
2546 (void)snd_soc_dapm_disable_pin(&codec->dapm, "ANC Configure Input"); 2546 (void)snd_soc_dapm_disable_pin(&codec->dapm, "ANC Configure Input");
2547 2547
2548 mutex_init(&drvdata->anc_lock); 2548 mutex_init(&drvdata->ctrl_lock);
2549 2549
2550 return status; 2550 return status;
2551} 2551}
diff --git a/sound/soc/codecs/ac97.c b/sound/soc/codecs/ac97.c
index bd9b1839c8b0..c6e5a313ebf4 100644
--- a/sound/soc/codecs/ac97.c
+++ b/sound/soc/codecs/ac97.c
@@ -37,10 +37,11 @@ static int ac97_prepare(struct snd_pcm_substream *substream,
37 struct snd_soc_dai *dai) 37 struct snd_soc_dai *dai)
38{ 38{
39 struct snd_soc_codec *codec = dai->codec; 39 struct snd_soc_codec *codec = dai->codec;
40 struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
40 41
41 int reg = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ? 42 int reg = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ?
42 AC97_PCM_FRONT_DAC_RATE : AC97_PCM_LR_ADC_RATE; 43 AC97_PCM_FRONT_DAC_RATE : AC97_PCM_LR_ADC_RATE;
43 return snd_ac97_set_rate(codec->ac97, reg, substream->runtime->rate); 44 return snd_ac97_set_rate(ac97, reg, substream->runtime->rate);
44} 45}
45 46
46#define STD_AC97_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 |\ 47#define STD_AC97_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 |\
@@ -53,7 +54,6 @@ static const struct snd_soc_dai_ops ac97_dai_ops = {
53 54
54static struct snd_soc_dai_driver ac97_dai = { 55static struct snd_soc_dai_driver ac97_dai = {
55 .name = "ac97-hifi", 56 .name = "ac97-hifi",
56 .ac97_control = 1,
57 .playback = { 57 .playback = {
58 .stream_name = "AC97 Playback", 58 .stream_name = "AC97 Playback",
59 .channels_min = 1, 59 .channels_min = 1,
@@ -71,6 +71,7 @@ static struct snd_soc_dai_driver ac97_dai = {
71 71
72static int ac97_soc_probe(struct snd_soc_codec *codec) 72static int ac97_soc_probe(struct snd_soc_codec *codec)
73{ 73{
74 struct snd_ac97 *ac97;
74 struct snd_ac97_bus *ac97_bus; 75 struct snd_ac97_bus *ac97_bus;
75 struct snd_ac97_template ac97_template; 76 struct snd_ac97_template ac97_template;
76 int ret; 77 int ret;
@@ -82,24 +83,31 @@ static int ac97_soc_probe(struct snd_soc_codec *codec)
82 return ret; 83 return ret;
83 84
84 memset(&ac97_template, 0, sizeof(struct snd_ac97_template)); 85 memset(&ac97_template, 0, sizeof(struct snd_ac97_template));
85 ret = snd_ac97_mixer(ac97_bus, &ac97_template, &codec->ac97); 86 ret = snd_ac97_mixer(ac97_bus, &ac97_template, &ac97);
86 if (ret < 0) 87 if (ret < 0)
87 return ret; 88 return ret;
88 89
90 snd_soc_codec_set_drvdata(codec, ac97);
91
89 return 0; 92 return 0;
90} 93}
91 94
92#ifdef CONFIG_PM 95#ifdef CONFIG_PM
93static int ac97_soc_suspend(struct snd_soc_codec *codec) 96static int ac97_soc_suspend(struct snd_soc_codec *codec)
94{ 97{
95 snd_ac97_suspend(codec->ac97); 98 struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
99
100 snd_ac97_suspend(ac97);
96 101
97 return 0; 102 return 0;
98} 103}
99 104
100static int ac97_soc_resume(struct snd_soc_codec *codec) 105static int ac97_soc_resume(struct snd_soc_codec *codec)
101{ 106{
102 snd_ac97_resume(codec->ac97); 107
108 struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
109
110 snd_ac97_resume(ac97);
103 111
104 return 0; 112 return 0;
105} 113}
diff --git a/sound/soc/codecs/ad193x.c b/sound/soc/codecs/ad193x.c
index 6844d0b2af68..387530b0b0fd 100644
--- a/sound/soc/codecs/ad193x.c
+++ b/sound/soc/codecs/ad193x.c
@@ -72,11 +72,13 @@ static const struct snd_kcontrol_new ad193x_snd_controls[] = {
72}; 72};
73 73
74static const struct snd_soc_dapm_widget ad193x_dapm_widgets[] = { 74static const struct snd_soc_dapm_widget ad193x_dapm_widgets[] = {
75 SND_SOC_DAPM_DAC("DAC", "Playback", AD193X_DAC_CTRL0, 0, 1), 75 SND_SOC_DAPM_DAC("DAC", "Playback", SND_SOC_NOPM, 0, 0),
76 SND_SOC_DAPM_PGA("DAC Output", AD193X_DAC_CTRL0, 0, 1, NULL, 0),
76 SND_SOC_DAPM_ADC("ADC", "Capture", SND_SOC_NOPM, 0, 0), 77 SND_SOC_DAPM_ADC("ADC", "Capture", SND_SOC_NOPM, 0, 0),
77 SND_SOC_DAPM_SUPPLY("PLL_PWR", AD193X_PLL_CLK_CTRL0, 0, 1, NULL, 0), 78 SND_SOC_DAPM_SUPPLY("PLL_PWR", AD193X_PLL_CLK_CTRL0, 0, 1, NULL, 0),
78 SND_SOC_DAPM_SUPPLY("ADC_PWR", AD193X_ADC_CTRL0, 0, 1, NULL, 0), 79 SND_SOC_DAPM_SUPPLY("ADC_PWR", AD193X_ADC_CTRL0, 0, 1, NULL, 0),
79 SND_SOC_DAPM_SUPPLY("SYSCLK", AD193X_PLL_CLK_CTRL0, 7, 0, NULL, 0), 80 SND_SOC_DAPM_SUPPLY("SYSCLK", AD193X_PLL_CLK_CTRL0, 7, 0, NULL, 0),
81 SND_SOC_DAPM_VMID("VMID"),
80 SND_SOC_DAPM_OUTPUT("DAC1OUT"), 82 SND_SOC_DAPM_OUTPUT("DAC1OUT"),
81 SND_SOC_DAPM_OUTPUT("DAC2OUT"), 83 SND_SOC_DAPM_OUTPUT("DAC2OUT"),
82 SND_SOC_DAPM_OUTPUT("DAC3OUT"), 84 SND_SOC_DAPM_OUTPUT("DAC3OUT"),
@@ -87,13 +89,15 @@ static const struct snd_soc_dapm_widget ad193x_dapm_widgets[] = {
87 89
88static const struct snd_soc_dapm_route audio_paths[] = { 90static const struct snd_soc_dapm_route audio_paths[] = {
89 { "DAC", NULL, "SYSCLK" }, 91 { "DAC", NULL, "SYSCLK" },
92 { "DAC Output", NULL, "DAC" },
93 { "DAC Output", NULL, "VMID" },
90 { "ADC", NULL, "SYSCLK" }, 94 { "ADC", NULL, "SYSCLK" },
91 { "DAC", NULL, "ADC_PWR" }, 95 { "DAC", NULL, "ADC_PWR" },
92 { "ADC", NULL, "ADC_PWR" }, 96 { "ADC", NULL, "ADC_PWR" },
93 { "DAC1OUT", NULL, "DAC" }, 97 { "DAC1OUT", NULL, "DAC Output" },
94 { "DAC2OUT", NULL, "DAC" }, 98 { "DAC2OUT", NULL, "DAC Output" },
95 { "DAC3OUT", NULL, "DAC" }, 99 { "DAC3OUT", NULL, "DAC Output" },
96 { "DAC4OUT", NULL, "DAC" }, 100 { "DAC4OUT", NULL, "DAC Output" },
97 { "ADC", NULL, "ADC1IN" }, 101 { "ADC", NULL, "ADC1IN" },
98 { "ADC", NULL, "ADC2IN" }, 102 { "ADC", NULL, "ADC2IN" },
99 { "SYSCLK", NULL, "PLL_PWR" }, 103 { "SYSCLK", NULL, "PLL_PWR" },
diff --git a/sound/soc/codecs/ad1980.c b/sound/soc/codecs/ad1980.c
index 304d3003339a..2860eef8610c 100644
--- a/sound/soc/codecs/ad1980.c
+++ b/sound/soc/codecs/ad1980.c
@@ -24,34 +24,86 @@
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/kernel.h> 25#include <linux/kernel.h>
26#include <linux/device.h> 26#include <linux/device.h>
27#include <linux/regmap.h>
27#include <sound/core.h> 28#include <sound/core.h>
28#include <sound/pcm.h> 29#include <sound/pcm.h>
29#include <sound/ac97_codec.h> 30#include <sound/ac97_codec.h>
30#include <sound/initval.h> 31#include <sound/initval.h>
31#include <sound/soc.h> 32#include <sound/soc.h>
32 33
33#include "ad1980.h" 34static const struct reg_default ad1980_reg_defaults[] = {
35 { 0x02, 0x8000 },
36 { 0x04, 0x8000 },
37 { 0x06, 0x8000 },
38 { 0x0c, 0x8008 },
39 { 0x0e, 0x8008 },
40 { 0x10, 0x8808 },
41 { 0x12, 0x8808 },
42 { 0x16, 0x8808 },
43 { 0x18, 0x8808 },
44 { 0x1a, 0x0000 },
45 { 0x1c, 0x8000 },
46 { 0x20, 0x0000 },
47 { 0x28, 0x03c7 },
48 { 0x2c, 0xbb80 },
49 { 0x2e, 0xbb80 },
50 { 0x30, 0xbb80 },
51 { 0x32, 0xbb80 },
52 { 0x36, 0x8080 },
53 { 0x38, 0x8080 },
54 { 0x3a, 0x2000 },
55 { 0x60, 0x0000 },
56 { 0x62, 0x0000 },
57 { 0x72, 0x0000 },
58 { 0x74, 0x1001 },
59 { 0x76, 0x0000 },
60};
34 61
35/* 62static bool ad1980_readable_reg(struct device *dev, unsigned int reg)
36 * AD1980 register cache 63{
37 */ 64 switch (reg) {
38static const u16 ad1980_reg[] = { 65 case AC97_RESET ... AC97_MASTER_MONO:
39 0x0090, 0x8000, 0x8000, 0x8000, /* 0 - 6 */ 66 case AC97_PHONE ... AC97_CD:
40 0x0000, 0x0000, 0x8008, 0x8008, /* 8 - e */ 67 case AC97_AUX ... AC97_GENERAL_PURPOSE:
41 0x8808, 0x8808, 0x0000, 0x8808, /* 10 - 16 */ 68 case AC97_POWERDOWN ... AC97_PCM_LR_ADC_RATE:
42 0x8808, 0x0000, 0x8000, 0x0000, /* 18 - 1e */ 69 case AC97_SPDIF:
43 0x0000, 0x0000, 0x0000, 0x0000, /* 20 - 26 */ 70 case AC97_CODEC_CLASS_REV:
44 0x03c7, 0x0000, 0xbb80, 0xbb80, /* 28 - 2e */ 71 case AC97_PCI_SVID:
45 0xbb80, 0xbb80, 0x0000, 0x8080, /* 30 - 36 */ 72 case AC97_AD_CODEC_CFG:
46 0x8080, 0x2000, 0x0000, 0x0000, /* 38 - 3e */ 73 case AC97_AD_JACK_SPDIF:
47 0x0000, 0x0000, 0x0000, 0x0000, /* reserved */ 74 case AC97_AD_SERIAL_CFG:
48 0x0000, 0x0000, 0x0000, 0x0000, /* reserved */ 75 case AC97_VENDOR_ID1:
49 0x0000, 0x0000, 0x0000, 0x0000, /* reserved */ 76 case AC97_VENDOR_ID2:
50 0x0000, 0x0000, 0x0000, 0x0000, /* reserved */ 77 return true;
51 0x8080, 0x0000, 0x0000, 0x0000, /* 60 - 66 */ 78 default:
52 0x0000, 0x0000, 0x0000, 0x0000, /* reserved */ 79 return false;
53 0x0000, 0x0000, 0x1001, 0x0000, /* 70 - 76 */ 80 }
54 0x0000, 0x0000, 0x4144, 0x5370 /* 78 - 7e */ 81}
82
83static bool ad1980_writeable_reg(struct device *dev, unsigned int reg)
84{
85 switch (reg) {
86 case AC97_VENDOR_ID1:
87 case AC97_VENDOR_ID2:
88 return false;
89 default:
90 return ad1980_readable_reg(dev, reg);
91 }
92}
93
94static const struct regmap_config ad1980_regmap_config = {
95 .reg_bits = 16,
96 .reg_stride = 2,
97 .val_bits = 16,
98 .max_register = 0x7e,
99 .cache_type = REGCACHE_RBTREE,
100
101 .volatile_reg = regmap_ac97_default_volatile,
102 .readable_reg = ad1980_readable_reg,
103 .writeable_reg = ad1980_writeable_reg,
104
105 .reg_defaults = ad1980_reg_defaults,
106 .num_reg_defaults = ARRAY_SIZE(ad1980_reg_defaults),
55}; 107};
56 108
57static const char *ad1980_rec_sel[] = {"Mic", "CD", "NC", "AUX", "Line", 109static const char *ad1980_rec_sel[] = {"Mic", "CD", "NC", "AUX", "Line",
@@ -134,45 +186,8 @@ static const struct snd_soc_dapm_route ad1980_dapm_routes[] = {
134 { "HP_OUT_R", NULL, "Playback" }, 186 { "HP_OUT_R", NULL, "Playback" },
135}; 187};
136 188
137static unsigned int ac97_read(struct snd_soc_codec *codec,
138 unsigned int reg)
139{
140 u16 *cache = codec->reg_cache;
141
142 switch (reg) {
143 case AC97_RESET:
144 case AC97_INT_PAGING:
145 case AC97_POWERDOWN:
146 case AC97_EXTENDED_STATUS:
147 case AC97_VENDOR_ID1:
148 case AC97_VENDOR_ID2:
149 return soc_ac97_ops->read(codec->ac97, reg);
150 default:
151 reg = reg >> 1;
152
153 if (reg >= ARRAY_SIZE(ad1980_reg))
154 return -EINVAL;
155
156 return cache[reg];
157 }
158}
159
160static int ac97_write(struct snd_soc_codec *codec, unsigned int reg,
161 unsigned int val)
162{
163 u16 *cache = codec->reg_cache;
164
165 soc_ac97_ops->write(codec->ac97, reg, val);
166 reg = reg >> 1;
167 if (reg < ARRAY_SIZE(ad1980_reg))
168 cache[reg] = val;
169
170 return 0;
171}
172
173static struct snd_soc_dai_driver ad1980_dai = { 189static struct snd_soc_dai_driver ad1980_dai = {
174 .name = "ad1980-hifi", 190 .name = "ad1980-hifi",
175 .ac97_control = 1,
176 .playback = { 191 .playback = {
177 .stream_name = "Playback", 192 .stream_name = "Playback",
178 .channels_min = 2, 193 .channels_min = 2,
@@ -189,108 +204,115 @@ static struct snd_soc_dai_driver ad1980_dai = {
189 204
190static int ad1980_reset(struct snd_soc_codec *codec, int try_warm) 205static int ad1980_reset(struct snd_soc_codec *codec, int try_warm)
191{ 206{
207 struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
192 unsigned int retry_cnt = 0; 208 unsigned int retry_cnt = 0;
193 209
194 do { 210 do {
195 if (try_warm && soc_ac97_ops->warm_reset) { 211 if (try_warm && soc_ac97_ops->warm_reset) {
196 soc_ac97_ops->warm_reset(codec->ac97); 212 soc_ac97_ops->warm_reset(ac97);
197 if (ac97_read(codec, AC97_RESET) == 0x0090) 213 if (snd_soc_read(codec, AC97_RESET) == 0x0090)
198 return 1; 214 return 1;
199 } 215 }
200 216
201 soc_ac97_ops->reset(codec->ac97); 217 soc_ac97_ops->reset(ac97);
202 /* 218 /*
203 * Set the 16-slot bit in register 74h, then every slot will have only 219 * Set the 16-slot bit in register 74h, then every slot will have only
204 * 16 bits. This command is sent out in 20-bit mode, in which 220 * 16 bits. This command is sent out in 20-bit mode, in which
205 * case the first nibble of data is eaten by the addr. (Tag is 221 * case the first nibble of data is eaten by the addr. (Tag is
206 * always 16 bit) 222 * always 16 bit)
207 */ 223 */
208 ac97_write(codec, AC97_AD_SERIAL_CFG, 0x9900); 224 snd_soc_write(codec, AC97_AD_SERIAL_CFG, 0x9900);
209 225
210 if (ac97_read(codec, AC97_RESET) == 0x0090) 226 if (snd_soc_read(codec, AC97_RESET) == 0x0090)
211 return 0; 227 return 0;
212 } while (retry_cnt++ < 10); 228 } while (retry_cnt++ < 10);
213 229
214 printk(KERN_ERR "AD1980 AC97 reset failed\n"); 230 dev_err(codec->dev, "Failed to reset: AC97 link error\n");
231
215 return -EIO; 232 return -EIO;
216} 233}
217 234
218static int ad1980_soc_probe(struct snd_soc_codec *codec) 235static int ad1980_soc_probe(struct snd_soc_codec *codec)
219{ 236{
237 struct snd_ac97 *ac97;
238 struct regmap *regmap;
220 int ret; 239 int ret;
221 u16 vendor_id2; 240 u16 vendor_id2;
222 u16 ext_status; 241 u16 ext_status;
223 242
224 printk(KERN_INFO "AD1980 SoC Audio Codec\n"); 243 ac97 = snd_soc_new_ac97_codec(codec);
225 244 if (IS_ERR(ac97)) {
226 ret = snd_soc_new_ac97_codec(codec, soc_ac97_ops, 0); 245 ret = PTR_ERR(ac97);
227 if (ret < 0) { 246 dev_err(codec->dev, "Failed to register AC97 codec: %d\n", ret);
228 printk(KERN_ERR "ad1980: failed to register AC97 codec\n");
229 return ret; 247 return ret;
230 } 248 }
231 249
250 regmap = regmap_init_ac97(ac97, &ad1980_regmap_config);
251 if (IS_ERR(regmap)) {
252 ret = PTR_ERR(regmap);
253 goto err_free_ac97;
254 }
255
256 snd_soc_codec_init_regmap(codec, regmap);
257 snd_soc_codec_set_drvdata(codec, ac97);
258
232 ret = ad1980_reset(codec, 0); 259 ret = ad1980_reset(codec, 0);
233 if (ret < 0) { 260 if (ret < 0)
234 printk(KERN_ERR "Failed to reset AD1980: AC97 link error\n");
235 goto reset_err; 261 goto reset_err;
236 }
237 262
238 /* Read out vendor ID to make sure it is ad1980 */ 263 /* Read out vendor ID to make sure it is ad1980 */
239 if (ac97_read(codec, AC97_VENDOR_ID1) != 0x4144) { 264 if (snd_soc_read(codec, AC97_VENDOR_ID1) != 0x4144) {
240 ret = -ENODEV; 265 ret = -ENODEV;
241 goto reset_err; 266 goto reset_err;
242 } 267 }
243 268
244 vendor_id2 = ac97_read(codec, AC97_VENDOR_ID2); 269 vendor_id2 = snd_soc_read(codec, AC97_VENDOR_ID2);
245 270
246 if (vendor_id2 != 0x5370) { 271 if (vendor_id2 != 0x5370) {
247 if (vendor_id2 != 0x5374) { 272 if (vendor_id2 != 0x5374) {
248 ret = -ENODEV; 273 ret = -ENODEV;
249 goto reset_err; 274 goto reset_err;
250 } else { 275 } else {
251 printk(KERN_WARNING "ad1980: " 276 dev_warn(codec->dev,
252 "Found AD1981 - only 2/2 IN/OUT Channels " 277 "Found AD1981 - only 2/2 IN/OUT Channels supported\n");
253 "supported\n");
254 } 278 }
255 } 279 }
256 280
257 /* unmute captures and playbacks volume */ 281 /* unmute captures and playbacks volume */
258 ac97_write(codec, AC97_MASTER, 0x0000); 282 snd_soc_write(codec, AC97_MASTER, 0x0000);
259 ac97_write(codec, AC97_PCM, 0x0000); 283 snd_soc_write(codec, AC97_PCM, 0x0000);
260 ac97_write(codec, AC97_REC_GAIN, 0x0000); 284 snd_soc_write(codec, AC97_REC_GAIN, 0x0000);
261 ac97_write(codec, AC97_CENTER_LFE_MASTER, 0x0000); 285 snd_soc_write(codec, AC97_CENTER_LFE_MASTER, 0x0000);
262 ac97_write(codec, AC97_SURROUND_MASTER, 0x0000); 286 snd_soc_write(codec, AC97_SURROUND_MASTER, 0x0000);
263 287
264 /*power on LFE/CENTER/Surround DACs*/ 288 /*power on LFE/CENTER/Surround DACs*/
265 ext_status = ac97_read(codec, AC97_EXTENDED_STATUS); 289 ext_status = snd_soc_read(codec, AC97_EXTENDED_STATUS);
266 ac97_write(codec, AC97_EXTENDED_STATUS, ext_status&~0x3800); 290 snd_soc_write(codec, AC97_EXTENDED_STATUS, ext_status&~0x3800);
267
268 snd_soc_add_codec_controls(codec, ad1980_snd_ac97_controls,
269 ARRAY_SIZE(ad1980_snd_ac97_controls));
270 291
271 return 0; 292 return 0;
272 293
273reset_err: 294reset_err:
274 snd_soc_free_ac97_codec(codec); 295 snd_soc_codec_exit_regmap(codec);
296err_free_ac97:
297 snd_soc_free_ac97_codec(ac97);
275 return ret; 298 return ret;
276} 299}
277 300
278static int ad1980_soc_remove(struct snd_soc_codec *codec) 301static int ad1980_soc_remove(struct snd_soc_codec *codec)
279{ 302{
280 snd_soc_free_ac97_codec(codec); 303 struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
304
305 snd_soc_codec_exit_regmap(codec);
306 snd_soc_free_ac97_codec(ac97);
281 return 0; 307 return 0;
282} 308}
283 309
284static struct snd_soc_codec_driver soc_codec_dev_ad1980 = { 310static struct snd_soc_codec_driver soc_codec_dev_ad1980 = {
285 .probe = ad1980_soc_probe, 311 .probe = ad1980_soc_probe,
286 .remove = ad1980_soc_remove, 312 .remove = ad1980_soc_remove,
287 .reg_cache_size = ARRAY_SIZE(ad1980_reg),
288 .reg_word_size = sizeof(u16),
289 .reg_cache_default = ad1980_reg,
290 .reg_cache_step = 2,
291 .write = ac97_write,
292 .read = ac97_read,
293 313
314 .controls = ad1980_snd_ac97_controls,
315 .num_controls = ARRAY_SIZE(ad1980_snd_ac97_controls),
294 .dapm_widgets = ad1980_dapm_widgets, 316 .dapm_widgets = ad1980_dapm_widgets,
295 .num_dapm_widgets = ARRAY_SIZE(ad1980_dapm_widgets), 317 .num_dapm_widgets = ARRAY_SIZE(ad1980_dapm_widgets),
296 .dapm_routes = ad1980_dapm_routes, 318 .dapm_routes = ad1980_dapm_routes,
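The ad1980 changes above drop the hand-rolled register cache and the codec-level .read/.write callbacks in favour of an AC97 regmap attached in ad1980_soc_probe(). As a hedged sketch of the resulting access path (the accessor calls and register values are taken from the probe code above; the wrapper function itself is illustrative only):

/* Sketch only: with snd_soc_codec_init_regmap() done, the soc-core accessors
 * are served by the AC97 regmap and its rbtree cache. */
static void sketch_ad1980_access(struct snd_soc_codec *codec)
{
        if (snd_soc_read(codec, AC97_VENDOR_ID1) == 0x4144)     /* read-only per ad1980_writeable_reg() */
                snd_soc_write(codec, AC97_MASTER, 0x0000);      /* unmute master, as in the probe */
}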
diff --git a/sound/soc/codecs/ad1980.h b/sound/soc/codecs/ad1980.h
deleted file mode 100644
index eb0af44ad3df..000000000000
--- a/sound/soc/codecs/ad1980.h
+++ /dev/null
@@ -1,26 +0,0 @@
1/*
2 * ad1980.h -- ad1980 Soc Audio driver
3 *
4 * WARNING:
5 *
6 * Because Analog Devices Inc. discontinued the ad1980 sound chip since
7 * Sep. 2009, this ad1980 driver is not maintained, tested and supported
8 * by ADI now.
9 */
10
11#ifndef _AD1980_H
12#define _AD1980_H
13/* Bit definition of Power-Down Control/Status Register */
14#define ADC 0x0001
15#define DAC 0x0002
16#define ANL 0x0004
17#define REF 0x0008
18#define PR0 0x0100
19#define PR1 0x0200
20#define PR2 0x0400
21#define PR3 0x0800
22#define PR4 0x1000
23#define PR5 0x2000
24#define PR6 0x4000
25
26#endif
diff --git a/sound/soc/codecs/adau1373.c b/sound/soc/codecs/adau1373.c
index 7c784ad3e8b2..783dcb57043a 100644
--- a/sound/soc/codecs/adau1373.c
+++ b/sound/soc/codecs/adau1373.c
@@ -551,7 +551,7 @@ static const struct snd_kcontrol_new adau1373_drc_controls[] = {
 static int adau1373_pll_event(struct snd_soc_dapm_widget *w,
 	struct snd_kcontrol *kcontrol, int event)
 {
-	struct snd_soc_codec *codec = w->codec;
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
 	struct adau1373 *adau1373 = snd_soc_codec_get_drvdata(codec);
 	unsigned int pll_id = w->name[3] - '1';
 	unsigned int val;
@@ -823,7 +823,7 @@ static const struct snd_soc_dapm_widget adau1373_dapm_widgets[] = {
 static int adau1373_check_aif_clk(struct snd_soc_dapm_widget *source,
 	struct snd_soc_dapm_widget *sink)
 {
-	struct snd_soc_codec *codec = source->codec;
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(source->dapm);
 	struct adau1373 *adau1373 = snd_soc_codec_get_drvdata(codec);
 	unsigned int dai;
 	const char *clk;
@@ -844,7 +844,7 @@ static int adau1373_check_aif_clk(struct snd_soc_dapm_widget *source,
 static int adau1373_check_src(struct snd_soc_dapm_widget *source,
 	struct snd_soc_dapm_widget *sink)
 {
-	struct snd_soc_codec *codec = source->codec;
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(source->dapm);
 	struct adau1373 *adau1373 = snd_soc_codec_get_drvdata(codec);
 	unsigned int dai;
 
diff --git a/sound/soc/codecs/adau1761.c b/sound/soc/codecs/adau1761.c
index 5518ebd6947c..16093dc89441 100644
--- a/sound/soc/codecs/adau1761.c
+++ b/sound/soc/codecs/adau1761.c
@@ -255,7 +255,8 @@ static const struct snd_kcontrol_new adau1761_input_mux_control =
 static int adau1761_dejitter_fixup(struct snd_soc_dapm_widget *w,
 	struct snd_kcontrol *kcontrol, int event)
 {
-	struct adau *adau = snd_soc_codec_get_drvdata(w->codec);
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct adau *adau = snd_soc_codec_get_drvdata(codec);
 
 	/* After any power changes have been made the dejitter circuit
 	 * has to be reinitialized. */
@@ -405,6 +406,7 @@ static const struct snd_soc_dapm_widget adau1761_dapm_widgets[] = {
 	2, 0, NULL, 0),
 
 	SND_SOC_DAPM_SUPPLY("Slew Clock", ADAU1761_CLK_ENABLE0, 6, 0, NULL, 0),
+	SND_SOC_DAPM_SUPPLY("ALC Clock", ADAU1761_CLK_ENABLE0, 5, 0, NULL, 0),
 
 	SND_SOC_DAPM_SUPPLY_S("Digital Clock 0", 1, ADAU1761_CLK_ENABLE1,
 	0, 0, NULL, 0),
@@ -436,6 +438,9 @@ static const struct snd_soc_dapm_route adau1761_dapm_routes[] = {
 	{ "Right Playback Mixer", NULL, "Slew Clock" },
 	{ "Left Playback Mixer", NULL, "Slew Clock" },
 
+	{ "Left Input Mixer", NULL, "ALC Clock" },
+	{ "Right Input Mixer", NULL, "ALC Clock" },
+
 	{ "Digital Clock 0", NULL, "SYSCLK" },
 	{ "Digital Clock 1", NULL, "SYSCLK" },
 };
diff --git a/sound/soc/codecs/adau1781.c b/sound/soc/codecs/adau1781.c
index e9fc00fb13dd..aa6a37cc44b7 100644
--- a/sound/soc/codecs/adau1781.c
+++ b/sound/soc/codecs/adau1781.c
@@ -174,7 +174,7 @@ static const struct snd_kcontrol_new adau1781_mono_mixer_controls[] = {
 static int adau1781_dejitter_fixup(struct snd_soc_dapm_widget *w,
 	struct snd_kcontrol *kcontrol, int event)
 {
-	struct snd_soc_codec *codec = w->codec;
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
 	struct adau *adau = snd_soc_codec_get_drvdata(codec);
 
 	/* After any power changes have been made the dejitter circuit
diff --git a/sound/soc/codecs/adau17x1.c b/sound/soc/codecs/adau17x1.c
index 3e16c1c64115..427ad77bfe56 100644
--- a/sound/soc/codecs/adau17x1.c
+++ b/sound/soc/codecs/adau17x1.c
@@ -61,7 +61,8 @@ static const struct snd_kcontrol_new adau17x1_controls[] = {
 static int adau17x1_pll_event(struct snd_soc_dapm_widget *w,
 	struct snd_kcontrol *kcontrol, int event)
 {
-	struct adau *adau = snd_soc_codec_get_drvdata(w->codec);
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+	struct adau *adau = snd_soc_codec_get_drvdata(codec);
 	int ret;
 
 	if (SND_SOC_DAPM_EVENT_ON(event)) {
diff --git a/sound/soc/codecs/adav80x.c b/sound/soc/codecs/adav80x.c
index ce3cdca9fc62..b67480f1b1aa 100644
--- a/sound/soc/codecs/adav80x.c
+++ b/sound/soc/codecs/adav80x.c
@@ -212,7 +212,7 @@ static const struct snd_soc_dapm_widget adav80x_dapm_widgets[] = {
 static int adav80x_dapm_sysclk_check(struct snd_soc_dapm_widget *source,
 	struct snd_soc_dapm_widget *sink)
 {
-	struct snd_soc_codec *codec = source->codec;
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(source->dapm);
 	struct adav80x *adav80x = snd_soc_codec_get_drvdata(codec);
 	const char *clk;
 
@@ -236,7 +236,7 @@ static int adav80x_dapm_sysclk_check(struct snd_soc_dapm_widget *source,
 static int adav80x_dapm_pll_check(struct snd_soc_dapm_widget *source,
 	struct snd_soc_dapm_widget *sink)
 {
-	struct snd_soc_codec *codec = source->codec;
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(source->dapm);
 	struct adav80x *adav80x = snd_soc_codec_get_drvdata(codec);
 
 	return adav80x->pll_src == ADAV80X_PLL_SRC_XTAL;
diff --git a/sound/soc/codecs/ak4535.c b/sound/soc/codecs/ak4535.c
index 30e297890fec..9130d916f2f4 100644
--- a/sound/soc/codecs/ak4535.c
+++ b/sound/soc/codecs/ak4535.c
@@ -373,33 +373,9 @@ static struct snd_soc_dai_driver ak4535_dai = {
 	.ops = &ak4535_dai_ops,
 };
 
-static int ak4535_suspend(struct snd_soc_codec *codec)
-{
-	ak4535_set_bias_level(codec, SND_SOC_BIAS_OFF);
-	return 0;
-}
-
 static int ak4535_resume(struct snd_soc_codec *codec)
 {
 	snd_soc_cache_sync(codec);
-	ak4535_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-	return 0;
-}
-
-static int ak4535_probe(struct snd_soc_codec *codec)
-{
-	/* power on device */
-	ak4535_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
-	snd_soc_add_codec_controls(codec, ak4535_snd_controls,
-				ARRAY_SIZE(ak4535_snd_controls));
-	return 0;
-}
-
-/* power down chip */
-static int ak4535_remove(struct snd_soc_codec *codec)
-{
-	ak4535_set_bias_level(codec, SND_SOC_BIAS_OFF);
 	return 0;
 }
 
@@ -416,11 +392,12 @@ static const struct regmap_config ak4535_regmap = {
 };
 
 static struct snd_soc_codec_driver soc_codec_dev_ak4535 = {
-	.probe = ak4535_probe,
-	.remove = ak4535_remove,
-	.suspend = ak4535_suspend,
 	.resume = ak4535_resume,
 	.set_bias_level = ak4535_set_bias_level,
+	.suspend_bias_off = true,
+
+	.controls = ak4535_snd_controls,
+	.num_controls = ARRAY_SIZE(ak4535_snd_controls),
 	.dapm_widgets = ak4535_dapm_widgets,
 	.num_dapm_widgets = ARRAY_SIZE(ak4535_dapm_widgets),
 	.dapm_routes = ak4535_audio_map,
diff --git a/sound/soc/codecs/ak4641.c b/sound/soc/codecs/ak4641.c
index 7afe8f482088..70861c7b1631 100644
--- a/sound/soc/codecs/ak4641.c
+++ b/sound/soc/codecs/ak4641.c
@@ -505,39 +505,7 @@ static struct snd_soc_dai_driver ak4641_dai[] = {
 },
 };
 
-static int ak4641_suspend(struct snd_soc_codec *codec)
-{
-	ak4641_set_bias_level(codec, SND_SOC_BIAS_OFF);
-	return 0;
-}
-
-static int ak4641_resume(struct snd_soc_codec *codec)
-{
-	ak4641_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-	return 0;
-}
-
-static int ak4641_probe(struct snd_soc_codec *codec)
-{
-	/* power on device */
-	ak4641_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
-	return 0;
-}
-
-static int ak4641_remove(struct snd_soc_codec *codec)
-{
-	ak4641_set_bias_level(codec, SND_SOC_BIAS_OFF);
-
-	return 0;
-}
-
-
 static struct snd_soc_codec_driver soc_codec_dev_ak4641 = {
-	.probe = ak4641_probe,
-	.remove = ak4641_remove,
-	.suspend = ak4641_suspend,
-	.resume = ak4641_resume,
 	.controls = ak4641_snd_controls,
 	.num_controls = ARRAY_SIZE(ak4641_snd_controls),
 	.dapm_widgets = ak4641_dapm_widgets,
@@ -545,6 +513,7 @@ static struct snd_soc_codec_driver soc_codec_dev_ak4641 = {
 	.dapm_routes = ak4641_audio_map,
 	.num_dapm_routes = ARRAY_SIZE(ak4641_audio_map),
 	.set_bias_level = ak4641_set_bias_level,
+	.suspend_bias_off = true,
 };
 
 static const struct regmap_config ak4641_regmap = {
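The ak4535 and ak4641 conversions above replace open-coded probe/remove/suspend/resume handlers, which only juggled bias levels and registered controls, with the core's suspend_bias_off flag and table-based control registration. A minimal sketch of the resulting codec driver shape, with placeholder names that are not part of this series:

#include <sound/soc.h>

/* Placeholder bias handler standing in for a real codec's implementation. */
static int example_set_bias_level(struct snd_soc_codec *codec,
				  enum snd_soc_bias_level level)
{
	/* A real driver would touch its power-control registers here. */
	return 0;
}

static struct snd_soc_codec_driver soc_codec_dev_example = {
	.set_bias_level = example_set_bias_level,
	/* The core now drives the codec to SND_SOC_BIAS_OFF on suspend and
	 * back up on resume, so the removed suspend/resume/probe/remove
	 * boilerplate is no longer needed. */
	.suspend_bias_off = true,
};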
diff --git a/sound/soc/codecs/ak4642.c b/sound/soc/codecs/ak4642.c
index 041712592e29..dde8b49c19ad 100644
--- a/sound/soc/codecs/ak4642.c
+++ b/sound/soc/codecs/ak4642.c
@@ -491,23 +491,7 @@ static int ak4642_resume(struct snd_soc_codec *codec)
 	return 0;
 }
 
-
-static int ak4642_probe(struct snd_soc_codec *codec)
-{
-	ak4642_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
-	return 0;
-}
-
-static int ak4642_remove(struct snd_soc_codec *codec)
-{
-	ak4642_set_bias_level(codec, SND_SOC_BIAS_OFF);
-	return 0;
-}
-
 static struct snd_soc_codec_driver soc_codec_dev_ak4642 = {
-	.probe = ak4642_probe,
-	.remove = ak4642_remove,
 	.resume = ak4642_resume,
 	.set_bias_level = ak4642_set_bias_level,
 	.controls = ak4642_snd_controls,
diff --git a/sound/soc/codecs/ak4671.c b/sound/soc/codecs/ak4671.c
index 998fa0c5a0b9..686cacb0e835 100644
--- a/sound/soc/codecs/ak4671.c
+++ b/sound/soc/codecs/ak4671.c
@@ -611,20 +611,7 @@ static struct snd_soc_dai_driver ak4671_dai = {
 	.ops = &ak4671_dai_ops,
 };
 
-static int ak4671_probe(struct snd_soc_codec *codec)
-{
-	return ak4671_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-}
-
-static int ak4671_remove(struct snd_soc_codec *codec)
-{
-	ak4671_set_bias_level(codec, SND_SOC_BIAS_OFF);
-	return 0;
-}
-
 static struct snd_soc_codec_driver soc_codec_dev_ak4671 = {
-	.probe = ak4671_probe,
-	.remove = ak4671_remove,
 	.set_bias_level = ak4671_set_bias_level,
 	.controls = ak4671_snd_controls,
 	.num_controls = ARRAY_SIZE(ak4671_snd_controls),
diff --git a/sound/soc/codecs/alc5623.c b/sound/soc/codecs/alc5623.c
index 9d0755aa1d16..bdf8c5ac8ca4 100644
--- a/sound/soc/codecs/alc5623.c
+++ b/sound/soc/codecs/alc5623.c
@@ -866,7 +866,6 @@ static int alc5623_suspend(struct snd_soc_codec *codec)
 {
 	struct alc5623_priv *alc5623 = snd_soc_codec_get_drvdata(codec);
 
-	alc5623_set_bias_level(codec, SND_SOC_BIAS_OFF);
 	regcache_cache_only(alc5623->regmap, true);
 
 	return 0;
@@ -887,15 +886,6 @@ static int alc5623_resume(struct snd_soc_codec *codec)
 		return ret;
 	}
 
-	alc5623_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
-	/* charge alc5623 caps */
-	if (codec->dapm.suspend_bias_level == SND_SOC_BIAS_ON) {
-		alc5623_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-		codec->dapm.bias_level = SND_SOC_BIAS_ON;
-		alc5623_set_bias_level(codec, codec->dapm.bias_level);
-	}
-
 	return 0;
 }
 
@@ -906,9 +896,6 @@ static int alc5623_probe(struct snd_soc_codec *codec)
 
 	alc5623_reset(codec);
 
-	/* power on device */
-	alc5623_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
 	if (alc5623->add_ctrl) {
 		snd_soc_write(codec, ALC5623_ADD_CTRL_REG,
 			alc5623->add_ctrl);
@@ -964,19 +951,12 @@ static int alc5623_probe(struct snd_soc_codec *codec)
 	return 0;
 }
 
-/* power down chip */
-static int alc5623_remove(struct snd_soc_codec *codec)
-{
-	alc5623_set_bias_level(codec, SND_SOC_BIAS_OFF);
-	return 0;
-}
-
 static struct snd_soc_codec_driver soc_codec_device_alc5623 = {
 	.probe = alc5623_probe,
-	.remove = alc5623_remove,
 	.suspend = alc5623_suspend,
 	.resume = alc5623_resume,
 	.set_bias_level = alc5623_set_bias_level,
+	.suspend_bias_off = true,
 };
 
 static const struct regmap_config alc5623_regmap = {
diff --git a/sound/soc/codecs/alc5632.c b/sound/soc/codecs/alc5632.c
index 85942ca36cbf..d1fdbc266631 100644
--- a/sound/soc/codecs/alc5632.c
+++ b/sound/soc/codecs/alc5632.c
@@ -1038,23 +1038,15 @@ static struct snd_soc_dai_driver alc5632_dai = {
 };
 
 #ifdef CONFIG_PM
-static int alc5632_suspend(struct snd_soc_codec *codec)
-{
-	alc5632_set_bias_level(codec, SND_SOC_BIAS_OFF);
-	return 0;
-}
-
 static int alc5632_resume(struct snd_soc_codec *codec)
 {
 	struct alc5632_priv *alc5632 = snd_soc_codec_get_drvdata(codec);
 
 	regcache_sync(alc5632->regmap);
 
-	alc5632_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
 	return 0;
 }
 #else
-#define alc5632_suspend NULL
 #define alc5632_resume NULL
 #endif
 
@@ -1062,9 +1054,6 @@ static int alc5632_probe(struct snd_soc_codec *codec)
 {
 	struct alc5632_priv *alc5632 = snd_soc_codec_get_drvdata(codec);
 
-	/* power on device */
-	alc5632_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
 	switch (alc5632->id) {
 	case 0x5c:
 		snd_soc_add_codec_controls(codec, alc5632_vol_snd_controls,
@@ -1077,19 +1066,12 @@ static int alc5632_probe(struct snd_soc_codec *codec)
 	return 0;
 }
 
-/* power down chip */
-static int alc5632_remove(struct snd_soc_codec *codec)
-{
-	alc5632_set_bias_level(codec, SND_SOC_BIAS_OFF);
-	return 0;
-}
-
 static struct snd_soc_codec_driver soc_codec_device_alc5632 = {
 	.probe = alc5632_probe,
-	.remove = alc5632_remove,
-	.suspend = alc5632_suspend,
 	.resume = alc5632_resume,
 	.set_bias_level = alc5632_set_bias_level,
+	.suspend_bias_off = true,
+
 	.controls = alc5632_snd_controls,
 	.num_controls = ARRAY_SIZE(alc5632_snd_controls),
 	.dapm_widgets = alc5632_dapm_widgets,
diff --git a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c
index 0c05e7a7945f..9550d7433ad0 100644
--- a/sound/soc/codecs/arizona.c
+++ b/sound/soc/codecs/arizona.c
@@ -61,6 +61,11 @@
 #define ARIZONA_FLL_MIN_OUTDIV 2
 #define ARIZONA_FLL_MAX_OUTDIV 7
 
+#define ARIZONA_FMT_DSP_MODE_A		0
+#define ARIZONA_FMT_DSP_MODE_B		1
+#define ARIZONA_FMT_I2S_MODE		2
+#define ARIZONA_FMT_LEFT_JUSTIFIED_MODE	3
+
 #define arizona_fll_err(_fll, fmt, ...) \
 	dev_err(_fll->arizona->dev, "FLL%d: " fmt, _fll->id, ##__VA_ARGS__)
 #define arizona_fll_warn(_fll, fmt, ...) \
@@ -648,7 +653,7 @@ SOC_ENUM_SINGLE_DECL(arizona_in_hpf_cut_enum,
 EXPORT_SYMBOL_GPL(arizona_in_hpf_cut_enum);
 
 static const char * const arizona_in_dmic_osr_text[] = {
-	"1.536MHz", "3.072MHz", "6.144MHz",
+	"1.536MHz", "3.072MHz", "6.144MHz", "768kHz",
 };
 
 const struct soc_enum arizona_in_dmic_osr[] = {
@@ -946,10 +951,26 @@ static int arizona_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
 
 	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
 	case SND_SOC_DAIFMT_DSP_A:
-		mode = 0;
+		mode = ARIZONA_FMT_DSP_MODE_A;
+		break;
+	case SND_SOC_DAIFMT_DSP_B:
+		if ((fmt & SND_SOC_DAIFMT_MASTER_MASK)
+				!= SND_SOC_DAIFMT_CBM_CFM) {
+			arizona_aif_err(dai, "DSP_B not valid in slave mode\n");
+			return -EINVAL;
+		}
+		mode = ARIZONA_FMT_DSP_MODE_B;
 		break;
 	case SND_SOC_DAIFMT_I2S:
-		mode = 2;
+		mode = ARIZONA_FMT_I2S_MODE;
+		break;
+	case SND_SOC_DAIFMT_LEFT_J:
+		if ((fmt & SND_SOC_DAIFMT_MASTER_MASK)
+				!= SND_SOC_DAIFMT_CBM_CFM) {
+			arizona_aif_err(dai, "LEFT_J not valid in slave mode\n");
+			return -EINVAL;
+		}
+		mode = ARIZONA_FMT_LEFT_JUSTIFIED_MODE;
 		break;
 	default:
 		arizona_aif_err(dai, "Unsupported DAI format %d\n",
@@ -1164,13 +1185,13 @@ static void arizona_wm5102_set_dac_comp(struct snd_soc_codec *codec,
 		{ 0x80, 0x0 },
 	};
 
-	mutex_lock(&codec->mutex);
+	mutex_lock(&arizona->dac_comp_lock);
 
 	dac_comp[1].def = arizona->dac_comp_coeff;
 	if (rate >= 176400)
 		dac_comp[2].def = arizona->dac_comp_enabled;
 
-	mutex_unlock(&codec->mutex);
+	mutex_unlock(&arizona->dac_comp_lock);
 
 	regmap_multi_reg_write(arizona->regmap,
 			       dac_comp,
@@ -1298,7 +1319,8 @@ static int arizona_hw_params(struct snd_pcm_substream *substream,
 
 	/* Force multiple of 2 channels for I2S mode */
 	val = snd_soc_read(codec, base + ARIZONA_AIF_FORMAT);
-	if ((channels & 1) && (val & ARIZONA_AIF1_FMT_MASK)) {
+	val &= ARIZONA_AIF1_FMT_MASK;
+	if ((channels & 1) && (val == ARIZONA_FMT_I2S_MODE)) {
 		arizona_aif_dbg(dai, "Forcing stereo mode\n");
 		bclk_target /= channels;
 		bclk_target *= channels + 1;
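The arizona set_fmt hunk adds DSP_B and LEFT_J support, but only when the codec is both bit-clock and frame master, since those formats are not valid in slave mode on these parts. The guard reduces to masking the master bits of the DAI format word; a self-contained sketch of that check under those assumptions:

#include <linux/types.h>
#include <sound/soc-dai.h>

/* Returns true when the codec is bit-clock and frame master (CBM_CFM),
 * which is what the DSP_B and LEFT_J cases above require. */
static bool example_codec_is_master(unsigned int fmt)
{
	return (fmt & SND_SOC_DAIFMT_MASTER_MASK) == SND_SOC_DAIFMT_CBM_CFM;
}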
diff --git a/sound/soc/codecs/cq93vc.c b/sound/soc/codecs/cq93vc.c
index 537327c7f7f1..8d638e8aa8eb 100644
--- a/sound/soc/codecs/cq93vc.c
+++ b/sound/soc/codecs/cq93vc.c
@@ -62,14 +62,10 @@ static int cq93vc_mute(struct snd_soc_dai *dai, int mute)
 static int cq93vc_set_dai_sysclk(struct snd_soc_dai *codec_dai,
 	int clk_id, unsigned int freq, int dir)
 {
-	struct snd_soc_codec *codec = codec_dai->codec;
-	struct davinci_vc *davinci_vc = codec->dev->platform_data;
-
 	switch (freq) {
 	case 22579200:
 	case 27000000:
 	case 33868800:
-		davinci_vc->cq93vc.sysclk = freq;
 		return 0;
 	}
 
@@ -126,32 +122,6 @@ static struct snd_soc_dai_driver cq93vc_dai = {
 	.ops = &cq93vc_dai_ops,
 };
 
-static int cq93vc_resume(struct snd_soc_codec *codec)
-{
-	cq93vc_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
-	return 0;
-}
-
-static int cq93vc_probe(struct snd_soc_codec *codec)
-{
-	struct davinci_vc *davinci_vc = codec->dev->platform_data;
-
-	davinci_vc->cq93vc.codec = codec;
-
-	/* Off, with power on */
-	cq93vc_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
-
-	return 0;
-}
-
-static int cq93vc_remove(struct snd_soc_codec *codec)
-{
-	cq93vc_set_bias_level(codec, SND_SOC_BIAS_OFF);
-
-	return 0;
-}
-
 static struct regmap *cq93vc_get_regmap(struct device *dev)
 {
 	struct davinci_vc *davinci_vc = dev->platform_data;
@@ -161,9 +131,6 @@ static struct regmap *cq93vc_get_regmap(struct device *dev)
 
 static struct snd_soc_codec_driver soc_codec_dev_cq93vc = {
 	.set_bias_level = cq93vc_set_bias_level,
-	.probe = cq93vc_probe,
-	.remove = cq93vc_remove,
-	.resume = cq93vc_resume,
 	.get_regmap = cq93vc_get_regmap,
 	.controls = cq93vc_snd_controls,
 	.num_controls = ARRAY_SIZE(cq93vc_snd_controls),
diff --git a/sound/soc/codecs/cs4265.c b/sound/soc/codecs/cs4265.c
index 4fdd47d700e3..ce6086835ebd 100644
--- a/sound/soc/codecs/cs4265.c
+++ b/sound/soc/codecs/cs4265.c
@@ -32,7 +32,6 @@
32#include "cs4265.h" 32#include "cs4265.h"
33 33
34struct cs4265_private { 34struct cs4265_private {
35 struct device *dev;
36 struct regmap *regmap; 35 struct regmap *regmap;
37 struct gpio_desc *reset_gpio; 36 struct gpio_desc *reset_gpio;
38 u8 format; 37 u8 format;
@@ -598,7 +597,6 @@ static int cs4265_i2c_probe(struct i2c_client *i2c_client,
598 GFP_KERNEL); 597 GFP_KERNEL);
599 if (cs4265 == NULL) 598 if (cs4265 == NULL)
600 return -ENOMEM; 599 return -ENOMEM;
601 cs4265->dev = &i2c_client->dev;
602 600
603 cs4265->regmap = devm_regmap_init_i2c(i2c_client, &cs4265_regmap); 601 cs4265->regmap = devm_regmap_init_i2c(i2c_client, &cs4265_regmap);
604 if (IS_ERR(cs4265->regmap)) { 602 if (IS_ERR(cs4265->regmap)) {
diff --git a/sound/soc/codecs/cs4271-i2c.c b/sound/soc/codecs/cs4271-i2c.c
new file mode 100644
index 000000000000..b264da030340
--- /dev/null
+++ b/sound/soc/codecs/cs4271-i2c.c
@@ -0,0 +1,62 @@
+/*
+ * CS4271 I2C audio driver
+ *
+ * Copyright (c) 2010 Alexander Sverdlin <subaparts@yandex.ru>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/i2c.h>
+#include <linux/regmap.h>
+#include <sound/soc.h>
+#include "cs4271.h"
+
+static int cs4271_i2c_probe(struct i2c_client *client,
+			    const struct i2c_device_id *id)
+{
+	struct regmap_config config;
+
+	config = cs4271_regmap_config;
+	config.reg_bits = 8;
+	config.val_bits = 8;
+
+	return cs4271_probe(&client->dev,
+			    devm_regmap_init_i2c(client, &config));
+}
+
+static int cs4271_i2c_remove(struct i2c_client *client)
+{
+	snd_soc_unregister_codec(&client->dev);
+	return 0;
+}
+
+static const struct i2c_device_id cs4271_i2c_id[] = {
+	{ "cs4271", 0 },
+	{ }
+};
+MODULE_DEVICE_TABLE(i2c, cs4271_i2c_id);
+
+static struct i2c_driver cs4271_i2c_driver = {
+	.driver = {
+		.name = "cs4271",
+		.owner = THIS_MODULE,
+		.of_match_table = of_match_ptr(cs4271_dt_ids),
+	},
+	.probe = cs4271_i2c_probe,
+	.remove = cs4271_i2c_remove,
+	.id_table = cs4271_i2c_id,
+};
+module_i2c_driver(cs4271_i2c_driver);
+
+MODULE_DESCRIPTION("ASoC CS4271 I2C Driver");
+MODULE_AUTHOR("Alexander Sverdlin <subaparts@yandex.ru>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/cs4271-spi.c b/sound/soc/codecs/cs4271-spi.c
new file mode 100644
index 000000000000..acd49d86e706
--- /dev/null
+++ b/sound/soc/codecs/cs4271-spi.c
@@ -0,0 +1,55 @@
+/*
+ * CS4271 SPI audio driver
+ *
+ * Copyright (c) 2010 Alexander Sverdlin <subaparts@yandex.ru>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2
+ * of the License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/spi/spi.h>
+#include <linux/regmap.h>
+#include <sound/soc.h>
+#include "cs4271.h"
+
+static int cs4271_spi_probe(struct spi_device *spi)
+{
+	struct regmap_config config;
+
+	config = cs4271_regmap_config;
+	config.reg_bits = 16;
+	config.val_bits = 8;
+	config.read_flag_mask = 0x21;
+	config.write_flag_mask = 0x20;
+
+	return cs4271_probe(&spi->dev, devm_regmap_init_spi(spi, &config));
+}
+
+static int cs4271_spi_remove(struct spi_device *spi)
+{
+	snd_soc_unregister_codec(&spi->dev);
+	return 0;
+}
+
+static struct spi_driver cs4271_spi_driver = {
+	.driver = {
+		.name = "cs4271",
+		.owner = THIS_MODULE,
+		.of_match_table = of_match_ptr(cs4271_dt_ids),
+	},
+	.probe = cs4271_spi_probe,
+	.remove = cs4271_spi_remove,
+};
+module_spi_driver(cs4271_spi_driver);
+
+MODULE_DESCRIPTION("ASoC CS4271 SPI Driver");
+MODULE_AUTHOR("Alexander Sverdlin <subaparts@yandex.ru>");
+MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/cs4271.c b/sound/soc/codecs/cs4271.c
index 93cec52f4733..79a4efcb894c 100644
--- a/sound/soc/codecs/cs4271.c
+++ b/sound/soc/codecs/cs4271.c
@@ -23,8 +23,6 @@
 #include <linux/slab.h>
 #include <linux/delay.h>
 #include <linux/gpio.h>
-#include <linux/i2c.h>
-#include <linux/spi/spi.h>
 #include <linux/of.h>
 #include <linux/of_device.h>
 #include <linux/of_gpio.h>
@@ -32,6 +30,7 @@
 #include <sound/soc.h>
 #include <sound/tlv.h>
 #include <sound/cs4271.h>
+#include "cs4271.h"
 
 #define CS4271_PCM_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
 			    SNDRV_PCM_FMTBIT_S24_LE | \
@@ -527,14 +526,15 @@ static int cs4271_soc_resume(struct snd_soc_codec *codec)
 #endif /* CONFIG_PM */
 
 #ifdef CONFIG_OF
-static const struct of_device_id cs4271_dt_ids[] = {
+const struct of_device_id cs4271_dt_ids[] = {
 	{ .compatible = "cirrus,cs4271", },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, cs4271_dt_ids);
+EXPORT_SYMBOL_GPL(cs4271_dt_ids);
 #endif
 
-static int cs4271_probe(struct snd_soc_codec *codec)
+static int cs4271_codec_probe(struct snd_soc_codec *codec)
 {
 	struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec);
 	struct cs4271_platform_data *cs4271plat = codec->dev->platform_data;
@@ -587,7 +587,7 @@ static int cs4271_probe(struct snd_soc_codec *codec)
 	return 0;
 }
 
-static int cs4271_remove(struct snd_soc_codec *codec)
+static int cs4271_codec_remove(struct snd_soc_codec *codec)
 {
 	struct cs4271_private *cs4271 = snd_soc_codec_get_drvdata(codec);
 
@@ -599,8 +599,8 @@ static int cs4271_remove(struct snd_soc_codec *codec)
 };
 
 static struct snd_soc_codec_driver soc_codec_dev_cs4271 = {
-	.probe = cs4271_probe,
-	.remove = cs4271_remove,
+	.probe = cs4271_codec_probe,
+	.remove = cs4271_codec_remove,
 	.suspend = cs4271_soc_suspend,
 	.resume = cs4271_soc_resume,
 
@@ -642,14 +642,8 @@ static int cs4271_common_probe(struct device *dev,
 	return 0;
 }
 
-#if defined(CONFIG_SPI_MASTER)
-
-static const struct regmap_config cs4271_spi_regmap = {
-	.reg_bits = 16,
-	.val_bits = 8,
+const struct regmap_config cs4271_regmap_config = {
 	.max_register = CS4271_LASTREG,
-	.read_flag_mask = 0x21,
-	.write_flag_mask = 0x20,
 
 	.reg_defaults = cs4271_reg_defaults,
 	.num_reg_defaults = ARRAY_SIZE(cs4271_reg_defaults),
@@ -657,140 +651,27 @@ static const struct regmap_config cs4271_spi_regmap = {
 
 	.volatile_reg = cs4271_volatile_reg,
 };
+EXPORT_SYMBOL_GPL(cs4271_regmap_config);
 
-static int cs4271_spi_probe(struct spi_device *spi)
+int cs4271_probe(struct device *dev, struct regmap *regmap)
 {
 	struct cs4271_private *cs4271;
 	int ret;
 
-	ret = cs4271_common_probe(&spi->dev, &cs4271);
-	if (ret < 0)
-		return ret;
-
-	spi_set_drvdata(spi, cs4271);
-	cs4271->regmap = devm_regmap_init_spi(spi, &cs4271_spi_regmap);
-	if (IS_ERR(cs4271->regmap))
-		return PTR_ERR(cs4271->regmap);
-
-	return snd_soc_register_codec(&spi->dev, &soc_codec_dev_cs4271,
-				      &cs4271_dai, 1);
-}
-
-static int cs4271_spi_remove(struct spi_device *spi)
-{
-	snd_soc_unregister_codec(&spi->dev);
-	return 0;
-}
-
-static struct spi_driver cs4271_spi_driver = {
-	.driver = {
-		.name = "cs4271",
-		.owner = THIS_MODULE,
-		.of_match_table = of_match_ptr(cs4271_dt_ids),
-	},
-	.probe = cs4271_spi_probe,
-	.remove = cs4271_spi_remove,
-};
-#endif /* defined(CONFIG_SPI_MASTER) */
-
-#if IS_ENABLED(CONFIG_I2C)
-static const struct i2c_device_id cs4271_i2c_id[] = {
-	{"cs4271", 0},
-	{}
-};
-MODULE_DEVICE_TABLE(i2c, cs4271_i2c_id);
+	if (IS_ERR(regmap))
+		return PTR_ERR(regmap);
 
-static const struct regmap_config cs4271_i2c_regmap = {
-	.reg_bits = 8,
-	.val_bits = 8,
-	.max_register = CS4271_LASTREG,
-
-	.reg_defaults = cs4271_reg_defaults,
-	.num_reg_defaults = ARRAY_SIZE(cs4271_reg_defaults),
-	.cache_type = REGCACHE_RBTREE,
-
-	.volatile_reg = cs4271_volatile_reg,
-};
-
-static int cs4271_i2c_probe(struct i2c_client *client,
-			    const struct i2c_device_id *id)
-{
-	struct cs4271_private *cs4271;
-	int ret;
-
-	ret = cs4271_common_probe(&client->dev, &cs4271);
+	ret = cs4271_common_probe(dev, &cs4271);
 	if (ret < 0)
 		return ret;
 
-	i2c_set_clientdata(client, cs4271);
-	cs4271->regmap = devm_regmap_init_i2c(client, &cs4271_i2c_regmap);
-	if (IS_ERR(cs4271->regmap))
-		return PTR_ERR(cs4271->regmap);
+	dev_set_drvdata(dev, cs4271);
+	cs4271->regmap = regmap;
 
-	return snd_soc_register_codec(&client->dev, &soc_codec_dev_cs4271,
-				      &cs4271_dai, 1);
-}
-
-static int cs4271_i2c_remove(struct i2c_client *client)
-{
-	snd_soc_unregister_codec(&client->dev);
-	return 0;
-}
-
-static struct i2c_driver cs4271_i2c_driver = {
-	.driver = {
-		.name = "cs4271",
-		.owner = THIS_MODULE,
-		.of_match_table = of_match_ptr(cs4271_dt_ids),
-	},
-	.id_table = cs4271_i2c_id,
-	.probe = cs4271_i2c_probe,
-	.remove = cs4271_i2c_remove,
-};
-#endif /* IS_ENABLED(CONFIG_I2C) */
-
-/*
- * We only register our serial bus driver here without
- * assignment to particular chip. So if any of the below
- * fails, there is some problem with I2C or SPI subsystem.
- * In most cases this module will be compiled with support
- * of only one serial bus.
- */
-static int __init cs4271_modinit(void)
-{
-	int ret;
-
-#if IS_ENABLED(CONFIG_I2C)
-	ret = i2c_add_driver(&cs4271_i2c_driver);
-	if (ret) {
-		pr_err("Failed to register CS4271 I2C driver: %d\n", ret);
-		return ret;
-	}
-#endif
-
-#if defined(CONFIG_SPI_MASTER)
-	ret = spi_register_driver(&cs4271_spi_driver);
-	if (ret) {
-		pr_err("Failed to register CS4271 SPI driver: %d\n", ret);
-		return ret;
-	}
-#endif
-
-	return 0;
-}
-module_init(cs4271_modinit);
-
-static void __exit cs4271_modexit(void)
-{
-#if defined(CONFIG_SPI_MASTER)
-	spi_unregister_driver(&cs4271_spi_driver);
-#endif
-
-#if IS_ENABLED(CONFIG_I2C)
-	i2c_del_driver(&cs4271_i2c_driver);
-#endif
+	return snd_soc_register_codec(dev, &soc_codec_dev_cs4271, &cs4271_dai,
+				      1);
 }
-module_exit(cs4271_modexit);
+EXPORT_SYMBOL_GPL(cs4271_probe);
 
 MODULE_AUTHOR("Alexander Sverdlin <subaparts@yandex.ru>");
 MODULE_DESCRIPTION("Cirrus Logic CS4271 ALSA SoC Codec Driver");
diff --git a/sound/soc/codecs/cs4271.h b/sound/soc/codecs/cs4271.h
new file mode 100644
index 000000000000..9adad8eefdc9
--- /dev/null
+++ b/sound/soc/codecs/cs4271.h
@@ -0,0 +1,11 @@
+#ifndef _CS4271_PRIV_H
+#define _CS4271_PRIV_H
+
+#include <linux/regmap.h>
+
+extern const struct of_device_id cs4271_dt_ids[];
+extern const struct regmap_config cs4271_regmap_config;
+
+int cs4271_probe(struct device *dev, struct regmap *regmap);
+
+#endif
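The cs4271 split follows the usual ASoC layout of a bus-agnostic core, cs4271_probe() taking a device plus a ready-made regmap, with thin I2C and SPI front-ends. Each front-end copies the shared regmap_config by value and overrides only the transport-specific framing, so the register defaults, cache type and volatile-register hook live in one place. A small sketch of that copy-and-customise step (function name is a placeholder):

#include <linux/regmap.h>
#include "cs4271.h"

/* Sketch: take the shared description and override only the fields a
 * particular bus needs; everything else stays defined once in cs4271.c. */
static struct regmap_config example_bus_config(void)
{
	struct regmap_config config = cs4271_regmap_config;

	config.reg_bits = 8;	/* transport-specific register framing */
	config.val_bits = 8;

	return config;
}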
diff --git a/sound/soc/codecs/cs42l51-i2c.c b/sound/soc/codecs/cs42l51-i2c.c
index cee51ae177c1..c40428f25ba5 100644
--- a/sound/soc/codecs/cs42l51-i2c.c
+++ b/sound/soc/codecs/cs42l51-i2c.c
@@ -46,6 +46,7 @@ static struct i2c_driver cs42l51_i2c_driver = {
 	.driver = {
 		.name = "cs42l51",
 		.owner = THIS_MODULE,
+		.of_match_table = cs42l51_of_match,
 	},
 	.probe = cs42l51_i2c_probe,
 	.remove = cs42l51_i2c_remove,
diff --git a/sound/soc/codecs/cs42l51.c b/sound/soc/codecs/cs42l51.c
index 09488d97de60..b3951524339f 100644
--- a/sound/soc/codecs/cs42l51.c
+++ b/sound/soc/codecs/cs42l51.c
@@ -153,15 +153,17 @@ static const struct snd_kcontrol_new cs42l51_snd_controls[] = {
 static int cs42l51_pdn_event(struct snd_soc_dapm_widget *w,
 	struct snd_kcontrol *kcontrol, int event)
 {
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
+
 	switch (event) {
 	case SND_SOC_DAPM_PRE_PMD:
-		snd_soc_update_bits(w->codec, CS42L51_POWER_CTL1,
+		snd_soc_update_bits(codec, CS42L51_POWER_CTL1,
 				    CS42L51_POWER_CTL1_PDN,
 				    CS42L51_POWER_CTL1_PDN);
 		break;
 	default:
 	case SND_SOC_DAPM_POST_PMD:
-		snd_soc_update_bits(w->codec, CS42L51_POWER_CTL1,
+		snd_soc_update_bits(codec, CS42L51_POWER_CTL1,
 				    CS42L51_POWER_CTL1_PDN, 0);
 		break;
 	}
@@ -558,11 +560,13 @@ error:
 }
 EXPORT_SYMBOL_GPL(cs42l51_probe);
 
-static const struct of_device_id cs42l51_of_match[] = {
+const struct of_device_id cs42l51_of_match[] = {
 	{ .compatible = "cirrus,cs42l51", },
 	{ }
 };
 MODULE_DEVICE_TABLE(of, cs42l51_of_match);
+EXPORT_SYMBOL_GPL(cs42l51_of_match);
+
 MODULE_AUTHOR("Arnaud Patard <arnaud.patard@rtp-net.org>");
 MODULE_DESCRIPTION("Cirrus Logic CS42L51 ALSA SoC Codec Driver");
 MODULE_LICENSE("GPL");
diff --git a/sound/soc/codecs/cs42l51.h b/sound/soc/codecs/cs42l51.h
index 8c55bf384bc6..0ca805492ac4 100644
--- a/sound/soc/codecs/cs42l51.h
+++ b/sound/soc/codecs/cs42l51.h
@@ -22,6 +22,7 @@ struct device;
 
 extern const struct regmap_config cs42l51_regmap;
 int cs42l51_probe(struct device *dev, struct regmap *regmap);
+extern const struct of_device_id cs42l51_of_match[];
 
 #define CS42L51_CHIP_ID			0x1B
 #define CS42L51_CHIP_REV_A		0x00
diff --git a/sound/soc/codecs/cs42l73.c b/sound/soc/codecs/cs42l73.c
index 2f8b94683e83..7c55537c69cf 100644
--- a/sound/soc/codecs/cs42l73.c
+++ b/sound/soc/codecs/cs42l73.c
@@ -584,7 +584,7 @@ static const struct snd_kcontrol_new cs42l73_snd_controls[] = {
 static int cs42l73_spklo_spk_amp_event(struct snd_soc_dapm_widget *w,
 	struct snd_kcontrol *kcontrol, int event)
 {
-	struct snd_soc_codec *codec = w->codec;
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
 	struct cs42l73_private *priv = snd_soc_codec_get_drvdata(codec);
 	switch (event) {
 	case SND_SOC_DAPM_POST_PMD:
@@ -600,7 +600,7 @@ static int cs42l73_spklo_spk_amp_event(struct snd_soc_dapm_widget *w,
 static int cs42l73_ear_amp_event(struct snd_soc_dapm_widget *w,
 	struct snd_kcontrol *kcontrol, int event)
 {
-	struct snd_soc_codec *codec = w->codec;
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
 	struct cs42l73_private *priv = snd_soc_codec_get_drvdata(codec);
 	switch (event) {
 	case SND_SOC_DAPM_POST_PMD:
@@ -618,7 +618,7 @@ static int cs42l73_ear_amp_event(struct snd_soc_dapm_widget *w,
 static int cs42l73_hp_amp_event(struct snd_soc_dapm_widget *w,
 	struct snd_kcontrol *kcontrol, int event)
 {
-	struct snd_soc_codec *codec = w->codec;
+	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
 	struct cs42l73_private *priv = snd_soc_codec_get_drvdata(codec);
 	switch (event) {
 	case SND_SOC_DAPM_POST_PMD:
diff --git a/sound/soc/codecs/es8328-i2c.c b/sound/soc/codecs/es8328-i2c.c
index aae410d122ee..2d05b5d3a6ce 100644
--- a/sound/soc/codecs/es8328-i2c.c
+++ b/sound/soc/codecs/es8328-i2c.c
@@ -19,7 +19,7 @@
19#include "es8328.h" 19#include "es8328.h"
20 20
21static const struct i2c_device_id es8328_id[] = { 21static const struct i2c_device_id es8328_id[] = {
22 { "everest,es8328", 0 }, 22 { "es8328", 0 },
23 { } 23 { }
24}; 24};
25MODULE_DEVICE_TABLE(i2c, es8328_id); 25MODULE_DEVICE_TABLE(i2c, es8328_id);
diff --git a/sound/soc/codecs/max98090.c b/sound/soc/codecs/max98090.c
index d519294f57c7..34ed9a91f392 100644
--- a/sound/soc/codecs/max98090.c
+++ b/sound/soc/codecs/max98090.c
@@ -1311,6 +1311,10 @@ static const struct snd_soc_dapm_route max98090_dapm_routes[] = {
1311 {"MIC1 Input", NULL, "MIC1"}, 1311 {"MIC1 Input", NULL, "MIC1"},
1312 {"MIC2 Input", NULL, "MIC2"}, 1312 {"MIC2 Input", NULL, "MIC2"},
1313 1313
1314 {"DMICL", NULL, "DMICL_ENA"},
1315 {"DMICL", NULL, "DMICR_ENA"},
1316 {"DMICR", NULL, "DMICL_ENA"},
1317 {"DMICR", NULL, "DMICR_ENA"},
1314 {"DMICL", NULL, "AHPF"}, 1318 {"DMICL", NULL, "AHPF"},
1315 {"DMICR", NULL, "AHPF"}, 1319 {"DMICR", NULL, "AHPF"},
1316 1320
@@ -1368,8 +1372,6 @@ static const struct snd_soc_dapm_route max98090_dapm_routes[] = {
1368 {"DMIC Mux", "ADC", "ADCR"}, 1372 {"DMIC Mux", "ADC", "ADCR"},
1369 {"DMIC Mux", "DMIC", "DMICL"}, 1373 {"DMIC Mux", "DMIC", "DMICL"},
1370 {"DMIC Mux", "DMIC", "DMICR"}, 1374 {"DMIC Mux", "DMIC", "DMICR"},
1371 {"DMIC Mux", "DMIC", "DMICL_ENA"},
1372 {"DMIC Mux", "DMIC", "DMICR_ENA"},
1373 1375
1374 {"LBENL Mux", "Normal", "DMIC Mux"}, 1376 {"LBENL Mux", "Normal", "DMIC Mux"},
1375 {"LBENL Mux", "Loopback", "LTENL Mux"}, 1377 {"LBENL Mux", "Loopback", "LTENL Mux"},
@@ -1395,8 +1397,8 @@ static const struct snd_soc_dapm_route max98090_dapm_routes[] = {
1395 {"STENL Mux", "Sidetone Left", "DMICL"}, 1397 {"STENL Mux", "Sidetone Left", "DMICL"},
1396 {"STENR Mux", "Sidetone Right", "ADCR"}, 1398 {"STENR Mux", "Sidetone Right", "ADCR"},
1397 {"STENR Mux", "Sidetone Right", "DMICR"}, 1399 {"STENR Mux", "Sidetone Right", "DMICR"},
1398 {"DACL", "NULL", "STENL Mux"}, 1400 {"DACL", NULL, "STENL Mux"},
1399 {"DACR", "NULL", "STENL Mux"}, 1401 {"DACR", NULL, "STENR Mux"},
1400 1402
1401 {"AIFINL", NULL, "SHDN"}, 1403 {"AIFINL", NULL, "SHDN"},
1402 {"AIFINR", NULL, "SHDN"}, 1404 {"AIFINR", NULL, "SHDN"},
@@ -1941,13 +1943,13 @@ static int max98090_dai_set_sysclk(struct snd_soc_dai *dai,
1941 * 0x02 (when master clk is 20MHz to 40MHz).. 1943 * 0x02 (when master clk is 20MHz to 40MHz)..
1942 * 0x03 (when master clk is 40MHz to 60MHz).. 1944 * 0x03 (when master clk is 40MHz to 60MHz)..
1943 */ 1945 */
1944 if ((freq >= 10000000) && (freq < 20000000)) { 1946 if ((freq >= 10000000) && (freq <= 20000000)) {
1945 snd_soc_write(codec, M98090_REG_SYSTEM_CLOCK, 1947 snd_soc_write(codec, M98090_REG_SYSTEM_CLOCK,
1946 M98090_PSCLK_DIV1); 1948 M98090_PSCLK_DIV1);
1947 } else if ((freq >= 20000000) && (freq < 40000000)) { 1949 } else if ((freq > 20000000) && (freq <= 40000000)) {
1948 snd_soc_write(codec, M98090_REG_SYSTEM_CLOCK, 1950 snd_soc_write(codec, M98090_REG_SYSTEM_CLOCK,
1949 M98090_PSCLK_DIV2); 1951 M98090_PSCLK_DIV2);
1950 } else if ((freq >= 40000000) && (freq < 60000000)) { 1952 } else if ((freq > 40000000) && (freq <= 60000000)) {
1951 snd_soc_write(codec, M98090_REG_SYSTEM_CLOCK, 1953 snd_soc_write(codec, M98090_REG_SYSTEM_CLOCK,
1952 M98090_PSCLK_DIV4); 1954 M98090_PSCLK_DIV4);
1953 } else { 1955 } else {
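The max98090 sysclk hunk fixes the range boundaries: a master clock of exactly 20 MHz or 40 MHz now selects the smaller prescaler, and 60 MHz is accepted instead of falling through to the error path. The resulting selection logic amounts to the sketch below (return values are illustrative divider factors; the real driver writes the M98090_PSCLK_DIV* register fields):

#include <linux/errno.h>

static int example_psclk_div(unsigned long freq)
{
	if (freq >= 10000000 && freq <= 20000000)
		return 1;	/* M98090_PSCLK_DIV1 */
	if (freq > 20000000 && freq <= 40000000)
		return 2;	/* M98090_PSCLK_DIV2 */
	if (freq > 40000000 && freq <= 60000000)
		return 4;	/* M98090_PSCLK_DIV4 */

	return -EINVAL;
}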
diff --git a/sound/soc/codecs/max98095.c b/sound/soc/codecs/max98095.c
index 0ee6797d5083..01f3cc9c780f 100644
--- a/sound/soc/codecs/max98095.c
+++ b/sound/soc/codecs/max98095.c
@@ -16,6 +16,7 @@
 #include <linux/pm.h>
 #include <linux/i2c.h>
 #include <linux/clk.h>
+#include <linux/mutex.h>
 #include <sound/core.h>
 #include <sound/pcm.h>
 #include <sound/pcm_params.h>
@@ -57,6 +58,7 @@ struct max98095_priv {
 	unsigned int mic2pre;
 	struct snd_soc_jack *headphone_jack;
 	struct snd_soc_jack *mic_jack;
+	struct mutex lock;
 };
 
 static const struct reg_default max98095_reg_def[] = {
@@ -1803,7 +1805,7 @@ static int max98095_put_eq_enum(struct snd_kcontrol *kcontrol,
 	regsave = snd_soc_read(codec, M98095_088_CFG_LEVEL);
 	snd_soc_update_bits(codec, M98095_088_CFG_LEVEL, regmask, 0);
 
-	mutex_lock(&codec->mutex);
+	mutex_lock(&max98095->lock);
 	snd_soc_update_bits(codec, M98095_00F_HOST_CFG, M98095_SEG, M98095_SEG);
 	m98095_eq_band(codec, channel, 0, coef_set->band1);
 	m98095_eq_band(codec, channel, 1, coef_set->band2);
@@ -1811,7 +1813,7 @@ static int max98095_put_eq_enum(struct snd_kcontrol *kcontrol,
 	m98095_eq_band(codec, channel, 3, coef_set->band4);
 	m98095_eq_band(codec, channel, 4, coef_set->band5);
 	snd_soc_update_bits(codec, M98095_00F_HOST_CFG, M98095_SEG, 0);
-	mutex_unlock(&codec->mutex);
+	mutex_unlock(&max98095->lock);
 
 	/* Restore the original on/off state */
 	snd_soc_update_bits(codec, M98095_088_CFG_LEVEL, regmask, regsave);
@@ -1957,12 +1959,12 @@ static int max98095_put_bq_enum(struct snd_kcontrol *kcontrol,
 	regsave = snd_soc_read(codec, M98095_088_CFG_LEVEL);
 	snd_soc_update_bits(codec, M98095_088_CFG_LEVEL, regmask, 0);
 
-	mutex_lock(&codec->mutex);
+	mutex_lock(&max98095->lock);
 	snd_soc_update_bits(codec, M98095_00F_HOST_CFG, M98095_SEG, M98095_SEG);
 	m98095_biquad_band(codec, channel, 0, coef_set->band1);
 	m98095_biquad_band(codec, channel, 1, coef_set->band2);
 	snd_soc_update_bits(codec, M98095_00F_HOST_CFG, M98095_SEG, 0);
-	mutex_unlock(&codec->mutex);
+	mutex_unlock(&max98095->lock);
 
 	/* Restore the original on/off state */
 	snd_soc_update_bits(codec, M98095_088_CFG_LEVEL, regmask, regsave);
@@ -2395,6 +2397,8 @@ static int max98095_i2c_probe(struct i2c_client *i2c,
 	if (max98095 == NULL)
 		return -ENOMEM;
 
+	mutex_init(&max98095->lock);
+
 	max98095->regmap = devm_regmap_init_i2c(i2c, &max98095_regmap);
 	if (IS_ERR(max98095->regmap)) {
 		ret = PTR_ERR(max98095->regmap);
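The max98095 hunks stop borrowing codec->mutex (a core-internal lock drivers should not reach into) and serialise the multi-register EQ and biquad coefficient loads with a lock owned by the driver's private data, initialised once at I2C probe time. The shape of that change, with placeholder names:

#include <linux/mutex.h>

struct example_priv {
	struct mutex lock;	/* serialises multi-register coefficient loads */
};

static void example_init(struct example_priv *priv)
{
	mutex_init(&priv->lock);	/* done once, at device probe */
}

static void example_load_coefficients(struct example_priv *priv)
{
	mutex_lock(&priv->lock);
	/* ... write the coefficient registers as one atomic sequence ... */
	mutex_unlock(&priv->lock);
}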
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index 3fb83bf09768..d16331e0b64d 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -139,6 +139,7 @@ static const struct reg_default rt5645_reg[] = {
 	{ 0x76, 0x000a },
 	{ 0x77, 0x0c00 },
 	{ 0x78, 0x0000 },
+	{ 0x79, 0x0123 },
 	{ 0x80, 0x0000 },
 	{ 0x81, 0x0000 },
 	{ 0x82, 0x0000 },
@@ -334,6 +335,7 @@ static bool rt5645_readable_register(struct device *dev, unsigned int reg)
 	case RT5645_DMIC_CTRL2:
 	case RT5645_TDM_CTRL_1:
 	case RT5645_TDM_CTRL_2:
+	case RT5645_TDM_CTRL_3:
 	case RT5645_GLB_CLK:
 	case RT5645_PLL_CTRL1:
 	case RT5645_PLL_CTRL2:
diff --git a/sound/soc/codecs/rt5670.c b/sound/soc/codecs/rt5670.c
index ba9d9b4d4857..9bd8b4f63303 100644
--- a/sound/soc/codecs/rt5670.c
+++ b/sound/soc/codecs/rt5670.c
@@ -100,18 +100,18 @@ static const struct reg_default rt5670_reg[] = {
 	{ 0x4c, 0x5380 },
 	{ 0x4f, 0x0073 },
 	{ 0x52, 0x00d3 },
-	{ 0x53, 0xf0f0 },
+	{ 0x53, 0xf000 },
 	{ 0x61, 0x0000 },
 	{ 0x62, 0x0001 },
 	{ 0x63, 0x00c3 },
 	{ 0x64, 0x0000 },
-	{ 0x65, 0x0000 },
+	{ 0x65, 0x0001 },
 	{ 0x66, 0x0000 },
 	{ 0x6f, 0x8000 },
 	{ 0x70, 0x8000 },
 	{ 0x71, 0x8000 },
 	{ 0x72, 0x8000 },
-	{ 0x73, 0x1110 },
+	{ 0x73, 0x7770 },
 	{ 0x74, 0x0e00 },
 	{ 0x75, 0x1505 },
 	{ 0x76, 0x0015 },
@@ -125,21 +125,21 @@ static const struct reg_default rt5670_reg[] = {
 	{ 0x83, 0x0000 },
 	{ 0x84, 0x0000 },
 	{ 0x85, 0x0000 },
-	{ 0x86, 0x0008 },
+	{ 0x86, 0x0004 },
 	{ 0x87, 0x0000 },
 	{ 0x88, 0x0000 },
 	{ 0x89, 0x0000 },
 	{ 0x8a, 0x0000 },
 	{ 0x8b, 0x0000 },
-	{ 0x8c, 0x0007 },
+	{ 0x8c, 0x0003 },
 	{ 0x8d, 0x0000 },
 	{ 0x8e, 0x0004 },
 	{ 0x8f, 0x1100 },
 	{ 0x90, 0x0646 },
 	{ 0x91, 0x0c06 },
 	{ 0x93, 0x0000 },
-	{ 0x94, 0x0000 },
-	{ 0x95, 0x0000 },
+	{ 0x94, 0x1270 },
+	{ 0x95, 0x1000 },
 	{ 0x97, 0x0000 },
 	{ 0x98, 0x0000 },
 	{ 0x99, 0x0000 },
@@ -150,11 +150,11 @@ static const struct reg_default rt5670_reg[] = {
 	{ 0x9e, 0x0400 },
 	{ 0xae, 0x7000 },
 	{ 0xaf, 0x0000 },
-	{ 0xb0, 0x6000 },
+	{ 0xb0, 0x7000 },
 	{ 0xb1, 0x0000 },
 	{ 0xb2, 0x0000 },
 	{ 0xb3, 0x001f },
-	{ 0xb4, 0x2206 },
+	{ 0xb4, 0x220c },
 	{ 0xb5, 0x1f00 },
 	{ 0xb6, 0x0000 },
 	{ 0xb7, 0x0000 },
@@ -171,25 +171,25 @@ static const struct reg_default rt5670_reg[] = {
 	{ 0xcf, 0x1813 },
 	{ 0xd0, 0x0690 },
 	{ 0xd1, 0x1c17 },
-	{ 0xd3, 0xb320 },
+	{ 0xd3, 0xa220 },
 	{ 0xd4, 0x0000 },
 	{ 0xd6, 0x0400 },
 	{ 0xd9, 0x0809 },
 	{ 0xda, 0x0000 },
 	{ 0xdb, 0x0001 },
 	{ 0xdc, 0x0049 },
-	{ 0xdd, 0x0009 },
+	{ 0xdd, 0x0024 },
 	{ 0xe6, 0x8000 },
 	{ 0xe7, 0x0000 },
-	{ 0xec, 0xb300 },
+	{ 0xec, 0xa200 },
 	{ 0xed, 0x0000 },
-	{ 0xee, 0xb300 },
+	{ 0xee, 0xa200 },
 	{ 0xef, 0x0000 },
 	{ 0xf8, 0x0000 },
 	{ 0xf9, 0x0000 },
 	{ 0xfa, 0x8010 },
 	{ 0xfb, 0x0033 },
-	{ 0xfc, 0x0080 },
+	{ 0xfc, 0x0100 },
 };
 
 static bool rt5670_volatile_register(struct device *dev, unsigned int reg)
@@ -1877,6 +1877,10 @@ static const struct snd_soc_dapm_route rt5670_dapm_routes[] = {
 	{ "DAC1 MIXR", "DAC1 Switch", "DAC1 R Mux" },
 	{ "DAC1 MIXR", NULL, "DAC Stereo1 Filter" },
 
+	{ "DAC Stereo1 Filter", NULL, "PLL1", is_sys_clk_from_pll },
+	{ "DAC Mono Left Filter", NULL, "PLL1", is_sys_clk_from_pll },
+	{ "DAC Mono Right Filter", NULL, "PLL1", is_sys_clk_from_pll },
+
 	{ "DAC MIX", NULL, "DAC1 MIXL" },
 	{ "DAC MIX", NULL, "DAC1 MIXR" },
 
@@ -1926,14 +1930,10 @@ static const struct snd_soc_dapm_route rt5670_dapm_routes[] = {
 
 	{ "DAC L1", NULL, "DAC L1 Power" },
 	{ "DAC L1", NULL, "Stereo DAC MIXL" },
-	{ "DAC L1", NULL, "PLL1", is_sys_clk_from_pll },
 	{ "DAC R1", NULL, "DAC R1 Power" },
 	{ "DAC R1", NULL, "Stereo DAC MIXR" },
-	{ "DAC R1", NULL, "PLL1", is_sys_clk_from_pll },
 	{ "DAC L2", NULL, "Mono DAC MIXL" },
-	{ "DAC L2", NULL, "PLL1", is_sys_clk_from_pll },
 	{ "DAC R2", NULL, "Mono DAC MIXR" },
-	{ "DAC R2", NULL, "PLL1", is_sys_clk_from_pll },
 
 	{ "OUT MIXL", "BST1 Switch", "BST1" },
 	{ "OUT MIXL", "INL Switch", "INL VOL" },
diff --git a/sound/soc/codecs/sgtl5000.c b/sound/soc/codecs/sgtl5000.c
index 6bb77d76561b..dab9b15304af 100644
--- a/sound/soc/codecs/sgtl5000.c
+++ b/sound/soc/codecs/sgtl5000.c
@@ -1299,8 +1299,7 @@ static int sgtl5000_probe(struct snd_soc_codec *codec)
1299 1299
1300 /* enable small pop, introduce 400ms delay in turning off */ 1300 /* enable small pop, introduce 400ms delay in turning off */
1301 snd_soc_update_bits(codec, SGTL5000_CHIP_REF_CTRL, 1301 snd_soc_update_bits(codec, SGTL5000_CHIP_REF_CTRL,
1302 SGTL5000_SMALL_POP, 1302 SGTL5000_SMALL_POP, 1);
1303 SGTL5000_SMALL_POP);
1304 1303
1305 /* disable short cut detector */ 1304 /* disable short cut detector */
1306 snd_soc_write(codec, SGTL5000_CHIP_SHORT_CTRL, 0); 1305 snd_soc_write(codec, SGTL5000_CHIP_SHORT_CTRL, 0);
diff --git a/sound/soc/codecs/sgtl5000.h b/sound/soc/codecs/sgtl5000.h
index 2f8c88931f69..bd7a344bf8c5 100644
--- a/sound/soc/codecs/sgtl5000.h
+++ b/sound/soc/codecs/sgtl5000.h
@@ -275,7 +275,7 @@
275#define SGTL5000_BIAS_CTRL_MASK 0x000e 275#define SGTL5000_BIAS_CTRL_MASK 0x000e
276#define SGTL5000_BIAS_CTRL_SHIFT 1 276#define SGTL5000_BIAS_CTRL_SHIFT 1
277#define SGTL5000_BIAS_CTRL_WIDTH 3 277#define SGTL5000_BIAS_CTRL_WIDTH 3
278#define SGTL5000_SMALL_POP 0x0001 278#define SGTL5000_SMALL_POP 0
279 279
280/* 280/*
281 * SGTL5000_CHIP_MIC_CTRL 281 * SGTL5000_CHIP_MIC_CTRL
diff --git a/sound/soc/codecs/sigmadsp.c b/sound/soc/codecs/sigmadsp.c
index f2de7e049bc6..81a38dd9af1f 100644
--- a/sound/soc/codecs/sigmadsp.c
+++ b/sound/soc/codecs/sigmadsp.c
@@ -159,6 +159,13 @@ int _process_sigma_firmware(struct device *dev,
159 goto done; 159 goto done;
160 } 160 }
161 161
162 if (ssfw_head->version != 1) {
163 dev_err(dev,
164 "Failed to load firmware: Invalid version %d. Supported firmware versions: 1\n",
165 ssfw_head->version);
166 goto done;
167 }
168
162 crc = crc32(0, fw->data + sizeof(*ssfw_head), 169 crc = crc32(0, fw->data + sizeof(*ssfw_head),
163 fw->size - sizeof(*ssfw_head)); 170 fw->size - sizeof(*ssfw_head));
164 pr_debug("%s: crc=%x\n", __func__, crc); 171 pr_debug("%s: crc=%x\n", __func__, crc);
diff --git a/sound/soc/codecs/stac9766.c b/sound/soc/codecs/stac9766.c
index 53b810d23fea..f37a79ec45e6 100644
--- a/sound/soc/codecs/stac9766.c
+++ b/sound/soc/codecs/stac9766.c
@@ -139,18 +139,19 @@ static const struct snd_kcontrol_new stac9766_snd_ac97_controls[] = {
139static int stac9766_ac97_write(struct snd_soc_codec *codec, unsigned int reg, 139static int stac9766_ac97_write(struct snd_soc_codec *codec, unsigned int reg,
140 unsigned int val) 140 unsigned int val)
141{ 141{
142 struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
142 u16 *cache = codec->reg_cache; 143 u16 *cache = codec->reg_cache;
143 144
144 if (reg > AC97_STAC_PAGE0) { 145 if (reg > AC97_STAC_PAGE0) {
145 stac9766_ac97_write(codec, AC97_INT_PAGING, 0); 146 stac9766_ac97_write(codec, AC97_INT_PAGING, 0);
146 soc_ac97_ops->write(codec->ac97, reg, val); 147 soc_ac97_ops->write(ac97, reg, val);
147 stac9766_ac97_write(codec, AC97_INT_PAGING, 1); 148 stac9766_ac97_write(codec, AC97_INT_PAGING, 1);
148 return 0; 149 return 0;
149 } 150 }
150 if (reg / 2 >= ARRAY_SIZE(stac9766_reg)) 151 if (reg / 2 >= ARRAY_SIZE(stac9766_reg))
151 return -EIO; 152 return -EIO;
152 153
153 soc_ac97_ops->write(codec->ac97, reg, val); 154 soc_ac97_ops->write(ac97, reg, val);
154 cache[reg / 2] = val; 155 cache[reg / 2] = val;
155 return 0; 156 return 0;
156} 157}
@@ -158,11 +159,12 @@ static int stac9766_ac97_write(struct snd_soc_codec *codec, unsigned int reg,
158static unsigned int stac9766_ac97_read(struct snd_soc_codec *codec, 159static unsigned int stac9766_ac97_read(struct snd_soc_codec *codec,
159 unsigned int reg) 160 unsigned int reg)
160{ 161{
162 struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
161 u16 val = 0, *cache = codec->reg_cache; 163 u16 val = 0, *cache = codec->reg_cache;
162 164
163 if (reg > AC97_STAC_PAGE0) { 165 if (reg > AC97_STAC_PAGE0) {
164 stac9766_ac97_write(codec, AC97_INT_PAGING, 0); 166 stac9766_ac97_write(codec, AC97_INT_PAGING, 0);
165 val = soc_ac97_ops->read(codec->ac97, reg - AC97_STAC_PAGE0); 167 val = soc_ac97_ops->read(ac97, reg - AC97_STAC_PAGE0);
166 stac9766_ac97_write(codec, AC97_INT_PAGING, 1); 168 stac9766_ac97_write(codec, AC97_INT_PAGING, 1);
167 return val; 169 return val;
168 } 170 }
@@ -173,7 +175,7 @@ static unsigned int stac9766_ac97_read(struct snd_soc_codec *codec,
173 reg == AC97_INT_PAGING || reg == AC97_VENDOR_ID1 || 175 reg == AC97_INT_PAGING || reg == AC97_VENDOR_ID1 ||
174 reg == AC97_VENDOR_ID2) { 176 reg == AC97_VENDOR_ID2) {
175 177
176 val = soc_ac97_ops->read(codec->ac97, reg); 178 val = soc_ac97_ops->read(ac97, reg);
177 return val; 179 return val;
178 } 180 }
179 return cache[reg / 2]; 181 return cache[reg / 2];
@@ -240,15 +242,17 @@ static int stac9766_set_bias_level(struct snd_soc_codec *codec,
240 242
241static int stac9766_reset(struct snd_soc_codec *codec, int try_warm) 243static int stac9766_reset(struct snd_soc_codec *codec, int try_warm)
242{ 244{
245 struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
246
243 if (try_warm && soc_ac97_ops->warm_reset) { 247 if (try_warm && soc_ac97_ops->warm_reset) {
244 soc_ac97_ops->warm_reset(codec->ac97); 248 soc_ac97_ops->warm_reset(ac97);
245 if (stac9766_ac97_read(codec, 0) == stac9766_reg[0]) 249 if (stac9766_ac97_read(codec, 0) == stac9766_reg[0])
246 return 1; 250 return 1;
247 } 251 }
248 252
249 soc_ac97_ops->reset(codec->ac97); 253 soc_ac97_ops->reset(ac97);
250 if (soc_ac97_ops->warm_reset) 254 if (soc_ac97_ops->warm_reset)
251 soc_ac97_ops->warm_reset(codec->ac97); 255 soc_ac97_ops->warm_reset(ac97);
252 if (stac9766_ac97_read(codec, 0) != stac9766_reg[0]) 256 if (stac9766_ac97_read(codec, 0) != stac9766_reg[0])
253 return -EIO; 257 return -EIO;
254 return 0; 258 return 0;
@@ -262,6 +266,7 @@ static int stac9766_codec_suspend(struct snd_soc_codec *codec)
262 266
263static int stac9766_codec_resume(struct snd_soc_codec *codec) 267static int stac9766_codec_resume(struct snd_soc_codec *codec)
264{ 268{
269 struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
265 u16 id, reset; 270 u16 id, reset;
266 271
267 reset = 0; 272 reset = 0;
@@ -271,8 +276,8 @@ reset:
271 printk(KERN_ERR "stac9766 failed to resume"); 276 printk(KERN_ERR "stac9766 failed to resume");
272 return -EIO; 277 return -EIO;
273 } 278 }
274 codec->ac97->bus->ops->warm_reset(codec->ac97); 279 ac97->bus->ops->warm_reset(ac97);
275 id = soc_ac97_ops->read(codec->ac97, AC97_VENDOR_ID2); 280 id = soc_ac97_ops->read(ac97, AC97_VENDOR_ID2);
276 if (id != 0x4c13) { 281 if (id != 0x4c13) {
277 stac9766_reset(codec, 0); 282 stac9766_reset(codec, 0);
278 reset++; 283 reset++;
@@ -294,7 +299,6 @@ static const struct snd_soc_dai_ops stac9766_dai_ops_digital = {
294static struct snd_soc_dai_driver stac9766_dai[] = { 299static struct snd_soc_dai_driver stac9766_dai[] = {
295{ 300{
296 .name = "stac9766-hifi-analog", 301 .name = "stac9766-hifi-analog",
297 .ac97_control = 1,
298 302
 299 /* stream capabilities */ 303 /* stream capabilities */
300 .playback = { 304 .playback = {
@@ -316,7 +320,6 @@ static struct snd_soc_dai_driver stac9766_dai[] = {
316}, 320},
317{ 321{
318 .name = "stac9766-hifi-IEC958", 322 .name = "stac9766-hifi-IEC958",
319 .ac97_control = 1,
320 323
 321 /* stream capabilities */ 324 /* stream capabilities */
322 .playback = { 325 .playback = {
@@ -334,11 +337,14 @@ static struct snd_soc_dai_driver stac9766_dai[] = {
334 337
335static int stac9766_codec_probe(struct snd_soc_codec *codec) 338static int stac9766_codec_probe(struct snd_soc_codec *codec)
336{ 339{
340 struct snd_ac97 *ac97;
337 int ret = 0; 341 int ret = 0;
338 342
339 ret = snd_soc_new_ac97_codec(codec, soc_ac97_ops, 0); 343 ac97 = snd_soc_new_ac97_codec(codec);
340 if (ret < 0) 344 if (IS_ERR(ac97))
341 goto codec_err; 345 return PTR_ERR(ac97);
346
347 snd_soc_codec_set_drvdata(codec, ac97);
342 348
343 /* do a cold reset for the controller and then try 349 /* do a cold reset for the controller and then try
344 * a warm reset followed by an optional cold reset for codec */ 350 * a warm reset followed by an optional cold reset for codec */
@@ -357,13 +363,15 @@ static int stac9766_codec_probe(struct snd_soc_codec *codec)
357 return 0; 363 return 0;
358 364
359codec_err: 365codec_err:
360 snd_soc_free_ac97_codec(codec); 366 snd_soc_free_ac97_codec(ac97);
361 return ret; 367 return ret;
362} 368}
363 369
364static int stac9766_codec_remove(struct snd_soc_codec *codec) 370static int stac9766_codec_remove(struct snd_soc_codec *codec)
365{ 371{
366 snd_soc_free_ac97_codec(codec); 372 struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
373
374 snd_soc_free_ac97_codec(ac97);
367 return 0; 375 return 0;
368} 376}
369 377
diff --git a/sound/soc/codecs/tlv320aic31xx.c b/sound/soc/codecs/tlv320aic31xx.c
index 145fe5b253d4..93de5dd0a7b9 100644
--- a/sound/soc/codecs/tlv320aic31xx.c
+++ b/sound/soc/codecs/tlv320aic31xx.c
@@ -911,12 +911,13 @@ static int aic31xx_set_dai_sysclk(struct snd_soc_dai *codec_dai,
911 } 911 }
912 aic31xx->p_div = i; 912 aic31xx->p_div = i;
913 913
914 for (i = 0; aic31xx_divs[i].mclk_p != freq/aic31xx->p_div; i++) { 914 for (i = 0; i < ARRAY_SIZE(aic31xx_divs) &&
915 if (i == ARRAY_SIZE(aic31xx_divs)) { 915 aic31xx_divs[i].mclk_p != freq/aic31xx->p_div; i++)
916 dev_err(aic31xx->dev, "%s: Unsupported frequency %d\n", 916 ;
917 __func__, freq); 917 if (i == ARRAY_SIZE(aic31xx_divs)) {
918 return -EINVAL; 918 dev_err(aic31xx->dev, "%s: Unsupported frequency %d\n",
919 } 919 __func__, freq);
920 return -EINVAL;
920 } 921 }
921 922
922 /* set clock on MCLK, BCLK, or GPIO1 as PLL input */ 923 /* set clock on MCLK, BCLK, or GPIO1 as PLL input */
diff --git a/sound/soc/codecs/wm5102.c b/sound/soc/codecs/wm5102.c
index f60234962527..d78fb8dffc8c 100644
--- a/sound/soc/codecs/wm5102.c
+++ b/sound/soc/codecs/wm5102.c
@@ -619,10 +619,10 @@ static int wm5102_out_comp_coeff_get(struct snd_kcontrol *kcontrol,
619 struct arizona *arizona = dev_get_drvdata(codec->dev->parent); 619 struct arizona *arizona = dev_get_drvdata(codec->dev->parent);
620 uint16_t data; 620 uint16_t data;
621 621
622 mutex_lock(&codec->mutex); 622 mutex_lock(&arizona->dac_comp_lock);
623 data = cpu_to_be16(arizona->dac_comp_coeff); 623 data = cpu_to_be16(arizona->dac_comp_coeff);
624 memcpy(ucontrol->value.bytes.data, &data, sizeof(data)); 624 memcpy(ucontrol->value.bytes.data, &data, sizeof(data));
625 mutex_unlock(&codec->mutex); 625 mutex_unlock(&arizona->dac_comp_lock);
626 626
627 return 0; 627 return 0;
628} 628}
@@ -633,11 +633,11 @@ static int wm5102_out_comp_coeff_put(struct snd_kcontrol *kcontrol,
633 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 633 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
634 struct arizona *arizona = dev_get_drvdata(codec->dev->parent); 634 struct arizona *arizona = dev_get_drvdata(codec->dev->parent);
635 635
636 mutex_lock(&codec->mutex); 636 mutex_lock(&arizona->dac_comp_lock);
637 memcpy(&arizona->dac_comp_coeff, ucontrol->value.bytes.data, 637 memcpy(&arizona->dac_comp_coeff, ucontrol->value.bytes.data,
638 sizeof(arizona->dac_comp_coeff)); 638 sizeof(arizona->dac_comp_coeff));
639 arizona->dac_comp_coeff = be16_to_cpu(arizona->dac_comp_coeff); 639 arizona->dac_comp_coeff = be16_to_cpu(arizona->dac_comp_coeff);
640 mutex_unlock(&codec->mutex); 640 mutex_unlock(&arizona->dac_comp_lock);
641 641
642 return 0; 642 return 0;
643} 643}
@@ -648,9 +648,9 @@ static int wm5102_out_comp_switch_get(struct snd_kcontrol *kcontrol,
648 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 648 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
649 struct arizona *arizona = dev_get_drvdata(codec->dev->parent); 649 struct arizona *arizona = dev_get_drvdata(codec->dev->parent);
650 650
651 mutex_lock(&codec->mutex); 651 mutex_lock(&arizona->dac_comp_lock);
652 ucontrol->value.integer.value[0] = arizona->dac_comp_enabled; 652 ucontrol->value.integer.value[0] = arizona->dac_comp_enabled;
653 mutex_unlock(&codec->mutex); 653 mutex_unlock(&arizona->dac_comp_lock);
654 654
655 return 0; 655 return 0;
656} 656}
@@ -661,9 +661,9 @@ static int wm5102_out_comp_switch_put(struct snd_kcontrol *kcontrol,
661 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol); 661 struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
662 struct arizona *arizona = dev_get_drvdata(codec->dev->parent); 662 struct arizona *arizona = dev_get_drvdata(codec->dev->parent);
663 663
664 mutex_lock(&codec->mutex); 664 mutex_lock(&arizona->dac_comp_lock);
665 arizona->dac_comp_enabled = ucontrol->value.integer.value[0]; 665 arizona->dac_comp_enabled = ucontrol->value.integer.value[0];
666 mutex_unlock(&codec->mutex); 666 mutex_unlock(&arizona->dac_comp_lock);
667 667
668 return 0; 668 return 0;
669} 669}
@@ -1900,6 +1900,8 @@ static int wm5102_probe(struct platform_device *pdev)
1900 return -ENOMEM; 1900 return -ENOMEM;
1901 platform_set_drvdata(pdev, wm5102); 1901 platform_set_drvdata(pdev, wm5102);
1902 1902
1903 mutex_init(&arizona->dac_comp_lock);
1904
1903 wm5102->core.arizona = arizona; 1905 wm5102->core.arizona = arizona;
1904 wm5102->core.num_inputs = 6; 1906 wm5102->core.num_inputs = 6;
1905 1907
diff --git a/sound/soc/codecs/wm8731.c b/sound/soc/codecs/wm8731.c
index eebb3280bfad..5dae9a6f8076 100644
--- a/sound/soc/codecs/wm8731.c
+++ b/sound/soc/codecs/wm8731.c
@@ -24,6 +24,7 @@
24#include <linux/regulator/consumer.h> 24#include <linux/regulator/consumer.h>
25#include <linux/spi/spi.h> 25#include <linux/spi/spi.h>
26#include <linux/of_device.h> 26#include <linux/of_device.h>
27#include <linux/mutex.h>
27#include <sound/core.h> 28#include <sound/core.h>
28#include <sound/pcm.h> 29#include <sound/pcm.h>
29#include <sound/pcm_params.h> 30#include <sound/pcm_params.h>
@@ -50,6 +51,8 @@ struct wm8731_priv {
50 int sysclk_type; 51 int sysclk_type;
51 int playback_fs; 52 int playback_fs;
52 bool deemph; 53 bool deemph;
54
55 struct mutex lock;
53}; 56};
54 57
55 58
@@ -138,7 +141,7 @@ static int wm8731_put_deemph(struct snd_kcontrol *kcontrol,
138 if (deemph > 1) 141 if (deemph > 1)
139 return -EINVAL; 142 return -EINVAL;
140 143
141 mutex_lock(&codec->mutex); 144 mutex_lock(&wm8731->lock);
142 if (wm8731->deemph != deemph) { 145 if (wm8731->deemph != deemph) {
143 wm8731->deemph = deemph; 146 wm8731->deemph = deemph;
144 147
@@ -146,7 +149,7 @@ static int wm8731_put_deemph(struct snd_kcontrol *kcontrol,
146 149
147 ret = 1; 150 ret = 1;
148 } 151 }
149 mutex_unlock(&codec->mutex); 152 mutex_unlock(&wm8731->lock);
150 153
151 return ret; 154 return ret;
152} 155}
@@ -685,6 +688,8 @@ static int wm8731_spi_probe(struct spi_device *spi)
685 if (wm8731 == NULL) 688 if (wm8731 == NULL)
686 return -ENOMEM; 689 return -ENOMEM;
687 690
691 mutex_init(&wm8731->lock);
692
688 wm8731->regmap = devm_regmap_init_spi(spi, &wm8731_regmap); 693 wm8731->regmap = devm_regmap_init_spi(spi, &wm8731_regmap);
689 if (IS_ERR(wm8731->regmap)) { 694 if (IS_ERR(wm8731->regmap)) {
690 ret = PTR_ERR(wm8731->regmap); 695 ret = PTR_ERR(wm8731->regmap);
diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c
index c038b3e04398..ffbe6df3453a 100644
--- a/sound/soc/codecs/wm8903.c
+++ b/sound/soc/codecs/wm8903.c
@@ -26,6 +26,7 @@
26#include <linux/regmap.h> 26#include <linux/regmap.h>
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/irq.h> 28#include <linux/irq.h>
29#include <linux/mutex.h>
29#include <sound/core.h> 30#include <sound/core.h>
30#include <sound/jack.h> 31#include <sound/jack.h>
31#include <sound/pcm.h> 32#include <sound/pcm.h>
@@ -123,6 +124,7 @@ struct wm8903_priv {
123 int sysclk; 124 int sysclk;
124 int irq; 125 int irq;
125 126
127 struct mutex lock;
126 int fs; 128 int fs;
127 int deemph; 129 int deemph;
128 130
@@ -457,7 +459,7 @@ static int wm8903_put_deemph(struct snd_kcontrol *kcontrol,
457 if (deemph > 1) 459 if (deemph > 1)
458 return -EINVAL; 460 return -EINVAL;
459 461
460 mutex_lock(&codec->mutex); 462 mutex_lock(&wm8903->lock);
461 if (wm8903->deemph != deemph) { 463 if (wm8903->deemph != deemph) {
462 wm8903->deemph = deemph; 464 wm8903->deemph = deemph;
463 465
@@ -465,7 +467,7 @@ static int wm8903_put_deemph(struct snd_kcontrol *kcontrol,
465 467
466 ret = 1; 468 ret = 1;
467 } 469 }
468 mutex_unlock(&codec->mutex); 470 mutex_unlock(&wm8903->lock);
469 471
470 return ret; 472 return ret;
471} 473}
@@ -2023,6 +2025,8 @@ static int wm8903_i2c_probe(struct i2c_client *i2c,
2023 GFP_KERNEL); 2025 GFP_KERNEL);
2024 if (wm8903 == NULL) 2026 if (wm8903 == NULL)
2025 return -ENOMEM; 2027 return -ENOMEM;
2028
2029 mutex_init(&wm8903->lock);
2026 wm8903->dev = &i2c->dev; 2030 wm8903->dev = &i2c->dev;
2027 2031
2028 wm8903->regmap = devm_regmap_init_i2c(i2c, &wm8903_regmap); 2032 wm8903->regmap = devm_regmap_init_i2c(i2c, &wm8903_regmap);
diff --git a/sound/soc/codecs/wm8958-dsp2.c b/sound/soc/codecs/wm8958-dsp2.c
index 0dada7f0105e..3cbc82b33292 100644
--- a/sound/soc/codecs/wm8958-dsp2.c
+++ b/sound/soc/codecs/wm8958-dsp2.c
@@ -867,9 +867,9 @@ static void wm8958_enh_eq_loaded(const struct firmware *fw, void *context)
867 struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); 867 struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
868 868
869 if (fw && (wm8958_dsp2_fw(codec, "ENH_EQ", fw, true) == 0)) { 869 if (fw && (wm8958_dsp2_fw(codec, "ENH_EQ", fw, true) == 0)) {
870 mutex_lock(&codec->mutex); 870 mutex_lock(&wm8994->fw_lock);
871 wm8994->enh_eq = fw; 871 wm8994->enh_eq = fw;
872 mutex_unlock(&codec->mutex); 872 mutex_unlock(&wm8994->fw_lock);
873 } 873 }
874} 874}
875 875
@@ -879,9 +879,9 @@ static void wm8958_mbc_vss_loaded(const struct firmware *fw, void *context)
879 struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); 879 struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
880 880
881 if (fw && (wm8958_dsp2_fw(codec, "MBC+VSS", fw, true) == 0)) { 881 if (fw && (wm8958_dsp2_fw(codec, "MBC+VSS", fw, true) == 0)) {
882 mutex_lock(&codec->mutex); 882 mutex_lock(&wm8994->fw_lock);
883 wm8994->mbc_vss = fw; 883 wm8994->mbc_vss = fw;
884 mutex_unlock(&codec->mutex); 884 mutex_unlock(&wm8994->fw_lock);
885 } 885 }
886} 886}
887 887
@@ -891,9 +891,9 @@ static void wm8958_mbc_loaded(const struct firmware *fw, void *context)
891 struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec); 891 struct wm8994_priv *wm8994 = snd_soc_codec_get_drvdata(codec);
892 892
893 if (fw && (wm8958_dsp2_fw(codec, "MBC", fw, true) == 0)) { 893 if (fw && (wm8958_dsp2_fw(codec, "MBC", fw, true) == 0)) {
894 mutex_lock(&codec->mutex); 894 mutex_lock(&wm8994->fw_lock);
895 wm8994->mbc = fw; 895 wm8994->mbc = fw;
896 mutex_unlock(&codec->mutex); 896 mutex_unlock(&wm8994->fw_lock);
897 } 897 }
898} 898}
899 899
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index 9077411e62ce..61ca4a7cb6ea 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -26,6 +26,7 @@
26#include <linux/regulator/consumer.h> 26#include <linux/regulator/consumer.h>
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/workqueue.h> 28#include <linux/workqueue.h>
29#include <linux/mutex.h>
29#include <sound/core.h> 30#include <sound/core.h>
30#include <sound/jack.h> 31#include <sound/jack.h>
31#include <sound/pcm.h> 32#include <sound/pcm.h>
@@ -67,6 +68,7 @@ struct wm8962_priv {
67 int fll_fref; 68 int fll_fref;
68 int fll_fout; 69 int fll_fout;
69 70
71 struct mutex dsp2_ena_lock;
70 u16 dsp2_ena; 72 u16 dsp2_ena;
71 73
72 struct delayed_work mic_work; 74 struct delayed_work mic_work;
@@ -1570,7 +1572,7 @@ static int wm8962_dsp2_ena_put(struct snd_kcontrol *kcontrol,
1570 int dsp2_running = snd_soc_read(codec, WM8962_DSP2_POWER_MANAGEMENT) & 1572 int dsp2_running = snd_soc_read(codec, WM8962_DSP2_POWER_MANAGEMENT) &
1571 WM8962_DSP2_ENA; 1573 WM8962_DSP2_ENA;
1572 1574
1573 mutex_lock(&codec->mutex); 1575 mutex_lock(&wm8962->dsp2_ena_lock);
1574 1576
1575 if (ucontrol->value.integer.value[0]) 1577 if (ucontrol->value.integer.value[0])
1576 wm8962->dsp2_ena |= 1 << shift; 1578 wm8962->dsp2_ena |= 1 << shift;
@@ -1590,7 +1592,7 @@ static int wm8962_dsp2_ena_put(struct snd_kcontrol *kcontrol,
1590 } 1592 }
1591 1593
1592out: 1594out:
1593 mutex_unlock(&codec->mutex); 1595 mutex_unlock(&wm8962->dsp2_ena_lock);
1594 1596
1595 return ret; 1597 return ret;
1596} 1598}
@@ -3557,6 +3559,8 @@ static int wm8962_i2c_probe(struct i2c_client *i2c,
3557 if (wm8962 == NULL) 3559 if (wm8962 == NULL)
3558 return -ENOMEM; 3560 return -ENOMEM;
3559 3561
3562 mutex_init(&wm8962->dsp2_ena_lock);
3563
3560 i2c_set_clientdata(i2c, wm8962); 3564 i2c_set_clientdata(i2c, wm8962);
3561 3565
3562 INIT_DELAYED_WORK(&wm8962->mic_work, wm8962_mic_work); 3566 INIT_DELAYED_WORK(&wm8962->mic_work, wm8962_mic_work);
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index 1fcb9f3f3097..dbca6e0cc93a 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -4457,6 +4457,8 @@ static int wm8994_probe(struct platform_device *pdev)
4457 return -ENOMEM; 4457 return -ENOMEM;
4458 platform_set_drvdata(pdev, wm8994); 4458 platform_set_drvdata(pdev, wm8994);
4459 4459
4460 mutex_init(&wm8994->fw_lock);
4461
4460 wm8994->wm8994 = dev_get_drvdata(pdev->dev.parent); 4462 wm8994->wm8994 = dev_get_drvdata(pdev->dev.parent);
4461 4463
4462 pm_runtime_enable(&pdev->dev); 4464 pm_runtime_enable(&pdev->dev);
diff --git a/sound/soc/codecs/wm8994.h b/sound/soc/codecs/wm8994.h
index 6536f8d45ac6..dd73387b1cc4 100644
--- a/sound/soc/codecs/wm8994.h
+++ b/sound/soc/codecs/wm8994.h
@@ -13,6 +13,7 @@
13#include <linux/firmware.h> 13#include <linux/firmware.h>
14#include <linux/completion.h> 14#include <linux/completion.h>
15#include <linux/workqueue.h> 15#include <linux/workqueue.h>
16#include <linux/mutex.h>
16 17
17#include "wm_hubs.h" 18#include "wm_hubs.h"
18 19
@@ -156,6 +157,7 @@ struct wm8994_priv {
156 unsigned int aif1clk_disable:1; 157 unsigned int aif1clk_disable:1;
157 unsigned int aif2clk_disable:1; 158 unsigned int aif2clk_disable:1;
158 159
160 struct mutex fw_lock;
159 int dsp_active; 161 int dsp_active;
160 const struct firmware *cur_fw; 162 const struct firmware *cur_fw;
161 const struct firmware *mbc; 163 const struct firmware *mbc;
diff --git a/sound/soc/codecs/wm9705.c b/sound/soc/codecs/wm9705.c
index c0b7f45dfa37..d3a800fa6f06 100644
--- a/sound/soc/codecs/wm9705.c
+++ b/sound/soc/codecs/wm9705.c
@@ -203,13 +203,14 @@ static const struct snd_soc_dapm_route wm9705_audio_map[] = {
203/* We use a register cache to enhance read performance. */ 203/* We use a register cache to enhance read performance. */
204static unsigned int ac97_read(struct snd_soc_codec *codec, unsigned int reg) 204static unsigned int ac97_read(struct snd_soc_codec *codec, unsigned int reg)
205{ 205{
206 struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
206 u16 *cache = codec->reg_cache; 207 u16 *cache = codec->reg_cache;
207 208
208 switch (reg) { 209 switch (reg) {
209 case AC97_RESET: 210 case AC97_RESET:
210 case AC97_VENDOR_ID1: 211 case AC97_VENDOR_ID1:
211 case AC97_VENDOR_ID2: 212 case AC97_VENDOR_ID2:
212 return soc_ac97_ops->read(codec->ac97, reg); 213 return soc_ac97_ops->read(ac97, reg);
213 default: 214 default:
214 reg = reg >> 1; 215 reg = reg >> 1;
215 216
@@ -223,9 +224,10 @@ static unsigned int ac97_read(struct snd_soc_codec *codec, unsigned int reg)
223static int ac97_write(struct snd_soc_codec *codec, unsigned int reg, 224static int ac97_write(struct snd_soc_codec *codec, unsigned int reg,
224 unsigned int val) 225 unsigned int val)
225{ 226{
227 struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
226 u16 *cache = codec->reg_cache; 228 u16 *cache = codec->reg_cache;
227 229
228 soc_ac97_ops->write(codec->ac97, reg, val); 230 soc_ac97_ops->write(ac97, reg, val);
229 reg = reg >> 1; 231 reg = reg >> 1;
230 if (reg < (ARRAY_SIZE(wm9705_reg))) 232 if (reg < (ARRAY_SIZE(wm9705_reg)))
231 cache[reg] = val; 233 cache[reg] = val;
@@ -263,7 +265,6 @@ static const struct snd_soc_dai_ops wm9705_dai_ops = {
263static struct snd_soc_dai_driver wm9705_dai[] = { 265static struct snd_soc_dai_driver wm9705_dai[] = {
264 { 266 {
265 .name = "wm9705-hifi", 267 .name = "wm9705-hifi",
266 .ac97_control = 1,
267 .playback = { 268 .playback = {
268 .stream_name = "HiFi Playback", 269 .stream_name = "HiFi Playback",
269 .channels_min = 1, 270 .channels_min = 1,
@@ -294,36 +295,41 @@ static struct snd_soc_dai_driver wm9705_dai[] = {
294 295
295static int wm9705_reset(struct snd_soc_codec *codec) 296static int wm9705_reset(struct snd_soc_codec *codec)
296{ 297{
298 struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
299
297 if (soc_ac97_ops->reset) { 300 if (soc_ac97_ops->reset) {
298 soc_ac97_ops->reset(codec->ac97); 301 soc_ac97_ops->reset(ac97);
299 if (ac97_read(codec, 0) == wm9705_reg[0]) 302 if (ac97_read(codec, 0) == wm9705_reg[0])
300 return 0; /* Success */ 303 return 0; /* Success */
301 } 304 }
302 305
306 dev_err(codec->dev, "Failed to reset: AC97 link error\n");
307
303 return -EIO; 308 return -EIO;
304} 309}
305 310
306#ifdef CONFIG_PM 311#ifdef CONFIG_PM
307static int wm9705_soc_suspend(struct snd_soc_codec *codec) 312static int wm9705_soc_suspend(struct snd_soc_codec *codec)
308{ 313{
309 soc_ac97_ops->write(codec->ac97, AC97_POWERDOWN, 0xffff); 314 struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
315
316 soc_ac97_ops->write(ac97, AC97_POWERDOWN, 0xffff);
310 317
311 return 0; 318 return 0;
312} 319}
313 320
314static int wm9705_soc_resume(struct snd_soc_codec *codec) 321static int wm9705_soc_resume(struct snd_soc_codec *codec)
315{ 322{
323 struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
316 int i, ret; 324 int i, ret;
317 u16 *cache = codec->reg_cache; 325 u16 *cache = codec->reg_cache;
318 326
319 ret = wm9705_reset(codec); 327 ret = wm9705_reset(codec);
320 if (ret < 0) { 328 if (ret < 0)
321 printk(KERN_ERR "could not reset AC97 codec\n");
322 return ret; 329 return ret;
323 }
324 330
325 for (i = 2; i < ARRAY_SIZE(wm9705_reg) << 1; i += 2) { 331 for (i = 2; i < ARRAY_SIZE(wm9705_reg) << 1; i += 2) {
326 soc_ac97_ops->write(codec->ac97, i, cache[i>>1]); 332 soc_ac97_ops->write(ac97, i, cache[i>>1]);
327 } 333 }
328 334
329 return 0; 335 return 0;
@@ -335,31 +341,34 @@ static int wm9705_soc_resume(struct snd_soc_codec *codec)
335 341
336static int wm9705_soc_probe(struct snd_soc_codec *codec) 342static int wm9705_soc_probe(struct snd_soc_codec *codec)
337{ 343{
344 struct snd_ac97 *ac97;
338 int ret = 0; 345 int ret = 0;
339 346
340 ret = snd_soc_new_ac97_codec(codec, soc_ac97_ops, 0); 347 ac97 = snd_soc_new_ac97_codec(codec);
341 if (ret < 0) { 348 if (IS_ERR(ac97)) {
342 printk(KERN_ERR "wm9705: failed to register AC97 codec\n"); 349 ret = PTR_ERR(ac97);
350 dev_err(codec->dev, "Failed to register AC97 codec\n");
343 return ret; 351 return ret;
344 } 352 }
345 353
354 snd_soc_codec_set_drvdata(codec, ac97);
355
346 ret = wm9705_reset(codec); 356 ret = wm9705_reset(codec);
347 if (ret) 357 if (ret)
348 goto reset_err; 358 goto reset_err;
349 359
350 snd_soc_add_codec_controls(codec, wm9705_snd_ac97_controls,
351 ARRAY_SIZE(wm9705_snd_ac97_controls));
352
353 return 0; 360 return 0;
354 361
355reset_err: 362reset_err:
356 snd_soc_free_ac97_codec(codec); 363 snd_soc_free_ac97_codec(ac97);
357 return ret; 364 return ret;
358} 365}
359 366
360static int wm9705_soc_remove(struct snd_soc_codec *codec) 367static int wm9705_soc_remove(struct snd_soc_codec *codec)
361{ 368{
362 snd_soc_free_ac97_codec(codec); 369 struct snd_ac97 *ac97 = snd_soc_codec_get_drvdata(codec);
370
371 snd_soc_free_ac97_codec(ac97);
363 return 0; 372 return 0;
364} 373}
365 374
@@ -374,6 +383,9 @@ static struct snd_soc_codec_driver soc_codec_dev_wm9705 = {
374 .reg_word_size = sizeof(u16), 383 .reg_word_size = sizeof(u16),
375 .reg_cache_step = 2, 384 .reg_cache_step = 2,
376 .reg_cache_default = wm9705_reg, 385 .reg_cache_default = wm9705_reg,
386
387 .controls = wm9705_snd_ac97_controls,
388 .num_controls = ARRAY_SIZE(wm9705_snd_ac97_controls),
377 .dapm_widgets = wm9705_dapm_widgets, 389 .dapm_widgets = wm9705_dapm_widgets,
378 .num_dapm_widgets = ARRAY_SIZE(wm9705_dapm_widgets), 390 .num_dapm_widgets = ARRAY_SIZE(wm9705_dapm_widgets),
379 .dapm_routes = wm9705_audio_map, 391 .dapm_routes = wm9705_audio_map,
diff --git a/sound/soc/codecs/wm9712.c b/sound/soc/codecs/wm9712.c
index c5eb746087b4..52a211be5b47 100644
--- a/sound/soc/codecs/wm9712.c
+++ b/sound/soc/codecs/wm9712.c
@@ -23,6 +23,12 @@
23#include <sound/tlv.h> 23#include <sound/tlv.h>
24#include "wm9712.h" 24#include "wm9712.h"
25 25
26struct wm9712_priv {
27 struct snd_ac97 *ac97;
28 unsigned int hp_mixer[2];
29 struct mutex lock;
30};
31
26static unsigned int ac97_read(struct snd_soc_codec *codec, 32static unsigned int ac97_read(struct snd_soc_codec *codec,
27 unsigned int reg); 33 unsigned int reg);
28static int ac97_write(struct snd_soc_codec *codec, 34static int ac97_write(struct snd_soc_codec *codec,
@@ -48,12 +54,10 @@ static const u16 wm9712_reg[] = {
48 0x0000, 0x0000, 0x0000, 0x0000, /* 6e */ 54 0x0000, 0x0000, 0x0000, 0x0000, /* 6e */
49 0x0000, 0x0000, 0x0000, 0x0006, /* 76 */ 55 0x0000, 0x0000, 0x0000, 0x0006, /* 76 */
50 0x0001, 0x0000, 0x574d, 0x4c12, /* 7e */ 56 0x0001, 0x0000, 0x574d, 0x4c12, /* 7e */
51 0x0000, 0x0000 /* virtual hp mixers */
52}; 57};
53 58
54/* virtual HP mixers regs */ 59#define HPL_MIXER 0x0
55#define HPL_MIXER 0x80 60#define HPR_MIXER 0x1
56#define HPR_MIXER 0x82
57 61
58static const char *wm9712_alc_select[] = {"None", "Left", "Right", "Stereo"}; 62static const char *wm9712_alc_select[] = {"None", "Left", "Right", "Stereo"};
59static const char *wm9712_alc_mux[] = {"Stereo", "Left", "Right", "None"}; 63static const char *wm9712_alc_mux[] = {"Stereo", "Left", "Right", "None"};
@@ -157,75 +161,108 @@ SOC_SINGLE_TLV("Mic 2 Volume", AC97_MIC, 0, 31, 1, main_tlv),
157SOC_SINGLE_TLV("Mic Boost Volume", AC97_MIC, 7, 1, 0, boost_tlv), 161SOC_SINGLE_TLV("Mic Boost Volume", AC97_MIC, 7, 1, 0, boost_tlv),
158}; 162};
159 163
164static const unsigned int wm9712_mixer_mute_regs[] = {
165 AC97_VIDEO,
166 AC97_PCM,
167 AC97_LINE,
168 AC97_PHONE,
169 AC97_CD,
170 AC97_PC_BEEP,
171};
172
160/* We have to create a fake left and right HP mixers because 173/* We have to create a fake left and right HP mixers because
161 * the codec only has a single control that is shared by both channels. 174 * the codec only has a single control that is shared by both channels.
162 * This makes it impossible to determine the audio path. 175 * This makes it impossible to determine the audio path.
163 */ 176 */
164static int mixer_event(struct snd_soc_dapm_widget *w, 177static int wm9712_hp_mixer_put(struct snd_kcontrol *kcontrol,
165 struct snd_kcontrol *k, int event) 178 struct snd_ctl_elem_value *ucontrol)
166{ 179{
167 u16 l, r, beep, line, phone, mic, pcm, aux; 180 struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol);
168 181 struct snd_soc_codec *codec = snd_soc_dapm_to_codec(dapm);
169 l = ac97_read(w->codec, HPL_MIXER); 182 struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec);
170 r = ac97_read(w->codec, HPR_MIXER); 183 unsigned int val = ucontrol->value.enumerated.item[0];
171 beep = ac97_read(w->codec, AC97_PC_BEEP); 184 struct soc_mixer_control *mc =
172 mic = ac97_read(w->codec, AC97_VIDEO); 185 (struct soc_mixer_control *)kcontrol->private_value;
173 phone = ac97_read(w->codec, AC97_PHONE); 186 unsigned int mixer, mask, shift, old;
174 line = ac97_read(w->codec, AC97_LINE); 187 struct snd_soc_dapm_update update;
175 pcm = ac97_read(w->codec, AC97_PCM); 188 bool change;
176 aux = ac97_read(w->codec, AC97_CD); 189
177 190 mixer = mc->shift >> 8;
178 if (l & 0x1 || r & 0x1) 191 shift = mc->shift & 0xff;
179 ac97_write(w->codec, AC97_VIDEO, mic & 0x7fff); 192 mask = 1 << shift;
193
194 mutex_lock(&wm9712->lock);
195 old = wm9712->hp_mixer[mixer];
196 if (ucontrol->value.enumerated.item[0])
197 wm9712->hp_mixer[mixer] |= mask;
180 else 198 else
181 ac97_write(w->codec, AC97_VIDEO, mic | 0x8000); 199 wm9712->hp_mixer[mixer] &= ~mask;
200
201 change = old != wm9712->hp_mixer[mixer];
202 if (change) {
203 update.kcontrol = kcontrol;
204 update.reg = wm9712_mixer_mute_regs[shift];
205 update.mask = 0x8000;
206 if ((wm9712->hp_mixer[0] & mask) ||
207 (wm9712->hp_mixer[1] & mask))
208 update.val = 0x0;
209 else
210 update.val = 0x8000;
211
212 snd_soc_dapm_mixer_update_power(dapm, kcontrol, val,
213 &update);
214 }
182 215
183 if (l & 0x2 || r & 0x2) 216 mutex_unlock(&wm9712->lock);
184 ac97_write(w->codec, AC97_PCM, pcm & 0x7fff);
185 else
186 ac97_write(w->codec, AC97_PCM, pcm | 0x8000);
187 217
188 if (l & 0x4 || r & 0x4) 218 return change;
189 ac97_write(w->codec, AC97_LINE, line & 0x7fff); 219}
190 else
191 ac97_write(w->codec, AC97_LINE, line | 0x8000);
192 220
193 if (l & 0x8 || r & 0x8) 221static int wm9712_hp_mixer_get(struct snd_kcontrol *kcontrol,
194 ac97_write(w->codec, AC97_PHONE, phone & 0x7fff); 222 struct snd_ctl_elem_value *ucontrol)
195 else 223{
196 ac97_write(w->codec, AC97_PHONE, phone | 0x8000); 224 struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol);
225 struct snd_soc_codec *codec = snd_soc_dapm_to_codec(dapm);
226 struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec);
227 struct soc_mixer_control *mc =
228 (struct soc_mixer_control *)kcontrol->private_value;
229 unsigned int shift, mixer;
197 230
198 if (l & 0x10 || r & 0x10) 231 mixer = mc->shift >> 8;
199 ac97_write(w->codec, AC97_CD, aux & 0x7fff); 232 shift = mc->shift & 0xff;
200 else
201 ac97_write(w->codec, AC97_CD, aux | 0x8000);
202 233
203 if (l & 0x20 || r & 0x20) 234 ucontrol->value.enumerated.item[0] =
204 ac97_write(w->codec, AC97_PC_BEEP, beep & 0x7fff); 235 (wm9712->hp_mixer[mixer] >> shift) & 1;
205 else
206 ac97_write(w->codec, AC97_PC_BEEP, beep | 0x8000);
207 236
208 return 0; 237 return 0;
209} 238}
210 239
240#define WM9712_HP_MIXER_CTRL(xname, xmixer, xshift) { \
241 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
242 .info = snd_soc_info_volsw, \
243 .get = wm9712_hp_mixer_get, .put = wm9712_hp_mixer_put, \
244 .private_value = SOC_SINGLE_VALUE(SND_SOC_NOPM, \
245 (xmixer << 8) | xshift, 1, 0, 0) \
246}
247
211/* Left Headphone Mixers */ 248/* Left Headphone Mixers */
212static const struct snd_kcontrol_new wm9712_hpl_mixer_controls[] = { 249static const struct snd_kcontrol_new wm9712_hpl_mixer_controls[] = {
213 SOC_DAPM_SINGLE("PCBeep Bypass Switch", HPL_MIXER, 5, 1, 0), 250 WM9712_HP_MIXER_CTRL("PCBeep Bypass Switch", HPL_MIXER, 5),
214 SOC_DAPM_SINGLE("Aux Playback Switch", HPL_MIXER, 4, 1, 0), 251 WM9712_HP_MIXER_CTRL("Aux Playback Switch", HPL_MIXER, 4),
215 SOC_DAPM_SINGLE("Phone Bypass Switch", HPL_MIXER, 3, 1, 0), 252 WM9712_HP_MIXER_CTRL("Phone Bypass Switch", HPL_MIXER, 3),
216 SOC_DAPM_SINGLE("Line Bypass Switch", HPL_MIXER, 2, 1, 0), 253 WM9712_HP_MIXER_CTRL("Line Bypass Switch", HPL_MIXER, 2),
217 SOC_DAPM_SINGLE("PCM Playback Switch", HPL_MIXER, 1, 1, 0), 254 WM9712_HP_MIXER_CTRL("PCM Playback Switch", HPL_MIXER, 1),
218 SOC_DAPM_SINGLE("Mic Sidetone Switch", HPL_MIXER, 0, 1, 0), 255 WM9712_HP_MIXER_CTRL("Mic Sidetone Switch", HPL_MIXER, 0),
219}; 256};
220 257
221/* Right Headphone Mixers */ 258/* Right Headphone Mixers */
222static const struct snd_kcontrol_new wm9712_hpr_mixer_controls[] = { 259static const struct snd_kcontrol_new wm9712_hpr_mixer_controls[] = {
223 SOC_DAPM_SINGLE("PCBeep Bypass Switch", HPR_MIXER, 5, 1, 0), 260 WM9712_HP_MIXER_CTRL("PCBeep Bypass Switch", HPR_MIXER, 5),
224 SOC_DAPM_SINGLE("Aux Playback Switch", HPR_MIXER, 4, 1, 0), 261 WM9712_HP_MIXER_CTRL("Aux Playback Switch", HPR_MIXER, 4),
225 SOC_DAPM_SINGLE("Phone Bypass Switch", HPR_MIXER, 3, 1, 0), 262 WM9712_HP_MIXER_CTRL("Phone Bypass Switch", HPR_MIXER, 3),
226 SOC_DAPM_SINGLE("Line Bypass Switch", HPR_MIXER, 2, 1, 0), 263 WM9712_HP_MIXER_CTRL("Line Bypass Switch", HPR_MIXER, 2),
227 SOC_DAPM_SINGLE("PCM Playback Switch", HPR_MIXER, 1, 1, 0), 264 WM9712_HP_MIXER_CTRL("PCM Playback Switch", HPR_MIXER, 1),
228 SOC_DAPM_SINGLE("Mic Sidetone Switch", HPR_MIXER, 0, 1, 0), 265 WM9712_HP_MIXER_CTRL("Mic Sidetone Switch", HPR_MIXER, 0),
229}; 266};
230 267
231/* Speaker Mixer */ 268/* Speaker Mixer */
@@ -299,12 +336,10 @@ SND_SOC_DAPM_MUX("Right Mic Select Source", SND_SOC_NOPM, 0, 0,
299SND_SOC_DAPM_MUX("Differential Source", SND_SOC_NOPM, 0, 0, 336SND_SOC_DAPM_MUX("Differential Source", SND_SOC_NOPM, 0, 0,
300 &wm9712_diff_sel_controls), 337 &wm9712_diff_sel_controls),
301SND_SOC_DAPM_MIXER("AC97 Mixer", SND_SOC_NOPM, 0, 0, NULL, 0), 338SND_SOC_DAPM_MIXER("AC97 Mixer", SND_SOC_NOPM, 0, 0, NULL, 0),
302SND_SOC_DAPM_MIXER_E("Left HP Mixer", AC97_INT_PAGING, 9, 1, 339SND_SOC_DAPM_MIXER("Left HP Mixer", AC97_INT_PAGING, 9, 1,
303 &wm9712_hpl_mixer_controls[0], ARRAY_SIZE(wm9712_hpl_mixer_controls), 340 &wm9712_hpl_mixer_controls[0], ARRAY_SIZE(wm9712_hpl_mixer_controls)),
304 mixer_event, SND_SOC_DAPM_POST_REG), 341SND_SOC_DAPM_MIXER("Right HP Mixer", AC97_INT_PAGING, 8, 1,
305SND_SOC_DAPM_MIXER_E("Right HP Mixer", AC97_INT_PAGING, 8, 1, 342 &wm9712_hpr_mixer_controls[0], ARRAY_SIZE(wm9712_hpr_mixer_controls)),
306 &wm9712_hpr_mixer_controls[0], ARRAY_SIZE(wm9712_hpr_mixer_controls),
307 mixer_event, SND_SOC_DAPM_POST_REG),
308SND_SOC_DAPM_MIXER("Phone Mixer", AC97_INT_PAGING, 6, 1, 343SND_SOC_DAPM_MIXER("Phone Mixer", AC97_INT_PAGING, 6, 1,
309 &wm9712_phone_mixer_controls[0], ARRAY_SIZE(wm9712_phone_mixer_controls)), 344 &wm9712_phone_mixer_controls[0], ARRAY_SIZE(wm9712_phone_mixer_controls)),
310SND_SOC_DAPM_MIXER("Speaker Mixer", AC97_INT_PAGING, 7, 1, 345SND_SOC_DAPM_MIXER("Speaker Mixer", AC97_INT_PAGING, 7, 1,
@@ -450,12 +485,13 @@ static const struct snd_soc_dapm_route wm9712_audio_map[] = {
450static unsigned int ac97_read(struct snd_soc_codec *codec, 485static unsigned int ac97_read(struct snd_soc_codec *codec,
451 unsigned int reg) 486 unsigned int reg)
452{ 487{
488 struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec);
453 u16 *cache = codec->reg_cache; 489 u16 *cache = codec->reg_cache;
454 490
455 if (reg == AC97_RESET || reg == AC97_GPIO_STATUS || 491 if (reg == AC97_RESET || reg == AC97_GPIO_STATUS ||
456 reg == AC97_VENDOR_ID1 || reg == AC97_VENDOR_ID2 || 492 reg == AC97_VENDOR_ID1 || reg == AC97_VENDOR_ID2 ||
457 reg == AC97_REC_GAIN) 493 reg == AC97_REC_GAIN)
458 return soc_ac97_ops->read(codec->ac97, reg); 494 return soc_ac97_ops->read(wm9712->ac97, reg);
459 else { 495 else {
460 reg = reg >> 1; 496 reg = reg >> 1;
461 497
@@ -469,10 +505,10 @@ static unsigned int ac97_read(struct snd_soc_codec *codec,
469static int ac97_write(struct snd_soc_codec *codec, unsigned int reg, 505static int ac97_write(struct snd_soc_codec *codec, unsigned int reg,
470 unsigned int val) 506 unsigned int val)
471{ 507{
508 struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec);
472 u16 *cache = codec->reg_cache; 509 u16 *cache = codec->reg_cache;
473 510
474 if (reg < 0x7c) 511 soc_ac97_ops->write(wm9712->ac97, reg, val);
475 soc_ac97_ops->write(codec->ac97, reg, val);
476 reg = reg >> 1; 512 reg = reg >> 1;
477 if (reg < (ARRAY_SIZE(wm9712_reg))) 513 if (reg < (ARRAY_SIZE(wm9712_reg)))
478 cache[reg] = val; 514 cache[reg] = val;
@@ -532,7 +568,6 @@ static const struct snd_soc_dai_ops wm9712_dai_ops_aux = {
532static struct snd_soc_dai_driver wm9712_dai[] = { 568static struct snd_soc_dai_driver wm9712_dai[] = {
533{ 569{
534 .name = "wm9712-hifi", 570 .name = "wm9712-hifi",
535 .ac97_control = 1,
536 .playback = { 571 .playback = {
537 .stream_name = "HiFi Playback", 572 .stream_name = "HiFi Playback",
538 .channels_min = 1, 573 .channels_min = 1,
@@ -581,21 +616,23 @@ static int wm9712_set_bias_level(struct snd_soc_codec *codec,
581 616
582static int wm9712_reset(struct snd_soc_codec *codec, int try_warm) 617static int wm9712_reset(struct snd_soc_codec *codec, int try_warm)
583{ 618{
619 struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec);
620
584 if (try_warm && soc_ac97_ops->warm_reset) { 621 if (try_warm && soc_ac97_ops->warm_reset) {
585 soc_ac97_ops->warm_reset(codec->ac97); 622 soc_ac97_ops->warm_reset(wm9712->ac97);
586 if (ac97_read(codec, 0) == wm9712_reg[0]) 623 if (ac97_read(codec, 0) == wm9712_reg[0])
587 return 1; 624 return 1;
588 } 625 }
589 626
590 soc_ac97_ops->reset(codec->ac97); 627 soc_ac97_ops->reset(wm9712->ac97);
591 if (soc_ac97_ops->warm_reset) 628 if (soc_ac97_ops->warm_reset)
592 soc_ac97_ops->warm_reset(codec->ac97); 629 soc_ac97_ops->warm_reset(wm9712->ac97);
593 if (ac97_read(codec, 0) != wm9712_reg[0]) 630 if (ac97_read(codec, 0) != wm9712_reg[0])
594 goto err; 631 goto err;
595 return 0; 632 return 0;
596 633
597err: 634err:
598 printk(KERN_ERR "WM9712 AC97 reset failed\n"); 635 dev_err(codec->dev, "Failed to reset: AC97 link error\n");
599 return -EIO; 636 return -EIO;
600} 637}
601 638
@@ -607,14 +644,13 @@ static int wm9712_soc_suspend(struct snd_soc_codec *codec)
607 644
608static int wm9712_soc_resume(struct snd_soc_codec *codec) 645static int wm9712_soc_resume(struct snd_soc_codec *codec)
609{ 646{
647 struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec);
610 int i, ret; 648 int i, ret;
611 u16 *cache = codec->reg_cache; 649 u16 *cache = codec->reg_cache;
612 650
613 ret = wm9712_reset(codec, 1); 651 ret = wm9712_reset(codec, 1);
614 if (ret < 0) { 652 if (ret < 0)
615 printk(KERN_ERR "could not reset AC97 codec\n");
616 return ret; 653 return ret;
617 }
618 654
619 wm9712_set_bias_level(codec, SND_SOC_BIAS_STANDBY); 655 wm9712_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
620 656
@@ -624,7 +660,7 @@ static int wm9712_soc_resume(struct snd_soc_codec *codec)
624 if (i == AC97_INT_PAGING || i == AC97_POWERDOWN || 660 if (i == AC97_INT_PAGING || i == AC97_POWERDOWN ||
625 (i > 0x58 && i != 0x5c)) 661 (i > 0x58 && i != 0x5c))
626 continue; 662 continue;
627 soc_ac97_ops->write(codec->ac97, i, cache[i>>1]); 663 soc_ac97_ops->write(wm9712->ac97, i, cache[i>>1]);
628 } 664 }
629 } 665 }
630 666
@@ -633,37 +669,37 @@ static int wm9712_soc_resume(struct snd_soc_codec *codec)
633 669
634static int wm9712_soc_probe(struct snd_soc_codec *codec) 670static int wm9712_soc_probe(struct snd_soc_codec *codec)
635{ 671{
672 struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec);
636 int ret = 0; 673 int ret = 0;
637 674
638 ret = snd_soc_new_ac97_codec(codec, soc_ac97_ops, 0); 675 wm9712->ac97 = snd_soc_new_ac97_codec(codec);
639 if (ret < 0) { 676 if (IS_ERR(wm9712->ac97)) {
640 printk(KERN_ERR "wm9712: failed to register AC97 codec\n"); 677 ret = PTR_ERR(wm9712->ac97);
678 dev_err(codec->dev, "Failed to register AC97 codec: %d\n", ret);
641 return ret; 679 return ret;
642 } 680 }
643 681
644 ret = wm9712_reset(codec, 0); 682 ret = wm9712_reset(codec, 0);
645 if (ret < 0) { 683 if (ret < 0)
646 printk(KERN_ERR "Failed to reset WM9712: AC97 link error\n");
647 goto reset_err; 684 goto reset_err;
648 }
649 685
650 /* set alc mux to none */ 686 /* set alc mux to none */
651 ac97_write(codec, AC97_VIDEO, ac97_read(codec, AC97_VIDEO) | 0x3000); 687 ac97_write(codec, AC97_VIDEO, ac97_read(codec, AC97_VIDEO) | 0x3000);
652 688
653 wm9712_set_bias_level(codec, SND_SOC_BIAS_STANDBY); 689 wm9712_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
654 snd_soc_add_codec_controls(codec, wm9712_snd_ac97_controls,
655 ARRAY_SIZE(wm9712_snd_ac97_controls));
656 690
657 return 0; 691 return 0;
658 692
659reset_err: 693reset_err:
660 snd_soc_free_ac97_codec(codec); 694 snd_soc_free_ac97_codec(wm9712->ac97);
661 return ret; 695 return ret;
662} 696}
663 697
664static int wm9712_soc_remove(struct snd_soc_codec *codec) 698static int wm9712_soc_remove(struct snd_soc_codec *codec)
665{ 699{
666 snd_soc_free_ac97_codec(codec); 700 struct wm9712_priv *wm9712 = snd_soc_codec_get_drvdata(codec);
701
702 snd_soc_free_ac97_codec(wm9712->ac97);
667 return 0; 703 return 0;
668} 704}
669 705
@@ -679,6 +715,9 @@ static struct snd_soc_codec_driver soc_codec_dev_wm9712 = {
679 .reg_word_size = sizeof(u16), 715 .reg_word_size = sizeof(u16),
680 .reg_cache_step = 2, 716 .reg_cache_step = 2,
681 .reg_cache_default = wm9712_reg, 717 .reg_cache_default = wm9712_reg,
718
719 .controls = wm9712_snd_ac97_controls,
720 .num_controls = ARRAY_SIZE(wm9712_snd_ac97_controls),
682 .dapm_widgets = wm9712_dapm_widgets, 721 .dapm_widgets = wm9712_dapm_widgets,
683 .num_dapm_widgets = ARRAY_SIZE(wm9712_dapm_widgets), 722 .num_dapm_widgets = ARRAY_SIZE(wm9712_dapm_widgets),
684 .dapm_routes = wm9712_audio_map, 723 .dapm_routes = wm9712_audio_map,
@@ -687,6 +726,16 @@ static struct snd_soc_codec_driver soc_codec_dev_wm9712 = {
687 726
688static int wm9712_probe(struct platform_device *pdev) 727static int wm9712_probe(struct platform_device *pdev)
689{ 728{
729 struct wm9712_priv *wm9712;
730
731 wm9712 = devm_kzalloc(&pdev->dev, sizeof(*wm9712), GFP_KERNEL);
732 if (wm9712 == NULL)
733 return -ENOMEM;
734
735 mutex_init(&wm9712->lock);
736
737 platform_set_drvdata(pdev, wm9712);
738
690 return snd_soc_register_codec(&pdev->dev, 739 return snd_soc_register_codec(&pdev->dev,
691 &soc_codec_dev_wm9712, wm9712_dai, ARRAY_SIZE(wm9712_dai)); 740 &soc_codec_dev_wm9712, wm9712_dai, ARRAY_SIZE(wm9712_dai));
692} 741}
diff --git a/sound/soc/codecs/wm9713.c b/sound/soc/codecs/wm9713.c
index bddee30a4bc7..6c95d98b0eb1 100644
--- a/sound/soc/codecs/wm9713.c
+++ b/sound/soc/codecs/wm9713.c
@@ -30,7 +30,10 @@
30#include "wm9713.h" 30#include "wm9713.h"
31 31
32struct wm9713_priv { 32struct wm9713_priv {
33 struct snd_ac97 *ac97;
33 u32 pll_in; /* PLL input frequency */ 34 u32 pll_in; /* PLL input frequency */
35 unsigned int hp_mixer[2];
36 struct mutex lock;
34}; 37};
35 38
36static unsigned int ac97_read(struct snd_soc_codec *codec, 39static unsigned int ac97_read(struct snd_soc_codec *codec,
@@ -59,13 +62,10 @@ static const u16 wm9713_reg[] = {
59 0x0000, 0x0000, 0x0000, 0x0000, 62 0x0000, 0x0000, 0x0000, 0x0000,
60 0x0000, 0x0000, 0x0000, 0x0006, 63 0x0000, 0x0000, 0x0000, 0x0006,
61 0x0001, 0x0000, 0x574d, 0x4c13, 64 0x0001, 0x0000, 0x574d, 0x4c13,
62 0x0000, 0x0000, 0x0000
63}; 65};
64 66
65/* virtual HP mixers regs */ 67#define HPL_MIXER 0
66#define HPL_MIXER 0x80 68#define HPR_MIXER 1
67#define HPR_MIXER 0x82
68#define MICB_MUX 0x82
69 69
70static const char *wm9713_mic_mixer[] = {"Stereo", "Mic 1", "Mic 2", "Mute"}; 70static const char *wm9713_mic_mixer[] = {"Stereo", "Mic 1", "Mic 2", "Mute"};
71static const char *wm9713_rec_mux[] = {"Stereo", "Left", "Right", "Mute"}; 71static const char *wm9713_rec_mux[] = {"Stereo", "Left", "Right", "Mute"};
@@ -110,7 +110,7 @@ SOC_ENUM_SINGLE(AC97_REC_GAIN_MIC, 10, 8, wm9713_dac_inv), /* dac invert 2 15 */
110SOC_ENUM_SINGLE(AC97_GENERAL_PURPOSE, 15, 2, wm9713_bass), /* bass control 16 */ 110SOC_ENUM_SINGLE(AC97_GENERAL_PURPOSE, 15, 2, wm9713_bass), /* bass control 16 */
111SOC_ENUM_SINGLE(AC97_PCI_SVID, 5, 2, wm9713_ng_type), /* noise gate type 17 */ 111SOC_ENUM_SINGLE(AC97_PCI_SVID, 5, 2, wm9713_ng_type), /* noise gate type 17 */
112SOC_ENUM_SINGLE(AC97_3D_CONTROL, 12, 3, wm9713_mic_select), /* mic selection 18 */ 112SOC_ENUM_SINGLE(AC97_3D_CONTROL, 12, 3, wm9713_mic_select), /* mic selection 18 */
113SOC_ENUM_SINGLE(MICB_MUX, 0, 2, wm9713_micb_select), /* mic selection 19 */ 113SOC_ENUM_SINGLE_VIRT(2, wm9713_micb_select), /* mic selection 19 */
114}; 114};
115 115
116static const DECLARE_TLV_DB_SCALE(out_tlv, -4650, 150, 0); 116static const DECLARE_TLV_DB_SCALE(out_tlv, -4650, 150, 0);
@@ -234,6 +234,14 @@ static int wm9713_voice_shutdown(struct snd_soc_dapm_widget *w,
234 return 0; 234 return 0;
235} 235}
236 236
237static const unsigned int wm9713_mixer_mute_regs[] = {
238 AC97_PC_BEEP,
239 AC97_MASTER_TONE,
240 AC97_PHONE,
241 AC97_REC_SEL,
242 AC97_PCM,
243 AC97_AUX,
244};
237 245
238/* We have to create a fake left and right HP mixers because 246/* We have to create a fake left and right HP mixers because
239 * the codec only has a single control that is shared by both channels. 247 * the codec only has a single control that is shared by both channels.
@@ -241,73 +249,95 @@ static int wm9713_voice_shutdown(struct snd_soc_dapm_widget *w,
241 * register map, thus we add a new (virtual) register to help determine the 249 * register map, thus we add a new (virtual) register to help determine the
242 * audio route within the device. 250 * audio route within the device.
243 */ 251 */
244static int mixer_event(struct snd_soc_dapm_widget *w, 252static int wm9713_hp_mixer_put(struct snd_kcontrol *kcontrol,
245 struct snd_kcontrol *kcontrol, int event) 253 struct snd_ctl_elem_value *ucontrol)
246{ 254{
247 u16 l, r, beep, tone, phone, rec, pcm, aux; 255 struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol);
248 256 struct snd_soc_codec *codec = snd_soc_dapm_to_codec(dapm);
249 l = ac97_read(w->codec, HPL_MIXER); 257 struct wm9713_priv *wm9713 = snd_soc_codec_get_drvdata(codec);
250 r = ac97_read(w->codec, HPR_MIXER); 258 unsigned int val = ucontrol->value.enumerated.item[0];
251 beep = ac97_read(w->codec, AC97_PC_BEEP); 259 struct soc_mixer_control *mc =
252 tone = ac97_read(w->codec, AC97_MASTER_TONE); 260 (struct soc_mixer_control *)kcontrol->private_value;
253 phone = ac97_read(w->codec, AC97_PHONE); 261 unsigned int mixer, mask, shift, old;
254 rec = ac97_read(w->codec, AC97_REC_SEL); 262 struct snd_soc_dapm_update update;
255 pcm = ac97_read(w->codec, AC97_PCM); 263 bool change;
256 aux = ac97_read(w->codec, AC97_AUX); 264
257 265 mixer = mc->shift >> 8;
258 if (event & SND_SOC_DAPM_PRE_REG) 266 shift = mc->shift & 0xff;
259 return 0; 267 mask = (1 << shift);
260 if ((l & 0x1) || (r & 0x1)) 268
261 ac97_write(w->codec, AC97_PC_BEEP, beep & 0x7fff); 269 mutex_lock(&wm9713->lock);
270 old = wm9713->hp_mixer[mixer];
271 if (ucontrol->value.enumerated.item[0])
272 wm9713->hp_mixer[mixer] |= mask;
262 else 273 else
263 ac97_write(w->codec, AC97_PC_BEEP, beep | 0x8000); 274 wm9713->hp_mixer[mixer] &= ~mask;
275
276 change = old != wm9713->hp_mixer[mixer];
277 if (change) {
278 update.kcontrol = kcontrol;
279 update.reg = wm9713_mixer_mute_regs[shift];
280 update.mask = 0x8000;
281 if ((wm9713->hp_mixer[0] & mask) ||
282 (wm9713->hp_mixer[1] & mask))
283 update.val = 0x0;
284 else
285 update.val = 0x8000;
286
287 snd_soc_dapm_mixer_update_power(dapm, kcontrol, val,
288 &update);
289 }
264 290
265 if ((l & 0x2) || (r & 0x2)) 291 mutex_unlock(&wm9713->lock);
266 ac97_write(w->codec, AC97_MASTER_TONE, tone & 0x7fff);
267 else
268 ac97_write(w->codec, AC97_MASTER_TONE, tone | 0x8000);
269 292
270 if ((l & 0x4) || (r & 0x4)) 293 return change;
271 ac97_write(w->codec, AC97_PHONE, phone & 0x7fff); 294}
272 else
273 ac97_write(w->codec, AC97_PHONE, phone | 0x8000);
274 295
275 if ((l & 0x8) || (r & 0x8)) 296static int wm9713_hp_mixer_get(struct snd_kcontrol *kcontrol,
276 ac97_write(w->codec, AC97_REC_SEL, rec & 0x7fff); 297 struct snd_ctl_elem_value *ucontrol)
277 else 298{
278 ac97_write(w->codec, AC97_REC_SEL, rec | 0x8000); 299 struct snd_soc_dapm_context *dapm = snd_soc_dapm_kcontrol_dapm(kcontrol);
300 struct snd_soc_codec *codec = snd_soc_dapm_to_codec(dapm);
301 struct wm9713_priv *wm9713 = snd_soc_codec_get_drvdata(codec);
302 struct soc_mixer_control *mc =
303 (struct soc_mixer_control *)kcontrol->private_value;
304 unsigned int mixer, shift;
279 305
280 if ((l & 0x10) || (r & 0x10)) 306 mixer = mc->shift >> 8;
281 ac97_write(w->codec, AC97_PCM, pcm & 0x7fff); 307 shift = mc->shift & 0xff;
282 else
283 ac97_write(w->codec, AC97_PCM, pcm | 0x8000);
284 308
285 if ((l & 0x20) || (r & 0x20)) 309 ucontrol->value.enumerated.item[0] =
286 ac97_write(w->codec, AC97_AUX, aux & 0x7fff); 310 (wm9713->hp_mixer[mixer] >> shift) & 1;
287 else
288 ac97_write(w->codec, AC97_AUX, aux | 0x8000);
289 311
290 return 0; 312 return 0;
291} 313}
292 314
315#define WM9713_HP_MIXER_CTRL(xname, xmixer, xshift) { \
316 .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
317 .info = snd_soc_info_volsw, \
318 .get = wm9713_hp_mixer_get, .put = wm9713_hp_mixer_put, \
319 .private_value = SOC_DOUBLE_VALUE(SND_SOC_NOPM, \
320 xshift, xmixer, 1, 0, 0) \
321}
322
293/* Left Headphone Mixers */ 323/* Left Headphone Mixers */
294static const struct snd_kcontrol_new wm9713_hpl_mixer_controls[] = { 324static const struct snd_kcontrol_new wm9713_hpl_mixer_controls[] = {
295SOC_DAPM_SINGLE("Beep Playback Switch", HPL_MIXER, 5, 1, 0), 325WM9713_HP_MIXER_CTRL("Beep Playback Switch", HPL_MIXER, 5),
296SOC_DAPM_SINGLE("Voice Playback Switch", HPL_MIXER, 4, 1, 0), 326WM9713_HP_MIXER_CTRL("Voice Playback Switch", HPL_MIXER, 4),
297SOC_DAPM_SINGLE("Aux Playback Switch", HPL_MIXER, 3, 1, 0), 327WM9713_HP_MIXER_CTRL("Aux Playback Switch", HPL_MIXER, 3),
298SOC_DAPM_SINGLE("PCM Playback Switch", HPL_MIXER, 2, 1, 0), 328WM9713_HP_MIXER_CTRL("PCM Playback Switch", HPL_MIXER, 2),
299SOC_DAPM_SINGLE("MonoIn Playback Switch", HPL_MIXER, 1, 1, 0), 329WM9713_HP_MIXER_CTRL("MonoIn Playback Switch", HPL_MIXER, 1),
300SOC_DAPM_SINGLE("Bypass Playback Switch", HPL_MIXER, 0, 1, 0), 330WM9713_HP_MIXER_CTRL("Bypass Playback Switch", HPL_MIXER, 0),
301}; 331};
302 332
303/* Right Headphone Mixers */ 333/* Right Headphone Mixers */
304static const struct snd_kcontrol_new wm9713_hpr_mixer_controls[] = { 334static const struct snd_kcontrol_new wm9713_hpr_mixer_controls[] = {
305SOC_DAPM_SINGLE("Beep Playback Switch", HPR_MIXER, 5, 1, 0), 335WM9713_HP_MIXER_CTRL("Beep Playback Switch", HPR_MIXER, 5),
306SOC_DAPM_SINGLE("Voice Playback Switch", HPR_MIXER, 4, 1, 0), 336WM9713_HP_MIXER_CTRL("Voice Playback Switch", HPR_MIXER, 4),
307SOC_DAPM_SINGLE("Aux Playback Switch", HPR_MIXER, 3, 1, 0), 337WM9713_HP_MIXER_CTRL("Aux Playback Switch", HPR_MIXER, 3),
308SOC_DAPM_SINGLE("PCM Playback Switch", HPR_MIXER, 2, 1, 0), 338WM9713_HP_MIXER_CTRL("PCM Playback Switch", HPR_MIXER, 2),
309SOC_DAPM_SINGLE("MonoIn Playback Switch", HPR_MIXER, 1, 1, 0), 339WM9713_HP_MIXER_CTRL("MonoIn Playback Switch", HPR_MIXER, 1),
310SOC_DAPM_SINGLE("Bypass Playback Switch", HPR_MIXER, 0, 1, 0), 340WM9713_HP_MIXER_CTRL("Bypass Playback Switch", HPR_MIXER, 0),
311}; 341};
312 342
313/* headphone capture mux */ 343/* headphone capture mux */
@@ -429,12 +459,10 @@ SND_SOC_DAPM_MUX("Mic A Source", SND_SOC_NOPM, 0, 0,
429 &wm9713_mic_sel_mux_controls), 459 &wm9713_mic_sel_mux_controls),
430SND_SOC_DAPM_MUX("Mic B Source", SND_SOC_NOPM, 0, 0, 460SND_SOC_DAPM_MUX("Mic B Source", SND_SOC_NOPM, 0, 0,
431 &wm9713_micb_sel_mux_controls), 461 &wm9713_micb_sel_mux_controls),
432SND_SOC_DAPM_MIXER_E("Left HP Mixer", AC97_EXTENDED_MID, 3, 1, 462SND_SOC_DAPM_MIXER("Left HP Mixer", AC97_EXTENDED_MID, 3, 1,
433 &wm9713_hpl_mixer_controls[0], ARRAY_SIZE(wm9713_hpl_mixer_controls), 463 &wm9713_hpl_mixer_controls[0], ARRAY_SIZE(wm9713_hpl_mixer_controls)),
434 mixer_event, SND_SOC_DAPM_POST_REG), 464SND_SOC_DAPM_MIXER("Right HP Mixer", AC97_EXTENDED_MID, 2, 1,
435SND_SOC_DAPM_MIXER_E("Right HP Mixer", AC97_EXTENDED_MID, 2, 1, 465 &wm9713_hpr_mixer_controls[0], ARRAY_SIZE(wm9713_hpr_mixer_controls)),
436 &wm9713_hpr_mixer_controls[0], ARRAY_SIZE(wm9713_hpr_mixer_controls),
437 mixer_event, SND_SOC_DAPM_POST_REG),
438SND_SOC_DAPM_MIXER("Mono Mixer", AC97_EXTENDED_MID, 0, 1, 466SND_SOC_DAPM_MIXER("Mono Mixer", AC97_EXTENDED_MID, 0, 1,
439 &wm9713_mono_mixer_controls[0], ARRAY_SIZE(wm9713_mono_mixer_controls)), 467 &wm9713_mono_mixer_controls[0], ARRAY_SIZE(wm9713_mono_mixer_controls)),
440SND_SOC_DAPM_MIXER("Speaker Mixer", AC97_EXTENDED_MID, 1, 1, 468SND_SOC_DAPM_MIXER("Speaker Mixer", AC97_EXTENDED_MID, 1, 1,
@@ -647,12 +675,13 @@ static const struct snd_soc_dapm_route wm9713_audio_map[] = {
647static unsigned int ac97_read(struct snd_soc_codec *codec, 675static unsigned int ac97_read(struct snd_soc_codec *codec,
648 unsigned int reg) 676 unsigned int reg)
649{ 677{
678 struct wm9713_priv *wm9713 = snd_soc_codec_get_drvdata(codec);
650 u16 *cache = codec->reg_cache; 679 u16 *cache = codec->reg_cache;
651 680
652 if (reg == AC97_RESET || reg == AC97_GPIO_STATUS || 681 if (reg == AC97_RESET || reg == AC97_GPIO_STATUS ||
653 reg == AC97_VENDOR_ID1 || reg == AC97_VENDOR_ID2 || 682 reg == AC97_VENDOR_ID1 || reg == AC97_VENDOR_ID2 ||
654 reg == AC97_CD) 683 reg == AC97_CD)
655 return soc_ac97_ops->read(codec->ac97, reg); 684 return soc_ac97_ops->read(wm9713->ac97, reg);
656 else { 685 else {
657 reg = reg >> 1; 686 reg = reg >> 1;
658 687
@@ -666,9 +695,10 @@ static unsigned int ac97_read(struct snd_soc_codec *codec,
666static int ac97_write(struct snd_soc_codec *codec, unsigned int reg, 695static int ac97_write(struct snd_soc_codec *codec, unsigned int reg,
667 unsigned int val) 696 unsigned int val)
668{ 697{
698 struct wm9713_priv *wm9713 = snd_soc_codec_get_drvdata(codec);
699
669 u16 *cache = codec->reg_cache; 700 u16 *cache = codec->reg_cache;
670 if (reg < 0x7c) 701 soc_ac97_ops->write(wm9713->ac97, reg, val);
671 soc_ac97_ops->write(codec->ac97, reg, val);
672 reg = reg >> 1; 702 reg = reg >> 1;
673 if (reg < (ARRAY_SIZE(wm9713_reg))) 703 if (reg < (ARRAY_SIZE(wm9713_reg)))
674 cache[reg] = val; 704 cache[reg] = val;
@@ -689,7 +719,8 @@ struct _pll_div {
689 * to allow rounding later */ 719 * to allow rounding later */
690#define FIXED_PLL_SIZE ((1 << 22) * 10) 720#define FIXED_PLL_SIZE ((1 << 22) * 10)
691 721
692static void pll_factors(struct _pll_div *pll_div, unsigned int source) 722static void pll_factors(struct snd_soc_codec *codec,
723 struct _pll_div *pll_div, unsigned int source)
693{ 724{
694 u64 Kpart; 725 u64 Kpart;
695 unsigned int K, Ndiv, Nmod, target; 726 unsigned int K, Ndiv, Nmod, target;
@@ -724,7 +755,7 @@ static void pll_factors(struct _pll_div *pll_div, unsigned int source)
724 755
725 Ndiv = target / source; 756 Ndiv = target / source;
726 if ((Ndiv < 5) || (Ndiv > 12)) 757 if ((Ndiv < 5) || (Ndiv > 12))
727 printk(KERN_WARNING 758 dev_warn(codec->dev,
728 "WM9713 PLL N value %u out of recommended range!\n", 759 "WM9713 PLL N value %u out of recommended range!\n",
729 Ndiv); 760 Ndiv);
730 761
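
The hunk above only threads a codec pointer into pll_factors() so the warning can use dev_warn(); the arithmetic is untouched. As a reference only (not the patch's code), here is a standalone, illustrative sketch of the integer/fractional split implied by FIXED_PLL_SIZE and the 5..12 range check; the target/source frequencies and the exact rounding detail are assumptions:

#include <stdint.h>
#include <stdio.h>

/* Fraction scaled to 22 bits, with an extra factor of 10 kept around
 * so the final division can round to nearest. */
#define FIXED_PLL_SIZE ((1 << 22) * 10)

struct pll_factors {
	unsigned int n;	/* integer divider, recommended range 5..12 */
	unsigned int k;	/* 22-bit fractional part */
};

static void split_pll_factors(struct pll_factors *f,
			      unsigned int target, unsigned int source)
{
	uint64_t kpart;
	unsigned int nmod;

	f->n = target / source;
	if (f->n < 5 || f->n > 12)
		fprintf(stderr, "N value %u out of recommended range\n",
			f->n);

	nmod = target % source;
	kpart = (uint64_t)FIXED_PLL_SIZE * nmod / source;

	/* drop the factor of 10, rounding to nearest */
	f->k = (unsigned int)((kpart + 5) / 10);
}

int main(void)
{
	struct pll_factors f;

	/* hypothetical numbers: 98.304 MHz target from a 13 MHz source */
	split_pll_factors(&f, 98304000, 13000000);
	printf("n=%u k=0x%x\n", f.n, f.k);
	return 0;
}
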
@@ -768,7 +799,7 @@ static int wm9713_set_pll(struct snd_soc_codec *codec,
768 return 0; 799 return 0;
769 } 800 }
770 801
771 pll_factors(&pll_div, freq_in); 802 pll_factors(codec, &pll_div, freq_in);
772 803
773 if (pll_div.k == 0) { 804 if (pll_div.k == 0) {
774 reg = (pll_div.n << 12) | (pll_div.lf << 11) | 805 reg = (pll_div.n << 12) | (pll_div.lf << 11) |
@@ -1049,7 +1080,6 @@ static const struct snd_soc_dai_ops wm9713_dai_ops_voice = {
1049static struct snd_soc_dai_driver wm9713_dai[] = { 1080static struct snd_soc_dai_driver wm9713_dai[] = {
1050{ 1081{
1051 .name = "wm9713-hifi", 1082 .name = "wm9713-hifi",
1052 .ac97_control = 1,
1053 .playback = { 1083 .playback = {
1054 .stream_name = "HiFi Playback", 1084 .stream_name = "HiFi Playback",
1055 .channels_min = 1, 1085 .channels_min = 1,
@@ -1095,17 +1125,22 @@ static struct snd_soc_dai_driver wm9713_dai[] = {
1095 1125
1096int wm9713_reset(struct snd_soc_codec *codec, int try_warm) 1126int wm9713_reset(struct snd_soc_codec *codec, int try_warm)
1097{ 1127{
1128 struct wm9713_priv *wm9713 = snd_soc_codec_get_drvdata(codec);
1129
1098 if (try_warm && soc_ac97_ops->warm_reset) { 1130 if (try_warm && soc_ac97_ops->warm_reset) {
1099 soc_ac97_ops->warm_reset(codec->ac97); 1131 soc_ac97_ops->warm_reset(wm9713->ac97);
1100 if (ac97_read(codec, 0) == wm9713_reg[0]) 1132 if (ac97_read(codec, 0) == wm9713_reg[0])
1101 return 1; 1133 return 1;
1102 } 1134 }
1103 1135
1104 soc_ac97_ops->reset(codec->ac97); 1136 soc_ac97_ops->reset(wm9713->ac97);
1105 if (soc_ac97_ops->warm_reset) 1137 if (soc_ac97_ops->warm_reset)
1106 soc_ac97_ops->warm_reset(codec->ac97); 1138 soc_ac97_ops->warm_reset(wm9713->ac97);
1107 if (ac97_read(codec, 0) != wm9713_reg[0]) 1139 if (ac97_read(codec, 0) != wm9713_reg[0]) {
1140 dev_err(codec->dev, "Failed to reset: AC97 link error\n");
1108 return -EIO; 1141 return -EIO;
1142 }
1143
1109 return 0; 1144 return 0;
1110} 1145}
1111EXPORT_SYMBOL_GPL(wm9713_reset); 1146EXPORT_SYMBOL_GPL(wm9713_reset);
@@ -1163,10 +1198,8 @@ static int wm9713_soc_resume(struct snd_soc_codec *codec)
1163 u16 *cache = codec->reg_cache; 1198 u16 *cache = codec->reg_cache;
1164 1199
1165 ret = wm9713_reset(codec, 1); 1200 ret = wm9713_reset(codec, 1);
1166 if (ret < 0) { 1201 if (ret < 0)
1167 printk(KERN_ERR "could not reset AC97 codec\n");
1168 return ret; 1202 return ret;
1169 }
1170 1203
1171 wm9713_set_bias_level(codec, SND_SOC_BIAS_STANDBY); 1204 wm9713_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
1172 1205
@@ -1180,7 +1213,7 @@ static int wm9713_soc_resume(struct snd_soc_codec *codec)
1180 if (i == AC97_POWERDOWN || i == AC97_EXTENDED_MID || 1213 if (i == AC97_POWERDOWN || i == AC97_EXTENDED_MID ||
1181 i == AC97_EXTENDED_MSTATUS || i > 0x66) 1214 i == AC97_EXTENDED_MSTATUS || i > 0x66)
1182 continue; 1215 continue;
1183 soc_ac97_ops->write(codec->ac97, i, cache[i>>1]); 1216 soc_ac97_ops->write(wm9713->ac97, i, cache[i>>1]);
1184 } 1217 }
1185 } 1218 }
1186 1219
@@ -1189,26 +1222,19 @@ static int wm9713_soc_resume(struct snd_soc_codec *codec)
1189 1222
1190static int wm9713_soc_probe(struct snd_soc_codec *codec) 1223static int wm9713_soc_probe(struct snd_soc_codec *codec)
1191{ 1224{
1192 struct wm9713_priv *wm9713; 1225 struct wm9713_priv *wm9713 = snd_soc_codec_get_drvdata(codec);
1193 int ret = 0, reg; 1226 int ret = 0, reg;
1194 1227
1195 wm9713 = kzalloc(sizeof(struct wm9713_priv), GFP_KERNEL); 1228 wm9713->ac97 = snd_soc_new_ac97_codec(codec);
1196 if (wm9713 == NULL) 1229 if (IS_ERR(wm9713->ac97))
1197 return -ENOMEM; 1230 return PTR_ERR(wm9713->ac97);
1198 snd_soc_codec_set_drvdata(codec, wm9713);
1199
1200 ret = snd_soc_new_ac97_codec(codec, soc_ac97_ops, 0);
1201 if (ret < 0)
1202 goto codec_err;
1203 1231
1204 /* do a cold reset for the controller and then try 1232 /* do a cold reset for the controller and then try
1205 * a warm reset followed by an optional cold reset for codec */ 1233 * a warm reset followed by an optional cold reset for codec */
1206 wm9713_reset(codec, 0); 1234 wm9713_reset(codec, 0);
1207 ret = wm9713_reset(codec, 1); 1235 ret = wm9713_reset(codec, 1);
1208 if (ret < 0) { 1236 if (ret < 0)
1209 printk(KERN_ERR "Failed to reset WM9713: AC97 link error\n");
1210 goto reset_err; 1237 goto reset_err;
1211 }
1212 1238
1213 wm9713_set_bias_level(codec, SND_SOC_BIAS_STANDBY); 1239 wm9713_set_bias_level(codec, SND_SOC_BIAS_STANDBY);
1214 1240
@@ -1216,23 +1242,18 @@ static int wm9713_soc_probe(struct snd_soc_codec *codec)
1216 reg = ac97_read(codec, AC97_CD) & 0x7fff; 1242 reg = ac97_read(codec, AC97_CD) & 0x7fff;
1217 ac97_write(codec, AC97_CD, reg); 1243 ac97_write(codec, AC97_CD, reg);
1218 1244
1219 snd_soc_add_codec_controls(codec, wm9713_snd_ac97_controls,
1220 ARRAY_SIZE(wm9713_snd_ac97_controls));
1221
1222 return 0; 1245 return 0;
1223 1246
1224reset_err: 1247reset_err:
1225 snd_soc_free_ac97_codec(codec); 1248 snd_soc_free_ac97_codec(wm9713->ac97);
1226codec_err:
1227 kfree(wm9713);
1228 return ret; 1249 return ret;
1229} 1250}
1230 1251
1231static int wm9713_soc_remove(struct snd_soc_codec *codec) 1252static int wm9713_soc_remove(struct snd_soc_codec *codec)
1232{ 1253{
1233 struct wm9713_priv *wm9713 = snd_soc_codec_get_drvdata(codec); 1254 struct wm9713_priv *wm9713 = snd_soc_codec_get_drvdata(codec);
1234 snd_soc_free_ac97_codec(codec); 1255
1235 kfree(wm9713); 1256 snd_soc_free_ac97_codec(wm9713->ac97);
1236 return 0; 1257 return 0;
1237} 1258}
1238 1259
@@ -1248,6 +1269,9 @@ static struct snd_soc_codec_driver soc_codec_dev_wm9713 = {
1248 .reg_word_size = sizeof(u16), 1269 .reg_word_size = sizeof(u16),
1249 .reg_cache_step = 2, 1270 .reg_cache_step = 2,
1250 .reg_cache_default = wm9713_reg, 1271 .reg_cache_default = wm9713_reg,
1272
1273 .controls = wm9713_snd_ac97_controls,
1274 .num_controls = ARRAY_SIZE(wm9713_snd_ac97_controls),
1251 .dapm_widgets = wm9713_dapm_widgets, 1275 .dapm_widgets = wm9713_dapm_widgets,
1252 .num_dapm_widgets = ARRAY_SIZE(wm9713_dapm_widgets), 1276 .num_dapm_widgets = ARRAY_SIZE(wm9713_dapm_widgets),
1253 .dapm_routes = wm9713_audio_map, 1277 .dapm_routes = wm9713_audio_map,
@@ -1256,6 +1280,16 @@ static struct snd_soc_codec_driver soc_codec_dev_wm9713 = {
1256 1280
1257static int wm9713_probe(struct platform_device *pdev) 1281static int wm9713_probe(struct platform_device *pdev)
1258{ 1282{
1283 struct wm9713_priv *wm9713;
1284
1285 wm9713 = devm_kzalloc(&pdev->dev, sizeof(*wm9713), GFP_KERNEL);
1286 if (wm9713 == NULL)
1287 return -ENOMEM;
1288
1289 mutex_init(&wm9713->lock);
1290
1291 platform_set_drvdata(pdev, wm9713);
1292
1259 return snd_soc_register_codec(&pdev->dev, 1293 return snd_soc_register_codec(&pdev->dev,
1260 &soc_codec_dev_wm9713, wm9713_dai, ARRAY_SIZE(wm9713_dai)); 1294 &soc_codec_dev_wm9713, wm9713_dai, ARRAY_SIZE(wm9713_dai));
1261} 1295}
diff --git a/sound/soc/codecs/wm_adsp.c b/sound/soc/codecs/wm_adsp.c
index f412a9911a75..720d6e852986 100644
--- a/sound/soc/codecs/wm_adsp.c
+++ b/sound/soc/codecs/wm_adsp.c
@@ -21,6 +21,7 @@
21#include <linux/regmap.h> 21#include <linux/regmap.h>
22#include <linux/regulator/consumer.h> 22#include <linux/regulator/consumer.h>
23#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/vmalloc.h>
24#include <linux/workqueue.h> 25#include <linux/workqueue.h>
25#include <sound/core.h> 26#include <sound/core.h>
26#include <sound/pcm.h> 27#include <sound/pcm.h>
@@ -169,11 +170,12 @@ static struct wm_adsp_buf *wm_adsp_buf_alloc(const void *src, size_t len,
169 if (buf == NULL) 170 if (buf == NULL)
170 return NULL; 171 return NULL;
171 172
172 buf->buf = kmemdup(src, len, GFP_KERNEL | GFP_DMA); 173 buf->buf = vmalloc(len);
173 if (!buf->buf) { 174 if (!buf->buf) {
174 kfree(buf); 175 vfree(buf);
175 return NULL; 176 return NULL;
176 } 177 }
178 memcpy(buf->buf, src, len);
177 179
178 if (list) 180 if (list)
179 list_add_tail(&buf->list, list); 181 list_add_tail(&buf->list, list);
@@ -188,7 +190,7 @@ static void wm_adsp_buf_free(struct list_head *list)
188 struct wm_adsp_buf, 190 struct wm_adsp_buf,
189 list); 191 list);
190 list_del(&buf->list); 192 list_del(&buf->list);
191 kfree(buf->buf); 193 vfree(buf->buf);
192 kfree(buf); 194 kfree(buf);
193 } 195 }
194} 196}
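
Pieced together from the two hunks above (not a verbatim copy; the struct layout is assumed), the scratch-buffer helpers after this change look roughly like the following. One small nit visible in the hunk: the allocation error path frees the kzalloc()'d wrapper with vfree(); the matching call for the wrapper is kfree(), which the sketch uses.

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

struct wm_adsp_buf {
	struct list_head list;
	void *buf;
};

static struct wm_adsp_buf *wm_adsp_buf_alloc(const void *src, size_t len,
					     struct list_head *list)
{
	struct wm_adsp_buf *buf = kzalloc(sizeof(*buf), GFP_KERNEL);

	if (buf == NULL)
		return NULL;

	/* Firmware regions can be large; vmalloc() avoids asking for
	 * physically contiguous (and GFP_DMA) memory for the copy. */
	buf->buf = vmalloc(len);
	if (!buf->buf) {
		kfree(buf);	/* the wrapper itself came from kzalloc() */
		return NULL;
	}
	memcpy(buf->buf, src, len);

	if (list)
		list_add_tail(&buf->list, list);

	return buf;
}

static void wm_adsp_buf_free(struct list_head *list)
{
	while (!list_empty(list)) {
		struct wm_adsp_buf *buf = list_first_entry(list,
							   struct wm_adsp_buf,
							   list);
		list_del(&buf->list);
		vfree(buf->buf);
		kfree(buf);
	}
}
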
@@ -684,38 +686,24 @@ static int wm_adsp_load(struct wm_adsp *dsp)
684 } 686 }
685 687
686 if (reg) { 688 if (reg) {
687 size_t to_write = PAGE_SIZE; 689 buf = wm_adsp_buf_alloc(region->data,
688 size_t remain = le32_to_cpu(region->len); 690 le32_to_cpu(region->len),
689 const u8 *data = region->data; 691 &buf_list);
690 692 if (!buf) {
691 while (remain > 0) { 693 adsp_err(dsp, "Out of memory\n");
692 if (remain < PAGE_SIZE) 694 ret = -ENOMEM;
693 to_write = remain; 695 goto out_fw;
694 696 }
695 buf = wm_adsp_buf_alloc(data,
696 to_write,
697 &buf_list);
698 if (!buf) {
699 adsp_err(dsp, "Out of memory\n");
700 ret = -ENOMEM;
701 goto out_fw;
702 }
703
704 ret = regmap_raw_write_async(regmap, reg,
705 buf->buf,
706 to_write);
707 if (ret != 0) {
708 adsp_err(dsp,
709 "%s.%d: Failed to write %zd bytes at %d in %s: %d\n",
710 file, regions,
711 to_write, offset,
712 region_name, ret);
713 goto out_fw;
714 }
715 697
716 data += to_write; 698 ret = regmap_raw_write_async(regmap, reg, buf->buf,
717 reg += to_write / 2; 699 le32_to_cpu(region->len));
718 remain -= to_write; 700 if (ret != 0) {
701 adsp_err(dsp,
702 "%s.%d: Failed to write %d bytes at %d in %s: %d\n",
703 file, regions,
704 le32_to_cpu(region->len), offset,
705 region_name, ret);
706 goto out_fw;
719 } 707 }
720 } 708 }
721 709
@@ -1065,8 +1053,10 @@ static int wm_adsp_setup_algs(struct wm_adsp *dsp)
1065 be32_to_cpu(adsp1_alg[i].zm)); 1053 be32_to_cpu(adsp1_alg[i].zm));
1066 1054
1067 region = kzalloc(sizeof(*region), GFP_KERNEL); 1055 region = kzalloc(sizeof(*region), GFP_KERNEL);
1068 if (!region) 1056 if (!region) {
1069 return -ENOMEM; 1057 ret = -ENOMEM;
1058 goto out;
1059 }
1070 region->type = WMFW_ADSP1_DM; 1060 region->type = WMFW_ADSP1_DM;
1071 region->alg = be32_to_cpu(adsp1_alg[i].alg.id); 1061 region->alg = be32_to_cpu(adsp1_alg[i].alg.id);
1072 region->base = be32_to_cpu(adsp1_alg[i].dm); 1062 region->base = be32_to_cpu(adsp1_alg[i].dm);
@@ -1083,8 +1073,10 @@ static int wm_adsp_setup_algs(struct wm_adsp *dsp)
1083 } 1073 }
1084 1074
1085 region = kzalloc(sizeof(*region), GFP_KERNEL); 1075 region = kzalloc(sizeof(*region), GFP_KERNEL);
1086 if (!region) 1076 if (!region) {
1087 return -ENOMEM; 1077 ret = -ENOMEM;
1078 goto out;
1079 }
1088 region->type = WMFW_ADSP1_ZM; 1080 region->type = WMFW_ADSP1_ZM;
1089 region->alg = be32_to_cpu(adsp1_alg[i].alg.id); 1081 region->alg = be32_to_cpu(adsp1_alg[i].alg.id);
1090 region->base = be32_to_cpu(adsp1_alg[i].zm); 1082 region->base = be32_to_cpu(adsp1_alg[i].zm);
@@ -1113,8 +1105,10 @@ static int wm_adsp_setup_algs(struct wm_adsp *dsp)
1113 be32_to_cpu(adsp2_alg[i].zm)); 1105 be32_to_cpu(adsp2_alg[i].zm));
1114 1106
1115 region = kzalloc(sizeof(*region), GFP_KERNEL); 1107 region = kzalloc(sizeof(*region), GFP_KERNEL);
1116 if (!region) 1108 if (!region) {
1117 return -ENOMEM; 1109 ret = -ENOMEM;
1110 goto out;
1111 }
1118 region->type = WMFW_ADSP2_XM; 1112 region->type = WMFW_ADSP2_XM;
1119 region->alg = be32_to_cpu(adsp2_alg[i].alg.id); 1113 region->alg = be32_to_cpu(adsp2_alg[i].alg.id);
1120 region->base = be32_to_cpu(adsp2_alg[i].xm); 1114 region->base = be32_to_cpu(adsp2_alg[i].xm);
@@ -1131,8 +1125,10 @@ static int wm_adsp_setup_algs(struct wm_adsp *dsp)
1131 } 1125 }
1132 1126
1133 region = kzalloc(sizeof(*region), GFP_KERNEL); 1127 region = kzalloc(sizeof(*region), GFP_KERNEL);
1134 if (!region) 1128 if (!region) {
1135 return -ENOMEM; 1129 ret = -ENOMEM;
1130 goto out;
1131 }
1136 region->type = WMFW_ADSP2_YM; 1132 region->type = WMFW_ADSP2_YM;
1137 region->alg = be32_to_cpu(adsp2_alg[i].alg.id); 1133 region->alg = be32_to_cpu(adsp2_alg[i].alg.id);
1138 region->base = be32_to_cpu(adsp2_alg[i].ym); 1134 region->base = be32_to_cpu(adsp2_alg[i].ym);
@@ -1149,8 +1145,10 @@ static int wm_adsp_setup_algs(struct wm_adsp *dsp)
1149 } 1145 }
1150 1146
1151 region = kzalloc(sizeof(*region), GFP_KERNEL); 1147 region = kzalloc(sizeof(*region), GFP_KERNEL);
1152 if (!region) 1148 if (!region) {
1153 return -ENOMEM; 1149 ret = -ENOMEM;
1150 goto out;
1151 }
1154 region->type = WMFW_ADSP2_ZM; 1152 region->type = WMFW_ADSP2_ZM;
1155 region->alg = be32_to_cpu(adsp2_alg[i].alg.id); 1153 region->alg = be32_to_cpu(adsp2_alg[i].alg.id);
1156 region->base = be32_to_cpu(adsp2_alg[i].zm); 1154 region->base = be32_to_cpu(adsp2_alg[i].zm);
@@ -1355,6 +1353,7 @@ static int wm_adsp_load_coeff(struct wm_adsp *dsp)
1355 file, blocks, pos - firmware->size); 1353 file, blocks, pos - firmware->size);
1356 1354
1357out_fw: 1355out_fw:
1356 regmap_async_complete(regmap);
1358 release_firmware(firmware); 1357 release_firmware(firmware);
1359 wm_adsp_buf_free(&buf_list); 1358 wm_adsp_buf_free(&buf_list);
1360out: 1359out:
@@ -1594,13 +1593,6 @@ static void wm_adsp2_boot_work(struct work_struct *work)
1594 if (ret != 0) 1593 if (ret != 0)
1595 goto err; 1594 goto err;
1596 1595
1597 ret = regmap_update_bits_async(dsp->regmap,
1598 dsp->base + ADSP2_CONTROL,
1599 ADSP2_CORE_ENA,
1600 ADSP2_CORE_ENA);
1601 if (ret != 0)
1602 goto err;
1603
1604 dsp->running = true; 1596 dsp->running = true;
1605 1597
1606 return; 1598 return;
@@ -1650,8 +1642,8 @@ int wm_adsp2_event(struct snd_soc_dapm_widget *w,
1650 1642
1651 ret = regmap_update_bits(dsp->regmap, 1643 ret = regmap_update_bits(dsp->regmap,
1652 dsp->base + ADSP2_CONTROL, 1644 dsp->base + ADSP2_CONTROL,
1653 ADSP2_START, 1645 ADSP2_CORE_ENA | ADSP2_START,
1654 ADSP2_START); 1646 ADSP2_CORE_ENA | ADSP2_START);
1655 if (ret != 0) 1647 if (ret != 0)
1656 goto err; 1648 goto err;
1657 break; 1649 break;
diff --git a/sound/soc/davinci/davinci-mcasp.c b/sound/soc/davinci/davinci-mcasp.c
index 0eed9b1b24e1..0dab382ba147 100644
--- a/sound/soc/davinci/davinci-mcasp.c
+++ b/sound/soc/davinci/davinci-mcasp.c
@@ -70,6 +70,7 @@ struct davinci_mcasp {
70 void __iomem *base; 70 void __iomem *base;
71 u32 fifo_base; 71 u32 fifo_base;
72 struct device *dev; 72 struct device *dev;
73 struct snd_pcm_substream *substreams[2];
73 74
74 /* McASP specific data */ 75 /* McASP specific data */
75 int tdm_slots; 76 int tdm_slots;
@@ -80,6 +81,7 @@ struct davinci_mcasp {
80 u8 bclk_div; 81 u8 bclk_div;
81 u16 bclk_lrclk_ratio; 82 u16 bclk_lrclk_ratio;
82 int streams; 83 int streams;
84 u32 irq_request[2];
83 85
84 int sysclk_freq; 86 int sysclk_freq;
85 bool bclk_master; 87 bool bclk_master;
@@ -90,6 +92,9 @@ struct davinci_mcasp {
90 92
91 bool dat_port; 93 bool dat_port;
92 94
 95 /* Used for constraint setting on the second stream */
96 u32 channels;
97
93#ifdef CONFIG_PM_SLEEP 98#ifdef CONFIG_PM_SLEEP
94 struct davinci_mcasp_context context; 99 struct davinci_mcasp_context context;
95#endif 100#endif
@@ -154,9 +159,16 @@ static bool mcasp_is_synchronous(struct davinci_mcasp *mcasp)
154 159
155static void mcasp_start_rx(struct davinci_mcasp *mcasp) 160static void mcasp_start_rx(struct davinci_mcasp *mcasp)
156{ 161{
162 if (mcasp->rxnumevt) { /* enable FIFO */
163 u32 reg = mcasp->fifo_base + MCASP_RFIFOCTL_OFFSET;
164
165 mcasp_clr_bits(mcasp, reg, FIFO_ENABLE);
166 mcasp_set_bits(mcasp, reg, FIFO_ENABLE);
167 }
168
169 /* Start clocks */
157 mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLR_REG, RXHCLKRST); 170 mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLR_REG, RXHCLKRST);
158 mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLR_REG, RXCLKRST); 171 mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLR_REG, RXCLKRST);
159
160 /* 172 /*
161 * When ASYNC == 0 the transmit and receive sections operate 173 * When ASYNC == 0 the transmit and receive sections operate
162 * synchronously from the transmit clock and frame sync. We need to make 174 * synchronously from the transmit clock and frame sync. We need to make
@@ -167,74 +179,69 @@ static void mcasp_start_rx(struct davinci_mcasp *mcasp)
167 mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, TXCLKRST); 179 mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, TXCLKRST);
168 } 180 }
169 181
182 /* Activate serializer(s) */
170 mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLR_REG, RXSERCLR); 183 mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLR_REG, RXSERCLR);
171 mcasp_set_reg(mcasp, DAVINCI_MCASP_RXBUF_REG, 0); 184 /* Release RX state machine */
172
173 mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLR_REG, RXSMRST);
174 mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLR_REG, RXFSRST);
175 mcasp_set_reg(mcasp, DAVINCI_MCASP_RXBUF_REG, 0);
176
177 mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLR_REG, RXSMRST); 185 mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLR_REG, RXSMRST);
186 /* Release Frame Sync generator */
178 mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLR_REG, RXFSRST); 187 mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLR_REG, RXFSRST);
179
180 if (mcasp_is_synchronous(mcasp)) 188 if (mcasp_is_synchronous(mcasp))
181 mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, TXFSRST); 189 mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, TXFSRST);
190
191 /* enable receive IRQs */
192 mcasp_set_bits(mcasp, DAVINCI_MCASP_EVTCTLR_REG,
193 mcasp->irq_request[SNDRV_PCM_STREAM_CAPTURE]);
182} 194}
183 195
184static void mcasp_start_tx(struct davinci_mcasp *mcasp) 196static void mcasp_start_tx(struct davinci_mcasp *mcasp)
185{ 197{
186 u8 offset = 0, i;
187 u32 cnt; 198 u32 cnt;
188 199
200 if (mcasp->txnumevt) { /* enable FIFO */
201 u32 reg = mcasp->fifo_base + MCASP_WFIFOCTL_OFFSET;
202
203 mcasp_clr_bits(mcasp, reg, FIFO_ENABLE);
204 mcasp_set_bits(mcasp, reg, FIFO_ENABLE);
205 }
206
207 /* Start clocks */
189 mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, TXHCLKRST); 208 mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, TXHCLKRST);
190 mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, TXCLKRST); 209 mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, TXCLKRST);
210 /* Activate serializer(s) */
191 mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, TXSERCLR); 211 mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, TXSERCLR);
192 mcasp_set_reg(mcasp, DAVINCI_MCASP_TXBUF_REG, 0);
193 212
194 mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, TXSMRST); 213 /* wait for XDATA to be cleared */
195 mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, TXFSRST);
196 mcasp_set_reg(mcasp, DAVINCI_MCASP_TXBUF_REG, 0);
197 for (i = 0; i < mcasp->num_serializer; i++) {
198 if (mcasp->serial_dir[i] == TX_MODE) {
199 offset = i;
200 break;
201 }
202 }
203
204 /* wait for TX ready */
205 cnt = 0; 214 cnt = 0;
206 while (!(mcasp_get_reg(mcasp, DAVINCI_MCASP_XRSRCTL_REG(offset)) & 215 while (!(mcasp_get_reg(mcasp, DAVINCI_MCASP_TXSTAT_REG) &
207 TXSTATE) && (cnt < 100000)) 216 ~XRDATA) && (cnt < 100000))
208 cnt++; 217 cnt++;
209 218
210 mcasp_set_reg(mcasp, DAVINCI_MCASP_TXBUF_REG, 0); 219 /* Release TX state machine */
220 mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, TXSMRST);
221 /* Release Frame Sync generator */
222 mcasp_set_ctl_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, TXFSRST);
223
224 /* enable transmit IRQs */
225 mcasp_set_bits(mcasp, DAVINCI_MCASP_EVTCTLX_REG,
226 mcasp->irq_request[SNDRV_PCM_STREAM_PLAYBACK]);
211} 227}
212 228
213static void davinci_mcasp_start(struct davinci_mcasp *mcasp, int stream) 229static void davinci_mcasp_start(struct davinci_mcasp *mcasp, int stream)
214{ 230{
215 u32 reg;
216
217 mcasp->streams++; 231 mcasp->streams++;
218 232
219 if (stream == SNDRV_PCM_STREAM_PLAYBACK) { 233 if (stream == SNDRV_PCM_STREAM_PLAYBACK)
220 if (mcasp->txnumevt) { /* enable FIFO */
221 reg = mcasp->fifo_base + MCASP_WFIFOCTL_OFFSET;
222 mcasp_clr_bits(mcasp, reg, FIFO_ENABLE);
223 mcasp_set_bits(mcasp, reg, FIFO_ENABLE);
224 }
225 mcasp_start_tx(mcasp); 234 mcasp_start_tx(mcasp);
226 } else { 235 else
227 if (mcasp->rxnumevt) { /* enable FIFO */
228 reg = mcasp->fifo_base + MCASP_RFIFOCTL_OFFSET;
229 mcasp_clr_bits(mcasp, reg, FIFO_ENABLE);
230 mcasp_set_bits(mcasp, reg, FIFO_ENABLE);
231 }
232 mcasp_start_rx(mcasp); 236 mcasp_start_rx(mcasp);
233 }
234} 237}
235 238
236static void mcasp_stop_rx(struct davinci_mcasp *mcasp) 239static void mcasp_stop_rx(struct davinci_mcasp *mcasp)
237{ 240{
241 /* disable IRQ sources */
242 mcasp_clr_bits(mcasp, DAVINCI_MCASP_EVTCTLR_REG,
243 mcasp->irq_request[SNDRV_PCM_STREAM_CAPTURE]);
244
238 /* 245 /*
239 * In synchronous mode stop the TX clocks if no other stream is 246 * In synchronous mode stop the TX clocks if no other stream is
240 * running 247 * running
@@ -244,12 +251,22 @@ static void mcasp_stop_rx(struct davinci_mcasp *mcasp)
244 251
245 mcasp_set_reg(mcasp, DAVINCI_MCASP_GBLCTLR_REG, 0); 252 mcasp_set_reg(mcasp, DAVINCI_MCASP_GBLCTLR_REG, 0);
246 mcasp_set_reg(mcasp, DAVINCI_MCASP_RXSTAT_REG, 0xFFFFFFFF); 253 mcasp_set_reg(mcasp, DAVINCI_MCASP_RXSTAT_REG, 0xFFFFFFFF);
254
255 if (mcasp->rxnumevt) { /* disable FIFO */
256 u32 reg = mcasp->fifo_base + MCASP_RFIFOCTL_OFFSET;
257
258 mcasp_clr_bits(mcasp, reg, FIFO_ENABLE);
259 }
247} 260}
248 261
249static void mcasp_stop_tx(struct davinci_mcasp *mcasp) 262static void mcasp_stop_tx(struct davinci_mcasp *mcasp)
250{ 263{
251 u32 val = 0; 264 u32 val = 0;
252 265
266 /* disable IRQ sources */
267 mcasp_clr_bits(mcasp, DAVINCI_MCASP_EVTCTLX_REG,
268 mcasp->irq_request[SNDRV_PCM_STREAM_PLAYBACK]);
269
253 /* 270 /*
254 * In synchronous mode keep TX clocks running if the capture stream is 271 * In synchronous mode keep TX clocks running if the capture stream is
255 * still running. 272 * still running.
@@ -259,27 +276,92 @@ static void mcasp_stop_tx(struct davinci_mcasp *mcasp)
259 276
260 mcasp_set_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, val); 277 mcasp_set_reg(mcasp, DAVINCI_MCASP_GBLCTLX_REG, val);
261 mcasp_set_reg(mcasp, DAVINCI_MCASP_TXSTAT_REG, 0xFFFFFFFF); 278 mcasp_set_reg(mcasp, DAVINCI_MCASP_TXSTAT_REG, 0xFFFFFFFF);
279
280 if (mcasp->txnumevt) { /* disable FIFO */
281 u32 reg = mcasp->fifo_base + MCASP_WFIFOCTL_OFFSET;
282
283 mcasp_clr_bits(mcasp, reg, FIFO_ENABLE);
284 }
262} 285}
263 286
264static void davinci_mcasp_stop(struct davinci_mcasp *mcasp, int stream) 287static void davinci_mcasp_stop(struct davinci_mcasp *mcasp, int stream)
265{ 288{
266 u32 reg;
267
268 mcasp->streams--; 289 mcasp->streams--;
269 290
270 if (stream == SNDRV_PCM_STREAM_PLAYBACK) { 291 if (stream == SNDRV_PCM_STREAM_PLAYBACK)
271 if (mcasp->txnumevt) { /* disable FIFO */
272 reg = mcasp->fifo_base + MCASP_WFIFOCTL_OFFSET;
273 mcasp_clr_bits(mcasp, reg, FIFO_ENABLE);
274 }
275 mcasp_stop_tx(mcasp); 292 mcasp_stop_tx(mcasp);
276 } else { 293 else
277 if (mcasp->rxnumevt) { /* disable FIFO */
278 reg = mcasp->fifo_base + MCASP_RFIFOCTL_OFFSET;
279 mcasp_clr_bits(mcasp, reg, FIFO_ENABLE);
280 }
281 mcasp_stop_rx(mcasp); 294 mcasp_stop_rx(mcasp);
295}
296
297static irqreturn_t davinci_mcasp_tx_irq_handler(int irq, void *data)
298{
299 struct davinci_mcasp *mcasp = (struct davinci_mcasp *)data;
300 struct snd_pcm_substream *substream;
301 u32 irq_mask = mcasp->irq_request[SNDRV_PCM_STREAM_PLAYBACK];
302 u32 handled_mask = 0;
303 u32 stat;
304
305 stat = mcasp_get_reg(mcasp, DAVINCI_MCASP_TXSTAT_REG);
306 if (stat & XUNDRN & irq_mask) {
307 dev_warn(mcasp->dev, "Transmit buffer underflow\n");
308 handled_mask |= XUNDRN;
309
310 substream = mcasp->substreams[SNDRV_PCM_STREAM_PLAYBACK];
311 if (substream) {
312 snd_pcm_stream_lock_irq(substream);
313 if (snd_pcm_running(substream))
314 snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
315 snd_pcm_stream_unlock_irq(substream);
316 }
282 } 317 }
318
319 if (!handled_mask)
320 dev_warn(mcasp->dev, "unhandled tx event. txstat: 0x%08x\n",
321 stat);
322
323 if (stat & XRERR)
324 handled_mask |= XRERR;
325
326 /* Ack the handled event only */
327 mcasp_set_reg(mcasp, DAVINCI_MCASP_TXSTAT_REG, handled_mask);
328
329 return IRQ_RETVAL(handled_mask);
330}
331
332static irqreturn_t davinci_mcasp_rx_irq_handler(int irq, void *data)
333{
334 struct davinci_mcasp *mcasp = (struct davinci_mcasp *)data;
335 struct snd_pcm_substream *substream;
336 u32 irq_mask = mcasp->irq_request[SNDRV_PCM_STREAM_CAPTURE];
337 u32 handled_mask = 0;
338 u32 stat;
339
340 stat = mcasp_get_reg(mcasp, DAVINCI_MCASP_RXSTAT_REG);
341 if (stat & ROVRN & irq_mask) {
342 dev_warn(mcasp->dev, "Receive buffer overflow\n");
343 handled_mask |= ROVRN;
344
345 substream = mcasp->substreams[SNDRV_PCM_STREAM_CAPTURE];
346 if (substream) {
347 snd_pcm_stream_lock_irq(substream);
348 if (snd_pcm_running(substream))
349 snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
350 snd_pcm_stream_unlock_irq(substream);
351 }
352 }
353
354 if (!handled_mask)
355 dev_warn(mcasp->dev, "unhandled rx event. rxstat: 0x%08x\n",
356 stat);
357
358 if (stat & XRERR)
359 handled_mask |= XRERR;
360
361 /* Ack the handled event only */
362 mcasp_set_reg(mcasp, DAVINCI_MCASP_RXSTAT_REG, handled_mask);
363
364 return IRQ_RETVAL(handled_mask);
283} 365}
284 366
285static int davinci_mcasp_set_dai_fmt(struct snd_soc_dai *cpu_dai, 367static int davinci_mcasp_set_dai_fmt(struct snd_soc_dai *cpu_dai,
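
The two new IRQ handlers share the same recovery step; pulled out on its own it is just the standard in-IRQ XRUN stop (a reference sketch, not part of the patch):

#include <sound/core.h>
#include <sound/pcm.h>

/* Push a still-running substream into the XRUN state from the error
 * IRQ, exactly the sequence both handlers above use. */
static void stop_substream_on_xrun(struct snd_pcm_substream *substream)
{
	if (!substream)
		return;

	snd_pcm_stream_lock_irq(substream);
	if (snd_pcm_running(substream))
		snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
	snd_pcm_stream_unlock_irq(substream);
}
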
@@ -500,8 +582,17 @@ static int davinci_config_channel_size(struct davinci_mcasp *mcasp,
500 * both left and right channels), so it has to be divided by number of 582 * both left and right channels), so it has to be divided by number of
501 * tdm-slots (for I2S - divided by 2). 583 * tdm-slots (for I2S - divided by 2).
502 */ 584 */
503 if (mcasp->bclk_lrclk_ratio) 585 if (mcasp->bclk_lrclk_ratio) {
504 word_length = mcasp->bclk_lrclk_ratio / mcasp->tdm_slots; 586 u32 slot_length = mcasp->bclk_lrclk_ratio / mcasp->tdm_slots;
587
588 /*
589 * When we have more bclk then it is needed for the data, we
590 * need to use the rotation to move the received samples to have
591 * correct alignment.
592 */
593 rx_rotate = (slot_length - word_length) / 4;
594 word_length = slot_length;
595 }
505 596
506 /* mapping of the XSSZ bit-field as described in the datasheet */ 597 /* mapping of the XSSZ bit-field as described in the datasheet */
507 fmt = (word_length >> 1) - 1; 598 fmt = (word_length >> 1) - 1;
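
Not part of the patch: the rotation fix above is plain arithmetic, so a standalone sketch with hypothetical numbers may help. A 64-bclk frame split into 2 TDM slots gives 32-bit slots, and 16-bit samples then need a receive rotation of (32 - 16) / 4 = 4 four-bit groups.

#include <stdio.h>

/*
 * slot_length: bit clocks per TDM slot (bclk/lrclk ratio over slots)
 * word_length: bits actually carrying sample data
 * The McASP rotation field counts 4-bit groups, hence the division by 4.
 */
static unsigned int rx_rotate_for(unsigned int bclk_lrclk_ratio,
				  unsigned int tdm_slots,
				  unsigned int word_length)
{
	unsigned int slot_length = bclk_lrclk_ratio / tdm_slots;

	return (slot_length - word_length) / 4;
}

int main(void)
{
	/* 64 bclk per frame, 2 slots, 16-bit samples -> rotate by 4 */
	printf("rx_rotate = %u\n", rx_rotate_for(64, 2, 16));
	return 0;
}
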
@@ -635,19 +726,29 @@ static int mcasp_common_hw_param(struct davinci_mcasp *mcasp, int stream,
635 return 0; 726 return 0;
636} 727}
637 728
638static int mcasp_i2s_hw_param(struct davinci_mcasp *mcasp, int stream) 729static int mcasp_i2s_hw_param(struct davinci_mcasp *mcasp, int stream,
730 int channels)
639{ 731{
640 int i, active_slots; 732 int i, active_slots;
733 int total_slots;
734 int active_serializers;
641 u32 mask = 0; 735 u32 mask = 0;
642 u32 busel = 0; 736 u32 busel = 0;
643 737
644 if ((mcasp->tdm_slots < 2) || (mcasp->tdm_slots > 32)) { 738 total_slots = mcasp->tdm_slots;
645 dev_err(mcasp->dev, "tdm slot %d not supported\n", 739
646 mcasp->tdm_slots); 740 /*
647 return -EINVAL; 741 * If more than one serializer is needed, then use them with
648 } 742 * their specified tdm_slots count. Otherwise, one serializer
743 * can cope with the transaction using as many slots as channels
 744 * in the stream, which requires channel symmetry
745 */
746 active_serializers = (channels + total_slots - 1) / total_slots;
747 if (active_serializers == 1)
748 active_slots = channels;
749 else
750 active_slots = total_slots;
649 751
650 active_slots = (mcasp->tdm_slots > 31) ? 32 : mcasp->tdm_slots;
651 for (i = 0; i < active_slots; i++) 752 for (i = 0; i < active_slots; i++)
652 mask |= (1 << i); 753 mask |= (1 << i);
653 754
@@ -659,12 +760,12 @@ static int mcasp_i2s_hw_param(struct davinci_mcasp *mcasp, int stream)
659 mcasp_set_reg(mcasp, DAVINCI_MCASP_TXTDM_REG, mask); 760 mcasp_set_reg(mcasp, DAVINCI_MCASP_TXTDM_REG, mask);
660 mcasp_set_bits(mcasp, DAVINCI_MCASP_TXFMT_REG, busel | TXORD); 761 mcasp_set_bits(mcasp, DAVINCI_MCASP_TXFMT_REG, busel | TXORD);
661 mcasp_mod_bits(mcasp, DAVINCI_MCASP_TXFMCTL_REG, 762 mcasp_mod_bits(mcasp, DAVINCI_MCASP_TXFMCTL_REG,
662 FSXMOD(mcasp->tdm_slots), FSXMOD(0x1FF)); 763 FSXMOD(total_slots), FSXMOD(0x1FF));
663 764
664 mcasp_set_reg(mcasp, DAVINCI_MCASP_RXTDM_REG, mask); 765 mcasp_set_reg(mcasp, DAVINCI_MCASP_RXTDM_REG, mask);
665 mcasp_set_bits(mcasp, DAVINCI_MCASP_RXFMT_REG, busel | RXORD); 766 mcasp_set_bits(mcasp, DAVINCI_MCASP_RXFMT_REG, busel | RXORD);
666 mcasp_mod_bits(mcasp, DAVINCI_MCASP_RXFMCTL_REG, 767 mcasp_mod_bits(mcasp, DAVINCI_MCASP_RXFMCTL_REG,
667 FSRMOD(mcasp->tdm_slots), FSRMOD(0x1FF)); 768 FSRMOD(total_slots), FSRMOD(0x1FF));
668 769
669 return 0; 770 return 0;
670} 771}
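
Again as a standalone reference (not patch content), the serializer/slot decision above reduces to a ceiling division: with one serializer the TDM mask can follow the channel count exactly, and with more than one every serializer carries the full configured slot count.

#include <stdio.h>

static unsigned int mcasp_active_slots(unsigned int channels,
				       unsigned int total_slots)
{
	/* DIV_ROUND_UP(channels, total_slots) spelled out */
	unsigned int active_serializers =
		(channels + total_slots - 1) / total_slots;

	return active_serializers == 1 ? channels : total_slots;
}

int main(void)
{
	/* 2 channels over 2 slots -> 2; 6 channels over 4 slots -> 4 */
	printf("%u %u\n", mcasp_active_slots(2, 2),
	       mcasp_active_slots(6, 4));
	return 0;
}
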
@@ -778,7 +879,8 @@ static int davinci_mcasp_hw_params(struct snd_pcm_substream *substream,
778 if (mcasp->op_mode == DAVINCI_MCASP_DIT_MODE) 879 if (mcasp->op_mode == DAVINCI_MCASP_DIT_MODE)
779 ret = mcasp_dit_hw_param(mcasp, params_rate(params)); 880 ret = mcasp_dit_hw_param(mcasp, params_rate(params));
780 else 881 else
781 ret = mcasp_i2s_hw_param(mcasp, substream->stream); 882 ret = mcasp_i2s_hw_param(mcasp, substream->stream,
883 channels);
782 884
783 if (ret) 885 if (ret)
784 return ret; 886 return ret;
@@ -826,6 +928,9 @@ static int davinci_mcasp_hw_params(struct snd_pcm_substream *substream,
826 928
827 davinci_config_channel_size(mcasp, word_length); 929 davinci_config_channel_size(mcasp, word_length);
828 930
931 if (mcasp->op_mode == DAVINCI_MCASP_IIS_MODE)
932 mcasp->channels = channels;
933
829 return 0; 934 return 0;
830} 935}
831 936
@@ -854,7 +959,65 @@ static int davinci_mcasp_trigger(struct snd_pcm_substream *substream,
854 return ret; 959 return ret;
855} 960}
856 961
962static int davinci_mcasp_startup(struct snd_pcm_substream *substream,
963 struct snd_soc_dai *cpu_dai)
964{
965 struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(cpu_dai);
966 u32 max_channels = 0;
967 int i, dir;
968
969 mcasp->substreams[substream->stream] = substream;
970
971 if (mcasp->op_mode == DAVINCI_MCASP_DIT_MODE)
972 return 0;
973
974 /*
975 * Limit the maximum allowed channels for the first stream:
976 * number of serializers for the direction * tdm slots per serializer
977 */
978 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
979 dir = TX_MODE;
980 else
981 dir = RX_MODE;
982
983 for (i = 0; i < mcasp->num_serializer; i++) {
984 if (mcasp->serial_dir[i] == dir)
985 max_channels++;
986 }
987 max_channels *= mcasp->tdm_slots;
988 /*
 989 * If the already active stream has fewer channels than the calculated
 990 * limit based on the serializers * tdm_slots, we need to use that as
991 * a constraint for the second stream.
 992 * Otherwise (first stream or fewer allowed channels) we use the
993 * calculated constraint.
994 */
995 if (mcasp->channels && mcasp->channels < max_channels)
996 max_channels = mcasp->channels;
997
998 snd_pcm_hw_constraint_minmax(substream->runtime,
999 SNDRV_PCM_HW_PARAM_CHANNELS,
1000 2, max_channels);
1001 return 0;
1002}
1003
1004static void davinci_mcasp_shutdown(struct snd_pcm_substream *substream,
1005 struct snd_soc_dai *cpu_dai)
1006{
1007 struct davinci_mcasp *mcasp = snd_soc_dai_get_drvdata(cpu_dai);
1008
1009 mcasp->substreams[substream->stream] = NULL;
1010
1011 if (mcasp->op_mode == DAVINCI_MCASP_DIT_MODE)
1012 return;
1013
1014 if (!cpu_dai->active)
1015 mcasp->channels = 0;
1016}
1017
857static const struct snd_soc_dai_ops davinci_mcasp_dai_ops = { 1018static const struct snd_soc_dai_ops davinci_mcasp_dai_ops = {
1019 .startup = davinci_mcasp_startup,
1020 .shutdown = davinci_mcasp_shutdown,
858 .trigger = davinci_mcasp_trigger, 1021 .trigger = davinci_mcasp_trigger,
859 .hw_params = davinci_mcasp_hw_params, 1022 .hw_params = davinci_mcasp_hw_params,
860 .set_fmt = davinci_mcasp_set_dai_fmt, 1023 .set_fmt = davinci_mcasp_set_dai_fmt,
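
For reference (not part of the patch), the channel constraint computed in davinci_mcasp_startup() boils down to: serializers pointing in the stream's direction times tdm_slots, then tightened to whatever an already running stream negotiated. The direction codes below are illustrative placeholders, not the driver's definitions.

#include <stdio.h>

/*
 * serial_dir[] holds one direction code per serializer; dir is the code
 * for the requested stream direction. running_channels is 0 when no
 * other stream is active, otherwise the channel count it negotiated.
 */
static unsigned int mcasp_max_channels(const int *serial_dir,
				       unsigned int num_serializer,
				       int dir, unsigned int tdm_slots,
				       unsigned int running_channels)
{
	unsigned int i, max_channels = 0;

	for (i = 0; i < num_serializer; i++)
		if (serial_dir[i] == dir)
			max_channels++;
	max_channels *= tdm_slots;

	if (running_channels && running_channels < max_channels)
		max_channels = running_channels;

	return max_channels;
}

int main(void)
{
	enum { RX_MODE = 1, TX_MODE = 2 };	/* illustrative values */
	const int serial_dir[] = { TX_MODE, TX_MODE, RX_MODE };

	/* 2 TX serializers * 2 slots, but playback already runs 2 ch */
	printf("%u\n", mcasp_max_channels(serial_dir, 3, TX_MODE, 2, 2));
	return 0;
}
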
@@ -971,6 +1134,7 @@ static struct snd_soc_dai_driver davinci_mcasp_dai[] = {
971 }, 1134 },
972 .ops = &davinci_mcasp_dai_ops, 1135 .ops = &davinci_mcasp_dai_ops,
973 1136
1137 .symmetric_samplebits = 1,
974 }, 1138 },
975 { 1139 {
976 .name = "davinci-mcasp.1", 1140 .name = "davinci-mcasp.1",
@@ -1194,6 +1358,8 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
1194 struct resource *mem, *ioarea, *res, *dat; 1358 struct resource *mem, *ioarea, *res, *dat;
1195 struct davinci_mcasp_pdata *pdata; 1359 struct davinci_mcasp_pdata *pdata;
1196 struct davinci_mcasp *mcasp; 1360 struct davinci_mcasp *mcasp;
1361 char *irq_name;
1362 int irq;
1197 int ret; 1363 int ret;
1198 1364
1199 if (!pdev->dev.platform_data && !pdev->dev.of_node) { 1365 if (!pdev->dev.platform_data && !pdev->dev.of_node) {
@@ -1235,6 +1401,7 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
1235 ret = pm_runtime_get_sync(&pdev->dev); 1401 ret = pm_runtime_get_sync(&pdev->dev);
1236 if (IS_ERR_VALUE(ret)) { 1402 if (IS_ERR_VALUE(ret)) {
1237 dev_err(&pdev->dev, "pm_runtime_get_sync() failed\n"); 1403 dev_err(&pdev->dev, "pm_runtime_get_sync() failed\n");
1404 pm_runtime_disable(&pdev->dev);
1238 return ret; 1405 return ret;
1239 } 1406 }
1240 1407
@@ -1246,7 +1413,21 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
1246 } 1413 }
1247 1414
1248 mcasp->op_mode = pdata->op_mode; 1415 mcasp->op_mode = pdata->op_mode;
1249 mcasp->tdm_slots = pdata->tdm_slots; 1416 /* sanity check for tdm slots parameter */
1417 if (mcasp->op_mode == DAVINCI_MCASP_IIS_MODE) {
1418 if (pdata->tdm_slots < 2) {
1419 dev_err(&pdev->dev, "invalid tdm slots: %d\n",
1420 pdata->tdm_slots);
1421 mcasp->tdm_slots = 2;
1422 } else if (pdata->tdm_slots > 32) {
1423 dev_err(&pdev->dev, "invalid tdm slots: %d\n",
1424 pdata->tdm_slots);
1425 mcasp->tdm_slots = 32;
1426 } else {
1427 mcasp->tdm_slots = pdata->tdm_slots;
1428 }
1429 }
1430
1250 mcasp->num_serializer = pdata->num_serializer; 1431 mcasp->num_serializer = pdata->num_serializer;
1251#ifdef CONFIG_PM_SLEEP 1432#ifdef CONFIG_PM_SLEEP
1252 mcasp->context.xrsr_regs = devm_kzalloc(&pdev->dev, 1433 mcasp->context.xrsr_regs = devm_kzalloc(&pdev->dev,
@@ -1260,6 +1441,36 @@ static int davinci_mcasp_probe(struct platform_device *pdev)
1260 1441
1261 mcasp->dev = &pdev->dev; 1442 mcasp->dev = &pdev->dev;
1262 1443
1444 irq = platform_get_irq_byname(pdev, "rx");
1445 if (irq >= 0) {
1446 irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_rx\n",
1447 dev_name(&pdev->dev));
1448 ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
1449 davinci_mcasp_rx_irq_handler,
1450 IRQF_ONESHOT, irq_name, mcasp);
1451 if (ret) {
1452 dev_err(&pdev->dev, "RX IRQ request failed\n");
1453 goto err;
1454 }
1455
1456 mcasp->irq_request[SNDRV_PCM_STREAM_CAPTURE] = ROVRN;
1457 }
1458
1459 irq = platform_get_irq_byname(pdev, "tx");
1460 if (irq >= 0) {
1461 irq_name = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_tx\n",
1462 dev_name(&pdev->dev));
1463 ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
1464 davinci_mcasp_tx_irq_handler,
1465 IRQF_ONESHOT, irq_name, mcasp);
1466 if (ret) {
1467 dev_err(&pdev->dev, "TX IRQ request failed\n");
1468 goto err;
1469 }
1470
1471 mcasp->irq_request[SNDRV_PCM_STREAM_PLAYBACK] = XUNDRN;
1472 }
1473
1263 dat = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dat"); 1474 dat = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dat");
1264 if (dat) 1475 if (dat)
1265 mcasp->dat_port = true; 1476 mcasp->dat_port = true;
diff --git a/sound/soc/davinci/davinci-mcasp.h b/sound/soc/davinci/davinci-mcasp.h
index 98fbc451892a..79dc511180bf 100644
--- a/sound/soc/davinci/davinci-mcasp.h
+++ b/sound/soc/davinci/davinci-mcasp.h
@@ -253,6 +253,13 @@
253#define TXFSRST BIT(12) /* Frame Sync Generator Reset */ 253#define TXFSRST BIT(12) /* Frame Sync Generator Reset */
254 254
255/* 255/*
256 * DAVINCI_MCASP_TXSTAT_REG - Transmitter Status Register Bits
257 * DAVINCI_MCASP_RXSTAT_REG - Receiver Status Register Bits
258 */
259#define XRERR BIT(8) /* Transmit/Receive error */
260#define XRDATA BIT(5) /* Transmit/Receive data ready */
261
262/*
256 * DAVINCI_MCASP_AMUTE_REG - Mute Control Register Bits 263 * DAVINCI_MCASP_AMUTE_REG - Mute Control Register Bits
257 */ 264 */
258#define MUTENA(val) (val) 265#define MUTENA(val) (val)
@@ -279,6 +286,16 @@
279#define TXDATADMADIS BIT(0) 286#define TXDATADMADIS BIT(0)
280 287
281/* 288/*
289 * DAVINCI_MCASP_EVTCTLR_REG - Receiver Interrupt Control Register Bits
290 */
291#define ROVRN BIT(0)
292
293/*
294 * DAVINCI_MCASP_EVTCTLX_REG - Transmitter Interrupt Control Register Bits
295 */
296#define XUNDRN BIT(0)
297
298/*
282 * DAVINCI_MCASP_W[R]FIFOCTL - Write/Read FIFO Control Register bits 299 * DAVINCI_MCASP_W[R]FIFOCTL - Write/Read FIFO Control Register bits
283 */ 300 */
284#define FIFO_ENABLE BIT(16) 301#define FIFO_ENABLE BIT(16)
diff --git a/sound/soc/dwc/designware_i2s.c b/sound/soc/dwc/designware_i2s.c
index e961388e6e9c..08f0229f8d68 100644
--- a/sound/soc/dwc/designware_i2s.c
+++ b/sound/soc/dwc/designware_i2s.c
@@ -338,31 +338,34 @@ static int dw_i2s_probe(struct platform_device *pdev)
338 return -EINVAL; 338 return -EINVAL;
339 } 339 }
340 340
341 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
342 if (!res) {
343 dev_err(&pdev->dev, "no i2s resource defined\n");
344 return -ENODEV;
345 }
346
347 if (!devm_request_mem_region(&pdev->dev, res->start,
348 resource_size(res), pdev->name)) {
349 dev_err(&pdev->dev, "i2s region already claimed\n");
350 return -EBUSY;
351 }
352
353 dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL); 341 dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL);
354 if (!dev) { 342 if (!dev) {
355 dev_warn(&pdev->dev, "kzalloc fail\n"); 343 dev_warn(&pdev->dev, "kzalloc fail\n");
356 return -ENOMEM; 344 return -ENOMEM;
357 } 345 }
358 346
359 dev->i2s_base = devm_ioremap(&pdev->dev, res->start, 347 dw_i2s_dai = devm_kzalloc(&pdev->dev, sizeof(*dw_i2s_dai), GFP_KERNEL);
360 resource_size(res)); 348 if (!dw_i2s_dai) {
361 if (!dev->i2s_base) { 349 dev_err(&pdev->dev, "mem allocation failed for dai driver\n");
362 dev_err(&pdev->dev, "ioremap fail for i2s_region\n");
363 return -ENOMEM; 350 return -ENOMEM;
364 } 351 }
365 352
353 dw_i2s_dai->ops = &dw_i2s_dai_ops;
354 dw_i2s_dai->suspend = dw_i2s_suspend;
355 dw_i2s_dai->resume = dw_i2s_resume;
356
357 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
358 if (!res) {
359 dev_err(&pdev->dev, "no i2s resource defined\n");
360 return -ENODEV;
361 }
362
363 dev->i2s_base = devm_ioremap_resource(&pdev->dev, res);
364 if (IS_ERR(dev->i2s_base)) {
365 dev_err(&pdev->dev, "ioremap fail for i2s_region\n");
366 return PTR_ERR(dev->i2s_base);
367 }
368
366 cap = pdata->cap; 369 cap = pdata->cap;
367 dev->capability = cap; 370 dev->capability = cap;
368 dev->i2s_clk_cfg = pdata->i2s_clk_cfg; 371 dev->i2s_clk_cfg = pdata->i2s_clk_cfg;
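
Beyond the reordering, the probe now uses devm_ioremap_resource(), which rolls the request_mem_region() and ioremap() steps and their error reporting into one managed call. A minimal sketch of that pattern (not from the patch; the helper name is made up), where the caller only checks IS_ERR() on the returned pointer:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/platform_device.h>

static void __iomem *map_first_mem_resource(struct platform_device *pdev)
{
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return ERR_PTR(-ENODEV);

	/* request_mem_region() + ioremap() in one devm-managed call;
	 * the region and mapping are released on driver detach */
	return devm_ioremap_resource(&pdev->dev, res);
}
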
@@ -388,13 +391,6 @@ static int dw_i2s_probe(struct platform_device *pdev)
388 if (ret < 0) 391 if (ret < 0)
389 goto err_clk_put; 392 goto err_clk_put;
390 393
391 dw_i2s_dai = devm_kzalloc(&pdev->dev, sizeof(*dw_i2s_dai), GFP_KERNEL);
392 if (!dw_i2s_dai) {
393 dev_err(&pdev->dev, "mem allocation failed for dai driver\n");
394 ret = -ENOMEM;
395 goto err_clk_disable;
396 }
397
398 if (cap & DWC_I2S_PLAY) { 394 if (cap & DWC_I2S_PLAY) {
399 dev_dbg(&pdev->dev, " designware: play supported\n"); 395 dev_dbg(&pdev->dev, " designware: play supported\n");
400 dw_i2s_dai->playback.channels_min = MIN_CHANNEL_NUM; 396 dw_i2s_dai->playback.channels_min = MIN_CHANNEL_NUM;
@@ -411,10 +407,6 @@ static int dw_i2s_probe(struct platform_device *pdev)
411 dw_i2s_dai->capture.rates = pdata->snd_rates; 407 dw_i2s_dai->capture.rates = pdata->snd_rates;
412 } 408 }
413 409
414 dw_i2s_dai->ops = &dw_i2s_dai_ops;
415 dw_i2s_dai->suspend = dw_i2s_suspend;
416 dw_i2s_dai->resume = dw_i2s_resume;
417
418 dev->dev = &pdev->dev; 410 dev->dev = &pdev->dev;
419 dev_set_drvdata(&pdev->dev, dev); 411 dev_set_drvdata(&pdev->dev, dev);
420 ret = snd_soc_register_component(&pdev->dev, &dw_i2s_component, 412 ret = snd_soc_register_component(&pdev->dev, &dw_i2s_component,
diff --git a/sound/soc/fsl/eukrea-tlv320.c b/sound/soc/fsl/eukrea-tlv320.c
index eb093d5b85c4..b175b0145a42 100644
--- a/sound/soc/fsl/eukrea-tlv320.c
+++ b/sound/soc/fsl/eukrea-tlv320.c
@@ -105,7 +105,7 @@ static int eukrea_tlv320_probe(struct platform_device *pdev)
105 int ret; 105 int ret;
106 int int_port = 0, ext_port; 106 int int_port = 0, ext_port;
107 struct device_node *np = pdev->dev.of_node; 107 struct device_node *np = pdev->dev.of_node;
108 struct device_node *ssi_np, *codec_np; 108 struct device_node *ssi_np = NULL, *codec_np = NULL;
109 109
110 eukrea_tlv320.dev = &pdev->dev; 110 eukrea_tlv320.dev = &pdev->dev;
111 if (np) { 111 if (np) {
@@ -217,8 +217,7 @@ static int eukrea_tlv320_probe(struct platform_device *pdev)
217err: 217err:
218 if (ret) 218 if (ret)
219 dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n", ret); 219 dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n", ret);
220 if (np) 220 of_node_put(ssi_np);
221 of_node_put(ssi_np);
222 221
223 return ret; 222 return ret;
224} 223}
diff --git a/sound/soc/fsl/fsl-asoc-card.c b/sound/soc/fsl/fsl-asoc-card.c
index 007c772f3cef..3f6959c8e2f7 100644
--- a/sound/soc/fsl/fsl-asoc-card.c
+++ b/sound/soc/fsl/fsl-asoc-card.c
@@ -51,6 +51,7 @@ struct codec_priv {
51 * @sysclk_freq[2]: SYSCLK rates for set_sysclk() 51 * @sysclk_freq[2]: SYSCLK rates for set_sysclk()
52 * @sysclk_dir[2]: SYSCLK directions for set_sysclk() 52 * @sysclk_dir[2]: SYSCLK directions for set_sysclk()
53 * @sysclk_id[2]: SYSCLK ids for set_sysclk() 53 * @sysclk_id[2]: SYSCLK ids for set_sysclk()
54 * @slot_width: Slot width of each frame
54 * 55 *
55 * Note: [1] for tx and [0] for rx 56 * Note: [1] for tx and [0] for rx
56 */ 57 */
@@ -58,6 +59,7 @@ struct cpu_priv {
58 unsigned long sysclk_freq[2]; 59 unsigned long sysclk_freq[2];
59 u32 sysclk_dir[2]; 60 u32 sysclk_dir[2];
60 u32 sysclk_id[2]; 61 u32 sysclk_id[2];
62 u32 slot_width;
61}; 63};
62 64
63/** 65/**
@@ -125,7 +127,12 @@ static int fsl_asoc_card_hw_params(struct snd_pcm_substream *substream,
125 priv->sample_rate = params_rate(params); 127 priv->sample_rate = params_rate(params);
126 priv->sample_format = params_format(params); 128 priv->sample_format = params_format(params);
127 129
128 if (priv->card.set_bias_level) 130 /*
131 * If codec-dai is DAI Master and all configurations are already in the
132 * set_bias_level(), bypass the remaining settings in hw_params().
133 * Note: (dai_fmt & CBM_CFM) includes CBM_CFM and CBM_CFS.
134 */
135 if (priv->card.set_bias_level && priv->dai_fmt & SND_SOC_DAIFMT_CBM_CFM)
129 return 0; 136 return 0;
130 137
131 /* Specific configurations of DAIs starts from here */ 138 /* Specific configurations of DAIs starts from here */
@@ -137,6 +144,15 @@ static int fsl_asoc_card_hw_params(struct snd_pcm_substream *substream,
137 return ret; 144 return ret;
138 } 145 }
139 146
147 if (cpu_priv->slot_width) {
148 ret = snd_soc_dai_set_tdm_slot(rtd->cpu_dai, 0x3, 0x3, 2,
149 cpu_priv->slot_width);
150 if (ret) {
151 dev_err(dev, "failed to set TDM slot for cpu dai\n");
152 return ret;
153 }
154 }
155
140 return 0; 156 return 0;
141} 157}
142 158
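
For context (not patch content): snd_soc_dai_set_tdm_slot() takes TX/RX active-slot bitmasks plus the slot count and width, so the 0x3, 0x3, 2, 32 call added above selects two active 32-bit slots in both directions. A sketch of that call in isolation, with an assumed helper name mirroring the hw_params() hunk:

#include <sound/soc.h>

/* Configure two active 32-bit TDM slots on the CPU DAI
 * (mask 0x3 = slots 0 and 1). */
static int card_set_two_slot_tdm(struct snd_soc_pcm_runtime *rtd,
				 unsigned int slot_width)
{
	int ret;

	ret = snd_soc_dai_set_tdm_slot(rtd->cpu_dai, 0x3, 0x3, 2,
				       slot_width);
	if (ret)
		dev_err(rtd->card->dev,
			"failed to set TDM slot for cpu dai\n");

	return ret;
}
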
@@ -448,6 +464,7 @@ static int fsl_asoc_card_probe(struct platform_device *pdev)
448 priv->cpu_priv.sysclk_freq[RX] = priv->codec_priv.mclk_freq; 464 priv->cpu_priv.sysclk_freq[RX] = priv->codec_priv.mclk_freq;
449 priv->cpu_priv.sysclk_dir[TX] = SND_SOC_CLOCK_OUT; 465 priv->cpu_priv.sysclk_dir[TX] = SND_SOC_CLOCK_OUT;
450 priv->cpu_priv.sysclk_dir[RX] = SND_SOC_CLOCK_OUT; 466 priv->cpu_priv.sysclk_dir[RX] = SND_SOC_CLOCK_OUT;
467 priv->cpu_priv.slot_width = 32;
451 priv->dai_fmt |= SND_SOC_DAIFMT_CBS_CFS; 468 priv->dai_fmt |= SND_SOC_DAIFMT_CBS_CFS;
452 } else if (of_device_is_compatible(np, "fsl,imx-audio-sgtl5000")) { 469 } else if (of_device_is_compatible(np, "fsl,imx-audio-sgtl5000")) {
453 priv->codec_priv.mclk_id = SGTL5000_SYSCLK; 470 priv->codec_priv.mclk_id = SGTL5000_SYSCLK;
diff --git a/sound/soc/fsl/fsl_asrc.c b/sound/soc/fsl/fsl_asrc.c
index 3b145313f93e..9deabdd2b1a2 100644
--- a/sound/soc/fsl/fsl_asrc.c
+++ b/sound/soc/fsl/fsl_asrc.c
@@ -684,12 +684,38 @@ static bool fsl_asrc_writeable_reg(struct device *dev, unsigned int reg)
684 } 684 }
685} 685}
686 686
687static struct reg_default fsl_asrc_reg[] = {
688 { REG_ASRCTR, 0x0000 }, { REG_ASRIER, 0x0000 },
689 { REG_ASRCNCR, 0x0000 }, { REG_ASRCFG, 0x0000 },
690 { REG_ASRCSR, 0x0000 }, { REG_ASRCDR1, 0x0000 },
691 { REG_ASRCDR2, 0x0000 }, { REG_ASRSTR, 0x0000 },
692 { REG_ASRRA, 0x0000 }, { REG_ASRRB, 0x0000 },
693 { REG_ASRRC, 0x0000 }, { REG_ASRPM1, 0x0000 },
694 { REG_ASRPM2, 0x0000 }, { REG_ASRPM3, 0x0000 },
695 { REG_ASRPM4, 0x0000 }, { REG_ASRPM5, 0x0000 },
696 { REG_ASRTFR1, 0x0000 }, { REG_ASRCCR, 0x0000 },
697 { REG_ASRDIA, 0x0000 }, { REG_ASRDOA, 0x0000 },
698 { REG_ASRDIB, 0x0000 }, { REG_ASRDOB, 0x0000 },
699 { REG_ASRDIC, 0x0000 }, { REG_ASRDOC, 0x0000 },
700 { REG_ASRIDRHA, 0x0000 }, { REG_ASRIDRLA, 0x0000 },
701 { REG_ASRIDRHB, 0x0000 }, { REG_ASRIDRLB, 0x0000 },
702 { REG_ASRIDRHC, 0x0000 }, { REG_ASRIDRLC, 0x0000 },
703 { REG_ASR76K, 0x0A47 }, { REG_ASR56K, 0x0DF3 },
704 { REG_ASRMCRA, 0x0000 }, { REG_ASRFSTA, 0x0000 },
705 { REG_ASRMCRB, 0x0000 }, { REG_ASRFSTB, 0x0000 },
706 { REG_ASRMCRC, 0x0000 }, { REG_ASRFSTC, 0x0000 },
707 { REG_ASRMCR1A, 0x0000 }, { REG_ASRMCR1B, 0x0000 },
708 { REG_ASRMCR1C, 0x0000 },
709};
710
687static const struct regmap_config fsl_asrc_regmap_config = { 711static const struct regmap_config fsl_asrc_regmap_config = {
688 .reg_bits = 32, 712 .reg_bits = 32,
689 .reg_stride = 4, 713 .reg_stride = 4,
690 .val_bits = 32, 714 .val_bits = 32,
691 715
692 .max_register = REG_ASRMCR1C, 716 .max_register = REG_ASRMCR1C,
717 .reg_defaults = fsl_asrc_reg,
718 .num_reg_defaults = ARRAY_SIZE(fsl_asrc_reg),
693 .readable_reg = fsl_asrc_readable_reg, 719 .readable_reg = fsl_asrc_readable_reg,
694 .volatile_reg = fsl_asrc_volatile_reg, 720 .volatile_reg = fsl_asrc_volatile_reg,
695 .writeable_reg = fsl_asrc_writeable_reg, 721 .writeable_reg = fsl_asrc_writeable_reg,
@@ -792,7 +818,7 @@ static int fsl_asrc_probe(struct platform_device *pdev)
792 return -ENOMEM; 818 return -ENOMEM;
793 819
794 asrc_priv->pdev = pdev; 820 asrc_priv->pdev = pdev;
795 strcpy(asrc_priv->name, np->name); 821 strncpy(asrc_priv->name, np->name, sizeof(asrc_priv->name) - 1);
796 822
797 /* Get the addresses and IRQ */ 823 /* Get the addresses and IRQ */
798 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 824 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/sound/soc/fsl/fsl_esai.c b/sound/soc/fsl/fsl_esai.c
index 8bcdfda09d7a..ca319d59f843 100644
--- a/sound/soc/fsl/fsl_esai.c
+++ b/sound/soc/fsl/fsl_esai.c
@@ -513,10 +513,15 @@ static int fsl_esai_hw_params(struct snd_pcm_substream *substream,
513 u32 width = snd_pcm_format_width(params_format(params)); 513 u32 width = snd_pcm_format_width(params_format(params));
514 u32 channels = params_channels(params); 514 u32 channels = params_channels(params);
515 u32 pins = DIV_ROUND_UP(channels, esai_priv->slots); 515 u32 pins = DIV_ROUND_UP(channels, esai_priv->slots);
516 u32 slot_width = width;
516 u32 bclk, mask, val; 517 u32 bclk, mask, val;
517 int ret; 518 int ret;
518 519
 519 bclk = params_rate(params) * esai_priv->slot_width * esai_priv->slots; 520 /* Override slot_width if specifically set */
521 if (esai_priv->slot_width)
522 slot_width = esai_priv->slot_width;
523
524 bclk = params_rate(params) * slot_width * esai_priv->slots;
520 525
521 ret = fsl_esai_set_bclk(dai, tx, bclk); 526 ret = fsl_esai_set_bclk(dai, tx, bclk);
522 if (ret) 527 if (ret)
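
Not part of the patch: the ESAI change only swaps which width feeds the bit-clock formula, since the bus clocks slot_width bits per slot whether or not the sample fills them. A standalone sketch with hypothetical numbers:

#include <stdio.h>

/* bclk = sample rate * bits per slot * slots per frame */
static unsigned long esai_bclk(unsigned int rate, unsigned int sample_width,
			       unsigned int slot_width, unsigned int slots)
{
	/* fall back to the sample width unless a slot width was set
	 * explicitly via set_tdm_slot() */
	unsigned int width = slot_width ? slot_width : sample_width;

	return (unsigned long)rate * width * slots;
}

int main(void)
{
	/* 48 kHz, S16 samples in 32-bit slots, 2 slots -> 3.072 MHz */
	printf("%lu\n", esai_bclk(48000, 16, 32, 2));
	return 0;
}
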
@@ -538,7 +543,7 @@ static int fsl_esai_hw_params(struct snd_pcm_substream *substream,
538 regmap_update_bits(esai_priv->regmap, REG_ESAI_xFCR(tx), mask, val); 543 regmap_update_bits(esai_priv->regmap, REG_ESAI_xFCR(tx), mask, val);
539 544
540 mask = ESAI_xCR_xSWS_MASK | (tx ? ESAI_xCR_PADC : 0); 545 mask = ESAI_xCR_xSWS_MASK | (tx ? ESAI_xCR_PADC : 0);
541 val = ESAI_xCR_xSWS(esai_priv->slot_width, width) | (tx ? ESAI_xCR_PADC : 0); 546 val = ESAI_xCR_xSWS(slot_width, width) | (tx ? ESAI_xCR_PADC : 0);
542 547
543 regmap_update_bits(esai_priv->regmap, REG_ESAI_xCR(tx), mask, val); 548 regmap_update_bits(esai_priv->regmap, REG_ESAI_xCR(tx), mask, val);
544 549
@@ -734,7 +739,7 @@ static int fsl_esai_probe(struct platform_device *pdev)
734 return -ENOMEM; 739 return -ENOMEM;
735 740
736 esai_priv->pdev = pdev; 741 esai_priv->pdev = pdev;
737 strcpy(esai_priv->name, np->name); 742 strncpy(esai_priv->name, np->name, sizeof(esai_priv->name) - 1);
738 743
739 /* Get the addresses and IRQ */ 744 /* Get the addresses and IRQ */
740 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 745 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -780,9 +785,6 @@ static int fsl_esai_probe(struct platform_device *pdev)
780 return ret; 785 return ret;
781 } 786 }
782 787
783 /* Set a default slot size */
784 esai_priv->slot_width = 32;
785
786 /* Set a default slot number */ 788 /* Set a default slot number */
787 esai_priv->slots = 2; 789 esai_priv->slots = 2;
788 790
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index e6955170dc42..b6b0d25f6ace 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -67,8 +67,6 @@
67/** 67/**
68 * FSLSSI_I2S_FORMATS: audio formats supported by the SSI 68 * FSLSSI_I2S_FORMATS: audio formats supported by the SSI
69 * 69 *
70 * This driver currently only supports the SSI running in I2S slave mode.
71 *
72 * The SSI has a limitation in that the samples must be in the same byte 70 * The SSI has a limitation in that the samples must be in the same byte
73 * order as the host CPU. This is because when multiple bytes are written 71 * order as the host CPU. This is because when multiple bytes are written
74 * to the STX register, the bytes and bits must be written in the same 72 * to the STX register, the bytes and bits must be written in the same
@@ -1099,7 +1097,7 @@ static const struct snd_soc_component_driver fsl_ssi_component = {
1099}; 1097};
1100 1098
1101static struct snd_soc_dai_driver fsl_ssi_ac97_dai = { 1099static struct snd_soc_dai_driver fsl_ssi_ac97_dai = {
1102 .ac97_control = 1, 1100 .bus_control = true,
1103 .playback = { 1101 .playback = {
1104 .stream_name = "AC97 Playback", 1102 .stream_name = "AC97 Playback",
1105 .channels_min = 2, 1103 .channels_min = 2,
@@ -1363,7 +1361,7 @@ static int fsl_ssi_probe(struct platform_device *pdev)
1363 return PTR_ERR(ssi_private->regs); 1361 return PTR_ERR(ssi_private->regs);
1364 } 1362 }
1365 1363
1366 ssi_private->irq = irq_of_parse_and_map(np, 0); 1364 ssi_private->irq = platform_get_irq(pdev, 0);
1367 if (!ssi_private->irq) { 1365 if (!ssi_private->irq) {
1368 dev_err(&pdev->dev, "no irq for node %s\n", np->full_name); 1366 dev_err(&pdev->dev, "no irq for node %s\n", np->full_name);
1369 return -ENXIO; 1367 return -ENXIO;
@@ -1389,7 +1387,7 @@ static int fsl_ssi_probe(struct platform_device *pdev)
1389 if (ssi_private->soc->imx) { 1387 if (ssi_private->soc->imx) {
1390 ret = fsl_ssi_imx_probe(pdev, ssi_private, iomem); 1388 ret = fsl_ssi_imx_probe(pdev, ssi_private, iomem);
1391 if (ret) 1389 if (ret)
1392 goto error_irqmap; 1390 return ret;
1393 } 1391 }
1394 1392
1395 ret = snd_soc_register_component(&pdev->dev, &fsl_ssi_component, 1393 ret = snd_soc_register_component(&pdev->dev, &fsl_ssi_component,
@@ -1412,7 +1410,7 @@ static int fsl_ssi_probe(struct platform_device *pdev)
1412 1410
1413 ret = fsl_ssi_debugfs_create(&ssi_private->dbg_stats, &pdev->dev); 1411 ret = fsl_ssi_debugfs_create(&ssi_private->dbg_stats, &pdev->dev);
1414 if (ret) 1412 if (ret)
1415 goto error_asoc_register; 1413 goto error_irq;
1416 1414
1417 /* 1415 /*
1418 * If codec-handle property is missing from SSI node, we assume 1416 * If codec-handle property is missing from SSI node, we assume
@@ -1460,10 +1458,6 @@ error_asoc_register:
1460 if (ssi_private->soc->imx) 1458 if (ssi_private->soc->imx)
1461 fsl_ssi_imx_clean(pdev, ssi_private); 1459 fsl_ssi_imx_clean(pdev, ssi_private);
1462 1460
1463error_irqmap:
1464 if (ssi_private->use_dma)
1465 irq_dispose_mapping(ssi_private->irq);
1466
1467 return ret; 1461 return ret;
1468} 1462}
1469 1463
@@ -1480,9 +1474,6 @@ static int fsl_ssi_remove(struct platform_device *pdev)
1480 if (ssi_private->soc->imx) 1474 if (ssi_private->soc->imx)
1481 fsl_ssi_imx_clean(pdev, ssi_private); 1475 fsl_ssi_imx_clean(pdev, ssi_private);
1482 1476
1483 if (ssi_private->use_dma)
1484 irq_dispose_mapping(ssi_private->irq);
1485
1486 return 0; 1477 return 0;
1487} 1478}
1488 1479
diff --git a/sound/soc/fsl/imx-sgtl5000.c b/sound/soc/fsl/imx-sgtl5000.c
index 1cb22dd034eb..1dab963a59f7 100644
--- a/sound/soc/fsl/imx-sgtl5000.c
+++ b/sound/soc/fsl/imx-sgtl5000.c
@@ -175,10 +175,8 @@ static int imx_sgtl5000_probe(struct platform_device *pdev)
175fail: 175fail:
176 if (data && !IS_ERR(data->codec_clk)) 176 if (data && !IS_ERR(data->codec_clk))
177 clk_put(data->codec_clk); 177 clk_put(data->codec_clk);
178 if (ssi_np) 178 of_node_put(ssi_np);
179 of_node_put(ssi_np); 179 of_node_put(codec_np);
180 if (codec_np)
181 of_node_put(codec_np);
182 180
183 return ret; 181 return ret;
184} 182}
diff --git a/sound/soc/fsl/imx-spdif.c b/sound/soc/fsl/imx-spdif.c
index e1dc40143600..0c9068ebe1e7 100644
--- a/sound/soc/fsl/imx-spdif.c
+++ b/sound/soc/fsl/imx-spdif.c
@@ -74,8 +74,7 @@ static int imx_spdif_audio_probe(struct platform_device *pdev)
74 platform_set_drvdata(pdev, data); 74 platform_set_drvdata(pdev, data);
75 75
76end: 76end:
77 if (spdif_np) 77 of_node_put(spdif_np);
78 of_node_put(spdif_np);
79 78
80 return ret; 79 return ret;
81} 80}
diff --git a/sound/soc/fsl/imx-ssi.c b/sound/soc/fsl/imx-ssi.c
index ab2fdd76b693..60b0a5b1f1f1 100644
--- a/sound/soc/fsl/imx-ssi.c
+++ b/sound/soc/fsl/imx-ssi.c
@@ -382,7 +382,7 @@ static struct snd_soc_dai_driver imx_ssi_dai = {
382 382
383static struct snd_soc_dai_driver imx_ac97_dai = { 383static struct snd_soc_dai_driver imx_ac97_dai = {
384 .probe = imx_ssi_dai_probe, 384 .probe = imx_ssi_dai_probe,
385 .ac97_control = 1, 385 .bus_control = true,
386 .playback = { 386 .playback = {
387 .stream_name = "AC97 Playback", 387 .stream_name = "AC97 Playback",
388 .channels_min = 2, 388 .channels_min = 2,
diff --git a/sound/soc/fsl/imx-wm8962.c b/sound/soc/fsl/imx-wm8962.c
index 3a3d17ce6ba4..48179ffe1543 100644
--- a/sound/soc/fsl/imx-wm8962.c
+++ b/sound/soc/fsl/imx-wm8962.c
@@ -281,10 +281,8 @@ static int imx_wm8962_probe(struct platform_device *pdev)
281clk_fail: 281clk_fail:
282 clk_disable_unprepare(data->codec_clk); 282 clk_disable_unprepare(data->codec_clk);
283fail: 283fail:
284 if (ssi_np) 284 of_node_put(ssi_np);
285 of_node_put(ssi_np); 285 of_node_put(codec_np);
286 if (codec_np)
287 of_node_put(codec_np);
288 286
289 return ret; 287 return ret;
290} 288}
diff --git a/sound/soc/fsl/mpc5200_dma.c b/sound/soc/fsl/mpc5200_dma.c
index f2b5d756b1f3..0b82e209b6e3 100644
--- a/sound/soc/fsl/mpc5200_dma.c
+++ b/sound/soc/fsl/mpc5200_dma.c
@@ -327,9 +327,6 @@ static int psc_dma_new(struct snd_soc_pcm_runtime *rtd)
327 goto capture_alloc_err; 327 goto capture_alloc_err;
328 } 328 }
329 329
330 if (rtd->codec->ac97)
331 rtd->codec->ac97->private_data = psc_dma;
332
333 return 0; 330 return 0;
334 331
335 capture_alloc_err: 332 capture_alloc_err:
diff --git a/sound/soc/fsl/mpc5200_psc_ac97.c b/sound/soc/fsl/mpc5200_psc_ac97.c
index 24eafa2cfbf4..c6ed6ba965a9 100644
--- a/sound/soc/fsl/mpc5200_psc_ac97.c
+++ b/sound/soc/fsl/mpc5200_psc_ac97.c
@@ -237,7 +237,7 @@ static const struct snd_soc_dai_ops psc_ac97_digital_ops = {
 static struct snd_soc_dai_driver psc_ac97_dai[] = {
 {
 	.name = "mpc5200-psc-ac97.0",
-	.ac97_control = 1,
+	.bus_control = true,
 	.probe = psc_ac97_probe,
 	.playback = {
 		.stream_name = "AC97 Playback",
@@ -257,7 +257,7 @@ static struct snd_soc_dai_driver psc_ac97_dai[] = {
 },
 {
 	.name = "mpc5200-psc-ac97.1",
-	.ac97_control = 1,
+	.bus_control = true,
 	.playback = {
 		.stream_name = "AC97 SPDIF",
 		.channels_min = 1,
@@ -282,7 +282,6 @@ static const struct snd_soc_component_driver psc_ac97_component = {
 static int psc_ac97_of_probe(struct platform_device *op)
 {
 	int rc;
-	struct snd_ac97 ac97;
 	struct mpc52xx_psc __iomem *regs;
 
 	rc = mpc5200_audio_dma_create(op);
@@ -304,7 +303,6 @@ static int psc_ac97_of_probe(struct platform_device *op)
 
 	psc_dma = dev_get_drvdata(&op->dev);
 	regs = psc_dma->psc_regs;
-	ac97.private_data = psc_dma;
 
 	psc_dma->imr = 0;
 	out_be16(&psc_dma->psc_regs->isr_imr.imr, psc_dma->imr);
diff --git a/sound/soc/intel/sst-haswell-pcm.c b/sound/soc/intel/sst-haswell-pcm.c
index 33fc5c3abf55..4df867cbb92a 100644
--- a/sound/soc/intel/sst-haswell-pcm.c
+++ b/sound/soc/intel/sst-haswell-pcm.c
@@ -691,9 +691,7 @@ static int hsw_pcm_new(struct snd_soc_pcm_runtime *rtd)
 }
 
 #define HSW_FORMATS \
-	(SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S24_LE | \
-	SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S16_LE |\
-	SNDRV_PCM_FMTBIT_S8)
+	(SNDRV_PCM_FMTBIT_S24_LE | SNDRV_PCM_FMTBIT_S16_LE)
 
 static struct snd_soc_dai_driver hsw_dais[] = {
 	{
diff --git a/sound/soc/nuc900/nuc900-ac97.c b/sound/soc/nuc900/nuc900-ac97.c
index f2f67942b229..dff443e4b657 100644
--- a/sound/soc/nuc900/nuc900-ac97.c
+++ b/sound/soc/nuc900/nuc900-ac97.c
@@ -298,7 +298,7 @@ static const struct snd_soc_dai_ops nuc900_ac97_dai_ops = {
 static struct snd_soc_dai_driver nuc900_ac97_dai = {
 	.probe = nuc900_ac97_probe,
 	.remove = nuc900_ac97_remove,
-	.ac97_control = 1,
+	.bus_control = true,
 	.playback = {
 		.rates = SNDRV_PCM_RATE_8000_48000,
 		.formats = SNDRV_PCM_FMTBIT_S16_LE,
diff --git a/sound/soc/pxa/pxa2xx-ac97.c b/sound/soc/pxa/pxa2xx-ac97.c
index ae956e3f4b9d..73ca2820c08c 100644
--- a/sound/soc/pxa/pxa2xx-ac97.c
+++ b/sound/soc/pxa/pxa2xx-ac97.c
@@ -157,7 +157,7 @@ static const struct snd_soc_dai_ops pxa_ac97_mic_dai_ops = {
 static struct snd_soc_dai_driver pxa_ac97_dai_driver[] = {
 {
 	.name = "pxa2xx-ac97",
-	.ac97_control = 1,
+	.bus_control = true,
 	.playback = {
 		.stream_name = "AC97 Playback",
 		.channels_min = 2,
@@ -174,7 +174,7 @@ static struct snd_soc_dai_driver pxa_ac97_dai_driver[] = {
 },
 {
 	.name = "pxa2xx-ac97-aux",
-	.ac97_control = 1,
+	.bus_control = true,
 	.playback = {
 		.stream_name = "AC97 Aux Playback",
 		.channels_min = 1,
@@ -191,7 +191,7 @@ static struct snd_soc_dai_driver pxa_ac97_dai_driver[] = {
 },
 {
 	.name = "pxa2xx-ac97-mic",
-	.ac97_control = 1,
+	.bus_control = true,
 	.capture = {
 		.stream_name = "AC97 Mic Capture",
 		.channels_min = 1,
diff --git a/sound/soc/rockchip/rockchip_i2s.c b/sound/soc/rockchip/rockchip_i2s.c
index f373e37f8305..c74ba37f862c 100644
--- a/sound/soc/rockchip/rockchip_i2s.c
+++ b/sound/soc/rockchip/rockchip_i2s.c
@@ -154,8 +154,10 @@ static void rockchip_snd_rxctrl(struct rk_i2s_dev *i2s, int on)
 		while (val) {
 			regmap_read(i2s->regmap, I2S_CLR, &val);
 			retry--;
-			if (!retry)
+			if (!retry) {
 				dev_warn(i2s->dev, "fail to clear\n");
+				break;
+			}
 		}
 	}
 }
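
Note: the added braces and break are the functional part of this hunk. Previously, once retry reached zero the warning fired but the while (val) loop kept polling I2S_CLR, so a bit that never clears would spin essentially forever. The bounded poll the hunk ends up with:

	while (val) {
		regmap_read(i2s->regmap, I2S_CLR, &val);
		retry--;
		if (!retry) {
			dev_warn(i2s->dev, "fail to clear\n");
			break;	/* give up once the retry budget is spent */
		}
	}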
diff --git a/sound/soc/s6000/Kconfig b/sound/soc/s6000/Kconfig
deleted file mode 100644
index f244a2566f20..000000000000
--- a/sound/soc/s6000/Kconfig
+++ /dev/null
@@ -1,26 +0,0 @@
1config SND_S6000_SOC
2 tristate "SoC Audio for the Stretch s6000 family"
3 depends on XTENSA_VARIANT_S6000 || COMPILE_TEST
4 depends on HAS_IOMEM
5 select SND_S6000_SOC_PCM if XTENSA_VARIANT_S6000
6 help
7 Say Y or M if you want to add support for codecs attached to
8 s6000 family chips. You will also need to select the platform
9 to support below.
10
11config SND_S6000_SOC_PCM
12 tristate
13
14config SND_S6000_SOC_I2S
15 tristate
16
17config SND_S6000_SOC_S6IPCAM
18 bool "SoC Audio support for Stretch 6105 IP Camera"
19 depends on SND_S6000_SOC=y
20 depends on I2C=y
21 depends on XTENSA_PLATFORM_S6105 || COMPILE_TEST
22 select SND_S6000_SOC_I2S
23 select SND_SOC_TLV320AIC3X
24 help
25 Say Y if you want to add support for SoC audio on the
26 Stretch s6105 IP Camera Reference Design.
diff --git a/sound/soc/s6000/Makefile b/sound/soc/s6000/Makefile
deleted file mode 100644
index 0f0ae2a012aa..000000000000
--- a/sound/soc/s6000/Makefile
+++ /dev/null
@@ -1,11 +0,0 @@
1# s6000 Platform Support
2snd-soc-s6000-objs := s6000-pcm.o
3snd-soc-s6000-i2s-objs := s6000-i2s.o
4
5obj-$(CONFIG_SND_S6000_SOC_PCM) += snd-soc-s6000.o
6obj-$(CONFIG_SND_S6000_SOC_I2S) += snd-soc-s6000-i2s.o
7
8# s6105 Machine Support
9snd-soc-s6ipcam-objs := s6105-ipcam.o
10
11obj-$(CONFIG_SND_S6000_SOC_S6IPCAM) += snd-soc-s6ipcam.o
diff --git a/sound/soc/s6000/s6000-i2s.c b/sound/soc/s6000/s6000-i2s.c
deleted file mode 100644
index 1c8d01166e5b..000000000000
--- a/sound/soc/s6000/s6000-i2s.c
+++ /dev/null
@@ -1,617 +0,0 @@
1/*
2 * ALSA SoC I2S Audio Layer for the Stretch S6000 family
3 *
4 * Author: Daniel Gloeckner, <dg@emlix.com>
5 * Copyright: (C) 2009 emlix GmbH <info@emlix.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/init.h>
13#include <linux/module.h>
14#include <linux/device.h>
15#include <linux/delay.h>
16#include <linux/clk.h>
17#include <linux/interrupt.h>
18#include <linux/io.h>
19#include <linux/slab.h>
20
21#include <sound/core.h>
22#include <sound/pcm.h>
23#include <sound/pcm_params.h>
24#include <sound/initval.h>
25#include <sound/soc.h>
26
27#include "s6000-i2s.h"
28#include "s6000-pcm.h"
29
30struct s6000_i2s_dev {
31 dma_addr_t sifbase;
32 u8 __iomem *scbbase;
33 unsigned int wide;
34 unsigned int channel_in;
35 unsigned int channel_out;
36 unsigned int lines_in;
37 unsigned int lines_out;
38 struct s6000_pcm_dma_params dma_params;
39};
40
41#define S6_I2S_INTERRUPT_STATUS 0x00
42#define S6_I2S_INT_OVERRUN 1
43#define S6_I2S_INT_UNDERRUN 2
44#define S6_I2S_INT_ALIGNMENT 4
45#define S6_I2S_INTERRUPT_ENABLE 0x04
46#define S6_I2S_INTERRUPT_RAW 0x08
47#define S6_I2S_INTERRUPT_CLEAR 0x0C
48#define S6_I2S_INTERRUPT_SET 0x10
49#define S6_I2S_MODE 0x20
50#define S6_I2S_DUAL 0
51#define S6_I2S_WIDE 1
52#define S6_I2S_TX_DEFAULT 0x24
53#define S6_I2S_DATA_CFG(c) (0x40 + 0x10 * (c))
54#define S6_I2S_IN 0
55#define S6_I2S_OUT 1
56#define S6_I2S_UNUSED 2
57#define S6_I2S_INTERFACE_CFG(c) (0x44 + 0x10 * (c))
58#define S6_I2S_DIV_MASK 0x001fff
59#define S6_I2S_16BIT 0x000000
60#define S6_I2S_20BIT 0x002000
61#define S6_I2S_24BIT 0x004000
62#define S6_I2S_32BIT 0x006000
63#define S6_I2S_BITS_MASK 0x006000
64#define S6_I2S_MEM_16BIT 0x000000
65#define S6_I2S_MEM_32BIT 0x008000
66#define S6_I2S_MEM_MASK 0x008000
67#define S6_I2S_CHANNELS_SHIFT 16
68#define S6_I2S_CHANNELS_MASK 0x030000
69#define S6_I2S_SCK_IN 0x000000
70#define S6_I2S_SCK_OUT 0x040000
71#define S6_I2S_SCK_DIR 0x040000
72#define S6_I2S_WS_IN 0x000000
73#define S6_I2S_WS_OUT 0x080000
74#define S6_I2S_WS_DIR 0x080000
75#define S6_I2S_LEFT_FIRST 0x000000
76#define S6_I2S_RIGHT_FIRST 0x100000
77#define S6_I2S_FIRST 0x100000
78#define S6_I2S_CUR_SCK 0x200000
79#define S6_I2S_CUR_WS 0x400000
80#define S6_I2S_ENABLE(c) (0x48 + 0x10 * (c))
81#define S6_I2S_DISABLE_IF 0x02
82#define S6_I2S_ENABLE_IF 0x03
83#define S6_I2S_IS_BUSY 0x04
84#define S6_I2S_DMA_ACTIVE 0x08
85#define S6_I2S_IS_ENABLED 0x10
86
87#define S6_I2S_NUM_LINES 4
88
89#define S6_I2S_SIF_PORT0 0x0000000
90#define S6_I2S_SIF_PORT1 0x0000080 /* docs say 0x0000010 */
91
92static inline void s6_i2s_write_reg(struct s6000_i2s_dev *dev, int reg, u32 val)
93{
94 writel(val, dev->scbbase + reg);
95}
96
97static inline u32 s6_i2s_read_reg(struct s6000_i2s_dev *dev, int reg)
98{
99 return readl(dev->scbbase + reg);
100}
101
102static inline void s6_i2s_mod_reg(struct s6000_i2s_dev *dev, int reg,
103 u32 mask, u32 val)
104{
105 val ^= s6_i2s_read_reg(dev, reg) & ~mask;
106 s6_i2s_write_reg(dev, reg, val);
107}
108
109static void s6000_i2s_start_channel(struct s6000_i2s_dev *dev, int channel)
110{
111 int i, j, cur, prev;
112
113 /*
114 * Wait for WCLK to toggle 5 times before enabling the channel
115 * s6000 Family Datasheet 3.6.4:
116 * "At least two cycles of WS must occur between commands
117 * to disable or enable the interface"
118 */
119 j = 0;
120 prev = ~S6_I2S_CUR_WS;
121 for (i = 1000000; --i && j < 6; ) {
122 cur = s6_i2s_read_reg(dev, S6_I2S_INTERFACE_CFG(channel))
123 & S6_I2S_CUR_WS;
124 if (prev != cur) {
125 prev = cur;
126 j++;
127 }
128 }
129 if (j < 6)
130 printk(KERN_WARNING "s6000-i2s: timeout waiting for WCLK\n");
131
132 s6_i2s_write_reg(dev, S6_I2S_ENABLE(channel), S6_I2S_ENABLE_IF);
133}
134
135static void s6000_i2s_stop_channel(struct s6000_i2s_dev *dev, int channel)
136{
137 s6_i2s_write_reg(dev, S6_I2S_ENABLE(channel), S6_I2S_DISABLE_IF);
138}
139
140static void s6000_i2s_start(struct snd_pcm_substream *substream)
141{
142 struct snd_soc_pcm_runtime *rtd = substream->private_data;
143 struct s6000_i2s_dev *dev = snd_soc_dai_get_drvdata(rtd->cpu_dai);
144 int channel;
145
146 channel = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ?
147 dev->channel_out : dev->channel_in;
148
149 s6000_i2s_start_channel(dev, channel);
150}
151
152static void s6000_i2s_stop(struct snd_pcm_substream *substream)
153{
154 struct snd_soc_pcm_runtime *rtd = substream->private_data;
155 struct s6000_i2s_dev *dev = snd_soc_dai_get_drvdata(rtd->cpu_dai);
156 int channel;
157
158 channel = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ?
159 dev->channel_out : dev->channel_in;
160
161 s6000_i2s_stop_channel(dev, channel);
162}
163
164static int s6000_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
165 int after)
166{
167 switch (cmd) {
168 case SNDRV_PCM_TRIGGER_START:
169 case SNDRV_PCM_TRIGGER_RESUME:
170 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
171 if ((substream->stream == SNDRV_PCM_STREAM_CAPTURE) ^ !after)
172 s6000_i2s_start(substream);
173 break;
174 case SNDRV_PCM_TRIGGER_STOP:
175 case SNDRV_PCM_TRIGGER_SUSPEND:
176 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
177 if (!after)
178 s6000_i2s_stop(substream);
179 }
180 return 0;
181}
182
183static unsigned int s6000_i2s_int_sources(struct s6000_i2s_dev *dev)
184{
185 unsigned int pending;
186 pending = s6_i2s_read_reg(dev, S6_I2S_INTERRUPT_RAW);
187 pending &= S6_I2S_INT_ALIGNMENT |
188 S6_I2S_INT_UNDERRUN |
189 S6_I2S_INT_OVERRUN;
190 s6_i2s_write_reg(dev, S6_I2S_INTERRUPT_CLEAR, pending);
191
192 return pending;
193}
194
195static unsigned int s6000_i2s_check_xrun(struct snd_soc_dai *cpu_dai)
196{
197 struct s6000_i2s_dev *dev = snd_soc_dai_get_drvdata(cpu_dai);
198 unsigned int errors;
199 unsigned int ret;
200
201 errors = s6000_i2s_int_sources(dev);
202 if (likely(!errors))
203 return 0;
204
205 ret = 0;
206 if (errors & S6_I2S_INT_ALIGNMENT)
207 printk(KERN_ERR "s6000-i2s: WCLK misaligned\n");
208 if (errors & S6_I2S_INT_UNDERRUN)
209 ret |= 1 << SNDRV_PCM_STREAM_PLAYBACK;
210 if (errors & S6_I2S_INT_OVERRUN)
211 ret |= 1 << SNDRV_PCM_STREAM_CAPTURE;
212 return ret;
213}
214
215static void s6000_i2s_wait_disabled(struct s6000_i2s_dev *dev)
216{
217 int channel;
218 int n = 50;
219 for (channel = 0; channel < 2; channel++) {
220 while (--n >= 0) {
221 int v = s6_i2s_read_reg(dev, S6_I2S_ENABLE(channel));
222 if ((v & S6_I2S_IS_ENABLED)
223 || !(v & (S6_I2S_DMA_ACTIVE | S6_I2S_IS_BUSY)))
224 break;
225 udelay(20);
226 }
227 }
228 if (n < 0)
229 printk(KERN_WARNING "s6000-i2s: timeout disabling interfaces");
230}
231
232static int s6000_i2s_set_dai_fmt(struct snd_soc_dai *cpu_dai,
233 unsigned int fmt)
234{
235 struct s6000_i2s_dev *dev = snd_soc_dai_get_drvdata(cpu_dai);
236 u32 w;
237
238 switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
239 case SND_SOC_DAIFMT_CBM_CFM:
240 w = S6_I2S_SCK_IN | S6_I2S_WS_IN;
241 break;
242 case SND_SOC_DAIFMT_CBS_CFM:
243 w = S6_I2S_SCK_OUT | S6_I2S_WS_IN;
244 break;
245 case SND_SOC_DAIFMT_CBM_CFS:
246 w = S6_I2S_SCK_IN | S6_I2S_WS_OUT;
247 break;
248 case SND_SOC_DAIFMT_CBS_CFS:
249 w = S6_I2S_SCK_OUT | S6_I2S_WS_OUT;
250 break;
251 default:
252 return -EINVAL;
253 }
254
255 switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
256 case SND_SOC_DAIFMT_NB_NF:
257 w |= S6_I2S_LEFT_FIRST;
258 break;
259 case SND_SOC_DAIFMT_NB_IF:
260 w |= S6_I2S_RIGHT_FIRST;
261 break;
262 default:
263 return -EINVAL;
264 }
265
266 s6_i2s_mod_reg(dev, S6_I2S_INTERFACE_CFG(0),
267 S6_I2S_FIRST | S6_I2S_WS_DIR | S6_I2S_SCK_DIR, w);
268 s6_i2s_mod_reg(dev, S6_I2S_INTERFACE_CFG(1),
269 S6_I2S_FIRST | S6_I2S_WS_DIR | S6_I2S_SCK_DIR, w);
270
271 return 0;
272}
273
274static int s6000_i2s_set_clkdiv(struct snd_soc_dai *dai, int div_id, int div)
275{
276 struct s6000_i2s_dev *dev = snd_soc_dai_get_drvdata(dai);
277
278 if (!div || (div & 1) || div > (S6_I2S_DIV_MASK + 1) * 2)
279 return -EINVAL;
280
281 s6_i2s_mod_reg(dev, S6_I2S_INTERFACE_CFG(div_id),
282 S6_I2S_DIV_MASK, div / 2 - 1);
283 return 0;
284}
285
286static int s6000_i2s_hw_params(struct snd_pcm_substream *substream,
287 struct snd_pcm_hw_params *params,
288 struct snd_soc_dai *dai)
289{
290 struct s6000_i2s_dev *dev = snd_soc_dai_get_drvdata(dai);
291 int interf;
292 u32 w = 0;
293
294 if (dev->wide)
295 interf = 0;
296 else {
297 w |= (((params_channels(params) - 2) / 2)
298 << S6_I2S_CHANNELS_SHIFT) & S6_I2S_CHANNELS_MASK;
299 interf = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
300 ? dev->channel_out : dev->channel_in;
301 }
302
303 switch (params_format(params)) {
304 case SNDRV_PCM_FORMAT_S16_LE:
305 w |= S6_I2S_16BIT | S6_I2S_MEM_16BIT;
306 break;
307 case SNDRV_PCM_FORMAT_S32_LE:
308 w |= S6_I2S_32BIT | S6_I2S_MEM_32BIT;
309 break;
310 default:
311 printk(KERN_WARNING "s6000-i2s: unsupported PCM format %x\n",
312 params_format(params));
313 return -EINVAL;
314 }
315
316 if (s6_i2s_read_reg(dev, S6_I2S_INTERFACE_CFG(interf))
317 & S6_I2S_IS_ENABLED) {
318 printk(KERN_ERR "s6000-i2s: interface already enabled\n");
319 return -EBUSY;
320 }
321
322 s6_i2s_mod_reg(dev, S6_I2S_INTERFACE_CFG(interf),
323 S6_I2S_CHANNELS_MASK|S6_I2S_MEM_MASK|S6_I2S_BITS_MASK,
324 w);
325
326 return 0;
327}
328
329static int s6000_i2s_dai_probe(struct snd_soc_dai *dai)
330{
331 struct s6000_i2s_dev *dev = snd_soc_dai_get_drvdata(dai);
332 struct s6000_snd_platform_data *pdata = dai->dev->platform_data;
333
334 if (!pdata)
335 return -EINVAL;
336
337 dai->capture_dma_data = &dev->dma_params;
338 dai->playback_dma_data = &dev->dma_params;
339
340 dev->wide = pdata->wide;
341 dev->channel_in = pdata->channel_in;
342 dev->channel_out = pdata->channel_out;
343 dev->lines_in = pdata->lines_in;
344 dev->lines_out = pdata->lines_out;
345
346 s6_i2s_write_reg(dev, S6_I2S_MODE,
347 dev->wide ? S6_I2S_WIDE : S6_I2S_DUAL);
348
349 if (dev->wide) {
350 int i;
351
352 if (dev->lines_in + dev->lines_out > S6_I2S_NUM_LINES)
353 return -EINVAL;
354
355 dev->channel_in = 0;
356 dev->channel_out = 1;
357 dai->driver->capture.channels_min = 2 * dev->lines_in;
358 dai->driver->capture.channels_max = dai->driver->capture.channels_min;
359 dai->driver->playback.channels_min = 2 * dev->lines_out;
360 dai->driver->playback.channels_max = dai->driver->playback.channels_min;
361
362 for (i = 0; i < dev->lines_out; i++)
363 s6_i2s_write_reg(dev, S6_I2S_DATA_CFG(i), S6_I2S_OUT);
364
365 for (; i < S6_I2S_NUM_LINES - dev->lines_in; i++)
366 s6_i2s_write_reg(dev, S6_I2S_DATA_CFG(i),
367 S6_I2S_UNUSED);
368
369 for (; i < S6_I2S_NUM_LINES; i++)
370 s6_i2s_write_reg(dev, S6_I2S_DATA_CFG(i), S6_I2S_IN);
371 } else {
372 unsigned int cfg[2] = {S6_I2S_UNUSED, S6_I2S_UNUSED};
373
374 if (dev->lines_in > 1 || dev->lines_out > 1)
375 return -EINVAL;
376
377 dai->driver->capture.channels_min = 2 * dev->lines_in;
378 dai->driver->capture.channels_max = 8 * dev->lines_in;
379 dai->driver->playback.channels_min = 2 * dev->lines_out;
380 dai->driver->playback.channels_max = 8 * dev->lines_out;
381
382 if (dev->lines_in)
383 cfg[dev->channel_in] = S6_I2S_IN;
384 if (dev->lines_out)
385 cfg[dev->channel_out] = S6_I2S_OUT;
386
387 s6_i2s_write_reg(dev, S6_I2S_DATA_CFG(0), cfg[0]);
388 s6_i2s_write_reg(dev, S6_I2S_DATA_CFG(1), cfg[1]);
389 }
390
391 if (dev->lines_out) {
392 if (dev->lines_in) {
393 if (!dev->dma_params.dma_out)
394 return -ENODEV;
395 } else {
396 dev->dma_params.dma_out = dev->dma_params.dma_in;
397 dev->dma_params.dma_in = 0;
398 }
399 }
400 dev->dma_params.sif_in = dev->sifbase + (dev->channel_in ?
401 S6_I2S_SIF_PORT1 : S6_I2S_SIF_PORT0);
402 dev->dma_params.sif_out = dev->sifbase + (dev->channel_out ?
403 S6_I2S_SIF_PORT1 : S6_I2S_SIF_PORT0);
404 dev->dma_params.same_rate = pdata->same_rate | pdata->wide;
405 return 0;
406}
407
408#define S6000_I2S_RATES SNDRV_PCM_RATE_CONTINUOUS
409#define S6000_I2S_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE)
410
411static const struct snd_soc_dai_ops s6000_i2s_dai_ops = {
412 .set_fmt = s6000_i2s_set_dai_fmt,
413 .set_clkdiv = s6000_i2s_set_clkdiv,
414 .hw_params = s6000_i2s_hw_params,
415};
416
417static struct snd_soc_dai_driver s6000_i2s_dai = {
418 .probe = s6000_i2s_dai_probe,
419 .playback = {
420 .channels_min = 2,
421 .channels_max = 8,
422 .formats = S6000_I2S_FORMATS,
423 .rates = S6000_I2S_RATES,
424 .rate_min = 0,
425 .rate_max = 1562500,
426 },
427 .capture = {
428 .channels_min = 2,
429 .channels_max = 8,
430 .formats = S6000_I2S_FORMATS,
431 .rates = S6000_I2S_RATES,
432 .rate_min = 0,
433 .rate_max = 1562500,
434 },
435 .ops = &s6000_i2s_dai_ops,
436};
437
438static const struct snd_soc_component_driver s6000_i2s_component = {
439 .name = "s6000-i2s",
440};
441
442static int s6000_i2s_probe(struct platform_device *pdev)
443{
444 struct s6000_i2s_dev *dev;
445 struct resource *scbmem, *sifmem, *region, *dma1, *dma2;
446 u8 __iomem *mmio;
447 int ret;
448
449 scbmem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
450 if (!scbmem) {
451 dev_err(&pdev->dev, "no mem resource?\n");
452 ret = -ENODEV;
453 goto err_release_none;
454 }
455
456 region = request_mem_region(scbmem->start, resource_size(scbmem),
457 pdev->name);
458 if (!region) {
459 dev_err(&pdev->dev, "I2S SCB region already claimed\n");
460 ret = -EBUSY;
461 goto err_release_none;
462 }
463
464 mmio = ioremap(scbmem->start, resource_size(scbmem));
465 if (!mmio) {
466 dev_err(&pdev->dev, "can't ioremap SCB region\n");
467 ret = -ENOMEM;
468 goto err_release_scb;
469 }
470
471 sifmem = platform_get_resource(pdev, IORESOURCE_MEM, 1);
472 if (!sifmem) {
473 dev_err(&pdev->dev, "no second mem resource?\n");
474 ret = -ENODEV;
475 goto err_release_map;
476 }
477
478 region = request_mem_region(sifmem->start, resource_size(sifmem),
479 pdev->name);
480 if (!region) {
481 dev_err(&pdev->dev, "I2S SIF region already claimed\n");
482 ret = -EBUSY;
483 goto err_release_map;
484 }
485
486 dma1 = platform_get_resource(pdev, IORESOURCE_DMA, 0);
487 if (!dma1) {
488 dev_err(&pdev->dev, "no dma resource?\n");
489 ret = -ENODEV;
490 goto err_release_sif;
491 }
492
493 region = request_mem_region(dma1->start, resource_size(dma1),
494 pdev->name);
495 if (!region) {
496 dev_err(&pdev->dev, "I2S DMA region already claimed\n");
497 ret = -EBUSY;
498 goto err_release_sif;
499 }
500
501 dma2 = platform_get_resource(pdev, IORESOURCE_DMA, 1);
502 if (dma2) {
503 region = request_mem_region(dma2->start, resource_size(dma2),
504 pdev->name);
505 if (!region) {
506 dev_err(&pdev->dev,
507 "I2S DMA region already claimed\n");
508 ret = -EBUSY;
509 goto err_release_dma1;
510 }
511 }
512
513 dev = kzalloc(sizeof(struct s6000_i2s_dev), GFP_KERNEL);
514 if (!dev) {
515 ret = -ENOMEM;
516 goto err_release_dma2;
517 }
518 dev_set_drvdata(&pdev->dev, dev);
519
520 dev->sifbase = sifmem->start;
521 dev->scbbase = mmio;
522
523 s6_i2s_write_reg(dev, S6_I2S_INTERRUPT_ENABLE, 0);
524 s6_i2s_write_reg(dev, S6_I2S_INTERRUPT_CLEAR,
525 S6_I2S_INT_ALIGNMENT |
526 S6_I2S_INT_UNDERRUN |
527 S6_I2S_INT_OVERRUN);
528
529 s6000_i2s_stop_channel(dev, 0);
530 s6000_i2s_stop_channel(dev, 1);
531 s6000_i2s_wait_disabled(dev);
532
533 dev->dma_params.check_xrun = s6000_i2s_check_xrun;
534 dev->dma_params.trigger = s6000_i2s_trigger;
535 dev->dma_params.dma_in = dma1->start;
536 dev->dma_params.dma_out = dma2 ? dma2->start : 0;
537 dev->dma_params.irq = platform_get_irq(pdev, 0);
538 if (dev->dma_params.irq < 0) {
539 dev_err(&pdev->dev, "no irq resource?\n");
540 ret = -ENODEV;
541 goto err_release_dev;
542 }
543
544 s6_i2s_write_reg(dev, S6_I2S_INTERRUPT_ENABLE,
545 S6_I2S_INT_ALIGNMENT |
546 S6_I2S_INT_UNDERRUN |
547 S6_I2S_INT_OVERRUN);
548
549 ret = snd_soc_register_component(&pdev->dev, &s6000_i2s_component,
550 &s6000_i2s_dai, 1);
551 if (ret)
552 goto err_release_dev;
553
554 return 0;
555
556err_release_dev:
557 kfree(dev);
558err_release_dma2:
559 if (dma2)
560 release_mem_region(dma2->start, resource_size(dma2));
561err_release_dma1:
562 release_mem_region(dma1->start, resource_size(dma1));
563err_release_sif:
564 release_mem_region(sifmem->start, resource_size(sifmem));
565err_release_map:
566 iounmap(mmio);
567err_release_scb:
568 release_mem_region(scbmem->start, resource_size(scbmem));
569err_release_none:
570 return ret;
571}
572
573static int s6000_i2s_remove(struct platform_device *pdev)
574{
575 struct s6000_i2s_dev *dev = dev_get_drvdata(&pdev->dev);
576 struct resource *region;
577 void __iomem *mmio = dev->scbbase;
578
579 snd_soc_unregister_component(&pdev->dev);
580
581 s6000_i2s_stop_channel(dev, 0);
582 s6000_i2s_stop_channel(dev, 1);
583
584 s6_i2s_write_reg(dev, S6_I2S_INTERRUPT_ENABLE, 0);
585 kfree(dev);
586
587 region = platform_get_resource(pdev, IORESOURCE_DMA, 0);
588 release_mem_region(region->start, resource_size(region));
589
590 region = platform_get_resource(pdev, IORESOURCE_DMA, 1);
591 if (region)
592 release_mem_region(region->start, resource_size(region));
593
594 region = platform_get_resource(pdev, IORESOURCE_MEM, 0);
595 release_mem_region(region->start, resource_size(region));
596
597 iounmap(mmio);
598 region = platform_get_resource(pdev, IORESOURCE_IO, 0);
599 release_mem_region(region->start, resource_size(region));
600
601 return 0;
602}
603
604static struct platform_driver s6000_i2s_driver = {
605 .probe = s6000_i2s_probe,
606 .remove = s6000_i2s_remove,
607 .driver = {
608 .name = "s6000-i2s",
609 .owner = THIS_MODULE,
610 },
611};
612
613module_platform_driver(s6000_i2s_driver);
614
615MODULE_AUTHOR("Daniel Gloeckner");
616MODULE_DESCRIPTION("Stretch s6000 family I2S SoC Interface");
617MODULE_LICENSE("GPL");
diff --git a/sound/soc/s6000/s6000-i2s.h b/sound/soc/s6000/s6000-i2s.h
deleted file mode 100644
index 86aa1921c89e..000000000000
--- a/sound/soc/s6000/s6000-i2s.h
+++ /dev/null
@@ -1,23 +0,0 @@
1/*
2 * ALSA SoC I2S Audio Layer for the Stretch s6000 family
3 *
4 * Author: Daniel Gloeckner, <dg@emlix.com>
5 * Copyright: (C) 2009 emlix GmbH <info@emlix.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _S6000_I2S_H
13#define _S6000_I2S_H
14
15struct s6000_snd_platform_data {
16 int lines_in;
17 int lines_out;
18 int channel_in;
19 int channel_out;
20 int wide;
21 int same_rate;
22};
23#endif
diff --git a/sound/soc/s6000/s6000-pcm.c b/sound/soc/s6000/s6000-pcm.c
deleted file mode 100644
index fb8461e1b1f6..000000000000
--- a/sound/soc/s6000/s6000-pcm.c
+++ /dev/null
@@ -1,521 +0,0 @@
1/*
2 * ALSA PCM interface for the Stetch s6000 family
3 *
4 * Author: Daniel Gloeckner, <dg@emlix.com>
5 * Copyright: (C) 2009 emlix GmbH <info@emlix.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/module.h>
13#include <linux/init.h>
14#include <linux/platform_device.h>
15#include <linux/slab.h>
16#include <linux/dma-mapping.h>
17#include <linux/interrupt.h>
18
19#include <sound/core.h>
20#include <sound/pcm.h>
21#include <sound/pcm_params.h>
22#include <sound/soc.h>
23
24#include <asm/dma.h>
25#include <variant/dmac.h>
26
27#include "s6000-pcm.h"
28
29#define S6_PCM_PREALLOCATE_SIZE (96 * 1024)
30#define S6_PCM_PREALLOCATE_MAX (2048 * 1024)
31
32static struct snd_pcm_hardware s6000_pcm_hardware = {
33 .info = (SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER |
34 SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID |
35 SNDRV_PCM_INFO_PAUSE | SNDRV_PCM_INFO_JOINT_DUPLEX),
36 .buffer_bytes_max = 0x7ffffff0,
37 .period_bytes_min = 16,
38 .period_bytes_max = 0xfffff0,
39 .periods_min = 2,
40 .periods_max = 1024, /* no limit */
41 .fifo_size = 0,
42};
43
44struct s6000_runtime_data {
45 spinlock_t lock;
46 int period; /* current DMA period */
47};
48
49static void s6000_pcm_enqueue_dma(struct snd_pcm_substream *substream)
50{
51 struct snd_pcm_runtime *runtime = substream->runtime;
52 struct s6000_runtime_data *prtd = runtime->private_data;
53 struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
54 struct s6000_pcm_dma_params *par;
55 int channel;
56 unsigned int period_size;
57 unsigned int dma_offset;
58 dma_addr_t dma_pos;
59 dma_addr_t src, dst;
60
61 par = snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream);
62
63 period_size = snd_pcm_lib_period_bytes(substream);
64 dma_offset = prtd->period * period_size;
65 dma_pos = runtime->dma_addr + dma_offset;
66
67 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
68 src = dma_pos;
69 dst = par->sif_out;
70 channel = par->dma_out;
71 } else {
72 src = par->sif_in;
73 dst = dma_pos;
74 channel = par->dma_in;
75 }
76
77 if (!s6dmac_channel_enabled(DMA_MASK_DMAC(channel),
78 DMA_INDEX_CHNL(channel)))
79 return;
80
81 if (s6dmac_fifo_full(DMA_MASK_DMAC(channel), DMA_INDEX_CHNL(channel))) {
82 printk(KERN_ERR "s6000-pcm: fifo full\n");
83 return;
84 }
85
86 if (WARN_ON(period_size & 15))
87 return;
88 s6dmac_put_fifo(DMA_MASK_DMAC(channel), DMA_INDEX_CHNL(channel),
89 src, dst, period_size);
90
91 prtd->period++;
92 if (unlikely(prtd->period >= runtime->periods))
93 prtd->period = 0;
94}
95
96static irqreturn_t s6000_pcm_irq(int irq, void *data)
97{
98 struct snd_pcm *pcm = data;
99 struct snd_soc_pcm_runtime *runtime = pcm->private_data;
100 struct s6000_runtime_data *prtd;
101 unsigned int has_xrun;
102 int i, ret = IRQ_NONE;
103
104 for (i = 0; i < 2; ++i) {
105 struct snd_pcm_substream *substream = pcm->streams[i].substream;
106 struct s6000_pcm_dma_params *params =
107 snd_soc_dai_get_dma_data(runtime->cpu_dai, substream);
108 u32 channel;
109 unsigned int pending;
110
111 if (substream == SNDRV_PCM_STREAM_PLAYBACK)
112 channel = params->dma_out;
113 else
114 channel = params->dma_in;
115
116 has_xrun = params->check_xrun(runtime->cpu_dai);
117
118 if (!channel)
119 continue;
120
121 if (unlikely(has_xrun & (1 << i)) &&
122 substream->runtime &&
123 snd_pcm_running(substream)) {
124 dev_dbg(pcm->dev, "xrun\n");
125 snd_pcm_stream_lock(substream);
126 snd_pcm_stop(substream, SNDRV_PCM_STATE_XRUN);
127 snd_pcm_stream_unlock(substream);
128 ret = IRQ_HANDLED;
129 }
130
131 pending = s6dmac_int_sources(DMA_MASK_DMAC(channel),
132 DMA_INDEX_CHNL(channel));
133
134 if (pending & 1) {
135 ret = IRQ_HANDLED;
136 if (likely(substream->runtime &&
137 snd_pcm_running(substream))) {
138 snd_pcm_period_elapsed(substream);
139 dev_dbg(pcm->dev, "period elapsed %x %x\n",
140 s6dmac_cur_src(DMA_MASK_DMAC(channel),
141 DMA_INDEX_CHNL(channel)),
142 s6dmac_cur_dst(DMA_MASK_DMAC(channel),
143 DMA_INDEX_CHNL(channel)));
144 prtd = substream->runtime->private_data;
145 spin_lock(&prtd->lock);
146 s6000_pcm_enqueue_dma(substream);
147 spin_unlock(&prtd->lock);
148 }
149 }
150
151 if (unlikely(pending & ~7)) {
152 if (pending & (1 << 3))
153 printk(KERN_WARNING
154 "s6000-pcm: DMA %x Underflow\n",
155 channel);
156 if (pending & (1 << 4))
157 printk(KERN_WARNING
158 "s6000-pcm: DMA %x Overflow\n",
159 channel);
160 if (pending & 0x1e0)
161 printk(KERN_WARNING
162 "s6000-pcm: DMA %x Master Error "
163 "(mask %x)\n",
164 channel, pending >> 5);
165
166 }
167 }
168
169 return ret;
170}
171
172static int s6000_pcm_start(struct snd_pcm_substream *substream)
173{
174 struct s6000_runtime_data *prtd = substream->runtime->private_data;
175 struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
176 struct s6000_pcm_dma_params *par;
177 unsigned long flags;
178 int srcinc;
179 u32 dma;
180
181 par = snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream);
182
183 spin_lock_irqsave(&prtd->lock, flags);
184
185 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
186 srcinc = 1;
187 dma = par->dma_out;
188 } else {
189 srcinc = 0;
190 dma = par->dma_in;
191 }
192 s6dmac_enable_chan(DMA_MASK_DMAC(dma), DMA_INDEX_CHNL(dma),
193 1 /* priority 1 (0 is max) */,
194 0 /* peripheral requests w/o xfer length mode */,
195 srcinc /* source address increment */,
196 srcinc^1 /* destination address increment */,
197 0 /* chunksize 0 (skip impossible on this dma) */,
198 0 /* source skip after chunk (impossible) */,
199 0 /* destination skip after chunk (impossible) */,
200 4 /* 16 byte burst size */,
201 -1 /* don't conserve bandwidth */,
202 0 /* low watermark irq descriptor threshold */,
203 0 /* disable hardware timestamps */,
204 1 /* enable channel */);
205
206 s6000_pcm_enqueue_dma(substream);
207 s6000_pcm_enqueue_dma(substream);
208
209 spin_unlock_irqrestore(&prtd->lock, flags);
210
211 return 0;
212}
213
214static int s6000_pcm_stop(struct snd_pcm_substream *substream)
215{
216 struct s6000_runtime_data *prtd = substream->runtime->private_data;
217 struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
218 struct s6000_pcm_dma_params *par;
219 unsigned long flags;
220 u32 channel;
221
222 par = snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream);
223
224 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
225 channel = par->dma_out;
226 else
227 channel = par->dma_in;
228
229 s6dmac_set_terminal_count(DMA_MASK_DMAC(channel),
230 DMA_INDEX_CHNL(channel), 0);
231
232 spin_lock_irqsave(&prtd->lock, flags);
233
234 s6dmac_disable_chan(DMA_MASK_DMAC(channel), DMA_INDEX_CHNL(channel));
235
236 spin_unlock_irqrestore(&prtd->lock, flags);
237
238 return 0;
239}
240
241static int s6000_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
242{
243 struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
244 struct s6000_pcm_dma_params *par;
245 int ret;
246
247 par = snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream);
248
249 ret = par->trigger(substream, cmd, 0);
250 if (ret < 0)
251 return ret;
252
253 switch (cmd) {
254 case SNDRV_PCM_TRIGGER_START:
255 case SNDRV_PCM_TRIGGER_RESUME:
256 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
257 ret = s6000_pcm_start(substream);
258 break;
259 case SNDRV_PCM_TRIGGER_STOP:
260 case SNDRV_PCM_TRIGGER_SUSPEND:
261 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
262 ret = s6000_pcm_stop(substream);
263 break;
264 default:
265 ret = -EINVAL;
266 }
267 if (ret < 0)
268 return ret;
269
270 return par->trigger(substream, cmd, 1);
271}
272
273static int s6000_pcm_prepare(struct snd_pcm_substream *substream)
274{
275 struct s6000_runtime_data *prtd = substream->runtime->private_data;
276
277 prtd->period = 0;
278
279 return 0;
280}
281
282static snd_pcm_uframes_t s6000_pcm_pointer(struct snd_pcm_substream *substream)
283{
284 struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
285 struct s6000_pcm_dma_params *par;
286 struct snd_pcm_runtime *runtime = substream->runtime;
287 struct s6000_runtime_data *prtd = runtime->private_data;
288 unsigned long flags;
289 unsigned int offset;
290 dma_addr_t count;
291
292 par = snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream);
293
294 spin_lock_irqsave(&prtd->lock, flags);
295
296 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
297 count = s6dmac_cur_src(DMA_MASK_DMAC(par->dma_out),
298 DMA_INDEX_CHNL(par->dma_out));
299 else
300 count = s6dmac_cur_dst(DMA_MASK_DMAC(par->dma_in),
301 DMA_INDEX_CHNL(par->dma_in));
302
303 count -= runtime->dma_addr;
304
305 spin_unlock_irqrestore(&prtd->lock, flags);
306
307 offset = bytes_to_frames(runtime, count);
308 if (unlikely(offset >= runtime->buffer_size))
309 offset = 0;
310
311 return offset;
312}
313
314static int s6000_pcm_open(struct snd_pcm_substream *substream)
315{
316 struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
317 struct s6000_pcm_dma_params *par;
318 struct snd_pcm_runtime *runtime = substream->runtime;
319 struct s6000_runtime_data *prtd;
320 int ret;
321
322 par = snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream);
323 snd_soc_set_runtime_hwparams(substream, &s6000_pcm_hardware);
324
325 ret = snd_pcm_hw_constraint_step(runtime, 0,
326 SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 16);
327 if (ret < 0)
328 return ret;
329 ret = snd_pcm_hw_constraint_step(runtime, 0,
330 SNDRV_PCM_HW_PARAM_BUFFER_BYTES, 16);
331 if (ret < 0)
332 return ret;
333 ret = snd_pcm_hw_constraint_integer(runtime,
334 SNDRV_PCM_HW_PARAM_PERIODS);
335 if (ret < 0)
336 return ret;
337
338 if (par->same_rate) {
339 int rate;
340 spin_lock(&par->lock); /* needed? */
341 rate = par->rate;
342 spin_unlock(&par->lock);
343 if (rate != -1) {
344 ret = snd_pcm_hw_constraint_minmax(runtime,
345 SNDRV_PCM_HW_PARAM_RATE,
346 rate, rate);
347 if (ret < 0)
348 return ret;
349 }
350 }
351
352 prtd = kzalloc(sizeof(struct s6000_runtime_data), GFP_KERNEL);
353 if (prtd == NULL)
354 return -ENOMEM;
355
356 spin_lock_init(&prtd->lock);
357
358 runtime->private_data = prtd;
359
360 return 0;
361}
362
363static int s6000_pcm_close(struct snd_pcm_substream *substream)
364{
365 struct snd_pcm_runtime *runtime = substream->runtime;
366 struct s6000_runtime_data *prtd = runtime->private_data;
367
368 kfree(prtd);
369
370 return 0;
371}
372
373static int s6000_pcm_hw_params(struct snd_pcm_substream *substream,
374 struct snd_pcm_hw_params *hw_params)
375{
376 struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
377 struct s6000_pcm_dma_params *par;
378 int ret;
379 ret = snd_pcm_lib_malloc_pages(substream,
380 params_buffer_bytes(hw_params));
381 if (ret < 0) {
382 printk(KERN_WARNING "s6000-pcm: allocation of memory failed\n");
383 return ret;
384 }
385
386 par = snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream);
387
388 if (par->same_rate) {
389 spin_lock(&par->lock);
390 if (par->rate == -1 ||
391 !(par->in_use & ~(1 << substream->stream))) {
392 par->rate = params_rate(hw_params);
393 par->in_use |= 1 << substream->stream;
394 } else if (params_rate(hw_params) != par->rate) {
395 snd_pcm_lib_free_pages(substream);
396 par->in_use &= ~(1 << substream->stream);
397 ret = -EBUSY;
398 }
399 spin_unlock(&par->lock);
400 }
401 return ret;
402}
403
404static int s6000_pcm_hw_free(struct snd_pcm_substream *substream)
405{
406 struct snd_soc_pcm_runtime *soc_runtime = substream->private_data;
407 struct s6000_pcm_dma_params *par =
408 snd_soc_dai_get_dma_data(soc_runtime->cpu_dai, substream);
409
410 spin_lock(&par->lock);
411 par->in_use &= ~(1 << substream->stream);
412 if (!par->in_use)
413 par->rate = -1;
414 spin_unlock(&par->lock);
415
416 return snd_pcm_lib_free_pages(substream);
417}
418
419static struct snd_pcm_ops s6000_pcm_ops = {
420 .open = s6000_pcm_open,
421 .close = s6000_pcm_close,
422 .ioctl = snd_pcm_lib_ioctl,
423 .hw_params = s6000_pcm_hw_params,
424 .hw_free = s6000_pcm_hw_free,
425 .trigger = s6000_pcm_trigger,
426 .prepare = s6000_pcm_prepare,
427 .pointer = s6000_pcm_pointer,
428};
429
430static void s6000_pcm_free(struct snd_pcm *pcm)
431{
432 struct snd_soc_pcm_runtime *runtime = pcm->private_data;
433 struct s6000_pcm_dma_params *params =
434 snd_soc_dai_get_dma_data(runtime->cpu_dai,
435 pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream);
436
437 free_irq(params->irq, pcm);
438 snd_pcm_lib_preallocate_free_for_all(pcm);
439}
440
441static int s6000_pcm_new(struct snd_soc_pcm_runtime *runtime)
442{
443 struct snd_card *card = runtime->card->snd_card;
444 struct snd_pcm *pcm = runtime->pcm;
445 struct s6000_pcm_dma_params *params;
446 int res;
447
448 params = snd_soc_dai_get_dma_data(runtime->cpu_dai,
449 pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream);
450
451 res = dma_coerce_mask_and_coherent(card->dev, DMA_BIT_MASK(32));
452 if (res)
453 return res;
454
455 if (params->dma_in) {
456 s6dmac_disable_chan(DMA_MASK_DMAC(params->dma_in),
457 DMA_INDEX_CHNL(params->dma_in));
458 s6dmac_int_sources(DMA_MASK_DMAC(params->dma_in),
459 DMA_INDEX_CHNL(params->dma_in));
460 }
461
462 if (params->dma_out) {
463 s6dmac_disable_chan(DMA_MASK_DMAC(params->dma_out),
464 DMA_INDEX_CHNL(params->dma_out));
465 s6dmac_int_sources(DMA_MASK_DMAC(params->dma_out),
466 DMA_INDEX_CHNL(params->dma_out));
467 }
468
469 res = request_irq(params->irq, s6000_pcm_irq, IRQF_SHARED,
470 "s6000-audio", pcm);
471 if (res) {
472 printk(KERN_ERR "s6000-pcm couldn't get IRQ\n");
473 return res;
474 }
475
476 res = snd_pcm_lib_preallocate_pages_for_all(pcm,
477 SNDRV_DMA_TYPE_DEV,
478 card->dev,
479 S6_PCM_PREALLOCATE_SIZE,
480 S6_PCM_PREALLOCATE_MAX);
481 if (res)
482 printk(KERN_WARNING "s6000-pcm: preallocation failed\n");
483
484 spin_lock_init(&params->lock);
485 params->in_use = 0;
486 params->rate = -1;
487 return 0;
488}
489
490static struct snd_soc_platform_driver s6000_soc_platform = {
491 .ops = &s6000_pcm_ops,
492 .pcm_new = s6000_pcm_new,
493 .pcm_free = s6000_pcm_free,
494};
495
496static int s6000_soc_platform_probe(struct platform_device *pdev)
497{
498 return snd_soc_register_platform(&pdev->dev, &s6000_soc_platform);
499}
500
501static int s6000_soc_platform_remove(struct platform_device *pdev)
502{
503 snd_soc_unregister_platform(&pdev->dev);
504 return 0;
505}
506
507static struct platform_driver s6000_pcm_driver = {
508 .driver = {
509 .name = "s6000-pcm-audio",
510 .owner = THIS_MODULE,
511 },
512
513 .probe = s6000_soc_platform_probe,
514 .remove = s6000_soc_platform_remove,
515};
516
517module_platform_driver(s6000_pcm_driver);
518
519MODULE_AUTHOR("Daniel Gloeckner");
520MODULE_DESCRIPTION("Stretch s6000 family PCM DMA module");
521MODULE_LICENSE("GPL");
diff --git a/sound/soc/s6000/s6000-pcm.h b/sound/soc/s6000/s6000-pcm.h
deleted file mode 100644
index 09d9b883e58b..000000000000
--- a/sound/soc/s6000/s6000-pcm.h
+++ /dev/null
@@ -1,33 +0,0 @@
1/*
2 * ALSA PCM interface for the Stretch s6000 family
3 *
4 * Author: Daniel Gloeckner, <dg@emlix.com>
5 * Copyright: (C) 2009 emlix GmbH <info@emlix.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#ifndef _S6000_PCM_H
13#define _S6000_PCM_H
14
15struct snd_soc_dai;
16struct snd_pcm_substream;
17
18struct s6000_pcm_dma_params {
19 unsigned int (*check_xrun)(struct snd_soc_dai *cpu_dai);
20 int (*trigger)(struct snd_pcm_substream *substream, int cmd, int after);
21 dma_addr_t sif_in;
22 dma_addr_t sif_out;
23 u32 dma_in;
24 u32 dma_out;
25 int irq;
26 int same_rate;
27
28 spinlock_t lock;
29 int in_use;
30 int rate;
31};
32
33#endif
diff --git a/sound/soc/s6000/s6105-ipcam.c b/sound/soc/s6000/s6105-ipcam.c
deleted file mode 100644
index 3510c01f8a6a..000000000000
--- a/sound/soc/s6000/s6105-ipcam.c
+++ /dev/null
@@ -1,221 +0,0 @@
1/*
2 * ASoC driver for Stretch s6105 IP camera platform
3 *
4 * Author: Daniel Gloeckner, <dg@emlix.com>
5 * Copyright: (C) 2009 emlix GmbH <info@emlix.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/module.h>
13#include <linux/moduleparam.h>
14#include <linux/timer.h>
15#include <linux/interrupt.h>
16#include <linux/platform_device.h>
17#include <linux/i2c.h>
18#include <sound/core.h>
19#include <sound/pcm.h>
20#include <sound/soc.h>
21
22#include "s6000-pcm.h"
23#include "s6000-i2s.h"
24
25#define S6105_CAM_CODEC_CLOCK 12288000
26
27static int s6105_hw_params(struct snd_pcm_substream *substream,
28 struct snd_pcm_hw_params *params)
29{
30 struct snd_soc_pcm_runtime *rtd = substream->private_data;
31 struct snd_soc_dai *codec_dai = rtd->codec_dai;
32 struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
33 int ret = 0;
34
35 /* set codec DAI configuration */
36 ret = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_I2S |
37 SND_SOC_DAIFMT_CBM_CFM);
38 if (ret < 0)
39 return ret;
40
41 /* set cpu DAI configuration */
42 ret = snd_soc_dai_set_fmt(cpu_dai, SND_SOC_DAIFMT_CBM_CFM |
43 SND_SOC_DAIFMT_NB_NF);
44 if (ret < 0)
45 return ret;
46
47 /* set the codec system clock */
48 ret = snd_soc_dai_set_sysclk(codec_dai, 0, S6105_CAM_CODEC_CLOCK,
49 SND_SOC_CLOCK_OUT);
50 if (ret < 0)
51 return ret;
52
53 return 0;
54}
55
56static struct snd_soc_ops s6105_ops = {
57 .hw_params = s6105_hw_params,
58};
59
60/* s6105 machine dapm widgets */
61static const struct snd_soc_dapm_widget aic3x_dapm_widgets[] = {
62 SND_SOC_DAPM_LINE("Audio Out Differential", NULL),
63 SND_SOC_DAPM_LINE("Audio Out Stereo", NULL),
64 SND_SOC_DAPM_LINE("Audio In", NULL),
65};
66
67/* s6105 machine audio_mapnections to the codec pins */
68static const struct snd_soc_dapm_route audio_map[] = {
69 /* Audio Out connected to HPLOUT, HPLCOM, HPROUT */
70 {"Audio Out Differential", NULL, "HPLOUT"},
71 {"Audio Out Differential", NULL, "HPLCOM"},
72 {"Audio Out Stereo", NULL, "HPLOUT"},
73 {"Audio Out Stereo", NULL, "HPROUT"},
74
75 /* Audio In connected to LINE1L, LINE1R */
76 {"LINE1L", NULL, "Audio In"},
77 {"LINE1R", NULL, "Audio In"},
78};
79
80static int output_type_info(struct snd_kcontrol *kcontrol,
81 struct snd_ctl_elem_info *uinfo)
82{
83 uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
84 uinfo->count = 1;
85 uinfo->value.enumerated.items = 2;
86 if (uinfo->value.enumerated.item) {
87 uinfo->value.enumerated.item = 1;
88 strcpy(uinfo->value.enumerated.name, "HPLOUT/HPROUT");
89 } else {
90 strcpy(uinfo->value.enumerated.name, "HPLOUT/HPLCOM");
91 }
92 return 0;
93}
94
95static int output_type_get(struct snd_kcontrol *kcontrol,
96 struct snd_ctl_elem_value *ucontrol)
97{
98 ucontrol->value.enumerated.item[0] = kcontrol->private_value;
99 return 0;
100}
101
102static int output_type_put(struct snd_kcontrol *kcontrol,
103 struct snd_ctl_elem_value *ucontrol)
104{
105 struct snd_soc_card *card = kcontrol->private_data;
106 struct snd_soc_dapm_context *dapm = &card->dapm;
107 unsigned int val = (ucontrol->value.enumerated.item[0] != 0);
108 char *differential = "Audio Out Differential";
109 char *stereo = "Audio Out Stereo";
110
111 if (kcontrol->private_value == val)
112 return 0;
113 kcontrol->private_value = val;
114 snd_soc_dapm_disable_pin(dapm, val ? differential : stereo);
115 snd_soc_dapm_sync(dapm);
116 snd_soc_dapm_enable_pin(dapm, val ? stereo : differential);
117 snd_soc_dapm_sync(dapm);
118
119 return 1;
120}
121
122static const struct snd_kcontrol_new audio_out_mux = {
123 .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
124 .name = "Master Output Mux",
125 .index = 0,
126 .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
127 .info = output_type_info,
128 .get = output_type_get,
129 .put = output_type_put,
130 .private_value = 1 /* default to stereo */
131};
132
133/* Logic for a aic3x as connected on the s6105 ip camera ref design */
134static int s6105_aic3x_init(struct snd_soc_pcm_runtime *rtd)
135{
136 struct snd_soc_card *card = rtd->card;
137
138 /* must correspond to audio_out_mux.private_value initializer */
139 snd_soc_dapm_disable_pin(&card->dapm, "Audio Out Differential");
140
141 snd_ctl_add(card->snd_card, snd_ctl_new1(&audio_out_mux, card));
142
143 return 0;
144}
145
146/* s6105 digital audio interface glue - connects codec <--> CPU */
147static struct snd_soc_dai_link s6105_dai = {
148 .name = "TLV320AIC31",
149 .stream_name = "AIC31",
150 .cpu_dai_name = "s6000-i2s",
151 .codec_dai_name = "tlv320aic3x-hifi",
152 .platform_name = "s6000-pcm-audio",
153 .codec_name = "tlv320aic3x-codec.0-001a",
154 .init = s6105_aic3x_init,
155 .ops = &s6105_ops,
156};
157
158/* s6105 audio machine driver */
159static struct snd_soc_card snd_soc_card_s6105 = {
160 .name = "Stretch IP Camera",
161 .owner = THIS_MODULE,
162 .dai_link = &s6105_dai,
163 .num_links = 1,
164
165 .dapm_widgets = aic3x_dapm_widgets,
166 .num_dapm_widgets = ARRAY_SIZE(aic3x_dapm_widgets),
167 .dapm_routes = audio_map,
168 .num_dapm_routes = ARRAY_SIZE(audio_map),
169 .fully_routed = true,
170};
171
172static struct s6000_snd_platform_data s6105_snd_data __initdata = {
173 .wide = 0,
174 .channel_in = 0,
175 .channel_out = 1,
176 .lines_in = 1,
177 .lines_out = 1,
178 .same_rate = 1,
179};
180
181static struct platform_device *s6105_snd_device;
182
183/* temporary i2c device creation until this can be moved into the machine
184 * support file.
185*/
186static struct i2c_board_info i2c_device[] = {
187 { I2C_BOARD_INFO("tlv320aic33", 0x18), }
188};
189
190static int __init s6105_init(void)
191{
192 int ret;
193
194 i2c_register_board_info(0, i2c_device, ARRAY_SIZE(i2c_device));
195
196 s6105_snd_device = platform_device_alloc("soc-audio", -1);
197 if (!s6105_snd_device)
198 return -ENOMEM;
199
200 platform_set_drvdata(s6105_snd_device, &snd_soc_card_s6105);
201 platform_device_add_data(s6105_snd_device, &s6105_snd_data,
202 sizeof(s6105_snd_data));
203
204 ret = platform_device_add(s6105_snd_device);
205 if (ret)
206 platform_device_put(s6105_snd_device);
207
208 return ret;
209}
210
211static void __exit s6105_exit(void)
212{
213 platform_device_unregister(s6105_snd_device);
214}
215
216module_init(s6105_init);
217module_exit(s6105_exit);
218
219MODULE_AUTHOR("Daniel Gloeckner");
220MODULE_DESCRIPTION("Stretch s6105 IP camera ASoC driver");
221MODULE_LICENSE("GPL");
diff --git a/sound/soc/samsung/ac97.c b/sound/soc/samsung/ac97.c
index e1615113fd84..7952a625669d 100644
--- a/sound/soc/samsung/ac97.c
+++ b/sound/soc/samsung/ac97.c
@@ -288,7 +288,7 @@ static int s3c_ac97_mic_dai_probe(struct snd_soc_dai *dai)
 static struct snd_soc_dai_driver s3c_ac97_dai[] = {
 	[S3C_AC97_DAI_PCM] = {
 		.name = "samsung-ac97",
-		.ac97_control = 1,
+		.bus_control = true,
 		.playback = {
 			.stream_name = "AC97 Playback",
 			.channels_min = 2,
@@ -306,7 +306,7 @@ static struct snd_soc_dai_driver s3c_ac97_dai[] = {
 	},
 	[S3C_AC97_DAI_MIC] = {
 		.name = "samsung-ac97-mic",
-		.ac97_control = 1,
+		.bus_control = true,
 		.capture = {
 			.stream_name = "AC97 Mic Capture",
 			.channels_min = 1,
diff --git a/sound/soc/samsung/snow.c b/sound/soc/samsung/snow.c
index 0acf5d0eed53..72118a77dd5b 100644
--- a/sound/soc/samsung/snow.c
+++ b/sound/soc/samsung/snow.c
@@ -110,6 +110,7 @@ static const struct of_device_id snow_of_match[] = {
 	{ .compatible = "google,snow-audio-max98095", },
 	{},
 };
+MODULE_DEVICE_TABLE(of, snow_of_match);
 
 static struct platform_driver snow_driver = {
 	.driver = {
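
Note: MODULE_DEVICE_TABLE(of, snow_of_match) exports the OF match table in the module's alias metadata, letting udev/modprobe autoload the driver when a matching compatible string appears in the device tree; without it the module only binds if loaded by hand. The usual pairing looks like the sketch below (the .of_match_table hookup is assumed, not shown in this hunk):

	static const struct of_device_id snow_of_match[] = {
		/* earlier entries elided */
		{ .compatible = "google,snow-audio-max98095", },
		{},
	};
	MODULE_DEVICE_TABLE(of, snow_of_match);

	static struct platform_driver snow_driver = {
		.driver = {
			.of_match_table = snow_of_match,	/* assumed */
			/* ... */
		},
		/* ... */
	};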
diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c
index 66fddec9543d..8869971d7884 100644
--- a/sound/soc/sh/fsi.c
+++ b/sound/soc/sh/fsi.c
@@ -842,12 +842,9 @@ static int fsi_clk_disable(struct device *dev,
 		return -EINVAL;
 
 	if (1 == clock->count--) {
-		if (clock->xck)
-			clk_disable(clock->xck);
-		if (clock->ick)
-			clk_disable(clock->ick);
-		if (clock->div)
-			clk_disable(clock->div);
+		clk_disable(clock->xck);
+		clk_disable(clock->ick);
+		clk_disable(clock->div);
 	}
 
 	return 0;
@@ -1711,8 +1708,7 @@ static const struct snd_soc_dai_ops fsi_dai_ops = {
 static struct snd_pcm_hardware fsi_pcm_hardware = {
 	.info =		SNDRV_PCM_INFO_INTERLEAVED	|
 			SNDRV_PCM_INFO_MMAP		|
-			SNDRV_PCM_INFO_MMAP_VALID	|
-			SNDRV_PCM_INFO_PAUSE,
+			SNDRV_PCM_INFO_MMAP_VALID,
 	.buffer_bytes_max	= 64 * 1024,
 	.period_bytes_min	= 32,
 	.period_bytes_max	= 8192,
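
Note: both fsi hunks rely on the core being forgiving. clk_disable() in the common clock framework returns early for a NULL (or error-pointer) clock, which is why the per-clock NULL tests can go, and SNDRV_PCM_INFO_PAUSE is dropped from the advertised capabilities, presumably because the trigger path never handled the pause commands (the rcar hunk below makes the same change). A hypothetical trigger fragment showing what keeping the PAUSE flag would oblige a driver to implement (the foo_* names are illustrative):

	static int foo_trigger(struct snd_pcm_substream *substream, int cmd)
	{
		switch (cmd) {
		case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
			return foo_pause_dma(substream);	/* assumed helper */
		case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
			return foo_resume_dma(substream);	/* assumed helper */
		/* START/STOP/SUSPEND handling elided */
		default:
			return -EINVAL;
		}
	}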
diff --git a/sound/soc/sh/hac.c b/sound/soc/sh/hac.c
index 0af2e4dfd139..d5f567e085ff 100644
--- a/sound/soc/sh/hac.c
+++ b/sound/soc/sh/hac.c
@@ -272,7 +272,7 @@ static const struct snd_soc_dai_ops hac_dai_ops = {
272static struct snd_soc_dai_driver sh4_hac_dai[] = { 272static struct snd_soc_dai_driver sh4_hac_dai[] = {
273{ 273{
274 .name = "hac-dai.0", 274 .name = "hac-dai.0",
275 .ac97_control = 1, 275 .bus_control = true,
276 .playback = { 276 .playback = {
277 .rates = AC97_RATES, 277 .rates = AC97_RATES,
278 .formats = AC97_FMTS, 278 .formats = AC97_FMTS,
diff --git a/sound/soc/sh/rcar/core.c b/sound/soc/sh/rcar/core.c
index 1922ec57d10a..70042197f9e2 100644
--- a/sound/soc/sh/rcar/core.c
+++ b/sound/soc/sh/rcar/core.c
@@ -886,8 +886,7 @@ static int rsnd_dai_probe(struct platform_device *pdev,
 static struct snd_pcm_hardware rsnd_pcm_hardware = {
 	.info =		SNDRV_PCM_INFO_INTERLEAVED	|
 			SNDRV_PCM_INFO_MMAP		|
-			SNDRV_PCM_INFO_MMAP_VALID	|
-			SNDRV_PCM_INFO_PAUSE,
+			SNDRV_PCM_INFO_MMAP_VALID,
 	.buffer_bytes_max	= 64 * 1024,
 	.period_bytes_min	= 32,
 	.period_bytes_max	= 8192,
diff --git a/sound/soc/soc-ac97.c b/sound/soc/soc-ac97.c
new file mode 100644
index 000000000000..2e10e9a38376
--- /dev/null
+++ b/sound/soc/soc-ac97.c
@@ -0,0 +1,256 @@
1/*
2 * soc-ac97.c -- ALSA SoC Audio Layer AC97 support
3 *
4 * Copyright 2005 Wolfson Microelectronics PLC.
5 * Copyright 2005 Openedhand Ltd.
6 * Copyright (C) 2010 Slimlogic Ltd.
7 * Copyright (C) 2010 Texas Instruments Inc.
8 *
9 * Author: Liam Girdwood <lrg@slimlogic.co.uk>
10 * with code, comments and ideas from :-
11 * Richard Purdie <richard@openedhand.com>
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the
15 * Free Software Foundation; either version 2 of the License, or (at your
16 * option) any later version.
17 */
18
19#include <linux/ctype.h>
20#include <linux/delay.h>
21#include <linux/export.h>
22#include <linux/gpio.h>
23#include <linux/init.h>
24#include <linux/of_gpio.h>
25#include <linux/of.h>
26#include <linux/pinctrl/consumer.h>
27#include <linux/slab.h>
28#include <sound/ac97_codec.h>
29#include <sound/soc.h>
30
31struct snd_ac97_reset_cfg {
32 struct pinctrl *pctl;
33 struct pinctrl_state *pstate_reset;
34 struct pinctrl_state *pstate_warm_reset;
35 struct pinctrl_state *pstate_run;
36 int gpio_sdata;
37 int gpio_sync;
38 int gpio_reset;
39};
40
41static struct snd_ac97_bus soc_ac97_bus = {
42 .ops = NULL, /* Gets initialized in snd_soc_set_ac97_ops() */
43};
44
45static void soc_ac97_device_release(struct device *dev)
46{
47 kfree(to_ac97_t(dev));
48}
49
50/**
51 * snd_soc_new_ac97_codec - initailise AC97 device
52 * @codec: audio codec
53 *
54 * Initialises AC97 codec resources for use by ad-hoc devices only.
55 */
56struct snd_ac97 *snd_soc_new_ac97_codec(struct snd_soc_codec *codec)
57{
58 struct snd_ac97 *ac97;
59 int ret;
60
61 ac97 = kzalloc(sizeof(struct snd_ac97), GFP_KERNEL);
62 if (ac97 == NULL)
63 return ERR_PTR(-ENOMEM);
64
65 ac97->bus = &soc_ac97_bus;
66 ac97->num = 0;
67
68 ac97->dev.bus = &ac97_bus_type;
69 ac97->dev.parent = codec->component.card->dev;
70 ac97->dev.release = soc_ac97_device_release;
71
72 dev_set_name(&ac97->dev, "%d-%d:%s",
73 codec->component.card->snd_card->number, 0,
74 codec->component.name);
75
76 ret = device_register(&ac97->dev);
77 if (ret) {
78 put_device(&ac97->dev);
79 return ERR_PTR(ret);
80 }
81
82 return ac97;
83}
84EXPORT_SYMBOL_GPL(snd_soc_new_ac97_codec);
85
86/**
87 * snd_soc_free_ac97_codec - free AC97 codec device
88 * @codec: audio codec
89 *
90 * Frees AC97 codec device resources.
91 */
92void snd_soc_free_ac97_codec(struct snd_ac97 *ac97)
93{
94 device_del(&ac97->dev);
95 ac97->bus = NULL;
96 put_device(&ac97->dev);
97}
98EXPORT_SYMBOL_GPL(snd_soc_free_ac97_codec);
99
100static struct snd_ac97_reset_cfg snd_ac97_rst_cfg;
101
102static void snd_soc_ac97_warm_reset(struct snd_ac97 *ac97)
103{
104 struct pinctrl *pctl = snd_ac97_rst_cfg.pctl;
105
106 pinctrl_select_state(pctl, snd_ac97_rst_cfg.pstate_warm_reset);
107
108 gpio_direction_output(snd_ac97_rst_cfg.gpio_sync, 1);
109
110 udelay(10);
111
112 gpio_direction_output(snd_ac97_rst_cfg.gpio_sync, 0);
113
114 pinctrl_select_state(pctl, snd_ac97_rst_cfg.pstate_run);
115 msleep(2);
116}
117
118static void snd_soc_ac97_reset(struct snd_ac97 *ac97)
119{
120 struct pinctrl *pctl = snd_ac97_rst_cfg.pctl;
121
122 pinctrl_select_state(pctl, snd_ac97_rst_cfg.pstate_reset);
123
124 gpio_direction_output(snd_ac97_rst_cfg.gpio_sync, 0);
125 gpio_direction_output(snd_ac97_rst_cfg.gpio_sdata, 0);
126 gpio_direction_output(snd_ac97_rst_cfg.gpio_reset, 0);
127
128 udelay(10);
129
130 gpio_direction_output(snd_ac97_rst_cfg.gpio_reset, 1);
131
132 pinctrl_select_state(pctl, snd_ac97_rst_cfg.pstate_run);
133 msleep(2);
134}
135
136static int snd_soc_ac97_parse_pinctl(struct device *dev,
137 struct snd_ac97_reset_cfg *cfg)
138{
139 struct pinctrl *p;
140 struct pinctrl_state *state;
141 int gpio;
142 int ret;
143
144 p = devm_pinctrl_get(dev);
145 if (IS_ERR(p)) {
146 dev_err(dev, "Failed to get pinctrl\n");
147 return PTR_ERR(p);
148 }
149 cfg->pctl = p;
150
151 state = pinctrl_lookup_state(p, "ac97-reset");
152 if (IS_ERR(state)) {
153 dev_err(dev, "Can't find pinctrl state ac97-reset\n");
154 return PTR_ERR(state);
155 }
156 cfg->pstate_reset = state;
157
158 state = pinctrl_lookup_state(p, "ac97-warm-reset");
159 if (IS_ERR(state)) {
160 dev_err(dev, "Can't find pinctrl state ac97-warm-reset\n");
161 return PTR_ERR(state);
162 }
163 cfg->pstate_warm_reset = state;
164
165 state = pinctrl_lookup_state(p, "ac97-running");
166 if (IS_ERR(state)) {
167 dev_err(dev, "Can't find pinctrl state ac97-running\n");
168 return PTR_ERR(state);
169 }
170 cfg->pstate_run = state;
171
172 gpio = of_get_named_gpio(dev->of_node, "ac97-gpios", 0);
173 if (gpio < 0) {
174 dev_err(dev, "Can't find ac97-sync gpio\n");
175 return gpio;
176 }
177 ret = devm_gpio_request(dev, gpio, "AC97 link sync");
178 if (ret) {
179 dev_err(dev, "Failed requesting ac97-sync gpio\n");
180 return ret;
181 }
182 cfg->gpio_sync = gpio;
183
184 gpio = of_get_named_gpio(dev->of_node, "ac97-gpios", 1);
185 if (gpio < 0) {
186 dev_err(dev, "Can't find ac97-sdata gpio %d\n", gpio);
187 return gpio;
188 }
189 ret = devm_gpio_request(dev, gpio, "AC97 link sdata");
190 if (ret) {
191 dev_err(dev, "Failed requesting ac97-sdata gpio\n");
192 return ret;
193 }
194 cfg->gpio_sdata = gpio;
195
196 gpio = of_get_named_gpio(dev->of_node, "ac97-gpios", 2);
197 if (gpio < 0) {
198 dev_err(dev, "Can't find ac97-reset gpio\n");
199 return gpio;
200 }
201 ret = devm_gpio_request(dev, gpio, "AC97 link reset");
202 if (ret) {
203 dev_err(dev, "Failed requesting ac97-reset gpio\n");
204 return ret;
205 }
206 cfg->gpio_reset = gpio;
207
208 return 0;
209}
210
211struct snd_ac97_bus_ops *soc_ac97_ops;
212EXPORT_SYMBOL_GPL(soc_ac97_ops);
213
214int snd_soc_set_ac97_ops(struct snd_ac97_bus_ops *ops)
215{
216 if (ops == soc_ac97_ops)
217 return 0;
218
219 if (soc_ac97_ops && ops)
220 return -EBUSY;
221
222 soc_ac97_ops = ops;
223 soc_ac97_bus.ops = ops;
224
225 return 0;
226}
227EXPORT_SYMBOL_GPL(snd_soc_set_ac97_ops);
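A usage sketch, not taken from this diff: the CPU-side AC97 controller driver installs its bus ops once at probe time and clears them again on remove. The accessor stubs and names below are placeholders for the controller's real AC-link register I/O.

static unsigned short my_ac97_read(struct snd_ac97 *ac97, unsigned short reg)
{
	/* a real driver would read the register over the AC-link here */
	return 0;
}

static void my_ac97_write(struct snd_ac97 *ac97, unsigned short reg,
			  unsigned short val)
{
	/* a real driver would write the register over the AC-link here */
}

static struct snd_ac97_bus_ops my_ac97_ops = {
	.read	= my_ac97_read,
	.write	= my_ac97_write,
};

static int my_ac97_controller_probe(struct platform_device *pdev)
{
	/* fails with -EBUSY if another controller already owns the ops */
	return snd_soc_set_ac97_ops(&my_ac97_ops);
}

static int my_ac97_controller_remove(struct platform_device *pdev)
{
	snd_soc_set_ac97_ops(NULL);
	return 0;
}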
228
229/**
230 * snd_soc_set_ac97_ops_of_reset - Set ac97 ops with generic ac97 reset functions
231 * @ops: AC97 bus operations to install
232 * @pdev: platform device providing the reset pinctrl states and GPIOs
233 *
232 * This function sets the reset and warm_reset properties of ops and parses
233 * the device node of pdev to get pinctrl states and gpio numbers to use.
234 */
235int snd_soc_set_ac97_ops_of_reset(struct snd_ac97_bus_ops *ops,
236 struct platform_device *pdev)
237{
238 struct device *dev = &pdev->dev;
239 struct snd_ac97_reset_cfg cfg;
240 int ret;
241
242 ret = snd_soc_ac97_parse_pinctl(dev, &cfg);
243 if (ret)
244 return ret;
245
246 ret = snd_soc_set_ac97_ops(ops);
247 if (ret)
248 return ret;
249
250 ops->warm_reset = snd_soc_ac97_warm_reset;
251 ops->reset = snd_soc_ac97_reset;
252
253 snd_ac97_rst_cfg = cfg;
254 return 0;
255}
256EXPORT_SYMBOL_GPL(snd_soc_set_ac97_ops_of_reset);
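A minimal sketch of the expected caller, not part of this diff: a platform driver whose device node carries the "ac97-running"/"ac97-reset"/"ac97-warm-reset" pinctrl states and the three "ac97-gpios" parsed above hands its (non-const) bus ops and its platform device to the helper. The names are illustrative.

/* .read/.write provided elsewhere by the controller driver;
 * .reset/.warm_reset are filled in by snd_soc_set_ac97_ops_of_reset() */
static struct snd_ac97_bus_ops my_of_ac97_ops;

static int my_ac97_platform_probe(struct platform_device *pdev)
{
	int ret;

	ret = snd_soc_set_ac97_ops_of_reset(&my_of_ac97_ops, pdev);
	if (ret)
		dev_err(&pdev->dev, "failed to set AC97 reset ops: %d\n", ret);

	return ret;
}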
diff --git a/sound/soc/soc-cache.c b/sound/soc/soc-cache.c
index a9f82b5aba9d..07f43356f963 100644
--- a/sound/soc/soc-cache.c
+++ b/sound/soc/soc-cache.c
@@ -15,56 +15,6 @@
15#include <linux/export.h> 15#include <linux/export.h>
16#include <linux/slab.h> 16#include <linux/slab.h>
17 17
18#include <trace/events/asoc.h>
19
20static bool snd_soc_set_cache_val(void *base, unsigned int idx,
21 unsigned int val, unsigned int word_size)
22{
23 switch (word_size) {
24 case 1: {
25 u8 *cache = base;
26 if (cache[idx] == val)
27 return true;
28 cache[idx] = val;
29 break;
30 }
31 case 2: {
32 u16 *cache = base;
33 if (cache[idx] == val)
34 return true;
35 cache[idx] = val;
36 break;
37 }
38 default:
39 WARN(1, "Invalid word_size %d\n", word_size);
40 break;
41 }
42 return false;
43}
44
45static unsigned int snd_soc_get_cache_val(const void *base, unsigned int idx,
46 unsigned int word_size)
47{
48 if (!base)
49 return -1;
50
51 switch (word_size) {
52 case 1: {
53 const u8 *cache = base;
54 return cache[idx];
55 }
56 case 2: {
57 const u16 *cache = base;
58 return cache[idx];
59 }
60 default:
61 WARN(1, "Invalid word_size %d\n", word_size);
62 break;
63 }
64 /* unreachable */
65 return -1;
66}
67
68int snd_soc_cache_init(struct snd_soc_codec *codec) 18int snd_soc_cache_init(struct snd_soc_codec *codec)
69{ 19{
70 const struct snd_soc_codec_driver *codec_drv = codec->driver; 20 const struct snd_soc_codec_driver *codec_drv = codec->driver;
@@ -75,8 +25,6 @@ int snd_soc_cache_init(struct snd_soc_codec *codec)
75 if (!reg_size) 25 if (!reg_size)
76 return 0; 26 return 0;
77 27
78 mutex_init(&codec->cache_rw_mutex);
79
80 dev_dbg(codec->dev, "ASoC: Initializing cache for %s codec\n", 28 dev_dbg(codec->dev, "ASoC: Initializing cache for %s codec\n",
81 codec->component.name); 29 codec->component.name);
82 30
@@ -103,100 +51,3 @@ int snd_soc_cache_exit(struct snd_soc_codec *codec)
103 codec->reg_cache = NULL; 51 codec->reg_cache = NULL;
104 return 0; 52 return 0;
105} 53}
106
107/**
108 * snd_soc_cache_read: Fetch the value of a given register from the cache.
109 *
110 * @codec: CODEC to configure.
111 * @reg: The register index.
112 * @value: The value to be returned.
113 */
114int snd_soc_cache_read(struct snd_soc_codec *codec,
115 unsigned int reg, unsigned int *value)
116{
117 if (!value)
118 return -EINVAL;
119
120 mutex_lock(&codec->cache_rw_mutex);
121 if (!ZERO_OR_NULL_PTR(codec->reg_cache))
122 *value = snd_soc_get_cache_val(codec->reg_cache, reg,
123 codec->driver->reg_word_size);
124 mutex_unlock(&codec->cache_rw_mutex);
125
126 return 0;
127}
128EXPORT_SYMBOL_GPL(snd_soc_cache_read);
129
130/**
131 * snd_soc_cache_write: Set the value of a given register in the cache.
132 *
133 * @codec: CODEC to configure.
134 * @reg: The register index.
135 * @value: The new register value.
136 */
137int snd_soc_cache_write(struct snd_soc_codec *codec,
138 unsigned int reg, unsigned int value)
139{
140 mutex_lock(&codec->cache_rw_mutex);
141 if (!ZERO_OR_NULL_PTR(codec->reg_cache))
142 snd_soc_set_cache_val(codec->reg_cache, reg, value,
143 codec->driver->reg_word_size);
144 mutex_unlock(&codec->cache_rw_mutex);
145
146 return 0;
147}
148EXPORT_SYMBOL_GPL(snd_soc_cache_write);
149
150static int snd_soc_flat_cache_sync(struct snd_soc_codec *codec)
151{
152 int i;
153 int ret;
154 const struct snd_soc_codec_driver *codec_drv;
155 unsigned int val;
156
157 codec_drv = codec->driver;
158 for (i = 0; i < codec_drv->reg_cache_size; ++i) {
159 ret = snd_soc_cache_read(codec, i, &val);
160 if (ret)
161 return ret;
162 if (codec_drv->reg_cache_default)
163 if (snd_soc_get_cache_val(codec_drv->reg_cache_default,
164 i, codec_drv->reg_word_size) == val)
165 continue;
166
167 ret = snd_soc_write(codec, i, val);
168 if (ret)
169 return ret;
170 dev_dbg(codec->dev, "ASoC: Synced register %#x, value = %#x\n",
171 i, val);
172 }
173 return 0;
174}
175
176/**
177 * snd_soc_cache_sync: Sync the register cache with the hardware.
178 *
179 * @codec: CODEC to configure.
180 *
181 * Any registers that should not be synced should be marked as
182 * volatile. In general drivers can choose not to use the provided
183 * syncing functionality if they so require.
184 */
185int snd_soc_cache_sync(struct snd_soc_codec *codec)
186{
187 const char *name = "flat";
188 int ret;
189
190 if (!codec->cache_sync)
191 return 0;
192
193 dev_dbg(codec->dev, "ASoC: Syncing cache for %s codec\n",
194 codec->component.name);
195 trace_snd_soc_cache_sync(codec, name, "start");
196 ret = snd_soc_flat_cache_sync(codec);
197 if (!ret)
198 codec->cache_sync = 0;
199 trace_snd_soc_cache_sync(codec, name, "end");
200 return ret;
201}
202EXPORT_SYMBOL_GPL(snd_soc_cache_sync);
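For reference, the flat register-cache interface removed in this hunk was typically consumed as sketched below, with a codec driver replaying the cache from its resume callback; after this series the equivalent behaviour comes from regmap's regcache (regcache_mark_dirty()/regcache_sync()). Names are illustrative.

static int my_codec_resume(struct snd_soc_codec *codec)
{
	/* replay all cached register writes after the part lost power */
	return snd_soc_cache_sync(codec);
}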
diff --git a/sound/soc/soc-compress.c b/sound/soc/soc-compress.c
index cecfab3cc948..590a82f01d0b 100644
--- a/sound/soc/soc-compress.c
+++ b/sound/soc/soc-compress.c
@@ -258,10 +258,7 @@ static int soc_compr_free_fe(struct snd_compr_stream *cstream)
258 list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be) 258 list_for_each_entry(dpcm, &fe->dpcm[stream].be_clients, list_be)
259 dpcm->state = SND_SOC_DPCM_LINK_STATE_FREE; 259 dpcm->state = SND_SOC_DPCM_LINK_STATE_FREE;
260 260
261 if (stream == SNDRV_PCM_STREAM_PLAYBACK) 261 dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_STOP);
262 dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_STOP);
263 else
264 dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_STOP);
265 262
266 fe->dpcm[stream].state = SND_SOC_DPCM_STATE_CLOSE; 263 fe->dpcm[stream].state = SND_SOC_DPCM_STATE_CLOSE;
267 fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO; 264 fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO;
@@ -456,11 +453,7 @@ static int soc_compr_set_params_fe(struct snd_compr_stream *cstream,
456 if (ret < 0) 453 if (ret < 0)
457 goto out; 454 goto out;
458 455
459 if (stream == SNDRV_PCM_STREAM_PLAYBACK) 456 dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_START);
460 dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_START);
461 else
462 dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_START);
463
464 fe->dpcm[stream].state = SND_SOC_DPCM_STATE_PREPARE; 457 fe->dpcm[stream].state = SND_SOC_DPCM_STATE_PREPARE;
465 458
466out: 459out:
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 4c8f8a23a0e9..11d01e5a2f70 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -34,9 +34,6 @@
34#include <linux/ctype.h> 34#include <linux/ctype.h>
35#include <linux/slab.h> 35#include <linux/slab.h>
36#include <linux/of.h> 36#include <linux/of.h>
37#include <linux/gpio.h>
38#include <linux/of_gpio.h>
39#include <sound/ac97_codec.h>
40#include <sound/core.h> 37#include <sound/core.h>
41#include <sound/jack.h> 38#include <sound/jack.h>
42#include <sound/pcm.h> 39#include <sound/pcm.h>
@@ -69,16 +66,6 @@ static int pmdown_time = 5000;
69module_param(pmdown_time, int, 0); 66module_param(pmdown_time, int, 0);
70MODULE_PARM_DESC(pmdown_time, "DAPM stream powerdown time (msecs)"); 67MODULE_PARM_DESC(pmdown_time, "DAPM stream powerdown time (msecs)");
71 68
72struct snd_ac97_reset_cfg {
73 struct pinctrl *pctl;
74 struct pinctrl_state *pstate_reset;
75 struct pinctrl_state *pstate_warm_reset;
76 struct pinctrl_state *pstate_run;
77 int gpio_sdata;
78 int gpio_sync;
79 int gpio_reset;
80};
81
82/* returns the minimum number of bytes needed to represent 69/* returns the minimum number of bytes needed to represent
83 * a particular given value */ 70 * a particular given value */
84static int min_bytes_needed(unsigned long val) 71static int min_bytes_needed(unsigned long val)
@@ -309,9 +296,6 @@ static void soc_init_codec_debugfs(struct snd_soc_component *component)
309{ 296{
310 struct snd_soc_codec *codec = snd_soc_component_to_codec(component); 297 struct snd_soc_codec *codec = snd_soc_component_to_codec(component);
311 298
312 debugfs_create_bool("cache_sync", 0444, codec->component.debugfs_root,
313 &codec->cache_sync);
314
315 codec->debugfs_reg = debugfs_create_file("codec_reg", 0644, 299 codec->debugfs_reg = debugfs_create_file("codec_reg", 0644,
316 codec->component.debugfs_root, 300 codec->component.debugfs_root,
317 codec, &codec_reg_fops); 301 codec, &codec_reg_fops);
@@ -499,40 +483,6 @@ struct snd_soc_pcm_runtime *snd_soc_get_pcm_runtime(struct snd_soc_card *card,
499} 483}
500EXPORT_SYMBOL_GPL(snd_soc_get_pcm_runtime); 484EXPORT_SYMBOL_GPL(snd_soc_get_pcm_runtime);
501 485
502#ifdef CONFIG_SND_SOC_AC97_BUS
503/* unregister ac97 codec */
504static int soc_ac97_dev_unregister(struct snd_soc_codec *codec)
505{
506 if (codec->ac97->dev.bus)
507 device_unregister(&codec->ac97->dev);
508 return 0;
509}
510
511/* stop no dev release warning */
512static void soc_ac97_device_release(struct device *dev){}
513
514/* register ac97 codec to bus */
515static int soc_ac97_dev_register(struct snd_soc_codec *codec)
516{
517 int err;
518
519 codec->ac97->dev.bus = &ac97_bus_type;
520 codec->ac97->dev.parent = codec->component.card->dev;
521 codec->ac97->dev.release = soc_ac97_device_release;
522
523 dev_set_name(&codec->ac97->dev, "%d-%d:%s",
524 codec->component.card->snd_card->number, 0,
525 codec->component.name);
526 err = device_register(&codec->ac97->dev);
527 if (err < 0) {
528 dev_err(codec->dev, "ASoC: Can't register ac97 bus\n");
529 codec->ac97->dev.bus = NULL;
530 return err;
531 }
532 return 0;
533}
534#endif
535
536static void codec2codec_close_delayed_work(struct work_struct *work) 486static void codec2codec_close_delayed_work(struct work_struct *work)
537{ 487{
538 /* Currently nothing to do for c2c links 488 /* Currently nothing to do for c2c links
@@ -592,17 +542,12 @@ int snd_soc_suspend(struct device *dev)
592 542
593 for (i = 0; i < card->num_rtd; i++) { 543 for (i = 0; i < card->num_rtd; i++) {
594 struct snd_soc_dai *cpu_dai = card->rtd[i].cpu_dai; 544 struct snd_soc_dai *cpu_dai = card->rtd[i].cpu_dai;
595 struct snd_soc_platform *platform = card->rtd[i].platform;
596 545
597 if (card->rtd[i].dai_link->ignore_suspend) 546 if (card->rtd[i].dai_link->ignore_suspend)
598 continue; 547 continue;
599 548
600 if (cpu_dai->driver->suspend && !cpu_dai->driver->ac97_control) 549 if (cpu_dai->driver->suspend && !cpu_dai->driver->bus_control)
601 cpu_dai->driver->suspend(cpu_dai); 550 cpu_dai->driver->suspend(cpu_dai);
602 if (platform->driver->suspend && !platform->suspended) {
603 platform->driver->suspend(cpu_dai);
604 platform->suspended = 1;
605 }
606 } 551 }
607 552
608 /* close any waiting streams and save state */ 553 /* close any waiting streams and save state */
@@ -629,8 +574,8 @@ int snd_soc_suspend(struct device *dev)
629 SND_SOC_DAPM_STREAM_SUSPEND); 574 SND_SOC_DAPM_STREAM_SUSPEND);
630 } 575 }
631 576
632 /* Recheck all analogue paths too */ 577 /* Recheck all endpoints too, their state is affected by suspend */
633 dapm_mark_io_dirty(&card->dapm); 578 dapm_mark_endpoints_dirty(card);
634 snd_soc_dapm_sync(&card->dapm); 579 snd_soc_dapm_sync(&card->dapm);
635 580
636 /* suspend all CODECs */ 581 /* suspend all CODECs */
@@ -656,7 +601,6 @@ int snd_soc_suspend(struct device *dev)
656 if (codec->driver->suspend) 601 if (codec->driver->suspend)
657 codec->driver->suspend(codec); 602 codec->driver->suspend(codec);
658 codec->suspended = 1; 603 codec->suspended = 1;
659 codec->cache_sync = 1;
660 if (codec->component.regmap) 604 if (codec->component.regmap)
661 regcache_mark_dirty(codec->component.regmap); 605 regcache_mark_dirty(codec->component.regmap);
662 /* deactivate pins to sleep state */ 606 /* deactivate pins to sleep state */
@@ -676,7 +620,7 @@ int snd_soc_suspend(struct device *dev)
676 if (card->rtd[i].dai_link->ignore_suspend) 620 if (card->rtd[i].dai_link->ignore_suspend)
677 continue; 621 continue;
678 622
679 if (cpu_dai->driver->suspend && cpu_dai->driver->ac97_control) 623 if (cpu_dai->driver->suspend && cpu_dai->driver->bus_control)
680 cpu_dai->driver->suspend(cpu_dai); 624 cpu_dai->driver->suspend(cpu_dai);
681 625
682 /* deactivate pins to sleep state */ 626 /* deactivate pins to sleep state */
@@ -712,14 +656,14 @@ static void soc_resume_deferred(struct work_struct *work)
712 if (card->resume_pre) 656 if (card->resume_pre)
713 card->resume_pre(card); 657 card->resume_pre(card);
714 658
715 /* resume AC97 DAIs */ 659 /* resume control bus DAIs */
716 for (i = 0; i < card->num_rtd; i++) { 660 for (i = 0; i < card->num_rtd; i++) {
717 struct snd_soc_dai *cpu_dai = card->rtd[i].cpu_dai; 661 struct snd_soc_dai *cpu_dai = card->rtd[i].cpu_dai;
718 662
719 if (card->rtd[i].dai_link->ignore_suspend) 663 if (card->rtd[i].dai_link->ignore_suspend)
720 continue; 664 continue;
721 665
722 if (cpu_dai->driver->resume && cpu_dai->driver->ac97_control) 666 if (cpu_dai->driver->resume && cpu_dai->driver->bus_control)
723 cpu_dai->driver->resume(cpu_dai); 667 cpu_dai->driver->resume(cpu_dai);
724 } 668 }
725 669
@@ -775,17 +719,12 @@ static void soc_resume_deferred(struct work_struct *work)
775 719
776 for (i = 0; i < card->num_rtd; i++) { 720 for (i = 0; i < card->num_rtd; i++) {
777 struct snd_soc_dai *cpu_dai = card->rtd[i].cpu_dai; 721 struct snd_soc_dai *cpu_dai = card->rtd[i].cpu_dai;
778 struct snd_soc_platform *platform = card->rtd[i].platform;
779 722
780 if (card->rtd[i].dai_link->ignore_suspend) 723 if (card->rtd[i].dai_link->ignore_suspend)
781 continue; 724 continue;
782 725
783 if (cpu_dai->driver->resume && !cpu_dai->driver->ac97_control) 726 if (cpu_dai->driver->resume && !cpu_dai->driver->bus_control)
784 cpu_dai->driver->resume(cpu_dai); 727 cpu_dai->driver->resume(cpu_dai);
785 if (platform->driver->resume && platform->suspended) {
786 platform->driver->resume(cpu_dai);
787 platform->suspended = 0;
788 }
789 } 728 }
790 729
791 if (card->resume_post) 730 if (card->resume_post)
@@ -796,8 +735,8 @@ static void soc_resume_deferred(struct work_struct *work)
796 /* userspace can access us now we are back as we were before */ 735 /* userspace can access us now we are back as we were before */
797 snd_power_change_state(card->snd_card, SNDRV_CTL_POWER_D0); 736 snd_power_change_state(card->snd_card, SNDRV_CTL_POWER_D0);
798 737
799 /* Recheck all analogue paths too */ 738 /* Recheck all endpoints too, their state is affected by suspend */
800 dapm_mark_io_dirty(&card->dapm); 739 dapm_mark_endpoints_dirty(card);
801 snd_soc_dapm_sync(&card->dapm); 740 snd_soc_dapm_sync(&card->dapm);
802} 741}
803 742
@@ -805,7 +744,8 @@ static void soc_resume_deferred(struct work_struct *work)
805int snd_soc_resume(struct device *dev) 744int snd_soc_resume(struct device *dev)
806{ 745{
807 struct snd_soc_card *card = dev_get_drvdata(dev); 746 struct snd_soc_card *card = dev_get_drvdata(dev);
808 int i, ac97_control = 0; 747 bool bus_control = false;
748 int i;
809 749
810 /* If the card is not initialized yet there is nothing to do */ 750 /* If the card is not initialized yet there is nothing to do */
811 if (!card->instantiated) 751 if (!card->instantiated)
@@ -828,17 +768,18 @@ int snd_soc_resume(struct device *dev)
828 } 768 }
829 } 769 }
830 770
831 /* AC97 devices might have other drivers hanging off them so 771 /*
832 * need to resume immediately. Other drivers don't have that 772 * DAIs that also act as the control bus master might have other drivers
833 * problem and may take a substantial amount of time to resume 773 * hanging off them so need to resume immediately. Other drivers don't
774 * have that problem and may take a substantial amount of time to resume
834 * due to I/O costs and anti-pop so handle them out of line. 775 * due to I/O costs and anti-pop so handle them out of line.
835 */ 776 */
836 for (i = 0; i < card->num_rtd; i++) { 777 for (i = 0; i < card->num_rtd; i++) {
837 struct snd_soc_dai *cpu_dai = card->rtd[i].cpu_dai; 778 struct snd_soc_dai *cpu_dai = card->rtd[i].cpu_dai;
838 ac97_control |= cpu_dai->driver->ac97_control; 779 bus_control |= cpu_dai->driver->bus_control;
839 } 780 }
840 if (ac97_control) { 781 if (bus_control) {
841 dev_dbg(dev, "ASoC: Resuming AC97 immediately\n"); 782 dev_dbg(dev, "ASoC: Resuming control bus master immediately\n");
842 soc_resume_deferred(&card->deferred_resume_work); 783 soc_resume_deferred(&card->deferred_resume_work);
843 } else { 784 } else {
844 dev_dbg(dev, "ASoC: Scheduling resume work\n"); 785 dev_dbg(dev, "ASoC: Scheduling resume work\n");
@@ -884,7 +825,7 @@ static struct snd_soc_dai *snd_soc_find_dai(
884 list_for_each_entry(component, &component_list, list) { 825 list_for_each_entry(component, &component_list, list) {
885 if (dlc->of_node && component->dev->of_node != dlc->of_node) 826 if (dlc->of_node && component->dev->of_node != dlc->of_node)
886 continue; 827 continue;
887 if (dlc->name && strcmp(dev_name(component->dev), dlc->name)) 828 if (dlc->name && strcmp(component->name, dlc->name))
888 continue; 829 continue;
889 list_for_each_entry(dai, &component->dai_list, list) { 830 list_for_each_entry(dai, &component->dai_list, list) {
890 if (dlc->dai_name && strcmp(dai->name, dlc->dai_name)) 831 if (dlc->dai_name && strcmp(dai->name, dlc->dai_name))
@@ -1251,25 +1192,22 @@ static int soc_probe_link_components(struct snd_soc_card *card, int num,
1251 return 0; 1192 return 0;
1252} 1193}
1253 1194
1254static int soc_probe_codec_dai(struct snd_soc_card *card, 1195static int soc_probe_dai(struct snd_soc_dai *dai, int order)
1255 struct snd_soc_dai *codec_dai,
1256 int order)
1257{ 1196{
1258 int ret; 1197 int ret;
1259 1198
1260 if (!codec_dai->probed && codec_dai->driver->probe_order == order) { 1199 if (!dai->probed && dai->driver->probe_order == order) {
1261 if (codec_dai->driver->probe) { 1200 if (dai->driver->probe) {
1262 ret = codec_dai->driver->probe(codec_dai); 1201 ret = dai->driver->probe(dai);
1263 if (ret < 0) { 1202 if (ret < 0) {
1264 dev_err(codec_dai->dev, 1203 dev_err(dai->dev,
1265 "ASoC: failed to probe CODEC DAI %s: %d\n", 1204 "ASoC: failed to probe DAI %s: %d\n",
1266 codec_dai->name, ret); 1205 dai->name, ret);
1267 return ret; 1206 return ret;
1268 } 1207 }
1269 } 1208 }
1270 1209
1271 /* mark codec_dai as probed and add to card dai list */ 1210 dai->probed = 1;
1272 codec_dai->probed = 1;
1273 } 1211 }
1274 1212
1275 return 0; 1213 return 0;
@@ -1319,40 +1257,22 @@ static int soc_probe_link_dais(struct snd_soc_card *card, int num, int order)
1319{ 1257{
1320 struct snd_soc_dai_link *dai_link = &card->dai_link[num]; 1258 struct snd_soc_dai_link *dai_link = &card->dai_link[num];
1321 struct snd_soc_pcm_runtime *rtd = &card->rtd[num]; 1259 struct snd_soc_pcm_runtime *rtd = &card->rtd[num];
1322 struct snd_soc_platform *platform = rtd->platform;
1323 struct snd_soc_dai *cpu_dai = rtd->cpu_dai; 1260 struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
1324 int i, ret; 1261 int i, ret;
1325 1262
1326 dev_dbg(card->dev, "ASoC: probe %s dai link %d late %d\n", 1263 dev_dbg(card->dev, "ASoC: probe %s dai link %d late %d\n",
1327 card->name, num, order); 1264 card->name, num, order);
1328 1265
1329 /* config components */
1330 cpu_dai->platform = platform;
1331 cpu_dai->card = card;
1332 for (i = 0; i < rtd->num_codecs; i++)
1333 rtd->codec_dais[i]->card = card;
1334
1335 /* set default power off timeout */ 1266 /* set default power off timeout */
1336 rtd->pmdown_time = pmdown_time; 1267 rtd->pmdown_time = pmdown_time;
1337 1268
1338 /* probe the cpu_dai */ 1269 ret = soc_probe_dai(cpu_dai, order);
1339 if (!cpu_dai->probed && 1270 if (ret)
1340 cpu_dai->driver->probe_order == order) { 1271 return ret;
1341 if (cpu_dai->driver->probe) {
1342 ret = cpu_dai->driver->probe(cpu_dai);
1343 if (ret < 0) {
1344 dev_err(cpu_dai->dev,
1345 "ASoC: failed to probe CPU DAI %s: %d\n",
1346 cpu_dai->name, ret);
1347 return ret;
1348 }
1349 }
1350 cpu_dai->probed = 1;
1351 }
1352 1272
1353 /* probe the CODEC DAI */ 1273 /* probe the CODEC DAI */
1354 for (i = 0; i < rtd->num_codecs; i++) { 1274 for (i = 0; i < rtd->num_codecs; i++) {
1355 ret = soc_probe_codec_dai(card, rtd->codec_dais[i], order); 1275 ret = soc_probe_dai(rtd->codec_dais[i], order);
1356 if (ret) 1276 if (ret)
1357 return ret; 1277 return ret;
1358 } 1278 }
@@ -1422,84 +1342,9 @@ static int soc_probe_link_dais(struct snd_soc_card *card, int num, int order)
1422 } 1342 }
1423 } 1343 }
1424 1344
1425 /* add platform data for AC97 devices */
1426 for (i = 0; i < rtd->num_codecs; i++) {
1427 if (rtd->codec_dais[i]->driver->ac97_control)
1428 snd_ac97_dev_add_pdata(rtd->codec_dais[i]->codec->ac97,
1429 rtd->cpu_dai->ac97_pdata);
1430 }
1431
1432 return 0; 1345 return 0;
1433} 1346}
1434 1347
1435#ifdef CONFIG_SND_SOC_AC97_BUS
1436static int soc_register_ac97_codec(struct snd_soc_codec *codec,
1437 struct snd_soc_dai *codec_dai)
1438{
1439 int ret;
1440
1441 /* Only instantiate AC97 if not already done by the adaptor
1442 * for the generic AC97 subsystem.
1443 */
1444 if (codec_dai->driver->ac97_control && !codec->ac97_registered) {
1445 /*
1446 * It is possible that the AC97 device is already registered to
1447 * the device subsystem. This happens when the device is created
1448 * via snd_ac97_mixer(). Currently only SoC codec that does so
1449 * is the generic AC97 glue but others migh emerge.
1450 *
1451 * In those cases we don't try to register the device again.
1452 */
1453 if (!codec->ac97_created)
1454 return 0;
1455
1456 ret = soc_ac97_dev_register(codec);
1457 if (ret < 0) {
1458 dev_err(codec->dev,
1459 "ASoC: AC97 device register failed: %d\n", ret);
1460 return ret;
1461 }
1462
1463 codec->ac97_registered = 1;
1464 }
1465 return 0;
1466}
1467
1468static void soc_unregister_ac97_codec(struct snd_soc_codec *codec)
1469{
1470 if (codec->ac97_registered) {
1471 soc_ac97_dev_unregister(codec);
1472 codec->ac97_registered = 0;
1473 }
1474}
1475
1476static int soc_register_ac97_dai_link(struct snd_soc_pcm_runtime *rtd)
1477{
1478 int i, ret;
1479
1480 for (i = 0; i < rtd->num_codecs; i++) {
1481 struct snd_soc_dai *codec_dai = rtd->codec_dais[i];
1482
1483 ret = soc_register_ac97_codec(codec_dai->codec, codec_dai);
1484 if (ret) {
1485 while (--i >= 0)
1486 soc_unregister_ac97_codec(codec_dai->codec);
1487 return ret;
1488 }
1489 }
1490
1491 return 0;
1492}
1493
1494static void soc_unregister_ac97_dai_link(struct snd_soc_pcm_runtime *rtd)
1495{
1496 int i;
1497
1498 for (i = 0; i < rtd->num_codecs; i++)
1499 soc_unregister_ac97_codec(rtd->codec_dais[i]->codec);
1500}
1501#endif
1502
1503static int soc_bind_aux_dev(struct snd_soc_card *card, int num) 1348static int soc_bind_aux_dev(struct snd_soc_card *card, int num)
1504{ 1349{
1505 struct snd_soc_pcm_runtime *rtd = &card->rtd_aux[num]; 1350 struct snd_soc_pcm_runtime *rtd = &card->rtd_aux[num];
@@ -1793,20 +1638,6 @@ static int snd_soc_instantiate_card(struct snd_soc_card *card)
1793 goto probe_aux_dev_err; 1638 goto probe_aux_dev_err;
1794 } 1639 }
1795 1640
1796#ifdef CONFIG_SND_SOC_AC97_BUS
1797 /* register any AC97 codecs */
1798 for (i = 0; i < card->num_rtd; i++) {
1799 ret = soc_register_ac97_dai_link(&card->rtd[i]);
1800 if (ret < 0) {
1801 dev_err(card->dev,
1802 "ASoC: failed to register AC97: %d\n", ret);
1803 while (--i >= 0)
1804 soc_unregister_ac97_dai_link(&card->rtd[i]);
1805 goto probe_aux_dev_err;
1806 }
1807 }
1808#endif
1809
1810 card->instantiated = 1; 1641 card->instantiated = 1;
1811 snd_soc_dapm_sync(&card->dapm); 1642 snd_soc_dapm_sync(&card->dapm);
1812 mutex_unlock(&card->mutex); 1643 mutex_unlock(&card->mutex);
@@ -1949,216 +1780,6 @@ static struct platform_driver soc_driver = {
1949}; 1780};
1950 1781
1951/** 1782/**
1952 * snd_soc_new_ac97_codec - initailise AC97 device
1953 * @codec: audio codec
1954 * @ops: AC97 bus operations
1955 * @num: AC97 codec number
1956 *
1957 * Initialises AC97 codec resources for use by ad-hoc devices only.
1958 */
1959int snd_soc_new_ac97_codec(struct snd_soc_codec *codec,
1960 struct snd_ac97_bus_ops *ops, int num)
1961{
1962 codec->ac97 = kzalloc(sizeof(struct snd_ac97), GFP_KERNEL);
1963 if (codec->ac97 == NULL)
1964 return -ENOMEM;
1965
1966 codec->ac97->bus = kzalloc(sizeof(struct snd_ac97_bus), GFP_KERNEL);
1967 if (codec->ac97->bus == NULL) {
1968 kfree(codec->ac97);
1969 codec->ac97 = NULL;
1970 return -ENOMEM;
1971 }
1972
1973 codec->ac97->bus->ops = ops;
1974 codec->ac97->num = num;
1975
1976 /*
1977 * Mark the AC97 device to be created by us. This way we ensure that the
1978 * device will be registered with the device subsystem later on.
1979 */
1980 codec->ac97_created = 1;
1981
1982 return 0;
1983}
1984EXPORT_SYMBOL_GPL(snd_soc_new_ac97_codec);
1985
1986static struct snd_ac97_reset_cfg snd_ac97_rst_cfg;
1987
1988static void snd_soc_ac97_warm_reset(struct snd_ac97 *ac97)
1989{
1990 struct pinctrl *pctl = snd_ac97_rst_cfg.pctl;
1991
1992 pinctrl_select_state(pctl, snd_ac97_rst_cfg.pstate_warm_reset);
1993
1994 gpio_direction_output(snd_ac97_rst_cfg.gpio_sync, 1);
1995
1996 udelay(10);
1997
1998 gpio_direction_output(snd_ac97_rst_cfg.gpio_sync, 0);
1999
2000 pinctrl_select_state(pctl, snd_ac97_rst_cfg.pstate_run);
2001 msleep(2);
2002}
2003
2004static void snd_soc_ac97_reset(struct snd_ac97 *ac97)
2005{
2006 struct pinctrl *pctl = snd_ac97_rst_cfg.pctl;
2007
2008 pinctrl_select_state(pctl, snd_ac97_rst_cfg.pstate_reset);
2009
2010 gpio_direction_output(snd_ac97_rst_cfg.gpio_sync, 0);
2011 gpio_direction_output(snd_ac97_rst_cfg.gpio_sdata, 0);
2012 gpio_direction_output(snd_ac97_rst_cfg.gpio_reset, 0);
2013
2014 udelay(10);
2015
2016 gpio_direction_output(snd_ac97_rst_cfg.gpio_reset, 1);
2017
2018 pinctrl_select_state(pctl, snd_ac97_rst_cfg.pstate_run);
2019 msleep(2);
2020}
2021
2022static int snd_soc_ac97_parse_pinctl(struct device *dev,
2023 struct snd_ac97_reset_cfg *cfg)
2024{
2025 struct pinctrl *p;
2026 struct pinctrl_state *state;
2027 int gpio;
2028 int ret;
2029
2030 p = devm_pinctrl_get(dev);
2031 if (IS_ERR(p)) {
2032 dev_err(dev, "Failed to get pinctrl\n");
2033 return PTR_ERR(p);
2034 }
2035 cfg->pctl = p;
2036
2037 state = pinctrl_lookup_state(p, "ac97-reset");
2038 if (IS_ERR(state)) {
2039 dev_err(dev, "Can't find pinctrl state ac97-reset\n");
2040 return PTR_ERR(state);
2041 }
2042 cfg->pstate_reset = state;
2043
2044 state = pinctrl_lookup_state(p, "ac97-warm-reset");
2045 if (IS_ERR(state)) {
2046 dev_err(dev, "Can't find pinctrl state ac97-warm-reset\n");
2047 return PTR_ERR(state);
2048 }
2049 cfg->pstate_warm_reset = state;
2050
2051 state = pinctrl_lookup_state(p, "ac97-running");
2052 if (IS_ERR(state)) {
2053 dev_err(dev, "Can't find pinctrl state ac97-running\n");
2054 return PTR_ERR(state);
2055 }
2056 cfg->pstate_run = state;
2057
2058 gpio = of_get_named_gpio(dev->of_node, "ac97-gpios", 0);
2059 if (gpio < 0) {
2060 dev_err(dev, "Can't find ac97-sync gpio\n");
2061 return gpio;
2062 }
2063 ret = devm_gpio_request(dev, gpio, "AC97 link sync");
2064 if (ret) {
2065 dev_err(dev, "Failed requesting ac97-sync gpio\n");
2066 return ret;
2067 }
2068 cfg->gpio_sync = gpio;
2069
2070 gpio = of_get_named_gpio(dev->of_node, "ac97-gpios", 1);
2071 if (gpio < 0) {
2072 dev_err(dev, "Can't find ac97-sdata gpio %d\n", gpio);
2073 return gpio;
2074 }
2075 ret = devm_gpio_request(dev, gpio, "AC97 link sdata");
2076 if (ret) {
2077 dev_err(dev, "Failed requesting ac97-sdata gpio\n");
2078 return ret;
2079 }
2080 cfg->gpio_sdata = gpio;
2081
2082 gpio = of_get_named_gpio(dev->of_node, "ac97-gpios", 2);
2083 if (gpio < 0) {
2084 dev_err(dev, "Can't find ac97-reset gpio\n");
2085 return gpio;
2086 }
2087 ret = devm_gpio_request(dev, gpio, "AC97 link reset");
2088 if (ret) {
2089 dev_err(dev, "Failed requesting ac97-reset gpio\n");
2090 return ret;
2091 }
2092 cfg->gpio_reset = gpio;
2093
2094 return 0;
2095}
2096
2097struct snd_ac97_bus_ops *soc_ac97_ops;
2098EXPORT_SYMBOL_GPL(soc_ac97_ops);
2099
2100int snd_soc_set_ac97_ops(struct snd_ac97_bus_ops *ops)
2101{
2102 if (ops == soc_ac97_ops)
2103 return 0;
2104
2105 if (soc_ac97_ops && ops)
2106 return -EBUSY;
2107
2108 soc_ac97_ops = ops;
2109
2110 return 0;
2111}
2112EXPORT_SYMBOL_GPL(snd_soc_set_ac97_ops);
2113
2114/**
2115 * snd_soc_set_ac97_ops_of_reset - Set ac97 ops with generic ac97 reset functions
2116 *
2117 * This function sets the reset and warm_reset properties of ops and parses
2118 * the device node of pdev to get pinctrl states and gpio numbers to use.
2119 */
2120int snd_soc_set_ac97_ops_of_reset(struct snd_ac97_bus_ops *ops,
2121 struct platform_device *pdev)
2122{
2123 struct device *dev = &pdev->dev;
2124 struct snd_ac97_reset_cfg cfg;
2125 int ret;
2126
2127 ret = snd_soc_ac97_parse_pinctl(dev, &cfg);
2128 if (ret)
2129 return ret;
2130
2131 ret = snd_soc_set_ac97_ops(ops);
2132 if (ret)
2133 return ret;
2134
2135 ops->warm_reset = snd_soc_ac97_warm_reset;
2136 ops->reset = snd_soc_ac97_reset;
2137
2138 snd_ac97_rst_cfg = cfg;
2139 return 0;
2140}
2141EXPORT_SYMBOL_GPL(snd_soc_set_ac97_ops_of_reset);
2142
2143/**
2144 * snd_soc_free_ac97_codec - free AC97 codec device
2145 * @codec: audio codec
2146 *
2147 * Frees AC97 codec device resources.
2148 */
2149void snd_soc_free_ac97_codec(struct snd_soc_codec *codec)
2150{
2151#ifdef CONFIG_SND_SOC_AC97_BUS
2152 soc_unregister_ac97_codec(codec);
2153#endif
2154 kfree(codec->ac97->bus);
2155 kfree(codec->ac97);
2156 codec->ac97 = NULL;
2157 codec->ac97_created = 0;
2158}
2159EXPORT_SYMBOL_GPL(snd_soc_free_ac97_codec);
2160
2161/**
2162 * snd_soc_cnew - create new control 1783 * snd_soc_cnew - create new control
2163 * @_template: control template 1784 * @_template: control template
2164 * @data: control private data 1785 * @data: control private data
@@ -2326,7 +1947,7 @@ EXPORT_SYMBOL_GPL(snd_soc_add_card_controls);
2326int snd_soc_add_dai_controls(struct snd_soc_dai *dai, 1947int snd_soc_add_dai_controls(struct snd_soc_dai *dai,
2327 const struct snd_kcontrol_new *controls, int num_controls) 1948 const struct snd_kcontrol_new *controls, int num_controls)
2328{ 1949{
2329 struct snd_card *card = dai->card->snd_card; 1950 struct snd_card *card = dai->component->card->snd_card;
2330 1951
2331 return snd_soc_add_controls(card, dai->dev, controls, num_controls, 1952 return snd_soc_add_controls(card, dai->dev, controls, num_controls,
2332 NULL, dai); 1953 NULL, dai);
@@ -2334,1020 +1955,6 @@ int snd_soc_add_dai_controls(struct snd_soc_dai *dai,
2334EXPORT_SYMBOL_GPL(snd_soc_add_dai_controls); 1955EXPORT_SYMBOL_GPL(snd_soc_add_dai_controls);
2335 1956
2336/** 1957/**
2337 * snd_soc_info_enum_double - enumerated double mixer info callback
2338 * @kcontrol: mixer control
2339 * @uinfo: control element information
2340 *
2341 * Callback to provide information about a double enumerated
2342 * mixer control.
2343 *
2344 * Returns 0 for success.
2345 */
2346int snd_soc_info_enum_double(struct snd_kcontrol *kcontrol,
2347 struct snd_ctl_elem_info *uinfo)
2348{
2349 struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
2350
2351 uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
2352 uinfo->count = e->shift_l == e->shift_r ? 1 : 2;
2353 uinfo->value.enumerated.items = e->items;
2354
2355 if (uinfo->value.enumerated.item >= e->items)
2356 uinfo->value.enumerated.item = e->items - 1;
2357 strlcpy(uinfo->value.enumerated.name,
2358 e->texts[uinfo->value.enumerated.item],
2359 sizeof(uinfo->value.enumerated.name));
2360 return 0;
2361}
2362EXPORT_SYMBOL_GPL(snd_soc_info_enum_double);
2363
2364/**
2365 * snd_soc_get_enum_double - enumerated double mixer get callback
2366 * @kcontrol: mixer control
2367 * @ucontrol: control element information
2368 *
2369 * Callback to get the value of a double enumerated mixer.
2370 *
2371 * Returns 0 for success.
2372 */
2373int snd_soc_get_enum_double(struct snd_kcontrol *kcontrol,
2374 struct snd_ctl_elem_value *ucontrol)
2375{
2376 struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
2377 struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
2378 unsigned int val, item;
2379 unsigned int reg_val;
2380 int ret;
2381
2382 ret = snd_soc_component_read(component, e->reg, &reg_val);
2383 if (ret)
2384 return ret;
2385 val = (reg_val >> e->shift_l) & e->mask;
2386 item = snd_soc_enum_val_to_item(e, val);
2387 ucontrol->value.enumerated.item[0] = item;
2388 if (e->shift_l != e->shift_r) {
2389 val = (reg_val >> e->shift_l) & e->mask;
2390 item = snd_soc_enum_val_to_item(e, val);
2391 ucontrol->value.enumerated.item[1] = item;
2392 }
2393
2394 return 0;
2395}
2396EXPORT_SYMBOL_GPL(snd_soc_get_enum_double);
2397
2398/**
2399 * snd_soc_put_enum_double - enumerated double mixer put callback
2400 * @kcontrol: mixer control
2401 * @ucontrol: control element information
2402 *
2403 * Callback to set the value of a double enumerated mixer.
2404 *
2405 * Returns 0 for success.
2406 */
2407int snd_soc_put_enum_double(struct snd_kcontrol *kcontrol,
2408 struct snd_ctl_elem_value *ucontrol)
2409{
2410 struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
2411 struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
2412 unsigned int *item = ucontrol->value.enumerated.item;
2413 unsigned int val;
2414 unsigned int mask;
2415
2416 if (item[0] >= e->items)
2417 return -EINVAL;
2418 val = snd_soc_enum_item_to_val(e, item[0]) << e->shift_l;
2419 mask = e->mask << e->shift_l;
2420 if (e->shift_l != e->shift_r) {
2421 if (item[1] >= e->items)
2422 return -EINVAL;
2423 val |= snd_soc_enum_item_to_val(e, item[1]) << e->shift_r;
2424 mask |= e->mask << e->shift_r;
2425 }
2426
2427 return snd_soc_component_update_bits(component, e->reg, mask, val);
2428}
2429EXPORT_SYMBOL_GPL(snd_soc_put_enum_double);
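Callbacks like the three above are what the SOC_ENUM() helper macros expand to. A hypothetical control definition (register, shift and text list invented for illustration) looks like this:

static const char * const my_deemph_texts[] = {
	"None", "32kHz", "44.1kHz", "48kHz",
};

static SOC_ENUM_SINGLE_DECL(my_deemph_enum,
			    0x06 /* reg */, 1 /* shift */, my_deemph_texts);

static const struct snd_kcontrol_new my_codec_controls[] = {
	/* uses snd_soc_info/get/put_enum_double internally */
	SOC_ENUM("Playback De-emphasis", my_deemph_enum),
};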
2430
2431/**
2432 * snd_soc_read_signed - Read a codec register and interprete as signed value
2433 * @component: component
2434 * @reg: Register to read
2435 * @mask: Mask to use after shifting the register value
2436 * @shift: Right shift of register value
2437 * @sign_bit: Bit that describes if a number is negative or not.
2438 * @signed_val: Pointer to where the read value should be stored
2439 *
2440 * This functions reads a codec register. The register value is shifted right
2441 * by 'shift' bits and masked with the given 'mask'. Afterwards it translates
2442 * the given registervalue into a signed integer if sign_bit is non-zero.
2443 *
2444 * Returns 0 on sucess, otherwise an error value
2445 */
2446static int snd_soc_read_signed(struct snd_soc_component *component,
2447 unsigned int reg, unsigned int mask, unsigned int shift,
2448 unsigned int sign_bit, int *signed_val)
2449{
2450 int ret;
2451 unsigned int val;
2452
2453 ret = snd_soc_component_read(component, reg, &val);
2454 if (ret < 0)
2455 return ret;
2456
2457 val = (val >> shift) & mask;
2458
2459 if (!sign_bit) {
2460 *signed_val = val;
2461 return 0;
2462 }
2463
2464 /* non-negative number */
2465 if (!(val & BIT(sign_bit))) {
2466 *signed_val = val;
2467 return 0;
2468 }
2469
2470 ret = val;
2471
2472 /*
2473 * The register most probably does not contain a full-sized int.
2474 * Instead we have an arbitrary number of bits in a signed
2475 * representation which has to be translated into a full-sized int.
2476 * This is done by filling up all bits above the sign-bit.
2477 */
2478 ret |= ~((int)(BIT(sign_bit) - 1));
2479
2480 *signed_val = ret;
2481
2482 return 0;
2483}
2484
2485/**
2486 * snd_soc_info_volsw - single mixer info callback
2487 * @kcontrol: mixer control
2488 * @uinfo: control element information
2489 *
2490 * Callback to provide information about a single mixer control, or a double
2491 * mixer control that spans 2 registers.
2492 *
2493 * Returns 0 for success.
2494 */
2495int snd_soc_info_volsw(struct snd_kcontrol *kcontrol,
2496 struct snd_ctl_elem_info *uinfo)
2497{
2498 struct soc_mixer_control *mc =
2499 (struct soc_mixer_control *)kcontrol->private_value;
2500 int platform_max;
2501
2502 if (!mc->platform_max)
2503 mc->platform_max = mc->max;
2504 platform_max = mc->platform_max;
2505
2506 if (platform_max == 1 && !strstr(kcontrol->id.name, " Volume"))
2507 uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
2508 else
2509 uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
2510
2511 uinfo->count = snd_soc_volsw_is_stereo(mc) ? 2 : 1;
2512 uinfo->value.integer.min = 0;
2513 uinfo->value.integer.max = platform_max - mc->min;
2514 return 0;
2515}
2516EXPORT_SYMBOL_GPL(snd_soc_info_volsw);
2517
2518/**
2519 * snd_soc_get_volsw - single mixer get callback
2520 * @kcontrol: mixer control
2521 * @ucontrol: control element information
2522 *
2523 * Callback to get the value of a single mixer control, or a double mixer
2524 * control that spans 2 registers.
2525 *
2526 * Returns 0 for success.
2527 */
2528int snd_soc_get_volsw(struct snd_kcontrol *kcontrol,
2529 struct snd_ctl_elem_value *ucontrol)
2530{
2531 struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
2532 struct soc_mixer_control *mc =
2533 (struct soc_mixer_control *)kcontrol->private_value;
2534 unsigned int reg = mc->reg;
2535 unsigned int reg2 = mc->rreg;
2536 unsigned int shift = mc->shift;
2537 unsigned int rshift = mc->rshift;
2538 int max = mc->max;
2539 int min = mc->min;
2540 int sign_bit = mc->sign_bit;
2541 unsigned int mask = (1 << fls(max)) - 1;
2542 unsigned int invert = mc->invert;
2543 int val;
2544 int ret;
2545
2546 if (sign_bit)
2547 mask = BIT(sign_bit + 1) - 1;
2548
2549 ret = snd_soc_read_signed(component, reg, mask, shift, sign_bit, &val);
2550 if (ret)
2551 return ret;
2552
2553 ucontrol->value.integer.value[0] = val - min;
2554 if (invert)
2555 ucontrol->value.integer.value[0] =
2556 max - ucontrol->value.integer.value[0];
2557
2558 if (snd_soc_volsw_is_stereo(mc)) {
2559 if (reg == reg2)
2560 ret = snd_soc_read_signed(component, reg, mask, rshift,
2561 sign_bit, &val);
2562 else
2563 ret = snd_soc_read_signed(component, reg2, mask, shift,
2564 sign_bit, &val);
2565 if (ret)
2566 return ret;
2567
2568 ucontrol->value.integer.value[1] = val - min;
2569 if (invert)
2570 ucontrol->value.integer.value[1] =
2571 max - ucontrol->value.integer.value[1];
2572 }
2573
2574 return 0;
2575}
2576EXPORT_SYMBOL_GPL(snd_soc_get_volsw);
2577
2578/**
2579 * snd_soc_put_volsw - single mixer put callback
2580 * @kcontrol: mixer control
2581 * @ucontrol: control element information
2582 *
2583 * Callback to set the value of a single mixer control, or a double mixer
2584 * control that spans 2 registers.
2585 *
2586 * Returns 0 for success.
2587 */
2588int snd_soc_put_volsw(struct snd_kcontrol *kcontrol,
2589 struct snd_ctl_elem_value *ucontrol)
2590{
2591 struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
2592 struct soc_mixer_control *mc =
2593 (struct soc_mixer_control *)kcontrol->private_value;
2594 unsigned int reg = mc->reg;
2595 unsigned int reg2 = mc->rreg;
2596 unsigned int shift = mc->shift;
2597 unsigned int rshift = mc->rshift;
2598 int max = mc->max;
2599 int min = mc->min;
2600 unsigned int sign_bit = mc->sign_bit;
2601 unsigned int mask = (1 << fls(max)) - 1;
2602 unsigned int invert = mc->invert;
2603 int err;
2604 bool type_2r = false;
2605 unsigned int val2 = 0;
2606 unsigned int val, val_mask;
2607
2608 if (sign_bit)
2609 mask = BIT(sign_bit + 1) - 1;
2610
2611 val = ((ucontrol->value.integer.value[0] + min) & mask);
2612 if (invert)
2613 val = max - val;
2614 val_mask = mask << shift;
2615 val = val << shift;
2616 if (snd_soc_volsw_is_stereo(mc)) {
2617 val2 = ((ucontrol->value.integer.value[1] + min) & mask);
2618 if (invert)
2619 val2 = max - val2;
2620 if (reg == reg2) {
2621 val_mask |= mask << rshift;
2622 val |= val2 << rshift;
2623 } else {
2624 val2 = val2 << shift;
2625 type_2r = true;
2626 }
2627 }
2628 err = snd_soc_component_update_bits(component, reg, val_mask, val);
2629 if (err < 0)
2630 return err;
2631
2632 if (type_2r)
2633 err = snd_soc_component_update_bits(component, reg2, val_mask,
2634 val2);
2635
2636 return err;
2637}
2638EXPORT_SYMBOL_GPL(snd_soc_put_volsw);
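Likewise, SOC_SINGLE()/SOC_DOUBLE_R_TLV() style controls resolve to the volsw info/get/put handlers above. The register addresses, ranges and dB scale in this sketch are invented; <sound/soc.h> and <sound/tlv.h> are assumed.

static const DECLARE_TLV_DB_SCALE(my_hp_tlv, -5700, 100, 0);

static const struct snd_kcontrol_new my_mixer_controls[] = {
	/* single-bit switch: info/get/put_volsw with max = 1 */
	SOC_SINGLE("Mic Boost Switch", 0x19, 7, 1, 0),
	/* stereo volume spanning two registers */
	SOC_DOUBLE_R_TLV("Headphone Playback Volume",
			 0x1a, 0x1b, 0, 63, 0, my_hp_tlv),
};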
2639
2640/**
2641 * snd_soc_get_volsw_sx - single mixer get callback
2642 * @kcontrol: mixer control
2643 * @ucontrol: control element information
2644 *
2645 * Callback to get the value of a single mixer control, or a double mixer
2646 * control that spans 2 registers.
2647 *
2648 * Returns 0 for success.
2649 */
2650int snd_soc_get_volsw_sx(struct snd_kcontrol *kcontrol,
2651 struct snd_ctl_elem_value *ucontrol)
2652{
2653 struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
2654 struct soc_mixer_control *mc =
2655 (struct soc_mixer_control *)kcontrol->private_value;
2656 unsigned int reg = mc->reg;
2657 unsigned int reg2 = mc->rreg;
2658 unsigned int shift = mc->shift;
2659 unsigned int rshift = mc->rshift;
2660 int max = mc->max;
2661 int min = mc->min;
2662 int mask = (1 << (fls(min + max) - 1)) - 1;
2663 unsigned int val;
2664 int ret;
2665
2666 ret = snd_soc_component_read(component, reg, &val);
2667 if (ret < 0)
2668 return ret;
2669
2670 ucontrol->value.integer.value[0] = ((val >> shift) - min) & mask;
2671
2672 if (snd_soc_volsw_is_stereo(mc)) {
2673 ret = snd_soc_component_read(component, reg2, &val);
2674 if (ret < 0)
2675 return ret;
2676
2677 val = ((val >> rshift) - min) & mask;
2678 ucontrol->value.integer.value[1] = val;
2679 }
2680
2681 return 0;
2682}
2683EXPORT_SYMBOL_GPL(snd_soc_get_volsw_sx);
2684
2685/**
2686 * snd_soc_put_volsw_sx - double mixer set callback
2687 * @kcontrol: mixer control
2688 * @uinfo: control element information
2689 *
2690 * Callback to set the value of a double mixer control that spans 2 registers.
2691 *
2692 * Returns 0 for success.
2693 */
2694int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol,
2695 struct snd_ctl_elem_value *ucontrol)
2696{
2697 struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
2698 struct soc_mixer_control *mc =
2699 (struct soc_mixer_control *)kcontrol->private_value;
2700
2701 unsigned int reg = mc->reg;
2702 unsigned int reg2 = mc->rreg;
2703 unsigned int shift = mc->shift;
2704 unsigned int rshift = mc->rshift;
2705 int max = mc->max;
2706 int min = mc->min;
2707 int mask = (1 << (fls(min + max) - 1)) - 1;
2708 int err = 0;
2709 unsigned int val, val_mask, val2 = 0;
2710
2711 val_mask = mask << shift;
2712 val = (ucontrol->value.integer.value[0] + min) & mask;
2713 val = val << shift;
2714
2715 err = snd_soc_component_update_bits(component, reg, val_mask, val);
2716 if (err < 0)
2717 return err;
2718
2719 if (snd_soc_volsw_is_stereo(mc)) {
2720 val_mask = mask << rshift;
2721 val2 = (ucontrol->value.integer.value[1] + min) & mask;
2722 val2 = val2 << rshift;
2723
2724 err = snd_soc_component_update_bits(component, reg2, val_mask,
2725 val2);
2726 }
2727 return err;
2728}
2729EXPORT_SYMBOL_GPL(snd_soc_put_volsw_sx);
2730
2731/**
2732 * snd_soc_info_volsw_s8 - signed mixer info callback
2733 * @kcontrol: mixer control
2734 * @uinfo: control element information
2735 *
2736 * Callback to provide information about a signed mixer control.
2737 *
2738 * Returns 0 for success.
2739 */
2740int snd_soc_info_volsw_s8(struct snd_kcontrol *kcontrol,
2741 struct snd_ctl_elem_info *uinfo)
2742{
2743 struct soc_mixer_control *mc =
2744 (struct soc_mixer_control *)kcontrol->private_value;
2745 int platform_max;
2746 int min = mc->min;
2747
2748 if (!mc->platform_max)
2749 mc->platform_max = mc->max;
2750 platform_max = mc->platform_max;
2751
2752 uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
2753 uinfo->count = 2;
2754 uinfo->value.integer.min = 0;
2755 uinfo->value.integer.max = platform_max - min;
2756 return 0;
2757}
2758EXPORT_SYMBOL_GPL(snd_soc_info_volsw_s8);
2759
2760/**
2761 * snd_soc_get_volsw_s8 - signed mixer get callback
2762 * @kcontrol: mixer control
2763 * @ucontrol: control element information
2764 *
2765 * Callback to get the value of a signed mixer control.
2766 *
2767 * Returns 0 for success.
2768 */
2769int snd_soc_get_volsw_s8(struct snd_kcontrol *kcontrol,
2770 struct snd_ctl_elem_value *ucontrol)
2771{
2772 struct soc_mixer_control *mc =
2773 (struct soc_mixer_control *)kcontrol->private_value;
2774 struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
2775 unsigned int reg = mc->reg;
2776 unsigned int val;
2777 int min = mc->min;
2778 int ret;
2779
2780 ret = snd_soc_component_read(component, reg, &val);
2781 if (ret)
2782 return ret;
2783
2784 ucontrol->value.integer.value[0] =
2785 ((signed char)(val & 0xff))-min;
2786 ucontrol->value.integer.value[1] =
2787 ((signed char)((val >> 8) & 0xff))-min;
2788 return 0;
2789}
2790EXPORT_SYMBOL_GPL(snd_soc_get_volsw_s8);
2791
2792/**
2793 * snd_soc_put_volsw_sgn - signed mixer put callback
2794 * @kcontrol: mixer control
2795 * @ucontrol: control element information
2796 *
2797 * Callback to set the value of a signed mixer control.
2798 *
2799 * Returns 0 for success.
2800 */
2801int snd_soc_put_volsw_s8(struct snd_kcontrol *kcontrol,
2802 struct snd_ctl_elem_value *ucontrol)
2803{
2804 struct soc_mixer_control *mc =
2805 (struct soc_mixer_control *)kcontrol->private_value;
2806 struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
2807 unsigned int reg = mc->reg;
2808 int min = mc->min;
2809 unsigned int val;
2810
2811 val = (ucontrol->value.integer.value[0]+min) & 0xff;
2812 val |= ((ucontrol->value.integer.value[1]+min) & 0xff) << 8;
2813
2814 return snd_soc_component_update_bits(component, reg, 0xffff, val);
2815}
2816EXPORT_SYMBOL_GPL(snd_soc_put_volsw_s8);
2817
2818/**
2819 * snd_soc_info_volsw_range - single mixer info callback with range.
2820 * @kcontrol: mixer control
2821 * @uinfo: control element information
2822 *
2823 * Callback to provide information, within a range, about a single
2824 * mixer control.
2825 *
2826 * returns 0 for success.
2827 */
2828int snd_soc_info_volsw_range(struct snd_kcontrol *kcontrol,
2829 struct snd_ctl_elem_info *uinfo)
2830{
2831 struct soc_mixer_control *mc =
2832 (struct soc_mixer_control *)kcontrol->private_value;
2833 int platform_max;
2834 int min = mc->min;
2835
2836 if (!mc->platform_max)
2837 mc->platform_max = mc->max;
2838 platform_max = mc->platform_max;
2839
2840 uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
2841 uinfo->count = snd_soc_volsw_is_stereo(mc) ? 2 : 1;
2842 uinfo->value.integer.min = 0;
2843 uinfo->value.integer.max = platform_max - min;
2844
2845 return 0;
2846}
2847EXPORT_SYMBOL_GPL(snd_soc_info_volsw_range);
2848
2849/**
2850 * snd_soc_put_volsw_range - single mixer put value callback with range.
2851 * @kcontrol: mixer control
2852 * @ucontrol: control element information
2853 *
2854 * Callback to set the value, within a range, for a single mixer control.
2855 *
2856 * Returns 0 for success.
2857 */
2858int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol,
2859 struct snd_ctl_elem_value *ucontrol)
2860{
2861 struct soc_mixer_control *mc =
2862 (struct soc_mixer_control *)kcontrol->private_value;
2863 struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
2864 unsigned int reg = mc->reg;
2865 unsigned int rreg = mc->rreg;
2866 unsigned int shift = mc->shift;
2867 int min = mc->min;
2868 int max = mc->max;
2869 unsigned int mask = (1 << fls(max)) - 1;
2870 unsigned int invert = mc->invert;
2871 unsigned int val, val_mask;
2872 int ret;
2873
2874 if (invert)
2875 val = (max - ucontrol->value.integer.value[0]) & mask;
2876 else
2877 val = ((ucontrol->value.integer.value[0] + min) & mask);
2878 val_mask = mask << shift;
2879 val = val << shift;
2880
2881 ret = snd_soc_component_update_bits(component, reg, val_mask, val);
2882 if (ret < 0)
2883 return ret;
2884
2885 if (snd_soc_volsw_is_stereo(mc)) {
2886 if (invert)
2887 val = (max - ucontrol->value.integer.value[1]) & mask;
2888 else
2889 val = ((ucontrol->value.integer.value[1] + min) & mask);
2890 val_mask = mask << shift;
2891 val = val << shift;
2892
2893 ret = snd_soc_component_update_bits(component, rreg, val_mask,
2894 val);
2895 }
2896
2897 return ret;
2898}
2899EXPORT_SYMBOL_GPL(snd_soc_put_volsw_range);
2900
2901/**
2902 * snd_soc_get_volsw_range - single mixer get callback with range
2903 * @kcontrol: mixer control
2904 * @ucontrol: control element information
2905 *
2906 * Callback to get the value, within a range, of a single mixer control.
2907 *
2908 * Returns 0 for success.
2909 */
2910int snd_soc_get_volsw_range(struct snd_kcontrol *kcontrol,
2911 struct snd_ctl_elem_value *ucontrol)
2912{
2913 struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
2914 struct soc_mixer_control *mc =
2915 (struct soc_mixer_control *)kcontrol->private_value;
2916 unsigned int reg = mc->reg;
2917 unsigned int rreg = mc->rreg;
2918 unsigned int shift = mc->shift;
2919 int min = mc->min;
2920 int max = mc->max;
2921 unsigned int mask = (1 << fls(max)) - 1;
2922 unsigned int invert = mc->invert;
2923 unsigned int val;
2924 int ret;
2925
2926 ret = snd_soc_component_read(component, reg, &val);
2927 if (ret)
2928 return ret;
2929
2930 ucontrol->value.integer.value[0] = (val >> shift) & mask;
2931 if (invert)
2932 ucontrol->value.integer.value[0] =
2933 max - ucontrol->value.integer.value[0];
2934 else
2935 ucontrol->value.integer.value[0] =
2936 ucontrol->value.integer.value[0] - min;
2937
2938 if (snd_soc_volsw_is_stereo(mc)) {
2939 ret = snd_soc_component_read(component, rreg, &val);
2940 if (ret)
2941 return ret;
2942
2943 ucontrol->value.integer.value[1] = (val >> shift) & mask;
2944 if (invert)
2945 ucontrol->value.integer.value[1] =
2946 max - ucontrol->value.integer.value[1];
2947 else
2948 ucontrol->value.integer.value[1] =
2949 ucontrol->value.integer.value[1] - min;
2950 }
2951
2952 return 0;
2953}
2954EXPORT_SYMBOL_GPL(snd_soc_get_volsw_range);
2955
2956/**
2957 * snd_soc_limit_volume - Set new limit to an existing volume control.
2958 *
2959 * @codec: where to look for the control
2960 * @name: Name of the control
2961 * @max: new maximum limit
2962 *
2963 * Return 0 for success, else error.
2964 */
2965int snd_soc_limit_volume(struct snd_soc_codec *codec,
2966 const char *name, int max)
2967{
2968 struct snd_card *card = codec->component.card->snd_card;
2969 struct snd_kcontrol *kctl;
2970 struct soc_mixer_control *mc;
2971 int found = 0;
2972 int ret = -EINVAL;
2973
2974 /* Sanity check for name and max */
2975 if (unlikely(!name || max <= 0))
2976 return -EINVAL;
2977
2978 list_for_each_entry(kctl, &card->controls, list) {
2979 if (!strncmp(kctl->id.name, name, sizeof(kctl->id.name))) {
2980 found = 1;
2981 break;
2982 }
2983 }
2984 if (found) {
2985 mc = (struct soc_mixer_control *)kctl->private_value;
2986 if (max <= mc->max) {
2987 mc->platform_max = max;
2988 ret = 0;
2989 }
2990 }
2991 return ret;
2992}
2993EXPORT_SYMBOL_GPL(snd_soc_limit_volume);
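A machine driver would typically call this from its late_probe callback to cap a codec volume control, for example to protect small speakers. The control name and limit below are illustrative, and a single-CODEC first DAI link is assumed.

static int my_card_late_probe(struct snd_soc_card *card)
{
	/* assumes the first DAI link's CODEC owns the control */
	struct snd_soc_codec *codec = card->rtd[0].codec;

	return snd_soc_limit_volume(codec, "Speaker Playback Volume", 30);
}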
2994
2995int snd_soc_bytes_info(struct snd_kcontrol *kcontrol,
2996 struct snd_ctl_elem_info *uinfo)
2997{
2998 struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
2999 struct soc_bytes *params = (void *)kcontrol->private_value;
3000
3001 uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
3002 uinfo->count = params->num_regs * component->val_bytes;
3003
3004 return 0;
3005}
3006EXPORT_SYMBOL_GPL(snd_soc_bytes_info);
3007
3008int snd_soc_bytes_get(struct snd_kcontrol *kcontrol,
3009 struct snd_ctl_elem_value *ucontrol)
3010{
3011 struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
3012 struct soc_bytes *params = (void *)kcontrol->private_value;
3013 int ret;
3014
3015 if (component->regmap)
3016 ret = regmap_raw_read(component->regmap, params->base,
3017 ucontrol->value.bytes.data,
3018 params->num_regs * component->val_bytes);
3019 else
3020 ret = -EINVAL;
3021
3022 /* Hide any masked bytes to ensure consistent data reporting */
3023 if (ret == 0 && params->mask) {
3024 switch (component->val_bytes) {
3025 case 1:
3026 ucontrol->value.bytes.data[0] &= ~params->mask;
3027 break;
3028 case 2:
3029 ((u16 *)(&ucontrol->value.bytes.data))[0]
3030 &= cpu_to_be16(~params->mask);
3031 break;
3032 case 4:
3033 ((u32 *)(&ucontrol->value.bytes.data))[0]
3034 &= cpu_to_be32(~params->mask);
3035 break;
3036 default:
3037 return -EINVAL;
3038 }
3039 }
3040
3041 return ret;
3042}
3043EXPORT_SYMBOL_GPL(snd_soc_bytes_get);
3044
3045int snd_soc_bytes_put(struct snd_kcontrol *kcontrol,
3046 struct snd_ctl_elem_value *ucontrol)
3047{
3048 struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
3049 struct soc_bytes *params = (void *)kcontrol->private_value;
3050 int ret, len;
3051 unsigned int val, mask;
3052 void *data;
3053
3054 if (!component->regmap || !params->num_regs)
3055 return -EINVAL;
3056
3057 len = params->num_regs * component->val_bytes;
3058
3059 data = kmemdup(ucontrol->value.bytes.data, len, GFP_KERNEL | GFP_DMA);
3060 if (!data)
3061 return -ENOMEM;
3062
3063 /*
3064 * If we've got a mask then we need to preserve the register
3065 * bits. We shouldn't modify the incoming data so take a
3066 * copy.
3067 */
3068 if (params->mask) {
3069 ret = regmap_read(component->regmap, params->base, &val);
3070 if (ret != 0)
3071 goto out;
3072
3073 val &= params->mask;
3074
3075 switch (component->val_bytes) {
3076 case 1:
3077 ((u8 *)data)[0] &= ~params->mask;
3078 ((u8 *)data)[0] |= val;
3079 break;
3080 case 2:
3081 mask = ~params->mask;
3082 ret = regmap_parse_val(component->regmap,
3083 &mask, &mask);
3084 if (ret != 0)
3085 goto out;
3086
3087 ((u16 *)data)[0] &= mask;
3088
3089 ret = regmap_parse_val(component->regmap,
3090 &val, &val);
3091 if (ret != 0)
3092 goto out;
3093
3094 ((u16 *)data)[0] |= val;
3095 break;
3096 case 4:
3097 mask = ~params->mask;
3098 ret = regmap_parse_val(component->regmap,
3099 &mask, &mask);
3100 if (ret != 0)
3101 goto out;
3102
3103 ((u32 *)data)[0] &= mask;
3104
3105 ret = regmap_parse_val(component->regmap,
3106 &val, &val);
3107 if (ret != 0)
3108 goto out;
3109
3110 ((u32 *)data)[0] |= val;
3111 break;
3112 default:
3113 ret = -EINVAL;
3114 goto out;
3115 }
3116 }
3117
3118 ret = regmap_raw_write(component->regmap, params->base,
3119 data, len);
3120
3121out:
3122 kfree(data);
3123
3124 return ret;
3125}
3126EXPORT_SYMBOL_GPL(snd_soc_bytes_put);
3127
3128int snd_soc_bytes_info_ext(struct snd_kcontrol *kcontrol,
3129 struct snd_ctl_elem_info *ucontrol)
3130{
3131 struct soc_bytes_ext *params = (void *)kcontrol->private_value;
3132
3133 ucontrol->type = SNDRV_CTL_ELEM_TYPE_BYTES;
3134 ucontrol->count = params->max;
3135
3136 return 0;
3137}
3138EXPORT_SYMBOL_GPL(snd_soc_bytes_info_ext);
3139
3140int snd_soc_bytes_tlv_callback(struct snd_kcontrol *kcontrol, int op_flag,
3141 unsigned int size, unsigned int __user *tlv)
3142{
3143 struct soc_bytes_ext *params = (void *)kcontrol->private_value;
3144 unsigned int count = size < params->max ? size : params->max;
3145 int ret = -ENXIO;
3146
3147 switch (op_flag) {
3148 case SNDRV_CTL_TLV_OP_READ:
3149 if (params->get)
3150 ret = params->get(tlv, count);
3151 break;
3152 case SNDRV_CTL_TLV_OP_WRITE:
3153 if (params->put)
3154 ret = params->put(tlv, count);
3155 break;
3156 }
3157 return ret;
3158}
3159EXPORT_SYMBOL_GPL(snd_soc_bytes_tlv_callback);
3160
3161/**
3162 * snd_soc_info_xr_sx - signed multi register info callback
3163 * @kcontrol: mreg control
3164 * @uinfo: control element information
3165 *
3166 * Callback to provide information of a control that can
3167 * span multiple codec registers which together
3168 * forms a single signed value in a MSB/LSB manner.
3169 *
3170 * Returns 0 for success.
3171 */
3172int snd_soc_info_xr_sx(struct snd_kcontrol *kcontrol,
3173 struct snd_ctl_elem_info *uinfo)
3174{
3175 struct soc_mreg_control *mc =
3176 (struct soc_mreg_control *)kcontrol->private_value;
3177 uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
3178 uinfo->count = 1;
3179 uinfo->value.integer.min = mc->min;
3180 uinfo->value.integer.max = mc->max;
3181
3182 return 0;
3183}
3184EXPORT_SYMBOL_GPL(snd_soc_info_xr_sx);
3185
3186/**
3187 * snd_soc_get_xr_sx - signed multi register get callback
3188 * @kcontrol: mreg control
3189 * @ucontrol: control element information
3190 *
3191 * Callback to get the value of a control that can span
3192 * multiple codec registers which together form a single
3193 * signed value in an MSB/LSB manner. The control supports
3194 * specifying the total number of bits used, to allow for bitfields
3195 * across the multiple codec registers.
3196 *
3197 * Returns 0 for success.
3198 */
3199int snd_soc_get_xr_sx(struct snd_kcontrol *kcontrol,
3200 struct snd_ctl_elem_value *ucontrol)
3201{
3202 struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
3203 struct soc_mreg_control *mc =
3204 (struct soc_mreg_control *)kcontrol->private_value;
3205 unsigned int regbase = mc->regbase;
3206 unsigned int regcount = mc->regcount;
3207 unsigned int regwshift = component->val_bytes * BITS_PER_BYTE;
3208 unsigned int regwmask = (1<<regwshift)-1;
3209 unsigned int invert = mc->invert;
3210 unsigned long mask = (1UL<<mc->nbits)-1;
3211 long min = mc->min;
3212 long max = mc->max;
3213 long val = 0;
3214 unsigned int regval;
3215 unsigned int i;
3216 int ret;
3217
3218 for (i = 0; i < regcount; i++) {
3219 ret = snd_soc_component_read(component, regbase+i, &regval);
3220 if (ret)
3221 return ret;
3222 val |= (regval & regwmask) << (regwshift*(regcount-i-1));
3223 }
3224 val &= mask;
3225 if (min < 0 && val > max)
3226 val |= ~mask;
3227 if (invert)
3228 val = max - val;
3229 ucontrol->value.integer.value[0] = val;
3230
3231 return 0;
3232}
3233EXPORT_SYMBOL_GPL(snd_soc_get_xr_sx);
3234
3235/**
3236 * snd_soc_put_xr_sx - signed multi register put callback
3237 * @kcontrol: mreg control
3238 * @ucontrol: control element information
3239 *
3240 * Callback to set the value of a control that can span
3241 * multiple codec registers which together form a single
3242 * signed value in an MSB/LSB manner. The control supports
3243 * specifying the total number of bits used, to allow for bitfields
3244 * across the multiple codec registers.
3245 *
3246 * Returns 0 for success.
3247 */
3248int snd_soc_put_xr_sx(struct snd_kcontrol *kcontrol,
3249 struct snd_ctl_elem_value *ucontrol)
3250{
3251 struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
3252 struct soc_mreg_control *mc =
3253 (struct soc_mreg_control *)kcontrol->private_value;
3254 unsigned int regbase = mc->regbase;
3255 unsigned int regcount = mc->regcount;
3256 unsigned int regwshift = component->val_bytes * BITS_PER_BYTE;
3257 unsigned int regwmask = (1<<regwshift)-1;
3258 unsigned int invert = mc->invert;
3259 unsigned long mask = (1UL<<mc->nbits)-1;
3260 long max = mc->max;
3261 long val = ucontrol->value.integer.value[0];
3262 unsigned int i, regval, regmask;
3263 int err;
3264
3265 if (invert)
3266 val = max - val;
3267 val &= mask;
3268 for (i = 0; i < regcount; i++) {
3269 regval = (val >> (regwshift*(regcount-i-1))) & regwmask;
3270 regmask = (mask >> (regwshift*(regcount-i-1))) & regwmask;
3271 err = snd_soc_component_update_bits(component, regbase+i,
3272 regmask, regval);
3273 if (err < 0)
3274 return err;
3275 }
3276
3277 return 0;
3278}
3279EXPORT_SYMBOL_GPL(snd_soc_put_xr_sx);
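The xr_sx callbacks above are usually wired up through the SOC_SINGLE_XR_SX() control macro rather than referenced directly. A rough sketch; the register name and limits below are made up, real drivers pick values matching their register map:

	/* Hypothetical control: a 16-bit signed value split across two 8-bit
	 * registers, MSB register first, not inverted. */
	static const struct snd_kcontrol_new example_xr_sx_ctl =
		SOC_SINGLE_XR_SX("Master Playback Volume", EXAMPLE_VOL_MSB_REG,
				 2, 16, -32768, 32767, 0);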
3280
3281/**
3282 * snd_soc_get_strobe - strobe get callback
3283 * @kcontrol: mixer control
3284 * @ucontrol: control element information
3285 *
3286 * Callback to get the value of a strobe mixer control.
3287 *
3288 * Returns 0 for success.
3289 */
3290int snd_soc_get_strobe(struct snd_kcontrol *kcontrol,
3291 struct snd_ctl_elem_value *ucontrol)
3292{
3293 struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
3294 struct soc_mixer_control *mc =
3295 (struct soc_mixer_control *)kcontrol->private_value;
3296 unsigned int reg = mc->reg;
3297 unsigned int shift = mc->shift;
3298 unsigned int mask = 1 << shift;
3299 unsigned int invert = mc->invert != 0;
3300 unsigned int val;
3301 int ret;
3302
3303 ret = snd_soc_component_read(component, reg, &val);
3304 if (ret)
3305 return ret;
3306
3307 val &= mask;
3308
3309 if (shift != 0 && val != 0)
3310 val = val >> shift;
3311 ucontrol->value.enumerated.item[0] = val ^ invert;
3312
3313 return 0;
3314}
3315EXPORT_SYMBOL_GPL(snd_soc_get_strobe);
3316
3317/**
3318 * snd_soc_put_strobe - strobe put callback
3319 * @kcontrol: mixer control
3320 * @ucontrol: control element information
3321 *
3322 * Callback to strobe a register bit high then low (or the inverse)
3323 * in one pass of a single mixer enum control.
3324 *
3325 * Returns 1 for success.
3326 */
3327int snd_soc_put_strobe(struct snd_kcontrol *kcontrol,
3328 struct snd_ctl_elem_value *ucontrol)
3329{
3330 struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
3331 struct soc_mixer_control *mc =
3332 (struct soc_mixer_control *)kcontrol->private_value;
3333 unsigned int reg = mc->reg;
3334 unsigned int shift = mc->shift;
3335 unsigned int mask = 1 << shift;
3336 unsigned int invert = mc->invert != 0;
3337 unsigned int strobe = ucontrol->value.enumerated.item[0] != 0;
3338 unsigned int val1 = (strobe ^ invert) ? mask : 0;
3339 unsigned int val2 = (strobe ^ invert) ? 0 : mask;
3340 int err;
3341
3342 err = snd_soc_component_update_bits(component, reg, mask, val1);
3343 if (err < 0)
3344 return err;
3345
3346 return snd_soc_component_update_bits(component, reg, mask, val2);
3347}
3348EXPORT_SYMBOL_GPL(snd_soc_put_strobe);
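The strobe pair is typically reached through the SOC_SINGLE_STROBE() macro, which builds a boolean control whose put handler pulses the bit as described above. A sketch with an invented register and bit position:

	/* Hypothetical control: writing 1 drives bit 3 of a control register
	 * high and then low again within a single put() call. */
	static const struct snd_kcontrol_new example_strobe_ctl =
		SOC_SINGLE_STROBE("Calibration Trigger", EXAMPLE_CTRL_REG, 3, 0);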
3349
3350/**
3351 * snd_soc_dai_set_sysclk - configure DAI system or master clock. 1958 * snd_soc_dai_set_sysclk - configure DAI system or master clock.
3352 * @dai: DAI 1959 * @dai: DAI
3353 * @clk_id: DAI specific clock ID 1960 * @clk_id: DAI specific clock ID
@@ -3996,22 +2603,62 @@ static int snd_soc_component_initialize(struct snd_soc_component *component,
3996 return 0; 2603 return 0;
3997} 2604}
3998 2605
3999static void snd_soc_component_init_regmap(struct snd_soc_component *component) 2606static void snd_soc_component_setup_regmap(struct snd_soc_component *component)
4000{ 2607{
4001 if (!component->regmap) 2608 int val_bytes = regmap_get_val_bytes(component->regmap);
4002 component->regmap = dev_get_regmap(component->dev, NULL); 2609
4003 if (component->regmap) { 2610 /* Errors are legitimate for non-integer byte multiples */
4004 int val_bytes = regmap_get_val_bytes(component->regmap); 2611 if (val_bytes > 0)
4005 /* Errors are legitimate for non-integer byte multiples */ 2612 component->val_bytes = val_bytes;
4006 if (val_bytes > 0)
4007 component->val_bytes = val_bytes;
4008 }
4009} 2613}
4010 2614
2615#ifdef CONFIG_REGMAP
2616
2617/**
2618 * snd_soc_component_init_regmap() - Initialize regmap instance for the component
2619 * @component: The component for which to initialize the regmap instance
2620 * @regmap: The regmap instance that should be used by the component
2621 *
2622 * This function allows deferred assignment of the regmap instance that is
2623 * associated with the component. Only use this if the regmap instance is not
2624 * yet ready when the component is registered. The function must also be called
2625 * before the first IO attempt of the component.
2626 */
2627void snd_soc_component_init_regmap(struct snd_soc_component *component,
2628 struct regmap *regmap)
2629{
2630 component->regmap = regmap;
2631 snd_soc_component_setup_regmap(component);
2632}
2633EXPORT_SYMBOL_GPL(snd_soc_component_init_regmap);
2634
2635/**
2636 * snd_soc_component_exit_regmap() - De-initialize regmap instance for the component
2637 * @component: The component for which to de-initialize the regmap instance
2638 *
 2639 * Calls regmap_exit() on the regmap instance associated with the component and
2640 * removes the regmap instance from the component.
2641 *
2642 * This function should only be used if snd_soc_component_init_regmap() was used
2643 * to initialize the regmap instance.
2644 */
2645void snd_soc_component_exit_regmap(struct snd_soc_component *component)
2646{
2647 regmap_exit(component->regmap);
2648 component->regmap = NULL;
2649}
2650EXPORT_SYMBOL_GPL(snd_soc_component_exit_regmap);
2651
2652#endif
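A driver whose regmap only becomes available in its own probe path can hand it over with the new helper. The following is a rough sketch, assuming an I2C codec with a private struct and regmap config of its own; all example_* names are placeholders, not part of this patch:

	/* Hypothetical codec probe fragment: create the regmap late and attach
	 * it to the component before the first register access. */
	static int example_codec_probe(struct snd_soc_codec *codec)
	{
		struct example_priv *priv = snd_soc_codec_get_drvdata(codec);

		priv->regmap = devm_regmap_init_i2c(priv->i2c, &example_regmap_cfg);
		if (IS_ERR(priv->regmap))
			return PTR_ERR(priv->regmap);

		snd_soc_component_init_regmap(&codec->component, priv->regmap);
		return 0;
	}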
2653
4011static void snd_soc_component_add_unlocked(struct snd_soc_component *component) 2654static void snd_soc_component_add_unlocked(struct snd_soc_component *component)
4012{ 2655{
4013 if (!component->write && !component->read) 2656 if (!component->write && !component->read) {
4014 snd_soc_component_init_regmap(component); 2657 if (!component->regmap)
2658 component->regmap = dev_get_regmap(component->dev, NULL);
2659 if (component->regmap)
2660 snd_soc_component_setup_regmap(component);
2661 }
4015 2662
4016 list_add(&component->list, &component_list); 2663 list_add(&component->list, &component_list);
4017} 2664}
@@ -4362,7 +3009,6 @@ int snd_soc_register_codec(struct device *dev,
4362 codec->dev = dev; 3009 codec->dev = dev;
4363 codec->driver = codec_drv; 3010 codec->driver = codec_drv;
4364 codec->component.val_bytes = codec_drv->reg_word_size; 3011 codec->component.val_bytes = codec_drv->reg_word_size;
4365 mutex_init(&codec->mutex);
4366 3012
4367#ifdef CONFIG_DEBUG_FS 3013#ifdef CONFIG_DEBUG_FS
4368 codec->component.init_debugfs = soc_init_codec_debugfs; 3014 codec->component.init_debugfs = soc_init_codec_debugfs;
@@ -4585,7 +3231,7 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
4585 const char *propname) 3231 const char *propname)
4586{ 3232{
4587 struct device_node *np = card->dev->of_node; 3233 struct device_node *np = card->dev->of_node;
4588 int num_routes; 3234 int num_routes, old_routes;
4589 struct snd_soc_dapm_route *routes; 3235 struct snd_soc_dapm_route *routes;
4590 int i, ret; 3236 int i, ret;
4591 3237
@@ -4603,7 +3249,9 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
4603 return -EINVAL; 3249 return -EINVAL;
4604 } 3250 }
4605 3251
4606 routes = devm_kzalloc(card->dev, num_routes * sizeof(*routes), 3252 old_routes = card->num_dapm_routes;
3253 routes = devm_kzalloc(card->dev,
3254 (old_routes + num_routes) * sizeof(*routes),
4607 GFP_KERNEL); 3255 GFP_KERNEL);
4608 if (!routes) { 3256 if (!routes) {
4609 dev_err(card->dev, 3257 dev_err(card->dev,
@@ -4611,9 +3259,11 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
4611 return -EINVAL; 3259 return -EINVAL;
4612 } 3260 }
4613 3261
3262 memcpy(routes, card->dapm_routes, old_routes * sizeof(*routes));
3263
4614 for (i = 0; i < num_routes; i++) { 3264 for (i = 0; i < num_routes; i++) {
4615 ret = of_property_read_string_index(np, propname, 3265 ret = of_property_read_string_index(np, propname,
4616 2 * i, &routes[i].sink); 3266 2 * i, &routes[old_routes + i].sink);
4617 if (ret) { 3267 if (ret) {
4618 dev_err(card->dev, 3268 dev_err(card->dev,
4619 "ASoC: Property '%s' index %d could not be read: %d\n", 3269 "ASoC: Property '%s' index %d could not be read: %d\n",
@@ -4621,7 +3271,7 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
4621 return -EINVAL; 3271 return -EINVAL;
4622 } 3272 }
4623 ret = of_property_read_string_index(np, propname, 3273 ret = of_property_read_string_index(np, propname,
4624 (2 * i) + 1, &routes[i].source); 3274 (2 * i) + 1, &routes[old_routes + i].source);
4625 if (ret) { 3275 if (ret) {
4626 dev_err(card->dev, 3276 dev_err(card->dev,
4627 "ASoC: Property '%s' index %d could not be read: %d\n", 3277 "ASoC: Property '%s' index %d could not be read: %d\n",
@@ -4630,7 +3280,7 @@ int snd_soc_of_parse_audio_routing(struct snd_soc_card *card,
4630 } 3280 }
4631 } 3281 }
4632 3282
4633 card->num_dapm_routes = num_routes; 3283 card->num_dapm_routes += num_routes;
4634 card->dapm_routes = routes; 3284 card->dapm_routes = routes;
4635 3285
4636 return 0; 3286 return 0;
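With this change the parsed routes are appended to whatever the machine driver already placed in card->dapm_routes instead of overwriting it. A typical caller looks roughly like the fragment below; the property name "audio-routing" follows the common convention, but each card binding defines its own:

	ret = snd_soc_of_parse_audio_routing(card, "audio-routing");
	if (ret)
		return ret;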
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index c61cb9cedbcd..c5136bb1f982 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -159,27 +159,135 @@ static void dapm_mark_dirty(struct snd_soc_dapm_widget *w, const char *reason)
159 } 159 }
160} 160}
161 161
162void dapm_mark_io_dirty(struct snd_soc_dapm_context *dapm) 162/*
163 * dapm_widget_invalidate_input_paths() - Invalidate the cached number of input
164 * paths
165 * @w: The widget for which to invalidate the cached number of input paths
166 *
167 * The function resets the cached number of inputs for the specified widget and
168 * all widgets that can be reached via outgoing paths from the widget.
169 *
170 * This function must be called if the number of input paths for a widget might
 171 * have changed, e.g. if the source state of a widget changes or a path is added
172 * or activated with the widget as the sink.
173 */
174static void dapm_widget_invalidate_input_paths(struct snd_soc_dapm_widget *w)
175{
176 struct snd_soc_dapm_widget *sink;
177 struct snd_soc_dapm_path *p;
178 LIST_HEAD(list);
179
180 dapm_assert_locked(w->dapm);
181
182 if (w->inputs == -1)
183 return;
184
185 w->inputs = -1;
186 list_add_tail(&w->work_list, &list);
187
188 list_for_each_entry(w, &list, work_list) {
189 list_for_each_entry(p, &w->sinks, list_source) {
190 if (p->is_supply || p->weak || !p->connect)
191 continue;
192 sink = p->sink;
193 if (sink->inputs != -1) {
194 sink->inputs = -1;
195 list_add_tail(&sink->work_list, &list);
196 }
197 }
198 }
199}
200
201/*
202 * dapm_widget_invalidate_output_paths() - Invalidate the cached number of
203 * output paths
204 * @w: The widget for which to invalidate the cached number of output paths
205 *
206 * Resets the cached number of outputs for the specified widget and all widgets
207 * that can be reached via incoming paths from the widget.
208 *
209 * This function must be called if the number of output paths for a widget might
 210 * have changed, e.g. if the sink state of a widget changes or a path is added
211 * or activated with the widget as the source.
212 */
213static void dapm_widget_invalidate_output_paths(struct snd_soc_dapm_widget *w)
214{
215 struct snd_soc_dapm_widget *source;
216 struct snd_soc_dapm_path *p;
217 LIST_HEAD(list);
218
219 dapm_assert_locked(w->dapm);
220
221 if (w->outputs == -1)
222 return;
223
224 w->outputs = -1;
225 list_add_tail(&w->work_list, &list);
226
227 list_for_each_entry(w, &list, work_list) {
228 list_for_each_entry(p, &w->sources, list_sink) {
229 if (p->is_supply || p->weak || !p->connect)
230 continue;
231 source = p->source;
232 if (source->outputs != -1) {
233 source->outputs = -1;
234 list_add_tail(&source->work_list, &list);
235 }
236 }
237 }
238}
239
240/*
241 * dapm_path_invalidate() - Invalidates the cached number of inputs and outputs
242 * for the widgets connected to a path
243 * @p: The path to invalidate
244 *
245 * Resets the cached number of inputs for the sink of the path and the cached
246 * number of outputs for the source of the path.
247 *
248 * This function must be called when a path is added, removed or the connected
249 * state changes.
250 */
251static void dapm_path_invalidate(struct snd_soc_dapm_path *p)
252{
253 /*
254 * Weak paths or supply paths do not influence the number of input or
255 * output paths of their neighbors.
256 */
257 if (p->weak || p->is_supply)
258 return;
259
260 /*
261 * The number of connected endpoints is the sum of the number of
262 * connected endpoints of all neighbors. If a node with 0 connected
263 * endpoints is either connected or disconnected that sum won't change,
264 * so there is no need to re-check the path.
265 */
266 if (p->source->inputs != 0)
267 dapm_widget_invalidate_input_paths(p->sink);
268 if (p->sink->outputs != 0)
269 dapm_widget_invalidate_output_paths(p->source);
270}
271
272void dapm_mark_endpoints_dirty(struct snd_soc_card *card)
163{ 273{
164 struct snd_soc_card *card = dapm->card;
165 struct snd_soc_dapm_widget *w; 274 struct snd_soc_dapm_widget *w;
166 275
167 mutex_lock(&card->dapm_mutex); 276 mutex_lock(&card->dapm_mutex);
168 277
169 list_for_each_entry(w, &card->widgets, list) { 278 list_for_each_entry(w, &card->widgets, list) {
170 switch (w->id) { 279 if (w->is_sink || w->is_source) {
171 case snd_soc_dapm_input: 280 dapm_mark_dirty(w, "Rechecking endpoints");
172 case snd_soc_dapm_output: 281 if (w->is_sink)
173 dapm_mark_dirty(w, "Rechecking inputs and outputs"); 282 dapm_widget_invalidate_output_paths(w);
174 break; 283 if (w->is_source)
175 default: 284 dapm_widget_invalidate_input_paths(w);
176 break;
177 } 285 }
178 } 286 }
179 287
180 mutex_unlock(&card->dapm_mutex); 288 mutex_unlock(&card->dapm_mutex);
181} 289}
182EXPORT_SYMBOL_GPL(dapm_mark_io_dirty); 290EXPORT_SYMBOL_GPL(dapm_mark_endpoints_dirty);
183 291
184/* create a new dapm widget */ 292/* create a new dapm widget */
185static inline struct snd_soc_dapm_widget *dapm_cnew_widget( 293static inline struct snd_soc_dapm_widget *dapm_cnew_widget(
@@ -386,8 +494,6 @@ static void dapm_reset(struct snd_soc_card *card)
386 list_for_each_entry(w, &card->widgets, list) { 494 list_for_each_entry(w, &card->widgets, list) {
387 w->new_power = w->power; 495 w->new_power = w->power;
388 w->power_checked = false; 496 w->power_checked = false;
389 w->inputs = -1;
390 w->outputs = -1;
391 } 497 }
392} 498}
393 499
@@ -469,10 +575,9 @@ out:
469 575
470/* connect mux widget to its interconnecting audio paths */ 576/* connect mux widget to its interconnecting audio paths */
471static int dapm_connect_mux(struct snd_soc_dapm_context *dapm, 577static int dapm_connect_mux(struct snd_soc_dapm_context *dapm,
472 struct snd_soc_dapm_widget *src, struct snd_soc_dapm_widget *dest, 578 struct snd_soc_dapm_path *path, const char *control_name)
473 struct snd_soc_dapm_path *path, const char *control_name,
474 const struct snd_kcontrol_new *kcontrol)
475{ 579{
580 const struct snd_kcontrol_new *kcontrol = &path->sink->kcontrol_news[0];
476 struct soc_enum *e = (struct soc_enum *)kcontrol->private_value; 581 struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
477 unsigned int val, item; 582 unsigned int val, item;
478 int i; 583 int i;
@@ -493,10 +598,7 @@ static int dapm_connect_mux(struct snd_soc_dapm_context *dapm,
493 598
494 for (i = 0; i < e->items; i++) { 599 for (i = 0; i < e->items; i++) {
495 if (!(strcmp(control_name, e->texts[i]))) { 600 if (!(strcmp(control_name, e->texts[i]))) {
496 list_add(&path->list, &dapm->card->paths); 601 path->name = e->texts[i];
497 list_add(&path->list_sink, &dest->sources);
498 list_add(&path->list_source, &src->sinks);
499 path->name = (char*)e->texts[i];
500 if (i == item) 602 if (i == item)
501 path->connect = 1; 603 path->connect = 1;
502 else 604 else
@@ -509,11 +611,10 @@ static int dapm_connect_mux(struct snd_soc_dapm_context *dapm,
509} 611}
510 612
511/* set up initial codec paths */ 613/* set up initial codec paths */
512static void dapm_set_mixer_path_status(struct snd_soc_dapm_widget *w, 614static void dapm_set_mixer_path_status(struct snd_soc_dapm_path *p, int i)
513 struct snd_soc_dapm_path *p, int i)
514{ 615{
515 struct soc_mixer_control *mc = (struct soc_mixer_control *) 616 struct soc_mixer_control *mc = (struct soc_mixer_control *)
516 w->kcontrol_news[i].private_value; 617 p->sink->kcontrol_news[i].private_value;
517 unsigned int reg = mc->reg; 618 unsigned int reg = mc->reg;
518 unsigned int shift = mc->shift; 619 unsigned int shift = mc->shift;
519 unsigned int max = mc->max; 620 unsigned int max = mc->max;
@@ -522,7 +623,7 @@ static void dapm_set_mixer_path_status(struct snd_soc_dapm_widget *w,
522 unsigned int val; 623 unsigned int val;
523 624
524 if (reg != SND_SOC_NOPM) { 625 if (reg != SND_SOC_NOPM) {
525 soc_dapm_read(w->dapm, reg, &val); 626 soc_dapm_read(p->sink->dapm, reg, &val);
526 val = (val >> shift) & mask; 627 val = (val >> shift) & mask;
527 if (invert) 628 if (invert)
528 val = max - val; 629 val = max - val;
@@ -534,19 +635,15 @@ static void dapm_set_mixer_path_status(struct snd_soc_dapm_widget *w,
534 635
535/* connect mixer widget to its interconnecting audio paths */ 636/* connect mixer widget to its interconnecting audio paths */
536static int dapm_connect_mixer(struct snd_soc_dapm_context *dapm, 637static int dapm_connect_mixer(struct snd_soc_dapm_context *dapm,
537 struct snd_soc_dapm_widget *src, struct snd_soc_dapm_widget *dest,
538 struct snd_soc_dapm_path *path, const char *control_name) 638 struct snd_soc_dapm_path *path, const char *control_name)
539{ 639{
540 int i; 640 int i;
541 641
542 /* search for mixer kcontrol */ 642 /* search for mixer kcontrol */
543 for (i = 0; i < dest->num_kcontrols; i++) { 643 for (i = 0; i < path->sink->num_kcontrols; i++) {
544 if (!strcmp(control_name, dest->kcontrol_news[i].name)) { 644 if (!strcmp(control_name, path->sink->kcontrol_news[i].name)) {
545 list_add(&path->list, &dapm->card->paths); 645 path->name = path->sink->kcontrol_news[i].name;
546 list_add(&path->list_sink, &dest->sources); 646 dapm_set_mixer_path_status(path, i);
547 list_add(&path->list_source, &src->sinks);
548 path->name = dest->kcontrol_news[i].name;
549 dapm_set_mixer_path_status(dest, path, i);
550 return 0; 647 return 0;
551 } 648 }
552 } 649 }
@@ -738,8 +835,10 @@ static int dapm_new_mux(struct snd_soc_dapm_widget *w)
738 if (ret < 0) 835 if (ret < 0)
739 return ret; 836 return ret;
740 837
741 list_for_each_entry(path, &w->sources, list_sink) 838 list_for_each_entry(path, &w->sources, list_sink) {
742 dapm_kcontrol_add_path(w->kcontrols[0], path); 839 if (path->name)
840 dapm_kcontrol_add_path(w->kcontrols[0], path);
841 }
743 842
744 return 0; 843 return 0;
745} 844}
@@ -754,34 +853,6 @@ static int dapm_new_pga(struct snd_soc_dapm_widget *w)
754 return 0; 853 return 0;
755} 854}
756 855
757/* reset 'walked' bit for each dapm path */
758static void dapm_clear_walk_output(struct snd_soc_dapm_context *dapm,
759 struct list_head *sink)
760{
761 struct snd_soc_dapm_path *p;
762
763 list_for_each_entry(p, sink, list_source) {
764 if (p->walked) {
765 p->walked = 0;
766 dapm_clear_walk_output(dapm, &p->sink->sinks);
767 }
768 }
769}
770
771static void dapm_clear_walk_input(struct snd_soc_dapm_context *dapm,
772 struct list_head *source)
773{
774 struct snd_soc_dapm_path *p;
775
776 list_for_each_entry(p, source, list_sink) {
777 if (p->walked) {
778 p->walked = 0;
779 dapm_clear_walk_input(dapm, &p->source->sources);
780 }
781 }
782}
783
784
785/* We implement power down on suspend by checking the power state of 856/* We implement power down on suspend by checking the power state of
786 * the ALSA card - when we are suspending the ALSA state for the card 857 * the ALSA card - when we are suspending the ALSA state for the card
787 * is set to D3. 858 * is set to D3.
@@ -856,61 +927,23 @@ static int is_connected_output_ep(struct snd_soc_dapm_widget *widget,
856 927
857 DAPM_UPDATE_STAT(widget, path_checks); 928 DAPM_UPDATE_STAT(widget, path_checks);
858 929
859 switch (widget->id) { 930 if (widget->is_sink && widget->connected) {
860 case snd_soc_dapm_supply: 931 widget->outputs = snd_soc_dapm_suspend_check(widget);
861 case snd_soc_dapm_regulator_supply: 932 return widget->outputs;
862 case snd_soc_dapm_clock_supply:
863 case snd_soc_dapm_kcontrol:
864 return 0;
865 default:
866 break;
867 }
868
869 switch (widget->id) {
870 case snd_soc_dapm_adc:
871 case snd_soc_dapm_aif_out:
872 case snd_soc_dapm_dai_out:
873 if (widget->active) {
874 widget->outputs = snd_soc_dapm_suspend_check(widget);
875 return widget->outputs;
876 }
877 default:
878 break;
879 }
880
881 if (widget->connected) {
882 /* connected pin ? */
883 if (widget->id == snd_soc_dapm_output && !widget->ext) {
884 widget->outputs = snd_soc_dapm_suspend_check(widget);
885 return widget->outputs;
886 }
887
888 /* connected jack or spk ? */
889 if (widget->id == snd_soc_dapm_hp ||
890 widget->id == snd_soc_dapm_spk ||
891 (widget->id == snd_soc_dapm_line &&
892 !list_empty(&widget->sources))) {
893 widget->outputs = snd_soc_dapm_suspend_check(widget);
894 return widget->outputs;
895 }
896 } 933 }
897 934
898 list_for_each_entry(path, &widget->sinks, list_source) { 935 list_for_each_entry(path, &widget->sinks, list_source) {
899 DAPM_UPDATE_STAT(widget, neighbour_checks); 936 DAPM_UPDATE_STAT(widget, neighbour_checks);
900 937
901 if (path->weak) 938 if (path->weak || path->is_supply)
902 continue; 939 continue;
903 940
904 if (path->walking) 941 if (path->walking)
905 return 1; 942 return 1;
906 943
907 if (path->walked)
908 continue;
909
910 trace_snd_soc_dapm_output_path(widget, path); 944 trace_snd_soc_dapm_output_path(widget, path);
911 945
912 if (path->sink && path->connect) { 946 if (path->connect) {
913 path->walked = 1;
914 path->walking = 1; 947 path->walking = 1;
915 948
916 /* do we need to add this widget to the list ? */ 949 /* do we need to add this widget to the list ? */
@@ -952,73 +985,23 @@ static int is_connected_input_ep(struct snd_soc_dapm_widget *widget,
952 985
953 DAPM_UPDATE_STAT(widget, path_checks); 986 DAPM_UPDATE_STAT(widget, path_checks);
954 987
955 switch (widget->id) { 988 if (widget->is_source && widget->connected) {
956 case snd_soc_dapm_supply: 989 widget->inputs = snd_soc_dapm_suspend_check(widget);
957 case snd_soc_dapm_regulator_supply: 990 return widget->inputs;
958 case snd_soc_dapm_clock_supply:
959 case snd_soc_dapm_kcontrol:
960 return 0;
961 default:
962 break;
963 }
964
965 /* active stream ? */
966 switch (widget->id) {
967 case snd_soc_dapm_dac:
968 case snd_soc_dapm_aif_in:
969 case snd_soc_dapm_dai_in:
970 if (widget->active) {
971 widget->inputs = snd_soc_dapm_suspend_check(widget);
972 return widget->inputs;
973 }
974 default:
975 break;
976 }
977
978 if (widget->connected) {
979 /* connected pin ? */
980 if (widget->id == snd_soc_dapm_input && !widget->ext) {
981 widget->inputs = snd_soc_dapm_suspend_check(widget);
982 return widget->inputs;
983 }
984
985 /* connected VMID/Bias for lower pops */
986 if (widget->id == snd_soc_dapm_vmid) {
987 widget->inputs = snd_soc_dapm_suspend_check(widget);
988 return widget->inputs;
989 }
990
991 /* connected jack ? */
992 if (widget->id == snd_soc_dapm_mic ||
993 (widget->id == snd_soc_dapm_line &&
994 !list_empty(&widget->sinks))) {
995 widget->inputs = snd_soc_dapm_suspend_check(widget);
996 return widget->inputs;
997 }
998
999 /* signal generator */
1000 if (widget->id == snd_soc_dapm_siggen) {
1001 widget->inputs = snd_soc_dapm_suspend_check(widget);
1002 return widget->inputs;
1003 }
1004 } 991 }
1005 992
1006 list_for_each_entry(path, &widget->sources, list_sink) { 993 list_for_each_entry(path, &widget->sources, list_sink) {
1007 DAPM_UPDATE_STAT(widget, neighbour_checks); 994 DAPM_UPDATE_STAT(widget, neighbour_checks);
1008 995
1009 if (path->weak) 996 if (path->weak || path->is_supply)
1010 continue; 997 continue;
1011 998
1012 if (path->walking) 999 if (path->walking)
1013 return 1; 1000 return 1;
1014 1001
1015 if (path->walked)
1016 continue;
1017
1018 trace_snd_soc_dapm_input_path(widget, path); 1002 trace_snd_soc_dapm_input_path(widget, path);
1019 1003
1020 if (path->source && path->connect) { 1004 if (path->connect) {
1021 path->walked = 1;
1022 path->walking = 1; 1005 path->walking = 1;
1023 1006
1024 /* do we need to add this widget to the list ? */ 1007 /* do we need to add this widget to the list ? */
@@ -1060,21 +1043,25 @@ static int is_connected_input_ep(struct snd_soc_dapm_widget *widget,
1060int snd_soc_dapm_dai_get_connected_widgets(struct snd_soc_dai *dai, int stream, 1043int snd_soc_dapm_dai_get_connected_widgets(struct snd_soc_dai *dai, int stream,
1061 struct snd_soc_dapm_widget_list **list) 1044 struct snd_soc_dapm_widget_list **list)
1062{ 1045{
1063 struct snd_soc_card *card = dai->card; 1046 struct snd_soc_card *card = dai->component->card;
1047 struct snd_soc_dapm_widget *w;
1064 int paths; 1048 int paths;
1065 1049
1066 mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME); 1050 mutex_lock_nested(&card->dapm_mutex, SND_SOC_DAPM_CLASS_RUNTIME);
1067 dapm_reset(card);
1068 1051
1069 if (stream == SNDRV_PCM_STREAM_PLAYBACK) { 1052 /*
 1053 * For is_connected_{output,input}_ep to fully discover the graph, we
 1054 * need to reset the cached number of inputs and outputs.
1055 */
1056 list_for_each_entry(w, &card->widgets, list) {
1057 w->inputs = -1;
1058 w->outputs = -1;
1059 }
1060
1061 if (stream == SNDRV_PCM_STREAM_PLAYBACK)
1070 paths = is_connected_output_ep(dai->playback_widget, list); 1062 paths = is_connected_output_ep(dai->playback_widget, list);
1071 dapm_clear_walk_output(&card->dapm, 1063 else
1072 &dai->playback_widget->sinks);
1073 } else {
1074 paths = is_connected_input_ep(dai->capture_widget, list); 1064 paths = is_connected_input_ep(dai->capture_widget, list);
1075 dapm_clear_walk_input(&card->dapm,
1076 &dai->capture_widget->sources);
1077 }
1078 1065
1079 trace_snd_soc_dapm_connected(paths, stream); 1066 trace_snd_soc_dapm_connected(paths, stream);
1080 mutex_unlock(&card->dapm_mutex); 1067 mutex_unlock(&card->dapm_mutex);
@@ -1163,44 +1150,10 @@ static int dapm_generic_check_power(struct snd_soc_dapm_widget *w)
1163 DAPM_UPDATE_STAT(w, power_checks); 1150 DAPM_UPDATE_STAT(w, power_checks);
1164 1151
1165 in = is_connected_input_ep(w, NULL); 1152 in = is_connected_input_ep(w, NULL);
1166 dapm_clear_walk_input(w->dapm, &w->sources);
1167 out = is_connected_output_ep(w, NULL); 1153 out = is_connected_output_ep(w, NULL);
1168 dapm_clear_walk_output(w->dapm, &w->sinks);
1169 return out != 0 && in != 0; 1154 return out != 0 && in != 0;
1170} 1155}
1171 1156
1172/* Check to see if an ADC has power */
1173static int dapm_adc_check_power(struct snd_soc_dapm_widget *w)
1174{
1175 int in;
1176
1177 DAPM_UPDATE_STAT(w, power_checks);
1178
1179 if (w->active) {
1180 in = is_connected_input_ep(w, NULL);
1181 dapm_clear_walk_input(w->dapm, &w->sources);
1182 return in != 0;
1183 } else {
1184 return dapm_generic_check_power(w);
1185 }
1186}
1187
1188/* Check to see if a DAC has power */
1189static int dapm_dac_check_power(struct snd_soc_dapm_widget *w)
1190{
1191 int out;
1192
1193 DAPM_UPDATE_STAT(w, power_checks);
1194
1195 if (w->active) {
1196 out = is_connected_output_ep(w, NULL);
1197 dapm_clear_walk_output(w->dapm, &w->sinks);
1198 return out != 0;
1199 } else {
1200 return dapm_generic_check_power(w);
1201 }
1202}
1203
1204/* Check to see if a power supply is needed */ 1157/* Check to see if a power supply is needed */
1205static int dapm_supply_check_power(struct snd_soc_dapm_widget *w) 1158static int dapm_supply_check_power(struct snd_soc_dapm_widget *w)
1206{ 1159{
@@ -1219,9 +1172,6 @@ static int dapm_supply_check_power(struct snd_soc_dapm_widget *w)
1219 !path->connected(path->source, path->sink)) 1172 !path->connected(path->source, path->sink))
1220 continue; 1173 continue;
1221 1174
1222 if (!path->sink)
1223 continue;
1224
1225 if (dapm_widget_power_check(path->sink)) 1175 if (dapm_widget_power_check(path->sink))
1226 return 1; 1176 return 1;
1227 } 1177 }
@@ -1636,27 +1586,14 @@ static void dapm_widget_set_power(struct snd_soc_dapm_widget *w, bool power,
1636 /* If we changed our power state perhaps our neigbours changed 1586 /* If we changed our power state perhaps our neigbours changed
1637 * also. 1587 * also.
1638 */ 1588 */
1639 list_for_each_entry(path, &w->sources, list_sink) { 1589 list_for_each_entry(path, &w->sources, list_sink)
1640 if (path->source) { 1590 dapm_widget_set_peer_power(path->source, power, path->connect);
1641 dapm_widget_set_peer_power(path->source, power, 1591
1592 /* Supplies can't affect their outputs, only their inputs */
1593 if (!w->is_supply) {
1594 list_for_each_entry(path, &w->sinks, list_source)
1595 dapm_widget_set_peer_power(path->sink, power,
1642 path->connect); 1596 path->connect);
1643 }
1644 }
1645 switch (w->id) {
1646 case snd_soc_dapm_supply:
1647 case snd_soc_dapm_regulator_supply:
1648 case snd_soc_dapm_clock_supply:
1649 case snd_soc_dapm_kcontrol:
1650 /* Supplies can't affect their outputs, only their inputs */
1651 break;
1652 default:
1653 list_for_each_entry(path, &w->sinks, list_source) {
1654 if (path->sink) {
1655 dapm_widget_set_peer_power(path->sink, power,
1656 path->connect);
1657 }
1658 }
1659 break;
1660 } 1597 }
1661 1598
1662 if (power) 1599 if (power)
@@ -1863,10 +1800,14 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
1863 if (!buf) 1800 if (!buf)
1864 return -ENOMEM; 1801 return -ENOMEM;
1865 1802
1866 in = is_connected_input_ep(w, NULL); 1803 /* Supply widgets are not handled by is_connected_{input,output}_ep() */
1867 dapm_clear_walk_input(w->dapm, &w->sources); 1804 if (w->is_supply) {
1868 out = is_connected_output_ep(w, NULL); 1805 in = 0;
1869 dapm_clear_walk_output(w->dapm, &w->sinks); 1806 out = 0;
1807 } else {
1808 in = is_connected_input_ep(w, NULL);
1809 out = is_connected_output_ep(w, NULL);
1810 }
1870 1811
1871 ret = snprintf(buf, PAGE_SIZE, "%s: %s%s in %d out %d", 1812 ret = snprintf(buf, PAGE_SIZE, "%s: %s%s in %d out %d",
1872 w->name, w->power ? "On" : "Off", 1813 w->name, w->power ? "On" : "Off",
@@ -2011,32 +1952,45 @@ static inline void dapm_debugfs_cleanup(struct snd_soc_dapm_context *dapm)
2011 1952
2012#endif 1953#endif
2013 1954
1955/*
1956 * soc_dapm_connect_path() - Connects or disconnects a path
1957 * @path: The path to update
1958 * @connect: The new connect state of the path. True if the path is connected,
 1959 * false if it is disconnected.
1960 * @reason: The reason why the path changed (for debugging only)
1961 */
1962static void soc_dapm_connect_path(struct snd_soc_dapm_path *path,
1963 bool connect, const char *reason)
1964{
1965 if (path->connect == connect)
1966 return;
1967
1968 path->connect = connect;
1969 dapm_mark_dirty(path->source, reason);
1970 dapm_mark_dirty(path->sink, reason);
1971 dapm_path_invalidate(path);
1972}
1973
2014/* test and update the power status of a mux widget */ 1974/* test and update the power status of a mux widget */
2015static int soc_dapm_mux_update_power(struct snd_soc_card *card, 1975static int soc_dapm_mux_update_power(struct snd_soc_card *card,
2016 struct snd_kcontrol *kcontrol, int mux, struct soc_enum *e) 1976 struct snd_kcontrol *kcontrol, int mux, struct soc_enum *e)
2017{ 1977{
2018 struct snd_soc_dapm_path *path; 1978 struct snd_soc_dapm_path *path;
2019 int found = 0; 1979 int found = 0;
1980 bool connect;
2020 1981
2021 lockdep_assert_held(&card->dapm_mutex); 1982 lockdep_assert_held(&card->dapm_mutex);
2022 1983
2023 /* find dapm widget path assoc with kcontrol */ 1984 /* find dapm widget path assoc with kcontrol */
2024 dapm_kcontrol_for_each_path(path, kcontrol) { 1985 dapm_kcontrol_for_each_path(path, kcontrol) {
2025 if (!path->name || !e->texts[mux])
2026 continue;
2027
2028 found = 1; 1986 found = 1;
2029 /* we now need to match the string in the enum to the path */ 1987 /* we now need to match the string in the enum to the path */
2030 if (!(strcmp(path->name, e->texts[mux]))) { 1988 if (!(strcmp(path->name, e->texts[mux])))
2031 path->connect = 1; /* new connection */ 1989 connect = true;
2032 dapm_mark_dirty(path->source, "mux connection"); 1990 else
2033 } else { 1991 connect = false;
2034 if (path->connect) 1992
2035 dapm_mark_dirty(path->source, 1993 soc_dapm_connect_path(path, connect, "mux update");
2036 "mux disconnection");
2037 path->connect = 0; /* old connection must be powered down */
2038 }
2039 dapm_mark_dirty(path->sink, "mux change");
2040 } 1994 }
2041 1995
2042 if (found) 1996 if (found)
@@ -2075,9 +2029,7 @@ static int soc_dapm_mixer_update_power(struct snd_soc_card *card,
2075 /* find dapm widget path assoc with kcontrol */ 2029 /* find dapm widget path assoc with kcontrol */
2076 dapm_kcontrol_for_each_path(path, kcontrol) { 2030 dapm_kcontrol_for_each_path(path, kcontrol) {
2077 found = 1; 2031 found = 1;
2078 path->connect = connect; 2032 soc_dapm_connect_path(path, connect, "mixer update");
2079 dapm_mark_dirty(path->source, "mixer connection");
2080 dapm_mark_dirty(path->sink, "mixer update");
2081 } 2033 }
2082 2034
2083 if (found) 2035 if (found)
@@ -2255,8 +2207,11 @@ static int snd_soc_dapm_set_pin(struct snd_soc_dapm_context *dapm,
2255 return -EINVAL; 2207 return -EINVAL;
2256 } 2208 }
2257 2209
2258 if (w->connected != status) 2210 if (w->connected != status) {
2259 dapm_mark_dirty(w, "pin configuration"); 2211 dapm_mark_dirty(w, "pin configuration");
2212 dapm_widget_invalidate_input_paths(w);
2213 dapm_widget_invalidate_output_paths(w);
2214 }
2260 2215
2261 w->connected = status; 2216 w->connected = status;
2262 if (status == 0) 2217 if (status == 0)
@@ -2309,6 +2264,53 @@ int snd_soc_dapm_sync(struct snd_soc_dapm_context *dapm)
2309} 2264}
2310EXPORT_SYMBOL_GPL(snd_soc_dapm_sync); 2265EXPORT_SYMBOL_GPL(snd_soc_dapm_sync);
2311 2266
2267/*
2268 * dapm_update_widget_flags() - Re-compute widget sink and source flags
2269 * @w: The widget for which to update the flags
2270 *
2271 * Some widgets have a dynamic category which depends on which neighbors they
 2272 * are connected to. This function updates the category for these widgets.
 2273 *
 2274 * This function must be called whenever a path is added to or removed from a widget.
2275 */
2276static void dapm_update_widget_flags(struct snd_soc_dapm_widget *w)
2277{
2278 struct snd_soc_dapm_path *p;
2279
2280 switch (w->id) {
2281 case snd_soc_dapm_input:
2282 w->is_source = 1;
2283 list_for_each_entry(p, &w->sources, list_sink) {
2284 if (p->source->id == snd_soc_dapm_micbias ||
2285 p->source->id == snd_soc_dapm_mic ||
2286 p->source->id == snd_soc_dapm_line ||
2287 p->source->id == snd_soc_dapm_output) {
2288 w->is_source = 0;
2289 break;
2290 }
2291 }
2292 break;
2293 case snd_soc_dapm_output:
2294 w->is_sink = 1;
2295 list_for_each_entry(p, &w->sinks, list_source) {
2296 if (p->sink->id == snd_soc_dapm_spk ||
2297 p->sink->id == snd_soc_dapm_hp ||
2298 p->sink->id == snd_soc_dapm_line ||
2299 p->sink->id == snd_soc_dapm_input) {
2300 w->is_sink = 0;
2301 break;
2302 }
2303 }
2304 break;
2305 case snd_soc_dapm_line:
2306 w->is_sink = !list_empty(&w->sources);
2307 w->is_source = !list_empty(&w->sinks);
2308 break;
2309 default:
2310 break;
2311 }
2312}
2313
2312static int snd_soc_dapm_add_path(struct snd_soc_dapm_context *dapm, 2314static int snd_soc_dapm_add_path(struct snd_soc_dapm_context *dapm,
2313 struct snd_soc_dapm_widget *wsource, struct snd_soc_dapm_widget *wsink, 2315 struct snd_soc_dapm_widget *wsource, struct snd_soc_dapm_widget *wsink,
2314 const char *control, 2316 const char *control,
@@ -2318,6 +2320,27 @@ static int snd_soc_dapm_add_path(struct snd_soc_dapm_context *dapm,
2318 struct snd_soc_dapm_path *path; 2320 struct snd_soc_dapm_path *path;
2319 int ret; 2321 int ret;
2320 2322
2323 if (wsink->is_supply && !wsource->is_supply) {
2324 dev_err(dapm->dev,
2325 "Connecting non-supply widget to supply widget is not supported (%s -> %s)\n",
2326 wsource->name, wsink->name);
2327 return -EINVAL;
2328 }
2329
2330 if (connected && !wsource->is_supply) {
2331 dev_err(dapm->dev,
2332 "connected() callback only supported for supply widgets (%s -> %s)\n",
2333 wsource->name, wsink->name);
2334 return -EINVAL;
2335 }
2336
2337 if (wsource->is_supply && control) {
2338 dev_err(dapm->dev,
2339 "Conditional paths are not supported for supply widgets (%s -> [%s] -> %s)\n",
2340 wsource->name, control, wsink->name);
2341 return -EINVAL;
2342 }
2343
2321 path = kzalloc(sizeof(struct snd_soc_dapm_path), GFP_KERNEL); 2344 path = kzalloc(sizeof(struct snd_soc_dapm_path), GFP_KERNEL);
2322 if (!path) 2345 if (!path)
2323 return -ENOMEM; 2346 return -ENOMEM;
@@ -2330,85 +2353,49 @@ static int snd_soc_dapm_add_path(struct snd_soc_dapm_context *dapm,
2330 INIT_LIST_HEAD(&path->list_source); 2353 INIT_LIST_HEAD(&path->list_source);
2331 INIT_LIST_HEAD(&path->list_sink); 2354 INIT_LIST_HEAD(&path->list_sink);
2332 2355
2333 /* check for external widgets */ 2356 if (wsource->is_supply || wsink->is_supply)
2334 if (wsink->id == snd_soc_dapm_input) { 2357 path->is_supply = 1;
2335 if (wsource->id == snd_soc_dapm_micbias ||
2336 wsource->id == snd_soc_dapm_mic ||
2337 wsource->id == snd_soc_dapm_line ||
2338 wsource->id == snd_soc_dapm_output)
2339 wsink->ext = 1;
2340 }
2341 if (wsource->id == snd_soc_dapm_output) {
2342 if (wsink->id == snd_soc_dapm_spk ||
2343 wsink->id == snd_soc_dapm_hp ||
2344 wsink->id == snd_soc_dapm_line ||
2345 wsink->id == snd_soc_dapm_input)
2346 wsource->ext = 1;
2347 }
2348
2349 dapm_mark_dirty(wsource, "Route added");
2350 dapm_mark_dirty(wsink, "Route added");
2351 2358
2352 /* connect static paths */ 2359 /* connect static paths */
2353 if (control == NULL) { 2360 if (control == NULL) {
2354 list_add(&path->list, &dapm->card->paths);
2355 list_add(&path->list_sink, &wsink->sources);
2356 list_add(&path->list_source, &wsource->sinks);
2357 path->connect = 1; 2361 path->connect = 1;
2358 return 0; 2362 } else {
2359 } 2363 /* connect dynamic paths */
2360 2364 switch (wsink->id) {
2361 /* connect dynamic paths */ 2365 case snd_soc_dapm_mux:
2362 switch (wsink->id) { 2366 ret = dapm_connect_mux(dapm, path, control);
2363 case snd_soc_dapm_adc: 2367 if (ret != 0)
2364 case snd_soc_dapm_dac: 2368 goto err;
2365 case snd_soc_dapm_pga: 2369 break;
2366 case snd_soc_dapm_out_drv: 2370 case snd_soc_dapm_switch:
2367 case snd_soc_dapm_input: 2371 case snd_soc_dapm_mixer:
2368 case snd_soc_dapm_output: 2372 case snd_soc_dapm_mixer_named_ctl:
2369 case snd_soc_dapm_siggen: 2373 ret = dapm_connect_mixer(dapm, path, control);
2370 case snd_soc_dapm_micbias: 2374 if (ret != 0)
2371 case snd_soc_dapm_vmid: 2375 goto err;
2372 case snd_soc_dapm_pre: 2376 break;
2373 case snd_soc_dapm_post: 2377 default:
2374 case snd_soc_dapm_supply: 2378 dev_err(dapm->dev,
2375 case snd_soc_dapm_regulator_supply: 2379 "Control not supported for path %s -> [%s] -> %s\n",
2376 case snd_soc_dapm_clock_supply: 2380 wsource->name, control, wsink->name);
2377 case snd_soc_dapm_aif_in: 2381 ret = -EINVAL;
2378 case snd_soc_dapm_aif_out:
2379 case snd_soc_dapm_dai_in:
2380 case snd_soc_dapm_dai_out:
2381 case snd_soc_dapm_dai_link:
2382 case snd_soc_dapm_kcontrol:
2383 list_add(&path->list, &dapm->card->paths);
2384 list_add(&path->list_sink, &wsink->sources);
2385 list_add(&path->list_source, &wsource->sinks);
2386 path->connect = 1;
2387 return 0;
2388 case snd_soc_dapm_mux:
2389 ret = dapm_connect_mux(dapm, wsource, wsink, path, control,
2390 &wsink->kcontrol_news[0]);
2391 if (ret != 0)
2392 goto err;
2393 break;
2394 case snd_soc_dapm_switch:
2395 case snd_soc_dapm_mixer:
2396 case snd_soc_dapm_mixer_named_ctl:
2397 ret = dapm_connect_mixer(dapm, wsource, wsink, path, control);
2398 if (ret != 0)
2399 goto err; 2382 goto err;
2400 break; 2383 }
2401 case snd_soc_dapm_hp:
2402 case snd_soc_dapm_mic:
2403 case snd_soc_dapm_line:
2404 case snd_soc_dapm_spk:
2405 list_add(&path->list, &dapm->card->paths);
2406 list_add(&path->list_sink, &wsink->sources);
2407 list_add(&path->list_source, &wsource->sinks);
2408 path->connect = 0;
2409 return 0;
2410 } 2384 }
2411 2385
2386 list_add(&path->list, &dapm->card->paths);
2387 list_add(&path->list_sink, &wsink->sources);
2388 list_add(&path->list_source, &wsource->sinks);
2389
2390 dapm_update_widget_flags(wsource);
2391 dapm_update_widget_flags(wsink);
2392
2393 dapm_mark_dirty(wsource, "Route added");
2394 dapm_mark_dirty(wsink, "Route added");
2395
2396 if (dapm->card->instantiated && path->connect)
2397 dapm_path_invalidate(path);
2398
2412 return 0; 2399 return 0;
2413err: 2400err:
2414 kfree(path); 2401 kfree(path);
@@ -2489,6 +2476,7 @@ err:
2489static int snd_soc_dapm_del_route(struct snd_soc_dapm_context *dapm, 2476static int snd_soc_dapm_del_route(struct snd_soc_dapm_context *dapm,
2490 const struct snd_soc_dapm_route *route) 2477 const struct snd_soc_dapm_route *route)
2491{ 2478{
2479 struct snd_soc_dapm_widget *wsource, *wsink;
2492 struct snd_soc_dapm_path *path, *p; 2480 struct snd_soc_dapm_path *path, *p;
2493 const char *sink; 2481 const char *sink;
2494 const char *source; 2482 const char *source;
@@ -2526,10 +2514,19 @@ static int snd_soc_dapm_del_route(struct snd_soc_dapm_context *dapm,
2526 } 2514 }
2527 2515
2528 if (path) { 2516 if (path) {
2529 dapm_mark_dirty(path->source, "Route removed"); 2517 wsource = path->source;
2530 dapm_mark_dirty(path->sink, "Route removed"); 2518 wsink = path->sink;
2519
2520 dapm_mark_dirty(wsource, "Route removed");
2521 dapm_mark_dirty(wsink, "Route removed");
2522 if (path->connect)
2523 dapm_path_invalidate(path);
2531 2524
2532 dapm_free_path(path); 2525 dapm_free_path(path);
2526
2527 /* Update any path related flags */
2528 dapm_update_widget_flags(wsource);
2529 dapm_update_widget_flags(wsink);
2533 } else { 2530 } else {
2534 dev_warn(dapm->dev, "ASoC: Route %s->%s does not exist\n", 2531 dev_warn(dapm->dev, "ASoC: Route %s->%s does not exist\n",
2535 source, sink); 2532 source, sink);
@@ -3087,40 +3084,44 @@ snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
3087 } 3084 }
3088 3085
3089 switch (w->id) { 3086 switch (w->id) {
3090 case snd_soc_dapm_switch: 3087 case snd_soc_dapm_mic:
3091 case snd_soc_dapm_mixer: 3088 case snd_soc_dapm_input:
3092 case snd_soc_dapm_mixer_named_ctl: 3089 w->is_source = 1;
3093 w->power_check = dapm_generic_check_power; 3090 w->power_check = dapm_generic_check_power;
3094 break; 3091 break;
3095 case snd_soc_dapm_mux: 3092 case snd_soc_dapm_spk:
3093 case snd_soc_dapm_hp:
3094 case snd_soc_dapm_output:
3095 w->is_sink = 1;
3096 w->power_check = dapm_generic_check_power; 3096 w->power_check = dapm_generic_check_power;
3097 break; 3097 break;
3098 case snd_soc_dapm_dai_out: 3098 case snd_soc_dapm_vmid:
3099 w->power_check = dapm_adc_check_power; 3099 case snd_soc_dapm_siggen:
3100 break; 3100 w->is_source = 1;
3101 case snd_soc_dapm_dai_in: 3101 w->power_check = dapm_always_on_check_power;
3102 w->power_check = dapm_dac_check_power;
3103 break; 3102 break;
3103 case snd_soc_dapm_mux:
3104 case snd_soc_dapm_switch:
3105 case snd_soc_dapm_mixer:
3106 case snd_soc_dapm_mixer_named_ctl:
3104 case snd_soc_dapm_adc: 3107 case snd_soc_dapm_adc:
3105 case snd_soc_dapm_aif_out: 3108 case snd_soc_dapm_aif_out:
3106 case snd_soc_dapm_dac: 3109 case snd_soc_dapm_dac:
3107 case snd_soc_dapm_aif_in: 3110 case snd_soc_dapm_aif_in:
3108 case snd_soc_dapm_pga: 3111 case snd_soc_dapm_pga:
3109 case snd_soc_dapm_out_drv: 3112 case snd_soc_dapm_out_drv:
3110 case snd_soc_dapm_input:
3111 case snd_soc_dapm_output:
3112 case snd_soc_dapm_micbias: 3113 case snd_soc_dapm_micbias:
3113 case snd_soc_dapm_spk:
3114 case snd_soc_dapm_hp:
3115 case snd_soc_dapm_mic:
3116 case snd_soc_dapm_line: 3114 case snd_soc_dapm_line:
3117 case snd_soc_dapm_dai_link: 3115 case snd_soc_dapm_dai_link:
3116 case snd_soc_dapm_dai_out:
3117 case snd_soc_dapm_dai_in:
3118 w->power_check = dapm_generic_check_power; 3118 w->power_check = dapm_generic_check_power;
3119 break; 3119 break;
3120 case snd_soc_dapm_supply: 3120 case snd_soc_dapm_supply:
3121 case snd_soc_dapm_regulator_supply: 3121 case snd_soc_dapm_regulator_supply:
3122 case snd_soc_dapm_clock_supply: 3122 case snd_soc_dapm_clock_supply:
3123 case snd_soc_dapm_kcontrol: 3123 case snd_soc_dapm_kcontrol:
3124 w->is_supply = 1;
3124 w->power_check = dapm_supply_check_power; 3125 w->power_check = dapm_supply_check_power;
3125 break; 3126 break;
3126 default: 3127 default:
@@ -3137,6 +3138,9 @@ snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
3137 INIT_LIST_HEAD(&w->dirty); 3138 INIT_LIST_HEAD(&w->dirty);
3138 list_add(&w->list, &dapm->card->widgets); 3139 list_add(&w->list, &dapm->card->widgets);
3139 3140
3141 w->inputs = -1;
3142 w->outputs = -1;
3143
3140 /* machine layer set ups unconnected pins and insertions */ 3144 /* machine layer set ups unconnected pins and insertions */
3141 w->connected = 1; 3145 w->connected = 1;
3142 return w; 3146 return w;
@@ -3484,6 +3488,14 @@ static void soc_dapm_dai_stream_event(struct snd_soc_dai *dai, int stream,
3484 case SND_SOC_DAPM_STREAM_PAUSE_RELEASE: 3488 case SND_SOC_DAPM_STREAM_PAUSE_RELEASE:
3485 break; 3489 break;
3486 } 3490 }
3491
3492 if (w->id == snd_soc_dapm_dai_in) {
3493 w->is_source = w->active;
3494 dapm_widget_invalidate_input_paths(w);
3495 } else {
3496 w->is_sink = w->active;
3497 dapm_widget_invalidate_output_paths(w);
3498 }
3487 } 3499 }
3488} 3500}
3489 3501
@@ -3610,7 +3622,15 @@ int snd_soc_dapm_force_enable_pin_unlocked(struct snd_soc_dapm_context *dapm,
3610 } 3622 }
3611 3623
3612 dev_dbg(w->dapm->dev, "ASoC: force enable pin %s\n", pin); 3624 dev_dbg(w->dapm->dev, "ASoC: force enable pin %s\n", pin);
3613 w->connected = 1; 3625 if (!w->connected) {
3626 /*
3627 * w->force does not affect the number of input or output paths,
3628 * so we only have to recheck if w->connected is changed
3629 */
3630 dapm_widget_invalidate_input_paths(w);
3631 dapm_widget_invalidate_output_paths(w);
3632 w->connected = 1;
3633 }
3614 w->force = 1; 3634 w->force = 1;
3615 dapm_mark_dirty(w, "force enable"); 3635 dapm_mark_dirty(w, "force enable");
3616 3636
@@ -3788,35 +3808,54 @@ int snd_soc_dapm_ignore_suspend(struct snd_soc_dapm_context *dapm,
3788} 3808}
3789EXPORT_SYMBOL_GPL(snd_soc_dapm_ignore_suspend); 3809EXPORT_SYMBOL_GPL(snd_soc_dapm_ignore_suspend);
3790 3810
3811/**
 3812 * dapm_is_external_path() - Checks if a path is an external path
3813 * @card: The card the path belongs to
3814 * @path: The path to check
3815 *
3816 * Returns true if the path is either between two different DAPM contexts or
3817 * between two external pins of the same DAPM context. Otherwise returns
3818 * false.
3819 */
3820static bool dapm_is_external_path(struct snd_soc_card *card,
3821 struct snd_soc_dapm_path *path)
3822{
3823 dev_dbg(card->dev,
3824 "... Path %s(id:%d dapm:%p) - %s(id:%d dapm:%p)\n",
3825 path->source->name, path->source->id, path->source->dapm,
3826 path->sink->name, path->sink->id, path->sink->dapm);
3827
3828 /* Connection between two different DAPM contexts */
3829 if (path->source->dapm != path->sink->dapm)
3830 return true;
3831
3832 /* Loopback connection from external pin to external pin */
3833 if (path->sink->id == snd_soc_dapm_input) {
3834 switch (path->source->id) {
3835 case snd_soc_dapm_output:
3836 case snd_soc_dapm_micbias:
3837 return true;
3838 default:
3839 break;
3840 }
3841 }
3842
3843 return false;
3844}
3845
3791static bool snd_soc_dapm_widget_in_card_paths(struct snd_soc_card *card, 3846static bool snd_soc_dapm_widget_in_card_paths(struct snd_soc_card *card,
3792 struct snd_soc_dapm_widget *w) 3847 struct snd_soc_dapm_widget *w)
3793{ 3848{
3794 struct snd_soc_dapm_path *p; 3849 struct snd_soc_dapm_path *p;
3795 3850
3796 list_for_each_entry(p, &card->paths, list) { 3851 list_for_each_entry(p, &w->sources, list_sink) {
3797 if ((p->source == w) || (p->sink == w)) { 3852 if (dapm_is_external_path(card, p))
3798 dev_dbg(card->dev, 3853 return true;
3799 "... Path %s(id:%d dapm:%p) - %s(id:%d dapm:%p)\n", 3854 }
3800 p->source->name, p->source->id, p->source->dapm,
3801 p->sink->name, p->sink->id, p->sink->dapm);
3802 3855
3803 /* Connected to something other than the codec */ 3856 list_for_each_entry(p, &w->sinks, list_source) {
3804 if (p->source->dapm != p->sink->dapm) 3857 if (dapm_is_external_path(card, p))
3805 return true; 3858 return true;
3806 /*
3807 * Loopback connection from codec external pin to
3808 * codec external pin
3809 */
3810 if (p->sink->id == snd_soc_dapm_input) {
3811 switch (p->source->id) {
3812 case snd_soc_dapm_output:
3813 case snd_soc_dapm_micbias:
3814 return true;
3815 default:
3816 break;
3817 }
3818 }
3819 }
3820 } 3859 }
3821 3860
3822 return false; 3861 return false;
diff --git a/sound/soc/soc-jack.c b/sound/soc/soc-jack.c
index ab47fea997a3..ef1d42d7c6f6 100644
--- a/sound/soc/soc-jack.c
+++ b/sound/soc/soc-jack.c
@@ -116,7 +116,7 @@ EXPORT_SYMBOL_GPL(snd_soc_jack_report);
116 * 116 *
117 * @jack: ASoC jack 117 * @jack: ASoC jack
118 * @count: Number of zones 118 * @count: Number of zones
119 * @zone: Array of zones 119 * @zones: Array of zones
120 * 120 *
121 * After this function has been called the zones specified in the 121 * After this function has been called the zones specified in the
122 * array will be associated with the jack. 122 * array will be associated with the jack.
diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c
new file mode 100644
index 000000000000..100d92b5b77e
--- /dev/null
+++ b/sound/soc/soc-ops.c
@@ -0,0 +1,952 @@
1/*
2 * soc-ops.c -- Generic ASoC operations
3 *
4 * Copyright 2005 Wolfson Microelectronics PLC.
5 * Copyright 2005 Openedhand Ltd.
6 * Copyright (C) 2010 Slimlogic Ltd.
7 * Copyright (C) 2010 Texas Instruments Inc.
8 *
9 * Author: Liam Girdwood <lrg@slimlogic.co.uk>
10 * with code, comments and ideas from :-
11 * Richard Purdie <richard@openedhand.com>
12 *
13 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the
15 * Free Software Foundation; either version 2 of the License, or (at your
16 * option) any later version.
17 */
18
19#include <linux/module.h>
20#include <linux/moduleparam.h>
21#include <linux/init.h>
22#include <linux/delay.h>
23#include <linux/pm.h>
24#include <linux/bitops.h>
25#include <linux/ctype.h>
26#include <linux/slab.h>
27#include <sound/core.h>
28#include <sound/jack.h>
29#include <sound/pcm.h>
30#include <sound/pcm_params.h>
31#include <sound/soc.h>
32#include <sound/soc-dpcm.h>
33#include <sound/initval.h>
34
35/**
36 * snd_soc_info_enum_double - enumerated double mixer info callback
37 * @kcontrol: mixer control
38 * @uinfo: control element information
39 *
40 * Callback to provide information about a double enumerated
41 * mixer control.
42 *
43 * Returns 0 for success.
44 */
45int snd_soc_info_enum_double(struct snd_kcontrol *kcontrol,
46 struct snd_ctl_elem_info *uinfo)
47{
48 struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
49
50 return snd_ctl_enum_info(uinfo, e->shift_l == e->shift_r ? 1 : 2,
51 e->items, e->texts);
52}
53EXPORT_SYMBOL_GPL(snd_soc_info_enum_double);
54
55/**
56 * snd_soc_get_enum_double - enumerated double mixer get callback
57 * @kcontrol: mixer control
58 * @ucontrol: control element information
59 *
60 * Callback to get the value of a double enumerated mixer.
61 *
62 * Returns 0 for success.
63 */
64int snd_soc_get_enum_double(struct snd_kcontrol *kcontrol,
65 struct snd_ctl_elem_value *ucontrol)
66{
67 struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
68 struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
69 unsigned int val, item;
70 unsigned int reg_val;
71 int ret;
72
73 ret = snd_soc_component_read(component, e->reg, &reg_val);
74 if (ret)
75 return ret;
76 val = (reg_val >> e->shift_l) & e->mask;
77 item = snd_soc_enum_val_to_item(e, val);
78 ucontrol->value.enumerated.item[0] = item;
79 if (e->shift_l != e->shift_r) {
 80 val = (reg_val >> e->shift_r) & e->mask;
81 item = snd_soc_enum_val_to_item(e, val);
82 ucontrol->value.enumerated.item[1] = item;
83 }
84
85 return 0;
86}
87EXPORT_SYMBOL_GPL(snd_soc_get_enum_double);
88
89/**
90 * snd_soc_put_enum_double - enumerated double mixer put callback
91 * @kcontrol: mixer control
92 * @ucontrol: control element information
93 *
94 * Callback to set the value of a double enumerated mixer.
95 *
96 * Returns 0 for success.
97 */
98int snd_soc_put_enum_double(struct snd_kcontrol *kcontrol,
99 struct snd_ctl_elem_value *ucontrol)
100{
101 struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
102 struct soc_enum *e = (struct soc_enum *)kcontrol->private_value;
103 unsigned int *item = ucontrol->value.enumerated.item;
104 unsigned int val;
105 unsigned int mask;
106
107 if (item[0] >= e->items)
108 return -EINVAL;
109 val = snd_soc_enum_item_to_val(e, item[0]) << e->shift_l;
110 mask = e->mask << e->shift_l;
111 if (e->shift_l != e->shift_r) {
112 if (item[1] >= e->items)
113 return -EINVAL;
114 val |= snd_soc_enum_item_to_val(e, item[1]) << e->shift_r;
115 mask |= e->mask << e->shift_r;
116 }
117
118 return snd_soc_component_update_bits(component, e->reg, mask, val);
119}
120EXPORT_SYMBOL_GPL(snd_soc_put_enum_double);
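These enum helpers service controls built with the SOC_ENUM() family of macros. A sketch of a double (per-channel) enum follows; the register, shifts and texts are invented for illustration:

	/* Hypothetical input mux: a 2-bit field per channel in one register,
	 * left channel at shift 0 and right channel at shift 2. */
	static const char * const example_mux_texts[] = {
		"Line", "Mic", "DAC", "Off",
	};

	static SOC_ENUM_DOUBLE_DECL(example_mux_enum, EXAMPLE_MUX_REG, 0, 2,
				    example_mux_texts);

	static const struct snd_kcontrol_new example_mux_ctl =
		SOC_ENUM("Capture Mux", example_mux_enum);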
121
122/**
 123 * snd_soc_read_signed - Read a codec register and interpret it as a signed value
124 * @component: component
125 * @reg: Register to read
126 * @mask: Mask to use after shifting the register value
127 * @shift: Right shift of register value
128 * @sign_bit: Bit that describes if a number is negative or not.
129 * @signed_val: Pointer to where the read value should be stored
130 *
 131 * This function reads a codec register. The register value is shifted right
 132 * by 'shift' bits and masked with the given 'mask'. Afterwards it translates
 133 * the given register value into a signed integer if sign_bit is non-zero.
134 *
135 * Returns 0 on success, otherwise an error value
136 */
137static int snd_soc_read_signed(struct snd_soc_component *component,
138 unsigned int reg, unsigned int mask, unsigned int shift,
139 unsigned int sign_bit, int *signed_val)
140{
141 int ret;
142 unsigned int val;
143
144 ret = snd_soc_component_read(component, reg, &val);
145 if (ret < 0)
146 return ret;
147
148 val = (val >> shift) & mask;
149
150 if (!sign_bit) {
151 *signed_val = val;
152 return 0;
153 }
154
155 /* non-negative number */
156 if (!(val & BIT(sign_bit))) {
157 *signed_val = val;
158 return 0;
159 }
160
161 ret = val;
162
163 /*
164 * The register most probably does not contain a full-sized int.
165 * Instead we have an arbitrary number of bits in a signed
166 * representation which has to be translated into a full-sized int.
167 * This is done by filling up all bits above the sign-bit.
168 */
169 ret |= ~((int)(BIT(sign_bit) - 1));
170
171 *signed_val = ret;
172
173 return 0;
174}
175
176/**
177 * snd_soc_info_volsw - single mixer info callback
178 * @kcontrol: mixer control
179 * @uinfo: control element information
180 *
181 * Callback to provide information about a single mixer control, or a double
182 * mixer control that spans 2 registers.
183 *
184 * Returns 0 for success.
185 */
186int snd_soc_info_volsw(struct snd_kcontrol *kcontrol,
187 struct snd_ctl_elem_info *uinfo)
188{
189 struct soc_mixer_control *mc =
190 (struct soc_mixer_control *)kcontrol->private_value;
191 int platform_max;
192
193 if (!mc->platform_max)
194 mc->platform_max = mc->max;
195 platform_max = mc->platform_max;
196
197 if (platform_max == 1 && !strstr(kcontrol->id.name, " Volume"))
198 uinfo->type = SNDRV_CTL_ELEM_TYPE_BOOLEAN;
199 else
200 uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
201
202 uinfo->count = snd_soc_volsw_is_stereo(mc) ? 2 : 1;
203 uinfo->value.integer.min = 0;
204 uinfo->value.integer.max = platform_max - mc->min;
205 return 0;
206}
207EXPORT_SYMBOL_GPL(snd_soc_info_volsw);
208
209/**
210 * snd_soc_get_volsw - single mixer get callback
211 * @kcontrol: mixer control
212 * @ucontrol: control element information
213 *
214 * Callback to get the value of a single mixer control, or a double mixer
215 * control that spans 2 registers.
216 *
217 * Returns 0 for success.
218 */
219int snd_soc_get_volsw(struct snd_kcontrol *kcontrol,
220 struct snd_ctl_elem_value *ucontrol)
221{
222 struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
223 struct soc_mixer_control *mc =
224 (struct soc_mixer_control *)kcontrol->private_value;
225 unsigned int reg = mc->reg;
226 unsigned int reg2 = mc->rreg;
227 unsigned int shift = mc->shift;
228 unsigned int rshift = mc->rshift;
229 int max = mc->max;
230 int min = mc->min;
231 int sign_bit = mc->sign_bit;
232 unsigned int mask = (1 << fls(max)) - 1;
233 unsigned int invert = mc->invert;
234 int val;
235 int ret;
236
237 if (sign_bit)
238 mask = BIT(sign_bit + 1) - 1;
239
240 ret = snd_soc_read_signed(component, reg, mask, shift, sign_bit, &val);
241 if (ret)
242 return ret;
243
244 ucontrol->value.integer.value[0] = val - min;
245 if (invert)
246 ucontrol->value.integer.value[0] =
247 max - ucontrol->value.integer.value[0];
248
249 if (snd_soc_volsw_is_stereo(mc)) {
250 if (reg == reg2)
251 ret = snd_soc_read_signed(component, reg, mask, rshift,
252 sign_bit, &val);
253 else
254 ret = snd_soc_read_signed(component, reg2, mask, shift,
255 sign_bit, &val);
256 if (ret)
257 return ret;
258
259 ucontrol->value.integer.value[1] = val - min;
260 if (invert)
261 ucontrol->value.integer.value[1] =
262 max - ucontrol->value.integer.value[1];
263 }
264
265 return 0;
266}
267EXPORT_SYMBOL_GPL(snd_soc_get_volsw);
268
269/**
270 * snd_soc_put_volsw - single mixer put callback
271 * @kcontrol: mixer control
272 * @ucontrol: control element information
273 *
274 * Callback to set the value of a single mixer control, or a double mixer
275 * control that spans 2 registers.
276 *
277 * Returns 0 for success.
278 */
279int snd_soc_put_volsw(struct snd_kcontrol *kcontrol,
280 struct snd_ctl_elem_value *ucontrol)
281{
282 struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
283 struct soc_mixer_control *mc =
284 (struct soc_mixer_control *)kcontrol->private_value;
285 unsigned int reg = mc->reg;
286 unsigned int reg2 = mc->rreg;
287 unsigned int shift = mc->shift;
288 unsigned int rshift = mc->rshift;
289 int max = mc->max;
290 int min = mc->min;
291 unsigned int sign_bit = mc->sign_bit;
292 unsigned int mask = (1 << fls(max)) - 1;
293 unsigned int invert = mc->invert;
294 int err;
295 bool type_2r = false;
296 unsigned int val2 = 0;
297 unsigned int val, val_mask;
298
299 if (sign_bit)
300 mask = BIT(sign_bit + 1) - 1;
301
302 val = ((ucontrol->value.integer.value[0] + min) & mask);
303 if (invert)
304 val = max - val;
305 val_mask = mask << shift;
306 val = val << shift;
307 if (snd_soc_volsw_is_stereo(mc)) {
308 val2 = ((ucontrol->value.integer.value[1] + min) & mask);
309 if (invert)
310 val2 = max - val2;
311 if (reg == reg2) {
312 val_mask |= mask << rshift;
313 val |= val2 << rshift;
314 } else {
315 val2 = val2 << shift;
316 type_2r = true;
317 }
318 }
319 err = snd_soc_component_update_bits(component, reg, val_mask, val);
320 if (err < 0)
321 return err;
322
323 if (type_2r)
324 err = snd_soc_component_update_bits(component, reg2, val_mask,
325 val2);
326
327 return err;
328}
329EXPORT_SYMBOL_GPL(snd_soc_put_volsw);
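A hedged example of the usual way into snd_soc_info/get/put_volsw (not part of this file): codec drivers declare the mixer via SOC_SINGLE or SOC_DOUBLE_R and the macros wire in these callbacks; the registers, names and ranges below are invented.

static const struct snd_kcontrol_new demo_volsw_controls[] = {
	/* single switch: register 0x02, bit 7, not inverted */
	SOC_SINGLE("Demo Mic Switch", 0x02, 7, 1, 0),
	/* stereo volume spanning two registers 0x04/0x05, steps 0..63 */
	SOC_DOUBLE_R("Demo Headphone Volume", 0x04, 0x05, 0, 63, 0),
};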
330
331/**
332 * snd_soc_get_volsw_sx - single mixer get callback
333 * @kcontrol: mixer control
334 * @ucontrol: control element information
335 *
336 * Callback to get the value of a single mixer control, or a double mixer
337 * control that spans 2 registers.
338 *
339 * Returns 0 for success.
340 */
341int snd_soc_get_volsw_sx(struct snd_kcontrol *kcontrol,
342 struct snd_ctl_elem_value *ucontrol)
343{
344 struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
345 struct soc_mixer_control *mc =
346 (struct soc_mixer_control *)kcontrol->private_value;
347 unsigned int reg = mc->reg;
348 unsigned int reg2 = mc->rreg;
349 unsigned int shift = mc->shift;
350 unsigned int rshift = mc->rshift;
351 int max = mc->max;
352 int min = mc->min;
353 int mask = (1 << (fls(min + max) - 1)) - 1;
354 unsigned int val;
355 int ret;
356
357 ret = snd_soc_component_read(component, reg, &val);
358 if (ret < 0)
359 return ret;
360
361 ucontrol->value.integer.value[0] = ((val >> shift) - min) & mask;
362
363 if (snd_soc_volsw_is_stereo(mc)) {
364 ret = snd_soc_component_read(component, reg2, &val);
365 if (ret < 0)
366 return ret;
367
368 val = ((val >> rshift) - min) & mask;
369 ucontrol->value.integer.value[1] = val;
370 }
371
372 return 0;
373}
374EXPORT_SYMBOL_GPL(snd_soc_get_volsw_sx);
375
376/**
377 * snd_soc_put_volsw_sx - double mixer set callback
378 * @kcontrol: mixer control
379 * @ucontrol: control element information
380 *
381 * Callback to set the value of a double mixer control that spans 2 registers.
382 *
383 * Returns 0 for success.
384 */
385int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol,
386 struct snd_ctl_elem_value *ucontrol)
387{
388 struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
389 struct soc_mixer_control *mc =
390 (struct soc_mixer_control *)kcontrol->private_value;
391
392 unsigned int reg = mc->reg;
393 unsigned int reg2 = mc->rreg;
394 unsigned int shift = mc->shift;
395 unsigned int rshift = mc->rshift;
396 int max = mc->max;
397 int min = mc->min;
398 int mask = (1 << (fls(min + max) - 1)) - 1;
399 int err = 0;
400 unsigned int val, val_mask, val2 = 0;
401
402 val_mask = mask << shift;
403 val = (ucontrol->value.integer.value[0] + min) & mask;
404 val = val << shift;
405
406 err = snd_soc_component_update_bits(component, reg, val_mask, val);
407 if (err < 0)
408 return err;
409
410 if (snd_soc_volsw_is_stereo(mc)) {
411 val_mask = mask << rshift;
412 val2 = (ucontrol->value.integer.value[1] + min) & mask;
413 val2 = val2 << rshift;
414
415 err = snd_soc_component_update_bits(component, reg2, val_mask,
416 val2);
417 }
418 return err;
419}
420EXPORT_SYMBOL_GPL(snd_soc_put_volsw_sx);
421
422/**
423 * snd_soc_info_volsw_range - single mixer info callback with range.
424 * @kcontrol: mixer control
425 * @uinfo: control element information
426 *
427 * Callback to provide information, within a range, about a single
428 * mixer control.
429 *
430 * Returns 0 for success.
431 */
432int snd_soc_info_volsw_range(struct snd_kcontrol *kcontrol,
433 struct snd_ctl_elem_info *uinfo)
434{
435 struct soc_mixer_control *mc =
436 (struct soc_mixer_control *)kcontrol->private_value;
437 int platform_max;
438 int min = mc->min;
439
440 if (!mc->platform_max)
441 mc->platform_max = mc->max;
442 platform_max = mc->platform_max;
443
444 uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
445 uinfo->count = snd_soc_volsw_is_stereo(mc) ? 2 : 1;
446 uinfo->value.integer.min = 0;
447 uinfo->value.integer.max = platform_max - min;
448
449 return 0;
450}
451EXPORT_SYMBOL_GPL(snd_soc_info_volsw_range);
452
453/**
454 * snd_soc_put_volsw_range - single mixer put value callback with range.
455 * @kcontrol: mixer control
456 * @ucontrol: control element information
457 *
458 * Callback to set the value, within a range, for a single mixer control.
459 *
460 * Returns 0 for success.
461 */
462int snd_soc_put_volsw_range(struct snd_kcontrol *kcontrol,
463 struct snd_ctl_elem_value *ucontrol)
464{
465 struct soc_mixer_control *mc =
466 (struct soc_mixer_control *)kcontrol->private_value;
467 struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
468 unsigned int reg = mc->reg;
469 unsigned int rreg = mc->rreg;
470 unsigned int shift = mc->shift;
471 int min = mc->min;
472 int max = mc->max;
473 unsigned int mask = (1 << fls(max)) - 1;
474 unsigned int invert = mc->invert;
475 unsigned int val, val_mask;
476 int ret;
477
478 if (invert)
479 val = (max - ucontrol->value.integer.value[0]) & mask;
480 else
481 val = ((ucontrol->value.integer.value[0] + min) & mask);
482 val_mask = mask << shift;
483 val = val << shift;
484
485 ret = snd_soc_component_update_bits(component, reg, val_mask, val);
486 if (ret < 0)
487 return ret;
488
489 if (snd_soc_volsw_is_stereo(mc)) {
490 if (invert)
491 val = (max - ucontrol->value.integer.value[1]) & mask;
492 else
493 val = ((ucontrol->value.integer.value[1] + min) & mask);
494 val_mask = mask << shift;
495 val = val << shift;
496
497 ret = snd_soc_component_update_bits(component, rreg, val_mask,
498 val);
499 }
500
501 return ret;
502}
503EXPORT_SYMBOL_GPL(snd_soc_put_volsw_range);
504
505/**
506 * snd_soc_get_volsw_range - single mixer get callback with range
507 * @kcontrol: mixer control
508 * @ucontrol: control element information
509 *
510 * Callback to get the value, within a range, of a single mixer control.
511 *
512 * Returns 0 for success.
513 */
514int snd_soc_get_volsw_range(struct snd_kcontrol *kcontrol,
515 struct snd_ctl_elem_value *ucontrol)
516{
517 struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
518 struct soc_mixer_control *mc =
519 (struct soc_mixer_control *)kcontrol->private_value;
520 unsigned int reg = mc->reg;
521 unsigned int rreg = mc->rreg;
522 unsigned int shift = mc->shift;
523 int min = mc->min;
524 int max = mc->max;
525 unsigned int mask = (1 << fls(max)) - 1;
526 unsigned int invert = mc->invert;
527 unsigned int val;
528 int ret;
529
530 ret = snd_soc_component_read(component, reg, &val);
531 if (ret)
532 return ret;
533
534 ucontrol->value.integer.value[0] = (val >> shift) & mask;
535 if (invert)
536 ucontrol->value.integer.value[0] =
537 max - ucontrol->value.integer.value[0];
538 else
539 ucontrol->value.integer.value[0] =
540 ucontrol->value.integer.value[0] - min;
541
542 if (snd_soc_volsw_is_stereo(mc)) {
543 ret = snd_soc_component_read(component, rreg, &val);
544 if (ret)
545 return ret;
546
547 ucontrol->value.integer.value[1] = (val >> shift) & mask;
548 if (invert)
549 ucontrol->value.integer.value[1] =
550 max - ucontrol->value.integer.value[1];
551 else
552 ucontrol->value.integer.value[1] =
553 ucontrol->value.integer.value[1] - min;
554 }
555
556 return 0;
557}
558EXPORT_SYMBOL_GPL(snd_soc_get_volsw_range);
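A sketch (assumed, not from this file) of how the *_volsw_range callbacks are typically instantiated via the SOC_SINGLE_RANGE helper; the register, shift and limits are placeholders.

/* hypothetical ADC volume: register 0x08, bits 0..6, valid range 0x19..0x7f */
static const struct snd_kcontrol_new demo_range_control =
	SOC_SINGLE_RANGE("Demo ADC Volume", 0x08, 0, 0x19, 0x7f, 0);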
559
560/**
561 * snd_soc_limit_volume - Set a new limit on an existing volume control.
562 *
563 * @codec: where to look for the control
564 * @name: Name of the control
565 * @max: new maximum limit
566 *
567 * Return 0 for success, else error.
568 */
569int snd_soc_limit_volume(struct snd_soc_codec *codec,
570 const char *name, int max)
571{
572 struct snd_card *card = codec->component.card->snd_card;
573 struct snd_kcontrol *kctl;
574 struct soc_mixer_control *mc;
575 int found = 0;
576 int ret = -EINVAL;
577
578 /* Sanity check for name and max */
579 if (unlikely(!name || max <= 0))
580 return -EINVAL;
581
582 list_for_each_entry(kctl, &card->controls, list) {
583 if (!strncmp(kctl->id.name, name, sizeof(kctl->id.name))) {
584 found = 1;
585 break;
586 }
587 }
588 if (found) {
589 mc = (struct soc_mixer_control *)kctl->private_value;
590 if (max <= mc->max) {
591 mc->platform_max = max;
592 ret = 0;
593 }
594 }
595 return ret;
596}
597EXPORT_SYMBOL_GPL(snd_soc_limit_volume);
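An illustrative call site for snd_soc_limit_volume (assumed, not part of this file): a machine driver can clamp an existing codec control, for example from its late_probe callback. The control name, the limit and the demo_get_codec() helper are hypothetical.

static int demo_card_late_probe(struct snd_soc_card *card)
{
	struct snd_soc_codec *codec = demo_get_codec(card); /* board-specific helper */
	int err;

	/* never allow more than step 80 on this board's speaker amp */
	err = snd_soc_limit_volume(codec, "Speaker Playback Volume", 80);
	if (err < 0)
		dev_warn(card->dev, "could not limit speaker volume: %d\n", err);

	return 0;
}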
598
599int snd_soc_bytes_info(struct snd_kcontrol *kcontrol,
600 struct snd_ctl_elem_info *uinfo)
601{
602 struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
603 struct soc_bytes *params = (void *)kcontrol->private_value;
604
605 uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
606 uinfo->count = params->num_regs * component->val_bytes;
607
608 return 0;
609}
610EXPORT_SYMBOL_GPL(snd_soc_bytes_info);
611
612int snd_soc_bytes_get(struct snd_kcontrol *kcontrol,
613 struct snd_ctl_elem_value *ucontrol)
614{
615 struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
616 struct soc_bytes *params = (void *)kcontrol->private_value;
617 int ret;
618
619 if (component->regmap)
620 ret = regmap_raw_read(component->regmap, params->base,
621 ucontrol->value.bytes.data,
622 params->num_regs * component->val_bytes);
623 else
624 ret = -EINVAL;
625
626 /* Hide any masked bytes to ensure consistent data reporting */
627 if (ret == 0 && params->mask) {
628 switch (component->val_bytes) {
629 case 1:
630 ucontrol->value.bytes.data[0] &= ~params->mask;
631 break;
632 case 2:
633 ((u16 *)(&ucontrol->value.bytes.data))[0]
634 &= cpu_to_be16(~params->mask);
635 break;
636 case 4:
637 ((u32 *)(&ucontrol->value.bytes.data))[0]
638 &= cpu_to_be32(~params->mask);
639 break;
640 default:
641 return -EINVAL;
642 }
643 }
644
645 return ret;
646}
647EXPORT_SYMBOL_GPL(snd_soc_bytes_get);
648
649int snd_soc_bytes_put(struct snd_kcontrol *kcontrol,
650 struct snd_ctl_elem_value *ucontrol)
651{
652 struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
653 struct soc_bytes *params = (void *)kcontrol->private_value;
654 int ret, len;
655 unsigned int val, mask;
656 void *data;
657
658 if (!component->regmap || !params->num_regs)
659 return -EINVAL;
660
661 len = params->num_regs * component->val_bytes;
662
663 data = kmemdup(ucontrol->value.bytes.data, len, GFP_KERNEL | GFP_DMA);
664 if (!data)
665 return -ENOMEM;
666
667 /*
668 * If we've got a mask then we need to preserve the register
669 * bits. We shouldn't modify the incoming data so take a
670 * copy.
671 */
672 if (params->mask) {
673 ret = regmap_read(component->regmap, params->base, &val);
674 if (ret != 0)
675 goto out;
676
677 val &= params->mask;
678
679 switch (component->val_bytes) {
680 case 1:
681 ((u8 *)data)[0] &= ~params->mask;
682 ((u8 *)data)[0] |= val;
683 break;
684 case 2:
685 mask = ~params->mask;
686 ret = regmap_parse_val(component->regmap,
687 &mask, &mask);
688 if (ret != 0)
689 goto out;
690
691 ((u16 *)data)[0] &= mask;
692
693 ret = regmap_parse_val(component->regmap,
694 &val, &val);
695 if (ret != 0)
696 goto out;
697
698 ((u16 *)data)[0] |= val;
699 break;
700 case 4:
701 mask = ~params->mask;
702 ret = regmap_parse_val(component->regmap,
703 &mask, &mask);
704 if (ret != 0)
705 goto out;
706
707 ((u32 *)data)[0] &= mask;
708
709 ret = regmap_parse_val(component->regmap,
710 &val, &val);
711 if (ret != 0)
712 goto out;
713
714 ((u32 *)data)[0] |= val;
715 break;
716 default:
717 ret = -EINVAL;
718 goto out;
719 }
720 }
721
722 ret = regmap_raw_write(component->regmap, params->base,
723 data, len);
724
725out:
726 kfree(data);
727
728 return ret;
729}
730EXPORT_SYMBOL_GPL(snd_soc_bytes_put);
731
732int snd_soc_bytes_info_ext(struct snd_kcontrol *kcontrol,
733 struct snd_ctl_elem_info *ucontrol)
734{
735 struct soc_bytes_ext *params = (void *)kcontrol->private_value;
736
737 ucontrol->type = SNDRV_CTL_ELEM_TYPE_BYTES;
738 ucontrol->count = params->max;
739
740 return 0;
741}
742EXPORT_SYMBOL_GPL(snd_soc_bytes_info_ext);
743
744int snd_soc_bytes_tlv_callback(struct snd_kcontrol *kcontrol, int op_flag,
745 unsigned int size, unsigned int __user *tlv)
746{
747 struct soc_bytes_ext *params = (void *)kcontrol->private_value;
748 unsigned int count = size < params->max ? size : params->max;
749 int ret = -ENXIO;
750
751 switch (op_flag) {
752 case SNDRV_CTL_TLV_OP_READ:
753 if (params->get)
754 ret = params->get(tlv, count);
755 break;
756 case SNDRV_CTL_TLV_OP_WRITE:
757 if (params->put)
758 ret = params->put(tlv, count);
759 break;
760 }
761 return ret;
762}
763EXPORT_SYMBOL_GPL(snd_soc_bytes_tlv_callback);
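A sketch of a driver using this TLV bytes path (assumed, not part of this file): the handlers follow the call signatures used above (params->get(tlv, count) / params->put(tlv, count); the exact prototypes live in struct soc_bytes_ext) and are wired up with the SND_SOC_BYTES_TLV helper. The names and the 512-byte size are invented.

static int demo_blob_get(unsigned int __user *bytes, unsigned int size)
{
	/* copy_to_user() at most 'size' bytes of the current blob */
	return 0;
}

static int demo_blob_put(unsigned int __user *bytes, unsigned int size)
{
	/* copy_from_user() and apply the new blob */
	return 0;
}

static const struct snd_kcontrol_new demo_bytes_control =
	SND_SOC_BYTES_TLV("Demo DSP Config", 512, demo_blob_get, demo_blob_put);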
764
765/**
766 * snd_soc_info_xr_sx - signed multi register info callback
767 * @kcontrol: mreg control
768 * @uinfo: control element information
769 *
770 * Callback to provide information about a control that can
771 * span multiple codec registers which together
772 * form a single signed value in an MSB/LSB manner.
773 *
774 * Returns 0 for success.
775 */
776int snd_soc_info_xr_sx(struct snd_kcontrol *kcontrol,
777 struct snd_ctl_elem_info *uinfo)
778{
779 struct soc_mreg_control *mc =
780 (struct soc_mreg_control *)kcontrol->private_value;
781 uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
782 uinfo->count = 1;
783 uinfo->value.integer.min = mc->min;
784 uinfo->value.integer.max = mc->max;
785
786 return 0;
787}
788EXPORT_SYMBOL_GPL(snd_soc_info_xr_sx);
789
790/**
791 * snd_soc_get_xr_sx - signed multi register get callback
792 * @kcontrol: mreg control
793 * @ucontrol: control element information
794 *
795 * Callback to get the value of a control that can span
796 * multiple codec registers which together form a single
797 * signed value in an MSB/LSB manner. The control supports
798 * specifying the total number of bits used, allowing for
799 * bitfields across the multiple codec registers.
800 *
801 * Returns 0 for success.
802 */
803int snd_soc_get_xr_sx(struct snd_kcontrol *kcontrol,
804 struct snd_ctl_elem_value *ucontrol)
805{
806 struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
807 struct soc_mreg_control *mc =
808 (struct soc_mreg_control *)kcontrol->private_value;
809 unsigned int regbase = mc->regbase;
810 unsigned int regcount = mc->regcount;
811 unsigned int regwshift = component->val_bytes * BITS_PER_BYTE;
812 unsigned int regwmask = (1<<regwshift)-1;
813 unsigned int invert = mc->invert;
814 unsigned long mask = (1UL<<mc->nbits)-1;
815 long min = mc->min;
816 long max = mc->max;
817 long val = 0;
818 unsigned int regval;
819 unsigned int i;
820 int ret;
821
822 for (i = 0; i < regcount; i++) {
823 ret = snd_soc_component_read(component, regbase+i, &regval);
824 if (ret)
825 return ret;
826 val |= (regval & regwmask) << (regwshift*(regcount-i-1));
827 }
828 val &= mask;
829 if (min < 0 && val > max)
830 val |= ~mask;
831 if (invert)
832 val = max - val;
833 ucontrol->value.integer.value[0] = val;
834
835 return 0;
836}
837EXPORT_SYMBOL_GPL(snd_soc_get_xr_sx);
838
839/**
840 * snd_soc_put_xr_sx - signed multi register put callback
841 * @kcontrol: mreg control
842 * @ucontrol: control element information
843 *
844 * Callback to set the value of a control that can span
845 * multiple codec registers which together form a single
846 * signed value in an MSB/LSB manner. The control supports
847 * specifying the total number of bits used, allowing for
848 * bitfields across the multiple codec registers.
849 *
850 * Returns 0 for success.
851 */
852int snd_soc_put_xr_sx(struct snd_kcontrol *kcontrol,
853 struct snd_ctl_elem_value *ucontrol)
854{
855 struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
856 struct soc_mreg_control *mc =
857 (struct soc_mreg_control *)kcontrol->private_value;
858 unsigned int regbase = mc->regbase;
859 unsigned int regcount = mc->regcount;
860 unsigned int regwshift = component->val_bytes * BITS_PER_BYTE;
861 unsigned int regwmask = (1<<regwshift)-1;
862 unsigned int invert = mc->invert;
863 unsigned long mask = (1UL<<mc->nbits)-1;
864 long max = mc->max;
865 long val = ucontrol->value.integer.value[0];
866 unsigned int i, regval, regmask;
867 int err;
868
869 if (invert)
870 val = max - val;
871 val &= mask;
872 for (i = 0; i < regcount; i++) {
873 regval = (val >> (regwshift*(regcount-i-1))) & regwmask;
874 regmask = (mask >> (regwshift*(regcount-i-1))) & regwmask;
875 err = snd_soc_component_update_bits(component, regbase+i,
876 regmask, regval);
877 if (err < 0)
878 return err;
879 }
880
881 return 0;
882}
883EXPORT_SYMBOL_GPL(snd_soc_put_xr_sx);
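A sketch of the usual consumer of the xr_sx callbacks (assumed, not from this file): SOC_SINGLE_XR_SX describes a signed value spread MSB-first over consecutive registers; the base register, register count, bit width and range below are placeholders.

/* hypothetical 16-bit signed gain split over two 8-bit registers at 0x20/0x21 */
static const struct snd_kcontrol_new demo_xr_sx_control =
	SOC_SINGLE_XR_SX("Demo Master Volume", 0x20, 2, 16, -32768, 32767, 0);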
884
885/**
886 * snd_soc_get_strobe - strobe get callback
887 * @kcontrol: mixer control
888 * @ucontrol: control element information
889 *
890 * Callback to get the value of a strobe mixer control.
891 *
892 * Returns 0 for success.
893 */
894int snd_soc_get_strobe(struct snd_kcontrol *kcontrol,
895 struct snd_ctl_elem_value *ucontrol)
896{
897 struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
898 struct soc_mixer_control *mc =
899 (struct soc_mixer_control *)kcontrol->private_value;
900 unsigned int reg = mc->reg;
901 unsigned int shift = mc->shift;
902 unsigned int mask = 1 << shift;
903 unsigned int invert = mc->invert != 0;
904 unsigned int val;
905 int ret;
906
907 ret = snd_soc_component_read(component, reg, &val);
908 if (ret)
909 return ret;
910
911 val &= mask;
912
913 if (shift != 0 && val != 0)
914 val = val >> shift;
915 ucontrol->value.enumerated.item[0] = val ^ invert;
916
917 return 0;
918}
919EXPORT_SYMBOL_GPL(snd_soc_get_strobe);
920
921/**
922 * snd_soc_put_strobe - strobe put callback
923 * @kcontrol: mixer control
924 * @ucontrol: control element information
925 *
926 * Callback to strobe a register bit high then low (or the inverse)
927 * in one pass of a single mixer enum control.
928 *
929 * Returns 1 for success.
930 */
931int snd_soc_put_strobe(struct snd_kcontrol *kcontrol,
932 struct snd_ctl_elem_value *ucontrol)
933{
934 struct snd_soc_component *component = snd_kcontrol_chip(kcontrol);
935 struct soc_mixer_control *mc =
936 (struct soc_mixer_control *)kcontrol->private_value;
937 unsigned int reg = mc->reg;
938 unsigned int shift = mc->shift;
939 unsigned int mask = 1 << shift;
940 unsigned int invert = mc->invert != 0;
941 unsigned int strobe = ucontrol->value.enumerated.item[0] != 0;
942 unsigned int val1 = (strobe ^ invert) ? mask : 0;
943 unsigned int val2 = (strobe ^ invert) ? 0 : mask;
944 int err;
945
946 err = snd_soc_component_update_bits(component, reg, mask, val1);
947 if (err < 0)
948 return err;
949
950 return snd_soc_component_update_bits(component, reg, mask, val2);
951}
952EXPORT_SYMBOL_GPL(snd_soc_put_strobe);
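A sketch of the usual consumer of the strobe callbacks (assumed, not part of this file): SOC_SINGLE_STROBE declares a write-to-pulse bit; the register and bit position below are made up.

/* hypothetical calibration trigger: pulse bit 3 of register 0x30 */
static const struct snd_kcontrol_new demo_strobe_control =
	SOC_SINGLE_STROBE("Demo Calibrate", 0x30, 3, 0);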
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index 002311afdeaa..eb87d96e2cf0 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -654,6 +654,8 @@ static int soc_pcm_close(struct snd_pcm_substream *substream)
654 codec_dai->rate = 0; 654 codec_dai->rate = 0;
655 } 655 }
656 656
657 snd_soc_dai_digital_mute(cpu_dai, 1, substream->stream);
658
657 if (cpu_dai->driver->ops->shutdown) 659 if (cpu_dai->driver->ops->shutdown)
658 cpu_dai->driver->ops->shutdown(substream, cpu_dai); 660 cpu_dai->driver->ops->shutdown(substream, cpu_dai);
659 661
@@ -772,6 +774,7 @@ static int soc_pcm_prepare(struct snd_pcm_substream *substream)
772 for (i = 0; i < rtd->num_codecs; i++) 774 for (i = 0; i < rtd->num_codecs; i++)
773 snd_soc_dai_digital_mute(rtd->codec_dais[i], 0, 775 snd_soc_dai_digital_mute(rtd->codec_dais[i], 0,
774 substream->stream); 776 substream->stream);
777 snd_soc_dai_digital_mute(cpu_dai, 0, substream->stream);
775 778
776out: 779out:
777 mutex_unlock(&rtd->pcm_mutex); 780 mutex_unlock(&rtd->pcm_mutex);
@@ -1522,13 +1525,36 @@ static void dpcm_set_fe_runtime(struct snd_pcm_substream *substream)
1522 dpcm_init_runtime_hw(runtime, &cpu_dai_drv->capture); 1525 dpcm_init_runtime_hw(runtime, &cpu_dai_drv->capture);
1523} 1526}
1524 1527
1528static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd);
1529
1530/* Set FE's runtime_update state; the state is protected via the PCM stream
1531 * lock to avoid racing with the trigger callback.
1532 * If the state is being unset and a trigger became pending during the previous
1533 * operation, process the pending trigger action here.
1534 */
1535static void dpcm_set_fe_update_state(struct snd_soc_pcm_runtime *fe,
1536 int stream, enum snd_soc_dpcm_update state)
1537{
1538 struct snd_pcm_substream *substream =
1539 snd_soc_dpcm_get_substream(fe, stream);
1540
1541 snd_pcm_stream_lock_irq(substream);
1542 if (state == SND_SOC_DPCM_UPDATE_NO && fe->dpcm[stream].trigger_pending) {
1543 dpcm_fe_dai_do_trigger(substream,
1544 fe->dpcm[stream].trigger_pending - 1);
1545 fe->dpcm[stream].trigger_pending = 0;
1546 }
1547 fe->dpcm[stream].runtime_update = state;
1548 snd_pcm_stream_unlock_irq(substream);
1549}
1550
1525static int dpcm_fe_dai_startup(struct snd_pcm_substream *fe_substream) 1551static int dpcm_fe_dai_startup(struct snd_pcm_substream *fe_substream)
1526{ 1552{
1527 struct snd_soc_pcm_runtime *fe = fe_substream->private_data; 1553 struct snd_soc_pcm_runtime *fe = fe_substream->private_data;
1528 struct snd_pcm_runtime *runtime = fe_substream->runtime; 1554 struct snd_pcm_runtime *runtime = fe_substream->runtime;
1529 int stream = fe_substream->stream, ret = 0; 1555 int stream = fe_substream->stream, ret = 0;
1530 1556
1531 fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE; 1557 dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE);
1532 1558
1533 ret = dpcm_be_dai_startup(fe, fe_substream->stream); 1559 ret = dpcm_be_dai_startup(fe, fe_substream->stream);
1534 if (ret < 0) { 1560 if (ret < 0) {
@@ -1550,13 +1576,13 @@ static int dpcm_fe_dai_startup(struct snd_pcm_substream *fe_substream)
1550 dpcm_set_fe_runtime(fe_substream); 1576 dpcm_set_fe_runtime(fe_substream);
1551 snd_pcm_limit_hw_rates(runtime); 1577 snd_pcm_limit_hw_rates(runtime);
1552 1578
1553 fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO; 1579 dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
1554 return 0; 1580 return 0;
1555 1581
1556unwind: 1582unwind:
1557 dpcm_be_dai_startup_unwind(fe, fe_substream->stream); 1583 dpcm_be_dai_startup_unwind(fe, fe_substream->stream);
1558be_err: 1584be_err:
1559 fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO; 1585 dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
1560 return ret; 1586 return ret;
1561} 1587}
1562 1588
@@ -1603,7 +1629,7 @@ static int dpcm_fe_dai_shutdown(struct snd_pcm_substream *substream)
1603 struct snd_soc_pcm_runtime *fe = substream->private_data; 1629 struct snd_soc_pcm_runtime *fe = substream->private_data;
1604 int stream = substream->stream; 1630 int stream = substream->stream;
1605 1631
1606 fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE; 1632 dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE);
1607 1633
1608 /* shutdown the BEs */ 1634 /* shutdown the BEs */
1609 dpcm_be_dai_shutdown(fe, substream->stream); 1635 dpcm_be_dai_shutdown(fe, substream->stream);
@@ -1617,7 +1643,7 @@ static int dpcm_fe_dai_shutdown(struct snd_pcm_substream *substream)
1617 dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_STOP); 1643 dpcm_dapm_stream_event(fe, stream, SND_SOC_DAPM_STREAM_STOP);
1618 1644
1619 fe->dpcm[stream].state = SND_SOC_DPCM_STATE_CLOSE; 1645 fe->dpcm[stream].state = SND_SOC_DPCM_STATE_CLOSE;
1620 fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO; 1646 dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
1621 return 0; 1647 return 0;
1622} 1648}
1623 1649
@@ -1641,6 +1667,10 @@ int dpcm_be_dai_hw_free(struct snd_soc_pcm_runtime *fe, int stream)
1641 if (!snd_soc_dpcm_can_be_free_stop(fe, be, stream)) 1667 if (!snd_soc_dpcm_can_be_free_stop(fe, be, stream))
1642 continue; 1668 continue;
1643 1669
1670 /* do not free hw if this BE is used by other FE */
1671 if (be->dpcm[stream].users > 1)
1672 continue;
1673
1644 if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_PARAMS) && 1674 if ((be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_PARAMS) &&
1645 (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PREPARE) && 1675 (be->dpcm[stream].state != SND_SOC_DPCM_STATE_PREPARE) &&
1646 (be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_FREE) && 1676 (be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_FREE) &&
@@ -1665,7 +1695,7 @@ static int dpcm_fe_dai_hw_free(struct snd_pcm_substream *substream)
1665 int err, stream = substream->stream; 1695 int err, stream = substream->stream;
1666 1696
1667 mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME); 1697 mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
1668 fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE; 1698 dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE);
1669 1699
1670 dev_dbg(fe->dev, "ASoC: hw_free FE %s\n", fe->dai_link->name); 1700 dev_dbg(fe->dev, "ASoC: hw_free FE %s\n", fe->dai_link->name);
1671 1701
@@ -1680,7 +1710,7 @@ static int dpcm_fe_dai_hw_free(struct snd_pcm_substream *substream)
1680 err = dpcm_be_dai_hw_free(fe, stream); 1710 err = dpcm_be_dai_hw_free(fe, stream);
1681 1711
1682 fe->dpcm[stream].state = SND_SOC_DPCM_STATE_HW_FREE; 1712 fe->dpcm[stream].state = SND_SOC_DPCM_STATE_HW_FREE;
1683 fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO; 1713 dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
1684 1714
1685 mutex_unlock(&fe->card->mutex); 1715 mutex_unlock(&fe->card->mutex);
1686 return 0; 1716 return 0;
@@ -1773,7 +1803,7 @@ static int dpcm_fe_dai_hw_params(struct snd_pcm_substream *substream,
1773 int ret, stream = substream->stream; 1803 int ret, stream = substream->stream;
1774 1804
1775 mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME); 1805 mutex_lock_nested(&fe->card->mutex, SND_SOC_CARD_CLASS_RUNTIME);
1776 fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE; 1806 dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE);
1777 1807
1778 memcpy(&fe->dpcm[substream->stream].hw_params, params, 1808 memcpy(&fe->dpcm[substream->stream].hw_params, params,
1779 sizeof(struct snd_pcm_hw_params)); 1809 sizeof(struct snd_pcm_hw_params));
@@ -1796,7 +1826,7 @@ static int dpcm_fe_dai_hw_params(struct snd_pcm_substream *substream,
1796 fe->dpcm[stream].state = SND_SOC_DPCM_STATE_HW_PARAMS; 1826 fe->dpcm[stream].state = SND_SOC_DPCM_STATE_HW_PARAMS;
1797 1827
1798out: 1828out:
1799 fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO; 1829 dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
1800 mutex_unlock(&fe->card->mutex); 1830 mutex_unlock(&fe->card->mutex);
1801 return ret; 1831 return ret;
1802} 1832}
@@ -1910,7 +1940,7 @@ int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream,
1910} 1940}
1911EXPORT_SYMBOL_GPL(dpcm_be_dai_trigger); 1941EXPORT_SYMBOL_GPL(dpcm_be_dai_trigger);
1912 1942
1913static int dpcm_fe_dai_trigger(struct snd_pcm_substream *substream, int cmd) 1943static int dpcm_fe_dai_do_trigger(struct snd_pcm_substream *substream, int cmd)
1914{ 1944{
1915 struct snd_soc_pcm_runtime *fe = substream->private_data; 1945 struct snd_soc_pcm_runtime *fe = substream->private_data;
1916 int stream = substream->stream, ret; 1946 int stream = substream->stream, ret;
@@ -1984,6 +2014,23 @@ out:
1984 return ret; 2014 return ret;
1985} 2015}
1986 2016
2017static int dpcm_fe_dai_trigger(struct snd_pcm_substream *substream, int cmd)
2018{
2019 struct snd_soc_pcm_runtime *fe = substream->private_data;
2020 int stream = substream->stream;
2021
2022 /* if FE's runtime_update is already set, we're in a race;
2023 * process this trigger later at exit
2024 */
2025 if (fe->dpcm[stream].runtime_update != SND_SOC_DPCM_UPDATE_NO) {
2026 fe->dpcm[stream].trigger_pending = cmd + 1;
2027 return 0; /* delayed, assuming it's successful */
2028 }
2029
2030 /* we're alone, let's trigger */
2031 return dpcm_fe_dai_do_trigger(substream, cmd);
2032}
2033
1987int dpcm_be_dai_prepare(struct snd_soc_pcm_runtime *fe, int stream) 2034int dpcm_be_dai_prepare(struct snd_soc_pcm_runtime *fe, int stream)
1988{ 2035{
1989 struct snd_soc_dpcm *dpcm; 2036 struct snd_soc_dpcm *dpcm;
@@ -2027,7 +2074,7 @@ static int dpcm_fe_dai_prepare(struct snd_pcm_substream *substream)
2027 2074
2028 dev_dbg(fe->dev, "ASoC: prepare FE %s\n", fe->dai_link->name); 2075 dev_dbg(fe->dev, "ASoC: prepare FE %s\n", fe->dai_link->name);
2029 2076
2030 fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_FE; 2077 dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_FE);
2031 2078
2032 /* there is no point preparing this FE if there are no BEs */ 2079 /* there is no point preparing this FE if there are no BEs */
2033 if (list_empty(&fe->dpcm[stream].be_clients)) { 2080 if (list_empty(&fe->dpcm[stream].be_clients)) {
@@ -2054,7 +2101,7 @@ static int dpcm_fe_dai_prepare(struct snd_pcm_substream *substream)
2054 fe->dpcm[stream].state = SND_SOC_DPCM_STATE_PREPARE; 2101 fe->dpcm[stream].state = SND_SOC_DPCM_STATE_PREPARE;
2055 2102
2056out: 2103out:
2057 fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO; 2104 dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
2058 mutex_unlock(&fe->card->mutex); 2105 mutex_unlock(&fe->card->mutex);
2059 2106
2060 return ret; 2107 return ret;
@@ -2201,11 +2248,11 @@ static int dpcm_run_new_update(struct snd_soc_pcm_runtime *fe, int stream)
2201{ 2248{
2202 int ret; 2249 int ret;
2203 2250
2204 fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_BE; 2251 dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_BE);
2205 ret = dpcm_run_update_startup(fe, stream); 2252 ret = dpcm_run_update_startup(fe, stream);
2206 if (ret < 0) 2253 if (ret < 0)
2207 dev_err(fe->dev, "ASoC: failed to startup some BEs\n"); 2254 dev_err(fe->dev, "ASoC: failed to startup some BEs\n");
2208 fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO; 2255 dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
2209 2256
2210 return ret; 2257 return ret;
2211} 2258}
@@ -2214,11 +2261,11 @@ static int dpcm_run_old_update(struct snd_soc_pcm_runtime *fe, int stream)
2214{ 2261{
2215 int ret; 2262 int ret;
2216 2263
2217 fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_BE; 2264 dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_BE);
2218 ret = dpcm_run_update_shutdown(fe, stream); 2265 ret = dpcm_run_update_shutdown(fe, stream);
2219 if (ret < 0) 2266 if (ret < 0)
2220 dev_err(fe->dev, "ASoC: failed to shutdown some BEs\n"); 2267 dev_err(fe->dev, "ASoC: failed to shutdown some BEs\n");
2221 fe->dpcm[stream].runtime_update = SND_SOC_DPCM_UPDATE_NO; 2268 dpcm_set_fe_update_state(fe, stream, SND_SOC_DPCM_UPDATE_NO);
2222 2269
2223 return ret; 2270 return ret;
2224} 2271}
@@ -2248,7 +2295,13 @@ int soc_dpcm_runtime_update(struct snd_soc_card *card)
2248 fe->dai_link->name); 2295 fe->dai_link->name);
2249 2296
2250 /* skip if FE doesn't have playback capability */ 2297 /* skip if FE doesn't have playback capability */
2251 if (!fe->cpu_dai->driver->playback.channels_min) 2298 if (!fe->cpu_dai->driver->playback.channels_min
2299 || !fe->codec_dai->driver->playback.channels_min)
2300 goto capture;
2301
2302 /* skip if FE isn't currently playing */
2303 if (!fe->cpu_dai->playback_active
2304 || !fe->codec_dai->playback_active)
2252 goto capture; 2305 goto capture;
2253 2306
2254 paths = dpcm_path_get(fe, SNDRV_PCM_STREAM_PLAYBACK, &list); 2307 paths = dpcm_path_get(fe, SNDRV_PCM_STREAM_PLAYBACK, &list);
@@ -2278,7 +2331,13 @@ int soc_dpcm_runtime_update(struct snd_soc_card *card)
2278 dpcm_path_put(&list); 2331 dpcm_path_put(&list);
2279capture: 2332capture:
2280 /* skip if FE doesn't have capture capability */ 2333 /* skip if FE doesn't have capture capability */
2281 if (!fe->cpu_dai->driver->capture.channels_min) 2334 if (!fe->cpu_dai->driver->capture.channels_min
2335 || !fe->codec_dai->driver->capture.channels_min)
2336 continue;
2337
2338 /* skip if FE isn't currently capturing */
2339 if (!fe->cpu_dai->capture_active
2340 || !fe->codec_dai->capture_active)
2282 continue; 2341 continue;
2283 2342
2284 paths = dpcm_path_get(fe, SNDRV_PCM_STREAM_CAPTURE, &list); 2343 paths = dpcm_path_get(fe, SNDRV_PCM_STREAM_CAPTURE, &list);
diff --git a/sound/soc/tegra/tegra20_ac97.c b/sound/soc/tegra/tegra20_ac97.c
index 3b0fa12dbff7..29a9957d335a 100644
--- a/sound/soc/tegra/tegra20_ac97.c
+++ b/sound/soc/tegra/tegra20_ac97.c
@@ -228,7 +228,7 @@ static int tegra20_ac97_probe(struct snd_soc_dai *dai)
228 228
229static struct snd_soc_dai_driver tegra20_ac97_dai = { 229static struct snd_soc_dai_driver tegra20_ac97_dai = {
230 .name = "tegra-ac97-pcm", 230 .name = "tegra-ac97-pcm",
231 .ac97_control = 1, 231 .bus_control = true,
232 .probe = tegra20_ac97_probe, 232 .probe = tegra20_ac97_probe,
233 .playback = { 233 .playback = {
234 .stream_name = "PCM Playback", 234 .stream_name = "PCM Playback",
diff --git a/sound/soc/txx9/txx9aclc-ac97.c b/sound/soc/txx9/txx9aclc-ac97.c
index 9edd68db9f48..f7135cdaa2ca 100644
--- a/sound/soc/txx9/txx9aclc-ac97.c
+++ b/sound/soc/txx9/txx9aclc-ac97.c
@@ -152,7 +152,7 @@ static int txx9aclc_ac97_remove(struct snd_soc_dai *dai)
152} 152}
153 153
154static struct snd_soc_dai_driver txx9aclc_ac97_dai = { 154static struct snd_soc_dai_driver txx9aclc_ac97_dai = {
155 .ac97_control = 1, 155 .bus_control = true,
156 .probe = txx9aclc_ac97_probe, 156 .probe = txx9aclc_ac97_probe,
157 .remove = txx9aclc_ac97_remove, 157 .remove = txx9aclc_ac97_remove,
158 .playback = { 158 .playback = {
diff --git a/sound/soc/txx9/txx9aclc.c b/sound/soc/txx9/txx9aclc.c
index cd71fd889d8b..00b7e2d02690 100644
--- a/sound/soc/txx9/txx9aclc.c
+++ b/sound/soc/txx9/txx9aclc.c
@@ -292,7 +292,7 @@ static int txx9aclc_pcm_new(struct snd_soc_pcm_runtime *rtd)
292 struct snd_card *card = rtd->card->snd_card; 292 struct snd_card *card = rtd->card->snd_card;
293 struct snd_soc_dai *dai = rtd->cpu_dai; 293 struct snd_soc_dai *dai = rtd->cpu_dai;
294 struct snd_pcm *pcm = rtd->pcm; 294 struct snd_pcm *pcm = rtd->pcm;
295 struct platform_device *pdev = to_platform_device(dai->platform->dev); 295 struct platform_device *pdev = to_platform_device(rtd->platform->dev);
296 struct txx9aclc_soc_device *dev; 296 struct txx9aclc_soc_device *dev;
297 struct resource *r; 297 struct resource *r;
298 int i; 298 int i;
diff --git a/sound/usb/card.c b/sound/usb/card.c
index 7ecd0e8a5c51..f61ebb17cc64 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -591,18 +591,19 @@ static void snd_usb_audio_disconnect(struct usb_device *dev,
591{ 591{
592 struct snd_card *card; 592 struct snd_card *card;
593 struct list_head *p; 593 struct list_head *p;
594 bool was_shutdown;
594 595
595 if (chip == (void *)-1L) 596 if (chip == (void *)-1L)
596 return; 597 return;
597 598
598 card = chip->card; 599 card = chip->card;
599 down_write(&chip->shutdown_rwsem); 600 down_write(&chip->shutdown_rwsem);
601 was_shutdown = chip->shutdown;
600 chip->shutdown = 1; 602 chip->shutdown = 1;
601 up_write(&chip->shutdown_rwsem); 603 up_write(&chip->shutdown_rwsem);
602 604
603 mutex_lock(&register_mutex); 605 mutex_lock(&register_mutex);
604 chip->num_interfaces--; 606 if (!was_shutdown) {
605 if (chip->num_interfaces <= 0) {
606 struct snd_usb_endpoint *ep; 607 struct snd_usb_endpoint *ep;
607 608
608 snd_card_disconnect(card); 609 snd_card_disconnect(card);
@@ -622,6 +623,10 @@ static void snd_usb_audio_disconnect(struct usb_device *dev,
622 list_for_each(p, &chip->mixer_list) { 623 list_for_each(p, &chip->mixer_list) {
623 snd_usb_mixer_disconnect(p); 624 snd_usb_mixer_disconnect(p);
624 } 625 }
626 }
627
628 chip->num_interfaces--;
629 if (chip->num_interfaces <= 0) {
625 usb_chip[chip->index] = NULL; 630 usb_chip[chip->index] = NULL;
626 mutex_unlock(&register_mutex); 631 mutex_unlock(&register_mutex);
627 snd_card_free_when_closed(card); 632 snd_card_free_when_closed(card);
diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c
index 2e4a9dbc51fa..6e354d326858 100644
--- a/sound/usb/mixer.c
+++ b/sound/usb/mixer.c
@@ -2033,10 +2033,11 @@ static int parse_audio_selector_unit(struct mixer_build *state, int unitid,
2033 cval->res = 1; 2033 cval->res = 1;
2034 cval->initialized = 1; 2034 cval->initialized = 1;
2035 2035
2036 if (desc->bDescriptorSubtype == UAC2_CLOCK_SELECTOR) 2036 if (state->mixer->protocol == UAC_VERSION_1)
2037 cval->control = UAC2_CX_CLOCK_SELECTOR;
2038 else
2039 cval->control = 0; 2037 cval->control = 0;
2038 else /* UAC_VERSION_2 */
2039 cval->control = (desc->bDescriptorSubtype == UAC2_CLOCK_SELECTOR) ?
2040 UAC2_CX_CLOCK_SELECTOR : UAC2_SU_SELECTOR;
2040 2041
2041 namelist = kmalloc(sizeof(char *) * desc->bNrInPins, GFP_KERNEL); 2042 namelist = kmalloc(sizeof(char *) * desc->bNrInPins, GFP_KERNEL);
2042 if (!namelist) { 2043 if (!namelist) {
diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c
index f119a41ed9a9..8c9bf4b7aaf0 100644
--- a/sound/usb/mixer_quirks.c
+++ b/sound/usb/mixer_quirks.c
@@ -593,10 +593,10 @@ static int snd_nativeinstruments_control_get(struct snd_kcontrol *kcontrol,
593 if (mixer->chip->shutdown) 593 if (mixer->chip->shutdown)
594 ret = -ENODEV; 594 ret = -ENODEV;
595 else 595 else
596 ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), bRequest, 596 ret = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), bRequest,
597 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 597 USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
598 0, wIndex, 598 0, wIndex,
599 &tmp, sizeof(tmp), 1000); 599 &tmp, sizeof(tmp));
600 up_read(&mixer->chip->shutdown_rwsem); 600 up_read(&mixer->chip->shutdown_rwsem);
601 601
602 if (ret < 0) { 602 if (ret < 0) {
@@ -885,6 +885,11 @@ static int snd_ftu_eff_switch_put(struct snd_kcontrol *kctl,
885 return changed; 885 return changed;
886} 886}
887 887
888static void kctl_private_value_free(struct snd_kcontrol *kctl)
889{
890 kfree((void *)kctl->private_value);
891}
892
888static int snd_ftu_create_effect_switch(struct usb_mixer_interface *mixer, 893static int snd_ftu_create_effect_switch(struct usb_mixer_interface *mixer,
889 int validx, int bUnitID) 894 int validx, int bUnitID)
890{ 895{
@@ -919,6 +924,7 @@ static int snd_ftu_create_effect_switch(struct usb_mixer_interface *mixer,
919 return -ENOMEM; 924 return -ENOMEM;
920 } 925 }
921 926
927 kctl->private_free = kctl_private_value_free;
922 err = snd_ctl_add(mixer->chip->card, kctl); 928 err = snd_ctl_add(mixer->chip->card, kctl);
923 if (err < 0) 929 if (err < 0)
924 return err; 930 return err;
diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h
index 223c47b33ba3..c657752a420c 100644
--- a/sound/usb/quirks-table.h
+++ b/sound/usb/quirks-table.h
@@ -385,6 +385,36 @@ YAMAHA_DEVICE(0x105d, NULL),
385 } 385 }
386}, 386},
387{ 387{
388 USB_DEVICE(0x0499, 0x1509),
389 .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
390 /* .vendor_name = "Yamaha", */
391 /* .product_name = "Steinberg UR22", */
392 .ifnum = QUIRK_ANY_INTERFACE,
393 .type = QUIRK_COMPOSITE,
394 .data = (const struct snd_usb_audio_quirk[]) {
395 {
396 .ifnum = 1,
397 .type = QUIRK_AUDIO_STANDARD_INTERFACE
398 },
399 {
400 .ifnum = 2,
401 .type = QUIRK_AUDIO_STANDARD_INTERFACE
402 },
403 {
404 .ifnum = 3,
405 .type = QUIRK_MIDI_YAMAHA
406 },
407 {
408 .ifnum = 4,
409 .type = QUIRK_IGNORE_INTERFACE
410 },
411 {
412 .ifnum = -1
413 }
414 }
415 }
416},
417{
388 USB_DEVICE(0x0499, 0x150a), 418 USB_DEVICE(0x0499, 0x150a),
389 .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) { 419 .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) {
390 /* .vendor_name = "Yamaha", */ 420 /* .vendor_name = "Yamaha", */
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index d2aa45a8d895..60dfe0d28771 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -1146,6 +1146,20 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
1146 if ((le16_to_cpu(dev->descriptor.idVendor) == 0x23ba) && 1146 if ((le16_to_cpu(dev->descriptor.idVendor) == 0x23ba) &&
1147 (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS) 1147 (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
1148 mdelay(20); 1148 mdelay(20);
1149
1150 /* Marantz/Denon devices with USB DAC functionality need a delay
1151 * after each class compliant request
1152 */
1153 if ((le16_to_cpu(dev->descriptor.idVendor) == 0x154e) &&
1154 (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS) {
1155
1156 switch (le16_to_cpu(dev->descriptor.idProduct)) {
1157 case 0x3005: /* Marantz HD-DAC1 */
1158 case 0x3006: /* Marantz SA-14S1 */
1159 mdelay(20);
1160 break;
1161 }
1162 }
1149} 1163}
1150 1164
1151/* 1165/*
@@ -1179,12 +1193,12 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
1179 /* iFi Audio micro/nano iDSD */ 1193 /* iFi Audio micro/nano iDSD */
1180 case USB_ID(0x20b1, 0x3008): 1194 case USB_ID(0x20b1, 0x3008):
1181 if (fp->altsetting == 2) 1195 if (fp->altsetting == 2)
1182 return SNDRV_PCM_FMTBIT_DSD_U32_LE; 1196 return SNDRV_PCM_FMTBIT_DSD_U32_BE;
1183 break; 1197 break;
1184 /* DIYINHK DSD DXD 384kHz USB to I2S/DSD */ 1198 /* DIYINHK DSD DXD 384kHz USB to I2S/DSD */
1185 case USB_ID(0x20b1, 0x2009): 1199 case USB_ID(0x20b1, 0x2009):
1186 if (fp->altsetting == 3) 1200 if (fp->altsetting == 3)
1187 return SNDRV_PCM_FMTBIT_DSD_U32_LE; 1201 return SNDRV_PCM_FMTBIT_DSD_U32_BE;
1188 break; 1202 break;
1189 default: 1203 default:
1190 break; 1204 break;
diff --git a/tools/perf/builtin-diff.c b/tools/perf/builtin-diff.c
index 8c5c11ca8c53..25114c9a6801 100644
--- a/tools/perf/builtin-diff.c
+++ b/tools/perf/builtin-diff.c
@@ -1142,6 +1142,11 @@ static int data_init(int argc, const char **argv)
1142 1142
1143int cmd_diff(int argc, const char **argv, const char *prefix __maybe_unused) 1143int cmd_diff(int argc, const char **argv, const char *prefix __maybe_unused)
1144{ 1144{
1145 int ret = hists__init();
1146
1147 if (ret < 0)
1148 return ret;
1149
1145 perf_config(perf_default_config, NULL); 1150 perf_config(perf_default_config, NULL);
1146 1151
1147 argc = parse_options(argc, argv, options, diff_usage, 0); 1152 argc = parse_options(argc, argv, options, diff_usage, 0);
diff --git a/tools/perf/builtin-probe.c b/tools/perf/builtin-probe.c
index 04412b4770a2..7af26acf06d9 100644
--- a/tools/perf/builtin-probe.c
+++ b/tools/perf/builtin-probe.c
@@ -375,7 +375,7 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
375 OPT_CALLBACK('x', "exec", NULL, "executable|path", 375 OPT_CALLBACK('x', "exec", NULL, "executable|path",
376 "target executable name or path", opt_set_target), 376 "target executable name or path", opt_set_target),
377 OPT_BOOLEAN(0, "demangle", &symbol_conf.demangle, 377 OPT_BOOLEAN(0, "demangle", &symbol_conf.demangle,
378 "Disable symbol demangling"), 378 "Enable symbol demangling"),
379 OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel, 379 OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
380 "Enable kernel symbol demangling"), 380 "Enable kernel symbol demangling"),
381 OPT_END() 381 OPT_END()
diff --git a/tools/perf/perf-sys.h b/tools/perf/perf-sys.h
index 937e4324ad94..a3b13d7dc1d4 100644
--- a/tools/perf/perf-sys.h
+++ b/tools/perf/perf-sys.h
@@ -13,7 +13,7 @@
13#define wmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory") 13#define wmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
14#define rmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory") 14#define rmb() asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
15#define cpu_relax() asm volatile("rep; nop" ::: "memory"); 15#define cpu_relax() asm volatile("rep; nop" ::: "memory");
16#define CPUINFO_PROC "model name" 16#define CPUINFO_PROC {"model name"}
17#ifndef __NR_perf_event_open 17#ifndef __NR_perf_event_open
18# define __NR_perf_event_open 336 18# define __NR_perf_event_open 336
19#endif 19#endif
@@ -30,7 +30,7 @@
30#define wmb() asm volatile("sfence" ::: "memory") 30#define wmb() asm volatile("sfence" ::: "memory")
31#define rmb() asm volatile("lfence" ::: "memory") 31#define rmb() asm volatile("lfence" ::: "memory")
32#define cpu_relax() asm volatile("rep; nop" ::: "memory"); 32#define cpu_relax() asm volatile("rep; nop" ::: "memory");
33#define CPUINFO_PROC "model name" 33#define CPUINFO_PROC {"model name"}
34#ifndef __NR_perf_event_open 34#ifndef __NR_perf_event_open
35# define __NR_perf_event_open 298 35# define __NR_perf_event_open 298
36#endif 36#endif
@@ -47,14 +47,14 @@
47#define mb() asm volatile ("sync" ::: "memory") 47#define mb() asm volatile ("sync" ::: "memory")
48#define wmb() asm volatile ("sync" ::: "memory") 48#define wmb() asm volatile ("sync" ::: "memory")
49#define rmb() asm volatile ("sync" ::: "memory") 49#define rmb() asm volatile ("sync" ::: "memory")
50#define CPUINFO_PROC "cpu" 50#define CPUINFO_PROC {"cpu"}
51#endif 51#endif
52 52
53#ifdef __s390__ 53#ifdef __s390__
54#define mb() asm volatile("bcr 15,0" ::: "memory") 54#define mb() asm volatile("bcr 15,0" ::: "memory")
55#define wmb() asm volatile("bcr 15,0" ::: "memory") 55#define wmb() asm volatile("bcr 15,0" ::: "memory")
56#define rmb() asm volatile("bcr 15,0" ::: "memory") 56#define rmb() asm volatile("bcr 15,0" ::: "memory")
57#define CPUINFO_PROC "vendor_id" 57#define CPUINFO_PROC {"vendor_id"}
58#endif 58#endif
59 59
60#ifdef __sh__ 60#ifdef __sh__
@@ -67,14 +67,14 @@
67# define wmb() asm volatile("" ::: "memory") 67# define wmb() asm volatile("" ::: "memory")
68# define rmb() asm volatile("" ::: "memory") 68# define rmb() asm volatile("" ::: "memory")
69#endif 69#endif
70#define CPUINFO_PROC "cpu type" 70#define CPUINFO_PROC {"cpu type"}
71#endif 71#endif
72 72
73#ifdef __hppa__ 73#ifdef __hppa__
74#define mb() asm volatile("" ::: "memory") 74#define mb() asm volatile("" ::: "memory")
75#define wmb() asm volatile("" ::: "memory") 75#define wmb() asm volatile("" ::: "memory")
76#define rmb() asm volatile("" ::: "memory") 76#define rmb() asm volatile("" ::: "memory")
77#define CPUINFO_PROC "cpu" 77#define CPUINFO_PROC {"cpu"}
78#endif 78#endif
79 79
80#ifdef __sparc__ 80#ifdef __sparc__
@@ -87,14 +87,14 @@
87#endif 87#endif
88#define wmb() asm volatile("":::"memory") 88#define wmb() asm volatile("":::"memory")
89#define rmb() asm volatile("":::"memory") 89#define rmb() asm volatile("":::"memory")
90#define CPUINFO_PROC "cpu" 90#define CPUINFO_PROC {"cpu"}
91#endif 91#endif
92 92
93#ifdef __alpha__ 93#ifdef __alpha__
94#define mb() asm volatile("mb" ::: "memory") 94#define mb() asm volatile("mb" ::: "memory")
95#define wmb() asm volatile("wmb" ::: "memory") 95#define wmb() asm volatile("wmb" ::: "memory")
96#define rmb() asm volatile("mb" ::: "memory") 96#define rmb() asm volatile("mb" ::: "memory")
97#define CPUINFO_PROC "cpu model" 97#define CPUINFO_PROC {"cpu model"}
98#endif 98#endif
99 99
100#ifdef __ia64__ 100#ifdef __ia64__
@@ -102,7 +102,7 @@
102#define wmb() asm volatile ("mf" ::: "memory") 102#define wmb() asm volatile ("mf" ::: "memory")
103#define rmb() asm volatile ("mf" ::: "memory") 103#define rmb() asm volatile ("mf" ::: "memory")
104#define cpu_relax() asm volatile ("hint @pause" ::: "memory") 104#define cpu_relax() asm volatile ("hint @pause" ::: "memory")
105#define CPUINFO_PROC "model name" 105#define CPUINFO_PROC {"model name"}
106#endif 106#endif
107 107
108#ifdef __arm__ 108#ifdef __arm__
@@ -113,7 +113,7 @@
113#define mb() ((void(*)(void))0xffff0fa0)() 113#define mb() ((void(*)(void))0xffff0fa0)()
114#define wmb() ((void(*)(void))0xffff0fa0)() 114#define wmb() ((void(*)(void))0xffff0fa0)()
115#define rmb() ((void(*)(void))0xffff0fa0)() 115#define rmb() ((void(*)(void))0xffff0fa0)()
116#define CPUINFO_PROC "Processor" 116#define CPUINFO_PROC {"model name", "Processor"}
117#endif 117#endif
118 118
119#ifdef __aarch64__ 119#ifdef __aarch64__
@@ -133,28 +133,28 @@
133 : "memory") 133 : "memory")
134#define wmb() mb() 134#define wmb() mb()
135#define rmb() mb() 135#define rmb() mb()
136#define CPUINFO_PROC "cpu model" 136#define CPUINFO_PROC {"cpu model"}
137#endif 137#endif
138 138
139#ifdef __arc__ 139#ifdef __arc__
140#define mb() asm volatile("" ::: "memory") 140#define mb() asm volatile("" ::: "memory")
141#define wmb() asm volatile("" ::: "memory") 141#define wmb() asm volatile("" ::: "memory")
142#define rmb() asm volatile("" ::: "memory") 142#define rmb() asm volatile("" ::: "memory")
143#define CPUINFO_PROC "Processor" 143#define CPUINFO_PROC {"Processor"}
144#endif 144#endif
145 145
146#ifdef __metag__ 146#ifdef __metag__
147#define mb() asm volatile("" ::: "memory") 147#define mb() asm volatile("" ::: "memory")
148#define wmb() asm volatile("" ::: "memory") 148#define wmb() asm volatile("" ::: "memory")
149#define rmb() asm volatile("" ::: "memory") 149#define rmb() asm volatile("" ::: "memory")
150#define CPUINFO_PROC "CPU" 150#define CPUINFO_PROC {"CPU"}
151#endif 151#endif
152 152
153#ifdef __xtensa__ 153#ifdef __xtensa__
154#define mb() asm volatile("memw" ::: "memory") 154#define mb() asm volatile("memw" ::: "memory")
155#define wmb() asm volatile("memw" ::: "memory") 155#define wmb() asm volatile("memw" ::: "memory")
156#define rmb() asm volatile("" ::: "memory") 156#define rmb() asm volatile("" ::: "memory")
157#define CPUINFO_PROC "core ID" 157#define CPUINFO_PROC {"core ID"}
158#endif 158#endif
159 159
160#ifdef __tile__ 160#ifdef __tile__
@@ -162,7 +162,7 @@
162#define wmb() asm volatile ("mf" ::: "memory") 162#define wmb() asm volatile ("mf" ::: "memory")
163#define rmb() asm volatile ("mf" ::: "memory") 163#define rmb() asm volatile ("mf" ::: "memory")
164#define cpu_relax() asm volatile ("mfspr zero, PASS" ::: "memory") 164#define cpu_relax() asm volatile ("mfspr zero, PASS" ::: "memory")
165#define CPUINFO_PROC "model name" 165#define CPUINFO_PROC {"model name"}
166#endif 166#endif
167 167
168#define barrier() asm volatile ("" ::: "memory") 168#define barrier() asm volatile ("" ::: "memory")
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index ce0de00399da..26f5b2fe5dc8 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -579,16 +579,12 @@ static int write_version(int fd, struct perf_header *h __maybe_unused,
579 return do_write_string(fd, perf_version_string); 579 return do_write_string(fd, perf_version_string);
580} 580}
581 581
582static int write_cpudesc(int fd, struct perf_header *h __maybe_unused, 582static int __write_cpudesc(int fd, const char *cpuinfo_proc)
583 struct perf_evlist *evlist __maybe_unused)
584{ 583{
585#ifndef CPUINFO_PROC
586#define CPUINFO_PROC NULL
587#endif
588 FILE *file; 584 FILE *file;
589 char *buf = NULL; 585 char *buf = NULL;
590 char *s, *p; 586 char *s, *p;
591 const char *search = CPUINFO_PROC; 587 const char *search = cpuinfo_proc;
592 size_t len = 0; 588 size_t len = 0;
593 int ret = -1; 589 int ret = -1;
594 590
@@ -638,6 +634,25 @@ done:
638 return ret; 634 return ret;
639} 635}
640 636
637static int write_cpudesc(int fd, struct perf_header *h __maybe_unused,
638 struct perf_evlist *evlist __maybe_unused)
639{
640#ifndef CPUINFO_PROC
641#define CPUINFO_PROC {"model name", }
642#endif
643 const char *cpuinfo_procs[] = CPUINFO_PROC;
644 unsigned int i;
645
646 for (i = 0; i < ARRAY_SIZE(cpuinfo_procs); i++) {
647 int ret;
648 ret = __write_cpudesc(fd, cpuinfo_procs[i]);
649 if (ret >= 0)
650 return ret;
651 }
652 return -1;
653}
654
655
641static int write_nrcpus(int fd, struct perf_header *h __maybe_unused, 656static int write_nrcpus(int fd, struct perf_header *h __maybe_unused,
642 struct perf_evlist *evlist __maybe_unused) 657 struct perf_evlist *evlist __maybe_unused)
643{ 658{
diff --git a/tools/perf/util/sort.c b/tools/perf/util/sort.c
index 4906cd81cb56..9402885a77f3 100644
--- a/tools/perf/util/sort.c
+++ b/tools/perf/util/sort.c
@@ -373,6 +373,9 @@ struct sort_entry sort_cpu = {
 static int64_t
 sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
 {
+	if (!left->branch_info || !right->branch_info)
+		return cmp_null(left->branch_info, right->branch_info);
+
 	return _sort__dso_cmp(left->branch_info->from.map,
 			      right->branch_info->from.map);
 }
@@ -380,13 +383,19 @@ sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
 static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
 					 size_t size, unsigned int width)
 {
-	return _hist_entry__dso_snprintf(he->branch_info->from.map,
-					 bf, size, width);
+	if (he->branch_info)
+		return _hist_entry__dso_snprintf(he->branch_info->from.map,
+						 bf, size, width);
+	else
+		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
 }
 
 static int64_t
 sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
 {
+	if (!left->branch_info || !right->branch_info)
+		return cmp_null(left->branch_info, right->branch_info);
+
 	return _sort__dso_cmp(left->branch_info->to.map,
 			      right->branch_info->to.map);
 }
@@ -394,8 +403,11 @@ sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
 static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
 				       size_t size, unsigned int width)
 {
-	return _hist_entry__dso_snprintf(he->branch_info->to.map,
-					 bf, size, width);
+	if (he->branch_info)
+		return _hist_entry__dso_snprintf(he->branch_info->to.map,
+						 bf, size, width);
+	else
+		return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
 }
 
 static int64_t
@@ -404,6 +416,12 @@ sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
 	struct addr_map_symbol *from_l = &left->branch_info->from;
 	struct addr_map_symbol *from_r = &right->branch_info->from;
 
+	if (!left->branch_info || !right->branch_info)
+		return cmp_null(left->branch_info, right->branch_info);
+
+	from_l = &left->branch_info->from;
+	from_r = &right->branch_info->from;
+
 	if (!from_l->sym && !from_r->sym)
 		return _sort__addr_cmp(from_l->addr, from_r->addr);
 
@@ -413,8 +431,13 @@ sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
 static int64_t
 sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
 {
-	struct addr_map_symbol *to_l = &left->branch_info->to;
-	struct addr_map_symbol *to_r = &right->branch_info->to;
+	struct addr_map_symbol *to_l, *to_r;
+
+	if (!left->branch_info || !right->branch_info)
+		return cmp_null(left->branch_info, right->branch_info);
+
+	to_l = &left->branch_info->to;
+	to_r = &right->branch_info->to;
 
 	if (!to_l->sym && !to_r->sym)
 		return _sort__addr_cmp(to_l->addr, to_r->addr);
@@ -425,19 +448,27 @@ sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
 static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
 					 size_t size, unsigned int width)
 {
-	struct addr_map_symbol *from = &he->branch_info->from;
-	return _hist_entry__sym_snprintf(from->map, from->sym, from->addr,
-					 he->level, bf, size, width);
+	if (he->branch_info) {
+		struct addr_map_symbol *from = &he->branch_info->from;
 
+		return _hist_entry__sym_snprintf(from->map, from->sym, from->addr,
+						 he->level, bf, size, width);
+	}
+
+	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
 }
 
 static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
 				       size_t size, unsigned int width)
 {
-	struct addr_map_symbol *to = &he->branch_info->to;
-	return _hist_entry__sym_snprintf(to->map, to->sym, to->addr,
-					 he->level, bf, size, width);
+	if (he->branch_info) {
+		struct addr_map_symbol *to = &he->branch_info->to;
 
+		return _hist_entry__sym_snprintf(to->map, to->sym, to->addr,
+						 he->level, bf, size, width);
+	}
+
+	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");
 }
 
 struct sort_entry sort_dso_from = {
@@ -471,11 +502,13 @@ struct sort_entry sort_sym_to = {
 static int64_t
 sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
 {
-	const unsigned char mp = left->branch_info->flags.mispred !=
-		right->branch_info->flags.mispred;
-	const unsigned char p = left->branch_info->flags.predicted !=
-		right->branch_info->flags.predicted;
+	unsigned char mp, p;
+
+	if (!left->branch_info || !right->branch_info)
+		return cmp_null(left->branch_info, right->branch_info);
 
+	mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
+	p = left->branch_info->flags.predicted != right->branch_info->flags.predicted;
 	return mp || p;
 }
 
@@ -483,10 +516,12 @@ static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
 					     size_t size, unsigned int width){
 	static const char *out = "N/A";
 
-	if (he->branch_info->flags.predicted)
-		out = "N";
-	else if (he->branch_info->flags.mispred)
-		out = "Y";
+	if (he->branch_info) {
+		if (he->branch_info->flags.predicted)
+			out = "N";
+		else if (he->branch_info->flags.mispred)
+			out = "Y";
+	}
 
 	return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
 }
@@ -989,6 +1024,9 @@ struct sort_entry sort_mem_dcacheline = {
 static int64_t
 sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
 {
+	if (!left->branch_info || !right->branch_info)
+		return cmp_null(left->branch_info, right->branch_info);
+
 	return left->branch_info->flags.abort !=
 		right->branch_info->flags.abort;
 }
@@ -996,10 +1034,15 @@ sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
 static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
 				      size_t size, unsigned int width)
 {
-	static const char *out = ".";
+	static const char *out = "N/A";
+
+	if (he->branch_info) {
+		if (he->branch_info->flags.abort)
+			out = "A";
+		else
+			out = ".";
+	}
 
-	if (he->branch_info->flags.abort)
-		out = "A";
 	return repsep_snprintf(bf, size, "%-*s", width, out);
 }
 
@@ -1013,6 +1056,9 @@ struct sort_entry sort_abort = {
 static int64_t
 sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
 {
+	if (!left->branch_info || !right->branch_info)
+		return cmp_null(left->branch_info, right->branch_info);
+
 	return left->branch_info->flags.in_tx !=
 		right->branch_info->flags.in_tx;
 }
@@ -1020,10 +1066,14 @@ sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
 static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
 				      size_t size, unsigned int width)
 {
-	static const char *out = ".";
+	static const char *out = "N/A";
 
-	if (he->branch_info->flags.in_tx)
-		out = "T";
+	if (he->branch_info) {
+		if (he->branch_info->flags.in_tx)
+			out = "T";
+		else
+			out = ".";
+	}
 
 	return repsep_snprintf(bf, size, "%-*s", width, out);
 }
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 2b7b2d91c016..c41411726c7a 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -117,6 +117,9 @@ int __thread__set_comm(struct thread *thread, const char *str, u64 timestamp,
 		if (!new)
 			return -ENOMEM;
 		list_add(&new->list, &thread->comm_list);
+
+		if (exec)
+			unwind__flush_access(thread);
 	}
 
 	thread->comm_set = true;
diff --git a/tools/perf/util/unwind-libunwind.c b/tools/perf/util/unwind-libunwind.c
index e060386165c5..4d45c0dfe343 100644
--- a/tools/perf/util/unwind-libunwind.c
+++ b/tools/perf/util/unwind-libunwind.c
@@ -539,11 +539,23 @@ int unwind__prepare_access(struct thread *thread)
 		return -ENOMEM;
 	}
 
+	unw_set_caching_policy(addr_space, UNW_CACHE_GLOBAL);
 	thread__set_priv(thread, addr_space);
 
 	return 0;
 }
 
+void unwind__flush_access(struct thread *thread)
+{
+	unw_addr_space_t addr_space;
+
+	if (callchain_param.record_mode != CALLCHAIN_DWARF)
+		return;
+
+	addr_space = thread__priv(thread);
+	unw_flush_cache(addr_space, 0, 0);
+}
+
 void unwind__finish_access(struct thread *thread)
 {
 	unw_addr_space_t addr_space;
diff --git a/tools/perf/util/unwind.h b/tools/perf/util/unwind.h
index c17c4855bdbc..f50b737235eb 100644
--- a/tools/perf/util/unwind.h
+++ b/tools/perf/util/unwind.h
@@ -23,6 +23,7 @@ int unwind__get_entries(unwind_entry_cb_t cb, void *arg,
 #ifdef HAVE_LIBUNWIND_SUPPORT
 int libunwind__arch_reg_id(int regnum);
 int unwind__prepare_access(struct thread *thread);
+void unwind__flush_access(struct thread *thread);
 void unwind__finish_access(struct thread *thread);
 #else
 static inline int unwind__prepare_access(struct thread *thread __maybe_unused)
@@ -30,6 +31,7 @@ static inline int unwind__prepare_access(struct thread *thread __maybe_unused)
 	return 0;
 }
 
+static inline void unwind__flush_access(struct thread *thread __maybe_unused) {}
 static inline void unwind__finish_access(struct thread *thread __maybe_unused) {}
 #endif
 #else
@@ -49,6 +51,7 @@ static inline int unwind__prepare_access(struct thread *thread __maybe_unused)
 	return 0;
 }
 
+static inline void unwind__flush_access(struct thread *thread __maybe_unused) {}
 static inline void unwind__finish_access(struct thread *thread __maybe_unused) {}
 #endif /* HAVE_DWARF_UNWIND_SUPPORT */
 #endif /* __UNWIND_H */
diff --git a/tools/power/acpi/os_specific/service_layers/osunixxf.c b/tools/power/acpi/os_specific/service_layers/osunixxf.c
index 60b58cd18410..7ccb073f8316 100644
--- a/tools/power/acpi/os_specific/service_layers/osunixxf.c
+++ b/tools/power/acpi/os_specific/service_layers/osunixxf.c
@@ -122,6 +122,14 @@ static void os_enter_line_edit_mode(void)
 {
 	struct termios local_term_attributes;
 
+	term_attributes_were_set = 0;
+
+	/* STDIN must be a terminal */
+
+	if (!isatty(STDIN_FILENO)) {
+		return;
+	}
+
 	/* Get and keep the original attributes */
 
 	if (tcgetattr(STDIN_FILENO, &original_term_attributes)) {
diff --git a/tools/power/acpi/tools/acpidump/apdump.c b/tools/power/acpi/tools/acpidump/apdump.c
index 53cee781e24e..24d32968802d 100644
--- a/tools/power/acpi/tools/acpidump/apdump.c
+++ b/tools/power/acpi/tools/acpidump/apdump.c
@@ -146,7 +146,7 @@ u32 ap_get_table_length(struct acpi_table_header *table)
 
 	if (ACPI_VALIDATE_RSDP_SIG(table->signature)) {
 		rsdp = ACPI_CAST_PTR(struct acpi_table_rsdp, table);
-		return (rsdp->length);
+		return (acpi_tb_get_rsdp_length(rsdp));
 	}
 
 	/* Normal ACPI table */
diff --git a/tools/testing/selftests/ftrace/ftracetest b/tools/testing/selftests/ftrace/ftracetest
index a8f81c782856..515247601df4 100755
--- a/tools/testing/selftests/ftrace/ftracetest
+++ b/tools/testing/selftests/ftrace/ftracetest
@@ -82,7 +82,7 @@ parse_opts() { # opts
 }
 
 # Parameters
-DEBUGFS_DIR=`grep debugfs /proc/mounts | cut -f2 -d' '`
+DEBUGFS_DIR=`grep debugfs /proc/mounts | cut -f2 -d' ' | head -1`
 TRACING_DIR=$DEBUGFS_DIR/tracing
 TOP_DIR=`absdir $0`
 TEST_DIR=$TOP_DIR/test.d
diff --git a/tools/testing/selftests/net/psock_fanout.c b/tools/testing/selftests/net/psock_fanout.c
index 57b9c2b7c4ff..6f6733331d95 100644
--- a/tools/testing/selftests/net/psock_fanout.c
+++ b/tools/testing/selftests/net/psock_fanout.c
@@ -128,7 +128,7 @@ static int sock_fanout_read_ring(int fd, void *ring)
 	struct tpacket2_hdr *header = ring;
 	int count = 0;
 
-	while (header->tp_status & TP_STATUS_USER && count < RING_NUM_FRAMES) {
+	while (count < RING_NUM_FRAMES && header->tp_status & TP_STATUS_USER) {
 		count++;
 		header = ring + (count * getpagesize());
 	}
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 3aaca49de325..aacdb59f30de 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -1933,7 +1933,7 @@ out:
 
 int kvm_vgic_create(struct kvm *kvm)
 {
-	int i, vcpu_lock_idx = -1, ret = 0;
+	int i, vcpu_lock_idx = -1, ret;
 	struct kvm_vcpu *vcpu;
 
 	mutex_lock(&kvm->lock);
@@ -1948,6 +1948,7 @@ int kvm_vgic_create(struct kvm *kvm)
 	 * vcpu->mutex. By grabbing the vcpu->mutex of all VCPUs we ensure
 	 * that no other VCPUs are run while we create the vgic.
 	 */
+	ret = -EBUSY;
 	kvm_for_each_vcpu(i, vcpu, kvm) {
 		if (!mutex_trylock(&vcpu->mutex))
 			goto out_unlock;
@@ -1955,11 +1956,10 @@ int kvm_vgic_create(struct kvm *kvm)
 	}
 
 	kvm_for_each_vcpu(i, vcpu, kvm) {
-		if (vcpu->arch.has_run_once) {
-			ret = -EBUSY;
+		if (vcpu->arch.has_run_once)
 			goto out_unlock;
-		}
 	}
+	ret = 0;
 
 	spin_lock_init(&kvm->arch.vgic.lock);
 	kvm->arch.vgic.in_kernel = true;
diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
index e51d9f9b995f..c1e6ae989a43 100644
--- a/virt/kvm/iommu.c
+++ b/virt/kvm/iommu.c
@@ -43,13 +43,13 @@ static void kvm_iommu_put_pages(struct kvm *kvm,
 			       gfn_t base_gfn, unsigned long npages);
 
 static pfn_t kvm_pin_pages(struct kvm_memory_slot *slot, gfn_t gfn,
-			   unsigned long size)
+			   unsigned long npages)
 {
 	gfn_t end_gfn;
 	pfn_t pfn;
 
 	pfn = gfn_to_pfn_memslot(slot, gfn);
-	end_gfn = gfn + (size >> PAGE_SHIFT);
+	end_gfn = gfn + npages;
 	gfn += 1;
 
 	if (is_error_noslot_pfn(pfn))
@@ -119,7 +119,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
 		 * Pin all pages we are about to map in memory. This is
 		 * important because we unmap and unpin in 4kb steps later.
 		 */
-		pfn = kvm_pin_pages(slot, gfn, page_size);
+		pfn = kvm_pin_pages(slot, gfn, page_size >> PAGE_SHIFT);
 		if (is_error_noslot_pfn(pfn)) {
 			gfn += 1;
 			continue;
@@ -131,7 +131,7 @@ int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot)
 		if (r) {
 			printk(KERN_ERR "kvm_iommu_map_address:"
 			       "iommu failed to map pfn=%llx\n", pfn);
-			kvm_unpin_pages(kvm, pfn, page_size);
+			kvm_unpin_pages(kvm, pfn, page_size >> PAGE_SHIFT);
 			goto unmap_pages;
 		}
 
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 384eaa7b02fa..3cee7b167052 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -107,10 +107,10 @@ EXPORT_SYMBOL_GPL(kvm_rebooting);
 
 static bool largepages_enabled = true;
 
-bool kvm_is_mmio_pfn(pfn_t pfn)
+bool kvm_is_reserved_pfn(pfn_t pfn)
 {
 	if (pfn_valid(pfn))
-		return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn));
+		return PageReserved(pfn_to_page(pfn));
 
 	return true;
 }
@@ -1321,7 +1321,7 @@ static pfn_t hva_to_pfn(unsigned long addr, bool atomic, bool *async,
 	else if ((vma->vm_flags & VM_PFNMAP)) {
 		pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) +
 			vma->vm_pgoff;
-		BUG_ON(!kvm_is_mmio_pfn(pfn));
+		BUG_ON(!kvm_is_reserved_pfn(pfn));
 	} else {
 		if (async && vma_is_valid(vma, write_fault))
 			*async = true;
@@ -1427,7 +1427,7 @@ static struct page *kvm_pfn_to_page(pfn_t pfn)
 	if (is_error_noslot_pfn(pfn))
 		return KVM_ERR_PTR_BAD_PAGE;
 
-	if (kvm_is_mmio_pfn(pfn)) {
+	if (kvm_is_reserved_pfn(pfn)) {
 		WARN_ON(1);
 		return KVM_ERR_PTR_BAD_PAGE;
 	}
@@ -1456,7 +1456,7 @@ EXPORT_SYMBOL_GPL(kvm_release_page_clean);
 
 void kvm_release_pfn_clean(pfn_t pfn)
 {
-	if (!is_error_noslot_pfn(pfn) && !kvm_is_mmio_pfn(pfn))
+	if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn))
 		put_page(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
@@ -1477,7 +1477,7 @@ static void kvm_release_pfn_dirty(pfn_t pfn)
 
 void kvm_set_pfn_dirty(pfn_t pfn)
 {
-	if (!kvm_is_mmio_pfn(pfn)) {
+	if (!kvm_is_reserved_pfn(pfn)) {
 		struct page *page = pfn_to_page(pfn);
 		if (!PageReserved(page))
 			SetPageDirty(page);
@@ -1487,14 +1487,14 @@ EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
 
 void kvm_set_pfn_accessed(pfn_t pfn)
 {
-	if (!kvm_is_mmio_pfn(pfn))
+	if (!kvm_is_reserved_pfn(pfn))
 		mark_page_accessed(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
 
 void kvm_get_pfn(pfn_t pfn)
 {
-	if (!kvm_is_mmio_pfn(pfn))
+	if (!kvm_is_reserved_pfn(pfn))
 		get_page(pfn_to_page(pfn));
 }
 EXPORT_SYMBOL_GPL(kvm_get_pfn);
@@ -2354,6 +2354,12 @@ int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type)
 	return 0;
 }
 
+void kvm_unregister_device_ops(u32 type)
+{
+	if (kvm_device_ops_table[type] != NULL)
+		kvm_device_ops_table[type] = NULL;
+}
+
 static int kvm_ioctl_create_device(struct kvm *kvm,
 				   struct kvm_create_device *cd)
 {
@@ -3328,5 +3334,6 @@ void kvm_exit(void)
 	kvm_arch_exit();
 	kvm_irqfd_exit();
 	free_cpumask_var(cpus_hardware_enabled);
+	kvm_vfio_ops_exit();
 }
 EXPORT_SYMBOL_GPL(kvm_exit);
diff --git a/virt/kvm/vfio.c b/virt/kvm/vfio.c
index 281e7cf2b8e5..620e37f741b8 100644
--- a/virt/kvm/vfio.c
+++ b/virt/kvm/vfio.c
@@ -283,3 +283,8 @@ int kvm_vfio_ops_init(void)
 {
 	return kvm_register_device_ops(&kvm_vfio_ops, KVM_DEV_TYPE_VFIO);
 }
+
+void kvm_vfio_ops_exit(void)
+{
+	kvm_unregister_device_ops(KVM_DEV_TYPE_VFIO);
+}
diff --git a/virt/kvm/vfio.h b/virt/kvm/vfio.h
index 92eac75d6b62..ab88c7dc0514 100644
--- a/virt/kvm/vfio.h
+++ b/virt/kvm/vfio.h
@@ -3,11 +3,15 @@
 
 #ifdef CONFIG_KVM_VFIO
 int kvm_vfio_ops_init(void);
+void kvm_vfio_ops_exit(void);
 #else
 static inline int kvm_vfio_ops_init(void)
 {
 	return 0;
 }
+static inline void kvm_vfio_ops_exit(void)
+{
+}
 #endif
 
 #endif