-rw-r--r--CREDITS4
-rw-r--r--Documentation/Changes9
-rw-r--r--Documentation/DocBook/gadget.tmpl2
-rw-r--r--Documentation/DocBook/genericirq.tmpl4
-rw-r--r--Documentation/DocBook/kernel-locking.tmpl2
-rw-r--r--Documentation/DocBook/libata.tmpl6
-rw-r--r--Documentation/DocBook/media_api.tmpl2
-rw-r--r--Documentation/DocBook/mtdnand.tmpl30
-rw-r--r--Documentation/DocBook/regulator.tmpl2
-rw-r--r--Documentation/DocBook/uio-howto.tmpl4
-rw-r--r--Documentation/DocBook/usb.tmpl2
-rw-r--r--Documentation/DocBook/writing-an-alsa-driver.tmpl2
-rw-r--r--Documentation/accounting/getdelays.c1
-rw-r--r--Documentation/acpi/enumeration.txt6
-rw-r--r--Documentation/cpu-freq/intel-pstate.txt7
-rw-r--r--Documentation/devicetree/bindings/arm/armada-38x.txt14
-rw-r--r--Documentation/devicetree/bindings/arm/exynos/power_domain.txt20
-rw-r--r--Documentation/devicetree/bindings/arm/l2cc.txt3
-rw-r--r--Documentation/devicetree/bindings/arm/samsung/exynos-adc.txt2
-rw-r--r--Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt6
-rw-r--r--Documentation/devicetree/bindings/hwmon/ntc_thermistor.txt20
-rw-r--r--Documentation/devicetree/bindings/serial/renesas,sci-serial.txt7
-rw-r--r--Documentation/devicetree/bindings/spi/qcom,spi-qup.txt6
-rw-r--r--Documentation/devicetree/bindings/vendor-prefixes.txt1
-rw-r--r--Documentation/email-clients.txt11
-rw-r--r--Documentation/hwmon/ntc_thermistor8
-rw-r--r--Documentation/kernel-parameters.txt13
-rw-r--r--Documentation/laptops/00-INDEX4
-rw-r--r--Documentation/laptops/freefall.c (renamed from Documentation/laptops/hpfall.c)59
-rw-r--r--Documentation/memory-hotplug.txt15
-rw-r--r--Documentation/ptp/testptp.c5
-rw-r--r--Documentation/sound/alsa/HD-Audio-Models.txt5
-rw-r--r--Documentation/sysctl/kernel.txt17
-rw-r--r--Documentation/sysctl/vm.txt3
-rw-r--r--Documentation/trace/postprocess/trace-vmscan-postprocess.pl14
-rw-r--r--MAINTAINERS103
-rw-r--r--Makefile102
-rw-r--r--arch/arc/include/asm/cache.h4
-rw-r--r--arch/arc/include/uapi/asm/ptrace.h1
-rw-r--r--arch/arc/kernel/ctx_sw_asm.S2
-rw-r--r--arch/arc/kernel/devtree.c2
-rw-r--r--arch/arc/kernel/head.S7
-rw-r--r--arch/arc/kernel/ptrace.c4
-rw-r--r--arch/arc/kernel/smp.c15
-rw-r--r--arch/arc/kernel/vmlinux.lds.S2
-rw-r--r--arch/arc/mm/cache_arc700.c25
-rw-r--r--arch/arm/Kconfig1
-rw-r--r--arch/arm/boot/dts/Makefile2
-rw-r--r--arch/arm/boot/dts/am335x-evm.dts4
-rw-r--r--arch/arm/boot/dts/am335x-evmsk.dts4
-rw-r--r--arch/arm/boot/dts/am335x-igep0033.dtsi6
-rw-r--r--arch/arm/boot/dts/am43x-epos-evm.dts4
-rw-r--r--arch/arm/boot/dts/armada-380.dtsi2
-rw-r--r--arch/arm/boot/dts/armada-385-db.dts2
-rw-r--r--arch/arm/boot/dts/armada-385-rd.dts2
-rw-r--r--arch/arm/boot/dts/armada-385.dtsi2
-rw-r--r--arch/arm/boot/dts/armada-38x.dtsi2
-rw-r--r--arch/arm/boot/dts/at91sam9261.dtsi21
-rw-r--r--arch/arm/boot/dts/at91sam9261ek.dts4
-rw-r--r--arch/arm/boot/dts/at91sam9n12.dtsi6
-rw-r--r--arch/arm/boot/dts/at91sam9x5.dtsi10
-rw-r--r--arch/arm/boot/dts/dra7-evm.dts1
-rw-r--r--arch/arm/boot/dts/dra7.dtsi12
-rw-r--r--arch/arm/boot/dts/dra7xx-clocks.dtsi26
-rw-r--r--arch/arm/boot/dts/exynos4.dtsi4
-rw-r--r--arch/arm/boot/dts/exynos5420.dtsi5
-rw-r--r--arch/arm/boot/dts/imx51-babbage.dts10
-rw-r--r--arch/arm/boot/dts/imx51-eukrea-mbimxsd51-baseboard.dts4
-rw-r--r--arch/arm/boot/dts/imx53-m53evk.dts40
-rw-r--r--arch/arm/boot/dts/imx6dl-hummingboard.dts10
-rw-r--r--arch/arm/boot/dts/imx6q-gw51xx.dts2
-rw-r--r--arch/arm/boot/dts/imx6qdl-cubox-i.dtsi27
-rw-r--r--arch/arm/boot/dts/imx6qdl-gw51xx.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6qdl-gw52xx.dtsi4
-rw-r--r--arch/arm/boot/dts/imx6qdl-gw53xx.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6qdl-microsom.dtsi13
-rw-r--r--arch/arm/boot/dts/imx6sl.dtsi2
-rw-r--r--arch/arm/boot/dts/kirkwood-guruplug-server-plus.dts4
-rw-r--r--arch/arm/boot/dts/omap3-beagle-xm.dts6
-rw-r--r--arch/arm/boot/dts/omap3-evm-common.dtsi7
-rw-r--r--arch/arm/boot/dts/omap3-n900.dts5
-rw-r--r--arch/arm/boot/dts/omap5.dtsi1
-rw-r--r--arch/arm/boot/dts/stih415.dtsi8
-rw-r--r--arch/arm/boot/dts/stih416-b2020e.dts (renamed from arch/arm/boot/dts/stih416-b2020-revE.dts)0
-rw-r--r--arch/arm/boot/dts/stih416.dtsi8
-rw-r--r--arch/arm/common/scoop.c1
-rw-r--r--arch/arm/configs/bcm_defconfig2
-rw-r--r--arch/arm/configs/imx_v6_v7_defconfig1
-rw-r--r--arch/arm/configs/multi_v7_defconfig4
-rw-r--r--arch/arm/configs/mvebu_v7_defconfig2
-rw-r--r--arch/arm/include/asm/mcpm.h2
-rw-r--r--arch/arm/include/asm/thread_info.h6
-rw-r--r--arch/arm/kernel/kprobes-test-arm.c30
-rw-r--r--arch/arm/kernel/kprobes-test.c10
-rw-r--r--arch/arm/kernel/perf_event_v7.c4
-rw-r--r--arch/arm/kernel/probes-arm.c6
-rw-r--r--arch/arm/kernel/ptrace.c7
-rw-r--r--arch/arm/kernel/topology.c2
-rw-r--r--arch/arm/mach-exynos/exynos.c8
-rw-r--r--arch/arm/mach-exynos/firmware.c9
-rw-r--r--arch/arm/mach-exynos/hotplug.c18
-rw-r--r--arch/arm/mach-exynos/mcpm-exynos.c11
-rw-r--r--arch/arm/mach-exynos/platsmp.c34
-rw-r--r--arch/arm/mach-exynos/pm.c15
-rw-r--r--arch/arm/mach-exynos/pm_domains.c61
-rw-r--r--arch/arm/mach-imx/Kconfig12
-rw-r--r--arch/arm/mach-imx/clk-gate2.c31
-rw-r--r--arch/arm/mach-imx/clk-imx6q.c4
-rw-r--r--arch/arm/mach-imx/clk-imx6sl.c1
-rw-r--r--arch/arm/mach-integrator/integrator_ap.c26
-rw-r--r--arch/arm/mach-integrator/integrator_cp.c23
-rw-r--r--arch/arm/mach-mvebu/Kconfig2
-rw-r--r--arch/arm/mach-mvebu/Makefile2
-rw-r--r--arch/arm/mach-mvebu/board-v7.c29
-rw-r--r--arch/arm/mach-mvebu/coherency.c6
-rw-r--r--arch/arm/mach-mvebu/headsmp-a9.S9
-rw-r--r--arch/arm/mach-mvebu/pmsu.c19
-rw-r--r--arch/arm/mach-mvebu/pmsu_ll.S25
-rw-r--r--arch/arm/mach-omap2/Kconfig4
-rw-r--r--arch/arm/mach-omap2/Makefile6
-rw-r--r--arch/arm/mach-omap2/clkt_dpll.c2
-rw-r--r--arch/arm/mach-omap2/cm-regbits-34xx.h3
-rw-r--r--arch/arm/mach-omap2/cm33xx.h2
-rw-r--r--arch/arm/mach-omap2/common.h4
-rw-r--r--arch/arm/mach-omap2/devices.c28
-rw-r--r--arch/arm/mach-omap2/dsp.c10
-rw-r--r--arch/arm/mach-omap2/gpmc.c2
-rw-r--r--arch/arm/mach-omap2/id.c12
-rw-r--r--arch/arm/mach-omap2/mux.c6
-rw-r--r--arch/arm/mach-omap2/omap4-common.c20
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c6
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_54xx_data.c73
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_7xx_data.c18
-rw-r--r--arch/arm/mach-omap2/prm-regbits-34xx.h6
-rw-r--r--arch/arm/mach-omap2/soc.h1
-rw-r--r--arch/arm/mach-sa1100/collie.c7
-rw-r--r--arch/arm/mach-sti/Kconfig4
-rw-r--r--arch/arm/mach-sunxi/sunxi.c77
-rw-r--r--arch/arm/mach-ux500/Kconfig2
-rw-r--r--arch/arm/mach-vexpress/Kconfig2
-rw-r--r--arch/arm/mm/Kconfig9
-rw-r--r--arch/arm/mm/cache-l2x0.c33
-rw-r--r--arch/arm/mm/nommu.c1
-rw-r--r--arch/arm/mm/proc-arm925.S1
-rw-r--r--arch/arm64/Kconfig1
-rw-r--r--arch/arm64/include/asm/memory.h2
-rw-r--r--arch/arm64/include/asm/pgtable.h2
-rw-r--r--arch/arm64/include/asm/ptrace.h4
-rw-r--r--arch/arm64/kernel/efi-entry.S3
-rw-r--r--arch/arm64/kernel/efi-stub.c2
-rw-r--r--arch/arm64/kernel/head.S3
-rw-r--r--arch/arm64/mm/copypage.c2
-rw-r--r--arch/arm64/mm/flush.c3
-rw-r--r--arch/ia64/include/uapi/asm/fcntl.h1
-rw-r--r--arch/m68k/kernel/head.S3
-rw-r--r--arch/m68k/kernel/time.c2
-rw-r--r--arch/mips/Kconfig1
-rw-r--r--arch/mips/include/asm/sigcontext.h2
-rw-r--r--arch/mips/include/asm/uasm.h4
-rw-r--r--arch/mips/include/uapi/asm/inst.h1
-rw-r--r--arch/mips/include/uapi/asm/sigcontext.h8
-rw-r--r--arch/mips/kernel/asm-offsets.c3
-rw-r--r--arch/mips/kernel/irq-msc01.c2
-rw-r--r--arch/mips/kernel/pm-cps.c4
-rw-r--r--arch/mips/kernel/r4k_fpu.S213
-rw-r--r--arch/mips/kernel/signal.c79
-rw-r--r--arch/mips/kernel/signal32.c74
-rw-r--r--arch/mips/kernel/smp-cps.c2
-rw-r--r--arch/mips/kvm/kvm_mips.c1
-rw-r--r--arch/mips/math-emu/ieee754.c23
-rw-r--r--arch/mips/mm/uasm-micromips.c1
-rw-r--r--arch/mips/mm/uasm-mips.c3
-rw-r--r--arch/mips/mm/uasm.c10
-rw-r--r--arch/mips/net/bpf_jit.c266
-rw-r--r--arch/parisc/kernel/hardware.c3
-rw-r--r--arch/parisc/kernel/sys_parisc32.c46
-rw-r--r--arch/parisc/kernel/syscall_table.S2
-rw-r--r--arch/powerpc/Kconfig4
-rw-r--r--arch/powerpc/Kconfig.debug1
-rw-r--r--arch/powerpc/include/asm/code-patching.h11
-rw-r--r--arch/powerpc/include/asm/mmu.h10
-rw-r--r--arch/powerpc/include/asm/opal.h29
-rw-r--r--arch/powerpc/include/asm/perf_event_server.h3
-rw-r--r--arch/powerpc/include/asm/swab.h43
-rw-r--r--arch/powerpc/kernel/ftrace.c52
-rw-r--r--arch/powerpc/kernel/idle_power7.S2
-rw-r--r--arch/powerpc/kernel/iomap.c20
-rw-r--r--arch/powerpc/kernel/kprobes.c9
-rw-r--r--arch/powerpc/kernel/module_64.c11
-rw-r--r--arch/powerpc/kernel/prom.c7
-rw-r--r--arch/powerpc/kernel/prom_init.c211
-rw-r--r--arch/powerpc/kernel/prom_init_check.sh4
-rw-r--r--arch/powerpc/kernel/setup-common.c10
-rw-r--r--arch/powerpc/kernel/signal_32.c9
-rw-r--r--arch/powerpc/kernel/signal_64.c9
-rw-r--r--arch/powerpc/kernel/smp.c2
-rw-r--r--arch/powerpc/kvm/book3s_hv_interrupts.S5
-rw-r--r--arch/powerpc/mm/mmu_context_nohash.c12
-rw-r--r--arch/powerpc/net/bpf_jit_comp.c10
-rw-r--r--arch/powerpc/perf/core-book3s.c26
-rw-r--r--arch/powerpc/perf/power8-pmu.c2
-rw-r--r--arch/powerpc/platforms/cell/cbe_thermal.c2
-rw-r--r--arch/powerpc/platforms/cell/spu_syscalls.c2
-rw-r--r--arch/powerpc/platforms/cell/spufs/Makefile3
-rw-r--r--arch/powerpc/platforms/cell/spufs/syscalls.c6
-rw-r--r--arch/powerpc/platforms/powernv/Makefile2
-rw-r--r--arch/powerpc/platforms/powernv/opal-takeover.S140
-rw-r--r--arch/powerpc/sysdev/dart_iommu.c5
-rw-r--r--arch/s390/include/uapi/asm/Kbuild1
-rw-r--r--arch/s390/include/uapi/asm/sie.h26
-rw-r--r--arch/sparc/Kconfig1
-rw-r--r--arch/sparc/include/asm/irq_64.h2
-rw-r--r--arch/sparc/kernel/process_64.c18
-rw-r--r--arch/um/kernel/tlb.c9
-rw-r--r--arch/um/kernel/trap.c2
-rw-r--r--arch/um/os-Linux/skas/process.c9
-rw-r--r--arch/x86/Kconfig3
-rw-r--r--arch/x86/boot/header.S26
-rw-r--r--arch/x86/boot/tools/build.c38
-rw-r--r--arch/x86/crypto/sha512_ssse3_glue.c2
-rw-r--r--arch/x86/include/asm/irq.h2
-rw-r--r--arch/x86/include/asm/kvm_host.h4
-rw-r--r--arch/x86/include/asm/ptrace.h16
-rw-r--r--arch/x86/kernel/acpi/Makefile1
-rw-r--r--arch/x86/kernel/acpi/apei.c62
-rw-r--r--arch/x86/kernel/apic/hw_nmi.c18
-rw-r--r--arch/x86/kernel/apm_32.c1
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c9
-rw-r--r--arch/x86/kernel/entry_32.S10
-rw-r--r--arch/x86/kernel/espfix_64.c5
-rw-r--r--arch/x86/kernel/signal.c2
-rw-r--r--arch/x86/kernel/tsc.c4
-rw-r--r--arch/x86/kvm/svm.c1
-rw-r--r--arch/x86/kvm/x86.c2
-rw-r--r--arch/x86/vdso/Makefile24
-rw-r--r--arch/x86/vdso/vclock_gettime.c3
-rw-r--r--arch/x86/vdso/vdso-fakesections.c41
-rw-r--r--arch/x86/vdso/vdso-layout.lds.S64
-rw-r--r--arch/x86/vdso/vdso.lds.S2
-rw-r--r--arch/x86/vdso/vdso2c.c73
-rw-r--r--arch/x86/vdso/vdso2c.h202
-rw-r--r--arch/x86/vdso/vdso32/vdso-fakesections.c1
-rw-r--r--arch/x86/vdso/vdsox32.lds.S2
-rw-r--r--arch/x86/vdso/vma.c4
-rw-r--r--block/bio.c8
-rw-r--r--block/blk-cgroup.c9
-rw-r--r--block/blk-cgroup.h21
-rw-r--r--block/blk-merge.c10
-rw-r--r--block/blk-mq.c2
-rw-r--r--block/elevator.c2
-rw-r--r--drivers/acpi/ac.c130
-rw-r--r--drivers/acpi/acpi_pnp.c2
-rw-r--r--drivers/acpi/apei/Kconfig8
-rw-r--r--drivers/acpi/apei/apei-base.c13
-rw-r--r--drivers/acpi/apei/ghes.c173
-rw-r--r--drivers/acpi/apei/hest.c29
-rw-r--r--drivers/acpi/battery.c41
-rw-r--r--drivers/acpi/ec.c164
-rw-r--r--drivers/acpi/resource.c10
-rw-r--r--drivers/acpi/video.c21
-rw-r--r--drivers/acpi/video_detect.c8
-rw-r--r--drivers/ata/ahci.h2
-rw-r--r--drivers/ata/ahci_imx.c38
-rw-r--r--drivers/ata/ahci_platform.c2
-rw-r--r--drivers/ata/ahci_xgene.c60
-rw-r--r--drivers/ata/libahci.c7
-rw-r--r--drivers/ata/libahci_platform.c7
-rw-r--r--drivers/base/dma-contiguous.c12
-rw-r--r--drivers/base/platform.c18
-rw-r--r--drivers/block/drbd/drbd_receiver.c5
-rw-r--r--drivers/block/floppy.c2
-rw-r--r--drivers/block/rbd.c10
-rw-r--r--drivers/block/zram/zram_drv.c5
-rw-r--r--drivers/bluetooth/ath3k.c2
-rw-r--r--drivers/bluetooth/btusb.c1
-rw-r--r--drivers/bluetooth/hci_h5.c1
-rw-r--r--drivers/char/hw_random/core.c47
-rw-r--r--drivers/char/hw_random/virtio-rng.c10
-rw-r--r--drivers/char/i8k.c4
-rw-r--r--drivers/char/random.c17
-rw-r--r--drivers/clk/clk-s2mps11.c7
-rw-r--r--drivers/clk/qcom/mmcc-msm8960.c2
-rw-r--r--drivers/clk/samsung/clk-exynos4.c16
-rw-r--r--drivers/clk/samsung/clk-exynos5250.c2
-rw-r--r--drivers/clk/samsung/clk-exynos5420.c91
-rw-r--r--drivers/clk/samsung/clk-s3c2410.c9
-rw-r--r--drivers/clk/samsung/clk-s3c64xx.c6
-rw-r--r--drivers/clk/spear/spear3xx_clock.c16
-rw-r--r--drivers/clk/sunxi/clk-sun6i-apb0-gates.c2
-rw-r--r--drivers/clk/ti/apll.c8
-rw-r--r--drivers/clk/ti/dpll.c5
-rw-r--r--drivers/clk/ti/mux.c2
-rw-r--r--drivers/clocksource/exynos_mct.c29
-rw-r--r--drivers/cpufreq/Kconfig.arm3
-rw-r--r--drivers/cpufreq/Makefile2
-rw-r--r--drivers/cpufreq/cpufreq-cpu0.c7
-rw-r--r--drivers/cpufreq/cpufreq.c6
-rw-r--r--drivers/cpufreq/intel_pstate.c35
-rw-r--r--drivers/cpufreq/sa1110-cpufreq.c2
-rw-r--r--drivers/crypto/caam/jr.c8
-rw-r--r--drivers/dma/cppi41.c13
-rw-r--r--drivers/dma/imx-sdma.c22
-rw-r--r--drivers/firewire/Kconfig1
-rw-r--r--drivers/firmware/efi/efi-pstore.c2
-rw-r--r--drivers/firmware/efi/efi.c28
-rw-r--r--drivers/firmware/efi/fdt.c12
-rw-r--r--drivers/gpio/gpio-mcp23s08.c6
-rw-r--r--drivers/gpu/drm/drm_drv.c3
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_dpi.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c8
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.h2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimd.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c19
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c50
-rw-r--r--drivers/gpu/drm/exynos/regs-mixer.h1
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c12
-rw-r--r--drivers/gpu/drm/i915/i915_debugfs.c2
-rw-r--r--drivers/gpu/drm/i915/i915_dma.c5
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h3
-rw-r--r--drivers/gpu/drm/i915/i915_gem_context.c8
-rw-r--r--drivers/gpu/drm/i915/i915_gem_stolen.c44
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h3
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c6
-rw-r--r--drivers/gpu/drm/i915/intel_display.c58
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c46
-rw-r--r--drivers/gpu/drm/i915/intel_drv.h6
-rw-r--r--drivers/gpu/drm/i915/intel_dsi.c29
-rw-r--r--drivers/gpu/drm/i915/intel_dsi_cmd.c6
-rw-r--r--drivers/gpu/drm/i915/intel_lvds.c7
-rw-r--r--drivers/gpu/drm/i915/intel_opregion.c9
-rw-r--r--drivers/gpu/drm/i915/intel_panel.c16
-rw-r--r--drivers/gpu/drm/i915/intel_pm.c66
-rw-r--r--drivers/gpu/drm/i915/intel_sprite.c8
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c2
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.h1
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_connector.c8
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c22
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h1
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c2
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c6
-rw-r--r--drivers/gpu/drm/msm/msm_iommu.c23
-rw-r--r--drivers/gpu/drm/msm/msm_mmu.h1
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nv50.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c6
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c8
-rw-r--r--drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h4
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c1
-rw-r--r--drivers/gpu/drm/nouveau/core/subdev/therm/temp.c6
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c17
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.c13
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_fbcon.h1
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c3
-rw-r--r--drivers/gpu/drm/qxl/qxl_irq.c3
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c8
-rw-r--r--drivers/gpu/drm/radeon/atombios_dp.c14
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c10
-rw-r--r--drivers/gpu/drm/radeon/ci_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/cik.c6
-rw-r--r--drivers/gpu/drm/radeon/cikd.h2
-rw-r--r--drivers/gpu/drm/radeon/cypress_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c19
-rw-r--r--drivers/gpu/drm/radeon/evergreen_reg.h1
-rw-r--r--drivers/gpu/drm/radeon/kv_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/ni_dpm.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon.h8
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c3
-rw-r--r--drivers/gpu/drm/radeon/radeon_display.c211
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h15
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c6
-rw-r--r--drivers/gpu/drm/radeon/radeon_vm.c4
-rw-r--r--drivers/gpu/drm/radeon/rv515.c5
-rw-r--r--drivers/gpu/drm/radeon/rv770_dpm.c6
-rw-r--r--drivers/gpu/drm/radeon/si.c6
-rw-r--r--drivers/gpu/drm/radeon/trinity_dpm.c10
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_fb.c1
-rw-r--r--drivers/hid/Kconfig2
-rw-r--r--drivers/hid/hid-ids.h3
-rw-r--r--drivers/hid/hid-rmi.c2
-rw-r--r--drivers/hid/hid-sensor-hub.c25
-rw-r--r--drivers/hid/usbhid/hid-quirks.c3
-rw-r--r--drivers/hv/connection.c8
-rw-r--r--drivers/hv/hv_fcopy.c2
-rw-r--r--drivers/hv/hv_kvp.c17
-rw-r--r--drivers/hv/hv_util.c2
-rw-r--r--drivers/hwmon/Kconfig6
-rw-r--r--drivers/hwmon/adc128d818.c28
-rw-r--r--drivers/hwmon/adm1021.c14
-rw-r--r--drivers/hwmon/adm1029.c3
-rw-r--r--drivers/hwmon/adm1031.c8
-rw-r--r--drivers/hwmon/adt7470.c6
-rw-r--r--drivers/hwmon/amc6821.c2
-rw-r--r--drivers/hwmon/da9052-hwmon.c2
-rw-r--r--drivers/hwmon/da9055-hwmon.c2
-rw-r--r--drivers/hwmon/emc2103.c15
-rw-r--r--drivers/hwmon/gpio-fan.c2
-rw-r--r--drivers/hwmon/ntc_thermistor.c16
-rw-r--r--drivers/hwmon/w83l786ng.c2
-rw-r--r--drivers/i2c/busses/i2c-sun6i-p2wi.c1
-rw-r--r--drivers/i2c/muxes/Kconfig1
-rw-r--r--drivers/iio/accel/hid-sensor-accel-3d.c7
-rw-r--r--drivers/iio/accel/mma8452.c8
-rw-r--r--drivers/iio/adc/ad799x.c8
-rw-r--r--drivers/iio/adc/ti_am335x_adc.c2
-rw-r--r--drivers/iio/gyro/hid-sensor-gyro-3d.c7
-rw-r--r--drivers/iio/industrialio-event.c3
-rw-r--r--drivers/iio/inkern.c6
-rw-r--r--drivers/iio/light/hid-sensor-als.c7
-rw-r--r--drivers/iio/light/hid-sensor-prox.c7
-rw-r--r--drivers/iio/light/tcs3472.c11
-rw-r--r--drivers/iio/magnetometer/hid-sensor-magn-3d.c7
-rw-r--r--drivers/iio/pressure/hid-sensor-press.c7
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c14
-rw-r--r--drivers/infiniband/hw/cxgb4/device.c18
-rw-r--r--drivers/infiniband/hw/cxgb4/iw_cxgb4.h2
-rw-r--r--drivers/infiniband/hw/mlx5/qp.c2
-rw-r--r--drivers/iommu/amd_iommu_v2.c18
-rw-r--r--drivers/iommu/fsl_pamu.c8
-rw-r--r--drivers/iommu/fsl_pamu_domain.c18
-rw-r--r--drivers/iommu/intel-iommu.c9
-rw-r--r--drivers/irqchip/irq-armada-370-xp.c17
-rw-r--r--drivers/irqchip/irq-brcmstb-l2.c2
-rw-r--r--drivers/irqchip/irq-gic.c7
-rw-r--r--drivers/irqchip/spear-shirq.c2
-rw-r--r--drivers/isdn/hisax/Kconfig11
-rw-r--r--drivers/isdn/hisax/l3ni1.c14
-rw-r--r--drivers/isdn/i4l/isdn_ppp.c8
-rw-r--r--drivers/macintosh/smu.c3
-rw-r--r--drivers/md/dm-cache-metadata.c9
-rw-r--r--drivers/md/dm-crypt.c4
-rw-r--r--drivers/md/dm-io.c22
-rw-r--r--drivers/md/dm-mpath.c5
-rw-r--r--drivers/md/dm-thin-metadata.c9
-rw-r--r--drivers/md/dm-zero.c4
-rw-r--r--drivers/md/dm.c15
-rw-r--r--drivers/md/md.c15
-rw-r--r--drivers/memstick/host/rtsx_pci_ms.c1
-rw-r--r--drivers/mfd/Kconfig5
-rw-r--r--drivers/mfd/ab8500-core.c2
-rw-r--r--drivers/misc/Kconfig2
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0001.c43
-rw-r--r--drivers/mtd/devices/elm.c2
-rw-r--r--drivers/mtd/nand/nand_base.c6
-rw-r--r--drivers/mtd/ubi/fastmap.c4
-rw-r--r--drivers/net/bonding/bond_main.c11
-rw-r--r--drivers/net/can/slcan.c37
-rw-r--r--drivers/net/ethernet/allwinner/sun4i-emac.c1
-rw-r--r--drivers/net/ethernet/broadcom/bcmsysport.c43
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c3
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c2
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.c16
-rw-r--r--drivers/net/ethernet/broadcom/genet/bcmgenet.h2
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c8
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c16
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/t4_hw.c1
-rw-r--r--drivers/net/ethernet/dec/tulip/timer.c2
-rw-r--r--drivers/net/ethernet/emulex/benet/be.h4
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h2
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c6
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c8
-rw-r--r--drivers/net/ethernet/freescale/ucc_geth.c4
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_82575.c7
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_defines.h18
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_hw.h3
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.c66
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_i210.h12
-rw-r--r--drivers/net/ethernet/intel/igb/e1000_regs.h1
-rw-r--r--drivers/net/ethernet/intel/igb/igb_main.c16
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c4
-rw-r--r--drivers/net/ethernet/marvell/skge.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cq.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_cq.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_ethtool.c7
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_netdev.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_tx.c34
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c69
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c3
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4_en.h4
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/mr.c19
-rw-r--r--drivers/net/ethernet/realtek/r8169.c25
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c5
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/enh_desc.c2
-rw-r--r--drivers/net/ethernet/ti/cpsw.c7
-rw-r--r--drivers/net/ethernet/tile/tilegx.c1
-rw-r--r--drivers/net/fddi/defxx.c17
-rw-r--r--drivers/net/hyperv/netvsc.c2
-rw-r--r--drivers/net/ieee802154/at86rf230.c5
-rw-r--r--drivers/net/phy/at803x.c195
-rw-r--r--drivers/net/phy/dp83640.c6
-rw-r--r--drivers/net/phy/mdio_bus.c44
-rw-r--r--drivers/net/phy/phy.c3
-rw-r--r--drivers/net/ppp/ppp_generic.c8
-rw-r--r--drivers/net/ppp/pppoe.c2
-rw-r--r--drivers/net/slip/slip.c36
-rw-r--r--drivers/net/slip/slip.h1
-rw-r--r--drivers/net/usb/hso.c50
-rw-r--r--drivers/net/usb/huawei_cdc_ncm.c7
-rw-r--r--drivers/net/usb/qmi_wwan.c1
-rw-r--r--drivers/net/usb/r8152.c7
-rw-r--r--drivers/net/usb/smsc95xx.c14
-rw-r--r--drivers/net/vmxnet3/vmxnet3_drv.c7
-rw-r--r--drivers/net/vmxnet3/vmxnet3_ethtool.c12
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h5
-rw-r--r--drivers/net/wan/farsync.c112
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c6
-rw-r--r--drivers/net/wireless/ath/ath10k/htt_rx.c18
-rw-r--r--drivers/net/wireless/b43/Kconfig2
-rw-r--r--drivers/net/wireless/b43/main.c1
-rw-r--r--drivers/net/wireless/b43/xmit.c10
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/usb.c5
-rw-r--r--drivers/net/wireless/iwlwifi/dvm/rxon.c12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fw.h1
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c5
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/mac80211.c19
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/scan.c65
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c3
-rw-r--r--drivers/net/wireless/mwifiex/11n_aggr.c1
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c1
-rw-r--r--drivers/net/wireless/mwifiex/cmdevt.c1
-rw-r--r--drivers/net/wireless/mwifiex/main.c1
-rw-r--r--drivers/net/wireless/mwifiex/pcie.c4
-rw-r--r--drivers/net/wireless/mwifiex/sta_tx.c1
-rw-r--r--drivers/net/wireless/mwifiex/tdls.c2
-rw-r--r--drivers/net/wireless/mwifiex/txrx.c1
-rw-r--r--drivers/net/wireless/mwifiex/uap_txrx.c1
-rw-r--r--drivers/net/wireless/mwifiex/util.h43
-rw-r--r--drivers/net/wireless/rt2x00/rt2500pci.c7
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c55
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h1
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00dev.c24
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.h1
-rw-r--r--drivers/net/xen-netback/common.h1
-rw-r--r--drivers/net/xen-netback/interface.c49
-rw-r--r--drivers/net/xen-netback/xenbus.c28
-rw-r--r--drivers/net/xen-netfront.c134
-rw-r--r--drivers/of/fdt.c15
-rw-r--r--drivers/of/of_mdio.c42
-rw-r--r--drivers/pci/pci.c9
-rw-r--r--drivers/phy/Kconfig2
-rw-r--r--drivers/phy/phy-core.c7
-rw-r--r--drivers/phy/phy-omap-usb2.c11
-rw-r--r--drivers/phy/phy-samsung-usb2.c1
-rw-r--r--drivers/pinctrl/berlin/berlin.c2
-rw-r--r--drivers/pinctrl/sunxi/pinctrl-sunxi.c4
-rw-r--r--drivers/ptp/Kconfig2
-rw-r--r--drivers/regulator/bcm590xx-regulator.c5
-rw-r--r--drivers/regulator/palmas-regulator.c12
-rw-r--r--drivers/regulator/tps65218-regulator.c3
-rw-r--r--drivers/scsi/be2iscsi/be_main.c2
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c4
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c16
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c2
-rw-r--r--drivers/scsi/ibmvscsi/ibmvscsi.c13
-rw-r--r--drivers/scsi/pm8001/pm8001_init.c13
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c17
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h4
-rw-r--r--drivers/scsi/scsi_error.c20
-rw-r--r--drivers/scsi/scsi_transport_fc.c1
-rw-r--r--drivers/scsi/sd.c5
-rw-r--r--drivers/scsi/virtio_scsi.c26
-rw-r--r--drivers/spi/spi-pxa2xx.c8
-rw-r--r--drivers/spi/spi-qup.c44
-rw-r--r--drivers/spi/spi-sh-sci.c4
-rw-r--r--drivers/staging/iio/adc/ad7291.c4
-rw-r--r--drivers/staging/tidspbridge/core/tiomap3430.c6
-rw-r--r--drivers/target/iscsi/iscsi_target.c2
-rw-r--r--drivers/target/iscsi/iscsi_target_auth.c14
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c13
-rw-r--r--drivers/target/iscsi/iscsi_target_util.c2
-rw-r--r--drivers/target/loopback/tcm_loop.c1
-rw-r--r--drivers/target/target_core_device.c1
-rw-r--r--drivers/tc/tc.c10
-rw-r--r--drivers/thermal/imx_thermal.c18
-rw-r--r--drivers/thermal/of-thermal.c7
-rw-r--r--drivers/thermal/thermal_hwmon.c33
-rw-r--r--drivers/thermal/ti-soc-thermal/ti-bandgap.c2
-rw-r--r--drivers/tty/serial/arc_uart.c2
-rw-r--r--drivers/tty/serial/imx.c3
-rw-r--r--drivers/tty/serial/ip22zilog.c2
-rw-r--r--drivers/tty/serial/m32r_sio.c8
-rw-r--r--drivers/tty/serial/msm_serial.c2
-rw-r--r--drivers/tty/serial/pmac_zilog.c3
-rw-r--r--drivers/tty/serial/sunsab.c3
-rw-r--r--drivers/tty/serial/sunzilog.c2
-rw-r--r--drivers/usb/chipidea/udc.c11
-rw-r--r--drivers/usb/core/hub.c19
-rw-r--r--drivers/usb/dwc3/Kconfig1
-rw-r--r--drivers/usb/dwc3/dwc3-omap.c17
-rw-r--r--drivers/usb/dwc3/gadget.c8
-rw-r--r--drivers/usb/gadget/configfs.c37
-rw-r--r--drivers/usb/gadget/configfs.h1
-rw-r--r--drivers/usb/gadget/f_fs.c12
-rw-r--r--drivers/usb/gadget/f_rndis.c6
-rw-r--r--drivers/usb/gadget/gr_udc.c5
-rw-r--r--drivers/usb/gadget/inode.c7
-rw-r--r--drivers/usb/gadget/u_ether.c3
-rw-r--r--drivers/usb/host/Kconfig2
-rw-r--r--drivers/usb/host/xhci-hub.c5
-rw-r--r--drivers/usb/host/xhci-ring.c9
-rw-r--r--drivers/usb/host/xhci.c10
-rw-r--r--drivers/usb/musb/musb_am335x.c23
-rw-r--r--drivers/usb/musb/musb_core.c2
-rw-r--r--drivers/usb/musb/musb_cppi41.c2
-rw-r--r--drivers/usb/musb/musb_dsps.c9
-rw-r--r--drivers/usb/musb/ux500.c1
-rw-r--r--drivers/usb/phy/phy-msm-usb.c4
-rw-r--r--drivers/usb/renesas_usbhs/fifo.c8
-rw-r--r--drivers/usb/serial/cp210x.c1
-rw-r--r--drivers/usb/serial/ftdi_sio.c12
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h9
-rw-r--r--drivers/usb/serial/option.c28
-rw-r--r--drivers/usb/storage/scsiglue.c4
-rw-r--r--drivers/usb/storage/unusual_devs.h7
-rw-r--r--drivers/vhost/net.c12
-rw-r--r--drivers/vhost/scsi.c12
-rw-r--r--drivers/video/fbdev/atmel_lcdfb.c2
-rw-r--r--drivers/video/fbdev/bfin_adv7393fb.c2
-rw-r--r--drivers/video/fbdev/omap2/dss/omapdss-boot-init.c8
-rw-r--r--drivers/video/fbdev/vt8500lcdfb.c2
-rw-r--r--drivers/xen/balloon.c12
-rw-r--r--drivers/xen/manage.c5
-rw-r--r--firmware/Makefile6
-rw-r--r--fs/aio.c13
-rw-r--r--fs/autofs4/inode.c2
-rw-r--r--fs/btrfs/compression.c2
-rw-r--r--fs/btrfs/dev-replace.c5
-rw-r--r--fs/btrfs/disk-io.c5
-rw-r--r--fs/btrfs/extent-tree.c5
-rw-r--r--fs/btrfs/ioctl.c37
-rw-r--r--fs/btrfs/ordered-data.c11
-rw-r--r--fs/btrfs/print-tree.c9
-rw-r--r--fs/btrfs/raid56.c5
-rw-r--r--fs/btrfs/super.c7
-rw-r--r--fs/btrfs/sysfs.c32
-rw-r--r--fs/btrfs/sysfs.h4
-rw-r--r--fs/btrfs/transaction.c12
-rw-r--r--fs/btrfs/volumes.c32
-rw-r--r--fs/btrfs/zlib.c2
-rw-r--r--fs/cifs/cifs_unicode.c7
-rw-r--r--fs/cifs/cifsfs.c17
-rw-r--r--fs/cifs/link.c2
-rw-r--r--fs/ext4/balloc.c16
-rw-r--r--fs/ext4/extents_status.c4
-rw-r--r--fs/ext4/ialloc.c37
-rw-r--r--fs/ext4/indirect.c24
-rw-r--r--fs/ext4/mballoc.c12
-rw-r--r--fs/ext4/super.c60
-rw-r--r--fs/f2fs/data.c23
-rw-r--r--fs/f2fs/dir.c2
-rw-r--r--fs/f2fs/f2fs.h6
-rw-r--r--fs/f2fs/file.c12
-rw-r--r--fs/f2fs/inode.c1
-rw-r--r--fs/f2fs/namei.c13
-rw-r--r--fs/f2fs/node.c2
-rw-r--r--fs/f2fs/segment.c5
-rw-r--r--fs/f2fs/super.c4
-rw-r--r--fs/fuse/dev.c51
-rw-r--r--fs/fuse/dir.c41
-rw-r--r--fs/fuse/file.c8
-rw-r--r--fs/fuse/inode.c22
-rw-r--r--fs/gfs2/file.c4
-rw-r--r--fs/gfs2/glock.c14
-rw-r--r--fs/gfs2/glops.c4
-rw-r--r--fs/gfs2/lock_dlm.c4
-rw-r--r--fs/gfs2/rgrp.c4
-rw-r--r--fs/jbd2/transaction.c5
-rw-r--r--fs/kernfs/file.c69
-rw-r--r--fs/kernfs/mount.c30
-rw-r--r--fs/mbcache.c3
-rw-r--r--fs/nfs/direct.c2
-rw-r--r--fs/nfs/inode.c76
-rw-r--r--fs/nfs/internal.h1
-rw-r--r--fs/nfs/nfs3acl.c43
-rw-r--r--fs/nfs/nfs3proc.c4
-rw-r--r--fs/nfs/nfs4_fs.h2
-rw-r--r--fs/nfs/nfs4namespace.c102
-rw-r--r--fs/nfs/nfs4proc.c2
-rw-r--r--fs/nfs/pagelist.c20
-rw-r--r--fs/nfs/write.c339
-rw-r--r--fs/nfsd/nfs4proc.c9
-rw-r--r--fs/nfsd/nfs4xdr.c17
-rw-r--r--fs/ocfs2/dlm/dlmcommon.h4
-rw-r--r--fs/ocfs2/dlm/dlmmaster.c57
-rw-r--r--fs/ocfs2/dlm/dlmrecovery.c3
-rw-r--r--fs/ocfs2/dlm/dlmthread.c13
-rw-r--r--fs/ocfs2/dlm/dlmunlock.c18
-rw-r--r--fs/ocfs2/namei.c145
-rw-r--r--fs/ocfs2/ocfs2_trace.h2
-rw-r--r--fs/ocfs2/refcounttree.c8
-rw-r--r--fs/ocfs2/super.c8
-rw-r--r--fs/proc/stat.c22
-rw-r--r--fs/quota/dquot.c2
-rw-r--r--fs/seq_file.c30
-rw-r--r--fs/xfs/xfs_bmap.c7
-rw-r--r--fs/xfs/xfs_bmap.h4
-rw-r--r--fs/xfs/xfs_bmap_util.c53
-rw-r--r--fs/xfs/xfs_bmap_util.h4
-rw-r--r--fs/xfs/xfs_btree.c82
-rw-r--r--fs/xfs/xfs_iomap.c3
-rw-r--r--fs/xfs/xfs_sb.c25
-rw-r--r--include/acpi/apei.h4
-rw-r--r--include/acpi/video.h2
-rw-r--r--include/asm-generic/vmlinux.lds.h2
-rw-r--r--include/drm/i915_pciids.h12
-rw-r--r--include/drm/i915_powerwell.h1
-rw-r--r--include/dt-bindings/clock/exynos5420.h3
-rw-r--r--include/dt-bindings/clock/imx6sl-clock.h3
-rw-r--r--include/dt-bindings/clock/stih415-clks.h1
-rw-r--r--include/dt-bindings/clock/stih416-clks.h1
-rw-r--r--include/linux/bio.h13
-rw-r--r--include/linux/blkdev.h1
-rw-r--r--include/linux/cpufreq.h4
-rw-r--r--include/linux/elevator.h2
-rw-r--r--include/linux/kernfs.h2
-rw-r--r--include/linux/mlx4/device.h4
-rw-r--r--include/linux/mutex.h4
-rw-r--r--include/linux/nmi.h16
-rw-r--r--include/linux/of_mdio.h8
-rw-r--r--include/linux/osq_lock.h27
-rw-r--r--include/linux/page-flags.h3
-rw-r--r--include/linux/percpu-defs.h4
-rw-r--r--include/linux/phy.h9
-rw-r--r--include/linux/ptrace.h3
-rw-r--r--include/linux/rcupdate.h46
-rw-r--r--include/linux/rwsem-spinlock.h8
-rw-r--r--include/linux/rwsem.h34
-rw-r--r--include/linux/sched.h8
-rw-r--r--include/linux/socket.h4
-rw-r--r--include/linux/uio.h19
-rw-r--r--include/linux/usb_usual.h4
-rw-r--r--include/net/neighbour.h1
-rw-r--r--include/net/netfilter/nf_tables.h6
-rw-r--r--include/net/netns/ieee802154_6lowpan.h2
-rw-r--r--include/net/sock.h16
-rw-r--r--include/scsi/scsi_cmnd.h2
-rw-r--r--include/scsi/scsi_device.h1
-rw-r--r--include/uapi/linux/btrfs.h1
-rw-r--r--include/uapi/linux/usb/functionfs.h7
-rw-r--r--include/uapi/sound/compress_offload.h14
-rw-r--r--include/uapi/sound/compress_params.h14
-rw-r--r--kernel/Kconfig.locks9
-rw-r--r--kernel/cgroup.c58
-rw-r--r--kernel/cpuset.c20
-rw-r--r--kernel/events/core.c2
-rw-r--r--kernel/events/uprobes.c6
-rw-r--r--kernel/irq/irqdesc.c4
-rw-r--r--kernel/kexec.c1
-rw-r--r--kernel/locking/mcs_spinlock.c64
-rw-r--r--kernel/locking/mcs_spinlock.h9
-rw-r--r--kernel/locking/mutex.c2
-rw-r--r--kernel/locking/rwsem-spinlock.c28
-rw-r--r--kernel/locking/rwsem-xadd.c16
-rw-r--r--kernel/locking/rwsem.c2
-rw-r--r--kernel/power/process.c1
-rw-r--r--kernel/power/suspend.c4
-rw-r--r--kernel/printk/printk.c44
-rw-r--r--kernel/rcu/tree.c140
-rw-r--r--kernel/rcu/tree.h6
-rw-r--r--kernel/rcu/tree_plugin.h2
-rw-r--r--kernel/rcu/update.c22
-rw-r--r--kernel/sched/core.c7
-rw-r--r--kernel/sched/debug.c2
-rw-r--r--kernel/smp.c57
-rw-r--r--kernel/sysctl.c14
-rw-r--r--kernel/time/alarmtimer.c20
-rw-r--r--kernel/trace/ftrace.c4
-rw-r--r--kernel/trace/ring_buffer.c4
-rw-r--r--kernel/trace/trace.c20
-rw-r--r--kernel/trace/trace_events.c1
-rw-r--r--kernel/trace/trace_uprobe.c46
-rw-r--r--kernel/watchdog.c41
-rw-r--r--kernel/workqueue.c3
-rw-r--r--lib/Kconfig.debug4
-rw-r--r--lib/cpumask.c2
-rw-r--r--lib/iovec.c55
-rw-r--r--lib/lz4/lz4_decompress.c12
-rw-r--r--lib/lzo/lzo1x_decompress_safe.c62
-rw-r--r--lib/swiotlb.c28
-rw-r--r--mm/huge_memory.c57
-rw-r--r--mm/hugetlb.c71
-rw-r--r--mm/ksm.c1
-rw-r--r--mm/memory-failure.c9
-rw-r--r--mm/mempolicy.c48
-rw-r--r--mm/migrate.c2
-rw-r--r--mm/msync.c3
-rw-r--r--mm/nommu.c2
-rw-r--r--mm/page_alloc.c56
-rw-r--r--mm/rmap.c12
-rw-r--r--mm/shmem.c74
-rw-r--r--mm/slab.c90
-rw-r--r--mm/slub.c6
-rw-r--r--net/8021q/vlan_core.c5
-rw-r--r--net/8021q/vlan_dev.c13
-rw-r--r--net/appletalk/ddp.c3
-rw-r--r--net/bluetooth/hci_conn.c19
-rw-r--r--net/bluetooth/hci_event.c17
-rw-r--r--net/bluetooth/l2cap_core.c8
-rw-r--r--net/bluetooth/l2cap_sock.c5
-rw-r--r--net/bluetooth/mgmt.c104
-rw-r--r--net/bluetooth/smp.c69
-rw-r--r--net/core/dev.c30
-rw-r--r--net/core/dst.c16
-rw-r--r--net/core/filter.c10
-rw-r--r--net/core/iovec.c55
-rw-r--r--net/core/neighbour.c9
-rw-r--r--net/core/skbuff.c2
-rw-r--r--net/ipv4/gre_demux.c1
-rw-r--r--net/ipv4/icmp.c2
-rw-r--r--net/ipv4/igmp.c10
-rw-r--r--net/ipv4/ip_tunnel.c26
-rw-r--r--net/ipv4/route.c15
-rw-r--r--net/ipv4/tcp.c3
-rw-r--r--net/ipv4/tcp_fastopen.c2
-rw-r--r--net/ipv4/tcp_input.c10
-rw-r--r--net/ipv4/tcp_output.c6
-rw-r--r--net/ipv4/udp.c5
-rw-r--r--net/ipv6/mcast.c13
-rw-r--r--net/ipv6/udp.c6
-rw-r--r--net/l2tp/l2tp_ppp.c4
-rw-r--r--net/mac80211/util.c5
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c2
-rw-r--r--net/netfilter/nf_conntrack_netlink.c20
-rw-r--r--net/netfilter/nf_nat_core.c35
-rw-r--r--net/netfilter/nf_tables_api.c11
-rw-r--r--net/netfilter/nft_compat.c18
-rw-r--r--net/netfilter/nft_nat.c14
-rw-r--r--net/netlink/af_netlink.c4
-rw-r--r--net/openvswitch/actions.c2
-rw-r--r--net/openvswitch/datapath.c27
-rw-r--r--net/openvswitch/flow.c4
-rw-r--r--net/openvswitch/flow.h5
-rw-r--r--net/openvswitch/flow_table.c16
-rw-r--r--net/openvswitch/flow_table.h3
-rw-r--r--net/openvswitch/vport-gre.c17
-rw-r--r--net/sctp/sysctl.c46
-rw-r--r--net/sctp/ulpevent.c122
-rw-r--r--net/sunrpc/auth.c1
-rw-r--r--net/tipc/bcast.c1
-rw-r--r--net/tipc/msg.c11
-rw-r--r--net/wireless/core.h2
-rw-r--r--net/wireless/nl80211.c11
-rw-r--r--net/wireless/reg.c22
-rwxr-xr-xscripts/checkpatch.pl15
-rwxr-xr-xscripts/kernel-doc15
-rw-r--r--scripts/recordmcount.h4
-rw-r--r--sound/pci/hda/hda_auto_parser.c1
-rw-r--r--sound/pci/hda/hda_controller.c3
-rw-r--r--sound/pci/hda/hda_i915.c55
-rw-r--r--sound/pci/hda/hda_i915.h2
-rw-r--r--sound/pci/hda/hda_intel.c45
-rw-r--r--sound/pci/hda/hda_local.h21
-rw-r--r--sound/pci/hda/hda_priv.h1
-rw-r--r--sound/pci/hda/hda_tegra.c2
-rw-r--r--sound/pci/hda/patch_hdmi.c4
-rw-r--r--sound/pci/hda/patch_realtek.c502
-rw-r--r--sound/pci/hda/patch_sigmatel.c58
-rw-r--r--sound/soc/fsl/imx-pcm-dma.c1
-rw-r--r--sound/usb/card.c13
-rw-r--r--sound/usb/endpoint.c17
-rw-r--r--sound/usb/endpoint.h1
-rw-r--r--tools/lib/lockdep/include/liblockdep/mutex.h4
-rw-r--r--tools/lib/lockdep/include/liblockdep/rwlock.h8
-rw-r--r--tools/lib/lockdep/preload.c20
-rw-r--r--tools/perf/ui/browsers/hists.c21
-rw-r--r--tools/perf/util/machine.c54
-rw-r--r--tools/testing/selftests/cpu-hotplug/Makefile2
-rw-r--r--tools/testing/selftests/ipc/msgque.c5
-rw-r--r--tools/testing/selftests/memory-hotplug/Makefile2
-rw-r--r--tools/testing/selftests/powerpc/tm/Makefile2
-rw-r--r--tools/testing/selftests/powerpc/tm/tm-resched-dscr.c14
-rw-r--r--tools/thermal/tmon/Makefile2
-rw-r--r--tools/thermal/tmon/tmon.c26
-rw-r--r--tools/usb/ffs-test.c4
878 files changed, 8593 insertions, 5321 deletions
diff --git a/CREDITS b/CREDITS
index c322dcfb926d..28ee1514b9de 100644
--- a/CREDITS
+++ b/CREDITS
@@ -9,6 +9,10 @@
  Linus
 ----------
 
+M: Matt Mackal
+E: mpm@selenic.com
+D: SLOB slab allocator
+
 N: Matti Aarnio
 E: mea@nic.funet.fi
 D: Alpha systems hacking, IPv6 and other network related stuff
diff --git a/Documentation/Changes b/Documentation/Changes
index 2254db0f00a5..227bec88021e 100644
--- a/Documentation/Changes
+++ b/Documentation/Changes
@@ -280,12 +280,9 @@ that is possible.
 mcelog
 ------
 
-In Linux 2.6.31+ the i386 kernel needs to run the mcelog utility
-as a regular cronjob similar to the x86-64 kernel to process and log
-machine check events when CONFIG_X86_NEW_MCE is enabled. Machine check
-events are errors reported by the CPU. Processing them is strongly encouraged.
-All x86-64 kernels since 2.6.4 require the mcelog utility to
-process machine checks.
+On x86 kernels the mcelog utility is needed to process and log machine check
+events when CONFIG_X86_MCE is enabled. Machine check events are errors reported
+by the CPU. Processing them is strongly encouraged.
 
 Getting updated software
 ========================
diff --git a/Documentation/DocBook/gadget.tmpl b/Documentation/DocBook/gadget.tmpl
index 4017f147ba2f..2c425d70f7e2 100644
--- a/Documentation/DocBook/gadget.tmpl
+++ b/Documentation/DocBook/gadget.tmpl
@@ -708,7 +708,7 @@ hardware level details could be very different.
 
 <para>Systems need specialized hardware support to implement OTG,
 notably including a special <emphasis>Mini-AB</emphasis> jack
-and associated transciever to support <emphasis>Dual-Role</emphasis>
+and associated transceiver to support <emphasis>Dual-Role</emphasis>
 operation:
 they can act either as a host, using the standard
 Linux-USB host side driver stack,
diff --git a/Documentation/DocBook/genericirq.tmpl b/Documentation/DocBook/genericirq.tmpl
index 46347f603353..59fb5c077541 100644
--- a/Documentation/DocBook/genericirq.tmpl
+++ b/Documentation/DocBook/genericirq.tmpl
@@ -182,7 +182,7 @@
  <para>
  Each interrupt is described by an interrupt descriptor structure
  irq_desc. The interrupt is referenced by an 'unsigned int' numeric
- value which selects the corresponding interrupt decription structure
+ value which selects the corresponding interrupt description structure
  in the descriptor structures array.
  The descriptor structure contains status information and pointers
  to the interrupt flow method and the interrupt chip structure
@@ -470,7 +470,7 @@ if (desc->irq_data.chip->irq_eoi)
  <para>
  To avoid copies of identical implementations of IRQ chips the
  core provides a configurable generic interrupt chip
- implementation. Developers should check carefuly whether the
+ implementation. Developers should check carefully whether the
  generic chip fits their needs before implementing the same
  functionality slightly differently themselves.
  </para>
diff --git a/Documentation/DocBook/kernel-locking.tmpl b/Documentation/DocBook/kernel-locking.tmpl
index 19f2a5a5a5b4..e584ee12a1e7 100644
--- a/Documentation/DocBook/kernel-locking.tmpl
+++ b/Documentation/DocBook/kernel-locking.tmpl
@@ -1760,7 +1760,7 @@ as it would be on UP.
 </para>
 
 <para>
-There is a furthur optimization possible here: remember our original
+There is a further optimization possible here: remember our original
 cache code, where there were no reference counts and the caller simply
 held the lock whenever using the object? This is still possible: if
 you hold the lock, no one can delete the object, so you don't need to
diff --git a/Documentation/DocBook/libata.tmpl b/Documentation/DocBook/libata.tmpl
index deb71baed328..d7fcdc5a4379 100644
--- a/Documentation/DocBook/libata.tmpl
+++ b/Documentation/DocBook/libata.tmpl
@@ -677,7 +677,7 @@ and other resources, etc.
 
  <listitem>
  <para>
- ATA_QCFLAG_ACTIVE is clared from qc->flags.
+ ATA_QCFLAG_ACTIVE is cleared from qc->flags.
  </para>
  </listitem>
 
@@ -708,7 +708,7 @@ and other resources, etc.
 
  <listitem>
  <para>
- qc->waiting is claread &amp; completed (in that order).
+ qc->waiting is cleared &amp; completed (in that order).
  </para>
  </listitem>
 
@@ -1163,7 +1163,7 @@ and other resources, etc.
 
  <para>
  Once sense data is acquired, this type of errors can be
- handled similary to other SCSI errors. Note that sense data
+ handled similarly to other SCSI errors. Note that sense data
  may indicate ATA bus error (e.g. Sense Key 04h HARDWARE ERROR
  &amp;&amp; ASC/ASCQ 47h/00h SCSI PARITY ERROR). In such
  cases, the error should be considered as an ATA bus error and
diff --git a/Documentation/DocBook/media_api.tmpl b/Documentation/DocBook/media_api.tmpl
index 4decb46bfa76..03f9a1f8d413 100644
--- a/Documentation/DocBook/media_api.tmpl
+++ b/Documentation/DocBook/media_api.tmpl
@@ -68,7 +68,7 @@
  several digital tv standards. While it is called as DVB API,
  in fact it covers several different video standards including
  DVB-T, DVB-S, DVB-C and ATSC. The API is currently being updated
- to documment support also for DVB-S2, ISDB-T and ISDB-S.</para>
+ to document support also for DVB-S2, ISDB-T and ISDB-S.</para>
  <para>The third part covers the Remote Controller API.</para>
  <para>The fourth part covers the Media Controller API.</para>
  <para>For additional information and for the latest development code,
diff --git a/Documentation/DocBook/mtdnand.tmpl b/Documentation/DocBook/mtdnand.tmpl
index cd11926e07c7..7da8f0402af5 100644
--- a/Documentation/DocBook/mtdnand.tmpl
+++ b/Documentation/DocBook/mtdnand.tmpl
@@ -91,7 +91,7 @@
  <listitem><para>
  [MTD Interface]</para><para>
  These functions provide the interface to the MTD kernel API.
- They are not replacable and provide functionality
+ They are not replaceable and provide functionality
  which is complete hardware independent.
  </para></listitem>
  <listitem><para>
@@ -100,14 +100,14 @@
  </para></listitem>
  <listitem><para>
  [GENERIC]</para><para>
- Generic functions are not replacable and provide functionality
+ Generic functions are not replaceable and provide functionality
  which is complete hardware independent.
  </para></listitem>
  <listitem><para>
  [DEFAULT]</para><para>
  Default functions provide hardware related functionality which is suitable
  for most of the implementations. These functions can be replaced by the
- board driver if neccecary. Those functions are called via pointers in the
+ board driver if necessary. Those functions are called via pointers in the
  NAND chip description structure. The board driver can set the functions which
  should be replaced by board dependent functions before calling nand_scan().
  If the function pointer is NULL on entry to nand_scan() then the pointer
@@ -264,7 +264,7 @@ static void board_hwcontrol(struct mtd_info *mtd, int cmd)
  is set up nand_scan() is called. This function tries to
  detect and identify then chip. If a chip is found all the
  internal data fields are initialized accordingly.
- The structure(s) have to be zeroed out first and then filled with the neccecary
+ The structure(s) have to be zeroed out first and then filled with the necessary
  information about the device.
  </para>
  <programlisting>
@@ -327,7 +327,7 @@ module_init(board_init);
  <sect1 id="Exit_function">
  <title>Exit function</title>
  <para>
- The exit function is only neccecary if the driver is
+ The exit function is only necessary if the driver is
  compiled as a module. It releases all resources which
  are held by the chip driver and unregisters the partitions
  in the MTD layer.
@@ -494,7 +494,7 @@ static void board_select_chip (struct mtd_info *mtd, int chip)
  in this case. See rts_from4.c and diskonchip.c for
  implementation reference. In those cases we must also
  use bad block tables on FLASH, because the ECC layout is
- interferring with the bad block marker positions.
+ interfering with the bad block marker positions.
  See bad block table support for details.
  </para>
  </sect2>
@@ -542,7 +542,7 @@ static void board_select_chip (struct mtd_info *mtd, int chip)
  <para>
  nand_scan() calls the function nand_default_bbt().
  nand_default_bbt() selects appropriate default
- bad block table desriptors depending on the chip information
+ bad block table descriptors depending on the chip information
  which was retrieved by nand_scan().
  </para>
  <para>
@@ -554,7 +554,7 @@ static void board_select_chip (struct mtd_info *mtd, int chip)
  <sect2 id="Flash_based_tables">
  <title>Flash based tables</title>
  <para>
- It may be desired or neccecary to keep a bad block table in FLASH.
+ It may be desired or necessary to keep a bad block table in FLASH.
  For AG-AND chips this is mandatory, as they have no factory marked
  bad blocks. They have factory marked good blocks. The marker pattern
  is erased when the block is erased to be reused. So in case of
@@ -565,10 +565,10 @@ static void board_select_chip (struct mtd_info *mtd, int chip)
  of the blocks.
  </para>
  <para>
- The blocks in which the tables are stored are procteted against
+ The blocks in which the tables are stored are protected against
  accidental access by marking them bad in the memory bad block
  table. The bad block table management functions are allowed
- to circumvernt this protection.
+ to circumvent this protection.
  </para>
  <para>
  The simplest way to activate the FLASH based bad block table support
@@ -592,7 +592,7 @@ static void board_select_chip (struct mtd_info *mtd, int chip)
  User defined tables are created by filling out a
  nand_bbt_descr structure and storing the pointer in the
  nand_chip structure member bbt_td before calling nand_scan().
- If a mirror table is neccecary a second structure must be
+ If a mirror table is necessary a second structure must be
  created and a pointer to this structure must be stored
  in bbt_md inside the nand_chip structure. If the bbt_md
  member is set to NULL then only the main table is used
@@ -666,7 +666,7 @@ static void board_select_chip (struct mtd_info *mtd, int chip)
  <para>
  For automatic placement some blocks must be reserved for
  bad block table storage. The number of reserved blocks is defined
- in the maxblocks member of the babd block table description structure.
+ in the maxblocks member of the bad block table description structure.
  Reserving 4 blocks for mirrored tables should be a reasonable number.
  This also limits the number of blocks which are scanned for the bad
  block table ident pattern.
@@ -1068,11 +1068,11 @@ in this page</entry>
  <chapter id="filesystems">
  <title>Filesystem support</title>
  <para>
- The NAND driver provides all neccecary functions for a
+ The NAND driver provides all necessary functions for a
  filesystem via the MTD interface.
  </para>
  <para>
- Filesystems must be aware of the NAND pecularities and
+ Filesystems must be aware of the NAND peculiarities and
  restrictions. One major restrictions of NAND Flash is, that you cannot
  write as often as you want to a page. The consecutive writes to a page,
  before erasing it again, are restricted to 1-3 writes, depending on the
@@ -1222,7 +1222,7 @@ in this page</entry>
 #define NAND_BBT_VERSION 0x00000100
 /* Create a bbt if none axists */
 #define NAND_BBT_CREATE 0x00000200
-/* Write bbt if neccecary */
+/* Write bbt if necessary */
 #define NAND_BBT_WRITE 0x00001000
 /* Read and write back block contents when writing bbt */
 #define NAND_BBT_SAVECONTENT 0x00002000
diff --git a/Documentation/DocBook/regulator.tmpl b/Documentation/DocBook/regulator.tmpl
index 346e552fa2cc..3b08a085d2c7 100644
--- a/Documentation/DocBook/regulator.tmpl
+++ b/Documentation/DocBook/regulator.tmpl
@@ -155,7 +155,7 @@
  release regulators. Functions are
  provided to <link linkend='API-regulator-enable'>enable</link>
  and <link linkend='API-regulator-disable'>disable</link> the
- reguator and to get and set the runtime parameters of the
+ regulator and to get and set the runtime parameters of the
  regulator.
  </para>
  <para>
diff --git a/Documentation/DocBook/uio-howto.tmpl b/Documentation/DocBook/uio-howto.tmpl
index 95618159e29b..bbe9c1fd5cef 100644
--- a/Documentation/DocBook/uio-howto.tmpl
+++ b/Documentation/DocBook/uio-howto.tmpl
@@ -766,10 +766,10 @@ framework to set up sysfs files for this region. Simply leave it alone.
  <para>
  The dynamic memory regions will be allocated when the UIO device file,
  <varname>/dev/uioX</varname> is opened.
- Simiar to static memory resources, the memory region information for
+ Similar to static memory resources, the memory region information for
  dynamic regions is then visible via sysfs at
  <varname>/sys/class/uio/uioX/maps/mapY/*</varname>.
- The dynmaic memory regions will be freed when the UIO device file is
+ The dynamic memory regions will be freed when the UIO device file is
  closed. When no processes are holding the device file open, the address
  returned to userspace is ~0.
  </para>
diff --git a/Documentation/DocBook/usb.tmpl b/Documentation/DocBook/usb.tmpl
index 8d57c1888dca..85fc0e28576f 100644
--- a/Documentation/DocBook/usb.tmpl
+++ b/Documentation/DocBook/usb.tmpl
@@ -153,7 +153,7 @@
 
  <listitem><para>The Linux USB API supports synchronous calls for
  control and bulk messages.
- It also supports asynchnous calls for all kinds of data transfer,
+ It also supports asynchronous calls for all kinds of data transfer,
  using request structures called "URBs" (USB Request Blocks).
  </para></listitem>
 
diff --git a/Documentation/DocBook/writing-an-alsa-driver.tmpl b/Documentation/DocBook/writing-an-alsa-driver.tmpl
index d0056a4e9c53..6f639d9530b5 100644
--- a/Documentation/DocBook/writing-an-alsa-driver.tmpl
+++ b/Documentation/DocBook/writing-an-alsa-driver.tmpl
@@ -5696,7 +5696,7 @@ struct _snd_pcm_runtime {
  suspending the PCM operations via
  <function>snd_pcm_suspend_all()</function> or
  <function>snd_pcm_suspend()</function>. It means that the PCM
- streams are already stoppped when the register snapshot is
+ streams are already stopped when the register snapshot is
  taken. But, remember that you don't have to restart the PCM
  stream in the resume callback. It'll be restarted via
  trigger call with <constant>SNDRV_PCM_TRIGGER_RESUME</constant>
diff --git a/Documentation/accounting/getdelays.c b/Documentation/accounting/getdelays.c
index c6a06b71594d..f40578026a04 100644
--- a/Documentation/accounting/getdelays.c
+++ b/Documentation/accounting/getdelays.c
@@ -314,6 +314,7 @@ int main(int argc, char *argv[])
314 break; 314 break;
315 case 'm': 315 case 'm':
316 strncpy(cpumask, optarg, sizeof(cpumask)); 316 strncpy(cpumask, optarg, sizeof(cpumask));
317 cpumask[sizeof(cpumask) - 1] = '\0';
317 maskset = 1; 318 maskset = 1;
318 printf("cpumask %s maskset %d\n", cpumask, maskset); 319 printf("cpumask %s maskset %d\n", cpumask, maskset);
319 break; 320 break;
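
The added line above terminates the buffer because strncpy() does not write a trailing NUL when the source is at least as long as the destination. A small standalone sketch of the same pattern, with made-up values:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        char cpumask[8];
        const char *optarg = "0-127,200-255";   /* longer than the buffer */

        strncpy(cpumask, optarg, sizeof(cpumask));
        cpumask[sizeof(cpumask) - 1] = '\0';    /* force termination, as in the patch */

        printf("cpumask %s\n", cpumask);        /* safely truncated */
        return 0;
    }
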
diff --git a/Documentation/acpi/enumeration.txt b/Documentation/acpi/enumeration.txt
index fd786ea13a1f..e182be5e3c83 100644
--- a/Documentation/acpi/enumeration.txt
+++ b/Documentation/acpi/enumeration.txt
@@ -60,12 +60,6 @@ If the driver needs to perform more complex initialization like getting and
60configuring GPIOs it can get its ACPI handle and extract this information 60configuring GPIOs it can get its ACPI handle and extract this information
61from ACPI tables. 61from ACPI tables.
62 62
63Currently the kernel is not able to automatically determine from which ACPI
64device it should make the corresponding platform device so we need to add
65the ACPI device explicitly to acpi_platform_device_ids list defined in
66drivers/acpi/acpi_platform.c. This limitation is only for the platform
67devices, SPI and I2C devices are created automatically as described below.
68
69DMA support 63DMA support
70~~~~~~~~~~~ 64~~~~~~~~~~~
71DMA controllers enumerated via ACPI should be registered in the system to 65DMA controllers enumerated via ACPI should be registered in the system to
diff --git a/Documentation/cpu-freq/intel-pstate.txt b/Documentation/cpu-freq/intel-pstate.txt
index e742d21dbd96..a69ffe1d54d5 100644
--- a/Documentation/cpu-freq/intel-pstate.txt
+++ b/Documentation/cpu-freq/intel-pstate.txt
@@ -15,10 +15,13 @@ New sysfs files for controlling P state selection have been added to
15/sys/devices/system/cpu/intel_pstate/ 15/sys/devices/system/cpu/intel_pstate/
16 16
17 max_perf_pct: limits the maximum P state that will be requested by 17 max_perf_pct: limits the maximum P state that will be requested by
18 the driver stated as a percentage of the available performance. 18 the driver stated as a percentage of the available performance. The
19 available (P states) performance may be reduced by the no_turbo
20 setting described below.
19 21
20 min_perf_pct: limits the minimum P state that will be requested by 22 min_perf_pct: limits the minimum P state that will be requested by
21 the driver stated as a percentage of the available performance. 23 the driver stated as a percentage of the max (non-turbo)
24 performance level.
22 25
23 no_turbo: limits the driver to selecting P states below the turbo 26 no_turbo: limits the driver to selecting P states below the turbo
24 frequency range. 27 frequency range.
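
These knobs are plain sysfs attributes, so they can be driven from a script or a small program. A hedged example follows; the path comes from the text above, while the 80% value and the error handling are illustrative only (root is required):

    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        const char *path = "/sys/devices/system/cpu/intel_pstate/max_perf_pct";
        const char *val = "80\n";               /* cap requests at 80% */
        int fd = open(path, O_WRONLY);

        if (fd < 0) {
            perror(path);
            return 1;
        }
        if (write(fd, val, strlen(val)) != (ssize_t)strlen(val)) {
            perror("write");
            close(fd);
            return 1;
        }
        close(fd);
        return 0;
    }
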
diff --git a/Documentation/devicetree/bindings/arm/armada-38x.txt b/Documentation/devicetree/bindings/arm/armada-38x.txt
index 11f2330a6554..ad9f8ed4d9bd 100644
--- a/Documentation/devicetree/bindings/arm/armada-38x.txt
+++ b/Documentation/devicetree/bindings/arm/armada-38x.txt
@@ -6,5 +6,15 @@ following property:
6 6
7Required root node property: 7Required root node property:
8 8
9 - compatible: must contain either "marvell,armada380" or 9 - compatible: must contain "marvell,armada380"
10 "marvell,armada385" depending on the variant of the SoC being used. 10
11In addition, boards using the Marvell Armada 385 SoC shall have the
12following property before the previous one:
13
14Required root node property:
15
16compatible: must contain "marvell,armada385"
17
18Example:
19
20compatible = "marvell,a385-rd", "marvell,armada385", "marvell,armada380";
diff --git a/Documentation/devicetree/bindings/arm/exynos/power_domain.txt b/Documentation/devicetree/bindings/arm/exynos/power_domain.txt
index 5216b419016a..8b4f7b7fe88b 100644
--- a/Documentation/devicetree/bindings/arm/exynos/power_domain.txt
+++ b/Documentation/devicetree/bindings/arm/exynos/power_domain.txt
@@ -9,6 +9,18 @@ Required Properties:
9- reg: physical base address of the controller and length of memory mapped 9- reg: physical base address of the controller and length of memory mapped
10 region. 10 region.
11 11
12Optional Properties:
13- clocks: List of clock handles. The parent clocks of the input clocks to the
14 devices in this power domain are set to oscclk before power gating
15 and restored back after powering on a domain. This is required for
16 all domains which are powered on and off and not required for unused
17 domains.
18- clock-names: The following clocks can be specified:
19 - oscclk: Oscillator clock.
20 - pclkN, clkN: Pairs of parent of input clock and input clock to the
21 devices in this power domain. Maximum of 4 pairs (N = 0 to 3)
22 are supported currently.
23
12Node of a device using power domains must have a samsung,power-domain property 24Node of a device using power domains must have a samsung,power-domain property
13defined with a phandle to respective power domain. 25defined with a phandle to respective power domain.
14 26
@@ -19,6 +31,14 @@ Example:
19 reg = <0x10023C00 0x10>; 31 reg = <0x10023C00 0x10>;
20 }; 32 };
21 33
34 mfc_pd: power-domain@10044060 {
35 compatible = "samsung,exynos4210-pd";
36 reg = <0x10044060 0x20>;
37 clocks = <&clock CLK_FIN_PLL>, <&clock CLK_MOUT_SW_ACLK333>,
38 <&clock CLK_MOUT_USER_ACLK333>;
39 clock-names = "oscclk", "pclk0", "clk0";
40 };
41
22Example of the node using power domain: 42Example of the node using power domain:
23 43
24 node { 44 node {
diff --git a/Documentation/devicetree/bindings/arm/l2cc.txt b/Documentation/devicetree/bindings/arm/l2cc.txt
index b513cb8196fe..af527ee111c2 100644
--- a/Documentation/devicetree/bindings/arm/l2cc.txt
+++ b/Documentation/devicetree/bindings/arm/l2cc.txt
@@ -40,6 +40,9 @@ Optional properties:
40- arm,filter-ranges : <start length> Starting address and length of window to 40- arm,filter-ranges : <start length> Starting address and length of window to
41 filter. Addresses in the filter window are directed to the M1 port. Other 41 filter. Addresses in the filter window are directed to the M1 port. Other
42 addresses will go to the M0 port. 42 addresses will go to the M0 port.
43- arm,io-coherent : indicates that the system is operating in an hardware
44 I/O coherent mode. Valid only when the arm,pl310-cache compatible
45 string is used.
43- interrupts : 1 combined interrupt. 46- interrupts : 1 combined interrupt.
44- cache-id-part: cache id part number to be used if it is not present 47- cache-id-part: cache id part number to be used if it is not present
45 on hardware 48 on hardware
diff --git a/Documentation/devicetree/bindings/arm/samsung/exynos-adc.txt b/Documentation/devicetree/bindings/arm/samsung/exynos-adc.txt
index 5d49f2b37f68..832fe8cc24d7 100644
--- a/Documentation/devicetree/bindings/arm/samsung/exynos-adc.txt
+++ b/Documentation/devicetree/bindings/arm/samsung/exynos-adc.txt
@@ -48,7 +48,7 @@ adc@12D10000 {
48 48
49 /* NTC thermistor is a hwmon device */ 49 /* NTC thermistor is a hwmon device */
50 ncp15wb473@0 { 50 ncp15wb473@0 {
51 compatible = "ntc,ncp15wb473"; 51 compatible = "murata,ncp15wb473";
52 pullup-uv = <1800000>; 52 pullup-uv = <1800000>;
53 pullup-ohm = <47000>; 53 pullup-ohm = <47000>;
54 pulldown-ohm = <0>; 54 pulldown-ohm = <0>;
diff --git a/Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt b/Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt
index f055515d2b62..366690cb86a3 100644
--- a/Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt
+++ b/Documentation/devicetree/bindings/cpufreq/cpufreq-cpu0.txt
@@ -8,10 +8,12 @@ Both required and optional properties listed below must be defined
8under node /cpus/cpu@0. 8under node /cpus/cpu@0.
9 9
10Required properties: 10Required properties:
11- operating-points: Refer to Documentation/devicetree/bindings/power/opp.txt 11- None
12 for details
13 12
14Optional properties: 13Optional properties:
14- operating-points: Refer to Documentation/devicetree/bindings/power/opp.txt for
15 details. OPPs *must* be supplied either via DT, i.e. this property, or
16 populated at runtime.
15- clock-latency: Specify the possible maximum transition latency for clock, 17- clock-latency: Specify the possible maximum transition latency for clock,
16 in unit of nanoseconds. 18 in unit of nanoseconds.
17- voltage-tolerance: Specify the CPU voltage tolerance in percentage. 19- voltage-tolerance: Specify the CPU voltage tolerance in percentage.
diff --git a/Documentation/devicetree/bindings/hwmon/ntc_thermistor.txt b/Documentation/devicetree/bindings/hwmon/ntc_thermistor.txt
index c6f66674f19c..b117b2e9e1a7 100644
--- a/Documentation/devicetree/bindings/hwmon/ntc_thermistor.txt
+++ b/Documentation/devicetree/bindings/hwmon/ntc_thermistor.txt
@@ -3,11 +3,19 @@ NTC Thermistor hwmon sensors
3 3
4Requires node properties: 4Requires node properties:
5- "compatible" value : one of 5- "compatible" value : one of
6 "ntc,ncp15wb473" 6 "murata,ncp15wb473"
7 "ntc,ncp18wb473" 7 "murata,ncp18wb473"
8 "ntc,ncp21wb473" 8 "murata,ncp21wb473"
9 "ntc,ncp03wb473" 9 "murata,ncp03wb473"
10 "ntc,ncp15wl333" 10 "murata,ncp15wl333"
11
12/* Usage of vendor name "ntc" is deprecated */
13<DEPRECATED> "ntc,ncp15wb473"
14<DEPRECATED> "ntc,ncp18wb473"
15<DEPRECATED> "ntc,ncp21wb473"
16<DEPRECATED> "ntc,ncp03wb473"
17<DEPRECATED> "ntc,ncp15wl333"
18
11- "pullup-uv" Pull up voltage in micro volts 19- "pullup-uv" Pull up voltage in micro volts
12- "pullup-ohm" Pull up resistor value in ohms 20- "pullup-ohm" Pull up resistor value in ohms
13- "pulldown-ohm" Pull down resistor value in ohms 21- "pulldown-ohm" Pull down resistor value in ohms
@@ -21,7 +29,7 @@ Read more about iio bindings at
21 29
22Example: 30Example:
23 ncp15wb473@0 { 31 ncp15wb473@0 {
24 compatible = "ntc,ncp15wb473"; 32 compatible = "murata,ncp15wb473";
25 pullup-uv = <1800000>; 33 pullup-uv = <1800000>;
26 pullup-ohm = <47000>; 34 pullup-ohm = <47000>;
27 pulldown-ohm = <0>; 35 pulldown-ohm = <0>;
diff --git a/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt b/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
index 64fd7dec1bbc..b3556609a06f 100644
--- a/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
+++ b/Documentation/devicetree/bindings/serial/renesas,sci-serial.txt
@@ -4,6 +4,13 @@ Required properties:
4 4
5 - compatible: Must contain one of the following: 5 - compatible: Must contain one of the following:
6 6
7 - "renesas,scifa-sh73a0" for SH73A0 (SH-Mobile AG5) SCIFA compatible UART.
8 - "renesas,scifb-sh73a0" for SH73A0 (SH-Mobile AG5) SCIFB compatible UART.
9 - "renesas,scifa-r8a73a4" for R8A73A4 (R-Mobile APE6) SCIFA compatible UART.
10 - "renesas,scifb-r8a73a4" for R8A73A4 (R-Mobile APE6) SCIFB compatible UART.
11 - "renesas,scifa-r8a7740" for R8A7740 (R-Mobile A1) SCIFA compatible UART.
12 - "renesas,scifb-r8a7740" for R8A7740 (R-Mobile A1) SCIFB compatible UART.
13 - "renesas,scif-r8a7778" for R8A7778 (R-Car M1) SCIF compatible UART.
7 - "renesas,scif-r8a7779" for R8A7779 (R-Car H1) SCIF compatible UART. 14 - "renesas,scif-r8a7779" for R8A7779 (R-Car H1) SCIF compatible UART.
8 - "renesas,scif-r8a7790" for R8A7790 (R-Car H2) SCIF compatible UART. 15 - "renesas,scif-r8a7790" for R8A7790 (R-Car H2) SCIF compatible UART.
9 - "renesas,scifa-r8a7790" for R8A7790 (R-Car H2) SCIFA compatible UART. 16 - "renesas,scifa-r8a7790" for R8A7790 (R-Car H2) SCIFA compatible UART.
diff --git a/Documentation/devicetree/bindings/spi/qcom,spi-qup.txt b/Documentation/devicetree/bindings/spi/qcom,spi-qup.txt
index b82a268f1bd4..bee6ff204baf 100644
--- a/Documentation/devicetree/bindings/spi/qcom,spi-qup.txt
+++ b/Documentation/devicetree/bindings/spi/qcom,spi-qup.txt
@@ -23,6 +23,12 @@ Optional properties:
23- spi-max-frequency: Specifies maximum SPI clock frequency, 23- spi-max-frequency: Specifies maximum SPI clock frequency,
24 Units - Hz. Definition as per 24 Units - Hz. Definition as per
25 Documentation/devicetree/bindings/spi/spi-bus.txt 25 Documentation/devicetree/bindings/spi/spi-bus.txt
26- num-cs: total number of chipselects
27- cs-gpios: should specify GPIOs used for chipselects.
28 The gpios will be referred to as reg = <index> in the SPI child
29 nodes. If unspecified, a single SPI device without a chip
30 select can be used.
31
26 32
27SPI slave nodes must be children of the SPI master node and can contain 33SPI slave nodes must be children of the SPI master node and can contain
28properties described in Documentation/devicetree/bindings/spi/spi-bus.txt 34properties described in Documentation/devicetree/bindings/spi/spi-bus.txt
diff --git a/Documentation/devicetree/bindings/vendor-prefixes.txt b/Documentation/devicetree/bindings/vendor-prefixes.txt
index 4d7f3758d1b4..46a311e728a8 100644
--- a/Documentation/devicetree/bindings/vendor-prefixes.txt
+++ b/Documentation/devicetree/bindings/vendor-prefixes.txt
@@ -83,6 +83,7 @@ mosaixtech Mosaix Technologies, Inc.
83moxa Moxa 83moxa Moxa
84mpl MPL AG 84mpl MPL AG
85mundoreader Mundo Reader S.L. 85mundoreader Mundo Reader S.L.
86murata Murata Manufacturing Co., Ltd.
86mxicy Macronix International Co., Ltd. 87mxicy Macronix International Co., Ltd.
87national National Semiconductor 88national National Semiconductor
88neonode Neonode Inc. 89neonode Neonode Inc.
diff --git a/Documentation/email-clients.txt b/Documentation/email-clients.txt
index 4e30ebaa9e5b..9af538be3751 100644
--- a/Documentation/email-clients.txt
+++ b/Documentation/email-clients.txt
@@ -1,6 +1,17 @@
1Email clients info for Linux 1Email clients info for Linux
2====================================================================== 2======================================================================
3 3
4Git
5----------------------------------------------------------------------
6These days most developers use `git send-email` instead of regular
7email clients. The man page for this is quite good. On the receiving
8end, maintainers use `git am` to apply the patches.
9
10If you are new to git then send your first patch to yourself. Save it
11as raw text including all the headers. Run `git am raw_email.txt` and
12then review the changelog with `git log`. When that works then send
13the patch to the appropriate mailing list(s).
14
4General Preferences 15General Preferences
5---------------------------------------------------------------------- 16----------------------------------------------------------------------
6Patches for the Linux kernel are submitted via email, preferably as 17Patches for the Linux kernel are submitted via email, preferably as
diff --git a/Documentation/hwmon/ntc_thermistor b/Documentation/hwmon/ntc_thermistor
index 3bfda94096fd..057b77029f26 100644
--- a/Documentation/hwmon/ntc_thermistor
+++ b/Documentation/hwmon/ntc_thermistor
@@ -1,7 +1,7 @@
1Kernel driver ntc_thermistor 1Kernel driver ntc_thermistor
2================= 2=================
3 3
4Supported thermistors: 4Supported thermistors from Murata:
5* Murata NTC Thermistors NCP15WB473, NCP18WB473, NCP21WB473, NCP03WB473, NCP15WL333 5* Murata NTC Thermistors NCP15WB473, NCP18WB473, NCP21WB473, NCP03WB473, NCP15WL333
6 Prefixes: 'ncp15wb473', 'ncp18wb473', 'ncp21wb473', 'ncp03wb473', 'ncp15wl333' 6 Prefixes: 'ncp15wb473', 'ncp18wb473', 'ncp21wb473', 'ncp03wb473', 'ncp15wl333'
7 Datasheet: Publicly available at Murata 7 Datasheet: Publicly available at Murata
@@ -15,9 +15,9 @@ Authors:
15Description 15Description
16----------- 16-----------
17 17
18The NTC thermistor is a simple thermistor that requires users to provide the 18The NTC (Negative Temperature Coefficient) thermistor is a simple thermistor
19resistance and lookup the corresponding compensation table to get the 19that requires users to provide the resistance and lookup the corresponding
20temperature input. 20compensation table to get the temperature input.
21 21
22The NTC driver provides lookup tables with a linear approximation function 22The NTC driver provides lookup tables with a linear approximation function
23and four circuit models with an option not to use any of the four models. 23and four circuit models with an option not to use any of the four models.
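
For readers unfamiliar with the circuit models mentioned above, the pullup-uv/pullup-ohm properties describe a resistor divider. A rough sketch of the resistance calculation, assuming the common configuration with the thermistor between the ADC input and ground (the measured voltage below is made up):

    #include <stdio.h>

    int main(void)
    {
        long pullup_uv = 1800000;       /* 1.8 V supply, from the binding example */
        long pullup_ohm = 47000;        /* 47 kOhm pull-up */
        long measured_uv = 600000;      /* assumed ADC reading, 0.6 V */

        /* Vout = Vin * Rntc / (Rpullup + Rntc)
         *   =>  Rntc = Rpullup * Vout / (Vin - Vout) */
        long long rntc = (long long)pullup_ohm * measured_uv /
                         (pullup_uv - measured_uv);

        printf("NTC resistance ~ %lld ohm\n", rntc);
        /* the driver then looks this value up in the per-part table
         * (e.g. ncp15wb473) to obtain the temperature */
        return 0;
    }
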
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 884904975d0b..b7fa2f599459 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2790,6 +2790,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
2790 leaf rcu_node structure. Useful for very large 2790 leaf rcu_node structure. Useful for very large
2791 systems. 2791 systems.
2792 2792
2793 rcutree.jiffies_till_sched_qs= [KNL]
2794 Set required age in jiffies for a
2795 given grace period before RCU starts
2796 soliciting quiescent-state help from
2797 rcu_note_context_switch().
2798
2793 rcutree.jiffies_till_first_fqs= [KNL] 2799 rcutree.jiffies_till_first_fqs= [KNL]
2794 Set delay from grace-period initialization to 2800 Set delay from grace-period initialization to
2795 first attempt to force quiescent states. 2801 first attempt to force quiescent states.
@@ -3130,6 +3136,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
3130 [KNL] Should the soft-lockup detector generate panics. 3136 [KNL] Should the soft-lockup detector generate panics.
3131 Format: <integer> 3137 Format: <integer>
3132 3138
3139 softlockup_all_cpu_backtrace=
3140 [KNL] Should the soft-lockup detector generate
3141 backtraces on all cpus.
3142 Format: <integer>
3143
3133 sonypi.*= [HW] Sony Programmable I/O Control Device driver 3144 sonypi.*= [HW] Sony Programmable I/O Control Device driver
3134 See Documentation/laptops/sonypi.txt 3145 See Documentation/laptops/sonypi.txt
3135 3146
@@ -3521,7 +3532,7 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
3521 the allocated input device; If set to 0, video driver 3532 the allocated input device; If set to 0, video driver
3522 will only send out the event without touching backlight 3533 will only send out the event without touching backlight
3523 brightness level. 3534 brightness level.
3524 default: 0 3535 default: 1
3525 3536
3526 virtio_mmio.device= 3537 virtio_mmio.device=
3527 [VMMIO] Memory mapped virtio (platform) device. 3538 [VMMIO] Memory mapped virtio (platform) device.
diff --git a/Documentation/laptops/00-INDEX b/Documentation/laptops/00-INDEX
index d13b9a9a9e00..d399ae1fc724 100644
--- a/Documentation/laptops/00-INDEX
+++ b/Documentation/laptops/00-INDEX
@@ -8,8 +8,8 @@ disk-shock-protection.txt
8 - information on hard disk shock protection. 8 - information on hard disk shock protection.
9dslm.c 9dslm.c
10 - Simple Disk Sleep Monitor program 10 - Simple Disk Sleep Monitor program
11hpfall.c 11freefall.c
12 - (HP) laptop accelerometer program for disk protection. 12 - (HP/DELL) laptop accelerometer program for disk protection.
13laptop-mode.txt 13laptop-mode.txt
14 - how to conserve battery power using laptop-mode. 14 - how to conserve battery power using laptop-mode.
15sony-laptop.txt 15sony-laptop.txt
diff --git a/Documentation/laptops/hpfall.c b/Documentation/laptops/freefall.c
index b85dbbac0499..aab2ff09e868 100644
--- a/Documentation/laptops/hpfall.c
+++ b/Documentation/laptops/freefall.c
@@ -1,7 +1,9 @@
1/* Disk protection for HP machines. 1/* Disk protection for HP/DELL machines.
2 * 2 *
3 * Copyright 2008 Eric Piel 3 * Copyright 2008 Eric Piel
4 * Copyright 2009 Pavel Machek <pavel@ucw.cz> 4 * Copyright 2009 Pavel Machek <pavel@ucw.cz>
5 * Copyright 2012 Sonal Santan
6 * Copyright 2014 Pali Rohár <pali.rohar@gmail.com>
5 * 7 *
6 * GPLv2. 8 * GPLv2.
7 */ 9 */
@@ -18,24 +20,31 @@
18#include <signal.h> 20#include <signal.h>
19#include <sys/mman.h> 21#include <sys/mman.h>
20#include <sched.h> 22#include <sched.h>
23#include <syslog.h>
21 24
22char unload_heads_path[64]; 25static int noled;
26static char unload_heads_path[64];
27static char device_path[32];
28static const char app_name[] = "FREE FALL";
23 29
24int set_unload_heads_path(char *device) 30static int set_unload_heads_path(char *device)
25{ 31{
26 char devname[64]; 32 char devname[64];
27 33
28 if (strlen(device) <= 5 || strncmp(device, "/dev/", 5) != 0) 34 if (strlen(device) <= 5 || strncmp(device, "/dev/", 5) != 0)
29 return -EINVAL; 35 return -EINVAL;
30 strncpy(devname, device + 5, sizeof(devname)); 36 strncpy(devname, device + 5, sizeof(devname) - 1);
37 strncpy(device_path, device, sizeof(device_path) - 1);
31 38
32 snprintf(unload_heads_path, sizeof(unload_heads_path) - 1, 39 snprintf(unload_heads_path, sizeof(unload_heads_path) - 1,
33 "/sys/block/%s/device/unload_heads", devname); 40 "/sys/block/%s/device/unload_heads", devname);
34 return 0; 41 return 0;
35} 42}
36int valid_disk(void) 43
44static int valid_disk(void)
37{ 45{
38 int fd = open(unload_heads_path, O_RDONLY); 46 int fd = open(unload_heads_path, O_RDONLY);
47
39 if (fd < 0) { 48 if (fd < 0) {
40 perror(unload_heads_path); 49 perror(unload_heads_path);
41 return 0; 50 return 0;
@@ -45,43 +54,54 @@ int valid_disk(void)
45 return 1; 54 return 1;
46} 55}
47 56
48void write_int(char *path, int i) 57static void write_int(char *path, int i)
49{ 58{
50 char buf[1024]; 59 char buf[1024];
51 int fd = open(path, O_RDWR); 60 int fd = open(path, O_RDWR);
61
52 if (fd < 0) { 62 if (fd < 0) {
53 perror("open"); 63 perror("open");
54 exit(1); 64 exit(1);
55 } 65 }
66
56 sprintf(buf, "%d", i); 67 sprintf(buf, "%d", i);
68
57 if (write(fd, buf, strlen(buf)) != strlen(buf)) { 69 if (write(fd, buf, strlen(buf)) != strlen(buf)) {
58 perror("write"); 70 perror("write");
59 exit(1); 71 exit(1);
60 } 72 }
73
61 close(fd); 74 close(fd);
62} 75}
63 76
64void set_led(int on) 77static void set_led(int on)
65{ 78{
79 if (noled)
80 return;
66 write_int("/sys/class/leds/hp::hddprotect/brightness", on); 81 write_int("/sys/class/leds/hp::hddprotect/brightness", on);
67} 82}
68 83
69void protect(int seconds) 84static void protect(int seconds)
70{ 85{
86 const char *str = (seconds == 0) ? "Unparked" : "Parked";
87
71 write_int(unload_heads_path, seconds*1000); 88 write_int(unload_heads_path, seconds*1000);
89 syslog(LOG_INFO, "%s %s disk head\n", str, device_path);
72} 90}
73 91
74int on_ac(void) 92static int on_ac(void)
75{ 93{
76// /sys/class/power_supply/AC0/online 94 /* /sys/class/power_supply/AC0/online */
95 return 1;
77} 96}
78 97
79int lid_open(void) 98static int lid_open(void)
80{ 99{
81// /proc/acpi/button/lid/LID/state 100 /* /proc/acpi/button/lid/LID/state */
101 return 1;
82} 102}
83 103
84void ignore_me(void) 104static void ignore_me(int signum)
85{ 105{
86 protect(0); 106 protect(0);
87 set_led(0); 107 set_led(0);
@@ -90,6 +110,7 @@ void ignore_me(void)
90int main(int argc, char **argv) 110int main(int argc, char **argv)
91{ 111{
92 int fd, ret; 112 int fd, ret;
113 struct stat st;
93 struct sched_param param; 114 struct sched_param param;
94 115
95 if (argc == 1) 116 if (argc == 1)
@@ -111,7 +132,16 @@ int main(int argc, char **argv)
111 return EXIT_FAILURE; 132 return EXIT_FAILURE;
112 } 133 }
113 134
114 daemon(0, 0); 135 if (stat("/sys/class/leds/hp::hddprotect/brightness", &st))
136 noled = 1;
137
138 if (daemon(0, 0) != 0) {
139 perror("daemon");
140 return EXIT_FAILURE;
141 }
142
143 openlog(app_name, LOG_CONS | LOG_PID | LOG_NDELAY, LOG_LOCAL1);
144
115 param.sched_priority = sched_get_priority_max(SCHED_FIFO); 145 param.sched_priority = sched_get_priority_max(SCHED_FIFO);
116 sched_setscheduler(0, SCHED_FIFO, &param); 146 sched_setscheduler(0, SCHED_FIFO, &param);
117 mlockall(MCL_CURRENT|MCL_FUTURE); 147 mlockall(MCL_CURRENT|MCL_FUTURE);
@@ -141,6 +171,7 @@ int main(int argc, char **argv)
141 alarm(20); 171 alarm(20);
142 } 172 }
143 173
174 closelog();
144 close(fd); 175 close(fd);
145 return EXIT_SUCCESS; 176 return EXIT_SUCCESS;
146} 177}
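
One detail in the hunk above is the new ignore_me(int signum) prototype, which is the signature signal handlers are expected to have. A minimal sketch of registering such a handler for SIGALRM; the handler body is illustrative and not the daemon's real logic:

    #include <signal.h>
    #include <stdio.h>
    #include <unistd.h>

    static volatile sig_atomic_t fired;

    static void ignore_me(int signum)
    {
        (void)signum;           /* unused, as in the patch */
        fired = 1;
    }

    int main(void)
    {
        signal(SIGALRM, ignore_me);
        alarm(1);               /* SIGALRM in one second */
        pause();                /* wait for it */
        printf("alarm fired: %d\n", (int)fired);
        return 0;
    }
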
diff --git a/Documentation/memory-hotplug.txt b/Documentation/memory-hotplug.txt
index f304edb8fbe7..45134dc23854 100644
--- a/Documentation/memory-hotplug.txt
+++ b/Documentation/memory-hotplug.txt
@@ -209,15 +209,12 @@ If memory device is found, memory hotplug code will be called.
209 209
2104.2 Notify memory hot-add event by hand 2104.2 Notify memory hot-add event by hand
211------------ 211------------
212On powerpc, the firmware does not notify a memory hotplug event to the kernel. 212On some architectures, the firmware may not notify the kernel of a memory
213Therefore, "probe" interface is supported to notify the event to the kernel. 213hotplug event. Therefore, the memory "probe" interface is supported to
214This interface depends on CONFIG_ARCH_MEMORY_PROBE. 214explicitly notify the kernel. This interface depends on
215 215CONFIG_ARCH_MEMORY_PROBE and can be configured on powerpc, sh, and x86
216CONFIG_ARCH_MEMORY_PROBE is supported on powerpc only. On x86, this config 216if hotplug is supported, although for x86 this should be handled by ACPI
217option is disabled by default since ACPI notifies a memory hotplug event to 217notification.
218the kernel, which performs its hotplug operation as the result. Please
219enable this option if you need the "probe" interface for testing purposes
220on x86.
221 218
222Probe interface is located at 219Probe interface is located at
223/sys/devices/system/memory/probe 220/sys/devices/system/memory/probe
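
A hedged sketch of using the probe interface described above; the physical address is purely illustrative and must correspond to a real, section-aligned memory block on the target (requires CONFIG_ARCH_MEMORY_PROBE and root):

    #include <stdio.h>

    int main(void)
    {
        const char *probe = "/sys/devices/system/memory/probe";
        FILE *f = fopen(probe, "w");

        if (!f) {
            perror(probe);
            return 1;
        }
        /* assumed start address of the new memory block */
        fprintf(f, "0x100000000\n");
        if (fclose(f) != 0) {   /* write errors surface here */
            perror("probe write");
            return 1;
        }
        return 0;
    }
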
diff --git a/Documentation/ptp/testptp.c b/Documentation/ptp/testptp.c
index f1ac2dae999e..ba1d50200c46 100644
--- a/Documentation/ptp/testptp.c
+++ b/Documentation/ptp/testptp.c
@@ -17,6 +17,7 @@
17 * along with this program; if not, write to the Free Software 17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */ 19 */
20#define _GNU_SOURCE
20#include <errno.h> 21#include <errno.h>
21#include <fcntl.h> 22#include <fcntl.h>
22#include <inttypes.h> 23#include <inttypes.h>
@@ -46,12 +47,14 @@
46#define CLOCK_INVALID -1 47#define CLOCK_INVALID -1
47#endif 48#endif
48 49
49/* When glibc offers the syscall, this will go away. */ 50/* clock_adjtime is not available in GLIBC < 2.14 */
51#if !__GLIBC_PREREQ(2, 14)
50#include <sys/syscall.h> 52#include <sys/syscall.h>
51static int clock_adjtime(clockid_t id, struct timex *tx) 53static int clock_adjtime(clockid_t id, struct timex *tx)
52{ 54{
53 return syscall(__NR_clock_adjtime, id, tx); 55 return syscall(__NR_clock_adjtime, id, tx);
54} 56}
57#endif
55 58
56static clockid_t get_clockid(int fd) 59static clockid_t get_clockid(int fd)
57{ 60{
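
Once glibc (2.14 or later) provides clock_adjtime(), it can be called directly; with modes set to zero the call only queries the clock. A small sketch against CLOCK_REALTIME (testptp.c itself derives a dynamic clockid from the /dev/ptpX descriptor instead):

    #include <stdio.h>
    #include <sys/timex.h>
    #include <time.h>

    int main(void)
    {
        struct timex tx = { 0 };        /* modes = 0: query only */
        int state = clock_adjtime(CLOCK_REALTIME, &tx);

        if (state < 0) {
            perror("clock_adjtime");
            return 1;
        }
        printf("clock state %d, freq offset %ld (scaled ppm)\n",
               state, tx.freq);
        return 0;
    }
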
diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt
index 85c362d8ea34..d1ab5e17eb13 100644
--- a/Documentation/sound/alsa/HD-Audio-Models.txt
+++ b/Documentation/sound/alsa/HD-Audio-Models.txt
@@ -286,6 +286,11 @@ STAC92HD83*
286 hp-inv-led HP with broken BIOS for inverted mute LED 286 hp-inv-led HP with broken BIOS for inverted mute LED
287 auto BIOS setup (default) 287 auto BIOS setup (default)
288 288
289STAC92HD95
290==========
291 hp-led LED support for HP laptops
292 hp-bass Bass HPF setup for HP Spectre 13
293
289STAC9872 294STAC9872
290======== 295========
291 vaio VAIO laptop without SPDIF 296 vaio VAIO laptop without SPDIF
diff --git a/Documentation/sysctl/kernel.txt b/Documentation/sysctl/kernel.txt
index 708bb7f1b7e0..c14374e71775 100644
--- a/Documentation/sysctl/kernel.txt
+++ b/Documentation/sysctl/kernel.txt
@@ -75,6 +75,7 @@ show up in /proc/sys/kernel:
75- shmall 75- shmall
76- shmmax [ sysv ipc ] 76- shmmax [ sysv ipc ]
77- shmmni 77- shmmni
78- softlockup_all_cpu_backtrace
78- stop-a [ SPARC only ] 79- stop-a [ SPARC only ]
79- sysrq ==> Documentation/sysrq.txt 80- sysrq ==> Documentation/sysrq.txt
80- sysctl_writes_strict 81- sysctl_writes_strict
@@ -783,6 +784,22 @@ via the /proc/sys interface:
783 784
784============================================================== 785==============================================================
785 786
787softlockup_all_cpu_backtrace:
788
789This value controls the soft lockup detector thread's behavior
790when a soft lockup condition is detected as to whether or not
791to gather further debug information. If enabled, each cpu will
792be issued an NMI and instructed to capture stack trace.
793
794This feature is only applicable for architectures which support
795NMI.
796
7970: do nothing. This is the default behavior.
798
7991: on detection capture more debug information.
800
801==============================================================
802
786tainted: 803tainted:
787 804
788Non-zero if the kernel has been tainted. Numeric values, which 805Non-zero if the kernel has been tainted. Numeric values, which
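
The softlockup_all_cpu_backtrace sysctl documented above lives under /proc/sys/kernel and takes 0 or 1. A short sketch that reads the current setting and then enables it (the path comes from the text above; the write needs root):

    #include <stdio.h>

    int main(void)
    {
        const char *path = "/proc/sys/kernel/softlockup_all_cpu_backtrace";
        int cur = -1;
        FILE *f = fopen(path, "r");

        if (f) {
            if (fscanf(f, "%d", &cur) != 1)
                cur = -1;
            fclose(f);
        }
        printf("current value: %d\n", cur);

        f = fopen(path, "w");
        if (!f) {
            perror(path);
            return 1;
        }
        fprintf(f, "1\n");      /* 1: NMI backtraces on all CPUs on lockup */
        fclose(f);
        return 0;
    }
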
diff --git a/Documentation/sysctl/vm.txt b/Documentation/sysctl/vm.txt
index bd4b34c03738..4415aa915681 100644
--- a/Documentation/sysctl/vm.txt
+++ b/Documentation/sysctl/vm.txt
@@ -702,7 +702,8 @@ The batch value of each per cpu pagelist is also updated as a result. It is
702set to pcp->high/4. The upper limit of batch is (PAGE_SHIFT * 8) 702set to pcp->high/4. The upper limit of batch is (PAGE_SHIFT * 8)
703 703
704The initial value is zero. Kernel does not use this value at boot time to set 704The initial value is zero. Kernel does not use this value at boot time to set
705the high water marks for each per cpu page list. 705the high water marks for each per cpu page list. If the user writes '0' to this
706sysctl, it will revert to this default behavior.
706 707
707============================================================== 708==============================================================
708 709
diff --git a/Documentation/trace/postprocess/trace-vmscan-postprocess.pl b/Documentation/trace/postprocess/trace-vmscan-postprocess.pl
index 00e425faa2fd..78c9a7b2b58f 100644
--- a/Documentation/trace/postprocess/trace-vmscan-postprocess.pl
+++ b/Documentation/trace/postprocess/trace-vmscan-postprocess.pl
@@ -47,7 +47,6 @@ use constant HIGH_KSWAPD_REWAKEUP => 21;
47use constant HIGH_NR_SCANNED => 22; 47use constant HIGH_NR_SCANNED => 22;
48use constant HIGH_NR_TAKEN => 23; 48use constant HIGH_NR_TAKEN => 23;
49use constant HIGH_NR_RECLAIMED => 24; 49use constant HIGH_NR_RECLAIMED => 24;
50use constant HIGH_NR_CONTIG_DIRTY => 25;
51 50
52my %perprocesspid; 51my %perprocesspid;
53my %perprocess; 52my %perprocess;
@@ -105,7 +104,7 @@ my $regex_direct_end_default = 'nr_reclaimed=([0-9]*)';
105my $regex_kswapd_wake_default = 'nid=([0-9]*) order=([0-9]*)'; 104my $regex_kswapd_wake_default = 'nid=([0-9]*) order=([0-9]*)';
106my $regex_kswapd_sleep_default = 'nid=([0-9]*)'; 105my $regex_kswapd_sleep_default = 'nid=([0-9]*)';
107my $regex_wakeup_kswapd_default = 'nid=([0-9]*) zid=([0-9]*) order=([0-9]*)'; 106my $regex_wakeup_kswapd_default = 'nid=([0-9]*) zid=([0-9]*) order=([0-9]*)';
108my $regex_lru_isolate_default = 'isolate_mode=([0-9]*) order=([0-9]*) nr_requested=([0-9]*) nr_scanned=([0-9]*) nr_taken=([0-9]*) contig_taken=([0-9]*) contig_dirty=([0-9]*) contig_failed=([0-9]*)'; 107my $regex_lru_isolate_default = 'isolate_mode=([0-9]*) order=([0-9]*) nr_requested=([0-9]*) nr_scanned=([0-9]*) nr_taken=([0-9]*) file=([0-9]*)';
109my $regex_lru_shrink_inactive_default = 'nid=([0-9]*) zid=([0-9]*) nr_scanned=([0-9]*) nr_reclaimed=([0-9]*) priority=([0-9]*) flags=([A-Z_|]*)'; 108my $regex_lru_shrink_inactive_default = 'nid=([0-9]*) zid=([0-9]*) nr_scanned=([0-9]*) nr_reclaimed=([0-9]*) priority=([0-9]*) flags=([A-Z_|]*)';
110my $regex_lru_shrink_active_default = 'lru=([A-Z_]*) nr_scanned=([0-9]*) nr_rotated=([0-9]*) priority=([0-9]*)'; 109my $regex_lru_shrink_active_default = 'lru=([A-Z_]*) nr_scanned=([0-9]*) nr_rotated=([0-9]*) priority=([0-9]*)';
111my $regex_writepage_default = 'page=([0-9a-f]*) pfn=([0-9]*) flags=([A-Z_|]*)'; 110my $regex_writepage_default = 'page=([0-9a-f]*) pfn=([0-9]*) flags=([A-Z_|]*)';
@@ -200,7 +199,7 @@ $regex_lru_isolate = generate_traceevent_regex(
200 $regex_lru_isolate_default, 199 $regex_lru_isolate_default,
201 "isolate_mode", "order", 200 "isolate_mode", "order",
202 "nr_requested", "nr_scanned", "nr_taken", 201 "nr_requested", "nr_scanned", "nr_taken",
203 "contig_taken", "contig_dirty", "contig_failed"); 202 "file");
204$regex_lru_shrink_inactive = generate_traceevent_regex( 203$regex_lru_shrink_inactive = generate_traceevent_regex(
205 "vmscan/mm_vmscan_lru_shrink_inactive", 204 "vmscan/mm_vmscan_lru_shrink_inactive",
206 $regex_lru_shrink_inactive_default, 205 $regex_lru_shrink_inactive_default,
@@ -375,7 +374,6 @@ EVENT_PROCESS:
375 } 374 }
376 my $isolate_mode = $1; 375 my $isolate_mode = $1;
377 my $nr_scanned = $4; 376 my $nr_scanned = $4;
378 my $nr_contig_dirty = $7;
379 377
380 # To closer match vmstat scanning statistics, only count isolate_both 378 # To closer match vmstat scanning statistics, only count isolate_both
381 # and isolate_inactive as scanning. isolate_active is rotation 379 # and isolate_inactive as scanning. isolate_active is rotation
@@ -385,7 +383,6 @@ EVENT_PROCESS:
385 if ($isolate_mode != 2) { 383 if ($isolate_mode != 2) {
386 $perprocesspid{$process_pid}->{HIGH_NR_SCANNED} += $nr_scanned; 384 $perprocesspid{$process_pid}->{HIGH_NR_SCANNED} += $nr_scanned;
387 } 385 }
388 $perprocesspid{$process_pid}->{HIGH_NR_CONTIG_DIRTY} += $nr_contig_dirty;
389 } elsif ($tracepoint eq "mm_vmscan_lru_shrink_inactive") { 386 } elsif ($tracepoint eq "mm_vmscan_lru_shrink_inactive") {
390 $details = $6; 387 $details = $6;
391 if ($details !~ /$regex_lru_shrink_inactive/o) { 388 if ($details !~ /$regex_lru_shrink_inactive/o) {
@@ -539,13 +536,6 @@ sub dump_stats {
539 } 536 }
540 } 537 }
541 } 538 }
542 if ($stats{$process_pid}->{HIGH_NR_CONTIG_DIRTY}) {
543 print " ";
544 my $count = $stats{$process_pid}->{HIGH_NR_CONTIG_DIRTY};
545 if ($count != 0) {
546 print "contig-dirty=$count ";
547 }
548 }
549 539
550 print "\n"; 540 print "\n";
551 } 541 }
diff --git a/MAINTAINERS b/MAINTAINERS
index 3f2e171047b9..61a8f486306b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -156,7 +156,6 @@ F: drivers/net/hamradio/6pack.c
156 156
1578169 10/100/1000 GIGABIT ETHERNET DRIVER 1578169 10/100/1000 GIGABIT ETHERNET DRIVER
158M: Realtek linux nic maintainers <nic_swsd@realtek.com> 158M: Realtek linux nic maintainers <nic_swsd@realtek.com>
159M: Francois Romieu <romieu@fr.zoreil.com>
160L: netdev@vger.kernel.org 159L: netdev@vger.kernel.org
161S: Maintained 160S: Maintained
162F: drivers/net/ethernet/realtek/r8169.c 161F: drivers/net/ethernet/realtek/r8169.c
@@ -943,16 +942,10 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
943S: Maintained 942S: Maintained
944T: git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git 943T: git git://git.kernel.org/pub/scm/linux/kernel/git/shawnguo/linux.git
945F: arch/arm/mach-imx/ 944F: arch/arm/mach-imx/
945F: arch/arm/mach-mxs/
946F: arch/arm/boot/dts/imx* 946F: arch/arm/boot/dts/imx*
947F: arch/arm/configs/imx*_defconfig 947F: arch/arm/configs/imx*_defconfig
948 948
949ARM/FREESCALE MXS ARM ARCHITECTURE
950M: Shawn Guo <shawn.guo@linaro.org>
951L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
952S: Maintained
953T: git git://git.linaro.org/people/shawnguo/linux-2.6.git
954F: arch/arm/mach-mxs/
955
956ARM/GLOMATION GESBC9312SX MACHINE SUPPORT 949ARM/GLOMATION GESBC9312SX MACHINE SUPPORT
957M: Lennert Buytenhek <kernel@wantstofly.org> 950M: Lennert Buytenhek <kernel@wantstofly.org>
958L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 951L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1052,9 +1045,33 @@ M: Santosh Shilimkar <santosh.shilimkar@ti.com>
1052L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1045L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
1053S: Maintained 1046S: Maintained
1054F: arch/arm/mach-keystone/ 1047F: arch/arm/mach-keystone/
1055F: drivers/clk/keystone/
1056T: git git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux-keystone.git 1048T: git git://git.kernel.org/pub/scm/linux/kernel/git/ssantosh/linux-keystone.git
1057 1049
1050ARM/TEXAS INSTRUMENT KEYSTONE CLOCK FRAMEWORK
1051M: Santosh Shilimkar <santosh.shilimkar@ti.com>
1052L: linux-kernel@vger.kernel.org
1053S: Maintained
1054F: drivers/clk/keystone/
1055
1056ARM/TEXAS INSTRUMENT KEYSTONE ClOCKSOURCE
1057M: Santosh Shilimkar <santosh.shilimkar@ti.com>
1058L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
1059L: linux-kernel@vger.kernel.org
1060S: Maintained
1061F: drivers/clocksource/timer-keystone.c
1062
1063ARM/TEXAS INSTRUMENT KEYSTONE RESET DRIVER
1064M: Santosh Shilimkar <santosh.shilimkar@ti.com>
1065L: linux-kernel@vger.kernel.org
1066S: Maintained
1067F: drivers/power/reset/keystone-reset.c
1068
1069ARM/TEXAS INSTRUMENT AEMIF/EMIF DRIVERS
1070M: Santosh Shilimkar <santosh.shilimkar@ti.com>
1071L: linux-kernel@vger.kernel.org
1072S: Maintained
1073F: drivers/memory/*emif*
1074
1058ARM/LOGICPD PXA270 MACHINE SUPPORT 1075ARM/LOGICPD PXA270 MACHINE SUPPORT
1059M: Lennert Buytenhek <kernel@wantstofly.org> 1076M: Lennert Buytenhek <kernel@wantstofly.org>
1060L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 1077L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1296,6 +1313,20 @@ W: http://oss.renesas.com
1296Q: http://patchwork.kernel.org/project/linux-sh/list/ 1313Q: http://patchwork.kernel.org/project/linux-sh/list/
1297T: git git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas.git next 1314T: git git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas.git next
1298S: Supported 1315S: Supported
1316F: arch/arm/boot/dts/emev2*
1317F: arch/arm/boot/dts/r7s*
1318F: arch/arm/boot/dts/r8a*
1319F: arch/arm/boot/dts/sh*
1320F: arch/arm/configs/ape6evm_defconfig
1321F: arch/arm/configs/armadillo800eva_defconfig
1322F: arch/arm/configs/bockw_defconfig
1323F: arch/arm/configs/genmai_defconfig
1324F: arch/arm/configs/koelsch_defconfig
1325F: arch/arm/configs/kzm9g_defconfig
1326F: arch/arm/configs/lager_defconfig
1327F: arch/arm/configs/mackerel_defconfig
1328F: arch/arm/configs/marzen_defconfig
1329F: arch/arm/configs/shmobile_defconfig
1299F: arch/arm/mach-shmobile/ 1330F: arch/arm/mach-shmobile/
1300F: drivers/sh/ 1331F: drivers/sh/
1301 1332
@@ -2917,6 +2948,9 @@ L: linux-doc@vger.kernel.org
2917T: quilt http://www.infradead.org/~rdunlap/Doc/patches/ 2948T: quilt http://www.infradead.org/~rdunlap/Doc/patches/
2918S: Maintained 2949S: Maintained
2919F: Documentation/ 2950F: Documentation/
2951X: Documentation/ABI/
2952X: Documentation/devicetree/
2953X: Documentation/[a-z][a-z]_[A-Z][A-Z]/
2920 2954
2921DOUBLETALK DRIVER 2955DOUBLETALK DRIVER
2922M: "James R. Van Zandt" <jrv@vanzandt.mv.com> 2956M: "James R. Van Zandt" <jrv@vanzandt.mv.com>
@@ -3189,14 +3223,6 @@ L: linux-scsi@vger.kernel.org
3189S: Maintained 3223S: Maintained
3190F: drivers/scsi/eata_pio.* 3224F: drivers/scsi/eata_pio.*
3191 3225
3192EBTABLES
3193L: netfilter-devel@vger.kernel.org
3194W: http://ebtables.sourceforge.net/
3195S: Orphan
3196F: include/linux/netfilter_bridge/ebt_*.h
3197F: include/uapi/linux/netfilter_bridge/ebt_*.h
3198F: net/bridge/netfilter/ebt*.c
3199
3200EC100 MEDIA DRIVER 3226EC100 MEDIA DRIVER
3201M: Antti Palosaari <crope@iki.fi> 3227M: Antti Palosaari <crope@iki.fi>
3202L: linux-media@vger.kernel.org 3228L: linux-media@vger.kernel.org
@@ -4484,8 +4510,7 @@ S: Supported
4484F: drivers/idle/i7300_idle.c 4510F: drivers/idle/i7300_idle.c
4485 4511
4486IEEE 802.15.4 SUBSYSTEM 4512IEEE 802.15.4 SUBSYSTEM
4487M: Alexander Smirnov <alex.bluesman.smirnov@gmail.com> 4513M: Alexander Aring <alex.aring@gmail.com>
4488M: Dmitry Eremin-Solenikov <dbaryshkov@gmail.com>
4489L: linux-zigbee-devel@lists.sourceforge.net (moderated for non-subscribers) 4514L: linux-zigbee-devel@lists.sourceforge.net (moderated for non-subscribers)
4490W: http://apps.sourceforge.net/trac/linux-zigbee 4515W: http://apps.sourceforge.net/trac/linux-zigbee
4491T: git git://git.kernel.org/pub/scm/linux/kernel/git/lowpan/lowpan.git 4516T: git git://git.kernel.org/pub/scm/linux/kernel/git/lowpan/lowpan.git
@@ -5517,10 +5542,11 @@ S: Maintained
5517F: arch/arm/mach-lpc32xx/ 5542F: arch/arm/mach-lpc32xx/
5518 5543
5519LSILOGIC MPT FUSION DRIVERS (FC/SAS/SPI) 5544LSILOGIC MPT FUSION DRIVERS (FC/SAS/SPI)
5520M: Nagalakshmi Nandigama <Nagalakshmi.Nandigama@lsi.com> 5545M: Nagalakshmi Nandigama <nagalakshmi.nandigama@avagotech.com>
5521M: Sreekanth Reddy <Sreekanth.Reddy@lsi.com> 5546M: Praveen Krishnamoorthy <praveen.krishnamoorthy@avagotech.com>
5522M: support@lsi.com 5547M: Sreekanth Reddy <sreekanth.reddy@avagotech.com>
5523L: DL-MPTFusionLinux@lsi.com 5548M: Abhijit Mahajan <abhijit.mahajan@avagotech.com>
5549L: MPT-FusionLinux.pdl@avagotech.com
5524L: linux-scsi@vger.kernel.org 5550L: linux-scsi@vger.kernel.org
5525W: http://www.lsilogic.com/support 5551W: http://www.lsilogic.com/support
5526S: Supported 5552S: Supported
@@ -6105,12 +6131,11 @@ F: Documentation/networking/s2io.txt
6105F: Documentation/networking/vxge.txt 6131F: Documentation/networking/vxge.txt
6106F: drivers/net/ethernet/neterion/ 6132F: drivers/net/ethernet/neterion/
6107 6133
6108NETFILTER/IPTABLES 6134NETFILTER ({IP,IP6,ARP,EB,NF}TABLES)
6109M: Pablo Neira Ayuso <pablo@netfilter.org> 6135M: Pablo Neira Ayuso <pablo@netfilter.org>
6110M: Patrick McHardy <kaber@trash.net> 6136M: Patrick McHardy <kaber@trash.net>
6111M: Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> 6137M: Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>
6112L: netfilter-devel@vger.kernel.org 6138L: netfilter-devel@vger.kernel.org
6113L: netfilter@vger.kernel.org
6114L: coreteam@netfilter.org 6139L: coreteam@netfilter.org
6115W: http://www.netfilter.org/ 6140W: http://www.netfilter.org/
6116W: http://www.iptables.org/ 6141W: http://www.iptables.org/
@@ -6774,7 +6799,7 @@ F: arch/x86/kernel/quirks.c
6774 6799
6775PCI DRIVER FOR IMX6 6800PCI DRIVER FOR IMX6
6776M: Richard Zhu <r65037@freescale.com> 6801M: Richard Zhu <r65037@freescale.com>
6777M: Shawn Guo <shawn.guo@linaro.org> 6802M: Shawn Guo <shawn.guo@freescale.com>
6778L: linux-pci@vger.kernel.org 6803L: linux-pci@vger.kernel.org
6779L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 6804L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
6780S: Maintained 6805S: Maintained
@@ -8196,13 +8221,15 @@ S: Maintained
8196F: drivers/usb/misc/sisusbvga/ 8221F: drivers/usb/misc/sisusbvga/
8197 8222
8198SLAB ALLOCATOR 8223SLAB ALLOCATOR
8199M: Christoph Lameter <cl@linux-foundation.org> 8224M: Christoph Lameter <cl@linux.com>
8200M: Pekka Enberg <penberg@kernel.org> 8225M: Pekka Enberg <penberg@kernel.org>
8201M: Matt Mackall <mpm@selenic.com> 8226M: David Rientjes <rientjes@google.com>
8227M: Joonsoo Kim <iamjoonsoo.kim@lge.com>
8228M: Andrew Morton <akpm@linux-foundation.org>
8202L: linux-mm@kvack.org 8229L: linux-mm@kvack.org
8203S: Maintained 8230S: Maintained
8204F: include/linux/sl?b*.h 8231F: include/linux/sl?b*.h
8205F: mm/sl?b.c 8232F: mm/sl?b*
8206 8233
8207SLEEPABLE READ-COPY UPDATE (SRCU) 8234SLEEPABLE READ-COPY UPDATE (SRCU)
8208M: Lai Jiangshan <laijs@cn.fujitsu.com> 8235M: Lai Jiangshan <laijs@cn.fujitsu.com>
@@ -8969,7 +8996,7 @@ F: drivers/media/radio/radio-raremono.c
8969 8996
8970THERMAL 8997THERMAL
8971M: Zhang Rui <rui.zhang@intel.com> 8998M: Zhang Rui <rui.zhang@intel.com>
8972M: Eduardo Valentin <eduardo.valentin@ti.com> 8999M: Eduardo Valentin <edubezval@gmail.com>
8973L: linux-pm@vger.kernel.org 9000L: linux-pm@vger.kernel.org
8974T: git git://git.kernel.org/pub/scm/linux/kernel/git/rzhang/linux.git 9001T: git git://git.kernel.org/pub/scm/linux/kernel/git/rzhang/linux.git
8975T: git git://git.kernel.org/pub/scm/linux/kernel/git/evalenti/linux-soc-thermal.git 9002T: git git://git.kernel.org/pub/scm/linux/kernel/git/evalenti/linux-soc-thermal.git
@@ -8996,7 +9023,7 @@ S: Maintained
8996F: drivers/platform/x86/thinkpad_acpi.c 9023F: drivers/platform/x86/thinkpad_acpi.c
8997 9024
8998TI BANDGAP AND THERMAL DRIVER 9025TI BANDGAP AND THERMAL DRIVER
8999M: Eduardo Valentin <eduardo.valentin@ti.com> 9026M: Eduardo Valentin <edubezval@gmail.com>
9000L: linux-pm@vger.kernel.org 9027L: linux-pm@vger.kernel.org
9001S: Supported 9028S: Supported
9002F: drivers/thermal/ti-soc-thermal/ 9029F: drivers/thermal/ti-soc-thermal/
@@ -9410,12 +9437,6 @@ S: Maintained
9410F: drivers/usb/host/isp116x* 9437F: drivers/usb/host/isp116x*
9411F: include/linux/usb/isp116x.h 9438F: include/linux/usb/isp116x.h
9412 9439
9413USB KAWASAKI LSI DRIVER
9414M: Oliver Neukum <oliver@neukum.org>
9415L: linux-usb@vger.kernel.org
9416S: Maintained
9417F: drivers/usb/serial/kl5kusb105.*
9418
9419USB MASS STORAGE DRIVER 9440USB MASS STORAGE DRIVER
9420M: Matthew Dharm <mdharm-usb@one-eyed-alien.net> 9441M: Matthew Dharm <mdharm-usb@one-eyed-alien.net>
9421L: linux-usb@vger.kernel.org 9442L: linux-usb@vger.kernel.org
@@ -9443,12 +9464,6 @@ S: Maintained
9443F: Documentation/usb/ohci.txt 9464F: Documentation/usb/ohci.txt
9444F: drivers/usb/host/ohci* 9465F: drivers/usb/host/ohci*
9445 9466
9446USB OPTION-CARD DRIVER
9447M: Matthias Urlichs <smurf@smurf.noris.de>
9448L: linux-usb@vger.kernel.org
9449S: Maintained
9450F: drivers/usb/serial/option.c
9451
9452USB PEGASUS DRIVER 9467USB PEGASUS DRIVER
9453M: Petko Manolov <petkan@nucleusys.com> 9468M: Petko Manolov <petkan@nucleusys.com>
9454L: linux-usb@vger.kernel.org 9469L: linux-usb@vger.kernel.org
@@ -9481,7 +9496,7 @@ S: Maintained
9481F: drivers/net/usb/rtl8150.c 9496F: drivers/net/usb/rtl8150.c
9482 9497
9483USB SERIAL SUBSYSTEM 9498USB SERIAL SUBSYSTEM
9484M: Johan Hovold <jhovold@gmail.com> 9499M: Johan Hovold <johan@kernel.org>
9485L: linux-usb@vger.kernel.org 9500L: linux-usb@vger.kernel.org
9486S: Maintained 9501S: Maintained
9487F: Documentation/usb/usb-serial.txt 9502F: Documentation/usb/usb-serial.txt
diff --git a/Makefile b/Makefile
index b11e2d504a00..6b2774145d66 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 3 1VERSION = 3
2PATCHLEVEL = 16 2PATCHLEVEL = 16
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc2 4EXTRAVERSION = -rc6
5NAME = Shuffling Zombie Juror 5NAME = Shuffling Zombie Juror
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
@@ -41,6 +41,29 @@ unexport GREP_OPTIONS
41# descending is started. They are now explicitly listed as the 41# descending is started. They are now explicitly listed as the
42# prepare rule. 42# prepare rule.
43 43
44# Beautify output
45# ---------------------------------------------------------------------------
46#
47# Normally, we echo the whole command before executing it. By making
48# that echo $($(quiet)$(cmd)), we now have the possibility to set
49# $(quiet) to choose other forms of output instead, e.g.
50#
51# quiet_cmd_cc_o_c = Compiling $(RELDIR)/$@
52# cmd_cc_o_c = $(CC) $(c_flags) -c -o $@ $<
53#
54# If $(quiet) is empty, the whole command will be printed.
55# If it is set to "quiet_", only the short version will be printed.
56# If it is set to "silent_", nothing will be printed at all, since
57# the variable $(silent_cmd_cc_o_c) doesn't exist.
58#
59# A simple variant is to prefix commands with $(Q) - that's useful
60# for commands that shall be hidden in non-verbose mode.
61#
62# $(Q)ln $@ :<
63#
64# If KBUILD_VERBOSE equals 0 then the above command will be hidden.
65# If KBUILD_VERBOSE equals 1 then the above command is displayed.
66#
44# To put more focus on warnings, be less verbose as default 67# To put more focus on warnings, be less verbose as default
45# Use 'make V=1' to see the full commands 68# Use 'make V=1' to see the full commands
46 69
@@ -51,6 +74,29 @@ ifndef KBUILD_VERBOSE
51 KBUILD_VERBOSE = 0 74 KBUILD_VERBOSE = 0
52endif 75endif
53 76
77ifeq ($(KBUILD_VERBOSE),1)
78 quiet =
79 Q =
80else
81 quiet=quiet_
82 Q = @
83endif
84
85# If the user is running make -s (silent mode), suppress echoing of
86# commands
87
88ifneq ($(filter 4.%,$(MAKE_VERSION)),) # make-4
89ifneq ($(filter %s ,$(firstword x$(MAKEFLAGS))),)
90 quiet=silent_
91endif
92else # make-3.8x
93ifneq ($(filter s% -s%,$(MAKEFLAGS)),)
94 quiet=silent_
95endif
96endif
97
98export quiet Q KBUILD_VERBOSE
99
54# Call a source code checker (by default, "sparse") as part of the 100# Call a source code checker (by default, "sparse") as part of the
55# C compilation. 101# C compilation.
56# 102#
@@ -126,7 +172,13 @@ PHONY += $(MAKECMDGOALS) sub-make
126$(filter-out _all sub-make $(CURDIR)/Makefile, $(MAKECMDGOALS)) _all: sub-make 172$(filter-out _all sub-make $(CURDIR)/Makefile, $(MAKECMDGOALS)) _all: sub-make
127 @: 173 @:
128 174
175# Fake the "Entering directory" message once, so that IDEs/editors are
176# able to understand relative filenames.
177 echodir := @echo
178 quiet_echodir := @echo
179silent_echodir := @:
129sub-make: FORCE 180sub-make: FORCE
181 $($(quiet)echodir) "make[1]: Entering directory \`$(KBUILD_OUTPUT)'"
130 $(if $(KBUILD_VERBOSE:1=),@)$(MAKE) -C $(KBUILD_OUTPUT) \ 182 $(if $(KBUILD_VERBOSE:1=),@)$(MAKE) -C $(KBUILD_OUTPUT) \
131 KBUILD_SRC=$(CURDIR) \ 183 KBUILD_SRC=$(CURDIR) \
132 KBUILD_EXTMOD="$(KBUILD_EXTMOD)" -f $(CURDIR)/Makefile \ 184 KBUILD_EXTMOD="$(KBUILD_EXTMOD)" -f $(CURDIR)/Makefile \
@@ -289,52 +341,6 @@ endif
289export KBUILD_MODULES KBUILD_BUILTIN 341export KBUILD_MODULES KBUILD_BUILTIN
290export KBUILD_CHECKSRC KBUILD_SRC KBUILD_EXTMOD 342export KBUILD_CHECKSRC KBUILD_SRC KBUILD_EXTMOD
291 343
292# Beautify output
293# ---------------------------------------------------------------------------
294#
295# Normally, we echo the whole command before executing it. By making
296# that echo $($(quiet)$(cmd)), we now have the possibility to set
297# $(quiet) to choose other forms of output instead, e.g.
298#
299# quiet_cmd_cc_o_c = Compiling $(RELDIR)/$@
300# cmd_cc_o_c = $(CC) $(c_flags) -c -o $@ $<
301#
302# If $(quiet) is empty, the whole command will be printed.
303# If it is set to "quiet_", only the short version will be printed.
304# If it is set to "silent_", nothing will be printed at all, since
305# the variable $(silent_cmd_cc_o_c) doesn't exist.
306#
307# A simple variant is to prefix commands with $(Q) - that's useful
308# for commands that shall be hidden in non-verbose mode.
309#
310# $(Q)ln $@ :<
311#
312# If KBUILD_VERBOSE equals 0 then the above command will be hidden.
313# If KBUILD_VERBOSE equals 1 then the above command is displayed.
314
315ifeq ($(KBUILD_VERBOSE),1)
316 quiet =
317 Q =
318else
319 quiet=quiet_
320 Q = @
321endif
322
323# If the user is running make -s (silent mode), suppress echoing of
324# commands
325
326ifneq ($(filter 4.%,$(MAKE_VERSION)),) # make-4
327ifneq ($(filter %s ,$(firstword x$(MAKEFLAGS))),)
328 quiet=silent_
329endif
330else # make-3.8x
331ifneq ($(filter s% -s%,$(MAKEFLAGS)),)
332 quiet=silent_
333endif
334endif
335
336export quiet Q KBUILD_VERBOSE
337
338ifneq ($(CC),) 344ifneq ($(CC),)
339ifeq ($(shell $(CC) -v 2>&1 | grep -c "clang version"), 1) 345ifeq ($(shell $(CC) -v 2>&1 | grep -c "clang version"), 1)
340COMPILER := clang 346COMPILER := clang
@@ -1170,7 +1176,7 @@ distclean: mrproper
1170# Packaging of the kernel to various formats 1176# Packaging of the kernel to various formats
1171# --------------------------------------------------------------------------- 1177# ---------------------------------------------------------------------------
1172# rpm target kept for backward compatibility 1178# rpm target kept for backward compatibility
1173package-dir := $(srctree)/scripts/package 1179package-dir := scripts/package
1174 1180
1175%src-pkg: FORCE 1181%src-pkg: FORCE
1176 $(Q)$(MAKE) $(build)=$(package-dir) $@ 1182 $(Q)$(MAKE) $(build)=$(package-dir) $@
diff --git a/arch/arc/include/asm/cache.h b/arch/arc/include/asm/cache.h
index c1d3d2da1191..b3c750979aa1 100644
--- a/arch/arc/include/asm/cache.h
+++ b/arch/arc/include/asm/cache.h
@@ -60,7 +60,7 @@ extern void read_decode_cache_bcr(void);
60#define ARC_REG_IC_IVIC 0x10 60#define ARC_REG_IC_IVIC 0x10
61#define ARC_REG_IC_CTRL 0x11 61#define ARC_REG_IC_CTRL 0x11
62#define ARC_REG_IC_IVIL 0x19 62#define ARC_REG_IC_IVIL 0x19
63#if defined(CONFIG_ARC_MMU_V3) || defined (CONFIG_ARC_MMU_V4) 63#if defined(CONFIG_ARC_MMU_V3)
64#define ARC_REG_IC_PTAG 0x1E 64#define ARC_REG_IC_PTAG 0x1E
65#endif 65#endif
66 66
@@ -74,7 +74,7 @@ extern void read_decode_cache_bcr(void);
74#define ARC_REG_DC_IVDL 0x4A 74#define ARC_REG_DC_IVDL 0x4A
75#define ARC_REG_DC_FLSH 0x4B 75#define ARC_REG_DC_FLSH 0x4B
76#define ARC_REG_DC_FLDL 0x4C 76#define ARC_REG_DC_FLDL 0x4C
77#if defined(CONFIG_ARC_MMU_V3) || defined (CONFIG_ARC_MMU_V4) 77#if defined(CONFIG_ARC_MMU_V3)
78#define ARC_REG_DC_PTAG 0x5C 78#define ARC_REG_DC_PTAG 0x5C
79#endif 79#endif
80 80
diff --git a/arch/arc/include/uapi/asm/ptrace.h b/arch/arc/include/uapi/asm/ptrace.h
index 2618cc13ba75..76a7739aab1c 100644
--- a/arch/arc/include/uapi/asm/ptrace.h
+++ b/arch/arc/include/uapi/asm/ptrace.h
@@ -11,6 +11,7 @@
11#ifndef _UAPI__ASM_ARC_PTRACE_H 11#ifndef _UAPI__ASM_ARC_PTRACE_H
12#define _UAPI__ASM_ARC_PTRACE_H 12#define _UAPI__ASM_ARC_PTRACE_H
13 13
14#define PTRACE_GET_THREAD_AREA 25
14 15
15#ifndef __ASSEMBLY__ 16#ifndef __ASSEMBLY__
16/* 17/*
diff --git a/arch/arc/kernel/ctx_sw_asm.S b/arch/arc/kernel/ctx_sw_asm.S
index 2ff0347a2fd7..e248594097e7 100644
--- a/arch/arc/kernel/ctx_sw_asm.S
+++ b/arch/arc/kernel/ctx_sw_asm.S
@@ -10,9 +10,9 @@
10 * -This is the more "natural" hand written assembler 10 * -This is the more "natural" hand written assembler
11 */ 11 */
12 12
13#include <linux/linkage.h>
13#include <asm/entry.h> /* For the SAVE_* macros */ 14#include <asm/entry.h> /* For the SAVE_* macros */
14#include <asm/asm-offsets.h> 15#include <asm/asm-offsets.h>
15#include <asm/linkage.h>
16 16
17#define KSP_WORD_OFF ((TASK_THREAD + THREAD_KSP) / 4) 17#define KSP_WORD_OFF ((TASK_THREAD + THREAD_KSP) / 4)
18 18
diff --git a/arch/arc/kernel/devtree.c b/arch/arc/kernel/devtree.c
index 0b3ef4025d89..fffdb5e41b20 100644
--- a/arch/arc/kernel/devtree.c
+++ b/arch/arc/kernel/devtree.c
@@ -41,7 +41,7 @@ const struct machine_desc * __init setup_machine_fdt(void *dt)
41{ 41{
42 const struct machine_desc *mdesc; 42 const struct machine_desc *mdesc;
43 unsigned long dt_root; 43 unsigned long dt_root;
44 void *clk; 44 const void *clk;
45 int len; 45 int len;
46 46
47 if (!early_init_dt_scan(dt)) 47 if (!early_init_dt_scan(dt))
diff --git a/arch/arc/kernel/head.S b/arch/arc/kernel/head.S
index 07a58f2d3077..4d2481bd8b98 100644
--- a/arch/arc/kernel/head.S
+++ b/arch/arc/kernel/head.S
@@ -77,10 +77,11 @@ stext:
77 ; Clear BSS before updating any globals 77 ; Clear BSS before updating any globals
78 ; XXX: use ZOL here 78 ; XXX: use ZOL here
79 mov r5, __bss_start 79 mov r5, __bss_start
80 mov r6, __bss_stop 80 sub r6, __bss_stop, r5
81 lsr.f lp_count, r6, 2
82 lpnz 1f
83 st.ab 0, [r5, 4]
811: 841:
82 st.ab 0, [r5,4]
83 brlt r5, r6, 1b
84 85
85 ; Uboot - kernel ABI 86 ; Uboot - kernel ABI
86 ; r0 = [0] No uboot interaction, [1] cmdline in r2, [2] DTB in r2 87 ; r0 = [0] No uboot interaction, [1] cmdline in r2, [2] DTB in r2
diff --git a/arch/arc/kernel/ptrace.c b/arch/arc/kernel/ptrace.c
index 5d76706139dd..13b3ffb27a38 100644
--- a/arch/arc/kernel/ptrace.c
+++ b/arch/arc/kernel/ptrace.c
@@ -146,6 +146,10 @@ long arch_ptrace(struct task_struct *child, long request,
146 pr_debug("REQ=%ld: ADDR =0x%lx, DATA=0x%lx)\n", request, addr, data); 146 pr_debug("REQ=%ld: ADDR =0x%lx, DATA=0x%lx)\n", request, addr, data);
147 147
148 switch (request) { 148 switch (request) {
149 case PTRACE_GET_THREAD_AREA:
150 ret = put_user(task_thread_info(child)->thr_ptr,
151 (unsigned long __user *)data);
152 break;
149 default: 153 default:
150 ret = ptrace_request(child, request, addr, data); 154 ret = ptrace_request(child, request, addr, data);
151 break; 155 break;
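
From userspace, the new request stores the tracee's thread pointer at the address passed in the data argument. A hedged sketch with ARC semantics only; the attach/stop handling is abbreviated and the request number is taken from the uapi header change above:

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/ptrace.h>
    #include <sys/types.h>
    #include <sys/wait.h>

    #ifndef PTRACE_GET_THREAD_AREA
    #define PTRACE_GET_THREAD_AREA 25
    #endif

    int main(int argc, char **argv)
    {
        pid_t pid;
        unsigned long thr_ptr = 0;

        if (argc < 2) {
            fprintf(stderr, "usage: %s <pid>\n", argv[0]);
            return 1;
        }
        pid = (pid_t)atol(argv[1]);

        if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) < 0) {
            perror("PTRACE_ATTACH");
            return 1;
        }
        waitpid(pid, NULL, 0);          /* wait for the tracee to stop */

        if (ptrace(PTRACE_GET_THREAD_AREA, pid, NULL, &thr_ptr) < 0)
            perror("PTRACE_GET_THREAD_AREA");
        else
            printf("thread pointer: 0x%lx\n", thr_ptr);

        ptrace(PTRACE_DETACH, pid, NULL, NULL);
        return 0;
    }
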
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index cf90b6f4d3e0..c802bb500602 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -337,8 +337,19 @@ irqreturn_t do_IPI(int irq, void *dev_id)
  * API called by platform code to hookup arch-common ISR to their IPI IRQ
  */
 static DEFINE_PER_CPU(int, ipi_dev);
+
+static struct irqaction arc_ipi_irq = {
+	.name    = "IPI Interrupt",
+	.flags   = IRQF_PERCPU,
+	.handler = do_IPI,
+};
+
 int smp_ipi_irq_setup(int cpu, int irq)
 {
-	int *dev_id = &per_cpu(ipi_dev, smp_processor_id());
-	return request_percpu_irq(irq, do_IPI, "IPI Interrupt", dev_id);
+	if (!cpu)
+		return setup_irq(irq, &arc_ipi_irq);
+	else
+		arch_unmask_irq(irq);
+
+	return 0;
 }
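
The replacement registers a single shared irqaction from the boot CPU and only unmasks the line on the secondaries. For contrast, the API the old code called is normally paired with a per-CPU enable step; a hedged sketch of that generic pattern (not the ARC code) would be:

    /* Generic percpu-IRQ registration pattern, shown for comparison only.
     * 'do_IPI' is the handler from the surrounding file. */
    static DEFINE_PER_CPU(int, example_ipi_dev);

    static int example_ipi_setup(int irq)
    {
            int err = request_percpu_irq(irq, do_IPI, "IPI Interrupt",
                                         &example_ipi_dev);
            if (err)
                    return err;

            enable_percpu_irq(irq, 0);   /* must be repeated on every CPU */
            return 0;
    }
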
diff --git a/arch/arc/kernel/vmlinux.lds.S b/arch/arc/kernel/vmlinux.lds.S
index 2555f5886af6..dd35bde39f69 100644
--- a/arch/arc/kernel/vmlinux.lds.S
+++ b/arch/arc/kernel/vmlinux.lds.S
@@ -116,7 +116,7 @@ SECTIONS
 
 	_edata = .;
 
-	BSS_SECTION(0, 0, 0)
+	BSS_SECTION(4, 4, 4)
 
 #ifdef CONFIG_ARC_DW2_UNWIND
 	. = ALIGN(PAGE_SIZE);
diff --git a/arch/arc/mm/cache_arc700.c b/arch/arc/mm/cache_arc700.c
index 1f676c4794e0..353b202c37c9 100644
--- a/arch/arc/mm/cache_arc700.c
+++ b/arch/arc/mm/cache_arc700.c
@@ -389,7 +389,7 @@ static inline void __dc_line_op(unsigned long paddr, unsigned long vaddr,
 /***********************************************************
  * Machine specific helper for per line I-Cache invalidate.
  */
-static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
+static void __ic_line_inv_vaddr_local(unsigned long paddr, unsigned long vaddr,
 				unsigned long sz)
 {
 	unsigned long flags;
@@ -405,6 +405,23 @@ static inline void __ic_entire_inv(void)
 	read_aux_reg(ARC_REG_IC_CTRL);	/* blocks */
 }
 
+struct ic_line_inv_vaddr_ipi {
+	unsigned long paddr, vaddr;
+	int sz;
+};
+
+static void __ic_line_inv_vaddr_helper(void *info)
+{
+	struct ic_line_inv_vaddr_ipi *ic_inv = (struct ic_line_inv_vaddr_ipi*) info;
+	__ic_line_inv_vaddr_local(ic_inv->paddr, ic_inv->vaddr, ic_inv->sz);
+}
+
+static void __ic_line_inv_vaddr(unsigned long paddr, unsigned long vaddr,
+				unsigned long sz)
+{
+	struct ic_line_inv_vaddr_ipi ic_inv = { paddr, vaddr , sz};
+	on_each_cpu(__ic_line_inv_vaddr_helper, &ic_inv, 1);
+}
 #else
 
 #define __ic_entire_inv()
@@ -553,12 +570,8 @@ void flush_icache_range(unsigned long kstart, unsigned long kend)
  */
 void __sync_icache_dcache(unsigned long paddr, unsigned long vaddr, int len)
 {
-	unsigned long flags;
-
-	local_irq_save(flags);
-	__ic_line_inv_vaddr(paddr, vaddr, len);
 	__dc_line_op(paddr, vaddr, len, OP_FLUSH_N_INV);
-	local_irq_restore(flags);
+	__ic_line_inv_vaddr(paddr, vaddr, len);
 }
 
 /* wrapper to compile time eliminate alignment checks in flush loop */
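
The new SMP path is the standard cross-call idiom: pack the arguments into an on-stack struct, broadcast a helper with on_each_cpu(), and pass wait=1 so the struct stays valid until every CPU has run it. A stripped-down sketch of the same idiom (illustrative; do_local_invalidate() here is a stand-in for the per-CPU cache operation, not a real function):

    #include <linux/smp.h>

    struct inv_args {
            unsigned long paddr, vaddr;
            int sz;
    };

    static void inv_helper(void *info)
    {
            struct inv_args *a = info;

            /* Runs on every online CPU with interrupts disabled. */
            do_local_invalidate(a->paddr, a->vaddr, a->sz);
    }

    static void inv_all_cpus(unsigned long paddr, unsigned long vaddr, int sz)
    {
            struct inv_args a = { paddr, vaddr, sz };

            /* wait=1: don't return until all CPUs are done, so 'a' can
             * safely live on this stack frame. */
            on_each_cpu(inv_helper, &a, 1);
    }
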
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 245058b3b0ef..88acf8bc1490 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -6,6 +6,7 @@ config ARM
 	select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
 	select ARCH_HAVE_CUSTOM_GPIO_H
 	select ARCH_MIGHT_HAVE_PC_PARPORT
+	select ARCH_SUPPORTS_ATOMIC_RMW
 	select ARCH_USE_BUILTIN_BSWAP
 	select ARCH_USE_CMPXCHG_LOCKREF
 	select ARCH_WANT_IPC_PARSE_VERSION
diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile
index 5986ff63b901..adb5ed9e269e 100644
--- a/arch/arm/boot/dts/Makefile
+++ b/arch/arm/boot/dts/Makefile
@@ -357,7 +357,7 @@ dtb-$(CONFIG_ARCH_STI)+= stih407-b2120.dtb \
 	stih415-b2020.dtb \
 	stih416-b2000.dtb \
 	stih416-b2020.dtb \
-	stih416-b2020-revE.dtb
+	stih416-b2020e.dtb
 dtb-$(CONFIG_MACH_SUN4I) += \
 	sun4i-a10-a1000.dtb \
 	sun4i-a10-cubieboard.dtb \
diff --git a/arch/arm/boot/dts/am335x-evm.dts b/arch/arm/boot/dts/am335x-evm.dts
index ecb267767cf5..e2156a583de7 100644
--- a/arch/arm/boot/dts/am335x-evm.dts
+++ b/arch/arm/boot/dts/am335x-evm.dts
@@ -529,8 +529,8 @@
 	serial-dir = <	/* 0: INACTIVE, 1: TX, 2: RX */
 			0 0 1 2
 		>;
-	tx-num-evt = <1>;
-	rx-num-evt = <1>;
+	tx-num-evt = <32>;
+	rx-num-evt = <32>;
 };
 
 &tps {
diff --git a/arch/arm/boot/dts/am335x-evmsk.dts b/arch/arm/boot/dts/am335x-evmsk.dts
index ab9a34ce524c..80a3b215e7d6 100644
--- a/arch/arm/boot/dts/am335x-evmsk.dts
+++ b/arch/arm/boot/dts/am335x-evmsk.dts
@@ -560,8 +560,8 @@
 	serial-dir = <	/* 0: INACTIVE, 1: TX, 2: RX */
 			0 0 1 2
 		>;
-	tx-num-evt = <1>;
-	rx-num-evt = <1>;
+	tx-num-evt = <32>;
+	rx-num-evt = <32>;
 };
 
 &tscadc {
diff --git a/arch/arm/boot/dts/am335x-igep0033.dtsi b/arch/arm/boot/dts/am335x-igep0033.dtsi
index 8a0a72dc7dd7..a1a0cc5eb35c 100644
--- a/arch/arm/boot/dts/am335x-igep0033.dtsi
+++ b/arch/arm/boot/dts/am335x-igep0033.dtsi
@@ -105,10 +105,16 @@
 
 &cpsw_emac0 {
 	phy_id = <&davinci_mdio>, <0>;
+	phy-mode = "rmii";
 };
 
 &cpsw_emac1 {
 	phy_id = <&davinci_mdio>, <1>;
+	phy-mode = "rmii";
+};
+
+&phy_sel {
+	rmii-clock-ext;
 };
 
 &elm {
diff --git a/arch/arm/boot/dts/am43x-epos-evm.dts b/arch/arm/boot/dts/am43x-epos-evm.dts
index 19f1f7e87597..90098f98a5c8 100644
--- a/arch/arm/boot/dts/am43x-epos-evm.dts
+++ b/arch/arm/boot/dts/am43x-epos-evm.dts
@@ -319,6 +319,10 @@
 	phy-mode = "rmii";
 };
 
+&phy_sel {
+	rmii-clock-ext;
+};
+
 &i2c0 {
 	status = "okay";
 	pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/armada-380.dtsi b/arch/arm/boot/dts/armada-380.dtsi
index e69bc6759c39..4173a8ab34e7 100644
--- a/arch/arm/boot/dts/armada-380.dtsi
+++ b/arch/arm/boot/dts/armada-380.dtsi
@@ -16,7 +16,7 @@
16 16
17/ { 17/ {
18 model = "Marvell Armada 380 family SoC"; 18 model = "Marvell Armada 380 family SoC";
19 compatible = "marvell,armada380", "marvell,armada38x"; 19 compatible = "marvell,armada380";
20 20
21 cpus { 21 cpus {
22 #address-cells = <1>; 22 #address-cells = <1>;
diff --git a/arch/arm/boot/dts/armada-385-db.dts b/arch/arm/boot/dts/armada-385-db.dts
index 5bae4731828b..1af886f1e486 100644
--- a/arch/arm/boot/dts/armada-385-db.dts
+++ b/arch/arm/boot/dts/armada-385-db.dts
@@ -16,7 +16,7 @@
16 16
17/ { 17/ {
18 model = "Marvell Armada 385 Development Board"; 18 model = "Marvell Armada 385 Development Board";
19 compatible = "marvell,a385-db", "marvell,armada385", "marvell,armada38x"; 19 compatible = "marvell,a385-db", "marvell,armada385", "marvell,armada380";
20 20
21 chosen { 21 chosen {
22 bootargs = "console=ttyS0,115200 earlyprintk"; 22 bootargs = "console=ttyS0,115200 earlyprintk";
diff --git a/arch/arm/boot/dts/armada-385-rd.dts b/arch/arm/boot/dts/armada-385-rd.dts
index 40893255a3f0..aaca2861dc87 100644
--- a/arch/arm/boot/dts/armada-385-rd.dts
+++ b/arch/arm/boot/dts/armada-385-rd.dts
@@ -17,7 +17,7 @@
17 17
18/ { 18/ {
19 model = "Marvell Armada 385 Reference Design"; 19 model = "Marvell Armada 385 Reference Design";
20 compatible = "marvell,a385-rd", "marvell,armada385", "marvell,armada38x"; 20 compatible = "marvell,a385-rd", "marvell,armada385", "marvell,armada380";
21 21
22 chosen { 22 chosen {
23 bootargs = "console=ttyS0,115200 earlyprintk"; 23 bootargs = "console=ttyS0,115200 earlyprintk";
diff --git a/arch/arm/boot/dts/armada-385.dtsi b/arch/arm/boot/dts/armada-385.dtsi
index f011009bf4cf..6283d7912f71 100644
--- a/arch/arm/boot/dts/armada-385.dtsi
+++ b/arch/arm/boot/dts/armada-385.dtsi
@@ -16,7 +16,7 @@
16 16
17/ { 17/ {
18 model = "Marvell Armada 385 family SoC"; 18 model = "Marvell Armada 385 family SoC";
19 compatible = "marvell,armada385", "marvell,armada38x"; 19 compatible = "marvell,armada385", "marvell,armada380";
20 20
21 cpus { 21 cpus {
22 #address-cells = <1>; 22 #address-cells = <1>;
diff --git a/arch/arm/boot/dts/armada-38x.dtsi b/arch/arm/boot/dts/armada-38x.dtsi
index 3de364e81b52..689fa1a46728 100644
--- a/arch/arm/boot/dts/armada-38x.dtsi
+++ b/arch/arm/boot/dts/armada-38x.dtsi
@@ -20,7 +20,7 @@
20 20
21/ { 21/ {
22 model = "Marvell Armada 38x family SoC"; 22 model = "Marvell Armada 38x family SoC";
23 compatible = "marvell,armada38x"; 23 compatible = "marvell,armada380";
24 24
25 aliases { 25 aliases {
26 gpio0 = &gpio0; 26 gpio0 = &gpio0;
diff --git a/arch/arm/boot/dts/at91sam9261.dtsi b/arch/arm/boot/dts/at91sam9261.dtsi
index b309c1c6e848..04927db1d6bf 100644
--- a/arch/arm/boot/dts/at91sam9261.dtsi
+++ b/arch/arm/boot/dts/at91sam9261.dtsi
@@ -568,24 +568,17 @@
568 #size-cells = <0>; 568 #size-cells = <0>;
569 #interrupt-cells = <1>; 569 #interrupt-cells = <1>;
570 570
571 slow_rc_osc: slow_rc_osc { 571 main_osc: main_osc {
572 compatible = "fixed-clock"; 572 compatible = "atmel,at91rm9200-clk-main-osc";
573 #clock-cells = <0>; 573 #clock-cells = <0>;
574 clock-frequency = <32768>; 574 interrupts-extended = <&pmc AT91_PMC_MOSCS>;
575 clock-accuracy = <50000000>; 575 clocks = <&main_xtal>;
576 };
577
578 clk32k: slck {
579 compatible = "atmel,at91sam9260-clk-slow";
580 #clock-cells = <0>;
581 clocks = <&slow_rc_osc &slow_xtal>;
582 }; 576 };
583 577
584 main: mainck { 578 main: mainck {
585 compatible = "atmel,at91rm9200-clk-main"; 579 compatible = "atmel,at91rm9200-clk-main";
586 #clock-cells = <0>; 580 #clock-cells = <0>;
587 interrupts-extended = <&pmc AT91_PMC_MOSCS>; 581 clocks = <&main_osc>;
588 clocks = <&main_xtal>;
589 }; 582 };
590 583
591 plla: pllack { 584 plla: pllack {
@@ -615,7 +608,7 @@
615 compatible = "atmel,at91rm9200-clk-master"; 608 compatible = "atmel,at91rm9200-clk-master";
616 #clock-cells = <0>; 609 #clock-cells = <0>;
617 interrupts-extended = <&pmc AT91_PMC_MCKRDY>; 610 interrupts-extended = <&pmc AT91_PMC_MCKRDY>;
618 clocks = <&clk32k>, <&main>, <&plla>, <&pllb>; 611 clocks = <&slow_xtal>, <&main>, <&plla>, <&pllb>;
619 atmel,clk-output-range = <0 94000000>; 612 atmel,clk-output-range = <0 94000000>;
620 atmel,clk-divisors = <1 2 4 0>; 613 atmel,clk-divisors = <1 2 4 0>;
621 }; 614 };
@@ -632,7 +625,7 @@
632 #address-cells = <1>; 625 #address-cells = <1>;
633 #size-cells = <0>; 626 #size-cells = <0>;
634 interrupt-parent = <&pmc>; 627 interrupt-parent = <&pmc>;
635 clocks = <&clk32k>, <&main>, <&plla>, <&pllb>; 628 clocks = <&slow_xtal>, <&main>, <&plla>, <&pllb>;
636 629
637 prog0: prog0 { 630 prog0: prog0 {
638 #clock-cells = <0>; 631 #clock-cells = <0>;
diff --git a/arch/arm/boot/dts/at91sam9261ek.dts b/arch/arm/boot/dts/at91sam9261ek.dts
index c6683ea8b743..aa35a7aec9a8 100644
--- a/arch/arm/boot/dts/at91sam9261ek.dts
+++ b/arch/arm/boot/dts/at91sam9261ek.dts
@@ -20,6 +20,10 @@
20 reg = <0x20000000 0x4000000>; 20 reg = <0x20000000 0x4000000>;
21 }; 21 };
22 22
23 slow_xtal {
24 clock-frequency = <32768>;
25 };
26
23 main_xtal { 27 main_xtal {
24 clock-frequency = <18432000>; 28 clock-frequency = <18432000>;
25 }; 29 };
diff --git a/arch/arm/boot/dts/at91sam9n12.dtsi b/arch/arm/boot/dts/at91sam9n12.dtsi
index d1b82e6635d5..b84bac5bada4 100644
--- a/arch/arm/boot/dts/at91sam9n12.dtsi
+++ b/arch/arm/boot/dts/at91sam9n12.dtsi
@@ -132,8 +132,8 @@
132 <595000000 650000000 3 0>, 132 <595000000 650000000 3 0>,
133 <545000000 600000000 0 1>, 133 <545000000 600000000 0 1>,
134 <495000000 555000000 1 1>, 134 <495000000 555000000 1 1>,
135 <445000000 500000000 1 2>, 135 <445000000 500000000 2 1>,
136 <400000000 450000000 1 3>; 136 <400000000 450000000 3 1>;
137 }; 137 };
138 138
139 plladiv: plladivck { 139 plladiv: plladivck {
@@ -925,7 +925,7 @@
925 compatible = "atmel,at91rm9200-ohci", "usb-ohci"; 925 compatible = "atmel,at91rm9200-ohci", "usb-ohci";
926 reg = <0x00500000 0x00100000>; 926 reg = <0x00500000 0x00100000>;
927 interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>; 927 interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>;
928 clocks = <&usb>, <&uhphs_clk>, <&udphs_clk>, 928 clocks = <&usb>, <&uhphs_clk>, <&uhphs_clk>,
929 <&uhpck>; 929 <&uhpck>;
930 clock-names = "usb_clk", "ohci_clk", "hclk", "uhpck"; 930 clock-names = "usb_clk", "ohci_clk", "hclk", "uhpck";
931 status = "disabled"; 931 status = "disabled";
diff --git a/arch/arm/boot/dts/at91sam9x5.dtsi b/arch/arm/boot/dts/at91sam9x5.dtsi
index 1a57298636a5..2c0d6ea3ab41 100644
--- a/arch/arm/boot/dts/at91sam9x5.dtsi
+++ b/arch/arm/boot/dts/at91sam9x5.dtsi
@@ -140,8 +140,8 @@
140 595000000 650000000 3 0 140 595000000 650000000 3 0
141 545000000 600000000 0 1 141 545000000 600000000 0 1
142 495000000 555000000 1 1 142 495000000 555000000 1 1
143 445000000 500000000 1 2 143 445000000 500000000 2 1
144 400000000 450000000 1 3>; 144 400000000 450000000 3 1>;
145 }; 145 };
146 146
147 plladiv: plladivck { 147 plladiv: plladivck {
@@ -1045,6 +1045,8 @@
1045 reg = <0x00500000 0x80000 1045 reg = <0x00500000 0x80000
1046 0xf803c000 0x400>; 1046 0xf803c000 0x400>;
1047 interrupts = <23 IRQ_TYPE_LEVEL_HIGH 0>; 1047 interrupts = <23 IRQ_TYPE_LEVEL_HIGH 0>;
1048 clocks = <&usb>, <&udphs_clk>;
1049 clock-names = "hclk", "pclk";
1048 status = "disabled"; 1050 status = "disabled";
1049 1051
1050 ep0 { 1052 ep0 {
@@ -1122,6 +1124,7 @@
1122 compatible = "atmel,at91sam9rl-pwm"; 1124 compatible = "atmel,at91sam9rl-pwm";
1123 reg = <0xf8034000 0x300>; 1125 reg = <0xf8034000 0x300>;
1124 interrupts = <18 IRQ_TYPE_LEVEL_HIGH 4>; 1126 interrupts = <18 IRQ_TYPE_LEVEL_HIGH 4>;
1127 clocks = <&pwm_clk>;
1125 #pwm-cells = <3>; 1128 #pwm-cells = <3>;
1126 status = "disabled"; 1129 status = "disabled";
1127 }; 1130 };
@@ -1153,8 +1156,7 @@
1153 compatible = "atmel,at91rm9200-ohci", "usb-ohci"; 1156 compatible = "atmel,at91rm9200-ohci", "usb-ohci";
1154 reg = <0x00600000 0x100000>; 1157 reg = <0x00600000 0x100000>;
1155 interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>; 1158 interrupts = <22 IRQ_TYPE_LEVEL_HIGH 2>;
1156 clocks = <&usb>, <&uhphs_clk>, <&udphs_clk>, 1159 clocks = <&usb>, <&uhphs_clk>, <&uhphs_clk>, <&uhpck>;
1157 <&uhpck>;
1158 clock-names = "usb_clk", "ohci_clk", "hclk", "uhpck"; 1160 clock-names = "usb_clk", "ohci_clk", "hclk", "uhpck";
1159 status = "disabled"; 1161 status = "disabled";
1160 }; 1162 };
diff --git a/arch/arm/boot/dts/dra7-evm.dts b/arch/arm/boot/dts/dra7-evm.dts
index 4adc28039c30..83089540e324 100644
--- a/arch/arm/boot/dts/dra7-evm.dts
+++ b/arch/arm/boot/dts/dra7-evm.dts
@@ -240,6 +240,7 @@
240 regulator-name = "ldo3"; 240 regulator-name = "ldo3";
241 regulator-min-microvolt = <1800000>; 241 regulator-min-microvolt = <1800000>;
242 regulator-max-microvolt = <1800000>; 242 regulator-max-microvolt = <1800000>;
243 regulator-always-on;
243 regulator-boot-on; 244 regulator-boot-on;
244 }; 245 };
245 246
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index c29945e07c5a..80127638b379 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -773,7 +773,6 @@
773 clocks = <&qspi_gfclk_div>; 773 clocks = <&qspi_gfclk_div>;
774 clock-names = "fck"; 774 clock-names = "fck";
775 num-cs = <4>; 775 num-cs = <4>;
776 interrupts = <0 343 0x4>;
777 status = "disabled"; 776 status = "disabled";
778 }; 777 };
779 778
@@ -984,6 +983,17 @@
984 #size-cells = <1>; 983 #size-cells = <1>;
985 status = "disabled"; 984 status = "disabled";
986 }; 985 };
986
987 atl: atl@4843c000 {
988 compatible = "ti,dra7-atl";
989 reg = <0x4843c000 0x3ff>;
990 ti,hwmods = "atl";
991 ti,provided-clocks = <&atl_clkin0_ck>, <&atl_clkin1_ck>,
992 <&atl_clkin2_ck>, <&atl_clkin3_ck>;
993 clocks = <&atl_gfclk_mux>;
994 clock-names = "fck";
995 status = "disabled";
996 };
987 }; 997 };
988}; 998};
989 999
diff --git a/arch/arm/boot/dts/dra7xx-clocks.dtsi b/arch/arm/boot/dts/dra7xx-clocks.dtsi
index b03cfe49d22b..dc7a292fe939 100644
--- a/arch/arm/boot/dts/dra7xx-clocks.dtsi
+++ b/arch/arm/boot/dts/dra7xx-clocks.dtsi
@@ -10,26 +10,26 @@
10&cm_core_aon_clocks { 10&cm_core_aon_clocks {
11 atl_clkin0_ck: atl_clkin0_ck { 11 atl_clkin0_ck: atl_clkin0_ck {
12 #clock-cells = <0>; 12 #clock-cells = <0>;
13 compatible = "fixed-clock"; 13 compatible = "ti,dra7-atl-clock";
14 clock-frequency = <0>; 14 clocks = <&atl_gfclk_mux>;
15 }; 15 };
16 16
17 atl_clkin1_ck: atl_clkin1_ck { 17 atl_clkin1_ck: atl_clkin1_ck {
18 #clock-cells = <0>; 18 #clock-cells = <0>;
19 compatible = "fixed-clock"; 19 compatible = "ti,dra7-atl-clock";
20 clock-frequency = <0>; 20 clocks = <&atl_gfclk_mux>;
21 }; 21 };
22 22
23 atl_clkin2_ck: atl_clkin2_ck { 23 atl_clkin2_ck: atl_clkin2_ck {
24 #clock-cells = <0>; 24 #clock-cells = <0>;
25 compatible = "fixed-clock"; 25 compatible = "ti,dra7-atl-clock";
26 clock-frequency = <0>; 26 clocks = <&atl_gfclk_mux>;
27 }; 27 };
28 28
29 atl_clkin3_ck: atl_clkin3_ck { 29 atl_clkin3_ck: atl_clkin3_ck {
30 #clock-cells = <0>; 30 #clock-cells = <0>;
31 compatible = "fixed-clock"; 31 compatible = "ti,dra7-atl-clock";
32 clock-frequency = <0>; 32 clocks = <&atl_gfclk_mux>;
33 }; 33 };
34 34
35 hdmi_clkin_ck: hdmi_clkin_ck { 35 hdmi_clkin_ck: hdmi_clkin_ck {
@@ -673,10 +673,12 @@
673 673
674 l3_iclk_div: l3_iclk_div { 674 l3_iclk_div: l3_iclk_div {
675 #clock-cells = <0>; 675 #clock-cells = <0>;
676 compatible = "fixed-factor-clock"; 676 compatible = "ti,divider-clock";
677 ti,max-div = <2>;
678 ti,bit-shift = <4>;
679 reg = <0x0100>;
677 clocks = <&dpll_core_h12x2_ck>; 680 clocks = <&dpll_core_h12x2_ck>;
678 clock-mult = <1>; 681 ti,index-power-of-two;
679 clock-div = <1>;
680 }; 682 };
681 683
682 l4_root_clk_div: l4_root_clk_div { 684 l4_root_clk_div: l4_root_clk_div {
@@ -684,7 +686,7 @@
684 compatible = "fixed-factor-clock"; 686 compatible = "fixed-factor-clock";
685 clocks = <&l3_iclk_div>; 687 clocks = <&l3_iclk_div>;
686 clock-mult = <1>; 688 clock-mult = <1>;
687 clock-div = <1>; 689 clock-div = <2>;
688 }; 690 };
689 691
690 video1_clk2_div: video1_clk2_div { 692 video1_clk2_div: video1_clk2_div {
diff --git a/arch/arm/boot/dts/exynos4.dtsi b/arch/arm/boot/dts/exynos4.dtsi
index b8ece4be41ca..17b22e9cc2aa 100644
--- a/arch/arm/boot/dts/exynos4.dtsi
+++ b/arch/arm/boot/dts/exynos4.dtsi
@@ -113,7 +113,7 @@
113 compatible = "arm,cortex-a9-gic"; 113 compatible = "arm,cortex-a9-gic";
114 #interrupt-cells = <3>; 114 #interrupt-cells = <3>;
115 interrupt-controller; 115 interrupt-controller;
116 reg = <0x10490000 0x1000>, <0x10480000 0x100>; 116 reg = <0x10490000 0x10000>, <0x10480000 0x10000>;
117 }; 117 };
118 118
119 combiner: interrupt-controller@10440000 { 119 combiner: interrupt-controller@10440000 {
@@ -554,7 +554,7 @@
554 interrupts = <0 37 0>, <0 38 0>, <0 39 0>, <0 40 0>, <0 41 0>; 554 interrupts = <0 37 0>, <0 38 0>, <0 39 0>, <0 40 0>, <0 41 0>;
555 clocks = <&clock CLK_PWM>; 555 clocks = <&clock CLK_PWM>;
556 clock-names = "timers"; 556 clock-names = "timers";
557 #pwm-cells = <2>; 557 #pwm-cells = <3>;
558 status = "disabled"; 558 status = "disabled";
559 }; 559 };
560 560
diff --git a/arch/arm/boot/dts/exynos5420.dtsi b/arch/arm/boot/dts/exynos5420.dtsi
index e38532271ef9..15957227ffda 100644
--- a/arch/arm/boot/dts/exynos5420.dtsi
+++ b/arch/arm/boot/dts/exynos5420.dtsi
@@ -167,7 +167,7 @@
167 compatible = "samsung,exynos5420-audss-clock"; 167 compatible = "samsung,exynos5420-audss-clock";
168 reg = <0x03810000 0x0C>; 168 reg = <0x03810000 0x0C>;
169 #clock-cells = <1>; 169 #clock-cells = <1>;
170 clocks = <&clock CLK_FIN_PLL>, <&clock CLK_FOUT_EPLL>, 170 clocks = <&clock CLK_FIN_PLL>, <&clock CLK_MAU_EPLL>,
171 <&clock CLK_SCLK_MAUDIO0>, <&clock CLK_SCLK_MAUPCM0>; 171 <&clock CLK_SCLK_MAUDIO0>, <&clock CLK_SCLK_MAUPCM0>;
172 clock-names = "pll_ref", "pll_in", "sclk_audio", "sclk_pcm_in"; 172 clock-names = "pll_ref", "pll_in", "sclk_audio", "sclk_pcm_in";
173 }; 173 };
@@ -260,6 +260,9 @@
260 mfc_pd: power-domain@10044060 { 260 mfc_pd: power-domain@10044060 {
261 compatible = "samsung,exynos4210-pd"; 261 compatible = "samsung,exynos4210-pd";
262 reg = <0x10044060 0x20>; 262 reg = <0x10044060 0x20>;
263 clocks = <&clock CLK_FIN_PLL>, <&clock CLK_MOUT_SW_ACLK333>,
264 <&clock CLK_MOUT_USER_ACLK333>;
265 clock-names = "oscclk", "pclk0", "clk0";
263 }; 266 };
264 267
265 disp_pd: power-domain@100440C0 { 268 disp_pd: power-domain@100440C0 {
diff --git a/arch/arm/boot/dts/imx51-babbage.dts b/arch/arm/boot/dts/imx51-babbage.dts
index 6bc3243a80d3..181d77fa2fa6 100644
--- a/arch/arm/boot/dts/imx51-babbage.dts
+++ b/arch/arm/boot/dts/imx51-babbage.dts
@@ -315,15 +315,15 @@
315&esdhc1 { 315&esdhc1 {
316 pinctrl-names = "default"; 316 pinctrl-names = "default";
317 pinctrl-0 = <&pinctrl_esdhc1>; 317 pinctrl-0 = <&pinctrl_esdhc1>;
318 fsl,cd-controller; 318 cd-gpios = <&gpio1 0 GPIO_ACTIVE_LOW>;
319 fsl,wp-controller; 319 wp-gpios = <&gpio1 1 GPIO_ACTIVE_HIGH>;
320 status = "okay"; 320 status = "okay";
321}; 321};
322 322
323&esdhc2 { 323&esdhc2 {
324 pinctrl-names = "default"; 324 pinctrl-names = "default";
325 pinctrl-0 = <&pinctrl_esdhc2>; 325 pinctrl-0 = <&pinctrl_esdhc2>;
326 cd-gpios = <&gpio1 6 GPIO_ACTIVE_HIGH>; 326 cd-gpios = <&gpio1 6 GPIO_ACTIVE_LOW>;
327 wp-gpios = <&gpio1 5 GPIO_ACTIVE_HIGH>; 327 wp-gpios = <&gpio1 5 GPIO_ACTIVE_HIGH>;
328 status = "okay"; 328 status = "okay";
329}; 329};
@@ -468,8 +468,8 @@
468 MX51_PAD_SD1_DATA1__SD1_DATA1 0x20d5 468 MX51_PAD_SD1_DATA1__SD1_DATA1 0x20d5
469 MX51_PAD_SD1_DATA2__SD1_DATA2 0x20d5 469 MX51_PAD_SD1_DATA2__SD1_DATA2 0x20d5
470 MX51_PAD_SD1_DATA3__SD1_DATA3 0x20d5 470 MX51_PAD_SD1_DATA3__SD1_DATA3 0x20d5
471 MX51_PAD_GPIO1_0__SD1_CD 0x20d5 471 MX51_PAD_GPIO1_0__GPIO1_0 0x100
472 MX51_PAD_GPIO1_1__SD1_WP 0x20d5 472 MX51_PAD_GPIO1_1__GPIO1_1 0x100
473 >; 473 >;
474 }; 474 };
475 475
diff --git a/arch/arm/boot/dts/imx51-eukrea-mbimxsd51-baseboard.dts b/arch/arm/boot/dts/imx51-eukrea-mbimxsd51-baseboard.dts
index 75e66c9c6144..31cfb7f2b02e 100644
--- a/arch/arm/boot/dts/imx51-eukrea-mbimxsd51-baseboard.dts
+++ b/arch/arm/boot/dts/imx51-eukrea-mbimxsd51-baseboard.dts
@@ -107,7 +107,7 @@
107&esdhc1 { 107&esdhc1 {
108 pinctrl-names = "default"; 108 pinctrl-names = "default";
109 pinctrl-0 = <&pinctrl_esdhc1 &pinctrl_esdhc1_cd>; 109 pinctrl-0 = <&pinctrl_esdhc1 &pinctrl_esdhc1_cd>;
110 fsl,cd-controller; 110 cd-gpios = <&gpio1 0 GPIO_ACTIVE_LOW>;
111 status = "okay"; 111 status = "okay";
112}; 112};
113 113
@@ -206,7 +206,7 @@
206 206
207 pinctrl_esdhc1_cd: esdhc1_cd { 207 pinctrl_esdhc1_cd: esdhc1_cd {
208 fsl,pins = < 208 fsl,pins = <
209 MX51_PAD_GPIO1_0__SD1_CD 0x20d5 209 MX51_PAD_GPIO1_0__GPIO1_0 0xd5
210 >; 210 >;
211 }; 211 };
212 212
diff --git a/arch/arm/boot/dts/imx53-m53evk.dts b/arch/arm/boot/dts/imx53-m53evk.dts
index d5d146a8b149..c4956b0ffb35 100644
--- a/arch/arm/boot/dts/imx53-m53evk.dts
+++ b/arch/arm/boot/dts/imx53-m53evk.dts
@@ -21,27 +21,25 @@
21 <0xb0000000 0x20000000>; 21 <0xb0000000 0x20000000>;
22 }; 22 };
23 23
24 soc { 24 display1: display@di1 {
25 display1: display@di1 { 25 compatible = "fsl,imx-parallel-display";
26 compatible = "fsl,imx-parallel-display"; 26 interface-pix-fmt = "bgr666";
27 interface-pix-fmt = "bgr666"; 27 pinctrl-names = "default";
28 pinctrl-names = "default"; 28 pinctrl-0 = <&pinctrl_ipu_disp1>;
29 pinctrl-0 = <&pinctrl_ipu_disp1>; 29
30 30 display-timings {
31 display-timings { 31 800x480p60 {
32 800x480p60 { 32 native-mode;
33 native-mode; 33 clock-frequency = <31500000>;
34 clock-frequency = <31500000>; 34 hactive = <800>;
35 hactive = <800>; 35 vactive = <480>;
36 vactive = <480>; 36 hfront-porch = <40>;
37 hfront-porch = <40>; 37 hback-porch = <88>;
38 hback-porch = <88>; 38 hsync-len = <128>;
39 hsync-len = <128>; 39 vback-porch = <33>;
40 vback-porch = <33>; 40 vfront-porch = <9>;
41 vfront-porch = <9>; 41 vsync-len = <3>;
42 vsync-len = <3>; 42 vsync-active = <1>;
43 vsync-active = <1>;
44 };
45 }; 43 };
46 }; 44 };
47 45
diff --git a/arch/arm/boot/dts/imx6dl-hummingboard.dts b/arch/arm/boot/dts/imx6dl-hummingboard.dts
index 5373a5f2782b..c8e51dd41b8f 100644
--- a/arch/arm/boot/dts/imx6dl-hummingboard.dts
+++ b/arch/arm/boot/dts/imx6dl-hummingboard.dts
@@ -143,6 +143,14 @@
143 fsl,pins = <MX6QDL_PAD_GPIO_0__GPIO1_IO00 0x1b0b0>; 143 fsl,pins = <MX6QDL_PAD_GPIO_0__GPIO1_IO00 0x1b0b0>;
144 }; 144 };
145 145
146 pinctrl_hummingboard_usbotg_id: hummingboard-usbotg-id {
147 /*
148 * Similar to pinctrl_usbotg_2, but we want it
149 * pulled down for a fixed host connection.
150 */
151 fsl,pins = <MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x13059>;
152 };
153
146 pinctrl_hummingboard_usbotg_vbus: hummingboard-usbotg-vbus { 154 pinctrl_hummingboard_usbotg_vbus: hummingboard-usbotg-vbus {
147 fsl,pins = <MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x1b0b0>; 155 fsl,pins = <MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x1b0b0>;
148 }; 156 };
@@ -178,6 +186,8 @@
178}; 186};
179 187
180&usbotg { 188&usbotg {
189 pinctrl-names = "default";
190 pinctrl-0 = <&pinctrl_hummingboard_usbotg_id>;
181 vbus-supply = <&reg_usbotg_vbus>; 191 vbus-supply = <&reg_usbotg_vbus>;
182 status = "okay"; 192 status = "okay";
183}; 193};
diff --git a/arch/arm/boot/dts/imx6q-gw51xx.dts b/arch/arm/boot/dts/imx6q-gw51xx.dts
index af4929aee075..0e1406e58eff 100644
--- a/arch/arm/boot/dts/imx6q-gw51xx.dts
+++ b/arch/arm/boot/dts/imx6q-gw51xx.dts
@@ -11,7 +11,7 @@
11 11
12/dts-v1/; 12/dts-v1/;
13#include "imx6q.dtsi" 13#include "imx6q.dtsi"
14#include "imx6qdl-gw54xx.dtsi" 14#include "imx6qdl-gw51xx.dtsi"
15 15
16/ { 16/ {
17 model = "Gateworks Ventana i.MX6 Quad GW51XX"; 17 model = "Gateworks Ventana i.MX6 Quad GW51XX";
diff --git a/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi b/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
index 25da82a03110..e8e781656b3f 100644
--- a/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
@@ -12,6 +12,19 @@
12 pinctrl-0 = <&pinctrl_cubox_i_ir>; 12 pinctrl-0 = <&pinctrl_cubox_i_ir>;
13 }; 13 };
14 14
15 pwmleds {
16 compatible = "pwm-leds";
17 pinctrl-names = "default";
18 pinctrl-0 = <&pinctrl_cubox_i_pwm1>;
19
20 front {
21 active-low;
22 label = "imx6:red:front";
23 max-brightness = <248>;
24 pwms = <&pwm1 0 50000>;
25 };
26 };
27
15 regulators { 28 regulators {
16 compatible = "simple-bus"; 29 compatible = "simple-bus";
17 30
@@ -109,6 +122,10 @@
109 >; 122 >;
110 }; 123 };
111 124
125 pinctrl_cubox_i_pwm1: cubox-i-pwm1-front-led {
126 fsl,pins = <MX6QDL_PAD_DISP0_DAT8__PWM1_OUT 0x1b0b0>;
127 };
128
112 pinctrl_cubox_i_spdif: cubox-i-spdif { 129 pinctrl_cubox_i_spdif: cubox-i-spdif {
113 fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x13091>; 130 fsl,pins = <MX6QDL_PAD_GPIO_17__SPDIF_OUT 0x13091>;
114 }; 131 };
@@ -117,6 +134,14 @@
117 fsl,pins = <MX6QDL_PAD_GPIO_0__GPIO1_IO00 0x4001b0b0>; 134 fsl,pins = <MX6QDL_PAD_GPIO_0__GPIO1_IO00 0x4001b0b0>;
118 }; 135 };
119 136
137 pinctrl_cubox_i_usbotg_id: cubox-i-usbotg-id {
138 /*
139 * The Cubox-i pulls this low, but as it's pointless
140 * leaving it as a pull-up, even if it is just 10uA.
141 */
142 fsl,pins = <MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x13059>;
143 };
144
120 pinctrl_cubox_i_usbotg_vbus: cubox-i-usbotg-vbus { 145 pinctrl_cubox_i_usbotg_vbus: cubox-i-usbotg-vbus {
121 fsl,pins = <MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x4001b0b0>; 146 fsl,pins = <MX6QDL_PAD_EIM_D22__GPIO3_IO22 0x4001b0b0>;
122 }; 147 };
@@ -153,6 +178,8 @@
153}; 178};
154 179
155&usbotg { 180&usbotg {
181 pinctrl-names = "default";
182 pinctrl-0 = <&pinctrl_cubox_i_usbotg_id>;
156 vbus-supply = <&reg_usbotg_vbus>; 183 vbus-supply = <&reg_usbotg_vbus>;
157 status = "okay"; 184 status = "okay";
158}; 185};
diff --git a/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi
index 31665adcbf39..0db15af41cb1 100644
--- a/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw51xx.dtsi
@@ -161,7 +161,7 @@
161 status = "okay"; 161 status = "okay";
162 162
163 pmic: ltc3676@3c { 163 pmic: ltc3676@3c {
164 compatible = "ltc,ltc3676"; 164 compatible = "lltc,ltc3676";
165 reg = <0x3c>; 165 reg = <0x3c>;
166 166
167 regulators { 167 regulators {
diff --git a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
index 367af3ec9435..744c8a2d81f6 100644
--- a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
@@ -220,7 +220,7 @@
220 }; 220 };
221 221
222 pmic: ltc3676@3c { 222 pmic: ltc3676@3c {
223 compatible = "ltc,ltc3676"; 223 compatible = "lltc,ltc3676";
224 reg = <0x3c>; 224 reg = <0x3c>;
225 225
226 regulators { 226 regulators {
@@ -288,7 +288,7 @@
288 codec: sgtl5000@0a { 288 codec: sgtl5000@0a {
289 compatible = "fsl,sgtl5000"; 289 compatible = "fsl,sgtl5000";
290 reg = <0x0a>; 290 reg = <0x0a>;
291 clocks = <&clks 169>; 291 clocks = <&clks 201>;
292 VDDA-supply = <&reg_1p8v>; 292 VDDA-supply = <&reg_1p8v>;
293 VDDIO-supply = <&reg_3p3v>; 293 VDDIO-supply = <&reg_3p3v>;
294 }; 294 };
diff --git a/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
index c91b5a6c769b..adf150c1be90 100644
--- a/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
@@ -234,7 +234,7 @@
234 }; 234 };
235 235
236 pmic: ltc3676@3c { 236 pmic: ltc3676@3c {
237 compatible = "ltc,ltc3676"; 237 compatible = "lltc,ltc3676";
238 reg = <0x3c>; 238 reg = <0x3c>;
239 239
240 regulators { 240 regulators {
diff --git a/arch/arm/boot/dts/imx6qdl-microsom.dtsi b/arch/arm/boot/dts/imx6qdl-microsom.dtsi
index d729d0b15f25..79eac6849d4c 100644
--- a/arch/arm/boot/dts/imx6qdl-microsom.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-microsom.dtsi
@@ -10,14 +10,6 @@
10 MX6QDL_PAD_CSI0_DAT11__UART1_RX_DATA 0x1b0b1 10 MX6QDL_PAD_CSI0_DAT11__UART1_RX_DATA 0x1b0b1
11 >; 11 >;
12 }; 12 };
13
14 pinctrl_microsom_usbotg: microsom-usbotg {
15 /*
16 * Similar to pinctrl_usbotg_2, but we want it
17 * pulled down for a fixed host connection.
18 */
19 fsl,pins = <MX6QDL_PAD_GPIO_1__USB_OTG_ID 0x13059>;
20 };
21 }; 13 };
22}; 14};
23 15
@@ -26,8 +18,3 @@
26 pinctrl-0 = <&pinctrl_microsom_uart1>; 18 pinctrl-0 = <&pinctrl_microsom_uart1>;
27 status = "okay"; 19 status = "okay";
28}; 20};
29
30&usbotg {
31 pinctrl-names = "default";
32 pinctrl-0 = <&pinctrl_microsom_usbotg>;
33};
diff --git a/arch/arm/boot/dts/imx6sl.dtsi b/arch/arm/boot/dts/imx6sl.dtsi
index 2d4e5285f3f3..57d4abe03a94 100644
--- a/arch/arm/boot/dts/imx6sl.dtsi
+++ b/arch/arm/boot/dts/imx6sl.dtsi
@@ -686,7 +686,7 @@
686 compatible = "fsl,imx6sl-fec", "fsl,imx25-fec"; 686 compatible = "fsl,imx6sl-fec", "fsl,imx25-fec";
687 reg = <0x02188000 0x4000>; 687 reg = <0x02188000 0x4000>;
688 interrupts = <0 114 IRQ_TYPE_LEVEL_HIGH>; 688 interrupts = <0 114 IRQ_TYPE_LEVEL_HIGH>;
689 clocks = <&clks IMX6SL_CLK_ENET_REF>, 689 clocks = <&clks IMX6SL_CLK_ENET>,
690 <&clks IMX6SL_CLK_ENET_REF>; 690 <&clks IMX6SL_CLK_ENET_REF>;
691 clock-names = "ipg", "ahb"; 691 clock-names = "ipg", "ahb";
692 status = "disabled"; 692 status = "disabled";
diff --git a/arch/arm/boot/dts/kirkwood-guruplug-server-plus.dts b/arch/arm/boot/dts/kirkwood-guruplug-server-plus.dts
index c5a1fc75c7a3..b2d9834bf458 100644
--- a/arch/arm/boot/dts/kirkwood-guruplug-server-plus.dts
+++ b/arch/arm/boot/dts/kirkwood-guruplug-server-plus.dts
@@ -105,7 +105,6 @@
105 compatible = "ethernet-phy-id0141.0cb0", 105 compatible = "ethernet-phy-id0141.0cb0",
106 "ethernet-phy-ieee802.3-c22"; 106 "ethernet-phy-ieee802.3-c22";
107 reg = <0>; 107 reg = <0>;
108 phy-connection-type = "rgmii-id";
109 }; 108 };
110 109
111 ethphy1: ethernet-phy@1 { 110 ethphy1: ethernet-phy@1 {
@@ -113,7 +112,6 @@
113 compatible = "ethernet-phy-id0141.0cb0", 112 compatible = "ethernet-phy-id0141.0cb0",
114 "ethernet-phy-ieee802.3-c22"; 113 "ethernet-phy-ieee802.3-c22";
115 reg = <1>; 114 reg = <1>;
116 phy-connection-type = "rgmii-id";
117 }; 115 };
118}; 116};
119 117
@@ -121,6 +119,7 @@
121 status = "okay"; 119 status = "okay";
122 ethernet0-port@0 { 120 ethernet0-port@0 {
123 phy-handle = <&ethphy0>; 121 phy-handle = <&ethphy0>;
122 phy-connection-type = "rgmii-id";
124 }; 123 };
125}; 124};
126 125
@@ -128,5 +127,6 @@
128 status = "okay"; 127 status = "okay";
129 ethernet1-port@0 { 128 ethernet1-port@0 {
130 phy-handle = <&ethphy1>; 129 phy-handle = <&ethphy1>;
130 phy-connection-type = "rgmii-id";
131 }; 131 };
132}; 132};
diff --git a/arch/arm/boot/dts/omap3-beagle-xm.dts b/arch/arm/boot/dts/omap3-beagle-xm.dts
index cf0be662297e..1becefce821b 100644
--- a/arch/arm/boot/dts/omap3-beagle-xm.dts
+++ b/arch/arm/boot/dts/omap3-beagle-xm.dts
@@ -251,6 +251,11 @@
251 codec { 251 codec {
252 }; 252 };
253 }; 253 };
254
255 twl_power: power {
256 compatible = "ti,twl4030-power-beagleboard-xm", "ti,twl4030-power-idle-osc-off";
257 ti,use_poweroff;
258 };
254 }; 259 };
255}; 260};
256 261
@@ -301,6 +306,7 @@
301}; 306};
302 307
303&uart3 { 308&uart3 {
309 interrupts-extended = <&intc 74 &omap3_pmx_core OMAP3_UART3_RX>;
304 pinctrl-names = "default"; 310 pinctrl-names = "default";
305 pinctrl-0 = <&uart3_pins>; 311 pinctrl-0 = <&uart3_pins>;
306}; 312};
diff --git a/arch/arm/boot/dts/omap3-evm-common.dtsi b/arch/arm/boot/dts/omap3-evm-common.dtsi
index 8ae8f007c8ad..c8747c7f1cc8 100644
--- a/arch/arm/boot/dts/omap3-evm-common.dtsi
+++ b/arch/arm/boot/dts/omap3-evm-common.dtsi
@@ -50,6 +50,13 @@
50 gpios = <&twl_gpio 18 GPIO_ACTIVE_LOW>; 50 gpios = <&twl_gpio 18 GPIO_ACTIVE_LOW>;
51}; 51};
52 52
53&twl {
54 twl_power: power {
55 compatible = "ti,twl4030-power-omap3-evm", "ti,twl4030-power-idle";
56 ti,use_poweroff;
57 };
58};
59
53&i2c2 { 60&i2c2 {
54 clock-frequency = <400000>; 61 clock-frequency = <400000>;
55}; 62};
diff --git a/arch/arm/boot/dts/omap3-n900.dts b/arch/arm/boot/dts/omap3-n900.dts
index ae8ae3f4f9bf..1fe45d1f75ec 100644
--- a/arch/arm/boot/dts/omap3-n900.dts
+++ b/arch/arm/boot/dts/omap3-n900.dts
@@ -351,6 +351,11 @@
351 compatible = "ti,twl4030-audio"; 351 compatible = "ti,twl4030-audio";
352 ti,enable-vibra = <1>; 352 ti,enable-vibra = <1>;
353 }; 353 };
354
355 twl_power: power {
356 compatible = "ti,twl4030-power-n900", "ti,twl4030-power-idle-osc-off";
357 ti,use_poweroff;
358 };
354}; 359};
355 360
356&twl_keypad { 361&twl_keypad {
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index 3bfda16c8b52..a4ed54988866 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -45,7 +45,6 @@
45 45
46 operating-points = < 46 operating-points = <
47 /* kHz uV */ 47 /* kHz uV */
48 500000 880000
49 1000000 1060000 48 1000000 1060000
50 1500000 1250000 49 1500000 1250000
51 >; 50 >;
diff --git a/arch/arm/boot/dts/stih415.dtsi b/arch/arm/boot/dts/stih415.dtsi
index d6f254f302fe..a0f6f75fe3b5 100644
--- a/arch/arm/boot/dts/stih415.dtsi
+++ b/arch/arm/boot/dts/stih415.dtsi
@@ -169,8 +169,8 @@
169 169
170 pinctrl-names = "default"; 170 pinctrl-names = "default";
171 pinctrl-0 = <&pinctrl_mii0>; 171 pinctrl-0 = <&pinctrl_mii0>;
172 clock-names = "stmmaceth"; 172 clock-names = "stmmaceth", "sti-ethclk";
173 clocks = <&clk_s_a1_ls CLK_GMAC0_PHY>; 173 clocks = <&clk_s_a1_ls CLK_ICN_IF_2>, <&clk_s_a1_ls CLK_GMAC0_PHY>;
174 }; 174 };
175 175
176 ethernet1: dwmac@fef08000 { 176 ethernet1: dwmac@fef08000 {
@@ -192,8 +192,8 @@
192 reset-names = "stmmaceth"; 192 reset-names = "stmmaceth";
193 pinctrl-names = "default"; 193 pinctrl-names = "default";
194 pinctrl-0 = <&pinctrl_mii1>; 194 pinctrl-0 = <&pinctrl_mii1>;
195 clock-names = "stmmaceth"; 195 clock-names = "stmmaceth", "sti-ethclk";
196 clocks = <&clk_s_a0_ls CLK_ETH1_PHY>; 196 clocks = <&clk_s_a0_ls CLK_ICN_REG>, <&clk_s_a0_ls CLK_ETH1_PHY>;
197 }; 197 };
198 198
199 rc: rc@fe518000 { 199 rc: rc@fe518000 {
diff --git a/arch/arm/boot/dts/stih416-b2020-revE.dts b/arch/arm/boot/dts/stih416-b2020e.dts
index ba0fa2caaf18..ba0fa2caaf18 100644
--- a/arch/arm/boot/dts/stih416-b2020-revE.dts
+++ b/arch/arm/boot/dts/stih416-b2020e.dts
diff --git a/arch/arm/boot/dts/stih416.dtsi b/arch/arm/boot/dts/stih416.dtsi
index 06473c5d9ea9..84758d76d064 100644
--- a/arch/arm/boot/dts/stih416.dtsi
+++ b/arch/arm/boot/dts/stih416.dtsi
@@ -175,8 +175,8 @@
175 reset-names = "stmmaceth"; 175 reset-names = "stmmaceth";
176 pinctrl-names = "default"; 176 pinctrl-names = "default";
177 pinctrl-0 = <&pinctrl_mii0>; 177 pinctrl-0 = <&pinctrl_mii0>;
178 clock-names = "stmmaceth"; 178 clock-names = "stmmaceth", "sti-ethclk";
179 clocks = <&clk_s_a1_ls CLK_GMAC0_PHY>; 179 clocks = <&clk_s_a1_ls CLK_ICN_IF_2>, <&clk_s_a1_ls CLK_GMAC0_PHY>;
180 }; 180 };
181 181
182 ethernet1: dwmac@fef08000 { 182 ethernet1: dwmac@fef08000 {
@@ -197,8 +197,8 @@
197 reset-names = "stmmaceth"; 197 reset-names = "stmmaceth";
198 pinctrl-names = "default"; 198 pinctrl-names = "default";
199 pinctrl-0 = <&pinctrl_mii1>; 199 pinctrl-0 = <&pinctrl_mii1>;
200 clock-names = "stmmaceth"; 200 clock-names = "stmmaceth", "sti-ethclk";
201 clocks = <&clk_s_a0_ls CLK_ETH1_PHY>; 201 clocks = <&clk_s_a0_ls CLK_ICN_REG>, <&clk_s_a0_ls CLK_ETH1_PHY>;
202 }; 202 };
203 203
204 rc: rc@fe518000 { 204 rc: rc@fe518000 {
diff --git a/arch/arm/common/scoop.c b/arch/arm/common/scoop.c
index 6ef146edd0cd..a20fa80776d3 100644
--- a/arch/arm/common/scoop.c
+++ b/arch/arm/common/scoop.c
@@ -182,7 +182,6 @@ static int scoop_probe(struct platform_device *pdev)
182 struct scoop_config *inf; 182 struct scoop_config *inf;
183 struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 183 struct resource *mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
184 int ret; 184 int ret;
185 int temp;
186 185
187 if (!mem) 186 if (!mem)
188 return -EINVAL; 187 return -EINVAL;
diff --git a/arch/arm/configs/bcm_defconfig b/arch/arm/configs/bcm_defconfig
index 9d13dae99125..4bf72264b175 100644
--- a/arch/arm/configs/bcm_defconfig
+++ b/arch/arm/configs/bcm_defconfig
@@ -94,10 +94,10 @@ CONFIG_BACKLIGHT_CLASS_DEVICE=y
94CONFIG_BACKLIGHT_PWM=y 94CONFIG_BACKLIGHT_PWM=y
95# CONFIG_USB_SUPPORT is not set 95# CONFIG_USB_SUPPORT is not set
96CONFIG_MMC=y 96CONFIG_MMC=y
97CONFIG_MMC_UNSAFE_RESUME=y
98CONFIG_MMC_BLOCK_MINORS=32 97CONFIG_MMC_BLOCK_MINORS=32
99CONFIG_MMC_TEST=y 98CONFIG_MMC_TEST=y
100CONFIG_MMC_SDHCI=y 99CONFIG_MMC_SDHCI=y
100CONFIG_MMC_SDHCI_PLTFM=y
101CONFIG_MMC_SDHCI_BCM_KONA=y 101CONFIG_MMC_SDHCI_BCM_KONA=y
102CONFIG_NEW_LEDS=y 102CONFIG_NEW_LEDS=y
103CONFIG_LEDS_CLASS=y 103CONFIG_LEDS_CLASS=y
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index ef8815327e5b..59b7e45142d8 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -186,6 +186,7 @@ CONFIG_VIDEO_MX3=y
186CONFIG_V4L_MEM2MEM_DRIVERS=y 186CONFIG_V4L_MEM2MEM_DRIVERS=y
187CONFIG_VIDEO_CODA=y 187CONFIG_VIDEO_CODA=y
188CONFIG_SOC_CAMERA_OV2640=y 188CONFIG_SOC_CAMERA_OV2640=y
189CONFIG_IMX_IPUV3_CORE=y
189CONFIG_DRM=y 190CONFIG_DRM=y
190CONFIG_DRM_PANEL_SIMPLE=y 191CONFIG_DRM_PANEL_SIMPLE=y
191CONFIG_BACKLIGHT_LCD_SUPPORT=y 192CONFIG_BACKLIGHT_LCD_SUPPORT=y
diff --git a/arch/arm/configs/multi_v7_defconfig b/arch/arm/configs/multi_v7_defconfig
index 17d9462b9fb9..534836497998 100644
--- a/arch/arm/configs/multi_v7_defconfig
+++ b/arch/arm/configs/multi_v7_defconfig
@@ -223,12 +223,12 @@ CONFIG_POWER_RESET_GPIO=y
223CONFIG_POWER_RESET_SUN6I=y 223CONFIG_POWER_RESET_SUN6I=y
224CONFIG_SENSORS_LM90=y 224CONFIG_SENSORS_LM90=y
225CONFIG_THERMAL=y 225CONFIG_THERMAL=y
226CONFIG_DOVE_THERMAL=y
227CONFIG_ARMADA_THERMAL=y 226CONFIG_ARMADA_THERMAL=y
228CONFIG_WATCHDOG=y 227CONFIG_WATCHDOG=y
229CONFIG_ORION_WATCHDOG=y 228CONFIG_ORION_WATCHDOG=y
230CONFIG_SUNXI_WATCHDOG=y 229CONFIG_SUNXI_WATCHDOG=y
231CONFIG_MFD_AS3722=y 230CONFIG_MFD_AS3722=y
231CONFIG_MFD_BCM590XX=y
232CONFIG_MFD_CROS_EC=y 232CONFIG_MFD_CROS_EC=y
233CONFIG_MFD_CROS_EC_SPI=y 233CONFIG_MFD_CROS_EC_SPI=y
234CONFIG_MFD_MAX8907=y 234CONFIG_MFD_MAX8907=y
@@ -240,6 +240,7 @@ CONFIG_MFD_TPS65910=y
240CONFIG_REGULATOR_VIRTUAL_CONSUMER=y 240CONFIG_REGULATOR_VIRTUAL_CONSUMER=y
241CONFIG_REGULATOR_AB8500=y 241CONFIG_REGULATOR_AB8500=y
242CONFIG_REGULATOR_AS3722=y 242CONFIG_REGULATOR_AS3722=y
243CONFIG_REGULATOR_BCM590XX=y
243CONFIG_REGULATOR_GPIO=y 244CONFIG_REGULATOR_GPIO=y
244CONFIG_REGULATOR_MAX8907=y 245CONFIG_REGULATOR_MAX8907=y
245CONFIG_REGULATOR_PALMAS=y 246CONFIG_REGULATOR_PALMAS=y
@@ -353,6 +354,7 @@ CONFIG_MFD_NVEC=y
353CONFIG_KEYBOARD_NVEC=y 354CONFIG_KEYBOARD_NVEC=y
354CONFIG_SERIO_NVEC_PS2=y 355CONFIG_SERIO_NVEC_PS2=y
355CONFIG_NVEC_POWER=y 356CONFIG_NVEC_POWER=y
357CONFIG_QCOM_GSBI=y
356CONFIG_COMMON_CLK_QCOM=y 358CONFIG_COMMON_CLK_QCOM=y
357CONFIG_MSM_GCC_8660=y 359CONFIG_MSM_GCC_8660=y
358CONFIG_MSM_MMCC_8960=y 360CONFIG_MSM_MMCC_8960=y
diff --git a/arch/arm/configs/mvebu_v7_defconfig b/arch/arm/configs/mvebu_v7_defconfig
index e11170e37442..b0bfefa23902 100644
--- a/arch/arm/configs/mvebu_v7_defconfig
+++ b/arch/arm/configs/mvebu_v7_defconfig
@@ -14,6 +14,7 @@ CONFIG_MACH_ARMADA_370=y
14CONFIG_MACH_ARMADA_375=y 14CONFIG_MACH_ARMADA_375=y
15CONFIG_MACH_ARMADA_38X=y 15CONFIG_MACH_ARMADA_38X=y
16CONFIG_MACH_ARMADA_XP=y 16CONFIG_MACH_ARMADA_XP=y
17CONFIG_MACH_DOVE=y
17CONFIG_NEON=y 18CONFIG_NEON=y
18# CONFIG_CACHE_L2X0 is not set 19# CONFIG_CACHE_L2X0 is not set
19# CONFIG_SWP_EMULATE is not set 20# CONFIG_SWP_EMULATE is not set
@@ -52,6 +53,7 @@ CONFIG_INPUT_EVDEV=y
52CONFIG_KEYBOARD_GPIO=y 53CONFIG_KEYBOARD_GPIO=y
53CONFIG_SERIAL_8250=y 54CONFIG_SERIAL_8250=y
54CONFIG_SERIAL_8250_CONSOLE=y 55CONFIG_SERIAL_8250_CONSOLE=y
56CONFIG_SERIAL_OF_PLATFORM=y
55CONFIG_I2C=y 57CONFIG_I2C=y
56CONFIG_SPI=y 58CONFIG_SPI=y
57CONFIG_SPI_ORION=y 59CONFIG_SPI_ORION=y
diff --git a/arch/arm/include/asm/mcpm.h b/arch/arm/include/asm/mcpm.h
index d9702eb0b02b..94060adba174 100644
--- a/arch/arm/include/asm/mcpm.h
+++ b/arch/arm/include/asm/mcpm.h
@@ -208,8 +208,6 @@ struct sync_struct {
208 struct mcpm_sync_struct clusters[MAX_NR_CLUSTERS]; 208 struct mcpm_sync_struct clusters[MAX_NR_CLUSTERS];
209}; 209};
210 210
211extern unsigned long sync_phys; /* physical address of *mcpm_sync */
212
213void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster); 211void __mcpm_cpu_going_down(unsigned int cpu, unsigned int cluster);
214void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster); 212void __mcpm_cpu_down(unsigned int cpu, unsigned int cluster);
215void __mcpm_outbound_leave_critical(unsigned int cluster, int state); 213void __mcpm_outbound_leave_critical(unsigned int cluster, int state);
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index f989d7c22dc5..e4e4208a9130 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -114,8 +114,14 @@ static inline struct thread_info *current_thread_info(void)
 	((unsigned long)(task_thread_info(tsk)->cpu_context.pc))
 #define thread_saved_sp(tsk)	\
 	((unsigned long)(task_thread_info(tsk)->cpu_context.sp))
+
+#ifndef CONFIG_THUMB2_KERNEL
 #define thread_saved_fp(tsk)	\
 	((unsigned long)(task_thread_info(tsk)->cpu_context.fp))
+#else
+#define thread_saved_fp(tsk)	\
+	((unsigned long)(task_thread_info(tsk)->cpu_context.r7))
+#endif
 
 extern void crunch_task_disable(struct thread_info *);
 extern void crunch_task_copy(struct thread_info *, void *);
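
The new #else branch accounts for the fact that Thumb-2 code generation uses r7 as the frame pointer where ARM code uses r11 (fp), so the saved-register slot that holds the frame pointer differs between the two kernel builds. As a hedged illustration of that register split (not kernel code):

    /* Illustration only: read whichever register serves as the frame
     * pointer for the instruction set this file is compiled for. */
    static inline unsigned long read_frame_pointer(void)
    {
            unsigned long fp;
    #ifdef __thumb2__
            asm("mov %0, r7" : "=r" (fp));   /* Thumb-2 frame pointer */
    #else
            asm("mov %0, fp" : "=r" (fp));   /* ARM frame pointer (r11) */
    #endif
            return fp;
    }
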
diff --git a/arch/arm/kernel/kprobes-test-arm.c b/arch/arm/kernel/kprobes-test-arm.c
index 9db4b659d03e..cb1424240ff6 100644
--- a/arch/arm/kernel/kprobes-test-arm.c
+++ b/arch/arm/kernel/kprobes-test-arm.c
@@ -74,8 +74,6 @@ void kprobe_arm_test_cases(void)
74 TEST_RRR( op "lt" s " r11, r",11,VAL1,", r",14,N(val),", asr r",7, 6,"")\ 74 TEST_RRR( op "lt" s " r11, r",11,VAL1,", r",14,N(val),", asr r",7, 6,"")\
75 TEST_RR( op "gt" s " r12, r13" ", r",14,val, ", ror r",14,7,"")\ 75 TEST_RR( op "gt" s " r12, r13" ", r",14,val, ", ror r",14,7,"")\
76 TEST_RR( op "le" s " r14, r",0, val, ", r13" ", lsl r",14,8,"")\ 76 TEST_RR( op "le" s " r14, r",0, val, ", r13" ", lsl r",14,8,"")\
77 TEST_RR( op s " r12, pc" ", r",14,val, ", ror r",14,7,"")\
78 TEST_RR( op s " r14, r",0, val, ", pc" ", lsl r",14,8,"")\
79 TEST_R( op "eq" s " r0, r",11,VAL1,", #0xf5") \ 77 TEST_R( op "eq" s " r0, r",11,VAL1,", #0xf5") \
80 TEST_R( op "ne" s " r11, r",0, VAL1,", #0xf5000000") \ 78 TEST_R( op "ne" s " r11, r",0, VAL1,", #0xf5000000") \
81 TEST_R( op s " r7, r",8, VAL2,", #0x000af000") \ 79 TEST_R( op s " r7, r",8, VAL2,", #0x000af000") \
@@ -103,8 +101,6 @@ void kprobe_arm_test_cases(void)
103 TEST_RRR( op "ge r",11,VAL1,", r",14,N(val),", asr r",7, 6,"") \ 101 TEST_RRR( op "ge r",11,VAL1,", r",14,N(val),", asr r",7, 6,"") \
104 TEST_RR( op "le r13" ", r",14,val, ", ror r",14,7,"") \ 102 TEST_RR( op "le r13" ", r",14,val, ", ror r",14,7,"") \
105 TEST_RR( op "gt r",0, val, ", r13" ", lsl r",14,8,"") \ 103 TEST_RR( op "gt r",0, val, ", r13" ", lsl r",14,8,"") \
106 TEST_RR( op " pc" ", r",14,val, ", ror r",14,7,"") \
107 TEST_RR( op " r",0, val, ", pc" ", lsl r",14,8,"") \
108 TEST_R( op "eq r",11,VAL1,", #0xf5") \ 104 TEST_R( op "eq r",11,VAL1,", #0xf5") \
109 TEST_R( op "ne r",0, VAL1,", #0xf5000000") \ 105 TEST_R( op "ne r",0, VAL1,", #0xf5000000") \
110 TEST_R( op " r",8, VAL2,", #0x000af000") 106 TEST_R( op " r",8, VAL2,", #0x000af000")
@@ -125,7 +121,6 @@ void kprobe_arm_test_cases(void)
125 TEST_RR( op "ge" s " r11, r",11,N(val),", asr r",7, 6,"") \ 121 TEST_RR( op "ge" s " r11, r",11,N(val),", asr r",7, 6,"") \
126 TEST_RR( op "lt" s " r12, r",11,val, ", ror r",14,7,"") \ 122 TEST_RR( op "lt" s " r12, r",11,val, ", ror r",14,7,"") \
127 TEST_R( op "gt" s " r14, r13" ", lsl r",14,8,"") \ 123 TEST_R( op "gt" s " r14, r13" ", lsl r",14,8,"") \
128 TEST_R( op "le" s " r14, pc" ", lsl r",14,8,"") \
129 TEST( op "eq" s " r0, #0xf5") \ 124 TEST( op "eq" s " r0, #0xf5") \
130 TEST( op "ne" s " r11, #0xf5000000") \ 125 TEST( op "ne" s " r11, #0xf5000000") \
131 TEST( op s " r7, #0x000af000") \ 126 TEST( op s " r7, #0x000af000") \
@@ -159,12 +154,19 @@ void kprobe_arm_test_cases(void)
159 TEST_SUPPORTED("cmp pc, #0x1000"); 154 TEST_SUPPORTED("cmp pc, #0x1000");
160 TEST_SUPPORTED("cmp sp, #0x1000"); 155 TEST_SUPPORTED("cmp sp, #0x1000");
161 156
162 /* Data-processing with PC as shift*/ 157 /* Data-processing with PC and a shift count in a register */
163 TEST_UNSUPPORTED(__inst_arm(0xe15c0f1e) " @ cmp r12, r14, asl pc") 158 TEST_UNSUPPORTED(__inst_arm(0xe15c0f1e) " @ cmp r12, r14, asl pc")
164 TEST_UNSUPPORTED(__inst_arm(0xe1a0cf1e) " @ mov r12, r14, asl pc") 159 TEST_UNSUPPORTED(__inst_arm(0xe1a0cf1e) " @ mov r12, r14, asl pc")
165 TEST_UNSUPPORTED(__inst_arm(0xe08caf1e) " @ add r10, r12, r14, asl pc") 160 TEST_UNSUPPORTED(__inst_arm(0xe08caf1e) " @ add r10, r12, r14, asl pc")
166 161 TEST_UNSUPPORTED(__inst_arm(0xe151021f) " @ cmp r1, pc, lsl r2")
167 /* Data-processing with PC as shift*/ 162 TEST_UNSUPPORTED(__inst_arm(0xe17f0211) " @ cmn pc, r1, lsl r2")
163 TEST_UNSUPPORTED(__inst_arm(0xe1a0121f) " @ mov r1, pc, lsl r2")
164 TEST_UNSUPPORTED(__inst_arm(0xe1a0f211) " @ mov pc, r1, lsl r2")
165 TEST_UNSUPPORTED(__inst_arm(0xe042131f) " @ sub r1, r2, pc, lsl r3")
166 TEST_UNSUPPORTED(__inst_arm(0xe1cf1312) " @ bic r1, pc, r2, lsl r3")
167 TEST_UNSUPPORTED(__inst_arm(0xe081f312) " @ add pc, r1, r2, lsl r3")
168
169 /* Data-processing with PC as a target and status registers updated */
168 TEST_UNSUPPORTED("movs pc, r1") 170 TEST_UNSUPPORTED("movs pc, r1")
169 TEST_UNSUPPORTED("movs pc, r1, lsl r2") 171 TEST_UNSUPPORTED("movs pc, r1, lsl r2")
170 TEST_UNSUPPORTED("movs pc, #0x10000") 172 TEST_UNSUPPORTED("movs pc, #0x10000")
@@ -187,14 +189,14 @@ void kprobe_arm_test_cases(void)
187 TEST_BF_R ("add pc, pc, r",14,2f-1f-8,"") 189 TEST_BF_R ("add pc, pc, r",14,2f-1f-8,"")
188 TEST_BF_R ("add pc, r",14,2f-1f-8,", pc") 190 TEST_BF_R ("add pc, r",14,2f-1f-8,", pc")
189 TEST_BF_R ("mov pc, r",0,2f,"") 191 TEST_BF_R ("mov pc, r",0,2f,"")
190 TEST_BF_RR("mov pc, r",0,2f,", asl r",1,0,"") 192 TEST_BF_R ("add pc, pc, r",14,(2f-1f-8)*2,", asr #1")
191 TEST_BB( "sub pc, pc, #1b-2b+8") 193 TEST_BB( "sub pc, pc, #1b-2b+8")
192#if __LINUX_ARM_ARCH__ == 6 && !defined(CONFIG_CPU_V7) 194#if __LINUX_ARM_ARCH__ == 6 && !defined(CONFIG_CPU_V7)
193 TEST_BB( "sub pc, pc, #1b-2b+8-2") /* UNPREDICTABLE before and after ARMv6 */ 195 TEST_BB( "sub pc, pc, #1b-2b+8-2") /* UNPREDICTABLE before and after ARMv6 */
194#endif 196#endif
195 TEST_BB_R( "sub pc, pc, r",14, 1f-2f+8,"") 197 TEST_BB_R( "sub pc, pc, r",14, 1f-2f+8,"")
196 TEST_BB_R( "rsb pc, r",14,1f-2f+8,", pc") 198 TEST_BB_R( "rsb pc, r",14,1f-2f+8,", pc")
197 TEST_RR( "add pc, pc, r",10,-2,", asl r",11,1,"") 199 TEST_R( "add pc, pc, r",10,-2,", asl #1")
198#ifdef CONFIG_THUMB2_KERNEL 200#ifdef CONFIG_THUMB2_KERNEL
199 TEST_ARM_TO_THUMB_INTERWORK_R("add pc, pc, r",0,3f-1f-8+1,"") 201 TEST_ARM_TO_THUMB_INTERWORK_R("add pc, pc, r",0,3f-1f-8+1,"")
200 TEST_ARM_TO_THUMB_INTERWORK_R("sub pc, r",0,3f+8+1,", #8") 202 TEST_ARM_TO_THUMB_INTERWORK_R("sub pc, r",0,3f+8+1,", #8")
@@ -216,6 +218,7 @@ void kprobe_arm_test_cases(void)
216 TEST_BB_R("bx r",7,2f,"") 218 TEST_BB_R("bx r",7,2f,"")
217 TEST_BF_R("bxeq r",14,2f,"") 219 TEST_BF_R("bxeq r",14,2f,"")
218 220
221#if __LINUX_ARM_ARCH__ >= 5
219 TEST_R("clz r0, r",0, 0x0,"") 222 TEST_R("clz r0, r",0, 0x0,"")
220 TEST_R("clzeq r7, r",14,0x1,"") 223 TEST_R("clzeq r7, r",14,0x1,"")
221 TEST_R("clz lr, r",7, 0xffffffff,"") 224 TEST_R("clz lr, r",7, 0xffffffff,"")
@@ -337,6 +340,7 @@ void kprobe_arm_test_cases(void)
337 TEST_UNSUPPORTED(__inst_arm(0xe16f02e1) " @ smultt pc, r1, r2") 340 TEST_UNSUPPORTED(__inst_arm(0xe16f02e1) " @ smultt pc, r1, r2")
338 TEST_UNSUPPORTED(__inst_arm(0xe16002ef) " @ smultt r0, pc, r2") 341 TEST_UNSUPPORTED(__inst_arm(0xe16002ef) " @ smultt r0, pc, r2")
339 TEST_UNSUPPORTED(__inst_arm(0xe1600fe1) " @ smultt r0, r1, pc") 342 TEST_UNSUPPORTED(__inst_arm(0xe1600fe1) " @ smultt r0, r1, pc")
343#endif
340 344
341 TEST_GROUP("Multiply and multiply-accumulate") 345 TEST_GROUP("Multiply and multiply-accumulate")
342 346
@@ -559,6 +563,7 @@ void kprobe_arm_test_cases(void)
559 TEST_UNSUPPORTED("ldrsht r1, [r2], #48") 563 TEST_UNSUPPORTED("ldrsht r1, [r2], #48")
560#endif 564#endif
561 565
566#if __LINUX_ARM_ARCH__ >= 5
562 TEST_RPR( "strd r",0, VAL1,", [r",1, 48,", -r",2,24,"]") 567 TEST_RPR( "strd r",0, VAL1,", [r",1, 48,", -r",2,24,"]")
563 TEST_RPR( "strccd r",8, VAL2,", [r",13,0, ", r",12,48,"]") 568 TEST_RPR( "strccd r",8, VAL2,", [r",13,0, ", r",12,48,"]")
564 TEST_RPR( "strd r",4, VAL1,", [r",2, 24,", r",3, 48,"]!") 569 TEST_RPR( "strd r",4, VAL1,", [r",2, 24,", r",3, 48,"]!")
@@ -595,6 +600,7 @@ void kprobe_arm_test_cases(void)
595 TEST_UNSUPPORTED(__inst_arm(0xe1efc3d0) " @ ldrd r12, [pc, #48]!") 600 TEST_UNSUPPORTED(__inst_arm(0xe1efc3d0) " @ ldrd r12, [pc, #48]!")
596 TEST_UNSUPPORTED(__inst_arm(0xe0c9f3d0) " @ ldrd pc, [r9], #48") 601 TEST_UNSUPPORTED(__inst_arm(0xe0c9f3d0) " @ ldrd pc, [r9], #48")
597 TEST_UNSUPPORTED(__inst_arm(0xe0c9e3d0) " @ ldrd lr, [r9], #48") 602 TEST_UNSUPPORTED(__inst_arm(0xe0c9e3d0) " @ ldrd lr, [r9], #48")
603#endif
598 604
599 TEST_GROUP("Miscellaneous") 605 TEST_GROUP("Miscellaneous")
600 606
@@ -1227,7 +1233,9 @@ void kprobe_arm_test_cases(void)
1227 TEST_COPROCESSOR( "mrc"two" 0, 0, r0, cr0, cr0, 0") 1233 TEST_COPROCESSOR( "mrc"two" 0, 0, r0, cr0, cr0, 0")
1228 1234
1229 COPROCESSOR_INSTRUCTIONS_ST_LD("",e) 1235 COPROCESSOR_INSTRUCTIONS_ST_LD("",e)
1236#if __LINUX_ARM_ARCH__ >= 5
1230 COPROCESSOR_INSTRUCTIONS_MC_MR("",e) 1237 COPROCESSOR_INSTRUCTIONS_MC_MR("",e)
1238#endif
1231 TEST_UNSUPPORTED("svc 0") 1239 TEST_UNSUPPORTED("svc 0")
1232 TEST_UNSUPPORTED("svc 0xffffff") 1240 TEST_UNSUPPORTED("svc 0xffffff")
1233 1241
@@ -1287,7 +1295,9 @@ void kprobe_arm_test_cases(void)
1287 TEST( "blx __dummy_thumb_subroutine_odd") 1295 TEST( "blx __dummy_thumb_subroutine_odd")
1288#endif /* __LINUX_ARM_ARCH__ >= 6 */ 1296#endif /* __LINUX_ARM_ARCH__ >= 6 */
1289 1297
1298#if __LINUX_ARM_ARCH__ >= 5
1290 COPROCESSOR_INSTRUCTIONS_ST_LD("2",f) 1299 COPROCESSOR_INSTRUCTIONS_ST_LD("2",f)
1300#endif
1291#if __LINUX_ARM_ARCH__ >= 6 1301#if __LINUX_ARM_ARCH__ >= 6
1292 COPROCESSOR_INSTRUCTIONS_MC_MR("2",f) 1302 COPROCESSOR_INSTRUCTIONS_MC_MR("2",f)
1293#endif 1303#endif
diff --git a/arch/arm/kernel/kprobes-test.c b/arch/arm/kernel/kprobes-test.c
index 379639998d5a..08d731294bcd 100644
--- a/arch/arm/kernel/kprobes-test.c
+++ b/arch/arm/kernel/kprobes-test.c
@@ -225,6 +225,7 @@ static int pre_handler_called;
225static int post_handler_called; 225static int post_handler_called;
226static int jprobe_func_called; 226static int jprobe_func_called;
227static int kretprobe_handler_called; 227static int kretprobe_handler_called;
228static int tests_failed;
228 229
229#define FUNC_ARG1 0x12345678 230#define FUNC_ARG1 0x12345678
230#define FUNC_ARG2 0xabcdef 231#define FUNC_ARG2 0xabcdef
@@ -461,6 +462,13 @@ static int run_api_tests(long (*func)(long, long))
461 462
462 pr_info(" jprobe\n"); 463 pr_info(" jprobe\n");
463 ret = test_jprobe(func); 464 ret = test_jprobe(func);
465#if defined(CONFIG_THUMB2_KERNEL) && !defined(MODULE)
466 if (ret == -EINVAL) {
467 pr_err("FAIL: Known longtime bug with jprobe on Thumb kernels\n");
468 tests_failed = ret;
469 ret = 0;
470 }
471#endif
464 if (ret < 0) 472 if (ret < 0)
465 return ret; 473 return ret;
466 474
@@ -1672,6 +1680,8 @@ static int __init run_all_tests(void)
1672 1680
1673out: 1681out:
1674 if (ret == 0) 1682 if (ret == 0)
1683 ret = tests_failed;
1684 if (ret == 0)
1675 pr_info("Finished kprobe tests OK\n"); 1685 pr_info("Finished kprobe tests OK\n");
1676 else 1686 else
1677 pr_err("kprobe tests failed\n"); 1687 pr_err("kprobe tests failed\n");
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 2037f7205987..1d37568c547a 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -1924,7 +1924,7 @@ static int krait_pmu_get_event_idx(struct pmu_hw_events *cpuc,
 				  struct perf_event *event)
 {
 	int idx;
-	int bit;
+	int bit = -1;
 	unsigned int prefix;
 	unsigned int region;
 	unsigned int code;
@@ -1953,7 +1953,7 @@ static int krait_pmu_get_event_idx(struct pmu_hw_events *cpuc,
 	}
 
 	idx = armv7pmu_get_event_idx(cpuc, event);
-	if (idx < 0 && krait_event)
+	if (idx < 0 && bit >= 0)
 		clear_bit(bit, cpuc->used_mask);
 
 	return idx;
diff --git a/arch/arm/kernel/probes-arm.c b/arch/arm/kernel/probes-arm.c
index 51a13a027989..8eaef81d8344 100644
--- a/arch/arm/kernel/probes-arm.c
+++ b/arch/arm/kernel/probes-arm.c
@@ -341,12 +341,12 @@ static const union decode_item arm_cccc_000x_table[] = {
341 /* CMP (reg-shift reg) cccc 0001 0101 xxxx xxxx xxxx 0xx1 xxxx */ 341 /* CMP (reg-shift reg) cccc 0001 0101 xxxx xxxx xxxx 0xx1 xxxx */
342 /* CMN (reg-shift reg) cccc 0001 0111 xxxx xxxx xxxx 0xx1 xxxx */ 342 /* CMN (reg-shift reg) cccc 0001 0111 xxxx xxxx xxxx 0xx1 xxxx */
343 DECODE_EMULATEX (0x0f900090, 0x01100010, PROBES_DATA_PROCESSING_REG, 343 DECODE_EMULATEX (0x0f900090, 0x01100010, PROBES_DATA_PROCESSING_REG,
344 REGS(ANY, 0, NOPC, 0, ANY)), 344 REGS(NOPC, 0, NOPC, 0, NOPC)),
345 345
346 /* MOV (reg-shift reg) cccc 0001 101x xxxx xxxx xxxx 0xx1 xxxx */ 346 /* MOV (reg-shift reg) cccc 0001 101x xxxx xxxx xxxx 0xx1 xxxx */
347 /* MVN (reg-shift reg) cccc 0001 111x xxxx xxxx xxxx 0xx1 xxxx */ 347 /* MVN (reg-shift reg) cccc 0001 111x xxxx xxxx xxxx 0xx1 xxxx */
348 DECODE_EMULATEX (0x0fa00090, 0x01a00010, PROBES_DATA_PROCESSING_REG, 348 DECODE_EMULATEX (0x0fa00090, 0x01a00010, PROBES_DATA_PROCESSING_REG,
349 REGS(0, ANY, NOPC, 0, ANY)), 349 REGS(0, NOPC, NOPC, 0, NOPC)),
350 350
351 /* AND (reg-shift reg) cccc 0000 000x xxxx xxxx xxxx 0xx1 xxxx */ 351 /* AND (reg-shift reg) cccc 0000 000x xxxx xxxx xxxx 0xx1 xxxx */
352 /* EOR (reg-shift reg) cccc 0000 001x xxxx xxxx xxxx 0xx1 xxxx */ 352 /* EOR (reg-shift reg) cccc 0000 001x xxxx xxxx xxxx 0xx1 xxxx */
@@ -359,7 +359,7 @@ static const union decode_item arm_cccc_000x_table[] = {
359 /* ORR (reg-shift reg) cccc 0001 100x xxxx xxxx xxxx 0xx1 xxxx */ 359 /* ORR (reg-shift reg) cccc 0001 100x xxxx xxxx xxxx 0xx1 xxxx */
360 /* BIC (reg-shift reg) cccc 0001 110x xxxx xxxx xxxx 0xx1 xxxx */ 360 /* BIC (reg-shift reg) cccc 0001 110x xxxx xxxx xxxx 0xx1 xxxx */
361 DECODE_EMULATEX (0x0e000090, 0x00000010, PROBES_DATA_PROCESSING_REG, 361 DECODE_EMULATEX (0x0e000090, 0x00000010, PROBES_DATA_PROCESSING_REG,
362 REGS(ANY, ANY, NOPC, 0, ANY)), 362 REGS(NOPC, NOPC, NOPC, 0, NOPC)),
363 363
364 DECODE_END 364 DECODE_END
365}; 365};
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 0dd3b79b15c3..0c27ed6f3f23 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -908,7 +908,7 @@ enum ptrace_syscall_dir {
908 PTRACE_SYSCALL_EXIT, 908 PTRACE_SYSCALL_EXIT,
909}; 909};
910 910
911static int tracehook_report_syscall(struct pt_regs *regs, 911static void tracehook_report_syscall(struct pt_regs *regs,
912 enum ptrace_syscall_dir dir) 912 enum ptrace_syscall_dir dir)
913{ 913{
914 unsigned long ip; 914 unsigned long ip;
@@ -926,7 +926,6 @@ static int tracehook_report_syscall(struct pt_regs *regs,
926 current_thread_info()->syscall = -1; 926 current_thread_info()->syscall = -1;
927 927
928 regs->ARM_ip = ip; 928 regs->ARM_ip = ip;
929 return current_thread_info()->syscall;
930} 929}
931 930
932asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno) 931asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
@@ -938,7 +937,9 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
938 return -1; 937 return -1;
939 938
940 if (test_thread_flag(TIF_SYSCALL_TRACE)) 939 if (test_thread_flag(TIF_SYSCALL_TRACE))
941 scno = tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER); 940 tracehook_report_syscall(regs, PTRACE_SYSCALL_ENTER);
941
942 scno = current_thread_info()->syscall;
942 943
943 if (test_thread_flag(TIF_SYSCALL_TRACEPOINT)) 944 if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
944 trace_sys_enter(regs, scno); 945 trace_sys_enter(regs, scno);
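
Making tracehook_report_syscall() return void forces the caller to re-read the syscall number from thread_info afterwards, which is also where a tracer's rewrite of the number lands. A toy model of that "hook mutates shared state, caller re-reads it" flow (userspace C, illustrative only):

    #include <stdio.h>

    struct thread_info { int syscall; };

    static struct thread_info ti;

    /* The report hook returns nothing; a "tracer" may rewrite ti.syscall. */
    static void report_syscall(void)
    {
        ti.syscall = -1;                     /* tracer asked to skip this syscall */
    }

    static int trace_enter(int scno, int traced)
    {
        ti.syscall = scno;

        if (traced)
            report_syscall();

        return ti.syscall;                   /* always re-read after the hook */
    }

    int main(void)
    {
        printf("untraced: %d, traced: %d\n", trace_enter(42, 0), trace_enter(42, 1));
        return 0;
    }
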
diff --git a/arch/arm/kernel/topology.c b/arch/arm/kernel/topology.c
index 9d853189028b..e35d880f9773 100644
--- a/arch/arm/kernel/topology.c
+++ b/arch/arm/kernel/topology.c
@@ -275,7 +275,7 @@ void store_cpu_topology(unsigned int cpuid)
275 cpu_topology[cpuid].socket_id, mpidr); 275 cpu_topology[cpuid].socket_id, mpidr);
276} 276}
277 277
278static inline const int cpu_corepower_flags(void) 278static inline int cpu_corepower_flags(void)
279{ 279{
280 return SD_SHARE_PKG_RESOURCES | SD_SHARE_POWERDOMAIN; 280 return SD_SHARE_PKG_RESOURCES | SD_SHARE_POWERDOMAIN;
281} 281}
diff --git a/arch/arm/mach-exynos/exynos.c b/arch/arm/mach-exynos/exynos.c
index f38cf7c110cc..46d893fcbe85 100644
--- a/arch/arm/mach-exynos/exynos.c
+++ b/arch/arm/mach-exynos/exynos.c
@@ -173,10 +173,8 @@ static struct platform_device exynos_cpuidle = {
173 173
174void __init exynos_cpuidle_init(void) 174void __init exynos_cpuidle_init(void)
175{ 175{
176 if (soc_is_exynos5440()) 176 if (soc_is_exynos4210() || soc_is_exynos5250())
177 return; 177 platform_device_register(&exynos_cpuidle);
178
179 platform_device_register(&exynos_cpuidle);
180} 178}
181 179
182void __init exynos_cpufreq_init(void) 180void __init exynos_cpufreq_init(void)
@@ -297,7 +295,7 @@ static void __init exynos_dt_machine_init(void)
297 * This is called from smp_prepare_cpus if we've built for SMP, but 295 * This is called from smp_prepare_cpus if we've built for SMP, but
298 * we still need to set it up for PM and firmware ops if not. 296 * we still need to set it up for PM and firmware ops if not.
299 */ 297 */
300 if (!IS_ENABLED(SMP)) 298 if (!IS_ENABLED(CONFIG_SMP))
301 exynos_sysram_init(); 299 exynos_sysram_init();
302 300
303 exynos_cpuidle_init(); 301 exynos_cpuidle_init();
diff --git a/arch/arm/mach-exynos/firmware.c b/arch/arm/mach-exynos/firmware.c
index eb91d2350f8c..e8797bb78871 100644
--- a/arch/arm/mach-exynos/firmware.c
+++ b/arch/arm/mach-exynos/firmware.c
@@ -57,8 +57,13 @@ static int exynos_set_cpu_boot_addr(int cpu, unsigned long boot_addr)
57 57
58 boot_reg = sysram_ns_base_addr + 0x1c; 58 boot_reg = sysram_ns_base_addr + 0x1c;
59 59
60 if (!soc_is_exynos4212() && !soc_is_exynos3250()) 60 /*
61 boot_reg += 4*cpu; 61 * Almost all Exynos-series of SoCs that run in secure mode don't need
62 * additional offset for every CPU, with Exynos4412 being the only
63 * exception.
64 */
65 if (soc_is_exynos4412())
66 boot_reg += 4 * cpu;
62 67
63 __raw_writel(boot_addr, boot_reg); 68 __raw_writel(boot_addr, boot_reg);
64 return 0; 69 return 0;
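
Per the new comment, only Exynos4412 keeps one boot-address word per CPU; every other secure-mode SoC shares a single slot, so the per-CPU offset is applied in that one case only. A trivial sketch of the offset calculation (the 0x1c base comes from the hunk; the rest of the layout is illustrative):

    #include <stdio.h>

    /* One shared boot-address slot, except on Exynos4412 which keeps a
     * word per CPU (illustrative, not the full SYSRAM layout). */
    static unsigned long boot_reg_offset(int is_exynos4412, int cpu)
    {
        unsigned long off = 0x1c;

        if (is_exynos4412)
            off += 4UL * cpu;
        return off;
    }

    int main(void)
    {
        printf("shared cpu1: %#lx, 4412 cpu1: %#lx\n",
               boot_reg_offset(0, 1), boot_reg_offset(1, 1));
        return 0;
    }
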
diff --git a/arch/arm/mach-exynos/hotplug.c b/arch/arm/mach-exynos/hotplug.c
index 69fa48397394..920a4baa53cd 100644
--- a/arch/arm/mach-exynos/hotplug.c
+++ b/arch/arm/mach-exynos/hotplug.c
@@ -40,21 +40,17 @@ static inline void cpu_leave_lowpower(void)
40 40
41static inline void platform_do_lowpower(unsigned int cpu, int *spurious) 41static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
42{ 42{
43 u32 mpidr = cpu_logical_map(cpu);
44 u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
45
43 for (;;) { 46 for (;;) {
44 47
45 /* make cpu1 to be turned off at next WFI command */ 48 /* Turn the CPU off on next WFI instruction. */
46 if (cpu == 1) 49 exynos_cpu_power_down(core_id);
47 exynos_cpu_power_down(cpu);
48 50
49 /* 51 wfi();
50 * here's the WFI
51 */
52 asm(".word 0xe320f003\n"
53 :
54 :
55 : "memory", "cc");
56 52
57 if (pen_release == cpu_logical_map(cpu)) { 53 if (pen_release == core_id) {
58 /* 54 /*
59 * OK, proper wakeup, we're done 55 * OK, proper wakeup, we're done
60 */ 56 */
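
The hotplug path now derives the hardware core number from affinity level 0 of the MPIDR instead of assuming "cpu == 1", and uses the wfi() helper rather than a hand-encoded opcode. Assuming the usual 8-bit-per-level MPIDR layout (which is what the kernel's MPIDR_AFFINITY_LEVEL macro encapsulates), the extraction looks like:

    #include <stdio.h>
    #include <stdint.h>

    /* Affinity level n occupies bits [8n+7:8n] of the 32-bit MPIDR (n = 0..2). */
    static uint32_t mpidr_affinity_level(uint32_t mpidr, unsigned int level)
    {
        return (mpidr >> (level * 8)) & 0xff;
    }

    int main(void)
    {
        uint32_t mpidr = 0x80000101;         /* e.g. cluster 1, core 1 */

        printf("core %u, cluster %u\n",
               (unsigned)mpidr_affinity_level(mpidr, 0),
               (unsigned)mpidr_affinity_level(mpidr, 1));
        return 0;
    }
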
diff --git a/arch/arm/mach-exynos/mcpm-exynos.c b/arch/arm/mach-exynos/mcpm-exynos.c
index 0498d0b887ef..ace0ed617476 100644
--- a/arch/arm/mach-exynos/mcpm-exynos.c
+++ b/arch/arm/mach-exynos/mcpm-exynos.c
@@ -25,7 +25,6 @@
25 25
26#define EXYNOS5420_CPUS_PER_CLUSTER 4 26#define EXYNOS5420_CPUS_PER_CLUSTER 4
27#define EXYNOS5420_NR_CLUSTERS 2 27#define EXYNOS5420_NR_CLUSTERS 2
28#define MCPM_BOOT_ADDR_OFFSET 0x1c
29 28
30/* 29/*
31 * The common v7_exit_coherency_flush API could not be used because of the 30 * The common v7_exit_coherency_flush API could not be used because of the
@@ -343,11 +342,13 @@ static int __init exynos_mcpm_init(void)
343 pr_info("Exynos MCPM support installed\n"); 342 pr_info("Exynos MCPM support installed\n");
344 343
345 /* 344 /*
346 * Future entries into the kernel can now go 345 * U-Boot SPL is hardcoded to jump to the start of ns_sram_base_addr
347 * through the cluster entry vectors. 346 * as part of secondary_cpu_start(). Let's redirect it to the
347 * mcpm_entry_point().
348 */ 348 */
349 __raw_writel(virt_to_phys(mcpm_entry_point), 349 __raw_writel(0xe59f0000, ns_sram_base_addr); /* ldr r0, [pc, #0] */
350 ns_sram_base_addr + MCPM_BOOT_ADDR_OFFSET); 350 __raw_writel(0xe12fff10, ns_sram_base_addr + 4); /* bx r0 */
351 __raw_writel(virt_to_phys(mcpm_entry_point), ns_sram_base_addr + 8);
351 352
352 iounmap(ns_sram_base_addr); 353 iounmap(ns_sram_base_addr);
353 354
diff --git a/arch/arm/mach-exynos/platsmp.c b/arch/arm/mach-exynos/platsmp.c
index 1c8d31e39520..50b9aad5e27b 100644
--- a/arch/arm/mach-exynos/platsmp.c
+++ b/arch/arm/mach-exynos/platsmp.c
@@ -90,7 +90,8 @@ static void exynos_secondary_init(unsigned int cpu)
90static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle) 90static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
91{ 91{
92 unsigned long timeout; 92 unsigned long timeout;
93 unsigned long phys_cpu = cpu_logical_map(cpu); 93 u32 mpidr = cpu_logical_map(cpu);
94 u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
94 int ret = -ENOSYS; 95 int ret = -ENOSYS;
95 96
96 /* 97 /*
@@ -104,17 +105,18 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
104 * the holding pen - release it, then wait for it to flag 105 * the holding pen - release it, then wait for it to flag
105 * that it has been released by resetting pen_release. 106 * that it has been released by resetting pen_release.
106 * 107 *
107 * Note that "pen_release" is the hardware CPU ID, whereas 108 * Note that "pen_release" is the hardware CPU core ID, whereas
108 * "cpu" is Linux's internal ID. 109 * "cpu" is Linux's internal ID.
109 */ 110 */
110 write_pen_release(phys_cpu); 111 write_pen_release(core_id);
111 112
112 if (!exynos_cpu_power_state(cpu)) { 113 if (!exynos_cpu_power_state(core_id)) {
113 exynos_cpu_power_up(cpu); 114 exynos_cpu_power_up(core_id);
114 timeout = 10; 115 timeout = 10;
115 116
116 /* wait max 10 ms until cpu1 is on */ 117 /* wait max 10 ms until cpu1 is on */
117 while (exynos_cpu_power_state(cpu) != S5P_CORE_LOCAL_PWR_EN) { 118 while (exynos_cpu_power_state(core_id)
119 != S5P_CORE_LOCAL_PWR_EN) {
118 if (timeout-- == 0) 120 if (timeout-- == 0)
119 break; 121 break;
120 122
@@ -145,20 +147,20 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
145 * Try to set boot address using firmware first 147 * Try to set boot address using firmware first
146 * and fall back to boot register if it fails. 148 * and fall back to boot register if it fails.
147 */ 149 */
148 ret = call_firmware_op(set_cpu_boot_addr, phys_cpu, boot_addr); 150 ret = call_firmware_op(set_cpu_boot_addr, core_id, boot_addr);
149 if (ret && ret != -ENOSYS) 151 if (ret && ret != -ENOSYS)
150 goto fail; 152 goto fail;
151 if (ret == -ENOSYS) { 153 if (ret == -ENOSYS) {
152 void __iomem *boot_reg = cpu_boot_reg(phys_cpu); 154 void __iomem *boot_reg = cpu_boot_reg(core_id);
153 155
154 if (IS_ERR(boot_reg)) { 156 if (IS_ERR(boot_reg)) {
155 ret = PTR_ERR(boot_reg); 157 ret = PTR_ERR(boot_reg);
156 goto fail; 158 goto fail;
157 } 159 }
158 __raw_writel(boot_addr, cpu_boot_reg(phys_cpu)); 160 __raw_writel(boot_addr, cpu_boot_reg(core_id));
159 } 161 }
160 162
161 call_firmware_op(cpu_boot, phys_cpu); 163 call_firmware_op(cpu_boot, core_id);
162 164
163 arch_send_wakeup_ipi_mask(cpumask_of(cpu)); 165 arch_send_wakeup_ipi_mask(cpumask_of(cpu));
164 166
@@ -227,22 +229,24 @@ static void __init exynos_smp_prepare_cpus(unsigned int max_cpus)
227 * boot register if it fails. 229 * boot register if it fails.
228 */ 230 */
229 for (i = 1; i < max_cpus; ++i) { 231 for (i = 1; i < max_cpus; ++i) {
230 unsigned long phys_cpu;
231 unsigned long boot_addr; 232 unsigned long boot_addr;
233 u32 mpidr;
234 u32 core_id;
232 int ret; 235 int ret;
233 236
234 phys_cpu = cpu_logical_map(i); 237 mpidr = cpu_logical_map(i);
238 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
235 boot_addr = virt_to_phys(exynos4_secondary_startup); 239 boot_addr = virt_to_phys(exynos4_secondary_startup);
236 240
237 ret = call_firmware_op(set_cpu_boot_addr, phys_cpu, boot_addr); 241 ret = call_firmware_op(set_cpu_boot_addr, core_id, boot_addr);
238 if (ret && ret != -ENOSYS) 242 if (ret && ret != -ENOSYS)
239 break; 243 break;
240 if (ret == -ENOSYS) { 244 if (ret == -ENOSYS) {
241 void __iomem *boot_reg = cpu_boot_reg(phys_cpu); 245 void __iomem *boot_reg = cpu_boot_reg(core_id);
242 246
243 if (IS_ERR(boot_reg)) 247 if (IS_ERR(boot_reg))
244 break; 248 break;
245 __raw_writel(boot_addr, cpu_boot_reg(phys_cpu)); 249 __raw_writel(boot_addr, cpu_boot_reg(core_id));
246 } 250 }
247 } 251 }
248} 252}
diff --git a/arch/arm/mach-exynos/pm.c b/arch/arm/mach-exynos/pm.c
index 87c0d34c7fba..202ca73e49c4 100644
--- a/arch/arm/mach-exynos/pm.c
+++ b/arch/arm/mach-exynos/pm.c
@@ -300,7 +300,7 @@ static int exynos_pm_suspend(void)
300 tmp = (S5P_USE_STANDBY_WFI0 | S5P_USE_STANDBY_WFE0); 300 tmp = (S5P_USE_STANDBY_WFI0 | S5P_USE_STANDBY_WFE0);
301 __raw_writel(tmp, S5P_CENTRAL_SEQ_OPTION); 301 __raw_writel(tmp, S5P_CENTRAL_SEQ_OPTION);
302 302
303 if (!soc_is_exynos5250()) 303 if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9)
304 exynos_cpu_save_register(); 304 exynos_cpu_save_register();
305 305
306 return 0; 306 return 0;
@@ -334,7 +334,7 @@ static void exynos_pm_resume(void)
334 if (exynos_pm_central_resume()) 334 if (exynos_pm_central_resume())
335 goto early_wakeup; 335 goto early_wakeup;
336 336
337 if (!soc_is_exynos5250()) 337 if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9)
338 exynos_cpu_restore_register(); 338 exynos_cpu_restore_register();
339 339
340 /* For release retention */ 340 /* For release retention */
@@ -353,7 +353,7 @@ static void exynos_pm_resume(void)
353 353
354 s3c_pm_do_restore_core(exynos_core_save, ARRAY_SIZE(exynos_core_save)); 354 s3c_pm_do_restore_core(exynos_core_save, ARRAY_SIZE(exynos_core_save));
355 355
356 if (!soc_is_exynos5250()) 356 if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9)
357 scu_enable(S5P_VA_SCU); 357 scu_enable(S5P_VA_SCU);
358 358
359early_wakeup: 359early_wakeup:
@@ -440,15 +440,18 @@ static int exynos_cpu_pm_notifier(struct notifier_block *self,
440 case CPU_PM_ENTER: 440 case CPU_PM_ENTER:
441 if (cpu == 0) { 441 if (cpu == 0) {
442 exynos_pm_central_suspend(); 442 exynos_pm_central_suspend();
443 exynos_cpu_save_register(); 443 if (read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9)
444 exynos_cpu_save_register();
444 } 445 }
445 break; 446 break;
446 447
447 case CPU_PM_EXIT: 448 case CPU_PM_EXIT:
448 if (cpu == 0) { 449 if (cpu == 0) {
449 if (!soc_is_exynos5250()) 450 if (read_cpuid_part_number() ==
451 ARM_CPU_PART_CORTEX_A9) {
450 scu_enable(S5P_VA_SCU); 452 scu_enable(S5P_VA_SCU);
451 exynos_cpu_restore_register(); 453 exynos_cpu_restore_register();
454 }
452 exynos_pm_central_resume(); 455 exynos_pm_central_resume();
453 } 456 }
454 break; 457 break;
diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c
index fe6570ebbdde..797cb134bfff 100644
--- a/arch/arm/mach-exynos/pm_domains.c
+++ b/arch/arm/mach-exynos/pm_domains.c
@@ -17,6 +17,7 @@
17#include <linux/err.h> 17#include <linux/err.h>
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/pm_domain.h> 19#include <linux/pm_domain.h>
20#include <linux/clk.h>
20#include <linux/delay.h> 21#include <linux/delay.h>
21#include <linux/of_address.h> 22#include <linux/of_address.h>
22#include <linux/of_platform.h> 23#include <linux/of_platform.h>
@@ -24,6 +25,8 @@
24 25
25#include "regs-pmu.h" 26#include "regs-pmu.h"
26 27
28#define MAX_CLK_PER_DOMAIN 4
29
27/* 30/*
28 * Exynos specific wrapper around the generic power domain 31 * Exynos specific wrapper around the generic power domain
29 */ 32 */
@@ -32,6 +35,9 @@ struct exynos_pm_domain {
32 char const *name; 35 char const *name;
33 bool is_off; 36 bool is_off;
34 struct generic_pm_domain pd; 37 struct generic_pm_domain pd;
38 struct clk *oscclk;
39 struct clk *clk[MAX_CLK_PER_DOMAIN];
40 struct clk *pclk[MAX_CLK_PER_DOMAIN];
35}; 41};
36 42
37static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on) 43static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on)
@@ -44,6 +50,19 @@ static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on)
44 pd = container_of(domain, struct exynos_pm_domain, pd); 50 pd = container_of(domain, struct exynos_pm_domain, pd);
45 base = pd->base; 51 base = pd->base;
46 52
53 /* Set oscclk before powering off a domain*/
54 if (!power_on) {
55 int i;
56
57 for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) {
58 if (IS_ERR(pd->clk[i]))
59 break;
60 if (clk_set_parent(pd->clk[i], pd->oscclk))
61 pr_err("%s: error setting oscclk as parent to clock %d\n",
62 pd->name, i);
63 }
64 }
65
47 pwr = power_on ? S5P_INT_LOCAL_PWR_EN : 0; 66 pwr = power_on ? S5P_INT_LOCAL_PWR_EN : 0;
48 __raw_writel(pwr, base); 67 __raw_writel(pwr, base);
49 68
@@ -60,6 +79,20 @@ static int exynos_pd_power(struct generic_pm_domain *domain, bool power_on)
60 cpu_relax(); 79 cpu_relax();
61 usleep_range(80, 100); 80 usleep_range(80, 100);
62 } 81 }
82
83 /* Restore clocks after powering on a domain*/
84 if (power_on) {
85 int i;
86
87 for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) {
88 if (IS_ERR(pd->clk[i]))
89 break;
90 if (clk_set_parent(pd->clk[i], pd->pclk[i]))
91 pr_err("%s: error setting parent to clock%d\n",
92 pd->name, i);
93 }
94 }
95
63 return 0; 96 return 0;
64} 97}
65 98
@@ -152,9 +185,11 @@ static __init int exynos4_pm_init_power_domain(void)
152 185
153 for_each_compatible_node(np, NULL, "samsung,exynos4210-pd") { 186 for_each_compatible_node(np, NULL, "samsung,exynos4210-pd") {
154 struct exynos_pm_domain *pd; 187 struct exynos_pm_domain *pd;
155 int on; 188 int on, i;
189 struct device *dev;
156 190
157 pdev = of_find_device_by_node(np); 191 pdev = of_find_device_by_node(np);
192 dev = &pdev->dev;
158 193
159 pd = kzalloc(sizeof(*pd), GFP_KERNEL); 194 pd = kzalloc(sizeof(*pd), GFP_KERNEL);
160 if (!pd) { 195 if (!pd) {
@@ -170,6 +205,30 @@ static __init int exynos4_pm_init_power_domain(void)
170 pd->pd.power_on = exynos_pd_power_on; 205 pd->pd.power_on = exynos_pd_power_on;
171 pd->pd.of_node = np; 206 pd->pd.of_node = np;
172 207
208 pd->oscclk = clk_get(dev, "oscclk");
209 if (IS_ERR(pd->oscclk))
210 goto no_clk;
211
212 for (i = 0; i < MAX_CLK_PER_DOMAIN; i++) {
213 char clk_name[8];
214
215 snprintf(clk_name, sizeof(clk_name), "clk%d", i);
216 pd->clk[i] = clk_get(dev, clk_name);
217 if (IS_ERR(pd->clk[i]))
218 break;
219 snprintf(clk_name, sizeof(clk_name), "pclk%d", i);
220 pd->pclk[i] = clk_get(dev, clk_name);
221 if (IS_ERR(pd->pclk[i])) {
222 clk_put(pd->clk[i]);
223 pd->clk[i] = ERR_PTR(-EINVAL);
224 break;
225 }
226 }
227
228 if (IS_ERR(pd->clk[0]))
229 clk_put(pd->oscclk);
230
231no_clk:
173 platform_set_drvdata(pdev, pd); 232 platform_set_drvdata(pdev, pd);
174 233
175 on = __raw_readl(pd->base + 0x4) & S5P_INT_LOCAL_PWR_EN; 234 on = __raw_readl(pd->base + 0x4) & S5P_INT_LOCAL_PWR_EN;
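
The power-domain hunk parks every domain clock on oscclk before power-off and puts the original parents ("pclkN") back after power-on, with the clocks looked up by the conventional "clkN"/"pclkN" names. A compact userspace model of that save-and-restore loop (stubbed clock API; "mout_g3d" and the helpers are illustrative, not from the patch):

    #include <stdio.h>

    #define MAX_CLK_PER_DOMAIN 4

    struct clk { const char *parent; };

    static int clk_set_parent_stub(struct clk *c, const char *parent)
    {
        c->parent = parent;                  /* stand-in for clk_set_parent() */
        return 0;
    }

    struct pm_domain {
        struct clk *clk[MAX_CLK_PER_DOMAIN];  /* clocks owned by the domain */
        const char *pclk[MAX_CLK_PER_DOMAIN]; /* their original parents */
    };

    static void domain_set_power(struct pm_domain *pd, int power_on)
    {
        for (int i = 0; i < MAX_CLK_PER_DOMAIN && pd->clk[i]; i++)
            clk_set_parent_stub(pd->clk[i], power_on ? pd->pclk[i] : "oscclk");
        /* ...write the power register and poll for completion here... */
    }

    int main(void)
    {
        struct clk c0 = { "mout_g3d" };
        struct pm_domain pd = { .clk = { &c0 }, .pclk = { "mout_g3d" } };

        domain_set_power(&pd, 0);
        printf("off: parent=%s\n", c0.parent);
        domain_set_power(&pd, 1);
        printf("on:  parent=%s\n", c0.parent);
        return 0;
    }
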
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig
index 28fa2fa49e5d..4b5185748f74 100644
--- a/arch/arm/mach-imx/Kconfig
+++ b/arch/arm/mach-imx/Kconfig
@@ -734,9 +734,9 @@ config SOC_IMX6
734 select HAVE_IMX_MMDC 734 select HAVE_IMX_MMDC
735 select HAVE_IMX_SRC 735 select HAVE_IMX_SRC
736 select MFD_SYSCON 736 select MFD_SYSCON
737 select PL310_ERRATA_588369 if CACHE_PL310 737 select PL310_ERRATA_588369 if CACHE_L2X0
738 select PL310_ERRATA_727915 if CACHE_PL310 738 select PL310_ERRATA_727915 if CACHE_L2X0
739 select PL310_ERRATA_769419 if CACHE_PL310 739 select PL310_ERRATA_769419 if CACHE_L2X0
740 740
741config SOC_IMX6Q 741config SOC_IMX6Q
742 bool "i.MX6 Quad/DualLite support" 742 bool "i.MX6 Quad/DualLite support"
@@ -771,9 +771,9 @@ config SOC_VF610
771 select ARM_GIC 771 select ARM_GIC
772 select PINCTRL_VF610 772 select PINCTRL_VF610
773 select VF_PIT_TIMER 773 select VF_PIT_TIMER
774 select PL310_ERRATA_588369 if CACHE_PL310 774 select PL310_ERRATA_588369 if CACHE_L2X0
775 select PL310_ERRATA_727915 if CACHE_PL310 775 select PL310_ERRATA_727915 if CACHE_L2X0
776 select PL310_ERRATA_769419 if CACHE_PL310 776 select PL310_ERRATA_769419 if CACHE_L2X0
777 777
778 help 778 help
779 This enable support for Freescale Vybrid VF610 processor. 779 This enable support for Freescale Vybrid VF610 processor.
diff --git a/arch/arm/mach-imx/clk-gate2.c b/arch/arm/mach-imx/clk-gate2.c
index 4ba587da89d2..84acdfd1d715 100644
--- a/arch/arm/mach-imx/clk-gate2.c
+++ b/arch/arm/mach-imx/clk-gate2.c
@@ -67,8 +67,12 @@ static void clk_gate2_disable(struct clk_hw *hw)
67 67
68 spin_lock_irqsave(gate->lock, flags); 68 spin_lock_irqsave(gate->lock, flags);
69 69
70 if (gate->share_count && --(*gate->share_count) > 0) 70 if (gate->share_count) {
71 goto out; 71 if (WARN_ON(*gate->share_count == 0))
72 goto out;
73 else if (--(*gate->share_count) > 0)
74 goto out;
75 }
72 76
73 reg = readl(gate->reg); 77 reg = readl(gate->reg);
74 reg &= ~(3 << gate->bit_idx); 78 reg &= ~(3 << gate->bit_idx);
@@ -78,19 +82,26 @@ out:
78 spin_unlock_irqrestore(gate->lock, flags); 82 spin_unlock_irqrestore(gate->lock, flags);
79} 83}
80 84
81static int clk_gate2_is_enabled(struct clk_hw *hw) 85static int clk_gate2_reg_is_enabled(void __iomem *reg, u8 bit_idx)
82{ 86{
83 u32 reg; 87 u32 val = readl(reg);
84 struct clk_gate2 *gate = to_clk_gate2(hw);
85 88
86 reg = readl(gate->reg); 89 if (((val >> bit_idx) & 1) == 1)
87
88 if (((reg >> gate->bit_idx) & 1) == 1)
89 return 1; 90 return 1;
90 91
91 return 0; 92 return 0;
92} 93}
93 94
95static int clk_gate2_is_enabled(struct clk_hw *hw)
96{
97 struct clk_gate2 *gate = to_clk_gate2(hw);
98
99 if (gate->share_count)
100 return !!(*gate->share_count);
101 else
102 return clk_gate2_reg_is_enabled(gate->reg, gate->bit_idx);
103}
104
94static struct clk_ops clk_gate2_ops = { 105static struct clk_ops clk_gate2_ops = {
95 .enable = clk_gate2_enable, 106 .enable = clk_gate2_enable,
96 .disable = clk_gate2_disable, 107 .disable = clk_gate2_disable,
@@ -116,6 +127,10 @@ struct clk *clk_register_gate2(struct device *dev, const char *name,
116 gate->bit_idx = bit_idx; 127 gate->bit_idx = bit_idx;
117 gate->flags = clk_gate2_flags; 128 gate->flags = clk_gate2_flags;
118 gate->lock = lock; 129 gate->lock = lock;
130
131 /* Initialize share_count per hardware state */
132 if (share_count)
133 *share_count = clk_gate2_reg_is_enabled(reg, bit_idx) ? 1 : 0;
119 gate->share_count = share_count; 134 gate->share_count = share_count;
120 135
121 init.name = name; 136 init.name = name;
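
Two pieces of the clk-gate2 change work together: the shared use-count is seeded from the hardware enable bit at registration, and is_enabled reports that count whenever a gate is shared, so two gates driving one register field stay consistent. A small refcounted-gate model in plain C (stubbed register access, not the clk framework):

    #include <stdio.h>

    struct gate {
        unsigned int *reg;                   /* stand-in for the gating register */
        unsigned int  bit;
        unsigned int *share_count;           /* NULL when the gate is not shared */
    };

    static int reg_is_enabled(unsigned int reg, unsigned int bit)
    {
        return (reg >> bit) & 1;
    }

    static void gate_register(struct gate *g)
    {
        if (g->share_count)                  /* seed the count from hardware state */
            *g->share_count = reg_is_enabled(*g->reg, g->bit);
    }

    static int gate_is_enabled(const struct gate *g)
    {
        if (g->share_count)
            return *g->share_count > 0;      /* shared gates trust the count */
        return reg_is_enabled(*g->reg, g->bit);
    }

    static void gate_disable(struct gate *g)
    {
        if (g->share_count && --(*g->share_count) > 0)
            return;                          /* another user still needs the clock */
        *g->reg &= ~(1u << g->bit);
    }

    int main(void)
    {
        unsigned int reg = 1u << 5, count;
        struct gate a = { &reg, 5, &count };

        gate_register(&a);                   /* count becomes 1: bit already set */
        gate_disable(&a);                    /* count hits 0, register bit cleared */
        printf("enabled after disable: %d\n", gate_is_enabled(&a));
        return 0;
    }
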
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
index 8e795dea02ec..8556c787e59c 100644
--- a/arch/arm/mach-imx/clk-imx6q.c
+++ b/arch/arm/mach-imx/clk-imx6q.c
@@ -70,7 +70,7 @@ static const char *cko_sels[] = { "cko1", "cko2", };
70static const char *lvds_sels[] = { 70static const char *lvds_sels[] = {
71 "dummy", "dummy", "dummy", "dummy", "dummy", "dummy", 71 "dummy", "dummy", "dummy", "dummy", "dummy", "dummy",
72 "pll4_audio", "pll5_video", "pll8_mlb", "enet_ref", 72 "pll4_audio", "pll5_video", "pll8_mlb", "enet_ref",
73 "pcie_ref", "sata_ref", 73 "pcie_ref_125m", "sata_ref_100m",
74}; 74};
75 75
76enum mx6q_clks { 76enum mx6q_clks {
@@ -491,7 +491,7 @@ static void __init imx6q_clocks_init(struct device_node *ccm_node)
491 491
492 /* All existing boards with PCIe use LVDS1 */ 492 /* All existing boards with PCIe use LVDS1 */
493 if (IS_ENABLED(CONFIG_PCI_IMX6)) 493 if (IS_ENABLED(CONFIG_PCI_IMX6))
494 clk_set_parent(clk[lvds1_sel], clk[sata_ref]); 494 clk_set_parent(clk[lvds1_sel], clk[sata_ref_100m]);
495 495
496 /* Set initial power mode */ 496 /* Set initial power mode */
497 imx6q_set_lpm(WAIT_CLOCKED); 497 imx6q_set_lpm(WAIT_CLOCKED);
diff --git a/arch/arm/mach-imx/clk-imx6sl.c b/arch/arm/mach-imx/clk-imx6sl.c
index 21cf06cebade..5408ca70c8d6 100644
--- a/arch/arm/mach-imx/clk-imx6sl.c
+++ b/arch/arm/mach-imx/clk-imx6sl.c
@@ -312,6 +312,7 @@ static void __init imx6sl_clocks_init(struct device_node *ccm_node)
312 clks[IMX6SL_CLK_ECSPI2] = imx_clk_gate2("ecspi2", "ecspi_root", base + 0x6c, 2); 312 clks[IMX6SL_CLK_ECSPI2] = imx_clk_gate2("ecspi2", "ecspi_root", base + 0x6c, 2);
313 clks[IMX6SL_CLK_ECSPI3] = imx_clk_gate2("ecspi3", "ecspi_root", base + 0x6c, 4); 313 clks[IMX6SL_CLK_ECSPI3] = imx_clk_gate2("ecspi3", "ecspi_root", base + 0x6c, 4);
314 clks[IMX6SL_CLK_ECSPI4] = imx_clk_gate2("ecspi4", "ecspi_root", base + 0x6c, 6); 314 clks[IMX6SL_CLK_ECSPI4] = imx_clk_gate2("ecspi4", "ecspi_root", base + 0x6c, 6);
315 clks[IMX6SL_CLK_ENET] = imx_clk_gate2("enet", "ipg", base + 0x6c, 10);
315 clks[IMX6SL_CLK_EPIT1] = imx_clk_gate2("epit1", "perclk", base + 0x6c, 12); 316 clks[IMX6SL_CLK_EPIT1] = imx_clk_gate2("epit1", "perclk", base + 0x6c, 12);
316 clks[IMX6SL_CLK_EPIT2] = imx_clk_gate2("epit2", "perclk", base + 0x6c, 14); 317 clks[IMX6SL_CLK_EPIT2] = imx_clk_gate2("epit2", "perclk", base + 0x6c, 14);
317 clks[IMX6SL_CLK_EXTERN_AUDIO] = imx_clk_gate2("extern_audio", "extern_audio_podf", base + 0x6c, 16); 318 clks[IMX6SL_CLK_EXTERN_AUDIO] = imx_clk_gate2("extern_audio", "extern_audio_podf", base + 0x6c, 16);
diff --git a/arch/arm/mach-integrator/integrator_ap.c b/arch/arm/mach-integrator/integrator_ap.c
index dd0cc677d596..660ca6feff40 100644
--- a/arch/arm/mach-integrator/integrator_ap.c
+++ b/arch/arm/mach-integrator/integrator_ap.c
@@ -480,25 +480,18 @@ static const struct of_device_id ebi_match[] = {
480static void __init ap_init_of(void) 480static void __init ap_init_of(void)
481{ 481{
482 unsigned long sc_dec; 482 unsigned long sc_dec;
483 struct device_node *root;
484 struct device_node *syscon; 483 struct device_node *syscon;
485 struct device_node *ebi; 484 struct device_node *ebi;
486 struct device *parent; 485 struct device *parent;
487 struct soc_device *soc_dev; 486 struct soc_device *soc_dev;
488 struct soc_device_attribute *soc_dev_attr; 487 struct soc_device_attribute *soc_dev_attr;
489 u32 ap_sc_id; 488 u32 ap_sc_id;
490 int err;
491 int i; 489 int i;
492 490
493 /* Here we create an SoC device for the root node */ 491 syscon = of_find_matching_node(NULL, ap_syscon_match);
494 root = of_find_node_by_path("/");
495 if (!root)
496 return;
497
498 syscon = of_find_matching_node(root, ap_syscon_match);
499 if (!syscon) 492 if (!syscon)
500 return; 493 return;
501 ebi = of_find_matching_node(root, ebi_match); 494 ebi = of_find_matching_node(NULL, ebi_match);
502 if (!ebi) 495 if (!ebi)
503 return; 496 return;
504 497
@@ -509,19 +502,17 @@ static void __init ap_init_of(void)
509 if (!ebi_base) 502 if (!ebi_base)
510 return; 503 return;
511 504
505 of_platform_populate(NULL, of_default_bus_match_table,
506 ap_auxdata_lookup, NULL);
507
512 ap_sc_id = readl(ap_syscon_base); 508 ap_sc_id = readl(ap_syscon_base);
513 509
514 soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL); 510 soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
515 if (!soc_dev_attr) 511 if (!soc_dev_attr)
516 return; 512 return;
517 513
518 err = of_property_read_string(root, "compatible", 514 soc_dev_attr->soc_id = "XVC";
519 &soc_dev_attr->soc_id); 515 soc_dev_attr->machine = "Integrator/AP";
520 if (err)
521 return;
522 err = of_property_read_string(root, "model", &soc_dev_attr->machine);
523 if (err)
524 return;
525 soc_dev_attr->family = "Integrator"; 516 soc_dev_attr->family = "Integrator";
526 soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%c", 517 soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%c",
527 'A' + (ap_sc_id & 0x0f)); 518 'A' + (ap_sc_id & 0x0f));
@@ -536,9 +527,6 @@ static void __init ap_init_of(void)
536 parent = soc_device_to_device(soc_dev); 527 parent = soc_device_to_device(soc_dev);
537 integrator_init_sysfs(parent, ap_sc_id); 528 integrator_init_sysfs(parent, ap_sc_id);
538 529
539 of_platform_populate(root, of_default_bus_match_table,
540 ap_auxdata_lookup, parent);
541
542 sc_dec = readl(ap_syscon_base + INTEGRATOR_SC_DEC_OFFSET); 530 sc_dec = readl(ap_syscon_base + INTEGRATOR_SC_DEC_OFFSET);
543 for (i = 0; i < 4; i++) { 531 for (i = 0; i < 4; i++) {
544 struct lm_device *lmdev; 532 struct lm_device *lmdev;
diff --git a/arch/arm/mach-integrator/integrator_cp.c b/arch/arm/mach-integrator/integrator_cp.c
index a938242b0c95..0e57f8f820a5 100644
--- a/arch/arm/mach-integrator/integrator_cp.c
+++ b/arch/arm/mach-integrator/integrator_cp.c
@@ -279,20 +279,13 @@ static const struct of_device_id intcp_syscon_match[] = {
279 279
280static void __init intcp_init_of(void) 280static void __init intcp_init_of(void)
281{ 281{
282 struct device_node *root;
283 struct device_node *cpcon; 282 struct device_node *cpcon;
284 struct device *parent; 283 struct device *parent;
285 struct soc_device *soc_dev; 284 struct soc_device *soc_dev;
286 struct soc_device_attribute *soc_dev_attr; 285 struct soc_device_attribute *soc_dev_attr;
287 u32 intcp_sc_id; 286 u32 intcp_sc_id;
288 int err;
289 287
290 /* Here we create an SoC device for the root node */ 288 cpcon = of_find_matching_node(NULL, intcp_syscon_match);
291 root = of_find_node_by_path("/");
292 if (!root)
293 return;
294
295 cpcon = of_find_matching_node(root, intcp_syscon_match);
296 if (!cpcon) 289 if (!cpcon)
297 return; 290 return;
298 291
@@ -300,19 +293,17 @@ static void __init intcp_init_of(void)
300 if (!intcp_con_base) 293 if (!intcp_con_base)
301 return; 294 return;
302 295
296 of_platform_populate(NULL, of_default_bus_match_table,
297 intcp_auxdata_lookup, NULL);
298
303 intcp_sc_id = readl(intcp_con_base); 299 intcp_sc_id = readl(intcp_con_base);
304 300
305 soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL); 301 soc_dev_attr = kzalloc(sizeof(*soc_dev_attr), GFP_KERNEL);
306 if (!soc_dev_attr) 302 if (!soc_dev_attr)
307 return; 303 return;
308 304
309 err = of_property_read_string(root, "compatible", 305 soc_dev_attr->soc_id = "XCV";
310 &soc_dev_attr->soc_id); 306 soc_dev_attr->machine = "Integrator/CP";
311 if (err)
312 return;
313 err = of_property_read_string(root, "model", &soc_dev_attr->machine);
314 if (err)
315 return;
316 soc_dev_attr->family = "Integrator"; 307 soc_dev_attr->family = "Integrator";
317 soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%c", 308 soc_dev_attr->revision = kasprintf(GFP_KERNEL, "%c",
318 'A' + (intcp_sc_id & 0x0f)); 309 'A' + (intcp_sc_id & 0x0f));
@@ -326,8 +317,6 @@ static void __init intcp_init_of(void)
326 317
327 parent = soc_device_to_device(soc_dev); 318 parent = soc_device_to_device(soc_dev);
328 integrator_init_sysfs(parent, intcp_sc_id); 319 integrator_init_sysfs(parent, intcp_sc_id);
329 of_platform_populate(root, of_default_bus_match_table,
330 intcp_auxdata_lookup, parent);
331} 320}
332 321
333static const char * intcp_dt_board_compat[] = { 322static const char * intcp_dt_board_compat[] = {
diff --git a/arch/arm/mach-mvebu/Kconfig b/arch/arm/mach-mvebu/Kconfig
index 4a7c250c9a30..b9bc599a5fd0 100644
--- a/arch/arm/mach-mvebu/Kconfig
+++ b/arch/arm/mach-mvebu/Kconfig
@@ -10,6 +10,7 @@ menuconfig ARCH_MVEBU
10 select ZONE_DMA if ARM_LPAE 10 select ZONE_DMA if ARM_LPAE
11 select ARCH_REQUIRE_GPIOLIB 11 select ARCH_REQUIRE_GPIOLIB
12 select PCI_QUIRKS if PCI 12 select PCI_QUIRKS if PCI
13 select OF_ADDRESS_PCI
13 14
14if ARCH_MVEBU 15if ARCH_MVEBU
15 16
@@ -17,6 +18,7 @@ config MACH_MVEBU_V7
17 bool 18 bool
18 select ARMADA_370_XP_TIMER 19 select ARMADA_370_XP_TIMER
19 select CACHE_L2X0 20 select CACHE_L2X0
21 select ARM_CPU_SUSPEND
20 22
21config MACH_ARMADA_370 23config MACH_ARMADA_370
22 bool "Marvell Armada 370 boards" if ARCH_MULTI_V7 24 bool "Marvell Armada 370 boards" if ARCH_MULTI_V7
diff --git a/arch/arm/mach-mvebu/Makefile b/arch/arm/mach-mvebu/Makefile
index 2ecb828e4a8b..1636cdbef01a 100644
--- a/arch/arm/mach-mvebu/Makefile
+++ b/arch/arm/mach-mvebu/Makefile
@@ -7,7 +7,7 @@ CFLAGS_pmsu.o := -march=armv7-a
7obj-y += system-controller.o mvebu-soc-id.o 7obj-y += system-controller.o mvebu-soc-id.o
8 8
9ifeq ($(CONFIG_MACH_MVEBU_V7),y) 9ifeq ($(CONFIG_MACH_MVEBU_V7),y)
10obj-y += cpu-reset.o board-v7.o coherency.o coherency_ll.o pmsu.o 10obj-y += cpu-reset.o board-v7.o coherency.o coherency_ll.o pmsu.o pmsu_ll.o
11obj-$(CONFIG_SMP) += platsmp.o headsmp.o platsmp-a9.o headsmp-a9.o 11obj-$(CONFIG_SMP) += platsmp.o headsmp.o platsmp-a9.o headsmp-a9.o
12obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o 12obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
13endif 13endif
diff --git a/arch/arm/mach-mvebu/board-v7.c b/arch/arm/mach-mvebu/board-v7.c
index 8bb742fdf5ca..b2524d689f21 100644
--- a/arch/arm/mach-mvebu/board-v7.c
+++ b/arch/arm/mach-mvebu/board-v7.c
@@ -23,6 +23,7 @@
23#include <linux/mbus.h> 23#include <linux/mbus.h>
24#include <linux/signal.h> 24#include <linux/signal.h>
25#include <linux/slab.h> 25#include <linux/slab.h>
26#include <linux/irqchip.h>
26#include <asm/hardware/cache-l2x0.h> 27#include <asm/hardware/cache-l2x0.h>
27#include <asm/mach/arch.h> 28#include <asm/mach/arch.h>
28#include <asm/mach/map.h> 29#include <asm/mach/map.h>
@@ -71,17 +72,23 @@ static int armada_375_external_abort_wa(unsigned long addr, unsigned int fsr,
71 return 1; 72 return 1;
72} 73}
73 74
74static void __init mvebu_timer_and_clk_init(void) 75static void __init mvebu_init_irq(void)
75{ 76{
76 of_clk_init(NULL); 77 irqchip_init();
77 clocksource_of_init();
78 mvebu_scu_enable(); 78 mvebu_scu_enable();
79 coherency_init(); 79 coherency_init();
80 BUG_ON(mvebu_mbus_dt_init(coherency_available())); 80 BUG_ON(mvebu_mbus_dt_init(coherency_available()));
81}
82
83static void __init external_abort_quirk(void)
84{
85 u32 dev, rev;
81 86
82 if (of_machine_is_compatible("marvell,armada375")) 87 if (mvebu_get_soc_id(&dev, &rev) == 0 && rev > ARMADA_375_Z1_REV)
83 hook_fault_code(16 + 6, armada_375_external_abort_wa, SIGBUS, 0, 88 return;
84 "imprecise external abort"); 89
90 hook_fault_code(16 + 6, armada_375_external_abort_wa, SIGBUS, 0,
91 "imprecise external abort");
85} 92}
86 93
87static void __init i2c_quirk(void) 94static void __init i2c_quirk(void)
@@ -169,8 +176,10 @@ static void __init mvebu_dt_init(void)
169{ 176{
170 if (of_machine_is_compatible("plathome,openblocks-ax3-4")) 177 if (of_machine_is_compatible("plathome,openblocks-ax3-4"))
171 i2c_quirk(); 178 i2c_quirk();
172 if (of_machine_is_compatible("marvell,a375-db")) 179 if (of_machine_is_compatible("marvell,a375-db")) {
180 external_abort_quirk();
173 thermal_quirk(); 181 thermal_quirk();
182 }
174 183
175 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); 184 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
176} 185}
@@ -185,7 +194,7 @@ DT_MACHINE_START(ARMADA_370_XP_DT, "Marvell Armada 370/XP (Device Tree)")
185 .l2c_aux_mask = ~0, 194 .l2c_aux_mask = ~0,
186 .smp = smp_ops(armada_xp_smp_ops), 195 .smp = smp_ops(armada_xp_smp_ops),
187 .init_machine = mvebu_dt_init, 196 .init_machine = mvebu_dt_init,
188 .init_time = mvebu_timer_and_clk_init, 197 .init_irq = mvebu_init_irq,
189 .restart = mvebu_restart, 198 .restart = mvebu_restart,
190 .dt_compat = armada_370_xp_dt_compat, 199 .dt_compat = armada_370_xp_dt_compat,
191MACHINE_END 200MACHINE_END
@@ -198,7 +207,7 @@ static const char * const armada_375_dt_compat[] = {
198DT_MACHINE_START(ARMADA_375_DT, "Marvell Armada 375 (Device Tree)") 207DT_MACHINE_START(ARMADA_375_DT, "Marvell Armada 375 (Device Tree)")
199 .l2c_aux_val = 0, 208 .l2c_aux_val = 0,
200 .l2c_aux_mask = ~0, 209 .l2c_aux_mask = ~0,
201 .init_time = mvebu_timer_and_clk_init, 210 .init_irq = mvebu_init_irq,
202 .init_machine = mvebu_dt_init, 211 .init_machine = mvebu_dt_init,
203 .restart = mvebu_restart, 212 .restart = mvebu_restart,
204 .dt_compat = armada_375_dt_compat, 213 .dt_compat = armada_375_dt_compat,
@@ -213,7 +222,7 @@ static const char * const armada_38x_dt_compat[] = {
213DT_MACHINE_START(ARMADA_38X_DT, "Marvell Armada 380/385 (Device Tree)") 222DT_MACHINE_START(ARMADA_38X_DT, "Marvell Armada 380/385 (Device Tree)")
214 .l2c_aux_val = 0, 223 .l2c_aux_val = 0,
215 .l2c_aux_mask = ~0, 224 .l2c_aux_mask = ~0,
216 .init_time = mvebu_timer_and_clk_init, 225 .init_irq = mvebu_init_irq,
217 .restart = mvebu_restart, 226 .restart = mvebu_restart,
218 .dt_compat = armada_38x_dt_compat, 227 .dt_compat = armada_38x_dt_compat,
219MACHINE_END 228MACHINE_END
diff --git a/arch/arm/mach-mvebu/coherency.c b/arch/arm/mach-mvebu/coherency.c
index 477202fd39cc..2bdc3233abe2 100644
--- a/arch/arm/mach-mvebu/coherency.c
+++ b/arch/arm/mach-mvebu/coherency.c
@@ -292,6 +292,10 @@ static struct notifier_block mvebu_hwcc_nb = {
292 .notifier_call = mvebu_hwcc_notifier, 292 .notifier_call = mvebu_hwcc_notifier,
293}; 293};
294 294
295static struct notifier_block mvebu_hwcc_pci_nb = {
296 .notifier_call = mvebu_hwcc_notifier,
297};
298
295static void __init armada_370_coherency_init(struct device_node *np) 299static void __init armada_370_coherency_init(struct device_node *np)
296{ 300{
297 struct resource res; 301 struct resource res;
@@ -427,7 +431,7 @@ static int __init coherency_pci_init(void)
427{ 431{
428 if (coherency_available()) 432 if (coherency_available())
429 bus_register_notifier(&pci_bus_type, 433 bus_register_notifier(&pci_bus_type,
430 &mvebu_hwcc_nb); 434 &mvebu_hwcc_pci_nb);
431 return 0; 435 return 0;
432} 436}
433 437
diff --git a/arch/arm/mach-mvebu/headsmp-a9.S b/arch/arm/mach-mvebu/headsmp-a9.S
index 5925366bc03c..da5bb292b91c 100644
--- a/arch/arm/mach-mvebu/headsmp-a9.S
+++ b/arch/arm/mach-mvebu/headsmp-a9.S
@@ -15,6 +15,8 @@
15#include <linux/linkage.h> 15#include <linux/linkage.h>
16#include <linux/init.h> 16#include <linux/init.h>
17 17
18#include <asm/assembler.h>
19
18 __CPUINIT 20 __CPUINIT
19#define CPU_RESUME_ADDR_REG 0xf10182d4 21#define CPU_RESUME_ADDR_REG 0xf10182d4
20 22
@@ -22,13 +24,18 @@
22.global armada_375_smp_cpu1_enable_code_end 24.global armada_375_smp_cpu1_enable_code_end
23 25
24armada_375_smp_cpu1_enable_code_start: 26armada_375_smp_cpu1_enable_code_start:
25 ldr r0, [pc, #4] 27ARM_BE8(setend be)
28 adr r0, 1f
29 ldr r0, [r0]
26 ldr r1, [r0] 30 ldr r1, [r0]
31ARM_BE8(rev r1, r1)
27 mov pc, r1 32 mov pc, r1
331:
28 .word CPU_RESUME_ADDR_REG 34 .word CPU_RESUME_ADDR_REG
29armada_375_smp_cpu1_enable_code_end: 35armada_375_smp_cpu1_enable_code_end:
30 36
31ENTRY(mvebu_cortex_a9_secondary_startup) 37ENTRY(mvebu_cortex_a9_secondary_startup)
38ARM_BE8(setend be)
32 bl v7_invalidate_l1 39 bl v7_invalidate_l1
33 b secondary_startup 40 b secondary_startup
34ENDPROC(mvebu_cortex_a9_secondary_startup) 41ENDPROC(mvebu_cortex_a9_secondary_startup)
diff --git a/arch/arm/mach-mvebu/pmsu.c b/arch/arm/mach-mvebu/pmsu.c
index 53a55c8520bf..25aa8237d668 100644
--- a/arch/arm/mach-mvebu/pmsu.c
+++ b/arch/arm/mach-mvebu/pmsu.c
@@ -66,6 +66,8 @@ static void __iomem *pmsu_mp_base;
66extern void ll_disable_coherency(void); 66extern void ll_disable_coherency(void);
67extern void ll_enable_coherency(void); 67extern void ll_enable_coherency(void);
68 68
69extern void armada_370_xp_cpu_resume(void);
70
69static struct platform_device armada_xp_cpuidle_device = { 71static struct platform_device armada_xp_cpuidle_device = {
70 .name = "cpuidle-armada-370-xp", 72 .name = "cpuidle-armada-370-xp",
71}; 73};
@@ -140,13 +142,6 @@ static void armada_370_xp_pmsu_enable_l2_powerdown_onidle(void)
140 writel(reg, pmsu_mp_base + L2C_NFABRIC_PM_CTL); 142 writel(reg, pmsu_mp_base + L2C_NFABRIC_PM_CTL);
141} 143}
142 144
143static void armada_370_xp_cpu_resume(void)
144{
145 asm volatile("bl ll_add_cpu_to_smp_group\n\t"
146 "bl ll_enable_coherency\n\t"
147 "b cpu_resume\n\t");
148}
149
150/* No locking is needed because we only access per-CPU registers */ 145/* No locking is needed because we only access per-CPU registers */
151void armada_370_xp_pmsu_idle_prepare(bool deepidle) 146void armada_370_xp_pmsu_idle_prepare(bool deepidle)
152{ 147{
@@ -206,12 +201,12 @@ static noinline int do_armada_370_xp_cpu_suspend(unsigned long deepidle)
206 201
207 /* Test the CR_C bit and set it if it was cleared */ 202 /* Test the CR_C bit and set it if it was cleared */
208 asm volatile( 203 asm volatile(
209 "mrc p15, 0, %0, c1, c0, 0 \n\t" 204 "mrc p15, 0, r0, c1, c0, 0 \n\t"
210 "tst %0, #(1 << 2) \n\t" 205 "tst r0, #(1 << 2) \n\t"
211 "orreq %0, %0, #(1 << 2) \n\t" 206 "orreq r0, r0, #(1 << 2) \n\t"
212 "mcreq p15, 0, %0, c1, c0, 0 \n\t" 207 "mcreq p15, 0, r0, c1, c0, 0 \n\t"
213 "isb " 208 "isb "
214 : : "r" (0)); 209 : : : "r0");
215 210
216 pr_warn("Failed to suspend the system\n"); 211 pr_warn("Failed to suspend the system\n");
217 212
diff --git a/arch/arm/mach-mvebu/pmsu_ll.S b/arch/arm/mach-mvebu/pmsu_ll.S
new file mode 100644
index 000000000000..fc3de68d8c54
--- /dev/null
+++ b/arch/arm/mach-mvebu/pmsu_ll.S
@@ -0,0 +1,25 @@
1/*
2 * Copyright (C) 2014 Marvell
3 *
4 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
5 * Gregory Clement <gregory.clement@free-electrons.com>
6 *
7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any
9 * warranty of any kind, whether express or implied.
10 */
11
12#include <linux/linkage.h>
13#include <asm/assembler.h>
14
15/*
16 * This is the entry point through which CPUs exiting cpuidle deep
17 * idle state are going.
18 */
19ENTRY(armada_370_xp_cpu_resume)
20ARM_BE8(setend be ) @ go BE8 if entered LE
21 bl ll_add_cpu_to_smp_group
22 bl ll_enable_coherency
23 b cpu_resume
24ENDPROC(armada_370_xp_cpu_resume)
25
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 062505345c95..1c1ed737f7ab 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -34,8 +34,8 @@ config ARCH_OMAP4
34 select HAVE_ARM_SCU if SMP 34 select HAVE_ARM_SCU if SMP
35 select HAVE_ARM_TWD if SMP 35 select HAVE_ARM_TWD if SMP
36 select OMAP_INTERCONNECT 36 select OMAP_INTERCONNECT
37 select PL310_ERRATA_588369 37 select PL310_ERRATA_588369 if CACHE_L2X0
38 select PL310_ERRATA_727915 38 select PL310_ERRATA_727915 if CACHE_L2X0
39 select PM_OPP if PM 39 select PM_OPP if PM
40 select PM_RUNTIME if CPU_IDLE 40 select PM_RUNTIME if CPU_IDLE
41 select ARM_ERRATA_754322 41 select ARM_ERRATA_754322
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 8421f38cf445..8ca99e9321e3 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -110,14 +110,16 @@ obj-y += prm_common.o cm_common.o
110obj-$(CONFIG_ARCH_OMAP2) += prm2xxx_3xxx.o prm2xxx.o cm2xxx.o 110obj-$(CONFIG_ARCH_OMAP2) += prm2xxx_3xxx.o prm2xxx.o cm2xxx.o
111obj-$(CONFIG_ARCH_OMAP3) += prm2xxx_3xxx.o prm3xxx.o cm3xxx.o 111obj-$(CONFIG_ARCH_OMAP3) += prm2xxx_3xxx.o prm3xxx.o cm3xxx.o
112obj-$(CONFIG_ARCH_OMAP3) += vc3xxx_data.o vp3xxx_data.o 112obj-$(CONFIG_ARCH_OMAP3) += vc3xxx_data.o vp3xxx_data.o
113obj-$(CONFIG_SOC_AM33XX) += prm33xx.o cm33xx.o
114omap-prcm-4-5-common = cminst44xx.o cm44xx.o prm44xx.o \ 113omap-prcm-4-5-common = cminst44xx.o cm44xx.o prm44xx.o \
115 prcm_mpu44xx.o prminst44xx.o \ 114 prcm_mpu44xx.o prminst44xx.o \
116 vc44xx_data.o vp44xx_data.o 115 vc44xx_data.o vp44xx_data.o
117obj-$(CONFIG_ARCH_OMAP4) += $(omap-prcm-4-5-common) 116obj-$(CONFIG_ARCH_OMAP4) += $(omap-prcm-4-5-common)
118obj-$(CONFIG_SOC_OMAP5) += $(omap-prcm-4-5-common) 117obj-$(CONFIG_SOC_OMAP5) += $(omap-prcm-4-5-common)
119obj-$(CONFIG_SOC_DRA7XX) += $(omap-prcm-4-5-common) 118obj-$(CONFIG_SOC_DRA7XX) += $(omap-prcm-4-5-common)
120obj-$(CONFIG_SOC_AM43XX) += $(omap-prcm-4-5-common) 119am33xx-43xx-prcm-common += prm33xx.o cm33xx.o
120obj-$(CONFIG_SOC_AM33XX) += $(am33xx-43xx-prcm-common)
121obj-$(CONFIG_SOC_AM43XX) += $(omap-prcm-4-5-common) \
122 $(am33xx-43xx-prcm-common)
121 123
122# OMAP voltage domains 124# OMAP voltage domains
123voltagedomain-common := voltage.o vc.o vp.o 125voltagedomain-common := voltage.o vc.o vp.o
diff --git a/arch/arm/mach-omap2/clkt_dpll.c b/arch/arm/mach-omap2/clkt_dpll.c
index 332af927f4d3..67fd26a18441 100644
--- a/arch/arm/mach-omap2/clkt_dpll.c
+++ b/arch/arm/mach-omap2/clkt_dpll.c
@@ -76,7 +76,7 @@
76 * (assuming that it is counting N upwards), or -2 if the enclosing loop 76 * (assuming that it is counting N upwards), or -2 if the enclosing loop
77 * should skip to the next iteration (again assuming N is increasing). 77 * should skip to the next iteration (again assuming N is increasing).
78 */ 78 */
79static int _dpll_test_fint(struct clk_hw_omap *clk, u8 n) 79static int _dpll_test_fint(struct clk_hw_omap *clk, unsigned int n)
80{ 80{
81 struct dpll_data *dd; 81 struct dpll_data *dd;
82 long fint, fint_min, fint_max; 82 long fint, fint_min, fint_max;
diff --git a/arch/arm/mach-omap2/cm-regbits-34xx.h b/arch/arm/mach-omap2/cm-regbits-34xx.h
index 04dab2fcf862..ee6c784cd6b7 100644
--- a/arch/arm/mach-omap2/cm-regbits-34xx.h
+++ b/arch/arm/mach-omap2/cm-regbits-34xx.h
@@ -26,11 +26,14 @@
26#define OMAP3430_EN_WDT3_SHIFT 12 26#define OMAP3430_EN_WDT3_SHIFT 12
27#define OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_MASK (1 << 0) 27#define OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_MASK (1 << 0)
28#define OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_SHIFT 0 28#define OMAP3430_CM_FCLKEN_IVA2_EN_IVA2_SHIFT 0
29#define OMAP3430_IVA2_DPLL_FREQSEL_SHIFT 4
29#define OMAP3430_IVA2_DPLL_FREQSEL_MASK (0xf << 4) 30#define OMAP3430_IVA2_DPLL_FREQSEL_MASK (0xf << 4)
30#define OMAP3430_EN_IVA2_DPLL_DRIFTGUARD_SHIFT 3 31#define OMAP3430_EN_IVA2_DPLL_DRIFTGUARD_SHIFT 3
32#define OMAP3430_EN_IVA2_DPLL_SHIFT 0
31#define OMAP3430_EN_IVA2_DPLL_MASK (0x7 << 0) 33#define OMAP3430_EN_IVA2_DPLL_MASK (0x7 << 0)
32#define OMAP3430_ST_IVA2_SHIFT 0 34#define OMAP3430_ST_IVA2_SHIFT 0
33#define OMAP3430_ST_IVA2_CLK_MASK (1 << 0) 35#define OMAP3430_ST_IVA2_CLK_MASK (1 << 0)
36#define OMAP3430_AUTO_IVA2_DPLL_SHIFT 0
34#define OMAP3430_AUTO_IVA2_DPLL_MASK (0x7 << 0) 37#define OMAP3430_AUTO_IVA2_DPLL_MASK (0x7 << 0)
35#define OMAP3430_IVA2_CLK_SRC_SHIFT 19 38#define OMAP3430_IVA2_CLK_SRC_SHIFT 19
36#define OMAP3430_IVA2_CLK_SRC_WIDTH 3 39#define OMAP3430_IVA2_CLK_SRC_WIDTH 3
diff --git a/arch/arm/mach-omap2/cm33xx.h b/arch/arm/mach-omap2/cm33xx.h
index 15a778ce7707..bd2441790779 100644
--- a/arch/arm/mach-omap2/cm33xx.h
+++ b/arch/arm/mach-omap2/cm33xx.h
@@ -380,7 +380,7 @@ void am33xx_cm_clkdm_disable_hwsup(u16 inst, u16 cdoffs);
380void am33xx_cm_clkdm_force_sleep(u16 inst, u16 cdoffs); 380void am33xx_cm_clkdm_force_sleep(u16 inst, u16 cdoffs);
381void am33xx_cm_clkdm_force_wakeup(u16 inst, u16 cdoffs); 381void am33xx_cm_clkdm_force_wakeup(u16 inst, u16 cdoffs);
382 382
383#ifdef CONFIG_SOC_AM33XX 383#if defined(CONFIG_SOC_AM33XX) || defined(CONFIG_SOC_AM43XX)
384extern int am33xx_cm_wait_module_idle(u16 inst, s16 cdoffs, 384extern int am33xx_cm_wait_module_idle(u16 inst, s16 cdoffs,
385 u16 clkctrl_offs); 385 u16 clkctrl_offs);
386extern void am33xx_cm_module_enable(u8 mode, u16 inst, s16 cdoffs, 386extern void am33xx_cm_module_enable(u8 mode, u16 inst, s16 cdoffs,
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index a373d508799a..dc571f1d3b8a 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -162,7 +162,8 @@ static inline void omap3xxx_restart(enum reboot_mode mode, const char *cmd)
162} 162}
163#endif 163#endif
164 164
165#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) 165#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
166 defined(CONFIG_SOC_DRA7XX) || defined(CONFIG_SOC_AM43XX)
166void omap44xx_restart(enum reboot_mode mode, const char *cmd); 167void omap44xx_restart(enum reboot_mode mode, const char *cmd);
167#else 168#else
168static inline void omap44xx_restart(enum reboot_mode mode, const char *cmd) 169static inline void omap44xx_restart(enum reboot_mode mode, const char *cmd)
@@ -248,7 +249,6 @@ static inline void __iomem *omap4_get_scu_base(void)
248} 249}
249#endif 250#endif
250 251
251extern void __init gic_init_irq(void);
252extern void gic_dist_disable(void); 252extern void gic_dist_disable(void);
253extern void gic_dist_enable(void); 253extern void gic_dist_enable(void);
254extern bool gic_dist_disabled(void); 254extern bool gic_dist_disabled(void);
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
index 592ba0a0ecf3..b6f8f348296e 100644
--- a/arch/arm/mach-omap2/devices.c
+++ b/arch/arm/mach-omap2/devices.c
@@ -297,33 +297,6 @@ static void omap_init_audio(void)
297static inline void omap_init_audio(void) {} 297static inline void omap_init_audio(void) {}
298#endif 298#endif
299 299
300#if defined(CONFIG_SND_OMAP_SOC_OMAP_HDMI) || \
301 defined(CONFIG_SND_OMAP_SOC_OMAP_HDMI_MODULE)
302
303static struct platform_device omap_hdmi_audio = {
304 .name = "omap-hdmi-audio",
305 .id = -1,
306};
307
308static void __init omap_init_hdmi_audio(void)
309{
310 struct omap_hwmod *oh;
311 struct platform_device *pdev;
312
313 oh = omap_hwmod_lookup("dss_hdmi");
314 if (!oh)
315 return;
316
317 pdev = omap_device_build("omap-hdmi-audio-dai", -1, oh, NULL, 0);
318 WARN(IS_ERR(pdev),
319 "Can't build omap_device for omap-hdmi-audio-dai.\n");
320
321 platform_device_register(&omap_hdmi_audio);
322}
323#else
324static inline void omap_init_hdmi_audio(void) {}
325#endif
326
327#if defined(CONFIG_SPI_OMAP24XX) || defined(CONFIG_SPI_OMAP24XX_MODULE) 300#if defined(CONFIG_SPI_OMAP24XX) || defined(CONFIG_SPI_OMAP24XX_MODULE)
328 301
329#include <linux/platform_data/spi-omap2-mcspi.h> 302#include <linux/platform_data/spi-omap2-mcspi.h>
@@ -459,7 +432,6 @@ static int __init omap2_init_devices(void)
459 */ 432 */
460 omap_init_audio(); 433 omap_init_audio();
461 omap_init_camera(); 434 omap_init_camera();
462 omap_init_hdmi_audio();
463 omap_init_mbox(); 435 omap_init_mbox();
464 /* If dtb is there, the devices will be created dynamically */ 436 /* If dtb is there, the devices will be created dynamically */
465 if (!of_have_populated_dt()) { 437 if (!of_have_populated_dt()) {
diff --git a/arch/arm/mach-omap2/dsp.c b/arch/arm/mach-omap2/dsp.c
index b8208b4b1bd9..f7492df1cbba 100644
--- a/arch/arm/mach-omap2/dsp.c
+++ b/arch/arm/mach-omap2/dsp.c
@@ -29,6 +29,7 @@
29#ifdef CONFIG_TIDSPBRIDGE_DVFS 29#ifdef CONFIG_TIDSPBRIDGE_DVFS
30#include "omap-pm.h" 30#include "omap-pm.h"
31#endif 31#endif
32#include "soc.h"
32 33
33#include <linux/platform_data/dsp-omap.h> 34#include <linux/platform_data/dsp-omap.h>
34 35
@@ -59,6 +60,9 @@ void __init omap_dsp_reserve_sdram_memblock(void)
59 phys_addr_t size = CONFIG_TIDSPBRIDGE_MEMPOOL_SIZE; 60 phys_addr_t size = CONFIG_TIDSPBRIDGE_MEMPOOL_SIZE;
60 phys_addr_t paddr; 61 phys_addr_t paddr;
61 62
63 if (!cpu_is_omap34xx())
64 return;
65
62 if (!size) 66 if (!size)
63 return; 67 return;
64 68
@@ -83,6 +87,9 @@ static int __init omap_dsp_init(void)
83 int err = -ENOMEM; 87 int err = -ENOMEM;
84 struct omap_dsp_platform_data *pdata = &omap_dsp_pdata; 88 struct omap_dsp_platform_data *pdata = &omap_dsp_pdata;
85 89
90 if (!cpu_is_omap34xx())
91 return 0;
92
86 pdata->phys_mempool_base = omap_dsp_get_mempool_base(); 93 pdata->phys_mempool_base = omap_dsp_get_mempool_base();
87 94
88 if (pdata->phys_mempool_base) { 95 if (pdata->phys_mempool_base) {
@@ -115,6 +122,9 @@ module_init(omap_dsp_init);
115 122
116static void __exit omap_dsp_exit(void) 123static void __exit omap_dsp_exit(void)
117{ 124{
125 if (!cpu_is_omap34xx())
126 return;
127
118 platform_device_unregister(omap_dsp_pdev); 128 platform_device_unregister(omap_dsp_pdev);
119} 129}
120module_exit(omap_dsp_exit); 130module_exit(omap_dsp_exit);
diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
index 2c0c2816900f..8bc13380f0a0 100644
--- a/arch/arm/mach-omap2/gpmc.c
+++ b/arch/arm/mach-omap2/gpmc.c
@@ -1615,7 +1615,7 @@ static int gpmc_probe_dt(struct platform_device *pdev)
1615 return ret; 1615 return ret;
1616 } 1616 }
1617 1617
1618 for_each_child_of_node(pdev->dev.of_node, child) { 1618 for_each_available_child_of_node(pdev->dev.of_node, child) {
1619 1619
1620 if (!child->name) 1620 if (!child->name)
1621 continue; 1621 continue;
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index 43969da5d50b..d42022f2a71e 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -649,6 +649,18 @@ void __init dra7xxx_check_revision(void)
649 } 649 }
650 break; 650 break;
651 651
652 case 0xb9bc:
653 switch (rev) {
654 case 0:
655 omap_revision = DRA722_REV_ES1_0;
656 break;
657 default:
658 /* If we have no new revisions */
659 omap_revision = DRA722_REV_ES1_0;
660 break;
661 }
662 break;
663
652 default: 664 default:
653 /* Unknown default to latest silicon rev as default*/ 665 /* Unknown default to latest silicon rev as default*/
654 pr_warn("%s: unknown idcode=0x%08x (hawkeye=0x%08x,rev=0x%d)\n", 666 pr_warn("%s: unknown idcode=0x%08x (hawkeye=0x%08x,rev=0x%d)\n",
diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c
index fd88edeb027f..f62f7537d899 100644
--- a/arch/arm/mach-omap2/mux.c
+++ b/arch/arm/mach-omap2/mux.c
@@ -183,8 +183,10 @@ static int __init _omap_mux_get_by_name(struct omap_mux_partition *partition,
183 m0_entry = mux->muxnames[0]; 183 m0_entry = mux->muxnames[0];
184 184
185 /* First check for full name in mode0.muxmode format */ 185 /* First check for full name in mode0.muxmode format */
186 if (mode0_len && strncmp(muxname, m0_entry, mode0_len)) 186 if (mode0_len)
187 continue; 187 if (strncmp(muxname, m0_entry, mode0_len) ||
188 (strlen(m0_entry) != mode0_len))
189 continue;
188 190
189 /* Then check for muxmode only */ 191 /* Then check for muxmode only */
190 for (i = 0; i < OMAP_MUX_NR_MODES; i++) { 192 for (i = 0; i < OMAP_MUX_NR_MODES; i++) {
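
The mux fix tightens the mode0 lookup from "prefix matches" to "prefix matches and the lengths agree", so a short requested name can no longer hit a longer signal name that merely starts the same way. A self-contained illustration of the comparison (the signal names here are hypothetical):

    #include <stdio.h>
    #include <string.h>

    /* Match only when the first 'len' chars agree AND the entry is exactly that
     * long, so a short request no longer hits a longer, same-prefix signal. */
    static int mode0_matches(const char *want, size_t len, const char *entry)
    {
        return strncmp(want, entry, len) == 0 && strlen(entry) == len;
    }

    int main(void)
    {
        const char *entries[] = { "uart1_rx", "uart1_rx_dummy" };
        const char *want = "uart1_rx.uart1_rx";      /* "mode0.muxmode" form */
        size_t mode0_len = strcspn(want, ".");

        for (int i = 0; i < 2; i++)
            printf("%-16s -> %s\n", entries[i],
                   mode0_matches(want, mode0_len, entries[i]) ? "match" : "skip");
        return 0;
    }
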
diff --git a/arch/arm/mach-omap2/omap4-common.c b/arch/arm/mach-omap2/omap4-common.c
index 326cd982a3cb..539e8106eb96 100644
--- a/arch/arm/mach-omap2/omap4-common.c
+++ b/arch/arm/mach-omap2/omap4-common.c
@@ -102,26 +102,6 @@ void __init omap_barriers_init(void)
102{} 102{}
103#endif 103#endif
104 104
105void __init gic_init_irq(void)
106{
107 void __iomem *omap_irq_base;
108
109 /* Static mapping, never released */
110 gic_dist_base_addr = ioremap(OMAP44XX_GIC_DIST_BASE, SZ_4K);
111 BUG_ON(!gic_dist_base_addr);
112
113 twd_base = ioremap(OMAP44XX_LOCAL_TWD_BASE, SZ_4K);
114 BUG_ON(!twd_base);
115
116 /* Static mapping, never released */
117 omap_irq_base = ioremap(OMAP44XX_GIC_CPU_BASE, SZ_512);
118 BUG_ON(!omap_irq_base);
119
120 omap_wakeupgen_init();
121
122 gic_init(0, 29, gic_dist_base_addr, omap_irq_base);
123}
124
125void gic_dist_disable(void) 105void gic_dist_disable(void)
126{ 106{
127 if (gic_dist_base_addr) 107 if (gic_dist_base_addr)
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index f7bb435bb543..6c074f37cdd2 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -4251,9 +4251,9 @@ void __init omap_hwmod_init(void)
4251 soc_ops.enable_module = _omap4_enable_module; 4251 soc_ops.enable_module = _omap4_enable_module;
4252 soc_ops.disable_module = _omap4_disable_module; 4252 soc_ops.disable_module = _omap4_disable_module;
4253 soc_ops.wait_target_ready = _omap4_wait_target_ready; 4253 soc_ops.wait_target_ready = _omap4_wait_target_ready;
4254 soc_ops.assert_hardreset = _omap4_assert_hardreset; 4254 soc_ops.assert_hardreset = _am33xx_assert_hardreset;
4255 soc_ops.deassert_hardreset = _omap4_deassert_hardreset; 4255 soc_ops.deassert_hardreset = _am33xx_deassert_hardreset;
4256 soc_ops.is_hardreset_asserted = _omap4_is_hardreset_asserted; 4256 soc_ops.is_hardreset_asserted = _am33xx_is_hardreset_asserted;
4257 soc_ops.init_clkdm = _init_clkdm; 4257 soc_ops.init_clkdm = _init_clkdm;
4258 } else if (soc_is_am33xx()) { 4258 } else if (soc_is_am33xx()) {
4259 soc_ops.enable_module = _am33xx_enable_module; 4259 soc_ops.enable_module = _am33xx_enable_module;
diff --git a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
index 290213f2cbe3..1103aa0e0d29 100644
--- a/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_54xx_data.c
@@ -2020,6 +2020,77 @@ static struct omap_hwmod omap54xx_wd_timer2_hwmod = {
2020 }, 2020 },
2021}; 2021};
2022 2022
2023/*
2024 * 'ocp2scp' class
2025 * bridge to transform ocp interface protocol to scp (serial control port)
2026 * protocol
2027 */
2028/* ocp2scp3 */
2029static struct omap_hwmod omap54xx_ocp2scp3_hwmod;
2030/* l4_cfg -> ocp2scp3 */
2031static struct omap_hwmod_ocp_if omap54xx_l4_cfg__ocp2scp3 = {
2032 .master = &omap54xx_l4_cfg_hwmod,
2033 .slave = &omap54xx_ocp2scp3_hwmod,
2034 .clk = "l4_root_clk_div",
2035 .user = OCP_USER_MPU | OCP_USER_SDMA,
2036};
2037
2038static struct omap_hwmod omap54xx_ocp2scp3_hwmod = {
2039 .name = "ocp2scp3",
2040 .class = &omap54xx_ocp2scp_hwmod_class,
2041 .clkdm_name = "l3init_clkdm",
2042 .prcm = {
2043 .omap4 = {
2044 .clkctrl_offs = OMAP54XX_CM_L3INIT_OCP2SCP3_CLKCTRL_OFFSET,
2045 .context_offs = OMAP54XX_RM_L3INIT_OCP2SCP3_CONTEXT_OFFSET,
2046 .modulemode = MODULEMODE_HWCTRL,
2047 },
2048 },
2049};
2050
2051/*
2052 * 'sata' class
2053 * sata: serial ata interface gen2 compliant ( 1 rx/ 1 tx)
2054 */
2055
2056static struct omap_hwmod_class_sysconfig omap54xx_sata_sysc = {
2057 .sysc_offs = 0x0000,
2058 .sysc_flags = (SYSC_HAS_MIDLEMODE | SYSC_HAS_SIDLEMODE),
2059 .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
2060 SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO |
2061 MSTANDBY_SMART | MSTANDBY_SMART_WKUP),
2062 .sysc_fields = &omap_hwmod_sysc_type2,
2063};
2064
2065static struct omap_hwmod_class omap54xx_sata_hwmod_class = {
2066 .name = "sata",
2067 .sysc = &omap54xx_sata_sysc,
2068};
2069
2070/* sata */
2071static struct omap_hwmod omap54xx_sata_hwmod = {
2072 .name = "sata",
2073 .class = &omap54xx_sata_hwmod_class,
2074 .clkdm_name = "l3init_clkdm",
2075 .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY,
2076 .main_clk = "func_48m_fclk",
2077 .mpu_rt_idx = 1,
2078 .prcm = {
2079 .omap4 = {
2080 .clkctrl_offs = OMAP54XX_CM_L3INIT_SATA_CLKCTRL_OFFSET,
2081 .context_offs = OMAP54XX_RM_L3INIT_SATA_CONTEXT_OFFSET,
2082 .modulemode = MODULEMODE_SWCTRL,
2083 },
2084 },
2085};
2086
2087/* l4_cfg -> sata */
2088static struct omap_hwmod_ocp_if omap54xx_l4_cfg__sata = {
2089 .master = &omap54xx_l4_cfg_hwmod,
2090 .slave = &omap54xx_sata_hwmod,
2091 .clk = "l3_iclk_div",
2092 .user = OCP_USER_MPU | OCP_USER_SDMA,
2093};
2023 2094
2024/* 2095/*
2025 * Interfaces 2096 * Interfaces
@@ -2765,6 +2836,8 @@ static struct omap_hwmod_ocp_if *omap54xx_hwmod_ocp_ifs[] __initdata = {
2765 &omap54xx_l4_cfg__usb_tll_hs, 2836 &omap54xx_l4_cfg__usb_tll_hs,
2766 &omap54xx_l4_cfg__usb_otg_ss, 2837 &omap54xx_l4_cfg__usb_otg_ss,
2767 &omap54xx_l4_wkup__wd_timer2, 2838 &omap54xx_l4_wkup__wd_timer2,
2839 &omap54xx_l4_cfg__ocp2scp3,
2840 &omap54xx_l4_cfg__sata,
2768 NULL, 2841 NULL,
2769}; 2842};
2770 2843
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
index 20b4398cec05..284324f2b98a 100644
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
@@ -1268,9 +1268,6 @@ static struct omap_hwmod_class dra7xx_sata_hwmod_class = {
1268}; 1268};
1269 1269
1270/* sata */ 1270/* sata */
1271static struct omap_hwmod_opt_clk sata_opt_clks[] = {
1272 { .role = "ref_clk", .clk = "sata_ref_clk" },
1273};
1274 1271
1275static struct omap_hwmod dra7xx_sata_hwmod = { 1272static struct omap_hwmod dra7xx_sata_hwmod = {
1276 .name = "sata", 1273 .name = "sata",
@@ -1278,6 +1275,7 @@ static struct omap_hwmod dra7xx_sata_hwmod = {
1278 .clkdm_name = "l3init_clkdm", 1275 .clkdm_name = "l3init_clkdm",
1279 .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY, 1276 .flags = HWMOD_SWSUP_SIDLE | HWMOD_SWSUP_MSTANDBY,
1280 .main_clk = "func_48m_fclk", 1277 .main_clk = "func_48m_fclk",
1278 .mpu_rt_idx = 1,
1281 .prcm = { 1279 .prcm = {
1282 .omap4 = { 1280 .omap4 = {
1283 .clkctrl_offs = DRA7XX_CM_L3INIT_SATA_CLKCTRL_OFFSET, 1281 .clkctrl_offs = DRA7XX_CM_L3INIT_SATA_CLKCTRL_OFFSET,
@@ -1285,8 +1283,6 @@ static struct omap_hwmod dra7xx_sata_hwmod = {
1285 .modulemode = MODULEMODE_SWCTRL, 1283 .modulemode = MODULEMODE_SWCTRL,
1286 }, 1284 },
1287 }, 1285 },
1288 .opt_clks = sata_opt_clks,
1289 .opt_clks_cnt = ARRAY_SIZE(sata_opt_clks),
1290}; 1286};
1291 1287
1292/* 1288/*
@@ -1731,8 +1727,20 @@ static struct omap_hwmod dra7xx_uart6_hwmod = {
1731 * 1727 *
1732 */ 1728 */
1733 1729
1730static struct omap_hwmod_class_sysconfig dra7xx_usb_otg_ss_sysc = {
1731 .rev_offs = 0x0000,
1732 .sysc_offs = 0x0010,
1733 .sysc_flags = (SYSC_HAS_DMADISABLE | SYSC_HAS_MIDLEMODE |
1734 SYSC_HAS_SIDLEMODE),
1735 .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
1736 SIDLE_SMART_WKUP | MSTANDBY_FORCE | MSTANDBY_NO |
1737 MSTANDBY_SMART | MSTANDBY_SMART_WKUP),
1738 .sysc_fields = &omap_hwmod_sysc_type2,
1739};
1740
1734static struct omap_hwmod_class dra7xx_usb_otg_ss_hwmod_class = { 1741static struct omap_hwmod_class dra7xx_usb_otg_ss_hwmod_class = {
1735 .name = "usb_otg_ss", 1742 .name = "usb_otg_ss",
1743 .sysc = &dra7xx_usb_otg_ss_sysc,
1736}; 1744};
1737 1745
1738/* usb_otg_ss1 */ 1746/* usb_otg_ss1 */
diff --git a/arch/arm/mach-omap2/prm-regbits-34xx.h b/arch/arm/mach-omap2/prm-regbits-34xx.h
index 106132db532b..cbefbd7cfdb5 100644
--- a/arch/arm/mach-omap2/prm-regbits-34xx.h
+++ b/arch/arm/mach-omap2/prm-regbits-34xx.h
@@ -35,6 +35,8 @@
35#define OMAP3430_LOGICSTATEST_MASK (1 << 2) 35#define OMAP3430_LOGICSTATEST_MASK (1 << 2)
36#define OMAP3430_LASTLOGICSTATEENTERED_MASK (1 << 2) 36#define OMAP3430_LASTLOGICSTATEENTERED_MASK (1 << 2)
37#define OMAP3430_LASTPOWERSTATEENTERED_MASK (0x3 << 0) 37#define OMAP3430_LASTPOWERSTATEENTERED_MASK (0x3 << 0)
38#define OMAP3430_GRPSEL_MCBSP5_MASK (1 << 10)
39#define OMAP3430_GRPSEL_MCBSP1_MASK (1 << 9)
38#define OMAP3630_GRPSEL_UART4_MASK (1 << 18) 40#define OMAP3630_GRPSEL_UART4_MASK (1 << 18)
39#define OMAP3430_GRPSEL_GPIO6_MASK (1 << 17) 41#define OMAP3430_GRPSEL_GPIO6_MASK (1 << 17)
40#define OMAP3430_GRPSEL_GPIO5_MASK (1 << 16) 42#define OMAP3430_GRPSEL_GPIO5_MASK (1 << 16)
@@ -42,6 +44,10 @@
42#define OMAP3430_GRPSEL_GPIO3_MASK (1 << 14) 44#define OMAP3430_GRPSEL_GPIO3_MASK (1 << 14)
43#define OMAP3430_GRPSEL_GPIO2_MASK (1 << 13) 45#define OMAP3430_GRPSEL_GPIO2_MASK (1 << 13)
44#define OMAP3430_GRPSEL_UART3_MASK (1 << 11) 46#define OMAP3430_GRPSEL_UART3_MASK (1 << 11)
47#define OMAP3430_GRPSEL_GPT8_MASK (1 << 9)
48#define OMAP3430_GRPSEL_GPT7_MASK (1 << 8)
49#define OMAP3430_GRPSEL_GPT6_MASK (1 << 7)
50#define OMAP3430_GRPSEL_GPT5_MASK (1 << 6)
45#define OMAP3430_GRPSEL_MCBSP4_MASK (1 << 2) 51#define OMAP3430_GRPSEL_MCBSP4_MASK (1 << 2)
46#define OMAP3430_GRPSEL_MCBSP3_MASK (1 << 1) 52#define OMAP3430_GRPSEL_MCBSP3_MASK (1 << 1)
47#define OMAP3430_GRPSEL_MCBSP2_MASK (1 << 0) 53#define OMAP3430_GRPSEL_MCBSP2_MASK (1 << 0)
diff --git a/arch/arm/mach-omap2/soc.h b/arch/arm/mach-omap2/soc.h
index de2a34c423a7..01ca8086fb6c 100644
--- a/arch/arm/mach-omap2/soc.h
+++ b/arch/arm/mach-omap2/soc.h
@@ -462,6 +462,7 @@ IS_OMAP_TYPE(3430, 0x3430)
462#define DRA7XX_CLASS 0x07000000 462#define DRA7XX_CLASS 0x07000000
463#define DRA752_REV_ES1_0 (DRA7XX_CLASS | (0x52 << 16) | (0x10 << 8)) 463#define DRA752_REV_ES1_0 (DRA7XX_CLASS | (0x52 << 16) | (0x10 << 8))
464#define DRA752_REV_ES1_1 (DRA7XX_CLASS | (0x52 << 16) | (0x11 << 8)) 464#define DRA752_REV_ES1_1 (DRA7XX_CLASS | (0x52 << 16) | (0x11 << 8))
465#define DRA722_REV_ES1_0 (DRA7XX_CLASS | (0x22 << 16) | (0x10 << 8))
465 466
466void omap2xxx_check_revision(void); 467void omap2xxx_check_revision(void);
467void omap3xxx_check_revision(void); 468void omap3xxx_check_revision(void);
diff --git a/arch/arm/mach-sa1100/collie.c b/arch/arm/mach-sa1100/collie.c
index f9874ba60cc8..108939f8d053 100644
--- a/arch/arm/mach-sa1100/collie.c
+++ b/arch/arm/mach-sa1100/collie.c
@@ -329,6 +329,11 @@ static struct mtd_partition collie_partitions[] = {
329 .name = "rootfs", 329 .name = "rootfs",
330 .offset = MTDPART_OFS_APPEND, 330 .offset = MTDPART_OFS_APPEND,
331 .size = 0x00e20000, 331 .size = 0x00e20000,
332 }, {
333 .name = "bootblock",
334 .offset = MTDPART_OFS_APPEND,
335 .size = 0x00020000,
336 .mask_flags = MTD_WRITEABLE
332 } 337 }
333}; 338};
334 339
@@ -356,7 +361,7 @@ static void collie_flash_exit(void)
356} 361}
357 362
358static struct flash_platform_data collie_flash_data = { 363static struct flash_platform_data collie_flash_data = {
359 .map_name = "jedec_probe", 364 .map_name = "cfi_probe",
360 .init = collie_flash_init, 365 .init = collie_flash_init,
361 .set_vpp = collie_set_vpp, 366 .set_vpp = collie_set_vpp,
362 .exit = collie_flash_exit, 367 .exit = collie_flash_exit,
diff --git a/arch/arm/mach-sti/Kconfig b/arch/arm/mach-sti/Kconfig
index 7e33e9d2c42e..878e9ec97d0f 100644
--- a/arch/arm/mach-sti/Kconfig
+++ b/arch/arm/mach-sti/Kconfig
@@ -11,8 +11,8 @@ menuconfig ARCH_STI
11 select ARM_ERRATA_754322 11 select ARM_ERRATA_754322
12 select ARM_ERRATA_764369 if SMP 12 select ARM_ERRATA_764369 if SMP
13 select ARM_ERRATA_775420 13 select ARM_ERRATA_775420
14 select PL310_ERRATA_753970 if CACHE_PL310 14 select PL310_ERRATA_753970 if CACHE_L2X0
15 select PL310_ERRATA_769419 if CACHE_PL310 15 select PL310_ERRATA_769419 if CACHE_L2X0
16 help 16 help
17 Include support for STiH41x SOCs like STiH415/416 using the device tree 17 Include support for STiH41x SOCs like STiH415/416 using the device tree
18 for discovery 18 for discovery
diff --git a/arch/arm/mach-sunxi/sunxi.c b/arch/arm/mach-sunxi/sunxi.c
index 3f9587bb51f6..b6085084e0ff 100644
--- a/arch/arm/mach-sunxi/sunxi.c
+++ b/arch/arm/mach-sunxi/sunxi.c
@@ -12,8 +12,81 @@
12 12
13#include <linux/clk-provider.h> 13#include <linux/clk-provider.h>
14#include <linux/clocksource.h> 14#include <linux/clocksource.h>
15#include <linux/delay.h>
16#include <linux/kernel.h>
17#include <linux/init.h>
18#include <linux/of_address.h>
19#include <linux/of_irq.h>
20#include <linux/of_platform.h>
21#include <linux/io.h>
22#include <linux/reboot.h>
15 23
16#include <asm/mach/arch.h> 24#include <asm/mach/arch.h>
25#include <asm/mach/map.h>
26#include <asm/system_misc.h>
27
28#define SUN4I_WATCHDOG_CTRL_REG 0x00
29#define SUN4I_WATCHDOG_CTRL_RESTART BIT(0)
30#define SUN4I_WATCHDOG_MODE_REG 0x04
31#define SUN4I_WATCHDOG_MODE_ENABLE BIT(0)
32#define SUN4I_WATCHDOG_MODE_RESET_ENABLE BIT(1)
33
34#define SUN6I_WATCHDOG1_IRQ_REG 0x00
35#define SUN6I_WATCHDOG1_CTRL_REG 0x10
36#define SUN6I_WATCHDOG1_CTRL_RESTART BIT(0)
37#define SUN6I_WATCHDOG1_CONFIG_REG 0x14
38#define SUN6I_WATCHDOG1_CONFIG_RESTART BIT(0)
39#define SUN6I_WATCHDOG1_CONFIG_IRQ BIT(1)
40#define SUN6I_WATCHDOG1_MODE_REG 0x18
41#define SUN6I_WATCHDOG1_MODE_ENABLE BIT(0)
42
43static void __iomem *wdt_base;
44
45static void sun4i_restart(enum reboot_mode mode, const char *cmd)
46{
47 if (!wdt_base)
48 return;
49
50 /* Enable timer and set reset bit in the watchdog */
51 writel(SUN4I_WATCHDOG_MODE_ENABLE | SUN4I_WATCHDOG_MODE_RESET_ENABLE,
52 wdt_base + SUN4I_WATCHDOG_MODE_REG);
53
54 /*
55 * Restart the watchdog. The default (and lowest) interval
56 * value for the watchdog is 0.5s.
57 */
58 writel(SUN4I_WATCHDOG_CTRL_RESTART, wdt_base + SUN4I_WATCHDOG_CTRL_REG);
59
60 while (1) {
61 mdelay(5);
62 writel(SUN4I_WATCHDOG_MODE_ENABLE | SUN4I_WATCHDOG_MODE_RESET_ENABLE,
63 wdt_base + SUN4I_WATCHDOG_MODE_REG);
64 }
65}
66
67static struct of_device_id sunxi_restart_ids[] = {
68 { .compatible = "allwinner,sun4i-a10-wdt" },
69 { /*sentinel*/ }
70};
71
72static void sunxi_setup_restart(void)
73{
74 struct device_node *np;
75
76 np = of_find_matching_node(NULL, sunxi_restart_ids);
77 if (WARN(!np, "unable to setup watchdog restart"))
78 return;
79
80 wdt_base = of_iomap(np, 0);
81 WARN(!wdt_base, "failed to map watchdog base address");
82}
83
84static void __init sunxi_dt_init(void)
85{
86 sunxi_setup_restart();
87
88 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
89}
17 90
18static const char * const sunxi_board_dt_compat[] = { 91static const char * const sunxi_board_dt_compat[] = {
19 "allwinner,sun4i-a10", 92 "allwinner,sun4i-a10",
@@ -23,7 +96,9 @@ static const char * const sunxi_board_dt_compat[] = {
23}; 96};
24 97
25DT_MACHINE_START(SUNXI_DT, "Allwinner A1X (Device Tree)") 98DT_MACHINE_START(SUNXI_DT, "Allwinner A1X (Device Tree)")
99 .init_machine = sunxi_dt_init,
26 .dt_compat = sunxi_board_dt_compat, 100 .dt_compat = sunxi_board_dt_compat,
101 .restart = sun4i_restart,
27MACHINE_END 102MACHINE_END
28 103
29static const char * const sun6i_board_dt_compat[] = { 104static const char * const sun6i_board_dt_compat[] = {
@@ -51,5 +126,7 @@ static const char * const sun7i_board_dt_compat[] = {
51}; 126};
52 127
53DT_MACHINE_START(SUN7I_DT, "Allwinner sun7i (A20) Family") 128DT_MACHINE_START(SUN7I_DT, "Allwinner sun7i (A20) Family")
129 .init_machine = sunxi_dt_init,
54 .dt_compat = sun7i_board_dt_compat, 130 .dt_compat = sun7i_board_dt_compat,
131 .restart = sun4i_restart,
55MACHINE_END 132MACHINE_END
diff --git a/arch/arm/mach-ux500/Kconfig b/arch/arm/mach-ux500/Kconfig
index 5be7c4583a93..699e8601dbf0 100644
--- a/arch/arm/mach-ux500/Kconfig
+++ b/arch/arm/mach-ux500/Kconfig
@@ -15,7 +15,7 @@ menuconfig ARCH_U8500
15 select PINCTRL 15 select PINCTRL
16 select PINCTRL_ABX500 16 select PINCTRL_ABX500
17 select PINCTRL_NOMADIK 17 select PINCTRL_NOMADIK
18 select PL310_ERRATA_753970 if CACHE_PL310 18 select PL310_ERRATA_753970 if CACHE_L2X0
19 help 19 help
20 Support for ST-Ericsson's Ux500 architecture 20 Support for ST-Ericsson's Ux500 architecture
21 21
diff --git a/arch/arm/mach-vexpress/Kconfig b/arch/arm/mach-vexpress/Kconfig
index 99c1f151c403..d8b9330f896a 100644
--- a/arch/arm/mach-vexpress/Kconfig
+++ b/arch/arm/mach-vexpress/Kconfig
@@ -43,7 +43,7 @@ config ARCH_VEXPRESS_CORTEX_A5_A9_ERRATA
43 bool "Enable A5 and A9 only errata work-arounds" 43 bool "Enable A5 and A9 only errata work-arounds"
44 default y 44 default y
45 select ARM_ERRATA_720789 45 select ARM_ERRATA_720789
46 select PL310_ERRATA_753970 if CACHE_PL310 46 select PL310_ERRATA_753970 if CACHE_L2X0
47 help 47 help
48 Provides common dependencies for Versatile Express platforms 48 Provides common dependencies for Versatile Express platforms
49 based on Cortex-A5 and Cortex-A9 processors. In order to 49 based on Cortex-A5 and Cortex-A9 processors. In order to
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index eda0dd0ab97b..c348eaee7ee2 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -889,9 +889,10 @@ config CACHE_L2X0
889 help 889 help
890 This option enables the L2x0 PrimeCell. 890 This option enables the L2x0 PrimeCell.
891 891
892if CACHE_L2X0
893
892config CACHE_PL310 894config CACHE_PL310
893 bool 895 bool
894 depends on CACHE_L2X0
895 default y if CPU_V7 && !(CPU_V6 || CPU_V6K) 896 default y if CPU_V7 && !(CPU_V6 || CPU_V6K)
896 help 897 help
897 This option enables optimisations for the PL310 cache 898 This option enables optimisations for the PL310 cache
@@ -899,7 +900,6 @@ config CACHE_PL310
899 900
900config PL310_ERRATA_588369 901config PL310_ERRATA_588369
901 bool "PL310 errata: Clean & Invalidate maintenance operations do not invalidate clean lines" 902 bool "PL310 errata: Clean & Invalidate maintenance operations do not invalidate clean lines"
902 depends on CACHE_L2X0
903 help 903 help
904 The PL310 L2 cache controller implements three types of Clean & 904 The PL310 L2 cache controller implements three types of Clean &
905 Invalidate maintenance operations: by Physical Address 905 Invalidate maintenance operations: by Physical Address
@@ -912,7 +912,6 @@ config PL310_ERRATA_588369
912 912
913config PL310_ERRATA_727915 913config PL310_ERRATA_727915
914 bool "PL310 errata: Background Clean & Invalidate by Way operation can cause data corruption" 914 bool "PL310 errata: Background Clean & Invalidate by Way operation can cause data corruption"
915 depends on CACHE_L2X0
916 help 915 help
917 PL310 implements the Clean & Invalidate by Way L2 cache maintenance 916 PL310 implements the Clean & Invalidate by Way L2 cache maintenance
918 operation (offset 0x7FC). This operation runs in background so that 917 operation (offset 0x7FC). This operation runs in background so that
@@ -923,7 +922,6 @@ config PL310_ERRATA_727915
923 922
924config PL310_ERRATA_753970 923config PL310_ERRATA_753970
925 bool "PL310 errata: cache sync operation may be faulty" 924 bool "PL310 errata: cache sync operation may be faulty"
926 depends on CACHE_PL310
927 help 925 help
928 This option enables the workaround for the 753970 PL310 (r3p0) erratum. 926 This option enables the workaround for the 753970 PL310 (r3p0) erratum.
929 927
@@ -938,7 +936,6 @@ config PL310_ERRATA_753970
938 936
939config PL310_ERRATA_769419 937config PL310_ERRATA_769419
940 bool "PL310 errata: no automatic Store Buffer drain" 938 bool "PL310 errata: no automatic Store Buffer drain"
941 depends on CACHE_L2X0
942 help 939 help
943 On revisions of the PL310 prior to r3p2, the Store Buffer does 940 On revisions of the PL310 prior to r3p2, the Store Buffer does
944 not automatically drain. This can cause normal, non-cacheable 941 not automatically drain. This can cause normal, non-cacheable
@@ -948,6 +945,8 @@ config PL310_ERRATA_769419
948 on systems with an outer cache, the store buffer is drained 945 on systems with an outer cache, the store buffer is drained
949 explicitly. 946 explicitly.
950 947
948endif
949
951config CACHE_TAUROS2 950config CACHE_TAUROS2
952 bool "Enable the Tauros2 L2 cache controller" 951 bool "Enable the Tauros2 L2 cache controller"
953 depends on (ARCH_DOVE || ARCH_MMP || CPU_PJ4) 952 depends on (ARCH_DOVE || ARCH_MMP || CPU_PJ4)
diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c
index efc5cabf70e0..7c3fb41a462e 100644
--- a/arch/arm/mm/cache-l2x0.c
+++ b/arch/arm/mm/cache-l2x0.c
@@ -664,7 +664,7 @@ static int l2c310_cpu_enable_flz(struct notifier_block *nb, unsigned long act, v
664 664
665static void __init l2c310_enable(void __iomem *base, u32 aux, unsigned num_lock) 665static void __init l2c310_enable(void __iomem *base, u32 aux, unsigned num_lock)
666{ 666{
667 unsigned rev = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_PART_MASK; 667 unsigned rev = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_RTL_MASK;
668 bool cortex_a9 = read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9; 668 bool cortex_a9 = read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9;
669 669
670 if (rev >= L310_CACHE_ID_RTL_R2P0) { 670 if (rev >= L310_CACHE_ID_RTL_R2P0) {
@@ -1069,6 +1069,33 @@ static const struct l2c_init_data of_l2c310_data __initconst = {
1069}; 1069};
1070 1070
1071/* 1071/*
1072 * This is a variant of the of_l2c310_data with .sync set to
1073 * NULL. Outer sync operations are not needed when the system is I/O
1074 * coherent, and potentially harmful in certain situations (PCIe/PL310
1075 * deadlock on Armada 375/38x due to hardware I/O coherency). The
1076 * other operations are kept because they are infrequent (therefore do
1077 * not cause the deadlock in practice) and needed for secondary CPU
1078 * boot and other power management activities.
1079 */
1080static const struct l2c_init_data of_l2c310_coherent_data __initconst = {
1081 .type = "L2C-310 Coherent",
1082 .way_size_0 = SZ_8K,
1083 .num_lock = 8,
1084 .of_parse = l2c310_of_parse,
1085 .enable = l2c310_enable,
1086 .fixup = l2c310_fixup,
1087 .save = l2c310_save,
1088 .outer_cache = {
1089 .inv_range = l2c210_inv_range,
1090 .clean_range = l2c210_clean_range,
1091 .flush_range = l2c210_flush_range,
1092 .flush_all = l2c210_flush_all,
1093 .disable = l2c310_disable,
1094 .resume = l2c310_resume,
1095 },
1096};
1097
1098/*
1072 * Note that the end addresses passed to Linux primitives are 1099 * Note that the end addresses passed to Linux primitives are
1073 * noninclusive, while the hardware cache range operations use 1100 * noninclusive, while the hardware cache range operations use
1074 * inclusive start and end addresses. 1101 * inclusive start and end addresses.
@@ -1487,6 +1514,10 @@ int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
1487 1514
1488 data = of_match_node(l2x0_ids, np)->data; 1515 data = of_match_node(l2x0_ids, np)->data;
1489 1516
1517 if (of_device_is_compatible(np, "arm,pl310-cache") &&
1518 of_property_read_bool(np, "arm,io-coherent"))
1519 data = &of_l2c310_coherent_data;
1520
1490 old_aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); 1521 old_aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
1491 if (old_aux != ((old_aux & aux_mask) | aux_val)) { 1522 if (old_aux != ((old_aux & aux_mask) | aux_val)) {
1492 pr_warn("L2C: platform modifies aux control register: 0x%08x -> 0x%08x\n", 1523 pr_warn("L2C: platform modifies aux control register: 0x%08x -> 0x%08x\n",
diff --git a/arch/arm/mm/nommu.c b/arch/arm/mm/nommu.c
index da1874f9f8cf..a014dfacd5ca 100644
--- a/arch/arm/mm/nommu.c
+++ b/arch/arm/mm/nommu.c
@@ -300,6 +300,7 @@ void __init sanity_check_meminfo(void)
300 sanity_check_meminfo_mpu(); 300 sanity_check_meminfo_mpu();
301 end = memblock_end_of_DRAM(); 301 end = memblock_end_of_DRAM();
302 high_memory = __va(end - 1) + 1; 302 high_memory = __va(end - 1) + 1;
303 memblock_set_current_limit(end);
303} 304}
304 305
305/* 306/*
diff --git a/arch/arm/mm/proc-arm925.S b/arch/arm/mm/proc-arm925.S
index 97448c3acf38..ba0d58e1a2a2 100644
--- a/arch/arm/mm/proc-arm925.S
+++ b/arch/arm/mm/proc-arm925.S
@@ -502,6 +502,7 @@ __\name\()_proc_info:
502 .long \cpu_val 502 .long \cpu_val
503 .long \cpu_mask 503 .long \cpu_mask
504 .long PMD_TYPE_SECT | \ 504 .long PMD_TYPE_SECT | \
505 PMD_SECT_CACHEABLE | \
505 PMD_BIT4 | \ 506 PMD_BIT4 | \
506 PMD_SECT_AP_WRITE | \ 507 PMD_SECT_AP_WRITE | \
507 PMD_SECT_AP_READ 508 PMD_SECT_AP_READ
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index a474de346be6..839f48c26ef0 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -4,6 +4,7 @@ config ARM64
4 select ARCH_HAS_OPP 4 select ARCH_HAS_OPP
5 select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST 5 select ARCH_HAS_TICK_BROADCAST if GENERIC_CLOCKEVENTS_BROADCAST
6 select ARCH_USE_CMPXCHG_LOCKREF 6 select ARCH_USE_CMPXCHG_LOCKREF
7 select ARCH_SUPPORTS_ATOMIC_RMW
7 select ARCH_WANT_OPTIONAL_GPIOLIB 8 select ARCH_WANT_OPTIONAL_GPIOLIB
8 select ARCH_WANT_COMPAT_IPC_PARSE_VERSION 9 select ARCH_WANT_COMPAT_IPC_PARSE_VERSION
9 select ARCH_WANT_FRAME_POINTERS 10 select ARCH_WANT_FRAME_POINTERS
diff --git a/arch/arm64/include/asm/memory.h b/arch/arm64/include/asm/memory.h
index 993bce527b85..902eb708804a 100644
--- a/arch/arm64/include/asm/memory.h
+++ b/arch/arm64/include/asm/memory.h
@@ -56,6 +56,8 @@
56#define TASK_SIZE_32 UL(0x100000000) 56#define TASK_SIZE_32 UL(0x100000000)
57#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \ 57#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
58 TASK_SIZE_32 : TASK_SIZE_64) 58 TASK_SIZE_32 : TASK_SIZE_64)
59#define TASK_SIZE_OF(tsk) (test_tsk_thread_flag(tsk, TIF_32BIT) ? \
60 TASK_SIZE_32 : TASK_SIZE_64)
59#else 61#else
60#define TASK_SIZE TASK_SIZE_64 62#define TASK_SIZE TASK_SIZE_64
61#endif /* CONFIG_COMPAT */ 63#endif /* CONFIG_COMPAT */
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 579702086488..e0ccceb317d9 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -292,7 +292,7 @@ extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
292#define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \ 292#define pmd_sect(pmd) ((pmd_val(pmd) & PMD_TYPE_MASK) == \
293 PMD_TYPE_SECT) 293 PMD_TYPE_SECT)
294 294
295#ifdef ARM64_64K_PAGES 295#ifdef CONFIG_ARM64_64K_PAGES
296#define pud_sect(pud) (0) 296#define pud_sect(pud) (0)
297#else 297#else
298#define pud_sect(pud) ((pud_val(pud) & PUD_TYPE_MASK) == \ 298#define pud_sect(pud) ((pud_val(pud) & PUD_TYPE_MASK) == \
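The pgtable.h hunk above only swaps a bare ARM64_64K_PAGES guard for CONFIG_ARM64_64K_PAGES. A minimal userspace sketch of why the prefix matters (the macro definition below stands in for what Kconfig would generate; it is not kernel code):

/*
 * Illustrative sketch only: Kconfig emits CONFIG_-prefixed macros, so a
 * bare ARM64_64K_PAGES is never defined and its #ifdef branch is
 * silently dead code.
 */
#include <stdio.h>

#define CONFIG_ARM64_64K_PAGES 1        /* what Kconfig would define */

int main(void)
{
#ifdef ARM64_64K_PAGES                  /* missing prefix: never taken */
        puts("guard without CONFIG_ prefix taken");
#endif
#ifdef CONFIG_ARM64_64K_PAGES           /* corrected guard: taken */
        puts("guard with CONFIG_ prefix taken");
#endif
        return 0;
}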
diff --git a/arch/arm64/include/asm/ptrace.h b/arch/arm64/include/asm/ptrace.h
index a429b5940be2..501000fadb6f 100644
--- a/arch/arm64/include/asm/ptrace.h
+++ b/arch/arm64/include/asm/ptrace.h
@@ -21,6 +21,10 @@
21 21
22#include <uapi/asm/ptrace.h> 22#include <uapi/asm/ptrace.h>
23 23
24/* Current Exception Level values, as contained in CurrentEL */
25#define CurrentEL_EL1 (1 << 2)
26#define CurrentEL_EL2 (2 << 2)
27
24/* AArch32-specific ptrace requests */ 28/* AArch32-specific ptrace requests */
25#define COMPAT_PTRACE_GETREGS 12 29#define COMPAT_PTRACE_GETREGS 12
26#define COMPAT_PTRACE_SETREGS 13 30#define COMPAT_PTRACE_SETREGS 13
diff --git a/arch/arm64/kernel/efi-entry.S b/arch/arm64/kernel/efi-entry.S
index 66716c9b9e5f..619b1dd7bcde 100644
--- a/arch/arm64/kernel/efi-entry.S
+++ b/arch/arm64/kernel/efi-entry.S
@@ -78,8 +78,7 @@ ENTRY(efi_stub_entry)
78 78
79 /* Turn off Dcache and MMU */ 79 /* Turn off Dcache and MMU */
80 mrs x0, CurrentEL 80 mrs x0, CurrentEL
81 cmp x0, #PSR_MODE_EL2t 81 cmp x0, #CurrentEL_EL2
82 ccmp x0, #PSR_MODE_EL2h, #0x4, ne
83 b.ne 1f 82 b.ne 1f
84 mrs x0, sctlr_el2 83 mrs x0, sctlr_el2
85 bic x0, x0, #1 << 0 // clear SCTLR.M 84 bic x0, x0, #1 << 0 // clear SCTLR.M
diff --git a/arch/arm64/kernel/efi-stub.c b/arch/arm64/kernel/efi-stub.c
index 60e98a639ac5..e786e6cdc400 100644
--- a/arch/arm64/kernel/efi-stub.c
+++ b/arch/arm64/kernel/efi-stub.c
@@ -12,8 +12,6 @@
12#include <linux/efi.h> 12#include <linux/efi.h>
13#include <linux/libfdt.h> 13#include <linux/libfdt.h>
14#include <asm/sections.h> 14#include <asm/sections.h>
15#include <generated/compile.h>
16#include <generated/utsrelease.h>
17 15
18/* 16/*
19 * AArch64 requires the DTB to be 8-byte aligned in the first 512MiB from 17 * AArch64 requires the DTB to be 8-byte aligned in the first 512MiB from
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index a96d3a6a63f6..a2c1195abb7f 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -270,8 +270,7 @@ ENDPROC(stext)
270 */ 270 */
271ENTRY(el2_setup) 271ENTRY(el2_setup)
272 mrs x0, CurrentEL 272 mrs x0, CurrentEL
273 cmp x0, #PSR_MODE_EL2t 273 cmp x0, #CurrentEL_EL2
274 ccmp x0, #PSR_MODE_EL2h, #0x4, ne
275 b.ne 1f 274 b.ne 1f
276 mrs x0, sctlr_el2 275 mrs x0, sctlr_el2
277CPU_BE( orr x0, x0, #(1 << 25) ) // Set the EE bit for EL2 276CPU_BE( orr x0, x0, #(1 << 25) ) // Set the EE bit for EL2
diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c
index 9aecbace4128..13bbc3be6f5a 100644
--- a/arch/arm64/mm/copypage.c
+++ b/arch/arm64/mm/copypage.c
@@ -27,8 +27,10 @@ void __cpu_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
27 copy_page(kto, kfrom); 27 copy_page(kto, kfrom);
28 __flush_dcache_area(kto, PAGE_SIZE); 28 __flush_dcache_area(kto, PAGE_SIZE);
29} 29}
30EXPORT_SYMBOL_GPL(__cpu_copy_user_page);
30 31
31void __cpu_clear_user_page(void *kaddr, unsigned long vaddr) 32void __cpu_clear_user_page(void *kaddr, unsigned long vaddr)
32{ 33{
33 clear_page(kaddr); 34 clear_page(kaddr);
34} 35}
36EXPORT_SYMBOL_GPL(__cpu_clear_user_page);
diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c
index e4193e3adc7f..0d64089d28b5 100644
--- a/arch/arm64/mm/flush.c
+++ b/arch/arm64/mm/flush.c
@@ -79,7 +79,8 @@ void __sync_icache_dcache(pte_t pte, unsigned long addr)
79 return; 79 return;
80 80
81 if (!test_and_set_bit(PG_dcache_clean, &page->flags)) { 81 if (!test_and_set_bit(PG_dcache_clean, &page->flags)) {
82 __flush_dcache_area(page_address(page), PAGE_SIZE); 82 __flush_dcache_area(page_address(page),
83 PAGE_SIZE << compound_order(page));
83 __flush_icache_all(); 84 __flush_icache_all();
84 } else if (icache_is_aivivt()) { 85 } else if (icache_is_aivivt()) {
85 __flush_icache_all(); 86 __flush_icache_all();
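The flush.c change widens the dcache flush from the head page to the whole compound page. A rough userspace illustration of the length arithmetic, assuming 4K base pages and an order-9 (2 MiB) huge page; the numbers are assumptions, not taken from the patch:

/*
 * Illustrative arithmetic only, not kernel code: the flushed length for
 * a compound page is the base page size shifted left by its order.
 */
#include <stdio.h>

int main(void)
{
        unsigned long page_size = 4096; /* assumed 4K base pages */
        unsigned int order = 9;         /* assumed 2 MiB huge page */

        /* Previously only page_size bytes were flushed; now the whole
         * compound page, page_size << order, is covered. */
        printf("flush length: %lu bytes\n", page_size << order);
        return 0;
}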
diff --git a/arch/ia64/include/uapi/asm/fcntl.h b/arch/ia64/include/uapi/asm/fcntl.h
index 1dd275dc8f65..7b485876cad4 100644
--- a/arch/ia64/include/uapi/asm/fcntl.h
+++ b/arch/ia64/include/uapi/asm/fcntl.h
@@ -8,6 +8,7 @@
8#define force_o_largefile() \ 8#define force_o_largefile() \
9 (personality(current->personality) != PER_LINUX32) 9 (personality(current->personality) != PER_LINUX32)
10 10
11#include <linux/personality.h>
11#include <asm-generic/fcntl.h> 12#include <asm-generic/fcntl.h>
12 13
13#endif /* _ASM_IA64_FCNTL_H */ 14#endif /* _ASM_IA64_FCNTL_H */
diff --git a/arch/m68k/kernel/head.S b/arch/m68k/kernel/head.S
index dbb118e1a4e0..a54788458ca3 100644
--- a/arch/m68k/kernel/head.S
+++ b/arch/m68k/kernel/head.S
@@ -921,7 +921,8 @@ L(nocon):
921 jls 1f 921 jls 1f
922 lsrl #1,%d1 922 lsrl #1,%d1
9231: 9231:
924 movel %d1,m68k_init_mapped_size 924 lea %pc@(m68k_init_mapped_size),%a0
925 movel %d1,%a0@
925 mmu_map #PAGE_OFFSET,%pc@(L(phys_kernel_start)),%d1,\ 926 mmu_map #PAGE_OFFSET,%pc@(L(phys_kernel_start)),%d1,\
926 %pc@(m68k_supervisor_cachemode) 927 %pc@(m68k_supervisor_cachemode)
927 928
diff --git a/arch/m68k/kernel/time.c b/arch/m68k/kernel/time.c
index 958f1adb9d0c..3857737e3958 100644
--- a/arch/m68k/kernel/time.c
+++ b/arch/m68k/kernel/time.c
@@ -11,6 +11,7 @@
11 */ 11 */
12 12
13#include <linux/errno.h> 13#include <linux/errno.h>
14#include <linux/export.h>
14#include <linux/module.h> 15#include <linux/module.h>
15#include <linux/sched.h> 16#include <linux/sched.h>
16#include <linux/kernel.h> 17#include <linux/kernel.h>
@@ -30,6 +31,7 @@
30 31
31 32
32unsigned long (*mach_random_get_entropy)(void); 33unsigned long (*mach_random_get_entropy)(void);
34EXPORT_SYMBOL_GPL(mach_random_get_entropy);
33 35
34 36
35/* 37/*
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 7a469acee33c..4e238e6e661c 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -269,6 +269,7 @@ config LANTIQ
269config LASAT 269config LASAT
270 bool "LASAT Networks platforms" 270 bool "LASAT Networks platforms"
271 select CEVT_R4K 271 select CEVT_R4K
272 select CRC32
272 select CSRC_R4K 273 select CSRC_R4K
273 select DMA_NONCOHERENT 274 select DMA_NONCOHERENT
274 select SYS_HAS_EARLY_PRINTK 275 select SYS_HAS_EARLY_PRINTK
diff --git a/arch/mips/include/asm/sigcontext.h b/arch/mips/include/asm/sigcontext.h
index f54bdbe85c0d..eeeb0f48c767 100644
--- a/arch/mips/include/asm/sigcontext.h
+++ b/arch/mips/include/asm/sigcontext.h
@@ -32,8 +32,6 @@ struct sigcontext32 {
32 __u32 sc_lo2; 32 __u32 sc_lo2;
33 __u32 sc_hi3; 33 __u32 sc_hi3;
34 __u32 sc_lo3; 34 __u32 sc_lo3;
35 __u64 sc_msaregs[32]; /* Most significant 64 bits */
36 __u32 sc_msa_csr;
37}; 35};
38#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */ 36#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */
39#endif /* _ASM_SIGCONTEXT_H */ 37#endif /* _ASM_SIGCONTEXT_H */
diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h
index f8d63b3b40b4..708c5d414905 100644
--- a/arch/mips/include/asm/uasm.h
+++ b/arch/mips/include/asm/uasm.h
@@ -67,6 +67,9 @@ void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c)
67#define Ip_u2s3u1(op) \ 67#define Ip_u2s3u1(op) \
68void ISAOPC(op)(u32 **buf, unsigned int a, signed int b, unsigned int c) 68void ISAOPC(op)(u32 **buf, unsigned int a, signed int b, unsigned int c)
69 69
70#define Ip_s3s1s2(op) \
71void ISAOPC(op)(u32 **buf, int a, int b, int c)
72
70#define Ip_u2u1s3(op) \ 73#define Ip_u2u1s3(op) \
71void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c) 74void ISAOPC(op)(u32 **buf, unsigned int a, unsigned int b, signed int c)
72 75
@@ -147,6 +150,7 @@ Ip_u2s3u1(_scd);
147Ip_u2s3u1(_sd); 150Ip_u2s3u1(_sd);
148Ip_u2u1u3(_sll); 151Ip_u2u1u3(_sll);
149Ip_u3u2u1(_sllv); 152Ip_u3u2u1(_sllv);
153Ip_s3s1s2(_slt);
150Ip_u2u1s3(_sltiu); 154Ip_u2u1s3(_sltiu);
151Ip_u3u1u2(_sltu); 155Ip_u3u1u2(_sltu);
152Ip_u2u1u3(_sra); 156Ip_u2u1u3(_sra);
diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h
index 4b7160259292..4bfdb9d4c186 100644
--- a/arch/mips/include/uapi/asm/inst.h
+++ b/arch/mips/include/uapi/asm/inst.h
@@ -273,6 +273,7 @@ enum mm_32a_minor_op {
273 mm_and_op = 0x250, 273 mm_and_op = 0x250,
274 mm_or32_op = 0x290, 274 mm_or32_op = 0x290,
275 mm_xor32_op = 0x310, 275 mm_xor32_op = 0x310,
276 mm_slt_op = 0x350,
276 mm_sltu_op = 0x390, 277 mm_sltu_op = 0x390,
277}; 278};
278 279
diff --git a/arch/mips/include/uapi/asm/sigcontext.h b/arch/mips/include/uapi/asm/sigcontext.h
index 681c17603a48..6c9906f59c6e 100644
--- a/arch/mips/include/uapi/asm/sigcontext.h
+++ b/arch/mips/include/uapi/asm/sigcontext.h
@@ -12,10 +12,6 @@
12#include <linux/types.h> 12#include <linux/types.h>
13#include <asm/sgidefs.h> 13#include <asm/sgidefs.h>
14 14
15/* Bits which may be set in sc_used_math */
16#define USEDMATH_FP (1 << 0)
17#define USEDMATH_MSA (1 << 1)
18
19#if _MIPS_SIM == _MIPS_SIM_ABI32 15#if _MIPS_SIM == _MIPS_SIM_ABI32
20 16
21/* 17/*
@@ -41,8 +37,6 @@ struct sigcontext {
41 unsigned long sc_lo2; 37 unsigned long sc_lo2;
42 unsigned long sc_hi3; 38 unsigned long sc_hi3;
43 unsigned long sc_lo3; 39 unsigned long sc_lo3;
44 unsigned long long sc_msaregs[32]; /* Most significant 64 bits */
45 unsigned long sc_msa_csr;
46}; 40};
47 41
48#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ 42#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
@@ -76,8 +70,6 @@ struct sigcontext {
76 __u32 sc_used_math; 70 __u32 sc_used_math;
77 __u32 sc_dsp; 71 __u32 sc_dsp;
78 __u32 sc_reserved; 72 __u32 sc_reserved;
79 __u64 sc_msaregs[32];
80 __u32 sc_msa_csr;
81}; 73};
82 74
83 75
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 02f075df8f2e..4bb5107511e2 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -293,7 +293,6 @@ void output_sc_defines(void)
293 OFFSET(SC_LO2, sigcontext, sc_lo2); 293 OFFSET(SC_LO2, sigcontext, sc_lo2);
294 OFFSET(SC_HI3, sigcontext, sc_hi3); 294 OFFSET(SC_HI3, sigcontext, sc_hi3);
295 OFFSET(SC_LO3, sigcontext, sc_lo3); 295 OFFSET(SC_LO3, sigcontext, sc_lo3);
296 OFFSET(SC_MSAREGS, sigcontext, sc_msaregs);
297 BLANK(); 296 BLANK();
298} 297}
299#endif 298#endif
@@ -308,7 +307,6 @@ void output_sc_defines(void)
308 OFFSET(SC_MDLO, sigcontext, sc_mdlo); 307 OFFSET(SC_MDLO, sigcontext, sc_mdlo);
309 OFFSET(SC_PC, sigcontext, sc_pc); 308 OFFSET(SC_PC, sigcontext, sc_pc);
310 OFFSET(SC_FPC_CSR, sigcontext, sc_fpc_csr); 309 OFFSET(SC_FPC_CSR, sigcontext, sc_fpc_csr);
311 OFFSET(SC_MSAREGS, sigcontext, sc_msaregs);
312 BLANK(); 310 BLANK();
313} 311}
314#endif 312#endif
@@ -320,7 +318,6 @@ void output_sc32_defines(void)
320 OFFSET(SC32_FPREGS, sigcontext32, sc_fpregs); 318 OFFSET(SC32_FPREGS, sigcontext32, sc_fpregs);
321 OFFSET(SC32_FPC_CSR, sigcontext32, sc_fpc_csr); 319 OFFSET(SC32_FPC_CSR, sigcontext32, sc_fpc_csr);
322 OFFSET(SC32_FPC_EIR, sigcontext32, sc_fpc_eir); 320 OFFSET(SC32_FPC_EIR, sigcontext32, sc_fpc_eir);
323 OFFSET(SC32_MSAREGS, sigcontext32, sc_msaregs);
324 BLANK(); 321 BLANK();
325} 322}
326#endif 323#endif
diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c
index 4858642d543d..a734b2c2f9ea 100644
--- a/arch/mips/kernel/irq-msc01.c
+++ b/arch/mips/kernel/irq-msc01.c
@@ -126,7 +126,7 @@ void __init init_msc_irqs(unsigned long icubase, unsigned int irqbase, msc_irqma
126 126
127 board_bind_eic_interrupt = &msc_bind_eic_interrupt; 127 board_bind_eic_interrupt = &msc_bind_eic_interrupt;
128 128
129 for (; nirq >= 0; nirq--, imp++) { 129 for (; nirq > 0; nirq--, imp++) {
130 int n = imp->im_irq; 130 int n = imp->im_irq;
131 131
132 switch (imp->im_type) { 132 switch (imp->im_type) {
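The irq-msc01 fix corrects an off-by-one: nirq is a count of table entries, not a last index, so the loop must run while nirq > 0. A trivial standalone illustration (the table and values are made up):

/*
 * Illustrative sketch only: iterating 'count' entries with '>= 0'
 * visits one element past the end; '> 0' stops at the last valid one.
 */
#include <stdio.h>

int main(void)
{
        int irqs[3] = { 10, 20, 30 };   /* made-up table */
        int nirq = 3;                   /* number of entries, not an index */
        const int *imp = irqs;

        for (; nirq > 0; nirq--, imp++) /* 'nirq >= 0' would read irqs[3] */
                printf("irq %d\n", *imp);
        return 0;
}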
diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
index 5aa4c6f8cf83..c4c2069d3a20 100644
--- a/arch/mips/kernel/pm-cps.c
+++ b/arch/mips/kernel/pm-cps.c
@@ -101,7 +101,7 @@ static void coupled_barrier(atomic_t *a, unsigned online)
101 if (!coupled_coherence) 101 if (!coupled_coherence)
102 return; 102 return;
103 103
104 smp_mb__before_atomic_inc(); 104 smp_mb__before_atomic();
105 atomic_inc(a); 105 atomic_inc(a);
106 106
107 while (atomic_read(a) < online) 107 while (atomic_read(a) < online)
@@ -158,7 +158,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
158 158
159 /* Indicate that this CPU might not be coherent */ 159 /* Indicate that this CPU might not be coherent */
160 cpumask_clear_cpu(cpu, &cpu_coherent_mask); 160 cpumask_clear_cpu(cpu, &cpu_coherent_mask);
161 smp_mb__after_clear_bit(); 161 smp_mb__after_atomic();
162 162
163 /* Create a non-coherent mapping of the core ready_count */ 163 /* Create a non-coherent mapping of the core ready_count */
164 core_ready_count = per_cpu(ready_count, core); 164 core_ready_count = per_cpu(ready_count, core);
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
index 71814272d148..8352523568e6 100644
--- a/arch/mips/kernel/r4k_fpu.S
+++ b/arch/mips/kernel/r4k_fpu.S
@@ -13,7 +13,6 @@
13 * Copyright (C) 1999, 2001 Silicon Graphics, Inc. 13 * Copyright (C) 1999, 2001 Silicon Graphics, Inc.
14 */ 14 */
15#include <asm/asm.h> 15#include <asm/asm.h>
16#include <asm/asmmacro.h>
17#include <asm/errno.h> 16#include <asm/errno.h>
18#include <asm/fpregdef.h> 17#include <asm/fpregdef.h>
19#include <asm/mipsregs.h> 18#include <asm/mipsregs.h>
@@ -246,218 +245,6 @@ LEAF(_restore_fp_context32)
246 END(_restore_fp_context32) 245 END(_restore_fp_context32)
247#endif 246#endif
248 247
249#ifdef CONFIG_CPU_HAS_MSA
250
251 .macro save_sc_msareg wr, off, sc, tmp
252#ifdef CONFIG_64BIT
253 copy_u_d \tmp, \wr, 1
254 EX sd \tmp, (\off+(\wr*8))(\sc)
255#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
256 copy_u_w \tmp, \wr, 2
257 EX sw \tmp, (\off+(\wr*8)+0)(\sc)
258 copy_u_w \tmp, \wr, 3
259 EX sw \tmp, (\off+(\wr*8)+4)(\sc)
260#else /* CONFIG_CPU_BIG_ENDIAN */
261 copy_u_w \tmp, \wr, 2
262 EX sw \tmp, (\off+(\wr*8)+4)(\sc)
263 copy_u_w \tmp, \wr, 3
264 EX sw \tmp, (\off+(\wr*8)+0)(\sc)
265#endif
266 .endm
267
268/*
269 * int _save_msa_context(struct sigcontext *sc)
270 *
271 * Save the upper 64 bits of each vector register along with the MSA_CSR
272 * register into sc. Returns zero on success, else non-zero.
273 */
274LEAF(_save_msa_context)
275 save_sc_msareg 0, SC_MSAREGS, a0, t0
276 save_sc_msareg 1, SC_MSAREGS, a0, t0
277 save_sc_msareg 2, SC_MSAREGS, a0, t0
278 save_sc_msareg 3, SC_MSAREGS, a0, t0
279 save_sc_msareg 4, SC_MSAREGS, a0, t0
280 save_sc_msareg 5, SC_MSAREGS, a0, t0
281 save_sc_msareg 6, SC_MSAREGS, a0, t0
282 save_sc_msareg 7, SC_MSAREGS, a0, t0
283 save_sc_msareg 8, SC_MSAREGS, a0, t0
284 save_sc_msareg 9, SC_MSAREGS, a0, t0
285 save_sc_msareg 10, SC_MSAREGS, a0, t0
286 save_sc_msareg 11, SC_MSAREGS, a0, t0
287 save_sc_msareg 12, SC_MSAREGS, a0, t0
288 save_sc_msareg 13, SC_MSAREGS, a0, t0
289 save_sc_msareg 14, SC_MSAREGS, a0, t0
290 save_sc_msareg 15, SC_MSAREGS, a0, t0
291 save_sc_msareg 16, SC_MSAREGS, a0, t0
292 save_sc_msareg 17, SC_MSAREGS, a0, t0
293 save_sc_msareg 18, SC_MSAREGS, a0, t0
294 save_sc_msareg 19, SC_MSAREGS, a0, t0
295 save_sc_msareg 20, SC_MSAREGS, a0, t0
296 save_sc_msareg 21, SC_MSAREGS, a0, t0
297 save_sc_msareg 22, SC_MSAREGS, a0, t0
298 save_sc_msareg 23, SC_MSAREGS, a0, t0
299 save_sc_msareg 24, SC_MSAREGS, a0, t0
300 save_sc_msareg 25, SC_MSAREGS, a0, t0
301 save_sc_msareg 26, SC_MSAREGS, a0, t0
302 save_sc_msareg 27, SC_MSAREGS, a0, t0
303 save_sc_msareg 28, SC_MSAREGS, a0, t0
304 save_sc_msareg 29, SC_MSAREGS, a0, t0
305 save_sc_msareg 30, SC_MSAREGS, a0, t0
306 save_sc_msareg 31, SC_MSAREGS, a0, t0
307 jr ra
308 li v0, 0
309 END(_save_msa_context)
310
311#ifdef CONFIG_MIPS32_COMPAT
312
313/*
314 * int _save_msa_context32(struct sigcontext32 *sc)
315 *
316 * Save the upper 64 bits of each vector register along with the MSA_CSR
317 * register into sc. Returns zero on success, else non-zero.
318 */
319LEAF(_save_msa_context32)
320 save_sc_msareg 0, SC32_MSAREGS, a0, t0
321 save_sc_msareg 1, SC32_MSAREGS, a0, t0
322 save_sc_msareg 2, SC32_MSAREGS, a0, t0
323 save_sc_msareg 3, SC32_MSAREGS, a0, t0
324 save_sc_msareg 4, SC32_MSAREGS, a0, t0
325 save_sc_msareg 5, SC32_MSAREGS, a0, t0
326 save_sc_msareg 6, SC32_MSAREGS, a0, t0
327 save_sc_msareg 7, SC32_MSAREGS, a0, t0
328 save_sc_msareg 8, SC32_MSAREGS, a0, t0
329 save_sc_msareg 9, SC32_MSAREGS, a0, t0
330 save_sc_msareg 10, SC32_MSAREGS, a0, t0
331 save_sc_msareg 11, SC32_MSAREGS, a0, t0
332 save_sc_msareg 12, SC32_MSAREGS, a0, t0
333 save_sc_msareg 13, SC32_MSAREGS, a0, t0
334 save_sc_msareg 14, SC32_MSAREGS, a0, t0
335 save_sc_msareg 15, SC32_MSAREGS, a0, t0
336 save_sc_msareg 16, SC32_MSAREGS, a0, t0
337 save_sc_msareg 17, SC32_MSAREGS, a0, t0
338 save_sc_msareg 18, SC32_MSAREGS, a0, t0
339 save_sc_msareg 19, SC32_MSAREGS, a0, t0
340 save_sc_msareg 20, SC32_MSAREGS, a0, t0
341 save_sc_msareg 21, SC32_MSAREGS, a0, t0
342 save_sc_msareg 22, SC32_MSAREGS, a0, t0
343 save_sc_msareg 23, SC32_MSAREGS, a0, t0
344 save_sc_msareg 24, SC32_MSAREGS, a0, t0
345 save_sc_msareg 25, SC32_MSAREGS, a0, t0
346 save_sc_msareg 26, SC32_MSAREGS, a0, t0
347 save_sc_msareg 27, SC32_MSAREGS, a0, t0
348 save_sc_msareg 28, SC32_MSAREGS, a0, t0
349 save_sc_msareg 29, SC32_MSAREGS, a0, t0
350 save_sc_msareg 30, SC32_MSAREGS, a0, t0
351 save_sc_msareg 31, SC32_MSAREGS, a0, t0
352 jr ra
353 li v0, 0
354 END(_save_msa_context32)
355
356#endif /* CONFIG_MIPS32_COMPAT */
357
358 .macro restore_sc_msareg wr, off, sc, tmp
359#ifdef CONFIG_64BIT
360 EX ld \tmp, (\off+(\wr*8))(\sc)
361 insert_d \wr, 1, \tmp
362#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
363 EX lw \tmp, (\off+(\wr*8)+0)(\sc)
364 insert_w \wr, 2, \tmp
365 EX lw \tmp, (\off+(\wr*8)+4)(\sc)
366 insert_w \wr, 3, \tmp
367#else /* CONFIG_CPU_BIG_ENDIAN */
368 EX lw \tmp, (\off+(\wr*8)+4)(\sc)
369 insert_w \wr, 2, \tmp
370 EX lw \tmp, (\off+(\wr*8)+0)(\sc)
371 insert_w \wr, 3, \tmp
372#endif
373 .endm
374
375/*
376 * int _restore_msa_context(struct sigcontext *sc)
377 */
378LEAF(_restore_msa_context)
379 restore_sc_msareg 0, SC_MSAREGS, a0, t0
380 restore_sc_msareg 1, SC_MSAREGS, a0, t0
381 restore_sc_msareg 2, SC_MSAREGS, a0, t0
382 restore_sc_msareg 3, SC_MSAREGS, a0, t0
383 restore_sc_msareg 4, SC_MSAREGS, a0, t0
384 restore_sc_msareg 5, SC_MSAREGS, a0, t0
385 restore_sc_msareg 6, SC_MSAREGS, a0, t0
386 restore_sc_msareg 7, SC_MSAREGS, a0, t0
387 restore_sc_msareg 8, SC_MSAREGS, a0, t0
388 restore_sc_msareg 9, SC_MSAREGS, a0, t0
389 restore_sc_msareg 10, SC_MSAREGS, a0, t0
390 restore_sc_msareg 11, SC_MSAREGS, a0, t0
391 restore_sc_msareg 12, SC_MSAREGS, a0, t0
392 restore_sc_msareg 13, SC_MSAREGS, a0, t0
393 restore_sc_msareg 14, SC_MSAREGS, a0, t0
394 restore_sc_msareg 15, SC_MSAREGS, a0, t0
395 restore_sc_msareg 16, SC_MSAREGS, a0, t0
396 restore_sc_msareg 17, SC_MSAREGS, a0, t0
397 restore_sc_msareg 18, SC_MSAREGS, a0, t0
398 restore_sc_msareg 19, SC_MSAREGS, a0, t0
399 restore_sc_msareg 20, SC_MSAREGS, a0, t0
400 restore_sc_msareg 21, SC_MSAREGS, a0, t0
401 restore_sc_msareg 22, SC_MSAREGS, a0, t0
402 restore_sc_msareg 23, SC_MSAREGS, a0, t0
403 restore_sc_msareg 24, SC_MSAREGS, a0, t0
404 restore_sc_msareg 25, SC_MSAREGS, a0, t0
405 restore_sc_msareg 26, SC_MSAREGS, a0, t0
406 restore_sc_msareg 27, SC_MSAREGS, a0, t0
407 restore_sc_msareg 28, SC_MSAREGS, a0, t0
408 restore_sc_msareg 29, SC_MSAREGS, a0, t0
409 restore_sc_msareg 30, SC_MSAREGS, a0, t0
410 restore_sc_msareg 31, SC_MSAREGS, a0, t0
411 jr ra
412 li v0, 0
413 END(_restore_msa_context)
414
415#ifdef CONFIG_MIPS32_COMPAT
416
417/*
418 * int _restore_msa_context32(struct sigcontext32 *sc)
419 */
420LEAF(_restore_msa_context32)
421 restore_sc_msareg 0, SC32_MSAREGS, a0, t0
422 restore_sc_msareg 1, SC32_MSAREGS, a0, t0
423 restore_sc_msareg 2, SC32_MSAREGS, a0, t0
424 restore_sc_msareg 3, SC32_MSAREGS, a0, t0
425 restore_sc_msareg 4, SC32_MSAREGS, a0, t0
426 restore_sc_msareg 5, SC32_MSAREGS, a0, t0
427 restore_sc_msareg 6, SC32_MSAREGS, a0, t0
428 restore_sc_msareg 7, SC32_MSAREGS, a0, t0
429 restore_sc_msareg 8, SC32_MSAREGS, a0, t0
430 restore_sc_msareg 9, SC32_MSAREGS, a0, t0
431 restore_sc_msareg 10, SC32_MSAREGS, a0, t0
432 restore_sc_msareg 11, SC32_MSAREGS, a0, t0
433 restore_sc_msareg 12, SC32_MSAREGS, a0, t0
434 restore_sc_msareg 13, SC32_MSAREGS, a0, t0
435 restore_sc_msareg 14, SC32_MSAREGS, a0, t0
436 restore_sc_msareg 15, SC32_MSAREGS, a0, t0
437 restore_sc_msareg 16, SC32_MSAREGS, a0, t0
438 restore_sc_msareg 17, SC32_MSAREGS, a0, t0
439 restore_sc_msareg 18, SC32_MSAREGS, a0, t0
440 restore_sc_msareg 19, SC32_MSAREGS, a0, t0
441 restore_sc_msareg 20, SC32_MSAREGS, a0, t0
442 restore_sc_msareg 21, SC32_MSAREGS, a0, t0
443 restore_sc_msareg 22, SC32_MSAREGS, a0, t0
444 restore_sc_msareg 23, SC32_MSAREGS, a0, t0
445 restore_sc_msareg 24, SC32_MSAREGS, a0, t0
446 restore_sc_msareg 25, SC32_MSAREGS, a0, t0
447 restore_sc_msareg 26, SC32_MSAREGS, a0, t0
448 restore_sc_msareg 27, SC32_MSAREGS, a0, t0
449 restore_sc_msareg 28, SC32_MSAREGS, a0, t0
450 restore_sc_msareg 29, SC32_MSAREGS, a0, t0
451 restore_sc_msareg 30, SC32_MSAREGS, a0, t0
452 restore_sc_msareg 31, SC32_MSAREGS, a0, t0
453 jr ra
454 li v0, 0
455 END(_restore_msa_context32)
456
457#endif /* CONFIG_MIPS32_COMPAT */
458
459#endif /* CONFIG_CPU_HAS_MSA */
460
461 .set reorder 248 .set reorder
462 249
463 .type fault@function 250 .type fault@function
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 33133d3df3e5..9e60d117e41e 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -31,7 +31,6 @@
31#include <linux/bitops.h> 31#include <linux/bitops.h>
32#include <asm/cacheflush.h> 32#include <asm/cacheflush.h>
33#include <asm/fpu.h> 33#include <asm/fpu.h>
34#include <asm/msa.h>
35#include <asm/sim.h> 34#include <asm/sim.h>
36#include <asm/ucontext.h> 35#include <asm/ucontext.h>
37#include <asm/cpu-features.h> 36#include <asm/cpu-features.h>
@@ -48,9 +47,6 @@ static int (*restore_fp_context)(struct sigcontext __user *sc);
48extern asmlinkage int _save_fp_context(struct sigcontext __user *sc); 47extern asmlinkage int _save_fp_context(struct sigcontext __user *sc);
49extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc); 48extern asmlinkage int _restore_fp_context(struct sigcontext __user *sc);
50 49
51extern asmlinkage int _save_msa_context(struct sigcontext __user *sc);
52extern asmlinkage int _restore_msa_context(struct sigcontext __user *sc);
53
54struct sigframe { 50struct sigframe {
55 u32 sf_ass[4]; /* argument save space for o32 */ 51 u32 sf_ass[4]; /* argument save space for o32 */
56 u32 sf_pad[2]; /* Was: signal trampoline */ 52 u32 sf_pad[2]; /* Was: signal trampoline */
@@ -100,60 +96,20 @@ static int copy_fp_from_sigcontext(struct sigcontext __user *sc)
100} 96}
101 97
102/* 98/*
103 * These functions will save only the upper 64 bits of the vector registers,
104 * since the lower 64 bits have already been saved as the scalar FP context.
105 */
106static int copy_msa_to_sigcontext(struct sigcontext __user *sc)
107{
108 int i;
109 int err = 0;
110
111 for (i = 0; i < NUM_FPU_REGS; i++) {
112 err |=
113 __put_user(get_fpr64(&current->thread.fpu.fpr[i], 1),
114 &sc->sc_msaregs[i]);
115 }
116 err |= __put_user(current->thread.fpu.msacsr, &sc->sc_msa_csr);
117
118 return err;
119}
120
121static int copy_msa_from_sigcontext(struct sigcontext __user *sc)
122{
123 int i;
124 int err = 0;
125 u64 val;
126
127 for (i = 0; i < NUM_FPU_REGS; i++) {
128 err |= __get_user(val, &sc->sc_msaregs[i]);
129 set_fpr64(&current->thread.fpu.fpr[i], 1, val);
130 }
131 err |= __get_user(current->thread.fpu.msacsr, &sc->sc_msa_csr);
132
133 return err;
134}
135
136/*
137 * Helper routines 99 * Helper routines
138 */ 100 */
139static int protected_save_fp_context(struct sigcontext __user *sc, 101static int protected_save_fp_context(struct sigcontext __user *sc)
140 unsigned used_math)
141{ 102{
142 int err; 103 int err;
143 bool save_msa = cpu_has_msa && (used_math & USEDMATH_MSA);
144#ifndef CONFIG_EVA 104#ifndef CONFIG_EVA
145 while (1) { 105 while (1) {
146 lock_fpu_owner(); 106 lock_fpu_owner();
147 if (is_fpu_owner()) { 107 if (is_fpu_owner()) {
148 err = save_fp_context(sc); 108 err = save_fp_context(sc);
149 if (save_msa && !err)
150 err = _save_msa_context(sc);
151 unlock_fpu_owner(); 109 unlock_fpu_owner();
152 } else { 110 } else {
153 unlock_fpu_owner(); 111 unlock_fpu_owner();
154 err = copy_fp_to_sigcontext(sc); 112 err = copy_fp_to_sigcontext(sc);
155 if (save_msa && !err)
156 err = copy_msa_to_sigcontext(sc);
157 } 113 }
158 if (likely(!err)) 114 if (likely(!err))
159 break; 115 break;
@@ -169,38 +125,24 @@ static int protected_save_fp_context(struct sigcontext __user *sc,
169 * EVA does not have FPU EVA instructions so saving fpu context directly 125 * EVA does not have FPU EVA instructions so saving fpu context directly
170 * does not work. 126 * does not work.
171 */ 127 */
172 disable_msa();
173 lose_fpu(1); 128 lose_fpu(1);
174 err = save_fp_context(sc); /* this might fail */ 129 err = save_fp_context(sc); /* this might fail */
175 if (save_msa && !err)
176 err = copy_msa_to_sigcontext(sc);
177#endif 130#endif
178 return err; 131 return err;
179} 132}
180 133
181static int protected_restore_fp_context(struct sigcontext __user *sc, 134static int protected_restore_fp_context(struct sigcontext __user *sc)
182 unsigned used_math)
183{ 135{
184 int err, tmp __maybe_unused; 136 int err, tmp __maybe_unused;
185 bool restore_msa = cpu_has_msa && (used_math & USEDMATH_MSA);
186#ifndef CONFIG_EVA 137#ifndef CONFIG_EVA
187 while (1) { 138 while (1) {
188 lock_fpu_owner(); 139 lock_fpu_owner();
189 if (is_fpu_owner()) { 140 if (is_fpu_owner()) {
190 err = restore_fp_context(sc); 141 err = restore_fp_context(sc);
191 if (restore_msa && !err) {
192 enable_msa();
193 err = _restore_msa_context(sc);
194 } else {
195 /* signal handler may have used MSA */
196 disable_msa();
197 }
198 unlock_fpu_owner(); 142 unlock_fpu_owner();
199 } else { 143 } else {
200 unlock_fpu_owner(); 144 unlock_fpu_owner();
201 err = copy_fp_from_sigcontext(sc); 145 err = copy_fp_from_sigcontext(sc);
202 if (!err && (used_math & USEDMATH_MSA))
203 err = copy_msa_from_sigcontext(sc);
204 } 146 }
205 if (likely(!err)) 147 if (likely(!err))
206 break; 148 break;
@@ -216,11 +158,8 @@ static int protected_restore_fp_context(struct sigcontext __user *sc,
216 * EVA does not have FPU EVA instructions so restoring fpu context 158 * EVA does not have FPU EVA instructions so restoring fpu context
217 * directly does not work. 159 * directly does not work.
218 */ 160 */
219 enable_msa();
220 lose_fpu(0); 161 lose_fpu(0);
221 err = restore_fp_context(sc); /* this might fail */ 162 err = restore_fp_context(sc); /* this might fail */
222 if (restore_msa && !err)
223 err = copy_msa_from_sigcontext(sc);
224#endif 163#endif
225 return err; 164 return err;
226} 165}
@@ -252,8 +191,7 @@ int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
252 err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp); 191 err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
253 } 192 }
254 193
255 used_math = used_math() ? USEDMATH_FP : 0; 194 used_math = !!used_math();
256 used_math |= thread_msa_context_live() ? USEDMATH_MSA : 0;
257 err |= __put_user(used_math, &sc->sc_used_math); 195 err |= __put_user(used_math, &sc->sc_used_math);
258 196
259 if (used_math) { 197 if (used_math) {
@@ -261,7 +199,7 @@ int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
261 * Save FPU state to signal context. Signal handler 199 * Save FPU state to signal context. Signal handler
262 * will "inherit" current FPU state. 200 * will "inherit" current FPU state.
263 */ 201 */
264 err |= protected_save_fp_context(sc, used_math); 202 err |= protected_save_fp_context(sc);
265 } 203 }
266 return err; 204 return err;
267} 205}
@@ -286,14 +224,14 @@ int fpcsr_pending(unsigned int __user *fpcsr)
286} 224}
287 225
288static int 226static int
289check_and_restore_fp_context(struct sigcontext __user *sc, unsigned used_math) 227check_and_restore_fp_context(struct sigcontext __user *sc)
290{ 228{
291 int err, sig; 229 int err, sig;
292 230
293 err = sig = fpcsr_pending(&sc->sc_fpc_csr); 231 err = sig = fpcsr_pending(&sc->sc_fpc_csr);
294 if (err > 0) 232 if (err > 0)
295 err = 0; 233 err = 0;
296 err |= protected_restore_fp_context(sc, used_math); 234 err |= protected_restore_fp_context(sc);
297 return err ?: sig; 235 return err ?: sig;
298} 236}
299 237
@@ -333,10 +271,9 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
333 if (used_math) { 271 if (used_math) {
334 /* restore fpu context if we have used it before */ 272 /* restore fpu context if we have used it before */
335 if (!err) 273 if (!err)
336 err = check_and_restore_fp_context(sc, used_math); 274 err = check_and_restore_fp_context(sc);
337 } else { 275 } else {
338 /* signal handler may have used FPU or MSA. Disable them. */ 276 /* signal handler may have used FPU. Give it up. */
339 disable_msa();
340 lose_fpu(0); 277 lose_fpu(0);
341 } 278 }
342 279
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index 299f956e4db3..bae2e6ee2109 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -30,7 +30,6 @@
30#include <asm/sim.h> 30#include <asm/sim.h>
31#include <asm/ucontext.h> 31#include <asm/ucontext.h>
32#include <asm/fpu.h> 32#include <asm/fpu.h>
33#include <asm/msa.h>
34#include <asm/war.h> 33#include <asm/war.h>
35#include <asm/vdso.h> 34#include <asm/vdso.h>
36#include <asm/dsp.h> 35#include <asm/dsp.h>
@@ -43,9 +42,6 @@ static int (*restore_fp_context32)(struct sigcontext32 __user *sc);
43extern asmlinkage int _save_fp_context32(struct sigcontext32 __user *sc); 42extern asmlinkage int _save_fp_context32(struct sigcontext32 __user *sc);
44extern asmlinkage int _restore_fp_context32(struct sigcontext32 __user *sc); 43extern asmlinkage int _restore_fp_context32(struct sigcontext32 __user *sc);
45 44
46extern asmlinkage int _save_msa_context32(struct sigcontext32 __user *sc);
47extern asmlinkage int _restore_msa_context32(struct sigcontext32 __user *sc);
48
49/* 45/*
50 * Including <asm/unistd.h> would give use the 64-bit syscall numbers ... 46 * Including <asm/unistd.h> would give use the 64-bit syscall numbers ...
51 */ 47 */
@@ -115,59 +111,19 @@ static int copy_fp_from_sigcontext32(struct sigcontext32 __user *sc)
115} 111}
116 112
117/* 113/*
118 * These functions will save only the upper 64 bits of the vector registers,
119 * since the lower 64 bits have already been saved as the scalar FP context.
120 */
121static int copy_msa_to_sigcontext32(struct sigcontext32 __user *sc)
122{
123 int i;
124 int err = 0;
125
126 for (i = 0; i < NUM_FPU_REGS; i++) {
127 err |=
128 __put_user(get_fpr64(&current->thread.fpu.fpr[i], 1),
129 &sc->sc_msaregs[i]);
130 }
131 err |= __put_user(current->thread.fpu.msacsr, &sc->sc_msa_csr);
132
133 return err;
134}
135
136static int copy_msa_from_sigcontext32(struct sigcontext32 __user *sc)
137{
138 int i;
139 int err = 0;
140 u64 val;
141
142 for (i = 0; i < NUM_FPU_REGS; i++) {
143 err |= __get_user(val, &sc->sc_msaregs[i]);
144 set_fpr64(&current->thread.fpu.fpr[i], 1, val);
145 }
146 err |= __get_user(current->thread.fpu.msacsr, &sc->sc_msa_csr);
147
148 return err;
149}
150
151/*
152 * sigcontext handlers 114 * sigcontext handlers
153 */ 115 */
154static int protected_save_fp_context32(struct sigcontext32 __user *sc, 116static int protected_save_fp_context32(struct sigcontext32 __user *sc)
155 unsigned used_math)
156{ 117{
157 int err; 118 int err;
158 bool save_msa = cpu_has_msa && (used_math & USEDMATH_MSA);
159 while (1) { 119 while (1) {
160 lock_fpu_owner(); 120 lock_fpu_owner();
161 if (is_fpu_owner()) { 121 if (is_fpu_owner()) {
162 err = save_fp_context32(sc); 122 err = save_fp_context32(sc);
163 if (save_msa && !err)
164 err = _save_msa_context32(sc);
165 unlock_fpu_owner(); 123 unlock_fpu_owner();
166 } else { 124 } else {
167 unlock_fpu_owner(); 125 unlock_fpu_owner();
168 err = copy_fp_to_sigcontext32(sc); 126 err = copy_fp_to_sigcontext32(sc);
169 if (save_msa && !err)
170 err = copy_msa_to_sigcontext32(sc);
171 } 127 }
172 if (likely(!err)) 128 if (likely(!err))
173 break; 129 break;
@@ -181,28 +137,17 @@ static int protected_save_fp_context32(struct sigcontext32 __user *sc,
181 return err; 137 return err;
182} 138}
183 139
184static int protected_restore_fp_context32(struct sigcontext32 __user *sc, 140static int protected_restore_fp_context32(struct sigcontext32 __user *sc)
185 unsigned used_math)
186{ 141{
187 int err, tmp __maybe_unused; 142 int err, tmp __maybe_unused;
188 bool restore_msa = cpu_has_msa && (used_math & USEDMATH_MSA);
189 while (1) { 143 while (1) {
190 lock_fpu_owner(); 144 lock_fpu_owner();
191 if (is_fpu_owner()) { 145 if (is_fpu_owner()) {
192 err = restore_fp_context32(sc); 146 err = restore_fp_context32(sc);
193 if (restore_msa && !err) {
194 enable_msa();
195 err = _restore_msa_context32(sc);
196 } else {
197 /* signal handler may have used MSA */
198 disable_msa();
199 }
200 unlock_fpu_owner(); 147 unlock_fpu_owner();
201 } else { 148 } else {
202 unlock_fpu_owner(); 149 unlock_fpu_owner();
203 err = copy_fp_from_sigcontext32(sc); 150 err = copy_fp_from_sigcontext32(sc);
204 if (restore_msa && !err)
205 err = copy_msa_from_sigcontext32(sc);
206 } 151 }
207 if (likely(!err)) 152 if (likely(!err))
208 break; 153 break;
@@ -241,8 +186,7 @@ static int setup_sigcontext32(struct pt_regs *regs,
241 err |= __put_user(mflo3(), &sc->sc_lo3); 186 err |= __put_user(mflo3(), &sc->sc_lo3);
242 } 187 }
243 188
244 used_math = used_math() ? USEDMATH_FP : 0; 189 used_math = !!used_math();
245 used_math |= thread_msa_context_live() ? USEDMATH_MSA : 0;
246 err |= __put_user(used_math, &sc->sc_used_math); 190 err |= __put_user(used_math, &sc->sc_used_math);
247 191
248 if (used_math) { 192 if (used_math) {
@@ -250,21 +194,20 @@ static int setup_sigcontext32(struct pt_regs *regs,
250 * Save FPU state to signal context. Signal handler 194 * Save FPU state to signal context. Signal handler
251 * will "inherit" current FPU state. 195 * will "inherit" current FPU state.
252 */ 196 */
253 err |= protected_save_fp_context32(sc, used_math); 197 err |= protected_save_fp_context32(sc);
254 } 198 }
255 return err; 199 return err;
256} 200}
257 201
258static int 202static int
259check_and_restore_fp_context32(struct sigcontext32 __user *sc, 203check_and_restore_fp_context32(struct sigcontext32 __user *sc)
260 unsigned used_math)
261{ 204{
262 int err, sig; 205 int err, sig;
263 206
264 err = sig = fpcsr_pending(&sc->sc_fpc_csr); 207 err = sig = fpcsr_pending(&sc->sc_fpc_csr);
265 if (err > 0) 208 if (err > 0)
266 err = 0; 209 err = 0;
267 err |= protected_restore_fp_context32(sc, used_math); 210 err |= protected_restore_fp_context32(sc);
268 return err ?: sig; 211 return err ?: sig;
269} 212}
270 213
@@ -301,10 +244,9 @@ static int restore_sigcontext32(struct pt_regs *regs,
301 if (used_math) { 244 if (used_math) {
302 /* restore fpu context if we have used it before */ 245 /* restore fpu context if we have used it before */
303 if (!err) 246 if (!err)
304 err = check_and_restore_fp_context32(sc, used_math); 247 err = check_and_restore_fp_context32(sc);
305 } else { 248 } else {
306 /* signal handler may have used FPU or MSA. Disable them. */ 249 /* signal handler may have used FPU. Give it up. */
307 disable_msa();
308 lose_fpu(0); 250 lose_fpu(0);
309 } 251 }
310 252
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
index df0598d9bfdd..949f2c6827a0 100644
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -301,7 +301,7 @@ static int cps_cpu_disable(void)
301 301
302 core_cfg = &mips_cps_core_bootcfg[current_cpu_data.core]; 302 core_cfg = &mips_cps_core_bootcfg[current_cpu_data.core];
303 atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask); 303 atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);
304 smp_mb__after_atomic_dec(); 304 smp_mb__after_atomic();
305 set_cpu_online(cpu, false); 305 set_cpu_online(cpu, false);
306 cpu_clear(cpu, cpu_callin_map); 306 cpu_clear(cpu, cpu_callin_map);
307 307
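
The one-line change above follows the kernel-wide switch from the old smp_mb__after_atomic_dec() to the generic smp_mb__after_atomic() barrier. A minimal sketch of the pattern, assuming the Linux atomic API; the function and bitmap below are illustrative, not the kernel's actual code:

#include <linux/atomic.h>

static atomic_t vpe_mask;		/* hypothetical per-core VPE bitmap */

static void vpe_leave(int vpe_id)
{
	/* Drop this VPE's bit from the bitmap ... */
	atomic_sub(1 << vpe_id, &vpe_mask);
	/* ... and order that RMW before the cpu-offline stores that follow. */
	smp_mb__after_atomic();
}
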
diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
index cd5e4f568439..f3c56a182fd8 100644
--- a/arch/mips/kvm/kvm_mips.c
+++ b/arch/mips/kvm/kvm_mips.c
@@ -384,6 +384,7 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
384 384
385 kfree(vcpu->arch.guest_ebase); 385 kfree(vcpu->arch.guest_ebase);
386 kfree(vcpu->arch.kseg0_commpage); 386 kfree(vcpu->arch.kseg0_commpage);
387 kfree(vcpu);
387} 388}
388 389
389void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) 390void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
diff --git a/arch/mips/math-emu/ieee754.c b/arch/mips/math-emu/ieee754.c
index 53f1d2287084..8e97acbbe22c 100644
--- a/arch/mips/math-emu/ieee754.c
+++ b/arch/mips/math-emu/ieee754.c
@@ -34,13 +34,22 @@
34 * Special constants 34 * Special constants
35 */ 35 */
36 36
37#define DPCNST(s, b, m) \ 37/*
38 * Older GCC requires the inner braces for initialization of union ieee754dp's
 39 * anonymous struct member. Without them, an error will result.
40 */
41#define xPCNST(s, b, m, ebias) \
38{ \ 42{ \
39 .sign = (s), \ 43 { \
40 .bexp = (b) + DP_EBIAS, \ 44 .sign = (s), \
41 .mant = (m) \ 45 .bexp = (b) + ebias, \
46 .mant = (m) \
47 } \
42} 48}
43 49
50#define DPCNST(s, b, m) \
51 xPCNST(s, b, m, DP_EBIAS)
52
44const union ieee754dp __ieee754dp_spcvals[] = { 53const union ieee754dp __ieee754dp_spcvals[] = {
45 DPCNST(0, DP_EMIN - 1, 0x0000000000000ULL), /* + zero */ 54 DPCNST(0, DP_EMIN - 1, 0x0000000000000ULL), /* + zero */
46 DPCNST(1, DP_EMIN - 1, 0x0000000000000ULL), /* - zero */ 55 DPCNST(1, DP_EMIN - 1, 0x0000000000000ULL), /* - zero */
@@ -62,11 +71,7 @@ const union ieee754dp __ieee754dp_spcvals[] = {
62}; 71};
63 72
64#define SPCNST(s, b, m) \ 73#define SPCNST(s, b, m) \
65{ \ 74 xPCNST(s, b, m, SP_EBIAS)
66 .sign = (s), \
67 .bexp = (b) + SP_EBIAS, \
68 .mant = (m) \
69}
70 75
71const union ieee754sp __ieee754sp_spcvals[] = { 76const union ieee754sp __ieee754sp_spcvals[] = {
72 SPCNST(0, SP_EMIN - 1, 0x000000), /* + zero */ 77 SPCNST(0, SP_EMIN - 1, 0x000000), /* + zero */
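
The xPCNST() macro introduced above adds an inner brace level because the constants initialize an anonymous struct member inside union ieee754dp and, as the new comment says, some older GCC releases only accept that with explicit inner braces. A self-contained illustration of the brace requirement, using a simplified stand-in layout rather than the kernel's real ieee754dp:

#include <stdint.h>

/* Simplified stand-in: an anonymous struct member inside a union. */
union fp64 {
	struct {
		uint64_t mant : 52;
		uint64_t bexp : 11;
		uint64_t sign : 1;
	};				/* anonymous struct, as in ieee754dp */
	uint64_t bits;
};

/* The inner braces around the designated initializers are what xPCNST()
 * adds; older GCC rejects the flatter single-brace form. */
static const union fp64 one = {
	{ .sign = 0, .bexp = 1023, .mant = 0 }
};

int main(void)
{
	return (int)one.sign;		/* 0 */
}
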
diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c
index 775c2800cba2..8399ddf03a02 100644
--- a/arch/mips/mm/uasm-micromips.c
+++ b/arch/mips/mm/uasm-micromips.c
@@ -102,6 +102,7 @@ static struct insn insn_table_MM[] = {
102 { insn_sd, 0, 0 }, 102 { insn_sd, 0, 0 },
103 { insn_sll, M(mm_pool32a_op, 0, 0, 0, 0, mm_sll32_op), RT | RS | RD }, 103 { insn_sll, M(mm_pool32a_op, 0, 0, 0, 0, mm_sll32_op), RT | RS | RD },
104 { insn_sllv, M(mm_pool32a_op, 0, 0, 0, 0, mm_sllv32_op), RT | RS | RD }, 104 { insn_sllv, M(mm_pool32a_op, 0, 0, 0, 0, mm_sllv32_op), RT | RS | RD },
105 { insn_slt, M(mm_pool32a_op, 0, 0, 0, 0, mm_slt_op), RT | RS | RD },
105 { insn_sltiu, M(mm_sltiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM }, 106 { insn_sltiu, M(mm_sltiu32_op, 0, 0, 0, 0, 0), RT | RS | SIMM },
106 { insn_sltu, M(mm_pool32a_op, 0, 0, 0, 0, mm_sltu_op), RT | RS | RD }, 107 { insn_sltu, M(mm_pool32a_op, 0, 0, 0, 0, mm_sltu_op), RT | RS | RD },
107 { insn_sra, M(mm_pool32a_op, 0, 0, 0, 0, mm_sra_op), RT | RS | RD }, 108 { insn_sra, M(mm_pool32a_op, 0, 0, 0, 0, mm_sra_op), RT | RS | RD },
diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c
index 38792c2364f5..6708a2dbf934 100644
--- a/arch/mips/mm/uasm-mips.c
+++ b/arch/mips/mm/uasm-mips.c
@@ -89,7 +89,7 @@ static struct insn insn_table[] = {
89 { insn_lb, M(lb_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 89 { insn_lb, M(lb_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
90 { insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 90 { insn_ld, M(ld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
91 { insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD }, 91 { insn_ldx, M(spec3_op, 0, 0, 0, ldx_op, lx_op), RS | RT | RD },
92 { insn_lh, M(lw_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 92 { insn_lh, M(lh_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
93 { insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 93 { insn_lld, M(lld_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
94 { insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 94 { insn_ll, M(ll_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
95 { insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM }, 95 { insn_lui, M(lui_op, 0, 0, 0, 0, 0), RT | SIMM },
@@ -110,6 +110,7 @@ static struct insn insn_table[] = {
110 { insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 110 { insn_sd, M(sd_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
111 { insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE }, 111 { insn_sll, M(spec_op, 0, 0, 0, 0, sll_op), RT | RD | RE },
112 { insn_sllv, M(spec_op, 0, 0, 0, 0, sllv_op), RS | RT | RD }, 112 { insn_sllv, M(spec_op, 0, 0, 0, 0, sllv_op), RS | RT | RD },
113 { insn_slt, M(spec_op, 0, 0, 0, 0, slt_op), RS | RT | RD },
113 { insn_sltiu, M(sltiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM }, 114 { insn_sltiu, M(sltiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
114 { insn_sltu, M(spec_op, 0, 0, 0, 0, sltu_op), RS | RT | RD }, 115 { insn_sltu, M(spec_op, 0, 0, 0, 0, sltu_op), RS | RT | RD },
115 { insn_sra, M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE }, 116 { insn_sra, M(spec_op, 0, 0, 0, 0, sra_op), RT | RD | RE },
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index 00515805fe41..a01b0d6cedd2 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -53,7 +53,7 @@ enum opcode {
53 insn_ld, insn_ldx, insn_lh, insn_ll, insn_lld, insn_lui, insn_lw, 53 insn_ld, insn_ldx, insn_lh, insn_ll, insn_lld, insn_lui, insn_lw,
54 insn_lwx, insn_mfc0, insn_mfhi, insn_mflo, insn_mtc0, insn_mul, 54 insn_lwx, insn_mfc0, insn_mfhi, insn_mflo, insn_mtc0, insn_mul,
55 insn_or, insn_ori, insn_pref, insn_rfe, insn_rotr, insn_sc, insn_scd, 55 insn_or, insn_ori, insn_pref, insn_rfe, insn_rotr, insn_sc, insn_scd,
56 insn_sd, insn_sll, insn_sllv, insn_sltiu, insn_sltu, insn_sra, 56 insn_sd, insn_sll, insn_sllv, insn_slt, insn_sltiu, insn_sltu, insn_sra,
57 insn_srl, insn_srlv, insn_subu, insn_sw, insn_sync, insn_syscall, 57 insn_srl, insn_srlv, insn_subu, insn_sw, insn_sync, insn_syscall,
58 insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_wait, insn_wsbh, 58 insn_tlbp, insn_tlbr, insn_tlbwi, insn_tlbwr, insn_wait, insn_wsbh,
59 insn_xor, insn_xori, insn_yield, 59 insn_xor, insn_xori, insn_yield,
@@ -139,6 +139,13 @@ Ip_u1u2u3(op) \
139} \ 139} \
140UASM_EXPORT_SYMBOL(uasm_i##op); 140UASM_EXPORT_SYMBOL(uasm_i##op);
141 141
142#define I_s3s1s2(op) \
143Ip_s3s1s2(op) \
144{ \
145 build_insn(buf, insn##op, b, c, a); \
146} \
147UASM_EXPORT_SYMBOL(uasm_i##op);
148
142#define I_u2u1u3(op) \ 149#define I_u2u1u3(op) \
143Ip_u2u1u3(op) \ 150Ip_u2u1u3(op) \
144{ \ 151{ \
@@ -289,6 +296,7 @@ I_u2s3u1(_scd)
289I_u2s3u1(_sd) 296I_u2s3u1(_sd)
290I_u2u1u3(_sll) 297I_u2u1u3(_sll)
291I_u3u2u1(_sllv) 298I_u3u2u1(_sllv)
299I_s3s1s2(_slt)
292I_u2u1s3(_sltiu) 300I_u2u1s3(_sltiu)
293I_u3u1u2(_sltu) 301I_u3u1u2(_sltu)
294I_u2u1u3(_sra) 302I_u2u1u3(_sra)
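
The uasm changes above teach the assembler the signed set-on-less-than (slt) so that the BPF JIT below can test for negative packet offsets; the unsigned sltu it already supported would treat a negative offset as a very large positive one. A standalone comparison of the two, assuming 32-bit two's-complement values:

#include <stdint.h>
#include <stdio.h>

/* slt models the signed set-on-less-than, sltu the unsigned one. */
static uint32_t slt(int32_t rs, int32_t rt)    { return rs < rt; }
static uint32_t sltu(uint32_t rs, uint32_t rt) { return rs < rt; }

int main(void)
{
	int32_t off = -4;		/* a negative BPF load offset */

	/* Only the signed compare flags it as "less than zero". */
	printf("slt=%u sltu=%u\n", slt(off, 0), sltu((uint32_t)off, 0));
	return 0;			/* prints slt=1 sltu=0 */
}
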
diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c
index a67b9753330b..b87390a56a2f 100644
--- a/arch/mips/net/bpf_jit.c
+++ b/arch/mips/net/bpf_jit.c
@@ -119,8 +119,6 @@
119/* Arguments used by JIT */ 119/* Arguments used by JIT */
120#define ARGS_USED_BY_JIT 2 /* only applicable to 64-bit */ 120#define ARGS_USED_BY_JIT 2 /* only applicable to 64-bit */
121 121
122#define FLAG_NEED_X_RESET (1 << 0)
123
124#define SBIT(x) (1 << (x)) /* Signed version of BIT() */ 122#define SBIT(x) (1 << (x)) /* Signed version of BIT() */
125 123
126/** 124/**
@@ -153,6 +151,8 @@ static inline int optimize_div(u32 *k)
153 return 0; 151 return 0;
154} 152}
155 153
154static inline void emit_jit_reg_move(ptr dst, ptr src, struct jit_ctx *ctx);
155
156/* Simply emit the instruction if the JIT memory space has been allocated */ 156/* Simply emit the instruction if the JIT memory space has been allocated */
157#define emit_instr(ctx, func, ...) \ 157#define emit_instr(ctx, func, ...) \
158do { \ 158do { \
@@ -166,9 +166,7 @@ do { \
166/* Determine if immediate is within the 16-bit signed range */ 166/* Determine if immediate is within the 16-bit signed range */
167static inline bool is_range16(s32 imm) 167static inline bool is_range16(s32 imm)
168{ 168{
169 if (imm >= SBIT(15) || imm < -SBIT(15)) 169 return !(imm >= SBIT(15) || imm < -SBIT(15));
170 return true;
171 return false;
172} 170}
173 171
174static inline void emit_addu(unsigned int dst, unsigned int src1, 172static inline void emit_addu(unsigned int dst, unsigned int src1,
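
Note that the old is_range16() returned true when the immediate was outside the signed 16-bit range, so the rewrite also flips its meaning, and every caller in the following hunks is changed to !is_range16() accordingly. A standalone sketch of the corrected helper and the decision it drives, with SBIT() copied from the file above:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SBIT(x) (1 << (x))	/* signed version of BIT(), as in bpf_jit.c */

/* true iff imm fits a signed 16-bit immediate (one addiu/ori is enough) */
static bool is_range16(int32_t imm)
{
	return !(imm >= SBIT(15) || imm < -SBIT(15));
}

int main(void)
{
	/* 0x7fff fits in one instruction; 0x8000 needs the lui/ori pair. */
	printf("%d %d\n", is_range16(0x7fff), is_range16(0x8000));
	return 0;			/* prints 1 0 */
}
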
@@ -187,7 +185,7 @@ static inline void emit_load_imm(unsigned int dst, u32 imm, struct jit_ctx *ctx)
187{ 185{
188 if (ctx->target != NULL) { 186 if (ctx->target != NULL) {
189 /* addiu can only handle s16 */ 187 /* addiu can only handle s16 */
190 if (is_range16(imm)) { 188 if (!is_range16(imm)) {
191 u32 *p = &ctx->target[ctx->idx]; 189 u32 *p = &ctx->target[ctx->idx];
192 uasm_i_lui(&p, r_tmp_imm, (s32)imm >> 16); 190 uasm_i_lui(&p, r_tmp_imm, (s32)imm >> 16);
193 p = &ctx->target[ctx->idx + 1]; 191 p = &ctx->target[ctx->idx + 1];
@@ -199,7 +197,7 @@ static inline void emit_load_imm(unsigned int dst, u32 imm, struct jit_ctx *ctx)
199 } 197 }
200 ctx->idx++; 198 ctx->idx++;
201 199
202 if (is_range16(imm)) 200 if (!is_range16(imm))
203 ctx->idx++; 201 ctx->idx++;
204} 202}
205 203
@@ -240,7 +238,7 @@ static inline void emit_daddiu(unsigned int dst, unsigned int src,
240static inline void emit_addiu(unsigned int dst, unsigned int src, 238static inline void emit_addiu(unsigned int dst, unsigned int src,
241 u32 imm, struct jit_ctx *ctx) 239 u32 imm, struct jit_ctx *ctx)
242{ 240{
243 if (is_range16(imm)) { 241 if (!is_range16(imm)) {
244 emit_load_imm(r_tmp, imm, ctx); 242 emit_load_imm(r_tmp, imm, ctx);
245 emit_addu(dst, r_tmp, src, ctx); 243 emit_addu(dst, r_tmp, src, ctx);
246 } else { 244 } else {
@@ -313,8 +311,11 @@ static inline void emit_sll(unsigned int dst, unsigned int src,
313 unsigned int sa, struct jit_ctx *ctx) 311 unsigned int sa, struct jit_ctx *ctx)
314{ 312{
315 /* sa is 5-bits long */ 313 /* sa is 5-bits long */
316 BUG_ON(sa >= BIT(5)); 314 if (sa >= BIT(5))
317 emit_instr(ctx, sll, dst, src, sa); 315 /* Shifting >= 32 results in zero */
316 emit_jit_reg_move(dst, r_zero, ctx);
317 else
318 emit_instr(ctx, sll, dst, src, sa);
318} 319}
319 320
320static inline void emit_srlv(unsigned int dst, unsigned int src, 321static inline void emit_srlv(unsigned int dst, unsigned int src,
@@ -327,8 +328,17 @@ static inline void emit_srl(unsigned int dst, unsigned int src,
327 unsigned int sa, struct jit_ctx *ctx) 328 unsigned int sa, struct jit_ctx *ctx)
328{ 329{
329 /* sa is 5-bits long */ 330 /* sa is 5-bits long */
330 BUG_ON(sa >= BIT(5)); 331 if (sa >= BIT(5))
331 emit_instr(ctx, srl, dst, src, sa); 332 /* Shifting >= 32 results in zero */
333 emit_jit_reg_move(dst, r_zero, ctx);
334 else
335 emit_instr(ctx, srl, dst, src, sa);
336}
337
338static inline void emit_slt(unsigned int dst, unsigned int src1,
339 unsigned int src2, struct jit_ctx *ctx)
340{
341 emit_instr(ctx, slt, dst, src1, src2);
332} 342}
333 343
334static inline void emit_sltu(unsigned int dst, unsigned int src1, 344static inline void emit_sltu(unsigned int dst, unsigned int src1,
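
The emit_sll()/emit_srl() changes above exist because the MIPS shift instructions encode only a 5-bit shift amount, so a filter that shifts by 32 or more cannot be expressed as one native shift; instead of the old BUG_ON(), the JIT now emits a move from $zero, matching its new comment that shifting by 32 or more results in zero. In C terms the emitted semantics are roughly the following sketch, not the emitter itself:

#include <stdint.h>

/* Semantics preserved for A <<= K: a shift count of 32 or more yields 0,
 * whereas a native MIPS sll would silently use only sa & 0x1f. */
static uint32_t bpf_lsh(uint32_t a, unsigned int sa)
{
	if (sa >= 32)
		return 0;		/* emitted as: move dst, $zero */
	return a << sa;			/* emitted as: sll dst, src, sa */
}
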
@@ -341,7 +351,7 @@ static inline void emit_sltiu(unsigned dst, unsigned int src,
341 unsigned int imm, struct jit_ctx *ctx) 351 unsigned int imm, struct jit_ctx *ctx)
342{ 352{
343 /* 16 bit immediate */ 353 /* 16 bit immediate */
344 if (is_range16((s32)imm)) { 354 if (!is_range16((s32)imm)) {
345 emit_load_imm(r_tmp, imm, ctx); 355 emit_load_imm(r_tmp, imm, ctx);
346 emit_sltu(dst, src, r_tmp, ctx); 356 emit_sltu(dst, src, r_tmp, ctx);
347 } else { 357 } else {
@@ -408,7 +418,7 @@ static inline void emit_div(unsigned int dst, unsigned int src,
408 u32 *p = &ctx->target[ctx->idx]; 418 u32 *p = &ctx->target[ctx->idx];
409 uasm_i_divu(&p, dst, src); 419 uasm_i_divu(&p, dst, src);
410 p = &ctx->target[ctx->idx + 1]; 420 p = &ctx->target[ctx->idx + 1];
 411 uasm_i_mfhi(&p, dst); 421 uasm_i_mflo(&p, dst);
412 } 422 }
413 ctx->idx += 2; /* 2 insts */ 423 ctx->idx += 2; /* 2 insts */
414} 424}
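
The mfhi-to-mflo change above is the substantive fix in emit_div(): MIPS divu leaves the quotient in LO and the remainder in HI, so the division helper has to read LO (mflo) while the modulo helper keeps reading HI (mfhi). A small software model of that register split:

#include <stdint.h>

/* Model of MIPS divu: the quotient lands in LO, the remainder in HI.
 * emit_div() must therefore read lo; emit_mod() reads hi. */
struct hilo { uint32_t hi, lo; };

static struct hilo mips_divu(uint32_t rs, uint32_t rt)
{
	struct hilo r = { .hi = rs % rt, .lo = rs / rt };
	return r;
}

int main(void)
{
	struct hilo r = mips_divu(7, 2);
	return (r.lo == 3 && r.hi == 1) ? 0 : 1;
}
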
@@ -443,6 +453,17 @@ static inline void emit_wsbh(unsigned int dst, unsigned int src,
443 emit_instr(ctx, wsbh, dst, src); 453 emit_instr(ctx, wsbh, dst, src);
444} 454}
445 455
456/* load pointer to register */
457static inline void emit_load_ptr(unsigned int dst, unsigned int src,
458 int imm, struct jit_ctx *ctx)
459{
 460 /* src contains the base addr of the 32/64-bit pointer */
461 if (config_enabled(CONFIG_64BIT))
462 emit_instr(ctx, ld, dst, imm, src);
463 else
464 emit_instr(ctx, lw, dst, imm, src);
465}
466
446/* load a function pointer to register */ 467/* load a function pointer to register */
447static inline void emit_load_func(unsigned int reg, ptr imm, 468static inline void emit_load_func(unsigned int reg, ptr imm,
448 struct jit_ctx *ctx) 469 struct jit_ctx *ctx)
@@ -545,29 +566,13 @@ static inline u16 align_sp(unsigned int num)
545 return num; 566 return num;
546} 567}
547 568
548static inline void update_on_xread(struct jit_ctx *ctx)
549{
550 if (!(ctx->flags & SEEN_X))
551 ctx->flags |= FLAG_NEED_X_RESET;
552
553 ctx->flags |= SEEN_X;
554}
555
556static bool is_load_to_a(u16 inst) 569static bool is_load_to_a(u16 inst)
557{ 570{
558 switch (inst) { 571 switch (inst) {
559 case BPF_S_LD_W_LEN: 572 case BPF_LD | BPF_W | BPF_LEN:
560 case BPF_S_LD_W_ABS: 573 case BPF_LD | BPF_W | BPF_ABS:
561 case BPF_S_LD_H_ABS: 574 case BPF_LD | BPF_H | BPF_ABS:
562 case BPF_S_LD_B_ABS: 575 case BPF_LD | BPF_B | BPF_ABS:
563 case BPF_S_ANC_CPU:
564 case BPF_S_ANC_IFINDEX:
565 case BPF_S_ANC_MARK:
566 case BPF_S_ANC_PROTOCOL:
567 case BPF_S_ANC_RXHASH:
568 case BPF_S_ANC_VLAN_TAG:
569 case BPF_S_ANC_VLAN_TAG_PRESENT:
570 case BPF_S_ANC_QUEUE:
571 return true; 576 return true;
572 default: 577 default:
573 return false; 578 return false;
@@ -618,7 +623,10 @@ static void save_bpf_jit_regs(struct jit_ctx *ctx, unsigned offset)
618 if (ctx->flags & SEEN_MEM) { 623 if (ctx->flags & SEEN_MEM) {
619 if (real_off % (RSIZE * 2)) 624 if (real_off % (RSIZE * 2))
620 real_off += RSIZE; 625 real_off += RSIZE;
621 emit_addiu(r_M, r_sp, real_off, ctx); 626 if (config_enabled(CONFIG_64BIT))
627 emit_daddiu(r_M, r_sp, real_off, ctx);
628 else
629 emit_addiu(r_M, r_sp, real_off, ctx);
622 } 630 }
623} 631}
624 632
@@ -705,11 +713,11 @@ static void build_prologue(struct jit_ctx *ctx)
705 if (ctx->flags & SEEN_SKB) 713 if (ctx->flags & SEEN_SKB)
706 emit_reg_move(r_skb, MIPS_R_A0, ctx); 714 emit_reg_move(r_skb, MIPS_R_A0, ctx);
707 715
708 if (ctx->flags & FLAG_NEED_X_RESET) 716 if (ctx->flags & SEEN_X)
709 emit_jit_reg_move(r_X, r_zero, ctx); 717 emit_jit_reg_move(r_X, r_zero, ctx);
710 718
711 /* Do not leak kernel data to userspace */ 719 /* Do not leak kernel data to userspace */
712 if ((first_inst != BPF_S_RET_K) && !(is_load_to_a(first_inst))) 720 if ((first_inst != (BPF_RET | BPF_K)) && !(is_load_to_a(first_inst)))
713 emit_jit_reg_move(r_A, r_zero, ctx); 721 emit_jit_reg_move(r_A, r_zero, ctx);
714} 722}
715 723
@@ -757,13 +765,17 @@ static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset)
757 return (u64)err << 32 | ntohl(ret); 765 return (u64)err << 32 | ntohl(ret);
758} 766}
759 767
760#define PKT_TYPE_MAX 7 768#ifdef __BIG_ENDIAN_BITFIELD
769#define PKT_TYPE_MAX (7 << 5)
770#else
771#define PKT_TYPE_MAX 7
772#endif
761static int pkt_type_offset(void) 773static int pkt_type_offset(void)
762{ 774{
763 struct sk_buff skb_probe = { 775 struct sk_buff skb_probe = {
764 .pkt_type = ~0, 776 .pkt_type = ~0,
765 }; 777 };
766 char *ct = (char *)&skb_probe; 778 u8 *ct = (u8 *)&skb_probe;
767 unsigned int off; 779 unsigned int off;
768 780
769 for (off = 0; off < sizeof(struct sk_buff); off++) { 781 for (off = 0; off < sizeof(struct sk_buff); off++) {
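
pkt_type_offset() locates skb->pkt_type by filling a probe sk_buff with the bitfield set to all ones and scanning for the byte that changed; on big-endian bitfield layouts those three bits sit at the top of that byte, which is why PKT_TYPE_MAX becomes (7 << 5) there and the JIT later shifts the loaded byte right by 5. A self-contained sketch of the probing idea, with a simplified structure standing in for sk_buff:

#include <stdio.h>
#include <string.h>

/* Simplified stand-in for sk_buff; only the bitfield byte matters here. */
struct skb_probe {
	unsigned long	cb[6];		/* padding before the flags byte */
	unsigned char	pkt_type : 3;	/* the field the filter wants */
	unsigned char	other	 : 5;
};

int main(void)
{
	struct skb_probe probe;
	unsigned char *p = (unsigned char *)&probe;
	unsigned int off;

	memset(&probe, 0, sizeof(probe));
	probe.pkt_type = 7;		/* all three bits (the kernel writes ~0) */

	/* The byte that became non-zero is the load offset the JIT will emit. */
	for (off = 0; off < sizeof(probe); off++)
		if (p[off])
			break;

	/* Prints 0x07 with little-endian bitfields, 0xe0 with big-endian ones. */
	printf("pkt_type byte at offset %u, value 0x%02x\n", off, p[off]);
	return 0;
}
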
@@ -783,46 +795,62 @@ static int build_body(struct jit_ctx *ctx)
783 u32 k, b_off __maybe_unused; 795 u32 k, b_off __maybe_unused;
784 796
785 for (i = 0; i < prog->len; i++) { 797 for (i = 0; i < prog->len; i++) {
798 u16 code;
799
786 inst = &(prog->insns[i]); 800 inst = &(prog->insns[i]);
787 pr_debug("%s: code->0x%02x, jt->0x%x, jf->0x%x, k->0x%x\n", 801 pr_debug("%s: code->0x%02x, jt->0x%x, jf->0x%x, k->0x%x\n",
788 __func__, inst->code, inst->jt, inst->jf, inst->k); 802 __func__, inst->code, inst->jt, inst->jf, inst->k);
789 k = inst->k; 803 k = inst->k;
804 code = bpf_anc_helper(inst);
790 805
791 if (ctx->target == NULL) 806 if (ctx->target == NULL)
792 ctx->offsets[i] = ctx->idx * 4; 807 ctx->offsets[i] = ctx->idx * 4;
793 808
794 switch (inst->code) { 809 switch (code) {
795 case BPF_S_LD_IMM: 810 case BPF_LD | BPF_IMM:
796 /* A <- k ==> li r_A, k */ 811 /* A <- k ==> li r_A, k */
797 ctx->flags |= SEEN_A; 812 ctx->flags |= SEEN_A;
798 emit_load_imm(r_A, k, ctx); 813 emit_load_imm(r_A, k, ctx);
799 break; 814 break;
800 case BPF_S_LD_W_LEN: 815 case BPF_LD | BPF_W | BPF_LEN:
801 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4); 816 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, len) != 4);
802 /* A <- len ==> lw r_A, offset(skb) */ 817 /* A <- len ==> lw r_A, offset(skb) */
803 ctx->flags |= SEEN_SKB | SEEN_A; 818 ctx->flags |= SEEN_SKB | SEEN_A;
804 off = offsetof(struct sk_buff, len); 819 off = offsetof(struct sk_buff, len);
805 emit_load(r_A, r_skb, off, ctx); 820 emit_load(r_A, r_skb, off, ctx);
806 break; 821 break;
807 case BPF_S_LD_MEM: 822 case BPF_LD | BPF_MEM:
808 /* A <- M[k] ==> lw r_A, offset(M) */ 823 /* A <- M[k] ==> lw r_A, offset(M) */
809 ctx->flags |= SEEN_MEM | SEEN_A; 824 ctx->flags |= SEEN_MEM | SEEN_A;
810 emit_load(r_A, r_M, SCRATCH_OFF(k), ctx); 825 emit_load(r_A, r_M, SCRATCH_OFF(k), ctx);
811 break; 826 break;
812 case BPF_S_LD_W_ABS: 827 case BPF_LD | BPF_W | BPF_ABS:
813 /* A <- P[k:4] */ 828 /* A <- P[k:4] */
814 load_order = 2; 829 load_order = 2;
815 goto load; 830 goto load;
816 case BPF_S_LD_H_ABS: 831 case BPF_LD | BPF_H | BPF_ABS:
817 /* A <- P[k:2] */ 832 /* A <- P[k:2] */
818 load_order = 1; 833 load_order = 1;
819 goto load; 834 goto load;
820 case BPF_S_LD_B_ABS: 835 case BPF_LD | BPF_B | BPF_ABS:
821 /* A <- P[k:1] */ 836 /* A <- P[k:1] */
822 load_order = 0; 837 load_order = 0;
823load: 838load:
839 /* the interpreter will deal with the negative K */
840 if ((int)k < 0)
841 return -ENOTSUPP;
842
824 emit_load_imm(r_off, k, ctx); 843 emit_load_imm(r_off, k, ctx);
825load_common: 844load_common:
845 /*
846 * We may got here from the indirect loads so
847 * return if offset is negative.
848 */
849 emit_slt(r_s0, r_off, r_zero, ctx);
850 emit_bcond(MIPS_COND_NE, r_s0, r_zero,
851 b_imm(prog->len, ctx), ctx);
852 emit_reg_move(r_ret, r_zero, ctx);
853
826 ctx->flags |= SEEN_CALL | SEEN_OFF | SEEN_S0 | 854 ctx->flags |= SEEN_CALL | SEEN_OFF | SEEN_S0 |
827 SEEN_SKB | SEEN_A; 855 SEEN_SKB | SEEN_A;
828 856
@@ -852,39 +880,42 @@ load_common:
852 emit_b(b_imm(prog->len, ctx), ctx); 880 emit_b(b_imm(prog->len, ctx), ctx);
853 emit_reg_move(r_ret, r_zero, ctx); 881 emit_reg_move(r_ret, r_zero, ctx);
854 break; 882 break;
855 case BPF_S_LD_W_IND: 883 case BPF_LD | BPF_W | BPF_IND:
856 /* A <- P[X + k:4] */ 884 /* A <- P[X + k:4] */
857 load_order = 2; 885 load_order = 2;
858 goto load_ind; 886 goto load_ind;
859 case BPF_S_LD_H_IND: 887 case BPF_LD | BPF_H | BPF_IND:
860 /* A <- P[X + k:2] */ 888 /* A <- P[X + k:2] */
861 load_order = 1; 889 load_order = 1;
862 goto load_ind; 890 goto load_ind;
863 case BPF_S_LD_B_IND: 891 case BPF_LD | BPF_B | BPF_IND:
864 /* A <- P[X + k:1] */ 892 /* A <- P[X + k:1] */
865 load_order = 0; 893 load_order = 0;
866load_ind: 894load_ind:
867 update_on_xread(ctx);
868 ctx->flags |= SEEN_OFF | SEEN_X; 895 ctx->flags |= SEEN_OFF | SEEN_X;
869 emit_addiu(r_off, r_X, k, ctx); 896 emit_addiu(r_off, r_X, k, ctx);
870 goto load_common; 897 goto load_common;
871 case BPF_S_LDX_IMM: 898 case BPF_LDX | BPF_IMM:
872 /* X <- k */ 899 /* X <- k */
873 ctx->flags |= SEEN_X; 900 ctx->flags |= SEEN_X;
874 emit_load_imm(r_X, k, ctx); 901 emit_load_imm(r_X, k, ctx);
875 break; 902 break;
876 case BPF_S_LDX_MEM: 903 case BPF_LDX | BPF_MEM:
877 /* X <- M[k] */ 904 /* X <- M[k] */
878 ctx->flags |= SEEN_X | SEEN_MEM; 905 ctx->flags |= SEEN_X | SEEN_MEM;
879 emit_load(r_X, r_M, SCRATCH_OFF(k), ctx); 906 emit_load(r_X, r_M, SCRATCH_OFF(k), ctx);
880 break; 907 break;
881 case BPF_S_LDX_W_LEN: 908 case BPF_LDX | BPF_W | BPF_LEN:
882 /* X <- len */ 909 /* X <- len */
883 ctx->flags |= SEEN_X | SEEN_SKB; 910 ctx->flags |= SEEN_X | SEEN_SKB;
884 off = offsetof(struct sk_buff, len); 911 off = offsetof(struct sk_buff, len);
885 emit_load(r_X, r_skb, off, ctx); 912 emit_load(r_X, r_skb, off, ctx);
886 break; 913 break;
887 case BPF_S_LDX_B_MSH: 914 case BPF_LDX | BPF_B | BPF_MSH:
915 /* the interpreter will deal with the negative K */
916 if ((int)k < 0)
917 return -ENOTSUPP;
918
888 /* X <- 4 * (P[k:1] & 0xf) */ 919 /* X <- 4 * (P[k:1] & 0xf) */
889 ctx->flags |= SEEN_X | SEEN_CALL | SEEN_S0 | SEEN_SKB; 920 ctx->flags |= SEEN_X | SEEN_CALL | SEEN_S0 | SEEN_SKB;
890 /* Load offset to a1 */ 921 /* Load offset to a1 */
@@ -917,50 +948,49 @@ load_ind:
917 emit_b(b_imm(prog->len, ctx), ctx); 948 emit_b(b_imm(prog->len, ctx), ctx);
918 emit_load_imm(r_ret, 0, ctx); /* delay slot */ 949 emit_load_imm(r_ret, 0, ctx); /* delay slot */
919 break; 950 break;
920 case BPF_S_ST: 951 case BPF_ST:
921 /* M[k] <- A */ 952 /* M[k] <- A */
922 ctx->flags |= SEEN_MEM | SEEN_A; 953 ctx->flags |= SEEN_MEM | SEEN_A;
923 emit_store(r_A, r_M, SCRATCH_OFF(k), ctx); 954 emit_store(r_A, r_M, SCRATCH_OFF(k), ctx);
924 break; 955 break;
925 case BPF_S_STX: 956 case BPF_STX:
926 /* M[k] <- X */ 957 /* M[k] <- X */
927 ctx->flags |= SEEN_MEM | SEEN_X; 958 ctx->flags |= SEEN_MEM | SEEN_X;
928 emit_store(r_X, r_M, SCRATCH_OFF(k), ctx); 959 emit_store(r_X, r_M, SCRATCH_OFF(k), ctx);
929 break; 960 break;
930 case BPF_S_ALU_ADD_K: 961 case BPF_ALU | BPF_ADD | BPF_K:
931 /* A += K */ 962 /* A += K */
932 ctx->flags |= SEEN_A; 963 ctx->flags |= SEEN_A;
933 emit_addiu(r_A, r_A, k, ctx); 964 emit_addiu(r_A, r_A, k, ctx);
934 break; 965 break;
935 case BPF_S_ALU_ADD_X: 966 case BPF_ALU | BPF_ADD | BPF_X:
936 /* A += X */ 967 /* A += X */
937 ctx->flags |= SEEN_A | SEEN_X; 968 ctx->flags |= SEEN_A | SEEN_X;
938 emit_addu(r_A, r_A, r_X, ctx); 969 emit_addu(r_A, r_A, r_X, ctx);
939 break; 970 break;
940 case BPF_S_ALU_SUB_K: 971 case BPF_ALU | BPF_SUB | BPF_K:
941 /* A -= K */ 972 /* A -= K */
942 ctx->flags |= SEEN_A; 973 ctx->flags |= SEEN_A;
943 emit_addiu(r_A, r_A, -k, ctx); 974 emit_addiu(r_A, r_A, -k, ctx);
944 break; 975 break;
945 case BPF_S_ALU_SUB_X: 976 case BPF_ALU | BPF_SUB | BPF_X:
946 /* A -= X */ 977 /* A -= X */
947 ctx->flags |= SEEN_A | SEEN_X; 978 ctx->flags |= SEEN_A | SEEN_X;
948 emit_subu(r_A, r_A, r_X, ctx); 979 emit_subu(r_A, r_A, r_X, ctx);
949 break; 980 break;
950 case BPF_S_ALU_MUL_K: 981 case BPF_ALU | BPF_MUL | BPF_K:
951 /* A *= K */ 982 /* A *= K */
952 /* Load K to scratch register before MUL */ 983 /* Load K to scratch register before MUL */
953 ctx->flags |= SEEN_A | SEEN_S0; 984 ctx->flags |= SEEN_A | SEEN_S0;
954 emit_load_imm(r_s0, k, ctx); 985 emit_load_imm(r_s0, k, ctx);
955 emit_mul(r_A, r_A, r_s0, ctx); 986 emit_mul(r_A, r_A, r_s0, ctx);
956 break; 987 break;
957 case BPF_S_ALU_MUL_X: 988 case BPF_ALU | BPF_MUL | BPF_X:
958 /* A *= X */ 989 /* A *= X */
959 update_on_xread(ctx);
960 ctx->flags |= SEEN_A | SEEN_X; 990 ctx->flags |= SEEN_A | SEEN_X;
961 emit_mul(r_A, r_A, r_X, ctx); 991 emit_mul(r_A, r_A, r_X, ctx);
962 break; 992 break;
963 case BPF_S_ALU_DIV_K: 993 case BPF_ALU | BPF_DIV | BPF_K:
964 /* A /= k */ 994 /* A /= k */
965 if (k == 1) 995 if (k == 1)
966 break; 996 break;
@@ -973,7 +1003,7 @@ load_ind:
973 emit_load_imm(r_s0, k, ctx); 1003 emit_load_imm(r_s0, k, ctx);
974 emit_div(r_A, r_s0, ctx); 1004 emit_div(r_A, r_s0, ctx);
975 break; 1005 break;
976 case BPF_S_ALU_MOD_K: 1006 case BPF_ALU | BPF_MOD | BPF_K:
977 /* A %= k */ 1007 /* A %= k */
978 if (k == 1 || optimize_div(&k)) { 1008 if (k == 1 || optimize_div(&k)) {
979 ctx->flags |= SEEN_A; 1009 ctx->flags |= SEEN_A;
@@ -984,9 +1014,8 @@ load_ind:
984 emit_mod(r_A, r_s0, ctx); 1014 emit_mod(r_A, r_s0, ctx);
985 } 1015 }
986 break; 1016 break;
987 case BPF_S_ALU_DIV_X: 1017 case BPF_ALU | BPF_DIV | BPF_X:
988 /* A /= X */ 1018 /* A /= X */
989 update_on_xread(ctx);
990 ctx->flags |= SEEN_X | SEEN_A; 1019 ctx->flags |= SEEN_X | SEEN_A;
991 /* Check if r_X is zero */ 1020 /* Check if r_X is zero */
992 emit_bcond(MIPS_COND_EQ, r_X, r_zero, 1021 emit_bcond(MIPS_COND_EQ, r_X, r_zero,
@@ -994,9 +1023,8 @@ load_ind:
994 emit_load_imm(r_val, 0, ctx); /* delay slot */ 1023 emit_load_imm(r_val, 0, ctx); /* delay slot */
995 emit_div(r_A, r_X, ctx); 1024 emit_div(r_A, r_X, ctx);
996 break; 1025 break;
997 case BPF_S_ALU_MOD_X: 1026 case BPF_ALU | BPF_MOD | BPF_X:
998 /* A %= X */ 1027 /* A %= X */
999 update_on_xread(ctx);
1000 ctx->flags |= SEEN_X | SEEN_A; 1028 ctx->flags |= SEEN_X | SEEN_A;
1001 /* Check if r_X is zero */ 1029 /* Check if r_X is zero */
1002 emit_bcond(MIPS_COND_EQ, r_X, r_zero, 1030 emit_bcond(MIPS_COND_EQ, r_X, r_zero,
@@ -1004,94 +1032,89 @@ load_ind:
1004 emit_load_imm(r_val, 0, ctx); /* delay slot */ 1032 emit_load_imm(r_val, 0, ctx); /* delay slot */
1005 emit_mod(r_A, r_X, ctx); 1033 emit_mod(r_A, r_X, ctx);
1006 break; 1034 break;
1007 case BPF_S_ALU_OR_K: 1035 case BPF_ALU | BPF_OR | BPF_K:
1008 /* A |= K */ 1036 /* A |= K */
1009 ctx->flags |= SEEN_A; 1037 ctx->flags |= SEEN_A;
1010 emit_ori(r_A, r_A, k, ctx); 1038 emit_ori(r_A, r_A, k, ctx);
1011 break; 1039 break;
1012 case BPF_S_ALU_OR_X: 1040 case BPF_ALU | BPF_OR | BPF_X:
1013 /* A |= X */ 1041 /* A |= X */
1014 update_on_xread(ctx);
1015 ctx->flags |= SEEN_A; 1042 ctx->flags |= SEEN_A;
1016 emit_ori(r_A, r_A, r_X, ctx); 1043 emit_ori(r_A, r_A, r_X, ctx);
1017 break; 1044 break;
1018 case BPF_S_ALU_XOR_K: 1045 case BPF_ALU | BPF_XOR | BPF_K:
1019 /* A ^= k */ 1046 /* A ^= k */
1020 ctx->flags |= SEEN_A; 1047 ctx->flags |= SEEN_A;
1021 emit_xori(r_A, r_A, k, ctx); 1048 emit_xori(r_A, r_A, k, ctx);
1022 break; 1049 break;
1023 case BPF_S_ANC_ALU_XOR_X: 1050 case BPF_ANC | SKF_AD_ALU_XOR_X:
1024 case BPF_S_ALU_XOR_X: 1051 case BPF_ALU | BPF_XOR | BPF_X:
1025 /* A ^= X */ 1052 /* A ^= X */
1026 update_on_xread(ctx);
1027 ctx->flags |= SEEN_A; 1053 ctx->flags |= SEEN_A;
1028 emit_xor(r_A, r_A, r_X, ctx); 1054 emit_xor(r_A, r_A, r_X, ctx);
1029 break; 1055 break;
1030 case BPF_S_ALU_AND_K: 1056 case BPF_ALU | BPF_AND | BPF_K:
1031 /* A &= K */ 1057 /* A &= K */
1032 ctx->flags |= SEEN_A; 1058 ctx->flags |= SEEN_A;
1033 emit_andi(r_A, r_A, k, ctx); 1059 emit_andi(r_A, r_A, k, ctx);
1034 break; 1060 break;
1035 case BPF_S_ALU_AND_X: 1061 case BPF_ALU | BPF_AND | BPF_X:
1036 /* A &= X */ 1062 /* A &= X */
1037 update_on_xread(ctx);
1038 ctx->flags |= SEEN_A | SEEN_X; 1063 ctx->flags |= SEEN_A | SEEN_X;
1039 emit_and(r_A, r_A, r_X, ctx); 1064 emit_and(r_A, r_A, r_X, ctx);
1040 break; 1065 break;
1041 case BPF_S_ALU_LSH_K: 1066 case BPF_ALU | BPF_LSH | BPF_K:
1042 /* A <<= K */ 1067 /* A <<= K */
1043 ctx->flags |= SEEN_A; 1068 ctx->flags |= SEEN_A;
1044 emit_sll(r_A, r_A, k, ctx); 1069 emit_sll(r_A, r_A, k, ctx);
1045 break; 1070 break;
1046 case BPF_S_ALU_LSH_X: 1071 case BPF_ALU | BPF_LSH | BPF_X:
1047 /* A <<= X */ 1072 /* A <<= X */
1048 ctx->flags |= SEEN_A | SEEN_X; 1073 ctx->flags |= SEEN_A | SEEN_X;
1049 update_on_xread(ctx);
1050 emit_sllv(r_A, r_A, r_X, ctx); 1074 emit_sllv(r_A, r_A, r_X, ctx);
1051 break; 1075 break;
1052 case BPF_S_ALU_RSH_K: 1076 case BPF_ALU | BPF_RSH | BPF_K:
1053 /* A >>= K */ 1077 /* A >>= K */
1054 ctx->flags |= SEEN_A; 1078 ctx->flags |= SEEN_A;
1055 emit_srl(r_A, r_A, k, ctx); 1079 emit_srl(r_A, r_A, k, ctx);
1056 break; 1080 break;
1057 case BPF_S_ALU_RSH_X: 1081 case BPF_ALU | BPF_RSH | BPF_X:
1058 ctx->flags |= SEEN_A | SEEN_X; 1082 ctx->flags |= SEEN_A | SEEN_X;
1059 update_on_xread(ctx);
1060 emit_srlv(r_A, r_A, r_X, ctx); 1083 emit_srlv(r_A, r_A, r_X, ctx);
1061 break; 1084 break;
1062 case BPF_S_ALU_NEG: 1085 case BPF_ALU | BPF_NEG:
1063 /* A = -A */ 1086 /* A = -A */
1064 ctx->flags |= SEEN_A; 1087 ctx->flags |= SEEN_A;
1065 emit_neg(r_A, ctx); 1088 emit_neg(r_A, ctx);
1066 break; 1089 break;
1067 case BPF_S_JMP_JA: 1090 case BPF_JMP | BPF_JA:
1068 /* pc += K */ 1091 /* pc += K */
1069 emit_b(b_imm(i + k + 1, ctx), ctx); 1092 emit_b(b_imm(i + k + 1, ctx), ctx);
1070 emit_nop(ctx); 1093 emit_nop(ctx);
1071 break; 1094 break;
1072 case BPF_S_JMP_JEQ_K: 1095 case BPF_JMP | BPF_JEQ | BPF_K:
1073 /* pc += ( A == K ) ? pc->jt : pc->jf */ 1096 /* pc += ( A == K ) ? pc->jt : pc->jf */
1074 condt = MIPS_COND_EQ | MIPS_COND_K; 1097 condt = MIPS_COND_EQ | MIPS_COND_K;
1075 goto jmp_cmp; 1098 goto jmp_cmp;
1076 case BPF_S_JMP_JEQ_X: 1099 case BPF_JMP | BPF_JEQ | BPF_X:
1077 ctx->flags |= SEEN_X; 1100 ctx->flags |= SEEN_X;
1078 /* pc += ( A == X ) ? pc->jt : pc->jf */ 1101 /* pc += ( A == X ) ? pc->jt : pc->jf */
1079 condt = MIPS_COND_EQ | MIPS_COND_X; 1102 condt = MIPS_COND_EQ | MIPS_COND_X;
1080 goto jmp_cmp; 1103 goto jmp_cmp;
1081 case BPF_S_JMP_JGE_K: 1104 case BPF_JMP | BPF_JGE | BPF_K:
1082 /* pc += ( A >= K ) ? pc->jt : pc->jf */ 1105 /* pc += ( A >= K ) ? pc->jt : pc->jf */
1083 condt = MIPS_COND_GE | MIPS_COND_K; 1106 condt = MIPS_COND_GE | MIPS_COND_K;
1084 goto jmp_cmp; 1107 goto jmp_cmp;
1085 case BPF_S_JMP_JGE_X: 1108 case BPF_JMP | BPF_JGE | BPF_X:
1086 ctx->flags |= SEEN_X; 1109 ctx->flags |= SEEN_X;
1087 /* pc += ( A >= X ) ? pc->jt : pc->jf */ 1110 /* pc += ( A >= X ) ? pc->jt : pc->jf */
1088 condt = MIPS_COND_GE | MIPS_COND_X; 1111 condt = MIPS_COND_GE | MIPS_COND_X;
1089 goto jmp_cmp; 1112 goto jmp_cmp;
1090 case BPF_S_JMP_JGT_K: 1113 case BPF_JMP | BPF_JGT | BPF_K:
1091 /* pc += ( A > K ) ? pc->jt : pc->jf */ 1114 /* pc += ( A > K ) ? pc->jt : pc->jf */
1092 condt = MIPS_COND_GT | MIPS_COND_K; 1115 condt = MIPS_COND_GT | MIPS_COND_K;
1093 goto jmp_cmp; 1116 goto jmp_cmp;
1094 case BPF_S_JMP_JGT_X: 1117 case BPF_JMP | BPF_JGT | BPF_X:
1095 ctx->flags |= SEEN_X; 1118 ctx->flags |= SEEN_X;
1096 /* pc += ( A > X ) ? pc->jt : pc->jf */ 1119 /* pc += ( A > X ) ? pc->jt : pc->jf */
1097 condt = MIPS_COND_GT | MIPS_COND_X; 1120 condt = MIPS_COND_GT | MIPS_COND_X;
@@ -1109,7 +1132,7 @@ jmp_cmp:
1109 } 1132 }
 1110 /* A < (K|X) ? r_scratch = 1 */ 1133
1111 b_off = b_imm(i + inst->jf + 1, ctx); 1134 b_off = b_imm(i + inst->jf + 1, ctx);
1112 emit_bcond(MIPS_COND_GT, r_s0, r_zero, b_off, 1135 emit_bcond(MIPS_COND_NE, r_s0, r_zero, b_off,
1113 ctx); 1136 ctx);
1114 emit_nop(ctx); 1137 emit_nop(ctx);
1115 /* A > (K|X) ? scratch = 0 */ 1138 /* A > (K|X) ? scratch = 0 */
@@ -1167,7 +1190,7 @@ jmp_cmp:
1167 } 1190 }
1168 } 1191 }
1169 break; 1192 break;
1170 case BPF_S_JMP_JSET_K: 1193 case BPF_JMP | BPF_JSET | BPF_K:
1171 ctx->flags |= SEEN_S0 | SEEN_S1 | SEEN_A; 1194 ctx->flags |= SEEN_S0 | SEEN_S1 | SEEN_A;
1172 /* pc += (A & K) ? pc -> jt : pc -> jf */ 1195 /* pc += (A & K) ? pc -> jt : pc -> jf */
1173 emit_load_imm(r_s1, k, ctx); 1196 emit_load_imm(r_s1, k, ctx);
@@ -1181,7 +1204,7 @@ jmp_cmp:
1181 emit_b(b_off, ctx); 1204 emit_b(b_off, ctx);
1182 emit_nop(ctx); 1205 emit_nop(ctx);
1183 break; 1206 break;
1184 case BPF_S_JMP_JSET_X: 1207 case BPF_JMP | BPF_JSET | BPF_X:
1185 ctx->flags |= SEEN_S0 | SEEN_X | SEEN_A; 1208 ctx->flags |= SEEN_S0 | SEEN_X | SEEN_A;
1186 /* pc += (A & X) ? pc -> jt : pc -> jf */ 1209 /* pc += (A & X) ? pc -> jt : pc -> jf */
1187 emit_and(r_s0, r_A, r_X, ctx); 1210 emit_and(r_s0, r_A, r_X, ctx);
@@ -1194,7 +1217,7 @@ jmp_cmp:
1194 emit_b(b_off, ctx); 1217 emit_b(b_off, ctx);
1195 emit_nop(ctx); 1218 emit_nop(ctx);
1196 break; 1219 break;
1197 case BPF_S_RET_A: 1220 case BPF_RET | BPF_A:
1198 ctx->flags |= SEEN_A; 1221 ctx->flags |= SEEN_A;
1199 if (i != prog->len - 1) 1222 if (i != prog->len - 1)
1200 /* 1223 /*
@@ -1204,7 +1227,7 @@ jmp_cmp:
1204 emit_b(b_imm(prog->len, ctx), ctx); 1227 emit_b(b_imm(prog->len, ctx), ctx);
1205 emit_reg_move(r_ret, r_A, ctx); /* delay slot */ 1228 emit_reg_move(r_ret, r_A, ctx); /* delay slot */
1206 break; 1229 break;
1207 case BPF_S_RET_K: 1230 case BPF_RET | BPF_K:
1208 /* 1231 /*
 1209 * It can emit two instructions so it does not fit in 1232
1210 * the delay slot. 1233 * the delay slot.
@@ -1219,19 +1242,18 @@ jmp_cmp:
1219 emit_nop(ctx); 1242 emit_nop(ctx);
1220 } 1243 }
1221 break; 1244 break;
1222 case BPF_S_MISC_TAX: 1245 case BPF_MISC | BPF_TAX:
1223 /* X = A */ 1246 /* X = A */
1224 ctx->flags |= SEEN_X | SEEN_A; 1247 ctx->flags |= SEEN_X | SEEN_A;
1225 emit_jit_reg_move(r_X, r_A, ctx); 1248 emit_jit_reg_move(r_X, r_A, ctx);
1226 break; 1249 break;
1227 case BPF_S_MISC_TXA: 1250 case BPF_MISC | BPF_TXA:
1228 /* A = X */ 1251 /* A = X */
1229 ctx->flags |= SEEN_A | SEEN_X; 1252 ctx->flags |= SEEN_A | SEEN_X;
1230 update_on_xread(ctx);
1231 emit_jit_reg_move(r_A, r_X, ctx); 1253 emit_jit_reg_move(r_A, r_X, ctx);
1232 break; 1254 break;
1233 /* AUX */ 1255 /* AUX */
1234 case BPF_S_ANC_PROTOCOL: 1256 case BPF_ANC | SKF_AD_PROTOCOL:
 1235 /* A = ntohs(skb->protocol) */ 1257
1236 ctx->flags |= SEEN_SKB | SEEN_OFF | SEEN_A; 1258 ctx->flags |= SEEN_SKB | SEEN_OFF | SEEN_A;
1237 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, 1259 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
@@ -1256,7 +1278,7 @@ jmp_cmp:
1256 } 1278 }
1257#endif 1279#endif
1258 break; 1280 break;
1259 case BPF_S_ANC_CPU: 1281 case BPF_ANC | SKF_AD_CPU:
1260 ctx->flags |= SEEN_A | SEEN_OFF; 1282 ctx->flags |= SEEN_A | SEEN_OFF;
1261 /* A = current_thread_info()->cpu */ 1283 /* A = current_thread_info()->cpu */
1262 BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info, 1284 BUILD_BUG_ON(FIELD_SIZEOF(struct thread_info,
@@ -1265,11 +1287,12 @@ jmp_cmp:
1265 /* $28/gp points to the thread_info struct */ 1287 /* $28/gp points to the thread_info struct */
1266 emit_load(r_A, 28, off, ctx); 1288 emit_load(r_A, 28, off, ctx);
1267 break; 1289 break;
1268 case BPF_S_ANC_IFINDEX: 1290 case BPF_ANC | SKF_AD_IFINDEX:
1269 /* A = skb->dev->ifindex */ 1291 /* A = skb->dev->ifindex */
1270 ctx->flags |= SEEN_SKB | SEEN_A | SEEN_S0; 1292 ctx->flags |= SEEN_SKB | SEEN_A | SEEN_S0;
1271 off = offsetof(struct sk_buff, dev); 1293 off = offsetof(struct sk_buff, dev);
1272 emit_load(r_s0, r_skb, off, ctx); 1294 /* Load *dev pointer */
1295 emit_load_ptr(r_s0, r_skb, off, ctx);
1273 /* error (0) in the delay slot */ 1296 /* error (0) in the delay slot */
1274 emit_bcond(MIPS_COND_EQ, r_s0, r_zero, 1297 emit_bcond(MIPS_COND_EQ, r_s0, r_zero,
1275 b_imm(prog->len, ctx), ctx); 1298 b_imm(prog->len, ctx), ctx);
@@ -1279,31 +1302,36 @@ jmp_cmp:
1279 off = offsetof(struct net_device, ifindex); 1302 off = offsetof(struct net_device, ifindex);
1280 emit_load(r_A, r_s0, off, ctx); 1303 emit_load(r_A, r_s0, off, ctx);
1281 break; 1304 break;
1282 case BPF_S_ANC_MARK: 1305 case BPF_ANC | SKF_AD_MARK:
1283 ctx->flags |= SEEN_SKB | SEEN_A; 1306 ctx->flags |= SEEN_SKB | SEEN_A;
1284 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4); 1307 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, mark) != 4);
1285 off = offsetof(struct sk_buff, mark); 1308 off = offsetof(struct sk_buff, mark);
1286 emit_load(r_A, r_skb, off, ctx); 1309 emit_load(r_A, r_skb, off, ctx);
1287 break; 1310 break;
1288 case BPF_S_ANC_RXHASH: 1311 case BPF_ANC | SKF_AD_RXHASH:
1289 ctx->flags |= SEEN_SKB | SEEN_A; 1312 ctx->flags |= SEEN_SKB | SEEN_A;
1290 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4); 1313 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, hash) != 4);
1291 off = offsetof(struct sk_buff, hash); 1314 off = offsetof(struct sk_buff, hash);
1292 emit_load(r_A, r_skb, off, ctx); 1315 emit_load(r_A, r_skb, off, ctx);
1293 break; 1316 break;
1294 case BPF_S_ANC_VLAN_TAG: 1317 case BPF_ANC | SKF_AD_VLAN_TAG:
1295 case BPF_S_ANC_VLAN_TAG_PRESENT: 1318 case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
1296 ctx->flags |= SEEN_SKB | SEEN_S0 | SEEN_A; 1319 ctx->flags |= SEEN_SKB | SEEN_S0 | SEEN_A;
1297 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, 1320 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
1298 vlan_tci) != 2); 1321 vlan_tci) != 2);
1299 off = offsetof(struct sk_buff, vlan_tci); 1322 off = offsetof(struct sk_buff, vlan_tci);
1300 emit_half_load(r_s0, r_skb, off, ctx); 1323 emit_half_load(r_s0, r_skb, off, ctx);
1301 if (inst->code == BPF_S_ANC_VLAN_TAG) 1324 if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
1302 emit_and(r_A, r_s0, VLAN_VID_MASK, ctx); 1325 emit_andi(r_A, r_s0, (u16)~VLAN_TAG_PRESENT, ctx);
1303 else 1326 } else {
1304 emit_and(r_A, r_s0, VLAN_TAG_PRESENT, ctx); 1327 emit_andi(r_A, r_s0, VLAN_TAG_PRESENT, ctx);
1328 /* return 1 if present */
1329 emit_sltu(r_A, r_zero, r_A, ctx);
1330 }
1305 break; 1331 break;
1306 case BPF_S_ANC_PKTTYPE: 1332 case BPF_ANC | SKF_AD_PKTTYPE:
1333 ctx->flags |= SEEN_SKB;
1334
1307 off = pkt_type_offset(); 1335 off = pkt_type_offset();
1308 1336
1309 if (off < 0) 1337 if (off < 0)
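
The VLAN handling in the hunk above makes SKF_AD_VLAN_TAG return the tag bits of vlan_tci with the "present" flag masked off, while SKF_AD_VLAN_TAG_PRESENT collapses that flag to 0 or 1 (the sltu against $zero). In plain C the two ancillary loads compute roughly the following, with VLAN_TAG_PRESENT taken from <linux/if_vlan.h>:

#include <linux/if_vlan.h>		/* VLAN_TAG_PRESENT */

/* What the two ancillary loads compute, expressed in C (sketch only). */
static inline unsigned int anc_vlan_tag(unsigned short vlan_tci)
{
	return vlan_tci & ~VLAN_TAG_PRESENT;	/* tag without the flag bit */
}

static inline unsigned int anc_vlan_tag_present(unsigned short vlan_tci)
{
	return (vlan_tci & VLAN_TAG_PRESENT) ? 1 : 0;
}
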
@@ -1311,8 +1339,12 @@ jmp_cmp:
1311 emit_load_byte(r_tmp, r_skb, off, ctx); 1339 emit_load_byte(r_tmp, r_skb, off, ctx);
1312 /* Keep only the last 3 bits */ 1340 /* Keep only the last 3 bits */
1313 emit_andi(r_A, r_tmp, PKT_TYPE_MAX, ctx); 1341 emit_andi(r_A, r_tmp, PKT_TYPE_MAX, ctx);
1342#ifdef __BIG_ENDIAN_BITFIELD
 1343 /* Get the actual packet type into the lower 3 bits */
1344 emit_srl(r_A, r_A, 5, ctx);
1345#endif
1314 break; 1346 break;
1315 case BPF_S_ANC_QUEUE: 1347 case BPF_ANC | SKF_AD_QUEUE:
1316 ctx->flags |= SEEN_SKB | SEEN_A; 1348 ctx->flags |= SEEN_SKB | SEEN_A;
1317 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, 1349 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
1318 queue_mapping) != 2); 1350 queue_mapping) != 2);
@@ -1322,8 +1354,8 @@ jmp_cmp:
1322 emit_half_load(r_A, r_skb, off, ctx); 1354 emit_half_load(r_A, r_skb, off, ctx);
1323 break; 1355 break;
1324 default: 1356 default:
1325 pr_warn("%s: Unhandled opcode: 0x%02x\n", __FILE__, 1357 pr_debug("%s: Unhandled opcode: 0x%02x\n", __FILE__,
1326 inst->code); 1358 inst->code);
1327 return -1; 1359 return -1;
1328 } 1360 }
1329 } 1361 }
diff --git a/arch/parisc/kernel/hardware.c b/arch/parisc/kernel/hardware.c
index 608716f8496b..af3bc359dc70 100644
--- a/arch/parisc/kernel/hardware.c
+++ b/arch/parisc/kernel/hardware.c
@@ -1210,7 +1210,8 @@ static struct hp_hardware hp_hardware_list[] = {
1210 {HPHW_FIO, 0x004, 0x00320, 0x0, "Metheus Frame Buffer"}, 1210 {HPHW_FIO, 0x004, 0x00320, 0x0, "Metheus Frame Buffer"},
1211 {HPHW_FIO, 0x004, 0x00340, 0x0, "BARCO CX4500 VME Grphx Cnsl"}, 1211 {HPHW_FIO, 0x004, 0x00340, 0x0, "BARCO CX4500 VME Grphx Cnsl"},
1212 {HPHW_FIO, 0x004, 0x00360, 0x0, "Hughes TOG VME FDDI"}, 1212 {HPHW_FIO, 0x004, 0x00360, 0x0, "Hughes TOG VME FDDI"},
1213 {HPHW_FIO, 0x076, 0x000AD, 0x00, "Crestone Peak RS-232"}, 1213 {HPHW_FIO, 0x076, 0x000AD, 0x0, "Crestone Peak Core RS-232"},
1214 {HPHW_FIO, 0x077, 0x000AD, 0x0, "Crestone Peak Fast? Core RS-232"},
1214 {HPHW_IOA, 0x185, 0x0000B, 0x00, "Java BC Summit Port"}, 1215 {HPHW_IOA, 0x185, 0x0000B, 0x00, "Java BC Summit Port"},
1215 {HPHW_IOA, 0x1FF, 0x0000B, 0x00, "Hitachi Ghostview Summit Port"}, 1216 {HPHW_IOA, 0x1FF, 0x0000B, 0x00, "Hitachi Ghostview Summit Port"},
1216 {HPHW_IOA, 0x580, 0x0000B, 0x10, "U2-IOA BC Runway Port"}, 1217 {HPHW_IOA, 0x580, 0x0000B, 0x10, "U2-IOA BC Runway Port"},
diff --git a/arch/parisc/kernel/sys_parisc32.c b/arch/parisc/kernel/sys_parisc32.c
index bb9f3b64de55..93c1963d76fe 100644
--- a/arch/parisc/kernel/sys_parisc32.c
+++ b/arch/parisc/kernel/sys_parisc32.c
@@ -4,6 +4,7 @@
4 * Copyright (C) 2000-2001 Hewlett Packard Company 4 * Copyright (C) 2000-2001 Hewlett Packard Company
5 * Copyright (C) 2000 John Marvin 5 * Copyright (C) 2000 John Marvin
6 * Copyright (C) 2001 Matthew Wilcox 6 * Copyright (C) 2001 Matthew Wilcox
7 * Copyright (C) 2014 Helge Deller <deller@gmx.de>
7 * 8 *
8 * These routines maintain argument size conversion between 32bit and 64bit 9 * These routines maintain argument size conversion between 32bit and 64bit
9 * environment. Based heavily on sys_ia32.c and sys_sparc32.c. 10 * environment. Based heavily on sys_ia32.c and sys_sparc32.c.
@@ -11,44 +12,8 @@
11 12
12#include <linux/compat.h> 13#include <linux/compat.h>
13#include <linux/kernel.h> 14#include <linux/kernel.h>
14#include <linux/sched.h>
15#include <linux/fs.h>
16#include <linux/mm.h>
17#include <linux/file.h>
18#include <linux/signal.h>
19#include <linux/resource.h>
20#include <linux/times.h>
21#include <linux/time.h>
22#include <linux/smp.h>
23#include <linux/sem.h>
24#include <linux/shm.h>
25#include <linux/slab.h>
26#include <linux/uio.h>
27#include <linux/ncp_fs.h>
28#include <linux/poll.h>
29#include <linux/personality.h>
30#include <linux/stat.h>
31#include <linux/highmem.h>
32#include <linux/highuid.h>
33#include <linux/mman.h>
34#include <linux/binfmts.h>
35#include <linux/namei.h>
36#include <linux/vfs.h>
37#include <linux/ptrace.h>
38#include <linux/swap.h>
39#include <linux/syscalls.h> 15#include <linux/syscalls.h>
40 16
41#include <asm/types.h>
42#include <asm/uaccess.h>
43#include <asm/mmu_context.h>
44
45#undef DEBUG
46
47#ifdef DEBUG
48#define DBG(x) printk x
49#else
50#define DBG(x)
51#endif
52 17
53asmlinkage long sys32_unimplemented(int r26, int r25, int r24, int r23, 18asmlinkage long sys32_unimplemented(int r26, int r25, int r24, int r23,
54 int r22, int r21, int r20) 19 int r22, int r21, int r20)
@@ -57,3 +22,12 @@ asmlinkage long sys32_unimplemented(int r26, int r25, int r24, int r23,
57 current->comm, current->pid, r20); 22 current->comm, current->pid, r20);
58 return -ENOSYS; 23 return -ENOSYS;
59} 24}
25
26asmlinkage long sys32_fanotify_mark(compat_int_t fanotify_fd, compat_uint_t flags,
27 compat_uint_t mask0, compat_uint_t mask1, compat_int_t dfd,
28 const char __user * pathname)
29{
30 return sys_fanotify_mark(fanotify_fd, flags,
31 ((__u64)mask1 << 32) | mask0,
32 dfd, pathname);
33}
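
The new wrapper above exists because fanotify_mark() takes a 64-bit mask that a 32-bit task passes as two 32-bit register arguments; the wrapper treats mask0 as the low word and mask1 as the high word and rebuilds the full value before calling the native syscall, which is also why the syscall table below switches the entry to ENTRY_DIFF. The same reassembly in a tiny standalone form (hypothetical helper, not kernel code):

#include <stdint.h>
#include <stdio.h>

/* Rebuild a 64-bit value that arrived split across two 32-bit arguments,
 * low word in mask0 and high word in mask1, as in the wrapper above. */
static uint64_t merge_mask(uint32_t mask0, uint32_t mask1)
{
	return ((uint64_t)mask1 << 32) | mask0;
}

int main(void)
{
	printf("0x%llx\n",
	       (unsigned long long)merge_mask(0x00000001u, 0x00000040u));
	return 0;			/* prints 0x4000000001 */
}
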
diff --git a/arch/parisc/kernel/syscall_table.S b/arch/parisc/kernel/syscall_table.S
index c5fa7a697fba..84c5d3a58fa1 100644
--- a/arch/parisc/kernel/syscall_table.S
+++ b/arch/parisc/kernel/syscall_table.S
@@ -418,7 +418,7 @@
418 ENTRY_SAME(accept4) /* 320 */ 418 ENTRY_SAME(accept4) /* 320 */
419 ENTRY_SAME(prlimit64) 419 ENTRY_SAME(prlimit64)
420 ENTRY_SAME(fanotify_init) 420 ENTRY_SAME(fanotify_init)
421 ENTRY_COMP(fanotify_mark) 421 ENTRY_DIFF(fanotify_mark)
422 ENTRY_COMP(clock_adjtime) 422 ENTRY_COMP(clock_adjtime)
423 ENTRY_SAME(name_to_handle_at) /* 325 */ 423 ENTRY_SAME(name_to_handle_at) /* 325 */
424 ENTRY_COMP(open_by_handle_at) 424 ENTRY_COMP(open_by_handle_at)
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index bd6dd6ed3a9f..80b94b0add1f 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -145,6 +145,7 @@ config PPC
145 select HAVE_IRQ_EXIT_ON_IRQ_STACK 145 select HAVE_IRQ_EXIT_ON_IRQ_STACK
146 select ARCH_USE_CMPXCHG_LOCKREF if PPC64 146 select ARCH_USE_CMPXCHG_LOCKREF if PPC64
147 select HAVE_ARCH_AUDITSYSCALL 147 select HAVE_ARCH_AUDITSYSCALL
148 select ARCH_SUPPORTS_ATOMIC_RMW
148 149
149config GENERIC_CSUM 150config GENERIC_CSUM
150 def_bool CPU_LITTLE_ENDIAN 151 def_bool CPU_LITTLE_ENDIAN
@@ -414,7 +415,7 @@ config KEXEC
414config CRASH_DUMP 415config CRASH_DUMP
415 bool "Build a kdump crash kernel" 416 bool "Build a kdump crash kernel"
416 depends on PPC64 || 6xx || FSL_BOOKE || (44x && !SMP) 417 depends on PPC64 || 6xx || FSL_BOOKE || (44x && !SMP)
417 select RELOCATABLE if PPC64 || 44x || FSL_BOOKE 418 select RELOCATABLE if (PPC64 && !COMPILE_TEST) || 44x || FSL_BOOKE
418 help 419 help
419 Build a kernel suitable for use as a kdump capture kernel. 420 Build a kernel suitable for use as a kdump capture kernel.
420 The same kernel binary can be used as production kernel and dump 421 The same kernel binary can be used as production kernel and dump
@@ -1017,6 +1018,7 @@ endmenu
1017if PPC64 1018if PPC64
1018config RELOCATABLE 1019config RELOCATABLE
1019 bool "Build a relocatable kernel" 1020 bool "Build a relocatable kernel"
1021 depends on !COMPILE_TEST
1020 select NONSTATIC_KERNEL 1022 select NONSTATIC_KERNEL
1021 help 1023 help
1022 This builds a kernel image that is capable of running anywhere 1024 This builds a kernel image that is capable of running anywhere
diff --git a/arch/powerpc/Kconfig.debug b/arch/powerpc/Kconfig.debug
index 790352f93700..35d16bd2760b 100644
--- a/arch/powerpc/Kconfig.debug
+++ b/arch/powerpc/Kconfig.debug
@@ -303,7 +303,6 @@ config PPC_EARLY_DEBUG_OPAL_VTERMNO
 303 This corresponds to which /dev/hvcN you want to use for early 303
304 debug. 304 debug.
305 305
306 On OPAL v1 (takeover) this should always be 0
307 On OPAL v2, this will be 0 for network console and 1 or 2 for 306 On OPAL v2, this will be 0 for network console and 1 or 2 for
308 the machine built-in serial ports. 307 the machine built-in serial ports.
309 308
diff --git a/arch/powerpc/include/asm/code-patching.h b/arch/powerpc/include/asm/code-patching.h
index 37991e154ef8..840a5509b3f1 100644
--- a/arch/powerpc/include/asm/code-patching.h
+++ b/arch/powerpc/include/asm/code-patching.h
@@ -88,4 +88,15 @@ static inline unsigned long ppc_function_entry(void *func)
88#endif 88#endif
89} 89}
90 90
91static inline unsigned long ppc_global_function_entry(void *func)
92{
93#if defined(CONFIG_PPC64) && defined(_CALL_ELF) && _CALL_ELF == 2
 94 /* On PPC64 ABIv2 the global entry point is at the address */
95 return (unsigned long)func;
96#else
 97 /* In all other cases there is no change vs ppc_function_entry() */
98 return ppc_function_entry(func);
99#endif
100}
101
91#endif /* _ASM_POWERPC_CODE_PATCHING_H */ 102#endif /* _ASM_POWERPC_CODE_PATCHING_H */
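
ppc_global_function_entry() is needed because, under the ELFv2 ABI, a function has a global entry point at the symbol address (the one that sets up the TOC pointer in r2) and a local entry point a few instructions later; module trampolines branch to the global one, which is what the ftrace change later in this patch compares against. A condensed sketch of that use, assuming kernel context and with the error reporting trimmed:

#include <linux/module.h>
#include <asm/code-patching.h>

static int check_trampoline_target(struct module *mod, void *tramp,
				   unsigned long addr)
{
	unsigned long ptr, entry;

	if (module_trampoline_target(mod, tramp, &ptr))
		return -EFAULT;

	/* Trampolines jump to the global (TOC-setting) entry point. */
	entry = ppc_global_function_entry((void *)addr);
	return ptr == entry ? 0 : -EINVAL;
}
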
diff --git a/arch/powerpc/include/asm/mmu.h b/arch/powerpc/include/asm/mmu.h
index f8d1d6dcf7db..e61f24ed4e65 100644
--- a/arch/powerpc/include/asm/mmu.h
+++ b/arch/powerpc/include/asm/mmu.h
@@ -19,8 +19,7 @@
19#define MMU_FTR_TYPE_40x ASM_CONST(0x00000004) 19#define MMU_FTR_TYPE_40x ASM_CONST(0x00000004)
20#define MMU_FTR_TYPE_44x ASM_CONST(0x00000008) 20#define MMU_FTR_TYPE_44x ASM_CONST(0x00000008)
21#define MMU_FTR_TYPE_FSL_E ASM_CONST(0x00000010) 21#define MMU_FTR_TYPE_FSL_E ASM_CONST(0x00000010)
22#define MMU_FTR_TYPE_3E ASM_CONST(0x00000020) 22#define MMU_FTR_TYPE_47x ASM_CONST(0x00000020)
23#define MMU_FTR_TYPE_47x ASM_CONST(0x00000040)
24 23
25/* 24/*
26 * This is individual features 25 * This is individual features
@@ -106,13 +105,6 @@
106 MMU_FTR_CI_LARGE_PAGE 105 MMU_FTR_CI_LARGE_PAGE
107#define MMU_FTRS_PA6T MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \ 106#define MMU_FTRS_PA6T MMU_FTRS_DEFAULT_HPTE_ARCH_V2 | \
108 MMU_FTR_CI_LARGE_PAGE | MMU_FTR_NO_SLBIE_B 107 MMU_FTR_CI_LARGE_PAGE | MMU_FTR_NO_SLBIE_B
109#define MMU_FTRS_A2 MMU_FTR_TYPE_3E | MMU_FTR_USE_TLBILX | \
110 MMU_FTR_USE_TLBIVAX_BCAST | \
111 MMU_FTR_LOCK_BCAST_INVAL | \
112 MMU_FTR_USE_TLBRSRV | \
113 MMU_FTR_USE_PAIRED_MAS | \
114 MMU_FTR_TLBIEL | \
115 MMU_FTR_16M_PAGE
116#ifndef __ASSEMBLY__ 108#ifndef __ASSEMBLY__
117#include <asm/cputable.h> 109#include <asm/cputable.h>
118 110
diff --git a/arch/powerpc/include/asm/opal.h b/arch/powerpc/include/asm/opal.h
index 460018889ba9..0da1dbd42e02 100644
--- a/arch/powerpc/include/asm/opal.h
+++ b/arch/powerpc/include/asm/opal.h
@@ -12,27 +12,7 @@
12#ifndef __OPAL_H 12#ifndef __OPAL_H
13#define __OPAL_H 13#define __OPAL_H
14 14
15/****** Takeover interface ********/
16
17/* PAPR H-Call used to querty the HAL existence and/or instanciate
18 * it from within pHyp (tech preview only).
19 *
20 * This is exclusively used in prom_init.c
21 */
22
23#ifndef __ASSEMBLY__ 15#ifndef __ASSEMBLY__
24
25struct opal_takeover_args {
26 u64 k_image; /* r4 */
27 u64 k_size; /* r5 */
28 u64 k_entry; /* r6 */
29 u64 k_entry2; /* r7 */
30 u64 hal_addr; /* r8 */
31 u64 rd_image; /* r9 */
32 u64 rd_size; /* r10 */
33 u64 rd_loc; /* r11 */
34};
35
36/* 16/*
37 * SG entry 17 * SG entry
38 * 18 *
@@ -55,15 +35,6 @@ struct opal_sg_list {
55/* We calculate number of sg entries based on PAGE_SIZE */ 35/* We calculate number of sg entries based on PAGE_SIZE */
56#define SG_ENTRIES_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct opal_sg_entry)) 36#define SG_ENTRIES_PER_NODE ((PAGE_SIZE - 16) / sizeof(struct opal_sg_entry))
57 37
58extern long opal_query_takeover(u64 *hal_size, u64 *hal_align);
59
60extern long opal_do_takeover(struct opal_takeover_args *args);
61
62struct rtas_args;
63extern int opal_enter_rtas(struct rtas_args *args,
64 unsigned long data,
65 unsigned long entry);
66
67#endif /* __ASSEMBLY__ */ 38#endif /* __ASSEMBLY__ */
68 39
69/****** OPAL APIs ******/ 40/****** OPAL APIs ******/
diff --git a/arch/powerpc/include/asm/perf_event_server.h b/arch/powerpc/include/asm/perf_event_server.h
index 9ed737146dbb..b3e936027b26 100644
--- a/arch/powerpc/include/asm/perf_event_server.h
+++ b/arch/powerpc/include/asm/perf_event_server.h
@@ -61,8 +61,7 @@ struct power_pmu {
61#define PPMU_SIAR_VALID 0x00000010 /* Processor has SIAR Valid bit */ 61#define PPMU_SIAR_VALID 0x00000010 /* Processor has SIAR Valid bit */
62#define PPMU_HAS_SSLOT 0x00000020 /* Has sampled slot in MMCRA */ 62#define PPMU_HAS_SSLOT 0x00000020 /* Has sampled slot in MMCRA */
63#define PPMU_HAS_SIER 0x00000040 /* Has SIER */ 63#define PPMU_HAS_SIER 0x00000040 /* Has SIER */
64#define PPMU_BHRB 0x00000080 /* has BHRB feature enabled */ 64#define PPMU_ARCH_207S 0x00000080 /* PMC is architecture v2.07S */
65#define PPMU_EBB 0x00000100 /* supports event based branch */
66 65
67/* 66/*
68 * Values for flags to get_alternatives() 67 * Values for flags to get_alternatives()
diff --git a/arch/powerpc/include/asm/swab.h b/arch/powerpc/include/asm/swab.h
index b9bd1ca944d0..96f59de61855 100644
--- a/arch/powerpc/include/asm/swab.h
+++ b/arch/powerpc/include/asm/swab.h
@@ -9,10 +9,6 @@
9 9
10#include <uapi/asm/swab.h> 10#include <uapi/asm/swab.h>
11 11
12#ifdef __GNUC__
13#ifndef __powerpc64__
14#endif /* __powerpc64__ */
15
16static __inline__ __u16 ld_le16(const volatile __u16 *addr) 12static __inline__ __u16 ld_le16(const volatile __u16 *addr)
17{ 13{
18 __u16 val; 14 __u16 val;
@@ -20,19 +16,12 @@ static __inline__ __u16 ld_le16(const volatile __u16 *addr)
20 __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr)); 16 __asm__ __volatile__ ("lhbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
21 return val; 17 return val;
22} 18}
23#define __arch_swab16p ld_le16
24 19
25static __inline__ void st_le16(volatile __u16 *addr, const __u16 val) 20static __inline__ void st_le16(volatile __u16 *addr, const __u16 val)
26{ 21{
27 __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr)); 22 __asm__ __volatile__ ("sthbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
28} 23}
29 24
30static inline void __arch_swab16s(__u16 *addr)
31{
32 st_le16(addr, *addr);
33}
34#define __arch_swab16s __arch_swab16s
35
36static __inline__ __u32 ld_le32(const volatile __u32 *addr) 25static __inline__ __u32 ld_le32(const volatile __u32 *addr)
37{ 26{
38 __u32 val; 27 __u32 val;
@@ -40,42 +29,10 @@ static __inline__ __u32 ld_le32(const volatile __u32 *addr)
40 __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr)); 29 __asm__ __volatile__ ("lwbrx %0,0,%1" : "=r" (val) : "r" (addr), "m" (*addr));
41 return val; 30 return val;
42} 31}
43#define __arch_swab32p ld_le32
44 32
45static __inline__ void st_le32(volatile __u32 *addr, const __u32 val) 33static __inline__ void st_le32(volatile __u32 *addr, const __u32 val)
46{ 34{
47 __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr)); 35 __asm__ __volatile__ ("stwbrx %1,0,%2" : "=m" (*addr) : "r" (val), "r" (addr));
48} 36}
49 37
50static inline void __arch_swab32s(__u32 *addr)
51{
52 st_le32(addr, *addr);
53}
54#define __arch_swab32s __arch_swab32s
55
56static inline __attribute_const__ __u16 __arch_swab16(__u16 value)
57{
58 __u16 result;
59
60 __asm__("rlwimi %0,%1,8,16,23"
61 : "=r" (result)
62 : "r" (value), "0" (value >> 8));
63 return result;
64}
65#define __arch_swab16 __arch_swab16
66
67static inline __attribute_const__ __u32 __arch_swab32(__u32 value)
68{
69 __u32 result;
70
71 __asm__("rlwimi %0,%1,24,16,23\n\t"
72 "rlwimi %0,%1,8,8,15\n\t"
73 "rlwimi %0,%1,24,0,7"
74 : "=r" (result)
75 : "r" (value), "0" (value >> 24));
76 return result;
77}
78#define __arch_swab32 __arch_swab32
79
80#endif /* __GNUC__ */
81#endif /* _ASM_POWERPC_SWAB_H */ 38#endif /* _ASM_POWERPC_SWAB_H */
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
index f202d0731b06..d178834fe508 100644
--- a/arch/powerpc/kernel/ftrace.c
+++ b/arch/powerpc/kernel/ftrace.c
@@ -10,6 +10,8 @@
10 * 10 *
11 */ 11 */
12 12
13#define pr_fmt(fmt) "ftrace-powerpc: " fmt
14
13#include <linux/spinlock.h> 15#include <linux/spinlock.h>
14#include <linux/hardirq.h> 16#include <linux/hardirq.h>
15#include <linux/uaccess.h> 17#include <linux/uaccess.h>
@@ -105,7 +107,7 @@ __ftrace_make_nop(struct module *mod,
105 struct dyn_ftrace *rec, unsigned long addr) 107 struct dyn_ftrace *rec, unsigned long addr)
106{ 108{
107 unsigned int op; 109 unsigned int op;
108 unsigned long ptr; 110 unsigned long entry, ptr;
109 unsigned long ip = rec->ip; 111 unsigned long ip = rec->ip;
110 void *tramp; 112 void *tramp;
111 113
@@ -115,7 +117,7 @@ __ftrace_make_nop(struct module *mod,
115 117
 116 /* Make sure that this is still a 24-bit jump */ 118
117 if (!is_bl_op(op)) { 119 if (!is_bl_op(op)) {
118 printk(KERN_ERR "Not expected bl: opcode is %x\n", op); 120 pr_err("Not expected bl: opcode is %x\n", op);
119 return -EINVAL; 121 return -EINVAL;
120 } 122 }
121 123
@@ -125,21 +127,21 @@ __ftrace_make_nop(struct module *mod,
125 pr_devel("ip:%lx jumps to %p", ip, tramp); 127 pr_devel("ip:%lx jumps to %p", ip, tramp);
126 128
127 if (!is_module_trampoline(tramp)) { 129 if (!is_module_trampoline(tramp)) {
128 printk(KERN_ERR "Not a trampoline\n"); 130 pr_err("Not a trampoline\n");
129 return -EINVAL; 131 return -EINVAL;
130 } 132 }
131 133
132 if (module_trampoline_target(mod, tramp, &ptr)) { 134 if (module_trampoline_target(mod, tramp, &ptr)) {
133 printk(KERN_ERR "Failed to get trampoline target\n"); 135 pr_err("Failed to get trampoline target\n");
134 return -EFAULT; 136 return -EFAULT;
135 } 137 }
136 138
137 pr_devel("trampoline target %lx", ptr); 139 pr_devel("trampoline target %lx", ptr);
138 140
141 entry = ppc_global_function_entry((void *)addr);
139 /* This should match what was called */ 142 /* This should match what was called */
140 if (ptr != ppc_function_entry((void *)addr)) { 143 if (ptr != entry) {
141 printk(KERN_ERR "addr %lx does not match expected %lx\n", 144 pr_err("addr %lx does not match expected %lx\n", ptr, entry);
142 ptr, ppc_function_entry((void *)addr));
143 return -EINVAL; 145 return -EINVAL;
144 } 146 }
145 147
@@ -179,7 +181,7 @@ __ftrace_make_nop(struct module *mod,
179 181
 181 /* Make sure that this is still a 24-bit jump */ 183
181 if (!is_bl_op(op)) { 183 if (!is_bl_op(op)) {
182 printk(KERN_ERR "Not expected bl: opcode is %x\n", op); 184 pr_err("Not expected bl: opcode is %x\n", op);
183 return -EINVAL; 185 return -EINVAL;
184 } 186 }
185 187
@@ -198,7 +200,7 @@ __ftrace_make_nop(struct module *mod,
198 200
199 /* Find where the trampoline jumps to */ 201 /* Find where the trampoline jumps to */
200 if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) { 202 if (probe_kernel_read(jmp, (void *)tramp, sizeof(jmp))) {
201 printk(KERN_ERR "Failed to read %lx\n", tramp); 203 pr_err("Failed to read %lx\n", tramp);
202 return -EFAULT; 204 return -EFAULT;
203 } 205 }
204 206
@@ -209,7 +211,7 @@ __ftrace_make_nop(struct module *mod,
209 ((jmp[1] & 0xffff0000) != 0x398c0000) || 211 ((jmp[1] & 0xffff0000) != 0x398c0000) ||
210 (jmp[2] != 0x7d8903a6) || 212 (jmp[2] != 0x7d8903a6) ||
211 (jmp[3] != 0x4e800420)) { 213 (jmp[3] != 0x4e800420)) {
212 printk(KERN_ERR "Not a trampoline\n"); 214 pr_err("Not a trampoline\n");
213 return -EINVAL; 215 return -EINVAL;
214 } 216 }
215 217
@@ -221,8 +223,7 @@ __ftrace_make_nop(struct module *mod,
221 pr_devel(" %lx ", tramp); 223 pr_devel(" %lx ", tramp);
222 224
223 if (tramp != addr) { 225 if (tramp != addr) {
224 printk(KERN_ERR 226 pr_err("Trampoline location %08lx does not match addr\n",
225 "Trampoline location %08lx does not match addr\n",
226 tramp); 227 tramp);
227 return -EINVAL; 228 return -EINVAL;
228 } 229 }
@@ -263,15 +264,13 @@ int ftrace_make_nop(struct module *mod,
263 */ 264 */
264 if (!rec->arch.mod) { 265 if (!rec->arch.mod) {
265 if (!mod) { 266 if (!mod) {
266 printk(KERN_ERR "No module loaded addr=%lx\n", 267 pr_err("No module loaded addr=%lx\n", addr);
267 addr);
268 return -EFAULT; 268 return -EFAULT;
269 } 269 }
270 rec->arch.mod = mod; 270 rec->arch.mod = mod;
271 } else if (mod) { 271 } else if (mod) {
272 if (mod != rec->arch.mod) { 272 if (mod != rec->arch.mod) {
273 printk(KERN_ERR 273 pr_err("Record mod %p not equal to passed in mod %p\n",
274 "Record mod %p not equal to passed in mod %p\n",
275 rec->arch.mod, mod); 274 rec->arch.mod, mod);
276 return -EINVAL; 275 return -EINVAL;
277 } 276 }
@@ -307,26 +306,25 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
307 * The load offset is different depending on the ABI. For simplicity 306 * The load offset is different depending on the ABI. For simplicity
308 * just mask it out when doing the compare. 307 * just mask it out when doing the compare.
309 */ 308 */
310 if ((op[0] != 0x48000008) || ((op[1] & 0xffff00000) != 0xe8410000)) { 309 if ((op[0] != 0x48000008) || ((op[1] & 0xffff0000) != 0xe8410000)) {
311 printk(KERN_ERR "Unexpected call sequence: %x %x\n", 310 pr_err("Unexpected call sequence: %x %x\n", op[0], op[1]);
312 op[0], op[1]);
313 return -EINVAL; 311 return -EINVAL;
314 } 312 }
315 313
316 /* If we never set up a trampoline to ftrace_caller, then bail */ 314 /* If we never set up a trampoline to ftrace_caller, then bail */
317 if (!rec->arch.mod->arch.tramp) { 315 if (!rec->arch.mod->arch.tramp) {
318 printk(KERN_ERR "No ftrace trampoline\n"); 316 pr_err("No ftrace trampoline\n");
319 return -EINVAL; 317 return -EINVAL;
320 } 318 }
321 319
322 /* Ensure branch is within 24 bits */ 320 /* Ensure branch is within 24 bits */
323 if (create_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) { 321 if (!create_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) {
324 printk(KERN_ERR "Branch out of range"); 322 pr_err("Branch out of range\n");
325 return -EINVAL; 323 return -EINVAL;
326 } 324 }
327 325
328 if (patch_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) { 326 if (patch_branch(ip, rec->arch.mod->arch.tramp, BRANCH_SET_LINK)) {
329 printk(KERN_ERR "REL24 out of range!\n"); 327 pr_err("REL24 out of range!\n");
330 return -EINVAL; 328 return -EINVAL;
331 } 329 }
332 330
@@ -345,13 +343,13 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
345 343
346 /* It should be pointing to a nop */ 344 /* It should be pointing to a nop */
347 if (op != PPC_INST_NOP) { 345 if (op != PPC_INST_NOP) {
348 printk(KERN_ERR "Expected NOP but have %x\n", op); 346 pr_err("Expected NOP but have %x\n", op);
349 return -EINVAL; 347 return -EINVAL;
350 } 348 }
351 349
352 /* If we never set up a trampoline to ftrace_caller, then bail */ 350 /* If we never set up a trampoline to ftrace_caller, then bail */
353 if (!rec->arch.mod->arch.tramp) { 351 if (!rec->arch.mod->arch.tramp) {
354 printk(KERN_ERR "No ftrace trampoline\n"); 352 pr_err("No ftrace trampoline\n");
355 return -EINVAL; 353 return -EINVAL;
356 } 354 }
357 355
@@ -359,7 +357,7 @@ __ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
359 op = create_branch((unsigned int *)ip, 357 op = create_branch((unsigned int *)ip,
360 rec->arch.mod->arch.tramp, BRANCH_SET_LINK); 358 rec->arch.mod->arch.tramp, BRANCH_SET_LINK);
361 if (!op) { 359 if (!op) {
362 printk(KERN_ERR "REL24 out of range!\n"); 360 pr_err("REL24 out of range!\n");
363 return -EINVAL; 361 return -EINVAL;
364 } 362 }
365 363
@@ -397,7 +395,7 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
397 * already have a module defined. 395 * already have a module defined.
398 */ 396 */
399 if (!rec->arch.mod) { 397 if (!rec->arch.mod) {
400 printk(KERN_ERR "No module loaded\n"); 398 pr_err("No module loaded\n");
401 return -EINVAL; 399 return -EINVAL;
402 } 400 }
403 401
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
index 2480256272d4..5cf3d367190d 100644
--- a/arch/powerpc/kernel/idle_power7.S
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -131,7 +131,7 @@ _GLOBAL(power7_nap)
131 131
132_GLOBAL(power7_sleep) 132_GLOBAL(power7_sleep)
133 li r3,1 133 li r3,1
134 li r4,0 134 li r4,1
135 b power7_powersave_common 135 b power7_powersave_common
136 /* No return */ 136 /* No return */
137 137
diff --git a/arch/powerpc/kernel/iomap.c b/arch/powerpc/kernel/iomap.c
index b82227e7e21b..12e48d56f771 100644
--- a/arch/powerpc/kernel/iomap.c
+++ b/arch/powerpc/kernel/iomap.c
@@ -23,7 +23,7 @@ unsigned int ioread16(void __iomem *addr)
23} 23}
24unsigned int ioread16be(void __iomem *addr) 24unsigned int ioread16be(void __iomem *addr)
25{ 25{
26 return in_be16(addr); 26 return readw_be(addr);
27} 27}
28unsigned int ioread32(void __iomem *addr) 28unsigned int ioread32(void __iomem *addr)
29{ 29{
@@ -31,7 +31,7 @@ unsigned int ioread32(void __iomem *addr)
31} 31}
32unsigned int ioread32be(void __iomem *addr) 32unsigned int ioread32be(void __iomem *addr)
33{ 33{
34 return in_be32(addr); 34 return readl_be(addr);
35} 35}
36EXPORT_SYMBOL(ioread8); 36EXPORT_SYMBOL(ioread8);
37EXPORT_SYMBOL(ioread16); 37EXPORT_SYMBOL(ioread16);
@@ -49,7 +49,7 @@ void iowrite16(u16 val, void __iomem *addr)
49} 49}
50void iowrite16be(u16 val, void __iomem *addr) 50void iowrite16be(u16 val, void __iomem *addr)
51{ 51{
52 out_be16(addr, val); 52 writew_be(val, addr);
53} 53}
54void iowrite32(u32 val, void __iomem *addr) 54void iowrite32(u32 val, void __iomem *addr)
55{ 55{
@@ -57,7 +57,7 @@ void iowrite32(u32 val, void __iomem *addr)
57} 57}
58void iowrite32be(u32 val, void __iomem *addr) 58void iowrite32be(u32 val, void __iomem *addr)
59{ 59{
60 out_be32(addr, val); 60 writel_be(val, addr);
61} 61}
62EXPORT_SYMBOL(iowrite8); 62EXPORT_SYMBOL(iowrite8);
63EXPORT_SYMBOL(iowrite16); 63EXPORT_SYMBOL(iowrite16);
@@ -75,15 +75,15 @@ EXPORT_SYMBOL(iowrite32be);
75 */ 75 */
76void ioread8_rep(void __iomem *addr, void *dst, unsigned long count) 76void ioread8_rep(void __iomem *addr, void *dst, unsigned long count)
77{ 77{
78 _insb((u8 __iomem *) addr, dst, count); 78 readsb(addr, dst, count);
79} 79}
80void ioread16_rep(void __iomem *addr, void *dst, unsigned long count) 80void ioread16_rep(void __iomem *addr, void *dst, unsigned long count)
81{ 81{
82 _insw_ns((u16 __iomem *) addr, dst, count); 82 readsw(addr, dst, count);
83} 83}
84void ioread32_rep(void __iomem *addr, void *dst, unsigned long count) 84void ioread32_rep(void __iomem *addr, void *dst, unsigned long count)
85{ 85{
86 _insl_ns((u32 __iomem *) addr, dst, count); 86 readsl(addr, dst, count);
87} 87}
88EXPORT_SYMBOL(ioread8_rep); 88EXPORT_SYMBOL(ioread8_rep);
89EXPORT_SYMBOL(ioread16_rep); 89EXPORT_SYMBOL(ioread16_rep);
@@ -91,15 +91,15 @@ EXPORT_SYMBOL(ioread32_rep);
91 91
92void iowrite8_rep(void __iomem *addr, const void *src, unsigned long count) 92void iowrite8_rep(void __iomem *addr, const void *src, unsigned long count)
93{ 93{
94 _outsb((u8 __iomem *) addr, src, count); 94 writesb(addr, src, count);
95} 95}
96void iowrite16_rep(void __iomem *addr, const void *src, unsigned long count) 96void iowrite16_rep(void __iomem *addr, const void *src, unsigned long count)
97{ 97{
98 _outsw_ns((u16 __iomem *) addr, src, count); 98 writesw(addr, src, count);
99} 99}
100void iowrite32_rep(void __iomem *addr, const void *src, unsigned long count) 100void iowrite32_rep(void __iomem *addr, const void *src, unsigned long count)
101{ 101{
102 _outsl_ns((u32 __iomem *) addr, src, count); 102 writesl(addr, src, count);
103} 103}
104EXPORT_SYMBOL(iowrite8_rep); 104EXPORT_SYMBOL(iowrite8_rep);
105EXPORT_SYMBOL(iowrite16_rep); 105EXPORT_SYMBOL(iowrite16_rep);
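The iomap helpers switch from the powerpc-private in_be16/in_be32/out_be* and _ins*/_outs* primitives to the generic readw_be/readl_be/writew_be/writel_be and reads*/writes* accessors, keeping the same big-endian byte order. The user-space sketch below only illustrates what a big-endian register read means on a little-endian host: a plain load plus a byte swap. The __BYTE_ORDER__ test is the GCC/Clang builtin, and the "register" here is just a byte array.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* A big-endian 32-bit read: load the word, then swap bytes when the host
 * is little-endian, which is essentially what readl_be() does for MMIO. */
static uint32_t read32_be(const void *addr)
{
	uint32_t v;

	memcpy(&v, addr, sizeof(v));
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	v = __builtin_bswap32(v);
#endif
	return v;
}

int main(void)
{
	uint8_t fake_reg[4] = { 0x12, 0x34, 0x56, 0x78 };	/* device stores big-endian */

	printf("0x%08x\n", read32_be(fake_reg));		/* prints 0x12345678 */
	return 0;
}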
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 90fab64d911d..2f72af82513c 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -32,6 +32,7 @@
32#include <linux/module.h> 32#include <linux/module.h>
33#include <linux/kdebug.h> 33#include <linux/kdebug.h>
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <asm/code-patching.h>
35#include <asm/cacheflush.h> 36#include <asm/cacheflush.h>
36#include <asm/sstep.h> 37#include <asm/sstep.h>
37#include <asm/uaccess.h> 38#include <asm/uaccess.h>
@@ -491,12 +492,10 @@ int __kprobes kprobe_exceptions_notify(struct notifier_block *self,
491 return ret; 492 return ret;
492} 493}
493 494
494#ifdef CONFIG_PPC64
495unsigned long arch_deref_entry_point(void *entry) 495unsigned long arch_deref_entry_point(void *entry)
496{ 496{
497 return ((func_descr_t *)entry)->entry; 497 return ppc_global_function_entry(entry);
498} 498}
499#endif
500 499
501int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs) 500int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
502{ 501{
@@ -508,8 +507,12 @@ int __kprobes setjmp_pre_handler(struct kprobe *p, struct pt_regs *regs)
508 /* setup return addr to the jprobe handler routine */ 507 /* setup return addr to the jprobe handler routine */
509 regs->nip = arch_deref_entry_point(jp->entry); 508 regs->nip = arch_deref_entry_point(jp->entry);
510#ifdef CONFIG_PPC64 509#ifdef CONFIG_PPC64
510#if defined(_CALL_ELF) && _CALL_ELF == 2
511 regs->gpr[12] = (unsigned long)jp->entry;
512#else
511 regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc); 513 regs->gpr[2] = (unsigned long)(((func_descr_t *)jp->entry)->toc);
512#endif 514#endif
515#endif
513 516
514 return 1; 517 return 1;
515} 518}
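The kprobes change drops the CONFIG_PPC64-only descriptor dereference: on the ELFv1 ABI a function symbol points at an "opd" descriptor holding the real entry address and TOC value, while ELFv2 has no descriptors and instead gives each function a global entry point that derives its TOC from r12 — which is why the jprobe setup now loads r12 on ELFv2 and r2 (the TOC) only on ELFv1. arch_deref_entry_point() now goes through ppc_global_function_entry(), which resolves the right entry address on both ABIs. The sketch below just spells out the two conventions; the struct mirrors the kernel's func_descr_t and is for illustration only.

#include <stdint.h>

/* ELFv1 "opd" function descriptor, as dereferenced by the old
 * arch_deref_entry_point(); field names follow the kernel's func_descr_t. */
struct ppc64_elfv1_func_desc {
	uint64_t entry;		/* address of the first instruction */
	uint64_t toc;		/* r2 (TOC) value the function expects */
	uint64_t env;		/* unused environment pointer */
};

/* ELFv1: a "function address" is really the address of its descriptor. */
static uint64_t elfv1_entry(const struct ppc64_elfv1_func_desc *fp)
{
	return fp->entry;
}

/* ELFv2: the symbol value already is the global entry point; the callee
 * recomputes its TOC from r12 in its first two prologue instructions. */
static uint64_t elfv2_entry(uint64_t fp)
{
	return fp;
}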
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index 077d2ce6c5a7..d807ee626af9 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -315,8 +315,17 @@ static void dedotify_versions(struct modversion_info *vers,
315 struct modversion_info *end; 315 struct modversion_info *end;
316 316
317 for (end = (void *)vers + size; vers < end; vers++) 317 for (end = (void *)vers + size; vers < end; vers++)
318 if (vers->name[0] == '.') 318 if (vers->name[0] == '.') {
319 memmove(vers->name, vers->name+1, strlen(vers->name)); 319 memmove(vers->name, vers->name+1, strlen(vers->name));
320#ifdef ARCH_RELOCATES_KCRCTAB
321 /* The TOC symbol has no CRC computed. To avoid CRC
322 * check failing, we must force it to the expected
323 * value (see CRC check in module.c).
324 */
325 if (!strcmp(vers->name, "TOC."))
326 vers->crc = -(unsigned long)reloc_start;
327#endif
328 }
320} 329}
321 330
322/* Undefined symbols which refer to .funcname, hack to funcname (or .TOC.) */ 331/* Undefined symbols which refer to .funcname, hack to funcname (or .TOC.) */
diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c
index 613a860a203c..b694b0730971 100644
--- a/arch/powerpc/kernel/prom.c
+++ b/arch/powerpc/kernel/prom.c
@@ -662,13 +662,6 @@ void __init early_init_devtree(void *params)
662 of_scan_flat_dt(early_init_dt_scan_fw_dump, NULL); 662 of_scan_flat_dt(early_init_dt_scan_fw_dump, NULL);
663#endif 663#endif
664 664
665 /* Pre-initialize the cmd_line with the content of boot_commmand_line,
666 * which will be empty except when the content of the variable has
667 * been overriden by a bootloading mechanism. This happens typically
668 * with HAL takeover
669 */
670 strlcpy(cmd_line, boot_command_line, COMMAND_LINE_SIZE);
671
672 /* Retrieve various information from the /chosen node of the 665 /* Retrieve various information from the
673 * device-tree, including the platform type, initrd location and 666 * device-tree, including the platform type, initrd location and
674 * size, TCE reserve, and more ... 667 * size, TCE reserve, and more ...
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 078145acf7fb..1a85d8f96739 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -1268,201 +1268,6 @@ static u64 __initdata prom_opal_base;
1268static u64 __initdata prom_opal_entry; 1268static u64 __initdata prom_opal_entry;
1269#endif 1269#endif
1270 1270
1271#ifdef __BIG_ENDIAN__
1272/* XXX Don't change this structure without updating opal-takeover.S */
1273static struct opal_secondary_data {
1274 s64 ack; /* 0 */
1275 u64 go; /* 8 */
1276 struct opal_takeover_args args; /* 16 */
1277} opal_secondary_data;
1278
1279static u64 __initdata prom_opal_align;
1280static u64 __initdata prom_opal_size;
1281static int __initdata prom_rtas_start_cpu;
1282static u64 __initdata prom_rtas_data;
1283static u64 __initdata prom_rtas_entry;
1284
1285extern char opal_secondary_entry;
1286
1287static void __init prom_query_opal(void)
1288{
1289 long rc;
1290
1291 /* We must not query for OPAL presence on a machine that
1292 * supports TNK takeover (970 blades), as this uses the same
1293 * h-call with different arguments and will crash
1294 */
1295 if (PHANDLE_VALID(call_prom("finddevice", 1, 1,
1296 ADDR("/tnk-memory-map")))) {
1297 prom_printf("TNK takeover detected, skipping OPAL check\n");
1298 return;
1299 }
1300
1301 prom_printf("Querying for OPAL presence... ");
1302
1303 rc = opal_query_takeover(&prom_opal_size,
1304 &prom_opal_align);
1305 prom_debug("(rc = %ld) ", rc);
1306 if (rc != 0) {
1307 prom_printf("not there.\n");
1308 return;
1309 }
1310 of_platform = PLATFORM_OPAL;
1311 prom_printf(" there !\n");
1312 prom_debug(" opal_size = 0x%lx\n", prom_opal_size);
1313 prom_debug(" opal_align = 0x%lx\n", prom_opal_align);
1314 if (prom_opal_align < 0x10000)
1315 prom_opal_align = 0x10000;
1316}
1317
1318static int __init prom_rtas_call(int token, int nargs, int nret,
1319 int *outputs, ...)
1320{
1321 struct rtas_args rtas_args;
1322 va_list list;
1323 int i;
1324
1325 rtas_args.token = token;
1326 rtas_args.nargs = nargs;
1327 rtas_args.nret = nret;
1328 rtas_args.rets = (rtas_arg_t *)&(rtas_args.args[nargs]);
1329 va_start(list, outputs);
1330 for (i = 0; i < nargs; ++i)
1331 rtas_args.args[i] = va_arg(list, rtas_arg_t);
1332 va_end(list);
1333
1334 for (i = 0; i < nret; ++i)
1335 rtas_args.rets[i] = 0;
1336
1337 opal_enter_rtas(&rtas_args, prom_rtas_data,
1338 prom_rtas_entry);
1339
1340 if (nret > 1 && outputs != NULL)
1341 for (i = 0; i < nret-1; ++i)
1342 outputs[i] = rtas_args.rets[i+1];
1343 return (nret > 0)? rtas_args.rets[0]: 0;
1344}
1345
1346static void __init prom_opal_hold_cpus(void)
1347{
1348 int i, cnt, cpu, rc;
1349 long j;
1350 phandle node;
1351 char type[64];
1352 u32 servers[8];
1353 void *entry = (unsigned long *)&opal_secondary_entry;
1354 struct opal_secondary_data *data = &opal_secondary_data;
1355
1356 prom_debug("prom_opal_hold_cpus: start...\n");
1357 prom_debug(" - entry = 0x%x\n", entry);
1358 prom_debug(" - data = 0x%x\n", data);
1359
1360 data->ack = -1;
1361 data->go = 0;
1362
1363 /* look for cpus */
1364 for (node = 0; prom_next_node(&node); ) {
1365 type[0] = 0;
1366 prom_getprop(node, "device_type", type, sizeof(type));
1367 if (strcmp(type, "cpu") != 0)
1368 continue;
1369
1370 /* Skip non-configured cpus. */
1371 if (prom_getprop(node, "status", type, sizeof(type)) > 0)
1372 if (strcmp(type, "okay") != 0)
1373 continue;
1374
1375 cnt = prom_getprop(node, "ibm,ppc-interrupt-server#s", servers,
1376 sizeof(servers));
1377 if (cnt == PROM_ERROR)
1378 break;
1379 cnt >>= 2;
1380 for (i = 0; i < cnt; i++) {
1381 cpu = servers[i];
1382 prom_debug("CPU %d ... ", cpu);
1383 if (cpu == prom.cpu) {
1384 prom_debug("booted !\n");
1385 continue;
1386 }
1387 prom_debug("starting ... ");
1388
1389 /* Init the acknowledge var which will be reset by
1390 * the secondary cpu when it awakens from its OF
1391 * spinloop.
1392 */
1393 data->ack = -1;
1394 rc = prom_rtas_call(prom_rtas_start_cpu, 3, 1,
1395 NULL, cpu, entry, data);
1396 prom_debug("rtas rc=%d ...", rc);
1397
1398 for (j = 0; j < 100000000 && data->ack == -1; j++) {
1399 HMT_low();
1400 mb();
1401 }
1402 HMT_medium();
1403 if (data->ack != -1)
1404 prom_debug("done, PIR=0x%x\n", data->ack);
1405 else
1406 prom_debug("timeout !\n");
1407 }
1408 }
1409 prom_debug("prom_opal_hold_cpus: end...\n");
1410}
1411
1412static void __init prom_opal_takeover(void)
1413{
1414 struct opal_secondary_data *data = &opal_secondary_data;
1415 struct opal_takeover_args *args = &data->args;
1416 u64 align = prom_opal_align;
1417 u64 top_addr, opal_addr;
1418
1419 args->k_image = (u64)_stext;
1420 args->k_size = _end - _stext;
1421 args->k_entry = 0;
1422 args->k_entry2 = 0x60;
1423
1424 top_addr = _ALIGN_UP(args->k_size, align);
1425
1426 if (prom_initrd_start != 0) {
1427 args->rd_image = prom_initrd_start;
1428 args->rd_size = prom_initrd_end - args->rd_image;
1429 args->rd_loc = top_addr;
1430 top_addr = _ALIGN_UP(args->rd_loc + args->rd_size, align);
1431 }
1432
1433 /* Pickup an address for the HAL. We want to go really high
1434 * up to avoid problem with future kexecs. On the other hand
1435 * we don't want to be all over the TCEs on P5IOC2 machines
1436 * which are going to be up there too. We assume the machine
1437 * has plenty of memory, and we ask for the HAL for now to
1438 * be just below the 1G point, or above the initrd
1439 */
1440 opal_addr = _ALIGN_DOWN(0x40000000 - prom_opal_size, align);
1441 if (opal_addr < top_addr)
1442 opal_addr = top_addr;
1443 args->hal_addr = opal_addr;
1444
1445 /* Copy the command line to the kernel image */
1446 strlcpy(boot_command_line, prom_cmd_line,
1447 COMMAND_LINE_SIZE);
1448
1449 prom_debug(" k_image = 0x%lx\n", args->k_image);
1450 prom_debug(" k_size = 0x%lx\n", args->k_size);
1451 prom_debug(" k_entry = 0x%lx\n", args->k_entry);
1452 prom_debug(" k_entry2 = 0x%lx\n", args->k_entry2);
1453 prom_debug(" hal_addr = 0x%lx\n", args->hal_addr);
1454 prom_debug(" rd_image = 0x%lx\n", args->rd_image);
1455 prom_debug(" rd_size = 0x%lx\n", args->rd_size);
1456 prom_debug(" rd_loc = 0x%lx\n", args->rd_loc);
1457 prom_printf("Performing OPAL takeover,this can take a few minutes..\n");
1458 prom_close_stdin();
1459 mb();
1460 data->go = 1;
1461 for (;;)
1462 opal_do_takeover(args);
1463}
1464#endif /* __BIG_ENDIAN__ */
1465
1466/* 1271/*
1467 * Allocate room for and instantiate OPAL 1272 * Allocate room for and instantiate OPAL
1468 */ 1273 */
@@ -1597,12 +1402,6 @@ static void __init prom_instantiate_rtas(void)
1597 &val, sizeof(val)) != PROM_ERROR) 1402 &val, sizeof(val)) != PROM_ERROR)
1598 rtas_has_query_cpu_stopped = true; 1403 rtas_has_query_cpu_stopped = true;
1599 1404
1600#if defined(CONFIG_PPC_POWERNV) && defined(__BIG_ENDIAN__)
1601 /* PowerVN takeover hack */
1602 prom_rtas_data = base;
1603 prom_rtas_entry = entry;
1604 prom_getprop(rtas_node, "start-cpu", &prom_rtas_start_cpu, 4);
1605#endif
1606 prom_debug("rtas base = 0x%x\n", base); 1405 prom_debug("rtas base = 0x%x\n", base);
1607 prom_debug("rtas entry = 0x%x\n", entry); 1406 prom_debug("rtas entry = 0x%x\n", entry);
1608 prom_debug("rtas size = 0x%x\n", (long)size); 1407 prom_debug("rtas size = 0x%x\n", (long)size);
@@ -3027,16 +2826,6 @@ unsigned long __init prom_init(unsigned long r3, unsigned long r4,
3027 prom_instantiate_rtas(); 2826 prom_instantiate_rtas();
3028 2827
3029#ifdef CONFIG_PPC_POWERNV 2828#ifdef CONFIG_PPC_POWERNV
3030#ifdef __BIG_ENDIAN__
3031 /* Detect HAL and try instanciating it & doing takeover */
3032 if (of_platform == PLATFORM_PSERIES_LPAR) {
3033 prom_query_opal();
3034 if (of_platform == PLATFORM_OPAL) {
3035 prom_opal_hold_cpus();
3036 prom_opal_takeover();
3037 }
3038 } else
3039#endif /* __BIG_ENDIAN__ */
3040 if (of_platform == PLATFORM_OPAL) 2829 if (of_platform == PLATFORM_OPAL)
3041 prom_instantiate_opal(); 2830 prom_instantiate_opal();
3042#endif /* CONFIG_PPC_POWERNV */ 2831#endif /* CONFIG_PPC_POWERNV */
diff --git a/arch/powerpc/kernel/prom_init_check.sh b/arch/powerpc/kernel/prom_init_check.sh
index 77aa1e95e904..fe8e54b9ef7d 100644
--- a/arch/powerpc/kernel/prom_init_check.sh
+++ b/arch/powerpc/kernel/prom_init_check.sh
@@ -21,9 +21,7 @@ _end enter_prom memcpy memset reloc_offset __secondary_hold
21__secondary_hold_acknowledge __secondary_hold_spinloop __start 21__secondary_hold_acknowledge __secondary_hold_spinloop __start
22strcmp strcpy strlcpy strlen strncmp strstr logo_linux_clut224 22strcmp strcpy strlcpy strlen strncmp strstr logo_linux_clut224
23reloc_got2 kernstart_addr memstart_addr linux_banner _stext 23reloc_got2 kernstart_addr memstart_addr linux_banner _stext
24opal_query_takeover opal_do_takeover opal_enter_rtas opal_secondary_entry 24__prom_init_toc_start __prom_init_toc_end btext_setup_display TOC."
25boot_command_line __prom_init_toc_start __prom_init_toc_end
26btext_setup_display TOC."
27 25
28NM="$1" 26NM="$1"
29OBJ="$2" 27OBJ="$2"
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index e239df3768ac..e5b022c55ccd 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -469,9 +469,17 @@ void __init smp_setup_cpu_maps(void)
469 } 469 }
470 470
471 for (j = 0; j < nthreads && cpu < nr_cpu_ids; j++) { 471 for (j = 0; j < nthreads && cpu < nr_cpu_ids; j++) {
472 bool avail;
473
472 DBG(" thread %d -> cpu %d (hard id %d)\n", 474 DBG(" thread %d -> cpu %d (hard id %d)\n",
473 j, cpu, be32_to_cpu(intserv[j])); 475 j, cpu, be32_to_cpu(intserv[j]));
474 set_cpu_present(cpu, of_device_is_available(dn)); 476
477 avail = of_device_is_available(dn);
478 if (!avail)
479 avail = !of_property_match_string(dn,
480 "enable-method", "spin-table");
481
482 set_cpu_present(cpu, avail);
475 set_hard_smp_processor_id(cpu, be32_to_cpu(intserv[j])); 483 set_hard_smp_processor_id(cpu, be32_to_cpu(intserv[j]));
476 set_cpu_possible(cpu, true); 484 set_cpu_possible(cpu, true);
477 cpu++; 485 cpu++;
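smp_setup_cpu_maps() previously marked a thread present only when its device-tree node was available; with the change above, a CPU whose node is disabled but advertises enable-method = "spin-table" is still marked present, so it can be released from its spin loop and brought online later. A hedged restatement of the predicate follows; of_device_is_available() and of_property_match_string() are the same OF helpers the patch uses, and the wrapper name is made up for the example.

#include <linux/of.h>

/* A CPU thread counts as present if its node is available, or if it is
 * parked in a spin-table and can therefore still be started later. */
static bool cpu_thread_is_present(struct device_node *dn)
{
	if (of_device_is_available(dn))
		return true;

	/* of_property_match_string() returns the matching string index
	 * (0 here) on success and a negative errno otherwise. */
	return of_property_match_string(dn, "enable-method", "spin-table") == 0;
}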
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 4e47db686b5d..1bc5a1755ed4 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -54,7 +54,6 @@
54 54
55#include "signal.h" 55#include "signal.h"
56 56
57#undef DEBUG_SIG
58 57
59#ifdef CONFIG_PPC64 58#ifdef CONFIG_PPC64
60#define sys_rt_sigreturn compat_sys_rt_sigreturn 59#define sys_rt_sigreturn compat_sys_rt_sigreturn
@@ -1063,10 +1062,6 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
1063 return 1; 1062 return 1;
1064 1063
1065badframe: 1064badframe:
1066#ifdef DEBUG_SIG
1067 printk("badframe in handle_rt_signal, regs=%p frame=%p newsp=%lx\n",
1068 regs, frame, newsp);
1069#endif
1070 if (show_unhandled_signals) 1065 if (show_unhandled_signals)
1071 printk_ratelimited(KERN_INFO 1066 printk_ratelimited(KERN_INFO
1072 "%s[%d]: bad frame in handle_rt_signal32: " 1067 "%s[%d]: bad frame in handle_rt_signal32: "
@@ -1484,10 +1479,6 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
1484 return 1; 1479 return 1;
1485 1480
1486badframe: 1481badframe:
1487#ifdef DEBUG_SIG
1488 printk("badframe in handle_signal, regs=%p frame=%p newsp=%lx\n",
1489 regs, frame, newsp);
1490#endif
1491 if (show_unhandled_signals) 1482 if (show_unhandled_signals)
1492 printk_ratelimited(KERN_INFO 1483 printk_ratelimited(KERN_INFO
1493 "%s[%d]: bad frame in handle_signal32: " 1484 "%s[%d]: bad frame in handle_signal32: "
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index d501dc4dc3e6..97c1e4b683fc 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -38,7 +38,6 @@
38 38
39#include "signal.h" 39#include "signal.h"
40 40
41#define DEBUG_SIG 0
42 41
43#define GP_REGS_SIZE min(sizeof(elf_gregset_t), sizeof(struct pt_regs)) 42#define GP_REGS_SIZE min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
44#define FP_REGS_SIZE sizeof(elf_fpregset_t) 43#define FP_REGS_SIZE sizeof(elf_fpregset_t)
@@ -700,10 +699,6 @@ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
700 return 0; 699 return 0;
701 700
702badframe: 701badframe:
703#if DEBUG_SIG
704 printk("badframe in sys_rt_sigreturn, regs=%p uc=%p &uc->uc_mcontext=%p\n",
705 regs, uc, &uc->uc_mcontext);
706#endif
707 if (show_unhandled_signals) 702 if (show_unhandled_signals)
708 printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32, 703 printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
709 current->comm, current->pid, "rt_sigreturn", 704 current->comm, current->pid, "rt_sigreturn",
@@ -809,10 +804,6 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
809 return 1; 804 return 1;
810 805
811badframe: 806badframe:
812#if DEBUG_SIG
813 printk("badframe in setup_rt_frame, regs=%p frame=%p newsp=%lx\n",
814 regs, frame, newsp);
815#endif
816 if (show_unhandled_signals) 807 if (show_unhandled_signals)
817 printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32, 808 printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
818 current->comm, current->pid, "setup_rt_frame", 809 current->comm, current->pid, "setup_rt_frame",
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 51a3ff78838a..1007fb802e6b 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -747,7 +747,7 @@ int setup_profiling_timer(unsigned int multiplier)
747 747
748#ifdef CONFIG_SCHED_SMT 748#ifdef CONFIG_SCHED_SMT
749/* cpumask of CPUs with asymmetric SMT dependency */ 749/* cpumask of CPUs with asymmetric SMT dependency */
750static const int powerpc_smt_flags(void) 750static int powerpc_smt_flags(void)
751{ 751{
752 int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES; 752 int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
753 753
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S
index 8c86422a1e37..731be7478b27 100644
--- a/arch/powerpc/kvm/book3s_hv_interrupts.S
+++ b/arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -127,11 +127,6 @@ BEGIN_FTR_SECTION
127 stw r10, HSTATE_PMC + 24(r13) 127 stw r10, HSTATE_PMC + 24(r13)
128 stw r11, HSTATE_PMC + 28(r13) 128 stw r11, HSTATE_PMC + 28(r13)
129END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) 129END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
130BEGIN_FTR_SECTION
131 mfspr r9, SPRN_SIER
132 std r8, HSTATE_MMCR + 40(r13)
133 std r9, HSTATE_MMCR + 48(r13)
134END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
13531: 13031:
136 131
137 /* 132 /*
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index af3d78e19302..928ebe79668b 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -410,17 +410,7 @@ void __init mmu_context_init(void)
410 } else if (mmu_has_feature(MMU_FTR_TYPE_47x)) { 410 } else if (mmu_has_feature(MMU_FTR_TYPE_47x)) {
411 first_context = 1; 411 first_context = 1;
412 last_context = 65535; 412 last_context = 65535;
413 } else 413 } else {
414#ifdef CONFIG_PPC_BOOK3E_MMU
415 if (mmu_has_feature(MMU_FTR_TYPE_3E)) {
416 u32 mmucfg = mfspr(SPRN_MMUCFG);
417 u32 pid_bits = (mmucfg & MMUCFG_PIDSIZE_MASK)
418 >> MMUCFG_PIDSIZE_SHIFT;
419 first_context = 1;
420 last_context = (1UL << (pid_bits + 1)) - 1;
421 } else
422#endif
423 {
424 first_context = 1; 414 first_context = 1;
425 last_context = 255; 415 last_context = 255;
426 } 416 }
diff --git a/arch/powerpc/net/bpf_jit_comp.c b/arch/powerpc/net/bpf_jit_comp.c
index 6dcdadefd8d0..82e82cadcde5 100644
--- a/arch/powerpc/net/bpf_jit_comp.c
+++ b/arch/powerpc/net/bpf_jit_comp.c
@@ -390,12 +390,16 @@ static int bpf_jit_build_body(struct sk_filter *fp, u32 *image,
390 case BPF_ANC | SKF_AD_VLAN_TAG: 390 case BPF_ANC | SKF_AD_VLAN_TAG:
391 case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT: 391 case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
392 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2); 392 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, vlan_tci) != 2);
393 BUILD_BUG_ON(VLAN_TAG_PRESENT != 0x1000);
394
393 PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff, 395 PPC_LHZ_OFFS(r_A, r_skb, offsetof(struct sk_buff,
394 vlan_tci)); 396 vlan_tci));
395 if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) 397 if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) {
396 PPC_ANDI(r_A, r_A, VLAN_VID_MASK); 398 PPC_ANDI(r_A, r_A, ~VLAN_TAG_PRESENT);
397 else 399 } else {
398 PPC_ANDI(r_A, r_A, VLAN_TAG_PRESENT); 400 PPC_ANDI(r_A, r_A, VLAN_TAG_PRESENT);
401 PPC_SRWI(r_A, r_A, 12);
402 }
399 break; 403 break;
400 case BPF_ANC | SKF_AD_QUEUE: 404 case BPF_ANC | SKF_AD_QUEUE:
401 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, 405 BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff,
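The BPF JIT fix changes what the two VLAN ancillary loads return so they match the interpreter: SKF_AD_VLAN_TAG must yield the tag with only the VLAN_TAG_PRESENT bit (bit 12, as the new BUILD_BUG_ON pins down) cleared — masking with VLAN_VID_MASK also threw away the PCP/DEI bits — and SKF_AD_VLAN_TAG_PRESENT must yield exactly 0 or 1, hence the added shift right by 12. A small user-space check of those semantics; the constants are copied here so the example stands alone.

#include <assert.h>
#include <stdint.h>

#define VLAN_TAG_PRESENT 0x1000		/* bit 12, as the new BUILD_BUG_ON asserts */
#define VLAN_VID_MASK    0x0fff

/* SKF_AD_VLAN_TAG: the whole tag (PCP/DEI/VID) minus the "present" bit. */
static uint32_t bpf_vlan_tag(uint16_t vlan_tci)
{
	return vlan_tci & ~VLAN_TAG_PRESENT;
}

/* SKF_AD_VLAN_TAG_PRESENT: strictly 0 or 1, hence the shift by 12. */
static uint32_t bpf_vlan_tag_present(uint16_t vlan_tci)
{
	return (vlan_tci & VLAN_TAG_PRESENT) >> 12;
}

int main(void)
{
	uint16_t tci = VLAN_TAG_PRESENT | (5u << 13) | 42;	/* present, PCP 5, VID 42 */

	assert(bpf_vlan_tag(tci) == ((5u << 13) | 42));
	assert(bpf_vlan_tag_present(tci) == 1);
	assert(bpf_vlan_tag_present(42) == 0);
	/* What the old JIT computed for SKF_AD_VLAN_TAG: VID only, priority lost. */
	assert((tci & VLAN_VID_MASK) == 42);
	return 0;
}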
diff --git a/arch/powerpc/perf/core-book3s.c b/arch/powerpc/perf/core-book3s.c
index 4520c9356b54..6b0641c3f03f 100644
--- a/arch/powerpc/perf/core-book3s.c
+++ b/arch/powerpc/perf/core-book3s.c
@@ -485,7 +485,7 @@ static bool is_ebb_event(struct perf_event *event)
485 * check that the PMU supports EBB, meaning those that don't can still 485 * check that the PMU supports EBB, meaning those that don't can still
486 * use bit 63 of the event code for something else if they wish. 486 * use bit 63 of the event code for something else if they wish.
487 */ 487 */
488 return (ppmu->flags & PPMU_EBB) && 488 return (ppmu->flags & PPMU_ARCH_207S) &&
489 ((event->attr.config >> PERF_EVENT_CONFIG_EBB_SHIFT) & 1); 489 ((event->attr.config >> PERF_EVENT_CONFIG_EBB_SHIFT) & 1);
490} 490}
491 491
@@ -777,7 +777,7 @@ void perf_event_print_debug(void)
777 if (ppmu->flags & PPMU_HAS_SIER) 777 if (ppmu->flags & PPMU_HAS_SIER)
778 sier = mfspr(SPRN_SIER); 778 sier = mfspr(SPRN_SIER);
779 779
780 if (ppmu->flags & PPMU_EBB) { 780 if (ppmu->flags & PPMU_ARCH_207S) {
781 pr_info("MMCR2: %016lx EBBHR: %016lx\n", 781 pr_info("MMCR2: %016lx EBBHR: %016lx\n",
782 mfspr(SPRN_MMCR2), mfspr(SPRN_EBBHR)); 782 mfspr(SPRN_MMCR2), mfspr(SPRN_EBBHR));
783 pr_info("EBBRR: %016lx BESCR: %016lx\n", 783 pr_info("EBBRR: %016lx BESCR: %016lx\n",
@@ -996,7 +996,22 @@ static void power_pmu_read(struct perf_event *event)
996 } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev); 996 } while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);
997 997
998 local64_add(delta, &event->count); 998 local64_add(delta, &event->count);
999 local64_sub(delta, &event->hw.period_left); 999
1000 /*
1001 * A number of places program the PMC with (0x80000000 - period_left).
1002 * We never want period_left to be less than 1 because we will program
1003 * the PMC with a value >= 0x800000000 and an edge detected PMC will
1004 * roll around to 0 before taking an exception. We have seen this
1005 * on POWER8.
1006 *
1007 * To fix this, clamp the minimum value of period_left to 1.
1008 */
1009 do {
1010 prev = local64_read(&event->hw.period_left);
1011 val = prev - delta;
1012 if (val < 1)
1013 val = 1;
1014 } while (local64_cmpxchg(&event->hw.period_left, prev, val) != prev);
1000} 1015}
1001 1016
1002/* 1017/*
@@ -1300,6 +1315,9 @@ static void power_pmu_enable(struct pmu *pmu)
1300 1315
1301 write_mmcr0(cpuhw, mmcr0); 1316 write_mmcr0(cpuhw, mmcr0);
1302 1317
1318 if (ppmu->flags & PPMU_ARCH_207S)
1319 mtspr(SPRN_MMCR2, 0);
1320
1303 /* 1321 /*
1304 * Enable instruction sampling if necessary 1322 * Enable instruction sampling if necessary
1305 */ 1323 */
@@ -1696,7 +1714,7 @@ static int power_pmu_event_init(struct perf_event *event)
1696 1714
1697 if (has_branch_stack(event)) { 1715 if (has_branch_stack(event)) {
1698 /* PMU has BHRB enabled */ 1716 /* PMU has BHRB enabled */
1699 if (!(ppmu->flags & PPMU_BHRB)) 1717 if (!(ppmu->flags & PPMU_ARCH_207S))
1700 return -EOPNOTSUPP; 1718 return -EOPNOTSUPP;
1701 } 1719 }
1702 1720
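Besides collapsing the PPMU_EBB and PPMU_BHRB capability bits into the single PPMU_ARCH_207S flag (and zeroing MMCR2 when the PMU is re-enabled), the substantial hunk is the one in power_pmu_read(): period_left is now updated with a cmpxchg loop that clamps it to a minimum of 1, because the PMC gets programmed with 0x80000000 - period_left and an edge-detecting counter can wrap past zero before the exception is taken. Below is the same clamp pattern in portable C11; the kernel's local64_read()/local64_cmpxchg() are modelled with <stdatomic.h> purely for illustration.

#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>

/* Subtract delta from period_left, but never let it drop below 1. */
static void period_left_sub_clamped(_Atomic int64_t *period_left, int64_t delta)
{
	int64_t prev, val;

	do {
		prev = atomic_load(period_left);
		val = prev - delta;
		if (val < 1)
			val = 1;
	} while (!atomic_compare_exchange_weak(period_left, &prev, val));
}

int main(void)
{
	_Atomic int64_t left = 10;

	period_left_sub_clamped(&left, 3);
	assert(atomic_load(&left) == 7);

	period_left_sub_clamped(&left, 100);	/* would go negative: clamped to 1 */
	assert(atomic_load(&left) == 1);
	return 0;
}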
diff --git a/arch/powerpc/perf/power8-pmu.c b/arch/powerpc/perf/power8-pmu.c
index fe2763b6e039..639cd9156585 100644
--- a/arch/powerpc/perf/power8-pmu.c
+++ b/arch/powerpc/perf/power8-pmu.c
@@ -792,7 +792,7 @@ static struct power_pmu power8_pmu = {
792 .get_constraint = power8_get_constraint, 792 .get_constraint = power8_get_constraint,
793 .get_alternatives = power8_get_alternatives, 793 .get_alternatives = power8_get_alternatives,
794 .disable_pmc = power8_disable_pmc, 794 .disable_pmc = power8_disable_pmc,
795 .flags = PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_BHRB | PPMU_EBB, 795 .flags = PPMU_HAS_SSLOT | PPMU_HAS_SIER | PPMU_ARCH_207S,
796 .n_generic = ARRAY_SIZE(power8_generic_events), 796 .n_generic = ARRAY_SIZE(power8_generic_events),
797 .generic_events = power8_generic_events, 797 .generic_events = power8_generic_events,
798 .cache_events = &power8_cache_events, 798 .cache_events = &power8_cache_events,
diff --git a/arch/powerpc/platforms/cell/cbe_thermal.c b/arch/powerpc/platforms/cell/cbe_thermal.c
index 94560db788bf..2c15ff094483 100644
--- a/arch/powerpc/platforms/cell/cbe_thermal.c
+++ b/arch/powerpc/platforms/cell/cbe_thermal.c
@@ -125,7 +125,7 @@ static ssize_t show_throttle(struct cbe_pmd_regs __iomem *pmd_regs, char *buf, i
125static ssize_t store_throttle(struct cbe_pmd_regs __iomem *pmd_regs, const char *buf, size_t size, int pos) 125static ssize_t store_throttle(struct cbe_pmd_regs __iomem *pmd_regs, const char *buf, size_t size, int pos)
126{ 126{
127 u64 reg_value; 127 u64 reg_value;
128 int temp; 128 unsigned int temp;
129 u64 new_value; 129 u64 new_value;
130 int ret; 130 int ret;
131 131
diff --git a/arch/powerpc/platforms/cell/spu_syscalls.c b/arch/powerpc/platforms/cell/spu_syscalls.c
index 38e0a1a5cec3..5e6e0bad6db6 100644
--- a/arch/powerpc/platforms/cell/spu_syscalls.c
+++ b/arch/powerpc/platforms/cell/spu_syscalls.c
@@ -111,6 +111,7 @@ asmlinkage long sys_spu_run(int fd, __u32 __user *unpc, __u32 __user *ustatus)
111 return ret; 111 return ret;
112} 112}
113 113
114#ifdef CONFIG_COREDUMP
114int elf_coredump_extra_notes_size(void) 115int elf_coredump_extra_notes_size(void)
115{ 116{
116 struct spufs_calls *calls; 117 struct spufs_calls *calls;
@@ -142,6 +143,7 @@ int elf_coredump_extra_notes_write(struct coredump_params *cprm)
142 143
143 return ret; 144 return ret;
144} 145}
146#endif
145 147
146void notify_spus_active(void) 148void notify_spus_active(void)
147{ 149{
diff --git a/arch/powerpc/platforms/cell/spufs/Makefile b/arch/powerpc/platforms/cell/spufs/Makefile
index b9d5d678aa44..52a7d2596d30 100644
--- a/arch/powerpc/platforms/cell/spufs/Makefile
+++ b/arch/powerpc/platforms/cell/spufs/Makefile
@@ -1,8 +1,9 @@
1 1
2obj-$(CONFIG_SPU_FS) += spufs.o 2obj-$(CONFIG_SPU_FS) += spufs.o
3spufs-y += inode.o file.o context.o syscalls.o coredump.o 3spufs-y += inode.o file.o context.o syscalls.o
4spufs-y += sched.o backing_ops.o hw_ops.o run.o gang.o 4spufs-y += sched.o backing_ops.o hw_ops.o run.o gang.o
5spufs-y += switch.o fault.o lscsa_alloc.o 5spufs-y += switch.o fault.o lscsa_alloc.o
6spufs-$(CONFIG_COREDUMP) += coredump.o
6 7
7# magic for the trace events 8# magic for the trace events
8CFLAGS_sched.o := -I$(src) 9CFLAGS_sched.o := -I$(src)
diff --git a/arch/powerpc/platforms/cell/spufs/syscalls.c b/arch/powerpc/platforms/cell/spufs/syscalls.c
index b045fdda4845..a87200a535fa 100644
--- a/arch/powerpc/platforms/cell/spufs/syscalls.c
+++ b/arch/powerpc/platforms/cell/spufs/syscalls.c
@@ -79,8 +79,10 @@ static long do_spu_create(const char __user *pathname, unsigned int flags,
79struct spufs_calls spufs_calls = { 79struct spufs_calls spufs_calls = {
80 .create_thread = do_spu_create, 80 .create_thread = do_spu_create,
81 .spu_run = do_spu_run, 81 .spu_run = do_spu_run,
82 .coredump_extra_notes_size = spufs_coredump_extra_notes_size,
83 .coredump_extra_notes_write = spufs_coredump_extra_notes_write,
84 .notify_spus_active = do_notify_spus_active, 82 .notify_spus_active = do_notify_spus_active,
85 .owner = THIS_MODULE, 83 .owner = THIS_MODULE,
84#ifdef CONFIG_COREDUMP
85 .coredump_extra_notes_size = spufs_coredump_extra_notes_size,
86 .coredump_extra_notes_write = spufs_coredump_extra_notes_write,
87#endif
86}; 88};
diff --git a/arch/powerpc/platforms/powernv/Makefile b/arch/powerpc/platforms/powernv/Makefile
index d55891f89a2c..4ad227d04c1a 100644
--- a/arch/powerpc/platforms/powernv/Makefile
+++ b/arch/powerpc/platforms/powernv/Makefile
@@ -1,4 +1,4 @@
1obj-y += setup.o opal-takeover.o opal-wrappers.o opal.o opal-async.o 1obj-y += setup.o opal-wrappers.o opal.o opal-async.o
2obj-y += opal-rtc.o opal-nvram.o opal-lpc.o opal-flash.o 2obj-y += opal-rtc.o opal-nvram.o opal-lpc.o opal-flash.o
3obj-y += rng.o opal-elog.o opal-dump.o opal-sysparam.o opal-sensor.o 3obj-y += rng.o opal-elog.o opal-dump.o opal-sysparam.o opal-sensor.o
4obj-y += opal-msglog.o 4obj-y += opal-msglog.o
diff --git a/arch/powerpc/platforms/powernv/opal-takeover.S b/arch/powerpc/platforms/powernv/opal-takeover.S
deleted file mode 100644
index 11a3169ee583..000000000000
--- a/arch/powerpc/platforms/powernv/opal-takeover.S
+++ /dev/null
@@ -1,140 +0,0 @@
1/*
2 * PowerNV OPAL takeover assembly code, for use by prom_init.c
3 *
4 * Copyright 2011 IBM Corp.
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version
9 * 2 of the License, or (at your option) any later version.
10 */
11
12#include <asm/ppc_asm.h>
13#include <asm/hvcall.h>
14#include <asm/asm-offsets.h>
15#include <asm/opal.h>
16
17#define H_HAL_TAKEOVER 0x5124
18#define H_HAL_TAKEOVER_QUERY_MAGIC -1
19
20 .text
21_GLOBAL(opal_query_takeover)
22 mfcr r0
23 stw r0,8(r1)
24 stdu r1,-STACKFRAMESIZE(r1)
25 std r3,STK_PARAM(R3)(r1)
26 std r4,STK_PARAM(R4)(r1)
27 li r3,H_HAL_TAKEOVER
28 li r4,H_HAL_TAKEOVER_QUERY_MAGIC
29 HVSC
30 addi r1,r1,STACKFRAMESIZE
31 ld r10,STK_PARAM(R3)(r1)
32 std r4,0(r10)
33 ld r10,STK_PARAM(R4)(r1)
34 std r5,0(r10)
35 lwz r0,8(r1)
36 mtcrf 0xff,r0
37 blr
38
39_GLOBAL(opal_do_takeover)
40 mfcr r0
41 stw r0,8(r1)
42 mflr r0
43 std r0,16(r1)
44 bl __opal_do_takeover
45 ld r0,16(r1)
46 mtlr r0
47 lwz r0,8(r1)
48 mtcrf 0xff,r0
49 blr
50
51__opal_do_takeover:
52 ld r4,0(r3)
53 ld r5,0x8(r3)
54 ld r6,0x10(r3)
55 ld r7,0x18(r3)
56 ld r8,0x20(r3)
57 ld r9,0x28(r3)
58 ld r10,0x30(r3)
59 ld r11,0x38(r3)
60 li r3,H_HAL_TAKEOVER
61 HVSC
62 blr
63
64 .globl opal_secondary_entry
65opal_secondary_entry:
66 mr r31,r3
67 mfmsr r11
68 li r12,(MSR_SF | MSR_ISF)@highest
69 sldi r12,r12,48
70 or r11,r11,r12
71 mtmsrd r11
72 isync
73 mfspr r4,SPRN_PIR
74 std r4,0(r3)
751: HMT_LOW
76 ld r4,8(r3)
77 cmpli cr0,r4,0
78 beq 1b
79 HMT_MEDIUM
801: addi r3,r31,16
81 bl __opal_do_takeover
82 b 1b
83
84_GLOBAL(opal_enter_rtas)
85 mflr r0
86 std r0,16(r1)
87 stdu r1,-PROM_FRAME_SIZE(r1) /* Save SP and create stack space */
88
89 /* Because PROM is running in 32b mode, it clobbers the high order half
90 * of all registers that it saves. We therefore save those registers
91 * PROM might touch to the stack. (r0, r3-r13 are caller saved)
92 */
93 SAVE_GPR(2, r1)
94 SAVE_GPR(13, r1)
95 SAVE_8GPRS(14, r1)
96 SAVE_10GPRS(22, r1)
97 mfcr r10
98 mfmsr r11
99 std r10,_CCR(r1)
100 std r11,_MSR(r1)
101
102 /* Get the PROM entrypoint */
103 mtlr r5
104
105 /* Switch MSR to 32 bits mode
106 */
107 li r12,1
108 rldicr r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
109 andc r11,r11,r12
110 li r12,1
111 rldicr r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
112 andc r11,r11,r12
113 mtmsrd r11
114 isync
115
116 /* Enter RTAS here... */
117 blrl
118
119 /* Just make sure that r1 top 32 bits didn't get
120 * corrupt by OF
121 */
122 rldicl r1,r1,0,32
123
124 /* Restore the MSR (back to 64 bits) */
125 ld r0,_MSR(r1)
126 MTMSRD(r0)
127 isync
128
129 /* Restore other registers */
130 REST_GPR(2, r1)
131 REST_GPR(13, r1)
132 REST_8GPRS(14, r1)
133 REST_10GPRS(22, r1)
134 ld r4,_CCR(r1)
135 mtcr r4
136
137 addi r1,r1,PROM_FRAME_SIZE
138 ld r0,16(r1)
139 mtlr r0
140 blr
diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c
index 62c47bb76517..9e5353ff6d1b 100644
--- a/arch/powerpc/sysdev/dart_iommu.c
+++ b/arch/powerpc/sysdev/dart_iommu.c
@@ -476,6 +476,11 @@ void __init alloc_dart_table(void)
476 */ 476 */
477 dart_tablebase = (unsigned long) 477 dart_tablebase = (unsigned long)
478 __va(memblock_alloc_base(1UL<<24, 1UL<<24, 0x80000000L)); 478 __va(memblock_alloc_base(1UL<<24, 1UL<<24, 0x80000000L));
479 /*
480 * The DART space is later unmapped from the kernel linear mapping and
481 * accessing dart_tablebase during kmemleak scanning will fault.
482 */
483 kmemleak_no_scan((void *)dart_tablebase);
479 484
480 printk(KERN_INFO "DART table allocated at: %lx\n", dart_tablebase); 485 printk(KERN_INFO "DART table allocated at: %lx\n", dart_tablebase);
481} 486}
diff --git a/arch/s390/include/uapi/asm/Kbuild b/arch/s390/include/uapi/asm/Kbuild
index 6a9a9eb645f5..736637363d31 100644
--- a/arch/s390/include/uapi/asm/Kbuild
+++ b/arch/s390/include/uapi/asm/Kbuild
@@ -36,6 +36,7 @@ header-y += signal.h
36header-y += socket.h 36header-y += socket.h
37header-y += sockios.h 37header-y += sockios.h
38header-y += sclp_ctl.h 38header-y += sclp_ctl.h
39header-y += sie.h
39header-y += stat.h 40header-y += stat.h
40header-y += statfs.h 41header-y += statfs.h
41header-y += swab.h 42header-y += swab.h
diff --git a/arch/s390/include/uapi/asm/sie.h b/arch/s390/include/uapi/asm/sie.h
index 3d97f610198d..5d9cc19462c4 100644
--- a/arch/s390/include/uapi/asm/sie.h
+++ b/arch/s390/include/uapi/asm/sie.h
@@ -1,8 +1,6 @@
1#ifndef _UAPI_ASM_S390_SIE_H 1#ifndef _UAPI_ASM_S390_SIE_H
2#define _UAPI_ASM_S390_SIE_H 2#define _UAPI_ASM_S390_SIE_H
3 3
4#include <asm/sigp.h>
5
6#define diagnose_codes \ 4#define diagnose_codes \
7 { 0x10, "DIAG (0x10) release pages" }, \ 5 { 0x10, "DIAG (0x10) release pages" }, \
8 { 0x44, "DIAG (0x44) time slice end" }, \ 6 { 0x44, "DIAG (0x44) time slice end" }, \
@@ -13,18 +11,18 @@
13 { 0x500, "DIAG (0x500) KVM virtio functions" }, \ 11 { 0x500, "DIAG (0x500) KVM virtio functions" }, \
14 { 0x501, "DIAG (0x501) KVM breakpoint" } 12 { 0x501, "DIAG (0x501) KVM breakpoint" }
15 13
16#define sigp_order_codes \ 14#define sigp_order_codes \
17 { SIGP_SENSE, "SIGP sense" }, \ 15 { 0x01, "SIGP sense" }, \
18 { SIGP_EXTERNAL_CALL, "SIGP external call" }, \ 16 { 0x02, "SIGP external call" }, \
19 { SIGP_EMERGENCY_SIGNAL, "SIGP emergency signal" }, \ 17 { 0x03, "SIGP emergency signal" }, \
20 { SIGP_STOP, "SIGP stop" }, \ 18 { 0x05, "SIGP stop" }, \
21 { SIGP_STOP_AND_STORE_STATUS, "SIGP stop and store status" }, \ 19 { 0x06, "SIGP restart" }, \
22 { SIGP_SET_ARCHITECTURE, "SIGP set architecture" }, \ 20 { 0x09, "SIGP stop and store status" }, \
23 { SIGP_SET_PREFIX, "SIGP set prefix" }, \ 21 { 0x0b, "SIGP initial cpu reset" }, \
24 { SIGP_SENSE_RUNNING, "SIGP sense running" }, \ 22 { 0x0d, "SIGP set prefix" }, \
25 { SIGP_RESTART, "SIGP restart" }, \ 23 { 0x0e, "SIGP store status at address" }, \
26 { SIGP_INITIAL_CPU_RESET, "SIGP initial cpu reset" }, \ 24 { 0x12, "SIGP set architecture" }, \
27 { SIGP_STORE_STATUS_AT_ADDRESS, "SIGP store status at address" } 25 { 0x15, "SIGP sense running" }
28 26
29#define icpt_prog_codes \ 27#define icpt_prog_codes \
30 { 0x0001, "Prog Operation" }, \ 28 { 0x0001, "Prog Operation" }, \
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 29f2e988c56a..407c87d9879a 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -78,6 +78,7 @@ config SPARC64
78 select HAVE_C_RECORDMCOUNT 78 select HAVE_C_RECORDMCOUNT
79 select NO_BOOTMEM 79 select NO_BOOTMEM
80 select HAVE_ARCH_AUDITSYSCALL 80 select HAVE_ARCH_AUDITSYSCALL
81 select ARCH_SUPPORTS_ATOMIC_RMW
81 82
82config ARCH_DEFCONFIG 83config ARCH_DEFCONFIG
83 string 84 string
diff --git a/arch/sparc/include/asm/irq_64.h b/arch/sparc/include/asm/irq_64.h
index 375cffcf7dbd..91d219381306 100644
--- a/arch/sparc/include/asm/irq_64.h
+++ b/arch/sparc/include/asm/irq_64.h
@@ -89,7 +89,7 @@ static inline unsigned long get_softint(void)
89 return retval; 89 return retval;
90} 90}
91 91
92void arch_trigger_all_cpu_backtrace(void); 92void arch_trigger_all_cpu_backtrace(bool);
93#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace 93#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
94 94
95extern void *hardirq_stack[NR_CPUS]; 95extern void *hardirq_stack[NR_CPUS];
diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
index b2988f25e230..027e09986194 100644
--- a/arch/sparc/kernel/process_64.c
+++ b/arch/sparc/kernel/process_64.c
@@ -239,7 +239,7 @@ static void __global_reg_poll(struct global_reg_snapshot *gp)
239 } 239 }
240} 240}
241 241
242void arch_trigger_all_cpu_backtrace(void) 242void arch_trigger_all_cpu_backtrace(bool include_self)
243{ 243{
244 struct thread_info *tp = current_thread_info(); 244 struct thread_info *tp = current_thread_info();
245 struct pt_regs *regs = get_irq_regs(); 245 struct pt_regs *regs = get_irq_regs();
@@ -251,16 +251,22 @@ void arch_trigger_all_cpu_backtrace(void)
251 251
252 spin_lock_irqsave(&global_cpu_snapshot_lock, flags); 252 spin_lock_irqsave(&global_cpu_snapshot_lock, flags);
253 253
254 memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
255
256 this_cpu = raw_smp_processor_id(); 254 this_cpu = raw_smp_processor_id();
257 255
258 __global_reg_self(tp, regs, this_cpu); 256 memset(global_cpu_snapshot, 0, sizeof(global_cpu_snapshot));
257
258 if (include_self)
259 __global_reg_self(tp, regs, this_cpu);
259 260
260 smp_fetch_global_regs(); 261 smp_fetch_global_regs();
261 262
262 for_each_online_cpu(cpu) { 263 for_each_online_cpu(cpu) {
263 struct global_reg_snapshot *gp = &global_cpu_snapshot[cpu].reg; 264 struct global_reg_snapshot *gp;
265
266 if (!include_self && cpu == this_cpu)
267 continue;
268
269 gp = &global_cpu_snapshot[cpu].reg;
264 270
265 __global_reg_poll(gp); 271 __global_reg_poll(gp);
266 272
@@ -292,7 +298,7 @@ void arch_trigger_all_cpu_backtrace(void)
292 298
293static void sysrq_handle_globreg(int key) 299static void sysrq_handle_globreg(int key)
294{ 300{
295 arch_trigger_all_cpu_backtrace(); 301 arch_trigger_all_cpu_backtrace(true);
296} 302}
297 303
298static struct sysrq_key_op sparc_globalreg_op = { 304static struct sysrq_key_op sparc_globalreg_op = {
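arch_trigger_all_cpu_backtrace() gains a bool include_self parameter (the x86 declaration further down changes the same way), letting callers such as the NMI watchdog dump every other CPU without repeating the backtrace of the CPU that initiated the dump. On sparc that means the register snapshot for the current CPU is taken only when requested, and the print loop skips the current CPU otherwise. The control flow, reduced to a runnable toy where strings stand in for the real per-cpu register captures:

#include <stdbool.h>
#include <stdio.h>

#define NCPUS 4

static void trigger_all_cpu_backtrace(bool include_self, int this_cpu)
{
	char snapshot[NCPUS][16] = { "" };

	if (include_self)
		snprintf(snapshot[this_cpu], sizeof(snapshot[0]), "cpu%d*", this_cpu);

	/* The other CPUs would fill their slots from an IPI/NMI handler;
	 * faked here so the example stays self-contained. */
	for (int cpu = 0; cpu < NCPUS; cpu++)
		if (cpu != this_cpu)
			snprintf(snapshot[cpu], sizeof(snapshot[0]), "cpu%d", cpu);

	for (int cpu = 0; cpu < NCPUS; cpu++) {
		if (!include_self && cpu == this_cpu)
			continue;	/* caller asked not to dump itself */
		printf("backtrace of %s\n", snapshot[cpu]);
	}
}

int main(void)
{
	trigger_all_cpu_backtrace(true, 1);	/* dumps cpu0..cpu3, cpu1 marked as self */
	trigger_all_cpu_backtrace(false, 1);	/* dumps everyone except cpu1 */
	return 0;
}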
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index 9472079471bb..f1b3eb14b855 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -12,6 +12,7 @@
12#include <mem_user.h> 12#include <mem_user.h>
13#include <os.h> 13#include <os.h>
14#include <skas.h> 14#include <skas.h>
15#include <kern_util.h>
15 16
16struct host_vm_change { 17struct host_vm_change {
17 struct host_vm_op { 18 struct host_vm_op {
@@ -124,6 +125,9 @@ static int add_munmap(unsigned long addr, unsigned long len,
124 struct host_vm_op *last; 125 struct host_vm_op *last;
125 int ret = 0; 126 int ret = 0;
126 127
128 if ((addr >= STUB_START) && (addr < STUB_END))
129 return -EINVAL;
130
127 if (hvc->index != 0) { 131 if (hvc->index != 0) {
128 last = &hvc->ops[hvc->index - 1]; 132 last = &hvc->ops[hvc->index - 1];
129 if ((last->type == MUNMAP) && 133 if ((last->type == MUNMAP) &&
@@ -283,8 +287,11 @@ void fix_range_common(struct mm_struct *mm, unsigned long start_addr,
283 /* This is not an else because ret is modified above */ 287 /* This is not an else because ret is modified above */
284 if (ret) { 288 if (ret) {
285 printk(KERN_ERR "fix_range_common: failed, killing current " 289 printk(KERN_ERR "fix_range_common: failed, killing current "
286 "process\n"); 290 "process: %d\n", task_tgid_vnr(current));
291 /* We are under mmap_sem, release it such that current can terminate */
292 up_write(&current->mm->mmap_sem);
287 force_sig(SIGKILL, current); 293 force_sig(SIGKILL, current);
294 do_signal();
288 } 295 }
289} 296}
290 297
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index 974b87474a99..5678c3571e7c 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -206,7 +206,7 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user,
206 int is_write = FAULT_WRITE(fi); 206 int is_write = FAULT_WRITE(fi);
207 unsigned long address = FAULT_ADDRESS(fi); 207 unsigned long address = FAULT_ADDRESS(fi);
208 208
209 if (regs) 209 if (!is_user && regs)
210 current->thread.segv_regs = container_of(regs, struct pt_regs, regs); 210 current->thread.segv_regs = container_of(regs, struct pt_regs, regs);
211 211
212 if (!is_user && (address >= start_vm) && (address < end_vm)) { 212 if (!is_user && (address >= start_vm) && (address < end_vm)) {
diff --git a/arch/um/os-Linux/skas/process.c b/arch/um/os-Linux/skas/process.c
index d531879a4617..908579f2b0ab 100644
--- a/arch/um/os-Linux/skas/process.c
+++ b/arch/um/os-Linux/skas/process.c
@@ -54,7 +54,7 @@ static int ptrace_dump_regs(int pid)
54 54
55void wait_stub_done(int pid) 55void wait_stub_done(int pid)
56{ 56{
57 int n, status, err, bad_stop = 0; 57 int n, status, err;
58 58
59 while (1) { 59 while (1) {
60 CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED | __WALL)); 60 CATCH_EINTR(n = waitpid(pid, &status, WUNTRACED | __WALL));
@@ -74,8 +74,6 @@ void wait_stub_done(int pid)
74 74
75 if (((1 << WSTOPSIG(status)) & STUB_DONE_MASK) != 0) 75 if (((1 << WSTOPSIG(status)) & STUB_DONE_MASK) != 0)
76 return; 76 return;
77 else
78 bad_stop = 1;
79 77
80bad_wait: 78bad_wait:
81 err = ptrace_dump_regs(pid); 79 err = ptrace_dump_regs(pid);
@@ -85,10 +83,7 @@ bad_wait:
85 printk(UM_KERN_ERR "wait_stub_done : failed to wait for SIGTRAP, " 83 printk(UM_KERN_ERR "wait_stub_done : failed to wait for SIGTRAP, "
86 "pid = %d, n = %d, errno = %d, status = 0x%x\n", pid, n, errno, 84 "pid = %d, n = %d, errno = %d, status = 0x%x\n", pid, n, errno,
87 status); 85 status);
88 if (bad_stop) 86 fatal_sigsegv();
89 kill(pid, SIGKILL);
90 else
91 fatal_sigsegv();
92} 87}
93 88
94extern unsigned long current_stub_stack(void); 89extern unsigned long current_stub_stack(void);
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index a8f749ef0fdc..43873442dee1 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -131,6 +131,9 @@ config X86
131 select HAVE_CC_STACKPROTECTOR 131 select HAVE_CC_STACKPROTECTOR
132 select GENERIC_CPU_AUTOPROBE 132 select GENERIC_CPU_AUTOPROBE
133 select HAVE_ARCH_AUDITSYSCALL 133 select HAVE_ARCH_AUDITSYSCALL
134 select ARCH_SUPPORTS_ATOMIC_RMW
135 select HAVE_ACPI_APEI if ACPI
136 select HAVE_ACPI_APEI_NMI if ACPI
134 137
135config INSTRUCTION_DECODER 138config INSTRUCTION_DECODER
136 def_bool y 139 def_bool y
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
index 84c223479e3c..7a6d43a554d7 100644
--- a/arch/x86/boot/header.S
+++ b/arch/x86/boot/header.S
@@ -91,10 +91,9 @@ bs_die:
91 91
92 .section ".bsdata", "a" 92 .section ".bsdata", "a"
93bugger_off_msg: 93bugger_off_msg:
94 .ascii "Direct floppy boot is not supported. " 94 .ascii "Use a boot loader.\r\n"
95 .ascii "Use a boot loader program instead.\r\n"
96 .ascii "\n" 95 .ascii "\n"
97 .ascii "Remove disk and press any key to reboot ...\r\n" 96 .ascii "Remove disk and press any key to reboot...\r\n"
98 .byte 0 97 .byte 0
99 98
100#ifdef CONFIG_EFI_STUB 99#ifdef CONFIG_EFI_STUB
@@ -108,7 +107,7 @@ coff_header:
108#else 107#else
109 .word 0x8664 # x86-64 108 .word 0x8664 # x86-64
110#endif 109#endif
111 .word 3 # nr_sections 110 .word 4 # nr_sections
112 .long 0 # TimeDateStamp 111 .long 0 # TimeDateStamp
113 .long 0 # PointerToSymbolTable 112 .long 0 # PointerToSymbolTable
114 .long 1 # NumberOfSymbols 113 .long 1 # NumberOfSymbols
@@ -250,6 +249,25 @@ section_table:
250 .word 0 # NumberOfLineNumbers 249 .word 0 # NumberOfLineNumbers
251 .long 0x60500020 # Characteristics (section flags) 250 .long 0x60500020 # Characteristics (section flags)
252 251
252 #
253 # The offset & size fields are filled in by build.c.
254 #
255 .ascii ".bss"
256 .byte 0
257 .byte 0
258 .byte 0
259 .byte 0
260 .long 0
261 .long 0x0
262 .long 0 # Size of initialized data
263 # on disk
264 .long 0x0
265 .long 0 # PointerToRelocations
266 .long 0 # PointerToLineNumbers
267 .word 0 # NumberOfRelocations
268 .word 0 # NumberOfLineNumbers
269 .long 0xc8000080 # Characteristics (section flags)
270
253#endif /* CONFIG_EFI_STUB */ 271#endif /* CONFIG_EFI_STUB */
254 272
255 # Kernel attributes; used by setup. This is part 1 of the 273 # Kernel attributes; used by setup. This is part 1 of the
diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c
index 1a2f2121cada..a7661c430cd9 100644
--- a/arch/x86/boot/tools/build.c
+++ b/arch/x86/boot/tools/build.c
@@ -143,7 +143,7 @@ static void usage(void)
143 143
144#ifdef CONFIG_EFI_STUB 144#ifdef CONFIG_EFI_STUB
145 145
146static void update_pecoff_section_header(char *section_name, u32 offset, u32 size) 146static void update_pecoff_section_header_fields(char *section_name, u32 vma, u32 size, u32 datasz, u32 offset)
147{ 147{
148 unsigned int pe_header; 148 unsigned int pe_header;
149 unsigned short num_sections; 149 unsigned short num_sections;
@@ -164,10 +164,10 @@ static void update_pecoff_section_header(char *section_name, u32 offset, u32 siz
164 put_unaligned_le32(size, section + 0x8); 164 put_unaligned_le32(size, section + 0x8);
165 165
166 /* section header vma field */ 166 /* section header vma field */
167 put_unaligned_le32(offset, section + 0xc); 167 put_unaligned_le32(vma, section + 0xc);
168 168
169 /* section header 'size of initialised data' field */ 169 /* section header 'size of initialised data' field */
170 put_unaligned_le32(size, section + 0x10); 170 put_unaligned_le32(datasz, section + 0x10);
171 171
172 /* section header 'file offset' field */ 172 /* section header 'file offset' field */
173 put_unaligned_le32(offset, section + 0x14); 173 put_unaligned_le32(offset, section + 0x14);
@@ -179,6 +179,11 @@ static void update_pecoff_section_header(char *section_name, u32 offset, u32 siz
179 } 179 }
180} 180}
181 181
182static void update_pecoff_section_header(char *section_name, u32 offset, u32 size)
183{
184 update_pecoff_section_header_fields(section_name, offset, size, size, offset);
185}
186
182static void update_pecoff_setup_and_reloc(unsigned int size) 187static void update_pecoff_setup_and_reloc(unsigned int size)
183{ 188{
184 u32 setup_offset = 0x200; 189 u32 setup_offset = 0x200;
@@ -203,9 +208,6 @@ static void update_pecoff_text(unsigned int text_start, unsigned int file_sz)
203 208
204 pe_header = get_unaligned_le32(&buf[0x3c]); 209 pe_header = get_unaligned_le32(&buf[0x3c]);
205 210
206 /* Size of image */
207 put_unaligned_le32(file_sz, &buf[pe_header + 0x50]);
208
209 /* 211 /*
210 * Size of code: Subtract the size of the first sector (512 bytes) 212 * Size of code: Subtract the size of the first sector (512 bytes)
211 * which includes the header. 213 * which includes the header.
@@ -220,6 +222,22 @@ static void update_pecoff_text(unsigned int text_start, unsigned int file_sz)
220 update_pecoff_section_header(".text", text_start, text_sz); 222 update_pecoff_section_header(".text", text_start, text_sz);
221} 223}
222 224
225static void update_pecoff_bss(unsigned int file_sz, unsigned int init_sz)
226{
227 unsigned int pe_header;
228 unsigned int bss_sz = init_sz - file_sz;
229
230 pe_header = get_unaligned_le32(&buf[0x3c]);
231
232 /* Size of uninitialized data */
233 put_unaligned_le32(bss_sz, &buf[pe_header + 0x24]);
234
235 /* Size of image */
236 put_unaligned_le32(init_sz, &buf[pe_header + 0x50]);
237
238 update_pecoff_section_header_fields(".bss", file_sz, bss_sz, 0, 0);
239}
240
223static int reserve_pecoff_reloc_section(int c) 241static int reserve_pecoff_reloc_section(int c)
224{ 242{
225 /* Reserve 0x20 bytes for .reloc section */ 243 /* Reserve 0x20 bytes for .reloc section */
@@ -259,6 +277,8 @@ static void efi_stub_entry_update(void)
259static inline void update_pecoff_setup_and_reloc(unsigned int size) {} 277static inline void update_pecoff_setup_and_reloc(unsigned int size) {}
260static inline void update_pecoff_text(unsigned int text_start, 278static inline void update_pecoff_text(unsigned int text_start,
261 unsigned int file_sz) {} 279 unsigned int file_sz) {}
280static inline void update_pecoff_bss(unsigned int file_sz,
281 unsigned int init_sz) {}
262static inline void efi_stub_defaults(void) {} 282static inline void efi_stub_defaults(void) {}
263static inline void efi_stub_entry_update(void) {} 283static inline void efi_stub_entry_update(void) {}
264 284
@@ -310,7 +330,7 @@ static void parse_zoffset(char *fname)
310 330
311int main(int argc, char ** argv) 331int main(int argc, char ** argv)
312{ 332{
313 unsigned int i, sz, setup_sectors; 333 unsigned int i, sz, setup_sectors, init_sz;
314 int c; 334 int c;
315 u32 sys_size; 335 u32 sys_size;
316 struct stat sb; 336 struct stat sb;
@@ -376,7 +396,9 @@ int main(int argc, char ** argv)
376 buf[0x1f1] = setup_sectors-1; 396 buf[0x1f1] = setup_sectors-1;
377 put_unaligned_le32(sys_size, &buf[0x1f4]); 397 put_unaligned_le32(sys_size, &buf[0x1f4]);
378 398
379 update_pecoff_text(setup_sectors * 512, sz + i + ((sys_size * 16) - sz)); 399 update_pecoff_text(setup_sectors * 512, i + (sys_size * 16));
400 init_sz = get_unaligned_le32(&buf[0x260]);
401 update_pecoff_bss(i + (sys_size * 16), init_sz);
380 402
381 efi_stub_entry_update(); 403 efi_stub_entry_update();
382 404
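Paired with the fourth section header added to header.S (nr_sections goes from 3 to 4), build.c now fills in a real .bss entry: its virtual size is init_sz - file_sz with zero raw data, SizeOfImage is set to init_sz — the kernel's in-memory footprint, read from the boot protocol's init_size field at offset 0x260 — instead of the on-disk size, and update_pecoff_text() no longer writes SizeOfImage itself. The runnable sketch below pokes the same two optional-header fields, SizeOfUninitializedData at pe_header + 0x24 and SizeOfImage at pe_header + 0x50, with made-up sizes and a made-up pe_header offset.

#include <assert.h>
#include <stdint.h>

/* Helper matching build.c's put_unaligned_le32(): PE/COFF fields are
 * little-endian regardless of host byte order. */
static void put_le32(uint32_t v, uint8_t *p)
{
	p[0] = v; p[1] = v >> 8; p[2] = v >> 16; p[3] = v >> 24;
}

/* Illustration of update_pecoff_bss(): record the gap between the on-disk
 * size and the larger in-memory size as uninitialized data, and advertise
 * the full in-memory image size. */
static void update_bss_fields(uint8_t *buf, uint32_t pe_header,
			      uint32_t file_sz, uint32_t init_sz)
{
	uint32_t bss_sz = init_sz - file_sz;

	put_le32(bss_sz, &buf[pe_header + 0x24]);	/* SizeOfUninitializedData */
	put_le32(init_sz, &buf[pe_header + 0x50]);	/* SizeOfImage */
}

int main(void)
{
	uint8_t image[0x100] = { 0 };

	update_bss_fields(image, 0x40, 0x8000, 0xC000);
	assert(image[0x40 + 0x24] == 0x00 && image[0x40 + 0x25] == 0x40);	/* bss 0x4000 */
	assert(image[0x40 + 0x50] == 0x00 && image[0x40 + 0x51] == 0xC0);	/* image 0xC000 */
	return 0;
}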
diff --git a/arch/x86/crypto/sha512_ssse3_glue.c b/arch/x86/crypto/sha512_ssse3_glue.c
index f30cd10293f0..8626b03e83b7 100644
--- a/arch/x86/crypto/sha512_ssse3_glue.c
+++ b/arch/x86/crypto/sha512_ssse3_glue.c
@@ -141,7 +141,7 @@ static int sha512_ssse3_final(struct shash_desc *desc, u8 *out)
141 141
142 /* save number of bits */ 142 /* save number of bits */
143 bits[1] = cpu_to_be64(sctx->count[0] << 3); 143 bits[1] = cpu_to_be64(sctx->count[0] << 3);
144 bits[0] = cpu_to_be64(sctx->count[1] << 3) | sctx->count[0] >> 61; 144 bits[0] = cpu_to_be64(sctx->count[1] << 3 | sctx->count[0] >> 61);
145 145
146 /* Pad out to 112 mod 128 and append length */ 146 /* Pad out to 112 mod 128 and append length */
147 index = sctx->count[0] & 0x7f; 147 index = sctx->count[0] & 0x7f;
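The sha512 change is pure operator precedence: the carry bits from count[0] must be OR-ed into the high word before the big-endian conversion, not after. A stand-alone sketch of the 128-bit bit-length arithmetic (host byte order only, counter values made up):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* byte counters: low word, high word */
	uint64_t count[2] = { 0x3000000000000001ULL, 0x1ULL };

	/* total message length in bits = count * 8, as a 128-bit value */
	uint64_t bits_hi = count[1] << 3 | count[0] >> 61;
	uint64_t bits_lo = count[0] << 3;

	printf("bit length = %#llx:%#llx\n",
	       (unsigned long long)bits_hi, (unsigned long long)bits_lo);
	return 0;
}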
diff --git a/arch/x86/include/asm/irq.h b/arch/x86/include/asm/irq.h
index cb6cfcd034cf..a80cbb88ea91 100644
--- a/arch/x86/include/asm/irq.h
+++ b/arch/x86/include/asm/irq.h
@@ -43,7 +43,7 @@ extern int vector_used_by_percpu_irq(unsigned int vector);
43extern void init_ISA_irqs(void); 43extern void init_ISA_irqs(void);
44 44
45#ifdef CONFIG_X86_LOCAL_APIC 45#ifdef CONFIG_X86_LOCAL_APIC
46void arch_trigger_all_cpu_backtrace(void); 46void arch_trigger_all_cpu_backtrace(bool);
47#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace 47#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
48#endif 48#endif
49 49
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 49314155b66c..49205d01b9ad 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -95,7 +95,7 @@ static inline gfn_t gfn_to_index(gfn_t gfn, gfn_t base_gfn, int level)
95#define KVM_REFILL_PAGES 25 95#define KVM_REFILL_PAGES 25
96#define KVM_MAX_CPUID_ENTRIES 80 96#define KVM_MAX_CPUID_ENTRIES 80
97#define KVM_NR_FIXED_MTRR_REGION 88 97#define KVM_NR_FIXED_MTRR_REGION 88
98#define KVM_NR_VAR_MTRR 8 98#define KVM_NR_VAR_MTRR 10
99 99
100#define ASYNC_PF_PER_VCPU 64 100#define ASYNC_PF_PER_VCPU 64
101 101
@@ -461,7 +461,7 @@ struct kvm_vcpu_arch {
461 bool nmi_injected; /* Trying to inject an NMI this entry */ 461 bool nmi_injected; /* Trying to inject an NMI this entry */
462 462
463 struct mtrr_state_type mtrr_state; 463 struct mtrr_state_type mtrr_state;
464 u32 pat; 464 u64 pat;
465 465
466 unsigned switch_db_regs; 466 unsigned switch_db_regs;
467 unsigned long db[KVM_NR_DB_REGS]; 467 unsigned long db[KVM_NR_DB_REGS];
diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
index 14fd6fd75a19..6205f0c434db 100644
--- a/arch/x86/include/asm/ptrace.h
+++ b/arch/x86/include/asm/ptrace.h
@@ -231,6 +231,22 @@ static inline unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
231 231
232#define ARCH_HAS_USER_SINGLE_STEP_INFO 232#define ARCH_HAS_USER_SINGLE_STEP_INFO
233 233
234/*
235 * When hitting ptrace_stop(), we cannot return using SYSRET because
236 * that does not restore the full CPU state, only a minimal set. The
237 * ptracer can change arbitrary register values, which is usually okay
238 * because the usual ptrace stops run off the signal delivery path which
239 * forces IRET; however, ptrace_event() stops happen in arbitrary places
240 * in the kernel and don't force IRET path.
241 *
242 * So force IRET path after a ptrace stop.
243 */
244#define arch_ptrace_stop_needed(code, info) \
245({ \
246 set_thread_flag(TIF_NOTIFY_RESUME); \
247 false; \
248})
249
234struct user_desc; 250struct user_desc;
235extern int do_get_thread_area(struct task_struct *p, int idx, 251extern int do_get_thread_area(struct task_struct *p, int idx,
236 struct user_desc __user *info); 252 struct user_desc __user *info);
diff --git a/arch/x86/kernel/acpi/Makefile b/arch/x86/kernel/acpi/Makefile
index 163b22581472..3242e591fa82 100644
--- a/arch/x86/kernel/acpi/Makefile
+++ b/arch/x86/kernel/acpi/Makefile
@@ -1,5 +1,6 @@
1obj-$(CONFIG_ACPI) += boot.o 1obj-$(CONFIG_ACPI) += boot.o
2obj-$(CONFIG_ACPI_SLEEP) += sleep.o wakeup_$(BITS).o 2obj-$(CONFIG_ACPI_SLEEP) += sleep.o wakeup_$(BITS).o
3obj-$(CONFIG_ACPI_APEI) += apei.o
3 4
4ifneq ($(CONFIG_ACPI_PROCESSOR),) 5ifneq ($(CONFIG_ACPI_PROCESSOR),)
5obj-y += cstate.o 6obj-y += cstate.o
diff --git a/arch/x86/kernel/acpi/apei.c b/arch/x86/kernel/acpi/apei.c
new file mode 100644
index 000000000000..c280df6b2aa2
--- /dev/null
+++ b/arch/x86/kernel/acpi/apei.c
@@ -0,0 +1,62 @@
1/*
2 * Arch-specific APEI-related functions.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 */
14
15#include <acpi/apei.h>
16
17#include <asm/mce.h>
18#include <asm/tlbflush.h>
19
20int arch_apei_enable_cmcff(struct acpi_hest_header *hest_hdr, void *data)
21{
22#ifdef CONFIG_X86_MCE
23 int i;
24 struct acpi_hest_ia_corrected *cmc;
25 struct acpi_hest_ia_error_bank *mc_bank;
26
27 if (hest_hdr->type != ACPI_HEST_TYPE_IA32_CORRECTED_CHECK)
28 return 0;
29
30 cmc = (struct acpi_hest_ia_corrected *)hest_hdr;
31 if (!cmc->enabled)
32 return 0;
33
34 /*
35 * We expect HEST to provide a list of MC banks that report errors
36 * in firmware first mode. Otherwise, return non-zero value to
37 * indicate that we are done parsing HEST.
38 */
39 if (!(cmc->flags & ACPI_HEST_FIRMWARE_FIRST) ||
40 !cmc->num_hardware_banks)
41 return 1;
42
43 pr_info("HEST: Enabling Firmware First mode for corrected errors.\n");
44
45 mc_bank = (struct acpi_hest_ia_error_bank *)(cmc + 1);
46 for (i = 0; i < cmc->num_hardware_banks; i++, mc_bank++)
47 mce_disable_bank(mc_bank->bank_number);
48#endif
49 return 1;
50}
51
52void arch_apei_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
53{
54#ifdef CONFIG_X86_MCE
55 apei_mce_report_mem_error(sev, mem_err);
56#endif
57}
58
59void arch_apei_flush_tlb_one(unsigned long addr)
60{
61 __flush_tlb_one(addr);
62}
diff --git a/arch/x86/kernel/apic/hw_nmi.c b/arch/x86/kernel/apic/hw_nmi.c
index c3fcb5de5083..6a1e71bde323 100644
--- a/arch/x86/kernel/apic/hw_nmi.c
+++ b/arch/x86/kernel/apic/hw_nmi.c
@@ -33,31 +33,41 @@ static DECLARE_BITMAP(backtrace_mask, NR_CPUS) __read_mostly;
33/* "in progress" flag of arch_trigger_all_cpu_backtrace */ 33/* "in progress" flag of arch_trigger_all_cpu_backtrace */
34static unsigned long backtrace_flag; 34static unsigned long backtrace_flag;
35 35
36void arch_trigger_all_cpu_backtrace(void) 36void arch_trigger_all_cpu_backtrace(bool include_self)
37{ 37{
38 int i; 38 int i;
39 int cpu = get_cpu();
39 40
40 if (test_and_set_bit(0, &backtrace_flag)) 41 if (test_and_set_bit(0, &backtrace_flag)) {
41 /* 42 /*
42 * If there is already a trigger_all_cpu_backtrace() in progress 43 * If there is already a trigger_all_cpu_backtrace() in progress
43 * (backtrace_flag == 1), don't output double cpu dump infos. 44 * (backtrace_flag == 1), don't output double cpu dump infos.
44 */ 45 */
46 put_cpu();
45 return; 47 return;
48 }
46 49
47 cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask); 50 cpumask_copy(to_cpumask(backtrace_mask), cpu_online_mask);
51 if (!include_self)
52 cpumask_clear_cpu(cpu, to_cpumask(backtrace_mask));
48 53
49 printk(KERN_INFO "sending NMI to all CPUs:\n"); 54 if (!cpumask_empty(to_cpumask(backtrace_mask))) {
50 apic->send_IPI_all(NMI_VECTOR); 55 pr_info("sending NMI to %s CPUs:\n",
56 (include_self ? "all" : "other"));
57 apic->send_IPI_mask(to_cpumask(backtrace_mask), NMI_VECTOR);
58 }
51 59
52 /* Wait for up to 10 seconds for all CPUs to do the backtrace */ 60 /* Wait for up to 10 seconds for all CPUs to do the backtrace */
53 for (i = 0; i < 10 * 1000; i++) { 61 for (i = 0; i < 10 * 1000; i++) {
54 if (cpumask_empty(to_cpumask(backtrace_mask))) 62 if (cpumask_empty(to_cpumask(backtrace_mask)))
55 break; 63 break;
56 mdelay(1); 64 mdelay(1);
65 touch_softlockup_watchdog();
57 } 66 }
58 67
59 clear_bit(0, &backtrace_flag); 68 clear_bit(0, &backtrace_flag);
60 smp_mb__after_atomic(); 69 smp_mb__after_atomic();
70 put_cpu();
61} 71}
62 72
63static int 73static int
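The hw_nmi change adds two behaviours: the requesting CPU can be dropped from the target mask, and no IPI is sent when the mask ends up empty. A tiny user-space sketch of the mask handling, with bit N standing in for CPU N (not kernel code):

#include <stdbool.h>
#include <stdio.h>

static unsigned long build_backtrace_mask(unsigned long online_mask,
					  int this_cpu, bool include_self)
{
	unsigned long mask = online_mask;

	if (!include_self)
		mask &= ~(1UL << this_cpu);	/* don't NMI ourselves */
	return mask;
}

int main(void)
{
	unsigned long online = 0xfUL;	/* CPUs 0-3 online */

	printf("exclude self: %#lx\n", build_backtrace_mask(online, 2, false));
	printf("include self: %#lx\n", build_backtrace_mask(online, 2, true));
	return 0;
}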
diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
index f3a1f04ed4cb..584874451414 100644
--- a/arch/x86/kernel/apm_32.c
+++ b/arch/x86/kernel/apm_32.c
@@ -841,7 +841,6 @@ static int apm_do_idle(void)
841 u32 eax; 841 u32 eax;
842 u8 ret = 0; 842 u8 ret = 0;
843 int idled = 0; 843 int idled = 0;
844 int polling;
845 int err = 0; 844 int err = 0;
846 845
847 if (!need_resched()) { 846 if (!need_resched()) {
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index adb02aa62af5..07846d738bdb 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1382,6 +1382,15 @@ again:
1382 intel_pmu_lbr_read(); 1382 intel_pmu_lbr_read();
1383 1383
1384 /* 1384 /*
1385 * CondChgd bit 63 doesn't mean any overflow status. Ignore
1386 * and clear the bit.
1387 */
1388 if (__test_and_clear_bit(63, (unsigned long *)&status)) {
1389 if (!status)
1390 goto done;
1391 }
1392
1393 /*
1385 * PEBS overflow sets bit 62 in the global status register 1394 * PEBS overflow sets bit 62 in the global status register
1386 */ 1395 */
1387 if (__test_and_clear_bit(62, (unsigned long *)&status)) { 1396 if (__test_and_clear_bit(62, (unsigned long *)&status)) {
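For the perf fix, the idea is that bit 63 (CondChgd) carries no overflow information, so it is cleared up front and the handler returns early if nothing else is pending. A stand-alone sketch of that status handling, with a hand-rolled test-and-clear helper:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool test_and_clear_bit64(int nr, uint64_t *word)
{
	bool was_set = *word & (1ULL << nr);

	*word &= ~(1ULL << nr);
	return was_set;
}

int main(void)
{
	uint64_t status = 1ULL << 63;	/* only CondChgd set */

	if (test_and_clear_bit64(63, &status) && !status)
		printf("spurious CondChgd, nothing to do\n");
	return 0;
}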
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index f0da82b8e634..dbaa23e78b36 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -423,9 +423,10 @@ sysenter_past_esp:
423 jnz sysenter_audit 423 jnz sysenter_audit
424sysenter_do_call: 424sysenter_do_call:
425 cmpl $(NR_syscalls), %eax 425 cmpl $(NR_syscalls), %eax
426 jae syscall_badsys 426 jae sysenter_badsys
427 call *sys_call_table(,%eax,4) 427 call *sys_call_table(,%eax,4)
428 movl %eax,PT_EAX(%esp) 428 movl %eax,PT_EAX(%esp)
429sysenter_after_call:
429 LOCKDEP_SYS_EXIT 430 LOCKDEP_SYS_EXIT
430 DISABLE_INTERRUPTS(CLBR_ANY) 431 DISABLE_INTERRUPTS(CLBR_ANY)
431 TRACE_IRQS_OFF 432 TRACE_IRQS_OFF
@@ -675,7 +676,12 @@ END(syscall_fault)
675 676
676syscall_badsys: 677syscall_badsys:
677 movl $-ENOSYS,PT_EAX(%esp) 678 movl $-ENOSYS,PT_EAX(%esp)
678 jmp resume_userspace 679 jmp syscall_exit
680END(syscall_badsys)
681
682sysenter_badsys:
683 movl $-ENOSYS,PT_EAX(%esp)
684 jmp sysenter_after_call
679END(syscall_badsys) 685END(syscall_badsys)
680 CFI_ENDPROC 686 CFI_ENDPROC
681 687
diff --git a/arch/x86/kernel/espfix_64.c b/arch/x86/kernel/espfix_64.c
index 6afbb16e9b79..94d857fb1033 100644
--- a/arch/x86/kernel/espfix_64.c
+++ b/arch/x86/kernel/espfix_64.c
@@ -175,7 +175,7 @@ void init_espfix_ap(void)
175 if (!pud_present(pud)) { 175 if (!pud_present(pud)) {
176 pmd_p = (pmd_t *)__get_free_page(PGALLOC_GFP); 176 pmd_p = (pmd_t *)__get_free_page(PGALLOC_GFP);
177 pud = __pud(__pa(pmd_p) | (PGTABLE_PROT & ptemask)); 177 pud = __pud(__pa(pmd_p) | (PGTABLE_PROT & ptemask));
178 paravirt_alloc_pud(&init_mm, __pa(pmd_p) >> PAGE_SHIFT); 178 paravirt_alloc_pmd(&init_mm, __pa(pmd_p) >> PAGE_SHIFT);
179 for (n = 0; n < ESPFIX_PUD_CLONES; n++) 179 for (n = 0; n < ESPFIX_PUD_CLONES; n++)
180 set_pud(&pud_p[n], pud); 180 set_pud(&pud_p[n], pud);
181 } 181 }
@@ -185,7 +185,7 @@ void init_espfix_ap(void)
185 if (!pmd_present(pmd)) { 185 if (!pmd_present(pmd)) {
186 pte_p = (pte_t *)__get_free_page(PGALLOC_GFP); 186 pte_p = (pte_t *)__get_free_page(PGALLOC_GFP);
187 pmd = __pmd(__pa(pte_p) | (PGTABLE_PROT & ptemask)); 187 pmd = __pmd(__pa(pte_p) | (PGTABLE_PROT & ptemask));
188 paravirt_alloc_pmd(&init_mm, __pa(pte_p) >> PAGE_SHIFT); 188 paravirt_alloc_pte(&init_mm, __pa(pte_p) >> PAGE_SHIFT);
189 for (n = 0; n < ESPFIX_PMD_CLONES; n++) 189 for (n = 0; n < ESPFIX_PMD_CLONES; n++)
190 set_pmd(&pmd_p[n], pmd); 190 set_pmd(&pmd_p[n], pmd);
191 } 191 }
@@ -193,7 +193,6 @@ void init_espfix_ap(void)
193 pte_p = pte_offset_kernel(&pmd, addr); 193 pte_p = pte_offset_kernel(&pmd, addr);
194 stack_page = (void *)__get_free_page(GFP_KERNEL); 194 stack_page = (void *)__get_free_page(GFP_KERNEL);
195 pte = __pte(__pa(stack_page) | (__PAGE_KERNEL_RO & ptemask)); 195 pte = __pte(__pa(stack_page) | (__PAGE_KERNEL_RO & ptemask));
196 paravirt_alloc_pte(&init_mm, __pa(stack_page) >> PAGE_SHIFT);
197 for (n = 0; n < ESPFIX_PTE_CLONES; n++) 196 for (n = 0; n < ESPFIX_PTE_CLONES; n++)
198 set_pte(&pte_p[n*PTE_STRIDE], pte); 197 set_pte(&pte_p[n*PTE_STRIDE], pte);
199 198
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index a0da58db43a8..2851d63c1202 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -363,7 +363,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
363 363
364 /* Set up to return from userspace. */ 364 /* Set up to return from userspace. */
365 restorer = current->mm->context.vdso + 365 restorer = current->mm->context.vdso +
366 selected_vdso32->sym___kernel_sigreturn; 366 selected_vdso32->sym___kernel_rt_sigreturn;
367 if (ksig->ka.sa.sa_flags & SA_RESTORER) 367 if (ksig->ka.sa.sa_flags & SA_RESTORER)
368 restorer = ksig->ka.sa.sa_restorer; 368 restorer = ksig->ka.sa.sa_restorer;
369 put_user_ex(restorer, &frame->pretcode); 369 put_user_ex(restorer, &frame->pretcode);
diff --git a/arch/x86/kernel/tsc.c b/arch/x86/kernel/tsc.c
index 57e5ce126d5a..ea030319b321 100644
--- a/arch/x86/kernel/tsc.c
+++ b/arch/x86/kernel/tsc.c
@@ -920,9 +920,9 @@ static int time_cpufreq_notifier(struct notifier_block *nb, unsigned long val,
920 tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new); 920 tsc_khz = cpufreq_scale(tsc_khz_ref, ref_freq, freq->new);
921 if (!(freq->flags & CPUFREQ_CONST_LOOPS)) 921 if (!(freq->flags & CPUFREQ_CONST_LOOPS))
922 mark_tsc_unstable("cpufreq changes"); 922 mark_tsc_unstable("cpufreq changes");
923 }
924 923
925 set_cyc2ns_scale(tsc_khz, freq->cpu); 924 set_cyc2ns_scale(tsc_khz, freq->cpu);
925 }
926 926
927 return 0; 927 return 0;
928} 928}
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index ec8366c5cfea..b5e994ad0135 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1462,6 +1462,7 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
1462 */ 1462 */
1463 if (var->unusable) 1463 if (var->unusable)
1464 var->db = 0; 1464 var->db = 0;
1465 var->dpl = to_svm(vcpu)->vmcb->save.cpl;
1465 break; 1466 break;
1466 } 1467 }
1467} 1468}
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f32a02578c0d..f6449334ec45 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1898,7 +1898,7 @@ static int set_msr_hyperv_pw(struct kvm_vcpu *vcpu, u32 msr, u64 data)
1898 if (!(data & HV_X64_MSR_TSC_REFERENCE_ENABLE)) 1898 if (!(data & HV_X64_MSR_TSC_REFERENCE_ENABLE))
1899 break; 1899 break;
1900 gfn = data >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT; 1900 gfn = data >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
1901 if (kvm_write_guest(kvm, data, 1901 if (kvm_write_guest(kvm, gfn << HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT,
1902 &tsc_ref, sizeof(tsc_ref))) 1902 &tsc_ref, sizeof(tsc_ref)))
1903 return 1; 1903 return 1;
1904 mark_page_dirty(kvm, gfn); 1904 mark_page_dirty(kvm, gfn);
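The KVM Hyper-V fix is a units bug: the raw MSR value (which still contains the enable bit) was passed where a guest physical address was expected. A small sketch of the gfn/gpa conversion, using a made-up MSR value:

#include <stdint.h>
#include <stdio.h>

#define HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT 12

int main(void)
{
	/* hypothetical MSR write: page frame 0x12345 with the enable bit set */
	uint64_t data = (0x12345ULL << HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT) | 1;
	uint64_t gfn = data >> HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;
	uint64_t gpa = gfn << HV_X64_MSR_TSC_REFERENCE_ADDRESS_SHIFT;

	printf("msr=%#llx gfn=%#llx gpa=%#llx\n",
	       (unsigned long long)data, (unsigned long long)gfn,
	       (unsigned long long)gpa);
	return 0;
}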
diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
index 3c0809a0631f..61b04fe36e66 100644
--- a/arch/x86/vdso/Makefile
+++ b/arch/x86/vdso/Makefile
@@ -11,7 +11,6 @@ VDSO32-$(CONFIG_COMPAT) := y
11 11
12# files to link into the vdso 12# files to link into the vdso
13vobjs-y := vdso-note.o vclock_gettime.o vgetcpu.o vdso-fakesections.o 13vobjs-y := vdso-note.o vclock_gettime.o vgetcpu.o vdso-fakesections.o
14vobjs-nox32 := vdso-fakesections.o
15 14
16# files to link into kernel 15# files to link into kernel
17obj-y += vma.o 16obj-y += vma.o
@@ -67,7 +66,8 @@ $(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso2c FORCE
67# 66#
68CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \ 67CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \
69 $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \ 68 $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \
70 -fno-omit-frame-pointer -foptimize-sibling-calls 69 -fno-omit-frame-pointer -foptimize-sibling-calls \
70 -DDISABLE_BRANCH_PROFILING
71 71
72$(vobjs): KBUILD_CFLAGS += $(CFL) 72$(vobjs): KBUILD_CFLAGS += $(CFL)
73 73
@@ -134,7 +134,7 @@ override obj-dirs = $(dir $(obj)) $(obj)/vdso32/
134 134
135targets += vdso32/vdso32.lds 135targets += vdso32/vdso32.lds
136targets += vdso32/note.o vdso32/vclock_gettime.o $(vdso32.so-y:%=vdso32/%.o) 136targets += vdso32/note.o vdso32/vclock_gettime.o $(vdso32.so-y:%=vdso32/%.o)
137targets += vdso32/vclock_gettime.o 137targets += vdso32/vclock_gettime.o vdso32/vdso-fakesections.o
138 138
139$(obj)/vdso32.o: $(vdso32-images:%=$(obj)/%) 139$(obj)/vdso32.o: $(vdso32-images:%=$(obj)/%)
140 140
@@ -150,11 +150,13 @@ KBUILD_CFLAGS_32 += -m32 -msoft-float -mregparm=0 -fpic
150KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector) 150KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector)
151KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls) 151KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls)
152KBUILD_CFLAGS_32 += -fno-omit-frame-pointer 152KBUILD_CFLAGS_32 += -fno-omit-frame-pointer
153KBUILD_CFLAGS_32 += -DDISABLE_BRANCH_PROFILING
153$(vdso32-images:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_32) 154$(vdso32-images:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)
154 155
155$(vdso32-images:%=$(obj)/%.dbg): $(obj)/vdso32-%.so.dbg: FORCE \ 156$(vdso32-images:%=$(obj)/%.dbg): $(obj)/vdso32-%.so.dbg: FORCE \
156 $(obj)/vdso32/vdso32.lds \ 157 $(obj)/vdso32/vdso32.lds \
157 $(obj)/vdso32/vclock_gettime.o \ 158 $(obj)/vdso32/vclock_gettime.o \
159 $(obj)/vdso32/vdso-fakesections.o \
158 $(obj)/vdso32/note.o \ 160 $(obj)/vdso32/note.o \
159 $(obj)/vdso32/%.o 161 $(obj)/vdso32/%.o
160 $(call if_changed,vdso) 162 $(call if_changed,vdso)
@@ -169,14 +171,24 @@ quiet_cmd_vdso = VDSO $@
169 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@' 171 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
170 172
171VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \ 173VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
172 -Wl,-Bsymbolic $(LTO_CFLAGS) 174 $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS)
173GCOV_PROFILE := n 175GCOV_PROFILE := n
174 176
175# 177#
176# Install the unstripped copies of vdso*.so. 178# Install the unstripped copies of vdso*.so. If our toolchain supports
179# build-id, install .build-id links as well.
177# 180#
178quiet_cmd_vdso_install = INSTALL $(@:install_%=%) 181quiet_cmd_vdso_install = INSTALL $(@:install_%=%)
179 cmd_vdso_install = cp $< $(MODLIB)/vdso/$(@:install_%=%) 182define cmd_vdso_install
183 cp $< "$(MODLIB)/vdso/$(@:install_%=%)"; \
184 if readelf -n $< |grep -q 'Build ID'; then \
185 buildid=`readelf -n $< |grep 'Build ID' |sed -e 's/^.*Build ID: \(.*\)$$/\1/'`; \
186 first=`echo $$buildid | cut -b-2`; \
187 last=`echo $$buildid | cut -b3-`; \
188 mkdir -p "$(MODLIB)/vdso/.build-id/$$first"; \
189 ln -sf "../../$(@:install_%=%)" "$(MODLIB)/vdso/.build-id/$$first/$$last.debug"; \
190 fi
191endef
180 192
181vdso_img_insttargets := $(vdso_img_sodbg:%.dbg=install_%) 193vdso_img_insttargets := $(vdso_img_sodbg:%.dbg=install_%)
182 194
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
index b2e4f493e5b0..9793322751e0 100644
--- a/arch/x86/vdso/vclock_gettime.c
+++ b/arch/x86/vdso/vclock_gettime.c
@@ -11,9 +11,6 @@
11 * Check with readelf after changing. 11 * Check with readelf after changing.
12 */ 12 */
13 13
14/* Disable profiling for userspace code: */
15#define DISABLE_BRANCH_PROFILING
16
17#include <uapi/linux/time.h> 14#include <uapi/linux/time.h>
18#include <asm/vgtod.h> 15#include <asm/vgtod.h>
19#include <asm/hpet.h> 16#include <asm/hpet.h>
diff --git a/arch/x86/vdso/vdso-fakesections.c b/arch/x86/vdso/vdso-fakesections.c
index cb8a8d72c24b..aa5fbfab20a5 100644
--- a/arch/x86/vdso/vdso-fakesections.c
+++ b/arch/x86/vdso/vdso-fakesections.c
@@ -2,31 +2,20 @@
2 * Copyright 2014 Andy Lutomirski 2 * Copyright 2014 Andy Lutomirski
3 * Subject to the GNU Public License, v.2 3 * Subject to the GNU Public License, v.2
4 * 4 *
5 * Hack to keep broken Go programs working. 5 * String table for loadable section headers. See vdso2c.h for why
6 * 6 * this exists.
7 * The Go runtime had a couple of bugs: it would read the section table to try
8 * to figure out how many dynamic symbols there were (it shouldn't have looked
9 * at the section table at all) and, if there were no SHT_SYNDYM section table
10 * entry, it would use an uninitialized value for the number of symbols. As a
11 * workaround, we supply a minimal section table. vdso2c will adjust the
12 * in-memory image so that "vdso_fake_sections" becomes the section table.
13 *
14 * The bug was introduced by:
15 * https://code.google.com/p/go/source/detail?r=56ea40aac72b (2012-08-31)
16 * and is being addressed in the Go runtime in this issue:
17 * https://code.google.com/p/go/issues/detail?id=8197
18 */ 7 */
19 8
20#ifndef __x86_64__ 9const char fake_shstrtab[] __attribute__((section(".fake_shstrtab"))) =
21#error This hack is specific to the 64-bit vDSO 10 ".hash\0"
22#endif 11 ".dynsym\0"
23 12 ".dynstr\0"
24#include <linux/elf.h> 13 ".gnu.version\0"
25 14 ".gnu.version_d\0"
26extern const __visible struct elf64_shdr vdso_fake_sections[]; 15 ".dynamic\0"
27const __visible struct elf64_shdr vdso_fake_sections[] = { 16 ".rodata\0"
28 { 17 ".fake_shstrtab\0" /* Yay, self-referential code. */
29 .sh_type = SHT_DYNSYM, 18 ".note\0"
30 .sh_entsize = sizeof(Elf64_Sym), 19 ".eh_frame_hdr\0"
31 } 20 ".eh_frame\0"
32}; 21 ".text";
diff --git a/arch/x86/vdso/vdso-layout.lds.S b/arch/x86/vdso/vdso-layout.lds.S
index 2ec72f651ebf..9197544eea9a 100644
--- a/arch/x86/vdso/vdso-layout.lds.S
+++ b/arch/x86/vdso/vdso-layout.lds.S
@@ -6,6 +6,16 @@
6 * This script controls its layout. 6 * This script controls its layout.
7 */ 7 */
8 8
9#if defined(BUILD_VDSO64)
10# define SHDR_SIZE 64
11#elif defined(BUILD_VDSO32) || defined(BUILD_VDSOX32)
12# define SHDR_SIZE 40
13#else
14# error unknown VDSO target
15#endif
16
17#define NUM_FAKE_SHDRS 13
18
9SECTIONS 19SECTIONS
10{ 20{
11 . = SIZEOF_HEADERS; 21 . = SIZEOF_HEADERS;
@@ -18,36 +28,53 @@ SECTIONS
18 .gnu.version_d : { *(.gnu.version_d) } 28 .gnu.version_d : { *(.gnu.version_d) }
19 .gnu.version_r : { *(.gnu.version_r) } 29 .gnu.version_r : { *(.gnu.version_r) }
20 30
31 .dynamic : { *(.dynamic) } :text :dynamic
32
33 .rodata : {
34 *(.rodata*)
35 *(.data*)
36 *(.sdata*)
37 *(.got.plt) *(.got)
38 *(.gnu.linkonce.d.*)
39 *(.bss*)
40 *(.dynbss*)
41 *(.gnu.linkonce.b.*)
42
43 /*
44 * Ideally this would live in a C file, but that won't
45 * work cleanly for x32 until we start building the x32
46 * C code using an x32 toolchain.
47 */
48 VDSO_FAKE_SECTION_TABLE_START = .;
49 . = . + NUM_FAKE_SHDRS * SHDR_SIZE;
50 VDSO_FAKE_SECTION_TABLE_END = .;
51 } :text
52
53 .fake_shstrtab : { *(.fake_shstrtab) } :text
54
55
21 .note : { *(.note.*) } :text :note 56 .note : { *(.note.*) } :text :note
22 57
23 .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr 58 .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
24 .eh_frame : { KEEP (*(.eh_frame)) } :text 59 .eh_frame : { KEEP (*(.eh_frame)) } :text
25 60
26 .dynamic : { *(.dynamic) } :text :dynamic
27
28 .rodata : { *(.rodata*) } :text
29 .data : {
30 *(.data*)
31 *(.sdata*)
32 *(.got.plt) *(.got)
33 *(.gnu.linkonce.d.*)
34 *(.bss*)
35 *(.dynbss*)
36 *(.gnu.linkonce.b.*)
37 }
38
39 .altinstructions : { *(.altinstructions) }
40 .altinstr_replacement : { *(.altinstr_replacement) }
41 61
42 /* 62 /*
43 * Align the actual code well away from the non-instruction data. 63 * Text is well-separated from actual data: there's plenty of
44 * This is the best thing for the I-cache. 64 * stuff that isn't used at runtime in between.
45 */ 65 */
46 . = ALIGN(0x100);
47 66
48 .text : { *(.text*) } :text =0x90909090, 67 .text : { *(.text*) } :text =0x90909090,
49 68
50 /* 69 /*
70 * At the end so that eu-elflint stays happy when vdso2c strips
71 * these. A better implementation would avoid allocating space
72 * for these.
73 */
74 .altinstructions : { *(.altinstructions) } :text
75 .altinstr_replacement : { *(.altinstr_replacement) } :text
76
77 /*
51 * The remainder of the vDSO consists of special pages that are 78 * The remainder of the vDSO consists of special pages that are
52 * shared between the kernel and userspace. It needs to be at the 79 * shared between the kernel and userspace. It needs to be at the
53 * end so that it doesn't overlap the mapping of the actual 80 * end so that it doesn't overlap the mapping of the actual
@@ -75,6 +102,7 @@ SECTIONS
75 /DISCARD/ : { 102 /DISCARD/ : {
76 *(.discard) 103 *(.discard)
77 *(.discard.*) 104 *(.discard.*)
105 *(__bug_table)
78 } 106 }
79} 107}
80 108
diff --git a/arch/x86/vdso/vdso.lds.S b/arch/x86/vdso/vdso.lds.S
index 75e3404c83b1..6807932643c2 100644
--- a/arch/x86/vdso/vdso.lds.S
+++ b/arch/x86/vdso/vdso.lds.S
@@ -6,6 +6,8 @@
6 * the DSO. 6 * the DSO.
7 */ 7 */
8 8
9#define BUILD_VDSO64
10
9#include "vdso-layout.lds.S" 11#include "vdso-layout.lds.S"
10 12
11/* 13/*
diff --git a/arch/x86/vdso/vdso2c.c b/arch/x86/vdso/vdso2c.c
index 7a6bf50f9165..238dbe82776e 100644
--- a/arch/x86/vdso/vdso2c.c
+++ b/arch/x86/vdso/vdso2c.c
@@ -23,6 +23,8 @@ enum {
23 sym_vvar_page, 23 sym_vvar_page,
24 sym_hpet_page, 24 sym_hpet_page,
25 sym_end_mapping, 25 sym_end_mapping,
26 sym_VDSO_FAKE_SECTION_TABLE_START,
27 sym_VDSO_FAKE_SECTION_TABLE_END,
26}; 28};
27 29
28const int special_pages[] = { 30const int special_pages[] = {
@@ -30,15 +32,26 @@ const int special_pages[] = {
30 sym_hpet_page, 32 sym_hpet_page,
31}; 33};
32 34
33char const * const required_syms[] = { 35struct vdso_sym {
34 [sym_vvar_page] = "vvar_page", 36 const char *name;
35 [sym_hpet_page] = "hpet_page", 37 bool export;
36 [sym_end_mapping] = "end_mapping", 38};
37 "VDSO32_NOTE_MASK", 39
38 "VDSO32_SYSENTER_RETURN", 40struct vdso_sym required_syms[] = {
39 "__kernel_vsyscall", 41 [sym_vvar_page] = {"vvar_page", true},
40 "__kernel_sigreturn", 42 [sym_hpet_page] = {"hpet_page", true},
41 "__kernel_rt_sigreturn", 43 [sym_end_mapping] = {"end_mapping", true},
44 [sym_VDSO_FAKE_SECTION_TABLE_START] = {
45 "VDSO_FAKE_SECTION_TABLE_START", false
46 },
47 [sym_VDSO_FAKE_SECTION_TABLE_END] = {
48 "VDSO_FAKE_SECTION_TABLE_END", false
49 },
50 {"VDSO32_NOTE_MASK", true},
51 {"VDSO32_SYSENTER_RETURN", true},
52 {"__kernel_vsyscall", true},
53 {"__kernel_sigreturn", true},
54 {"__kernel_rt_sigreturn", true},
42}; 55};
43 56
44__attribute__((format(printf, 1, 2))) __attribute__((noreturn)) 57__attribute__((format(printf, 1, 2))) __attribute__((noreturn))
@@ -83,37 +96,21 @@ extern void bad_put_le(void);
83 96
84#define NSYMS (sizeof(required_syms) / sizeof(required_syms[0])) 97#define NSYMS (sizeof(required_syms) / sizeof(required_syms[0]))
85 98
86#define BITS 64 99#define BITSFUNC3(name, bits) name##bits
87#define GOFUNC go64 100#define BITSFUNC2(name, bits) BITSFUNC3(name, bits)
88#define Elf_Ehdr Elf64_Ehdr 101#define BITSFUNC(name) BITSFUNC2(name, ELF_BITS)
89#define Elf_Shdr Elf64_Shdr 102
90#define Elf_Phdr Elf64_Phdr 103#define ELF_BITS_XFORM2(bits, x) Elf##bits##_##x
91#define Elf_Sym Elf64_Sym 104#define ELF_BITS_XFORM(bits, x) ELF_BITS_XFORM2(bits, x)
92#define Elf_Dyn Elf64_Dyn 105#define ELF(x) ELF_BITS_XFORM(ELF_BITS, x)
106
107#define ELF_BITS 64
93#include "vdso2c.h" 108#include "vdso2c.h"
94#undef BITS 109#undef ELF_BITS
95#undef GOFUNC 110
96#undef Elf_Ehdr 111#define ELF_BITS 32
97#undef Elf_Shdr
98#undef Elf_Phdr
99#undef Elf_Sym
100#undef Elf_Dyn
101
102#define BITS 32
103#define GOFUNC go32
104#define Elf_Ehdr Elf32_Ehdr
105#define Elf_Shdr Elf32_Shdr
106#define Elf_Phdr Elf32_Phdr
107#define Elf_Sym Elf32_Sym
108#define Elf_Dyn Elf32_Dyn
109#include "vdso2c.h" 112#include "vdso2c.h"
110#undef BITS 113#undef ELF_BITS
111#undef GOFUNC
112#undef Elf_Ehdr
113#undef Elf_Shdr
114#undef Elf_Phdr
115#undef Elf_Sym
116#undef Elf_Dyn
117 114
118static void go(void *addr, size_t len, FILE *outfile, const char *name) 115static void go(void *addr, size_t len, FILE *outfile, const char *name)
119{ 116{
diff --git a/arch/x86/vdso/vdso2c.h b/arch/x86/vdso/vdso2c.h
index c6eefaf389b9..11b65d4f9414 100644
--- a/arch/x86/vdso/vdso2c.h
+++ b/arch/x86/vdso/vdso2c.h
@@ -4,23 +4,139 @@
4 * are built for 32-bit userspace. 4 * are built for 32-bit userspace.
5 */ 5 */
6 6
7static void GOFUNC(void *addr, size_t len, FILE *outfile, const char *name) 7/*
8 * We're writing a section table for a few reasons:
9 *
10 * The Go runtime had a couple of bugs: it would read the section
11 * table to try to figure out how many dynamic symbols there were (it
12 * shouldn't have looked at the section table at all) and, if there
13 * were no SHT_DYNSYM section table entry, it would use an
14 * uninitialized value for the number of symbols. An empty DYNSYM
15 * table would work, but I see no reason not to write a valid one (and
16 * keep full performance for old Go programs). This hack is only
17 * needed on x86_64.
18 *
19 * The bug was introduced on 2012-08-31 by:
20 * https://code.google.com/p/go/source/detail?r=56ea40aac72b
21 * and was fixed on 2014-06-13 by:
22 * https://code.google.com/p/go/source/detail?r=fc1cd5e12595
23 *
24 * Binutils has issues debugging the vDSO: it reads the section table to
25 * find SHT_NOTE; it won't look at PT_NOTE for the in-memory vDSO, which
26 * would break build-id if we removed the section table. Binutils
27 * also requires that shstrndx != 0. See:
28 * https://sourceware.org/bugzilla/show_bug.cgi?id=17064
29 *
30 * elfutils might not look for PT_NOTE if there is a section table at
31 * all. I don't know whether this matters for any practical purpose.
32 *
33 * For simplicity, rather than hacking up a partial section table, we
34 * just write a mostly complete one. We omit non-dynamic symbols,
35 * though, since they're rather large.
36 *
37 * Once binutils gets fixed, we might be able to drop this for all but
38 * the 64-bit vdso, since build-id only works in kernel RPMs, and
39 * systems that update to new enough kernel RPMs will likely update
40 * binutils in sync. build-id has never worked for home-built kernel
41 * RPMs without manual symlinking, and I suspect that no one ever does
42 * that.
43 */
44struct BITSFUNC(fake_sections)
45{
46 ELF(Shdr) *table;
47 unsigned long table_offset;
48 int count, max_count;
49
50 int in_shstrndx;
51 unsigned long shstr_offset;
52 const char *shstrtab;
53 size_t shstrtab_len;
54
55 int out_shstrndx;
56};
57
58static unsigned int BITSFUNC(find_shname)(struct BITSFUNC(fake_sections) *out,
59 const char *name)
60{
61 const char *outname = out->shstrtab;
62 while (outname - out->shstrtab < out->shstrtab_len) {
63 if (!strcmp(name, outname))
64 return (outname - out->shstrtab) + out->shstr_offset;
65 outname += strlen(outname) + 1;
66 }
67
68 if (*name)
69 printf("Warning: could not find output name \"%s\"\n", name);
70 return out->shstr_offset + out->shstrtab_len - 1; /* Use a null. */
71}
72
73static void BITSFUNC(init_sections)(struct BITSFUNC(fake_sections) *out)
74{
75 if (!out->in_shstrndx)
76 fail("didn't find the fake shstrndx\n");
77
78 memset(out->table, 0, out->max_count * sizeof(ELF(Shdr)));
79
80 if (out->max_count < 1)
81 fail("we need at least two fake output sections\n");
82
83 PUT_LE(&out->table[0].sh_type, SHT_NULL);
84 PUT_LE(&out->table[0].sh_name, BITSFUNC(find_shname)(out, ""));
85
86 out->count = 1;
87}
88
89static void BITSFUNC(copy_section)(struct BITSFUNC(fake_sections) *out,
90 int in_idx, const ELF(Shdr) *in,
91 const char *name)
92{
93 uint64_t flags = GET_LE(&in->sh_flags);
94
95 bool copy = flags & SHF_ALLOC &&
96 (GET_LE(&in->sh_size) ||
97 (GET_LE(&in->sh_type) != SHT_RELA &&
98 GET_LE(&in->sh_type) != SHT_REL)) &&
99 strcmp(name, ".altinstructions") &&
100 strcmp(name, ".altinstr_replacement");
101
102 if (!copy)
103 return;
104
105 if (out->count >= out->max_count)
106 fail("too many copied sections (max = %d)\n", out->max_count);
107
108 if (in_idx == out->in_shstrndx)
109 out->out_shstrndx = out->count;
110
111 out->table[out->count] = *in;
112 PUT_LE(&out->table[out->count].sh_name,
113 BITSFUNC(find_shname)(out, name));
114
115 /* elfutils requires that a strtab have the correct type. */
116 if (!strcmp(name, ".fake_shstrtab"))
117 PUT_LE(&out->table[out->count].sh_type, SHT_STRTAB);
118
119 out->count++;
120}
121
122static void BITSFUNC(go)(void *addr, size_t len,
123 FILE *outfile, const char *name)
8{ 124{
9 int found_load = 0; 125 int found_load = 0;
10 unsigned long load_size = -1; /* Work around bogus warning */ 126 unsigned long load_size = -1; /* Work around bogus warning */
11 unsigned long data_size; 127 unsigned long data_size;
12 Elf_Ehdr *hdr = (Elf_Ehdr *)addr; 128 ELF(Ehdr) *hdr = (ELF(Ehdr) *)addr;
13 int i; 129 int i;
14 unsigned long j; 130 unsigned long j;
15 Elf_Shdr *symtab_hdr = NULL, *strtab_hdr, *secstrings_hdr, 131 ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr, *secstrings_hdr,
16 *alt_sec = NULL; 132 *alt_sec = NULL;
17 Elf_Dyn *dyn = 0, *dyn_end = 0; 133 ELF(Dyn) *dyn = 0, *dyn_end = 0;
18 const char *secstrings; 134 const char *secstrings;
19 uint64_t syms[NSYMS] = {}; 135 uint64_t syms[NSYMS] = {};
20 136
21 uint64_t fake_sections_value = 0, fake_sections_size = 0; 137 struct BITSFUNC(fake_sections) fake_sections = {};
22 138
23 Elf_Phdr *pt = (Elf_Phdr *)(addr + GET_LE(&hdr->e_phoff)); 139 ELF(Phdr) *pt = (ELF(Phdr) *)(addr + GET_LE(&hdr->e_phoff));
24 140
25 /* Walk the segment table. */ 141 /* Walk the segment table. */
26 for (i = 0; i < GET_LE(&hdr->e_phnum); i++) { 142 for (i = 0; i < GET_LE(&hdr->e_phnum); i++) {
@@ -51,7 +167,7 @@ static void GOFUNC(void *addr, size_t len, FILE *outfile, const char *name)
51 for (i = 0; dyn + i < dyn_end && 167 for (i = 0; dyn + i < dyn_end &&
52 GET_LE(&dyn[i].d_tag) != DT_NULL; i++) { 168 GET_LE(&dyn[i].d_tag) != DT_NULL; i++) {
53 typeof(dyn[i].d_tag) tag = GET_LE(&dyn[i].d_tag); 169 typeof(dyn[i].d_tag) tag = GET_LE(&dyn[i].d_tag);
54 if (tag == DT_REL || tag == DT_RELSZ || 170 if (tag == DT_REL || tag == DT_RELSZ || tag == DT_RELA ||
55 tag == DT_RELENT || tag == DT_TEXTREL) 171 tag == DT_RELENT || tag == DT_TEXTREL)
56 fail("vdso image contains dynamic relocations\n"); 172 fail("vdso image contains dynamic relocations\n");
57 } 173 }
@@ -61,7 +177,7 @@ static void GOFUNC(void *addr, size_t len, FILE *outfile, const char *name)
61 GET_LE(&hdr->e_shentsize)*GET_LE(&hdr->e_shstrndx); 177 GET_LE(&hdr->e_shentsize)*GET_LE(&hdr->e_shstrndx);
62 secstrings = addr + GET_LE(&secstrings_hdr->sh_offset); 178 secstrings = addr + GET_LE(&secstrings_hdr->sh_offset);
63 for (i = 0; i < GET_LE(&hdr->e_shnum); i++) { 179 for (i = 0; i < GET_LE(&hdr->e_shnum); i++) {
64 Elf_Shdr *sh = addr + GET_LE(&hdr->e_shoff) + 180 ELF(Shdr) *sh = addr + GET_LE(&hdr->e_shoff) +
65 GET_LE(&hdr->e_shentsize) * i; 181 GET_LE(&hdr->e_shentsize) * i;
66 if (GET_LE(&sh->sh_type) == SHT_SYMTAB) 182 if (GET_LE(&sh->sh_type) == SHT_SYMTAB)
67 symtab_hdr = sh; 183 symtab_hdr = sh;
@@ -82,29 +198,63 @@ static void GOFUNC(void *addr, size_t len, FILE *outfile, const char *name)
82 i < GET_LE(&symtab_hdr->sh_size) / GET_LE(&symtab_hdr->sh_entsize); 198 i < GET_LE(&symtab_hdr->sh_size) / GET_LE(&symtab_hdr->sh_entsize);
83 i++) { 199 i++) {
84 int k; 200 int k;
85 Elf_Sym *sym = addr + GET_LE(&symtab_hdr->sh_offset) + 201 ELF(Sym) *sym = addr + GET_LE(&symtab_hdr->sh_offset) +
86 GET_LE(&symtab_hdr->sh_entsize) * i; 202 GET_LE(&symtab_hdr->sh_entsize) * i;
87 const char *name = addr + GET_LE(&strtab_hdr->sh_offset) + 203 const char *name = addr + GET_LE(&strtab_hdr->sh_offset) +
88 GET_LE(&sym->st_name); 204 GET_LE(&sym->st_name);
89 205
90 for (k = 0; k < NSYMS; k++) { 206 for (k = 0; k < NSYMS; k++) {
91 if (!strcmp(name, required_syms[k])) { 207 if (!strcmp(name, required_syms[k].name)) {
92 if (syms[k]) { 208 if (syms[k]) {
93 fail("duplicate symbol %s\n", 209 fail("duplicate symbol %s\n",
94 required_syms[k]); 210 required_syms[k].name);
95 } 211 }
96 syms[k] = GET_LE(&sym->st_value); 212 syms[k] = GET_LE(&sym->st_value);
97 } 213 }
98 } 214 }
99 215
100 if (!strcmp(name, "vdso_fake_sections")) { 216 if (!strcmp(name, "fake_shstrtab")) {
101 if (fake_sections_value) 217 ELF(Shdr) *sh;
102 fail("duplicate vdso_fake_sections\n"); 218
103 fake_sections_value = GET_LE(&sym->st_value); 219 fake_sections.in_shstrndx = GET_LE(&sym->st_shndx);
104 fake_sections_size = GET_LE(&sym->st_size); 220 fake_sections.shstrtab = addr + GET_LE(&sym->st_value);
221 fake_sections.shstrtab_len = GET_LE(&sym->st_size);
222 sh = addr + GET_LE(&hdr->e_shoff) +
223 GET_LE(&hdr->e_shentsize) *
224 fake_sections.in_shstrndx;
225 fake_sections.shstr_offset = GET_LE(&sym->st_value) -
226 GET_LE(&sh->sh_addr);
105 } 227 }
106 } 228 }
107 229
230 /* Build the output section table. */
231 if (!syms[sym_VDSO_FAKE_SECTION_TABLE_START] ||
232 !syms[sym_VDSO_FAKE_SECTION_TABLE_END])
233 fail("couldn't find fake section table\n");
234 if ((syms[sym_VDSO_FAKE_SECTION_TABLE_END] -
235 syms[sym_VDSO_FAKE_SECTION_TABLE_START]) % sizeof(ELF(Shdr)))
236 fail("fake section table size isn't a multiple of sizeof(Shdr)\n");
237 fake_sections.table = addr + syms[sym_VDSO_FAKE_SECTION_TABLE_START];
238 fake_sections.table_offset = syms[sym_VDSO_FAKE_SECTION_TABLE_START];
239 fake_sections.max_count = (syms[sym_VDSO_FAKE_SECTION_TABLE_END] -
240 syms[sym_VDSO_FAKE_SECTION_TABLE_START]) /
241 sizeof(ELF(Shdr));
242
243 BITSFUNC(init_sections)(&fake_sections);
244 for (i = 0; i < GET_LE(&hdr->e_shnum); i++) {
245 ELF(Shdr) *sh = addr + GET_LE(&hdr->e_shoff) +
246 GET_LE(&hdr->e_shentsize) * i;
247 BITSFUNC(copy_section)(&fake_sections, i, sh,
248 secstrings + GET_LE(&sh->sh_name));
249 }
250 if (!fake_sections.out_shstrndx)
251 fail("didn't generate shstrndx?!?\n");
252
253 PUT_LE(&hdr->e_shoff, fake_sections.table_offset);
254 PUT_LE(&hdr->e_shentsize, sizeof(ELF(Shdr)));
255 PUT_LE(&hdr->e_shnum, fake_sections.count);
256 PUT_LE(&hdr->e_shstrndx, fake_sections.out_shstrndx);
257
108 /* Validate mapping addresses. */ 258 /* Validate mapping addresses. */
109 for (i = 0; i < sizeof(special_pages) / sizeof(special_pages[0]); i++) { 259 for (i = 0; i < sizeof(special_pages) / sizeof(special_pages[0]); i++) {
110 if (!syms[i]) 260 if (!syms[i])
@@ -112,25 +262,17 @@ static void GOFUNC(void *addr, size_t len, FILE *outfile, const char *name)
112 262
113 if (syms[i] % 4096) 263 if (syms[i] % 4096)
114 fail("%s must be a multiple of 4096\n", 264 fail("%s must be a multiple of 4096\n",
115 required_syms[i]); 265 required_syms[i].name);
116 if (syms[i] < data_size) 266 if (syms[i] < data_size)
117 fail("%s must be after the text mapping\n", 267 fail("%s must be after the text mapping\n",
118 required_syms[i]); 268 required_syms[i].name);
119 if (syms[sym_end_mapping] < syms[i] + 4096) 269 if (syms[sym_end_mapping] < syms[i] + 4096)
120 fail("%s overruns end_mapping\n", required_syms[i]); 270 fail("%s overruns end_mapping\n",
271 required_syms[i].name);
121 } 272 }
122 if (syms[sym_end_mapping] % 4096) 273 if (syms[sym_end_mapping] % 4096)
123 fail("end_mapping must be a multiple of 4096\n"); 274 fail("end_mapping must be a multiple of 4096\n");
124 275
125 /* Remove sections or use fakes */
126 if (fake_sections_size % sizeof(Elf_Shdr))
127 fail("vdso_fake_sections size is not a multiple of %ld\n",
128 (long)sizeof(Elf_Shdr));
129 PUT_LE(&hdr->e_shoff, fake_sections_value);
130 PUT_LE(&hdr->e_shentsize, fake_sections_value ? sizeof(Elf_Shdr) : 0);
131 PUT_LE(&hdr->e_shnum, fake_sections_size / sizeof(Elf_Shdr));
132 PUT_LE(&hdr->e_shstrndx, SHN_UNDEF);
133
134 if (!name) { 276 if (!name) {
135 fwrite(addr, load_size, 1, outfile); 277 fwrite(addr, load_size, 1, outfile);
136 return; 278 return;
@@ -168,9 +310,9 @@ static void GOFUNC(void *addr, size_t len, FILE *outfile, const char *name)
168 (unsigned long)GET_LE(&alt_sec->sh_size)); 310 (unsigned long)GET_LE(&alt_sec->sh_size));
169 } 311 }
170 for (i = 0; i < NSYMS; i++) { 312 for (i = 0; i < NSYMS; i++) {
171 if (syms[i]) 313 if (required_syms[i].export && syms[i])
172 fprintf(outfile, "\t.sym_%s = 0x%" PRIx64 ",\n", 314 fprintf(outfile, "\t.sym_%s = 0x%" PRIx64 ",\n",
173 required_syms[i], syms[i]); 315 required_syms[i].name, syms[i]);
174 } 316 }
175 fprintf(outfile, "};\n"); 317 fprintf(outfile, "};\n");
176} 318}
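The find_shname() helper added above walks a buffer of NUL-separated section names and returns the offset of the match. A stand-alone sketch of the same walk over a hypothetical string table:

#include <stdio.h>
#include <string.h>

static long find_name(const char *tab, size_t len, const char *name)
{
	const char *p = tab;

	while ((size_t)(p - tab) < len) {
		if (!strcmp(name, p))
			return p - tab;		/* offset of the matching entry */
		p += strlen(p) + 1;		/* skip to the next NUL-terminated name */
	}
	return -1;
}

int main(void)
{
	static const char shstrtab[] = ".hash\0.dynsym\0.dynstr\0.text";

	printf(".dynstr at offset %ld\n",
	       find_name(shstrtab, sizeof(shstrtab), ".dynstr"));
	return 0;
}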
diff --git a/arch/x86/vdso/vdso32/vdso-fakesections.c b/arch/x86/vdso/vdso32/vdso-fakesections.c
new file mode 100644
index 000000000000..541468e25265
--- /dev/null
+++ b/arch/x86/vdso/vdso32/vdso-fakesections.c
@@ -0,0 +1 @@
#include "../vdso-fakesections.c"
diff --git a/arch/x86/vdso/vdsox32.lds.S b/arch/x86/vdso/vdsox32.lds.S
index 46b991b578a8..697c11ece90c 100644
--- a/arch/x86/vdso/vdsox32.lds.S
+++ b/arch/x86/vdso/vdsox32.lds.S
@@ -6,6 +6,8 @@
6 * the DSO. 6 * the DSO.
7 */ 7 */
8 8
9#define BUILD_VDSOX32
10
9#include "vdso-layout.lds.S" 11#include "vdso-layout.lds.S"
10 12
11/* 13/*
diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
index e1513c47872a..5a5176de8d0a 100644
--- a/arch/x86/vdso/vma.c
+++ b/arch/x86/vdso/vma.c
@@ -62,6 +62,9 @@ struct linux_binprm;
62 Only used for the 64-bit and x32 vdsos. */ 62 Only used for the 64-bit and x32 vdsos. */
63static unsigned long vdso_addr(unsigned long start, unsigned len) 63static unsigned long vdso_addr(unsigned long start, unsigned len)
64{ 64{
65#ifdef CONFIG_X86_32
66 return 0;
67#else
65 unsigned long addr, end; 68 unsigned long addr, end;
66 unsigned offset; 69 unsigned offset;
67 end = (start + PMD_SIZE - 1) & PMD_MASK; 70 end = (start + PMD_SIZE - 1) & PMD_MASK;
@@ -83,6 +86,7 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
83 addr = align_vdso_addr(addr); 86 addr = align_vdso_addr(addr);
84 87
85 return addr; 88 return addr;
89#endif
86} 90}
87 91
88static int map_vdso(const struct vdso_image *image, bool calculate_addr) 92static int map_vdso(const struct vdso_image *image, bool calculate_addr)
diff --git a/block/bio.c b/block/bio.c
index 8c2e55e39a1b..0ec61c9e536c 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -746,6 +746,14 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
746 746
747 goto done; 747 goto done;
748 } 748 }
749
750 /*
751 * If the queue doesn't support SG gaps and adding this
752 * offset would create a gap, disallow it.
753 */
754 if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS) &&
755 bvec_gap_to_prev(prev, offset))
756 return 0;
749 } 757 }
750 758
751 if (bio->bi_vcnt >= bio->bi_max_vecs) 759 if (bio->bi_vcnt >= bio->bi_max_vecs)
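The new check in __bio_add_page() refuses to create a scatter-gather gap: on queues with QUEUE_FLAG_SG_GAPS set, a page may only be appended if the previous vector ends on a page boundary and the new one starts at offset 0. A sketch of that predicate, assuming it mirrors the intent of bvec_gap_to_prev() rather than its exact kernel definition:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE 4096u

struct seg {
	unsigned int offset;	/* offset within its page */
	unsigned int len;
};

static bool gap_to_prev(const struct seg *prev, unsigned int next_offset)
{
	/* a gap exists unless prev ends on a page boundary and next starts at 0 */
	return ((prev->offset + prev->len) & (PAGE_SIZE - 1)) || next_offset;
}

int main(void)
{
	struct seg full_page = { .offset = 0, .len = PAGE_SIZE };
	struct seg partial = { .offset = 0, .len = 512 };

	printf("full page, next at 0:   gap=%d\n", gap_to_prev(&full_page, 0));
	printf("partial page, next at 512: gap=%d\n", gap_to_prev(&partial, 512));
	return 0;
}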
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 069bc202ffe3..b9f4cc494ece 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -80,7 +80,7 @@ static struct blkcg_gq *blkg_alloc(struct blkcg *blkcg, struct request_queue *q,
80 blkg->q = q; 80 blkg->q = q;
81 INIT_LIST_HEAD(&blkg->q_node); 81 INIT_LIST_HEAD(&blkg->q_node);
82 blkg->blkcg = blkcg; 82 blkg->blkcg = blkcg;
83 blkg->refcnt = 1; 83 atomic_set(&blkg->refcnt, 1);
84 84
85 /* root blkg uses @q->root_rl, init rl only for !root blkgs */ 85 /* root blkg uses @q->root_rl, init rl only for !root blkgs */
86 if (blkcg != &blkcg_root) { 86 if (blkcg != &blkcg_root) {
@@ -399,11 +399,8 @@ void __blkg_release_rcu(struct rcu_head *rcu_head)
399 399
400 /* release the blkcg and parent blkg refs this blkg has been holding */ 400 /* release the blkcg and parent blkg refs this blkg has been holding */
401 css_put(&blkg->blkcg->css); 401 css_put(&blkg->blkcg->css);
402 if (blkg->parent) { 402 if (blkg->parent)
403 spin_lock_irq(blkg->q->queue_lock);
404 blkg_put(blkg->parent); 403 blkg_put(blkg->parent);
405 spin_unlock_irq(blkg->q->queue_lock);
406 }
407 404
408 blkg_free(blkg); 405 blkg_free(blkg);
409} 406}
@@ -1093,7 +1090,7 @@ EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
1093 * Register @pol with blkcg core. Might sleep and @pol may be modified on 1090 * Register @pol with blkcg core. Might sleep and @pol may be modified on
1094 * successful registration. Returns 0 on success and -errno on failure. 1091 * successful registration. Returns 0 on success and -errno on failure.
1095 */ 1092 */
1096int __init blkcg_policy_register(struct blkcg_policy *pol) 1093int blkcg_policy_register(struct blkcg_policy *pol)
1097{ 1094{
1098 int i, ret; 1095 int i, ret;
1099 1096
diff --git a/block/blk-cgroup.h b/block/blk-cgroup.h
index cbb7f943f78a..d3fd7aa3d2a3 100644
--- a/block/blk-cgroup.h
+++ b/block/blk-cgroup.h
@@ -18,6 +18,7 @@
18#include <linux/seq_file.h> 18#include <linux/seq_file.h>
19#include <linux/radix-tree.h> 19#include <linux/radix-tree.h>
20#include <linux/blkdev.h> 20#include <linux/blkdev.h>
21#include <linux/atomic.h>
21 22
22/* Max limits for throttle policy */ 23/* Max limits for throttle policy */
23#define THROTL_IOPS_MAX UINT_MAX 24#define THROTL_IOPS_MAX UINT_MAX
@@ -104,7 +105,7 @@ struct blkcg_gq {
104 struct request_list rl; 105 struct request_list rl;
105 106
106 /* reference count */ 107 /* reference count */
107 int refcnt; 108 atomic_t refcnt;
108 109
109 /* is this blkg online? protected by both blkcg and q locks */ 110 /* is this blkg online? protected by both blkcg and q locks */
110 bool online; 111 bool online;
@@ -145,7 +146,7 @@ void blkcg_drain_queue(struct request_queue *q);
145void blkcg_exit_queue(struct request_queue *q); 146void blkcg_exit_queue(struct request_queue *q);
146 147
147/* Blkio controller policy registration */ 148/* Blkio controller policy registration */
148int __init blkcg_policy_register(struct blkcg_policy *pol); 149int blkcg_policy_register(struct blkcg_policy *pol);
149void blkcg_policy_unregister(struct blkcg_policy *pol); 150void blkcg_policy_unregister(struct blkcg_policy *pol);
150int blkcg_activate_policy(struct request_queue *q, 151int blkcg_activate_policy(struct request_queue *q,
151 const struct blkcg_policy *pol); 152 const struct blkcg_policy *pol);
@@ -257,13 +258,12 @@ static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
257 * blkg_get - get a blkg reference 258 * blkg_get - get a blkg reference
258 * @blkg: blkg to get 259 * @blkg: blkg to get
259 * 260 *
260 * The caller should be holding queue_lock and an existing reference. 261 * The caller should be holding an existing reference.
261 */ 262 */
262static inline void blkg_get(struct blkcg_gq *blkg) 263static inline void blkg_get(struct blkcg_gq *blkg)
263{ 264{
264 lockdep_assert_held(blkg->q->queue_lock); 265 WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
265 WARN_ON_ONCE(!blkg->refcnt); 266 atomic_inc(&blkg->refcnt);
266 blkg->refcnt++;
267} 267}
268 268
269void __blkg_release_rcu(struct rcu_head *rcu); 269void __blkg_release_rcu(struct rcu_head *rcu);
@@ -271,14 +271,11 @@ void __blkg_release_rcu(struct rcu_head *rcu);
271/** 271/**
272 * blkg_put - put a blkg reference 272 * blkg_put - put a blkg reference
273 * @blkg: blkg to put 273 * @blkg: blkg to put
274 *
275 * The caller should be holding queue_lock.
276 */ 274 */
277static inline void blkg_put(struct blkcg_gq *blkg) 275static inline void blkg_put(struct blkcg_gq *blkg)
278{ 276{
279 lockdep_assert_held(blkg->q->queue_lock); 277 WARN_ON_ONCE(atomic_read(&blkg->refcnt) <= 0);
280 WARN_ON_ONCE(blkg->refcnt <= 0); 278 if (atomic_dec_and_test(&blkg->refcnt))
281 if (!--blkg->refcnt)
282 call_rcu(&blkg->rcu_head, __blkg_release_rcu); 279 call_rcu(&blkg->rcu_head, __blkg_release_rcu);
283} 280}
284 281
@@ -580,7 +577,7 @@ static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { ret
580static inline int blkcg_init_queue(struct request_queue *q) { return 0; } 577static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
581static inline void blkcg_drain_queue(struct request_queue *q) { } 578static inline void blkcg_drain_queue(struct request_queue *q) { }
582static inline void blkcg_exit_queue(struct request_queue *q) { } 579static inline void blkcg_exit_queue(struct request_queue *q) { }
583static inline int __init blkcg_policy_register(struct blkcg_policy *pol) { return 0; } 580static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
584static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { } 581static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
585static inline int blkcg_activate_policy(struct request_queue *q, 582static inline int blkcg_activate_policy(struct request_queue *q,
586 const struct blkcg_policy *pol) { return 0; } 583 const struct blkcg_policy *pol) { return 0; }
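The blkcg change converts the reference count to an atomic_t so blkg_get()/blkg_put() no longer need queue_lock, with the final put deferring the release. A minimal user-space sketch of that pattern using C11 atomics (obj_release() stands in for the call_rcu() release path):

#include <stdatomic.h>
#include <stdio.h>

struct obj {
	atomic_int refcnt;
};

static void obj_release(struct obj *o)
{
	printf("released\n");	/* stands in for call_rcu(..., __blkg_release_rcu) */
}

static void obj_get(struct obj *o)
{
	atomic_fetch_add(&o->refcnt, 1);
}

static void obj_put(struct obj *o)
{
	/* fetch_sub returns the old value: 1 means we dropped the last ref */
	if (atomic_fetch_sub(&o->refcnt, 1) == 1)
		obj_release(o);
}

int main(void)
{
	struct obj o = { .refcnt = 1 };

	obj_get(&o);
	obj_put(&o);
	obj_put(&o);	/* drops the last reference and releases */
	return 0;
}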
diff --git a/block/blk-merge.c b/block/blk-merge.c
index b3bf0df0f4c2..54535831f1e1 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -568,6 +568,8 @@ int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
568 568
569bool blk_rq_merge_ok(struct request *rq, struct bio *bio) 569bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
570{ 570{
571 struct request_queue *q = rq->q;
572
571 if (!rq_mergeable(rq) || !bio_mergeable(bio)) 573 if (!rq_mergeable(rq) || !bio_mergeable(bio))
572 return false; 574 return false;
573 575
@@ -591,6 +593,14 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
591 !blk_write_same_mergeable(rq->bio, bio)) 593 !blk_write_same_mergeable(rq->bio, bio))
592 return false; 594 return false;
593 595
596 if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS)) {
597 struct bio_vec *bprev;
598
599 bprev = &rq->biotail->bi_io_vec[bio->bi_vcnt - 1];
600 if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset))
601 return false;
602 }
603
594 return true; 604 return true;
595} 605}
596 606
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 0ef2dc7f01bf..ad69ef657e85 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -878,7 +878,7 @@ void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
878 clear_bit(BLK_MQ_S_STOPPED, &hctx->state); 878 clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
879 879
880 preempt_disable(); 880 preempt_disable();
881 __blk_mq_run_hw_queue(hctx); 881 blk_mq_run_hw_queue(hctx, false);
882 preempt_enable(); 882 preempt_enable();
883} 883}
884EXPORT_SYMBOL(blk_mq_start_hw_queue); 884EXPORT_SYMBOL(blk_mq_start_hw_queue);
diff --git a/block/elevator.c b/block/elevator.c
index 34bded18910e..24c28b659bb3 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -825,7 +825,7 @@ void elv_unregister_queue(struct request_queue *q)
825} 825}
826EXPORT_SYMBOL(elv_unregister_queue); 826EXPORT_SYMBOL(elv_unregister_queue);
827 827
828int __init elv_register(struct elevator_type *e) 828int elv_register(struct elevator_type *e)
829{ 829{
830 char *def = ""; 830 char *def = "";
831 831
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index c67f6f5ad611..36b0e61f9c09 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -30,6 +30,10 @@
30#include <linux/types.h> 30#include <linux/types.h>
31#include <linux/dmi.h> 31#include <linux/dmi.h>
32#include <linux/delay.h> 32#include <linux/delay.h>
33#ifdef CONFIG_ACPI_PROCFS_POWER
34#include <linux/proc_fs.h>
35#include <linux/seq_file.h>
36#endif
33#include <linux/platform_device.h> 37#include <linux/platform_device.h>
34#include <linux/power_supply.h> 38#include <linux/power_supply.h>
35#include <linux/acpi.h> 39#include <linux/acpi.h>
@@ -52,6 +56,7 @@ MODULE_AUTHOR("Paul Diefenbaugh");
52MODULE_DESCRIPTION("ACPI AC Adapter Driver"); 56MODULE_DESCRIPTION("ACPI AC Adapter Driver");
53MODULE_LICENSE("GPL"); 57MODULE_LICENSE("GPL");
54 58
59
55static int acpi_ac_add(struct acpi_device *device); 60static int acpi_ac_add(struct acpi_device *device);
56static int acpi_ac_remove(struct acpi_device *device); 61static int acpi_ac_remove(struct acpi_device *device);
57static void acpi_ac_notify(struct acpi_device *device, u32 event); 62static void acpi_ac_notify(struct acpi_device *device, u32 event);
@@ -67,6 +72,13 @@ static int acpi_ac_resume(struct device *dev);
67#endif 72#endif
68static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume); 73static SIMPLE_DEV_PM_OPS(acpi_ac_pm, NULL, acpi_ac_resume);
69 74
75#ifdef CONFIG_ACPI_PROCFS_POWER
76extern struct proc_dir_entry *acpi_lock_ac_dir(void);
77extern void *acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir);
78static int acpi_ac_open_fs(struct inode *inode, struct file *file);
79#endif
80
81
70static int ac_sleep_before_get_state_ms; 82static int ac_sleep_before_get_state_ms;
71 83
72static struct acpi_driver acpi_ac_driver = { 84static struct acpi_driver acpi_ac_driver = {
@@ -91,6 +103,16 @@ struct acpi_ac {
91 103
92#define to_acpi_ac(x) container_of(x, struct acpi_ac, charger) 104#define to_acpi_ac(x) container_of(x, struct acpi_ac, charger)
93 105
106#ifdef CONFIG_ACPI_PROCFS_POWER
107static const struct file_operations acpi_ac_fops = {
108 .owner = THIS_MODULE,
109 .open = acpi_ac_open_fs,
110 .read = seq_read,
111 .llseek = seq_lseek,
112 .release = single_release,
113};
114#endif
115
94/* -------------------------------------------------------------------------- 116/* --------------------------------------------------------------------------
95 AC Adapter Management 117 AC Adapter Management
96 -------------------------------------------------------------------------- */ 118 -------------------------------------------------------------------------- */
@@ -143,6 +165,83 @@ static enum power_supply_property ac_props[] = {
143 POWER_SUPPLY_PROP_ONLINE, 165 POWER_SUPPLY_PROP_ONLINE,
144}; 166};
145 167
168#ifdef CONFIG_ACPI_PROCFS_POWER
169/* --------------------------------------------------------------------------
170 FS Interface (/proc)
171 -------------------------------------------------------------------------- */
172
173static struct proc_dir_entry *acpi_ac_dir;
174
175static int acpi_ac_seq_show(struct seq_file *seq, void *offset)
176{
177 struct acpi_ac *ac = seq->private;
178
179
180 if (!ac)
181 return 0;
182
183 if (acpi_ac_get_state(ac)) {
184 seq_puts(seq, "ERROR: Unable to read AC Adapter state\n");
185 return 0;
186 }
187
188 seq_puts(seq, "state: ");
189 switch (ac->state) {
190 case ACPI_AC_STATUS_OFFLINE:
191 seq_puts(seq, "off-line\n");
192 break;
193 case ACPI_AC_STATUS_ONLINE:
194 seq_puts(seq, "on-line\n");
195 break;
196 default:
197 seq_puts(seq, "unknown\n");
198 break;
199 }
200
201 return 0;
202}
203
204static int acpi_ac_open_fs(struct inode *inode, struct file *file)
205{
206 return single_open(file, acpi_ac_seq_show, PDE_DATA(inode));
207}
208
209static int acpi_ac_add_fs(struct acpi_ac *ac)
210{
211 struct proc_dir_entry *entry = NULL;
212
213 printk(KERN_WARNING PREFIX "Deprecated procfs I/F for AC is loaded,"
214 " please retry with CONFIG_ACPI_PROCFS_POWER cleared\n");
215 if (!acpi_device_dir(ac->device)) {
216 acpi_device_dir(ac->device) =
217 proc_mkdir(acpi_device_bid(ac->device), acpi_ac_dir);
218 if (!acpi_device_dir(ac->device))
219 return -ENODEV;
220 }
221
222 /* 'state' [R] */
223 entry = proc_create_data(ACPI_AC_FILE_STATE,
224 S_IRUGO, acpi_device_dir(ac->device),
225 &acpi_ac_fops, ac);
226 if (!entry)
227 return -ENODEV;
228 return 0;
229}
230
231static int acpi_ac_remove_fs(struct acpi_ac *ac)
232{
233
234 if (acpi_device_dir(ac->device)) {
235 remove_proc_entry(ACPI_AC_FILE_STATE,
236 acpi_device_dir(ac->device));
237 remove_proc_entry(acpi_device_bid(ac->device), acpi_ac_dir);
238 acpi_device_dir(ac->device) = NULL;
239 }
240
241 return 0;
242}
243#endif
244
146/* -------------------------------------------------------------------------- 245/* --------------------------------------------------------------------------
147 Driver Model 246 Driver Model
148 -------------------------------------------------------------------------- */ 247 -------------------------------------------------------------------------- */
@@ -243,6 +342,11 @@ static int acpi_ac_add(struct acpi_device *device)
243 goto end; 342 goto end;
244 343
245 ac->charger.name = acpi_device_bid(device); 344 ac->charger.name = acpi_device_bid(device);
345#ifdef CONFIG_ACPI_PROCFS_POWER
346 result = acpi_ac_add_fs(ac);
347 if (result)
348 goto end;
349#endif
246 ac->charger.type = POWER_SUPPLY_TYPE_MAINS; 350 ac->charger.type = POWER_SUPPLY_TYPE_MAINS;
247 ac->charger.properties = ac_props; 351 ac->charger.properties = ac_props;
248 ac->charger.num_properties = ARRAY_SIZE(ac_props); 352 ac->charger.num_properties = ARRAY_SIZE(ac_props);
@@ -258,8 +362,12 @@ static int acpi_ac_add(struct acpi_device *device)
258 ac->battery_nb.notifier_call = acpi_ac_battery_notify; 362 ac->battery_nb.notifier_call = acpi_ac_battery_notify;
259 register_acpi_notifier(&ac->battery_nb); 363 register_acpi_notifier(&ac->battery_nb);
260end: 364end:
261 if (result) 365 if (result) {
366#ifdef CONFIG_ACPI_PROCFS_POWER
367 acpi_ac_remove_fs(ac);
368#endif
262 kfree(ac); 369 kfree(ac);
370 }
263 371
264 dmi_check_system(ac_dmi_table); 372 dmi_check_system(ac_dmi_table);
265 return result; 373 return result;
@@ -303,6 +411,10 @@ static int acpi_ac_remove(struct acpi_device *device)
303 power_supply_unregister(&ac->charger); 411 power_supply_unregister(&ac->charger);
304 unregister_acpi_notifier(&ac->battery_nb); 412 unregister_acpi_notifier(&ac->battery_nb);
305 413
414#ifdef CONFIG_ACPI_PROCFS_POWER
415 acpi_ac_remove_fs(ac);
416#endif
417
306 kfree(ac); 418 kfree(ac);
307 419
308 return 0; 420 return 0;
@@ -315,9 +427,20 @@ static int __init acpi_ac_init(void)
315 if (acpi_disabled) 427 if (acpi_disabled)
316 return -ENODEV; 428 return -ENODEV;
317 429
430#ifdef CONFIG_ACPI_PROCFS_POWER
431 acpi_ac_dir = acpi_lock_ac_dir();
432 if (!acpi_ac_dir)
433 return -ENODEV;
434#endif
435
436
318 result = acpi_bus_register_driver(&acpi_ac_driver); 437 result = acpi_bus_register_driver(&acpi_ac_driver);
319 if (result < 0) 438 if (result < 0) {
439#ifdef CONFIG_ACPI_PROCFS_POWER
440 acpi_unlock_ac_dir(acpi_ac_dir);
441#endif
320 return -ENODEV; 442 return -ENODEV;
443 }
321 444
322 return 0; 445 return 0;
323} 446}
@@ -325,6 +448,9 @@ static int __init acpi_ac_init(void)
325static void __exit acpi_ac_exit(void) 448static void __exit acpi_ac_exit(void)
326{ 449{
327 acpi_bus_unregister_driver(&acpi_ac_driver); 450 acpi_bus_unregister_driver(&acpi_ac_driver);
451#ifdef CONFIG_ACPI_PROCFS_POWER
452 acpi_unlock_ac_dir(acpi_ac_dir);
453#endif
328} 454}
329module_init(acpi_ac_init); 455module_init(acpi_ac_init);
330module_exit(acpi_ac_exit); 456module_exit(acpi_ac_exit);
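The ac.c hunks above bring back the deprecated /proc/acpi/ac_adapter interface behind CONFIG_ACPI_PROCFS_POWER, built on proc_create_data() plus a seq_file show routine. Below is a minimal, self-contained sketch of that same procfs pattern; every demo_* name is hypothetical, and only the proc/seq_file calls mirror what the hunk uses.

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

/* Hypothetical device state, standing in for struct acpi_ac. */
struct demo_dev {
	int online;
};

static struct demo_dev demo;

static int demo_seq_show(struct seq_file *seq, void *offset)
{
	struct demo_dev *dev = seq->private;

	seq_printf(seq, "state: %s\n", dev->online ? "on-line" : "off-line");
	return 0;
}

static int demo_open_fs(struct inode *inode, struct file *file)
{
	/* PDE_DATA() returns the pointer handed to proc_create_data(). */
	return single_open(file, demo_seq_show, PDE_DATA(inode));
}

static const struct file_operations demo_fops = {
	.owner		= THIS_MODULE,
	.open		= demo_open_fs,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init demo_init(void)
{
	/* Create /proc/demo_state, readable by everyone. */
	if (!proc_create_data("demo_state", 0444, NULL, &demo_fops, &demo))
		return -ENOMEM;
	return 0;
}

static void __exit demo_exit(void)
{
	remove_proc_entry("demo_state", NULL);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Reading /proc/demo_state would then print a single "state:" line, much like the restored /proc/acpi/ac_adapter/<BID>/state file.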
diff --git a/drivers/acpi/acpi_pnp.c b/drivers/acpi/acpi_pnp.c
index 6703c1fd993a..4ddb0dca56f6 100644
--- a/drivers/acpi/acpi_pnp.c
+++ b/drivers/acpi/acpi_pnp.c
@@ -14,6 +14,8 @@
14#include <linux/module.h> 14#include <linux/module.h>
15 15
16static const struct acpi_device_id acpi_pnp_device_ids[] = { 16static const struct acpi_device_id acpi_pnp_device_ids[] = {
17 /* soc_button_array */
18 {"PNP0C40"},
17 /* pata_isapnp */ 19 /* pata_isapnp */
18 {"PNP0600"}, /* Generic ESDI/IDE/ATA compatible hard disk controller */ 20 {"PNP0600"}, /* Generic ESDI/IDE/ATA compatible hard disk controller */
19 /* floppy */ 21 /* floppy */
diff --git a/drivers/acpi/apei/Kconfig b/drivers/acpi/apei/Kconfig
index c4dac7150960..b0140c8fc733 100644
--- a/drivers/acpi/apei/Kconfig
+++ b/drivers/acpi/apei/Kconfig
@@ -1,9 +1,15 @@
1config HAVE_ACPI_APEI
2 bool
3
4config HAVE_ACPI_APEI_NMI
5 bool
6
1config ACPI_APEI 7config ACPI_APEI
2 bool "ACPI Platform Error Interface (APEI)" 8 bool "ACPI Platform Error Interface (APEI)"
3 select MISC_FILESYSTEMS 9 select MISC_FILESYSTEMS
4 select PSTORE 10 select PSTORE
5 select UEFI_CPER 11 select UEFI_CPER
6 depends on X86 12 depends on HAVE_ACPI_APEI
7 help 13 help
8 APEI allows to report errors (for example from the chipset) 14 APEI allows to report errors (for example from the chipset)
9 to the operating system. This improves NMI handling 15 to the operating system. This improves NMI handling
diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
index 8678dfe5366b..2cd7bdd6c8b3 100644
--- a/drivers/acpi/apei/apei-base.c
+++ b/drivers/acpi/apei/apei-base.c
@@ -745,6 +745,19 @@ struct dentry *apei_get_debugfs_dir(void)
745} 745}
746EXPORT_SYMBOL_GPL(apei_get_debugfs_dir); 746EXPORT_SYMBOL_GPL(apei_get_debugfs_dir);
747 747
748int __weak arch_apei_enable_cmcff(struct acpi_hest_header *hest_hdr,
749 void *data)
750{
751 return 1;
752}
753EXPORT_SYMBOL_GPL(arch_apei_enable_cmcff);
754
755void __weak arch_apei_report_mem_error(int sev,
756 struct cper_sec_mem_err *mem_err)
757{
758}
759EXPORT_SYMBOL_GPL(arch_apei_report_mem_error);
760
748int apei_osc_setup(void) 761int apei_osc_setup(void)
749{ 762{
750 static u8 whea_uuid_str[] = "ed855e0c-6c90-47bf-a62a-26de0fc5ad5c"; 763 static u8 whea_uuid_str[] = "ed855e0c-6c90-47bf-a62a-26de0fc5ad5c";
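The apei-base.c hunk introduces __weak defaults for arch_apei_enable_cmcff() and arch_apei_report_mem_error() so the generic APEI code no longer needs x86-only #ifdefs. A rough sketch of how an architecture might override them follows; it assumes the prototypes are exposed through <acpi/apei.h> as this series arranges, and the function bodies are purely illustrative, not the real x86 implementations.

#include <acpi/apei.h>
#include <linux/cper.h>
#include <linux/printk.h>

/*
 * A strong definition in arch code overrides the __weak default from
 * apei-base.c at link time; no registration step is involved. These
 * bodies are illustrative only.
 */
int arch_apei_enable_cmcff(struct acpi_hest_header *hest_hdr, void *data)
{
	pr_info("APEI: arch hook enabling firmware-first corrected errors\n");
	return 1;	/* non-zero: HEST parsing is finished */
}

void arch_apei_report_mem_error(int sev, struct cper_sec_mem_err *mem_err)
{
	pr_info("APEI: arch hook saw a memory error, severity %d\n", sev);
}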
diff --git a/drivers/acpi/apei/ghes.c b/drivers/acpi/apei/ghes.c
index dab7cb7349df..e05d84e7b06d 100644
--- a/drivers/acpi/apei/ghes.c
+++ b/drivers/acpi/apei/ghes.c
@@ -47,11 +47,11 @@
47#include <linux/genalloc.h> 47#include <linux/genalloc.h>
48#include <linux/pci.h> 48#include <linux/pci.h>
49#include <linux/aer.h> 49#include <linux/aer.h>
50#include <linux/nmi.h>
50 51
51#include <acpi/ghes.h> 52#include <acpi/ghes.h>
52#include <asm/mce.h> 53#include <acpi/apei.h>
53#include <asm/tlbflush.h> 54#include <asm/tlbflush.h>
54#include <asm/nmi.h>
55 55
56#include "apei-internal.h" 56#include "apei-internal.h"
57 57
@@ -86,8 +86,6 @@
86bool ghes_disable; 86bool ghes_disable;
87module_param_named(disable, ghes_disable, bool, 0); 87module_param_named(disable, ghes_disable, bool, 0);
88 88
89static int ghes_panic_timeout __read_mostly = 30;
90
91/* 89/*
92 * All error sources notified with SCI shares one notifier function, 90 * All error sources notified with SCI shares one notifier function,
93 * so they need to be linked and checked one by one. This is applied 91 * so they need to be linked and checked one by one. This is applied
@@ -97,16 +95,9 @@ static int ghes_panic_timeout __read_mostly = 30;
97 * list changing, not for traversing. 95 * list changing, not for traversing.
98 */ 96 */
99static LIST_HEAD(ghes_sci); 97static LIST_HEAD(ghes_sci);
100static LIST_HEAD(ghes_nmi);
101static DEFINE_MUTEX(ghes_list_mutex); 98static DEFINE_MUTEX(ghes_list_mutex);
102 99
103/* 100/*
104 * NMI may be triggered on any CPU, so ghes_nmi_lock is used for
105 * mutual exclusion.
106 */
107static DEFINE_RAW_SPINLOCK(ghes_nmi_lock);
108
109/*
110 * Because the memory area used to transfer hardware error information 101 * Because the memory area used to transfer hardware error information
111 * from BIOS to Linux can be determined only in NMI, IRQ or timer 102 * from BIOS to Linux can be determined only in NMI, IRQ or timer
112 * handler, but general ioremap can not be used in atomic context, so 103 * handler, but general ioremap can not be used in atomic context, so
@@ -114,12 +105,16 @@ static DEFINE_RAW_SPINLOCK(ghes_nmi_lock);
114 */ 105 */
115 106
116/* 107/*
117 * Two virtual pages are used, one for NMI context, the other for 108 * Two virtual pages are used, one for IRQ/PROCESS context, the other for
118 * IRQ/PROCESS context 109 * NMI context (optionally).
119 */ 110 */
120#define GHES_IOREMAP_PAGES 2 111#ifdef CONFIG_HAVE_ACPI_APEI_NMI
121#define GHES_IOREMAP_NMI_PAGE(base) (base) 112#define GHES_IOREMAP_PAGES 2
122#define GHES_IOREMAP_IRQ_PAGE(base) ((base) + PAGE_SIZE) 113#else
114#define GHES_IOREMAP_PAGES 1
115#endif
116#define GHES_IOREMAP_IRQ_PAGE(base) (base)
117#define GHES_IOREMAP_NMI_PAGE(base) ((base) + PAGE_SIZE)
123 118
124/* virtual memory area for atomic ioremap */ 119/* virtual memory area for atomic ioremap */
125static struct vm_struct *ghes_ioremap_area; 120static struct vm_struct *ghes_ioremap_area;
@@ -130,18 +125,8 @@ static struct vm_struct *ghes_ioremap_area;
130static DEFINE_RAW_SPINLOCK(ghes_ioremap_lock_nmi); 125static DEFINE_RAW_SPINLOCK(ghes_ioremap_lock_nmi);
131static DEFINE_SPINLOCK(ghes_ioremap_lock_irq); 126static DEFINE_SPINLOCK(ghes_ioremap_lock_irq);
132 127
133/*
134 * printk is not safe in NMI context. So in NMI handler, we allocate
135 * required memory from lock-less memory allocator
136 * (ghes_estatus_pool), save estatus into it, put them into lock-less
137 * list (ghes_estatus_llist), then delay printk into IRQ context via
138 * irq_work (ghes_proc_irq_work). ghes_estatus_size_request record
139 * required pool size by all NMI error source.
140 */
141static struct gen_pool *ghes_estatus_pool; 128static struct gen_pool *ghes_estatus_pool;
142static unsigned long ghes_estatus_pool_size_request; 129static unsigned long ghes_estatus_pool_size_request;
143static struct llist_head ghes_estatus_llist;
144static struct irq_work ghes_proc_irq_work;
145 130
146struct ghes_estatus_cache *ghes_estatus_caches[GHES_ESTATUS_CACHES_SIZE]; 131struct ghes_estatus_cache *ghes_estatus_caches[GHES_ESTATUS_CACHES_SIZE];
147static atomic_t ghes_estatus_cache_alloced; 132static atomic_t ghes_estatus_cache_alloced;
@@ -192,7 +177,7 @@ static void ghes_iounmap_nmi(void __iomem *vaddr_ptr)
192 177
193 BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_NMI_PAGE(base)); 178 BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_NMI_PAGE(base));
194 unmap_kernel_range_noflush(vaddr, PAGE_SIZE); 179 unmap_kernel_range_noflush(vaddr, PAGE_SIZE);
195 __flush_tlb_one(vaddr); 180 arch_apei_flush_tlb_one(vaddr);
196} 181}
197 182
198static void ghes_iounmap_irq(void __iomem *vaddr_ptr) 183static void ghes_iounmap_irq(void __iomem *vaddr_ptr)
@@ -202,7 +187,7 @@ static void ghes_iounmap_irq(void __iomem *vaddr_ptr)
202 187
203 BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_IRQ_PAGE(base)); 188 BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_IRQ_PAGE(base));
204 unmap_kernel_range_noflush(vaddr, PAGE_SIZE); 189 unmap_kernel_range_noflush(vaddr, PAGE_SIZE);
205 __flush_tlb_one(vaddr); 190 arch_apei_flush_tlb_one(vaddr);
206} 191}
207 192
208static int ghes_estatus_pool_init(void) 193static int ghes_estatus_pool_init(void)
@@ -249,11 +234,6 @@ static int ghes_estatus_pool_expand(unsigned long len)
249 return 0; 234 return 0;
250} 235}
251 236
252static void ghes_estatus_pool_shrink(unsigned long len)
253{
254 ghes_estatus_pool_size_request -= PAGE_ALIGN(len);
255}
256
257static struct ghes *ghes_new(struct acpi_hest_generic *generic) 237static struct ghes *ghes_new(struct acpi_hest_generic *generic)
258{ 238{
259 struct ghes *ghes; 239 struct ghes *ghes;
@@ -455,9 +435,7 @@ static void ghes_do_proc(struct ghes *ghes,
455 mem_err = (struct cper_sec_mem_err *)(gdata+1); 435 mem_err = (struct cper_sec_mem_err *)(gdata+1);
456 ghes_edac_report_mem_error(ghes, sev, mem_err); 436 ghes_edac_report_mem_error(ghes, sev, mem_err);
457 437
458#ifdef CONFIG_X86_MCE 438 arch_apei_report_mem_error(sev, mem_err);
459 apei_mce_report_mem_error(sev, mem_err);
460#endif
461 ghes_handle_memory_failure(gdata, sev); 439 ghes_handle_memory_failure(gdata, sev);
462 } 440 }
463#ifdef CONFIG_ACPI_APEI_PCIEAER 441#ifdef CONFIG_ACPI_APEI_PCIEAER
@@ -734,6 +712,32 @@ static int ghes_notify_sci(struct notifier_block *this,
734 return ret; 712 return ret;
735} 713}
736 714
715static struct notifier_block ghes_notifier_sci = {
716 .notifier_call = ghes_notify_sci,
717};
718
719#ifdef CONFIG_HAVE_ACPI_APEI_NMI
720/*
721 * printk is not safe in NMI context. So in NMI handler, we allocate
722 * required memory from lock-less memory allocator
723 * (ghes_estatus_pool), save estatus into it, put them into lock-less
724 * list (ghes_estatus_llist), then delay printk into IRQ context via
 725 * irq_work (ghes_proc_irq_work). ghes_estatus_pool_size_request records
 726 * the pool size required by all NMI error sources.
727 */
728static struct llist_head ghes_estatus_llist;
729static struct irq_work ghes_proc_irq_work;
730
731/*
732 * NMI may be triggered on any CPU, so ghes_nmi_lock is used for
733 * mutual exclusion.
734 */
735static DEFINE_RAW_SPINLOCK(ghes_nmi_lock);
736
737static LIST_HEAD(ghes_nmi);
738
739static int ghes_panic_timeout __read_mostly = 30;
740
737static struct llist_node *llist_nodes_reverse(struct llist_node *llnode) 741static struct llist_node *llist_nodes_reverse(struct llist_node *llnode)
738{ 742{
739 struct llist_node *next, *tail = NULL; 743 struct llist_node *next, *tail = NULL;
@@ -877,10 +881,6 @@ out:
877 return ret; 881 return ret;
878} 882}
879 883
880static struct notifier_block ghes_notifier_sci = {
881 .notifier_call = ghes_notify_sci,
882};
883
884static unsigned long ghes_esource_prealloc_size( 884static unsigned long ghes_esource_prealloc_size(
885 const struct acpi_hest_generic *generic) 885 const struct acpi_hest_generic *generic)
886{ 886{
@@ -896,11 +896,71 @@ static unsigned long ghes_esource_prealloc_size(
896 return prealloc_size; 896 return prealloc_size;
897} 897}
898 898
899static void ghes_estatus_pool_shrink(unsigned long len)
900{
901 ghes_estatus_pool_size_request -= PAGE_ALIGN(len);
902}
903
904static void ghes_nmi_add(struct ghes *ghes)
905{
906 unsigned long len;
907
908 len = ghes_esource_prealloc_size(ghes->generic);
909 ghes_estatus_pool_expand(len);
910 mutex_lock(&ghes_list_mutex);
911 if (list_empty(&ghes_nmi))
912 register_nmi_handler(NMI_LOCAL, ghes_notify_nmi, 0, "ghes");
913 list_add_rcu(&ghes->list, &ghes_nmi);
914 mutex_unlock(&ghes_list_mutex);
915}
916
917static void ghes_nmi_remove(struct ghes *ghes)
918{
919 unsigned long len;
920
921 mutex_lock(&ghes_list_mutex);
922 list_del_rcu(&ghes->list);
923 if (list_empty(&ghes_nmi))
924 unregister_nmi_handler(NMI_LOCAL, "ghes");
925 mutex_unlock(&ghes_list_mutex);
926 /*
927 * To synchronize with NMI handler, ghes can only be
928 * freed after NMI handler finishes.
929 */
930 synchronize_rcu();
931 len = ghes_esource_prealloc_size(ghes->generic);
932 ghes_estatus_pool_shrink(len);
933}
934
935static void ghes_nmi_init_cxt(void)
936{
937 init_irq_work(&ghes_proc_irq_work, ghes_proc_in_irq);
938}
939#else /* CONFIG_HAVE_ACPI_APEI_NMI */
940static inline void ghes_nmi_add(struct ghes *ghes)
941{
942 pr_err(GHES_PFX "ID: %d, trying to add NMI notification which is not supported!\n",
943 ghes->generic->header.source_id);
944 BUG();
945}
946
947static inline void ghes_nmi_remove(struct ghes *ghes)
948{
949 pr_err(GHES_PFX "ID: %d, trying to remove NMI notification which is not supported!\n",
950 ghes->generic->header.source_id);
951 BUG();
952}
953
954static inline void ghes_nmi_init_cxt(void)
955{
956}
957#endif /* CONFIG_HAVE_ACPI_APEI_NMI */
958
899static int ghes_probe(struct platform_device *ghes_dev) 959static int ghes_probe(struct platform_device *ghes_dev)
900{ 960{
901 struct acpi_hest_generic *generic; 961 struct acpi_hest_generic *generic;
902 struct ghes *ghes = NULL; 962 struct ghes *ghes = NULL;
903 unsigned long len; 963
904 int rc = -EINVAL; 964 int rc = -EINVAL;
905 965
906 generic = *(struct acpi_hest_generic **)ghes_dev->dev.platform_data; 966 generic = *(struct acpi_hest_generic **)ghes_dev->dev.platform_data;
@@ -911,7 +971,13 @@ static int ghes_probe(struct platform_device *ghes_dev)
911 case ACPI_HEST_NOTIFY_POLLED: 971 case ACPI_HEST_NOTIFY_POLLED:
912 case ACPI_HEST_NOTIFY_EXTERNAL: 972 case ACPI_HEST_NOTIFY_EXTERNAL:
913 case ACPI_HEST_NOTIFY_SCI: 973 case ACPI_HEST_NOTIFY_SCI:
974 break;
914 case ACPI_HEST_NOTIFY_NMI: 975 case ACPI_HEST_NOTIFY_NMI:
976 if (!IS_ENABLED(CONFIG_HAVE_ACPI_APEI_NMI)) {
977 pr_warn(GHES_PFX "Generic hardware error source: %d notified via NMI interrupt is not supported!\n",
978 generic->header.source_id);
979 goto err;
980 }
915 break; 981 break;
916 case ACPI_HEST_NOTIFY_LOCAL: 982 case ACPI_HEST_NOTIFY_LOCAL:
917 pr_warning(GHES_PFX "Generic hardware error source: %d notified via local interrupt is not supported!\n", 983 pr_warning(GHES_PFX "Generic hardware error source: %d notified via local interrupt is not supported!\n",
@@ -972,14 +1038,7 @@ static int ghes_probe(struct platform_device *ghes_dev)
972 mutex_unlock(&ghes_list_mutex); 1038 mutex_unlock(&ghes_list_mutex);
973 break; 1039 break;
974 case ACPI_HEST_NOTIFY_NMI: 1040 case ACPI_HEST_NOTIFY_NMI:
975 len = ghes_esource_prealloc_size(generic); 1041 ghes_nmi_add(ghes);
976 ghes_estatus_pool_expand(len);
977 mutex_lock(&ghes_list_mutex);
978 if (list_empty(&ghes_nmi))
979 register_nmi_handler(NMI_LOCAL, ghes_notify_nmi, 0,
980 "ghes");
981 list_add_rcu(&ghes->list, &ghes_nmi);
982 mutex_unlock(&ghes_list_mutex);
983 break; 1042 break;
984 default: 1043 default:
985 BUG(); 1044 BUG();
@@ -1001,7 +1060,6 @@ static int ghes_remove(struct platform_device *ghes_dev)
1001{ 1060{
1002 struct ghes *ghes; 1061 struct ghes *ghes;
1003 struct acpi_hest_generic *generic; 1062 struct acpi_hest_generic *generic;
1004 unsigned long len;
1005 1063
1006 ghes = platform_get_drvdata(ghes_dev); 1064 ghes = platform_get_drvdata(ghes_dev);
1007 generic = ghes->generic; 1065 generic = ghes->generic;
@@ -1022,18 +1080,7 @@ static int ghes_remove(struct platform_device *ghes_dev)
1022 mutex_unlock(&ghes_list_mutex); 1080 mutex_unlock(&ghes_list_mutex);
1023 break; 1081 break;
1024 case ACPI_HEST_NOTIFY_NMI: 1082 case ACPI_HEST_NOTIFY_NMI:
1025 mutex_lock(&ghes_list_mutex); 1083 ghes_nmi_remove(ghes);
1026 list_del_rcu(&ghes->list);
1027 if (list_empty(&ghes_nmi))
1028 unregister_nmi_handler(NMI_LOCAL, "ghes");
1029 mutex_unlock(&ghes_list_mutex);
1030 /*
1031 * To synchronize with NMI handler, ghes can only be
1032 * freed after NMI handler finishes.
1033 */
1034 synchronize_rcu();
1035 len = ghes_esource_prealloc_size(generic);
1036 ghes_estatus_pool_shrink(len);
1037 break; 1084 break;
1038 default: 1085 default:
1039 BUG(); 1086 BUG();
@@ -1077,7 +1124,7 @@ static int __init ghes_init(void)
1077 return -EINVAL; 1124 return -EINVAL;
1078 } 1125 }
1079 1126
1080 init_irq_work(&ghes_proc_irq_work, ghes_proc_in_irq); 1127 ghes_nmi_init_cxt();
1081 1128
1082 rc = ghes_ioremap_init(); 1129 rc = ghes_ioremap_init();
1083 if (rc) 1130 if (rc)
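The ghes.c rework concentrates every NMI-specific piece (estatus llist, irq_work, the ghes_nmi list and lock, the panic timeout) behind CONFIG_HAVE_ACPI_APEI_NMI, with BUG()-ing stubs when the option is off and an IS_ENABLED() guard in ghes_probe() that rejects NMI-notified sources up front. A condensed sketch of that compile-time dispatch pattern, using hypothetical demo_* names rather than the driver's:

#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/kconfig.h>
#include <linux/printk.h>

enum demo_notify { DEMO_NOTIFY_SCI, DEMO_NOTIFY_NMI };

#ifdef CONFIG_HAVE_ACPI_APEI_NMI
static void demo_nmi_add(void)
{
	/* Real NMI handler registration would live here. */
	pr_info("demo: NMI notification registered\n");
}
#else
static inline void demo_nmi_add(void)
{
	/* Unreachable: probe rejects NMI sources when support is off. */
	BUG();
}
#endif

static int demo_probe(enum demo_notify notify)
{
	switch (notify) {
	case DEMO_NOTIFY_NMI:
		if (!IS_ENABLED(CONFIG_HAVE_ACPI_APEI_NMI)) {
			pr_warn("demo: NMI-notified source not supported\n");
			return -ENODEV;
		}
		demo_nmi_add();
		break;
	case DEMO_NOTIFY_SCI:
		pr_info("demo: SCI notification registered\n");
		break;
	}
	return 0;
}

The guard in the probe path is what keeps the BUG() stubs unreachable on architectures that do not select the NMI option.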
diff --git a/drivers/acpi/apei/hest.c b/drivers/acpi/apei/hest.c
index f5e37f32c71f..06e9b411a0a2 100644
--- a/drivers/acpi/apei/hest.c
+++ b/drivers/acpi/apei/hest.c
@@ -36,7 +36,6 @@
36#include <linux/io.h> 36#include <linux/io.h>
37#include <linux/platform_device.h> 37#include <linux/platform_device.h>
38#include <acpi/apei.h> 38#include <acpi/apei.h>
39#include <asm/mce.h>
40 39
41#include "apei-internal.h" 40#include "apei-internal.h"
42 41
@@ -128,33 +127,7 @@ EXPORT_SYMBOL_GPL(apei_hest_parse);
128 */ 127 */
129static int __init hest_parse_cmc(struct acpi_hest_header *hest_hdr, void *data) 128static int __init hest_parse_cmc(struct acpi_hest_header *hest_hdr, void *data)
130{ 129{
131#ifdef CONFIG_X86_MCE 130 return arch_apei_enable_cmcff(hest_hdr, data);
132 int i;
133 struct acpi_hest_ia_corrected *cmc;
134 struct acpi_hest_ia_error_bank *mc_bank;
135
136 if (hest_hdr->type != ACPI_HEST_TYPE_IA32_CORRECTED_CHECK)
137 return 0;
138
139 cmc = (struct acpi_hest_ia_corrected *)hest_hdr;
140 if (!cmc->enabled)
141 return 0;
142
143 /*
144 * We expect HEST to provide a list of MC banks that report errors
145 * in firmware first mode. Otherwise, return non-zero value to
146 * indicate that we are done parsing HEST.
147 */
148 if (!(cmc->flags & ACPI_HEST_FIRMWARE_FIRST) || !cmc->num_hardware_banks)
149 return 1;
150
151 pr_info(HEST_PFX "Enabling Firmware First mode for corrected errors.\n");
152
153 mc_bank = (struct acpi_hest_ia_error_bank *)(cmc + 1);
154 for (i = 0; i < cmc->num_hardware_banks; i++, mc_bank++)
155 mce_disable_bank(mc_bank->bank_number);
156#endif
157 return 1;
158} 131}
159 132
160struct ghes_arr { 133struct ghes_arr {
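With the arch hooks in place, hest_parse_cmc() collapses to a single delegation; the whole firmware-first MC-bank walk that used to live here moves into the x86 implementation of arch_apei_enable_cmcff(), which the sketch after the apei-base.c hunk approximates.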
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 0d7116f34b95..130f513e08c9 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -35,6 +35,7 @@
35#include <linux/delay.h> 35#include <linux/delay.h>
36#include <linux/slab.h> 36#include <linux/slab.h>
37#include <linux/suspend.h> 37#include <linux/suspend.h>
38#include <linux/delay.h>
38#include <asm/unaligned.h> 39#include <asm/unaligned.h>
39 40
40#ifdef CONFIG_ACPI_PROCFS_POWER 41#ifdef CONFIG_ACPI_PROCFS_POWER
@@ -534,6 +535,20 @@ static int acpi_battery_get_state(struct acpi_battery *battery)
534 " invalid.\n"); 535 " invalid.\n");
535 } 536 }
536 537
538 /*
539 * When fully charged, some batteries wrongly report
540 * capacity_now = design_capacity instead of = full_charge_capacity
541 */
542 if (battery->capacity_now > battery->full_charge_capacity
543 && battery->full_charge_capacity != ACPI_BATTERY_VALUE_UNKNOWN) {
544 battery->capacity_now = battery->full_charge_capacity;
545 if (battery->capacity_now != battery->design_capacity)
546 printk_once(KERN_WARNING FW_BUG
547 "battery: reported current charge level (%d) "
548 "is higher than reported maximum charge level (%d).\n",
549 battery->capacity_now, battery->full_charge_capacity);
550 }
551
537 if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags) 552 if (test_bit(ACPI_BATTERY_QUIRK_PERCENTAGE_CAPACITY, &battery->flags)
538 && battery->capacity_now >= 0 && battery->capacity_now <= 100) 553 && battery->capacity_now >= 0 && battery->capacity_now <= 100)
539 battery->capacity_now = (battery->capacity_now * 554 battery->capacity_now = (battery->capacity_now *
@@ -1151,6 +1166,28 @@ static struct dmi_system_id bat_dmi_table[] = {
1151 {}, 1166 {},
1152}; 1167};
1153 1168
1169/*
 1170 * Some machines' ECs (e.g. Lenovo Z480) are not stable
 1171 * during boot, which can make the battery driver fail to
 1172 * probe because battery information cannot be read from
 1173 * the EC on the first attempt. After several retries the
 1174 * read usually succeeds, so retry here with a 20 ms sleep
 1175 * between attempts.
1176 */
1177static int acpi_battery_update_retry(struct acpi_battery *battery)
1178{
1179 int retry, ret;
1180
1181 for (retry = 5; retry; retry--) {
1182 ret = acpi_battery_update(battery, false);
1183 if (!ret)
1184 break;
1185
1186 msleep(20);
1187 }
1188 return ret;
1189}
1190
1154static int acpi_battery_add(struct acpi_device *device) 1191static int acpi_battery_add(struct acpi_device *device)
1155{ 1192{
1156 int result = 0; 1193 int result = 0;
@@ -1169,9 +1206,11 @@ static int acpi_battery_add(struct acpi_device *device)
1169 mutex_init(&battery->sysfs_lock); 1206 mutex_init(&battery->sysfs_lock);
1170 if (acpi_has_method(battery->device->handle, "_BIX")) 1207 if (acpi_has_method(battery->device->handle, "_BIX"))
1171 set_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags); 1208 set_bit(ACPI_BATTERY_XINFO_PRESENT, &battery->flags);
1172 result = acpi_battery_update(battery, false); 1209
1210 result = acpi_battery_update_retry(battery);
1173 if (result) 1211 if (result)
1174 goto fail; 1212 goto fail;
1213
1175#ifdef CONFIG_ACPI_PROCFS_POWER 1214#ifdef CONFIG_ACPI_PROCFS_POWER
1176 result = acpi_battery_add_fs(device); 1215 result = acpi_battery_add_fs(device);
1177#endif 1216#endif
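battery.c now wraps the first EC read in acpi_battery_update_retry(): five attempts with msleep(20) between them, because some ECs are not ready early in boot. The same bounded-retry shape in isolation, with demo_read() standing in for the flaky operation:

#include <linux/delay.h>
#include <linux/errno.h>

/* Stand-in for the flaky early EC read: fails twice, then succeeds. */
static int demo_read(void)
{
	static int calls;

	return ++calls < 3 ? -EAGAIN : 0;
}

static int demo_read_retry(void)
{
	int retry, ret = -EIO;

	/* Up to five attempts with a 20 ms sleep between failures. */
	for (retry = 5; retry; retry--) {
		ret = demo_read();
		if (!ret)
			break;
		msleep(20);
	}
	return ret;
}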
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index ad11ba4a412d..a66ab658abbc 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1,11 +1,14 @@
1/* 1/*
2 * ec.c - ACPI Embedded Controller Driver (v2.1) 2 * ec.c - ACPI Embedded Controller Driver (v2.2)
3 * 3 *
4 * Copyright (C) 2006-2008 Alexey Starikovskiy <astarikovskiy@suse.de> 4 * Copyright (C) 2001-2014 Intel Corporation
5 * Copyright (C) 2006 Denis Sadykov <denis.m.sadykov@intel.com> 5 * Author: 2014 Lv Zheng <lv.zheng@intel.com>
6 * Copyright (C) 2004 Luming Yu <luming.yu@intel.com> 6 * 2006, 2007 Alexey Starikovskiy <alexey.y.starikovskiy@intel.com>
7 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> 7 * 2006 Denis Sadykov <denis.m.sadykov@intel.com>
8 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> 8 * 2004 Luming Yu <luming.yu@intel.com>
9 * 2001, 2002 Andy Grover <andrew.grover@intel.com>
10 * 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
11 * Copyright (C) 2008 Alexey Starikovskiy <astarikovskiy@suse.de>
9 * 12 *
10 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 13 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
11 * 14 *
@@ -52,6 +55,7 @@
52/* EC status register */ 55/* EC status register */
53#define ACPI_EC_FLAG_OBF 0x01 /* Output buffer full */ 56#define ACPI_EC_FLAG_OBF 0x01 /* Output buffer full */
54#define ACPI_EC_FLAG_IBF 0x02 /* Input buffer full */ 57#define ACPI_EC_FLAG_IBF 0x02 /* Input buffer full */
58#define ACPI_EC_FLAG_CMD 0x08 /* Input buffer contains a command */
55#define ACPI_EC_FLAG_BURST 0x10 /* burst mode */ 59#define ACPI_EC_FLAG_BURST 0x10 /* burst mode */
56#define ACPI_EC_FLAG_SCI 0x20 /* EC-SCI occurred */ 60#define ACPI_EC_FLAG_SCI 0x20 /* EC-SCI occurred */
57 61
@@ -78,6 +82,9 @@ enum {
78 EC_FLAGS_BLOCKED, /* Transactions are blocked */ 82 EC_FLAGS_BLOCKED, /* Transactions are blocked */
79}; 83};
80 84
85#define ACPI_EC_COMMAND_POLL 0x01 /* Available for command byte */
86#define ACPI_EC_COMMAND_COMPLETE 0x02 /* Completed last byte */
87
81/* ec.c is compiled in acpi namespace so this shows up as acpi.ec_delay param */ 88/* ec.c is compiled in acpi namespace so this shows up as acpi.ec_delay param */
82static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY; 89static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY;
83module_param(ec_delay, uint, 0644); 90module_param(ec_delay, uint, 0644);
@@ -109,7 +116,7 @@ struct transaction {
109 u8 ri; 116 u8 ri;
110 u8 wlen; 117 u8 wlen;
111 u8 rlen; 118 u8 rlen;
112 bool done; 119 u8 flags;
113}; 120};
114 121
115struct acpi_ec *boot_ec, *first_ec; 122struct acpi_ec *boot_ec, *first_ec;
@@ -127,83 +134,104 @@ static int EC_FLAGS_CLEAR_ON_RESUME; /* Needs acpi_ec_clear() on boot/resume */
127static inline u8 acpi_ec_read_status(struct acpi_ec *ec) 134static inline u8 acpi_ec_read_status(struct acpi_ec *ec)
128{ 135{
129 u8 x = inb(ec->command_addr); 136 u8 x = inb(ec->command_addr);
130 pr_debug("---> status = 0x%2.2x\n", x); 137 pr_debug("EC_SC(R) = 0x%2.2x "
138 "SCI_EVT=%d BURST=%d CMD=%d IBF=%d OBF=%d\n",
139 x,
140 !!(x & ACPI_EC_FLAG_SCI),
141 !!(x & ACPI_EC_FLAG_BURST),
142 !!(x & ACPI_EC_FLAG_CMD),
143 !!(x & ACPI_EC_FLAG_IBF),
144 !!(x & ACPI_EC_FLAG_OBF));
131 return x; 145 return x;
132} 146}
133 147
134static inline u8 acpi_ec_read_data(struct acpi_ec *ec) 148static inline u8 acpi_ec_read_data(struct acpi_ec *ec)
135{ 149{
136 u8 x = inb(ec->data_addr); 150 u8 x = inb(ec->data_addr);
137 pr_debug("---> data = 0x%2.2x\n", x); 151 pr_debug("EC_DATA(R) = 0x%2.2x\n", x);
138 return x; 152 return x;
139} 153}
140 154
141static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command) 155static inline void acpi_ec_write_cmd(struct acpi_ec *ec, u8 command)
142{ 156{
143 pr_debug("<--- command = 0x%2.2x\n", command); 157 pr_debug("EC_SC(W) = 0x%2.2x\n", command);
144 outb(command, ec->command_addr); 158 outb(command, ec->command_addr);
145} 159}
146 160
147static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data) 161static inline void acpi_ec_write_data(struct acpi_ec *ec, u8 data)
148{ 162{
149 pr_debug("<--- data = 0x%2.2x\n", data); 163 pr_debug("EC_DATA(W) = 0x%2.2x\n", data);
150 outb(data, ec->data_addr); 164 outb(data, ec->data_addr);
151} 165}
152 166
153static int ec_transaction_done(struct acpi_ec *ec) 167static int ec_transaction_completed(struct acpi_ec *ec)
154{ 168{
155 unsigned long flags; 169 unsigned long flags;
156 int ret = 0; 170 int ret = 0;
157 spin_lock_irqsave(&ec->lock, flags); 171 spin_lock_irqsave(&ec->lock, flags);
158 if (!ec->curr || ec->curr->done) 172 if (ec->curr && (ec->curr->flags & ACPI_EC_COMMAND_COMPLETE))
159 ret = 1; 173 ret = 1;
160 spin_unlock_irqrestore(&ec->lock, flags); 174 spin_unlock_irqrestore(&ec->lock, flags);
161 return ret; 175 return ret;
162} 176}
163 177
164static void start_transaction(struct acpi_ec *ec) 178static bool advance_transaction(struct acpi_ec *ec)
165{ 179{
166 ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0;
167 ec->curr->done = false;
168 acpi_ec_write_cmd(ec, ec->curr->command);
169}
170
171static void advance_transaction(struct acpi_ec *ec, u8 status)
172{
173 unsigned long flags;
174 struct transaction *t; 180 struct transaction *t;
181 u8 status;
182 bool wakeup = false;
175 183
176 spin_lock_irqsave(&ec->lock, flags); 184 pr_debug("===== %s =====\n", in_interrupt() ? "IRQ" : "TASK");
185 status = acpi_ec_read_status(ec);
177 t = ec->curr; 186 t = ec->curr;
178 if (!t) 187 if (!t)
179 goto unlock; 188 goto err;
180 if (t->wlen > t->wi) { 189 if (t->flags & ACPI_EC_COMMAND_POLL) {
181 if ((status & ACPI_EC_FLAG_IBF) == 0) 190 if (t->wlen > t->wi) {
182 acpi_ec_write_data(ec, 191 if ((status & ACPI_EC_FLAG_IBF) == 0)
183 t->wdata[t->wi++]); 192 acpi_ec_write_data(ec, t->wdata[t->wi++]);
184 else 193 else
185 goto err; 194 goto err;
186 } else if (t->rlen > t->ri) { 195 } else if (t->rlen > t->ri) {
187 if ((status & ACPI_EC_FLAG_OBF) == 1) { 196 if ((status & ACPI_EC_FLAG_OBF) == 1) {
188 t->rdata[t->ri++] = acpi_ec_read_data(ec); 197 t->rdata[t->ri++] = acpi_ec_read_data(ec);
189 if (t->rlen == t->ri) 198 if (t->rlen == t->ri) {
190 t->done = true; 199 t->flags |= ACPI_EC_COMMAND_COMPLETE;
200 wakeup = true;
201 }
202 } else
203 goto err;
204 } else if (t->wlen == t->wi &&
205 (status & ACPI_EC_FLAG_IBF) == 0) {
206 t->flags |= ACPI_EC_COMMAND_COMPLETE;
207 wakeup = true;
208 }
209 return wakeup;
210 } else {
211 if ((status & ACPI_EC_FLAG_IBF) == 0) {
212 acpi_ec_write_cmd(ec, t->command);
213 t->flags |= ACPI_EC_COMMAND_POLL;
191 } else 214 } else
192 goto err; 215 goto err;
193 } else if (t->wlen == t->wi && 216 return wakeup;
194 (status & ACPI_EC_FLAG_IBF) == 0) 217 }
195 t->done = true;
196 goto unlock;
197err: 218err:
198 /* 219 /*
199 * If SCI bit is set, then don't think it's a false IRQ 220 * If SCI bit is set, then don't think it's a false IRQ
200 * otherwise will take a not handled IRQ as a false one. 221 * otherwise will take a not handled IRQ as a false one.
201 */ 222 */
202 if (in_interrupt() && !(status & ACPI_EC_FLAG_SCI)) 223 if (!(status & ACPI_EC_FLAG_SCI)) {
203 ++t->irq_count; 224 if (in_interrupt() && t)
225 ++t->irq_count;
226 }
227 return wakeup;
228}
204 229
205unlock: 230static void start_transaction(struct acpi_ec *ec)
206 spin_unlock_irqrestore(&ec->lock, flags); 231{
232 ec->curr->irq_count = ec->curr->wi = ec->curr->ri = 0;
233 ec->curr->flags = 0;
234 (void)advance_transaction(ec);
207} 235}
208 236
209static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data); 237static int acpi_ec_sync_query(struct acpi_ec *ec, u8 *data);
@@ -228,15 +256,17 @@ static int ec_poll(struct acpi_ec *ec)
228 /* don't sleep with disabled interrupts */ 256 /* don't sleep with disabled interrupts */
229 if (EC_FLAGS_MSI || irqs_disabled()) { 257 if (EC_FLAGS_MSI || irqs_disabled()) {
230 udelay(ACPI_EC_MSI_UDELAY); 258 udelay(ACPI_EC_MSI_UDELAY);
231 if (ec_transaction_done(ec)) 259 if (ec_transaction_completed(ec))
232 return 0; 260 return 0;
233 } else { 261 } else {
234 if (wait_event_timeout(ec->wait, 262 if (wait_event_timeout(ec->wait,
235 ec_transaction_done(ec), 263 ec_transaction_completed(ec),
236 msecs_to_jiffies(1))) 264 msecs_to_jiffies(1)))
237 return 0; 265 return 0;
238 } 266 }
239 advance_transaction(ec, acpi_ec_read_status(ec)); 267 spin_lock_irqsave(&ec->lock, flags);
268 (void)advance_transaction(ec);
269 spin_unlock_irqrestore(&ec->lock, flags);
240 } while (time_before(jiffies, delay)); 270 } while (time_before(jiffies, delay));
241 pr_debug("controller reset, restart transaction\n"); 271 pr_debug("controller reset, restart transaction\n");
242 spin_lock_irqsave(&ec->lock, flags); 272 spin_lock_irqsave(&ec->lock, flags);
@@ -268,23 +298,6 @@ static int acpi_ec_transaction_unlocked(struct acpi_ec *ec,
268 return ret; 298 return ret;
269} 299}
270 300
271static int ec_check_ibf0(struct acpi_ec *ec)
272{
273 u8 status = acpi_ec_read_status(ec);
274 return (status & ACPI_EC_FLAG_IBF) == 0;
275}
276
277static int ec_wait_ibf0(struct acpi_ec *ec)
278{
279 unsigned long delay = jiffies + msecs_to_jiffies(ec_delay);
280 /* interrupt wait manually if GPE mode is not active */
281 while (time_before(jiffies, delay))
282 if (wait_event_timeout(ec->wait, ec_check_ibf0(ec),
283 msecs_to_jiffies(1)))
284 return 0;
285 return -ETIME;
286}
287
288static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t) 301static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
289{ 302{
290 int status; 303 int status;
@@ -305,12 +318,6 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
305 goto unlock; 318 goto unlock;
306 } 319 }
307 } 320 }
308 if (ec_wait_ibf0(ec)) {
309 pr_err("input buffer is not empty, "
310 "aborting transaction\n");
311 status = -ETIME;
312 goto end;
313 }
314 pr_debug("transaction start (cmd=0x%02x, addr=0x%02x)\n", 321 pr_debug("transaction start (cmd=0x%02x, addr=0x%02x)\n",
315 t->command, t->wdata ? t->wdata[0] : 0); 322 t->command, t->wdata ? t->wdata[0] : 0);
316 /* disable GPE during transaction if storm is detected */ 323 /* disable GPE during transaction if storm is detected */
@@ -334,7 +341,6 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t)
334 set_bit(EC_FLAGS_GPE_STORM, &ec->flags); 341 set_bit(EC_FLAGS_GPE_STORM, &ec->flags);
335 } 342 }
336 pr_debug("transaction end\n"); 343 pr_debug("transaction end\n");
337end:
338 if (ec->global_lock) 344 if (ec->global_lock)
339 acpi_release_global_lock(glk); 345 acpi_release_global_lock(glk);
340unlock: 346unlock:
@@ -634,17 +640,14 @@ static int ec_check_sci(struct acpi_ec *ec, u8 state)
634static u32 acpi_ec_gpe_handler(acpi_handle gpe_device, 640static u32 acpi_ec_gpe_handler(acpi_handle gpe_device,
635 u32 gpe_number, void *data) 641 u32 gpe_number, void *data)
636{ 642{
643 unsigned long flags;
637 struct acpi_ec *ec = data; 644 struct acpi_ec *ec = data;
638 u8 status = acpi_ec_read_status(ec);
639 645
640 pr_debug("~~~> interrupt, status:0x%02x\n", status); 646 spin_lock_irqsave(&ec->lock, flags);
641 647 if (advance_transaction(ec))
642 advance_transaction(ec, status);
643 if (ec_transaction_done(ec) &&
644 (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF) == 0) {
645 wake_up(&ec->wait); 648 wake_up(&ec->wait);
646 ec_check_sci(ec, acpi_ec_read_status(ec)); 649 spin_unlock_irqrestore(&ec->lock, flags);
647 } 650 ec_check_sci(ec, acpi_ec_read_status(ec));
648 return ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE; 651 return ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE;
649} 652}
650 653
@@ -1066,8 +1069,10 @@ int __init acpi_ec_ecdt_probe(void)
1066 /* fall through */ 1069 /* fall through */
1067 } 1070 }
1068 1071
1069 if (EC_FLAGS_SKIP_DSDT_SCAN) 1072 if (EC_FLAGS_SKIP_DSDT_SCAN) {
1073 kfree(saved_ec);
1070 return -ENODEV; 1074 return -ENODEV;
1075 }
1071 1076
1072 /* This workaround is needed only on some broken machines, 1077 /* This workaround is needed only on some broken machines,
1073 * which require early EC, but fail to provide ECDT */ 1078 * which require early EC, but fail to provide ECDT */
@@ -1105,6 +1110,7 @@ install:
1105 } 1110 }
1106error: 1111error:
1107 kfree(boot_ec); 1112 kfree(boot_ec);
1113 kfree(saved_ec);
1108 boot_ec = NULL; 1114 boot_ec = NULL;
1109 return -ENODEV; 1115 return -ENODEV;
1110} 1116}
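The ec.c changes replace the transaction's bool done with a flags word (ACPI_EC_COMMAND_POLL, ACPI_EC_COMMAND_COMPLETE), make advance_transaction() read the status register itself and report whether the waiter should be woken, and keep every flag update under ec->lock. A stripped-down sketch of that completion-flag scheme follows; it keeps only the locking and wakeup shape, and the demo_* names and two-phase logic are simplified stand-ins for the real EC protocol.

#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/wait.h>

#define DEMO_CMD_POLL		0x01	/* command byte written, now in data phase */
#define DEMO_CMD_COMPLETE	0x02	/* transaction finished */

struct demo_ec {
	spinlock_t lock;
	wait_queue_head_t wait;
	u8 flags;
};

static void demo_ec_init(struct demo_ec *ec)
{
	spin_lock_init(&ec->lock);
	init_waitqueue_head(&ec->wait);
	ec->flags = 0;
}

/* Advance the transaction one step; called with ec->lock held. */
static bool demo_advance(struct demo_ec *ec, bool data_ready)
{
	if (!(ec->flags & DEMO_CMD_POLL)) {
		ec->flags |= DEMO_CMD_POLL;	/* command phase done */
		return false;
	}
	if (data_ready) {
		ec->flags |= DEMO_CMD_COMPLETE;
		return true;			/* waiter should be woken */
	}
	return false;
}

/* What the waiter polls via wait_event_timeout(). */
static bool demo_completed(struct demo_ec *ec)
{
	unsigned long flags;
	bool done;

	spin_lock_irqsave(&ec->lock, flags);
	done = ec->flags & DEMO_CMD_COMPLETE;
	spin_unlock_irqrestore(&ec->lock, flags);
	return done;
}

/* GPE/IRQ path: advance under the lock, wake up outside it. */
static void demo_irq(struct demo_ec *ec, bool data_ready)
{
	unsigned long flags;
	bool wakeup;

	spin_lock_irqsave(&ec->lock, flags);
	wakeup = demo_advance(ec, data_ready);
	spin_unlock_irqrestore(&ec->lock, flags);
	if (wakeup)
		wake_up(&ec->wait);
}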
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 0bdacc5e26a3..2ba8f02ced36 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -77,7 +77,7 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res)
77 switch (ares->type) { 77 switch (ares->type) {
78 case ACPI_RESOURCE_TYPE_MEMORY24: 78 case ACPI_RESOURCE_TYPE_MEMORY24:
79 memory24 = &ares->data.memory24; 79 memory24 = &ares->data.memory24;
80 if (!memory24->address_length) 80 if (!memory24->minimum && !memory24->address_length)
81 return false; 81 return false;
82 acpi_dev_get_memresource(res, memory24->minimum, 82 acpi_dev_get_memresource(res, memory24->minimum,
83 memory24->address_length, 83 memory24->address_length,
@@ -85,7 +85,7 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res)
85 break; 85 break;
86 case ACPI_RESOURCE_TYPE_MEMORY32: 86 case ACPI_RESOURCE_TYPE_MEMORY32:
87 memory32 = &ares->data.memory32; 87 memory32 = &ares->data.memory32;
88 if (!memory32->address_length) 88 if (!memory32->minimum && !memory32->address_length)
89 return false; 89 return false;
90 acpi_dev_get_memresource(res, memory32->minimum, 90 acpi_dev_get_memresource(res, memory32->minimum,
91 memory32->address_length, 91 memory32->address_length,
@@ -93,7 +93,7 @@ bool acpi_dev_resource_memory(struct acpi_resource *ares, struct resource *res)
93 break; 93 break;
94 case ACPI_RESOURCE_TYPE_FIXED_MEMORY32: 94 case ACPI_RESOURCE_TYPE_FIXED_MEMORY32:
95 fixed_memory32 = &ares->data.fixed_memory32; 95 fixed_memory32 = &ares->data.fixed_memory32;
96 if (!fixed_memory32->address_length) 96 if (!fixed_memory32->address && !fixed_memory32->address_length)
97 return false; 97 return false;
98 acpi_dev_get_memresource(res, fixed_memory32->address, 98 acpi_dev_get_memresource(res, fixed_memory32->address,
99 fixed_memory32->address_length, 99 fixed_memory32->address_length,
@@ -150,7 +150,7 @@ bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res)
150 switch (ares->type) { 150 switch (ares->type) {
151 case ACPI_RESOURCE_TYPE_IO: 151 case ACPI_RESOURCE_TYPE_IO:
152 io = &ares->data.io; 152 io = &ares->data.io;
153 if (!io->address_length) 153 if (!io->minimum && !io->address_length)
154 return false; 154 return false;
155 acpi_dev_get_ioresource(res, io->minimum, 155 acpi_dev_get_ioresource(res, io->minimum,
156 io->address_length, 156 io->address_length,
@@ -158,7 +158,7 @@ bool acpi_dev_resource_io(struct acpi_resource *ares, struct resource *res)
158 break; 158 break;
159 case ACPI_RESOURCE_TYPE_FIXED_IO: 159 case ACPI_RESOURCE_TYPE_FIXED_IO:
160 fixed_io = &ares->data.fixed_io; 160 fixed_io = &ares->data.fixed_io;
161 if (!fixed_io->address_length) 161 if (!fixed_io->address && !fixed_io->address_length)
162 return false; 162 return false;
163 acpi_dev_get_ioresource(res, fixed_io->address, 163 acpi_dev_get_ioresource(res, fixed_io->address,
164 fixed_io->address_length, 164 fixed_io->address_length,
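The resource.c hunks change each validity test from "length is zero" to "both the start and the length are zero", so a zero-length window with a meaningful base address is no longer discarded. The adjusted check, reduced to a toy descriptor type (demo_* names are illustrative):

#include <linux/types.h>

struct demo_fixed_io {
	u32 address;
	u32 address_length;
};

/*
 * Old test: "!address_length" dropped any zero-length window, even one
 * with a meaningful base. New test: reject the descriptor only when both
 * fields are zero, i.e. it carries no information at all.
 */
static bool demo_io_valid(const struct demo_fixed_io *io)
{
	return io->address || io->address_length;
}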
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index fb9ffe9adc64..350d52a8f781 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -68,7 +68,7 @@ MODULE_AUTHOR("Bruno Ducrot");
68MODULE_DESCRIPTION("ACPI Video Driver"); 68MODULE_DESCRIPTION("ACPI Video Driver");
69MODULE_LICENSE("GPL"); 69MODULE_LICENSE("GPL");
70 70
71static bool brightness_switch_enabled; 71static bool brightness_switch_enabled = 1;
72module_param(brightness_switch_enabled, bool, 0644); 72module_param(brightness_switch_enabled, bool, 0644);
73 73
74/* 74/*
@@ -241,13 +241,14 @@ static bool acpi_video_use_native_backlight(void)
241 return use_native_backlight_dmi; 241 return use_native_backlight_dmi;
242} 242}
243 243
244static bool acpi_video_verify_backlight_support(void) 244bool acpi_video_verify_backlight_support(void)
245{ 245{
246 if (acpi_osi_is_win8() && acpi_video_use_native_backlight() && 246 if (acpi_osi_is_win8() && acpi_video_use_native_backlight() &&
247 backlight_device_registered(BACKLIGHT_RAW)) 247 backlight_device_registered(BACKLIGHT_RAW))
248 return false; 248 return false;
249 return acpi_video_backlight_support(); 249 return acpi_video_backlight_support();
250} 250}
251EXPORT_SYMBOL_GPL(acpi_video_verify_backlight_support);
251 252
252/* backlight device sysfs support */ 253/* backlight device sysfs support */
253static int acpi_video_get_brightness(struct backlight_device *bd) 254static int acpi_video_get_brightness(struct backlight_device *bd)
@@ -563,6 +564,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
563 }, 564 },
564 }, 565 },
565 { 566 {
567 .callback = video_set_use_native_backlight,
568 .ident = "Acer TravelMate B113",
569 .matches = {
570 DMI_MATCH(DMI_SYS_VENDOR, "Acer"),
571 DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate B113"),
572 },
573 },
574 {
566 .callback = video_set_use_native_backlight, 575 .callback = video_set_use_native_backlight,
567 .ident = "HP ProBook 4340s", 576 .ident = "HP ProBook 4340s",
568 .matches = { 577 .matches = {
@@ -572,6 +581,14 @@ static struct dmi_system_id video_dmi_table[] __initdata = {
572 }, 581 },
573 { 582 {
574 .callback = video_set_use_native_backlight, 583 .callback = video_set_use_native_backlight,
584 .ident = "HP ProBook 4540s",
585 .matches = {
586 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
587 DMI_MATCH(DMI_PRODUCT_VERSION, "HP ProBook 4540s"),
588 },
589 },
590 {
591 .callback = video_set_use_native_backlight,
575 .ident = "HP ProBook 2013 models", 592 .ident = "HP ProBook 2013 models",
576 .matches = { 593 .matches = {
577 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), 594 DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
diff --git a/drivers/acpi/video_detect.c b/drivers/acpi/video_detect.c
index 33e3db548a29..c42feb2bacd0 100644
--- a/drivers/acpi/video_detect.c
+++ b/drivers/acpi/video_detect.c
@@ -166,6 +166,14 @@ static struct dmi_system_id video_detect_dmi_table[] = {
166 DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"), 166 DMI_MATCH(DMI_PRODUCT_NAME, "UL30A"),
167 }, 167 },
168 }, 168 },
169 {
170 .callback = video_detect_force_vendor,
171 .ident = "Dell Inspiron 5737",
172 .matches = {
173 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
174 DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5737"),
175 },
176 },
169 { }, 177 { },
170}; 178};
171 179
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index 05882e4445a6..5513296e5e2e 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -371,7 +371,9 @@ int ahci_do_softreset(struct ata_link *link, unsigned int *class,
371 int pmp, unsigned long deadline, 371 int pmp, unsigned long deadline,
372 int (*check_ready)(struct ata_link *link)); 372 int (*check_ready)(struct ata_link *link));
373 373
374unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
374int ahci_stop_engine(struct ata_port *ap); 375int ahci_stop_engine(struct ata_port *ap);
376void ahci_start_fis_rx(struct ata_port *ap);
375void ahci_start_engine(struct ata_port *ap); 377void ahci_start_engine(struct ata_port *ap);
376int ahci_check_ready(struct ata_link *link); 378int ahci_check_ready(struct ata_link *link);
377int ahci_kick_engine(struct ata_port *ap); 379int ahci_kick_engine(struct ata_port *ap);
diff --git a/drivers/ata/ahci_imx.c b/drivers/ata/ahci_imx.c
index 3a901520c62b..cac4360f272a 100644
--- a/drivers/ata/ahci_imx.c
+++ b/drivers/ata/ahci_imx.c
@@ -58,6 +58,8 @@ enum ahci_imx_type {
58struct imx_ahci_priv { 58struct imx_ahci_priv {
59 struct platform_device *ahci_pdev; 59 struct platform_device *ahci_pdev;
60 enum ahci_imx_type type; 60 enum ahci_imx_type type;
61 struct clk *sata_clk;
62 struct clk *sata_ref_clk;
61 struct clk *ahb_clk; 63 struct clk *ahb_clk;
62 struct regmap *gpr; 64 struct regmap *gpr;
63 bool no_device; 65 bool no_device;
@@ -224,7 +226,7 @@ static int imx_sata_enable(struct ahci_host_priv *hpriv)
224 return ret; 226 return ret;
225 } 227 }
226 228
227 ret = ahci_platform_enable_clks(hpriv); 229 ret = clk_prepare_enable(imxpriv->sata_ref_clk);
228 if (ret < 0) 230 if (ret < 0)
229 goto disable_regulator; 231 goto disable_regulator;
230 232
@@ -291,7 +293,7 @@ static void imx_sata_disable(struct ahci_host_priv *hpriv)
291 !IMX6Q_GPR13_SATA_MPLL_CLK_EN); 293 !IMX6Q_GPR13_SATA_MPLL_CLK_EN);
292 } 294 }
293 295
294 ahci_platform_disable_clks(hpriv); 296 clk_disable_unprepare(imxpriv->sata_ref_clk);
295 297
296 if (hpriv->target_pwr) 298 if (hpriv->target_pwr)
297 regulator_disable(hpriv->target_pwr); 299 regulator_disable(hpriv->target_pwr);
@@ -324,6 +326,9 @@ static void ahci_imx_error_handler(struct ata_port *ap)
324 writel(reg_val | IMX_P0PHYCR_TEST_PDDQ, mmio + IMX_P0PHYCR); 326 writel(reg_val | IMX_P0PHYCR_TEST_PDDQ, mmio + IMX_P0PHYCR);
325 imx_sata_disable(hpriv); 327 imx_sata_disable(hpriv);
326 imxpriv->no_device = true; 328 imxpriv->no_device = true;
329
330 dev_info(ap->dev, "no device found, disabling link.\n");
331 dev_info(ap->dev, "pass " MODULE_PARAM_PREFIX ".hotplug=1 to enable hotplug\n");
327} 332}
328 333
329static int ahci_imx_softreset(struct ata_link *link, unsigned int *class, 334static int ahci_imx_softreset(struct ata_link *link, unsigned int *class,
@@ -385,6 +390,19 @@ static int imx_ahci_probe(struct platform_device *pdev)
385 imxpriv->no_device = false; 390 imxpriv->no_device = false;
386 imxpriv->first_time = true; 391 imxpriv->first_time = true;
387 imxpriv->type = (enum ahci_imx_type)of_id->data; 392 imxpriv->type = (enum ahci_imx_type)of_id->data;
393
394 imxpriv->sata_clk = devm_clk_get(dev, "sata");
395 if (IS_ERR(imxpriv->sata_clk)) {
396 dev_err(dev, "can't get sata clock.\n");
397 return PTR_ERR(imxpriv->sata_clk);
398 }
399
400 imxpriv->sata_ref_clk = devm_clk_get(dev, "sata_ref");
401 if (IS_ERR(imxpriv->sata_ref_clk)) {
402 dev_err(dev, "can't get sata_ref clock.\n");
403 return PTR_ERR(imxpriv->sata_ref_clk);
404 }
405
388 imxpriv->ahb_clk = devm_clk_get(dev, "ahb"); 406 imxpriv->ahb_clk = devm_clk_get(dev, "ahb");
389 if (IS_ERR(imxpriv->ahb_clk)) { 407 if (IS_ERR(imxpriv->ahb_clk)) {
390 dev_err(dev, "can't get ahb clock.\n"); 408 dev_err(dev, "can't get ahb clock.\n");
@@ -407,10 +425,14 @@ static int imx_ahci_probe(struct platform_device *pdev)
407 425
408 hpriv->plat_data = imxpriv; 426 hpriv->plat_data = imxpriv;
409 427
410 ret = imx_sata_enable(hpriv); 428 ret = clk_prepare_enable(imxpriv->sata_clk);
411 if (ret) 429 if (ret)
412 return ret; 430 return ret;
413 431
432 ret = imx_sata_enable(hpriv);
433 if (ret)
434 goto disable_clk;
435
414 /* 436 /*
415 * Configure the HWINIT bits of the HOST_CAP and HOST_PORTS_IMPL, 437 * Configure the HWINIT bits of the HOST_CAP and HOST_PORTS_IMPL,
416 * and IP vendor specific register IMX_TIMER1MS. 438 * and IP vendor specific register IMX_TIMER1MS.
@@ -435,16 +457,24 @@ static int imx_ahci_probe(struct platform_device *pdev)
435 ret = ahci_platform_init_host(pdev, hpriv, &ahci_imx_port_info, 457 ret = ahci_platform_init_host(pdev, hpriv, &ahci_imx_port_info,
436 0, 0, 0); 458 0, 0, 0);
437 if (ret) 459 if (ret)
438 imx_sata_disable(hpriv); 460 goto disable_sata;
439 461
462 return 0;
463
464disable_sata:
465 imx_sata_disable(hpriv);
466disable_clk:
467 clk_disable_unprepare(imxpriv->sata_clk);
440 return ret; 468 return ret;
441} 469}
442 470
443static void ahci_imx_host_stop(struct ata_host *host) 471static void ahci_imx_host_stop(struct ata_host *host)
444{ 472{
445 struct ahci_host_priv *hpriv = host->private_data; 473 struct ahci_host_priv *hpriv = host->private_data;
474 struct imx_ahci_priv *imxpriv = hpriv->plat_data;
446 475
447 imx_sata_disable(hpriv); 476 imx_sata_disable(hpriv);
477 clk_disable_unprepare(imxpriv->sata_clk);
448} 478}
449 479
450#ifdef CONFIG_PM_SLEEP 480#ifdef CONFIG_PM_SLEEP
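ahci_imx.c now obtains the "sata" and "sata_ref" clocks itself with devm_clk_get(), enables them with clk_prepare_enable(), and unwinds through goto labels when a later step fails. A minimal probe-shaped sketch of that clock handling; the clock names match the hunk, while demo_probe and the omitted host registration are illustrative:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static int demo_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct clk *sata_clk, *sata_ref_clk;
	int ret;

	sata_clk = devm_clk_get(dev, "sata");
	if (IS_ERR(sata_clk)) {
		dev_err(dev, "can't get sata clock\n");
		return PTR_ERR(sata_clk);
	}

	sata_ref_clk = devm_clk_get(dev, "sata_ref");
	if (IS_ERR(sata_ref_clk)) {
		dev_err(dev, "can't get sata_ref clock\n");
		return PTR_ERR(sata_ref_clk);
	}

	ret = clk_prepare_enable(sata_clk);
	if (ret)
		return ret;

	ret = clk_prepare_enable(sata_ref_clk);
	if (ret)
		goto disable_sata_clk;

	/* ... host registration would go here; failures unwind both clocks ... */

	return 0;

disable_sata_clk:
	clk_disable_unprepare(sata_clk);
	return ret;
}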
diff --git a/drivers/ata/ahci_platform.c b/drivers/ata/ahci_platform.c
index ebe505c17763..b10d81ddb528 100644
--- a/drivers/ata/ahci_platform.c
+++ b/drivers/ata/ahci_platform.c
@@ -58,7 +58,7 @@ static int ahci_probe(struct platform_device *pdev)
58 } 58 }
59 59
60 if (of_device_is_compatible(dev->of_node, "hisilicon,hisi-ahci")) 60 if (of_device_is_compatible(dev->of_node, "hisilicon,hisi-ahci"))
61 hflags |= AHCI_HFLAG_NO_FBS; 61 hflags |= AHCI_HFLAG_NO_FBS | AHCI_HFLAG_NO_NCQ;
62 62
63 rc = ahci_platform_init_host(pdev, hpriv, &ahci_port_info, 63 rc = ahci_platform_init_host(pdev, hpriv, &ahci_port_info,
64 hflags, 0, 0); 64 hflags, 0, 0);
diff --git a/drivers/ata/ahci_xgene.c b/drivers/ata/ahci_xgene.c
index 042a9bb45c86..ee3a3659bd9e 100644
--- a/drivers/ata/ahci_xgene.c
+++ b/drivers/ata/ahci_xgene.c
@@ -78,6 +78,7 @@
78struct xgene_ahci_context { 78struct xgene_ahci_context {
79 struct ahci_host_priv *hpriv; 79 struct ahci_host_priv *hpriv;
80 struct device *dev; 80 struct device *dev;
81 u8 last_cmd[MAX_AHCI_CHN_PERCTR]; /* tracking the last command issued*/
81 void __iomem *csr_core; /* Core CSR address of IP */ 82 void __iomem *csr_core; /* Core CSR address of IP */
82 void __iomem *csr_diag; /* Diag CSR address of IP */ 83 void __iomem *csr_diag; /* Diag CSR address of IP */
83 void __iomem *csr_axi; /* AXI CSR address of IP */ 84 void __iomem *csr_axi; /* AXI CSR address of IP */
@@ -98,20 +99,62 @@ static int xgene_ahci_init_memram(struct xgene_ahci_context *ctx)
98} 99}
99 100
100/** 101/**
102 * xgene_ahci_restart_engine - Restart the dma engine.
103 * @ap : ATA port of interest
104 *
105 * Restarts the dma engine inside the controller.
106 */
107static int xgene_ahci_restart_engine(struct ata_port *ap)
108{
109 struct ahci_host_priv *hpriv = ap->host->private_data;
110
111 ahci_stop_engine(ap);
112 ahci_start_fis_rx(ap);
113 hpriv->start_engine(ap);
114
115 return 0;
116}
117
118/**
119 * xgene_ahci_qc_issue - Issue commands to the device
120 * @qc: Command to issue
121 *
122 * Due to Hardware errata for IDENTIFY DEVICE command, the controller cannot
123 * clear the BSY bit after receiving the PIO setup FIS. This results in the dma
124 * state machine goes into the CMFatalErrorUpdate state and locks up. By
125 * restarting the dma engine, it removes the controller out of lock up state.
126 */
127static unsigned int xgene_ahci_qc_issue(struct ata_queued_cmd *qc)
128{
129 struct ata_port *ap = qc->ap;
130 struct ahci_host_priv *hpriv = ap->host->private_data;
131 struct xgene_ahci_context *ctx = hpriv->plat_data;
132 int rc = 0;
133
134 if (unlikely(ctx->last_cmd[ap->port_no] == ATA_CMD_ID_ATA))
135 xgene_ahci_restart_engine(ap);
136
137 rc = ahci_qc_issue(qc);
138
139 /* Save the last command issued */
140 ctx->last_cmd[ap->port_no] = qc->tf.command;
141
142 return rc;
143}
144
145/**
101 * xgene_ahci_read_id - Read ID data from the specified device 146 * xgene_ahci_read_id - Read ID data from the specified device
102 * @dev: device 147 * @dev: device
103 * @tf: proposed taskfile 148 * @tf: proposed taskfile
104 * @id: data buffer 149 * @id: data buffer
105 * 150 *
106 * This custom read ID function is required due to the fact that the HW 151 * This custom read ID function is required due to the fact that the HW
107 * does not support DEVSLP and the controller state machine may get stuck 152 * does not support DEVSLP.
108 * after processing the ID query command.
109 */ 153 */
110static unsigned int xgene_ahci_read_id(struct ata_device *dev, 154static unsigned int xgene_ahci_read_id(struct ata_device *dev,
111 struct ata_taskfile *tf, u16 *id) 155 struct ata_taskfile *tf, u16 *id)
112{ 156{
113 u32 err_mask; 157 u32 err_mask;
114 void __iomem *port_mmio = ahci_port_base(dev->link->ap);
115 158
116 err_mask = ata_do_dev_read_id(dev, tf, id); 159 err_mask = ata_do_dev_read_id(dev, tf, id);
117 if (err_mask) 160 if (err_mask)
@@ -133,16 +176,6 @@ static unsigned int xgene_ahci_read_id(struct ata_device *dev,
133 */ 176 */
134 id[ATA_ID_FEATURE_SUPP] &= ~(1 << 8); 177 id[ATA_ID_FEATURE_SUPP] &= ~(1 << 8);
135 178
136 /*
137 * Due to HW errata, restart the port if no other command active.
138 * Otherwise the controller may get stuck.
139 */
140 if (!readl(port_mmio + PORT_CMD_ISSUE)) {
141 writel(PORT_CMD_FIS_RX, port_mmio + PORT_CMD);
142 readl(port_mmio + PORT_CMD); /* Force a barrier */
143 writel(PORT_CMD_FIS_RX | PORT_CMD_START, port_mmio + PORT_CMD);
144 readl(port_mmio + PORT_CMD); /* Force a barrier */
145 }
146 return 0; 179 return 0;
147} 180}
148 181
@@ -300,6 +333,7 @@ static struct ata_port_operations xgene_ahci_ops = {
300 .host_stop = xgene_ahci_host_stop, 333 .host_stop = xgene_ahci_host_stop,
301 .hardreset = xgene_ahci_hardreset, 334 .hardreset = xgene_ahci_hardreset,
302 .read_id = xgene_ahci_read_id, 335 .read_id = xgene_ahci_read_id,
336 .qc_issue = xgene_ahci_qc_issue,
303}; 337};
304 338
305static const struct ata_port_info xgene_ahci_port_info = { 339static const struct ata_port_info xgene_ahci_port_info = {
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index 40ea583d3610..d72ce0470309 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -68,7 +68,6 @@ static ssize_t ahci_transmit_led_message(struct ata_port *ap, u32 state,
68 68
69static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val); 69static int ahci_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val);
70static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val); 70static int ahci_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val);
71static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc);
72static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc); 71static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc);
73static int ahci_port_start(struct ata_port *ap); 72static int ahci_port_start(struct ata_port *ap);
74static void ahci_port_stop(struct ata_port *ap); 73static void ahci_port_stop(struct ata_port *ap);
@@ -620,7 +619,7 @@ int ahci_stop_engine(struct ata_port *ap)
620} 619}
621EXPORT_SYMBOL_GPL(ahci_stop_engine); 620EXPORT_SYMBOL_GPL(ahci_stop_engine);
622 621
623static void ahci_start_fis_rx(struct ata_port *ap) 622void ahci_start_fis_rx(struct ata_port *ap)
624{ 623{
625 void __iomem *port_mmio = ahci_port_base(ap); 624 void __iomem *port_mmio = ahci_port_base(ap);
626 struct ahci_host_priv *hpriv = ap->host->private_data; 625 struct ahci_host_priv *hpriv = ap->host->private_data;
@@ -646,6 +645,7 @@ static void ahci_start_fis_rx(struct ata_port *ap)
646 /* flush */ 645 /* flush */
647 readl(port_mmio + PORT_CMD); 646 readl(port_mmio + PORT_CMD);
648} 647}
648EXPORT_SYMBOL_GPL(ahci_start_fis_rx);
649 649
650static int ahci_stop_fis_rx(struct ata_port *ap) 650static int ahci_stop_fis_rx(struct ata_port *ap)
651{ 651{
@@ -1945,7 +1945,7 @@ irqreturn_t ahci_interrupt(int irq, void *dev_instance)
1945} 1945}
1946EXPORT_SYMBOL_GPL(ahci_interrupt); 1946EXPORT_SYMBOL_GPL(ahci_interrupt);
1947 1947
1948static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc) 1948unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
1949{ 1949{
1950 struct ata_port *ap = qc->ap; 1950 struct ata_port *ap = qc->ap;
1951 void __iomem *port_mmio = ahci_port_base(ap); 1951 void __iomem *port_mmio = ahci_port_base(ap);
@@ -1974,6 +1974,7 @@ static unsigned int ahci_qc_issue(struct ata_queued_cmd *qc)
1974 1974
1975 return 0; 1975 return 0;
1976} 1976}
1977EXPORT_SYMBOL_GPL(ahci_qc_issue);
1977 1978
1978static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc) 1979static bool ahci_qc_fill_rtf(struct ata_queued_cmd *qc)
1979{ 1980{
diff --git a/drivers/ata/libahci_platform.c b/drivers/ata/libahci_platform.c
index 3a5b4ed25a4f..b0077589f065 100644
--- a/drivers/ata/libahci_platform.c
+++ b/drivers/ata/libahci_platform.c
@@ -250,8 +250,13 @@ struct ahci_host_priv *ahci_platform_get_resources(struct platform_device *pdev)
250 if (IS_ERR(hpriv->phy)) { 250 if (IS_ERR(hpriv->phy)) {
251 rc = PTR_ERR(hpriv->phy); 251 rc = PTR_ERR(hpriv->phy);
252 switch (rc) { 252 switch (rc) {
253 case -ENODEV:
254 case -ENOSYS: 253 case -ENOSYS:
254 /* No PHY support. Check if PHY is required. */
255 if (of_find_property(dev->of_node, "phys", NULL)) {
256 dev_err(dev, "couldn't get sata-phy: ENOSYS\n");
257 goto err_out;
258 }
259 case -ENODEV:
255 /* continue normally */ 260 /* continue normally */
256 hpriv->phy = NULL; 261 hpriv->phy = NULL;
257 break; 262 break;
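libahci_platform.c now treats -ENOSYS from the PHY lookup as fatal only when the device tree actually carries a "phys" property; otherwise it, like -ENODEV, means "no PHY, carry on". A sketch of that optional-PHY lookup as a helper; the "sata-phy" consumer name is taken from the hunk's error message, while demo_get_optional_phy() itself is hypothetical:

#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/of.h>
#include <linux/phy/phy.h>

/* Returns 0 with *phy set to NULL when no PHY is described for the port. */
static int demo_get_optional_phy(struct device *dev, struct phy **phy)
{
	*phy = devm_phy_get(dev, "sata-phy");
	if (!IS_ERR(*phy))
		return 0;

	switch (PTR_ERR(*phy)) {
	case -ENOSYS:
		/* PHY framework disabled: fatal only if DT asked for a PHY. */
		if (of_find_property(dev->of_node, "phys", NULL)) {
			dev_err(dev, "couldn't get sata-phy: ENOSYS\n");
			return -ENOSYS;
		}
		/* fall through */
	case -ENODEV:
		*phy = NULL;	/* no PHY described, continue normally */
		return 0;
	default:
		return PTR_ERR(*phy);
	}
}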
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
index 83969f8c5727..6467c919c509 100644
--- a/drivers/base/dma-contiguous.c
+++ b/drivers/base/dma-contiguous.c
@@ -176,14 +176,24 @@ static int __init cma_activate_area(struct cma *cma)
176 base_pfn = pfn; 176 base_pfn = pfn;
177 for (j = pageblock_nr_pages; j; --j, pfn++) { 177 for (j = pageblock_nr_pages; j; --j, pfn++) {
178 WARN_ON_ONCE(!pfn_valid(pfn)); 178 WARN_ON_ONCE(!pfn_valid(pfn));
179 /*
180 * alloc_contig_range requires the pfn range
181 * specified to be in the same zone. Make this
182 * simple by forcing the entire CMA resv range
183 * to be in the same zone.
184 */
179 if (page_zone(pfn_to_page(pfn)) != zone) 185 if (page_zone(pfn_to_page(pfn)) != zone)
180 return -EINVAL; 186 goto err;
181 } 187 }
182 init_cma_reserved_pageblock(pfn_to_page(base_pfn)); 188 init_cma_reserved_pageblock(pfn_to_page(base_pfn));
183 } while (--i); 189 } while (--i);
184 190
185 mutex_init(&cma->lock); 191 mutex_init(&cma->lock);
186 return 0; 192 return 0;
193
194err:
195 kfree(cma->bitmap);
196 return -EINVAL;
187} 197}
188 198
189static struct cma cma_areas[MAX_CMA_AREAS]; 199static struct cma cma_areas[MAX_CMA_AREAS];
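The dma-contiguous.c fix stops leaking cma->bitmap when a pageblock falls outside the expected zone: the early return becomes a goto to a label that frees the bitmap. The same allocate-check-unwind shape in a self-contained sketch (demo_* names and the validity test are placeholders):

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/slab.h>

struct demo_area {
	unsigned long count;
	unsigned long *bitmap;
};

/* Placeholder validity test for one unit of the range. */
static bool demo_range_ok(unsigned long i)
{
	return i != 13;
}

static int demo_activate(struct demo_area *area)
{
	unsigned long i;

	area->bitmap = kzalloc(BITS_TO_LONGS(area->count) * sizeof(long),
			       GFP_KERNEL);
	if (!area->bitmap)
		return -ENOMEM;

	for (i = 0; i < area->count; i++) {
		if (!demo_range_ok(i))
			goto err;	/* a bare return here would leak the bitmap */
	}
	return 0;

err:
	kfree(area->bitmap);
	area->bitmap = NULL;
	return -EINVAL;
}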
diff --git a/drivers/base/platform.c b/drivers/base/platform.c
index 9e9227e1762d..eee48c49f5de 100644
--- a/drivers/base/platform.c
+++ b/drivers/base/platform.c
@@ -89,8 +89,13 @@ int platform_get_irq(struct platform_device *dev, unsigned int num)
89 return dev->archdata.irqs[num]; 89 return dev->archdata.irqs[num];
90#else 90#else
91 struct resource *r; 91 struct resource *r;
92 if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) 92 if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
93 return of_irq_get(dev->dev.of_node, num); 93 int ret;
94
95 ret = of_irq_get(dev->dev.of_node, num);
96 if (ret >= 0 || ret == -EPROBE_DEFER)
97 return ret;
98 }
94 99
95 r = platform_get_resource(dev, IORESOURCE_IRQ, num); 100 r = platform_get_resource(dev, IORESOURCE_IRQ, num);
96 101
@@ -133,8 +138,13 @@ int platform_get_irq_byname(struct platform_device *dev, const char *name)
133{ 138{
134 struct resource *r; 139 struct resource *r;
135 140
136 if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) 141 if (IS_ENABLED(CONFIG_OF_IRQ) && dev->dev.of_node) {
137 return of_irq_get_byname(dev->dev.of_node, name); 142 int ret;
143
144 ret = of_irq_get_byname(dev->dev.of_node, name);
145 if (ret >= 0 || ret == -EPROBE_DEFER)
146 return ret;
147 }
138 148
139 r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name); 149 r = platform_get_resource_byname(dev, IORESOURCE_IRQ, name);
140 return r ? r->start : -ENXIO; 150 return r ? r->start : -ENXIO;
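Both platform.c hunks make the OF interrupt lookup non-fatal: only success or -EPROBE_DEFER ends the call, anything else falls back to the static IORESOURCE_IRQ resource. A condensed sketch of that control flow with stand-in lookup helpers:

#include <stdio.h>
#include <errno.h>

#define EPROBE_DEFER 517        /* same value the kernel uses */

static int of_lookup(int num)       { return -ENXIO; }  /* pretend OF has no mapping */
static int resource_lookup(int num) { return 42; }      /* static resource says IRQ 42 */

static int get_irq(int num)
{
        int ret = of_lookup(num);

        /* Success or "try again later" ends the lookup; any other
         * error falls back to the platform resource, which is the
         * behaviour the patch restores. */
        if (ret >= 0 || ret == -EPROBE_DEFER)
                return ret;

        return resource_lookup(num);
}

int main(void)
{
        printf("irq = %d\n", get_irq(0));
        return 0;
}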
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index b6c8aaf4931b..5b17ec88ea05 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1337,8 +1337,11 @@ int drbd_submit_peer_request(struct drbd_device *device,
1337 return 0; 1337 return 0;
1338 } 1338 }
1339 1339
1340 /* Discards don't have any payload.
1341 * But the scsi layer still expects a bio_vec it can use internally,
1342 * see sd_setup_discard_cmnd() and blk_add_request_payload(). */
1340 if (peer_req->flags & EE_IS_TRIM) 1343 if (peer_req->flags & EE_IS_TRIM)
1341 nr_pages = 0; /* discards don't have any payload. */ 1344 nr_pages = 1;
1342 1345
1343 /* In most cases, we will only need one bio. But in case the lower 1346 /* In most cases, we will only need one bio. But in case the lower
1344 * level restrictions happen to be different at this offset on this 1347 * level restrictions happen to be different at this offset on this
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 677db049f55a..56d46ffb08e1 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3777,7 +3777,7 @@ static void floppy_rb0_cb(struct bio *bio, int err)
3777 int drive = cbdata->drive; 3777 int drive = cbdata->drive;
3778 3778
3779 if (err) { 3779 if (err) {
3780 pr_info("floppy: error %d while reading block 0", err); 3780 pr_info("floppy: error %d while reading block 0\n", err);
3781 set_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags); 3781 set_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
3782 } 3782 }
3783 complete(&cbdata->complete); 3783 complete(&cbdata->complete);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index bbeb404b3a07..b2c98c1bc037 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -1431,6 +1431,14 @@ static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
1431 return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0; 1431 return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
1432} 1432}
1433 1433
1434static bool obj_request_overlaps_parent(struct rbd_obj_request *obj_request)
1435{
1436 struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;
1437
1438 return obj_request->img_offset <
1439 round_up(rbd_dev->parent_overlap, rbd_obj_bytes(&rbd_dev->header));
1440}
1441
1434static void rbd_obj_request_get(struct rbd_obj_request *obj_request) 1442static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
1435{ 1443{
1436 dout("%s: obj %p (was %d)\n", __func__, obj_request, 1444 dout("%s: obj %p (was %d)\n", __func__, obj_request,
@@ -2748,7 +2756,7 @@ static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
2748 */ 2756 */
2749 if (!img_request_write_test(img_request) || 2757 if (!img_request_write_test(img_request) ||
2750 !img_request_layered_test(img_request) || 2758 !img_request_layered_test(img_request) ||
2751 rbd_dev->parent_overlap <= obj_request->img_offset || 2759 !obj_request_overlaps_parent(obj_request) ||
2752 ((known = obj_request_known_test(obj_request)) && 2760 ((known = obj_request_known_test(obj_request)) &&
2753 obj_request_exists_test(obj_request))) { 2761 obj_request_exists_test(obj_request))) {
2754 2762
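The new obj_request_overlaps_parent() helper rounds parent_overlap up to a whole object before comparing it against the request offset, so a request that touches a partially covered last object is still treated as overlapping. A standalone sketch of the arithmetic, using a macro equivalent to the kernel's round_up() for power-of-two object sizes:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

/* Equivalent to the kernel's round_up() for power-of-two y. */
#define round_up(x, y) ((((x) - 1) | ((y) - 1)) + 1)

static bool overlaps_parent(uint64_t img_offset, uint64_t parent_overlap,
                            uint64_t obj_bytes)
{
        return img_offset < round_up(parent_overlap, obj_bytes);
}

int main(void)
{
        /* 4 MiB objects, parent overlap ends mid-object: a request at
         * offset 5 MiB still overlaps the partially valid last object. */
        uint64_t obj = 4u << 20;

        printf("%d\n", overlaps_parent(5u << 20, (4u << 20) + 1, obj)); /* 1 */
        printf("%d\n", overlaps_parent(9u << 20, (4u << 20) + 1, obj)); /* 0 */
        return 0;
}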
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 48eccb350180..089e72cd37be 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -622,8 +622,10 @@ static void zram_reset_device(struct zram *zram, bool reset_capacity)
622 memset(&zram->stats, 0, sizeof(zram->stats)); 622 memset(&zram->stats, 0, sizeof(zram->stats));
623 623
624 zram->disksize = 0; 624 zram->disksize = 0;
625 if (reset_capacity) 625 if (reset_capacity) {
626 set_capacity(zram->disk, 0); 626 set_capacity(zram->disk, 0);
627 revalidate_disk(zram->disk);
628 }
627 up_write(&zram->init_lock); 629 up_write(&zram->init_lock);
628} 630}
629 631
@@ -664,6 +666,7 @@ static ssize_t disksize_store(struct device *dev,
664 zram->comp = comp; 666 zram->comp = comp;
665 zram->disksize = disksize; 667 zram->disksize = disksize;
666 set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT); 668 set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
669 revalidate_disk(zram->disk);
667 up_write(&zram->init_lock); 670 up_write(&zram->init_lock);
668 return len; 671 return len;
669 672
diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c
index f98380648cb3..f50dffc0374f 100644
--- a/drivers/bluetooth/ath3k.c
+++ b/drivers/bluetooth/ath3k.c
@@ -90,7 +90,6 @@ static const struct usb_device_id ath3k_table[] = {
90 { USB_DEVICE(0x0b05, 0x17d0) }, 90 { USB_DEVICE(0x0b05, 0x17d0) },
91 { USB_DEVICE(0x0CF3, 0x0036) }, 91 { USB_DEVICE(0x0CF3, 0x0036) },
92 { USB_DEVICE(0x0CF3, 0x3004) }, 92 { USB_DEVICE(0x0CF3, 0x3004) },
93 { USB_DEVICE(0x0CF3, 0x3005) },
94 { USB_DEVICE(0x0CF3, 0x3008) }, 93 { USB_DEVICE(0x0CF3, 0x3008) },
95 { USB_DEVICE(0x0CF3, 0x311D) }, 94 { USB_DEVICE(0x0CF3, 0x311D) },
96 { USB_DEVICE(0x0CF3, 0x311E) }, 95 { USB_DEVICE(0x0CF3, 0x311E) },
@@ -140,7 +139,6 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
140 { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 }, 139 { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
141 { USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 }, 140 { USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 },
142 { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, 141 { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
143 { USB_DEVICE(0x0cf3, 0x3005), .driver_info = BTUSB_ATH3012 },
144 { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 }, 142 { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
145 { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 }, 143 { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 },
146 { USB_DEVICE(0x0cf3, 0x311E), .driver_info = BTUSB_ATH3012 }, 144 { USB_DEVICE(0x0cf3, 0x311E), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index a1c80b0c7663..6250fc2fb93a 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -162,7 +162,6 @@ static const struct usb_device_id blacklist_table[] = {
162 { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 }, 162 { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 },
163 { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 }, 163 { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 },
164 { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, 164 { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 },
165 { USB_DEVICE(0x0cf3, 0x3005), .driver_info = BTUSB_ATH3012 },
166 { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 }, 165 { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 },
167 { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 }, 166 { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 },
168 { USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 }, 167 { USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 },
diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c
index 04680ead9275..fede8ca7147c 100644
--- a/drivers/bluetooth/hci_h5.c
+++ b/drivers/bluetooth/hci_h5.c
@@ -406,6 +406,7 @@ static int h5_rx_3wire_hdr(struct hci_uart *hu, unsigned char c)
406 H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT) { 406 H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT) {
407 BT_ERR("Non-link packet received in non-active state"); 407 BT_ERR("Non-link packet received in non-active state");
408 h5_reset_rx(h5); 408 h5_reset_rx(h5);
409 return 0;
409 } 410 }
410 411
411 h5->rx_func = h5_rx_payload; 412 h5->rx_func = h5_rx_payload;
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 334601cc81cf..c4419ea1ab07 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -55,16 +55,41 @@ static DEFINE_MUTEX(rng_mutex);
55static int data_avail; 55static int data_avail;
56static u8 *rng_buffer; 56static u8 *rng_buffer;
57 57
58static inline int rng_get_data(struct hwrng *rng, u8 *buffer, size_t size,
59 int wait);
60
58static size_t rng_buffer_size(void) 61static size_t rng_buffer_size(void)
59{ 62{
60 return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES; 63 return SMP_CACHE_BYTES < 32 ? 32 : SMP_CACHE_BYTES;
61} 64}
62 65
66static void add_early_randomness(struct hwrng *rng)
67{
68 unsigned char bytes[16];
69 int bytes_read;
70
71 /*
72 * Currently only virtio-rng cannot return data during device
73 * probe, and that's handled in virtio-rng.c itself. If there
74 * are more such devices, this call to rng_get_data can be
75 * made conditional here instead of doing it per-device.
76 */
77 bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1);
78 if (bytes_read > 0)
79 add_device_randomness(bytes, bytes_read);
80}
81
63static inline int hwrng_init(struct hwrng *rng) 82static inline int hwrng_init(struct hwrng *rng)
64{ 83{
65 if (!rng->init) 84 if (rng->init) {
66 return 0; 85 int ret;
67 return rng->init(rng); 86
87 ret = rng->init(rng);
88 if (ret)
89 return ret;
90 }
91 add_early_randomness(rng);
92 return 0;
68} 93}
69 94
70static inline void hwrng_cleanup(struct hwrng *rng) 95static inline void hwrng_cleanup(struct hwrng *rng)
@@ -304,8 +329,6 @@ int hwrng_register(struct hwrng *rng)
304{ 329{
305 int err = -EINVAL; 330 int err = -EINVAL;
306 struct hwrng *old_rng, *tmp; 331 struct hwrng *old_rng, *tmp;
307 unsigned char bytes[16];
308 int bytes_read;
309 332
310 if (rng->name == NULL || 333 if (rng->name == NULL ||
311 (rng->data_read == NULL && rng->read == NULL)) 334 (rng->data_read == NULL && rng->read == NULL))
@@ -347,9 +370,17 @@ int hwrng_register(struct hwrng *rng)
347 INIT_LIST_HEAD(&rng->list); 370 INIT_LIST_HEAD(&rng->list);
348 list_add_tail(&rng->list, &rng_list); 371 list_add_tail(&rng->list, &rng_list);
349 372
350 bytes_read = rng_get_data(rng, bytes, sizeof(bytes), 1); 373 if (old_rng && !rng->init) {
351 if (bytes_read > 0) 374 /*
352 add_device_randomness(bytes, bytes_read); 375 * Use a new device's input to add some randomness to
376 * the system. If this rng device isn't going to be
377 * used right away, its init function hasn't been
378 * called yet; so only use the randomness from devices
379 * that don't need an init callback.
380 */
381 add_early_randomness(rng);
382 }
383
353out_unlock: 384out_unlock:
354 mutex_unlock(&rng_mutex); 385 mutex_unlock(&rng_mutex);
355out: 386out:
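The hw_random core hunks move the "seed the entropy pool from a new RNG" step into add_early_randomness(), called from hwrng_init() after ->init succeeds, or at registration time for devices that need no init. A condensed userspace sketch of that ordering; struct rng and its callbacks are stand-ins for the hwrng API, not the kernel structures:

#include <stdio.h>
#include <stddef.h>

struct rng {
        const char *name;
        int (*init)(struct rng *);
        int (*read)(struct rng *, unsigned char *, size_t);
};

static void add_early_randomness(struct rng *r)
{
        unsigned char bytes[16];
        int n = r->read(r, bytes, sizeof(bytes));

        if (n > 0)
                printf("%s: mixed %d early bytes into the pool\n", r->name, n);
}

/* Mirrors the patched hwrng_init(): seed only after ->init succeeds. */
static int rng_init(struct rng *r)
{
        if (r->init) {
                int ret = r->init(r);

                if (ret)
                        return ret;
        }
        add_early_randomness(r);
        return 0;
}

/* Mirrors the registration path: devices without ->init are safe to
 * read immediately; the rest wait until rng_init() runs. */
static void rng_register(struct rng *r, int have_current)
{
        if (have_current && !r->init)
                add_early_randomness(r);
}

static int dummy_read(struct rng *r, unsigned char *b, size_t n)
{
        for (size_t i = 0; i < n; i++)
                b[i] = (unsigned char)i;
        return (int)n;
}

int main(void)
{
        struct rng simple = { "simple-rng", NULL, dummy_read };

        /* the kernel takes only one of these paths per device;
         * both are shown here for illustration */
        rng_register(&simple, 1);
        rng_init(&simple);
        return 0;
}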
diff --git a/drivers/char/hw_random/virtio-rng.c b/drivers/char/hw_random/virtio-rng.c
index f3e71501de54..e9b15bc18b4d 100644
--- a/drivers/char/hw_random/virtio-rng.c
+++ b/drivers/char/hw_random/virtio-rng.c
@@ -38,6 +38,8 @@ struct virtrng_info {
38 int index; 38 int index;
39}; 39};
40 40
41static bool probe_done;
42
41static void random_recv_done(struct virtqueue *vq) 43static void random_recv_done(struct virtqueue *vq)
42{ 44{
43 struct virtrng_info *vi = vq->vdev->priv; 45 struct virtrng_info *vi = vq->vdev->priv;
@@ -67,6 +69,13 @@ static int virtio_read(struct hwrng *rng, void *buf, size_t size, bool wait)
67 int ret; 69 int ret;
68 struct virtrng_info *vi = (struct virtrng_info *)rng->priv; 70 struct virtrng_info *vi = (struct virtrng_info *)rng->priv;
69 71
72 /*
73 * Don't ask host for data till we're setup. This call can
74 * happen during hwrng_register(), after commit d9e7972619.
75 */
76 if (unlikely(!probe_done))
77 return 0;
78
70 if (!vi->busy) { 79 if (!vi->busy) {
71 vi->busy = true; 80 vi->busy = true;
72 init_completion(&vi->have_data); 81 init_completion(&vi->have_data);
@@ -137,6 +146,7 @@ static int probe_common(struct virtio_device *vdev)
137 return err; 146 return err;
138 } 147 }
139 148
149 probe_done = true;
140 return 0; 150 return 0;
141} 151}
142 152
diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
index d915707d2ba1..93dcad0c1cbe 100644
--- a/drivers/char/i8k.c
+++ b/drivers/char/i8k.c
@@ -138,7 +138,9 @@ static int i8k_smm(struct smm_regs *regs)
138 if (!alloc_cpumask_var(&old_mask, GFP_KERNEL)) 138 if (!alloc_cpumask_var(&old_mask, GFP_KERNEL))
139 return -ENOMEM; 139 return -ENOMEM;
140 cpumask_copy(old_mask, &current->cpus_allowed); 140 cpumask_copy(old_mask, &current->cpus_allowed);
141 set_cpus_allowed_ptr(current, cpumask_of(0)); 141 rc = set_cpus_allowed_ptr(current, cpumask_of(0));
142 if (rc)
143 goto out;
142 if (smp_processor_id() != 0) { 144 if (smp_processor_id() != 0) {
143 rc = -EBUSY; 145 rc = -EBUSY;
144 goto out; 146 goto out;
diff --git a/drivers/char/random.c b/drivers/char/random.c
index 0a7ac0a7b252..71529e196b84 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -641,7 +641,7 @@ retry:
641 } while (unlikely(entropy_count < pool_size-2 && pnfrac)); 641 } while (unlikely(entropy_count < pool_size-2 && pnfrac));
642 } 642 }
643 643
644 if (entropy_count < 0) { 644 if (unlikely(entropy_count < 0)) {
645 pr_warn("random: negative entropy/overflow: pool %s count %d\n", 645 pr_warn("random: negative entropy/overflow: pool %s count %d\n",
646 r->name, entropy_count); 646 r->name, entropy_count);
647 WARN_ON(1); 647 WARN_ON(1);
@@ -981,7 +981,7 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
981 int reserved) 981 int reserved)
982{ 982{
983 int entropy_count, orig; 983 int entropy_count, orig;
984 size_t ibytes; 984 size_t ibytes, nfrac;
985 985
986 BUG_ON(r->entropy_count > r->poolinfo->poolfracbits); 986 BUG_ON(r->entropy_count > r->poolinfo->poolfracbits);
987 987
@@ -999,7 +999,17 @@ retry:
999 } 999 }
1000 if (ibytes < min) 1000 if (ibytes < min)
1001 ibytes = 0; 1001 ibytes = 0;
1002 if ((entropy_count -= ibytes << (ENTROPY_SHIFT + 3)) < 0) 1002
1003 if (unlikely(entropy_count < 0)) {
1004 pr_warn("random: negative entropy count: pool %s count %d\n",
1005 r->name, entropy_count);
1006 WARN_ON(1);
1007 entropy_count = 0;
1008 }
1009 nfrac = ibytes << (ENTROPY_SHIFT + 3);
1010 if ((size_t) entropy_count > nfrac)
1011 entropy_count -= nfrac;
1012 else
1003 entropy_count = 0; 1013 entropy_count = 0;
1004 1014
1005 if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig) 1015 if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
@@ -1376,6 +1386,7 @@ urandom_read(struct file *file, char __user *buf, size_t nbytes, loff_t *ppos)
1376 "with %d bits of entropy available\n", 1386 "with %d bits of entropy available\n",
1377 current->comm, nonblocking_pool.entropy_total); 1387 current->comm, nonblocking_pool.entropy_total);
1378 1388
1389 nbytes = min_t(size_t, nbytes, INT_MAX >> (ENTROPY_SHIFT + 3));
1379 ret = extract_entropy_user(&nonblocking_pool, buf, nbytes); 1390 ret = extract_entropy_user(&nonblocking_pool, buf, nbytes);
1380 1391
1381 trace_urandom_read(8 * nbytes, ENTROPY_BITS(&nonblocking_pool), 1392 trace_urandom_read(8 * nbytes, ENTROPY_BITS(&nonblocking_pool),
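The account() rework above avoids a signed underflow: the bytes being handed out are converted to pool "fraction" units first, and the subtraction only happens when the pool really holds that much; urandom_read() additionally clamps nbytes so the shift cannot overflow an int. A compact sketch of that saturating bookkeeping with the same shift value:

#include <stdio.h>
#include <stddef.h>
#include <limits.h>

#define ENTROPY_SHIFT 3         /* same shift the kernel uses */

/* Take up to nbytes' worth of entropy out of a pool counted in
 * 1/8-bit fractions, never letting the count go negative. */
static int account(int entropy_count, size_t nbytes)
{
        size_t nfrac = nbytes << (ENTROPY_SHIFT + 3);   /* bytes -> fractions */

        if (entropy_count < 0)
                entropy_count = 0;                      /* defensive, as in the patch */

        if ((size_t)entropy_count > nfrac)
                entropy_count -= nfrac;
        else
                entropy_count = 0;                      /* saturate instead of underflowing */

        return entropy_count;
}

int main(void)
{
        size_t nbytes = 1000;

        /* the clamp urandom_read() now applies before extracting */
        if (nbytes > (size_t)(INT_MAX >> (ENTROPY_SHIFT + 3)))
                nbytes = INT_MAX >> (ENTROPY_SHIFT + 3);

        printf("%d\n", account(4096, nbytes)); /* 4096 - 64000 saturates to 0 */
        printf("%d\n", account(4096, 16));     /* 4096 - 1024 = 3072 */
        return 0;
}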
diff --git a/drivers/clk/clk-s2mps11.c b/drivers/clk/clk-s2mps11.c
index 9b7b5859a420..3757e9e72d37 100644
--- a/drivers/clk/clk-s2mps11.c
+++ b/drivers/clk/clk-s2mps11.c
@@ -230,16 +230,13 @@ static int s2mps11_clk_probe(struct platform_device *pdev)
230 goto err_reg; 230 goto err_reg;
231 } 231 }
232 232
233 s2mps11_clk->lookup = devm_kzalloc(&pdev->dev, 233 s2mps11_clk->lookup = clkdev_alloc(s2mps11_clk->clk,
234 sizeof(struct clk_lookup), GFP_KERNEL); 234 s2mps11_name(s2mps11_clk), NULL);
235 if (!s2mps11_clk->lookup) { 235 if (!s2mps11_clk->lookup) {
236 ret = -ENOMEM; 236 ret = -ENOMEM;
237 goto err_lup; 237 goto err_lup;
238 } 238 }
239 239
240 s2mps11_clk->lookup->con_id = s2mps11_name(s2mps11_clk);
241 s2mps11_clk->lookup->clk = s2mps11_clk->clk;
242
243 clkdev_add(s2mps11_clk->lookup); 240 clkdev_add(s2mps11_clk->lookup);
244 } 241 }
245 242
diff --git a/drivers/clk/qcom/mmcc-msm8960.c b/drivers/clk/qcom/mmcc-msm8960.c
index 12f3c0b64fcd..4c449b3170f6 100644
--- a/drivers/clk/qcom/mmcc-msm8960.c
+++ b/drivers/clk/qcom/mmcc-msm8960.c
@@ -1209,7 +1209,7 @@ static struct clk_branch rot_clk = {
1209 1209
1210static u8 mmcc_pxo_hdmi_map[] = { 1210static u8 mmcc_pxo_hdmi_map[] = {
1211 [P_PXO] = 0, 1211 [P_PXO] = 0,
1212 [P_HDMI_PLL] = 2, 1212 [P_HDMI_PLL] = 3,
1213}; 1213};
1214 1214
1215static const char *mmcc_pxo_hdmi[] = { 1215static const char *mmcc_pxo_hdmi[] = {
diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c
index 4f150c9dd38c..7f4a473a7ad7 100644
--- a/drivers/clk/samsung/clk-exynos4.c
+++ b/drivers/clk/samsung/clk-exynos4.c
@@ -925,21 +925,13 @@ static struct samsung_gate_clock exynos4x12_gate_clks[] __initdata = {
925 GATE(CLK_RTC, "rtc", "aclk100", E4X12_GATE_IP_PERIR, 15, 925 GATE(CLK_RTC, "rtc", "aclk100", E4X12_GATE_IP_PERIR, 15,
926 0, 0), 926 0, 0),
927 GATE(CLK_KEYIF, "keyif", "aclk100", E4X12_GATE_IP_PERIR, 16, 0, 0), 927 GATE(CLK_KEYIF, "keyif", "aclk100", E4X12_GATE_IP_PERIR, 16, 0, 0),
928 GATE(CLK_SCLK_PWM_ISP, "sclk_pwm_isp", "div_pwm_isp", 928 GATE(CLK_PWM_ISP_SCLK, "pwm_isp_sclk", "div_pwm_isp",
929 E4X12_SRC_MASK_ISP, 0, CLK_SET_RATE_PARENT, 0),
930 GATE(CLK_SCLK_SPI0_ISP, "sclk_spi0_isp", "div_spi0_isp_pre",
931 E4X12_SRC_MASK_ISP, 4, CLK_SET_RATE_PARENT, 0),
932 GATE(CLK_SCLK_SPI1_ISP, "sclk_spi1_isp", "div_spi1_isp_pre",
933 E4X12_SRC_MASK_ISP, 8, CLK_SET_RATE_PARENT, 0),
934 GATE(CLK_SCLK_UART_ISP, "sclk_uart_isp", "div_uart_isp",
935 E4X12_SRC_MASK_ISP, 12, CLK_SET_RATE_PARENT, 0),
936 GATE(CLK_PWM_ISP_SCLK, "pwm_isp_sclk", "sclk_pwm_isp",
937 E4X12_GATE_IP_ISP, 0, 0, 0), 929 E4X12_GATE_IP_ISP, 0, 0, 0),
938 GATE(CLK_SPI0_ISP_SCLK, "spi0_isp_sclk", "sclk_spi0_isp", 930 GATE(CLK_SPI0_ISP_SCLK, "spi0_isp_sclk", "div_spi0_isp_pre",
939 E4X12_GATE_IP_ISP, 1, 0, 0), 931 E4X12_GATE_IP_ISP, 1, 0, 0),
940 GATE(CLK_SPI1_ISP_SCLK, "spi1_isp_sclk", "sclk_spi1_isp", 932 GATE(CLK_SPI1_ISP_SCLK, "spi1_isp_sclk", "div_spi1_isp_pre",
941 E4X12_GATE_IP_ISP, 2, 0, 0), 933 E4X12_GATE_IP_ISP, 2, 0, 0),
942 GATE(CLK_UART_ISP_SCLK, "uart_isp_sclk", "sclk_uart_isp", 934 GATE(CLK_UART_ISP_SCLK, "uart_isp_sclk", "div_uart_isp",
943 E4X12_GATE_IP_ISP, 3, 0, 0), 935 E4X12_GATE_IP_ISP, 3, 0, 0),
944 GATE(CLK_WDT, "watchdog", "aclk100", E4X12_GATE_IP_PERIR, 14, 0, 0), 936 GATE(CLK_WDT, "watchdog", "aclk100", E4X12_GATE_IP_PERIR, 14, 0, 0),
945 GATE(CLK_PCM0, "pcm0", "aclk100", E4X12_GATE_IP_MAUDIO, 2, 937 GATE(CLK_PCM0, "pcm0", "aclk100", E4X12_GATE_IP_MAUDIO, 2,
diff --git a/drivers/clk/samsung/clk-exynos5250.c b/drivers/clk/samsung/clk-exynos5250.c
index 1fad4c5e3f5d..184f64293b26 100644
--- a/drivers/clk/samsung/clk-exynos5250.c
+++ b/drivers/clk/samsung/clk-exynos5250.c
@@ -661,7 +661,7 @@ static struct samsung_gate_clock exynos5250_gate_clks[] __initdata = {
661 GATE(CLK_RTC, "rtc", "div_aclk66", GATE_IP_PERIS, 20, 0, 0), 661 GATE(CLK_RTC, "rtc", "div_aclk66", GATE_IP_PERIS, 20, 0, 0),
662 GATE(CLK_TMU, "tmu", "div_aclk66", GATE_IP_PERIS, 21, 0, 0), 662 GATE(CLK_TMU, "tmu", "div_aclk66", GATE_IP_PERIS, 21, 0, 0),
663 GATE(CLK_SMMU_TV, "smmu_tv", "mout_aclk200_disp1_sub", 663 GATE(CLK_SMMU_TV, "smmu_tv", "mout_aclk200_disp1_sub",
664 GATE_IP_DISP1, 2, 0, 0), 664 GATE_IP_DISP1, 9, 0, 0),
665 GATE(CLK_SMMU_FIMD1, "smmu_fimd1", "mout_aclk200_disp1_sub", 665 GATE(CLK_SMMU_FIMD1, "smmu_fimd1", "mout_aclk200_disp1_sub",
666 GATE_IP_DISP1, 8, 0, 0), 666 GATE_IP_DISP1, 8, 0, 0),
667 GATE(CLK_SMMU_2D, "smmu_2d", "div_aclk200", GATE_IP_ACP, 7, 0, 0), 667 GATE(CLK_SMMU_2D, "smmu_2d", "div_aclk200", GATE_IP_ACP, 7, 0, 0),
diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c
index 9d7d7eed03fd..a4e6cc782e5c 100644
--- a/drivers/clk/samsung/clk-exynos5420.c
+++ b/drivers/clk/samsung/clk-exynos5420.c
@@ -631,7 +631,8 @@ static struct samsung_mux_clock exynos5x_mux_clks[] __initdata = {
631 SRC_TOP4, 16, 1), 631 SRC_TOP4, 16, 1),
632 MUX(0, "mout_user_aclk266", mout_user_aclk266_p, SRC_TOP4, 20, 1), 632 MUX(0, "mout_user_aclk266", mout_user_aclk266_p, SRC_TOP4, 20, 1),
633 MUX(0, "mout_user_aclk166", mout_user_aclk166_p, SRC_TOP4, 24, 1), 633 MUX(0, "mout_user_aclk166", mout_user_aclk166_p, SRC_TOP4, 24, 1),
634 MUX(0, "mout_user_aclk333", mout_user_aclk333_p, SRC_TOP4, 28, 1), 634 MUX(CLK_MOUT_USER_ACLK333, "mout_user_aclk333", mout_user_aclk333_p,
635 SRC_TOP4, 28, 1),
635 636
636 MUX(0, "mout_user_aclk400_disp1", mout_user_aclk400_disp1_p, 637 MUX(0, "mout_user_aclk400_disp1", mout_user_aclk400_disp1_p,
637 SRC_TOP5, 0, 1), 638 SRC_TOP5, 0, 1),
@@ -684,7 +685,8 @@ static struct samsung_mux_clock exynos5x_mux_clks[] __initdata = {
684 SRC_TOP11, 12, 1), 685 SRC_TOP11, 12, 1),
685 MUX(0, "mout_sw_aclk266", mout_sw_aclk266_p, SRC_TOP11, 20, 1), 686 MUX(0, "mout_sw_aclk266", mout_sw_aclk266_p, SRC_TOP11, 20, 1),
686 MUX(0, "mout_sw_aclk166", mout_sw_aclk166_p, SRC_TOP11, 24, 1), 687 MUX(0, "mout_sw_aclk166", mout_sw_aclk166_p, SRC_TOP11, 24, 1),
687 MUX(0, "mout_sw_aclk333", mout_sw_aclk333_p, SRC_TOP11, 28, 1), 688 MUX(CLK_MOUT_SW_ACLK333, "mout_sw_aclk333", mout_sw_aclk333_p,
689 SRC_TOP11, 28, 1),
688 690
689 MUX(0, "mout_sw_aclk400_disp1", mout_sw_aclk400_disp1_p, 691 MUX(0, "mout_sw_aclk400_disp1", mout_sw_aclk400_disp1_p,
690 SRC_TOP12, 4, 1), 692 SRC_TOP12, 4, 1),
@@ -890,8 +892,6 @@ static struct samsung_gate_clock exynos5x_gate_clks[] __initdata = {
890 GATE_BUS_TOP, 9, CLK_IGNORE_UNUSED, 0), 892 GATE_BUS_TOP, 9, CLK_IGNORE_UNUSED, 0),
891 GATE(0, "aclk66_psgen", "mout_user_aclk66_psgen", 893 GATE(0, "aclk66_psgen", "mout_user_aclk66_psgen",
892 GATE_BUS_TOP, 10, CLK_IGNORE_UNUSED, 0), 894 GATE_BUS_TOP, 10, CLK_IGNORE_UNUSED, 0),
893 GATE(CLK_ACLK66_PERIC, "aclk66_peric", "mout_user_aclk66_peric",
894 GATE_BUS_TOP, 11, CLK_IGNORE_UNUSED, 0),
895 GATE(0, "aclk266_isp", "mout_user_aclk266_isp", 895 GATE(0, "aclk266_isp", "mout_user_aclk266_isp",
896 GATE_BUS_TOP, 13, 0, 0), 896 GATE_BUS_TOP, 13, 0, 0),
897 GATE(0, "aclk166", "mout_user_aclk166", 897 GATE(0, "aclk166", "mout_user_aclk166",
@@ -994,34 +994,61 @@ static struct samsung_gate_clock exynos5x_gate_clks[] __initdata = {
994 SRC_MASK_FSYS, 24, CLK_SET_RATE_PARENT, 0), 994 SRC_MASK_FSYS, 24, CLK_SET_RATE_PARENT, 0),
995 995
996 /* PERIC Block */ 996 /* PERIC Block */
997 GATE(CLK_UART0, "uart0", "aclk66_peric", GATE_IP_PERIC, 0, 0, 0), 997 GATE(CLK_UART0, "uart0", "mout_user_aclk66_peric",
998 GATE(CLK_UART1, "uart1", "aclk66_peric", GATE_IP_PERIC, 1, 0, 0), 998 GATE_IP_PERIC, 0, 0, 0),
999 GATE(CLK_UART2, "uart2", "aclk66_peric", GATE_IP_PERIC, 2, 0, 0), 999 GATE(CLK_UART1, "uart1", "mout_user_aclk66_peric",
1000 GATE(CLK_UART3, "uart3", "aclk66_peric", GATE_IP_PERIC, 3, 0, 0), 1000 GATE_IP_PERIC, 1, 0, 0),
1001 GATE(CLK_I2C0, "i2c0", "aclk66_peric", GATE_IP_PERIC, 6, 0, 0), 1001 GATE(CLK_UART2, "uart2", "mout_user_aclk66_peric",
1002 GATE(CLK_I2C1, "i2c1", "aclk66_peric", GATE_IP_PERIC, 7, 0, 0), 1002 GATE_IP_PERIC, 2, 0, 0),
1003 GATE(CLK_I2C2, "i2c2", "aclk66_peric", GATE_IP_PERIC, 8, 0, 0), 1003 GATE(CLK_UART3, "uart3", "mout_user_aclk66_peric",
1004 GATE(CLK_I2C3, "i2c3", "aclk66_peric", GATE_IP_PERIC, 9, 0, 0), 1004 GATE_IP_PERIC, 3, 0, 0),
1005 GATE(CLK_USI0, "usi0", "aclk66_peric", GATE_IP_PERIC, 10, 0, 0), 1005 GATE(CLK_I2C0, "i2c0", "mout_user_aclk66_peric",
1006 GATE(CLK_USI1, "usi1", "aclk66_peric", GATE_IP_PERIC, 11, 0, 0), 1006 GATE_IP_PERIC, 6, 0, 0),
1007 GATE(CLK_USI2, "usi2", "aclk66_peric", GATE_IP_PERIC, 12, 0, 0), 1007 GATE(CLK_I2C1, "i2c1", "mout_user_aclk66_peric",
1008 GATE(CLK_USI3, "usi3", "aclk66_peric", GATE_IP_PERIC, 13, 0, 0), 1008 GATE_IP_PERIC, 7, 0, 0),
1009 GATE(CLK_I2C_HDMI, "i2c_hdmi", "aclk66_peric", GATE_IP_PERIC, 14, 0, 0), 1009 GATE(CLK_I2C2, "i2c2", "mout_user_aclk66_peric",
1010 GATE(CLK_TSADC, "tsadc", "aclk66_peric", GATE_IP_PERIC, 15, 0, 0), 1010 GATE_IP_PERIC, 8, 0, 0),
1011 GATE(CLK_SPI0, "spi0", "aclk66_peric", GATE_IP_PERIC, 16, 0, 0), 1011 GATE(CLK_I2C3, "i2c3", "mout_user_aclk66_peric",
1012 GATE(CLK_SPI1, "spi1", "aclk66_peric", GATE_IP_PERIC, 17, 0, 0), 1012 GATE_IP_PERIC, 9, 0, 0),
1013 GATE(CLK_SPI2, "spi2", "aclk66_peric", GATE_IP_PERIC, 18, 0, 0), 1013 GATE(CLK_USI0, "usi0", "mout_user_aclk66_peric",
1014 GATE(CLK_I2S1, "i2s1", "aclk66_peric", GATE_IP_PERIC, 20, 0, 0), 1014 GATE_IP_PERIC, 10, 0, 0),
1015 GATE(CLK_I2S2, "i2s2", "aclk66_peric", GATE_IP_PERIC, 21, 0, 0), 1015 GATE(CLK_USI1, "usi1", "mout_user_aclk66_peric",
1016 GATE(CLK_PCM1, "pcm1", "aclk66_peric", GATE_IP_PERIC, 22, 0, 0), 1016 GATE_IP_PERIC, 11, 0, 0),
1017 GATE(CLK_PCM2, "pcm2", "aclk66_peric", GATE_IP_PERIC, 23, 0, 0), 1017 GATE(CLK_USI2, "usi2", "mout_user_aclk66_peric",
1018 GATE(CLK_PWM, "pwm", "aclk66_peric", GATE_IP_PERIC, 24, 0, 0), 1018 GATE_IP_PERIC, 12, 0, 0),
1019 GATE(CLK_SPDIF, "spdif", "aclk66_peric", GATE_IP_PERIC, 26, 0, 0), 1019 GATE(CLK_USI3, "usi3", "mout_user_aclk66_peric",
1020 GATE(CLK_USI4, "usi4", "aclk66_peric", GATE_IP_PERIC, 28, 0, 0), 1020 GATE_IP_PERIC, 13, 0, 0),
1021 GATE(CLK_USI5, "usi5", "aclk66_peric", GATE_IP_PERIC, 30, 0, 0), 1021 GATE(CLK_I2C_HDMI, "i2c_hdmi", "mout_user_aclk66_peric",
1022 GATE(CLK_USI6, "usi6", "aclk66_peric", GATE_IP_PERIC, 31, 0, 0), 1022 GATE_IP_PERIC, 14, 0, 0),
1023 1023 GATE(CLK_TSADC, "tsadc", "mout_user_aclk66_peric",
1024 GATE(CLK_KEYIF, "keyif", "aclk66_peric", GATE_BUS_PERIC, 22, 0, 0), 1024 GATE_IP_PERIC, 15, 0, 0),
1025 GATE(CLK_SPI0, "spi0", "mout_user_aclk66_peric",
1026 GATE_IP_PERIC, 16, 0, 0),
1027 GATE(CLK_SPI1, "spi1", "mout_user_aclk66_peric",
1028 GATE_IP_PERIC, 17, 0, 0),
1029 GATE(CLK_SPI2, "spi2", "mout_user_aclk66_peric",
1030 GATE_IP_PERIC, 18, 0, 0),
1031 GATE(CLK_I2S1, "i2s1", "mout_user_aclk66_peric",
1032 GATE_IP_PERIC, 20, 0, 0),
1033 GATE(CLK_I2S2, "i2s2", "mout_user_aclk66_peric",
1034 GATE_IP_PERIC, 21, 0, 0),
1035 GATE(CLK_PCM1, "pcm1", "mout_user_aclk66_peric",
1036 GATE_IP_PERIC, 22, 0, 0),
1037 GATE(CLK_PCM2, "pcm2", "mout_user_aclk66_peric",
1038 GATE_IP_PERIC, 23, 0, 0),
1039 GATE(CLK_PWM, "pwm", "mout_user_aclk66_peric",
1040 GATE_IP_PERIC, 24, 0, 0),
1041 GATE(CLK_SPDIF, "spdif", "mout_user_aclk66_peric",
1042 GATE_IP_PERIC, 26, 0, 0),
1043 GATE(CLK_USI4, "usi4", "mout_user_aclk66_peric",
1044 GATE_IP_PERIC, 28, 0, 0),
1045 GATE(CLK_USI5, "usi5", "mout_user_aclk66_peric",
1046 GATE_IP_PERIC, 30, 0, 0),
1047 GATE(CLK_USI6, "usi6", "mout_user_aclk66_peric",
1048 GATE_IP_PERIC, 31, 0, 0),
1049
1050 GATE(CLK_KEYIF, "keyif", "mout_user_aclk66_peric",
1051 GATE_BUS_PERIC, 22, 0, 0),
1025 1052
1026 /* PERIS Block */ 1053 /* PERIS Block */
1027 GATE(CLK_CHIPID, "chipid", "aclk66_psgen", 1054 GATE(CLK_CHIPID, "chipid", "aclk66_psgen",
diff --git a/drivers/clk/samsung/clk-s3c2410.c b/drivers/clk/samsung/clk-s3c2410.c
index ba0716801db2..140f4733c02e 100644
--- a/drivers/clk/samsung/clk-s3c2410.c
+++ b/drivers/clk/samsung/clk-s3c2410.c
@@ -152,6 +152,11 @@ struct samsung_clock_alias s3c2410_common_aliases[] __initdata = {
152 ALIAS(HCLK, NULL, "hclk"), 152 ALIAS(HCLK, NULL, "hclk"),
153 ALIAS(MPLL, NULL, "mpll"), 153 ALIAS(MPLL, NULL, "mpll"),
154 ALIAS(FCLK, NULL, "fclk"), 154 ALIAS(FCLK, NULL, "fclk"),
155 ALIAS(PCLK, NULL, "watchdog"),
156 ALIAS(PCLK_SDI, NULL, "sdi"),
157 ALIAS(HCLK_NAND, NULL, "nand"),
158 ALIAS(PCLK_I2S, NULL, "iis"),
159 ALIAS(PCLK_I2C, NULL, "i2c"),
155}; 160};
156 161
157/* S3C2410 specific clocks */ 162/* S3C2410 specific clocks */
@@ -378,7 +383,7 @@ void __init s3c2410_common_clk_init(struct device_node *np, unsigned long xti_f,
378 if (!np) 383 if (!np)
379 s3c2410_common_clk_register_fixed_ext(ctx, xti_f); 384 s3c2410_common_clk_register_fixed_ext(ctx, xti_f);
380 385
381 if (current_soc == 2410) { 386 if (current_soc == S3C2410) {
382 if (_get_rate("xti") == 12 * MHZ) { 387 if (_get_rate("xti") == 12 * MHZ) {
383 s3c2410_plls[mpll].rate_table = pll_s3c2410_12mhz_tbl; 388 s3c2410_plls[mpll].rate_table = pll_s3c2410_12mhz_tbl;
384 s3c2410_plls[upll].rate_table = pll_s3c2410_12mhz_tbl; 389 s3c2410_plls[upll].rate_table = pll_s3c2410_12mhz_tbl;
@@ -432,7 +437,7 @@ void __init s3c2410_common_clk_init(struct device_node *np, unsigned long xti_f,
432 samsung_clk_register_fixed_factor(ctx, s3c2410_ffactor, 437 samsung_clk_register_fixed_factor(ctx, s3c2410_ffactor,
433 ARRAY_SIZE(s3c2410_ffactor)); 438 ARRAY_SIZE(s3c2410_ffactor));
434 samsung_clk_register_alias(ctx, s3c2410_aliases, 439 samsung_clk_register_alias(ctx, s3c2410_aliases,
435 ARRAY_SIZE(s3c2410_common_aliases)); 440 ARRAY_SIZE(s3c2410_aliases));
436 break; 441 break;
437 case S3C2440: 442 case S3C2440:
438 samsung_clk_register_mux(ctx, s3c2440_muxes, 443 samsung_clk_register_mux(ctx, s3c2440_muxes,
diff --git a/drivers/clk/samsung/clk-s3c64xx.c b/drivers/clk/samsung/clk-s3c64xx.c
index efa16ee592c8..8889ff1c10fc 100644
--- a/drivers/clk/samsung/clk-s3c64xx.c
+++ b/drivers/clk/samsung/clk-s3c64xx.c
@@ -418,8 +418,10 @@ static struct samsung_clock_alias s3c64xx_clock_aliases[] = {
418 ALIAS(SCLK_MMC2, "s3c-sdhci.2", "mmc_busclk.2"), 418 ALIAS(SCLK_MMC2, "s3c-sdhci.2", "mmc_busclk.2"),
419 ALIAS(SCLK_MMC1, "s3c-sdhci.1", "mmc_busclk.2"), 419 ALIAS(SCLK_MMC1, "s3c-sdhci.1", "mmc_busclk.2"),
420 ALIAS(SCLK_MMC0, "s3c-sdhci.0", "mmc_busclk.2"), 420 ALIAS(SCLK_MMC0, "s3c-sdhci.0", "mmc_busclk.2"),
421 ALIAS(SCLK_SPI1, "s3c6410-spi.1", "spi-bus"), 421 ALIAS(PCLK_SPI1, "s3c6410-spi.1", "spi_busclk0"),
422 ALIAS(SCLK_SPI0, "s3c6410-spi.0", "spi-bus"), 422 ALIAS(SCLK_SPI1, "s3c6410-spi.1", "spi_busclk2"),
423 ALIAS(PCLK_SPI0, "s3c6410-spi.0", "spi_busclk0"),
424 ALIAS(SCLK_SPI0, "s3c6410-spi.0", "spi_busclk2"),
423 ALIAS(SCLK_AUDIO1, "samsung-pcm.1", "audio-bus"), 425 ALIAS(SCLK_AUDIO1, "samsung-pcm.1", "audio-bus"),
424 ALIAS(SCLK_AUDIO1, "samsung-i2s.1", "audio-bus"), 426 ALIAS(SCLK_AUDIO1, "samsung-i2s.1", "audio-bus"),
425 ALIAS(SCLK_AUDIO0, "samsung-pcm.0", "audio-bus"), 427 ALIAS(SCLK_AUDIO0, "samsung-pcm.0", "audio-bus"),
diff --git a/drivers/clk/spear/spear3xx_clock.c b/drivers/clk/spear/spear3xx_clock.c
index c2d204315546..bb5f387774e2 100644
--- a/drivers/clk/spear/spear3xx_clock.c
+++ b/drivers/clk/spear/spear3xx_clock.c
@@ -211,7 +211,7 @@ static inline void spear310_clk_init(void) { }
211/* array of all spear 320 clock lookups */ 211/* array of all spear 320 clock lookups */
212#ifdef CONFIG_MACH_SPEAR320 212#ifdef CONFIG_MACH_SPEAR320
213 213
214#define SPEAR320_CONTROL_REG (soc_config_base + 0x0000) 214#define SPEAR320_CONTROL_REG (soc_config_base + 0x0010)
215#define SPEAR320_EXT_CTRL_REG (soc_config_base + 0x0018) 215#define SPEAR320_EXT_CTRL_REG (soc_config_base + 0x0018)
216 216
217 #define SPEAR320_UARTX_PCLK_MASK 0x1 217 #define SPEAR320_UARTX_PCLK_MASK 0x1
@@ -245,7 +245,8 @@ static const char *smii0_parents[] = { "smii_125m_pad", "ras_pll2_clk",
245 "ras_syn0_gclk", }; 245 "ras_syn0_gclk", };
246static const char *uartx_parents[] = { "ras_syn1_gclk", "ras_apb_clk", }; 246static const char *uartx_parents[] = { "ras_syn1_gclk", "ras_apb_clk", };
247 247
248static void __init spear320_clk_init(void __iomem *soc_config_base) 248static void __init spear320_clk_init(void __iomem *soc_config_base,
249 struct clk *ras_apb_clk)
249{ 250{
250 struct clk *clk; 251 struct clk *clk;
251 252
@@ -342,6 +343,8 @@ static void __init spear320_clk_init(void __iomem *soc_config_base)
342 SPEAR320_CONTROL_REG, UART1_PCLK_SHIFT, UART1_PCLK_MASK, 343 SPEAR320_CONTROL_REG, UART1_PCLK_SHIFT, UART1_PCLK_MASK,
343 0, &_lock); 344 0, &_lock);
344 clk_register_clkdev(clk, NULL, "a3000000.serial"); 345 clk_register_clkdev(clk, NULL, "a3000000.serial");
346 /* Enforce ras_apb_clk */
347 clk_set_parent(clk, ras_apb_clk);
345 348
346 clk = clk_register_mux(NULL, "uart2_clk", uartx_parents, 349 clk = clk_register_mux(NULL, "uart2_clk", uartx_parents,
347 ARRAY_SIZE(uartx_parents), 350 ARRAY_SIZE(uartx_parents),
@@ -349,6 +352,8 @@ static void __init spear320_clk_init(void __iomem *soc_config_base)
349 SPEAR320_EXT_CTRL_REG, SPEAR320_UART2_PCLK_SHIFT, 352 SPEAR320_EXT_CTRL_REG, SPEAR320_UART2_PCLK_SHIFT,
350 SPEAR320_UARTX_PCLK_MASK, 0, &_lock); 353 SPEAR320_UARTX_PCLK_MASK, 0, &_lock);
351 clk_register_clkdev(clk, NULL, "a4000000.serial"); 354 clk_register_clkdev(clk, NULL, "a4000000.serial");
355 /* Enforce ras_apb_clk */
356 clk_set_parent(clk, ras_apb_clk);
352 357
353 clk = clk_register_mux(NULL, "uart3_clk", uartx_parents, 358 clk = clk_register_mux(NULL, "uart3_clk", uartx_parents,
354 ARRAY_SIZE(uartx_parents), 359 ARRAY_SIZE(uartx_parents),
@@ -379,12 +384,12 @@ static void __init spear320_clk_init(void __iomem *soc_config_base)
379 clk_register_clkdev(clk, NULL, "60100000.serial"); 384 clk_register_clkdev(clk, NULL, "60100000.serial");
380} 385}
381#else 386#else
382static inline void spear320_clk_init(void __iomem *soc_config_base) { } 387static inline void spear320_clk_init(void __iomem *sb, struct clk *rc) { }
383#endif 388#endif
384 389
385void __init spear3xx_clk_init(void __iomem *misc_base, void __iomem *soc_config_base) 390void __init spear3xx_clk_init(void __iomem *misc_base, void __iomem *soc_config_base)
386{ 391{
387 struct clk *clk, *clk1; 392 struct clk *clk, *clk1, *ras_apb_clk;
388 393
389 clk = clk_register_fixed_rate(NULL, "osc_32k_clk", NULL, CLK_IS_ROOT, 394 clk = clk_register_fixed_rate(NULL, "osc_32k_clk", NULL, CLK_IS_ROOT,
390 32000); 395 32000);
@@ -613,6 +618,7 @@ void __init spear3xx_clk_init(void __iomem *misc_base, void __iomem *soc_config_
613 clk = clk_register_gate(NULL, "ras_apb_clk", "apb_clk", 0, RAS_CLK_ENB, 618 clk = clk_register_gate(NULL, "ras_apb_clk", "apb_clk", 0, RAS_CLK_ENB,
614 RAS_APB_CLK_ENB, 0, &_lock); 619 RAS_APB_CLK_ENB, 0, &_lock);
615 clk_register_clkdev(clk, "ras_apb_clk", NULL); 620 clk_register_clkdev(clk, "ras_apb_clk", NULL);
621 ras_apb_clk = clk;
616 622
617 clk = clk_register_gate(NULL, "ras_32k_clk", "osc_32k_clk", 0, 623 clk = clk_register_gate(NULL, "ras_32k_clk", "osc_32k_clk", 0,
618 RAS_CLK_ENB, RAS_32K_CLK_ENB, 0, &_lock); 624 RAS_CLK_ENB, RAS_32K_CLK_ENB, 0, &_lock);
@@ -659,5 +665,5 @@ void __init spear3xx_clk_init(void __iomem *misc_base, void __iomem *soc_config_
659 else if (of_machine_is_compatible("st,spear310")) 665 else if (of_machine_is_compatible("st,spear310"))
660 spear310_clk_init(); 666 spear310_clk_init();
661 else if (of_machine_is_compatible("st,spear320")) 667 else if (of_machine_is_compatible("st,spear320"))
662 spear320_clk_init(soc_config_base); 668 spear320_clk_init(soc_config_base, ras_apb_clk);
663} 669}
diff --git a/drivers/clk/sunxi/clk-sun6i-apb0-gates.c b/drivers/clk/sunxi/clk-sun6i-apb0-gates.c
index 44cd27c5c401..670f90d629d7 100644
--- a/drivers/clk/sunxi/clk-sun6i-apb0-gates.c
+++ b/drivers/clk/sunxi/clk-sun6i-apb0-gates.c
@@ -29,7 +29,7 @@ static int sun6i_a31_apb0_gates_clk_probe(struct platform_device *pdev)
29 29
30 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 30 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
31 reg = devm_ioremap_resource(&pdev->dev, r); 31 reg = devm_ioremap_resource(&pdev->dev, r);
32 if (!reg) 32 if (IS_ERR(reg))
33 return PTR_ERR(reg); 33 return PTR_ERR(reg);
34 34
35 clk_parent = of_clk_get_parent_name(np, 0); 35 clk_parent = of_clk_get_parent_name(np, 0);
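devm_ioremap_resource() signals failure with an ERR_PTR(), never NULL, so the probe must test IS_ERR(); the old NULL check let error pointers through and then returned PTR_ERR(NULL) == 0. A small userspace sketch of the error-pointer convention, using simplified versions of the kernel macros:

#include <stdio.h>
#include <errno.h>

/* Simplified versions of the kernel's error-pointer helpers. */
#define ERR_PTR(err)  ((void *)(long)(err))
#define PTR_ERR(ptr)  ((long)(ptr))
#define IS_ERR(ptr)   ((unsigned long)(ptr) >= (unsigned long)-4095)

static char mmio[16];

static void *fake_ioremap(int fail)
{
        return fail ? ERR_PTR(-ENOMEM) : (void *)mmio;
}

int main(void)
{
        void *reg = fake_ioremap(1);

        /* An error pointer is non-NULL, so a NULL test (the old code)
         * would always pass and then hide the failure behind
         * PTR_ERR(NULL) == 0; IS_ERR() catches it. */
        if (IS_ERR(reg)) {
                printf("probe fails with %ld\n", PTR_ERR(reg));
                return 1;
        }
        printf("mapped at %p\n", reg);
        return 0;
}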
diff --git a/drivers/clk/ti/apll.c b/drivers/clk/ti/apll.c
index 5428c9c547cd..72d97279eae1 100644
--- a/drivers/clk/ti/apll.c
+++ b/drivers/clk/ti/apll.c
@@ -77,13 +77,11 @@ static int dra7_apll_enable(struct clk_hw *hw)
77 if (i == MAX_APLL_WAIT_TRIES) { 77 if (i == MAX_APLL_WAIT_TRIES) {
78 pr_warn("clock: %s failed transition to '%s'\n", 78 pr_warn("clock: %s failed transition to '%s'\n",
79 clk_name, (state) ? "locked" : "bypassed"); 79 clk_name, (state) ? "locked" : "bypassed");
80 } else { 80 r = -EBUSY;
81 } else
81 pr_debug("clock: %s transition to '%s' in %d loops\n", 82 pr_debug("clock: %s transition to '%s' in %d loops\n",
82 clk_name, (state) ? "locked" : "bypassed", i); 83 clk_name, (state) ? "locked" : "bypassed", i);
83 84
84 r = 0;
85 }
86
87 return r; 85 return r;
88} 86}
89 87
@@ -338,7 +336,7 @@ static void __init of_omap2_apll_setup(struct device_node *node)
338 const char *parent_name; 336 const char *parent_name;
339 u32 val; 337 u32 val;
340 338
341 ad = kzalloc(sizeof(*clk_hw), GFP_KERNEL); 339 ad = kzalloc(sizeof(*ad), GFP_KERNEL);
342 clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL); 340 clk_hw = kzalloc(sizeof(*clk_hw), GFP_KERNEL);
343 init = kzalloc(sizeof(*init), GFP_KERNEL); 341 init = kzalloc(sizeof(*init), GFP_KERNEL);
344 342
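Besides returning -EBUSY when the APLL never reaches the requested state, the hunk fixes the allocation to use sizeof(*ad) rather than sizeof(*clk_hw), so the buffer size always tracks the pointer it is assigned to. A short illustration of that idiom with made-up structure sizes:

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct clk_hw    { int x; };
struct dpll_data { double table[32]; };  /* much larger than clk_hw */

int main(void)
{
        /* Wrong: compiles, but allocates sizeof(struct clk_hw) bytes
         * for a dpll_data pointer -- exactly the bug the patch fixes.
         *     struct dpll_data *ad = malloc(sizeof(struct clk_hw));
         */

        /* Right: the size always tracks the pointer's own type. */
        struct dpll_data *ad = malloc(sizeof(*ad));

        if (!ad)
                return 1;
        memset(ad, 0, sizeof(*ad));
        printf("allocated %zu bytes\n", sizeof(*ad));
        free(ad);
        return 0;
}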
diff --git a/drivers/clk/ti/dpll.c b/drivers/clk/ti/dpll.c
index abd956d5f838..79791e1bf282 100644
--- a/drivers/clk/ti/dpll.c
+++ b/drivers/clk/ti/dpll.c
@@ -161,7 +161,8 @@ cleanup:
161} 161}
162 162
163#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \ 163#if defined(CONFIG_ARCH_OMAP4) || defined(CONFIG_SOC_OMAP5) || \
164 defined(CONFIG_SOC_DRA7XX) || defined(CONFIG_SOC_AM33XX) 164 defined(CONFIG_SOC_DRA7XX) || defined(CONFIG_SOC_AM33XX) || \
165 defined(CONFIG_SOC_AM43XX)
165/** 166/**
166 * ti_clk_register_dpll_x2 - Registers a DPLLx2 clock 167 * ti_clk_register_dpll_x2 - Registers a DPLLx2 clock
167 * @node: device node for this clock 168 * @node: device node for this clock
@@ -322,7 +323,7 @@ CLK_OF_DECLARE(ti_omap4_dpll_x2_clock, "ti,omap4-dpll-x2-clock",
322 of_ti_omap4_dpll_x2_setup); 323 of_ti_omap4_dpll_x2_setup);
323#endif 324#endif
324 325
325#ifdef CONFIG_SOC_AM33XX 326#if defined(CONFIG_SOC_AM33XX) || defined(CONFIG_SOC_AM43XX)
326static void __init of_ti_am3_dpll_x2_setup(struct device_node *node) 327static void __init of_ti_am3_dpll_x2_setup(struct device_node *node)
327{ 328{
328 ti_clk_register_dpll_x2(node, &dpll_x2_ck_ops, NULL); 329 ti_clk_register_dpll_x2(node, &dpll_x2_ck_ops, NULL);
diff --git a/drivers/clk/ti/mux.c b/drivers/clk/ti/mux.c
index 0197a478720c..e9d650e51287 100644
--- a/drivers/clk/ti/mux.c
+++ b/drivers/clk/ti/mux.c
@@ -160,7 +160,7 @@ static void of_mux_clk_setup(struct device_node *node)
160 u8 clk_mux_flags = 0; 160 u8 clk_mux_flags = 0;
161 u32 mask = 0; 161 u32 mask = 0;
162 u32 shift = 0; 162 u32 shift = 0;
163 u32 flags = 0; 163 u32 flags = CLK_SET_RATE_NO_REPARENT;
164 164
165 num_parents = of_clk_get_parent_count(node); 165 num_parents = of_clk_get_parent_count(node);
166 if (num_parents < 2) { 166 if (num_parents < 2) {
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c
index 8d6420013a04..ab51bf20a3ed 100644
--- a/drivers/clocksource/exynos_mct.c
+++ b/drivers/clocksource/exynos_mct.c
@@ -153,19 +153,16 @@ static void exynos4_mct_write(unsigned int value, unsigned long offset)
153} 153}
154 154
155/* Clocksource handling */ 155/* Clocksource handling */
156static void exynos4_mct_frc_start(u32 hi, u32 lo) 156static void exynos4_mct_frc_start(void)
157{ 157{
158 u32 reg; 158 u32 reg;
159 159
160 exynos4_mct_write(lo, EXYNOS4_MCT_G_CNT_L);
161 exynos4_mct_write(hi, EXYNOS4_MCT_G_CNT_U);
162
163 reg = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON); 160 reg = __raw_readl(reg_base + EXYNOS4_MCT_G_TCON);
164 reg |= MCT_G_TCON_START; 161 reg |= MCT_G_TCON_START;
165 exynos4_mct_write(reg, EXYNOS4_MCT_G_TCON); 162 exynos4_mct_write(reg, EXYNOS4_MCT_G_TCON);
166} 163}
167 164
168static cycle_t exynos4_frc_read(struct clocksource *cs) 165static cycle_t notrace _exynos4_frc_read(void)
169{ 166{
170 unsigned int lo, hi; 167 unsigned int lo, hi;
171 u32 hi2 = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_U); 168 u32 hi2 = __raw_readl(reg_base + EXYNOS4_MCT_G_CNT_U);
@@ -179,9 +176,14 @@ static cycle_t exynos4_frc_read(struct clocksource *cs)
179 return ((cycle_t)hi << 32) | lo; 176 return ((cycle_t)hi << 32) | lo;
180} 177}
181 178
179static cycle_t exynos4_frc_read(struct clocksource *cs)
180{
181 return _exynos4_frc_read();
182}
183
182static void exynos4_frc_resume(struct clocksource *cs) 184static void exynos4_frc_resume(struct clocksource *cs)
183{ 185{
184 exynos4_mct_frc_start(0, 0); 186 exynos4_mct_frc_start();
185} 187}
186 188
187struct clocksource mct_frc = { 189struct clocksource mct_frc = {
@@ -195,12 +197,23 @@ struct clocksource mct_frc = {
195 197
196static u64 notrace exynos4_read_sched_clock(void) 198static u64 notrace exynos4_read_sched_clock(void)
197{ 199{
198 return exynos4_frc_read(&mct_frc); 200 return _exynos4_frc_read();
201}
202
203static struct delay_timer exynos4_delay_timer;
204
205static cycles_t exynos4_read_current_timer(void)
206{
207 return _exynos4_frc_read();
199} 208}
200 209
201static void __init exynos4_clocksource_init(void) 210static void __init exynos4_clocksource_init(void)
202{ 211{
203 exynos4_mct_frc_start(0, 0); 212 exynos4_mct_frc_start();
213
214 exynos4_delay_timer.read_current_timer = &exynos4_read_current_timer;
215 exynos4_delay_timer.freq = clk_rate;
216 register_current_timer_delay(&exynos4_delay_timer);
204 217
205 if (clocksource_register_hz(&mct_frc, clk_rate)) 218 if (clocksource_register_hz(&mct_frc, clk_rate))
206 panic("%s: can't register clocksource\n", mct_frc.name); 219 panic("%s: can't register clocksource\n", mct_frc.name);
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index ebac67115009..7364a538e056 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -104,6 +104,7 @@ config ARM_IMX6Q_CPUFREQ
104 tristate "Freescale i.MX6 cpufreq support" 104 tristate "Freescale i.MX6 cpufreq support"
105 depends on ARCH_MXC 105 depends on ARCH_MXC
106 depends on REGULATOR_ANATOP 106 depends on REGULATOR_ANATOP
107 select PM_OPP
107 help 108 help
108 This adds cpufreq driver support for Freescale i.MX6 series SoCs. 109 This adds cpufreq driver support for Freescale i.MX6 series SoCs.
109 110
@@ -118,7 +119,7 @@ config ARM_INTEGRATOR
118 If in doubt, say Y. 119 If in doubt, say Y.
119 120
120config ARM_KIRKWOOD_CPUFREQ 121config ARM_KIRKWOOD_CPUFREQ
121 def_bool MACH_KIRKWOOD 122 def_bool ARCH_KIRKWOOD || MACH_KIRKWOOD
122 help 123 help
123 This adds the CPUFreq driver for Marvell Kirkwood 124 This adds the CPUFreq driver for Marvell Kirkwood
124 SoCs. 125 SoCs.
diff --git a/drivers/cpufreq/Makefile b/drivers/cpufreq/Makefile
index 738c8b7b17dc..db6d9a2fea4d 100644
--- a/drivers/cpufreq/Makefile
+++ b/drivers/cpufreq/Makefile
@@ -49,7 +49,7 @@ obj-$(CONFIG_ARM_BIG_LITTLE_CPUFREQ) += arm_big_little.o
49# LITTLE drivers, so that it is probed last. 49# LITTLE drivers, so that it is probed last.
50obj-$(CONFIG_ARM_DT_BL_CPUFREQ) += arm_big_little_dt.o 50obj-$(CONFIG_ARM_DT_BL_CPUFREQ) += arm_big_little_dt.o
51 51
52obj-$(CONFIG_ARCH_DAVINCI_DA850) += davinci-cpufreq.o 52obj-$(CONFIG_ARCH_DAVINCI) += davinci-cpufreq.o
53obj-$(CONFIG_UX500_SOC_DB8500) += dbx500-cpufreq.o 53obj-$(CONFIG_UX500_SOC_DB8500) += dbx500-cpufreq.o
54obj-$(CONFIG_ARM_EXYNOS_CPUFREQ) += exynos-cpufreq.o 54obj-$(CONFIG_ARM_EXYNOS_CPUFREQ) += exynos-cpufreq.o
55obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o 55obj-$(CONFIG_ARM_EXYNOS4210_CPUFREQ) += exynos4210-cpufreq.o
diff --git a/drivers/cpufreq/cpufreq-cpu0.c b/drivers/cpufreq/cpufreq-cpu0.c
index ee1ae303a07c..86beda9f950b 100644
--- a/drivers/cpufreq/cpufreq-cpu0.c
+++ b/drivers/cpufreq/cpufreq-cpu0.c
@@ -152,11 +152,8 @@ static int cpu0_cpufreq_probe(struct platform_device *pdev)
152 goto out_put_reg; 152 goto out_put_reg;
153 } 153 }
154 154
155 ret = of_init_opp_table(cpu_dev); 155 /* OPPs might be populated at runtime, don't check for error here */
156 if (ret) { 156 of_init_opp_table(cpu_dev);
157 pr_err("failed to init OPP table: %d\n", ret);
158 goto out_put_clk;
159 }
160 157
161 ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table); 158 ret = dev_pm_opp_init_cpufreq_table(cpu_dev, &freq_table);
162 if (ret) { 159 if (ret) {
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 62259d27f03e..6f024852c6fb 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1153,10 +1153,12 @@ static int __cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1153 * the creation of a brand new one. So we need to perform this update 1153 * the creation of a brand new one. So we need to perform this update
1154 * by invoking update_policy_cpu(). 1154 * by invoking update_policy_cpu().
1155 */ 1155 */
1156 if (recover_policy && cpu != policy->cpu) 1156 if (recover_policy && cpu != policy->cpu) {
1157 update_policy_cpu(policy, cpu); 1157 update_policy_cpu(policy, cpu);
1158 else 1158 WARN_ON(kobject_move(&policy->kobj, &dev->kobj));
1159 } else {
1159 policy->cpu = cpu; 1160 policy->cpu = cpu;
1161 }
1160 1162
1161 cpumask_copy(policy->cpus, cpumask_of(cpu)); 1163 cpumask_copy(policy->cpus, cpumask_of(cpu));
1162 1164
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 924bb2d42b1c..86631cb6f7de 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -128,6 +128,7 @@ static struct pstate_funcs pstate_funcs;
128 128
129struct perf_limits { 129struct perf_limits {
130 int no_turbo; 130 int no_turbo;
131 int turbo_disabled;
131 int max_perf_pct; 132 int max_perf_pct;
132 int min_perf_pct; 133 int min_perf_pct;
133 int32_t max_perf; 134 int32_t max_perf;
@@ -287,7 +288,10 @@ static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
287 if (ret != 1) 288 if (ret != 1)
288 return -EINVAL; 289 return -EINVAL;
289 limits.no_turbo = clamp_t(int, input, 0 , 1); 290 limits.no_turbo = clamp_t(int, input, 0 , 1);
290 291 if (limits.turbo_disabled) {
292 pr_warn("Turbo disabled by BIOS or unavailable on processor\n");
293 limits.no_turbo = limits.turbo_disabled;
294 }
291 return count; 295 return count;
292} 296}
293 297
@@ -357,21 +361,21 @@ static int byt_get_min_pstate(void)
357{ 361{
358 u64 value; 362 u64 value;
359 rdmsrl(BYT_RATIOS, value); 363 rdmsrl(BYT_RATIOS, value);
360 return (value >> 8) & 0x3F; 364 return (value >> 8) & 0x7F;
361} 365}
362 366
363static int byt_get_max_pstate(void) 367static int byt_get_max_pstate(void)
364{ 368{
365 u64 value; 369 u64 value;
366 rdmsrl(BYT_RATIOS, value); 370 rdmsrl(BYT_RATIOS, value);
367 return (value >> 16) & 0x3F; 371 return (value >> 16) & 0x7F;
368} 372}
369 373
370static int byt_get_turbo_pstate(void) 374static int byt_get_turbo_pstate(void)
371{ 375{
372 u64 value; 376 u64 value;
373 rdmsrl(BYT_TURBO_RATIOS, value); 377 rdmsrl(BYT_TURBO_RATIOS, value);
374 return value & 0x3F; 378 return value & 0x7F;
375} 379}
376 380
377static void byt_set_pstate(struct cpudata *cpudata, int pstate) 381static void byt_set_pstate(struct cpudata *cpudata, int pstate)
@@ -381,7 +385,7 @@ static void byt_set_pstate(struct cpudata *cpudata, int pstate)
381 u32 vid; 385 u32 vid;
382 386
383 val = pstate << 8; 387 val = pstate << 8;
384 if (limits.no_turbo) 388 if (limits.no_turbo && !limits.turbo_disabled)
385 val |= (u64)1 << 32; 389 val |= (u64)1 << 32;
386 390
387 vid_fp = cpudata->vid.min + mul_fp( 391 vid_fp = cpudata->vid.min + mul_fp(
@@ -405,8 +409,8 @@ static void byt_get_vid(struct cpudata *cpudata)
405 409
406 410
407 rdmsrl(BYT_VIDS, value); 411 rdmsrl(BYT_VIDS, value);
408 cpudata->vid.min = int_tofp((value >> 8) & 0x3f); 412 cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
409 cpudata->vid.max = int_tofp((value >> 16) & 0x3f); 413 cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
410 cpudata->vid.ratio = div_fp( 414 cpudata->vid.ratio = div_fp(
411 cpudata->vid.max - cpudata->vid.min, 415 cpudata->vid.max - cpudata->vid.min,
412 int_tofp(cpudata->pstate.max_pstate - 416 int_tofp(cpudata->pstate.max_pstate -
@@ -448,7 +452,7 @@ static void core_set_pstate(struct cpudata *cpudata, int pstate)
448 u64 val; 452 u64 val;
449 453
450 val = pstate << 8; 454 val = pstate << 8;
451 if (limits.no_turbo) 455 if (limits.no_turbo && !limits.turbo_disabled)
452 val |= (u64)1 << 32; 456 val |= (u64)1 << 32;
453 457
454 wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val); 458 wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
@@ -696,9 +700,8 @@ static int intel_pstate_init_cpu(unsigned int cpunum)
696 700
697 cpu = all_cpu_data[cpunum]; 701 cpu = all_cpu_data[cpunum];
698 702
699 intel_pstate_get_cpu_pstates(cpu);
700
701 cpu->cpu = cpunum; 703 cpu->cpu = cpunum;
704 intel_pstate_get_cpu_pstates(cpu);
702 705
703 init_timer_deferrable(&cpu->timer); 706 init_timer_deferrable(&cpu->timer);
704 cpu->timer.function = intel_pstate_timer_func; 707 cpu->timer.function = intel_pstate_timer_func;
@@ -741,7 +744,7 @@ static int intel_pstate_set_policy(struct cpufreq_policy *policy)
741 limits.min_perf = int_tofp(1); 744 limits.min_perf = int_tofp(1);
742 limits.max_perf_pct = 100; 745 limits.max_perf_pct = 100;
743 limits.max_perf = int_tofp(1); 746 limits.max_perf = int_tofp(1);
744 limits.no_turbo = 0; 747 limits.no_turbo = limits.turbo_disabled;
745 return 0; 748 return 0;
746 } 749 }
747 limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq; 750 limits.min_perf_pct = (policy->min * 100) / policy->cpuinfo.max_freq;
@@ -784,6 +787,7 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
784{ 787{
785 struct cpudata *cpu; 788 struct cpudata *cpu;
786 int rc; 789 int rc;
790 u64 misc_en;
787 791
788 rc = intel_pstate_init_cpu(policy->cpu); 792 rc = intel_pstate_init_cpu(policy->cpu);
789 if (rc) 793 if (rc)
@@ -791,8 +795,13 @@ static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
791 795
792 cpu = all_cpu_data[policy->cpu]; 796 cpu = all_cpu_data[policy->cpu];
793 797
794 if (!limits.no_turbo && 798 rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
795 limits.min_perf_pct == 100 && limits.max_perf_pct == 100) 799 if (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
800 cpu->pstate.max_pstate == cpu->pstate.turbo_pstate) {
801 limits.turbo_disabled = 1;
802 limits.no_turbo = 1;
803 }
804 if (limits.min_perf_pct == 100 && limits.max_perf_pct == 100)
796 policy->policy = CPUFREQ_POLICY_PERFORMANCE; 805 policy->policy = CPUFREQ_POLICY_PERFORMANCE;
797 else 806 else
798 policy->policy = CPUFREQ_POLICY_POWERSAVE; 807 policy->policy = CPUFREQ_POLICY_POWERSAVE;
diff --git a/drivers/cpufreq/sa1110-cpufreq.c b/drivers/cpufreq/sa1110-cpufreq.c
index 546376719d8f..b5befc211172 100644
--- a/drivers/cpufreq/sa1110-cpufreq.c
+++ b/drivers/cpufreq/sa1110-cpufreq.c
@@ -349,7 +349,7 @@ static int __init sa1110_clk_init(void)
349 name = "K4S641632D"; 349 name = "K4S641632D";
350 if (machine_is_h3100()) 350 if (machine_is_h3100())
351 name = "KM416S4030CT"; 351 name = "KM416S4030CT";
352 if (machine_is_jornada720()) 352 if (machine_is_jornada720() || machine_is_h3600())
353 name = "K4S281632B-1H"; 353 name = "K4S281632B-1H";
354 if (machine_is_nanoengine()) 354 if (machine_is_nanoengine())
355 name = "MT48LC8M16A2TG-75"; 355 name = "MT48LC8M16A2TG-75";
diff --git a/drivers/crypto/caam/jr.c b/drivers/crypto/caam/jr.c
index 1d80bd3636c5..b512a4ba7569 100644
--- a/drivers/crypto/caam/jr.c
+++ b/drivers/crypto/caam/jr.c
@@ -453,8 +453,8 @@ static int caam_jr_probe(struct platform_device *pdev)
453 int error; 453 int error;
454 454
455 jrdev = &pdev->dev; 455 jrdev = &pdev->dev;
456 jrpriv = kmalloc(sizeof(struct caam_drv_private_jr), 456 jrpriv = devm_kmalloc(jrdev, sizeof(struct caam_drv_private_jr),
457 GFP_KERNEL); 457 GFP_KERNEL);
458 if (!jrpriv) 458 if (!jrpriv)
459 return -ENOMEM; 459 return -ENOMEM;
460 460
@@ -487,10 +487,8 @@ static int caam_jr_probe(struct platform_device *pdev)
487 487
488 /* Now do the platform independent part */ 488 /* Now do the platform independent part */
489 error = caam_jr_init(jrdev); /* now turn on hardware */ 489 error = caam_jr_init(jrdev); /* now turn on hardware */
490 if (error) { 490 if (error)
491 kfree(jrpriv);
492 return error; 491 return error;
493 }
494 492
495 jrpriv->dev = jrdev; 493 jrpriv->dev = jrdev;
496 spin_lock(&driver_data.jr_alloc_lock); 494 spin_lock(&driver_data.jr_alloc_lock);
diff --git a/drivers/dma/cppi41.c b/drivers/dma/cppi41.c
index d028f36ae655..8f8b0b608875 100644
--- a/drivers/dma/cppi41.c
+++ b/drivers/dma/cppi41.c
@@ -86,6 +86,9 @@
86 86
87#define USBSS_IRQ_PD_COMP (1 << 2) 87#define USBSS_IRQ_PD_COMP (1 << 2)
88 88
89/* Packet Descriptor */
90#define PD2_ZERO_LENGTH (1 << 19)
91
89struct cppi41_channel { 92struct cppi41_channel {
90 struct dma_chan chan; 93 struct dma_chan chan;
91 struct dma_async_tx_descriptor txd; 94 struct dma_async_tx_descriptor txd;
@@ -307,7 +310,7 @@ static irqreturn_t cppi41_irq(int irq, void *data)
307 __iormb(); 310 __iormb();
308 311
309 while (val) { 312 while (val) {
310 u32 desc; 313 u32 desc, len;
311 314
312 q_num = __fls(val); 315 q_num = __fls(val);
313 val &= ~(1 << q_num); 316 val &= ~(1 << q_num);
@@ -319,9 +322,13 @@ static irqreturn_t cppi41_irq(int irq, void *data)
319 q_num, desc); 322 q_num, desc);
320 continue; 323 continue;
321 } 324 }
322 c->residue = pd_trans_len(c->desc->pd6) -
323 pd_trans_len(c->desc->pd0);
324 325
326 if (c->desc->pd2 & PD2_ZERO_LENGTH)
327 len = 0;
328 else
329 len = pd_trans_len(c->desc->pd0);
330
331 c->residue = pd_trans_len(c->desc->pd6) - len;
325 dma_cookie_complete(&c->txd); 332 dma_cookie_complete(&c->txd);
326 c->txd.callback(c->txd.callback_param); 333 c->txd.callback(c->txd.callback_param);
327 } 334 }
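The cppi41 interrupt handler now checks the PD2 zero-length bit before trusting the transferred-length field in PD0, so a zero-length USB packet reports the full residue instead of a stale byte count. A minimal sketch of the descriptor decoding; the length-field mask below is an assumed placeholder, only the bit position comes from the hunk:

#include <stdio.h>
#include <stdint.h>

#define PD2_ZERO_LENGTH  (1u << 19)
#define PD_LEN_MASK      0x3fffffu      /* assumed width of the length field */

static uint32_t residue(uint32_t pd0, uint32_t pd2, uint32_t pd6)
{
        uint32_t len;

        if (pd2 & PD2_ZERO_LENGTH)
                len = 0;                        /* nothing was actually transferred */
        else
                len = pd0 & PD_LEN_MASK;

        return (pd6 & PD_LEN_MASK) - len;       /* requested minus transferred */
}

int main(void)
{
        printf("%u\n", residue(512, 0, 512));               /* full packet: 0 left   */
        printf("%u\n", residue(512, PD2_ZERO_LENGTH, 512)); /* ZLP: 512 still owed   */
        return 0;
}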
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 128714622bf5..14867e3ac8ff 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -255,6 +255,7 @@ struct sdma_channel {
255 enum dma_slave_buswidth word_size; 255 enum dma_slave_buswidth word_size;
256 unsigned int buf_tail; 256 unsigned int buf_tail;
257 unsigned int num_bd; 257 unsigned int num_bd;
258 unsigned int period_len;
258 struct sdma_buffer_descriptor *bd; 259 struct sdma_buffer_descriptor *bd;
259 dma_addr_t bd_phys; 260 dma_addr_t bd_phys;
260 unsigned int pc_from_device, pc_to_device; 261 unsigned int pc_from_device, pc_to_device;
@@ -593,6 +594,12 @@ static void sdma_event_disable(struct sdma_channel *sdmac, unsigned int event)
593 594
594static void sdma_handle_channel_loop(struct sdma_channel *sdmac) 595static void sdma_handle_channel_loop(struct sdma_channel *sdmac)
595{ 596{
597 if (sdmac->desc.callback)
598 sdmac->desc.callback(sdmac->desc.callback_param);
599}
600
601static void sdma_update_channel_loop(struct sdma_channel *sdmac)
602{
596 struct sdma_buffer_descriptor *bd; 603 struct sdma_buffer_descriptor *bd;
597 604
598 /* 605 /*
@@ -611,9 +618,6 @@ static void sdma_handle_channel_loop(struct sdma_channel *sdmac)
611 bd->mode.status |= BD_DONE; 618 bd->mode.status |= BD_DONE;
612 sdmac->buf_tail++; 619 sdmac->buf_tail++;
613 sdmac->buf_tail %= sdmac->num_bd; 620 sdmac->buf_tail %= sdmac->num_bd;
614
615 if (sdmac->desc.callback)
616 sdmac->desc.callback(sdmac->desc.callback_param);
617 } 621 }
618} 622}
619 623
@@ -669,6 +673,9 @@ static irqreturn_t sdma_int_handler(int irq, void *dev_id)
669 int channel = fls(stat) - 1; 673 int channel = fls(stat) - 1;
670 struct sdma_channel *sdmac = &sdma->channel[channel]; 674 struct sdma_channel *sdmac = &sdma->channel[channel];
671 675
676 if (sdmac->flags & IMX_DMA_SG_LOOP)
677 sdma_update_channel_loop(sdmac);
678
672 tasklet_schedule(&sdmac->tasklet); 679 tasklet_schedule(&sdmac->tasklet);
673 680
674 __clear_bit(channel, &stat); 681 __clear_bit(channel, &stat);
@@ -1129,6 +1136,7 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
1129 sdmac->status = DMA_IN_PROGRESS; 1136 sdmac->status = DMA_IN_PROGRESS;
1130 1137
1131 sdmac->buf_tail = 0; 1138 sdmac->buf_tail = 0;
1139 sdmac->period_len = period_len;
1132 1140
1133 sdmac->flags |= IMX_DMA_SG_LOOP; 1141 sdmac->flags |= IMX_DMA_SG_LOOP;
1134 sdmac->direction = direction; 1142 sdmac->direction = direction;
@@ -1225,9 +1233,15 @@ static enum dma_status sdma_tx_status(struct dma_chan *chan,
1225 struct dma_tx_state *txstate) 1233 struct dma_tx_state *txstate)
1226{ 1234{
1227 struct sdma_channel *sdmac = to_sdma_chan(chan); 1235 struct sdma_channel *sdmac = to_sdma_chan(chan);
1236 u32 residue;
1237
1238 if (sdmac->flags & IMX_DMA_SG_LOOP)
1239 residue = (sdmac->num_bd - sdmac->buf_tail) * sdmac->period_len;
1240 else
1241 residue = sdmac->chn_count - sdmac->chn_real_count;
1228 1242
1229 dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie, 1243 dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
1230 sdmac->chn_count - sdmac->chn_real_count); 1244 residue);
1231 1245
1232 return sdmac->status; 1246 return sdmac->status;
1233} 1247}
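For cyclic transfers the sdma driver now derives the residue from how many period-sized buffers the hardware has not consumed yet, instead of the scatter-gather byte counters. The formula is small enough to show on its own; the field names mirror the driver's bookkeeping:

#include <stdio.h>

/* Residue reported for a cyclic DMA channel: every buffer descriptor
 * that the hardware has not completed yet is worth one period. */
static unsigned int cyclic_residue(unsigned int num_bd,
                                   unsigned int buf_tail,
                                   unsigned int period_len)
{
        return (num_bd - buf_tail) * period_len;
}

int main(void)
{
        /* 4 periods of 4096 bytes, hardware has finished the first one */
        printf("%u\n", cyclic_residue(4, 1, 4096));     /* 12288 */
        return 0;
}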
diff --git a/drivers/firewire/Kconfig b/drivers/firewire/Kconfig
index 4199849e3758..145974f9662b 100644
--- a/drivers/firewire/Kconfig
+++ b/drivers/firewire/Kconfig
@@ -1,4 +1,5 @@
1menu "IEEE 1394 (FireWire) support" 1menu "IEEE 1394 (FireWire) support"
2 depends on HAS_DMA
2 depends on PCI || COMPILE_TEST 3 depends on PCI || COMPILE_TEST
3 # firewire-core does not depend on PCI but is 4 # firewire-core does not depend on PCI but is
4 # not useful without PCI controller driver 5 # not useful without PCI controller driver
diff --git a/drivers/firmware/efi/efi-pstore.c b/drivers/firmware/efi/efi-pstore.c
index 4b9dc836dcf9..e992abc5ef26 100644
--- a/drivers/firmware/efi/efi-pstore.c
+++ b/drivers/firmware/efi/efi-pstore.c
@@ -40,7 +40,7 @@ struct pstore_read_data {
40static inline u64 generic_id(unsigned long timestamp, 40static inline u64 generic_id(unsigned long timestamp,
41 unsigned int part, int count) 41 unsigned int part, int count)
42{ 42{
43 return (timestamp * 100 + part) * 1000 + count; 43 return ((u64) timestamp * 100 + part) * 1000 + count;
44} 44}
45 45
46static int efi_pstore_read_func(struct efivar_entry *entry, void *data) 46static int efi_pstore_read_func(struct efivar_entry *entry, void *data)
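The efi-pstore change above widens the ID computation to 64 bits before multiplying; on 32-bit machines unsigned long is 32 bits, so timestamp * 100 * 1000 wraps for any current timestamp and distinct records could collide. A small sketch of the difference (build with -m32 to see the narrow version wrap; names are stand-ins):

#include <stdint.h>
#include <stdio.h>

/* Overflows when unsigned long is 32 bits: the product exceeds 2^32. */
static uint64_t id_narrow(unsigned long timestamp, unsigned int part, int count)
{
        return (timestamp * 100 + part) * 1000 + count;
}

/* Widen before multiplying, as the patch does with the (u64) cast. */
static uint64_t id_wide(unsigned long timestamp, unsigned int part, int count)
{
        return ((uint64_t)timestamp * 100 + part) * 1000 + count;
}

int main(void)
{
        unsigned long ts = 1404172800UL;        /* roughly July 2014 */

        printf("narrow: %llu\n", (unsigned long long)id_narrow(ts, 2, 1));
        printf("wide:   %llu\n", (unsigned long long)id_wide(ts, 2, 1));
        return 0;
}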
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index cd36deb619fa..dc79346689e6 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -346,6 +346,7 @@ static __initdata struct {
346 346
347struct param_info { 347struct param_info {
348 int verbose; 348 int verbose;
349 int found;
349 void *params; 350 void *params;
350}; 351};
351 352
@@ -353,25 +354,21 @@ static int __init fdt_find_uefi_params(unsigned long node, const char *uname,
353 int depth, void *data) 354 int depth, void *data)
354{ 355{
355 struct param_info *info = data; 356 struct param_info *info = data;
356 void *prop, *dest; 357 const void *prop;
357 unsigned long len; 358 void *dest;
358 u64 val; 359 u64 val;
359 int i; 360 int i, len;
360 361
361 if (depth != 1 || 362 if (depth != 1 ||
362 (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0)) 363 (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
363 return 0; 364 return 0;
364 365
365 pr_info("Getting parameters from FDT:\n");
366
367 for (i = 0; i < ARRAY_SIZE(dt_params); i++) { 366 for (i = 0; i < ARRAY_SIZE(dt_params); i++) {
368 prop = of_get_flat_dt_prop(node, dt_params[i].propname, &len); 367 prop = of_get_flat_dt_prop(node, dt_params[i].propname, &len);
369 if (!prop) { 368 if (!prop)
370 pr_err("Can't find %s in device tree!\n",
371 dt_params[i].name);
372 return 0; 369 return 0;
373 }
374 dest = info->params + dt_params[i].offset; 370 dest = info->params + dt_params[i].offset;
371 info->found++;
375 372
376 val = of_read_number(prop, len / sizeof(u32)); 373 val = of_read_number(prop, len / sizeof(u32));
377 374
@@ -390,10 +387,21 @@ static int __init fdt_find_uefi_params(unsigned long node, const char *uname,
390int __init efi_get_fdt_params(struct efi_fdt_params *params, int verbose) 387int __init efi_get_fdt_params(struct efi_fdt_params *params, int verbose)
391{ 388{
392 struct param_info info; 389 struct param_info info;
390 int ret;
391
392 pr_info("Getting EFI parameters from FDT:\n");
393 393
394 info.verbose = verbose; 394 info.verbose = verbose;
395 info.found = 0;
395 info.params = params; 396 info.params = params;
396 397
397 return of_scan_flat_dt(fdt_find_uefi_params, &info); 398 ret = of_scan_flat_dt(fdt_find_uefi_params, &info);
399 if (!info.found)
400 pr_info("UEFI not found.\n");
401 else if (!ret)
402 pr_err("Can't find '%s' in device tree!\n",
403 dt_params[info.found].name);
404
405 return ret;
398} 406}
399#endif /* CONFIG_EFI_PARAMS_FROM_FDT */ 407#endif /* CONFIG_EFI_PARAMS_FROM_FDT */
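The efi.c change above stops printing an error from inside the per-node callback; instead it counts how many of the expected /chosen properties were found and, after the scan, prints either "UEFI not found." when nothing was present or names the first missing parameter. A hedged userspace sketch of that found-counter reporting, where the property table and the lookup are stand-ins for dt_params[] and of_get_flat_dt_prop():

#include <stdio.h>
#include <string.h>

struct param { const char *name; const char *propname; };

/* Stand-in for the dt_params[] table; the real table lives in efi.c. */
static const struct param dt_params[] = {
        { "System Table",   "linux,uefi-system-table" },
        { "MemMap Address", "linux,uefi-mmap-start"   },
        { "MemMap Size",    "linux,uefi-mmap-size"    },
};

/* Pretend only the first property is present in the flattened device tree. */
static int prop_present(const char *propname)
{
        return strcmp(propname, "linux,uefi-system-table") == 0;
}

int main(void)
{
        const int total = sizeof(dt_params) / sizeof(dt_params[0]);
        int found = 0;

        while (found < total && prop_present(dt_params[found].propname))
                found++;

        if (!found)
                printf("UEFI not found.\n");
        else if (found < total)
                printf("Can't find '%s' in device tree!\n", dt_params[found].name);
        else
                printf("all %d UEFI parameters found\n", total);
        return 0;
}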
diff --git a/drivers/firmware/efi/fdt.c b/drivers/firmware/efi/fdt.c
index 5c6a8e8a9580..507a3df46a5d 100644
--- a/drivers/firmware/efi/fdt.c
+++ b/drivers/firmware/efi/fdt.c
@@ -23,16 +23,6 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
23 u32 fdt_val32; 23 u32 fdt_val32;
24 u64 fdt_val64; 24 u64 fdt_val64;
25 25
26 /*
27 * Copy definition of linux_banner here. Since this code is
28 * built as part of the decompressor for ARM v7, pulling
29 * in version.c where linux_banner is defined for the
30 * kernel brings other kernel dependencies with it.
31 */
32 const char linux_banner[] =
33 "Linux version " UTS_RELEASE " (" LINUX_COMPILE_BY "@"
34 LINUX_COMPILE_HOST ") (" LINUX_COMPILER ") " UTS_VERSION "\n";
35
36 /* Do some checks on provided FDT, if it exists*/ 26 /* Do some checks on provided FDT, if it exists*/
37 if (orig_fdt) { 27 if (orig_fdt) {
38 if (fdt_check_header(orig_fdt)) { 28 if (fdt_check_header(orig_fdt)) {
@@ -63,7 +53,7 @@ static efi_status_t update_fdt(efi_system_table_t *sys_table, void *orig_fdt,
63 */ 53 */
64 prev = 0; 54 prev = 0;
65 for (;;) { 55 for (;;) {
66 const char *type, *name; 56 const char *type;
67 int len; 57 int len;
68 58
69 node = fdt_next_node(fdt, prev, NULL); 59 node = fdt_next_node(fdt, prev, NULL);
diff --git a/drivers/gpio/gpio-mcp23s08.c b/drivers/gpio/gpio-mcp23s08.c
index fe7c0e211f9a..57adbc90fdad 100644
--- a/drivers/gpio/gpio-mcp23s08.c
+++ b/drivers/gpio/gpio-mcp23s08.c
@@ -900,8 +900,6 @@ static int mcp23s08_probe(struct spi_device *spi)
900 if (spi_present_mask & (1 << addr)) 900 if (spi_present_mask & (1 << addr))
901 chips++; 901 chips++;
902 } 902 }
903 if (!chips)
904 return -ENODEV;
905 } else { 903 } else {
906 type = spi_get_device_id(spi)->driver_data; 904 type = spi_get_device_id(spi)->driver_data;
907 pdata = dev_get_platdata(&spi->dev); 905 pdata = dev_get_platdata(&spi->dev);
@@ -940,10 +938,6 @@ static int mcp23s08_probe(struct spi_device *spi)
940 if (!(spi_present_mask & (1 << addr))) 938 if (!(spi_present_mask & (1 << addr)))
941 continue; 939 continue;
942 chips--; 940 chips--;
943 if (chips < 0) {
944 dev_err(&spi->dev, "FATAL: invalid negative chip id\n");
945 goto fail;
946 }
947 data->mcp[addr] = &data->chip[chips]; 941 data->mcp[addr] = &data->chip[chips];
948 status = mcp23s08_probe_one(data->mcp[addr], &spi->dev, spi, 942 status = mcp23s08_probe_one(data->mcp[addr], &spi->dev, spi,
949 0x40 | (addr << 1), type, base, 943 0x40 | (addr << 1), type, base,
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index 03711d00aaae..8218078b6133 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -419,8 +419,9 @@ long drm_ioctl(struct file *filp,
419 retcode = -EFAULT; 419 retcode = -EFAULT;
420 goto err_i1; 420 goto err_i1;
421 } 421 }
422 } else 422 } else if (cmd & IOC_OUT) {
423 memset(kdata, 0, usize); 423 memset(kdata, 0, usize);
424 }
424 425
425 if (ioctl->flags & DRM_UNLOCKED) 426 if (ioctl->flags & DRM_UNLOCKED)
426 retcode = func(dev, kdata, file_priv); 427 retcode = func(dev, kdata, file_priv);
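The drm_ioctl() change above zeroes the kernel-side argument buffer only when nothing was copied in from userspace and the ioctl is declared to return data (IOC_OUT); a write-only ioctl never hands the buffer back to userspace, so clearing it is wasted work. A sketch of that rule with hypothetical direction flags (not the DRM macros):

#include <stdio.h>
#include <string.h>

#define DIR_IN  0x1     /* userspace -> kernel (hypothetical flag) */
#define DIR_OUT 0x2     /* kernel -> userspace (hypothetical flag) */

static void prepare_arg(unsigned int dir, void *kdata, size_t usize,
                        const void *udata)
{
        if (dir & DIR_IN)
                memcpy(kdata, udata, usize);    /* stands in for copy_from_user() */
        else if (dir & DIR_OUT)
                memset(kdata, 0, usize);        /* avoid leaking stale kernel data */
        /* neither IN nor OUT: the buffer is never read back, leave it alone */
}

int main(void)
{
        char in[8] = "abcdefg", buf[8] = "XXXXXXX";

        prepare_arg(DIR_OUT, buf, sizeof(buf), in);
        printf("out-only arg starts zeroed: %d\n", buf[0] == 0);
        return 0;
}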
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dpi.c b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
index 482127f633c5..9e530f205ad2 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dpi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dpi.c
@@ -40,7 +40,7 @@ exynos_dpi_detect(struct drm_connector *connector, bool force)
40{ 40{
41 struct exynos_dpi *ctx = connector_to_dpi(connector); 41 struct exynos_dpi *ctx = connector_to_dpi(connector);
42 42
43 if (!ctx->panel->connector) 43 if (ctx->panel && !ctx->panel->connector)
44 drm_panel_attach(ctx->panel, &ctx->connector); 44 drm_panel_attach(ctx->panel, &ctx->connector);
45 45
46 return connector_status_connected; 46 return connector_status_connected;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index d91f27777537..ab7d182063c3 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -765,24 +765,24 @@ static int exynos_drm_init(void)
765 765
766 return 0; 766 return 0;
767 767
768err_unregister_pd:
769 platform_device_unregister(exynos_drm_pdev);
770
771err_remove_vidi: 768err_remove_vidi:
772#ifdef CONFIG_DRM_EXYNOS_VIDI 769#ifdef CONFIG_DRM_EXYNOS_VIDI
773 exynos_drm_remove_vidi(); 770 exynos_drm_remove_vidi();
771
772err_unregister_pd:
774#endif 773#endif
774 platform_device_unregister(exynos_drm_pdev);
775 775
776 return ret; 776 return ret;
777} 777}
778 778
779static void exynos_drm_exit(void) 779static void exynos_drm_exit(void)
780{ 780{
781 platform_driver_unregister(&exynos_drm_platform_driver);
781#ifdef CONFIG_DRM_EXYNOS_VIDI 782#ifdef CONFIG_DRM_EXYNOS_VIDI
782 exynos_drm_remove_vidi(); 783 exynos_drm_remove_vidi();
783#endif 784#endif
784 platform_device_unregister(exynos_drm_pdev); 785 platform_device_unregister(exynos_drm_pdev);
785 platform_driver_unregister(&exynos_drm_platform_driver);
786} 786}
787 787
788module_init(exynos_drm_init); 788module_init(exynos_drm_init);
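The exynos_drm_drv.c hunks above reorder both the init error path and the module exit so teardown runs in the reverse order of setup, and move the unregister call outside the #ifdef so it happens regardless of the VIDI option. The underlying pattern is the usual goto-based unwinding, sketched here with placeholder register/unregister functions:

#include <stdio.h>

static int  register_a(void)   { puts("register a");   return 0; }
static void unregister_a(void) { puts("unregister a"); }
static int  register_b(void)   { puts("register b");   return -1; /* simulate failure */ }
static void unregister_b(void) { puts("unregister b"); }

static int init(void)
{
        int ret;

        ret = register_a();
        if (ret)
                goto err_out;

        ret = register_b();
        if (ret)
                goto err_unregister_a;

        return 0;

err_unregister_a:
        unregister_a();         /* undo only what succeeded, newest first */
err_out:
        return ret;
}

static void exit_path(void)
{
        unregister_b();         /* mirror image of init(): reverse order */
        unregister_a();
}

int main(void)
{
        if (init())
                puts("init failed, partial state unwound");
        else
                exit_path();
        return 0;
}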
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 36535f398848..06cde4506278 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -343,7 +343,7 @@ struct exynos_drm_display * exynos_dpi_probe(struct device *dev);
343int exynos_dpi_remove(struct device *dev); 343int exynos_dpi_remove(struct device *dev);
344#else 344#else
345static inline struct exynos_drm_display * 345static inline struct exynos_drm_display *
346exynos_dpi_probe(struct device *dev) { return 0; } 346exynos_dpi_probe(struct device *dev) { return NULL; }
347static inline int exynos_dpi_remove(struct device *dev) { return 0; } 347static inline int exynos_dpi_remove(struct device *dev) { return 0; }
348#endif 348#endif
349 349
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index bb45ab2e7384..33161ad38201 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -741,6 +741,8 @@ static void fimd_apply(struct exynos_drm_manager *mgr)
741 win_data = &ctx->win_data[i]; 741 win_data = &ctx->win_data[i];
742 if (win_data->enabled) 742 if (win_data->enabled)
743 fimd_win_commit(mgr, i); 743 fimd_win_commit(mgr, i);
744 else
745 fimd_win_disable(mgr, i);
744 } 746 }
745 747
746 fimd_commit(mgr); 748 fimd_commit(mgr);
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index c104d0c9b385..aa259b0a873a 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -2090,6 +2090,11 @@ out:
2090 2090
2091static void hdmi_dpms(struct exynos_drm_display *display, int mode) 2091static void hdmi_dpms(struct exynos_drm_display *display, int mode)
2092{ 2092{
2093 struct hdmi_context *hdata = display->ctx;
2094 struct drm_encoder *encoder = hdata->encoder;
2095 struct drm_crtc *crtc = encoder->crtc;
2096 struct drm_crtc_helper_funcs *funcs = NULL;
2097
2093 DRM_DEBUG_KMS("mode %d\n", mode); 2098 DRM_DEBUG_KMS("mode %d\n", mode);
2094 2099
2095 switch (mode) { 2100 switch (mode) {
@@ -2099,6 +2104,20 @@ static void hdmi_dpms(struct exynos_drm_display *display, int mode)
2099 case DRM_MODE_DPMS_STANDBY: 2104 case DRM_MODE_DPMS_STANDBY:
2100 case DRM_MODE_DPMS_SUSPEND: 2105 case DRM_MODE_DPMS_SUSPEND:
2101 case DRM_MODE_DPMS_OFF: 2106 case DRM_MODE_DPMS_OFF:
2107 /*
2108 * The SFRs of VP and Mixer are updated by Vertical Sync of
2109 * Timing generator which is a part of HDMI so the sequence
2110 * to disable TV Subsystem should be as following,
2111 * VP -> Mixer -> HDMI
2112 *
2113 * Below codes will try to disable Mixer and VP(if used)
2114 * prior to disabling HDMI.
2115 */
2116 if (crtc)
2117 funcs = crtc->helper_private;
2118 if (funcs && funcs->dpms)
2119 (*funcs->dpms)(crtc, mode);
2120
2102 hdmi_poweroff(display); 2121 hdmi_poweroff(display);
2103 break; 2122 break;
2104 default: 2123 default:
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 4c5aed7e54c8..7529946d0a74 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -377,6 +377,20 @@ static void mixer_run(struct mixer_context *ctx)
377 mixer_regs_dump(ctx); 377 mixer_regs_dump(ctx);
378} 378}
379 379
380static void mixer_stop(struct mixer_context *ctx)
381{
382 struct mixer_resources *res = &ctx->mixer_res;
383 int timeout = 20;
384
385 mixer_reg_writemask(res, MXR_STATUS, 0, MXR_STATUS_REG_RUN);
386
387 while (!(mixer_reg_read(res, MXR_STATUS) & MXR_STATUS_REG_IDLE) &&
388 --timeout)
389 usleep_range(10000, 12000);
390
391 mixer_regs_dump(ctx);
392}
393
380static void vp_video_buffer(struct mixer_context *ctx, int win) 394static void vp_video_buffer(struct mixer_context *ctx, int win)
381{ 395{
382 struct mixer_resources *res = &ctx->mixer_res; 396 struct mixer_resources *res = &ctx->mixer_res;
@@ -497,13 +511,8 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
497static void mixer_layer_update(struct mixer_context *ctx) 511static void mixer_layer_update(struct mixer_context *ctx)
498{ 512{
499 struct mixer_resources *res = &ctx->mixer_res; 513 struct mixer_resources *res = &ctx->mixer_res;
500 u32 val;
501
502 val = mixer_reg_read(res, MXR_CFG);
503 514
504 /* allow one update per vsync only */ 515 mixer_reg_writemask(res, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE);
505 if (!(val & MXR_CFG_LAYER_UPDATE_COUNT_MASK))
506 mixer_reg_writemask(res, MXR_CFG, ~0, MXR_CFG_LAYER_UPDATE);
507} 516}
508 517
509static void mixer_graph_buffer(struct mixer_context *ctx, int win) 518static void mixer_graph_buffer(struct mixer_context *ctx, int win)
@@ -1010,6 +1019,8 @@ static void mixer_wait_for_vblank(struct exynos_drm_manager *mgr)
1010 } 1019 }
1011 mutex_unlock(&mixer_ctx->mixer_mutex); 1020 mutex_unlock(&mixer_ctx->mixer_mutex);
1012 1021
1022 drm_vblank_get(mgr->crtc->dev, mixer_ctx->pipe);
1023
1013 atomic_set(&mixer_ctx->wait_vsync_event, 1); 1024 atomic_set(&mixer_ctx->wait_vsync_event, 1);
1014 1025
1015 /* 1026 /*
@@ -1020,6 +1031,8 @@ static void mixer_wait_for_vblank(struct exynos_drm_manager *mgr)
1020 !atomic_read(&mixer_ctx->wait_vsync_event), 1031 !atomic_read(&mixer_ctx->wait_vsync_event),
1021 HZ/20)) 1032 HZ/20))
1022 DRM_DEBUG_KMS("vblank wait timed out.\n"); 1033 DRM_DEBUG_KMS("vblank wait timed out.\n");
1034
1035 drm_vblank_put(mgr->crtc->dev, mixer_ctx->pipe);
1023} 1036}
1024 1037
1025static void mixer_window_suspend(struct exynos_drm_manager *mgr) 1038static void mixer_window_suspend(struct exynos_drm_manager *mgr)
@@ -1061,7 +1074,7 @@ static void mixer_poweron(struct exynos_drm_manager *mgr)
1061 mutex_unlock(&ctx->mixer_mutex); 1074 mutex_unlock(&ctx->mixer_mutex);
1062 return; 1075 return;
1063 } 1076 }
1064 ctx->powered = true; 1077
1065 mutex_unlock(&ctx->mixer_mutex); 1078 mutex_unlock(&ctx->mixer_mutex);
1066 1079
1067 pm_runtime_get_sync(ctx->dev); 1080 pm_runtime_get_sync(ctx->dev);
@@ -1072,6 +1085,12 @@ static void mixer_poweron(struct exynos_drm_manager *mgr)
1072 clk_prepare_enable(res->sclk_mixer); 1085 clk_prepare_enable(res->sclk_mixer);
1073 } 1086 }
1074 1087
1088 mutex_lock(&ctx->mixer_mutex);
1089 ctx->powered = true;
1090 mutex_unlock(&ctx->mixer_mutex);
1091
1092 mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET);
1093
1075 mixer_reg_write(res, MXR_INT_EN, ctx->int_en); 1094 mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
1076 mixer_win_reset(ctx); 1095 mixer_win_reset(ctx);
1077 1096
@@ -1084,14 +1103,21 @@ static void mixer_poweroff(struct exynos_drm_manager *mgr)
1084 struct mixer_resources *res = &ctx->mixer_res; 1103 struct mixer_resources *res = &ctx->mixer_res;
1085 1104
1086 mutex_lock(&ctx->mixer_mutex); 1105 mutex_lock(&ctx->mixer_mutex);
1087 if (!ctx->powered) 1106 if (!ctx->powered) {
1088 goto out; 1107 mutex_unlock(&ctx->mixer_mutex);
1108 return;
1109 }
1089 mutex_unlock(&ctx->mixer_mutex); 1110 mutex_unlock(&ctx->mixer_mutex);
1090 1111
1112 mixer_stop(ctx);
1091 mixer_window_suspend(mgr); 1113 mixer_window_suspend(mgr);
1092 1114
1093 ctx->int_en = mixer_reg_read(res, MXR_INT_EN); 1115 ctx->int_en = mixer_reg_read(res, MXR_INT_EN);
1094 1116
1117 mutex_lock(&ctx->mixer_mutex);
1118 ctx->powered = false;
1119 mutex_unlock(&ctx->mixer_mutex);
1120
1095 clk_disable_unprepare(res->mixer); 1121 clk_disable_unprepare(res->mixer);
1096 if (ctx->vp_enabled) { 1122 if (ctx->vp_enabled) {
1097 clk_disable_unprepare(res->vp); 1123 clk_disable_unprepare(res->vp);
@@ -1099,12 +1125,6 @@ static void mixer_poweroff(struct exynos_drm_manager *mgr)
1099 } 1125 }
1100 1126
1101 pm_runtime_put_sync(ctx->dev); 1127 pm_runtime_put_sync(ctx->dev);
1102
1103 mutex_lock(&ctx->mixer_mutex);
1104 ctx->powered = false;
1105
1106out:
1107 mutex_unlock(&ctx->mixer_mutex);
1108} 1128}
1109 1129
1110static void mixer_dpms(struct exynos_drm_manager *mgr, int mode) 1130static void mixer_dpms(struct exynos_drm_manager *mgr, int mode)
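mixer_stop() above clears the RUN bit and then polls MXR_STATUS for the new REG_IDLE bit with a bounded number of sleeps, so a stuck engine cannot hang the caller. The same bounded-poll shape in plain C, with read_status() standing in for the register read and usleep() for usleep_range():

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define STATUS_REG_IDLE 0x2u

/* Stand-in for mixer_reg_read(res, MXR_STATUS): goes idle after a few polls. */
static unsigned int read_status(void)
{
        static int polls;
        return ++polls >= 3 ? STATUS_REG_IDLE : 0;
}

static bool wait_for_idle(void)
{
        int timeout = 20;

        while (!(read_status() & STATUS_REG_IDLE) && --timeout)
                usleep(10000);          /* ~10 ms per try, at most 20 tries */

        return timeout != 0;
}

int main(void)
{
        printf("engine idle: %s\n", wait_for_idle() ? "yes" : "timed out");
        return 0;
}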
diff --git a/drivers/gpu/drm/exynos/regs-mixer.h b/drivers/gpu/drm/exynos/regs-mixer.h
index 4537026bc385..5f32e1a29411 100644
--- a/drivers/gpu/drm/exynos/regs-mixer.h
+++ b/drivers/gpu/drm/exynos/regs-mixer.h
@@ -78,6 +78,7 @@
78#define MXR_STATUS_BIG_ENDIAN (1 << 3) 78#define MXR_STATUS_BIG_ENDIAN (1 << 3)
79#define MXR_STATUS_ENDIAN_MASK (1 << 3) 79#define MXR_STATUS_ENDIAN_MASK (1 << 3)
80#define MXR_STATUS_SYNC_ENABLE (1 << 2) 80#define MXR_STATUS_SYNC_ENABLE (1 << 2)
81#define MXR_STATUS_REG_IDLE (1 << 1)
81#define MXR_STATUS_REG_RUN (1 << 0) 82#define MXR_STATUS_REG_RUN (1 << 0)
82 83
83/* bits for MXR_CFG */ 84/* bits for MXR_CFG */
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index 240c331405b9..ac357b02bd35 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -810,6 +810,12 @@ static int
810tda998x_encoder_mode_valid(struct drm_encoder *encoder, 810tda998x_encoder_mode_valid(struct drm_encoder *encoder,
811 struct drm_display_mode *mode) 811 struct drm_display_mode *mode)
812{ 812{
813 if (mode->clock > 150000)
814 return MODE_CLOCK_HIGH;
815 if (mode->htotal >= BIT(13))
816 return MODE_BAD_HVALUE;
817 if (mode->vtotal >= BIT(11))
818 return MODE_BAD_VVALUE;
813 return MODE_OK; 819 return MODE_OK;
814} 820}
815 821
@@ -1048,8 +1054,8 @@ read_edid_block(struct drm_encoder *encoder, uint8_t *buf, int blk)
1048 return i; 1054 return i;
1049 } 1055 }
1050 } else { 1056 } else {
1051 for (i = 10; i > 0; i--) { 1057 for (i = 100; i > 0; i--) {
1052 msleep(10); 1058 msleep(1);
1053 ret = reg_read(priv, REG_INT_FLAGS_2); 1059 ret = reg_read(priv, REG_INT_FLAGS_2);
1054 if (ret < 0) 1060 if (ret < 0)
1055 return ret; 1061 return ret;
@@ -1183,7 +1189,6 @@ static void
1183tda998x_encoder_destroy(struct drm_encoder *encoder) 1189tda998x_encoder_destroy(struct drm_encoder *encoder)
1184{ 1190{
1185 struct tda998x_priv *priv = to_tda998x_priv(encoder); 1191 struct tda998x_priv *priv = to_tda998x_priv(encoder);
1186 drm_i2c_encoder_destroy(encoder);
1187 1192
1188 /* disable all IRQs and free the IRQ handler */ 1193 /* disable all IRQs and free the IRQ handler */
1189 cec_write(priv, REG_CEC_RXSHPDINTENA, 0); 1194 cec_write(priv, REG_CEC_RXSHPDINTENA, 0);
@@ -1193,6 +1198,7 @@ tda998x_encoder_destroy(struct drm_encoder *encoder)
1193 1198
1194 if (priv->cec) 1199 if (priv->cec)
1195 i2c_unregister_device(priv->cec); 1200 i2c_unregister_device(priv->cec);
1201 drm_i2c_encoder_destroy(encoder);
1196 kfree(priv); 1202 kfree(priv);
1197} 1203}
1198 1204
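The tda998x mode_valid() hunk rejects modes the encoder cannot handle: pixel clocks above 150 MHz, and totals that do not fit the hardware's 13-bit horizontal and 11-bit vertical counters. The same checks in a standalone sketch (limits copied from the hunk; the mode struct and return codes are stand-ins):

#include <stdio.h>

enum { MODE_OK, MODE_CLOCK_HIGH, MODE_BAD_HVALUE, MODE_BAD_VVALUE };

struct mode { int clock /* kHz */; int htotal; int vtotal; };

static int mode_valid(const struct mode *m)
{
        if (m->clock > 150000)
                return MODE_CLOCK_HIGH;         /* > 150 MHz pixel clock */
        if (m->htotal >= (1 << 13))
                return MODE_BAD_HVALUE;         /* htotal must fit in 13 bits */
        if (m->vtotal >= (1 << 11))
                return MODE_BAD_VVALUE;         /* vtotal must fit in 11 bits */
        return MODE_OK;
}

int main(void)
{
        struct mode m1080p = { 148500, 2200, 1125 };    /* 1920x1080@60 */
        struct mode m4k    = { 297000, 4400, 2250 };    /* too fast, too wide */

        printf("1080p: %d, 4k: %d\n", mode_valid(&m1080p), mode_valid(&m4k));
        return 0;
}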
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 601caa88c092..b8c689202c40 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -446,7 +446,9 @@ static int i915_gem_object_info(struct seq_file *m, void* data)
446 446
447 memset(&stats, 0, sizeof(stats)); 447 memset(&stats, 0, sizeof(stats));
448 stats.file_priv = file->driver_priv; 448 stats.file_priv = file->driver_priv;
449 spin_lock(&file->table_lock);
449 idr_for_each(&file->object_idr, per_file_stats, &stats); 450 idr_for_each(&file->object_idr, per_file_stats, &stats);
451 spin_unlock(&file->table_lock);
450 /* 452 /*
451 * Although we have a valid reference on file->pid, that does 453 * Although we have a valid reference on file->pid, that does
452 * not guarantee that the task_struct who called get_pid() is 454 * not guarantee that the task_struct who called get_pid() is
diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
index 6c656392d67d..d44344140627 100644
--- a/drivers/gpu/drm/i915/i915_dma.c
+++ b/drivers/gpu/drm/i915/i915_dma.c
@@ -1464,12 +1464,13 @@ static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
1464#else 1464#else
1465static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv) 1465static int i915_kick_out_vgacon(struct drm_i915_private *dev_priv)
1466{ 1466{
1467 int ret; 1467 int ret = 0;
1468 1468
1469 DRM_INFO("Replacing VGA console driver\n"); 1469 DRM_INFO("Replacing VGA console driver\n");
1470 1470
1471 console_lock(); 1471 console_lock();
1472 ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1); 1472 if (con_is_bound(&vga_con))
1473 ret = do_take_over_console(&dummy_con, 0, MAX_NR_CONSOLES - 1, 1);
1473 if (ret == 0) { 1474 if (ret == 0) {
1474 ret = do_unregister_con_driver(&vga_con); 1475 ret = do_unregister_con_driver(&vga_con);
1475 1476
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 49414d30e8d4..374f964323ad 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -656,6 +656,7 @@ enum intel_sbi_destination {
656#define QUIRK_PIPEA_FORCE (1<<0) 656#define QUIRK_PIPEA_FORCE (1<<0)
657#define QUIRK_LVDS_SSC_DISABLE (1<<1) 657#define QUIRK_LVDS_SSC_DISABLE (1<<1)
658#define QUIRK_INVERT_BRIGHTNESS (1<<2) 658#define QUIRK_INVERT_BRIGHTNESS (1<<2)
659#define QUIRK_BACKLIGHT_PRESENT (1<<3)
659 660
660struct intel_fbdev; 661struct intel_fbdev;
661struct intel_fbc_work; 662struct intel_fbc_work;
@@ -977,6 +978,8 @@ struct i915_power_well {
977 bool always_on; 978 bool always_on;
978 /* power well enable/disable usage count */ 979 /* power well enable/disable usage count */
979 int count; 980 int count;
981 /* cached hw enabled state */
982 bool hw_enabled;
980 unsigned long domains; 983 unsigned long domains;
981 unsigned long data; 984 unsigned long data;
982 const struct i915_power_well_ops *ops; 985 const struct i915_power_well_ops *ops;
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 3ffe308d5893..a5ddf3bce9c3 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -598,6 +598,7 @@ static int do_switch(struct intel_engine_cs *ring,
598 struct intel_context *from = ring->last_context; 598 struct intel_context *from = ring->last_context;
599 struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(to); 599 struct i915_hw_ppgtt *ppgtt = ctx_to_ppgtt(to);
600 u32 hw_flags = 0; 600 u32 hw_flags = 0;
601 bool uninitialized = false;
601 int ret, i; 602 int ret, i;
602 603
603 if (from != NULL && ring == &dev_priv->ring[RCS]) { 604 if (from != NULL && ring == &dev_priv->ring[RCS]) {
@@ -696,19 +697,20 @@ static int do_switch(struct intel_engine_cs *ring,
696 i915_gem_context_unreference(from); 697 i915_gem_context_unreference(from);
697 } 698 }
698 699
700 uninitialized = !to->is_initialized && from == NULL;
701 to->is_initialized = true;
702
699done: 703done:
700 i915_gem_context_reference(to); 704 i915_gem_context_reference(to);
701 ring->last_context = to; 705 ring->last_context = to;
702 to->last_ring = ring; 706 to->last_ring = ring;
703 707
704 if (ring->id == RCS && !to->is_initialized && from == NULL) { 708 if (uninitialized) {
705 ret = i915_gem_render_state_init(ring); 709 ret = i915_gem_render_state_init(ring);
706 if (ret) 710 if (ret)
707 DRM_ERROR("init render state: %d\n", ret); 711 DRM_ERROR("init render state: %d\n", ret);
708 } 712 }
709 713
710 to->is_initialized = true;
711
712 return 0; 714 return 0;
713 715
714unpin_out: 716unpin_out:
diff --git a/drivers/gpu/drm/i915/i915_gem_stolen.c b/drivers/gpu/drm/i915/i915_gem_stolen.c
index 62ef55ba061c..7465ab0fd396 100644
--- a/drivers/gpu/drm/i915/i915_gem_stolen.c
+++ b/drivers/gpu/drm/i915/i915_gem_stolen.c
@@ -74,6 +74,50 @@ static unsigned long i915_stolen_to_physical(struct drm_device *dev)
74 if (base == 0) 74 if (base == 0)
75 return 0; 75 return 0;
76 76
77 /* make sure we don't clobber the GTT if it's within stolen memory */
78 if (INTEL_INFO(dev)->gen <= 4 && !IS_G33(dev) && !IS_G4X(dev)) {
79 struct {
80 u32 start, end;
81 } stolen[2] = {
82 { .start = base, .end = base + dev_priv->gtt.stolen_size, },
83 { .start = base, .end = base + dev_priv->gtt.stolen_size, },
84 };
85 u64 gtt_start, gtt_end;
86
87 gtt_start = I915_READ(PGTBL_CTL);
88 if (IS_GEN4(dev))
89 gtt_start = (gtt_start & PGTBL_ADDRESS_LO_MASK) |
90 (gtt_start & PGTBL_ADDRESS_HI_MASK) << 28;
91 else
92 gtt_start &= PGTBL_ADDRESS_LO_MASK;
93 gtt_end = gtt_start + gtt_total_entries(dev_priv->gtt) * 4;
94
95 if (gtt_start >= stolen[0].start && gtt_start < stolen[0].end)
96 stolen[0].end = gtt_start;
97 if (gtt_end > stolen[1].start && gtt_end <= stolen[1].end)
98 stolen[1].start = gtt_end;
99
100 /* pick the larger of the two chunks */
101 if (stolen[0].end - stolen[0].start >
102 stolen[1].end - stolen[1].start) {
103 base = stolen[0].start;
104 dev_priv->gtt.stolen_size = stolen[0].end - stolen[0].start;
105 } else {
106 base = stolen[1].start;
107 dev_priv->gtt.stolen_size = stolen[1].end - stolen[1].start;
108 }
109
110 if (stolen[0].start != stolen[1].start ||
111 stolen[0].end != stolen[1].end) {
112 DRM_DEBUG_KMS("GTT within stolen memory at 0x%llx-0x%llx\n",
113 (unsigned long long) gtt_start,
114 (unsigned long long) gtt_end - 1);
115 DRM_DEBUG_KMS("Stolen memory adjusted to 0x%x-0x%x\n",
116 base, base + (u32) dev_priv->gtt.stolen_size - 1);
117 }
118 }
119
120
77 /* Verify that nothing else uses this physical address. Stolen 121 /* Verify that nothing else uses this physical address. Stolen
78 * memory should be reserved by the BIOS and hidden from the 122 * memory should be reserved by the BIOS and hidden from the
79 * kernel. So if the region is already marked as busy, something 123 * kernel. So if the region is already marked as busy, something
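The stolen-memory hunk above guards against a GTT that the BIOS placed inside stolen memory: it forms two candidate ranges (the part below the GTT and the part above it) and keeps whichever is larger, shrinking stolen_size accordingly. The interval arithmetic on its own, with made-up addresses:

#include <stdint.h>
#include <stdio.h>

struct range { uint64_t start, end; };   /* [start, end) */

/* Return the larger part of "stolen" that does not overlap "gtt". */
static struct range carve_out(struct range stolen, struct range gtt)
{
        struct range lo = stolen, hi = stolen;

        if (gtt.start >= lo.start && gtt.start < lo.end)
                lo.end = gtt.start;             /* chunk below the GTT */
        if (gtt.end > hi.start && gtt.end <= hi.end)
                hi.start = gtt.end;             /* chunk above the GTT */

        return (lo.end - lo.start > hi.end - hi.start) ? lo : hi;
}

int main(void)
{
        struct range stolen = { 0x7a000000, 0x7c000000 };   /* 32 MiB stolen */
        struct range gtt    = { 0x7a000000, 0x7a080000 };   /* GTT at the bottom */
        struct range kept   = carve_out(stolen, gtt);

        printf("usable stolen: 0x%llx-0x%llx\n",
               (unsigned long long)kept.start, (unsigned long long)kept.end - 1);
        return 0;
}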
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index e691b30b2817..a5bab61bfc00 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -942,6 +942,9 @@ enum punit_power_well {
942/* 942/*
943 * Instruction and interrupt control regs 943 * Instruction and interrupt control regs
944 */ 944 */
945#define PGTBL_CTL 0x02020
946#define PGTBL_ADDRESS_LO_MASK 0xfffff000 /* bits [31:12] */
947#define PGTBL_ADDRESS_HI_MASK 0x000000f0 /* bits [35:32] (gen4) */
945#define PGTBL_ER 0x02024 948#define PGTBL_ER 0x02024
946#define RENDER_RING_BASE 0x02000 949#define RENDER_RING_BASE 0x02000
947#define BSD_RING_BASE 0x04000 950#define BSD_RING_BASE 0x04000
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 1ee98f121a00..827498e081df 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -315,9 +315,6 @@ parse_lfp_backlight(struct drm_i915_private *dev_priv, struct bdb_header *bdb)
315 const struct bdb_lfp_backlight_data *backlight_data; 315 const struct bdb_lfp_backlight_data *backlight_data;
316 const struct bdb_lfp_backlight_data_entry *entry; 316 const struct bdb_lfp_backlight_data_entry *entry;
317 317
318 /* Err to enabling backlight if no backlight block. */
319 dev_priv->vbt.backlight.present = true;
320
321 backlight_data = find_section(bdb, BDB_LVDS_BACKLIGHT); 318 backlight_data = find_section(bdb, BDB_LVDS_BACKLIGHT);
322 if (!backlight_data) 319 if (!backlight_data)
323 return; 320 return;
@@ -1088,6 +1085,9 @@ init_vbt_defaults(struct drm_i915_private *dev_priv)
1088 1085
1089 dev_priv->vbt.crt_ddc_pin = GMBUS_PORT_VGADDC; 1086 dev_priv->vbt.crt_ddc_pin = GMBUS_PORT_VGADDC;
1090 1087
1088 /* Default to having backlight */
1089 dev_priv->vbt.backlight.present = true;
1090
1091 /* LFP panel data */ 1091 /* LFP panel data */
1092 dev_priv->vbt.lvds_dither = 1; 1092 dev_priv->vbt.lvds_dither = 1;
1093 dev_priv->vbt.lvds_vbt = 0; 1093 dev_priv->vbt.lvds_vbt = 0;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index efd3cf50cb0f..f0be855ddf45 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2087,6 +2087,7 @@ void intel_flush_primary_plane(struct drm_i915_private *dev_priv,
2087static void intel_enable_primary_hw_plane(struct drm_i915_private *dev_priv, 2087static void intel_enable_primary_hw_plane(struct drm_i915_private *dev_priv,
2088 enum plane plane, enum pipe pipe) 2088 enum plane plane, enum pipe pipe)
2089{ 2089{
2090 struct drm_device *dev = dev_priv->dev;
2090 struct intel_crtc *intel_crtc = 2091 struct intel_crtc *intel_crtc =
2091 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]); 2092 to_intel_crtc(dev_priv->pipe_to_crtc_mapping[pipe]);
2092 int reg; 2093 int reg;
@@ -2106,6 +2107,14 @@ static void intel_enable_primary_hw_plane(struct drm_i915_private *dev_priv,
2106 2107
2107 I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE); 2108 I915_WRITE(reg, val | DISPLAY_PLANE_ENABLE);
2108 intel_flush_primary_plane(dev_priv, plane); 2109 intel_flush_primary_plane(dev_priv, plane);
2110
2111 /*
2112 * BDW signals flip done immediately if the plane
2113 * is disabled, even if the plane enable is already
2114 * armed to occur at the next vblank :(
2115 */
2116 if (IS_BROADWELL(dev))
2117 intel_wait_for_vblank(dev, intel_crtc->pipe);
2109} 2118}
2110 2119
2111/** 2120/**
@@ -4564,7 +4573,10 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
4564 if (intel_crtc->active) 4573 if (intel_crtc->active)
4565 return; 4574 return;
4566 4575
4567 vlv_prepare_pll(intel_crtc); 4576 is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI);
4577
4578 if (!is_dsi && !IS_CHERRYVIEW(dev))
4579 vlv_prepare_pll(intel_crtc);
4568 4580
4569 /* Set up the display plane register */ 4581 /* Set up the display plane register */
4570 dspcntr = DISPPLANE_GAMMA_ENABLE; 4582 dspcntr = DISPPLANE_GAMMA_ENABLE;
@@ -4598,8 +4610,6 @@ static void valleyview_crtc_enable(struct drm_crtc *crtc)
4598 if (encoder->pre_pll_enable) 4610 if (encoder->pre_pll_enable)
4599 encoder->pre_pll_enable(encoder); 4611 encoder->pre_pll_enable(encoder);
4600 4612
4601 is_dsi = intel_pipe_has_type(crtc, INTEL_OUTPUT_DSI);
4602
4603 if (!is_dsi) { 4613 if (!is_dsi) {
4604 if (IS_CHERRYVIEW(dev)) 4614 if (IS_CHERRYVIEW(dev))
4605 chv_enable_pll(intel_crtc); 4615 chv_enable_pll(intel_crtc);
@@ -11087,6 +11097,22 @@ const char *intel_output_name(int output)
11087 return names[output]; 11097 return names[output];
11088} 11098}
11089 11099
11100static bool intel_crt_present(struct drm_device *dev)
11101{
11102 struct drm_i915_private *dev_priv = dev->dev_private;
11103
11104 if (IS_ULT(dev))
11105 return false;
11106
11107 if (IS_CHERRYVIEW(dev))
11108 return false;
11109
11110 if (IS_VALLEYVIEW(dev) && !dev_priv->vbt.int_crt_support)
11111 return false;
11112
11113 return true;
11114}
11115
11090static void intel_setup_outputs(struct drm_device *dev) 11116static void intel_setup_outputs(struct drm_device *dev)
11091{ 11117{
11092 struct drm_i915_private *dev_priv = dev->dev_private; 11118 struct drm_i915_private *dev_priv = dev->dev_private;
@@ -11095,7 +11121,7 @@ static void intel_setup_outputs(struct drm_device *dev)
11095 11121
11096 intel_lvds_init(dev); 11122 intel_lvds_init(dev);
11097 11123
11098 if (!IS_ULT(dev) && !IS_CHERRYVIEW(dev) && dev_priv->vbt.int_crt_support) 11124 if (intel_crt_present(dev))
11099 intel_crt_init(dev); 11125 intel_crt_init(dev);
11100 11126
11101 if (HAS_DDI(dev)) { 11127 if (HAS_DDI(dev)) {
@@ -11565,6 +11591,14 @@ static void quirk_invert_brightness(struct drm_device *dev)
11565 DRM_INFO("applying inverted panel brightness quirk\n"); 11591 DRM_INFO("applying inverted panel brightness quirk\n");
11566} 11592}
11567 11593
11594/* Some VBTs incorrectly indicate that no backlight is present */
11595static void quirk_backlight_present(struct drm_device *dev)
11596{
11597 struct drm_i915_private *dev_priv = dev->dev_private;
11598 dev_priv->quirks |= QUIRK_BACKLIGHT_PRESENT;
11599 DRM_INFO("applying backlight present quirk\n");
11600}
11601
11568struct intel_quirk { 11602struct intel_quirk {
11569 int device; 11603 int device;
11570 int subsystem_vendor; 11604 int subsystem_vendor;
@@ -11633,6 +11667,15 @@ static struct intel_quirk intel_quirks[] = {
11633 11667
11634 /* Acer Aspire 5336 */ 11668 /* Acer Aspire 5336 */
11635 { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness }, 11669 { 0x2a42, 0x1025, 0x048a, quirk_invert_brightness },
11670
11671 /* Acer C720 and C720P Chromebooks (Celeron 2955U) have backlights */
11672 { 0x0a06, 0x1025, 0x0a11, quirk_backlight_present },
11673
11674 /* Toshiba CB35 Chromebook (Celeron 2955U) */
11675 { 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
11676
11677 /* HP Chromebook 14 (Celeron 2955U) */
11678 { 0x0a06, 0x103c, 0x21ed, quirk_backlight_present },
11636}; 11679};
11637 11680
11638static void intel_init_quirks(struct drm_device *dev) 11681static void intel_init_quirks(struct drm_device *dev)
@@ -11871,6 +11914,7 @@ static void intel_sanitize_crtc(struct intel_crtc *crtc)
11871 * ... */ 11914 * ... */
11872 plane = crtc->plane; 11915 plane = crtc->plane;
11873 crtc->plane = !plane; 11916 crtc->plane = !plane;
11917 crtc->primary_enabled = true;
11874 dev_priv->display.crtc_disable(&crtc->base); 11918 dev_priv->display.crtc_disable(&crtc->base);
11875 crtc->plane = plane; 11919 crtc->plane = plane;
11876 11920
@@ -12411,8 +12455,8 @@ intel_display_capture_error_state(struct drm_device *dev)
12411 12455
12412 for_each_pipe(i) { 12456 for_each_pipe(i) {
12413 error->pipe[i].power_domain_on = 12457 error->pipe[i].power_domain_on =
12414 intel_display_power_enabled_sw(dev_priv, 12458 intel_display_power_enabled_unlocked(dev_priv,
12415 POWER_DOMAIN_PIPE(i)); 12459 POWER_DOMAIN_PIPE(i));
12416 if (!error->pipe[i].power_domain_on) 12460 if (!error->pipe[i].power_domain_on)
12417 continue; 12461 continue;
12418 12462
@@ -12447,7 +12491,7 @@ intel_display_capture_error_state(struct drm_device *dev)
12447 enum transcoder cpu_transcoder = transcoders[i]; 12491 enum transcoder cpu_transcoder = transcoders[i];
12448 12492
12449 error->transcoder[i].power_domain_on = 12493 error->transcoder[i].power_domain_on =
12450 intel_display_power_enabled_sw(dev_priv, 12494 intel_display_power_enabled_unlocked(dev_priv,
12451 POWER_DOMAIN_TRANSCODER(cpu_transcoder)); 12495 POWER_DOMAIN_TRANSCODER(cpu_transcoder));
12452 if (!error->transcoder[i].power_domain_on) 12496 if (!error->transcoder[i].power_domain_on)
12453 continue; 12497 continue;
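The intel_display.c hunks add a QUIRK_BACKLIGHT_PRESENT flag plus table entries for three Celeron 2955U Chromebooks whose VBT wrongly claims no backlight; intel_panel.c later honours the quirk. The table is keyed on PCI device and subsystem IDs; a simplified lookup might look like the following (IDs taken from the hunk, but the struct and matching code are stand-ins for the driver's quirk machinery, which stores a hook function rather than a flag):

#include <stdio.h>

#define QUIRK_BACKLIGHT_PRESENT (1 << 3)

struct quirk {
        int device, subsystem_vendor, subsystem_device;
        unsigned int flag;
};

/* Subset of the quirk table from the patch above. */
static const struct quirk quirks[] = {
        { 0x0a06, 0x1025, 0x0a11, QUIRK_BACKLIGHT_PRESENT },   /* Acer C720 */
        { 0x0a06, 0x1179, 0x0a88, QUIRK_BACKLIGHT_PRESENT },   /* Toshiba CB35 */
        { 0x0a06, 0x103c, 0x21ed, QUIRK_BACKLIGHT_PRESENT },   /* HP Chromebook 14 */
};

static unsigned int lookup_quirks(int dev, int sub_ven, int sub_dev)
{
        unsigned int flags = 0;
        size_t i;

        for (i = 0; i < sizeof(quirks) / sizeof(quirks[0]); i++)
                if (quirks[i].device == dev &&
                    quirks[i].subsystem_vendor == sub_ven &&
                    quirks[i].subsystem_device == sub_dev)
                        flags |= quirks[i].flag;
        return flags;
}

int main(void)
{
        unsigned int flags = lookup_quirks(0x0a06, 0x1025, 0x0a11);

        if (flags & QUIRK_BACKLIGHT_PRESENT)
                printf("applying backlight present quirk\n");
        return 0;
}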
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 52fda950fd2a..8a1a4fbc06ac 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -28,6 +28,8 @@
28#include <linux/i2c.h> 28#include <linux/i2c.h>
29#include <linux/slab.h> 29#include <linux/slab.h>
30#include <linux/export.h> 30#include <linux/export.h>
31#include <linux/notifier.h>
32#include <linux/reboot.h>
31#include <drm/drmP.h> 33#include <drm/drmP.h>
32#include <drm/drm_crtc.h> 34#include <drm/drm_crtc.h>
33#include <drm/drm_crtc_helper.h> 35#include <drm/drm_crtc_helper.h>
@@ -336,6 +338,37 @@ static u32 _pp_stat_reg(struct intel_dp *intel_dp)
336 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp)); 338 return VLV_PIPE_PP_STATUS(vlv_power_sequencer_pipe(intel_dp));
337} 339}
338 340
 341/* Reboot notifier handler to shut down panel power and guarantee T12 timing.
 342 This function is only applicable when the panel PM state is not tracked. */
343static int edp_notify_handler(struct notifier_block *this, unsigned long code,
344 void *unused)
345{
346 struct intel_dp *intel_dp = container_of(this, typeof(* intel_dp),
347 edp_notifier);
348 struct drm_device *dev = intel_dp_to_dev(intel_dp);
349 struct drm_i915_private *dev_priv = dev->dev_private;
350 u32 pp_div;
351 u32 pp_ctrl_reg, pp_div_reg;
352 enum pipe pipe = vlv_power_sequencer_pipe(intel_dp);
353
354 if (!is_edp(intel_dp) || code != SYS_RESTART)
355 return 0;
356
357 if (IS_VALLEYVIEW(dev)) {
358 pp_ctrl_reg = VLV_PIPE_PP_CONTROL(pipe);
359 pp_div_reg = VLV_PIPE_PP_DIVISOR(pipe);
360 pp_div = I915_READ(pp_div_reg);
361 pp_div &= PP_REFERENCE_DIVIDER_MASK;
362
363 /* 0x1F write to PP_DIV_REG sets max cycle delay */
364 I915_WRITE(pp_div_reg, pp_div | 0x1F);
365 I915_WRITE(pp_ctrl_reg, PANEL_UNLOCK_REGS | PANEL_POWER_OFF);
366 msleep(intel_dp->panel_power_cycle_delay);
367 }
368
369 return 0;
370}
371
339static bool edp_have_panel_power(struct intel_dp *intel_dp) 372static bool edp_have_panel_power(struct intel_dp *intel_dp)
340{ 373{
341 struct drm_device *dev = intel_dp_to_dev(intel_dp); 374 struct drm_device *dev = intel_dp_to_dev(intel_dp);
@@ -873,8 +906,8 @@ intel_dp_compute_config(struct intel_encoder *encoder,
873 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock, 906 mode_rate = intel_dp_link_required(adjusted_mode->crtc_clock,
874 bpp); 907 bpp);
875 908
876 for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) { 909 for (clock = min_clock; clock <= max_clock; clock++) {
877 for (clock = min_clock; clock <= max_clock; clock++) { 910 for (lane_count = min_lane_count; lane_count <= max_lane_count; lane_count <<= 1) {
878 link_clock = drm_dp_bw_code_to_link_rate(bws[clock]); 911 link_clock = drm_dp_bw_code_to_link_rate(bws[clock]);
879 link_avail = intel_dp_max_data_rate(link_clock, 912 link_avail = intel_dp_max_data_rate(link_clock,
880 lane_count); 913 lane_count);
@@ -3707,6 +3740,10 @@ void intel_dp_encoder_destroy(struct drm_encoder *encoder)
3707 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL); 3740 drm_modeset_lock(&dev->mode_config.connection_mutex, NULL);
3708 edp_panel_vdd_off_sync(intel_dp); 3741 edp_panel_vdd_off_sync(intel_dp);
3709 drm_modeset_unlock(&dev->mode_config.connection_mutex); 3742 drm_modeset_unlock(&dev->mode_config.connection_mutex);
3743 if (intel_dp->edp_notifier.notifier_call) {
3744 unregister_reboot_notifier(&intel_dp->edp_notifier);
3745 intel_dp->edp_notifier.notifier_call = NULL;
3746 }
3710 } 3747 }
3711 kfree(intel_dig_port); 3748 kfree(intel_dig_port);
3712} 3749}
@@ -4184,6 +4221,11 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp,
4184 } 4221 }
4185 mutex_unlock(&dev->mode_config.mutex); 4222 mutex_unlock(&dev->mode_config.mutex);
4186 4223
4224 if (IS_VALLEYVIEW(dev)) {
4225 intel_dp->edp_notifier.notifier_call = edp_notify_handler;
4226 register_reboot_notifier(&intel_dp->edp_notifier);
4227 }
4228
4187 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode); 4229 intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
4188 intel_panel_setup_backlight(connector); 4230 intel_panel_setup_backlight(connector);
4189 4231
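The intel_dp.c hunks register a reboot notifier on VLV so an eDP panel is forced off with the maximum power-cycle delay before restart, and they also swap the nesting of the link-configuration search: the new code walks link clocks in the outer loop and lane counts in the inner loop, so the first configuration whose payload covers the mode is found at the lowest usable link rate. A standalone sketch of that search with approximate DP payload figures (all numbers illustrative, not the driver's helpers):

#include <stdio.h>

/* Raw DP link rates in kbit/s per lane; payload is 8/10 of that (8b/10b). */
static const long long link_kbps[] = { 1620000, 2700000, 5400000 };

static long long payload_kbps(int clock, int lanes)
{
        return link_kbps[clock] * lanes * 8 / 10;
}

int main(void)
{
        long long mode_kbps = 148500LL * 24;    /* 1080p60 at 24 bpp */
        int clock, lanes;

        /* New nesting: lowest link clock first, then grow the lane count. */
        for (clock = 0; clock < 3; clock++)
                for (lanes = 1; lanes <= 4; lanes <<= 1)
                        if (payload_kbps(clock, lanes) >= mode_kbps) {
                                printf("picked %lld kbit/s x%d lanes\n",
                                       link_kbps[clock], lanes);
                                return 0;
                        }

        printf("mode does not fit on this link\n");
        return 0;
}

With the previous nesting (lane count outermost), the same mode would have landed on a single 5.4 Gbit/s lane; the new order settles on four 1.62 Gbit/s lanes.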
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index bda0ae3d80cc..f67340ed2c12 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -538,6 +538,8 @@ struct intel_dp {
538 unsigned long last_power_on; 538 unsigned long last_power_on;
539 unsigned long last_backlight_off; 539 unsigned long last_backlight_off;
540 bool psr_setup_done; 540 bool psr_setup_done;
541 struct notifier_block edp_notifier;
542
541 bool use_tps3; 543 bool use_tps3;
542 struct intel_connector *attached_connector; 544 struct intel_connector *attached_connector;
543 545
@@ -950,8 +952,8 @@ int intel_power_domains_init(struct drm_i915_private *);
950void intel_power_domains_remove(struct drm_i915_private *); 952void intel_power_domains_remove(struct drm_i915_private *);
951bool intel_display_power_enabled(struct drm_i915_private *dev_priv, 953bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
952 enum intel_display_power_domain domain); 954 enum intel_display_power_domain domain);
953bool intel_display_power_enabled_sw(struct drm_i915_private *dev_priv, 955bool intel_display_power_enabled_unlocked(struct drm_i915_private *dev_priv,
954 enum intel_display_power_domain domain); 956 enum intel_display_power_domain domain);
955void intel_display_power_get(struct drm_i915_private *dev_priv, 957void intel_display_power_get(struct drm_i915_private *dev_priv,
956 enum intel_display_power_domain domain); 958 enum intel_display_power_domain domain);
957void intel_display_power_put(struct drm_i915_private *dev_priv, 959void intel_display_power_put(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 02f99d768d49..3fd082933c87 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -117,17 +117,18 @@ static void intel_dsi_device_ready(struct intel_encoder *encoder)
117 /* bandgap reset is needed after everytime we do power gate */ 117 /* bandgap reset is needed after everytime we do power gate */
118 band_gap_reset(dev_priv); 118 band_gap_reset(dev_priv);
119 119
120 I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_ENTER);
121 usleep_range(2500, 3000);
122
120 val = I915_READ(MIPI_PORT_CTRL(pipe)); 123 val = I915_READ(MIPI_PORT_CTRL(pipe));
121 I915_WRITE(MIPI_PORT_CTRL(pipe), val | LP_OUTPUT_HOLD); 124 I915_WRITE(MIPI_PORT_CTRL(pipe), val | LP_OUTPUT_HOLD);
122 usleep_range(1000, 1500); 125 usleep_range(1000, 1500);
123 I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_EXIT); 126
124 usleep_range(2000, 2500); 127 I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_EXIT);
125 I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY); 128 usleep_range(2500, 3000);
126 usleep_range(2000, 2500); 129
127 I915_WRITE(MIPI_DEVICE_READY(pipe), 0x00);
128 usleep_range(2000, 2500);
129 I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY); 130 I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY);
130 usleep_range(2000, 2500); 131 usleep_range(2500, 3000);
131} 132}
132 133
133static void intel_dsi_enable(struct intel_encoder *encoder) 134static void intel_dsi_enable(struct intel_encoder *encoder)
@@ -271,23 +272,23 @@ static void intel_dsi_clear_device_ready(struct intel_encoder *encoder)
271 272
272 DRM_DEBUG_KMS("\n"); 273 DRM_DEBUG_KMS("\n");
273 274
274 I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_ENTER); 275 I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_ENTER);
275 usleep_range(2000, 2500); 276 usleep_range(2000, 2500);
276 277
277 I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_EXIT); 278 I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_EXIT);
278 usleep_range(2000, 2500); 279 usleep_range(2000, 2500);
279 280
280 I915_WRITE(MIPI_DEVICE_READY(pipe), ULPS_STATE_ENTER); 281 I915_WRITE(MIPI_DEVICE_READY(pipe), DEVICE_READY | ULPS_STATE_ENTER);
281 usleep_range(2000, 2500); 282 usleep_range(2000, 2500);
282 283
283 val = I915_READ(MIPI_PORT_CTRL(pipe));
284 I915_WRITE(MIPI_PORT_CTRL(pipe), val & ~LP_OUTPUT_HOLD);
285 usleep_range(1000, 1500);
286
287 if (wait_for(((I915_READ(MIPI_PORT_CTRL(pipe)) & AFE_LATCHOUT) 284 if (wait_for(((I915_READ(MIPI_PORT_CTRL(pipe)) & AFE_LATCHOUT)
288 == 0x00000), 30)) 285 == 0x00000), 30))
289 DRM_ERROR("DSI LP not going Low\n"); 286 DRM_ERROR("DSI LP not going Low\n");
290 287
288 val = I915_READ(MIPI_PORT_CTRL(pipe));
289 I915_WRITE(MIPI_PORT_CTRL(pipe), val & ~LP_OUTPUT_HOLD);
290 usleep_range(1000, 1500);
291
291 I915_WRITE(MIPI_DEVICE_READY(pipe), 0x00); 292 I915_WRITE(MIPI_DEVICE_READY(pipe), 0x00);
292 usleep_range(2000, 2500); 293 usleep_range(2000, 2500);
293 294
diff --git a/drivers/gpu/drm/i915/intel_dsi_cmd.c b/drivers/gpu/drm/i915/intel_dsi_cmd.c
index 3eeb21b9fddf..933c86305237 100644
--- a/drivers/gpu/drm/i915/intel_dsi_cmd.c
+++ b/drivers/gpu/drm/i915/intel_dsi_cmd.c
@@ -404,12 +404,6 @@ int dpi_send_cmd(struct intel_dsi *intel_dsi, u32 cmd, bool hs)
404 else 404 else
405 cmd |= DPI_LP_MODE; 405 cmd |= DPI_LP_MODE;
406 406
407 /* DPI virtual channel?! */
408
409 mask = DPI_FIFO_EMPTY;
410 if (wait_for((I915_READ(MIPI_GEN_FIFO_STAT(pipe)) & mask) == mask, 50))
411 DRM_ERROR("Timeout waiting for DPI FIFO empty.\n");
412
413 /* clear bit */ 407 /* clear bit */
414 I915_WRITE(MIPI_INTR_STAT(pipe), SPL_PKT_SENT_INTERRUPT); 408 I915_WRITE(MIPI_INTR_STAT(pipe), SPL_PKT_SENT_INTERRUPT);
415 409
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 23126023aeba..5e5a72fca5fb 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -111,6 +111,13 @@ static void intel_lvds_get_config(struct intel_encoder *encoder,
111 111
112 pipe_config->adjusted_mode.flags |= flags; 112 pipe_config->adjusted_mode.flags |= flags;
113 113
114 /* gen2/3 store dither state in pfit control, needs to match */
115 if (INTEL_INFO(dev)->gen < 4) {
116 tmp = I915_READ(PFIT_CONTROL);
117
118 pipe_config->gmch_pfit.control |= tmp & PANEL_8TO6_DITHER_ENABLE;
119 }
120
114 dotclock = pipe_config->port_clock; 121 dotclock = pipe_config->port_clock;
115 122
116 if (HAS_PCH_SPLIT(dev_priv->dev)) 123 if (HAS_PCH_SPLIT(dev_priv->dev))
diff --git a/drivers/gpu/drm/i915/intel_opregion.c b/drivers/gpu/drm/i915/intel_opregion.c
index 2e2c71fcc9ed..4f6b53998d79 100644
--- a/drivers/gpu/drm/i915/intel_opregion.c
+++ b/drivers/gpu/drm/i915/intel_opregion.c
@@ -403,6 +403,15 @@ static u32 asle_set_backlight(struct drm_device *dev, u32 bclp)
403 403
404 DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp); 404 DRM_DEBUG_DRIVER("bclp = 0x%08x\n", bclp);
405 405
406 /*
407 * If the acpi_video interface is not supposed to be used, don't
408 * bother processing backlight level change requests from firmware.
409 */
410 if (!acpi_video_verify_backlight_support()) {
411 DRM_DEBUG_KMS("opregion backlight request ignored\n");
412 return 0;
413 }
414
406 if (!(bclp & ASLE_BCLP_VALID)) 415 if (!(bclp & ASLE_BCLP_VALID))
407 return ASLC_BACKLIGHT_FAILED; 416 return ASLC_BACKLIGHT_FAILED;
408 417
diff --git a/drivers/gpu/drm/i915/intel_panel.c b/drivers/gpu/drm/i915/intel_panel.c
index 38a98570d10c..12b02fe1d0ae 100644
--- a/drivers/gpu/drm/i915/intel_panel.c
+++ b/drivers/gpu/drm/i915/intel_panel.c
@@ -361,16 +361,16 @@ void intel_gmch_panel_fitting(struct intel_crtc *intel_crtc,
361 pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) | 361 pfit_control |= ((intel_crtc->pipe << PFIT_PIPE_SHIFT) |
362 PFIT_FILTER_FUZZY); 362 PFIT_FILTER_FUZZY);
363 363
364 /* Make sure pre-965 set dither correctly for 18bpp panels. */
365 if (INTEL_INFO(dev)->gen < 4 && pipe_config->pipe_bpp == 18)
366 pfit_control |= PANEL_8TO6_DITHER_ENABLE;
367
368out: 364out:
369 if ((pfit_control & PFIT_ENABLE) == 0) { 365 if ((pfit_control & PFIT_ENABLE) == 0) {
370 pfit_control = 0; 366 pfit_control = 0;
371 pfit_pgm_ratios = 0; 367 pfit_pgm_ratios = 0;
372 } 368 }
373 369
370 /* Make sure pre-965 set dither correctly for 18bpp panels. */
371 if (INTEL_INFO(dev)->gen < 4 && pipe_config->pipe_bpp == 18)
372 pfit_control |= PANEL_8TO6_DITHER_ENABLE;
373
374 pipe_config->gmch_pfit.control = pfit_control; 374 pipe_config->gmch_pfit.control = pfit_control;
375 pipe_config->gmch_pfit.pgm_ratios = pfit_pgm_ratios; 375 pipe_config->gmch_pfit.pgm_ratios = pfit_pgm_ratios;
376 pipe_config->gmch_pfit.lvds_border_bits = border; 376 pipe_config->gmch_pfit.lvds_border_bits = border;
@@ -1118,8 +1118,12 @@ int intel_panel_setup_backlight(struct drm_connector *connector)
1118 int ret; 1118 int ret;
1119 1119
1120 if (!dev_priv->vbt.backlight.present) { 1120 if (!dev_priv->vbt.backlight.present) {
1121 DRM_DEBUG_KMS("native backlight control not available per VBT\n"); 1121 if (dev_priv->quirks & QUIRK_BACKLIGHT_PRESENT) {
1122 return 0; 1122 DRM_DEBUG_KMS("no backlight present per VBT, but present per quirk\n");
1123 } else {
1124 DRM_DEBUG_KMS("no backlight present per VBT\n");
1125 return 0;
1126 }
1123 } 1127 }
1124 1128
1125 /* set level and max in panel struct */ 1129 /* set level and max in panel struct */
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index 54242e4f6f4c..ee72807069e4 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -3209,6 +3209,14 @@ void gen6_set_rps(struct drm_device *dev, u8 val)
3209*/ 3209*/
3210static void vlv_set_rps_idle(struct drm_i915_private *dev_priv) 3210static void vlv_set_rps_idle(struct drm_i915_private *dev_priv)
3211{ 3211{
3212 struct drm_device *dev = dev_priv->dev;
3213
3214 /* Latest VLV doesn't need to force the gfx clock */
3215 if (dev->pdev->revision >= 0xd) {
3216 valleyview_set_rps(dev_priv->dev, dev_priv->rps.min_freq_softlimit);
3217 return;
3218 }
3219
3212 /* 3220 /*
3213 * When we are idle. Drop to min voltage state. 3221 * When we are idle. Drop to min voltage state.
3214 */ 3222 */
@@ -5603,8 +5611,8 @@ static bool hsw_power_well_enabled(struct drm_i915_private *dev_priv,
5603 (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED); 5611 (HSW_PWR_WELL_ENABLE_REQUEST | HSW_PWR_WELL_STATE_ENABLED);
5604} 5612}
5605 5613
5606bool intel_display_power_enabled_sw(struct drm_i915_private *dev_priv, 5614bool intel_display_power_enabled_unlocked(struct drm_i915_private *dev_priv,
5607 enum intel_display_power_domain domain) 5615 enum intel_display_power_domain domain)
5608{ 5616{
5609 struct i915_power_domains *power_domains; 5617 struct i915_power_domains *power_domains;
5610 struct i915_power_well *power_well; 5618 struct i915_power_well *power_well;
@@ -5615,16 +5623,19 @@ bool intel_display_power_enabled_sw(struct drm_i915_private *dev_priv,
5615 return false; 5623 return false;
5616 5624
5617 power_domains = &dev_priv->power_domains; 5625 power_domains = &dev_priv->power_domains;
5626
5618 is_enabled = true; 5627 is_enabled = true;
5628
5619 for_each_power_well_rev(i, power_well, BIT(domain), power_domains) { 5629 for_each_power_well_rev(i, power_well, BIT(domain), power_domains) {
5620 if (power_well->always_on) 5630 if (power_well->always_on)
5621 continue; 5631 continue;
5622 5632
5623 if (!power_well->count) { 5633 if (!power_well->hw_enabled) {
5624 is_enabled = false; 5634 is_enabled = false;
5625 break; 5635 break;
5626 } 5636 }
5627 } 5637 }
5638
5628 return is_enabled; 5639 return is_enabled;
5629} 5640}
5630 5641
@@ -5632,30 +5643,15 @@ bool intel_display_power_enabled(struct drm_i915_private *dev_priv,
5632 enum intel_display_power_domain domain) 5643 enum intel_display_power_domain domain)
5633{ 5644{
5634 struct i915_power_domains *power_domains; 5645 struct i915_power_domains *power_domains;
5635 struct i915_power_well *power_well; 5646 bool ret;
5636 bool is_enabled;
5637 int i;
5638
5639 if (dev_priv->pm.suspended)
5640 return false;
5641 5647
5642 power_domains = &dev_priv->power_domains; 5648 power_domains = &dev_priv->power_domains;
5643 5649
5644 is_enabled = true;
5645
5646 mutex_lock(&power_domains->lock); 5650 mutex_lock(&power_domains->lock);
5647 for_each_power_well_rev(i, power_well, BIT(domain), power_domains) { 5651 ret = intel_display_power_enabled_unlocked(dev_priv, domain);
5648 if (power_well->always_on)
5649 continue;
5650
5651 if (!power_well->ops->is_enabled(dev_priv, power_well)) {
5652 is_enabled = false;
5653 break;
5654 }
5655 }
5656 mutex_unlock(&power_domains->lock); 5652 mutex_unlock(&power_domains->lock);
5657 5653
5658 return is_enabled; 5654 return ret;
5659} 5655}
5660 5656
5661/* 5657/*
@@ -5976,6 +5972,7 @@ void intel_display_power_get(struct drm_i915_private *dev_priv,
5976 if (!power_well->count++) { 5972 if (!power_well->count++) {
5977 DRM_DEBUG_KMS("enabling %s\n", power_well->name); 5973 DRM_DEBUG_KMS("enabling %s\n", power_well->name);
5978 power_well->ops->enable(dev_priv, power_well); 5974 power_well->ops->enable(dev_priv, power_well);
5975 power_well->hw_enabled = true;
5979 } 5976 }
5980 5977
5981 check_power_well_state(dev_priv, power_well); 5978 check_power_well_state(dev_priv, power_well);
@@ -6005,6 +6002,7 @@ void intel_display_power_put(struct drm_i915_private *dev_priv,
6005 6002
6006 if (!--power_well->count && i915.disable_power_well) { 6003 if (!--power_well->count && i915.disable_power_well) {
6007 DRM_DEBUG_KMS("disabling %s\n", power_well->name); 6004 DRM_DEBUG_KMS("disabling %s\n", power_well->name);
6005 power_well->hw_enabled = false;
6008 power_well->ops->disable(dev_priv, power_well); 6006 power_well->ops->disable(dev_priv, power_well);
6009 } 6007 }
6010 6008
@@ -6048,6 +6046,27 @@ int i915_release_power_well(void)
6048} 6046}
6049EXPORT_SYMBOL_GPL(i915_release_power_well); 6047EXPORT_SYMBOL_GPL(i915_release_power_well);
6050 6048
6049/*
6050 * Private interface for the audio driver to get CDCLK in kHz.
6051 *
6052 * Caller must request power well using i915_request_power_well() prior to
6053 * making the call.
6054 */
6055int i915_get_cdclk_freq(void)
6056{
6057 struct drm_i915_private *dev_priv;
6058
6059 if (!hsw_pwr)
6060 return -ENODEV;
6061
6062 dev_priv = container_of(hsw_pwr, struct drm_i915_private,
6063 power_domains);
6064
6065 return intel_ddi_get_cdclk_freq(dev_priv);
6066}
6067EXPORT_SYMBOL_GPL(i915_get_cdclk_freq);
6068
6069
6051#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1) 6070#define POWER_DOMAIN_MASK (BIT(POWER_DOMAIN_NUM) - 1)
6052 6071
6053#define HSW_ALWAYS_ON_POWER_DOMAINS ( \ 6072#define HSW_ALWAYS_ON_POWER_DOMAINS ( \
@@ -6267,8 +6286,11 @@ static void intel_power_domains_resume(struct drm_i915_private *dev_priv)
6267 int i; 6286 int i;
6268 6287
6269 mutex_lock(&power_domains->lock); 6288 mutex_lock(&power_domains->lock);
6270 for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) 6289 for_each_power_well(i, power_well, POWER_DOMAIN_MASK, power_domains) {
6271 power_well->ops->sync_hw(dev_priv, power_well); 6290 power_well->ops->sync_hw(dev_priv, power_well);
6291 power_well->hw_enabled = power_well->ops->is_enabled(dev_priv,
6292 power_well);
6293 }
6272 mutex_unlock(&power_domains->lock); 6294 mutex_unlock(&power_domains->lock);
6273} 6295}
6274 6296
diff --git a/drivers/gpu/drm/i915/intel_sprite.c b/drivers/gpu/drm/i915/intel_sprite.c
index 1b66ddcdfb33..9a17b4e92ef4 100644
--- a/drivers/gpu/drm/i915/intel_sprite.c
+++ b/drivers/gpu/drm/i915/intel_sprite.c
@@ -691,6 +691,14 @@ intel_post_enable_primary(struct drm_crtc *crtc)
691 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 691 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
692 692
693 /* 693 /*
694 * BDW signals flip done immediately if the plane
695 * is disabled, even if the plane enable is already
696 * armed to occur at the next vblank :(
697 */
698 if (IS_BROADWELL(dev))
699 intel_wait_for_vblank(dev, intel_crtc->pipe);
700
701 /*
694 * FIXME IPS should be fine as long as one plane is 702 * FIXME IPS should be fine as long as one plane is
695 * enabled, but in practice it seems to have problems 703 * enabled, but in practice it seems to have problems
696 * when going from primary only to sprite only and vice 704 * when going from primary only to sprite only and vice
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index ae750f6928c1..7f7aadef8a82 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -277,6 +277,7 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
277 static const char *hpd_reg_names[] = {"hpd-gdsc", "hpd-5v"}; 277 static const char *hpd_reg_names[] = {"hpd-gdsc", "hpd-5v"};
278 static const char *pwr_reg_names[] = {"core-vdda", "core-vcc"}; 278 static const char *pwr_reg_names[] = {"core-vdda", "core-vcc"};
279 static const char *hpd_clk_names[] = {"iface_clk", "core_clk", "mdp_core_clk"}; 279 static const char *hpd_clk_names[] = {"iface_clk", "core_clk", "mdp_core_clk"};
280 static unsigned long hpd_clk_freq[] = {0, 19200000, 0};
280 static const char *pwr_clk_names[] = {"extp_clk", "alt_iface_clk"}; 281 static const char *pwr_clk_names[] = {"extp_clk", "alt_iface_clk"};
281 282
282 config.phy_init = hdmi_phy_8x74_init; 283 config.phy_init = hdmi_phy_8x74_init;
@@ -286,6 +287,7 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
286 config.pwr_reg_names = pwr_reg_names; 287 config.pwr_reg_names = pwr_reg_names;
287 config.pwr_reg_cnt = ARRAY_SIZE(pwr_reg_names); 288 config.pwr_reg_cnt = ARRAY_SIZE(pwr_reg_names);
288 config.hpd_clk_names = hpd_clk_names; 289 config.hpd_clk_names = hpd_clk_names;
290 config.hpd_freq = hpd_clk_freq;
289 config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names); 291 config.hpd_clk_cnt = ARRAY_SIZE(hpd_clk_names);
290 config.pwr_clk_names = pwr_clk_names; 292 config.pwr_clk_names = pwr_clk_names;
291 config.pwr_clk_cnt = ARRAY_SIZE(pwr_clk_names); 293 config.pwr_clk_cnt = ARRAY_SIZE(pwr_clk_names);
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index 9fafee6a3e43..9d7723c6528a 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -87,6 +87,7 @@ struct hdmi_platform_config {
87 87
88 /* clks that need to be on for hpd: */ 88 /* clks that need to be on for hpd: */
89 const char **hpd_clk_names; 89 const char **hpd_clk_names;
90 const long unsigned *hpd_freq;
90 int hpd_clk_cnt; 91 int hpd_clk_cnt;
91 92
92 /* clks that need to be on for screen pwr (ie pixel clk): */ 93 /* clks that need to be on for screen pwr (ie pixel clk): */
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
index e56a6196867c..28f7e3ec6c28 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_connector.c
@@ -127,6 +127,14 @@ static int hpd_enable(struct hdmi_connector *hdmi_connector)
127 } 127 }
128 128
129 for (i = 0; i < config->hpd_clk_cnt; i++) { 129 for (i = 0; i < config->hpd_clk_cnt; i++) {
130 if (config->hpd_freq && config->hpd_freq[i]) {
131 ret = clk_set_rate(hdmi->hpd_clks[i],
132 config->hpd_freq[i]);
133 if (ret)
134 dev_warn(dev->dev, "failed to set clk %s (%d)\n",
135 config->hpd_clk_names[i], ret);
136 }
137
130 ret = clk_prepare_enable(hdmi->hpd_clks[i]); 138 ret = clk_prepare_enable(hdmi->hpd_clks[i]);
131 if (ret) { 139 if (ret) {
132 dev_err(dev->dev, "failed to enable hpd clk: %s (%d)\n", 140 dev_err(dev->dev, "failed to enable hpd clk: %s (%d)\n",
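The three msm/hdmi hunks above add a per-clock frequency table alongside the HPD clock names, so clocks that need a fixed rate (19.2 MHz for core_clk in the table above) are programmed with clk_set_rate() before clk_prepare_enable(), and a zero entry leaves the rate untouched. A minimal sketch of that pattern against the common clock framework follows; the function and parameter names are illustrative, not the driver's.

    #include <linux/clk.h>
    #include <linux/device.h>

    /* Sketch only: freqs[i] == 0 means "do not touch the rate", mirroring
     * the hpd_clk_freq table introduced above. */
    static int enable_clocks(struct device *dev, struct clk **clks,
                             const unsigned long *freqs, int count)
    {
            int i, ret;

            for (i = 0; i < count; i++) {
                    if (freqs && freqs[i]) {
                            ret = clk_set_rate(clks[i], freqs[i]);
                            if (ret)
                                    dev_warn(dev, "failed to set rate of clk %d: %d\n",
                                             i, ret);
                    }
                    ret = clk_prepare_enable(clks[i]);
                    if (ret) {
                            dev_err(dev, "failed to enable clk %d: %d\n", i, ret);
                            return ret;
                    }
            }
            return 0;
    }

Setting the rate before the enable means each clock comes up already running at its required frequency, which is what the hpd_enable() change above relies on.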
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 42caf7fcb0b9..71510ee26e96 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -20,6 +20,10 @@
20#include "msm_mmu.h" 20#include "msm_mmu.h"
21#include "mdp5_kms.h" 21#include "mdp5_kms.h"
22 22
23static const char *iommu_ports[] = {
24 "mdp_0",
25};
26
23static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev); 27static struct mdp5_platform_config *mdp5_get_config(struct platform_device *dev);
24 28
25static int mdp5_hw_init(struct msm_kms *kms) 29static int mdp5_hw_init(struct msm_kms *kms)
@@ -104,6 +108,12 @@ static void mdp5_preclose(struct msm_kms *kms, struct drm_file *file)
104static void mdp5_destroy(struct msm_kms *kms) 108static void mdp5_destroy(struct msm_kms *kms)
105{ 109{
106 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); 110 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
111 struct msm_mmu *mmu = mdp5_kms->mmu;
112
113 if (mmu) {
114 mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
115 mmu->funcs->destroy(mmu);
116 }
107 kfree(mdp5_kms); 117 kfree(mdp5_kms);
108} 118}
109 119
@@ -216,10 +226,6 @@ fail:
216 return ret; 226 return ret;
217} 227}
218 228
219static const char *iommu_ports[] = {
220 "mdp_0",
221};
222
223static int get_clk(struct platform_device *pdev, struct clk **clkp, 229static int get_clk(struct platform_device *pdev, struct clk **clkp,
224 const char *name) 230 const char *name)
225{ 231{
@@ -317,17 +323,23 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
317 mmu = msm_iommu_new(dev, config->iommu); 323 mmu = msm_iommu_new(dev, config->iommu);
318 if (IS_ERR(mmu)) { 324 if (IS_ERR(mmu)) {
319 ret = PTR_ERR(mmu); 325 ret = PTR_ERR(mmu);
326 dev_err(dev->dev, "failed to init iommu: %d\n", ret);
320 goto fail; 327 goto fail;
321 } 328 }
329
322 ret = mmu->funcs->attach(mmu, iommu_ports, 330 ret = mmu->funcs->attach(mmu, iommu_ports,
323 ARRAY_SIZE(iommu_ports)); 331 ARRAY_SIZE(iommu_ports));
324 if (ret) 332 if (ret) {
333 dev_err(dev->dev, "failed to attach iommu: %d\n", ret);
334 mmu->funcs->destroy(mmu);
325 goto fail; 335 goto fail;
336 }
326 } else { 337 } else {
327 dev_info(dev->dev, "no iommu, fallback to phys " 338 dev_info(dev->dev, "no iommu, fallback to phys "
328 "contig buffers for scanout\n"); 339 "contig buffers for scanout\n");
329 mmu = NULL; 340 mmu = NULL;
330 } 341 }
342 mdp5_kms->mmu = mmu;
331 343
332 mdp5_kms->id = msm_register_mmu(dev, mmu); 344 mdp5_kms->id = msm_register_mmu(dev, mmu);
333 if (mdp5_kms->id < 0) { 345 if (mdp5_kms->id < 0) {
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index c8b1a2522c25..6e981b692d1d 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -33,6 +33,7 @@ struct mdp5_kms {
33 33
34 /* mapper-id used to request GEM buffer mapped for scanout: */ 34 /* mapper-id used to request GEM buffer mapped for scanout: */
35 int id; 35 int id;
36 struct msm_mmu *mmu;
36 37
37 /* for tracking smp allocation amongst pipes: */ 38 /* for tracking smp allocation amongst pipes: */
38 mdp5_smp_state_t smp_state; 39 mdp5_smp_state_t smp_state;
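The mdp5 changes above hoist the iommu_ports list to the top of the file, record the MMU handle in struct mdp5_kms, and make mdp5_destroy() undo what init did (detach the ports, then destroy the MMU), while a failed attach now destroys the half-constructed MMU instead of leaking it. A stand-alone toy sketch of that init/teardown pairing (not the msm code; the ops structure here is only a stand-in):

    /* Illustrative only: whatever init attaches, destroy detaches, and the
     * error path in init cleans up what was already constructed. */
    struct mmu;
    struct mmu_ops {
            int  (*attach)(struct mmu *m);
            void (*detach)(struct mmu *m);
            void (*destroy)(struct mmu *m);
    };
    struct mmu { const struct mmu_ops *ops; };
    struct kms { struct mmu *mmu; };

    static int kms_init(struct kms *kms, struct mmu *mmu)
    {
            if (mmu && mmu->ops->attach(mmu)) {
                    mmu->ops->destroy(mmu);   /* don't leak on the error path */
                    return -1;
            }
            kms->mmu = mmu;                   /* remembered for teardown */
            return 0;
    }

    static void kms_destroy(struct kms *kms)
    {
            struct mmu *mmu = kms->mmu;

            if (mmu) {
                    mmu->ops->detach(mmu);
                    mmu->ops->destroy(mmu);
            }
    }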
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 0d2562fb681e..9a5d87db5c23 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -159,7 +159,7 @@ static int msm_unload(struct drm_device *dev)
159static int get_mdp_ver(struct platform_device *pdev) 159static int get_mdp_ver(struct platform_device *pdev)
160{ 160{
161#ifdef CONFIG_OF 161#ifdef CONFIG_OF
162 const static struct of_device_id match_types[] = { { 162 static const struct of_device_id match_types[] = { {
163 .compatible = "qcom,mdss_mdp", 163 .compatible = "qcom,mdss_mdp",
164 .data = (void *)5, 164 .data = (void *)5,
165 }, { 165 }, {
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index a752ab83b810..5107fc4826bc 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -59,7 +59,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
59 struct drm_framebuffer *fb = NULL; 59 struct drm_framebuffer *fb = NULL;
60 struct fb_info *fbi = NULL; 60 struct fb_info *fbi = NULL;
61 struct drm_mode_fb_cmd2 mode_cmd = {0}; 61 struct drm_mode_fb_cmd2 mode_cmd = {0};
62 dma_addr_t paddr; 62 uint32_t paddr;
63 int ret, size; 63 int ret, size;
64 64
65 sizes->surface_bpp = 32; 65 sizes->surface_bpp = 32;
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index bb8026daebc9..690d7e7b6d1e 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -278,6 +278,7 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
278 uint32_t *iova) 278 uint32_t *iova)
279{ 279{
280 struct msm_gem_object *msm_obj = to_msm_bo(obj); 280 struct msm_gem_object *msm_obj = to_msm_bo(obj);
281 struct drm_device *dev = obj->dev;
281 int ret = 0; 282 int ret = 0;
282 283
283 if (!msm_obj->domain[id].iova) { 284 if (!msm_obj->domain[id].iova) {
@@ -285,6 +286,11 @@ int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
285 struct msm_mmu *mmu = priv->mmus[id]; 286 struct msm_mmu *mmu = priv->mmus[id];
286 struct page **pages = get_pages(obj); 287 struct page **pages = get_pages(obj);
287 288
289 if (!mmu) {
290 dev_err(dev->dev, "null MMU pointer\n");
291 return -EINVAL;
292 }
293
288 if (IS_ERR(pages)) 294 if (IS_ERR(pages))
289 return PTR_ERR(pages); 295 return PTR_ERR(pages);
290 296
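The msm_gem hunk above turns a missing per-id MMU into an explicit -EINVAL with a message rather than a NULL dereference later in the mapping path. In isolation it is just a defensive early return; a sketch with placeholder names, not the driver's:

    #include <linux/device.h>
    #include <linux/errno.h>

    /* Sketch: fail fast, with a message, when a required backend object was
     * never registered, instead of crashing deeper in the call chain. */
    static int map_for_scanout(struct device *dev, void *mmu)
    {
            if (!mmu) {
                    dev_err(dev, "null MMU pointer\n");
                    return -EINVAL;
            }
            /* ... look up the pages and program the translation here ... */
            return 0;
    }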
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 92b745986231..4b2ad9181edf 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -28,7 +28,7 @@ static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
28 unsigned long iova, int flags, void *arg) 28 unsigned long iova, int flags, void *arg)
29{ 29{
30 DBG("*** fault: iova=%08lx, flags=%d", iova, flags); 30 DBG("*** fault: iova=%08lx, flags=%d", iova, flags);
31 return 0; 31 return -ENOSYS;
32} 32}
33 33
34static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt) 34static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
@@ -40,8 +40,10 @@ static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
40 for (i = 0; i < cnt; i++) { 40 for (i = 0; i < cnt; i++) {
41 struct device *msm_iommu_get_ctx(const char *ctx_name); 41 struct device *msm_iommu_get_ctx(const char *ctx_name);
42 struct device *ctx = msm_iommu_get_ctx(names[i]); 42 struct device *ctx = msm_iommu_get_ctx(names[i]);
43 if (IS_ERR_OR_NULL(ctx)) 43 if (IS_ERR_OR_NULL(ctx)) {
44 dev_warn(dev->dev, "couldn't get %s context", names[i]);
44 continue; 45 continue;
46 }
45 ret = iommu_attach_device(iommu->domain, ctx); 47 ret = iommu_attach_device(iommu->domain, ctx);
46 if (ret) { 48 if (ret) {
47 dev_warn(dev->dev, "could not attach iommu to %s", names[i]); 49 dev_warn(dev->dev, "could not attach iommu to %s", names[i]);
@@ -52,6 +54,20 @@ static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
52 return 0; 54 return 0;
53} 55}
54 56
57static void msm_iommu_detach(struct msm_mmu *mmu, const char **names, int cnt)
58{
59 struct msm_iommu *iommu = to_msm_iommu(mmu);
60 int i;
61
62 for (i = 0; i < cnt; i++) {
63 struct device *msm_iommu_get_ctx(const char *ctx_name);
64 struct device *ctx = msm_iommu_get_ctx(names[i]);
65 if (IS_ERR_OR_NULL(ctx))
66 continue;
67 iommu_detach_device(iommu->domain, ctx);
68 }
69}
70
55static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova, 71static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
56 struct sg_table *sgt, unsigned len, int prot) 72 struct sg_table *sgt, unsigned len, int prot)
57{ 73{
@@ -110,7 +126,7 @@ static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova,
110 126
111 VERB("unmap[%d]: %08x(%x)", i, iova, bytes); 127 VERB("unmap[%d]: %08x(%x)", i, iova, bytes);
112 128
113 BUG_ON(!IS_ALIGNED(bytes, PAGE_SIZE)); 129 BUG_ON(!PAGE_ALIGNED(bytes));
114 130
115 da += bytes; 131 da += bytes;
116 } 132 }
@@ -127,6 +143,7 @@ static void msm_iommu_destroy(struct msm_mmu *mmu)
127 143
128static const struct msm_mmu_funcs funcs = { 144static const struct msm_mmu_funcs funcs = {
129 .attach = msm_iommu_attach, 145 .attach = msm_iommu_attach,
146 .detach = msm_iommu_detach,
130 .map = msm_iommu_map, 147 .map = msm_iommu_map,
131 .unmap = msm_iommu_unmap, 148 .unmap = msm_iommu_unmap,
132 .destroy = msm_iommu_destroy, 149 .destroy = msm_iommu_destroy,
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index 030324482b4a..21da6d154f71 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -22,6 +22,7 @@
22 22
23struct msm_mmu_funcs { 23struct msm_mmu_funcs {
24 int (*attach)(struct msm_mmu *mmu, const char **names, int cnt); 24 int (*attach)(struct msm_mmu *mmu, const char **names, int cnt);
25 void (*detach)(struct msm_mmu *mmu, const char **names, int cnt);
25 int (*map)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt, 26 int (*map)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
26 unsigned len, int prot); 27 unsigned len, int prot);
27 int (*unmap)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt, 28 int (*unmap)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
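With the msm_mmu.h change above the MMU interface gains a detach operation whose signature mirrors attach, so a backend can undo, for the same (names, cnt) pair, exactly what attach set up; the msm_iommu implementation above walks the same context list and calls iommu_detach_device() where attach called iommu_attach_device(), skipping contexts it cannot resolve in both directions. A generic sketch of that symmetry (resolve_ctx(), ctx_attach() and ctx_detach() are placeholders, not msm functions):

    struct ctx;
    struct ctx *resolve_ctx(const char *name);   /* assumed lookup helper */
    int  ctx_attach(struct ctx *c);
    void ctx_detach(struct ctx *c);

    static int attach_all(const char **names, int cnt)
    {
            int i, ret;

            for (i = 0; i < cnt; i++) {
                    struct ctx *c = resolve_ctx(names[i]);
                    if (!c)
                            continue;        /* skip what we cannot resolve */
                    ret = ctx_attach(c);
                    if (ret)
                            return ret;
            }
            return 0;
    }

    static void detach_all(const char **names, int cnt)
    {
            int i;

            for (i = 0; i < cnt; i++) {
                    struct ctx *c = resolve_ctx(names[i]);
                    if (!c)
                            continue;        /* mirror the attach-side skip */
                    ctx_detach(c);
            }
    }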
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
index 26e962b7e702..2283c442a10d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nv50.c
@@ -1516,11 +1516,11 @@ nv50_disp_intr_unk20_2(struct nv50_disp_priv *priv, int head)
1516 } 1516 }
1517 1517
1518 switch ((ctrl & 0x000f0000) >> 16) { 1518 switch ((ctrl & 0x000f0000) >> 16) {
1519 case 6: datarate = pclk * 30 / 8; break; 1519 case 6: datarate = pclk * 30; break;
1520 case 5: datarate = pclk * 24 / 8; break; 1520 case 5: datarate = pclk * 24; break;
1521 case 2: 1521 case 2:
1522 default: 1522 default:
1523 datarate = pclk * 18 / 8; 1523 datarate = pclk * 18;
1524 break; 1524 break;
1525 } 1525 }
1526 1526
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
index 48aa38a87e3f..fa30d8196f35 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/nvd0.c
@@ -1159,11 +1159,11 @@ nvd0_disp_intr_unk2_2(struct nv50_disp_priv *priv, int head)
1159 if (outp->info.type == DCB_OUTPUT_DP) { 1159 if (outp->info.type == DCB_OUTPUT_DP) {
1160 u32 sync = nv_rd32(priv, 0x660404 + (head * 0x300)); 1160 u32 sync = nv_rd32(priv, 0x660404 + (head * 0x300));
1161 switch ((sync & 0x000003c0) >> 6) { 1161 switch ((sync & 0x000003c0) >> 6) {
1162 case 6: pclk = pclk * 30 / 8; break; 1162 case 6: pclk = pclk * 30; break;
1163 case 5: pclk = pclk * 24 / 8; break; 1163 case 5: pclk = pclk * 24; break;
1164 case 2: 1164 case 2:
1165 default: 1165 default:
1166 pclk = pclk * 18 / 8; 1166 pclk = pclk * 18;
1167 break; 1167 break;
1168 } 1168 }
1169 1169
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c b/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c
index 52c299c3d300..eb2d7789555d 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/outpdp.c
@@ -34,7 +34,7 @@ nvkm_output_dp_train(struct nvkm_output *base, u32 datarate, bool wait)
34 struct nvkm_output_dp *outp = (void *)base; 34 struct nvkm_output_dp *outp = (void *)base;
35 bool retrain = true; 35 bool retrain = true;
36 u8 link[2], stat[3]; 36 u8 link[2], stat[3];
37 u32 rate; 37 u32 linkrate;
38 int ret, i; 38 int ret, i;
39 39
40 /* check that the link is trained at a high enough rate */ 40 /* check that the link is trained at a high enough rate */
@@ -44,8 +44,10 @@ nvkm_output_dp_train(struct nvkm_output *base, u32 datarate, bool wait)
44 goto done; 44 goto done;
45 } 45 }
46 46
47 rate = link[0] * 27000 * (link[1] & DPCD_LC01_LANE_COUNT_SET); 47 linkrate = link[0] * 27000 * (link[1] & DPCD_LC01_LANE_COUNT_SET);
48 if (rate < ((datarate / 8) * 10)) { 48 linkrate = (linkrate * 8) / 10; /* 8B/10B coding overhead */
49 datarate = (datarate + 9) / 10; /* -> decakilobits */
50 if (linkrate < datarate) {
49 DBG("link not trained at sufficient rate\n"); 51 DBG("link not trained at sufficient rate\n");
50 goto done; 52 goto done;
51 } 53 }
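The outpdp.c change above puts both sides of the retrain check into the same unit: link[0] (the DPCD link-bandwidth code, in 270 Mb/s steps) times 27000 times the lane count gives the raw link bandwidth in 10 kb/s units, which is then derated by 8/10 for 8B/10B coding, while the requested datarate (pixel clock times bits per pixel, from the nv50/nvd0 hunks above, i.e. kb/s) is rounded up into the same 10 kb/s unit. A small stand-alone worked example of the arithmetic; the mode and link configuration are assumptions, not values taken from the patch:

    #include <stdio.h>

    /* 1920x1080@60 has a pixel clock of roughly 148500 kHz; at 24 bpp the
     * stream needs 148500 * 24 = 3564000 kb/s, i.e. 356400 units of
     * 10 kb/s after the (x + 9) / 10 round-up.  A link trained at
     * 2.7 Gb/s per lane (bandwidth code 0x0a) over 4 lanes offers
     * 0x0a * 27000 * 4 = 1080000 such units raw, 864000 after the
     * 8B/10B correction -- comfortably enough, so no retrain. */
    int main(void)
    {
            unsigned int pclk = 148500;     /* kHz, assumed mode */
            unsigned int bpp = 24;
            unsigned int bw_code = 0x0a;    /* 2.7 Gb/s per lane */
            unsigned int lanes = 4;

            unsigned int datarate = (pclk * bpp + 9) / 10;  /* 10 kb/s units */
            unsigned int linkrate = bw_code * 27000 * lanes;

            linkrate = linkrate * 8 / 10;                   /* 8B/10B payload */
            printf("need %u, have %u -> %s\n", datarate, linkrate,
                   linkrate >= datarate ? "ok" : "retrain");
            return 0;
    }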
diff --git a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
index e1832778e8b6..7a1ebdfa9e1b 100644
--- a/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
+++ b/drivers/gpu/drm/nouveau/core/engine/disp/sornv50.c
@@ -87,6 +87,7 @@ nv50_sor_mthd(struct nouveau_object *object, u32 mthd, void *args, u32 size)
87 struct nvkm_output_dp *outpdp = (void *)outp; 87 struct nvkm_output_dp *outpdp = (void *)outp;
88 switch (data) { 88 switch (data) {
89 case NV94_DISP_SOR_DP_PWR_STATE_OFF: 89 case NV94_DISP_SOR_DP_PWR_STATE_OFF:
90 nouveau_event_put(outpdp->irq);
90 ((struct nvkm_output_dp_impl *)nv_oclass(outp)) 91 ((struct nvkm_output_dp_impl *)nv_oclass(outp))
91 ->lnk_pwr(outpdp, 0); 92 ->lnk_pwr(outpdp, 0);
92 atomic_set(&outpdp->lt.done, 0); 93 atomic_set(&outpdp->lt.done, 0);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h b/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h
index 0f57fcfe0bbf..2af9cfd2c60f 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramfuc.h
@@ -26,7 +26,7 @@ ramfuc_reg2(u32 addr1, u32 addr2)
26 }; 26 };
27} 27}
28 28
29static inline struct ramfuc_reg 29static noinline struct ramfuc_reg
30ramfuc_reg(u32 addr) 30ramfuc_reg(u32 addr)
31{ 31{
32 return ramfuc_reg2(addr, addr); 32 return ramfuc_reg2(addr, addr);
@@ -107,7 +107,7 @@ ramfuc_nsec(struct ramfuc *ram, u32 nsec)
107 107
108#define ram_init(s,p) ramfuc_init(&(s)->base, (p)) 108#define ram_init(s,p) ramfuc_init(&(s)->base, (p))
109#define ram_exec(s,e) ramfuc_exec(&(s)->base, (e)) 109#define ram_exec(s,e) ramfuc_exec(&(s)->base, (e))
110#define ram_have(s,r) ((s)->r_##r.addr != 0x000000) 110#define ram_have(s,r) ((s)->r_##r.addr[0] != 0x000000)
111#define ram_rd32(s,r) ramfuc_rd32(&(s)->base, &(s)->r_##r) 111#define ram_rd32(s,r) ramfuc_rd32(&(s)->base, &(s)->r_##r)
112#define ram_wr32(s,r,d) ramfuc_wr32(&(s)->base, &(s)->r_##r, (d)) 112#define ram_wr32(s,r,d) ramfuc_wr32(&(s)->base, &(s)->r_##r, (d))
113#define ram_nuke(s,r) ramfuc_nuke(&(s)->base, &(s)->r_##r) 113#define ram_nuke(s,r) ramfuc_nuke(&(s)->base, &(s)->r_##r)
diff --git a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
index 1ad3ea503133..c5b46e302319 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/fb/ramnve0.c
@@ -200,6 +200,7 @@ r1373f4_init(struct nve0_ramfuc *fuc)
200 /* (re)program mempll, if required */ 200 /* (re)program mempll, if required */
201 if (ram->mode == 2) { 201 if (ram->mode == 2) {
202 ram_mask(fuc, 0x1373f4, 0x00010000, 0x00000000); 202 ram_mask(fuc, 0x1373f4, 0x00010000, 0x00000000);
203 ram_mask(fuc, 0x132000, 0x80000000, 0x80000000);
203 ram_mask(fuc, 0x132000, 0x00000001, 0x00000000); 204 ram_mask(fuc, 0x132000, 0x00000001, 0x00000000);
204 ram_mask(fuc, 0x132004, 0x103fffff, mcoef); 205 ram_mask(fuc, 0x132004, 0x103fffff, mcoef);
205 ram_mask(fuc, 0x132000, 0x00000001, 0x00000001); 206 ram_mask(fuc, 0x132000, 0x00000001, 0x00000001);
diff --git a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
index cfde9eb44ad0..6212537b90c5 100644
--- a/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
+++ b/drivers/gpu/drm/nouveau/core/subdev/therm/temp.c
@@ -192,11 +192,11 @@ alarm_timer_callback(struct nouveau_alarm *alarm)
192 nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_shutdown, 192 nouveau_therm_threshold_hyst_polling(therm, &sensor->thrs_shutdown,
193 NOUVEAU_THERM_THRS_SHUTDOWN); 193 NOUVEAU_THERM_THRS_SHUTDOWN);
194 194
195 spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
196
195 /* schedule the next poll in one second */ 197 /* schedule the next poll in one second */
196 if (therm->temp_get(therm) >= 0 && list_empty(&alarm->head)) 198 if (therm->temp_get(therm) >= 0 && list_empty(&alarm->head))
197 ptimer->alarm(ptimer, 1000 * 1000 * 1000, alarm); 199 ptimer->alarm(ptimer, 1000000000ULL, alarm);
198
199 spin_unlock_irqrestore(&priv->sensor.alarm_program_lock, flags);
200} 200}
201 201
202void 202void
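The temp.c hunk above drops alarm_program_lock before re-arming the one-second polling alarm. ptimer->alarm() may run the alarm machinery (and potentially this very callback) synchronously if the requested time has already passed, so re-arming with the lock still held risks self-deadlock. The general shape of the fix as a kernel-style sketch; reschedule() stands in for whatever the callback re-arms:

    #include <linux/spinlock.h>

    /* Sketch of the pattern: do the work that needs the lock, drop it, and
     * only then re-arm anything that might itself want the same lock. */
    static void poll_callback(spinlock_t *lock, void (*reschedule)(void))
    {
            unsigned long flags;

            spin_lock_irqsave(lock, flags);
            /* ... evaluate thresholds under the lock ... */
            spin_unlock_irqrestore(lock, flags);

            reschedule();   /* safe: the lock is no longer held */
    }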
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index ddd83756b9a2..5425ffe3931d 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -652,12 +652,12 @@ int nouveau_pmops_resume(struct device *dev)
652 ret = nouveau_do_resume(drm_dev); 652 ret = nouveau_do_resume(drm_dev);
653 if (ret) 653 if (ret)
654 return ret; 654 return ret;
655 if (drm_dev->mode_config.num_crtc)
656 nouveau_fbcon_set_suspend(drm_dev, 0);
657 655
658 nouveau_fbcon_zfill_all(drm_dev); 656 if (drm_dev->mode_config.num_crtc) {
659 if (drm_dev->mode_config.num_crtc)
660 nouveau_display_resume(drm_dev); 657 nouveau_display_resume(drm_dev);
658 nouveau_fbcon_set_suspend(drm_dev, 0);
659 }
660
661 return 0; 661 return 0;
662} 662}
663 663
@@ -683,11 +683,12 @@ static int nouveau_pmops_thaw(struct device *dev)
683 ret = nouveau_do_resume(drm_dev); 683 ret = nouveau_do_resume(drm_dev);
684 if (ret) 684 if (ret)
685 return ret; 685 return ret;
686 if (drm_dev->mode_config.num_crtc) 686
687 nouveau_fbcon_set_suspend(drm_dev, 0); 687 if (drm_dev->mode_config.num_crtc) {
688 nouveau_fbcon_zfill_all(drm_dev);
689 if (drm_dev->mode_config.num_crtc)
690 nouveau_display_resume(drm_dev); 688 nouveau_display_resume(drm_dev);
689 nouveau_fbcon_set_suspend(drm_dev, 0);
690 }
691
691 return 0; 692 return 0;
692} 693}
693 694
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.c b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
index 64a42cfd3717..191665ee7f52 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.c
@@ -531,17 +531,10 @@ nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
531 if (state == 1) 531 if (state == 1)
532 nouveau_fbcon_save_disable_accel(dev); 532 nouveau_fbcon_save_disable_accel(dev);
533 fb_set_suspend(drm->fbcon->helper.fbdev, state); 533 fb_set_suspend(drm->fbcon->helper.fbdev, state);
534 if (state == 0) 534 if (state == 0) {
535 nouveau_fbcon_restore_accel(dev); 535 nouveau_fbcon_restore_accel(dev);
536 nouveau_fbcon_zfill(dev, drm->fbcon);
537 }
536 console_unlock(); 538 console_unlock();
537 } 539 }
538} 540}
539
540void
541nouveau_fbcon_zfill_all(struct drm_device *dev)
542{
543 struct nouveau_drm *drm = nouveau_drm(dev);
544 if (drm->fbcon) {
545 nouveau_fbcon_zfill(dev, drm->fbcon);
546 }
547}
diff --git a/drivers/gpu/drm/nouveau/nouveau_fbcon.h b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
index fdfc0c94fbcc..fcff797d2084 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fbcon.h
+++ b/drivers/gpu/drm/nouveau/nouveau_fbcon.h
@@ -61,7 +61,6 @@ void nouveau_fbcon_gpu_lockup(struct fb_info *info);
61int nouveau_fbcon_init(struct drm_device *dev); 61int nouveau_fbcon_init(struct drm_device *dev);
62void nouveau_fbcon_fini(struct drm_device *dev); 62void nouveau_fbcon_fini(struct drm_device *dev);
63void nouveau_fbcon_set_suspend(struct drm_device *dev, int state); 63void nouveau_fbcon_set_suspend(struct drm_device *dev, int state);
64void nouveau_fbcon_zfill_all(struct drm_device *dev);
65void nouveau_fbcon_save_disable_accel(struct drm_device *dev); 64void nouveau_fbcon_save_disable_accel(struct drm_device *dev);
66void nouveau_fbcon_restore_accel(struct drm_device *dev); 65void nouveau_fbcon_restore_accel(struct drm_device *dev);
67 66
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index afdf607df3e6..4c534b7b04da 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -1741,7 +1741,8 @@ nv50_sor_dpms(struct drm_encoder *encoder, int mode)
1741 } 1741 }
1742 } 1742 }
1743 1743
1744 mthd = (ffs(nv_encoder->dcb->sorconf.link) - 1) << 2; 1744 mthd = (ffs(nv_encoder->dcb->heads) - 1) << 3;
1745 mthd |= (ffs(nv_encoder->dcb->sorconf.link) - 1) << 2;
1745 mthd |= nv_encoder->or; 1746 mthd |= nv_encoder->or;
1746 1747
1747 if (nv_encoder->dcb->type == DCB_OUTPUT_DP) { 1748 if (nv_encoder->dcb->type == DCB_OUTPUT_DP) {
diff --git a/drivers/gpu/drm/qxl/qxl_irq.c b/drivers/gpu/drm/qxl/qxl_irq.c
index 34d6a85e9023..0bf1e20c6e44 100644
--- a/drivers/gpu/drm/qxl/qxl_irq.c
+++ b/drivers/gpu/drm/qxl/qxl_irq.c
@@ -33,6 +33,9 @@ irqreturn_t qxl_irq_handler(int irq, void *arg)
33 33
34 pending = xchg(&qdev->ram_header->int_pending, 0); 34 pending = xchg(&qdev->ram_header->int_pending, 0);
35 35
36 if (!pending)
37 return IRQ_NONE;
38
36 atomic_inc(&qdev->irq_received); 39 atomic_inc(&qdev->irq_received);
37 40
38 if (pending & QXL_INTERRUPT_DISPLAY) { 41 if (pending & QXL_INTERRUPT_DISPLAY) {
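The qxl hunk above makes the interrupt handler report IRQ_NONE when the device had nothing pending: on a shared line that tells the IRQ core the event belonged to another device, and the driver no longer bumps its received-interrupt counter for interrupts that were not its own. A minimal sketch of that handler shape; read_and_clear_pending() is a placeholder for however a device acknowledges its status:

    #include <linux/interrupt.h>

    static u32 read_and_clear_pending(void *dev);   /* device-specific, assumed */

    static irqreturn_t example_irq_handler(int irq, void *arg)
    {
            u32 pending = read_and_clear_pending(arg);

            if (!pending)
                    return IRQ_NONE;        /* not ours: let other sharers run */

            /* ... handle the bits that are set ... */
            return IRQ_HANDLED;
    }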
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index a03c73411a56..30d242b25078 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1414,8 +1414,8 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
1414 tmp &= ~EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN; 1414 tmp &= ~EVERGREEN_GRPH_SURFACE_UPDATE_H_RETRACE_EN;
1415 WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp); 1415 WREG32(EVERGREEN_GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
1416 1416
1417 /* set pageflip to happen anywhere in vblank interval */ 1417 /* set pageflip to happen only at start of vblank interval (front porch) */
1418 WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0); 1418 WREG32(EVERGREEN_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3);
1419 1419
1420 if (!atomic && fb && fb != crtc->primary->fb) { 1420 if (!atomic && fb && fb != crtc->primary->fb) {
1421 radeon_fb = to_radeon_framebuffer(fb); 1421 radeon_fb = to_radeon_framebuffer(fb);
@@ -1614,8 +1614,8 @@ static int avivo_crtc_do_set_base(struct drm_crtc *crtc,
1614 tmp &= ~AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN; 1614 tmp &= ~AVIVO_D1GRPH_SURFACE_UPDATE_H_RETRACE_EN;
1615 WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp); 1615 WREG32(AVIVO_D1GRPH_FLIP_CONTROL + radeon_crtc->crtc_offset, tmp);
1616 1616
1617 /* set pageflip to happen anywhere in vblank interval */ 1617 /* set pageflip to happen only at start of vblank interval (front porch) */
1618 WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 0); 1618 WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + radeon_crtc->crtc_offset, 3);
1619 1619
1620 if (!atomic && fb && fb != crtc->primary->fb) { 1620 if (!atomic && fb && fb != crtc->primary->fb) {
1621 radeon_fb = to_radeon_framebuffer(fb); 1621 radeon_fb = to_radeon_framebuffer(fb);
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index c5b1f2da3954..b1e11f8434e2 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -127,7 +127,7 @@ static int radeon_process_aux_ch(struct radeon_i2c_chan *chan,
127 /* flags not zero */ 127 /* flags not zero */
128 if (args.v1.ucReplyStatus == 2) { 128 if (args.v1.ucReplyStatus == 2) {
129 DRM_DEBUG_KMS("dp_aux_ch flags not zero\n"); 129 DRM_DEBUG_KMS("dp_aux_ch flags not zero\n");
130 r = -EBUSY; 130 r = -EIO;
131 goto done; 131 goto done;
132 } 132 }
133 133
@@ -403,16 +403,18 @@ bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
403{ 403{
404 struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv; 404 struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
405 u8 msg[DP_DPCD_SIZE]; 405 u8 msg[DP_DPCD_SIZE];
406 int ret, i; 406 int ret;
407
408 char dpcd_hex_dump[DP_DPCD_SIZE * 3];
407 409
408 ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg, 410 ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg,
409 DP_DPCD_SIZE); 411 DP_DPCD_SIZE);
410 if (ret > 0) { 412 if (ret > 0) {
411 memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE); 413 memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
412 DRM_DEBUG_KMS("DPCD: "); 414
413 for (i = 0; i < DP_DPCD_SIZE; i++) 415 hex_dump_to_buffer(dig_connector->dpcd, sizeof(dig_connector->dpcd),
414 DRM_DEBUG_KMS("%02x ", msg[i]); 416 32, 1, dpcd_hex_dump, sizeof(dpcd_hex_dump), false);
415 DRM_DEBUG_KMS("\n"); 417 DRM_DEBUG_KMS("DPCD: %s\n", dpcd_hex_dump);
416 418
417 radeon_dp_probe_oui(radeon_connector); 419 radeon_dp_probe_oui(radeon_connector);
418 420
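The atombios_dp.c hunk above replaces the per-byte DPCD debug loop, where every byte produced its own prefixed DRM_DEBUG_KMS line, with a single line formatted by hex_dump_to_buffer(); sizing the scratch buffer at three characters per input byte leaves room for two hex digits, a space and the terminator. A hedged usage sketch with made-up data (not the radeon code):

    #include <linux/kernel.h>
    #include <linux/printk.h>

    /* Sketch: format a small binary blob as one "xx xx xx ..." debug line.
     * rowsize must be 16 or 32; with 16-byte rows a 48-byte line buffer
     * is exactly enough for one full row plus the trailing NUL. */
    static void dump_caps(const u8 *caps, size_t len)
    {
            char line[16 * 3];

            hex_dump_to_buffer(caps, min(len, sizeof(line) / 3), 16, 1,
                               line, sizeof(line), false);
            pr_debug("caps: %s\n", line);
    }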
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index 2b2908440644..7d68203a3737 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -183,7 +183,6 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
183 struct backlight_properties props; 183 struct backlight_properties props;
184 struct radeon_backlight_privdata *pdata; 184 struct radeon_backlight_privdata *pdata;
185 struct radeon_encoder_atom_dig *dig; 185 struct radeon_encoder_atom_dig *dig;
186 u8 backlight_level;
187 char bl_name[16]; 186 char bl_name[16];
188 187
189 /* Mac laptops with multiple GPUs use the gmux driver for backlight 188 /* Mac laptops with multiple GPUs use the gmux driver for backlight
@@ -222,12 +221,17 @@ void radeon_atom_backlight_init(struct radeon_encoder *radeon_encoder,
222 221
223 pdata->encoder = radeon_encoder; 222 pdata->encoder = radeon_encoder;
224 223
225 backlight_level = radeon_atom_get_backlight_level_from_reg(rdev);
226
227 dig = radeon_encoder->enc_priv; 224 dig = radeon_encoder->enc_priv;
228 dig->bl_dev = bd; 225 dig->bl_dev = bd;
229 226
230 bd->props.brightness = radeon_atom_backlight_get_brightness(bd); 227 bd->props.brightness = radeon_atom_backlight_get_brightness(bd);
228 /* Set a reasonable default here if the level is 0 otherwise
229 * fbdev will attempt to turn the backlight on after console
230 * unblanking and it will try and restore 0 which turns the backlight
231 * off again.
232 */
233 if (bd->props.brightness == 0)
234 bd->props.brightness = RADEON_MAX_BL_LEVEL;
231 bd->props.power = FB_BLANK_UNBLANK; 235 bd->props.power = FB_BLANK_UNBLANK;
232 backlight_update_status(bd); 236 backlight_update_status(bd);
233 237
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index 10dae4106c08..584090ac3eb9 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -1179,7 +1179,7 @@ static int ci_stop_dpm(struct radeon_device *rdev)
1179 tmp &= ~GLOBAL_PWRMGT_EN; 1179 tmp &= ~GLOBAL_PWRMGT_EN;
1180 WREG32_SMC(GENERAL_PWRMGT, tmp); 1180 WREG32_SMC(GENERAL_PWRMGT, tmp);
1181 1181
1182 tmp = RREG32(SCLK_PWRMGT_CNTL); 1182 tmp = RREG32_SMC(SCLK_PWRMGT_CNTL);
1183 tmp &= ~DYNAMIC_PM_EN; 1183 tmp &= ~DYNAMIC_PM_EN;
1184 WREG32_SMC(SCLK_PWRMGT_CNTL, tmp); 1184 WREG32_SMC(SCLK_PWRMGT_CNTL, tmp);
1185 1185
diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c
index dcd4518a9b08..0b2471107137 100644
--- a/drivers/gpu/drm/radeon/cik.c
+++ b/drivers/gpu/drm/radeon/cik.c
@@ -7676,14 +7676,16 @@ restart_ih:
7676 addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR); 7676 addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
7677 status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS); 7677 status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
7678 mc_client = RREG32(VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT); 7678 mc_client = RREG32(VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
7679 /* reset addr and status */
7680 WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
7681 if (addr == 0x0 && status == 0x0)
7682 break;
7679 dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data); 7683 dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
7680 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", 7684 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
7681 addr); 7685 addr);
7682 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", 7686 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
7683 status); 7687 status);
7684 cik_vm_decode_fault(rdev, status, addr, mc_client); 7688 cik_vm_decode_fault(rdev, status, addr, mc_client);
7685 /* reset addr and status */
7686 WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
7687 break; 7689 break;
7688 case 167: /* VCE */ 7690 case 167: /* VCE */
7689 DRM_DEBUG("IH: VCE int: 0x%08x\n", src_data); 7691 DRM_DEBUG("IH: VCE int: 0x%08x\n", src_data);
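The cik.c hunk above (and the matching evergreen.c change further down) re-orders the VM protection-fault handling: the fault registers are re-armed via VM_CONTEXT1_CNTL2 before anything is printed, and an interrupt that reads back address and status as zero is treated as a stale duplicate and dropped instead of logging a bogus fault at address 0. The control flow in isolation, as a stand-alone sketch with mocked register state:

    #include <stdio.h>

    /* Mocked latch registers standing in for the RREG32/WREG32_P accesses. */
    static unsigned int fault_addr, fault_status;

    static void handle_vm_fault(void)
    {
            unsigned int addr = fault_addr;
            unsigned int status = fault_status;

            /* Re-arm first, so a new fault can be latched while the old one
             * is still being reported. */
            fault_addr = 0;
            fault_status = 0;

            /* A second interrupt for an already-handled fault reads back as
             * all zeroes; drop it rather than reporting a fault at 0. */
            if (addr == 0 && status == 0)
                    return;

            printf("GPU fault: addr 0x%08x status 0x%08x\n", addr, status);
    }

    int main(void)
    {
            fault_addr = 0x1000;
            fault_status = 0x3;
            handle_vm_fault();      /* reports the fault and re-arms */
            handle_vm_fault();      /* duplicate: silently ignored */
            return 0;
    }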
diff --git a/drivers/gpu/drm/radeon/cikd.h b/drivers/gpu/drm/radeon/cikd.h
index ae88660f34ea..0c6e1b55d968 100644
--- a/drivers/gpu/drm/radeon/cikd.h
+++ b/drivers/gpu/drm/radeon/cikd.h
@@ -1752,12 +1752,12 @@
1752#define EOP_TC_WB_ACTION_EN (1 << 15) /* L2 */ 1752#define EOP_TC_WB_ACTION_EN (1 << 15) /* L2 */
1753#define EOP_TCL1_ACTION_EN (1 << 16) 1753#define EOP_TCL1_ACTION_EN (1 << 16)
1754#define EOP_TC_ACTION_EN (1 << 17) /* L2 */ 1754#define EOP_TC_ACTION_EN (1 << 17) /* L2 */
1755#define EOP_TCL2_VOLATILE (1 << 24)
1755#define EOP_CACHE_POLICY(x) ((x) << 25) 1756#define EOP_CACHE_POLICY(x) ((x) << 25)
1756 /* 0 - LRU 1757 /* 0 - LRU
1757 * 1 - Stream 1758 * 1 - Stream
1758 * 2 - Bypass 1759 * 2 - Bypass
1759 */ 1760 */
1760#define EOP_TCL2_VOLATILE (1 << 27)
1761#define DATA_SEL(x) ((x) << 29) 1761#define DATA_SEL(x) ((x) << 29)
1762 /* 0 - discard 1762 /* 0 - discard
1763 * 1 - send low 32bit data 1763 * 1 - send low 32bit data
diff --git a/drivers/gpu/drm/radeon/cypress_dpm.c b/drivers/gpu/drm/radeon/cypress_dpm.c
index 5a9a5f4d7888..47d31e915758 100644
--- a/drivers/gpu/drm/radeon/cypress_dpm.c
+++ b/drivers/gpu/drm/radeon/cypress_dpm.c
@@ -1551,7 +1551,7 @@ int cypress_populate_smc_voltage_tables(struct radeon_device *rdev,
1551 1551
1552 table->voltageMaskTable.highMask[RV770_SMC_VOLTAGEMASK_VDDCI] = 0; 1552 table->voltageMaskTable.highMask[RV770_SMC_VOLTAGEMASK_VDDCI] = 0;
1553 table->voltageMaskTable.lowMask[RV770_SMC_VOLTAGEMASK_VDDCI] = 1553 table->voltageMaskTable.lowMask[RV770_SMC_VOLTAGEMASK_VDDCI] =
1554 cpu_to_be32(eg_pi->vddc_voltage_table.mask_low); 1554 cpu_to_be32(eg_pi->vddci_voltage_table.mask_low);
1555 } 1555 }
1556 1556
1557 return 0; 1557 return 0;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index e2f605224e8c..250bac3935a4 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -189,7 +189,7 @@ static const u32 evergreen_golden_registers[] =
189 0x8c1c, 0xffffffff, 0x00001010, 189 0x8c1c, 0xffffffff, 0x00001010,
190 0x28350, 0xffffffff, 0x00000000, 190 0x28350, 0xffffffff, 0x00000000,
191 0xa008, 0xffffffff, 0x00010000, 191 0xa008, 0xffffffff, 0x00010000,
192 0x5cc, 0xffffffff, 0x00000001, 192 0x5c4, 0xffffffff, 0x00000001,
193 0x9508, 0xffffffff, 0x00000002, 193 0x9508, 0xffffffff, 0x00000002,
194 0x913c, 0x0000000f, 0x0000000a 194 0x913c, 0x0000000f, 0x0000000a
195}; 195};
@@ -476,7 +476,7 @@ static const u32 cedar_golden_registers[] =
476 0x8c1c, 0xffffffff, 0x00001010, 476 0x8c1c, 0xffffffff, 0x00001010,
477 0x28350, 0xffffffff, 0x00000000, 477 0x28350, 0xffffffff, 0x00000000,
478 0xa008, 0xffffffff, 0x00010000, 478 0xa008, 0xffffffff, 0x00010000,
479 0x5cc, 0xffffffff, 0x00000001, 479 0x5c4, 0xffffffff, 0x00000001,
480 0x9508, 0xffffffff, 0x00000002 480 0x9508, 0xffffffff, 0x00000002
481}; 481};
482 482
@@ -635,7 +635,7 @@ static const u32 juniper_mgcg_init[] =
635static const u32 supersumo_golden_registers[] = 635static const u32 supersumo_golden_registers[] =
636{ 636{
637 0x5eb4, 0xffffffff, 0x00000002, 637 0x5eb4, 0xffffffff, 0x00000002,
638 0x5cc, 0xffffffff, 0x00000001, 638 0x5c4, 0xffffffff, 0x00000001,
639 0x7030, 0xffffffff, 0x00000011, 639 0x7030, 0xffffffff, 0x00000011,
640 0x7c30, 0xffffffff, 0x00000011, 640 0x7c30, 0xffffffff, 0x00000011,
641 0x6104, 0x01000300, 0x00000000, 641 0x6104, 0x01000300, 0x00000000,
@@ -719,7 +719,7 @@ static const u32 sumo_golden_registers[] =
719static const u32 wrestler_golden_registers[] = 719static const u32 wrestler_golden_registers[] =
720{ 720{
721 0x5eb4, 0xffffffff, 0x00000002, 721 0x5eb4, 0xffffffff, 0x00000002,
722 0x5cc, 0xffffffff, 0x00000001, 722 0x5c4, 0xffffffff, 0x00000001,
723 0x7030, 0xffffffff, 0x00000011, 723 0x7030, 0xffffffff, 0x00000011,
724 0x7c30, 0xffffffff, 0x00000011, 724 0x7c30, 0xffffffff, 0x00000011,
725 0x6104, 0x01000300, 0x00000000, 725 0x6104, 0x01000300, 0x00000000,
@@ -2642,8 +2642,9 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
2642 for (i = 0; i < rdev->num_crtc; i++) { 2642 for (i = 0; i < rdev->num_crtc; i++) {
2643 if (save->crtc_enabled[i]) { 2643 if (save->crtc_enabled[i]) {
2644 tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]); 2644 tmp = RREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i]);
2645 if ((tmp & 0x3) != 0) { 2645 if ((tmp & 0x7) != 3) {
2646 tmp &= ~0x3; 2646 tmp &= ~0x7;
2647 tmp |= 0x3;
2647 WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp); 2648 WREG32(EVERGREEN_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
2648 } 2649 }
2649 tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]); 2650 tmp = RREG32(EVERGREEN_GRPH_UPDATE + crtc_offsets[i]);
@@ -5066,14 +5067,16 @@ restart_ih:
5066 case 147: 5067 case 147:
5067 addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR); 5068 addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
5068 status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS); 5069 status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
5070 /* reset addr and status */
5071 WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
5072 if (addr == 0x0 && status == 0x0)
5073 break;
5069 dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data); 5074 dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
5070 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", 5075 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
5071 addr); 5076 addr);
5072 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", 5077 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
5073 status); 5078 status);
5074 cayman_vm_decode_fault(rdev, status, addr); 5079 cayman_vm_decode_fault(rdev, status, addr);
5075 /* reset addr and status */
5076 WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
5077 break; 5080 break;
5078 case 176: /* CP_INT in ring buffer */ 5081 case 176: /* CP_INT in ring buffer */
5079 case 177: /* CP_INT in IB1 */ 5082 case 177: /* CP_INT in IB1 */
diff --git a/drivers/gpu/drm/radeon/evergreen_reg.h b/drivers/gpu/drm/radeon/evergreen_reg.h
index 333d143fca2c..23bff590fb6e 100644
--- a/drivers/gpu/drm/radeon/evergreen_reg.h
+++ b/drivers/gpu/drm/radeon/evergreen_reg.h
@@ -239,7 +239,6 @@
239# define EVERGREEN_CRTC_V_BLANK (1 << 0) 239# define EVERGREEN_CRTC_V_BLANK (1 << 0)
240#define EVERGREEN_CRTC_STATUS_POSITION 0x6e90 240#define EVERGREEN_CRTC_STATUS_POSITION 0x6e90
241#define EVERGREEN_CRTC_STATUS_HV_COUNT 0x6ea0 241#define EVERGREEN_CRTC_STATUS_HV_COUNT 0x6ea0
242#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
243#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4 242#define EVERGREEN_CRTC_UPDATE_LOCK 0x6ed4
244#define EVERGREEN_MASTER_UPDATE_LOCK 0x6ef4 243#define EVERGREEN_MASTER_UPDATE_LOCK 0x6ef4
245#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8 244#define EVERGREEN_MASTER_UPDATE_MODE 0x6ef8
diff --git a/drivers/gpu/drm/radeon/kv_dpm.c b/drivers/gpu/drm/radeon/kv_dpm.c
index 3f6e817d97ee..9ef8c38f2d66 100644
--- a/drivers/gpu/drm/radeon/kv_dpm.c
+++ b/drivers/gpu/drm/radeon/kv_dpm.c
@@ -2726,7 +2726,7 @@ int kv_dpm_init(struct radeon_device *rdev)
2726 pi->caps_sclk_ds = true; 2726 pi->caps_sclk_ds = true;
2727 pi->enable_auto_thermal_throttling = true; 2727 pi->enable_auto_thermal_throttling = true;
2728 pi->disable_nb_ps3_in_battery = false; 2728 pi->disable_nb_ps3_in_battery = false;
2729 pi->bapm_enable = false; 2729 pi->bapm_enable = true;
2730 pi->voltage_drop_t = 0; 2730 pi->voltage_drop_t = 0;
2731 pi->caps_sclk_throttle_low_notification = false; 2731 pi->caps_sclk_throttle_low_notification = false;
2732 pi->caps_fps = false; /* true? */ 2732 pi->caps_fps = false; /* true? */
diff --git a/drivers/gpu/drm/radeon/ni_dpm.c b/drivers/gpu/drm/radeon/ni_dpm.c
index 004c931606c4..01fc4888e6fe 100644
--- a/drivers/gpu/drm/radeon/ni_dpm.c
+++ b/drivers/gpu/drm/radeon/ni_dpm.c
@@ -1315,7 +1315,7 @@ static void ni_populate_smc_voltage_tables(struct radeon_device *rdev,
1315 1315
1316 table->voltageMaskTable.highMask[NISLANDS_SMC_VOLTAGEMASK_VDDCI] = 0; 1316 table->voltageMaskTable.highMask[NISLANDS_SMC_VOLTAGEMASK_VDDCI] = 0;
1317 table->voltageMaskTable.lowMask[NISLANDS_SMC_VOLTAGEMASK_VDDCI] = 1317 table->voltageMaskTable.lowMask[NISLANDS_SMC_VOLTAGEMASK_VDDCI] =
1318 cpu_to_be32(eg_pi->vddc_voltage_table.mask_low); 1318 cpu_to_be32(eg_pi->vddci_voltage_table.mask_low);
1319 } 1319 }
1320} 1320}
1321 1321
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 4b0bbf88d5c0..b7204500a9a6 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -102,6 +102,7 @@ extern int radeon_runtime_pm;
102extern int radeon_hard_reset; 102extern int radeon_hard_reset;
103extern int radeon_vm_size; 103extern int radeon_vm_size;
104extern int radeon_vm_block_size; 104extern int radeon_vm_block_size;
105extern int radeon_deep_color;
105 106
106/* 107/*
107 * Copy from radeon_drv.h so we don't have to include both and have conflicting 108 * Copy from radeon_drv.h so we don't have to include both and have conflicting
@@ -683,10 +684,9 @@ struct radeon_flip_work {
683 struct work_struct unpin_work; 684 struct work_struct unpin_work;
684 struct radeon_device *rdev; 685 struct radeon_device *rdev;
685 int crtc_id; 686 int crtc_id;
686 struct drm_framebuffer *fb; 687 uint64_t base;
687 struct drm_pending_vblank_event *event; 688 struct drm_pending_vblank_event *event;
688 struct radeon_bo *old_rbo; 689 struct radeon_bo *old_rbo;
689 struct radeon_bo *new_rbo;
690 struct radeon_fence *fence; 690 struct radeon_fence *fence;
691}; 691};
692 692
@@ -749,10 +749,6 @@ union radeon_irq_stat_regs {
749 struct cik_irq_stat_regs cik; 749 struct cik_irq_stat_regs cik;
750}; 750};
751 751
752#define RADEON_MAX_HPD_PINS 7
753#define RADEON_MAX_CRTCS 6
754#define RADEON_MAX_AFMT_BLOCKS 7
755
756struct radeon_irq { 752struct radeon_irq {
757 bool installed; 753 bool installed;
758 spinlock_t lock; 754 spinlock_t lock;
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 30844814c25a..173f378428a9 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -1227,11 +1227,19 @@ bool radeon_atom_get_clock_info(struct drm_device *dev)
1227 rdev->clock.default_dispclk = 1227 rdev->clock.default_dispclk =
1228 le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq); 1228 le32_to_cpu(firmware_info->info_21.ulDefaultDispEngineClkFreq);
1229 if (rdev->clock.default_dispclk == 0) { 1229 if (rdev->clock.default_dispclk == 0) {
1230 if (ASIC_IS_DCE5(rdev)) 1230 if (ASIC_IS_DCE6(rdev))
1231 rdev->clock.default_dispclk = 60000; /* 600 Mhz */
1232 else if (ASIC_IS_DCE5(rdev))
1231 rdev->clock.default_dispclk = 54000; /* 540 Mhz */ 1233 rdev->clock.default_dispclk = 54000; /* 540 Mhz */
1232 else 1234 else
1233 rdev->clock.default_dispclk = 60000; /* 600 Mhz */ 1235 rdev->clock.default_dispclk = 60000; /* 600 Mhz */
1234 } 1236 }
1237 /* set a reasonable default for DP */
1238 if (ASIC_IS_DCE6(rdev) && (rdev->clock.default_dispclk < 53900)) {
1239 DRM_INFO("Changing default dispclk from %dMhz to 600Mhz\n",
1240 rdev->clock.default_dispclk / 100);
1241 rdev->clock.default_dispclk = 60000;
1242 }
1235 rdev->clock.dp_extclk = 1243 rdev->clock.dp_extclk =
1236 le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq); 1244 le16_to_cpu(firmware_info->info_21.usUniphyDPModeExtClkFreq);
1237 rdev->clock.current_dispclk = rdev->clock.default_dispclk; 1245 rdev->clock.current_dispclk = rdev->clock.default_dispclk;
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 1b9177ed181f..44831197e82e 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -199,6 +199,9 @@ int radeon_get_monitor_bpc(struct drm_connector *connector)
199 } 199 }
200 } 200 }
201 201
202 if ((radeon_deep_color == 0) && (bpc > 8))
203 bpc = 8;
204
202 DRM_DEBUG("%s: Display bpc=%d, returned bpc=%d\n", 205 DRM_DEBUG("%s: Display bpc=%d, returned bpc=%d\n",
203 connector->name, connector->display_info.bpc, bpc); 206 connector->name, connector->display_info.bpc, bpc);
204 207
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 8fc362aa6a1a..bf25061c8ac4 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -285,7 +285,6 @@ static void radeon_unpin_work_func(struct work_struct *__work)
285void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id) 285void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id)
286{ 286{
287 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id]; 287 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[crtc_id];
288 struct radeon_flip_work *work;
289 unsigned long flags; 288 unsigned long flags;
290 u32 update_pending; 289 u32 update_pending;
291 int vpos, hpos; 290 int vpos, hpos;
@@ -295,8 +294,11 @@ void radeon_crtc_handle_vblank(struct radeon_device *rdev, int crtc_id)
295 return; 294 return;
296 295
297 spin_lock_irqsave(&rdev->ddev->event_lock, flags); 296 spin_lock_irqsave(&rdev->ddev->event_lock, flags);
298 work = radeon_crtc->flip_work; 297 if (radeon_crtc->flip_status != RADEON_FLIP_SUBMITTED) {
299 if (work == NULL) { 298 DRM_DEBUG_DRIVER("radeon_crtc->flip_status = %d != "
299 "RADEON_FLIP_SUBMITTED(%d)\n",
300 radeon_crtc->flip_status,
301 RADEON_FLIP_SUBMITTED);
300 spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); 302 spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
301 return; 303 return;
302 } 304 }
@@ -344,12 +346,17 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
344 346
345 spin_lock_irqsave(&rdev->ddev->event_lock, flags); 347 spin_lock_irqsave(&rdev->ddev->event_lock, flags);
346 work = radeon_crtc->flip_work; 348 work = radeon_crtc->flip_work;
347 if (work == NULL) { 349 if (radeon_crtc->flip_status != RADEON_FLIP_SUBMITTED) {
350 DRM_DEBUG_DRIVER("radeon_crtc->flip_status = %d != "
351 "RADEON_FLIP_SUBMITTED(%d)\n",
352 radeon_crtc->flip_status,
353 RADEON_FLIP_SUBMITTED);
348 spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); 354 spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
349 return; 355 return;
350 } 356 }
351 357
352 /* Pageflip completed. Clean up. */ 358 /* Pageflip completed. Clean up. */
359 radeon_crtc->flip_status = RADEON_FLIP_NONE;
353 radeon_crtc->flip_work = NULL; 360 radeon_crtc->flip_work = NULL;
354 361
355 /* wakeup userspace */ 362 /* wakeup userspace */
@@ -359,7 +366,6 @@ void radeon_crtc_handle_flip(struct radeon_device *rdev, int crtc_id)
359 spin_unlock_irqrestore(&rdev->ddev->event_lock, flags); 366 spin_unlock_irqrestore(&rdev->ddev->event_lock, flags);
360 367
361 drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id); 368 drm_vblank_put(rdev->ddev, radeon_crtc->crtc_id);
362 radeon_fence_unref(&work->fence);
363 radeon_irq_kms_pflip_irq_put(rdev, work->crtc_id); 369 radeon_irq_kms_pflip_irq_put(rdev, work->crtc_id);
364 queue_work(radeon_crtc->flip_queue, &work->unpin_work); 370 queue_work(radeon_crtc->flip_queue, &work->unpin_work);
365} 371}
@@ -379,51 +385,108 @@ static void radeon_flip_work_func(struct work_struct *__work)
379 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[work->crtc_id]; 385 struct radeon_crtc *radeon_crtc = rdev->mode_info.crtcs[work->crtc_id];
380 386
381 struct drm_crtc *crtc = &radeon_crtc->base; 387 struct drm_crtc *crtc = &radeon_crtc->base;
382 struct drm_framebuffer *fb = work->fb;
383
384 uint32_t tiling_flags, pitch_pixels;
385 uint64_t base;
386
387 unsigned long flags; 388 unsigned long flags;
388 int r; 389 int r;
389 390
390 down_read(&rdev->exclusive_lock); 391 down_read(&rdev->exclusive_lock);
391 while (work->fence) { 392 if (work->fence) {
392 r = radeon_fence_wait(work->fence, false); 393 r = radeon_fence_wait(work->fence, false);
393 if (r == -EDEADLK) { 394 if (r == -EDEADLK) {
394 up_read(&rdev->exclusive_lock); 395 up_read(&rdev->exclusive_lock);
395 r = radeon_gpu_reset(rdev); 396 r = radeon_gpu_reset(rdev);
396 down_read(&rdev->exclusive_lock); 397 down_read(&rdev->exclusive_lock);
397 } 398 }
399 if (r)
400 DRM_ERROR("failed to wait on page flip fence (%d)!\n", r);
398 401
399 if (r) { 402 /* We continue with the page flip even if we failed to wait on
400 DRM_ERROR("failed to wait on page flip fence (%d)!\n", 403 * the fence, otherwise the DRM core and userspace will be
401 r); 404 * confused about which BO the CRTC is scanning out
402 goto cleanup; 405 */
403 } else 406
404 radeon_fence_unref(&work->fence); 407 radeon_fence_unref(&work->fence);
405 } 408 }
406 409
410 /* We borrow the event spin lock for protecting flip_status */
411 spin_lock_irqsave(&crtc->dev->event_lock, flags);
412
413 /* set the proper interrupt */
414 radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id);
415
416 /* do the flip (mmio) */
417 radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base);
418
419 radeon_crtc->flip_status = RADEON_FLIP_SUBMITTED;
420 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
421 up_read(&rdev->exclusive_lock);
422}
423
424static int radeon_crtc_page_flip(struct drm_crtc *crtc,
425 struct drm_framebuffer *fb,
426 struct drm_pending_vblank_event *event,
427 uint32_t page_flip_flags)
428{
429 struct drm_device *dev = crtc->dev;
430 struct radeon_device *rdev = dev->dev_private;
431 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
432 struct radeon_framebuffer *old_radeon_fb;
433 struct radeon_framebuffer *new_radeon_fb;
434 struct drm_gem_object *obj;
435 struct radeon_flip_work *work;
436 struct radeon_bo *new_rbo;
437 uint32_t tiling_flags, pitch_pixels;
438 uint64_t base;
439 unsigned long flags;
440 int r;
441
442 work = kzalloc(sizeof *work, GFP_KERNEL);
443 if (work == NULL)
444 return -ENOMEM;
445
446 INIT_WORK(&work->flip_work, radeon_flip_work_func);
447 INIT_WORK(&work->unpin_work, radeon_unpin_work_func);
448
449 work->rdev = rdev;
450 work->crtc_id = radeon_crtc->crtc_id;
451 work->event = event;
452
453 /* schedule unpin of the old buffer */
454 old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
455 obj = old_radeon_fb->obj;
456
457 /* take a reference to the old object */
458 drm_gem_object_reference(obj);
459 work->old_rbo = gem_to_radeon_bo(obj);
460
461 new_radeon_fb = to_radeon_framebuffer(fb);
462 obj = new_radeon_fb->obj;
463 new_rbo = gem_to_radeon_bo(obj);
464
465 spin_lock(&new_rbo->tbo.bdev->fence_lock);
466 if (new_rbo->tbo.sync_obj)
467 work->fence = radeon_fence_ref(new_rbo->tbo.sync_obj);
468 spin_unlock(&new_rbo->tbo.bdev->fence_lock);
469
407 /* pin the new buffer */ 470 /* pin the new buffer */
408 DRM_DEBUG_DRIVER("flip-ioctl() cur_fbo = %p, cur_bbo = %p\n", 471 DRM_DEBUG_DRIVER("flip-ioctl() cur_rbo = %p, new_rbo = %p\n",
409 work->old_rbo, work->new_rbo); 472 work->old_rbo, new_rbo);
410 473
411 r = radeon_bo_reserve(work->new_rbo, false); 474 r = radeon_bo_reserve(new_rbo, false);
412 if (unlikely(r != 0)) { 475 if (unlikely(r != 0)) {
413 DRM_ERROR("failed to reserve new rbo buffer before flip\n"); 476 DRM_ERROR("failed to reserve new rbo buffer before flip\n");
414 goto cleanup; 477 goto cleanup;
415 } 478 }
416 /* Only 27 bit offset for legacy CRTC */ 479 /* Only 27 bit offset for legacy CRTC */
417 r = radeon_bo_pin_restricted(work->new_rbo, RADEON_GEM_DOMAIN_VRAM, 480 r = radeon_bo_pin_restricted(new_rbo, RADEON_GEM_DOMAIN_VRAM,
418 ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27, &base); 481 ASIC_IS_AVIVO(rdev) ? 0 : 1 << 27, &base);
419 if (unlikely(r != 0)) { 482 if (unlikely(r != 0)) {
420 radeon_bo_unreserve(work->new_rbo); 483 radeon_bo_unreserve(new_rbo);
421 r = -EINVAL; 484 r = -EINVAL;
422 DRM_ERROR("failed to pin new rbo buffer before flip\n"); 485 DRM_ERROR("failed to pin new rbo buffer before flip\n");
423 goto cleanup; 486 goto cleanup;
424 } 487 }
425 radeon_bo_get_tiling_flags(work->new_rbo, &tiling_flags, NULL); 488 radeon_bo_get_tiling_flags(new_rbo, &tiling_flags, NULL);
426 radeon_bo_unreserve(work->new_rbo); 489 radeon_bo_unreserve(new_rbo);
427 490
428 if (!ASIC_IS_AVIVO(rdev)) { 491 if (!ASIC_IS_AVIVO(rdev)) {
429 /* crtc offset is from display base addr not FB location */ 492 /* crtc offset is from display base addr not FB location */
@@ -460,6 +523,7 @@ static void radeon_flip_work_func(struct work_struct *__work)
460 } 523 }
461 base &= ~7; 524 base &= ~7;
462 } 525 }
526 work->base = base;
463 527
464 r = drm_vblank_get(crtc->dev, radeon_crtc->crtc_id); 528 r = drm_vblank_get(crtc->dev, radeon_crtc->crtc_id);
465 if (r) { 529 if (r) {
@@ -470,98 +534,42 @@ static void radeon_flip_work_func(struct work_struct *__work)
470 /* We borrow the event spin lock for protecting flip_work */ 534 /* We borrow the event spin lock for protecting flip_work */
471 spin_lock_irqsave(&crtc->dev->event_lock, flags); 535 spin_lock_irqsave(&crtc->dev->event_lock, flags);
472 536
473 /* set the proper interrupt */ 537 if (radeon_crtc->flip_status != RADEON_FLIP_NONE) {
474 radeon_irq_kms_pflip_irq_get(rdev, radeon_crtc->crtc_id); 538 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
539 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
540 r = -EBUSY;
541 goto vblank_cleanup;
542 }
543 radeon_crtc->flip_status = RADEON_FLIP_PENDING;
544 radeon_crtc->flip_work = work;
475 545
476 /* do the flip (mmio) */ 546 /* update crtc fb */
477 radeon_page_flip(rdev, radeon_crtc->crtc_id, base); 547 crtc->primary->fb = fb;
478 548
479 spin_unlock_irqrestore(&crtc->dev->event_lock, flags); 549 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
480 up_read(&rdev->exclusive_lock);
481 550
482 return; 551 queue_work(radeon_crtc->flip_queue, &work->flip_work);
552 return 0;
553
554vblank_cleanup:
555 drm_vblank_put(crtc->dev, radeon_crtc->crtc_id);
483 556
484pflip_cleanup: 557pflip_cleanup:
485 if (unlikely(radeon_bo_reserve(work->new_rbo, false) != 0)) { 558 if (unlikely(radeon_bo_reserve(new_rbo, false) != 0)) {
486 DRM_ERROR("failed to reserve new rbo in error path\n"); 559 DRM_ERROR("failed to reserve new rbo in error path\n");
487 goto cleanup; 560 goto cleanup;
488 } 561 }
489 if (unlikely(radeon_bo_unpin(work->new_rbo) != 0)) { 562 if (unlikely(radeon_bo_unpin(new_rbo) != 0)) {
490 DRM_ERROR("failed to unpin new rbo in error path\n"); 563 DRM_ERROR("failed to unpin new rbo in error path\n");
491 } 564 }
492 radeon_bo_unreserve(work->new_rbo); 565 radeon_bo_unreserve(new_rbo);
493 566
494cleanup: 567cleanup:
495 drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base); 568 drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
496 radeon_fence_unref(&work->fence); 569 radeon_fence_unref(&work->fence);
497 kfree(work); 570 kfree(work);
498 up_read(&rdev->exclusive_lock);
499}
500
501static int radeon_crtc_page_flip(struct drm_crtc *crtc,
502 struct drm_framebuffer *fb,
503 struct drm_pending_vblank_event *event,
504 uint32_t page_flip_flags)
505{
506 struct drm_device *dev = crtc->dev;
507 struct radeon_device *rdev = dev->dev_private;
508 struct radeon_crtc *radeon_crtc = to_radeon_crtc(crtc);
509 struct radeon_framebuffer *old_radeon_fb;
510 struct radeon_framebuffer *new_radeon_fb;
511 struct drm_gem_object *obj;
512 struct radeon_flip_work *work;
513 unsigned long flags;
514
515 work = kzalloc(sizeof *work, GFP_KERNEL);
516 if (work == NULL)
517 return -ENOMEM;
518
519 INIT_WORK(&work->flip_work, radeon_flip_work_func);
520 INIT_WORK(&work->unpin_work, radeon_unpin_work_func);
521
522 work->rdev = rdev;
523 work->crtc_id = radeon_crtc->crtc_id;
524 work->fb = fb;
525 work->event = event;
526 571
527 /* schedule unpin of the old buffer */ 572 return r;
528 old_radeon_fb = to_radeon_framebuffer(crtc->primary->fb);
529 obj = old_radeon_fb->obj;
530
531 /* take a reference to the old object */
532 drm_gem_object_reference(obj);
533 work->old_rbo = gem_to_radeon_bo(obj);
534
535 new_radeon_fb = to_radeon_framebuffer(fb);
536 obj = new_radeon_fb->obj;
537 work->new_rbo = gem_to_radeon_bo(obj);
538
539 spin_lock(&work->new_rbo->tbo.bdev->fence_lock);
540 if (work->new_rbo->tbo.sync_obj)
541 work->fence = radeon_fence_ref(work->new_rbo->tbo.sync_obj);
542 spin_unlock(&work->new_rbo->tbo.bdev->fence_lock);
543
544 /* We borrow the event spin lock for protecting flip_work */
545 spin_lock_irqsave(&crtc->dev->event_lock, flags);
546
547 if (radeon_crtc->flip_work) {
548 DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
549 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
550 drm_gem_object_unreference_unlocked(&work->old_rbo->gem_base);
551 radeon_fence_unref(&work->fence);
552 kfree(work);
553 return -EBUSY;
554 }
555 radeon_crtc->flip_work = work;
556
557 /* update crtc fb */
558 crtc->primary->fb = fb;
559
560 spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
561
562 queue_work(radeon_crtc->flip_queue, &work->flip_work);
563
564 return 0;
565} 573}
566 574
567static int 575static int
@@ -821,6 +829,10 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
821 struct radeon_device *rdev = dev->dev_private; 829 struct radeon_device *rdev = dev->dev_private;
822 int ret = 0; 830 int ret = 0;
823 831
832 /* don't leak the edid if we already fetched it in detect() */
833 if (radeon_connector->edid)
834 goto got_edid;
835
824 /* on hw with routers, select right port */ 836 /* on hw with routers, select right port */
825 if (radeon_connector->router.ddc_valid) 837 if (radeon_connector->router.ddc_valid)
826 radeon_router_select_ddc_port(radeon_connector); 838 radeon_router_select_ddc_port(radeon_connector);
@@ -859,6 +871,7 @@ int radeon_ddc_get_modes(struct radeon_connector *radeon_connector)
859 radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev); 871 radeon_connector->edid = radeon_bios_get_hardcoded_edid(rdev);
860 } 872 }
861 if (radeon_connector->edid) { 873 if (radeon_connector->edid) {
874got_edid:
862 drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid); 875 drm_mode_connector_update_edid_property(&radeon_connector->base, radeon_connector->edid);
863 ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid); 876 ret = drm_add_edid_modes(&radeon_connector->base, radeon_connector->edid);
864 drm_edid_to_eld(&radeon_connector->base, radeon_connector->edid); 877 drm_edid_to_eld(&radeon_connector->base, radeon_connector->edid);
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 6e3017413386..cb1421369e3a 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -175,6 +175,7 @@ int radeon_runtime_pm = -1;
175int radeon_hard_reset = 0; 175int radeon_hard_reset = 0;
176int radeon_vm_size = 4096; 176int radeon_vm_size = 4096;
177int radeon_vm_block_size = 9; 177int radeon_vm_block_size = 9;
178int radeon_deep_color = 0;
178 179
179MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers"); 180MODULE_PARM_DESC(no_wb, "Disable AGP writeback for scratch registers");
180module_param_named(no_wb, radeon_no_wb, int, 0444); 181module_param_named(no_wb, radeon_no_wb, int, 0444);
@@ -248,6 +249,9 @@ module_param_named(vm_size, radeon_vm_size, int, 0444);
248MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default 9)"); 249MODULE_PARM_DESC(vm_block_size, "VM page table size in bits (default 9)");
249module_param_named(vm_block_size, radeon_vm_block_size, int, 0444); 250module_param_named(vm_block_size, radeon_vm_block_size, int, 0444);
250 251
252MODULE_PARM_DESC(deep_color, "Deep Color support (1 = enable, 0 = disable (default))");
253module_param_named(deep_color, radeon_deep_color, int, 0444);
254
251static struct pci_device_id pciidlist[] = { 255static struct pci_device_id pciidlist[] = {
252 radeon_PCI_IDS 256 radeon_PCI_IDS
253}; 257};
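A note on usage, inferred from standard module_param_named() behaviour rather than stated anywhere in the patch: because the new option is registered as module_param_named(deep_color, radeon_deep_color, int, 0444), it is read-only at runtime and would typically be set at load time, e.g. radeon.deep_color=1 on the kernel command line (built-in driver) or deep_color=1 as a modprobe option, with 0 remaining the default.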
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index ad0e4b8cc7e3..0592ddb0904b 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -46,6 +46,10 @@ struct radeon_device;
46#define to_radeon_encoder(x) container_of(x, struct radeon_encoder, base) 46#define to_radeon_encoder(x) container_of(x, struct radeon_encoder, base)
47#define to_radeon_framebuffer(x) container_of(x, struct radeon_framebuffer, base) 47#define to_radeon_framebuffer(x) container_of(x, struct radeon_framebuffer, base)
48 48
49#define RADEON_MAX_HPD_PINS 7
50#define RADEON_MAX_CRTCS 6
51#define RADEON_MAX_AFMT_BLOCKS 7
52
49enum radeon_rmx_type { 53enum radeon_rmx_type {
50 RMX_OFF, 54 RMX_OFF,
51 RMX_FULL, 55 RMX_FULL,
@@ -233,8 +237,8 @@ struct radeon_mode_info {
233 struct card_info *atom_card_info; 237 struct card_info *atom_card_info;
234 enum radeon_connector_table connector_table; 238 enum radeon_connector_table connector_table;
235 bool mode_config_initialized; 239 bool mode_config_initialized;
236 struct radeon_crtc *crtcs[6]; 240 struct radeon_crtc *crtcs[RADEON_MAX_CRTCS];
237 struct radeon_afmt *afmt[7]; 241 struct radeon_afmt *afmt[RADEON_MAX_AFMT_BLOCKS];
238 /* DVI-I properties */ 242 /* DVI-I properties */
239 struct drm_property *coherent_mode_property; 243 struct drm_property *coherent_mode_property;
240 /* DAC enable load detect */ 244 /* DAC enable load detect */
@@ -302,6 +306,12 @@ struct radeon_atom_ss {
302 uint16_t amount; 306 uint16_t amount;
303}; 307};
304 308
309enum radeon_flip_status {
310 RADEON_FLIP_NONE,
311 RADEON_FLIP_PENDING,
312 RADEON_FLIP_SUBMITTED
313};
314
305struct radeon_crtc { 315struct radeon_crtc {
306 struct drm_crtc base; 316 struct drm_crtc base;
307 int crtc_id; 317 int crtc_id;
@@ -327,6 +337,7 @@ struct radeon_crtc {
327 /* page flipping */ 337 /* page flipping */
328 struct workqueue_struct *flip_queue; 338 struct workqueue_struct *flip_queue;
329 struct radeon_flip_work *flip_work; 339 struct radeon_flip_work *flip_work;
340 enum radeon_flip_status flip_status;
330 /* pll sharing */ 341 /* pll sharing */
331 struct radeon_atom_ss ss; 342 struct radeon_atom_ss ss;
332 bool ss_enabled; 343 bool ss_enabled;
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 12c663e86ca1..e447e390d09a 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -73,8 +73,10 @@ void radeon_pm_acpi_event_handler(struct radeon_device *rdev)
73 rdev->pm.dpm.ac_power = true; 73 rdev->pm.dpm.ac_power = true;
74 else 74 else
75 rdev->pm.dpm.ac_power = false; 75 rdev->pm.dpm.ac_power = false;
76 if (rdev->asic->dpm.enable_bapm) 76 if (rdev->family == CHIP_ARUBA) {
77 radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power); 77 if (rdev->asic->dpm.enable_bapm)
78 radeon_dpm_enable_bapm(rdev, rdev->pm.dpm.ac_power);
79 }
78 mutex_unlock(&rdev->pm.mutex); 80 mutex_unlock(&rdev->pm.mutex);
79 } else if (rdev->pm.pm_method == PM_METHOD_PROFILE) { 81 } else if (rdev->pm.pm_method == PM_METHOD_PROFILE) {
80 if (rdev->pm.profile == PM_PROFILE_AUTO) { 82 if (rdev->pm.profile == PM_PROFILE_AUTO) {
diff --git a/drivers/gpu/drm/radeon/radeon_vm.c b/drivers/gpu/drm/radeon/radeon_vm.c
index 899d9126cad6..eecff6bbd341 100644
--- a/drivers/gpu/drm/radeon/radeon_vm.c
+++ b/drivers/gpu/drm/radeon/radeon_vm.c
@@ -495,7 +495,7 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
495 mutex_unlock(&vm->mutex); 495 mutex_unlock(&vm->mutex);
496 496
497 r = radeon_bo_create(rdev, RADEON_VM_PTE_COUNT * 8, 497 r = radeon_bo_create(rdev, RADEON_VM_PTE_COUNT * 8,
498 RADEON_GPU_PAGE_SIZE, false, 498 RADEON_GPU_PAGE_SIZE, true,
499 RADEON_GEM_DOMAIN_VRAM, NULL, &pt); 499 RADEON_GEM_DOMAIN_VRAM, NULL, &pt);
500 if (r) 500 if (r)
501 return r; 501 return r;
@@ -992,7 +992,7 @@ int radeon_vm_init(struct radeon_device *rdev, struct radeon_vm *vm)
992 return -ENOMEM; 992 return -ENOMEM;
993 } 993 }
994 994
995 r = radeon_bo_create(rdev, pd_size, align, false, 995 r = radeon_bo_create(rdev, pd_size, align, true,
996 RADEON_GEM_DOMAIN_VRAM, NULL, 996 RADEON_GEM_DOMAIN_VRAM, NULL,
997 &vm->page_directory); 997 &vm->page_directory);
998 if (r) 998 if (r)
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 237dd29d9f1c..3e21e869015f 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -406,8 +406,9 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
406 for (i = 0; i < rdev->num_crtc; i++) { 406 for (i = 0; i < rdev->num_crtc; i++) {
407 if (save->crtc_enabled[i]) { 407 if (save->crtc_enabled[i]) {
408 tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i]); 408 tmp = RREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i]);
409 if ((tmp & 0x3) != 0) { 409 if ((tmp & 0x7) != 3) {
410 tmp &= ~0x3; 410 tmp &= ~0x7;
411 tmp |= 0x3;
411 WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i], tmp); 412 WREG32(AVIVO_D1MODE_MASTER_UPDATE_MODE + crtc_offsets[i], tmp);
412 } 413 }
413 tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]); 414 tmp = RREG32(AVIVO_D1GRPH_UPDATE + crtc_offsets[i]);
diff --git a/drivers/gpu/drm/radeon/rv770_dpm.c b/drivers/gpu/drm/radeon/rv770_dpm.c
index da041a43d82e..3c76e1dcdf04 100644
--- a/drivers/gpu/drm/radeon/rv770_dpm.c
+++ b/drivers/gpu/drm/radeon/rv770_dpm.c
@@ -2329,12 +2329,6 @@ void rv770_get_engine_memory_ss(struct radeon_device *rdev)
2329 pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss, 2329 pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
2330 ASIC_INTERNAL_MEMORY_SS, 0); 2330 ASIC_INTERNAL_MEMORY_SS, 0);
2331 2331
2332 /* disable ss, causes hangs on some cayman boards */
2333 if (rdev->family == CHIP_CAYMAN) {
2334 pi->sclk_ss = false;
2335 pi->mclk_ss = false;
2336 }
2337
2338 if (pi->sclk_ss || pi->mclk_ss) 2332 if (pi->sclk_ss || pi->mclk_ss)
2339 pi->dynamic_ss = true; 2333 pi->dynamic_ss = true;
2340 else 2334 else
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 730cee2c34cf..eba0225259a4 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -6376,14 +6376,16 @@ restart_ih:
6376 case 147: 6376 case 147:
6377 addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR); 6377 addr = RREG32(VM_CONTEXT1_PROTECTION_FAULT_ADDR);
6378 status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS); 6378 status = RREG32(VM_CONTEXT1_PROTECTION_FAULT_STATUS);
6379 /* reset addr and status */
6380 WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
6381 if (addr == 0x0 && status == 0x0)
6382 break;
6379 dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data); 6383 dev_err(rdev->dev, "GPU fault detected: %d 0x%08x\n", src_id, src_data);
6380 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n", 6384 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
6381 addr); 6385 addr);
6382 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n", 6386 dev_err(rdev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
6383 status); 6387 status);
6384 si_vm_decode_fault(rdev, status, addr); 6388 si_vm_decode_fault(rdev, status, addr);
6385 /* reset addr and status */
6386 WREG32_P(VM_CONTEXT1_CNTL2, 1, ~1);
6387 break; 6389 break;
6388 case 176: /* RINGID0 CP_INT */ 6390 case 176: /* RINGID0 CP_INT */
6389 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX); 6391 radeon_fence_process(rdev, RADEON_RING_TYPE_GFX_INDEX);
diff --git a/drivers/gpu/drm/radeon/trinity_dpm.c b/drivers/gpu/drm/radeon/trinity_dpm.c
index 2a2822c03329..20da6ff183df 100644
--- a/drivers/gpu/drm/radeon/trinity_dpm.c
+++ b/drivers/gpu/drm/radeon/trinity_dpm.c
@@ -1874,7 +1874,15 @@ int trinity_dpm_init(struct radeon_device *rdev)
1874 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) 1874 for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++)
1875 pi->at[i] = TRINITY_AT_DFLT; 1875 pi->at[i] = TRINITY_AT_DFLT;
1876 1876
1877	pi->enable_bapm = false;	1877	/* There are stability issues reported on laptops with
1878 * bapm installed when switching between AC and battery
1879 * power. At the same time, some desktop boards hang
1880 * if it's not enabled and dpm is enabled.
1881 */
1882 if (rdev->flags & RADEON_IS_MOBILITY)
1883 pi->enable_bapm = false;
1884 else
1885 pi->enable_bapm = true;
1878 pi->enable_nbps_policy = true; 1886 pi->enable_nbps_policy = true;
1879 pi->enable_sclk_ds = true; 1887 pi->enable_sclk_ds = true;
1880 pi->enable_gfx_power_gating = true; 1888 pi->enable_gfx_power_gating = true;
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
index a89ad938eacf..b031b48dbb3c 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fb.c
@@ -179,7 +179,6 @@ static int vmw_fb_set_par(struct fb_info *info)
179 vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset); 179 vmw_write(vmw_priv, SVGA_REG_DISPLAY_POSITION_Y, info->var.yoffset);
180 vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres); 180 vmw_write(vmw_priv, SVGA_REG_DISPLAY_WIDTH, info->var.xres);
181 vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres); 181 vmw_write(vmw_priv, SVGA_REG_DISPLAY_HEIGHT, info->var.yres);
182 vmw_write(vmw_priv, SVGA_REG_BYTES_PER_LINE, info->fix.line_length);
183 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID); 182 vmw_write(vmw_priv, SVGA_REG_DISPLAY_ID, SVGA_ID_INVALID);
184 } 183 }
185 184
diff --git a/drivers/hid/Kconfig b/drivers/hid/Kconfig
index 800c8b60f7a2..5e79c6ad914f 100644
--- a/drivers/hid/Kconfig
+++ b/drivers/hid/Kconfig
@@ -810,7 +810,7 @@ config HID_ZYDACRON
810 810
811config HID_SENSOR_HUB 811config HID_SENSOR_HUB
812 tristate "HID Sensors framework support" 812 tristate "HID Sensors framework support"
813 depends on HID 813 depends on HID && HAS_IOMEM
814 select MFD_CORE 814 select MFD_CORE
815 default n 815 default n
816 ---help--- 816 ---help---
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 6d00bb9366fa..48b66bbffc94 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -323,6 +323,7 @@
323 323
324#define USB_VENDOR_ID_ETURBOTOUCH 0x22b9 324#define USB_VENDOR_ID_ETURBOTOUCH 0x22b9
325#define USB_DEVICE_ID_ETURBOTOUCH 0x0006 325#define USB_DEVICE_ID_ETURBOTOUCH 0x0006
326#define USB_DEVICE_ID_ETURBOTOUCH_2968 0x2968
326 327
327#define USB_VENDOR_ID_EZKEY 0x0518 328#define USB_VENDOR_ID_EZKEY 0x0518
328#define USB_DEVICE_ID_BTC_8193 0x0002 329#define USB_DEVICE_ID_BTC_8193 0x0002
@@ -715,6 +716,8 @@
715 716
716#define USB_VENDOR_ID_PENMOUNT 0x14e1 717#define USB_VENDOR_ID_PENMOUNT 0x14e1
717#define USB_DEVICE_ID_PENMOUNT_PCI 0x3500 718#define USB_DEVICE_ID_PENMOUNT_PCI 0x3500
719#define USB_DEVICE_ID_PENMOUNT_1610 0x1610
720#define USB_DEVICE_ID_PENMOUNT_1640 0x1640
718 721
719#define USB_VENDOR_ID_PETALYNX 0x18b1 722#define USB_VENDOR_ID_PETALYNX 0x18b1
720#define USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE 0x0037 723#define USB_DEVICE_ID_PETALYNX_MAXTER_REMOTE 0x0037
diff --git a/drivers/hid/hid-rmi.c b/drivers/hid/hid-rmi.c
index 2451c7e5febd..578bbe65902b 100644
--- a/drivers/hid/hid-rmi.c
+++ b/drivers/hid/hid-rmi.c
@@ -428,6 +428,7 @@ static int rmi_raw_event(struct hid_device *hdev,
428 return 0; 428 return 0;
429} 429}
430 430
431#ifdef CONFIG_PM
431static int rmi_post_reset(struct hid_device *hdev) 432static int rmi_post_reset(struct hid_device *hdev)
432{ 433{
433 return rmi_set_mode(hdev, RMI_MODE_ATTN_REPORTS); 434 return rmi_set_mode(hdev, RMI_MODE_ATTN_REPORTS);
@@ -437,6 +438,7 @@ static int rmi_post_resume(struct hid_device *hdev)
437{ 438{
438 return rmi_set_mode(hdev, RMI_MODE_ATTN_REPORTS); 439 return rmi_set_mode(hdev, RMI_MODE_ATTN_REPORTS);
439} 440}
441#endif /* CONFIG_PM */
440 442
441#define RMI4_MAX_PAGE 0xff 443#define RMI4_MAX_PAGE 0xff
442#define RMI4_PAGE_SIZE 0x0100 444#define RMI4_PAGE_SIZE 0x0100
diff --git a/drivers/hid/hid-sensor-hub.c b/drivers/hid/hid-sensor-hub.c
index a8d5c8faf8cf..e244e449cbba 100644
--- a/drivers/hid/hid-sensor-hub.c
+++ b/drivers/hid/hid-sensor-hub.c
@@ -159,17 +159,18 @@ int sensor_hub_register_callback(struct hid_sensor_hub_device *hsdev,
159{ 159{
160 struct hid_sensor_hub_callbacks_list *callback; 160 struct hid_sensor_hub_callbacks_list *callback;
161 struct sensor_hub_data *pdata = hid_get_drvdata(hsdev->hdev); 161 struct sensor_hub_data *pdata = hid_get_drvdata(hsdev->hdev);
162 unsigned long flags;
162 163
163 spin_lock(&pdata->dyn_callback_lock); 164 spin_lock_irqsave(&pdata->dyn_callback_lock, flags);
164 list_for_each_entry(callback, &pdata->dyn_callback_list, list) 165 list_for_each_entry(callback, &pdata->dyn_callback_list, list)
165 if (callback->usage_id == usage_id && 166 if (callback->usage_id == usage_id &&
166 callback->hsdev == hsdev) { 167 callback->hsdev == hsdev) {
167 spin_unlock(&pdata->dyn_callback_lock); 168 spin_unlock_irqrestore(&pdata->dyn_callback_lock, flags);
168 return -EINVAL; 169 return -EINVAL;
169 } 170 }
170 callback = kzalloc(sizeof(*callback), GFP_ATOMIC); 171 callback = kzalloc(sizeof(*callback), GFP_ATOMIC);
171 if (!callback) { 172 if (!callback) {
172 spin_unlock(&pdata->dyn_callback_lock); 173 spin_unlock_irqrestore(&pdata->dyn_callback_lock, flags);
173 return -ENOMEM; 174 return -ENOMEM;
174 } 175 }
175 callback->hsdev = hsdev; 176 callback->hsdev = hsdev;
@@ -177,7 +178,7 @@ int sensor_hub_register_callback(struct hid_sensor_hub_device *hsdev,
177 callback->usage_id = usage_id; 178 callback->usage_id = usage_id;
178 callback->priv = NULL; 179 callback->priv = NULL;
179 list_add_tail(&callback->list, &pdata->dyn_callback_list); 180 list_add_tail(&callback->list, &pdata->dyn_callback_list);
180 spin_unlock(&pdata->dyn_callback_lock); 181 spin_unlock_irqrestore(&pdata->dyn_callback_lock, flags);
181 182
182 return 0; 183 return 0;
183} 184}
@@ -188,8 +189,9 @@ int sensor_hub_remove_callback(struct hid_sensor_hub_device *hsdev,
188{ 189{
189 struct hid_sensor_hub_callbacks_list *callback; 190 struct hid_sensor_hub_callbacks_list *callback;
190 struct sensor_hub_data *pdata = hid_get_drvdata(hsdev->hdev); 191 struct sensor_hub_data *pdata = hid_get_drvdata(hsdev->hdev);
192 unsigned long flags;
191 193
192 spin_lock(&pdata->dyn_callback_lock); 194 spin_lock_irqsave(&pdata->dyn_callback_lock, flags);
193 list_for_each_entry(callback, &pdata->dyn_callback_list, list) 195 list_for_each_entry(callback, &pdata->dyn_callback_list, list)
194 if (callback->usage_id == usage_id && 196 if (callback->usage_id == usage_id &&
195 callback->hsdev == hsdev) { 197 callback->hsdev == hsdev) {
@@ -197,7 +199,7 @@ int sensor_hub_remove_callback(struct hid_sensor_hub_device *hsdev,
197 kfree(callback); 199 kfree(callback);
198 break; 200 break;
199 } 201 }
200 spin_unlock(&pdata->dyn_callback_lock); 202 spin_unlock_irqrestore(&pdata->dyn_callback_lock, flags);
201 203
202 return 0; 204 return 0;
203} 205}
@@ -378,15 +380,16 @@ static int sensor_hub_suspend(struct hid_device *hdev, pm_message_t message)
378{ 380{
379 struct sensor_hub_data *pdata = hid_get_drvdata(hdev); 381 struct sensor_hub_data *pdata = hid_get_drvdata(hdev);
380 struct hid_sensor_hub_callbacks_list *callback; 382 struct hid_sensor_hub_callbacks_list *callback;
383 unsigned long flags;
381 384
382 hid_dbg(hdev, " sensor_hub_suspend\n"); 385 hid_dbg(hdev, " sensor_hub_suspend\n");
383 spin_lock(&pdata->dyn_callback_lock); 386 spin_lock_irqsave(&pdata->dyn_callback_lock, flags);
384 list_for_each_entry(callback, &pdata->dyn_callback_list, list) { 387 list_for_each_entry(callback, &pdata->dyn_callback_list, list) {
385 if (callback->usage_callback->suspend) 388 if (callback->usage_callback->suspend)
386 callback->usage_callback->suspend( 389 callback->usage_callback->suspend(
387 callback->hsdev, callback->priv); 390 callback->hsdev, callback->priv);
388 } 391 }
389 spin_unlock(&pdata->dyn_callback_lock); 392 spin_unlock_irqrestore(&pdata->dyn_callback_lock, flags);
390 393
391 return 0; 394 return 0;
392} 395}
@@ -395,15 +398,16 @@ static int sensor_hub_resume(struct hid_device *hdev)
395{ 398{
396 struct sensor_hub_data *pdata = hid_get_drvdata(hdev); 399 struct sensor_hub_data *pdata = hid_get_drvdata(hdev);
397 struct hid_sensor_hub_callbacks_list *callback; 400 struct hid_sensor_hub_callbacks_list *callback;
401 unsigned long flags;
398 402
399 hid_dbg(hdev, " sensor_hub_resume\n"); 403 hid_dbg(hdev, " sensor_hub_resume\n");
400 spin_lock(&pdata->dyn_callback_lock); 404 spin_lock_irqsave(&pdata->dyn_callback_lock, flags);
401 list_for_each_entry(callback, &pdata->dyn_callback_list, list) { 405 list_for_each_entry(callback, &pdata->dyn_callback_list, list) {
402 if (callback->usage_callback->resume) 406 if (callback->usage_callback->resume)
403 callback->usage_callback->resume( 407 callback->usage_callback->resume(
404 callback->hsdev, callback->priv); 408 callback->hsdev, callback->priv);
405 } 409 }
406 spin_unlock(&pdata->dyn_callback_lock); 410 spin_unlock_irqrestore(&pdata->dyn_callback_lock, flags);
407 411
408 return 0; 412 return 0;
409} 413}
@@ -632,6 +636,7 @@ static int sensor_hub_probe(struct hid_device *hdev,
632 if (name == NULL) { 636 if (name == NULL) {
633 hid_err(hdev, "Failed MFD device name\n"); 637 hid_err(hdev, "Failed MFD device name\n");
634 ret = -ENOMEM; 638 ret = -ENOMEM;
639 kfree(hsdev);
635 goto err_no_mem; 640 goto err_no_mem;
636 } 641 }
637 sd->hid_sensor_hub_client_devs[ 642 sd->hid_sensor_hub_client_devs[
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 59badc10a08c..31e6727cd009 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -49,6 +49,7 @@ static const struct hid_blacklist {
49 49
50 { USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II, HID_QUIRK_MULTI_INPUT }, 50 { USB_VENDOR_ID_EMS, USB_DEVICE_ID_EMS_TRIO_LINKER_PLUS_II, HID_QUIRK_MULTI_INPUT },
51 { USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH, HID_QUIRK_MULTI_INPUT }, 51 { USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH, HID_QUIRK_MULTI_INPUT },
52 { USB_VENDOR_ID_ETURBOTOUCH, USB_DEVICE_ID_ETURBOTOUCH_2968, HID_QUIRK_MULTI_INPUT },
52 { USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD, HID_QUIRK_MULTI_INPUT }, 53 { USB_VENDOR_ID_GREENASIA, USB_DEVICE_ID_GREENASIA_DUAL_USB_JOYPAD, HID_QUIRK_MULTI_INPUT },
53 { USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS }, 54 { USB_VENDOR_ID_PANTHERLORD, USB_DEVICE_ID_PANTHERLORD_TWIN_USB_JOYSTICK, HID_QUIRK_MULTI_INPUT | HID_QUIRK_SKIP_OUTPUT_REPORTS },
54 { USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII, HID_QUIRK_MULTI_INPUT }, 55 { USB_VENDOR_ID_PLAYDOTCOM, USB_DEVICE_ID_PLAYDOTCOM_EMS_USBII, HID_QUIRK_MULTI_INPUT },
@@ -76,6 +77,8 @@ static const struct hid_blacklist {
76 { USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GX680R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS }, 77 { USB_VENDOR_ID_MSI, USB_DEVICE_ID_MSI_GX680R_LED_PANEL, HID_QUIRK_NO_INIT_REPORTS },
77 { USB_VENDOR_ID_NEXIO, USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750, HID_QUIRK_NO_INIT_REPORTS }, 78 { USB_VENDOR_ID_NEXIO, USB_DEVICE_ID_NEXIO_MULTITOUCH_PTI0750, HID_QUIRK_NO_INIT_REPORTS },
78 { USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS }, 79 { USB_VENDOR_ID_NOVATEK, USB_DEVICE_ID_NOVATEK_MOUSE, HID_QUIRK_NO_INIT_REPORTS },
80 { USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_1610, HID_QUIRK_NOGET },
81 { USB_VENDOR_ID_PENMOUNT, USB_DEVICE_ID_PENMOUNT_1640, HID_QUIRK_NOGET },
79 { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS }, 82 { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS },
80 { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS }, 83 { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS },
81 { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2, HID_QUIRK_NO_INIT_REPORTS }, 84 { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN2, HID_QUIRK_NO_INIT_REPORTS },
diff --git a/drivers/hv/connection.c b/drivers/hv/connection.c
index e84f4526eb36..ae22e3c1fc4c 100644
--- a/drivers/hv/connection.c
+++ b/drivers/hv/connection.c
@@ -339,9 +339,13 @@ static void process_chn_event(u32 relid)
339 */ 339 */
340 340
341 do { 341 do {
342 hv_begin_read(&channel->inbound); 342 if (read_state)
343 hv_begin_read(&channel->inbound);
343 channel->onchannel_callback(arg); 344 channel->onchannel_callback(arg);
344 bytes_to_read = hv_end_read(&channel->inbound); 345 if (read_state)
346 bytes_to_read = hv_end_read(&channel->inbound);
347 else
348 bytes_to_read = 0;
345 } while (read_state && (bytes_to_read != 0)); 349 } while (read_state && (bytes_to_read != 0));
346 } else { 350 } else {
347 pr_err("no channel callback for relid - %u\n", relid); 351 pr_err("no channel callback for relid - %u\n", relid);
diff --git a/drivers/hv/hv_fcopy.c b/drivers/hv/hv_fcopy.c
index eaaa3d843b80..23b2ce294c4c 100644
--- a/drivers/hv/hv_fcopy.c
+++ b/drivers/hv/hv_fcopy.c
@@ -246,8 +246,8 @@ void hv_fcopy_onchannelcallback(void *context)
246 /* 246 /*
247 * Send the information to the user-level daemon. 247 * Send the information to the user-level daemon.
248 */ 248 */
249 fcopy_send_data();
250 schedule_delayed_work(&fcopy_work, 5*HZ); 249 schedule_delayed_work(&fcopy_work, 5*HZ);
250 fcopy_send_data();
251 return; 251 return;
252 } 252 }
253 icmsghdr->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE; 253 icmsghdr->icflags = ICMSGHDRFLAG_TRANSACTION | ICMSGHDRFLAG_RESPONSE;
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index ea852537307e..521c14625b3a 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -127,6 +127,17 @@ kvp_work_func(struct work_struct *dummy)
127 kvp_respond_to_host(NULL, HV_E_FAIL); 127 kvp_respond_to_host(NULL, HV_E_FAIL);
128} 128}
129 129
130static void poll_channel(struct vmbus_channel *channel)
131{
132 if (channel->target_cpu != smp_processor_id())
133 smp_call_function_single(channel->target_cpu,
134 hv_kvp_onchannelcallback,
135 channel, true);
136 else
137 hv_kvp_onchannelcallback(channel);
138}
139
140
130static int kvp_handle_handshake(struct hv_kvp_msg *msg) 141static int kvp_handle_handshake(struct hv_kvp_msg *msg)
131{ 142{
132 int ret = 1; 143 int ret = 1;
@@ -155,7 +166,7 @@ static int kvp_handle_handshake(struct hv_kvp_msg *msg)
155 kvp_register(dm_reg_value); 166 kvp_register(dm_reg_value);
156 kvp_transaction.active = false; 167 kvp_transaction.active = false;
157 if (kvp_transaction.kvp_context) 168 if (kvp_transaction.kvp_context)
158 hv_kvp_onchannelcallback(kvp_transaction.kvp_context); 169 poll_channel(kvp_transaction.kvp_context);
159 } 170 }
160 return ret; 171 return ret;
161} 172}
@@ -568,7 +579,7 @@ response_done:
568 579
569 vmbus_sendpacket(channel, recv_buffer, buf_len, req_id, 580 vmbus_sendpacket(channel, recv_buffer, buf_len, req_id,
570 VM_PKT_DATA_INBAND, 0); 581 VM_PKT_DATA_INBAND, 0);
571 582 poll_channel(channel);
572} 583}
573 584
574/* 585/*
@@ -603,7 +614,7 @@ void hv_kvp_onchannelcallback(void *context)
603 return; 614 return;
604 } 615 }
605 616
606 vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 2, &recvlen, 617 vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 4, &recvlen,
607 &requestid); 618 &requestid);
608 619
609 if (recvlen > 0) { 620 if (recvlen > 0) {
diff --git a/drivers/hv/hv_util.c b/drivers/hv/hv_util.c
index dd761806f0e8..3b9c9ef0deb8 100644
--- a/drivers/hv/hv_util.c
+++ b/drivers/hv/hv_util.c
@@ -319,7 +319,7 @@ static int util_probe(struct hv_device *dev,
319 (struct hv_util_service *)dev_id->driver_data; 319 (struct hv_util_service *)dev_id->driver_data;
320 int ret; 320 int ret;
321 321
322 srv->recv_buffer = kmalloc(PAGE_SIZE * 2, GFP_KERNEL); 322 srv->recv_buffer = kmalloc(PAGE_SIZE * 4, GFP_KERNEL);
323 if (!srv->recv_buffer) 323 if (!srv->recv_buffer)
324 return -ENOMEM; 324 return -ENOMEM;
325 if (srv->util_init) { 325 if (srv->util_init) {
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 08531a128f53..02d3d85829f3 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -1052,7 +1052,7 @@ config SENSORS_PC87427
1052 will be called pc87427. 1052 will be called pc87427.
1053 1053
1054config SENSORS_NTC_THERMISTOR 1054config SENSORS_NTC_THERMISTOR
1055 tristate "NTC thermistor support" 1055 tristate "NTC thermistor support from Murata"
1056 depends on !OF || IIO=n || IIO 1056 depends on !OF || IIO=n || IIO
1057 help 1057 help
1058 This driver supports NTC thermistors sensor reading and its 1058 This driver supports NTC thermistors sensor reading and its
@@ -1060,7 +1060,8 @@ config SENSORS_NTC_THERMISTOR
1060 send notifications about the temperature. 1060 send notifications about the temperature.
1061 1061
1062 Currently, this driver supports 1062 Currently, this driver supports
1063 NCP15WB473, NCP18WB473, NCP21WB473, NCP03WB473, and NCP15WL333. 1063 NCP15WB473, NCP18WB473, NCP21WB473, NCP03WB473, and NCP15WL333
1064 from Murata.
1064 1065
1065 This driver can also be built as a module. If so, the module 1066 This driver can also be built as a module. If so, the module
1066 will be called ntc-thermistor. 1067 will be called ntc-thermistor.
@@ -1176,6 +1177,7 @@ config SENSORS_DME1737
1176config SENSORS_EMC1403 1177config SENSORS_EMC1403
1177 tristate "SMSC EMC1403/23 thermal sensor" 1178 tristate "SMSC EMC1403/23 thermal sensor"
1178 depends on I2C 1179 depends on I2C
1180 select REGMAP_I2C
1179 help 1181 help
1180 If you say yes here you get support for the SMSC EMC1403/23 1182 If you say yes here you get support for the SMSC EMC1403/23
1181 temperature monitoring chip. 1183 temperature monitoring chip.
diff --git a/drivers/hwmon/adc128d818.c b/drivers/hwmon/adc128d818.c
index 5ffd81f19d01..0625e50d7a6e 100644
--- a/drivers/hwmon/adc128d818.c
+++ b/drivers/hwmon/adc128d818.c
@@ -239,50 +239,50 @@ static ssize_t adc128_show_alarm(struct device *dev,
239 return sprintf(buf, "%u\n", !!(alarms & mask)); 239 return sprintf(buf, "%u\n", !!(alarms & mask));
240} 240}
241 241
242static SENSOR_DEVICE_ATTR_2(in0_input, S_IWUSR | S_IRUGO, 242static SENSOR_DEVICE_ATTR_2(in0_input, S_IRUGO,
243 adc128_show_in, adc128_set_in, 0, 0); 243 adc128_show_in, NULL, 0, 0);
244static SENSOR_DEVICE_ATTR_2(in0_min, S_IWUSR | S_IRUGO, 244static SENSOR_DEVICE_ATTR_2(in0_min, S_IWUSR | S_IRUGO,
245 adc128_show_in, adc128_set_in, 0, 1); 245 adc128_show_in, adc128_set_in, 0, 1);
246static SENSOR_DEVICE_ATTR_2(in0_max, S_IWUSR | S_IRUGO, 246static SENSOR_DEVICE_ATTR_2(in0_max, S_IWUSR | S_IRUGO,
247 adc128_show_in, adc128_set_in, 0, 2); 247 adc128_show_in, adc128_set_in, 0, 2);
248 248
249static SENSOR_DEVICE_ATTR_2(in1_input, S_IWUSR | S_IRUGO, 249static SENSOR_DEVICE_ATTR_2(in1_input, S_IRUGO,
250 adc128_show_in, adc128_set_in, 1, 0); 250 adc128_show_in, NULL, 1, 0);
251static SENSOR_DEVICE_ATTR_2(in1_min, S_IWUSR | S_IRUGO, 251static SENSOR_DEVICE_ATTR_2(in1_min, S_IWUSR | S_IRUGO,
252 adc128_show_in, adc128_set_in, 1, 1); 252 adc128_show_in, adc128_set_in, 1, 1);
253static SENSOR_DEVICE_ATTR_2(in1_max, S_IWUSR | S_IRUGO, 253static SENSOR_DEVICE_ATTR_2(in1_max, S_IWUSR | S_IRUGO,
254 adc128_show_in, adc128_set_in, 1, 2); 254 adc128_show_in, adc128_set_in, 1, 2);
255 255
256static SENSOR_DEVICE_ATTR_2(in2_input, S_IWUSR | S_IRUGO, 256static SENSOR_DEVICE_ATTR_2(in2_input, S_IRUGO,
257 adc128_show_in, adc128_set_in, 2, 0); 257 adc128_show_in, NULL, 2, 0);
258static SENSOR_DEVICE_ATTR_2(in2_min, S_IWUSR | S_IRUGO, 258static SENSOR_DEVICE_ATTR_2(in2_min, S_IWUSR | S_IRUGO,
259 adc128_show_in, adc128_set_in, 2, 1); 259 adc128_show_in, adc128_set_in, 2, 1);
260static SENSOR_DEVICE_ATTR_2(in2_max, S_IWUSR | S_IRUGO, 260static SENSOR_DEVICE_ATTR_2(in2_max, S_IWUSR | S_IRUGO,
261 adc128_show_in, adc128_set_in, 2, 2); 261 adc128_show_in, adc128_set_in, 2, 2);
262 262
263static SENSOR_DEVICE_ATTR_2(in3_input, S_IWUSR | S_IRUGO, 263static SENSOR_DEVICE_ATTR_2(in3_input, S_IRUGO,
264 adc128_show_in, adc128_set_in, 3, 0); 264 adc128_show_in, NULL, 3, 0);
265static SENSOR_DEVICE_ATTR_2(in3_min, S_IWUSR | S_IRUGO, 265static SENSOR_DEVICE_ATTR_2(in3_min, S_IWUSR | S_IRUGO,
266 adc128_show_in, adc128_set_in, 3, 1); 266 adc128_show_in, adc128_set_in, 3, 1);
267static SENSOR_DEVICE_ATTR_2(in3_max, S_IWUSR | S_IRUGO, 267static SENSOR_DEVICE_ATTR_2(in3_max, S_IWUSR | S_IRUGO,
268 adc128_show_in, adc128_set_in, 3, 2); 268 adc128_show_in, adc128_set_in, 3, 2);
269 269
270static SENSOR_DEVICE_ATTR_2(in4_input, S_IWUSR | S_IRUGO, 270static SENSOR_DEVICE_ATTR_2(in4_input, S_IRUGO,
271 adc128_show_in, adc128_set_in, 4, 0); 271 adc128_show_in, NULL, 4, 0);
272static SENSOR_DEVICE_ATTR_2(in4_min, S_IWUSR | S_IRUGO, 272static SENSOR_DEVICE_ATTR_2(in4_min, S_IWUSR | S_IRUGO,
273 adc128_show_in, adc128_set_in, 4, 1); 273 adc128_show_in, adc128_set_in, 4, 1);
274static SENSOR_DEVICE_ATTR_2(in4_max, S_IWUSR | S_IRUGO, 274static SENSOR_DEVICE_ATTR_2(in4_max, S_IWUSR | S_IRUGO,
275 adc128_show_in, adc128_set_in, 4, 2); 275 adc128_show_in, adc128_set_in, 4, 2);
276 276
277static SENSOR_DEVICE_ATTR_2(in5_input, S_IWUSR | S_IRUGO, 277static SENSOR_DEVICE_ATTR_2(in5_input, S_IRUGO,
278 adc128_show_in, adc128_set_in, 5, 0); 278 adc128_show_in, NULL, 5, 0);
279static SENSOR_DEVICE_ATTR_2(in5_min, S_IWUSR | S_IRUGO, 279static SENSOR_DEVICE_ATTR_2(in5_min, S_IWUSR | S_IRUGO,
280 adc128_show_in, adc128_set_in, 5, 1); 280 adc128_show_in, adc128_set_in, 5, 1);
281static SENSOR_DEVICE_ATTR_2(in5_max, S_IWUSR | S_IRUGO, 281static SENSOR_DEVICE_ATTR_2(in5_max, S_IWUSR | S_IRUGO,
282 adc128_show_in, adc128_set_in, 5, 2); 282 adc128_show_in, adc128_set_in, 5, 2);
283 283
284static SENSOR_DEVICE_ATTR_2(in6_input, S_IWUSR | S_IRUGO, 284static SENSOR_DEVICE_ATTR_2(in6_input, S_IRUGO,
285 adc128_show_in, adc128_set_in, 6, 0); 285 adc128_show_in, NULL, 6, 0);
286static SENSOR_DEVICE_ATTR_2(in6_min, S_IWUSR | S_IRUGO, 286static SENSOR_DEVICE_ATTR_2(in6_min, S_IWUSR | S_IRUGO,
287 adc128_show_in, adc128_set_in, 6, 1); 287 adc128_show_in, adc128_set_in, 6, 1);
288static SENSOR_DEVICE_ATTR_2(in6_max, S_IWUSR | S_IRUGO, 288static SENSOR_DEVICE_ATTR_2(in6_max, S_IWUSR | S_IRUGO,
diff --git a/drivers/hwmon/adm1021.c b/drivers/hwmon/adm1021.c
index 3eb4281689b5..d74241bb278c 100644
--- a/drivers/hwmon/adm1021.c
+++ b/drivers/hwmon/adm1021.c
@@ -185,7 +185,7 @@ static ssize_t set_temp_max(struct device *dev,
185 struct adm1021_data *data = dev_get_drvdata(dev); 185 struct adm1021_data *data = dev_get_drvdata(dev);
186 struct i2c_client *client = data->client; 186 struct i2c_client *client = data->client;
187 long temp; 187 long temp;
188 int err; 188 int reg_val, err;
189 189
190 err = kstrtol(buf, 10, &temp); 190 err = kstrtol(buf, 10, &temp);
191 if (err) 191 if (err)
@@ -193,10 +193,11 @@ static ssize_t set_temp_max(struct device *dev,
193 temp /= 1000; 193 temp /= 1000;
194 194
195 mutex_lock(&data->update_lock); 195 mutex_lock(&data->update_lock);
196 data->temp_max[index] = clamp_val(temp, -128, 127); 196 reg_val = clamp_val(temp, -128, 127);
197 data->temp_max[index] = reg_val * 1000;
197 if (!read_only) 198 if (!read_only)
198 i2c_smbus_write_byte_data(client, ADM1021_REG_TOS_W(index), 199 i2c_smbus_write_byte_data(client, ADM1021_REG_TOS_W(index),
199 data->temp_max[index]); 200 reg_val);
200 mutex_unlock(&data->update_lock); 201 mutex_unlock(&data->update_lock);
201 202
202 return count; 203 return count;
@@ -210,7 +211,7 @@ static ssize_t set_temp_min(struct device *dev,
210 struct adm1021_data *data = dev_get_drvdata(dev); 211 struct adm1021_data *data = dev_get_drvdata(dev);
211 struct i2c_client *client = data->client; 212 struct i2c_client *client = data->client;
212 long temp; 213 long temp;
213 int err; 214 int reg_val, err;
214 215
215 err = kstrtol(buf, 10, &temp); 216 err = kstrtol(buf, 10, &temp);
216 if (err) 217 if (err)
@@ -218,10 +219,11 @@ static ssize_t set_temp_min(struct device *dev,
218 temp /= 1000; 219 temp /= 1000;
219 220
220 mutex_lock(&data->update_lock); 221 mutex_lock(&data->update_lock);
221 data->temp_min[index] = clamp_val(temp, -128, 127); 222 reg_val = clamp_val(temp, -128, 127);
223 data->temp_min[index] = reg_val * 1000;
222 if (!read_only) 224 if (!read_only)
223 i2c_smbus_write_byte_data(client, ADM1021_REG_THYST_W(index), 225 i2c_smbus_write_byte_data(client, ADM1021_REG_THYST_W(index),
224 data->temp_min[index]); 226 reg_val);
225 mutex_unlock(&data->update_lock); 227 mutex_unlock(&data->update_lock);
226 228
227 return count; 229 return count;
diff --git a/drivers/hwmon/adm1029.c b/drivers/hwmon/adm1029.c
index 78339e880bd6..2804571b269e 100644
--- a/drivers/hwmon/adm1029.c
+++ b/drivers/hwmon/adm1029.c
@@ -232,6 +232,9 @@ static ssize_t set_fan_div(struct device *dev,
232 /* Update the value */ 232 /* Update the value */
233 reg = (reg & 0x3F) | (val << 6); 233 reg = (reg & 0x3F) | (val << 6);
234 234
235 /* Update the cache */
236 data->fan_div[attr->index] = reg;
237
235 /* Write value */ 238 /* Write value */
236 i2c_smbus_write_byte_data(client, 239 i2c_smbus_write_byte_data(client,
237 ADM1029_REG_FAN_DIV[attr->index], reg); 240 ADM1029_REG_FAN_DIV[attr->index], reg);
diff --git a/drivers/hwmon/adm1031.c b/drivers/hwmon/adm1031.c
index a8a540ca8c34..51c1a5a165ab 100644
--- a/drivers/hwmon/adm1031.c
+++ b/drivers/hwmon/adm1031.c
@@ -365,6 +365,7 @@ set_auto_temp_min(struct device *dev, struct device_attribute *attr,
365 if (ret) 365 if (ret)
366 return ret; 366 return ret;
367 367
368 val = clamp_val(val, 0, 127000);
368 mutex_lock(&data->update_lock); 369 mutex_lock(&data->update_lock);
369 data->auto_temp[nr] = AUTO_TEMP_MIN_TO_REG(val, data->auto_temp[nr]); 370 data->auto_temp[nr] = AUTO_TEMP_MIN_TO_REG(val, data->auto_temp[nr]);
370 adm1031_write_value(client, ADM1031_REG_AUTO_TEMP(nr), 371 adm1031_write_value(client, ADM1031_REG_AUTO_TEMP(nr),
@@ -394,6 +395,7 @@ set_auto_temp_max(struct device *dev, struct device_attribute *attr,
394 if (ret) 395 if (ret)
395 return ret; 396 return ret;
396 397
398 val = clamp_val(val, 0, 127000);
397 mutex_lock(&data->update_lock); 399 mutex_lock(&data->update_lock);
398 data->temp_max[nr] = AUTO_TEMP_MAX_TO_REG(val, data->auto_temp[nr], 400 data->temp_max[nr] = AUTO_TEMP_MAX_TO_REG(val, data->auto_temp[nr],
399 data->pwm[nr]); 401 data->pwm[nr]);
@@ -696,7 +698,7 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *attr,
696 if (ret) 698 if (ret)
697 return ret; 699 return ret;
698 700
699 val = clamp_val(val, -55000, nr == 0 ? 127750 : 127875); 701 val = clamp_val(val, -55000, 127000);
700 mutex_lock(&data->update_lock); 702 mutex_lock(&data->update_lock);
701 data->temp_min[nr] = TEMP_TO_REG(val); 703 data->temp_min[nr] = TEMP_TO_REG(val);
702 adm1031_write_value(client, ADM1031_REG_TEMP_MIN(nr), 704 adm1031_write_value(client, ADM1031_REG_TEMP_MIN(nr),
@@ -717,7 +719,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *attr,
717 if (ret) 719 if (ret)
718 return ret; 720 return ret;
719 721
720 val = clamp_val(val, -55000, nr == 0 ? 127750 : 127875); 722 val = clamp_val(val, -55000, 127000);
721 mutex_lock(&data->update_lock); 723 mutex_lock(&data->update_lock);
722 data->temp_max[nr] = TEMP_TO_REG(val); 724 data->temp_max[nr] = TEMP_TO_REG(val);
723 adm1031_write_value(client, ADM1031_REG_TEMP_MAX(nr), 725 adm1031_write_value(client, ADM1031_REG_TEMP_MAX(nr),
@@ -738,7 +740,7 @@ static ssize_t set_temp_crit(struct device *dev, struct device_attribute *attr,
738 if (ret) 740 if (ret)
739 return ret; 741 return ret;
740 742
741 val = clamp_val(val, -55000, nr == 0 ? 127750 : 127875); 743 val = clamp_val(val, -55000, 127000);
742 mutex_lock(&data->update_lock); 744 mutex_lock(&data->update_lock);
743 data->temp_crit[nr] = TEMP_TO_REG(val); 745 data->temp_crit[nr] = TEMP_TO_REG(val);
744 adm1031_write_value(client, ADM1031_REG_TEMP_CRIT(nr), 746 adm1031_write_value(client, ADM1031_REG_TEMP_CRIT(nr),
diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c
index 0f4dea5ccf17..9ee3913850d6 100644
--- a/drivers/hwmon/adt7470.c
+++ b/drivers/hwmon/adt7470.c
@@ -515,7 +515,7 @@ static ssize_t set_temp_min(struct device *dev,
515 return -EINVAL; 515 return -EINVAL;
516 516
517 temp = DIV_ROUND_CLOSEST(temp, 1000); 517 temp = DIV_ROUND_CLOSEST(temp, 1000);
518 temp = clamp_val(temp, 0, 255); 518 temp = clamp_val(temp, -128, 127);
519 519
520 mutex_lock(&data->lock); 520 mutex_lock(&data->lock);
521 data->temp_min[attr->index] = temp; 521 data->temp_min[attr->index] = temp;
@@ -549,7 +549,7 @@ static ssize_t set_temp_max(struct device *dev,
549 return -EINVAL; 549 return -EINVAL;
550 550
551 temp = DIV_ROUND_CLOSEST(temp, 1000); 551 temp = DIV_ROUND_CLOSEST(temp, 1000);
552 temp = clamp_val(temp, 0, 255); 552 temp = clamp_val(temp, -128, 127);
553 553
554 mutex_lock(&data->lock); 554 mutex_lock(&data->lock);
555 data->temp_max[attr->index] = temp; 555 data->temp_max[attr->index] = temp;
@@ -826,7 +826,7 @@ static ssize_t set_pwm_tmin(struct device *dev,
826 return -EINVAL; 826 return -EINVAL;
827 827
828 temp = DIV_ROUND_CLOSEST(temp, 1000); 828 temp = DIV_ROUND_CLOSEST(temp, 1000);
829 temp = clamp_val(temp, 0, 255); 829 temp = clamp_val(temp, -128, 127);
830 830
831 mutex_lock(&data->lock); 831 mutex_lock(&data->lock);
832 data->pwm_tmin[attr->index] = temp; 832 data->pwm_tmin[attr->index] = temp;
diff --git a/drivers/hwmon/amc6821.c b/drivers/hwmon/amc6821.c
index eea817296513..9f2be3dd28f3 100644
--- a/drivers/hwmon/amc6821.c
+++ b/drivers/hwmon/amc6821.c
@@ -704,7 +704,7 @@ static SENSOR_DEVICE_ATTR(temp1_max_alarm, S_IRUGO,
704 get_temp_alarm, NULL, IDX_TEMP1_MAX); 704 get_temp_alarm, NULL, IDX_TEMP1_MAX);
705static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO, 705static SENSOR_DEVICE_ATTR(temp1_crit_alarm, S_IRUGO,
706 get_temp_alarm, NULL, IDX_TEMP1_CRIT); 706 get_temp_alarm, NULL, IDX_TEMP1_CRIT);
707static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO | S_IWUSR, 707static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO,
708 get_temp, NULL, IDX_TEMP2_INPUT); 708 get_temp, NULL, IDX_TEMP2_INPUT);
709static SENSOR_DEVICE_ATTR(temp2_min, S_IRUGO | S_IWUSR, get_temp, 709static SENSOR_DEVICE_ATTR(temp2_min, S_IRUGO | S_IWUSR, get_temp,
710 set_temp, IDX_TEMP2_MIN); 710 set_temp, IDX_TEMP2_MIN);
diff --git a/drivers/hwmon/da9052-hwmon.c b/drivers/hwmon/da9052-hwmon.c
index afd31042b452..d14ab3c45daa 100644
--- a/drivers/hwmon/da9052-hwmon.c
+++ b/drivers/hwmon/da9052-hwmon.c
@@ -194,7 +194,7 @@ static ssize_t da9052_hwmon_show_name(struct device *dev,
194 struct device_attribute *devattr, 194 struct device_attribute *devattr,
195 char *buf) 195 char *buf)
196{ 196{
197 return sprintf(buf, "da9052-hwmon\n"); 197 return sprintf(buf, "da9052\n");
198} 198}
199 199
200static ssize_t show_label(struct device *dev, 200static ssize_t show_label(struct device *dev,
diff --git a/drivers/hwmon/da9055-hwmon.c b/drivers/hwmon/da9055-hwmon.c
index 73b3865f1207..35eb7738d711 100644
--- a/drivers/hwmon/da9055-hwmon.c
+++ b/drivers/hwmon/da9055-hwmon.c
@@ -204,7 +204,7 @@ static ssize_t da9055_hwmon_show_name(struct device *dev,
204 struct device_attribute *devattr, 204 struct device_attribute *devattr,
205 char *buf) 205 char *buf)
206{ 206{
207 return sprintf(buf, "da9055-hwmon\n"); 207 return sprintf(buf, "da9055\n");
208} 208}
209 209
210static ssize_t show_label(struct device *dev, 210static ssize_t show_label(struct device *dev,
diff --git a/drivers/hwmon/emc2103.c b/drivers/hwmon/emc2103.c
index fd892dd48e4c..78002de46cb6 100644
--- a/drivers/hwmon/emc2103.c
+++ b/drivers/hwmon/emc2103.c
@@ -250,9 +250,7 @@ static ssize_t set_temp_min(struct device *dev, struct device_attribute *da,
250 if (result < 0) 250 if (result < 0)
251 return result; 251 return result;
252 252
253 val = DIV_ROUND_CLOSEST(val, 1000); 253 val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -63, 127);
254 if ((val < -63) || (val > 127))
255 return -EINVAL;
256 254
257 mutex_lock(&data->update_lock); 255 mutex_lock(&data->update_lock);
258 data->temp_min[nr] = val; 256 data->temp_min[nr] = val;
@@ -274,9 +272,7 @@ static ssize_t set_temp_max(struct device *dev, struct device_attribute *da,
274 if (result < 0) 272 if (result < 0)
275 return result; 273 return result;
276 274
277 val = DIV_ROUND_CLOSEST(val, 1000); 275 val = clamp_val(DIV_ROUND_CLOSEST(val, 1000), -63, 127);
278 if ((val < -63) || (val > 127))
279 return -EINVAL;
280 276
281 mutex_lock(&data->update_lock); 277 mutex_lock(&data->update_lock);
282 data->temp_max[nr] = val; 278 data->temp_max[nr] = val;
@@ -390,15 +386,14 @@ static ssize_t set_fan_target(struct device *dev, struct device_attribute *da,
390{ 386{
391 struct emc2103_data *data = emc2103_update_device(dev); 387 struct emc2103_data *data = emc2103_update_device(dev);
392 struct i2c_client *client = to_i2c_client(dev); 388 struct i2c_client *client = to_i2c_client(dev);
393 long rpm_target; 389 unsigned long rpm_target;
394 390
395 int result = kstrtol(buf, 10, &rpm_target); 391 int result = kstrtoul(buf, 10, &rpm_target);
396 if (result < 0) 392 if (result < 0)
397 return result; 393 return result;
398 394
399 /* Datasheet states 16384 as maximum RPM target (table 3.2) */ 395 /* Datasheet states 16384 as maximum RPM target (table 3.2) */
400 if ((rpm_target < 0) || (rpm_target > 16384)) 396 rpm_target = clamp_val(rpm_target, 0, 16384);
401 return -EINVAL;
402 397
403 mutex_lock(&data->update_lock); 398 mutex_lock(&data->update_lock);
404 399
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index ba35e4d530b5..2566c43dd1e9 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -538,7 +538,7 @@ static int gpio_fan_probe(struct platform_device *pdev)
538 538
539 /* Make this driver part of hwmon class. */ 539 /* Make this driver part of hwmon class. */
540 fan_data->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev, 540 fan_data->hwmon_dev = hwmon_device_register_with_groups(&pdev->dev,
541 "gpio-fan", fan_data, 541 "gpio_fan", fan_data,
542 gpio_fan_groups); 542 gpio_fan_groups);
543 if (IS_ERR(fan_data->hwmon_dev)) 543 if (IS_ERR(fan_data->hwmon_dev))
544 return PTR_ERR(fan_data->hwmon_dev); 544 return PTR_ERR(fan_data->hwmon_dev);
diff --git a/drivers/hwmon/ntc_thermistor.c b/drivers/hwmon/ntc_thermistor.c
index e76feb86a1d4..ae66f42c4d6d 100644
--- a/drivers/hwmon/ntc_thermistor.c
+++ b/drivers/hwmon/ntc_thermistor.c
@@ -163,6 +163,18 @@ static int ntc_adc_iio_read(struct ntc_thermistor_platform_data *pdata)
163} 163}
164 164
165static const struct of_device_id ntc_match[] = { 165static const struct of_device_id ntc_match[] = {
166 { .compatible = "murata,ncp15wb473",
167 .data = &ntc_thermistor_id[0] },
168 { .compatible = "murata,ncp18wb473",
169 .data = &ntc_thermistor_id[1] },
170 { .compatible = "murata,ncp21wb473",
171 .data = &ntc_thermistor_id[2] },
172 { .compatible = "murata,ncp03wb473",
173 .data = &ntc_thermistor_id[3] },
174 { .compatible = "murata,ncp15wl333",
175 .data = &ntc_thermistor_id[4] },
176
177 /* Usage of vendor name "ntc" is deprecated */
166 { .compatible = "ntc,ncp15wb473", 178 { .compatible = "ntc,ncp15wb473",
167 .data = &ntc_thermistor_id[0] }, 179 .data = &ntc_thermistor_id[0] },
168 { .compatible = "ntc,ncp18wb473", 180 { .compatible = "ntc,ncp18wb473",
@@ -500,7 +512,7 @@ static int ntc_thermistor_probe(struct platform_device *pdev)
500 } 512 }
501 513
502 dev_info(&pdev->dev, "Thermistor type: %s successfully probed.\n", 514 dev_info(&pdev->dev, "Thermistor type: %s successfully probed.\n",
503 pdev->name); 515 pdev_id->name);
504 516
505 return 0; 517 return 0;
506err_after_sysfs: 518err_after_sysfs:
@@ -534,7 +546,7 @@ static struct platform_driver ntc_thermistor_driver = {
534 546
535module_platform_driver(ntc_thermistor_driver); 547module_platform_driver(ntc_thermistor_driver);
536 548
537MODULE_DESCRIPTION("NTC Thermistor Driver"); 549MODULE_DESCRIPTION("NTC Thermistor Driver from Murata");
538MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>"); 550MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
539MODULE_LICENSE("GPL"); 551MODULE_LICENSE("GPL");
540MODULE_ALIAS("platform:ntc-thermistor"); 552MODULE_ALIAS("platform:ntc-thermistor");
diff --git a/drivers/hwmon/w83l786ng.c b/drivers/hwmon/w83l786ng.c
index 6ed76ceb9270..32487c19cbfc 100644
--- a/drivers/hwmon/w83l786ng.c
+++ b/drivers/hwmon/w83l786ng.c
@@ -249,7 +249,7 @@ static ssize_t show_##reg(struct device *dev, struct device_attribute *attr, \
249 int nr = to_sensor_dev_attr(attr)->index; \ 249 int nr = to_sensor_dev_attr(attr)->index; \
250 struct w83l786ng_data *data = w83l786ng_update_device(dev); \ 250 struct w83l786ng_data *data = w83l786ng_update_device(dev); \
251 return sprintf(buf, "%d\n", \ 251 return sprintf(buf, "%d\n", \
252 FAN_FROM_REG(data->fan[nr], DIV_FROM_REG(data->fan_div[nr]))); \ 252 FAN_FROM_REG(data->reg[nr], DIV_FROM_REG(data->fan_div[nr]))); \
253} 253}
254 254
255show_fan_reg(fan); 255show_fan_reg(fan);
diff --git a/drivers/i2c/busses/i2c-sun6i-p2wi.c b/drivers/i2c/busses/i2c-sun6i-p2wi.c
index 09de4fd12d57..4d75d4759709 100644
--- a/drivers/i2c/busses/i2c-sun6i-p2wi.c
+++ b/drivers/i2c/busses/i2c-sun6i-p2wi.c
@@ -22,7 +22,6 @@
22 * 22 *
23 */ 23 */
24#include <linux/clk.h> 24#include <linux/clk.h>
25#include <linux/module.h>
26#include <linux/i2c.h> 25#include <linux/i2c.h>
27#include <linux/io.h> 26#include <linux/io.h>
28#include <linux/interrupt.h> 27#include <linux/interrupt.h>
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
index f7f9865b8b89..f6d313e528de 100644
--- a/drivers/i2c/muxes/Kconfig
+++ b/drivers/i2c/muxes/Kconfig
@@ -40,6 +40,7 @@ config I2C_MUX_PCA9541
40 40
41config I2C_MUX_PCA954x 41config I2C_MUX_PCA954x
42 tristate "Philips PCA954x I2C Mux/switches" 42 tristate "Philips PCA954x I2C Mux/switches"
43 depends on GPIOLIB
43 help 44 help
44 If you say yes here you get support for the Philips PCA954x 45 If you say yes here you get support for the Philips PCA954x
45 I2C mux/switch devices. 46 I2C mux/switch devices.
diff --git a/drivers/iio/accel/hid-sensor-accel-3d.c b/drivers/iio/accel/hid-sensor-accel-3d.c
index 69abf9163df7..54e464e4bb72 100644
--- a/drivers/iio/accel/hid-sensor-accel-3d.c
+++ b/drivers/iio/accel/hid-sensor-accel-3d.c
@@ -110,7 +110,6 @@ static int accel_3d_read_raw(struct iio_dev *indio_dev,
110 struct accel_3d_state *accel_state = iio_priv(indio_dev); 110 struct accel_3d_state *accel_state = iio_priv(indio_dev);
111 int report_id = -1; 111 int report_id = -1;
112 u32 address; 112 u32 address;
113 int ret;
114 int ret_type; 113 int ret_type;
115 s32 poll_value; 114 s32 poll_value;
116 115
@@ -151,14 +150,12 @@ static int accel_3d_read_raw(struct iio_dev *indio_dev,
151 ret_type = IIO_VAL_INT; 150 ret_type = IIO_VAL_INT;
152 break; 151 break;
153 case IIO_CHAN_INFO_SAMP_FREQ: 152 case IIO_CHAN_INFO_SAMP_FREQ:
154 ret = hid_sensor_read_samp_freq_value( 153 ret_type = hid_sensor_read_samp_freq_value(
155 &accel_state->common_attributes, val, val2); 154 &accel_state->common_attributes, val, val2);
156 ret_type = IIO_VAL_INT_PLUS_MICRO;
157 break; 155 break;
158 case IIO_CHAN_INFO_HYSTERESIS: 156 case IIO_CHAN_INFO_HYSTERESIS:
159 ret = hid_sensor_read_raw_hyst_value( 157 ret_type = hid_sensor_read_raw_hyst_value(
160 &accel_state->common_attributes, val, val2); 158 &accel_state->common_attributes, val, val2);
161 ret_type = IIO_VAL_INT_PLUS_MICRO;
162 break; 159 break;
163 default: 160 default:
164 ret_type = -EINVAL; 161 ret_type = -EINVAL;
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index 17aeea170566..2a5fa9a436e5 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -111,8 +111,14 @@ static const int mma8452_samp_freq[8][2] = {
111 {6, 250000}, {1, 560000} 111 {6, 250000}, {1, 560000}
112}; 112};
113 113
114/*
115 * Hardware has fullscale of -2G, -4G, -8G corresponding to raw value -2048
116 * The userspace interface uses m/s^2 and we declare micro units
117 * So scale factor is given by:
118 * g * N * 1000000 / 2048 for N = 2, 4, 8 and g=9.80665
119 */
114static const int mma8452_scales[3][2] = { 120static const int mma8452_scales[3][2] = {
115 {0, 977}, {0, 1953}, {0, 3906} 121 {0, 9577}, {0, 19154}, {0, 38307}
116}; 122};
117 123
118static ssize_t mma8452_show_samp_freq_avail(struct device *dev, 124static ssize_t mma8452_show_samp_freq_avail(struct device *dev,
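As a quick standalone cross-check (not part of the patch, and assuming only the formula given in the new comment), the replacement table entries follow directly from g * N * 1000000 / 2048 with g = 9.80665 and N = 2, 4, 8:

/* check_mma8452_scales.c - reproduce the new mma8452_scales micro-values */
#include <stdio.h>

int main(void)
{
	const double g = 9.80665;	/* standard gravity, m/s^2 */
	int n;

	for (n = 2; n <= 8; n *= 2)
		printf("N=%d -> %.0f micro m/s^2 per LSB\n",
		       n, g * n * 1000000.0 / 2048.0);
	/* prints 9577, 19154, 38307, matching {0, 9577}, {0, 19154}, {0, 38307} */
	return 0;
}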
diff --git a/drivers/iio/adc/ad799x.c b/drivers/iio/adc/ad799x.c
index 39b4cb48d738..6eba301ee03d 100644
--- a/drivers/iio/adc/ad799x.c
+++ b/drivers/iio/adc/ad799x.c
@@ -427,9 +427,12 @@ static int ad799x_write_event_value(struct iio_dev *indio_dev,
427 int ret; 427 int ret;
428 struct ad799x_state *st = iio_priv(indio_dev); 428 struct ad799x_state *st = iio_priv(indio_dev);
429 429
430 if (val < 0 || val > RES_MASK(chan->scan_type.realbits))
431 return -EINVAL;
432
430 mutex_lock(&indio_dev->mlock); 433 mutex_lock(&indio_dev->mlock);
431 ret = ad799x_i2c_write16(st, ad799x_threshold_reg(chan, dir, info), 434 ret = ad799x_i2c_write16(st, ad799x_threshold_reg(chan, dir, info),
432 val); 435 val << chan->scan_type.shift);
433 mutex_unlock(&indio_dev->mlock); 436 mutex_unlock(&indio_dev->mlock);
434 437
435 return ret; 438 return ret;
@@ -452,7 +455,8 @@ static int ad799x_read_event_value(struct iio_dev *indio_dev,
452 mutex_unlock(&indio_dev->mlock); 455 mutex_unlock(&indio_dev->mlock);
453 if (ret < 0) 456 if (ret < 0)
454 return ret; 457 return ret;
455 *val = valin; 458 *val = (valin >> chan->scan_type.shift) &
459 RES_MASK(chan->scan_type.realbits);
456 460
457 return IIO_VAL_INT; 461 return IIO_VAL_INT;
458} 462}
diff --git a/drivers/iio/adc/ti_am335x_adc.c b/drivers/iio/adc/ti_am335x_adc.c
index a4db3026bec6..d5dc4c6ce86c 100644
--- a/drivers/iio/adc/ti_am335x_adc.c
+++ b/drivers/iio/adc/ti_am335x_adc.c
@@ -374,7 +374,7 @@ static int tiadc_read_raw(struct iio_dev *indio_dev,
374 return -EAGAIN; 374 return -EAGAIN;
375 } 375 }
376 } 376 }
377 map_val = chan->channel + TOTAL_CHANNELS; 377 map_val = adc_dev->channel_step[chan->scan_index];
378 378
379 /* 379 /*
380 * We check the complete FIFO. We programmed just one entry but in case 380 * We check the complete FIFO. We programmed just one entry but in case
diff --git a/drivers/iio/gyro/hid-sensor-gyro-3d.c b/drivers/iio/gyro/hid-sensor-gyro-3d.c
index 40f4e4935d0d..fa034a3dad78 100644
--- a/drivers/iio/gyro/hid-sensor-gyro-3d.c
+++ b/drivers/iio/gyro/hid-sensor-gyro-3d.c
@@ -110,7 +110,6 @@ static int gyro_3d_read_raw(struct iio_dev *indio_dev,
110 struct gyro_3d_state *gyro_state = iio_priv(indio_dev); 110 struct gyro_3d_state *gyro_state = iio_priv(indio_dev);
111 int report_id = -1; 111 int report_id = -1;
112 u32 address; 112 u32 address;
113 int ret;
114 int ret_type; 113 int ret_type;
115 s32 poll_value; 114 s32 poll_value;
116 115
@@ -151,14 +150,12 @@ static int gyro_3d_read_raw(struct iio_dev *indio_dev,
151 ret_type = IIO_VAL_INT; 150 ret_type = IIO_VAL_INT;
152 break; 151 break;
153 case IIO_CHAN_INFO_SAMP_FREQ: 152 case IIO_CHAN_INFO_SAMP_FREQ:
154 ret = hid_sensor_read_samp_freq_value( 153 ret_type = hid_sensor_read_samp_freq_value(
155 &gyro_state->common_attributes, val, val2); 154 &gyro_state->common_attributes, val, val2);
156 ret_type = IIO_VAL_INT_PLUS_MICRO;
157 break; 155 break;
158 case IIO_CHAN_INFO_HYSTERESIS: 156 case IIO_CHAN_INFO_HYSTERESIS:
159 ret = hid_sensor_read_raw_hyst_value( 157 ret_type = hid_sensor_read_raw_hyst_value(
160 &gyro_state->common_attributes, val, val2); 158 &gyro_state->common_attributes, val, val2);
161 ret_type = IIO_VAL_INT_PLUS_MICRO;
162 break; 159 break;
163 default: 160 default:
164 ret_type = -EINVAL; 161 ret_type = -EINVAL;
diff --git a/drivers/iio/industrialio-event.c b/drivers/iio/industrialio-event.c
index 258a973a1fb8..bfbf4d419f41 100644
--- a/drivers/iio/industrialio-event.c
+++ b/drivers/iio/industrialio-event.c
@@ -345,6 +345,9 @@ static int iio_device_add_event(struct iio_dev *indio_dev,
345 &indio_dev->event_interface->dev_attr_list); 345 &indio_dev->event_interface->dev_attr_list);
346 kfree(postfix); 346 kfree(postfix);
347 347
348 if ((ret == -EBUSY) && (shared_by != IIO_SEPARATE))
349 continue;
350
348 if (ret) 351 if (ret)
349 return ret; 352 return ret;
350 353
diff --git a/drivers/iio/inkern.c b/drivers/iio/inkern.c
index d833d55052ea..c7497009d60a 100644
--- a/drivers/iio/inkern.c
+++ b/drivers/iio/inkern.c
@@ -183,7 +183,7 @@ static struct iio_channel *of_iio_channel_get_by_name(struct device_node *np,
183 else if (name && index >= 0) { 183 else if (name && index >= 0) {
184 pr_err("ERROR: could not get IIO channel %s:%s(%i)\n", 184 pr_err("ERROR: could not get IIO channel %s:%s(%i)\n",
185 np->full_name, name ? name : "", index); 185 np->full_name, name ? name : "", index);
186 return chan; 186 return NULL;
187 } 187 }
188 188
189 /* 189 /*
@@ -193,8 +193,9 @@ static struct iio_channel *of_iio_channel_get_by_name(struct device_node *np,
193 */ 193 */
194 np = np->parent; 194 np = np->parent;
195 if (np && !of_get_property(np, "io-channel-ranges", NULL)) 195 if (np && !of_get_property(np, "io-channel-ranges", NULL))
196 break; 196 return NULL;
197 } 197 }
198
198 return chan; 199 return chan;
199} 200}
200 201
@@ -317,6 +318,7 @@ struct iio_channel *iio_channel_get(struct device *dev,
317 if (channel != NULL) 318 if (channel != NULL)
318 return channel; 319 return channel;
319 } 320 }
321
320 return iio_channel_get_sys(name, channel_name); 322 return iio_channel_get_sys(name, channel_name);
321} 323}
322EXPORT_SYMBOL_GPL(iio_channel_get); 324EXPORT_SYMBOL_GPL(iio_channel_get);
diff --git a/drivers/iio/light/hid-sensor-als.c b/drivers/iio/light/hid-sensor-als.c
index f34c94380b41..96e71e103ea7 100644
--- a/drivers/iio/light/hid-sensor-als.c
+++ b/drivers/iio/light/hid-sensor-als.c
@@ -79,7 +79,6 @@ static int als_read_raw(struct iio_dev *indio_dev,
79 struct als_state *als_state = iio_priv(indio_dev); 79 struct als_state *als_state = iio_priv(indio_dev);
80 int report_id = -1; 80 int report_id = -1;
81 u32 address; 81 u32 address;
82 int ret;
83 int ret_type; 82 int ret_type;
84 s32 poll_value; 83 s32 poll_value;
85 84
@@ -129,14 +128,12 @@ static int als_read_raw(struct iio_dev *indio_dev,
129 ret_type = IIO_VAL_INT; 128 ret_type = IIO_VAL_INT;
130 break; 129 break;
131 case IIO_CHAN_INFO_SAMP_FREQ: 130 case IIO_CHAN_INFO_SAMP_FREQ:
132 ret = hid_sensor_read_samp_freq_value( 131 ret_type = hid_sensor_read_samp_freq_value(
133 &als_state->common_attributes, val, val2); 132 &als_state->common_attributes, val, val2);
134 ret_type = IIO_VAL_INT_PLUS_MICRO;
135 break; 133 break;
136 case IIO_CHAN_INFO_HYSTERESIS: 134 case IIO_CHAN_INFO_HYSTERESIS:
137 ret = hid_sensor_read_raw_hyst_value( 135 ret_type = hid_sensor_read_raw_hyst_value(
138 &als_state->common_attributes, val, val2); 136 &als_state->common_attributes, val, val2);
139 ret_type = IIO_VAL_INT_PLUS_MICRO;
140 break; 137 break;
141 default: 138 default:
142 ret_type = -EINVAL; 139 ret_type = -EINVAL;
diff --git a/drivers/iio/light/hid-sensor-prox.c b/drivers/iio/light/hid-sensor-prox.c
index d203ef4d892f..412bae86d6ae 100644
--- a/drivers/iio/light/hid-sensor-prox.c
+++ b/drivers/iio/light/hid-sensor-prox.c
@@ -74,7 +74,6 @@ static int prox_read_raw(struct iio_dev *indio_dev,
74 struct prox_state *prox_state = iio_priv(indio_dev); 74 struct prox_state *prox_state = iio_priv(indio_dev);
75 int report_id = -1; 75 int report_id = -1;
76 u32 address; 76 u32 address;
77 int ret;
78 int ret_type; 77 int ret_type;
79 s32 poll_value; 78 s32 poll_value;
80 79
@@ -125,14 +124,12 @@ static int prox_read_raw(struct iio_dev *indio_dev,
125 ret_type = IIO_VAL_INT; 124 ret_type = IIO_VAL_INT;
126 break; 125 break;
127 case IIO_CHAN_INFO_SAMP_FREQ: 126 case IIO_CHAN_INFO_SAMP_FREQ:
128 ret = hid_sensor_read_samp_freq_value( 127 ret_type = hid_sensor_read_samp_freq_value(
129 &prox_state->common_attributes, val, val2); 128 &prox_state->common_attributes, val, val2);
130 ret_type = IIO_VAL_INT_PLUS_MICRO;
131 break; 129 break;
132 case IIO_CHAN_INFO_HYSTERESIS: 130 case IIO_CHAN_INFO_HYSTERESIS:
133 ret = hid_sensor_read_raw_hyst_value( 131 ret_type = hid_sensor_read_raw_hyst_value(
134 &prox_state->common_attributes, val, val2); 132 &prox_state->common_attributes, val, val2);
135 ret_type = IIO_VAL_INT_PLUS_MICRO;
136 break; 133 break;
137 default: 134 default:
138 ret_type = -EINVAL; 135 ret_type = -EINVAL;
diff --git a/drivers/iio/light/tcs3472.c b/drivers/iio/light/tcs3472.c
index fe063a0a21cd..752569985d1d 100644
--- a/drivers/iio/light/tcs3472.c
+++ b/drivers/iio/light/tcs3472.c
@@ -52,6 +52,7 @@
52 52
53struct tcs3472_data { 53struct tcs3472_data {
54 struct i2c_client *client; 54 struct i2c_client *client;
55 struct mutex lock;
55 u8 enable; 56 u8 enable;
56 u8 control; 57 u8 control;
57 u8 atime; 58 u8 atime;
@@ -116,10 +117,17 @@ static int tcs3472_read_raw(struct iio_dev *indio_dev,
116 117
117 switch (mask) { 118 switch (mask) {
118 case IIO_CHAN_INFO_RAW: 119 case IIO_CHAN_INFO_RAW:
120 if (iio_buffer_enabled(indio_dev))
121 return -EBUSY;
122
123 mutex_lock(&data->lock);
119 ret = tcs3472_req_data(data); 124 ret = tcs3472_req_data(data);
120 if (ret < 0) 125 if (ret < 0) {
126 mutex_unlock(&data->lock);
121 return ret; 127 return ret;
128 }
122 ret = i2c_smbus_read_word_data(data->client, chan->address); 129 ret = i2c_smbus_read_word_data(data->client, chan->address);
130 mutex_unlock(&data->lock);
123 if (ret < 0) 131 if (ret < 0)
124 return ret; 132 return ret;
125 *val = ret; 133 *val = ret;
@@ -255,6 +263,7 @@ static int tcs3472_probe(struct i2c_client *client,
255 data = iio_priv(indio_dev); 263 data = iio_priv(indio_dev);
256 i2c_set_clientdata(client, indio_dev); 264 i2c_set_clientdata(client, indio_dev);
257 data->client = client; 265 data->client = client;
266 mutex_init(&data->lock);
258 267
259 indio_dev->dev.parent = &client->dev; 268 indio_dev->dev.parent = &client->dev;
260 indio_dev->info = &tcs3472_info; 269 indio_dev->info = &tcs3472_info;
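The tcs3472 change closes a race in the two-step raw read (trigger a measurement, then read the result register) by serialising it with a per-device mutex initialised in probe(), and it refuses raw reads while the triggered buffer owns the chip. A condensed sketch of that read path, using the field names from the hunk:

        case IIO_CHAN_INFO_RAW:
                if (iio_buffer_enabled(indio_dev))
                        return -EBUSY;          /* buffered capture owns the device */

                mutex_lock(&data->lock);        /* mutex_init(&data->lock) in probe() */
                ret = tcs3472_req_data(data);
                if (!ret)
                        ret = i2c_smbus_read_word_data(data->client, chan->address);
                mutex_unlock(&data->lock);
                if (ret < 0)
                        return ret;
                *val = ret;
                return IIO_VAL_INT;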
diff --git a/drivers/iio/magnetometer/hid-sensor-magn-3d.c b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
index 41cf29e2a371..b2b0937d5133 100644
--- a/drivers/iio/magnetometer/hid-sensor-magn-3d.c
+++ b/drivers/iio/magnetometer/hid-sensor-magn-3d.c
@@ -110,7 +110,6 @@ static int magn_3d_read_raw(struct iio_dev *indio_dev,
110 struct magn_3d_state *magn_state = iio_priv(indio_dev); 110 struct magn_3d_state *magn_state = iio_priv(indio_dev);
111 int report_id = -1; 111 int report_id = -1;
112 u32 address; 112 u32 address;
113 int ret;
114 int ret_type; 113 int ret_type;
115 s32 poll_value; 114 s32 poll_value;
116 115
@@ -153,14 +152,12 @@ static int magn_3d_read_raw(struct iio_dev *indio_dev,
153 ret_type = IIO_VAL_INT; 152 ret_type = IIO_VAL_INT;
154 break; 153 break;
155 case IIO_CHAN_INFO_SAMP_FREQ: 154 case IIO_CHAN_INFO_SAMP_FREQ:
156 ret = hid_sensor_read_samp_freq_value( 155 ret_type = hid_sensor_read_samp_freq_value(
157 &magn_state->common_attributes, val, val2); 156 &magn_state->common_attributes, val, val2);
158 ret_type = IIO_VAL_INT_PLUS_MICRO;
159 break; 157 break;
160 case IIO_CHAN_INFO_HYSTERESIS: 158 case IIO_CHAN_INFO_HYSTERESIS:
161 ret = hid_sensor_read_raw_hyst_value( 159 ret_type = hid_sensor_read_raw_hyst_value(
162 &magn_state->common_attributes, val, val2); 160 &magn_state->common_attributes, val, val2);
163 ret_type = IIO_VAL_INT_PLUS_MICRO;
164 break; 161 break;
165 default: 162 default:
166 ret_type = -EINVAL; 163 ret_type = -EINVAL;
diff --git a/drivers/iio/pressure/hid-sensor-press.c b/drivers/iio/pressure/hid-sensor-press.c
index 1cd190c73788..2c0d2a4fed8c 100644
--- a/drivers/iio/pressure/hid-sensor-press.c
+++ b/drivers/iio/pressure/hid-sensor-press.c
@@ -78,7 +78,6 @@ static int press_read_raw(struct iio_dev *indio_dev,
78 struct press_state *press_state = iio_priv(indio_dev); 78 struct press_state *press_state = iio_priv(indio_dev);
79 int report_id = -1; 79 int report_id = -1;
80 u32 address; 80 u32 address;
81 int ret;
82 int ret_type; 81 int ret_type;
83 s32 poll_value; 82 s32 poll_value;
84 83
@@ -128,14 +127,12 @@ static int press_read_raw(struct iio_dev *indio_dev,
128 ret_type = IIO_VAL_INT; 127 ret_type = IIO_VAL_INT;
129 break; 128 break;
130 case IIO_CHAN_INFO_SAMP_FREQ: 129 case IIO_CHAN_INFO_SAMP_FREQ:
131 ret = hid_sensor_read_samp_freq_value( 130 ret_type = hid_sensor_read_samp_freq_value(
132 &press_state->common_attributes, val, val2); 131 &press_state->common_attributes, val, val2);
133 ret_type = IIO_VAL_INT_PLUS_MICRO;
134 break; 132 break;
135 case IIO_CHAN_INFO_HYSTERESIS: 133 case IIO_CHAN_INFO_HYSTERESIS:
136 ret = hid_sensor_read_raw_hyst_value( 134 ret_type = hid_sensor_read_raw_hyst_value(
137 &press_state->common_attributes, val, val2); 135 &press_state->common_attributes, val, val2);
138 ret_type = IIO_VAL_INT_PLUS_MICRO;
139 break; 136 break;
140 default: 137 default:
141 ret_type = -EINVAL; 138 ret_type = -EINVAL;
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 5e153f6d4b48..768a0fb67dd6 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -432,8 +432,17 @@ static void arp_failure_discard(void *handle, struct sk_buff *skb)
432 */ 432 */
433static void act_open_req_arp_failure(void *handle, struct sk_buff *skb) 433static void act_open_req_arp_failure(void *handle, struct sk_buff *skb)
434{ 434{
435 struct c4iw_ep *ep = handle;
436
435 printk(KERN_ERR MOD "ARP failure duing connect\n"); 437 printk(KERN_ERR MOD "ARP failure duing connect\n");
436 kfree_skb(skb); 438 kfree_skb(skb);
439 connect_reply_upcall(ep, -EHOSTUNREACH);
440 state_set(&ep->com, DEAD);
441 remove_handle(ep->com.dev, &ep->com.dev->atid_idr, ep->atid);
442 cxgb4_free_atid(ep->com.dev->rdev.lldi.tids, ep->atid);
443 dst_release(ep->dst);
444 cxgb4_l2t_release(ep->l2t);
445 c4iw_put_ep(&ep->com);
437} 446}
438 447
439/* 448/*
@@ -658,7 +667,7 @@ static int send_connect(struct c4iw_ep *ep)
658 opt2 |= T5_OPT_2_VALID; 667 opt2 |= T5_OPT_2_VALID;
659 opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE); 668 opt2 |= V_CONG_CNTRL(CONG_ALG_TAHOE);
660 } 669 }
661 t4_set_arp_err_handler(skb, NULL, act_open_req_arp_failure); 670 t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure);
662 671
663 if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) { 672 if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {
664 if (ep->com.remote_addr.ss_family == AF_INET) { 673 if (ep->com.remote_addr.ss_family == AF_INET) {
@@ -2180,7 +2189,6 @@ static void reject_cr(struct c4iw_dev *dev, u32 hwtid, struct sk_buff *skb)
2180 PDBG("%s c4iw_dev %p tid %u\n", __func__, dev, hwtid); 2189 PDBG("%s c4iw_dev %p tid %u\n", __func__, dev, hwtid);
2181 BUG_ON(skb_cloned(skb)); 2190 BUG_ON(skb_cloned(skb));
2182 skb_trim(skb, sizeof(struct cpl_tid_release)); 2191 skb_trim(skb, sizeof(struct cpl_tid_release));
2183 skb_get(skb);
2184 release_tid(&dev->rdev, hwtid, skb); 2192 release_tid(&dev->rdev, hwtid, skb);
2185 return; 2193 return;
2186} 2194}
@@ -3917,7 +3925,7 @@ int __init c4iw_cm_init(void)
3917 return 0; 3925 return 0;
3918} 3926}
3919 3927
3920void __exit c4iw_cm_term(void) 3928void c4iw_cm_term(void)
3921{ 3929{
3922 WARN_ON(!list_empty(&timeout_list)); 3930 WARN_ON(!list_empty(&timeout_list));
3923 flush_workqueue(workq); 3931 flush_workqueue(workq);
diff --git a/drivers/infiniband/hw/cxgb4/device.c b/drivers/infiniband/hw/cxgb4/device.c
index dd93aadc996e..7db82b24302b 100644
--- a/drivers/infiniband/hw/cxgb4/device.c
+++ b/drivers/infiniband/hw/cxgb4/device.c
@@ -696,6 +696,7 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
696 pr_err(MOD "error allocating status page\n"); 696 pr_err(MOD "error allocating status page\n");
697 goto err4; 697 goto err4;
698 } 698 }
699 rdev->status_page->db_off = 0;
699 return 0; 700 return 0;
700err4: 701err4:
701 c4iw_rqtpool_destroy(rdev); 702 c4iw_rqtpool_destroy(rdev);
@@ -729,7 +730,6 @@ static void c4iw_dealloc(struct uld_ctx *ctx)
729 if (ctx->dev->rdev.oc_mw_kva) 730 if (ctx->dev->rdev.oc_mw_kva)
730 iounmap(ctx->dev->rdev.oc_mw_kva); 731 iounmap(ctx->dev->rdev.oc_mw_kva);
731 ib_dealloc_device(&ctx->dev->ibdev); 732 ib_dealloc_device(&ctx->dev->ibdev);
732 iwpm_exit(RDMA_NL_C4IW);
733 ctx->dev = NULL; 733 ctx->dev = NULL;
734} 734}
735 735
@@ -826,12 +826,6 @@ static struct c4iw_dev *c4iw_alloc(const struct cxgb4_lld_info *infop)
826 setup_debugfs(devp); 826 setup_debugfs(devp);
827 } 827 }
828 828
829 ret = iwpm_init(RDMA_NL_C4IW);
830 if (ret) {
831 pr_err("port mapper initialization failed with %d\n", ret);
832 ib_dealloc_device(&devp->ibdev);
833 return ERR_PTR(ret);
834 }
835 829
836 return devp; 830 return devp;
837} 831}
@@ -1332,6 +1326,15 @@ static int __init c4iw_init_module(void)
1332 pr_err("%s[%u]: Failed to add netlink callback\n" 1326 pr_err("%s[%u]: Failed to add netlink callback\n"
1333 , __func__, __LINE__); 1327 , __func__, __LINE__);
1334 1328
1329 err = iwpm_init(RDMA_NL_C4IW);
1330 if (err) {
1331 pr_err("port mapper initialization failed with %d\n", err);
1332 ibnl_remove_client(RDMA_NL_C4IW);
1333 c4iw_cm_term();
1334 debugfs_remove_recursive(c4iw_debugfs_root);
1335 return err;
1336 }
1337
1335 cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info); 1338 cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);
1336 1339
1337 return 0; 1340 return 0;
@@ -1349,6 +1352,7 @@ static void __exit c4iw_exit_module(void)
1349 } 1352 }
1350 mutex_unlock(&dev_mutex); 1353 mutex_unlock(&dev_mutex);
1351 cxgb4_unregister_uld(CXGB4_ULD_RDMA); 1354 cxgb4_unregister_uld(CXGB4_ULD_RDMA);
1355 iwpm_exit(RDMA_NL_C4IW);
1352 ibnl_remove_client(RDMA_NL_C4IW); 1356 ibnl_remove_client(RDMA_NL_C4IW);
1353 c4iw_cm_term(); 1357 c4iw_cm_term();
1354 debugfs_remove_recursive(c4iw_debugfs_root); 1358 debugfs_remove_recursive(c4iw_debugfs_root);
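Two related cleanups here: iwpm_init()/iwpm_exit() move from per-device alloc/dealloc into module init/exit, and c4iw_cm_term() loses its __exit annotation (see the iw_cxgb4.h hunk below) because it is now also reachable from the init-time error path. The resulting shape is the usual register-in-order, unwind-in-reverse pattern; a hedged sketch rather than the literal driver code:

static int __init c4iw_init_sketch(void)
{
        int err = c4iw_cm_init();
        if (err)
                return err;
        /* debugfs setup, ibnl_add_client(), ... */
        err = iwpm_init(RDMA_NL_C4IW);
        if (err)
                goto unwind;
        cxgb4_register_uld(CXGB4_ULD_RDMA, &c4iw_uld_info);
        return 0;
unwind:
        ibnl_remove_client(RDMA_NL_C4IW);
        c4iw_cm_term();                 /* no longer __exit */
        debugfs_remove_recursive(c4iw_debugfs_root);
        return err;
}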
diff --git a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
index 125bc5d1e175..361fff7a0742 100644
--- a/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
+++ b/drivers/infiniband/hw/cxgb4/iw_cxgb4.h
@@ -908,7 +908,7 @@ int c4iw_destroy_ctrl_qp(struct c4iw_rdev *rdev);
908int c4iw_register_device(struct c4iw_dev *dev); 908int c4iw_register_device(struct c4iw_dev *dev);
909void c4iw_unregister_device(struct c4iw_dev *dev); 909void c4iw_unregister_device(struct c4iw_dev *dev);
910int __init c4iw_cm_init(void); 910int __init c4iw_cm_init(void);
911void __exit c4iw_cm_term(void); 911void c4iw_cm_term(void);
912void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev, 912void c4iw_release_dev_ucontext(struct c4iw_rdev *rdev,
913 struct c4iw_dev_ucontext *uctx); 913 struct c4iw_dev_ucontext *uctx);
914void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev, 914void c4iw_init_dev_ucontext(struct c4iw_rdev *rdev,
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index d13ddf1c0033..bbbcf389272c 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -675,7 +675,7 @@ static int create_kernel_qp(struct mlx5_ib_dev *dev,
675 int err; 675 int err;
676 676
677 uuari = &dev->mdev.priv.uuari; 677 uuari = &dev->mdev.priv.uuari;
678 if (init_attr->create_flags & ~IB_QP_CREATE_SIGNATURE_EN) 678 if (init_attr->create_flags & ~(IB_QP_CREATE_SIGNATURE_EN | IB_QP_CREATE_BLOCK_MULTICAST_LOOPBACK))
679 return -EINVAL; 679 return -EINVAL;
680 680
681 if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR) 681 if (init_attr->qp_type == MLX5_IB_QPT_REG_UMR)
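The mlx5 hunk simply whitelists one more QP create flag. The idiom is to mask off every flag the function knows how to honour and reject anything left over, so unknown flags fail with -EINVAL instead of being silently ignored. A self-contained illustration of the idiom (the names are made up, not mlx5 API):

#include <errno.h>

#define F_SIGNATURE_EN          (1u << 0)
#define F_BLOCK_MC_LOOPBACK     (1u << 1)
#define SUPPORTED_CREATE_FLAGS  (F_SIGNATURE_EN | F_BLOCK_MC_LOOPBACK)

static int check_create_flags(unsigned int flags)
{
        if (flags & ~SUPPORTED_CREATE_FLAGS)
                return -EINVAL;         /* caller asked for something unsupported */
        return 0;
}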
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index d4daa05efe60..499b4366a98d 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -45,7 +45,7 @@ struct pri_queue {
45struct pasid_state { 45struct pasid_state {
46 struct list_head list; /* For global state-list */ 46 struct list_head list; /* For global state-list */
47 atomic_t count; /* Reference count */ 47 atomic_t count; /* Reference count */
48 atomic_t mmu_notifier_count; /* Counting nested mmu_notifier 48 unsigned mmu_notifier_count; /* Counting nested mmu_notifier
49 calls */ 49 calls */
50 struct task_struct *task; /* Task bound to this PASID */ 50 struct task_struct *task; /* Task bound to this PASID */
51 struct mm_struct *mm; /* mm_struct for the faults */ 51 struct mm_struct *mm; /* mm_struct for the faults */
@@ -53,7 +53,8 @@ struct pasid_state {
53 struct pri_queue pri[PRI_QUEUE_SIZE]; /* PRI tag states */ 53 struct pri_queue pri[PRI_QUEUE_SIZE]; /* PRI tag states */
54 struct device_state *device_state; /* Link to our device_state */ 54 struct device_state *device_state; /* Link to our device_state */
55 int pasid; /* PASID index */ 55 int pasid; /* PASID index */
56 spinlock_t lock; /* Protect pri_queues */ 56 spinlock_t lock; /* Protect pri_queues and
57 mmu_notifer_count */
57 wait_queue_head_t wq; /* To wait for count == 0 */ 58 wait_queue_head_t wq; /* To wait for count == 0 */
58}; 59};
59 60
@@ -431,15 +432,19 @@ static void mn_invalidate_range_start(struct mmu_notifier *mn,
431{ 432{
432 struct pasid_state *pasid_state; 433 struct pasid_state *pasid_state;
433 struct device_state *dev_state; 434 struct device_state *dev_state;
435 unsigned long flags;
434 436
435 pasid_state = mn_to_state(mn); 437 pasid_state = mn_to_state(mn);
436 dev_state = pasid_state->device_state; 438 dev_state = pasid_state->device_state;
437 439
438 if (atomic_add_return(1, &pasid_state->mmu_notifier_count) == 1) { 440 spin_lock_irqsave(&pasid_state->lock, flags);
441 if (pasid_state->mmu_notifier_count == 0) {
439 amd_iommu_domain_set_gcr3(dev_state->domain, 442 amd_iommu_domain_set_gcr3(dev_state->domain,
440 pasid_state->pasid, 443 pasid_state->pasid,
441 __pa(empty_page_table)); 444 __pa(empty_page_table));
442 } 445 }
446 pasid_state->mmu_notifier_count += 1;
447 spin_unlock_irqrestore(&pasid_state->lock, flags);
443} 448}
444 449
445static void mn_invalidate_range_end(struct mmu_notifier *mn, 450static void mn_invalidate_range_end(struct mmu_notifier *mn,
@@ -448,15 +453,19 @@ static void mn_invalidate_range_end(struct mmu_notifier *mn,
448{ 453{
449 struct pasid_state *pasid_state; 454 struct pasid_state *pasid_state;
450 struct device_state *dev_state; 455 struct device_state *dev_state;
456 unsigned long flags;
451 457
452 pasid_state = mn_to_state(mn); 458 pasid_state = mn_to_state(mn);
453 dev_state = pasid_state->device_state; 459 dev_state = pasid_state->device_state;
454 460
455 if (atomic_dec_and_test(&pasid_state->mmu_notifier_count)) { 461 spin_lock_irqsave(&pasid_state->lock, flags);
462 pasid_state->mmu_notifier_count -= 1;
463 if (pasid_state->mmu_notifier_count == 0) {
456 amd_iommu_domain_set_gcr3(dev_state->domain, 464 amd_iommu_domain_set_gcr3(dev_state->domain,
457 pasid_state->pasid, 465 pasid_state->pasid,
458 __pa(pasid_state->mm->pgd)); 466 __pa(pasid_state->mm->pgd));
459 } 467 }
468 spin_unlock_irqrestore(&pasid_state->lock, flags);
460} 469}
461 470
462static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm) 471static void mn_release(struct mmu_notifier *mn, struct mm_struct *mm)
@@ -650,7 +659,6 @@ int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
650 goto out; 659 goto out;
651 660
652 atomic_set(&pasid_state->count, 1); 661 atomic_set(&pasid_state->count, 1);
653 atomic_set(&pasid_state->mmu_notifier_count, 0);
654 init_waitqueue_head(&pasid_state->wq); 662 init_waitqueue_head(&pasid_state->wq);
655 spin_lock_init(&pasid_state->lock); 663 spin_lock_init(&pasid_state->lock);
656 664
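The atomic counter was not enough here: deciding whether this is the first (or last) nested notifier and reprogramming the GCR3 table must happen as one step, otherwise concurrent invalidate_range_start/end pairs can leave the wrong page table installed. The hunk therefore makes the counter a plain unsigned protected by the pasid_state spinlock. Condensed to the pattern, with the field and helper names from the hunk:

        spin_lock_irqsave(&pasid_state->lock, flags);
        if (pasid_state->mmu_notifier_count++ == 0)     /* first nester */
                amd_iommu_domain_set_gcr3(dev_state->domain, pasid_state->pasid,
                                          __pa(empty_page_table));
        spin_unlock_irqrestore(&pasid_state->lock, flags);

        /* ...and in invalidate_range_end()... */

        spin_lock_irqsave(&pasid_state->lock, flags);
        if (--pasid_state->mmu_notifier_count == 0)     /* last nester */
                amd_iommu_domain_set_gcr3(dev_state->domain, pasid_state->pasid,
                                          __pa(pasid_state->mm->pgd));
        spin_unlock_irqrestore(&pasid_state->lock, flags);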
diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c
index b99dd88e31b9..bb446d742a2d 100644
--- a/drivers/iommu/fsl_pamu.c
+++ b/drivers/iommu/fsl_pamu.c
@@ -170,10 +170,10 @@ int pamu_disable_liodn(int liodn)
170static unsigned int map_addrspace_size_to_wse(phys_addr_t addrspace_size) 170static unsigned int map_addrspace_size_to_wse(phys_addr_t addrspace_size)
171{ 171{
172 /* Bug if not a power of 2 */ 172 /* Bug if not a power of 2 */
173 BUG_ON(!is_power_of_2(addrspace_size)); 173 BUG_ON((addrspace_size & (addrspace_size - 1)));
174 174
175 /* window size is 2^(WSE+1) bytes */ 175 /* window size is 2^(WSE+1) bytes */
176 return __ffs(addrspace_size) - 1; 176 return fls64(addrspace_size) - 2;
177} 177}
178 178
179/* Derive the PAACE window count encoding for the subwindow count */ 179/* Derive the PAACE window count encoding for the subwindow count */
@@ -351,7 +351,7 @@ int pamu_config_ppaace(int liodn, phys_addr_t win_addr, phys_addr_t win_size,
351 struct paace *ppaace; 351 struct paace *ppaace;
352 unsigned long fspi; 352 unsigned long fspi;
353 353
354 if (!is_power_of_2(win_size) || win_size < PAMU_PAGE_SIZE) { 354 if ((win_size & (win_size - 1)) || win_size < PAMU_PAGE_SIZE) {
355 pr_debug("window size too small or not a power of two %llx\n", win_size); 355 pr_debug("window size too small or not a power of two %llx\n", win_size);
356 return -EINVAL; 356 return -EINVAL;
357 } 357 }
@@ -464,7 +464,7 @@ int pamu_config_spaace(int liodn, u32 subwin_cnt, u32 subwin,
464 return -ENOENT; 464 return -ENOENT;
465 } 465 }
466 466
467 if (!is_power_of_2(subwin_size) || subwin_size < PAMU_PAGE_SIZE) { 467 if ((subwin_size & (subwin_size - 1)) || subwin_size < PAMU_PAGE_SIZE) {
468 pr_debug("subwindow size out of range, or not a power of 2\n"); 468 pr_debug("subwindow size out of range, or not a power of 2\n");
469 return -EINVAL; 469 return -EINVAL;
470 } 470 }
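The map_addrspace_size_to_wse() fix deserves a worked example. A window of 2^(WSE+1) bytes means WSE = log2(size) - 1; for a power of two, fls64(size) is log2(size) + 1, hence fls64(size) - 2. The old __ffs()-based code (and is_power_of_2(), which these hunks open-code) operate on unsigned long, which presumably truncated sizes above 4 GiB on 32-bit kernels with 36-bit physical addressing. A small userspace check of the arithmetic (plain C, not kernel code; fls64_demo() mimics the kernel's fls64()):

#include <stdint.h>
#include <stdio.h>

static unsigned int fls64_demo(uint64_t x)      /* highest set bit, 1-based */
{
        unsigned int r = 0;
        while (x) { r++; x >>= 1; }
        return r;
}

int main(void)
{
        uint64_t sizes[] = { 1ULL << 12, 1ULL << 20, 1ULL << 36 };
        for (int i = 0; i < 3; i++) {
                unsigned int wse = fls64_demo(sizes[i]) - 2;
                printf("size=%#llx -> WSE=%u -> 2^(WSE+1)=%#llx\n",
                       (unsigned long long)sizes[i], wse, 1ULL << (wse + 1));
        }
        return 0;
}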
diff --git a/drivers/iommu/fsl_pamu_domain.c b/drivers/iommu/fsl_pamu_domain.c
index 93072ba44b1d..af47648301a9 100644
--- a/drivers/iommu/fsl_pamu_domain.c
+++ b/drivers/iommu/fsl_pamu_domain.c
@@ -301,7 +301,7 @@ static int check_size(u64 size, dma_addr_t iova)
301 * Size must be a power of two and at least be equal 301 * Size must be a power of two and at least be equal
302 * to PAMU page size. 302 * to PAMU page size.
303 */ 303 */
304 if (!is_power_of_2(size) || size < PAMU_PAGE_SIZE) { 304 if ((size & (size - 1)) || size < PAMU_PAGE_SIZE) {
305 pr_debug("%s: size too small or not a power of two\n", __func__); 305 pr_debug("%s: size too small or not a power of two\n", __func__);
306 return -EINVAL; 306 return -EINVAL;
307 } 307 }
@@ -335,11 +335,6 @@ static struct fsl_dma_domain *iommu_alloc_dma_domain(void)
335 return domain; 335 return domain;
336} 336}
337 337
338static inline struct device_domain_info *find_domain(struct device *dev)
339{
340 return dev->archdata.iommu_domain;
341}
342
343static void remove_device_ref(struct device_domain_info *info, u32 win_cnt) 338static void remove_device_ref(struct device_domain_info *info, u32 win_cnt)
344{ 339{
345 unsigned long flags; 340 unsigned long flags;
@@ -380,7 +375,7 @@ static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct d
380 * Check here if the device is already attached to domain or not. 375 * Check here if the device is already attached to domain or not.
381 * If the device is already attached to a domain detach it. 376 * If the device is already attached to a domain detach it.
382 */ 377 */
383 old_domain_info = find_domain(dev); 378 old_domain_info = dev->archdata.iommu_domain;
384 if (old_domain_info && old_domain_info->domain != dma_domain) { 379 if (old_domain_info && old_domain_info->domain != dma_domain) {
385 spin_unlock_irqrestore(&device_domain_lock, flags); 380 spin_unlock_irqrestore(&device_domain_lock, flags);
386 detach_device(dev, old_domain_info->domain); 381 detach_device(dev, old_domain_info->domain);
@@ -399,7 +394,7 @@ static void attach_device(struct fsl_dma_domain *dma_domain, int liodn, struct d
399 * the info for the first LIODN as all 394 * the info for the first LIODN as all
400 * LIODNs share the same domain 395 * LIODNs share the same domain
401 */ 396 */
402 if (!old_domain_info) 397 if (!dev->archdata.iommu_domain)
403 dev->archdata.iommu_domain = info; 398 dev->archdata.iommu_domain = info;
404 spin_unlock_irqrestore(&device_domain_lock, flags); 399 spin_unlock_irqrestore(&device_domain_lock, flags);
405 400
@@ -1042,12 +1037,15 @@ root_bus:
1042 group = get_shared_pci_device_group(pdev); 1037 group = get_shared_pci_device_group(pdev);
1043 } 1038 }
1044 1039
1040 if (!group)
1041 group = ERR_PTR(-ENODEV);
1042
1045 return group; 1043 return group;
1046} 1044}
1047 1045
1048static int fsl_pamu_add_device(struct device *dev) 1046static int fsl_pamu_add_device(struct device *dev)
1049{ 1047{
1050 struct iommu_group *group = NULL; 1048 struct iommu_group *group = ERR_PTR(-ENODEV);
1051 struct pci_dev *pdev; 1049 struct pci_dev *pdev;
1052 const u32 *prop; 1050 const u32 *prop;
1053 int ret, len; 1051 int ret, len;
@@ -1070,7 +1068,7 @@ static int fsl_pamu_add_device(struct device *dev)
1070 group = get_device_iommu_group(dev); 1068 group = get_device_iommu_group(dev);
1071 } 1069 }
1072 1070
1073 if (!group || IS_ERR(group)) 1071 if (IS_ERR(group))
1074 return PTR_ERR(group); 1072 return PTR_ERR(group);
1075 1073
1076 ret = iommu_group_add_device(group, dev); 1074 ret = iommu_group_add_device(group, dev);
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index 6bb32773c3ac..51b6b77dc3e5 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -3816,14 +3816,11 @@ int dmar_iommu_notify_scope_dev(struct dmar_pci_notify_info *info)
3816 ((void *)rmrr) + rmrr->header.length, 3816 ((void *)rmrr) + rmrr->header.length,
3817 rmrr->segment, rmrru->devices, 3817 rmrr->segment, rmrru->devices,
3818 rmrru->devices_cnt); 3818 rmrru->devices_cnt);
3819 if (ret > 0) 3819 if(ret < 0)
3820 break;
3821 else if(ret < 0)
3822 return ret; 3820 return ret;
3823 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) { 3821 } else if (info->event == BUS_NOTIFY_DEL_DEVICE) {
3824 if (dmar_remove_dev_scope(info, rmrr->segment, 3822 dmar_remove_dev_scope(info, rmrr->segment,
3825 rmrru->devices, rmrru->devices_cnt)) 3823 rmrru->devices, rmrru->devices_cnt);
3826 break;
3827 } 3824 }
3828 } 3825 }
3829 3826
diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c
index c887e6eebc41..574aba0eba4e 100644
--- a/drivers/irqchip/irq-armada-370-xp.c
+++ b/drivers/irqchip/irq-armada-370-xp.c
@@ -334,6 +334,15 @@ static void armada_mpic_send_doorbell(const struct cpumask *mask,
334 334
335static void armada_xp_mpic_smp_cpu_init(void) 335static void armada_xp_mpic_smp_cpu_init(void)
336{ 336{
337 u32 control;
338 int nr_irqs, i;
339
340 control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL);
341 nr_irqs = (control >> 2) & 0x3ff;
342
343 for (i = 0; i < nr_irqs; i++)
344 writel(i, per_cpu_int_base + ARMADA_370_XP_INT_SET_MASK_OFFS);
345
337 /* Clear pending IPIs */ 346 /* Clear pending IPIs */
338 writel(0, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS); 347 writel(0, per_cpu_int_base + ARMADA_370_XP_IN_DRBEL_CAUSE_OFFS);
339 348
@@ -474,7 +483,7 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
474 struct device_node *parent) 483 struct device_node *parent)
475{ 484{
476 struct resource main_int_res, per_cpu_int_res; 485 struct resource main_int_res, per_cpu_int_res;
477 int parent_irq; 486 int parent_irq, nr_irqs, i;
478 u32 control; 487 u32 control;
479 488
480 BUG_ON(of_address_to_resource(node, 0, &main_int_res)); 489 BUG_ON(of_address_to_resource(node, 0, &main_int_res));
@@ -496,9 +505,13 @@ static int __init armada_370_xp_mpic_of_init(struct device_node *node,
496 BUG_ON(!per_cpu_int_base); 505 BUG_ON(!per_cpu_int_base);
497 506
498 control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL); 507 control = readl(main_int_base + ARMADA_370_XP_INT_CONTROL);
508 nr_irqs = (control >> 2) & 0x3ff;
509
510 for (i = 0; i < nr_irqs; i++)
511 writel(i, main_int_base + ARMADA_370_XP_INT_CLEAR_ENABLE_OFFS);
499 512
500 armada_370_xp_mpic_domain = 513 armada_370_xp_mpic_domain =
501 irq_domain_add_linear(node, (control >> 2) & 0x3ff, 514 irq_domain_add_linear(node, nr_irqs,
502 &armada_370_xp_mpic_irq_ops, NULL); 515 &armada_370_xp_mpic_irq_ops, NULL);
503 516
504 BUG_ON(!armada_370_xp_mpic_domain); 517 BUG_ON(!armada_370_xp_mpic_domain);
diff --git a/drivers/irqchip/irq-brcmstb-l2.c b/drivers/irqchip/irq-brcmstb-l2.c
index 8ee2a36d5840..c15c840987d2 100644
--- a/drivers/irqchip/irq-brcmstb-l2.c
+++ b/drivers/irqchip/irq-brcmstb-l2.c
@@ -150,7 +150,7 @@ int __init brcmstb_l2_intc_of_init(struct device_node *np,
150 150
151 /* Allocate a single Generic IRQ chip for this node */ 151 /* Allocate a single Generic IRQ chip for this node */
152 ret = irq_alloc_domain_generic_chips(data->domain, 32, 1, 152 ret = irq_alloc_domain_generic_chips(data->domain, 32, 1,
153 np->full_name, handle_level_irq, clr, 0, 0); 153 np->full_name, handle_edge_irq, clr, 0, 0);
154 if (ret) { 154 if (ret) {
155 pr_err("failed to allocate generic irq chip\n"); 155 pr_err("failed to allocate generic irq chip\n");
156 goto out_free_domain; 156 goto out_free_domain;
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 7e11c9d6ae8c..7c131cf7cc13 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -42,6 +42,7 @@
42#include <linux/irqchip/chained_irq.h> 42#include <linux/irqchip/chained_irq.h>
43#include <linux/irqchip/arm-gic.h> 43#include <linux/irqchip/arm-gic.h>
44 44
45#include <asm/cputype.h>
45#include <asm/irq.h> 46#include <asm/irq.h>
46#include <asm/exception.h> 47#include <asm/exception.h>
47#include <asm/smp_plat.h> 48#include <asm/smp_plat.h>
@@ -954,7 +955,9 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
954 } 955 }
955 956
956 for_each_possible_cpu(cpu) { 957 for_each_possible_cpu(cpu) {
957 unsigned long offset = percpu_offset * cpu_logical_map(cpu); 958 u32 mpidr = cpu_logical_map(cpu);
959 u32 core_id = MPIDR_AFFINITY_LEVEL(mpidr, 0);
960 unsigned long offset = percpu_offset * core_id;
958 *per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset; 961 *per_cpu_ptr(gic->dist_base.percpu_base, cpu) = dist_base + offset;
959 *per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset; 962 *per_cpu_ptr(gic->cpu_base.percpu_base, cpu) = cpu_base + offset;
960 } 963 }
@@ -1071,8 +1074,10 @@ gic_of_init(struct device_node *node, struct device_node *parent)
1071 gic_cnt++; 1074 gic_cnt++;
1072 return 0; 1075 return 0;
1073} 1076}
1077IRQCHIP_DECLARE(gic_400, "arm,gic-400", gic_of_init);
1074IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init); 1078IRQCHIP_DECLARE(cortex_a15_gic, "arm,cortex-a15-gic", gic_of_init);
1075IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init); 1079IRQCHIP_DECLARE(cortex_a9_gic, "arm,cortex-a9-gic", gic_of_init);
1080IRQCHIP_DECLARE(cortex_a7_gic, "arm,cortex-a7-gic", gic_of_init);
1076IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init); 1081IRQCHIP_DECLARE(msm_8660_qgic, "qcom,msm-8660-qgic", gic_of_init);
1077IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init); 1082IRQCHIP_DECLARE(msm_qgic2, "qcom,msm-qgic2", gic_of_init);
1078 1083
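The gic_init_bases() change matters on banked, multi-cluster parts: cpu_logical_map() returns the full MPIDR, which carries the cluster number in the higher affinity fields, so multiplying percpu_offset by it overshoots for any CPU outside cluster 0. Using MPIDR affinity level 0 (the core number within the cluster) keeps the offset in range. A userspace illustration of the difference, with a hypothetical MPIDR value:

#include <stdint.h>
#include <stdio.h>

#define MPIDR_AFF0(mpidr)       ((mpidr) & 0xff)        /* affinity level 0 */

int main(void)
{
        uint32_t mpidr = 0x00000101;            /* hypothetical: cluster 1, core 1 */
        unsigned long percpu_offset = 0x8000;

        printf("old offset = %#lx\n", percpu_offset * mpidr);             /* 0x808000 */
        printf("new offset = %#lx\n", percpu_offset * MPIDR_AFF0(mpidr)); /* 0x8000 */
        return 0;
}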
diff --git a/drivers/irqchip/spear-shirq.c b/drivers/irqchip/spear-shirq.c
index 3fdda3a40269..6ce6bd3441bf 100644
--- a/drivers/irqchip/spear-shirq.c
+++ b/drivers/irqchip/spear-shirq.c
@@ -125,7 +125,7 @@ static struct spear_shirq spear320_shirq_ras2 = {
125}; 125};
126 126
127static struct spear_shirq spear320_shirq_ras3 = { 127static struct spear_shirq spear320_shirq_ras3 = {
128 .irq_nr = 3, 128 .irq_nr = 7,
129 .irq_bit_off = 0, 129 .irq_bit_off = 0,
130 .invalid_irq = 1, 130 .invalid_irq = 1,
131 .regs = { 131 .regs = {
diff --git a/drivers/isdn/hisax/Kconfig b/drivers/isdn/hisax/Kconfig
index d9edcc94c2a8..97465ac5a2d5 100644
--- a/drivers/isdn/hisax/Kconfig
+++ b/drivers/isdn/hisax/Kconfig
@@ -16,7 +16,7 @@ config ISDN_DRV_HISAX
16 also to the configuration option of the driver for your particular 16 also to the configuration option of the driver for your particular
17 card, below. 17 card, below.
18 18
19if ISDN_DRV_HISAX!=n 19if ISDN_DRV_HISAX
20 20
21comment "D-channel protocol features" 21comment "D-channel protocol features"
22 22
@@ -348,10 +348,6 @@ config HISAX_ENTERNOW_PCI
348 This enables HiSax support for the Formula-n enter:now PCI 348 This enables HiSax support for the Formula-n enter:now PCI
349 ISDN card. 349 ISDN card.
350 350
351endif
352
353if ISDN_DRV_HISAX
354
355config HISAX_DEBUG 351config HISAX_DEBUG
356 bool "HiSax debugging" 352 bool "HiSax debugging"
357 help 353 help
@@ -420,11 +416,6 @@ config HISAX_FRITZ_PCIPNP
420 (the latter also needs you to select "ISA Plug and Play support" 416 (the latter also needs you to select "ISA Plug and Play support"
421 from the menu "Plug and Play configuration") 417 from the menu "Plug and Play configuration")
422 418
423config HISAX_AVM_A1_PCMCIA
424 bool
425 depends on HISAX_AVM_A1_CS
426 default y
427
428endif 419endif
429 420
430endmenu 421endmenu
diff --git a/drivers/isdn/hisax/l3ni1.c b/drivers/isdn/hisax/l3ni1.c
index 0df6691d045c..8dc791bfaa6f 100644
--- a/drivers/isdn/hisax/l3ni1.c
+++ b/drivers/isdn/hisax/l3ni1.c
@@ -2059,13 +2059,17 @@ static int l3ni1_cmd_global(struct PStack *st, isdn_ctrl *ic)
2059 memcpy(p, ic->parm.ni1_io.data, ic->parm.ni1_io.datalen); /* copy data */ 2059 memcpy(p, ic->parm.ni1_io.data, ic->parm.ni1_io.datalen); /* copy data */
2060 l = (p - temp) + ic->parm.ni1_io.datalen; /* total length */ 2060 l = (p - temp) + ic->parm.ni1_io.datalen; /* total length */
2061 2061
2062 if (ic->parm.ni1_io.timeout > 0) 2062 if (ic->parm.ni1_io.timeout > 0) {
2063 if (!(pc = ni1_new_l3_process(st, -1))) 2063 pc = ni1_new_l3_process(st, -1);
2064 { free_invoke_id(st, id); 2064 if (!pc) {
2065 free_invoke_id(st, id);
2065 return (-2); 2066 return (-2);
2066 } 2067 }
2067 pc->prot.ni1.ll_id = ic->parm.ni1_io.ll_id; /* remember id */ 2068 /* remember id */
2068 pc->prot.ni1.proc = ic->parm.ni1_io.proc; /* and procedure */ 2069 pc->prot.ni1.ll_id = ic->parm.ni1_io.ll_id;
2070 /* and procedure */
2071 pc->prot.ni1.proc = ic->parm.ni1_io.proc;
2072 }
2069 2073
2070 if (!(skb = l3_alloc_skb(l))) 2074 if (!(skb = l3_alloc_skb(l)))
2071 { free_invoke_id(st, id); 2075 { free_invoke_id(st, id);
diff --git a/drivers/isdn/i4l/isdn_ppp.c b/drivers/isdn/i4l/isdn_ppp.c
index 61ac63237446..a333b7f798d1 100644
--- a/drivers/isdn/i4l/isdn_ppp.c
+++ b/drivers/isdn/i4l/isdn_ppp.c
@@ -442,7 +442,7 @@ static int get_filter(void __user *arg, struct sock_filter **p)
442{ 442{
443 struct sock_fprog uprog; 443 struct sock_fprog uprog;
444 struct sock_filter *code = NULL; 444 struct sock_filter *code = NULL;
445 int len, err; 445 int len;
446 446
447 if (copy_from_user(&uprog, arg, sizeof(uprog))) 447 if (copy_from_user(&uprog, arg, sizeof(uprog)))
448 return -EFAULT; 448 return -EFAULT;
@@ -458,12 +458,6 @@ static int get_filter(void __user *arg, struct sock_filter **p)
458 if (IS_ERR(code)) 458 if (IS_ERR(code))
459 return PTR_ERR(code); 459 return PTR_ERR(code);
460 460
461 err = sk_chk_filter(code, uprog.len);
462 if (err) {
463 kfree(code);
464 return err;
465 }
466
467 *p = code; 461 *p = code;
468 return uprog.len; 462 return uprog.len;
469} 463}
diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c
index 23b4a3b28dbc..4eab93aa570b 100644
--- a/drivers/macintosh/smu.c
+++ b/drivers/macintosh/smu.c
@@ -1257,7 +1257,8 @@ static unsigned int smu_fpoll(struct file *file, poll_table *wait)
1257 if (pp->busy && pp->cmd.status != 1) 1257 if (pp->busy && pp->cmd.status != 1)
1258 mask |= POLLIN; 1258 mask |= POLLIN;
1259 spin_unlock_irqrestore(&pp->lock, flags); 1259 spin_unlock_irqrestore(&pp->lock, flags);
1260 } if (pp->mode == smu_file_events) { 1260 }
1261 if (pp->mode == smu_file_events) {
1261 /* Not yet implemented */ 1262 /* Not yet implemented */
1262 } 1263 }
1263 return mask; 1264 return mask;
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index 4ead4ba60656..d2899e7eb3aa 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -425,6 +425,15 @@ static int __open_metadata(struct dm_cache_metadata *cmd)
425 425
426 disk_super = dm_block_data(sblock); 426 disk_super = dm_block_data(sblock);
427 427
428 /* Verify the data block size hasn't changed */
429 if (le32_to_cpu(disk_super->data_block_size) != cmd->data_block_size) {
430 DMERR("changing the data block size (from %u to %llu) is not supported",
431 le32_to_cpu(disk_super->data_block_size),
432 (unsigned long long)cmd->data_block_size);
433 r = -EINVAL;
434 goto bad;
435 }
436
428 r = __check_incompat_features(disk_super, cmd); 437 r = __check_incompat_features(disk_super, cmd);
429 if (r < 0) 438 if (r < 0)
430 goto bad; 439 goto bad;
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 53b213226c01..4cba2d808afb 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2003 Christophe Saout <christophe@saout.de> 2 * Copyright (C) 2003 Jana Saout <jana@saout.de>
3 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org> 3 * Copyright (C) 2004 Clemens Fruhwirth <clemens@endorphin.org>
4 * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved. 4 * Copyright (C) 2006-2009 Red Hat, Inc. All rights reserved.
5 * Copyright (C) 2013 Milan Broz <gmazyland@gmail.com> 5 * Copyright (C) 2013 Milan Broz <gmazyland@gmail.com>
@@ -1996,6 +1996,6 @@ static void __exit dm_crypt_exit(void)
1996module_init(dm_crypt_init); 1996module_init(dm_crypt_init);
1997module_exit(dm_crypt_exit); 1997module_exit(dm_crypt_exit);
1998 1998
1999MODULE_AUTHOR("Christophe Saout <christophe@saout.de>"); 1999MODULE_AUTHOR("Jana Saout <jana@saout.de>");
2000MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption"); 2000MODULE_DESCRIPTION(DM_NAME " target for transparent encryption / decryption");
2001MODULE_LICENSE("GPL"); 2001MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 3842ac738f98..db404a0f7e2c 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -10,6 +10,7 @@
10#include <linux/device-mapper.h> 10#include <linux/device-mapper.h>
11 11
12#include <linux/bio.h> 12#include <linux/bio.h>
13#include <linux/completion.h>
13#include <linux/mempool.h> 14#include <linux/mempool.h>
14#include <linux/module.h> 15#include <linux/module.h>
15#include <linux/sched.h> 16#include <linux/sched.h>
@@ -32,7 +33,7 @@ struct dm_io_client {
32struct io { 33struct io {
33 unsigned long error_bits; 34 unsigned long error_bits;
34 atomic_t count; 35 atomic_t count;
35 struct task_struct *sleeper; 36 struct completion *wait;
36 struct dm_io_client *client; 37 struct dm_io_client *client;
37 io_notify_fn callback; 38 io_notify_fn callback;
38 void *context; 39 void *context;
@@ -121,8 +122,8 @@ static void dec_count(struct io *io, unsigned int region, int error)
121 invalidate_kernel_vmap_range(io->vma_invalidate_address, 122 invalidate_kernel_vmap_range(io->vma_invalidate_address,
122 io->vma_invalidate_size); 123 io->vma_invalidate_size);
123 124
124 if (io->sleeper) 125 if (io->wait)
125 wake_up_process(io->sleeper); 126 complete(io->wait);
126 127
127 else { 128 else {
128 unsigned long r = io->error_bits; 129 unsigned long r = io->error_bits;
@@ -387,6 +388,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
387 */ 388 */
388 volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1]; 389 volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1];
389 struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io)); 390 struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io));
391 DECLARE_COMPLETION_ONSTACK(wait);
390 392
391 if (num_regions > 1 && (rw & RW_MASK) != WRITE) { 393 if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
392 WARN_ON(1); 394 WARN_ON(1);
@@ -395,7 +397,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
395 397
396 io->error_bits = 0; 398 io->error_bits = 0;
397 atomic_set(&io->count, 1); /* see dispatch_io() */ 399 atomic_set(&io->count, 1); /* see dispatch_io() */
398 io->sleeper = current; 400 io->wait = &wait;
399 io->client = client; 401 io->client = client;
400 402
401 io->vma_invalidate_address = dp->vma_invalidate_address; 403 io->vma_invalidate_address = dp->vma_invalidate_address;
@@ -403,15 +405,7 @@ static int sync_io(struct dm_io_client *client, unsigned int num_regions,
403 405
404 dispatch_io(rw, num_regions, where, dp, io, 1); 406 dispatch_io(rw, num_regions, where, dp, io, 1);
405 407
406 while (1) { 408 wait_for_completion_io(&wait);
407 set_current_state(TASK_UNINTERRUPTIBLE);
408
409 if (!atomic_read(&io->count))
410 break;
411
412 io_schedule();
413 }
414 set_current_state(TASK_RUNNING);
415 409
416 if (error_bits) 410 if (error_bits)
417 *error_bits = io->error_bits; 411 *error_bits = io->error_bits;
@@ -434,7 +428,7 @@ static int async_io(struct dm_io_client *client, unsigned int num_regions,
434 io = mempool_alloc(client->pool, GFP_NOIO); 428 io = mempool_alloc(client->pool, GFP_NOIO);
435 io->error_bits = 0; 429 io->error_bits = 0;
436 atomic_set(&io->count, 1); /* see dispatch_io() */ 430 atomic_set(&io->count, 1); /* see dispatch_io() */
437 io->sleeper = NULL; 431 io->wait = NULL;
438 io->client = client; 432 io->client = client;
439 io->callback = fn; 433 io->callback = fn;
440 io->context = context; 434 io->context = context;
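The dm-io change swaps the hand-rolled sleep loop in sync_io() for a completion: dec_count() signals it when the last region finishes, so the waiter no longer needs the set_current_state()/io_schedule() dance or a task_struct pointer. The core of the pattern, keeping the names from the hunk:

        DECLARE_COMPLETION_ONSTACK(wait);

        io->wait = &wait;                       /* async_io() leaves this NULL */
        dispatch_io(rw, num_regions, where, dp, io, 1);
        wait_for_completion_io(&wait);          /* complete(io->wait) fires in dec_count() */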
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 3f6fd9d33ba3..f4167b013d99 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -1611,8 +1611,9 @@ static int multipath_busy(struct dm_target *ti)
1611 1611
1612 spin_lock_irqsave(&m->lock, flags); 1612 spin_lock_irqsave(&m->lock, flags);
1613 1613
1614 /* pg_init in progress, requeue until done */ 1614 /* pg_init in progress or no paths available */
1615 if (!pg_ready(m)) { 1615 if (m->pg_init_in_progress ||
1616 (!m->nr_valid_paths && m->queue_if_no_path)) {
1616 busy = 1; 1617 busy = 1;
1617 goto out; 1618 goto out;
1618 } 1619 }
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index b086a945edcb..e9d33ad59df5 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -613,6 +613,15 @@ static int __open_metadata(struct dm_pool_metadata *pmd)
613 613
614 disk_super = dm_block_data(sblock); 614 disk_super = dm_block_data(sblock);
615 615
616 /* Verify the data block size hasn't changed */
617 if (le32_to_cpu(disk_super->data_block_size) != pmd->data_block_size) {
618 DMERR("changing the data block size (from %u to %llu) is not supported",
619 le32_to_cpu(disk_super->data_block_size),
620 (unsigned long long)pmd->data_block_size);
621 r = -EINVAL;
622 goto bad_unlock_sblock;
623 }
624
616 r = __check_incompat_features(disk_super, pmd); 625 r = __check_incompat_features(disk_super, pmd);
617 if (r < 0) 626 if (r < 0)
618 goto bad_unlock_sblock; 627 goto bad_unlock_sblock;
diff --git a/drivers/md/dm-zero.c b/drivers/md/dm-zero.c
index c99003e0d47a..b9a64bbce304 100644
--- a/drivers/md/dm-zero.c
+++ b/drivers/md/dm-zero.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (C) 2003 Christophe Saout <christophe@saout.de> 2 * Copyright (C) 2003 Jana Saout <jana@saout.de>
3 * 3 *
4 * This file is released under the GPL. 4 * This file is released under the GPL.
5 */ 5 */
@@ -79,6 +79,6 @@ static void __exit dm_zero_exit(void)
79module_init(dm_zero_init) 79module_init(dm_zero_init)
80module_exit(dm_zero_exit) 80module_exit(dm_zero_exit)
81 81
82MODULE_AUTHOR("Christophe Saout <christophe@saout.de>"); 82MODULE_AUTHOR("Jana Saout <jana@saout.de>");
83MODULE_DESCRIPTION(DM_NAME " dummy target returning zeros"); 83MODULE_DESCRIPTION(DM_NAME " dummy target returning zeros");
84MODULE_LICENSE("GPL"); 84MODULE_LICENSE("GPL");
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 437d99045ef2..32b958dbc499 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -54,6 +54,8 @@ static void do_deferred_remove(struct work_struct *w);
54 54
55static DECLARE_WORK(deferred_remove_work, do_deferred_remove); 55static DECLARE_WORK(deferred_remove_work, do_deferred_remove);
56 56
57static struct workqueue_struct *deferred_remove_workqueue;
58
57/* 59/*
58 * For bio-based dm. 60 * For bio-based dm.
59 * One of these is allocated per bio. 61 * One of these is allocated per bio.
@@ -276,16 +278,24 @@ static int __init local_init(void)
276 if (r) 278 if (r)
277 goto out_free_rq_tio_cache; 279 goto out_free_rq_tio_cache;
278 280
281 deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
282 if (!deferred_remove_workqueue) {
283 r = -ENOMEM;
284 goto out_uevent_exit;
285 }
286
279 _major = major; 287 _major = major;
280 r = register_blkdev(_major, _name); 288 r = register_blkdev(_major, _name);
281 if (r < 0) 289 if (r < 0)
282 goto out_uevent_exit; 290 goto out_free_workqueue;
283 291
284 if (!_major) 292 if (!_major)
285 _major = r; 293 _major = r;
286 294
287 return 0; 295 return 0;
288 296
297out_free_workqueue:
298 destroy_workqueue(deferred_remove_workqueue);
289out_uevent_exit: 299out_uevent_exit:
290 dm_uevent_exit(); 300 dm_uevent_exit();
291out_free_rq_tio_cache: 301out_free_rq_tio_cache:
@@ -299,6 +309,7 @@ out_free_io_cache:
299static void local_exit(void) 309static void local_exit(void)
300{ 310{
301 flush_scheduled_work(); 311 flush_scheduled_work();
312 destroy_workqueue(deferred_remove_workqueue);
302 313
303 kmem_cache_destroy(_rq_tio_cache); 314 kmem_cache_destroy(_rq_tio_cache);
304 kmem_cache_destroy(_io_cache); 315 kmem_cache_destroy(_io_cache);
@@ -407,7 +418,7 @@ static void dm_blk_close(struct gendisk *disk, fmode_t mode)
407 418
408 if (atomic_dec_and_test(&md->open_count) && 419 if (atomic_dec_and_test(&md->open_count) &&
409 (test_bit(DMF_DEFERRED_REMOVE, &md->flags))) 420 (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
410 schedule_work(&deferred_remove_work); 421 queue_work(deferred_remove_workqueue, &deferred_remove_work);
411 422
412 dm_put(md); 423 dm_put(md);
413 424
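Deferred device removal moves off the shared system workqueue onto a dedicated single-threaded unbound workqueue, presumably so removals are neither queued behind unrelated system_wq work nor left running at module teardown; local_exit() can now destroy the queue and know the work has finished. The lifecycle, condensed from the hunks above:

static struct workqueue_struct *deferred_remove_workqueue;
static DECLARE_WORK(deferred_remove_work, do_deferred_remove);

/* local_init(): */
deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
if (!deferred_remove_workqueue)
        return -ENOMEM;

/* last close of a device flagged DMF_DEFERRED_REMOVE: */
queue_work(deferred_remove_workqueue, &deferred_remove_work);

/* local_exit(): waits for pending work, then frees the queue */
destroy_workqueue(deferred_remove_workqueue);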
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 34846856dbc6..32fc19c540d4 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5599,7 +5599,7 @@ static int get_array_info(struct mddev * mddev, void __user * arg)
5599 if (mddev->in_sync) 5599 if (mddev->in_sync)
5600 info.state = (1<<MD_SB_CLEAN); 5600 info.state = (1<<MD_SB_CLEAN);
5601 if (mddev->bitmap && mddev->bitmap_info.offset) 5601 if (mddev->bitmap && mddev->bitmap_info.offset)
5602 info.state = (1<<MD_SB_BITMAP_PRESENT); 5602 info.state |= (1<<MD_SB_BITMAP_PRESENT);
5603 info.active_disks = insync; 5603 info.active_disks = insync;
5604 info.working_disks = working; 5604 info.working_disks = working;
5605 info.failed_disks = failed; 5605 info.failed_disks = failed;
@@ -7501,6 +7501,19 @@ void md_do_sync(struct md_thread *thread)
7501 rdev->recovery_offset < j) 7501 rdev->recovery_offset < j)
7502 j = rdev->recovery_offset; 7502 j = rdev->recovery_offset;
7503 rcu_read_unlock(); 7503 rcu_read_unlock();
7504
7505 /* If there is a bitmap, we need to make sure all
7506 * writes that started before we added a spare
7507 * complete before we start doing a recovery.
7508 * Otherwise the write might complete and (via
7509 * bitmap_endwrite) set a bit in the bitmap after the
7510 * recovery has checked that bit and skipped that
7511 * region.
7512 */
7513 if (mddev->bitmap) {
7514 mddev->pers->quiesce(mddev, 1);
7515 mddev->pers->quiesce(mddev, 0);
7516 }
7504 } 7517 }
7505 7518
7506 printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev)); 7519 printk(KERN_INFO "md: %s of RAID array %s\n", desc, mdname(mddev));
diff --git a/drivers/memstick/host/rtsx_pci_ms.c b/drivers/memstick/host/rtsx_pci_ms.c
index 2a635b6fdaf7..c880ba685754 100644
--- a/drivers/memstick/host/rtsx_pci_ms.c
+++ b/drivers/memstick/host/rtsx_pci_ms.c
@@ -601,6 +601,7 @@ static int rtsx_pci_ms_drv_remove(struct platform_device *pdev)
601 pcr->slots[RTSX_MS_CARD].card_event = NULL; 601 pcr->slots[RTSX_MS_CARD].card_event = NULL;
602 msh = host->msh; 602 msh = host->msh;
603 host->eject = true; 603 host->eject = true;
604 cancel_work_sync(&host->handle_req);
604 605
605 mutex_lock(&host->host_mutex); 606 mutex_lock(&host->host_mutex);
606 if (host->req) { 607 if (host->req) {
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index ee8204cc31e9..6cc4b6acc22a 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -760,6 +760,7 @@ config MFD_SYSCON
760config MFD_DAVINCI_VOICECODEC 760config MFD_DAVINCI_VOICECODEC
761 tristate 761 tristate
762 select MFD_CORE 762 select MFD_CORE
763 select REGMAP_MMIO
763 764
764config MFD_TI_AM335X_TSCADC 765config MFD_TI_AM335X_TSCADC
765 tristate "TI ADC / Touch Screen chip support" 766 tristate "TI ADC / Touch Screen chip support"
@@ -1225,7 +1226,7 @@ config MFD_WM8994
1225 functionaltiy of the device other drivers must be enabled. 1226 functionaltiy of the device other drivers must be enabled.
1226 1227
1227config MFD_STW481X 1228config MFD_STW481X
1228 bool "Support for ST Microelectronics STw481x" 1229 tristate "Support for ST Microelectronics STw481x"
1229 depends on I2C && ARCH_NOMADIK 1230 depends on I2C && ARCH_NOMADIK
1230 select REGMAP_I2C 1231 select REGMAP_I2C
1231 select MFD_CORE 1232 select MFD_CORE
@@ -1248,7 +1249,7 @@ config MCP_SA11X0
1248 1249
1249# Chip drivers 1250# Chip drivers
1250config MCP_UCB1200 1251config MCP_UCB1200
1251 bool "Support for UCB1200 / UCB1300" 1252 tristate "Support for UCB1200 / UCB1300"
1252 depends on MCP_SA11X0 1253 depends on MCP_SA11X0
1253 select MCP 1254 select MCP
1254 1255
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index a8ee4a36a1d8..cf2e6a198c6b 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -591,7 +591,7 @@ static int ab8500_irq_init(struct ab8500 *ab8500, struct device_node *np)
591 num_irqs = AB8500_NR_IRQS; 591 num_irqs = AB8500_NR_IRQS;
592 592
593 /* If ->irq_base is zero this will give a linear mapping */ 593 /* If ->irq_base is zero this will give a linear mapping */
594 ab8500->domain = irq_domain_add_simple(NULL, 594 ab8500->domain = irq_domain_add_simple(ab8500->dev->of_node,
595 num_irqs, 0, 595 num_irqs, 0,
596 &ab8500_irq_ops, ab8500); 596 &ab8500_irq_ops, ab8500);
597 597
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index a43d0c467274..ee9402324a23 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -54,7 +54,7 @@ config AD525X_DPOT_SPI
54config ATMEL_PWM 54config ATMEL_PWM
55 tristate "Atmel AT32/AT91 PWM support" 55 tristate "Atmel AT32/AT91 PWM support"
56 depends on HAVE_CLK 56 depends on HAVE_CLK
57 depends on AVR32 || AT91SAM9263 || AT91SAM9RL || AT91SAM9G45 57 depends on AVR32 || ARCH_AT91SAM9263 || ARCH_AT91SAM9RL || ARCH_AT91SAM9G45
58 help 58 help
59 This option enables device driver support for the PWM channels 59 This option enables device driver support for the PWM channels
60 on certain Atmel processors. Pulse Width Modulation is used for 60 on certain Atmel processors. Pulse Width Modulation is used for
diff --git a/drivers/mtd/chips/cfi_cmdset_0001.c b/drivers/mtd/chips/cfi_cmdset_0001.c
index e4ec355704a6..a7543ba3e190 100644
--- a/drivers/mtd/chips/cfi_cmdset_0001.c
+++ b/drivers/mtd/chips/cfi_cmdset_0001.c
@@ -52,6 +52,11 @@
52/* Atmel chips */ 52/* Atmel chips */
53#define AT49BV640D 0x02de 53#define AT49BV640D 0x02de
54#define AT49BV640DT 0x02db 54#define AT49BV640DT 0x02db
55/* Sharp chips */
56#define LH28F640BFHE_PTTL90 0x00b0
57#define LH28F640BFHE_PBTL90 0x00b1
58#define LH28F640BFHE_PTTL70A 0x00b2
59#define LH28F640BFHE_PBTL70A 0x00b3
55 60
56static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *); 61static int cfi_intelext_read (struct mtd_info *, loff_t, size_t, size_t *, u_char *);
57static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *); 62static int cfi_intelext_write_words(struct mtd_info *, loff_t, size_t, size_t *, const u_char *);
@@ -258,6 +263,36 @@ static void fixup_st_m28w320cb(struct mtd_info *mtd)
258 (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e; 263 (cfi->cfiq->EraseRegionInfo[1] & 0xffff0000) | 0x3e;
259}; 264};
260 265
266static int is_LH28F640BF(struct cfi_private *cfi)
267{
268 /* Sharp LH28F640BF Family */
269 if (cfi->mfr == CFI_MFR_SHARP && (
270 cfi->id == LH28F640BFHE_PTTL90 || cfi->id == LH28F640BFHE_PBTL90 ||
271 cfi->id == LH28F640BFHE_PTTL70A || cfi->id == LH28F640BFHE_PBTL70A))
272 return 1;
273 return 0;
274}
275
276static void fixup_LH28F640BF(struct mtd_info *mtd)
277{
278 struct map_info *map = mtd->priv;
279 struct cfi_private *cfi = map->fldrv_priv;
280 struct cfi_pri_intelext *extp = cfi->cmdset_priv;
281
282 /* Reset the Partition Configuration Register on LH28F640BF
283 * to a single partition (PCR = 0x000): PCR is embedded into A0-A15. */
284 if (is_LH28F640BF(cfi)) {
285 printk(KERN_INFO "Reset Partition Config. Register: 1 Partition of 4 planes\n");
286 map_write(map, CMD(0x60), 0);
287 map_write(map, CMD(0x04), 0);
288
289 /* We have set one single partition thus
290 * Simultaneous Operations are not allowed */
291 printk(KERN_INFO "cfi_cmdset_0001: Simultaneous Operations disabled\n");
292 extp->FeatureSupport &= ~512;
293 }
294}
295
261static void fixup_use_point(struct mtd_info *mtd) 296static void fixup_use_point(struct mtd_info *mtd)
262{ 297{
263 struct map_info *map = mtd->priv; 298 struct map_info *map = mtd->priv;
@@ -309,6 +344,8 @@ static struct cfi_fixup cfi_fixup_table[] = {
309 { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct }, 344 { CFI_MFR_ST, 0x00ba, /* M28W320CT */ fixup_st_m28w320ct },
310 { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb }, 345 { CFI_MFR_ST, 0x00bb, /* M28W320CB */ fixup_st_m28w320cb },
311 { CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock }, 346 { CFI_MFR_INTEL, CFI_ID_ANY, fixup_unlock_powerup_lock },
347 { CFI_MFR_SHARP, CFI_ID_ANY, fixup_unlock_powerup_lock },
348 { CFI_MFR_SHARP, CFI_ID_ANY, fixup_LH28F640BF },
312 { 0, 0, NULL } 349 { 0, 0, NULL }
313}; 350};
314 351
@@ -1649,6 +1686,12 @@ static int __xipram do_write_buffer(struct map_info *map, struct flchip *chip,
1649 initial_adr = adr; 1686 initial_adr = adr;
1650 cmd_adr = adr & ~(wbufsize-1); 1687 cmd_adr = adr & ~(wbufsize-1);
1651 1688
1689 /* Sharp LH28F640BF chips need the first address for the
1690 * Page Buffer Program command. See Table 5 of
1691 * LH28F320BF, LH28F640BF, LH28F128BF Series (Appendix FUM00701) */
1692 if (is_LH28F640BF(cfi))
1693 cmd_adr = adr;
1694
1652 /* Let's determine this according to the interleave only once */ 1695 /* Let's determine this according to the interleave only once */
1653 write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9); 1696 write_cmd = (cfi->cfiq->P_ID != P_ID_INTEL_PERFORMANCE) ? CMD(0xe8) : CMD(0xe9);
1654 1697
diff --git a/drivers/mtd/devices/elm.c b/drivers/mtd/devices/elm.c
index 7df86948e6d4..b4f61c7fc161 100644
--- a/drivers/mtd/devices/elm.c
+++ b/drivers/mtd/devices/elm.c
@@ -475,6 +475,7 @@ static int elm_context_save(struct elm_info *info)
475 ELM_SYNDROME_FRAGMENT_1 + offset); 475 ELM_SYNDROME_FRAGMENT_1 + offset);
476 regs->elm_syndrome_fragment_0[i] = elm_read_reg(info, 476 regs->elm_syndrome_fragment_0[i] = elm_read_reg(info,
477 ELM_SYNDROME_FRAGMENT_0 + offset); 477 ELM_SYNDROME_FRAGMENT_0 + offset);
478 break;
478 default: 479 default:
479 return -EINVAL; 480 return -EINVAL;
480 } 481 }
@@ -520,6 +521,7 @@ static int elm_context_restore(struct elm_info *info)
520 regs->elm_syndrome_fragment_1[i]); 521 regs->elm_syndrome_fragment_1[i]);
521 elm_write_reg(info, ELM_SYNDROME_FRAGMENT_0 + offset, 522 elm_write_reg(info, ELM_SYNDROME_FRAGMENT_0 + offset,
522 regs->elm_syndrome_fragment_0[i]); 523 regs->elm_syndrome_fragment_0[i]);
524 break;
523 default: 525 default:
524 return -EINVAL; 526 return -EINVAL;
525 } 527 }
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 41167e9e991e..4f3e80c68a26 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -4047,8 +4047,10 @@ int nand_scan_tail(struct mtd_info *mtd)
4047 ecc->layout->oobavail += ecc->layout->oobfree[i].length; 4047 ecc->layout->oobavail += ecc->layout->oobfree[i].length;
4048 mtd->oobavail = ecc->layout->oobavail; 4048 mtd->oobavail = ecc->layout->oobavail;
4049 4049
4050 /* ECC sanity check: warn noisily if it's too weak */ 4050 /* ECC sanity check: warn if it's too weak */
4051 WARN_ON(!nand_ecc_strength_good(mtd)); 4051 if (!nand_ecc_strength_good(mtd))
4052 pr_warn("WARNING: %s: the ECC used on your system is too weak compared to the one required by the NAND chip\n",
4053 mtd->name);
4052 4054
4053 /* 4055 /*
4054 * Set the number of read / write steps for one page depending on ECC 4056 * Set the number of read / write steps for one page depending on ECC
diff --git a/drivers/mtd/ubi/fastmap.c b/drivers/mtd/ubi/fastmap.c
index b04e7d059888..0431b46d9fd9 100644
--- a/drivers/mtd/ubi/fastmap.c
+++ b/drivers/mtd/ubi/fastmap.c
@@ -125,7 +125,7 @@ static struct ubi_ainf_volume *add_vol(struct ubi_attach_info *ai, int vol_id,
125 parent = *p; 125 parent = *p;
126 av = rb_entry(parent, struct ubi_ainf_volume, rb); 126 av = rb_entry(parent, struct ubi_ainf_volume, rb);
127 127
128 if (vol_id < av->vol_id) 128 if (vol_id > av->vol_id)
129 p = &(*p)->rb_left; 129 p = &(*p)->rb_left;
130 else 130 else
131 p = &(*p)->rb_right; 131 p = &(*p)->rb_right;
@@ -423,7 +423,7 @@ static int scan_pool(struct ubi_device *ubi, struct ubi_attach_info *ai,
423 pnum, err); 423 pnum, err);
424 ret = err > 0 ? UBI_BAD_FASTMAP : err; 424 ret = err > 0 ? UBI_BAD_FASTMAP : err;
425 goto out; 425 goto out;
426 } else if (ret == UBI_IO_BITFLIPS) 426 } else if (err == UBI_IO_BITFLIPS)
427 scrub = 1; 427 scrub = 1;
428 428
429 /* 429 /*
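Two small fastmap fixes. In add_vol() the comparison is flipped so the insert path sorts the volume red-black tree on the same predicate as the lookup path; with the two disagreeing, an existing volume could be missed and inserted a second time. In scan_pool() the copy-pasted ret is replaced by err, so a UBI_IO_BITFLIPS result from reading the VID header actually schedules scrubbing. A hedged, simplified sketch of the ordering rule (kernel rbtree API; the duplicate handling here is illustrative, not the driver's exact code):

        while (*p) {
                parent = *p;
                av = rb_entry(parent, struct ubi_ainf_volume, rb);

                if (vol_id > av->vol_id)
                        p = &(*p)->rb_left;     /* must match the lookup side */
                else if (vol_id < av->vol_id)
                        p = &(*p)->rb_right;
                else
                        return av;              /* already in the tree */
        }
        rb_link_node(&av_new->rb, parent, p);
        rb_insert_color(&av_new->rb, &ai->volumes);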
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 04f35f960cb8..701f86cd5993 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1025,10 +1025,14 @@ static netdev_features_t bond_fix_features(struct net_device *dev,
1025 NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \ 1025 NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
1026 NETIF_F_HIGHDMA | NETIF_F_LRO) 1026 NETIF_F_HIGHDMA | NETIF_F_LRO)
1027 1027
1028#define BOND_ENC_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | NETIF_F_RXCSUM |\
1029 NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL)
1030
1028static void bond_compute_features(struct bonding *bond) 1031static void bond_compute_features(struct bonding *bond)
1029{ 1032{
1030 unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE; 1033 unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE;
1031 netdev_features_t vlan_features = BOND_VLAN_FEATURES; 1034 netdev_features_t vlan_features = BOND_VLAN_FEATURES;
1035 netdev_features_t enc_features = BOND_ENC_FEATURES;
1032 struct net_device *bond_dev = bond->dev; 1036 struct net_device *bond_dev = bond->dev;
1033 struct list_head *iter; 1037 struct list_head *iter;
1034 struct slave *slave; 1038 struct slave *slave;
@@ -1044,6 +1048,9 @@ static void bond_compute_features(struct bonding *bond)
1044 vlan_features = netdev_increment_features(vlan_features, 1048 vlan_features = netdev_increment_features(vlan_features,
1045 slave->dev->vlan_features, BOND_VLAN_FEATURES); 1049 slave->dev->vlan_features, BOND_VLAN_FEATURES);
1046 1050
1051 enc_features = netdev_increment_features(enc_features,
1052 slave->dev->hw_enc_features,
1053 BOND_ENC_FEATURES);
1047 dst_release_flag &= slave->dev->priv_flags; 1054 dst_release_flag &= slave->dev->priv_flags;
1048 if (slave->dev->hard_header_len > max_hard_header_len) 1055 if (slave->dev->hard_header_len > max_hard_header_len)
1049 max_hard_header_len = slave->dev->hard_header_len; 1056 max_hard_header_len = slave->dev->hard_header_len;
@@ -1054,6 +1061,7 @@ static void bond_compute_features(struct bonding *bond)
1054 1061
1055done: 1062done:
1056 bond_dev->vlan_features = vlan_features; 1063 bond_dev->vlan_features = vlan_features;
1064 bond_dev->hw_enc_features = enc_features;
1057 bond_dev->hard_header_len = max_hard_header_len; 1065 bond_dev->hard_header_len = max_hard_header_len;
1058 bond_dev->gso_max_segs = gso_max_segs; 1066 bond_dev->gso_max_segs = gso_max_segs;
1059 netif_set_gso_max_size(bond_dev, gso_max_size); 1067 netif_set_gso_max_size(bond_dev, gso_max_size);
@@ -3975,6 +3983,7 @@ void bond_setup(struct net_device *bond_dev)
3975 NETIF_F_HW_VLAN_CTAG_FILTER; 3983 NETIF_F_HW_VLAN_CTAG_FILTER;
3976 3984
3977 bond_dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM); 3985 bond_dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM);
3986 bond_dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
3978 bond_dev->features |= bond_dev->hw_features; 3987 bond_dev->features |= bond_dev->hw_features;
3979} 3988}
3980 3989
@@ -4059,7 +4068,7 @@ static int bond_check_params(struct bond_params *params)
4059 } 4068 }
4060 4069
4061 if (ad_select) { 4070 if (ad_select) {
4062 bond_opt_initstr(&newval, lacp_rate); 4071 bond_opt_initstr(&newval, ad_select);
4063 valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_SELECT), 4072 valptr = bond_opt_parse(bond_opt_get(BOND_OPT_AD_SELECT),
4064 &newval); 4073 &newval);
4065 if (!valptr) { 4074 if (!valptr) {
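
The bond_compute_features() change above seeds a second accumulator with BOND_ENC_FEATURES and intersects it with every slave's hw_enc_features, so the bond only advertises encapsulated offloads (UDP-tunnel GSO, checksum, TSO) that all of its slaves support; bond_setup() then exposes NETIF_F_GSO_UDP_TUNNEL on the master. The last hunk is an unrelated fix: the ad_select module parameter was being parsed from the lacp_rate string by copy-and-paste. Below is a condensed sketch of the feature-aggregation pattern only; the helper name and everything outside the quoted hunks are simplified or hypothetical.

#define BOND_ENC_FEATURES	(NETIF_F_ALL_CSUM | NETIF_F_SG | NETIF_F_RXCSUM | \
				 NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL)

static void bond_update_enc_features(struct bonding *bond)	/* hypothetical helper */
{
	netdev_features_t enc_features = BOND_ENC_FEATURES;
	struct list_head *iter;
	struct slave *slave;

	/* keep only the encapsulated offloads every slave can handle */
	bond_for_each_slave(bond, slave, iter)
		enc_features = netdev_increment_features(enc_features,
							 slave->dev->hw_enc_features,
							 BOND_ENC_FEATURES);

	bond->dev->hw_enc_features = enc_features;
}
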
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index dcf9196f6316..ea4d4f1a6411 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -52,6 +52,7 @@
52#include <linux/delay.h> 52#include <linux/delay.h>
53#include <linux/init.h> 53#include <linux/init.h>
54#include <linux/kernel.h> 54#include <linux/kernel.h>
55#include <linux/workqueue.h>
55#include <linux/can.h> 56#include <linux/can.h>
56#include <linux/can/skb.h> 57#include <linux/can/skb.h>
57 58
@@ -85,6 +86,7 @@ struct slcan {
85 struct tty_struct *tty; /* ptr to TTY structure */ 86 struct tty_struct *tty; /* ptr to TTY structure */
86 struct net_device *dev; /* easy for intr handling */ 87 struct net_device *dev; /* easy for intr handling */
87 spinlock_t lock; 88 spinlock_t lock;
89 struct work_struct tx_work; /* Flushes transmit buffer */
88 90
89 /* These are pointers to the malloc()ed frame buffers. */ 91 /* These are pointers to the malloc()ed frame buffers. */
90 unsigned char rbuff[SLC_MTU]; /* receiver buffer */ 92 unsigned char rbuff[SLC_MTU]; /* receiver buffer */
@@ -309,36 +311,46 @@ static void slc_encaps(struct slcan *sl, struct can_frame *cf)
309 sl->dev->stats.tx_bytes += cf->can_dlc; 311 sl->dev->stats.tx_bytes += cf->can_dlc;
310} 312}
311 313
312/* 314/* Write out any remaining transmit buffer. Scheduled when tty is writable */
313 * Called by the driver when there's room for more data. If we have 315static void slcan_transmit(struct work_struct *work)
314 * more packets to send, we send them here.
315 */
316static void slcan_write_wakeup(struct tty_struct *tty)
317{ 316{
317 struct slcan *sl = container_of(work, struct slcan, tx_work);
318 int actual; 318 int actual;
319 struct slcan *sl = (struct slcan *) tty->disc_data;
320 319
320 spin_lock_bh(&sl->lock);
321 /* First make sure we're connected. */ 321 /* First make sure we're connected. */
322 if (!sl || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev)) 322 if (!sl->tty || sl->magic != SLCAN_MAGIC || !netif_running(sl->dev)) {
323 spin_unlock_bh(&sl->lock);
323 return; 324 return;
325 }
324 326
325 spin_lock_bh(&sl->lock);
326 if (sl->xleft <= 0) { 327 if (sl->xleft <= 0) {
327 /* Now serial buffer is almost free & we can start 328 /* Now serial buffer is almost free & we can start
328 * transmission of another packet */ 329 * transmission of another packet */
329 sl->dev->stats.tx_packets++; 330 sl->dev->stats.tx_packets++;
330 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); 331 clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
331 spin_unlock_bh(&sl->lock); 332 spin_unlock_bh(&sl->lock);
332 netif_wake_queue(sl->dev); 333 netif_wake_queue(sl->dev);
333 return; 334 return;
334 } 335 }
335 336
336 actual = tty->ops->write(tty, sl->xhead, sl->xleft); 337 actual = sl->tty->ops->write(sl->tty, sl->xhead, sl->xleft);
337 sl->xleft -= actual; 338 sl->xleft -= actual;
338 sl->xhead += actual; 339 sl->xhead += actual;
339 spin_unlock_bh(&sl->lock); 340 spin_unlock_bh(&sl->lock);
340} 341}
341 342
343/*
344 * Called by the driver when there's room for more data.
345 * Schedule the transmit.
346 */
347static void slcan_write_wakeup(struct tty_struct *tty)
348{
349 struct slcan *sl = tty->disc_data;
350
351 schedule_work(&sl->tx_work);
352}
353
342/* Send a can_frame to a TTY queue. */ 354/* Send a can_frame to a TTY queue. */
343static netdev_tx_t slc_xmit(struct sk_buff *skb, struct net_device *dev) 355static netdev_tx_t slc_xmit(struct sk_buff *skb, struct net_device *dev)
344{ 356{
@@ -528,6 +540,7 @@ static struct slcan *slc_alloc(dev_t line)
528 sl->magic = SLCAN_MAGIC; 540 sl->magic = SLCAN_MAGIC;
529 sl->dev = dev; 541 sl->dev = dev;
530 spin_lock_init(&sl->lock); 542 spin_lock_init(&sl->lock);
543 INIT_WORK(&sl->tx_work, slcan_transmit);
531 slcan_devs[i] = dev; 544 slcan_devs[i] = dev;
532 545
533 return sl; 546 return sl;
@@ -626,8 +639,12 @@ static void slcan_close(struct tty_struct *tty)
626 if (!sl || sl->magic != SLCAN_MAGIC || sl->tty != tty) 639 if (!sl || sl->magic != SLCAN_MAGIC || sl->tty != tty)
627 return; 640 return;
628 641
642 spin_lock_bh(&sl->lock);
629 tty->disc_data = NULL; 643 tty->disc_data = NULL;
630 sl->tty = NULL; 644 sl->tty = NULL;
645 spin_unlock_bh(&sl->lock);
646
647 flush_work(&sl->tx_work);
631 648
632 /* Flush network side */ 649 /* Flush network side */
633 unregister_netdev(sl->dev); 650 unregister_netdev(sl->dev);
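
The slcan hunks above stop flushing the transmit buffer directly from the tty write-wakeup callback and defer it to a work item: slcan_write_wakeup() now only calls schedule_work(), slcan_transmit() re-checks sl->tty under the lock, and slcan_close() clears the pointer and then flush_work()s any transmit still in flight. A condensed sketch of that defer-and-flush pattern follows; fields and error handling outside the quoted hunks are simplified.

struct slcan {
	struct tty_struct *tty;
	spinlock_t lock;
	struct work_struct tx_work;	/* INIT_WORK(&sl->tx_work, slcan_transmit) at allocation */
	/* ... xbuff/xhead/xleft ... */
};

static void slcan_transmit(struct work_struct *work)
{
	struct slcan *sl = container_of(work, struct slcan, tx_work);

	spin_lock_bh(&sl->lock);
	if (!sl->tty) {				/* line discipline already closed */
		spin_unlock_bh(&sl->lock);
		return;
	}
	/* ... push sl->xhead / sl->xleft out via sl->tty->ops->write() ... */
	spin_unlock_bh(&sl->lock);
}

static void slcan_write_wakeup(struct tty_struct *tty)
{
	struct slcan *sl = tty->disc_data;

	schedule_work(&sl->tx_work);		/* defer the flush to process context */
}

static void slcan_close(struct tty_struct *tty)
{
	struct slcan *sl = tty->disc_data;

	spin_lock_bh(&sl->lock);
	tty->disc_data = NULL;
	sl->tty = NULL;				/* queued work will see this and bail out */
	spin_unlock_bh(&sl->lock);

	flush_work(&sl->tx_work);		/* no transmit can still be running past here */
}
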
diff --git a/drivers/net/ethernet/allwinner/sun4i-emac.c b/drivers/net/ethernet/allwinner/sun4i-emac.c
index 28460676b8ca..d81e7167a8b5 100644
--- a/drivers/net/ethernet/allwinner/sun4i-emac.c
+++ b/drivers/net/ethernet/allwinner/sun4i-emac.c
@@ -736,6 +736,7 @@ static int emac_open(struct net_device *dev)
736 736
737 ret = emac_mdio_probe(dev); 737 ret = emac_mdio_probe(dev);
738 if (ret < 0) { 738 if (ret < 0) {
739 free_irq(dev->irq, dev);
739 netdev_err(dev, "cannot probe MDIO bus\n"); 740 netdev_err(dev, "cannot probe MDIO bus\n");
740 return ret; 741 return ret;
741 } 742 }
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 141160ef249a..5776e503e4c5 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -654,13 +654,13 @@ static int bcm_sysport_tx_poll(struct napi_struct *napi, int budget)
654 654
655 work_done = bcm_sysport_tx_reclaim(ring->priv, ring); 655 work_done = bcm_sysport_tx_reclaim(ring->priv, ring);
656 656
657 if (work_done < budget) { 657 if (work_done == 0) {
658 napi_complete(napi); 658 napi_complete(napi);
659 /* re-enable TX interrupt */ 659 /* re-enable TX interrupt */
660 intrl2_1_mask_clear(ring->priv, BIT(ring->index)); 660 intrl2_1_mask_clear(ring->priv, BIT(ring->index));
661 } 661 }
662 662
663 return work_done; 663 return 0;
664} 664}
665 665
666static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv) 666static void bcm_sysport_tx_reclaim_all(struct bcm_sysport_priv *priv)
@@ -1254,28 +1254,17 @@ static inline void umac_enable_set(struct bcm_sysport_priv *priv,
1254 usleep_range(1000, 2000); 1254 usleep_range(1000, 2000);
1255} 1255}
1256 1256
1257static inline int umac_reset(struct bcm_sysport_priv *priv) 1257static inline void umac_reset(struct bcm_sysport_priv *priv)
1258{ 1258{
1259 unsigned int timeout = 0;
1260 u32 reg; 1259 u32 reg;
1261 int ret = 0;
1262
1263 umac_writel(priv, 0, UMAC_CMD);
1264 while (timeout++ < 1000) {
1265 reg = umac_readl(priv, UMAC_CMD);
1266 if (!(reg & CMD_SW_RESET))
1267 break;
1268
1269 udelay(1);
1270 }
1271
1272 if (timeout == 1000) {
1273 dev_err(&priv->pdev->dev,
1274 "timeout waiting for MAC to come out of reset\n");
1275 ret = -ETIMEDOUT;
1276 }
1277 1260
1278 return ret; 1261 reg = umac_readl(priv, UMAC_CMD);
1262 reg |= CMD_SW_RESET;
1263 umac_writel(priv, reg, UMAC_CMD);
1264 udelay(10);
1265 reg = umac_readl(priv, UMAC_CMD);
1266 reg &= ~CMD_SW_RESET;
1267 umac_writel(priv, reg, UMAC_CMD);
1279} 1268}
1280 1269
1281static void umac_set_hw_addr(struct bcm_sysport_priv *priv, 1270static void umac_set_hw_addr(struct bcm_sysport_priv *priv,
@@ -1303,11 +1292,7 @@ static int bcm_sysport_open(struct net_device *dev)
1303 int ret; 1292 int ret;
1304 1293
1305 /* Reset UniMAC */ 1294 /* Reset UniMAC */
1306 ret = umac_reset(priv); 1295 umac_reset(priv);
1307 if (ret) {
1308 netdev_err(dev, "UniMAC reset failed\n");
1309 return ret;
1310 }
1311 1296
1312 /* Flush TX and RX FIFOs at TOPCTRL level */ 1297 /* Flush TX and RX FIFOs at TOPCTRL level */
1313 topctrl_flush(priv); 1298 topctrl_flush(priv);
@@ -1589,12 +1574,6 @@ static int bcm_sysport_probe(struct platform_device *pdev)
1589 BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8); 1574 BUILD_BUG_ON(sizeof(struct bcm_tsb) != 8);
1590 dev->needed_headroom += sizeof(struct bcm_tsb); 1575 dev->needed_headroom += sizeof(struct bcm_tsb);
1591 1576
1592 /* We are interfaced to a switch which handles the multicast
1593 * filtering for us, so we do not support programming any
1594 * multicast hash table in this Ethernet MAC.
1595 */
1596 dev->flags &= ~IFF_MULTICAST;
1597
1598 /* libphy will adjust the link state accordingly */ 1577 /* libphy will adjust the link state accordingly */
1599 netif_carrier_off(dev); 1578 netif_carrier_off(dev);
1600 1579
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index 47c5814114e1..4b875da1c7ed 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -797,7 +797,8 @@ static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
797 797
798 return; 798 return;
799 } 799 }
800 bnx2x_frag_free(fp, new_data); 800 if (new_data)
801 bnx2x_frag_free(fp, new_data);
801drop: 802drop:
802 /* drop the packet and keep the buffer in the bin */ 803 /* drop the packet and keep the buffer in the bin */
803 DP(NETIF_MSG_RX_STATUS, 804 DP(NETIF_MSG_RX_STATUS,
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index 2887034523e0..6a8b1453a1b9 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -12937,7 +12937,7 @@ static int bnx2x_get_num_non_def_sbs(struct pci_dev *pdev, int cnic_cnt)
12937 * without the default SB. 12937 * without the default SB.
12938 * For VFs there is no default SB, then we return (index+1). 12938 * For VFs there is no default SB, then we return (index+1).
12939 */ 12939 */
12940 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSI_FLAGS, &control); 12940 pci_read_config_word(pdev, pdev->msix_cap + PCI_MSIX_FLAGS, &control);
12941 12941
12942 index = control & PCI_MSIX_FLAGS_QSIZE; 12942 index = control & PCI_MSIX_FLAGS_QSIZE;
12943 12943
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index 5ba1cfbd60da..16281ad2da12 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -1408,13 +1408,6 @@ static int bcmgenet_alloc_rx_buffers(struct bcmgenet_priv *priv)
1408 if (cb->skb) 1408 if (cb->skb)
1409 continue; 1409 continue;
1410 1410
1411 /* set the DMA descriptor length once and for all
1412 * it will only change if we support dynamically sizing
1413 * priv->rx_buf_len, but we do not
1414 */
1415 dmadesc_set_length_status(priv, priv->rx_bd_assign_ptr,
1416 priv->rx_buf_len << DMA_BUFLENGTH_SHIFT);
1417
1418 ret = bcmgenet_rx_refill(priv, cb); 1411 ret = bcmgenet_rx_refill(priv, cb);
1419 if (ret) 1412 if (ret)
1420 break; 1413 break;
@@ -2535,14 +2528,17 @@ static int bcmgenet_probe(struct platform_device *pdev)
2535 netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1); 2528 netif_set_real_num_tx_queues(priv->dev, priv->hw_params->tx_queues + 1);
2536 netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1); 2529 netif_set_real_num_rx_queues(priv->dev, priv->hw_params->rx_queues + 1);
2537 2530
2538 err = register_netdev(dev); 2531 /* libphy will determine the link state */
2539 if (err) 2532 netif_carrier_off(dev);
2540 goto err_clk_disable;
2541 2533
2542 /* Turn off the main clock, WOL clock is handled separately */ 2534 /* Turn off the main clock, WOL clock is handled separately */
2543 if (!IS_ERR(priv->clk)) 2535 if (!IS_ERR(priv->clk))
2544 clk_disable_unprepare(priv->clk); 2536 clk_disable_unprepare(priv->clk);
2545 2537
2538 err = register_netdev(dev);
2539 if (err)
2540 goto err;
2541
2546 return err; 2542 return err;
2547 2543
2548err_clk_disable: 2544err_clk_disable:
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.h b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
index 0f117105fed1..e23c993b1362 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.h
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.h
@@ -331,9 +331,9 @@ struct bcmgenet_mib_counters {
331#define EXT_ENERGY_DET_MASK (1 << 12) 331#define EXT_ENERGY_DET_MASK (1 << 12)
332 332
333#define EXT_RGMII_OOB_CTRL 0x0C 333#define EXT_RGMII_OOB_CTRL 0x0C
334#define RGMII_MODE_EN (1 << 0)
335#define RGMII_LINK (1 << 4) 334#define RGMII_LINK (1 << 4)
336#define OOB_DISABLE (1 << 5) 335#define OOB_DISABLE (1 << 5)
336#define RGMII_MODE_EN (1 << 6)
337#define ID_MODE_DIS (1 << 16) 337#define ID_MODE_DIS (1 << 16)
338 338
339#define EXT_GPHY_CTRL 0x1C 339#define EXT_GPHY_CTRL 0x1C
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index df2792d8383d..8afa579e7c40 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -3224,7 +3224,7 @@ static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
3224 return 0; 3224 return 0;
3225} 3225}
3226 3226
3227#define NVRAM_CMD_TIMEOUT 100 3227#define NVRAM_CMD_TIMEOUT 5000
3228 3228
3229static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd) 3229static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3230{ 3230{
@@ -3232,7 +3232,7 @@ static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
3232 3232
3233 tw32(NVRAM_CMD, nvram_cmd); 3233 tw32(NVRAM_CMD, nvram_cmd);
3234 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) { 3234 for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
3235 udelay(10); 3235 usleep_range(10, 40);
3236 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) { 3236 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
3237 udelay(10); 3237 udelay(10);
3238 break; 3238 break;
@@ -7854,8 +7854,8 @@ static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
7854 netif_wake_queue(tp->dev); 7854 netif_wake_queue(tp->dev);
7855 } 7855 }
7856 7856
7857 segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO); 7857 segs = skb_gso_segment(skb, tp->dev->features & ~(NETIF_F_TSO | NETIF_F_TSO6));
7858 if (IS_ERR(segs)) 7858 if (IS_ERR(segs) || !segs)
7859 goto tg3_tso_bug_end; 7859 goto tg3_tso_bug_end;
7860 7860
7861 do { 7861 do {
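
The tg3 hunks above extend NVRAM_CMD_TIMEOUT from 100 to 5000 iterations and replace udelay(10) in the poll loop with usleep_range(10, 40); the command executes in sleepable context, so the longer wait does not have to busy-spin. A minimal sketch of the resulting bounded poll is below; the error path is reconstructed and may differ slightly from the driver.

#define NVRAM_CMD_TIMEOUT	5000

static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
{
	int i;

	tw32(NVRAM_CMD, nvram_cmd);
	for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
		usleep_range(10, 40);		/* may sleep; avoids a long busy-wait */
		if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
			udelay(10);
			break;
		}
	}
	if (i == NVRAM_CMD_TIMEOUT)
		return -EBUSY;

	return 0;
}
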
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
index 2f8d6b910383..a83271cf17c3 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_main.c
@@ -4057,22 +4057,19 @@ int cxgb4_unregister_uld(enum cxgb4_uld type)
4057EXPORT_SYMBOL(cxgb4_unregister_uld); 4057EXPORT_SYMBOL(cxgb4_unregister_uld);
4058 4058
4059/* Check if netdev on which event is occured belongs to us or not. Return 4059/* Check if netdev on which event is occured belongs to us or not. Return
4060 * suceess (1) if it belongs otherwise failure (0). 4060 * success (true) if it belongs otherwise failure (false).
4061 * Called with rcu_read_lock() held.
4061 */ 4062 */
4062static int cxgb4_netdev(struct net_device *netdev) 4063static bool cxgb4_netdev(const struct net_device *netdev)
4063{ 4064{
4064 struct adapter *adap; 4065 struct adapter *adap;
4065 int i; 4066 int i;
4066 4067
4067 spin_lock(&adap_rcu_lock);
4068 list_for_each_entry_rcu(adap, &adap_rcu_list, rcu_node) 4068 list_for_each_entry_rcu(adap, &adap_rcu_list, rcu_node)
4069 for (i = 0; i < MAX_NPORTS; i++) 4069 for (i = 0; i < MAX_NPORTS; i++)
4070 if (adap->port[i] == netdev) { 4070 if (adap->port[i] == netdev)
4071 spin_unlock(&adap_rcu_lock); 4071 return true;
4072 return 1; 4072 return false;
4073 }
4074 spin_unlock(&adap_rcu_lock);
4075 return 0;
4076} 4073}
4077 4074
4078static int clip_add(struct net_device *event_dev, struct inet6_ifaddr *ifa, 4075static int clip_add(struct net_device *event_dev, struct inet6_ifaddr *ifa,
@@ -6396,6 +6393,7 @@ static void remove_one(struct pci_dev *pdev)
6396 adapter->flags &= ~DEV_ENABLED; 6393 adapter->flags &= ~DEV_ENABLED;
6397 } 6394 }
6398 pci_release_regions(pdev); 6395 pci_release_regions(pdev);
6396 synchronize_rcu();
6399 kfree(adapter); 6397 kfree(adapter);
6400 } else 6398 } else
6401 pci_release_regions(pdev); 6399 pci_release_regions(pdev);
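
In the cxgb4 hunks above, cxgb4_netdev() drops the adap_rcu_lock spinlock around the list_for_each_entry_rcu() walk and instead documents that the caller holds rcu_read_lock(); remove_one() gains a synchronize_rcu() before kfree(adapter), so no reader can still be walking the list when the adapter memory is freed. The general shape of that reader/updater split is sketched below with hypothetical function names.

static LIST_HEAD(adap_rcu_list);

/* reader side: caller must hold rcu_read_lock() */
static bool adapter_owns_netdev(const struct net_device *netdev)
{
	struct adapter *adap;
	int i;

	list_for_each_entry_rcu(adap, &adap_rcu_list, rcu_node)
		for (i = 0; i < MAX_NPORTS; i++)
			if (adap->port[i] == netdev)
				return true;
	return false;
}

/* updater side: unlink, wait for readers, then free */
static void adapter_remove(struct adapter *adap)
{
	list_del_rcu(&adap->rcu_node);
	synchronize_rcu();	/* all pre-existing rcu_read_lock() sections finish */
	kfree(adap);
}
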
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
index bba67681aeaa..931478e7bd28 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_hw.c
@@ -3962,6 +3962,7 @@ int t4_port_init(struct adapter *adap, int mbox, int pf, int vf)
3962 p->lport = j; 3962 p->lport = j;
3963 p->rss_size = rss_size; 3963 p->rss_size = rss_size;
3964 memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN); 3964 memcpy(adap->port[i]->dev_addr, addr, ETH_ALEN);
3965 adap->port[i]->dev_port = j;
3965 3966
3966 ret = ntohl(c.u.info.lstatus_to_modtype); 3967 ret = ntohl(c.u.info.lstatus_to_modtype);
3967 p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ? 3968 p->mdio_addr = (ret & FW_PORT_CMD_MDIOCAP) ?
diff --git a/drivers/net/ethernet/dec/tulip/timer.c b/drivers/net/ethernet/dec/tulip/timer.c
index 768379b8aee9..523d9dde50a2 100644
--- a/drivers/net/ethernet/dec/tulip/timer.c
+++ b/drivers/net/ethernet/dec/tulip/timer.c
@@ -158,7 +158,7 @@ void comet_timer(unsigned long data)
158{ 158{
159 struct net_device *dev = (struct net_device *)data; 159 struct net_device *dev = (struct net_device *)data;
160 struct tulip_private *tp = netdev_priv(dev); 160 struct tulip_private *tp = netdev_priv(dev);
161 int next_tick = 60*HZ; 161 int next_tick = 2*HZ;
162 162
163 if (tulip_debug > 1) 163 if (tulip_debug > 1)
164 netdev_dbg(dev, "Comet link status %04x partner capability %04x\n", 164 netdev_dbg(dev, "Comet link status %04x partner capability %04x\n",
diff --git a/drivers/net/ethernet/emulex/benet/be.h b/drivers/net/ethernet/emulex/benet/be.h
index 2e7c5553955e..c2f5d2d3b932 100644
--- a/drivers/net/ethernet/emulex/benet/be.h
+++ b/drivers/net/ethernet/emulex/benet/be.h
@@ -557,9 +557,7 @@ static inline u16 be_max_qs(struct be_adapter *adapter)
557#define be_pvid_tagging_enabled(adapter) (adapter->pvid) 557#define be_pvid_tagging_enabled(adapter) (adapter->pvid)
558 558
559/* Is BE in QNQ multi-channel mode */ 559/* Is BE in QNQ multi-channel mode */
560#define be_is_qnq_mode(adapter) (adapter->mc_type == FLEX10 || \ 560#define be_is_qnq_mode(adapter) (adapter->function_mode & QNQ_MODE)
561 adapter->mc_type == vNIC1 || \
562 adapter->mc_type == UFP)
563 561
564#define lancer_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID3 || \ 562#define lancer_chip(adapter) (adapter->pdev->device == OC_DEVICE_ID3 || \
565 adapter->pdev->device == OC_DEVICE_ID4) 563 adapter->pdev->device == OC_DEVICE_ID4)
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 3e0a6b243806..59b3c056f329 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -1091,7 +1091,7 @@ struct be_cmd_resp_modify_eq_delay {
1091 * based on the skew/IPL. 1091 * based on the skew/IPL.
1092 */ 1092 */
1093#define RDMA_ENABLED 0x4 1093#define RDMA_ENABLED 0x4
1094#define FLEX10_MODE 0x400 1094#define QNQ_MODE 0x400
1095#define VNIC_MODE 0x20000 1095#define VNIC_MODE 0x20000
1096#define UMC_ENABLED 0x1000000 1096#define UMC_ENABLED 0x1000000
1097struct be_cmd_req_query_fw_cfg { 1097struct be_cmd_req_query_fw_cfg {
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 6822b3d76d85..1e187fb760f8 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -2902,7 +2902,7 @@ static int be_open(struct net_device *netdev)
2902 for_all_evt_queues(adapter, eqo, i) { 2902 for_all_evt_queues(adapter, eqo, i) {
2903 napi_enable(&eqo->napi); 2903 napi_enable(&eqo->napi);
2904 be_enable_busy_poll(eqo); 2904 be_enable_busy_poll(eqo);
2905 be_eq_notify(adapter, eqo->q.id, true, false, 0); 2905 be_eq_notify(adapter, eqo->q.id, true, true, 0);
2906 } 2906 }
2907 adapter->flags |= BE_FLAGS_NAPI_ENABLED; 2907 adapter->flags |= BE_FLAGS_NAPI_ENABLED;
2908 2908
@@ -3254,9 +3254,9 @@ err:
3254 3254
3255static u8 be_convert_mc_type(u32 function_mode) 3255static u8 be_convert_mc_type(u32 function_mode)
3256{ 3256{
3257 if (function_mode & VNIC_MODE && function_mode & FLEX10_MODE) 3257 if (function_mode & VNIC_MODE && function_mode & QNQ_MODE)
3258 return vNIC1; 3258 return vNIC1;
3259 else if (function_mode & FLEX10_MODE) 3259 else if (function_mode & QNQ_MODE)
3260 return FLEX10; 3260 return FLEX10;
3261 else if (function_mode & VNIC_MODE) 3261 else if (function_mode & VNIC_MODE)
3262 return vNIC2; 3262 return vNIC2;
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 38d9d276ab8b..77037fd377b8 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -320,6 +320,11 @@ static void *swap_buffer(void *bufaddr, int len)
320 return bufaddr; 320 return bufaddr;
321} 321}
322 322
323static inline bool is_ipv4_pkt(struct sk_buff *skb)
324{
325 return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
326}
327
323static int 328static int
324fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev) 329fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
325{ 330{
@@ -330,7 +335,8 @@ fec_enet_clear_csum(struct sk_buff *skb, struct net_device *ndev)
330 if (unlikely(skb_cow_head(skb, 0))) 335 if (unlikely(skb_cow_head(skb, 0)))
331 return -1; 336 return -1;
332 337
333 ip_hdr(skb)->check = 0; 338 if (is_ipv4_pkt(skb))
339 ip_hdr(skb)->check = 0;
334 *(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0; 340 *(__sum16 *)(skb->head + skb->csum_start + skb->csum_offset) = 0;
335 341
336 return 0; 342 return 0;
diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c
index fab39e295441..36fc429298e3 100644
--- a/drivers/net/ethernet/freescale/ucc_geth.c
+++ b/drivers/net/ethernet/freescale/ucc_geth.c
@@ -2990,11 +2990,11 @@ static int ucc_geth_startup(struct ucc_geth_private *ugeth)
2990 if (ug_info->rxExtendedFiltering) { 2990 if (ug_info->rxExtendedFiltering) {
2991 size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING; 2991 size += THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING;
2992 if (ug_info->largestexternallookupkeysize == 2992 if (ug_info->largestexternallookupkeysize ==
2993 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_8_BYTES) 2993 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_8_BYTES)
2994 size += 2994 size +=
2995 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8; 2995 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_8;
2996 if (ug_info->largestexternallookupkeysize == 2996 if (ug_info->largestexternallookupkeysize ==
2997 QE_FLTR_TABLE_LOOKUP_KEY_SIZE_16_BYTES) 2997 QE_FLTR_LARGEST_EXTERNAL_TABLE_LOOKUP_KEY_SIZE_16_BYTES)
2998 size += 2998 size +=
2999 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16; 2999 THREAD_RX_PRAM_ADDITIONAL_FOR_EXTENDED_FILTERING_16;
3000 } 3000 }
diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c
index a2db388cc31e..ee74f9536b31 100644
--- a/drivers/net/ethernet/intel/igb/e1000_82575.c
+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c
@@ -1481,6 +1481,13 @@ static s32 igb_init_hw_82575(struct e1000_hw *hw)
1481 s32 ret_val; 1481 s32 ret_val;
1482 u16 i, rar_count = mac->rar_entry_count; 1482 u16 i, rar_count = mac->rar_entry_count;
1483 1483
1484 if ((hw->mac.type >= e1000_i210) &&
1485 !(igb_get_flash_presence_i210(hw))) {
1486 ret_val = igb_pll_workaround_i210(hw);
1487 if (ret_val)
1488 return ret_val;
1489 }
1490
1484 /* Initialize identification LED */ 1491 /* Initialize identification LED */
1485 ret_val = igb_id_led_init(hw); 1492 ret_val = igb_id_led_init(hw);
1486 if (ret_val) { 1493 if (ret_val) {
diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h
index 2a8bb35c2df2..217f8138851b 100644
--- a/drivers/net/ethernet/intel/igb/e1000_defines.h
+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h
@@ -46,14 +46,15 @@
46#define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* SDP3 Data direction */ 46#define E1000_CTRL_EXT_SDP3_DIR 0x00000800 /* SDP3 Data direction */
47 47
48/* Physical Func Reset Done Indication */ 48/* Physical Func Reset Done Indication */
49#define E1000_CTRL_EXT_PFRSTD 0x00004000 49#define E1000_CTRL_EXT_PFRSTD 0x00004000
50#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000 50#define E1000_CTRL_EXT_SDLPE 0X00040000 /* SerDes Low Power Enable */
51#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000 51#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
52#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000 52#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES 0x00C00000
53#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000 53#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX 0x00400000
54#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000 54#define E1000_CTRL_EXT_LINK_MODE_SGMII 0x00800000
55#define E1000_CTRL_EXT_EIAME 0x01000000 55#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
56#define E1000_CTRL_EXT_IRCA 0x00000001 56#define E1000_CTRL_EXT_EIAME 0x01000000
57#define E1000_CTRL_EXT_IRCA 0x00000001
57/* Interrupt delay cancellation */ 58/* Interrupt delay cancellation */
58/* Driver loaded bit for FW */ 59/* Driver loaded bit for FW */
59#define E1000_CTRL_EXT_DRV_LOAD 0x10000000 60#define E1000_CTRL_EXT_DRV_LOAD 0x10000000
@@ -62,6 +63,7 @@
62/* packet buffer parity error detection enabled */ 63/* packet buffer parity error detection enabled */
63/* descriptor FIFO parity error detection enable */ 64/* descriptor FIFO parity error detection enable */
64#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */ 65#define E1000_CTRL_EXT_PBA_CLR 0x80000000 /* PBA Clear */
66#define E1000_CTRL_EXT_PHYPDEN 0x00100000
65#define E1000_I2CCMD_REG_ADDR_SHIFT 16 67#define E1000_I2CCMD_REG_ADDR_SHIFT 16
66#define E1000_I2CCMD_PHY_ADDR_SHIFT 24 68#define E1000_I2CCMD_PHY_ADDR_SHIFT 24
67#define E1000_I2CCMD_OPCODE_READ 0x08000000 69#define E1000_I2CCMD_OPCODE_READ 0x08000000
diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h
index 89925e405849..ce55ea5d750c 100644
--- a/drivers/net/ethernet/intel/igb/e1000_hw.h
+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h
@@ -567,4 +567,7 @@ struct net_device *igb_get_hw_dev(struct e1000_hw *hw);
567/* These functions must be implemented by drivers */ 567/* These functions must be implemented by drivers */
568s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); 568s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
569s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value); 569s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
570
571void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
572void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
570#endif /* _E1000_HW_H_ */ 573#endif /* _E1000_HW_H_ */
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.c b/drivers/net/ethernet/intel/igb/e1000_i210.c
index 337161f440dd..65d931669f81 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.c
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.c
@@ -834,3 +834,69 @@ s32 igb_init_nvm_params_i210(struct e1000_hw *hw)
834 } 834 }
835 return ret_val; 835 return ret_val;
836} 836}
837
838/**
839 * igb_pll_workaround_i210
840 * @hw: pointer to the HW structure
841 *
842 * Works around an errata in the PLL circuit where it occasionally
843 * provides the wrong clock frequency after power up.
844 **/
845s32 igb_pll_workaround_i210(struct e1000_hw *hw)
846{
847 s32 ret_val;
848 u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val;
849 u16 nvm_word, phy_word, pci_word, tmp_nvm;
850 int i;
851
852 /* Get and set needed register values */
853 wuc = rd32(E1000_WUC);
854 mdicnfg = rd32(E1000_MDICNFG);
855 reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO;
856 wr32(E1000_MDICNFG, reg_val);
857
858 /* Get data from NVM, or set default */
859 ret_val = igb_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD,
860 &nvm_word);
861 if (ret_val)
862 nvm_word = E1000_INVM_DEFAULT_AL;
863 tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
864 for (i = 0; i < E1000_MAX_PLL_TRIES; i++) {
865 /* check current state directly from internal PHY */
866 igb_read_phy_reg_gs40g(hw, (E1000_PHY_PLL_FREQ_PAGE |
867 E1000_PHY_PLL_FREQ_REG), &phy_word);
868 if ((phy_word & E1000_PHY_PLL_UNCONF)
869 != E1000_PHY_PLL_UNCONF) {
870 ret_val = 0;
871 break;
872 } else {
873 ret_val = -E1000_ERR_PHY;
874 }
875 /* directly reset the internal PHY */
876 ctrl = rd32(E1000_CTRL);
877 wr32(E1000_CTRL, ctrl|E1000_CTRL_PHY_RST);
878
879 ctrl_ext = rd32(E1000_CTRL_EXT);
880 ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE);
881 wr32(E1000_CTRL_EXT, ctrl_ext);
882
883 wr32(E1000_WUC, 0);
884 reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16);
885 wr32(E1000_EEARBC_I210, reg_val);
886
887 igb_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
888 pci_word |= E1000_PCI_PMCSR_D3;
889 igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
890 usleep_range(1000, 2000);
891 pci_word &= ~E1000_PCI_PMCSR_D3;
892 igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
893 reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16);
894 wr32(E1000_EEARBC_I210, reg_val);
895
896 /* restore WUC register */
897 wr32(E1000_WUC, wuc);
898 }
899 /* restore MDICNFG setting */
900 wr32(E1000_MDICNFG, mdicnfg);
901 return ret_val;
902}
diff --git a/drivers/net/ethernet/intel/igb/e1000_i210.h b/drivers/net/ethernet/intel/igb/e1000_i210.h
index 9f34976687ba..3442b6357d01 100644
--- a/drivers/net/ethernet/intel/igb/e1000_i210.h
+++ b/drivers/net/ethernet/intel/igb/e1000_i210.h
@@ -33,6 +33,7 @@ s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data);
33s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data); 33s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data);
34s32 igb_init_nvm_params_i210(struct e1000_hw *hw); 34s32 igb_init_nvm_params_i210(struct e1000_hw *hw);
35bool igb_get_flash_presence_i210(struct e1000_hw *hw); 35bool igb_get_flash_presence_i210(struct e1000_hw *hw);
36s32 igb_pll_workaround_i210(struct e1000_hw *hw);
36 37
37#define E1000_STM_OPCODE 0xDB00 38#define E1000_STM_OPCODE 0xDB00
38#define E1000_EEPROM_FLASH_SIZE_WORD 0x11 39#define E1000_EEPROM_FLASH_SIZE_WORD 0x11
@@ -78,4 +79,15 @@ enum E1000_INVM_STRUCTURE_TYPE {
78#define NVM_LED_1_CFG_DEFAULT_I211 0x0184 79#define NVM_LED_1_CFG_DEFAULT_I211 0x0184
79#define NVM_LED_0_2_CFG_DEFAULT_I211 0x200C 80#define NVM_LED_0_2_CFG_DEFAULT_I211 0x200C
80 81
82/* PLL Defines */
83#define E1000_PCI_PMCSR 0x44
84#define E1000_PCI_PMCSR_D3 0x03
85#define E1000_MAX_PLL_TRIES 5
86#define E1000_PHY_PLL_UNCONF 0xFF
87#define E1000_PHY_PLL_FREQ_PAGE 0xFC0000
88#define E1000_PHY_PLL_FREQ_REG 0x000E
89#define E1000_INVM_DEFAULT_AL 0x202F
90#define E1000_INVM_AUTOLOAD 0x0A
91#define E1000_INVM_PLL_WO_VAL 0x0010
92
81#endif 93#endif
diff --git a/drivers/net/ethernet/intel/igb/e1000_regs.h b/drivers/net/ethernet/intel/igb/e1000_regs.h
index 1cc4b1a7e597..f5ba4e4eafb9 100644
--- a/drivers/net/ethernet/intel/igb/e1000_regs.h
+++ b/drivers/net/ethernet/intel/igb/e1000_regs.h
@@ -66,6 +66,7 @@
66#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */ 66#define E1000_PBA 0x01000 /* Packet Buffer Allocation - RW */
67#define E1000_PBS 0x01008 /* Packet Buffer Size */ 67#define E1000_PBS 0x01008 /* Packet Buffer Size */
68#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */ 68#define E1000_EEMNGCTL 0x01010 /* MNG EEprom Control */
69#define E1000_EEARBC_I210 0x12024 /* EEPROM Auto Read Bus Control */
69#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */ 70#define E1000_EEWR 0x0102C /* EEPROM Write Register - RW */
70#define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */ 71#define E1000_I2CCMD 0x01028 /* SFPI2C Command Register - RW */
71#define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */ 72#define E1000_FRTIMER 0x01048 /* Free Running Timer - RW */
diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c
index f145adbb55ac..a9537ba7a5a0 100644
--- a/drivers/net/ethernet/intel/igb/igb_main.c
+++ b/drivers/net/ethernet/intel/igb/igb_main.c
@@ -7215,6 +7215,20 @@ static int igb_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
7215 } 7215 }
7216} 7216}
7217 7217
7218void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
7219{
7220 struct igb_adapter *adapter = hw->back;
7221
7222 pci_read_config_word(adapter->pdev, reg, value);
7223}
7224
7225void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
7226{
7227 struct igb_adapter *adapter = hw->back;
7228
7229 pci_write_config_word(adapter->pdev, reg, *value);
7230}
7231
7218s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value) 7232s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
7219{ 7233{
7220 struct igb_adapter *adapter = hw->back; 7234 struct igb_adapter *adapter = hw->back;
@@ -7578,6 +7592,8 @@ static int igb_sriov_reinit(struct pci_dev *dev)
7578 7592
7579 if (netif_running(netdev)) 7593 if (netif_running(netdev))
7580 igb_close(netdev); 7594 igb_close(netdev);
7595 else
7596 igb_reset(adapter);
7581 7597
7582 igb_clear_interrupt_scheme(adapter); 7598 igb_clear_interrupt_scheme(adapter);
7583 7599
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 45beca17fa50..dadd9a5f6323 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1207,7 +1207,7 @@ static u32 mvneta_txq_desc_csum(int l3_offs, int l3_proto,
1207 command = l3_offs << MVNETA_TX_L3_OFF_SHIFT; 1207 command = l3_offs << MVNETA_TX_L3_OFF_SHIFT;
1208 command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT; 1208 command |= ip_hdr_len << MVNETA_TX_IP_HLEN_SHIFT;
1209 1209
1210 if (l3_proto == swab16(ETH_P_IP)) 1210 if (l3_proto == htons(ETH_P_IP))
1211 command |= MVNETA_TXD_IP_CSUM; 1211 command |= MVNETA_TXD_IP_CSUM;
1212 else 1212 else
1213 command |= MVNETA_TX_L3_IP6; 1213 command |= MVNETA_TX_L3_IP6;
@@ -2529,7 +2529,7 @@ static void mvneta_adjust_link(struct net_device *ndev)
2529 2529
2530 if (phydev->speed == SPEED_1000) 2530 if (phydev->speed == SPEED_1000)
2531 val |= MVNETA_GMAC_CONFIG_GMII_SPEED; 2531 val |= MVNETA_GMAC_CONFIG_GMII_SPEED;
2532 else 2532 else if (phydev->speed == SPEED_100)
2533 val |= MVNETA_GMAC_CONFIG_MII_SPEED; 2533 val |= MVNETA_GMAC_CONFIG_MII_SPEED;
2534 2534
2535 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); 2535 mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val);
diff --git a/drivers/net/ethernet/marvell/skge.c b/drivers/net/ethernet/marvell/skge.c
index 7f81ae66cc89..e912b6887d40 100644
--- a/drivers/net/ethernet/marvell/skge.c
+++ b/drivers/net/ethernet/marvell/skge.c
@@ -4199,6 +4199,13 @@ static struct dmi_system_id skge_32bit_dma_boards[] = {
4199 DMI_MATCH(DMI_BOARD_NAME, "P5NSLI") 4199 DMI_MATCH(DMI_BOARD_NAME, "P5NSLI")
4200 }, 4200 },
4201 }, 4201 },
4202 {
4203 .ident = "FUJITSU SIEMENS A8NE-FM",
4204 .matches = {
4205 DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTek Computer INC."),
4206 DMI_MATCH(DMI_BOARD_NAME, "A8NE-FM")
4207 },
4208 },
4202 {} 4209 {}
4203}; 4210};
4204 4211
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index 80f725228f5b..56022d647837 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -294,8 +294,6 @@ int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,
294 init_completion(&cq->free); 294 init_completion(&cq->free);
295 295
296 cq->irq = priv->eq_table.eq[cq->vector].irq; 296 cq->irq = priv->eq_table.eq[cq->vector].irq;
297 cq->irq_affinity_change = false;
298
299 return 0; 297 return 0;
300 298
301err_radix: 299err_radix:
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_cq.c b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
index 4b2130760eed..14c00048bbec 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_cq.c
@@ -128,6 +128,10 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
128 mlx4_warn(mdev, "Failed assigning an EQ to %s, falling back to legacy EQ's\n", 128 mlx4_warn(mdev, "Failed assigning an EQ to %s, falling back to legacy EQ's\n",
129 name); 129 name);
130 } 130 }
131
132 cq->irq_desc =
133 irq_to_desc(mlx4_eq_get_irq(mdev->dev,
134 cq->vector));
131 } 135 }
132 } else { 136 } else {
133 cq->vector = (cq->ring + 1 + priv->port) % 137 cq->vector = (cq->ring + 1 + priv->port) %
@@ -187,8 +191,6 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
187 mlx4_en_unmap_buffer(&cq->wqres.buf); 191 mlx4_en_unmap_buffer(&cq->wqres.buf);
188 mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size); 192 mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
189 if (priv->mdev->dev->caps.comp_pool && cq->vector) { 193 if (priv->mdev->dev->caps.comp_pool && cq->vector) {
190 if (!cq->is_tx)
191 irq_set_affinity_hint(cq->mcq.irq, NULL);
192 mlx4_release_eq(priv->mdev->dev, cq->vector); 194 mlx4_release_eq(priv->mdev->dev, cq->vector);
193 } 195 }
194 cq->vector = 0; 196 cq->vector = 0;
@@ -204,6 +206,7 @@ void mlx4_en_deactivate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq)
204 if (!cq->is_tx) { 206 if (!cq->is_tx) {
205 napi_hash_del(&cq->napi); 207 napi_hash_del(&cq->napi);
206 synchronize_rcu(); 208 synchronize_rcu();
209 irq_set_affinity_hint(cq->mcq.irq, NULL);
207 } 210 }
208 netif_napi_del(&cq->napi); 211 netif_napi_del(&cq->napi);
209 212
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
index fa1a069e14e6..68d763d2d030 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_ethtool.c
@@ -417,6 +417,8 @@ static int mlx4_en_get_coalesce(struct net_device *dev,
417 417
418 coal->tx_coalesce_usecs = priv->tx_usecs; 418 coal->tx_coalesce_usecs = priv->tx_usecs;
419 coal->tx_max_coalesced_frames = priv->tx_frames; 419 coal->tx_max_coalesced_frames = priv->tx_frames;
420 coal->tx_max_coalesced_frames_irq = priv->tx_work_limit;
421
420 coal->rx_coalesce_usecs = priv->rx_usecs; 422 coal->rx_coalesce_usecs = priv->rx_usecs;
421 coal->rx_max_coalesced_frames = priv->rx_frames; 423 coal->rx_max_coalesced_frames = priv->rx_frames;
422 424
@@ -426,6 +428,7 @@ static int mlx4_en_get_coalesce(struct net_device *dev,
426 coal->rx_coalesce_usecs_high = priv->rx_usecs_high; 428 coal->rx_coalesce_usecs_high = priv->rx_usecs_high;
427 coal->rate_sample_interval = priv->sample_interval; 429 coal->rate_sample_interval = priv->sample_interval;
428 coal->use_adaptive_rx_coalesce = priv->adaptive_rx_coal; 430 coal->use_adaptive_rx_coalesce = priv->adaptive_rx_coal;
431
429 return 0; 432 return 0;
430} 433}
431 434
@@ -434,6 +437,9 @@ static int mlx4_en_set_coalesce(struct net_device *dev,
434{ 437{
435 struct mlx4_en_priv *priv = netdev_priv(dev); 438 struct mlx4_en_priv *priv = netdev_priv(dev);
436 439
440 if (!coal->tx_max_coalesced_frames_irq)
441 return -EINVAL;
442
437 priv->rx_frames = (coal->rx_max_coalesced_frames == 443 priv->rx_frames = (coal->rx_max_coalesced_frames ==
438 MLX4_EN_AUTO_CONF) ? 444 MLX4_EN_AUTO_CONF) ?
439 MLX4_EN_RX_COAL_TARGET : 445 MLX4_EN_RX_COAL_TARGET :
@@ -457,6 +463,7 @@ static int mlx4_en_set_coalesce(struct net_device *dev,
457 priv->rx_usecs_high = coal->rx_coalesce_usecs_high; 463 priv->rx_usecs_high = coal->rx_coalesce_usecs_high;
458 priv->sample_interval = coal->rate_sample_interval; 464 priv->sample_interval = coal->rate_sample_interval;
459 priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce; 465 priv->adaptive_rx_coal = coal->use_adaptive_rx_coalesce;
466 priv->tx_work_limit = coal->tx_max_coalesced_frames_irq;
460 467
461 return mlx4_en_moderation_update(priv); 468 return mlx4_en_moderation_update(priv);
462} 469}
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 7d4fb7bf2593..7345c43b019e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2336,7 +2336,7 @@ static void mlx4_en_add_vxlan_port(struct net_device *dev,
2336 struct mlx4_en_priv *priv = netdev_priv(dev); 2336 struct mlx4_en_priv *priv = netdev_priv(dev);
2337 __be16 current_port; 2337 __be16 current_port;
2338 2338
2339 if (!(priv->mdev->dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_VXLAN_OFFLOADS)) 2339 if (priv->mdev->dev->caps.tunnel_offload_mode != MLX4_TUNNEL_OFFLOAD_MODE_VXLAN)
2340 return; 2340 return;
2341 2341
2342 if (sa_family == AF_INET6) 2342 if (sa_family == AF_INET6)
@@ -2473,6 +2473,7 @@ int mlx4_en_init_netdev(struct mlx4_en_dev *mdev, int port,
2473 MLX4_WQE_CTRL_SOLICITED); 2473 MLX4_WQE_CTRL_SOLICITED);
2474 priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up; 2474 priv->num_tx_rings_p_up = mdev->profile.num_tx_rings_p_up;
2475 priv->tx_ring_num = prof->tx_ring_num; 2475 priv->tx_ring_num = prof->tx_ring_num;
2476 priv->tx_work_limit = MLX4_EN_DEFAULT_TX_WORK;
2476 2477
2477 priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS, 2478 priv->tx_ring = kzalloc(sizeof(struct mlx4_en_tx_ring *) * MAX_TX_RINGS,
2478 GFP_KERNEL); 2479 GFP_KERNEL);
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index d2d415732d99..5535862f27cc 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -40,6 +40,7 @@
40#include <linux/if_ether.h> 40#include <linux/if_ether.h>
41#include <linux/if_vlan.h> 41#include <linux/if_vlan.h>
42#include <linux/vmalloc.h> 42#include <linux/vmalloc.h>
43#include <linux/irq.h>
43 44
44#include "mlx4_en.h" 45#include "mlx4_en.h"
45 46
@@ -782,6 +783,7 @@ int mlx4_en_process_rx_cq(struct net_device *dev, struct mlx4_en_cq *cq, int bud
782 PKT_HASH_TYPE_L3); 783 PKT_HASH_TYPE_L3);
783 784
784 skb_record_rx_queue(gro_skb, cq->ring); 785 skb_record_rx_queue(gro_skb, cq->ring);
786 skb_mark_napi_id(gro_skb, &cq->napi);
785 787
786 if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) { 788 if (ring->hwtstamp_rx_filter == HWTSTAMP_FILTER_ALL) {
787 timestamp = mlx4_en_get_cqe_ts(cqe); 789 timestamp = mlx4_en_get_cqe_ts(cqe);
@@ -896,16 +898,25 @@ int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
896 898
897 /* If we used up all the quota - we're probably not done yet... */ 899 /* If we used up all the quota - we're probably not done yet... */
898 if (done == budget) { 900 if (done == budget) {
901 int cpu_curr;
902 const struct cpumask *aff;
903
899 INC_PERF_COUNTER(priv->pstats.napi_quota); 904 INC_PERF_COUNTER(priv->pstats.napi_quota);
900 if (unlikely(cq->mcq.irq_affinity_change)) { 905
901 cq->mcq.irq_affinity_change = false; 906 cpu_curr = smp_processor_id();
907 aff = irq_desc_get_irq_data(cq->irq_desc)->affinity;
908
909 if (unlikely(!cpumask_test_cpu(cpu_curr, aff))) {
910 /* Current cpu is not according to smp_irq_affinity -
911 * probably affinity changed. need to stop this NAPI
912 * poll, and restart it on the right CPU
913 */
902 napi_complete(napi); 914 napi_complete(napi);
903 mlx4_en_arm_cq(priv, cq); 915 mlx4_en_arm_cq(priv, cq);
904 return 0; 916 return 0;
905 } 917 }
906 } else { 918 } else {
907 /* Done for now */ 919 /* Done for now */
908 cq->mcq.irq_affinity_change = false;
909 napi_complete(napi); 920 napi_complete(napi);
910 mlx4_en_arm_cq(priv, cq); 921 mlx4_en_arm_cq(priv, cq);
911 } 922 }
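
The mlx4_en_poll_rx_cq() hunk above replaces the old irq_affinity_change notifier machinery with a direct check: when the budget is exhausted, the poll routine compares the current CPU against the IRQ's affinity mask (cached as cq->irq_desc when the CQ is activated) and, on a mismatch, completes NAPI and re-arms the CQ so polling restarts on the right CPU. A trimmed sketch of that check follows; the receive loop itself is omitted.

static int mlx4_en_poll_rx_cq(struct napi_struct *napi, int budget)
{
	struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
	struct mlx4_en_priv *priv = netdev_priv(cq->dev);
	int done = mlx4_en_process_rx_cq(cq->dev, cq, budget);

	if (done == budget) {
		const struct cpumask *aff =
			irq_desc_get_irq_data(cq->irq_desc)->affinity;

		if (likely(cpumask_test_cpu(smp_processor_id(), aff)))
			return budget;	/* more work, still on an allowed CPU */
		/*
		 * Current CPU is no longer in the IRQ affinity mask:
		 * stop this NAPI poll so the next interrupt (and poll)
		 * lands on the right CPU.
		 */
		done = 0;
	}

	napi_complete(napi);
	mlx4_en_arm_cq(priv, cq);
	return done;
}
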
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 8be7483f8236..5045bab59633 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -351,9 +351,8 @@ int mlx4_en_free_tx_buf(struct net_device *dev, struct mlx4_en_tx_ring *ring)
351 return cnt; 351 return cnt;
352} 352}
353 353
354static int mlx4_en_process_tx_cq(struct net_device *dev, 354static bool mlx4_en_process_tx_cq(struct net_device *dev,
355 struct mlx4_en_cq *cq, 355 struct mlx4_en_cq *cq)
356 int budget)
357{ 356{
358 struct mlx4_en_priv *priv = netdev_priv(dev); 357 struct mlx4_en_priv *priv = netdev_priv(dev);
359 struct mlx4_cq *mcq = &cq->mcq; 358 struct mlx4_cq *mcq = &cq->mcq;
@@ -372,9 +371,10 @@ static int mlx4_en_process_tx_cq(struct net_device *dev,
372 int factor = priv->cqe_factor; 371 int factor = priv->cqe_factor;
373 u64 timestamp = 0; 372 u64 timestamp = 0;
374 int done = 0; 373 int done = 0;
374 int budget = priv->tx_work_limit;
375 375
376 if (!priv->port_up) 376 if (!priv->port_up)
377 return 0; 377 return true;
378 378
379 index = cons_index & size_mask; 379 index = cons_index & size_mask;
380 cqe = &buf[(index << factor) + factor]; 380 cqe = &buf[(index << factor) + factor];
@@ -447,7 +447,7 @@ static int mlx4_en_process_tx_cq(struct net_device *dev,
447 netif_tx_wake_queue(ring->tx_queue); 447 netif_tx_wake_queue(ring->tx_queue);
448 ring->wake_queue++; 448 ring->wake_queue++;
449 } 449 }
450 return done; 450 return done < budget;
451} 451}
452 452
453void mlx4_en_tx_irq(struct mlx4_cq *mcq) 453void mlx4_en_tx_irq(struct mlx4_cq *mcq)
@@ -467,24 +467,16 @@ int mlx4_en_poll_tx_cq(struct napi_struct *napi, int budget)
467 struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi); 467 struct mlx4_en_cq *cq = container_of(napi, struct mlx4_en_cq, napi);
468 struct net_device *dev = cq->dev; 468 struct net_device *dev = cq->dev;
469 struct mlx4_en_priv *priv = netdev_priv(dev); 469 struct mlx4_en_priv *priv = netdev_priv(dev);
470 int done; 470 int clean_complete;
471 471
472 done = mlx4_en_process_tx_cq(dev, cq, budget); 472 clean_complete = mlx4_en_process_tx_cq(dev, cq);
473 if (!clean_complete)
474 return budget;
473 475
474 /* If we used up all the quota - we're probably not done yet... */ 476 napi_complete(napi);
475 if (done < budget) { 477 mlx4_en_arm_cq(priv, cq);
476 /* Done for now */ 478
477 cq->mcq.irq_affinity_change = false; 479 return 0;
478 napi_complete(napi);
479 mlx4_en_arm_cq(priv, cq);
480 return done;
481 } else if (unlikely(cq->mcq.irq_affinity_change)) {
482 cq->mcq.irq_affinity_change = false;
483 napi_complete(napi);
484 mlx4_en_arm_cq(priv, cq);
485 return 0;
486 }
487 return budget;
488} 480}
489 481
490static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv, 482static struct mlx4_en_tx_desc *mlx4_en_bounce_to_desc(struct mlx4_en_priv *priv,
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index d954ec1eac17..2a004b347e1d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -53,11 +53,6 @@ enum {
53 MLX4_EQ_ENTRY_SIZE = 0x20 53 MLX4_EQ_ENTRY_SIZE = 0x20
54}; 54};
55 55
56struct mlx4_irq_notify {
57 void *arg;
58 struct irq_affinity_notify notify;
59};
60
61#define MLX4_EQ_STATUS_OK ( 0 << 28) 56#define MLX4_EQ_STATUS_OK ( 0 << 28)
62#define MLX4_EQ_STATUS_WRITE_FAIL (10 << 28) 57#define MLX4_EQ_STATUS_WRITE_FAIL (10 << 28)
63#define MLX4_EQ_OWNER_SW ( 0 << 24) 58#define MLX4_EQ_OWNER_SW ( 0 << 24)
@@ -1088,57 +1083,6 @@ static void mlx4_unmap_clr_int(struct mlx4_dev *dev)
1088 iounmap(priv->clr_base); 1083 iounmap(priv->clr_base);
1089} 1084}
1090 1085
1091static void mlx4_irq_notifier_notify(struct irq_affinity_notify *notify,
1092 const cpumask_t *mask)
1093{
1094 struct mlx4_irq_notify *n = container_of(notify,
1095 struct mlx4_irq_notify,
1096 notify);
1097 struct mlx4_priv *priv = (struct mlx4_priv *)n->arg;
1098 struct radix_tree_iter iter;
1099 void **slot;
1100
1101 radix_tree_for_each_slot(slot, &priv->cq_table.tree, &iter, 0) {
1102 struct mlx4_cq *cq = (struct mlx4_cq *)(*slot);
1103
1104 if (cq->irq == notify->irq)
1105 cq->irq_affinity_change = true;
1106 }
1107}
1108
1109static void mlx4_release_irq_notifier(struct kref *ref)
1110{
1111 struct mlx4_irq_notify *n = container_of(ref, struct mlx4_irq_notify,
1112 notify.kref);
1113 kfree(n);
1114}
1115
1116static void mlx4_assign_irq_notifier(struct mlx4_priv *priv,
1117 struct mlx4_dev *dev, int irq)
1118{
1119 struct mlx4_irq_notify *irq_notifier = NULL;
1120 int err = 0;
1121
1122 irq_notifier = kzalloc(sizeof(*irq_notifier), GFP_KERNEL);
1123 if (!irq_notifier) {
1124 mlx4_warn(dev, "Failed to allocate irq notifier. irq %d\n",
1125 irq);
1126 return;
1127 }
1128
1129 irq_notifier->notify.irq = irq;
1130 irq_notifier->notify.notify = mlx4_irq_notifier_notify;
1131 irq_notifier->notify.release = mlx4_release_irq_notifier;
1132 irq_notifier->arg = priv;
1133 err = irq_set_affinity_notifier(irq, &irq_notifier->notify);
1134 if (err) {
1135 kfree(irq_notifier);
1136 irq_notifier = NULL;
1137 mlx4_warn(dev, "Failed to set irq notifier. irq %d\n", irq);
1138 }
1139}
1140
1141
1142int mlx4_alloc_eq_table(struct mlx4_dev *dev) 1086int mlx4_alloc_eq_table(struct mlx4_dev *dev)
1143{ 1087{
1144 struct mlx4_priv *priv = mlx4_priv(dev); 1088 struct mlx4_priv *priv = mlx4_priv(dev);
@@ -1409,8 +1353,6 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
1409 continue; 1353 continue;
1410 /*we dont want to break here*/ 1354 /*we dont want to break here*/
1411 } 1355 }
1412 mlx4_assign_irq_notifier(priv, dev,
1413 priv->eq_table.eq[vec].irq);
1414 1356
1415 eq_set_ci(&priv->eq_table.eq[vec], 1); 1357 eq_set_ci(&priv->eq_table.eq[vec], 1);
1416 } 1358 }
@@ -1427,6 +1369,14 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
1427} 1369}
1428EXPORT_SYMBOL(mlx4_assign_eq); 1370EXPORT_SYMBOL(mlx4_assign_eq);
1429 1371
1372int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec)
1373{
1374 struct mlx4_priv *priv = mlx4_priv(dev);
1375
1376 return priv->eq_table.eq[vec].irq;
1377}
1378EXPORT_SYMBOL(mlx4_eq_get_irq);
1379
1430void mlx4_release_eq(struct mlx4_dev *dev, int vec) 1380void mlx4_release_eq(struct mlx4_dev *dev, int vec)
1431{ 1381{
1432 struct mlx4_priv *priv = mlx4_priv(dev); 1382 struct mlx4_priv *priv = mlx4_priv(dev);
@@ -1438,9 +1388,6 @@ void mlx4_release_eq(struct mlx4_dev *dev, int vec)
1438 Belonging to a legacy EQ*/ 1388 Belonging to a legacy EQ*/
1439 mutex_lock(&priv->msix_ctl.pool_lock); 1389 mutex_lock(&priv->msix_ctl.pool_lock);
1440 if (priv->msix_ctl.pool_bm & 1ULL << i) { 1390 if (priv->msix_ctl.pool_bm & 1ULL << i) {
1441 irq_set_affinity_notifier(
1442 priv->eq_table.eq[vec].irq,
1443 NULL);
1444 free_irq(priv->eq_table.eq[vec].irq, 1391 free_irq(priv->eq_table.eq[vec].irq,
1445 &priv->eq_table.eq[vec]); 1392 &priv->eq_table.eq[vec]);
1446 priv->msix_ctl.pool_bm &= ~(1ULL << i); 1393 priv->msix_ctl.pool_bm &= ~(1ULL << i);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 5f42f6d6e4c6..82ab427290c3 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2439,7 +2439,8 @@ slave_start:
2439 (num_vfs_argc > 1 || probe_vfs_argc > 1)) { 2439 (num_vfs_argc > 1 || probe_vfs_argc > 1)) {
2440 mlx4_err(dev, 2440 mlx4_err(dev,
2441 "Invalid syntax of num_vfs/probe_vfs with IB port - single port VFs syntax is only supported when all ports are configured as ethernet\n"); 2441 "Invalid syntax of num_vfs/probe_vfs with IB port - single port VFs syntax is only supported when all ports are configured as ethernet\n");
2442 goto err_close; 2442 err = -EINVAL;
2443 goto err_master_mfunc;
2443 } 2444 }
2444 for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]); i++) { 2445 for (i = 0; i < sizeof(nvfs)/sizeof(nvfs[0]); i++) {
2445 unsigned j; 2446 unsigned j;
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
index 0e15295bedd6..d72a5a894fc6 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4_en.h
@@ -126,6 +126,8 @@ enum {
126#define MAX_TX_RINGS (MLX4_EN_MAX_TX_RING_P_UP * \ 126#define MAX_TX_RINGS (MLX4_EN_MAX_TX_RING_P_UP * \
127 MLX4_EN_NUM_UP) 127 MLX4_EN_NUM_UP)
128 128
129#define MLX4_EN_DEFAULT_TX_WORK 256
130
129/* Target number of packets to coalesce with interrupt moderation */ 131/* Target number of packets to coalesce with interrupt moderation */
130#define MLX4_EN_RX_COAL_TARGET 44 132#define MLX4_EN_RX_COAL_TARGET 44
131#define MLX4_EN_RX_COAL_TIME 0x10 133#define MLX4_EN_RX_COAL_TIME 0x10
@@ -343,6 +345,7 @@ struct mlx4_en_cq {
343#define CQ_USER_PEND (MLX4_EN_CQ_STATE_POLL | MLX4_EN_CQ_STATE_POLL_YIELD) 345#define CQ_USER_PEND (MLX4_EN_CQ_STATE_POLL | MLX4_EN_CQ_STATE_POLL_YIELD)
344 spinlock_t poll_lock; /* protects from LLS/napi conflicts */ 346 spinlock_t poll_lock; /* protects from LLS/napi conflicts */
345#endif /* CONFIG_NET_RX_BUSY_POLL */ 347#endif /* CONFIG_NET_RX_BUSY_POLL */
348 struct irq_desc *irq_desc;
346}; 349};
347 350
348struct mlx4_en_port_profile { 351struct mlx4_en_port_profile {
@@ -542,6 +545,7 @@ struct mlx4_en_priv {
542 __be32 ctrl_flags; 545 __be32 ctrl_flags;
543 u32 flags; 546 u32 flags;
544 u8 num_tx_rings_p_up; 547 u8 num_tx_rings_p_up;
548 u32 tx_work_limit;
545 u32 tx_ring_num; 549 u32 tx_ring_num;
546 u32 rx_ring_num; 550 u32 rx_ring_num;
547 u32 rx_skb_size; 551 u32 rx_skb_size;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/mr.c b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
index ba0401d4af50..184c3615f479 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/mr.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/mr.c
@@ -94,6 +94,11 @@ int mlx5_core_create_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr,
94 write_lock_irq(&table->lock); 94 write_lock_irq(&table->lock);
95 err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->key), mr); 95 err = radix_tree_insert(&table->tree, mlx5_base_mkey(mr->key), mr);
96 write_unlock_irq(&table->lock); 96 write_unlock_irq(&table->lock);
97 if (err) {
98 mlx5_core_warn(dev, "failed radix tree insert of mr 0x%x, %d\n",
99 mlx5_base_mkey(mr->key), err);
100 mlx5_core_destroy_mkey(dev, mr);
101 }
97 102
98 return err; 103 return err;
99} 104}
@@ -104,12 +109,22 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr)
104 struct mlx5_mr_table *table = &dev->priv.mr_table; 109 struct mlx5_mr_table *table = &dev->priv.mr_table;
105 struct mlx5_destroy_mkey_mbox_in in; 110 struct mlx5_destroy_mkey_mbox_in in;
106 struct mlx5_destroy_mkey_mbox_out out; 111 struct mlx5_destroy_mkey_mbox_out out;
112 struct mlx5_core_mr *deleted_mr;
107 unsigned long flags; 113 unsigned long flags;
108 int err; 114 int err;
109 115
110 memset(&in, 0, sizeof(in)); 116 memset(&in, 0, sizeof(in));
111 memset(&out, 0, sizeof(out)); 117 memset(&out, 0, sizeof(out));
112 118
119 write_lock_irqsave(&table->lock, flags);
120 deleted_mr = radix_tree_delete(&table->tree, mlx5_base_mkey(mr->key));
121 write_unlock_irqrestore(&table->lock, flags);
122 if (!deleted_mr) {
123 mlx5_core_warn(dev, "failed radix tree delete of mr 0x%x\n",
124 mlx5_base_mkey(mr->key));
125 return -ENOENT;
126 }
127
113 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_MKEY); 128 in.hdr.opcode = cpu_to_be16(MLX5_CMD_OP_DESTROY_MKEY);
114 in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mr->key)); 129 in.mkey = cpu_to_be32(mlx5_mkey_to_idx(mr->key));
115 err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out)); 130 err = mlx5_cmd_exec(dev, &in, sizeof(in), &out, sizeof(out));
@@ -119,10 +134,6 @@ int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr)
119 if (out.hdr.status) 134 if (out.hdr.status)
120 return mlx5_cmd_status_to_err(&out.hdr); 135 return mlx5_cmd_status_to_err(&out.hdr);
121 136
122 write_lock_irqsave(&table->lock, flags);
123 radix_tree_delete(&table->tree, mlx5_base_mkey(mr->key));
124 write_unlock_irqrestore(&table->lock, flags);
125
126 return err; 137 return err;
127} 138}
128EXPORT_SYMBOL(mlx5_core_destroy_mkey); 139EXPORT_SYMBOL(mlx5_core_destroy_mkey);
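
The mlx5 mr.c hunks above tighten mkey lifetime handling: create now unwinds (destroys the mkey) when the radix-tree insert fails, and destroy removes the radix-tree entry before issuing the DESTROY_MKEY command, returning -ENOENT if nothing was there, so a lookup can never return an mkey whose firmware object is already gone. The ordering is the interesting part; a schematic sketch with unrelated details elided:

int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, struct mlx5_core_mr *mr)
{
	struct mlx5_mr_table *table = &dev->priv.mr_table;
	struct mlx5_core_mr *deleted_mr;
	unsigned long flags;

	/* 1) make the mkey unreachable for new lookups */
	write_lock_irqsave(&table->lock, flags);
	deleted_mr = radix_tree_delete(&table->tree, mlx5_base_mkey(mr->key));
	write_unlock_irqrestore(&table->lock, flags);
	if (!deleted_mr)
		return -ENOENT;		/* never inserted, or already destroyed */

	/* 2) only then tell the device to tear the object down */
	return destroy_mkey_cmd(dev, mr);	/* stand-in for the mailbox exchange */
}
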
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index be425ad5e824..06bdc31a828d 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -538,6 +538,7 @@ enum rtl_register_content {
538 MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */ 538 MagicPacket = (1 << 5), /* Wake up when receives a Magic Packet */
539 LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */ 539 LinkUp = (1 << 4), /* Wake up when the cable connection is re-established */
540 Jumbo_En0 = (1 << 2), /* 8168 only. Reserved in the 8168b */ 540 Jumbo_En0 = (1 << 2), /* 8168 only. Reserved in the 8168b */
541 Rdy_to_L23 = (1 << 1), /* L23 Enable */
541 Beacon_en = (1 << 0), /* 8168 only. Reserved in the 8168b */ 542 Beacon_en = (1 << 0), /* 8168 only. Reserved in the 8168b */
542 543
543 /* Config4 register */ 544 /* Config4 register */
@@ -4897,6 +4898,21 @@ static void rtl_enable_clock_request(struct pci_dev *pdev)
4897 PCI_EXP_LNKCTL_CLKREQ_EN); 4898 PCI_EXP_LNKCTL_CLKREQ_EN);
4898} 4899}
4899 4900
4901static void rtl_pcie_state_l2l3_enable(struct rtl8169_private *tp, bool enable)
4902{
4903 void __iomem *ioaddr = tp->mmio_addr;
4904 u8 data;
4905
4906 data = RTL_R8(Config3);
4907
4908 if (enable)
4909 data |= Rdy_to_L23;
4910 else
4911 data &= ~Rdy_to_L23;
4912
4913 RTL_W8(Config3, data);
4914}
4915
4900#define R8168_CPCMD_QUIRK_MASK (\ 4916#define R8168_CPCMD_QUIRK_MASK (\
4901 EnableBist | \ 4917 EnableBist | \
4902 Mac_dbgo_oe | \ 4918 Mac_dbgo_oe | \
@@ -5246,6 +5262,7 @@ static void rtl_hw_start_8411(struct rtl8169_private *tp)
5246 }; 5262 };
5247 5263
5248 rtl_hw_start_8168f(tp); 5264 rtl_hw_start_8168f(tp);
5265 rtl_pcie_state_l2l3_enable(tp, false);
5249 5266
5250 rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1)); 5267 rtl_ephy_init(tp, e_info_8168f_1, ARRAY_SIZE(e_info_8168f_1));
5251 5268
@@ -5284,6 +5301,8 @@ static void rtl_hw_start_8168g_1(struct rtl8169_private *tp)
5284 5301
5285 rtl_w1w0_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC); 5302 rtl_w1w0_eri(tp, 0x2fc, ERIAR_MASK_0001, 0x01, 0x06, ERIAR_EXGMAC);
5286 rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC); 5303 rtl_w1w0_eri(tp, 0x1b0, ERIAR_MASK_0011, 0x0000, 0x1000, ERIAR_EXGMAC);
5304
5305 rtl_pcie_state_l2l3_enable(tp, false);
5287} 5306}
5288 5307
5289static void rtl_hw_start_8168g_2(struct rtl8169_private *tp) 5308static void rtl_hw_start_8168g_2(struct rtl8169_private *tp)
@@ -5536,6 +5555,8 @@ static void rtl_hw_start_8105e_1(struct rtl8169_private *tp)
5536 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN); 5555 RTL_W8(DLLPR, RTL_R8(DLLPR) | PFM_EN);
5537 5556
5538 rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1)); 5557 rtl_ephy_init(tp, e_info_8105e_1, ARRAY_SIZE(e_info_8105e_1));
5558
5559 rtl_pcie_state_l2l3_enable(tp, false);
5539} 5560}
5540 5561
5541static void rtl_hw_start_8105e_2(struct rtl8169_private *tp) 5562static void rtl_hw_start_8105e_2(struct rtl8169_private *tp)
@@ -5571,6 +5592,8 @@ static void rtl_hw_start_8402(struct rtl8169_private *tp)
5571 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); 5592 rtl_eri_write(tp, 0xc0, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5572 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC); 5593 rtl_eri_write(tp, 0xb8, ERIAR_MASK_0011, 0x0000, ERIAR_EXGMAC);
5573 rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC); 5594 rtl_w1w0_eri(tp, 0x0d4, ERIAR_MASK_0011, 0x0e00, 0xff00, ERIAR_EXGMAC);
5595
5596 rtl_pcie_state_l2l3_enable(tp, false);
5574} 5597}
5575 5598
5576static void rtl_hw_start_8106(struct rtl8169_private *tp) 5599static void rtl_hw_start_8106(struct rtl8169_private *tp)
@@ -5583,6 +5606,8 @@ static void rtl_hw_start_8106(struct rtl8169_private *tp)
5583 RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN); 5606 RTL_W32(MISC, (RTL_R32(MISC) | DISABLE_LAN_EN) & ~EARLY_TALLY_EN);
5584 RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET); 5607 RTL_W8(MCU, RTL_R8(MCU) | EN_NDP | EN_OOB_RESET);
5585 RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN); 5608 RTL_W8(DLLPR, RTL_R8(DLLPR) & ~PFM_EN);
5609
5610 rtl_pcie_state_l2l3_enable(tp, false);
5586} 5611}
5587 5612
5588static void rtl_hw_start_8101(struct net_device *dev) 5613static void rtl_hw_start_8101(struct net_device *dev)
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
index b3e148ef5683..9d3748361a1e 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac1000_core.c
@@ -320,11 +320,8 @@ static void dwmac1000_set_eee_timer(void __iomem *ioaddr, int ls, int tw)
320 320
321static void dwmac1000_ctrl_ane(void __iomem *ioaddr, bool restart) 321static void dwmac1000_ctrl_ane(void __iomem *ioaddr, bool restart)
322{ 322{
323 u32 value;
324
325 value = readl(ioaddr + GMAC_AN_CTRL);
326 /* auto negotiation enable and External Loopback enable */ 323 /* auto negotiation enable and External Loopback enable */
327 value = GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_ELE; 324 u32 value = GMAC_AN_CTRL_ANE | GMAC_AN_CTRL_ELE;
328 325
329 if (restart) 326 if (restart)
330 value |= GMAC_AN_CTRL_RAN; 327 value |= GMAC_AN_CTRL_RAN;
diff --git a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
index 7e6628a91514..1e2bcf5f89e1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
+++ b/drivers/net/ethernet/stmicro/stmmac/enh_desc.c
@@ -145,7 +145,7 @@ static void enh_desc_get_ext_status(void *data, struct stmmac_extra_stats *x,
145 x->rx_msg_type_delay_req++; 145 x->rx_msg_type_delay_req++;
146 else if (p->des4.erx.msg_type == RDES_EXT_DELAY_RESP) 146 else if (p->des4.erx.msg_type == RDES_EXT_DELAY_RESP)
147 x->rx_msg_type_delay_resp++; 147 x->rx_msg_type_delay_resp++;
148 else if (p->des4.erx.msg_type == RDES_EXT_DELAY_REQ) 148 else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_REQ)
149 x->rx_msg_type_pdelay_req++; 149 x->rx_msg_type_pdelay_req++;
150 else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_RESP) 150 else if (p->des4.erx.msg_type == RDES_EXT_PDELAY_RESP)
151 x->rx_msg_type_pdelay_resp++; 151 x->rx_msg_type_pdelay_resp++;
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index ff380dac6629..b988d16cd34e 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -1212,7 +1212,12 @@ static int cpsw_ndo_open(struct net_device *ndev)
1212 for_each_slave(priv, cpsw_slave_open, priv); 1212 for_each_slave(priv, cpsw_slave_open, priv);
1213 1213
1214 /* Add default VLAN */ 1214 /* Add default VLAN */
1215 cpsw_add_default_vlan(priv); 1215 if (!priv->data.dual_emac)
1216 cpsw_add_default_vlan(priv);
1217 else
1218 cpsw_ale_add_vlan(priv->ale, priv->data.default_vlan,
1219 ALE_ALL_PORTS << priv->host_port,
1220 ALE_ALL_PORTS << priv->host_port, 0, 0);
1216 1221
1217 if (!cpsw_common_res_usage_state(priv)) { 1222 if (!cpsw_common_res_usage_state(priv)) {
1218 /* setup tx dma to fixed prio and zero offset */ 1223 /* setup tx dma to fixed prio and zero offset */
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
index 14389f841d43..4c70360967c2 100644
--- a/drivers/net/ethernet/tile/tilegx.c
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -2191,7 +2191,6 @@ static void tile_net_setup(struct net_device *dev)
2191static void tile_net_dev_init(const char *name, const uint8_t *mac) 2191static void tile_net_dev_init(const char *name, const uint8_t *mac)
2192{ 2192{
2193 int ret; 2193 int ret;
2194 int i;
2195 struct net_device *dev; 2194 struct net_device *dev;
2196 struct tile_net_priv *priv; 2195 struct tile_net_priv *priv;
2197 2196
diff --git a/drivers/net/fddi/defxx.c b/drivers/net/fddi/defxx.c
index eb78203cd58e..2aa57270838f 100644
--- a/drivers/net/fddi/defxx.c
+++ b/drivers/net/fddi/defxx.c
@@ -291,7 +291,11 @@ static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type);
291 291
292static int dfx_rcv_init(DFX_board_t *bp, int get_buffers); 292static int dfx_rcv_init(DFX_board_t *bp, int get_buffers);
293static void dfx_rcv_queue_process(DFX_board_t *bp); 293static void dfx_rcv_queue_process(DFX_board_t *bp);
294#ifdef DYNAMIC_BUFFERS
294static void dfx_rcv_flush(DFX_board_t *bp); 295static void dfx_rcv_flush(DFX_board_t *bp);
296#else
297static inline void dfx_rcv_flush(DFX_board_t *bp) {}
298#endif
295 299
296static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb, 300static netdev_tx_t dfx_xmt_queue_pkt(struct sk_buff *skb,
297 struct net_device *dev); 301 struct net_device *dev);
@@ -2849,7 +2853,7 @@ static int dfx_hw_dma_uninit(DFX_board_t *bp, PI_UINT32 type)
2849 * Align an sk_buff to a boundary power of 2 2853 * Align an sk_buff to a boundary power of 2
2850 * 2854 *
2851 */ 2855 */
2852 2856#ifdef DYNAMIC_BUFFERS
2853static void my_skb_align(struct sk_buff *skb, int n) 2857static void my_skb_align(struct sk_buff *skb, int n)
2854{ 2858{
2855 unsigned long x = (unsigned long)skb->data; 2859 unsigned long x = (unsigned long)skb->data;
@@ -2859,7 +2863,7 @@ static void my_skb_align(struct sk_buff *skb, int n)
2859 2863
2860 skb_reserve(skb, v - x); 2864 skb_reserve(skb, v - x);
2861} 2865}
2862 2866#endif
2863 2867
2864/* 2868/*
2865 * ================ 2869 * ================
@@ -3074,10 +3078,7 @@ static void dfx_rcv_queue_process(
3074 break; 3078 break;
3075 } 3079 }
3076 else { 3080 else {
3077#ifndef DYNAMIC_BUFFERS 3081 if (!rx_in_place) {
3078 if (! rx_in_place)
3079#endif
3080 {
3081 /* Receive buffer allocated, pass receive packet up */ 3082 /* Receive buffer allocated, pass receive packet up */
3082 3083
3083 skb_copy_to_linear_data(skb, 3084 skb_copy_to_linear_data(skb,
@@ -3453,10 +3454,6 @@ static void dfx_rcv_flush( DFX_board_t *bp )
3453 } 3454 }
3454 3455
3455 } 3456 }
3456#else
3457static inline void dfx_rcv_flush( DFX_board_t *bp )
3458{
3459}
3460#endif /* DYNAMIC_BUFFERS */ 3457#endif /* DYNAMIC_BUFFERS */
3461 3458
3462/* 3459/*
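The defxx change above drops the #ifdef guards around the dfx_rcv_flush() call sites: when DYNAMIC_BUFFERS is not set, an empty static inline stub is declared next to the prototype, so callers stay unconditional and the compiler removes the call. A compact sketch of the same pattern, with FEATURE_X and the function name invented for the example:

#include <stdio.h>

#ifdef FEATURE_X
static void feature_x_flush(void)
{
	puts("flushing feature X buffers");
}
#else
static inline void feature_x_flush(void) { }	/* compiled out: no-op */
#endif

int main(void)
{
	/* Unconditional call; without -DFEATURE_X this compiles to nothing. */
	feature_x_flush();
	return 0;
}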
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index c041f63a6d30..4ed38eaecea8 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -189,7 +189,7 @@ static int netvsc_destroy_buf(struct netvsc_device *net_device)
189 "unable to teardown send buffer's gpadl\n"); 189 "unable to teardown send buffer's gpadl\n");
190 return ret; 190 return ret;
191 } 191 }
192 net_device->recv_buf_gpadl_handle = 0; 192 net_device->send_buf_gpadl_handle = 0;
193 } 193 }
194 if (net_device->send_buf) { 194 if (net_device->send_buf) {
195 /* Free up the receive buffer */ 195 /* Free up the receive buffer */
diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c
index 4517b149ed07..50899416f668 100644
--- a/drivers/net/ieee802154/at86rf230.c
+++ b/drivers/net/ieee802154/at86rf230.c
@@ -1137,6 +1137,8 @@ static int at86rf230_probe(struct spi_device *spi)
1137 dev->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AACK; 1137 dev->flags = IEEE802154_HW_OMIT_CKSUM | IEEE802154_HW_AACK;
1138 1138
1139 irq_type = irq_get_trigger_type(spi->irq); 1139 irq_type = irq_get_trigger_type(spi->irq);
1140 if (!irq_type)
1141 irq_type = IRQF_TRIGGER_RISING;
1140 if (irq_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) { 1142 if (irq_type & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING)) {
1141 irq_worker = at86rf230_irqwork; 1143 irq_worker = at86rf230_irqwork;
1142 irq_handler = at86rf230_isr; 1144 irq_handler = at86rf230_isr;
@@ -1168,7 +1170,8 @@ static int at86rf230_probe(struct spi_device *spi)
1168 if (rc) 1170 if (rc)
1169 goto err_hw_init; 1171 goto err_hw_init;
1170 1172
1171 rc = devm_request_irq(&spi->dev, spi->irq, irq_handler, IRQF_SHARED, 1173 rc = devm_request_irq(&spi->dev, spi->irq, irq_handler,
1174 IRQF_SHARED | irq_type,
1172 dev_name(&spi->dev), lp); 1175 dev_name(&spi->dev), lp);
1173 if (rc) 1176 if (rc)
1174 goto err_hw_init; 1177 goto err_hw_init;
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index 6c622aedbae1..fdc1b418fa6a 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -16,9 +16,13 @@
16#include <linux/string.h> 16#include <linux/string.h>
17#include <linux/netdevice.h> 17#include <linux/netdevice.h>
18#include <linux/etherdevice.h> 18#include <linux/etherdevice.h>
19#include <linux/of_gpio.h>
20#include <linux/gpio/consumer.h>
19 21
20#define AT803X_INTR_ENABLE 0x12 22#define AT803X_INTR_ENABLE 0x12
21#define AT803X_INTR_STATUS 0x13 23#define AT803X_INTR_STATUS 0x13
24#define AT803X_SMART_SPEED 0x14
25#define AT803X_LED_CONTROL 0x18
22#define AT803X_WOL_ENABLE 0x01 26#define AT803X_WOL_ENABLE 0x01
23#define AT803X_DEVICE_ADDR 0x03 27#define AT803X_DEVICE_ADDR 0x03
24#define AT803X_LOC_MAC_ADDR_0_15_OFFSET 0x804C 28#define AT803X_LOC_MAC_ADDR_0_15_OFFSET 0x804C
@@ -35,10 +39,52 @@
35#define AT803X_DEBUG_SYSTEM_MODE_CTRL 0x05 39#define AT803X_DEBUG_SYSTEM_MODE_CTRL 0x05
36#define AT803X_DEBUG_RGMII_TX_CLK_DLY BIT(8) 40#define AT803X_DEBUG_RGMII_TX_CLK_DLY BIT(8)
37 41
42#define ATH8030_PHY_ID 0x004dd076
43#define ATH8031_PHY_ID 0x004dd074
44#define ATH8035_PHY_ID 0x004dd072
45
38MODULE_DESCRIPTION("Atheros 803x PHY driver"); 46MODULE_DESCRIPTION("Atheros 803x PHY driver");
39MODULE_AUTHOR("Matus Ujhelyi"); 47MODULE_AUTHOR("Matus Ujhelyi");
40MODULE_LICENSE("GPL"); 48MODULE_LICENSE("GPL");
41 49
50struct at803x_priv {
51 bool phy_reset:1;
52 struct gpio_desc *gpiod_reset;
53};
54
55struct at803x_context {
56 u16 bmcr;
57 u16 advertise;
58 u16 control1000;
59 u16 int_enable;
60 u16 smart_speed;
61 u16 led_control;
62};
63
64/* save relevant PHY registers to private copy */
65static void at803x_context_save(struct phy_device *phydev,
66 struct at803x_context *context)
67{
68 context->bmcr = phy_read(phydev, MII_BMCR);
69 context->advertise = phy_read(phydev, MII_ADVERTISE);
70 context->control1000 = phy_read(phydev, MII_CTRL1000);
71 context->int_enable = phy_read(phydev, AT803X_INTR_ENABLE);
72 context->smart_speed = phy_read(phydev, AT803X_SMART_SPEED);
73 context->led_control = phy_read(phydev, AT803X_LED_CONTROL);
74}
75
76/* restore relevant PHY registers from private copy */
77static void at803x_context_restore(struct phy_device *phydev,
78 const struct at803x_context *context)
79{
80 phy_write(phydev, MII_BMCR, context->bmcr);
81 phy_write(phydev, MII_ADVERTISE, context->advertise);
82 phy_write(phydev, MII_CTRL1000, context->control1000);
83 phy_write(phydev, AT803X_INTR_ENABLE, context->int_enable);
84 phy_write(phydev, AT803X_SMART_SPEED, context->smart_speed);
85 phy_write(phydev, AT803X_LED_CONTROL, context->led_control);
86}
87
42static int at803x_set_wol(struct phy_device *phydev, 88static int at803x_set_wol(struct phy_device *phydev,
43 struct ethtool_wolinfo *wol) 89 struct ethtool_wolinfo *wol)
44{ 90{
@@ -142,6 +188,26 @@ static int at803x_resume(struct phy_device *phydev)
142 return 0; 188 return 0;
143} 189}
144 190
191static int at803x_probe(struct phy_device *phydev)
192{
193 struct device *dev = &phydev->dev;
194 struct at803x_priv *priv;
195
196 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
197 if (!priv)
198 return -ENOMEM;
199
200 priv->gpiod_reset = devm_gpiod_get(dev, "reset");
201 if (IS_ERR(priv->gpiod_reset))
202 priv->gpiod_reset = NULL;
203 else
204 gpiod_direction_output(priv->gpiod_reset, 1);
205
206 phydev->priv = priv;
207
208 return 0;
209}
210
145static int at803x_config_init(struct phy_device *phydev) 211static int at803x_config_init(struct phy_device *phydev)
146{ 212{
147 int ret; 213 int ret;
@@ -189,58 +255,99 @@ static int at803x_config_intr(struct phy_device *phydev)
189 return err; 255 return err;
190} 256}
191 257
258static void at803x_link_change_notify(struct phy_device *phydev)
259{
260 struct at803x_priv *priv = phydev->priv;
261
262 /*
263 * Conduct a hardware reset for AT8030 every time a link loss is
264 * signalled. This is necessary to circumvent a hardware bug that
265 * occurs when the cable is unplugged while TX packets are pending
 266 * in the FIFO. In such cases, the FIFO enters an error mode that
 267 * cannot be recovered from by software.
268 */
269 if (phydev->drv->phy_id == ATH8030_PHY_ID) {
270 if (phydev->state == PHY_NOLINK) {
271 if (priv->gpiod_reset && !priv->phy_reset) {
272 struct at803x_context context;
273
274 at803x_context_save(phydev, &context);
275
276 gpiod_set_value(priv->gpiod_reset, 0);
277 msleep(1);
278 gpiod_set_value(priv->gpiod_reset, 1);
279 msleep(1);
280
281 at803x_context_restore(phydev, &context);
282
283 dev_dbg(&phydev->dev, "%s(): phy was reset\n",
284 __func__);
285 priv->phy_reset = true;
286 }
287 } else {
288 priv->phy_reset = false;
289 }
290 }
291}
292
192static struct phy_driver at803x_driver[] = { 293static struct phy_driver at803x_driver[] = {
193{ 294{
194 /* ATHEROS 8035 */ 295 /* ATHEROS 8035 */
195 .phy_id = 0x004dd072, 296 .phy_id = ATH8035_PHY_ID,
196 .name = "Atheros 8035 ethernet", 297 .name = "Atheros 8035 ethernet",
197 .phy_id_mask = 0xffffffef, 298 .phy_id_mask = 0xffffffef,
198 .config_init = at803x_config_init, 299 .probe = at803x_probe,
199 .set_wol = at803x_set_wol, 300 .config_init = at803x_config_init,
200 .get_wol = at803x_get_wol, 301 .link_change_notify = at803x_link_change_notify,
201 .suspend = at803x_suspend, 302 .set_wol = at803x_set_wol,
202 .resume = at803x_resume, 303 .get_wol = at803x_get_wol,
203 .features = PHY_GBIT_FEATURES, 304 .suspend = at803x_suspend,
204 .flags = PHY_HAS_INTERRUPT, 305 .resume = at803x_resume,
205 .config_aneg = genphy_config_aneg, 306 .features = PHY_GBIT_FEATURES,
206 .read_status = genphy_read_status, 307 .flags = PHY_HAS_INTERRUPT,
207 .driver = { 308 .config_aneg = genphy_config_aneg,
309 .read_status = genphy_read_status,
310 .driver = {
208 .owner = THIS_MODULE, 311 .owner = THIS_MODULE,
209 }, 312 },
210}, { 313}, {
211 /* ATHEROS 8030 */ 314 /* ATHEROS 8030 */
212 .phy_id = 0x004dd076, 315 .phy_id = ATH8030_PHY_ID,
213 .name = "Atheros 8030 ethernet", 316 .name = "Atheros 8030 ethernet",
214 .phy_id_mask = 0xffffffef, 317 .phy_id_mask = 0xffffffef,
215 .config_init = at803x_config_init, 318 .probe = at803x_probe,
216 .set_wol = at803x_set_wol, 319 .config_init = at803x_config_init,
217 .get_wol = at803x_get_wol, 320 .link_change_notify = at803x_link_change_notify,
218 .suspend = at803x_suspend, 321 .set_wol = at803x_set_wol,
219 .resume = at803x_resume, 322 .get_wol = at803x_get_wol,
220 .features = PHY_GBIT_FEATURES, 323 .suspend = at803x_suspend,
221 .flags = PHY_HAS_INTERRUPT, 324 .resume = at803x_resume,
222 .config_aneg = genphy_config_aneg, 325 .features = PHY_GBIT_FEATURES,
223 .read_status = genphy_read_status, 326 .flags = PHY_HAS_INTERRUPT,
224 .driver = { 327 .config_aneg = genphy_config_aneg,
328 .read_status = genphy_read_status,
329 .driver = {
225 .owner = THIS_MODULE, 330 .owner = THIS_MODULE,
226 }, 331 },
227}, { 332}, {
228 /* ATHEROS 8031 */ 333 /* ATHEROS 8031 */
229 .phy_id = 0x004dd074, 334 .phy_id = ATH8031_PHY_ID,
230 .name = "Atheros 8031 ethernet", 335 .name = "Atheros 8031 ethernet",
231 .phy_id_mask = 0xffffffef, 336 .phy_id_mask = 0xffffffef,
232 .config_init = at803x_config_init, 337 .probe = at803x_probe,
233 .set_wol = at803x_set_wol, 338 .config_init = at803x_config_init,
234 .get_wol = at803x_get_wol, 339 .link_change_notify = at803x_link_change_notify,
235 .suspend = at803x_suspend, 340 .set_wol = at803x_set_wol,
236 .resume = at803x_resume, 341 .get_wol = at803x_get_wol,
237 .features = PHY_GBIT_FEATURES, 342 .suspend = at803x_suspend,
238 .flags = PHY_HAS_INTERRUPT, 343 .resume = at803x_resume,
239 .config_aneg = genphy_config_aneg, 344 .features = PHY_GBIT_FEATURES,
240 .read_status = genphy_read_status, 345 .flags = PHY_HAS_INTERRUPT,
241 .ack_interrupt = &at803x_ack_interrupt, 346 .config_aneg = genphy_config_aneg,
242 .config_intr = &at803x_config_intr, 347 .read_status = genphy_read_status,
243 .driver = { 348 .ack_interrupt = &at803x_ack_interrupt,
349 .config_intr = &at803x_config_intr,
350 .driver = {
244 .owner = THIS_MODULE, 351 .owner = THIS_MODULE,
245 }, 352 },
246} }; 353} };
@@ -260,9 +367,9 @@ module_init(atheros_init);
260module_exit(atheros_exit); 367module_exit(atheros_exit);
261 368
262static struct mdio_device_id __maybe_unused atheros_tbl[] = { 369static struct mdio_device_id __maybe_unused atheros_tbl[] = {
263 { 0x004dd076, 0xffffffef }, 370 { ATH8030_PHY_ID, 0xffffffef },
264 { 0x004dd074, 0xffffffef }, 371 { ATH8031_PHY_ID, 0xffffffef },
265 { 0x004dd072, 0xffffffef }, 372 { ATH8035_PHY_ID, 0xffffffef },
266 { } 373 { }
267}; 374};
268 375
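The at803x diff above adds a probe that claims an optional reset GPIO and a link_change_notify hook that, for the AT8030 only, saves the relevant MII registers, pulses the reset line and restores them when the link drops. A small userspace sketch of that save/reset/restore shape; the register array and the reg_read/reg_write/hard_reset helpers are stand-ins invented for the example, not the phylib API.

#include <stdio.h>
#include <string.h>

#define NREGS 8

static unsigned short regs[NREGS];

static unsigned short reg_read(int r)          { return regs[r]; }
static void reg_write(int r, unsigned short v) { regs[r] = v; }

/* A hard reset clobbers every register back to its power-on default. */
static void hard_reset(void)
{
	memset(regs, 0, sizeof(regs));
}

struct ctx { unsigned short saved[NREGS]; };

static void ctx_save(struct ctx *c)
{
	for (int i = 0; i < NREGS; i++)
		c->saved[i] = reg_read(i);
}

static void ctx_restore(const struct ctx *c)
{
	for (int i = 0; i < NREGS; i++)
		reg_write(i, c->saved[i]);
}

int main(void)
{
	struct ctx c;

	reg_write(0, 0x1140);	/* some configuration the driver cares about */

	ctx_save(&c);		/* save before the reset ...            */
	hard_reset();		/* ... which wipes the configuration ... */
	ctx_restore(&c);	/* ... then put it back afterwards       */

	printf("reg0 after reset+restore: 0x%04x\n", reg_read(0));
	return 0;
}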
diff --git a/drivers/net/phy/dp83640.c b/drivers/net/phy/dp83640.c
index 6a999e6814a0..9408157a246c 100644
--- a/drivers/net/phy/dp83640.c
+++ b/drivers/net/phy/dp83640.c
@@ -1323,15 +1323,15 @@ static bool dp83640_rxtstamp(struct phy_device *phydev,
1323{ 1323{
1324 struct dp83640_private *dp83640 = phydev->priv; 1324 struct dp83640_private *dp83640 = phydev->priv;
1325 1325
1326 if (!dp83640->hwts_rx_en)
1327 return false;
1328
1329 if (is_status_frame(skb, type)) { 1326 if (is_status_frame(skb, type)) {
1330 decode_status_frame(dp83640, skb); 1327 decode_status_frame(dp83640, skb);
1331 kfree_skb(skb); 1328 kfree_skb(skb);
1332 return true; 1329 return true;
1333 } 1330 }
1334 1331
1332 if (!dp83640->hwts_rx_en)
1333 return false;
1334
1335 SKB_PTP_TYPE(skb) = type; 1335 SKB_PTP_TYPE(skb) = type;
1336 skb_queue_tail(&dp83640->rx_queue, skb); 1336 skb_queue_tail(&dp83640->rx_queue, skb);
1337 schedule_work(&dp83640->ts_work); 1337 schedule_work(&dp83640->ts_work);
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 2e58aa54484c..4eaadcfcb0fe 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -187,6 +187,50 @@ struct mii_bus *of_mdio_find_bus(struct device_node *mdio_bus_np)
187 return d ? to_mii_bus(d) : NULL; 187 return d ? to_mii_bus(d) : NULL;
188} 188}
189EXPORT_SYMBOL(of_mdio_find_bus); 189EXPORT_SYMBOL(of_mdio_find_bus);
190
191/* Walk the list of subnodes of a mdio bus and look for a node that matches the
192 * phy's address with its 'reg' property. If found, set the of_node pointer for
193 * the phy. This allows auto-probed phy devices to be supplied with information
194 * passed in via DT.
195 */
196static void of_mdiobus_link_phydev(struct mii_bus *mdio,
197 struct phy_device *phydev)
198{
199 struct device *dev = &phydev->dev;
200 struct device_node *child;
201
202 if (dev->of_node || !mdio->dev.of_node)
203 return;
204
205 for_each_available_child_of_node(mdio->dev.of_node, child) {
206 int addr;
207 int ret;
208
209 ret = of_property_read_u32(child, "reg", &addr);
210 if (ret < 0) {
211 dev_err(dev, "%s has invalid PHY address\n",
212 child->full_name);
213 continue;
214 }
215
216 /* A PHY must have a reg property in the range [0-31] */
217 if (addr >= PHY_MAX_ADDR) {
218 dev_err(dev, "%s PHY address %i is too large\n",
219 child->full_name, addr);
220 continue;
221 }
222
223 if (addr == phydev->addr) {
224 dev->of_node = child;
225 return;
226 }
227 }
228}
229#else /* !IS_ENABLED(CONFIG_OF_MDIO) */
230static inline void of_mdiobus_link_phydev(struct mii_bus *mdio,
231 struct phy_device *phydev)
232{
233}
190#endif 234#endif
191 235
192/** 236/**
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index 3bc079a67a3d..f7c61812ea4a 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -720,6 +720,9 @@ void phy_state_machine(struct work_struct *work)
720 720
721 mutex_lock(&phydev->lock); 721 mutex_lock(&phydev->lock);
722 722
723 if (phydev->drv->link_change_notify)
724 phydev->drv->link_change_notify(phydev);
725
723 switch (phydev->state) { 726 switch (phydev->state) {
724 case PHY_DOWN: 727 case PHY_DOWN:
725 case PHY_STARTING: 728 case PHY_STARTING:
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index 91d6c1272fcf..e2f20f807de8 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -539,7 +539,7 @@ static int get_filter(void __user *arg, struct sock_filter **p)
539{ 539{
540 struct sock_fprog uprog; 540 struct sock_fprog uprog;
541 struct sock_filter *code = NULL; 541 struct sock_filter *code = NULL;
542 int len, err; 542 int len;
543 543
544 if (copy_from_user(&uprog, arg, sizeof(uprog))) 544 if (copy_from_user(&uprog, arg, sizeof(uprog)))
545 return -EFAULT; 545 return -EFAULT;
@@ -554,12 +554,6 @@ static int get_filter(void __user *arg, struct sock_filter **p)
554 if (IS_ERR(code)) 554 if (IS_ERR(code))
555 return PTR_ERR(code); 555 return PTR_ERR(code);
556 556
557 err = sk_chk_filter(code, uprog.len);
558 if (err) {
559 kfree(code);
560 return err;
561 }
562
563 *p = code; 557 *p = code;
564 return uprog.len; 558 return uprog.len;
565} 559}
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index 2ea7efd11857..6c9c16d76935 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -675,7 +675,7 @@ static int pppoe_connect(struct socket *sock, struct sockaddr *uservaddr,
675 po->chan.hdrlen = (sizeof(struct pppoe_hdr) + 675 po->chan.hdrlen = (sizeof(struct pppoe_hdr) +
676 dev->hard_header_len); 676 dev->hard_header_len);
677 677
678 po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr); 678 po->chan.mtu = dev->mtu - sizeof(struct pppoe_hdr) - 2;
679 po->chan.private = sk; 679 po->chan.private = sk;
680 po->chan.ops = &pppoe_chan_ops; 680 po->chan.ops = &pppoe_chan_ops;
681 681
diff --git a/drivers/net/slip/slip.c b/drivers/net/slip/slip.c
index ad4a94e9ff57..87526443841f 100644
--- a/drivers/net/slip/slip.c
+++ b/drivers/net/slip/slip.c
@@ -83,6 +83,7 @@
83#include <linux/delay.h> 83#include <linux/delay.h>
84#include <linux/init.h> 84#include <linux/init.h>
85#include <linux/slab.h> 85#include <linux/slab.h>
86#include <linux/workqueue.h>
86#include "slip.h" 87#include "slip.h"
87#ifdef CONFIG_INET 88#ifdef CONFIG_INET
88#include <linux/ip.h> 89#include <linux/ip.h>
@@ -416,36 +417,46 @@ static void sl_encaps(struct slip *sl, unsigned char *icp, int len)
416#endif 417#endif
417} 418}
418 419
419/* 420/* Write out any remaining transmit buffer. Scheduled when tty is writable */
420 * Called by the driver when there's room for more data. If we have 421static void slip_transmit(struct work_struct *work)
421 * more packets to send, we send them here.
422 */
423static void slip_write_wakeup(struct tty_struct *tty)
424{ 422{
423 struct slip *sl = container_of(work, struct slip, tx_work);
425 int actual; 424 int actual;
426 struct slip *sl = tty->disc_data;
427 425
426 spin_lock_bh(&sl->lock);
428 /* First make sure we're connected. */ 427 /* First make sure we're connected. */
429 if (!sl || sl->magic != SLIP_MAGIC || !netif_running(sl->dev)) 428 if (!sl->tty || sl->magic != SLIP_MAGIC || !netif_running(sl->dev)) {
429 spin_unlock_bh(&sl->lock);
430 return; 430 return;
431 }
431 432
432 spin_lock_bh(&sl->lock);
433 if (sl->xleft <= 0) { 433 if (sl->xleft <= 0) {
434 /* Now serial buffer is almost free & we can start 434 /* Now serial buffer is almost free & we can start
435 * transmission of another packet */ 435 * transmission of another packet */
436 sl->dev->stats.tx_packets++; 436 sl->dev->stats.tx_packets++;
437 clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); 437 clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags);
438 spin_unlock_bh(&sl->lock); 438 spin_unlock_bh(&sl->lock);
439 sl_unlock(sl); 439 sl_unlock(sl);
440 return; 440 return;
441 } 441 }
442 442
443 actual = tty->ops->write(tty, sl->xhead, sl->xleft); 443 actual = sl->tty->ops->write(sl->tty, sl->xhead, sl->xleft);
444 sl->xleft -= actual; 444 sl->xleft -= actual;
445 sl->xhead += actual; 445 sl->xhead += actual;
446 spin_unlock_bh(&sl->lock); 446 spin_unlock_bh(&sl->lock);
447} 447}
448 448
449/*
450 * Called by the driver when there's room for more data.
451 * Schedule the transmit.
452 */
453static void slip_write_wakeup(struct tty_struct *tty)
454{
455 struct slip *sl = tty->disc_data;
456
457 schedule_work(&sl->tx_work);
458}
459
449static void sl_tx_timeout(struct net_device *dev) 460static void sl_tx_timeout(struct net_device *dev)
450{ 461{
451 struct slip *sl = netdev_priv(dev); 462 struct slip *sl = netdev_priv(dev);
@@ -749,6 +760,7 @@ static struct slip *sl_alloc(dev_t line)
749 sl->magic = SLIP_MAGIC; 760 sl->magic = SLIP_MAGIC;
750 sl->dev = dev; 761 sl->dev = dev;
751 spin_lock_init(&sl->lock); 762 spin_lock_init(&sl->lock);
763 INIT_WORK(&sl->tx_work, slip_transmit);
752 sl->mode = SL_MODE_DEFAULT; 764 sl->mode = SL_MODE_DEFAULT;
753#ifdef CONFIG_SLIP_SMART 765#ifdef CONFIG_SLIP_SMART
754 /* initialize timer_list struct */ 766 /* initialize timer_list struct */
@@ -872,8 +884,12 @@ static void slip_close(struct tty_struct *tty)
872 if (!sl || sl->magic != SLIP_MAGIC || sl->tty != tty) 884 if (!sl || sl->magic != SLIP_MAGIC || sl->tty != tty)
873 return; 885 return;
874 886
887 spin_lock_bh(&sl->lock);
875 tty->disc_data = NULL; 888 tty->disc_data = NULL;
876 sl->tty = NULL; 889 sl->tty = NULL;
890 spin_unlock_bh(&sl->lock);
891
892 flush_work(&sl->tx_work);
877 893
878 /* VSV = very important to remove timers */ 894 /* VSV = very important to remove timers */
879#ifdef CONFIG_SLIP_SMART 895#ifdef CONFIG_SLIP_SMART
diff --git a/drivers/net/slip/slip.h b/drivers/net/slip/slip.h
index 67673cf1266b..cf32aadf508f 100644
--- a/drivers/net/slip/slip.h
+++ b/drivers/net/slip/slip.h
@@ -53,6 +53,7 @@ struct slip {
53 struct tty_struct *tty; /* ptr to TTY structure */ 53 struct tty_struct *tty; /* ptr to TTY structure */
54 struct net_device *dev; /* easy for intr handling */ 54 struct net_device *dev; /* easy for intr handling */
55 spinlock_t lock; 55 spinlock_t lock;
56 struct work_struct tx_work; /* Flushes transmit buffer */
56 57
57#ifdef SL_INCLUDE_CSLIP 58#ifdef SL_INCLUDE_CSLIP
58 struct slcompress *slcomp; /* for header compression */ 59 struct slcompress *slcomp; /* for header compression */
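The slip change above stops writing from slip_write_wakeup() directly: the wakeup callback only schedules tx_work, the actual tty write runs in process context under sl->lock, and slip_close() clears sl->tty under the lock and then flush_work()s any transmit still in flight. A rough userspace analogue of that "callback schedules, worker writes" split, using a pthread and a condition variable; every name is invented, and the kernel version uses INIT_WORK()/schedule_work() rather than a thread.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  kick = PTHREAD_COND_INITIALIZER;
static bool pending;
static bool stop;
static int  xleft = 3;		/* bytes left in the transmit buffer */

/* Called from "interrupt" context: must not block, so only schedule. */
static void write_wakeup(void)
{
	pthread_mutex_lock(&lock);
	pending = true;
	pthread_cond_signal(&kick);
	pthread_mutex_unlock(&lock);
}

/* Process-context worker: safe to hold the lock and touch the device. */
static void *transmit_worker(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (!stop) {
		while (!pending && !stop)
			pthread_cond_wait(&kick, &lock);
		if (stop)
			break;
		pending = false;
		if (xleft > 0) {
			xleft--;	/* stand-in for tty->ops->write() */
			printf("wrote one byte, %d left\n", xleft);
		}
	}
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t worker;

	pthread_create(&worker, NULL, transmit_worker, NULL);
	for (int i = 0; i < 3; i++) {
		write_wakeup();		/* driver says: room for more data */
		usleep(10000);
	}
	pthread_mutex_lock(&lock);
	stop = true;			/* analogue of slip_close() teardown */
	pthread_cond_signal(&kick);
	pthread_mutex_unlock(&lock);
	pthread_join(worker, NULL);
	return 0;
}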
diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
index a3a05869309d..a4272ed62da8 100644
--- a/drivers/net/usb/hso.c
+++ b/drivers/net/usb/hso.c
@@ -258,10 +258,8 @@ struct hso_serial {
258 * so as not to drop characters on the floor. 258 * so as not to drop characters on the floor.
259 */ 259 */
260 int curr_rx_urb_idx; 260 int curr_rx_urb_idx;
261 u16 curr_rx_urb_offset;
262 u8 rx_urb_filled[MAX_RX_URBS]; 261 u8 rx_urb_filled[MAX_RX_URBS];
263 struct tasklet_struct unthrottle_tasklet; 262 struct tasklet_struct unthrottle_tasklet;
264 struct work_struct retry_unthrottle_workqueue;
265}; 263};
266 264
267struct hso_device { 265struct hso_device {
@@ -1252,14 +1250,6 @@ static void hso_unthrottle(struct tty_struct *tty)
1252 tasklet_hi_schedule(&serial->unthrottle_tasklet); 1250 tasklet_hi_schedule(&serial->unthrottle_tasklet);
1253} 1251}
1254 1252
1255static void hso_unthrottle_workfunc(struct work_struct *work)
1256{
1257 struct hso_serial *serial =
1258 container_of(work, struct hso_serial,
1259 retry_unthrottle_workqueue);
1260 hso_unthrottle_tasklet(serial);
1261}
1262
1263/* open the requested serial port */ 1253/* open the requested serial port */
1264static int hso_serial_open(struct tty_struct *tty, struct file *filp) 1254static int hso_serial_open(struct tty_struct *tty, struct file *filp)
1265{ 1255{
@@ -1295,8 +1285,6 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
1295 tasklet_init(&serial->unthrottle_tasklet, 1285 tasklet_init(&serial->unthrottle_tasklet,
1296 (void (*)(unsigned long))hso_unthrottle_tasklet, 1286 (void (*)(unsigned long))hso_unthrottle_tasklet,
1297 (unsigned long)serial); 1287 (unsigned long)serial);
1298 INIT_WORK(&serial->retry_unthrottle_workqueue,
1299 hso_unthrottle_workfunc);
1300 result = hso_start_serial_device(serial->parent, GFP_KERNEL); 1288 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
1301 if (result) { 1289 if (result) {
1302 hso_stop_serial_device(serial->parent); 1290 hso_stop_serial_device(serial->parent);
@@ -1345,7 +1333,6 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
1345 if (!usb_gone) 1333 if (!usb_gone)
1346 hso_stop_serial_device(serial->parent); 1334 hso_stop_serial_device(serial->parent);
1347 tasklet_kill(&serial->unthrottle_tasklet); 1335 tasklet_kill(&serial->unthrottle_tasklet);
1348 cancel_work_sync(&serial->retry_unthrottle_workqueue);
1349 } 1336 }
1350 1337
1351 if (!usb_gone) 1338 if (!usb_gone)
@@ -2013,8 +2000,7 @@ static void ctrl_callback(struct urb *urb)
2013static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial) 2000static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial)
2014{ 2001{
2015 struct tty_struct *tty; 2002 struct tty_struct *tty;
2016 int write_length_remaining = 0; 2003 int count;
2017 int curr_write_len;
2018 2004
2019 /* Sanity check */ 2005 /* Sanity check */
2020 if (urb == NULL || serial == NULL) { 2006 if (urb == NULL || serial == NULL) {
@@ -2024,29 +2010,28 @@ static int put_rxbuf_data(struct urb *urb, struct hso_serial *serial)
2024 2010
2025 tty = tty_port_tty_get(&serial->port); 2011 tty = tty_port_tty_get(&serial->port);
2026 2012
2013 if (tty && test_bit(TTY_THROTTLED, &tty->flags)) {
2014 tty_kref_put(tty);
2015 return -1;
2016 }
2017
2027 /* Push data to tty */ 2018 /* Push data to tty */
2028 write_length_remaining = urb->actual_length -
2029 serial->curr_rx_urb_offset;
2030 D1("data to push to tty"); 2019 D1("data to push to tty");
2031 while (write_length_remaining) { 2020 count = tty_buffer_request_room(&serial->port, urb->actual_length);
2032 if (tty && test_bit(TTY_THROTTLED, &tty->flags)) { 2021 if (count >= urb->actual_length) {
2033 tty_kref_put(tty); 2022 tty_insert_flip_string(&serial->port, urb->transfer_buffer,
2034 return -1; 2023 urb->actual_length);
2035 }
2036 curr_write_len = tty_insert_flip_string(&serial->port,
2037 urb->transfer_buffer + serial->curr_rx_urb_offset,
2038 write_length_remaining);
2039 serial->curr_rx_urb_offset += curr_write_len;
2040 write_length_remaining -= curr_write_len;
2041 tty_flip_buffer_push(&serial->port); 2024 tty_flip_buffer_push(&serial->port);
2025 } else {
2026 dev_warn(&serial->parent->usb->dev,
2027 "dropping data, %d bytes lost\n", urb->actual_length);
2042 } 2028 }
2029
2043 tty_kref_put(tty); 2030 tty_kref_put(tty);
2044 2031
2045 if (write_length_remaining == 0) { 2032 serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 0;
2046 serial->curr_rx_urb_offset = 0; 2033
2047 serial->rx_urb_filled[hso_urb_to_index(serial, urb)] = 0; 2034 return 0;
2048 }
2049 return write_length_remaining;
2050} 2035}
2051 2036
2052 2037
@@ -2217,7 +2202,6 @@ static int hso_stop_serial_device(struct hso_device *hso_dev)
2217 } 2202 }
2218 } 2203 }
2219 serial->curr_rx_urb_idx = 0; 2204 serial->curr_rx_urb_idx = 0;
2220 serial->curr_rx_urb_offset = 0;
2221 2205
2222 if (serial->tx_urb) 2206 if (serial->tx_urb)
2223 usb_kill_urb(serial->tx_urb); 2207 usb_kill_urb(serial->tx_urb);
diff --git a/drivers/net/usb/huawei_cdc_ncm.c b/drivers/net/usb/huawei_cdc_ncm.c
index f9822bc75425..5d95a13dbe2a 100644
--- a/drivers/net/usb/huawei_cdc_ncm.c
+++ b/drivers/net/usb/huawei_cdc_ncm.c
@@ -84,12 +84,13 @@ static int huawei_cdc_ncm_bind(struct usbnet *usbnet_dev,
84 ctx = drvstate->ctx; 84 ctx = drvstate->ctx;
85 85
86 if (usbnet_dev->status) 86 if (usbnet_dev->status)
87 /* CDC-WMC r1.1 requires wMaxCommand to be "at least 256 87 /* The wMaxCommand buffer must be big enough to hold
88 * decimal (0x100)" 88 * any message from the modem. Experience has shown
89 * that some replies are more than 256 bytes long
89 */ 90 */
90 subdriver = usb_cdc_wdm_register(ctx->control, 91 subdriver = usb_cdc_wdm_register(ctx->control,
91 &usbnet_dev->status->desc, 92 &usbnet_dev->status->desc,
92 256, /* wMaxCommand */ 93 1024, /* wMaxCommand */
93 huawei_cdc_ncm_wdm_manage_power); 94 huawei_cdc_ncm_wdm_manage_power);
94 if (IS_ERR(subdriver)) { 95 if (IS_ERR(subdriver)) {
95 ret = PTR_ERR(subdriver); 96 ret = PTR_ERR(subdriver);
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index cf62d7e8329f..c4638c67f6b9 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -741,6 +741,7 @@ static const struct usb_device_id products[] = {
741 {QMI_FIXED_INTF(0x19d2, 0x1424, 2)}, 741 {QMI_FIXED_INTF(0x19d2, 0x1424, 2)},
742 {QMI_FIXED_INTF(0x19d2, 0x1425, 2)}, 742 {QMI_FIXED_INTF(0x19d2, 0x1425, 2)},
743 {QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */ 743 {QMI_FIXED_INTF(0x19d2, 0x1426, 2)}, /* ZTE MF91 */
744 {QMI_FIXED_INTF(0x19d2, 0x1428, 2)}, /* Telewell TW-LTE 4G v2 */
744 {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */ 745 {QMI_FIXED_INTF(0x19d2, 0x2002, 4)}, /* ZTE (Vodafone) K3765-Z */
745 {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */ 746 {QMI_FIXED_INTF(0x0f3d, 0x68a2, 8)}, /* Sierra Wireless MC7700 */
746 {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */ 747 {QMI_FIXED_INTF(0x114f, 0x68a2, 8)}, /* Sierra Wireless MC7750 */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 25431965a625..7bad2d316637 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -1359,7 +1359,7 @@ static void r8152_csum_workaround(struct r8152 *tp, struct sk_buff *skb,
1359 struct sk_buff_head seg_list; 1359 struct sk_buff_head seg_list;
1360 struct sk_buff *segs, *nskb; 1360 struct sk_buff *segs, *nskb;
1361 1361
1362 features &= ~(NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO); 1362 features &= ~(NETIF_F_SG | NETIF_F_IPV6_CSUM | NETIF_F_TSO6);
1363 segs = skb_gso_segment(skb, features); 1363 segs = skb_gso_segment(skb, features);
1364 if (IS_ERR(segs) || !segs) 1364 if (IS_ERR(segs) || !segs)
1365 goto drop; 1365 goto drop;
@@ -3204,8 +3204,13 @@ static void rtl8152_get_ethtool_stats(struct net_device *dev,
3204 struct r8152 *tp = netdev_priv(dev); 3204 struct r8152 *tp = netdev_priv(dev);
3205 struct tally_counter tally; 3205 struct tally_counter tally;
3206 3206
3207 if (usb_autopm_get_interface(tp->intf) < 0)
3208 return;
3209
3207 generic_ocp_read(tp, PLA_TALLYCNT, sizeof(tally), &tally, MCU_TYPE_PLA); 3210 generic_ocp_read(tp, PLA_TALLYCNT, sizeof(tally), &tally, MCU_TYPE_PLA);
3208 3211
3212 usb_autopm_put_interface(tp->intf);
3213
3209 data[0] = le64_to_cpu(tally.tx_packets); 3214 data[0] = le64_to_cpu(tally.tx_packets);
3210 data[1] = le64_to_cpu(tally.rx_packets); 3215 data[1] = le64_to_cpu(tally.rx_packets);
3211 data[2] = le64_to_cpu(tally.tx_errors); 3216 data[2] = le64_to_cpu(tally.tx_errors);
diff --git a/drivers/net/usb/smsc95xx.c b/drivers/net/usb/smsc95xx.c
index 424db65e4396..d07bf4cb893f 100644
--- a/drivers/net/usb/smsc95xx.c
+++ b/drivers/net/usb/smsc95xx.c
@@ -1714,6 +1714,18 @@ static int smsc95xx_resume(struct usb_interface *intf)
1714 return ret; 1714 return ret;
1715} 1715}
1716 1716
1717static int smsc95xx_reset_resume(struct usb_interface *intf)
1718{
1719 struct usbnet *dev = usb_get_intfdata(intf);
1720 int ret;
1721
1722 ret = smsc95xx_reset(dev);
1723 if (ret < 0)
1724 return ret;
1725
1726 return smsc95xx_resume(intf);
1727}
1728
1717static void smsc95xx_rx_csum_offload(struct sk_buff *skb) 1729static void smsc95xx_rx_csum_offload(struct sk_buff *skb)
1718{ 1730{
1719 skb->csum = *(u16 *)(skb_tail_pointer(skb) - 2); 1731 skb->csum = *(u16 *)(skb_tail_pointer(skb) - 2);
@@ -2004,7 +2016,7 @@ static struct usb_driver smsc95xx_driver = {
2004 .probe = usbnet_probe, 2016 .probe = usbnet_probe,
2005 .suspend = smsc95xx_suspend, 2017 .suspend = smsc95xx_suspend,
2006 .resume = smsc95xx_resume, 2018 .resume = smsc95xx_resume,
2007 .reset_resume = smsc95xx_resume, 2019 .reset_resume = smsc95xx_reset_resume,
2008 .disconnect = usbnet_disconnect, 2020 .disconnect = usbnet_disconnect,
2009 .disable_hub_initiated_lpm = 1, 2021 .disable_hub_initiated_lpm = 1,
2010 .supports_autosuspend = 1, 2022 .supports_autosuspend = 1,
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 97394345e5dd..b76f7dcde0db 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -2589,8 +2589,8 @@ vmxnet3_open(struct net_device *netdev)
2589 for (i = 0; i < adapter->num_tx_queues; i++) 2589 for (i = 0; i < adapter->num_tx_queues; i++)
2590 spin_lock_init(&adapter->tx_queue[i].tx_lock); 2590 spin_lock_init(&adapter->tx_queue[i].tx_lock);
2591 2591
2592 err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE, 2592 err = vmxnet3_create_queues(adapter, adapter->tx_ring_size,
2593 VMXNET3_DEF_RX_RING_SIZE, 2593 adapter->rx_ring_size,
2594 VMXNET3_DEF_RX_RING_SIZE); 2594 VMXNET3_DEF_RX_RING_SIZE);
2595 if (err) 2595 if (err)
2596 goto queue_err; 2596 goto queue_err;
@@ -2968,6 +2968,9 @@ vmxnet3_probe_device(struct pci_dev *pdev,
2968 adapter->netdev = netdev; 2968 adapter->netdev = netdev;
2969 adapter->pdev = pdev; 2969 adapter->pdev = pdev;
2970 2970
2971 adapter->tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
2972 adapter->rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
2973
2971 spin_lock_init(&adapter->cmd_lock); 2974 spin_lock_init(&adapter->cmd_lock);
2972 adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter, 2975 adapter->adapter_pa = dma_map_single(&adapter->pdev->dev, adapter,
2973 sizeof(struct vmxnet3_adapter), 2976 sizeof(struct vmxnet3_adapter),
diff --git a/drivers/net/vmxnet3/vmxnet3_ethtool.c b/drivers/net/vmxnet3/vmxnet3_ethtool.c
index 40c1c7b0d9e0..b725fd9e7803 100644
--- a/drivers/net/vmxnet3/vmxnet3_ethtool.c
+++ b/drivers/net/vmxnet3/vmxnet3_ethtool.c
@@ -449,8 +449,8 @@ vmxnet3_get_ringparam(struct net_device *netdev,
449 param->rx_mini_max_pending = 0; 449 param->rx_mini_max_pending = 0;
450 param->rx_jumbo_max_pending = 0; 450 param->rx_jumbo_max_pending = 0;
451 451
452 param->rx_pending = adapter->rx_queue[0].rx_ring[0].size; 452 param->rx_pending = adapter->rx_ring_size;
453 param->tx_pending = adapter->tx_queue[0].tx_ring.size; 453 param->tx_pending = adapter->tx_ring_size;
454 param->rx_mini_pending = 0; 454 param->rx_mini_pending = 0;
455 param->rx_jumbo_pending = 0; 455 param->rx_jumbo_pending = 0;
456} 456}
@@ -529,9 +529,11 @@ vmxnet3_set_ringparam(struct net_device *netdev,
529 * size */ 529 * size */
530 netdev_err(netdev, "failed to apply new sizes, " 530 netdev_err(netdev, "failed to apply new sizes, "
531 "try the default ones\n"); 531 "try the default ones\n");
532 new_rx_ring_size = VMXNET3_DEF_RX_RING_SIZE;
533 new_tx_ring_size = VMXNET3_DEF_TX_RING_SIZE;
532 err = vmxnet3_create_queues(adapter, 534 err = vmxnet3_create_queues(adapter,
533 VMXNET3_DEF_TX_RING_SIZE, 535 new_tx_ring_size,
534 VMXNET3_DEF_RX_RING_SIZE, 536 new_rx_ring_size,
535 VMXNET3_DEF_RX_RING_SIZE); 537 VMXNET3_DEF_RX_RING_SIZE);
536 if (err) { 538 if (err) {
537 netdev_err(netdev, "failed to create queues " 539 netdev_err(netdev, "failed to create queues "
@@ -545,6 +547,8 @@ vmxnet3_set_ringparam(struct net_device *netdev,
545 netdev_err(netdev, "failed to re-activate, error %d." 547 netdev_err(netdev, "failed to re-activate, error %d."
546 " Closing it\n", err); 548 " Closing it\n", err);
547 } 549 }
550 adapter->tx_ring_size = new_tx_ring_size;
551 adapter->rx_ring_size = new_rx_ring_size;
548 552
549out: 553out:
550 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state); 554 clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 190569d02450..29ee77f2c97f 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -349,6 +349,11 @@ struct vmxnet3_adapter {
349 u32 link_speed; /* in mbps */ 349 u32 link_speed; /* in mbps */
350 350
351 u64 tx_timeout_count; 351 u64 tx_timeout_count;
352
353 /* Ring sizes */
354 u32 tx_ring_size;
355 u32 rx_ring_size;
356
352 struct work_struct work; 357 struct work_struct work;
353 358
354 unsigned long state; /* VMXNET3_STATE_BIT_xxx */ 359 unsigned long state; /* VMXNET3_STATE_BIT_xxx */
diff --git a/drivers/net/wan/farsync.c b/drivers/net/wan/farsync.c
index 93ace042d0aa..1f041271f7fe 100644
--- a/drivers/net/wan/farsync.c
+++ b/drivers/net/wan/farsync.c
@@ -2363,7 +2363,7 @@ static char *type_strings[] = {
2363 "FarSync TE1" 2363 "FarSync TE1"
2364}; 2364};
2365 2365
2366static void 2366static int
2367fst_init_card(struct fst_card_info *card) 2367fst_init_card(struct fst_card_info *card)
2368{ 2368{
2369 int i; 2369 int i;
@@ -2374,24 +2374,21 @@ fst_init_card(struct fst_card_info *card)
2374 * we'll have to revise it in some way then. 2374 * we'll have to revise it in some way then.
2375 */ 2375 */
2376 for (i = 0; i < card->nports; i++) { 2376 for (i = 0; i < card->nports; i++) {
2377 err = register_hdlc_device(card->ports[i].dev); 2377 err = register_hdlc_device(card->ports[i].dev);
2378 if (err < 0) { 2378 if (err < 0) {
2379 int j;
2380 pr_err("Cannot register HDLC device for port %d (errno %d)\n", 2379 pr_err("Cannot register HDLC device for port %d (errno %d)\n",
2381 i, -err); 2380 i, -err);
2382 for (j = i; j < card->nports; j++) { 2381 while (i--)
2383 free_netdev(card->ports[j].dev); 2382 unregister_hdlc_device(card->ports[i].dev);
2384 card->ports[j].dev = NULL; 2383 return err;
2385 } 2384 }
2386 card->nports = i;
2387 break;
2388 }
2389 } 2385 }
2390 2386
2391 pr_info("%s-%s: %s IRQ%d, %d ports\n", 2387 pr_info("%s-%s: %s IRQ%d, %d ports\n",
2392 port_to_dev(&card->ports[0])->name, 2388 port_to_dev(&card->ports[0])->name,
2393 port_to_dev(&card->ports[card->nports - 1])->name, 2389 port_to_dev(&card->ports[card->nports - 1])->name,
2394 type_strings[card->type], card->irq, card->nports); 2390 type_strings[card->type], card->irq, card->nports);
2391 return 0;
2395} 2392}
2396 2393
2397static const struct net_device_ops fst_ops = { 2394static const struct net_device_ops fst_ops = {
@@ -2447,15 +2444,12 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2447 /* Try to enable the device */ 2444 /* Try to enable the device */
2448 if ((err = pci_enable_device(pdev)) != 0) { 2445 if ((err = pci_enable_device(pdev)) != 0) {
2449 pr_err("Failed to enable card. Err %d\n", -err); 2446 pr_err("Failed to enable card. Err %d\n", -err);
2450 kfree(card); 2447 goto enable_fail;
2451 return err;
2452 } 2448 }
2453 2449
2454 if ((err = pci_request_regions(pdev, "FarSync")) !=0) { 2450 if ((err = pci_request_regions(pdev, "FarSync")) !=0) {
2455 pr_err("Failed to allocate regions. Err %d\n", -err); 2451 pr_err("Failed to allocate regions. Err %d\n", -err);
2456 pci_disable_device(pdev); 2452 goto regions_fail;
2457 kfree(card);
2458 return err;
2459 } 2453 }
2460 2454
2461 /* Get virtual addresses of memory regions */ 2455 /* Get virtual addresses of memory regions */
@@ -2464,30 +2458,21 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2464 card->phys_ctlmem = pci_resource_start(pdev, 3); 2458 card->phys_ctlmem = pci_resource_start(pdev, 3);
2465 if ((card->mem = ioremap(card->phys_mem, FST_MEMSIZE)) == NULL) { 2459 if ((card->mem = ioremap(card->phys_mem, FST_MEMSIZE)) == NULL) {
2466 pr_err("Physical memory remap failed\n"); 2460 pr_err("Physical memory remap failed\n");
2467 pci_release_regions(pdev); 2461 err = -ENODEV;
2468 pci_disable_device(pdev); 2462 goto ioremap_physmem_fail;
2469 kfree(card);
2470 return -ENODEV;
2471 } 2463 }
2472 if ((card->ctlmem = ioremap(card->phys_ctlmem, 0x10)) == NULL) { 2464 if ((card->ctlmem = ioremap(card->phys_ctlmem, 0x10)) == NULL) {
2473 pr_err("Control memory remap failed\n"); 2465 pr_err("Control memory remap failed\n");
2474 pci_release_regions(pdev); 2466 err = -ENODEV;
2475 pci_disable_device(pdev); 2467 goto ioremap_ctlmem_fail;
2476 iounmap(card->mem);
2477 kfree(card);
2478 return -ENODEV;
2479 } 2468 }
2480 dbg(DBG_PCI, "kernel mem %p, ctlmem %p\n", card->mem, card->ctlmem); 2469 dbg(DBG_PCI, "kernel mem %p, ctlmem %p\n", card->mem, card->ctlmem);
2481 2470
2482 /* Register the interrupt handler */ 2471 /* Register the interrupt handler */
2483 if (request_irq(pdev->irq, fst_intr, IRQF_SHARED, FST_DEV_NAME, card)) { 2472 if (request_irq(pdev->irq, fst_intr, IRQF_SHARED, FST_DEV_NAME, card)) {
2484 pr_err("Unable to register interrupt %d\n", card->irq); 2473 pr_err("Unable to register interrupt %d\n", card->irq);
2485 pci_release_regions(pdev); 2474 err = -ENODEV;
2486 pci_disable_device(pdev); 2475 goto irq_fail;
2487 iounmap(card->ctlmem);
2488 iounmap(card->mem);
2489 kfree(card);
2490 return -ENODEV;
2491 } 2476 }
2492 2477
2493 /* Record info we need */ 2478 /* Record info we need */
@@ -2513,13 +2498,8 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2513 while (i--) 2498 while (i--)
2514 free_netdev(card->ports[i].dev); 2499 free_netdev(card->ports[i].dev);
2515 pr_err("FarSync: out of memory\n"); 2500 pr_err("FarSync: out of memory\n");
2516 free_irq(card->irq, card); 2501 err = -ENOMEM;
2517 pci_release_regions(pdev); 2502 goto hdlcdev_fail;
2518 pci_disable_device(pdev);
2519 iounmap(card->ctlmem);
2520 iounmap(card->mem);
2521 kfree(card);
2522 return -ENODEV;
2523 } 2503 }
2524 card->ports[i].dev = dev; 2504 card->ports[i].dev = dev;
2525 card->ports[i].card = card; 2505 card->ports[i].card = card;
@@ -2565,9 +2545,16 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2565 pci_set_drvdata(pdev, card); 2545 pci_set_drvdata(pdev, card);
2566 2546
2567 /* Remainder of card setup */ 2547 /* Remainder of card setup */
2548 if (no_of_cards_added >= FST_MAX_CARDS) {
2549 pr_err("FarSync: too many cards\n");
2550 err = -ENOMEM;
2551 goto card_array_fail;
2552 }
2568 fst_card_array[no_of_cards_added] = card; 2553 fst_card_array[no_of_cards_added] = card;
2569 card->card_no = no_of_cards_added++; /* Record instance and bump it */ 2554 card->card_no = no_of_cards_added++; /* Record instance and bump it */
2570 fst_init_card(card); 2555 err = fst_init_card(card);
2556 if (err)
2557 goto init_card_fail;
2571 if (card->family == FST_FAMILY_TXU) { 2558 if (card->family == FST_FAMILY_TXU) {
2572 /* 2559 /*
2573 * Allocate a dma buffer for transmit and receives 2560 * Allocate a dma buffer for transmit and receives
@@ -2577,29 +2564,46 @@ fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2577 &card->rx_dma_handle_card); 2564 &card->rx_dma_handle_card);
2578 if (card->rx_dma_handle_host == NULL) { 2565 if (card->rx_dma_handle_host == NULL) {
2579 pr_err("Could not allocate rx dma buffer\n"); 2566 pr_err("Could not allocate rx dma buffer\n");
2580 fst_disable_intr(card); 2567 err = -ENOMEM;
2581 pci_release_regions(pdev); 2568 goto rx_dma_fail;
2582 pci_disable_device(pdev);
2583 iounmap(card->ctlmem);
2584 iounmap(card->mem);
2585 kfree(card);
2586 return -ENOMEM;
2587 } 2569 }
2588 card->tx_dma_handle_host = 2570 card->tx_dma_handle_host =
2589 pci_alloc_consistent(card->device, FST_MAX_MTU, 2571 pci_alloc_consistent(card->device, FST_MAX_MTU,
2590 &card->tx_dma_handle_card); 2572 &card->tx_dma_handle_card);
2591 if (card->tx_dma_handle_host == NULL) { 2573 if (card->tx_dma_handle_host == NULL) {
2592 pr_err("Could not allocate tx dma buffer\n"); 2574 pr_err("Could not allocate tx dma buffer\n");
2593 fst_disable_intr(card); 2575 err = -ENOMEM;
2594 pci_release_regions(pdev); 2576 goto tx_dma_fail;
2595 pci_disable_device(pdev);
2596 iounmap(card->ctlmem);
2597 iounmap(card->mem);
2598 kfree(card);
2599 return -ENOMEM;
2600 } 2577 }
2601 } 2578 }
2602 return 0; /* Success */ 2579 return 0; /* Success */
2580
2581tx_dma_fail:
2582 pci_free_consistent(card->device, FST_MAX_MTU,
2583 card->rx_dma_handle_host,
2584 card->rx_dma_handle_card);
2585rx_dma_fail:
2586 fst_disable_intr(card);
2587 for (i = 0 ; i < card->nports ; i++)
2588 unregister_hdlc_device(card->ports[i].dev);
2589init_card_fail:
2590 fst_card_array[card->card_no] = NULL;
2591card_array_fail:
2592 for (i = 0 ; i < card->nports ; i++)
2593 free_netdev(card->ports[i].dev);
2594hdlcdev_fail:
2595 free_irq(card->irq, card);
2596irq_fail:
2597 iounmap(card->ctlmem);
2598ioremap_ctlmem_fail:
2599 iounmap(card->mem);
2600ioremap_physmem_fail:
2601 pci_release_regions(pdev);
2602regions_fail:
2603 pci_disable_device(pdev);
2604enable_fail:
2605 kfree(card);
2606 return err;
2603} 2607}
2604 2608
2605/* 2609/*
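The farsync hunks above convert fst_add_one() from repeating the full cleanup at every failure point to a single goto ladder whose labels release resources in exactly the reverse order they were acquired. A compact, compilable sketch of that error-unwinding pattern, with invented resources (two heap buffers and a file) standing in for the PCI regions, ioremaps and IRQ:

#include <stdio.h>
#include <stdlib.h>

static int setup(void)
{
	char *a, *b;
	FILE *f;
	int err;

	a = malloc(64);
	if (!a) {
		err = -1;
		goto a_fail;
	}

	b = malloc(64);
	if (!b) {
		err = -1;
		goto b_fail;
	}

	f = fopen("/dev/null", "w");
	if (!f) {
		err = -1;
		goto open_fail;
	}

	/* ... use a, b and f ... */
	fclose(f);
	free(b);
	free(a);
	return 0;		/* success path releases in its own order */

open_fail:
	free(b);
b_fail:
	free(a);
a_fail:
	return err;
}

int main(void)
{
	return setup() ? 1 : 0;
}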
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 82017f56e661..e6c56c5bb0f6 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -795,7 +795,11 @@ int ath10k_core_start(struct ath10k *ar)
795 if (status) 795 if (status)
796 goto err_htc_stop; 796 goto err_htc_stop;
797 797
798 ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1; 798 if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
799 ar->free_vdev_map = (1 << TARGET_10X_NUM_VDEVS) - 1;
800 else
801 ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1;
802
799 INIT_LIST_HEAD(&ar->arvifs); 803 INIT_LIST_HEAD(&ar->arvifs);
800 804
801 if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags)) 805 if (!test_bit(ATH10K_FLAG_FIRST_BOOT_DONE, &ar->dev_flags))
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index 6c102b1312ff..eebc860c3655 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -312,7 +312,6 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
312 int msdu_len, msdu_chaining = 0; 312 int msdu_len, msdu_chaining = 0;
313 struct sk_buff *msdu; 313 struct sk_buff *msdu;
314 struct htt_rx_desc *rx_desc; 314 struct htt_rx_desc *rx_desc;
315 bool corrupted = false;
316 315
317 lockdep_assert_held(&htt->rx_ring.lock); 316 lockdep_assert_held(&htt->rx_ring.lock);
318 317
@@ -439,9 +438,6 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
439 last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) & 438 last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
440 RX_MSDU_END_INFO0_LAST_MSDU; 439 RX_MSDU_END_INFO0_LAST_MSDU;
441 440
442 if (msdu_chaining && !last_msdu)
443 corrupted = true;
444
445 if (last_msdu) { 441 if (last_msdu) {
446 msdu->next = NULL; 442 msdu->next = NULL;
447 break; 443 break;
@@ -457,20 +453,6 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
457 msdu_chaining = -1; 453 msdu_chaining = -1;
458 454
459 /* 455 /*
460 * Apparently FW sometimes reports weird chained MSDU sequences with
461 * more than one rx descriptor. This seems like a bug but needs more
462 * analyzing. For the time being fix it by dropping such sequences to
463 * avoid blowing up the host system.
464 */
465 if (corrupted) {
466 ath10k_warn("failed to pop chained msdus, dropping\n");
467 ath10k_htt_rx_free_msdu_chain(*head_msdu);
468 *head_msdu = NULL;
469 *tail_msdu = NULL;
470 msdu_chaining = -EINVAL;
471 }
472
473 /*
474 * Don't refill the ring yet. 456 * Don't refill the ring yet.
475 * 457 *
476 * First, the elements popped here are still in use - it is not 458 * First, the elements popped here are still in use - it is not
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
index e3f67b8d3f80..40fd9b7b1426 100644
--- a/drivers/net/wireless/b43/Kconfig
+++ b/drivers/net/wireless/b43/Kconfig
@@ -36,7 +36,7 @@ config B43_SSB
36choice 36choice
37 prompt "Supported bus types" 37 prompt "Supported bus types"
38 depends on B43 38 depends on B43
39 default B43_BCMA_AND_SSB 39 default B43_BUSES_BCMA_AND_SSB
40 40
41config B43_BUSES_BCMA_AND_SSB 41config B43_BUSES_BCMA_AND_SSB
42 bool "BCMA and SSB" 42 bool "BCMA and SSB"
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 32538ac5f7e4..0d6a0bb1f876 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -5221,6 +5221,7 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
5221 /* We don't support 5 GHz on some PHYs yet */ 5221 /* We don't support 5 GHz on some PHYs yet */
5222 switch (dev->phy.type) { 5222 switch (dev->phy.type) {
5223 case B43_PHYTYPE_A: 5223 case B43_PHYTYPE_A:
5224 case B43_PHYTYPE_G:
5224 case B43_PHYTYPE_N: 5225 case B43_PHYTYPE_N:
5225 case B43_PHYTYPE_LP: 5226 case B43_PHYTYPE_LP:
5226 case B43_PHYTYPE_HT: 5227 case B43_PHYTYPE_HT:
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index 4f38f19b8e3d..6e6ef3fc2247 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -811,9 +811,13 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
811 break; 811 break;
812 case B43_PHYTYPE_G: 812 case B43_PHYTYPE_G:
813 status.band = IEEE80211_BAND_2GHZ; 813 status.band = IEEE80211_BAND_2GHZ;
814 /* chanid is the radio channel cookie value as used 814 /* Somewhere between 478.104 and 508.1084 firmware for G-PHY
815 * to tune the radio. */ 815 * has been modified to be compatible with N-PHY and others.
816 status.freq = chanid + 2400; 816 */
817 if (dev->fw.rev >= 508)
818 status.freq = ieee80211_channel_to_frequency(chanid, status.band);
819 else
820 status.freq = chanid + 2400;
817 break; 821 break;
818 case B43_PHYTYPE_N: 822 case B43_PHYTYPE_N:
819 case B43_PHYTYPE_LP: 823 case B43_PHYTYPE_LP:
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index 6db51a666f61..d06fcb05adf2 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -1184,8 +1184,6 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
1184 bus->bus_priv.usb = bus_pub; 1184 bus->bus_priv.usb = bus_pub;
1185 dev_set_drvdata(dev, bus); 1185 dev_set_drvdata(dev, bus);
1186 bus->ops = &brcmf_usb_bus_ops; 1186 bus->ops = &brcmf_usb_bus_ops;
1187 bus->chip = bus_pub->devid;
1188 bus->chiprev = bus_pub->chiprev;
1189 bus->proto_type = BRCMF_PROTO_BCDC; 1187 bus->proto_type = BRCMF_PROTO_BCDC;
1190 bus->always_use_fws_queue = true; 1188 bus->always_use_fws_queue = true;
1191 1189
@@ -1194,6 +1192,9 @@ static int brcmf_usb_probe_cb(struct brcmf_usbdev_info *devinfo)
1194 if (ret) 1192 if (ret)
1195 goto fail; 1193 goto fail;
1196 } 1194 }
1195 bus->chip = bus_pub->devid;
1196 bus->chiprev = bus_pub->chiprev;
1197
1197 /* request firmware here */ 1198 /* request firmware here */
1198 brcmf_fw_get_firmwares(dev, 0, brcmf_usb_get_fwname(devinfo), NULL, 1199 brcmf_fw_get_firmwares(dev, 0, brcmf_usb_get_fwname(devinfo), NULL,
1199 brcmf_usb_probe_phase2); 1200 brcmf_usb_probe_phase2);
diff --git a/drivers/net/wireless/iwlwifi/dvm/rxon.c b/drivers/net/wireless/iwlwifi/dvm/rxon.c
index ed50de6362ed..6dc5dd3ced44 100644
--- a/drivers/net/wireless/iwlwifi/dvm/rxon.c
+++ b/drivers/net/wireless/iwlwifi/dvm/rxon.c
@@ -1068,13 +1068,6 @@ int iwlagn_commit_rxon(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
1068 /* recalculate basic rates */ 1068 /* recalculate basic rates */
1069 iwl_calc_basic_rates(priv, ctx); 1069 iwl_calc_basic_rates(priv, ctx);
1070 1070
1071 /*
1072 * force CTS-to-self frames protection if RTS-CTS is not preferred
1073 * one aggregation protection method
1074 */
1075 if (!priv->hw_params.use_rts_for_aggregation)
1076 ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
1077
1078 if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) || 1071 if ((ctx->vif && ctx->vif->bss_conf.use_short_slot) ||
1079 !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK)) 1072 !(ctx->staging.flags & RXON_FLG_BAND_24G_MSK))
1080 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK; 1073 ctx->staging.flags |= RXON_FLG_SHORT_SLOT_MSK;
@@ -1480,11 +1473,6 @@ void iwlagn_bss_info_changed(struct ieee80211_hw *hw,
1480 else 1473 else
1481 ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK; 1474 ctx->staging.flags &= ~RXON_FLG_TGG_PROTECT_MSK;
1482 1475
1483 if (bss_conf->use_cts_prot)
1484 ctx->staging.flags |= RXON_FLG_SELF_CTS_EN;
1485 else
1486 ctx->staging.flags &= ~RXON_FLG_SELF_CTS_EN;
1487
1488 memcpy(ctx->staging.bssid_addr, bss_conf->bssid, ETH_ALEN); 1476 memcpy(ctx->staging.bssid_addr, bss_conf->bssid, ETH_ALEN);
1489 1477
1490 if (vif->type == NL80211_IFTYPE_AP || 1478 if (vif->type == NL80211_IFTYPE_AP ||
diff --git a/drivers/net/wireless/iwlwifi/iwl-fw.h b/drivers/net/wireless/iwlwifi/iwl-fw.h
index 0aa7c0085c9f..b1a33322b9ba 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fw.h
@@ -88,6 +88,7 @@
88 * P2P client interfaces simultaneously if they are in different bindings. 88 * P2P client interfaces simultaneously if they are in different bindings.
89 * @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_SCM: support power save on BSS station and 89 * @IWL_UCODE_TLV_FLAGS_P2P_BSS_PS_SCM: support power save on BSS station and
90 * P2P client interfaces simultaneously if they are in same bindings. 90 * P2P client interfaces simultaneously if they are in same bindings.
91 * @IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT: General support for uAPSD
91 * @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save 92 * @IWL_UCODE_TLV_FLAGS_P2P_PS_UAPSD: P2P client supports uAPSD power save
92 * @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering. 93 * @IWL_UCODE_TLV_FLAGS_BCAST_FILTERING: uCode supports broadcast filtering.
93 * @IWL_UCODE_TLV_FLAGS_GO_UAPSD: AP/GO interfaces support uAPSD clients 94 * @IWL_UCODE_TLV_FLAGS_GO_UAPSD: AP/GO interfaces support uAPSD clients
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
index 8b5302777632..725ba49576bf 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac-ctxt.c
@@ -667,10 +667,9 @@ static void iwl_mvm_mac_ctxt_cmd_common(struct iwl_mvm *mvm,
667 if (vif->bss_conf.qos) 667 if (vif->bss_conf.qos)
668 cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA); 668 cmd->qos_flags |= cpu_to_le32(MAC_QOS_FLG_UPDATE_EDCA);
669 669
670 if (vif->bss_conf.use_cts_prot) { 670 if (vif->bss_conf.use_cts_prot)
671 cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT); 671 cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_TGG_PROTECT);
672 cmd->protection_flags |= cpu_to_le32(MAC_PROT_FLG_SELF_CTS_EN); 672
673 }
674 IWL_DEBUG_RATE(mvm, "use_cts_prot %d, ht_operation_mode %d\n", 673 IWL_DEBUG_RATE(mvm, "use_cts_prot %d, ht_operation_mode %d\n",
675 vif->bss_conf.use_cts_prot, 674 vif->bss_conf.use_cts_prot,
676 vif->bss_conf.ht_operation_mode); 675 vif->bss_conf.ht_operation_mode);
diff --git a/drivers/net/wireless/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
index 7215f5980186..9bfb90680cdc 100644
--- a/drivers/net/wireless/iwlwifi/mvm/mac80211.c
+++ b/drivers/net/wireless/iwlwifi/mvm/mac80211.c
@@ -303,6 +303,13 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
303 hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP; 303 hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
304 } 304 }
305 305
306 if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT &&
307 !iwlwifi_mod_params.uapsd_disable) {
308 hw->flags |= IEEE80211_HW_SUPPORTS_UAPSD;
309 hw->uapsd_queues = IWL_UAPSD_AC_INFO;
310 hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
311 }
312
306 hw->sta_data_size = sizeof(struct iwl_mvm_sta); 313 hw->sta_data_size = sizeof(struct iwl_mvm_sta);
307 hw->vif_data_size = sizeof(struct iwl_mvm_vif); 314 hw->vif_data_size = sizeof(struct iwl_mvm_vif);
308 hw->chanctx_data_size = sizeof(u16); 315 hw->chanctx_data_size = sizeof(u16);
@@ -1159,8 +1166,12 @@ static void iwl_mvm_bcast_filter_iterator(void *_data, u8 *mac,
1159 1166
1160 bcast_mac = &cmd->macs[mvmvif->id]; 1167 bcast_mac = &cmd->macs[mvmvif->id];
1161 1168
1162 /* enable filtering only for associated stations */ 1169 /*
1163 if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc) 1170 * enable filtering only for associated stations, but not for P2P
1171 * Clients
1172 */
1173 if (vif->type != NL80211_IFTYPE_STATION || vif->p2p ||
1174 !vif->bss_conf.assoc)
1164 return; 1175 return;
1165 1176
1166 bcast_mac->default_discard = 1; 1177 bcast_mac->default_discard = 1;
@@ -1237,10 +1248,6 @@ static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm,
1237 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING)) 1248 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING))
1238 return 0; 1249 return 0;
1239 1250
1240 /* bcast filtering isn't supported for P2P client */
1241 if (vif->p2p)
1242 return 0;
1243
1244 if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) 1251 if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd))
1245 return 0; 1252 return 0;
1246 1253
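
The mvm/mac80211.c hunks above fold the P2P-client exclusion into the per-interface broadcast-filter check. A small sketch of just that predicate, assuming a simplified vif_state struct in place of the real ieee80211_vif / bss_conf fields:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the fields the iterator reads from
 * struct ieee80211_vif / bss_conf in the real driver. */
struct vif_state {
	bool is_station;
	bool p2p;
	bool assoc;
};

/* Mirrors the check now done in iwl_mvm_bcast_filter_iterator():
 * broadcast filtering only for associated, non-P2P station interfaces. */
static bool bcast_filtering_allowed(const struct vif_state *vif)
{
	return vif->is_station && !vif->p2p && vif->assoc;
}

int main(void)
{
	struct vif_state sta = { true, false, true };
	struct vif_state p2p_client = { true, true, true };

	printf("plain STA: %d, P2P client: %d\n",
	       bcast_filtering_allowed(&sta),
	       bcast_filtering_allowed(&p2p_client));
	return 0;
}
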
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 4b6c7d4bd199..eac2b424f6a0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -588,9 +588,7 @@ static void iwl_build_scan_cmd(struct iwl_mvm *mvm,
588 struct iwl_scan_offload_cmd *scan, 588 struct iwl_scan_offload_cmd *scan,
589 struct iwl_mvm_scan_params *params) 589 struct iwl_mvm_scan_params *params)
590{ 590{
591 scan->channel_count = 591 scan->channel_count = req->n_channels;
592 mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels +
593 mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
594 scan->quiet_time = cpu_to_le16(IWL_ACTIVE_QUIET_TIME); 592 scan->quiet_time = cpu_to_le16(IWL_ACTIVE_QUIET_TIME);
595 scan->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH); 593 scan->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH);
596 scan->good_CRC_th = IWL_GOOD_CRC_TH_DEFAULT; 594 scan->good_CRC_th = IWL_GOOD_CRC_TH_DEFAULT;
@@ -669,61 +667,37 @@ static void iwl_build_channel_cfg(struct iwl_mvm *mvm,
669 struct cfg80211_sched_scan_request *req, 667 struct cfg80211_sched_scan_request *req,
670 struct iwl_scan_channel_cfg *channels, 668 struct iwl_scan_channel_cfg *channels,
671 enum ieee80211_band band, 669 enum ieee80211_band band,
672 int *head, int *tail, 670 int *head,
673 u32 ssid_bitmap, 671 u32 ssid_bitmap,
674 struct iwl_mvm_scan_params *params) 672 struct iwl_mvm_scan_params *params)
675{ 673{
676 struct ieee80211_supported_band *s_band; 674 int i, index = 0;
677 int n_channels = req->n_channels;
678 int i, j, index = 0;
679 bool partial;
680 675
681 /* 676 for (i = 0; i < req->n_channels; i++) {
682 * We have to configure all supported channels, even if we don't want to 677 struct ieee80211_channel *chan = req->channels[i];
683 * scan on them, but we have to send channels in the order that we want 678
684 * to scan. So add requested channels to head of the list and others to 679 if (chan->band != band)
685 * the end. 680 continue;
686 */ 681
687 s_band = &mvm->nvm_data->bands[band]; 682 index = *head;
688 683 (*head)++;
689 for (i = 0; i < s_band->n_channels && *head <= *tail; i++) { 684
690 partial = false; 685 channels->channel_number[index] = cpu_to_le16(chan->hw_value);
691 for (j = 0; j < n_channels; j++)
692 if (s_band->channels[i].center_freq ==
693 req->channels[j]->center_freq) {
694 index = *head;
695 (*head)++;
696 /*
697 * Channels that came with the request will be
698 * in partial scan .
699 */
700 partial = true;
701 break;
702 }
703 if (!partial) {
704 index = *tail;
705 (*tail)--;
706 }
707 channels->channel_number[index] =
708 cpu_to_le16(ieee80211_frequency_to_channel(
709 s_band->channels[i].center_freq));
710 channels->dwell_time[index][0] = params->dwell[band].active; 686 channels->dwell_time[index][0] = params->dwell[band].active;
711 channels->dwell_time[index][1] = params->dwell[band].passive; 687 channels->dwell_time[index][1] = params->dwell[band].passive;
712 688
713 channels->iter_count[index] = cpu_to_le16(1); 689 channels->iter_count[index] = cpu_to_le16(1);
714 channels->iter_interval[index] = 0; 690 channels->iter_interval[index] = 0;
715 691
716 if (!(s_band->channels[i].flags & IEEE80211_CHAN_NO_IR)) 692 if (!(chan->flags & IEEE80211_CHAN_NO_IR))
717 channels->type[index] |= 693 channels->type[index] |=
718 cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_ACTIVE); 694 cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_ACTIVE);
719 695
720 channels->type[index] |= 696 channels->type[index] |=
721 cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_FULL); 697 cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_FULL |
722 if (partial) 698 IWL_SCAN_OFFLOAD_CHANNEL_PARTIAL);
723 channels->type[index] |=
724 cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_PARTIAL);
725 699
726 if (s_band->channels[i].flags & IEEE80211_CHAN_NO_HT40) 700 if (chan->flags & IEEE80211_CHAN_NO_HT40)
727 channels->type[index] |= 701 channels->type[index] |=
728 cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_NARROW); 702 cpu_to_le32(IWL_SCAN_OFFLOAD_CHANNEL_NARROW);
729 703
@@ -740,7 +714,6 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
740 int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels; 714 int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels;
741 int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels; 715 int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
742 int head = 0; 716 int head = 0;
743 int tail = band_2ghz + band_5ghz - 1;
744 u32 ssid_bitmap; 717 u32 ssid_bitmap;
745 int cmd_len; 718 int cmd_len;
746 int ret; 719 int ret;
@@ -772,7 +745,7 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
772 &scan_cfg->scan_cmd.tx_cmd[0], 745 &scan_cfg->scan_cmd.tx_cmd[0],
773 scan_cfg->data); 746 scan_cfg->data);
774 iwl_build_channel_cfg(mvm, req, &scan_cfg->channel_cfg, 747 iwl_build_channel_cfg(mvm, req, &scan_cfg->channel_cfg,
775 IEEE80211_BAND_2GHZ, &head, &tail, 748 IEEE80211_BAND_2GHZ, &head,
776 ssid_bitmap, &params); 749 ssid_bitmap, &params);
777 } 750 }
778 if (band_5ghz) { 751 if (band_5ghz) {
@@ -782,7 +755,7 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
782 scan_cfg->data + 755 scan_cfg->data +
783 SCAN_OFFLOAD_PROBE_REQ_SIZE); 756 SCAN_OFFLOAD_PROBE_REQ_SIZE);
784 iwl_build_channel_cfg(mvm, req, &scan_cfg->channel_cfg, 757 iwl_build_channel_cfg(mvm, req, &scan_cfg->channel_cfg,
785 IEEE80211_BAND_5GHZ, &head, &tail, 758 IEEE80211_BAND_5GHZ, &head,
786 ssid_bitmap, &params); 759 ssid_bitmap, &params);
787 } 760 }
788 761
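
The scan.c rewrite above drops the head/tail split over all supported channels and configures only the channels in the request, advancing a single head index across both band passes. A toy model of that loop, with invented chan and band types standing in for the cfg80211 structures:

#include <stdio.h>

enum band { BAND_2GHZ, BAND_5GHZ };

struct chan {
	enum band band;
	int hw_value;
};

/* Minimal model of the rewritten iwl_build_channel_cfg() loop: only
 * requested channels are written out, and *head tracks the next free
 * slot across both band passes. */
static void build_channel_cfg(const struct chan *req, int n_req,
			      enum band band, int *head, int *out)
{
	for (int i = 0; i < n_req; i++) {
		if (req[i].band != band)
			continue;
		out[(*head)++] = req[i].hw_value;
	}
}

int main(void)
{
	struct chan req[] = {
		{ BAND_2GHZ, 1 }, { BAND_5GHZ, 36 }, { BAND_2GHZ, 6 },
	};
	int cfg[3], head = 0;

	build_channel_cfg(req, 3, BAND_2GHZ, &head, cfg);
	build_channel_cfg(req, 3, BAND_5GHZ, &head, cfg);

	for (int i = 0; i < head; i++)
		printf("slot %d -> channel %d\n", i, cfg[i]);
	return 0;
}
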
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 7091a18d5a72..98950e45c7b0 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -367,6 +367,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
367 {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)}, 367 {IWL_PCI_DEVICE(0x095A, 0x5012, iwl7265_2ac_cfg)},
368 {IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)}, 368 {IWL_PCI_DEVICE(0x095A, 0x5412, iwl7265_2ac_cfg)},
369 {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)}, 369 {IWL_PCI_DEVICE(0x095A, 0x5410, iwl7265_2ac_cfg)},
370 {IWL_PCI_DEVICE(0x095A, 0x5510, iwl7265_2ac_cfg)},
370 {IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)}, 371 {IWL_PCI_DEVICE(0x095A, 0x5400, iwl7265_2ac_cfg)},
371 {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)}, 372 {IWL_PCI_DEVICE(0x095A, 0x1010, iwl7265_2ac_cfg)},
372 {IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)}, 373 {IWL_PCI_DEVICE(0x095A, 0x5000, iwl7265_2n_cfg)},
@@ -380,7 +381,7 @@ static DEFINE_PCI_DEVICE_TABLE(iwl_hw_card_ids) = {
380 {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)}, 381 {IWL_PCI_DEVICE(0x095A, 0x9110, iwl7265_2ac_cfg)},
381 {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)}, 382 {IWL_PCI_DEVICE(0x095A, 0x9112, iwl7265_2ac_cfg)},
382 {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)}, 383 {IWL_PCI_DEVICE(0x095A, 0x9210, iwl7265_2ac_cfg)},
383 {IWL_PCI_DEVICE(0x095A, 0x9200, iwl7265_2ac_cfg)}, 384 {IWL_PCI_DEVICE(0x095B, 0x9200, iwl7265_2ac_cfg)},
384 {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)}, 385 {IWL_PCI_DEVICE(0x095A, 0x9510, iwl7265_2ac_cfg)},
385 {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)}, 386 {IWL_PCI_DEVICE(0x095A, 0x9310, iwl7265_2ac_cfg)},
386 {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)}, 387 {IWL_PCI_DEVICE(0x095A, 0x9410, iwl7265_2ac_cfg)},
diff --git a/drivers/net/wireless/mwifiex/11n_aggr.c b/drivers/net/wireless/mwifiex/11n_aggr.c
index 5b32106182f8..fe0f66f73507 100644
--- a/drivers/net/wireless/mwifiex/11n_aggr.c
+++ b/drivers/net/wireless/mwifiex/11n_aggr.c
@@ -185,6 +185,7 @@ mwifiex_11n_aggregate_pkt(struct mwifiex_private *priv,
185 skb_reserve(skb_aggr, headroom + sizeof(struct txpd)); 185 skb_reserve(skb_aggr, headroom + sizeof(struct txpd));
186 tx_info_aggr = MWIFIEX_SKB_TXCB(skb_aggr); 186 tx_info_aggr = MWIFIEX_SKB_TXCB(skb_aggr);
187 187
188 memset(tx_info_aggr, 0, sizeof(*tx_info_aggr));
188 tx_info_aggr->bss_type = tx_info_src->bss_type; 189 tx_info_aggr->bss_type = tx_info_src->bss_type;
189 tx_info_aggr->bss_num = tx_info_src->bss_num; 190 tx_info_aggr->bss_num = tx_info_src->bss_num;
190 191
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index e95dec91a561..b511613bba2d 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -220,6 +220,7 @@ mwifiex_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
220 } 220 }
221 221
222 tx_info = MWIFIEX_SKB_TXCB(skb); 222 tx_info = MWIFIEX_SKB_TXCB(skb);
223 memset(tx_info, 0, sizeof(*tx_info));
223 tx_info->bss_num = priv->bss_num; 224 tx_info->bss_num = priv->bss_num;
224 tx_info->bss_type = priv->bss_type; 225 tx_info->bss_type = priv->bss_type;
225 tx_info->pkt_len = pkt_len; 226 tx_info->pkt_len = pkt_len;
diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c
index 8dee6c86f4f1..c161141f6c39 100644
--- a/drivers/net/wireless/mwifiex/cmdevt.c
+++ b/drivers/net/wireless/mwifiex/cmdevt.c
@@ -453,6 +453,7 @@ int mwifiex_process_event(struct mwifiex_adapter *adapter)
453 453
454 if (skb) { 454 if (skb) {
455 rx_info = MWIFIEX_SKB_RXCB(skb); 455 rx_info = MWIFIEX_SKB_RXCB(skb);
456 memset(rx_info, 0, sizeof(*rx_info));
456 rx_info->bss_num = priv->bss_num; 457 rx_info->bss_num = priv->bss_num;
457 rx_info->bss_type = priv->bss_type; 458 rx_info->bss_type = priv->bss_type;
458 } 459 }
diff --git a/drivers/net/wireless/mwifiex/main.c b/drivers/net/wireless/mwifiex/main.c
index cbabc12fbda3..e91cd0fa5ca8 100644
--- a/drivers/net/wireless/mwifiex/main.c
+++ b/drivers/net/wireless/mwifiex/main.c
@@ -645,6 +645,7 @@ mwifiex_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
645 } 645 }
646 646
647 tx_info = MWIFIEX_SKB_TXCB(skb); 647 tx_info = MWIFIEX_SKB_TXCB(skb);
648 memset(tx_info, 0, sizeof(*tx_info));
648 tx_info->bss_num = priv->bss_num; 649 tx_info->bss_num = priv->bss_num;
649 tx_info->bss_type = priv->bss_type; 650 tx_info->bss_type = priv->bss_type;
650 tx_info->pkt_len = skb->len; 651 tx_info->pkt_len = skb->len;
diff --git a/drivers/net/wireless/mwifiex/pcie.c b/drivers/net/wireless/mwifiex/pcie.c
index 574d4b597468..2cc9b6fca490 100644
--- a/drivers/net/wireless/mwifiex/pcie.c
+++ b/drivers/net/wireless/mwifiex/pcie.c
@@ -50,7 +50,7 @@ mwifiex_map_pci_memory(struct mwifiex_adapter *adapter, struct sk_buff *skb,
50 return -1; 50 return -1;
51 } 51 }
52 mapping.len = size; 52 mapping.len = size;
53 memcpy(skb->cb, &mapping, sizeof(mapping)); 53 mwifiex_store_mapping(skb, &mapping);
54 return 0; 54 return 0;
55} 55}
56 56
@@ -60,7 +60,7 @@ static void mwifiex_unmap_pci_memory(struct mwifiex_adapter *adapter,
60 struct pcie_service_card *card = adapter->card; 60 struct pcie_service_card *card = adapter->card;
61 struct mwifiex_dma_mapping mapping; 61 struct mwifiex_dma_mapping mapping;
62 62
63 MWIFIEX_SKB_PACB(skb, &mapping); 63 mwifiex_get_mapping(skb, &mapping);
64 pci_unmap_single(card->dev, mapping.addr, mapping.len, flags); 64 pci_unmap_single(card->dev, mapping.addr, mapping.len, flags);
65} 65}
66 66
diff --git a/drivers/net/wireless/mwifiex/sta_tx.c b/drivers/net/wireless/mwifiex/sta_tx.c
index 5fce7e78a36e..70eb863c7249 100644
--- a/drivers/net/wireless/mwifiex/sta_tx.c
+++ b/drivers/net/wireless/mwifiex/sta_tx.c
@@ -150,6 +150,7 @@ int mwifiex_send_null_packet(struct mwifiex_private *priv, u8 flags)
150 return -1; 150 return -1;
151 151
152 tx_info = MWIFIEX_SKB_TXCB(skb); 152 tx_info = MWIFIEX_SKB_TXCB(skb);
153 memset(tx_info, 0, sizeof(*tx_info));
153 tx_info->bss_num = priv->bss_num; 154 tx_info->bss_num = priv->bss_num;
154 tx_info->bss_type = priv->bss_type; 155 tx_info->bss_type = priv->bss_type;
155 tx_info->pkt_len = data_len - (sizeof(struct txpd) + INTF_HEADER_LEN); 156 tx_info->pkt_len = data_len - (sizeof(struct txpd) + INTF_HEADER_LEN);
diff --git a/drivers/net/wireless/mwifiex/tdls.c b/drivers/net/wireless/mwifiex/tdls.c
index e73034fbbde9..0e88364e0c67 100644
--- a/drivers/net/wireless/mwifiex/tdls.c
+++ b/drivers/net/wireless/mwifiex/tdls.c
@@ -605,6 +605,7 @@ int mwifiex_send_tdls_data_frame(struct mwifiex_private *priv, const u8 *peer,
605 } 605 }
606 606
607 tx_info = MWIFIEX_SKB_TXCB(skb); 607 tx_info = MWIFIEX_SKB_TXCB(skb);
608 memset(tx_info, 0, sizeof(*tx_info));
608 tx_info->bss_num = priv->bss_num; 609 tx_info->bss_num = priv->bss_num;
609 tx_info->bss_type = priv->bss_type; 610 tx_info->bss_type = priv->bss_type;
610 611
@@ -760,6 +761,7 @@ int mwifiex_send_tdls_action_frame(struct mwifiex_private *priv, const u8 *peer,
760 skb->priority = MWIFIEX_PRIO_VI; 761 skb->priority = MWIFIEX_PRIO_VI;
761 762
762 tx_info = MWIFIEX_SKB_TXCB(skb); 763 tx_info = MWIFIEX_SKB_TXCB(skb);
764 memset(tx_info, 0, sizeof(*tx_info));
763 tx_info->bss_num = priv->bss_num; 765 tx_info->bss_num = priv->bss_num;
764 tx_info->bss_type = priv->bss_type; 766 tx_info->bss_type = priv->bss_type;
765 tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT; 767 tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
diff --git a/drivers/net/wireless/mwifiex/txrx.c b/drivers/net/wireless/mwifiex/txrx.c
index 37f26afd4314..fd7e5b9b4581 100644
--- a/drivers/net/wireless/mwifiex/txrx.c
+++ b/drivers/net/wireless/mwifiex/txrx.c
@@ -55,6 +55,7 @@ int mwifiex_handle_rx_packet(struct mwifiex_adapter *adapter,
55 return -1; 55 return -1;
56 } 56 }
57 57
58 memset(rx_info, 0, sizeof(*rx_info));
58 rx_info->bss_num = priv->bss_num; 59 rx_info->bss_num = priv->bss_num;
59 rx_info->bss_type = priv->bss_type; 60 rx_info->bss_type = priv->bss_type;
60 61
diff --git a/drivers/net/wireless/mwifiex/uap_txrx.c b/drivers/net/wireless/mwifiex/uap_txrx.c
index 9a56bc61cb1d..b0601b91cc4f 100644
--- a/drivers/net/wireless/mwifiex/uap_txrx.c
+++ b/drivers/net/wireless/mwifiex/uap_txrx.c
@@ -175,6 +175,7 @@ static void mwifiex_uap_queue_bridged_pkt(struct mwifiex_private *priv,
175 } 175 }
176 176
177 tx_info = MWIFIEX_SKB_TXCB(skb); 177 tx_info = MWIFIEX_SKB_TXCB(skb);
178 memset(tx_info, 0, sizeof(*tx_info));
178 tx_info->bss_num = priv->bss_num; 179 tx_info->bss_num = priv->bss_num;
179 tx_info->bss_type = priv->bss_type; 180 tx_info->bss_type = priv->bss_type;
180 tx_info->flags |= MWIFIEX_BUF_FLAG_BRIDGED_PKT; 181 tx_info->flags |= MWIFIEX_BUF_FLAG_BRIDGED_PKT;
diff --git a/drivers/net/wireless/mwifiex/util.h b/drivers/net/wireless/mwifiex/util.h
index ddae57021397..caadb3737b9e 100644
--- a/drivers/net/wireless/mwifiex/util.h
+++ b/drivers/net/wireless/mwifiex/util.h
@@ -20,32 +20,55 @@
20#ifndef _MWIFIEX_UTIL_H_ 20#ifndef _MWIFIEX_UTIL_H_
21#define _MWIFIEX_UTIL_H_ 21#define _MWIFIEX_UTIL_H_
22 22
23struct mwifiex_dma_mapping {
24 dma_addr_t addr;
25 size_t len;
26};
27
28struct mwifiex_cb {
29 struct mwifiex_dma_mapping dma_mapping;
30 union {
31 struct mwifiex_rxinfo rx_info;
32 struct mwifiex_txinfo tx_info;
33 };
34};
35
23static inline struct mwifiex_rxinfo *MWIFIEX_SKB_RXCB(struct sk_buff *skb) 36static inline struct mwifiex_rxinfo *MWIFIEX_SKB_RXCB(struct sk_buff *skb)
24{ 37{
25 return (struct mwifiex_rxinfo *)(skb->cb + sizeof(dma_addr_t)); 38 struct mwifiex_cb *cb = (struct mwifiex_cb *)skb->cb;
39
40 BUILD_BUG_ON(sizeof(struct mwifiex_cb) > sizeof(skb->cb));
41 return &cb->rx_info;
26} 42}
27 43
28static inline struct mwifiex_txinfo *MWIFIEX_SKB_TXCB(struct sk_buff *skb) 44static inline struct mwifiex_txinfo *MWIFIEX_SKB_TXCB(struct sk_buff *skb)
29{ 45{
30 return (struct mwifiex_txinfo *)(skb->cb + sizeof(dma_addr_t)); 46 struct mwifiex_cb *cb = (struct mwifiex_cb *)skb->cb;
47
48 return &cb->tx_info;
31} 49}
32 50
33struct mwifiex_dma_mapping { 51static inline void mwifiex_store_mapping(struct sk_buff *skb,
34 dma_addr_t addr; 52 struct mwifiex_dma_mapping *mapping)
35 size_t len; 53{
36}; 54 struct mwifiex_cb *cb = (struct mwifiex_cb *)skb->cb;
55
56 memcpy(&cb->dma_mapping, mapping, sizeof(*mapping));
57}
37 58
38static inline void MWIFIEX_SKB_PACB(struct sk_buff *skb, 59static inline void mwifiex_get_mapping(struct sk_buff *skb,
39 struct mwifiex_dma_mapping *mapping) 60 struct mwifiex_dma_mapping *mapping)
40{ 61{
41 memcpy(mapping, skb->cb, sizeof(*mapping)); 62 struct mwifiex_cb *cb = (struct mwifiex_cb *)skb->cb;
63
64 memcpy(mapping, &cb->dma_mapping, sizeof(*mapping));
42} 65}
43 66
44static inline dma_addr_t MWIFIEX_SKB_DMA_ADDR(struct sk_buff *skb) 67static inline dma_addr_t MWIFIEX_SKB_DMA_ADDR(struct sk_buff *skb)
45{ 68{
46 struct mwifiex_dma_mapping mapping; 69 struct mwifiex_dma_mapping mapping;
47 70
48 MWIFIEX_SKB_PACB(skb, &mapping); 71 mwifiex_get_mapping(skb, &mapping);
49 72
50 return mapping.addr; 73 return mapping.addr;
51} 74}
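
The util.h change above gives the DMA mapping and the tx/rx info fixed, non-overlapping slots inside skb->cb, guarded by a BUILD_BUG_ON, which is also why the memset() calls were added wherever a tx/rx info is first filled in. A user-space sketch of the same layout idea, using an invented cb_area union and C11 static_assert in place of skb->cb and BUILD_BUG_ON:

#include <assert.h>
#include <stdio.h>
#include <string.h>

#define SKB_CB_SIZE 48	/* size of sk_buff::cb, the per-skb scratch area */

struct dma_mapping { unsigned long addr; unsigned long len; };
struct tx_info     { unsigned char bss_type, bss_num; int pkt_len; };

/* Same idea as struct mwifiex_cb: mapping and packet info live at fixed
 * offsets, so storing one can no longer clobber the other. */
struct cb_layout {
	struct dma_mapping dma;
	struct tx_info tx;
};

union cb_area {
	unsigned char raw[SKB_CB_SIZE];
	struct cb_layout layout;
};

static_assert(sizeof(struct cb_layout) <= SKB_CB_SIZE,
	      "control-block layout must fit in skb->cb");

int main(void)
{
	union cb_area cb;

	memset(&cb.layout.tx, 0, sizeof(cb.layout.tx));	/* zero stale data first */
	cb.layout.tx.bss_num = 1;
	cb.layout.dma.addr = 0x1000;
	printf("bss_num=%u dma=0x%lx\n", cb.layout.tx.bss_num, cb.layout.dma.addr);
	return 0;
}
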
diff --git a/drivers/net/wireless/rt2x00/rt2500pci.c b/drivers/net/wireless/rt2x00/rt2500pci.c
index 2f1cd929c6f6..a511cccc9f01 100644
--- a/drivers/net/wireless/rt2x00/rt2500pci.c
+++ b/drivers/net/wireless/rt2x00/rt2500pci.c
@@ -1681,8 +1681,13 @@ static int rt2500pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
1681 /* 1681 /*
1682 * Detect if this device has an hardware controlled radio. 1682 * Detect if this device has an hardware controlled radio.
1683 */ 1683 */
1684 if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO)) 1684 if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO)) {
1685 __set_bit(CAPABILITY_HW_BUTTON, &rt2x00dev->cap_flags); 1685 __set_bit(CAPABILITY_HW_BUTTON, &rt2x00dev->cap_flags);
1686 /*
1687 * On this device RFKILL initialized during probe does not work.
1688 */
1689 __set_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags);
1690 }
1686 1691
1687 /* 1692 /*
1688 * Check if the BBP tuning should be enabled. 1693 * Check if the BBP tuning should be enabled.
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index a49c3d73ea2c..832006b5aab1 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -229,6 +229,31 @@ static enum hrtimer_restart rt2800usb_tx_sta_fifo_timeout(struct hrtimer *timer)
229/* 229/*
230 * Firmware functions 230 * Firmware functions
231 */ 231 */
232static int rt2800usb_autorun_detect(struct rt2x00_dev *rt2x00dev)
233{
234 __le32 *reg;
235 u32 fw_mode;
236
237 reg = kmalloc(sizeof(*reg), GFP_KERNEL);
238 if (reg == NULL)
239 return -ENOMEM;
240 /* cannot use rt2x00usb_register_read here as it uses different
241 * mode (MULTI_READ vs. DEVICE_MODE) and does not pass the
242 * magic value USB_MODE_AUTORUN (0x11) to the device, thus the
243 * returned value would be invalid.
244 */
245 rt2x00usb_vendor_request(rt2x00dev, USB_DEVICE_MODE,
246 USB_VENDOR_REQUEST_IN, 0, USB_MODE_AUTORUN,
247 reg, sizeof(*reg), REGISTER_TIMEOUT_FIRMWARE);
248 fw_mode = le32_to_cpu(*reg);
249 kfree(reg);
250
251 if ((fw_mode & 0x00000003) == 2)
252 return 1;
253
254 return 0;
255}
256
232static char *rt2800usb_get_firmware_name(struct rt2x00_dev *rt2x00dev) 257static char *rt2800usb_get_firmware_name(struct rt2x00_dev *rt2x00dev)
233{ 258{
234 return FIRMWARE_RT2870; 259 return FIRMWARE_RT2870;
@@ -240,6 +265,7 @@ static int rt2800usb_write_firmware(struct rt2x00_dev *rt2x00dev,
240 int status; 265 int status;
241 u32 offset; 266 u32 offset;
242 u32 length; 267 u32 length;
268 int retval;
243 269
244 /* 270 /*
245 * Check which section of the firmware we need. 271 * Check which section of the firmware we need.
@@ -257,8 +283,16 @@ static int rt2800usb_write_firmware(struct rt2x00_dev *rt2x00dev,
257 /* 283 /*
258 * Write firmware to device. 284 * Write firmware to device.
259 */ 285 */
260 rt2x00usb_register_multiwrite(rt2x00dev, FIRMWARE_IMAGE_BASE, 286 retval = rt2800usb_autorun_detect(rt2x00dev);
261 data + offset, length); 287 if (retval < 0)
288 return retval;
289 if (retval) {
290 rt2x00_info(rt2x00dev,
291 "Firmware loading not required - NIC in AutoRun mode\n");
292 } else {
293 rt2x00usb_register_multiwrite(rt2x00dev, FIRMWARE_IMAGE_BASE,
294 data + offset, length);
295 }
262 296
263 rt2x00usb_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0); 297 rt2x00usb_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0);
264 rt2x00usb_register_write(rt2x00dev, H2M_MAILBOX_STATUS, ~0); 298 rt2x00usb_register_write(rt2x00dev, H2M_MAILBOX_STATUS, ~0);
@@ -735,11 +769,26 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
735/* 769/*
736 * Device probe functions. 770 * Device probe functions.
737 */ 771 */
772static int rt2800usb_efuse_detect(struct rt2x00_dev *rt2x00dev)
773{
774 int retval;
775
776 retval = rt2800usb_autorun_detect(rt2x00dev);
777 if (retval < 0)
778 return retval;
779 if (retval)
780 return 1;
781 return rt2800_efuse_detect(rt2x00dev);
782}
783
738static int rt2800usb_read_eeprom(struct rt2x00_dev *rt2x00dev) 784static int rt2800usb_read_eeprom(struct rt2x00_dev *rt2x00dev)
739{ 785{
740 int retval; 786 int retval;
741 787
742 if (rt2800_efuse_detect(rt2x00dev)) 788 retval = rt2800usb_efuse_detect(rt2x00dev);
789 if (retval < 0)
790 return retval;
791 if (retval)
743 retval = rt2800_read_eeprom_efuse(rt2x00dev); 792 retval = rt2800_read_eeprom_efuse(rt2x00dev);
744 else 793 else
745 retval = rt2x00usb_eeprom_read(rt2x00dev, rt2x00dev->eeprom, 794 retval = rt2x00usb_eeprom_read(rt2x00dev, rt2x00dev->eeprom,
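
The rt2800usb hunks above probe the device's mode word with a vendor request and skip both the firmware upload and the efuse probe when the NIC already booted its own firmware. A tiny sketch of just the mode-word test, with the two-bit mask and the value 2 taken from rt2800usb_autorun_detect():

#include <stdio.h>

/* The vendor request returns a 32-bit mode word; only the two lowest
 * bits matter here, and a value of 2 means AutoRun (firmware already
 * running on the NIC, no upload needed). */
static int is_autorun(unsigned int fw_mode)
{
	return (fw_mode & 0x3) == 2;
}

int main(void)
{
	printf("mode 0x12 -> autorun=%d\n", is_autorun(0x12));
	printf("mode 0x00 -> autorun=%d\n", is_autorun(0x00));
	return 0;
}
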
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 010b76505243..d13f25cd70d5 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -693,6 +693,7 @@ enum rt2x00_capability_flags {
693 REQUIRE_SW_SEQNO, 693 REQUIRE_SW_SEQNO,
694 REQUIRE_HT_TX_DESC, 694 REQUIRE_HT_TX_DESC,
695 REQUIRE_PS_AUTOWAKE, 695 REQUIRE_PS_AUTOWAKE,
696 REQUIRE_DELAYED_RFKILL,
696 697
697 /* 698 /*
698 * Capabilities 699 * Capabilities
diff --git a/drivers/net/wireless/rt2x00/rt2x00dev.c b/drivers/net/wireless/rt2x00/rt2x00dev.c
index 2bde6729f5e6..4fa43a2eeb73 100644
--- a/drivers/net/wireless/rt2x00/rt2x00dev.c
+++ b/drivers/net/wireless/rt2x00/rt2x00dev.c
@@ -1126,9 +1126,10 @@ static void rt2x00lib_uninitialize(struct rt2x00_dev *rt2x00dev)
1126 return; 1126 return;
1127 1127
1128 /* 1128 /*
1129 * Unregister extra components. 1129 * Stop rfkill polling.
1130 */ 1130 */
1131 rt2x00rfkill_unregister(rt2x00dev); 1131 if (test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags))
1132 rt2x00rfkill_unregister(rt2x00dev);
1132 1133
1133 /* 1134 /*
1134 * Allow the HW to uninitialize. 1135 * Allow the HW to uninitialize.
@@ -1166,6 +1167,12 @@ static int rt2x00lib_initialize(struct rt2x00_dev *rt2x00dev)
1166 1167
1167 set_bit(DEVICE_STATE_INITIALIZED, &rt2x00dev->flags); 1168 set_bit(DEVICE_STATE_INITIALIZED, &rt2x00dev->flags);
1168 1169
1170 /*
1171 * Start rfkill polling.
1172 */
1173 if (test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags))
1174 rt2x00rfkill_register(rt2x00dev);
1175
1169 return 0; 1176 return 0;
1170} 1177}
1171 1178
@@ -1375,7 +1382,12 @@ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev)
1375 rt2x00link_register(rt2x00dev); 1382 rt2x00link_register(rt2x00dev);
1376 rt2x00leds_register(rt2x00dev); 1383 rt2x00leds_register(rt2x00dev);
1377 rt2x00debug_register(rt2x00dev); 1384 rt2x00debug_register(rt2x00dev);
1378 rt2x00rfkill_register(rt2x00dev); 1385
1386 /*
1387 * Start rfkill polling.
1388 */
1389 if (!test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags))
1390 rt2x00rfkill_register(rt2x00dev);
1379 1391
1380 return 0; 1392 return 0;
1381 1393
@@ -1391,6 +1403,12 @@ void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev)
1391 clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags); 1403 clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
1392 1404
1393 /* 1405 /*
1406 * Stop rfkill polling.
1407 */
1408 if (!test_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags))
1409 rt2x00rfkill_unregister(rt2x00dev);
1410
1411 /*
1394 * Disable radio. 1412 * Disable radio.
1395 */ 1413 */
1396 rt2x00lib_disable_radio(rt2x00dev); 1414 rt2x00lib_disable_radio(rt2x00dev);
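
The rt2x00dev.c hunks above make rfkill registration conditional on the new REQUIRE_DELAYED_RFKILL capability: devices that set it register rfkill at initialize time instead of probe time, and unregister it symmetrically. A toy model of that split, with invented probe()/initialize() helpers:

#include <stdbool.h>
#include <stdio.h>

/* Illustration of the REQUIRE_DELAYED_RFKILL split: registration either
 * happens at probe time (default) or is deferred to initialize time. */
struct dev_model {
	bool delayed_rfkill;
	bool rfkill_registered;
};

static void rfkill_register(struct dev_model *d) { d->rfkill_registered = true; }

static void probe(struct dev_model *d)
{
	if (!d->delayed_rfkill)
		rfkill_register(d);
}

static void initialize(struct dev_model *d)
{
	if (d->delayed_rfkill)
		rfkill_register(d);
}

int main(void)
{
	struct dev_model rt2500 = { .delayed_rfkill = true };

	probe(&rt2500);
	printf("after probe: %d\n", rt2500.rfkill_registered);
	initialize(&rt2500);
	printf("after initialize: %d\n", rt2500.rfkill_registered);
	return 0;
}
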
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index 212ac4842c16..004dff9b962d 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -487,6 +487,8 @@ int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
487 crypto.cipher = rt2x00crypto_key_to_cipher(key); 487 crypto.cipher = rt2x00crypto_key_to_cipher(key);
488 if (crypto.cipher == CIPHER_NONE) 488 if (crypto.cipher == CIPHER_NONE)
489 return -EOPNOTSUPP; 489 return -EOPNOTSUPP;
490 if (crypto.cipher == CIPHER_TKIP && rt2x00_is_usb(rt2x00dev))
491 return -EOPNOTSUPP;
490 492
491 crypto.cmd = cmd; 493 crypto.cmd = cmd;
492 494
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.h b/drivers/net/wireless/rt2x00/rt2x00usb.h
index e7bcf62347d5..831b65f93feb 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.h
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.h
@@ -93,6 +93,7 @@ enum rt2x00usb_mode_offset {
93 USB_MODE_SLEEP = 7, /* RT73USB */ 93 USB_MODE_SLEEP = 7, /* RT73USB */
94 USB_MODE_FIRMWARE = 8, /* RT73USB */ 94 USB_MODE_FIRMWARE = 8, /* RT73USB */
95 USB_MODE_WAKEUP = 9, /* RT73USB */ 95 USB_MODE_WAKEUP = 9, /* RT73USB */
96 USB_MODE_AUTORUN = 17, /* RT2800USB */
96}; 97};
97 98
98/** 99/**
diff --git a/drivers/net/xen-netback/common.h b/drivers/net/xen-netback/common.h
index 4dd7c4a1923b..2532ce85d718 100644
--- a/drivers/net/xen-netback/common.h
+++ b/drivers/net/xen-netback/common.h
@@ -222,6 +222,7 @@ struct xenvif {
222 222
223 /* Queues */ 223 /* Queues */
224 struct xenvif_queue *queues; 224 struct xenvif_queue *queues;
225 unsigned int num_queues; /* active queues, resource allocated */
225 226
226 /* Miscellaneous private stuff. */ 227 /* Miscellaneous private stuff. */
227 struct net_device *dev; 228 struct net_device *dev;
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 852da34b8961..9e97c7ca0ddd 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -137,32 +137,11 @@ static void xenvif_wake_queue_callback(unsigned long data)
137 } 137 }
138} 138}
139 139
140static u16 xenvif_select_queue(struct net_device *dev, struct sk_buff *skb,
141 void *accel_priv, select_queue_fallback_t fallback)
142{
143 unsigned int num_queues = dev->real_num_tx_queues;
144 u32 hash;
145 u16 queue_index;
146
147 /* First, check if there is only one queue to optimise the
148 * single-queue or old frontend scenario.
149 */
150 if (num_queues == 1) {
151 queue_index = 0;
152 } else {
153 /* Use skb_get_hash to obtain an L4 hash if available */
154 hash = skb_get_hash(skb);
155 queue_index = hash % num_queues;
156 }
157
158 return queue_index;
159}
160
161static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev) 140static int xenvif_start_xmit(struct sk_buff *skb, struct net_device *dev)
162{ 141{
163 struct xenvif *vif = netdev_priv(dev); 142 struct xenvif *vif = netdev_priv(dev);
164 struct xenvif_queue *queue = NULL; 143 struct xenvif_queue *queue = NULL;
165 unsigned int num_queues = dev->real_num_tx_queues; 144 unsigned int num_queues = vif->num_queues;
166 u16 index; 145 u16 index;
167 int min_slots_needed; 146 int min_slots_needed;
168 147
@@ -225,7 +204,7 @@ static struct net_device_stats *xenvif_get_stats(struct net_device *dev)
225{ 204{
226 struct xenvif *vif = netdev_priv(dev); 205 struct xenvif *vif = netdev_priv(dev);
227 struct xenvif_queue *queue = NULL; 206 struct xenvif_queue *queue = NULL;
228 unsigned int num_queues = dev->real_num_tx_queues; 207 unsigned int num_queues = vif->num_queues;
229 unsigned long rx_bytes = 0; 208 unsigned long rx_bytes = 0;
230 unsigned long rx_packets = 0; 209 unsigned long rx_packets = 0;
231 unsigned long tx_bytes = 0; 210 unsigned long tx_bytes = 0;
@@ -256,7 +235,7 @@ out:
256static void xenvif_up(struct xenvif *vif) 235static void xenvif_up(struct xenvif *vif)
257{ 236{
258 struct xenvif_queue *queue = NULL; 237 struct xenvif_queue *queue = NULL;
259 unsigned int num_queues = vif->dev->real_num_tx_queues; 238 unsigned int num_queues = vif->num_queues;
260 unsigned int queue_index; 239 unsigned int queue_index;
261 240
262 for (queue_index = 0; queue_index < num_queues; ++queue_index) { 241 for (queue_index = 0; queue_index < num_queues; ++queue_index) {
@@ -272,7 +251,7 @@ static void xenvif_up(struct xenvif *vif)
272static void xenvif_down(struct xenvif *vif) 251static void xenvif_down(struct xenvif *vif)
273{ 252{
274 struct xenvif_queue *queue = NULL; 253 struct xenvif_queue *queue = NULL;
275 unsigned int num_queues = vif->dev->real_num_tx_queues; 254 unsigned int num_queues = vif->num_queues;
276 unsigned int queue_index; 255 unsigned int queue_index;
277 256
278 for (queue_index = 0; queue_index < num_queues; ++queue_index) { 257 for (queue_index = 0; queue_index < num_queues; ++queue_index) {
@@ -379,7 +358,7 @@ static void xenvif_get_ethtool_stats(struct net_device *dev,
379 struct ethtool_stats *stats, u64 * data) 358 struct ethtool_stats *stats, u64 * data)
380{ 359{
381 struct xenvif *vif = netdev_priv(dev); 360 struct xenvif *vif = netdev_priv(dev);
382 unsigned int num_queues = dev->real_num_tx_queues; 361 unsigned int num_queues = vif->num_queues;
383 int i; 362 int i;
384 unsigned int queue_index; 363 unsigned int queue_index;
385 struct xenvif_stats *vif_stats; 364 struct xenvif_stats *vif_stats;
@@ -424,7 +403,6 @@ static const struct net_device_ops xenvif_netdev_ops = {
424 .ndo_fix_features = xenvif_fix_features, 403 .ndo_fix_features = xenvif_fix_features,
425 .ndo_set_mac_address = eth_mac_addr, 404 .ndo_set_mac_address = eth_mac_addr,
426 .ndo_validate_addr = eth_validate_addr, 405 .ndo_validate_addr = eth_validate_addr,
427 .ndo_select_queue = xenvif_select_queue,
428}; 406};
429 407
430struct xenvif *xenvif_alloc(struct device *parent, domid_t domid, 408struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
@@ -438,7 +416,7 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
438 snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle); 416 snprintf(name, IFNAMSIZ - 1, "vif%u.%u", domid, handle);
439 /* Allocate a netdev with the max. supported number of queues. 417 /* Allocate a netdev with the max. supported number of queues.
440 * When the guest selects the desired number, it will be updated 418 * When the guest selects the desired number, it will be updated
441 * via netif_set_real_num_tx_queues(). 419 * via netif_set_real_num_*_queues().
442 */ 420 */
443 dev = alloc_netdev_mq(sizeof(struct xenvif), name, ether_setup, 421 dev = alloc_netdev_mq(sizeof(struct xenvif), name, ether_setup,
444 xenvif_max_queues); 422 xenvif_max_queues);
@@ -458,11 +436,9 @@ struct xenvif *xenvif_alloc(struct device *parent, domid_t domid,
458 vif->dev = dev; 436 vif->dev = dev;
459 vif->disabled = false; 437 vif->disabled = false;
460 438
461 /* Start out with no queues. The call below does not require 439 /* Start out with no queues. */
462 * rtnl_lock() as it happens before register_netdev().
463 */
464 vif->queues = NULL; 440 vif->queues = NULL;
465 netif_set_real_num_tx_queues(dev, 0); 441 vif->num_queues = 0;
466 442
467 dev->netdev_ops = &xenvif_netdev_ops; 443 dev->netdev_ops = &xenvif_netdev_ops;
468 dev->hw_features = NETIF_F_SG | 444 dev->hw_features = NETIF_F_SG |
@@ -677,7 +653,7 @@ static void xenvif_wait_unmap_timeout(struct xenvif_queue *queue,
677void xenvif_disconnect(struct xenvif *vif) 653void xenvif_disconnect(struct xenvif *vif)
678{ 654{
679 struct xenvif_queue *queue = NULL; 655 struct xenvif_queue *queue = NULL;
680 unsigned int num_queues = vif->dev->real_num_tx_queues; 656 unsigned int num_queues = vif->num_queues;
681 unsigned int queue_index; 657 unsigned int queue_index;
682 658
683 if (netif_carrier_ok(vif->dev)) 659 if (netif_carrier_ok(vif->dev))
@@ -724,7 +700,7 @@ void xenvif_deinit_queue(struct xenvif_queue *queue)
724void xenvif_free(struct xenvif *vif) 700void xenvif_free(struct xenvif *vif)
725{ 701{
726 struct xenvif_queue *queue = NULL; 702 struct xenvif_queue *queue = NULL;
727 unsigned int num_queues = vif->dev->real_num_tx_queues; 703 unsigned int num_queues = vif->num_queues;
728 unsigned int queue_index; 704 unsigned int queue_index;
729 /* Here we want to avoid timeout messages if an skb can be legitimately 705 /* Here we want to avoid timeout messages if an skb can be legitimately
730 * stuck somewhere else. Realistically this could be an another vif's 706 * stuck somewhere else. Realistically this could be an another vif's
@@ -748,12 +724,9 @@ void xenvif_free(struct xenvif *vif)
748 xenvif_deinit_queue(queue); 724 xenvif_deinit_queue(queue);
749 } 725 }
750 726
751 /* Free the array of queues. The call below does not require
752 * rtnl_lock() because it happens after unregister_netdev().
753 */
754 netif_set_real_num_tx_queues(vif->dev, 0);
755 vfree(vif->queues); 727 vfree(vif->queues);
756 vif->queues = NULL; 728 vif->queues = NULL;
729 vif->num_queues = 0;
757 730
758 free_netdev(vif->dev); 731 free_netdev(vif->dev);
759 732
diff --git a/drivers/net/xen-netback/xenbus.c b/drivers/net/xen-netback/xenbus.c
index 96c63dc2509e..3d85acd84bad 100644
--- a/drivers/net/xen-netback/xenbus.c
+++ b/drivers/net/xen-netback/xenbus.c
@@ -527,9 +527,7 @@ static void connect(struct backend_info *be)
527 /* Use the number of queues requested by the frontend */ 527 /* Use the number of queues requested by the frontend */
528 be->vif->queues = vzalloc(requested_num_queues * 528 be->vif->queues = vzalloc(requested_num_queues *
529 sizeof(struct xenvif_queue)); 529 sizeof(struct xenvif_queue));
530 rtnl_lock(); 530 be->vif->num_queues = requested_num_queues;
531 netif_set_real_num_tx_queues(be->vif->dev, requested_num_queues);
532 rtnl_unlock();
533 531
534 for (queue_index = 0; queue_index < requested_num_queues; ++queue_index) { 532 for (queue_index = 0; queue_index < requested_num_queues; ++queue_index) {
535 queue = &be->vif->queues[queue_index]; 533 queue = &be->vif->queues[queue_index];
@@ -546,9 +544,7 @@ static void connect(struct backend_info *be)
546 * earlier queues can be destroyed using the regular 544 * earlier queues can be destroyed using the regular
547 * disconnect logic. 545 * disconnect logic.
548 */ 546 */
549 rtnl_lock(); 547 be->vif->num_queues = queue_index;
550 netif_set_real_num_tx_queues(be->vif->dev, queue_index);
551 rtnl_unlock();
552 goto err; 548 goto err;
553 } 549 }
554 550
@@ -561,13 +557,19 @@ static void connect(struct backend_info *be)
561 * and also clean up any previously initialised queues. 557 * and also clean up any previously initialised queues.
562 */ 558 */
563 xenvif_deinit_queue(queue); 559 xenvif_deinit_queue(queue);
564 rtnl_lock(); 560 be->vif->num_queues = queue_index;
565 netif_set_real_num_tx_queues(be->vif->dev, queue_index);
566 rtnl_unlock();
567 goto err; 561 goto err;
568 } 562 }
569 } 563 }
570 564
565 /* Initialisation completed, tell core driver the number of
566 * active queues.
567 */
568 rtnl_lock();
569 netif_set_real_num_tx_queues(be->vif->dev, requested_num_queues);
570 netif_set_real_num_rx_queues(be->vif->dev, requested_num_queues);
571 rtnl_unlock();
572
571 xenvif_carrier_on(be->vif); 573 xenvif_carrier_on(be->vif);
572 574
573 unregister_hotplug_status_watch(be); 575 unregister_hotplug_status_watch(be);
@@ -582,13 +584,11 @@ static void connect(struct backend_info *be)
582 return; 584 return;
583 585
584err: 586err:
585 if (be->vif->dev->real_num_tx_queues > 0) 587 if (be->vif->num_queues > 0)
586 xenvif_disconnect(be->vif); /* Clean up existing queues */ 588 xenvif_disconnect(be->vif); /* Clean up existing queues */
587 vfree(be->vif->queues); 589 vfree(be->vif->queues);
588 be->vif->queues = NULL; 590 be->vif->queues = NULL;
589 rtnl_lock(); 591 be->vif->num_queues = 0;
590 netif_set_real_num_tx_queues(be->vif->dev, 0);
591 rtnl_unlock();
592 return; 592 return;
593} 593}
594 594
@@ -596,7 +596,7 @@ err:
596static int connect_rings(struct backend_info *be, struct xenvif_queue *queue) 596static int connect_rings(struct backend_info *be, struct xenvif_queue *queue)
597{ 597{
598 struct xenbus_device *dev = be->dev; 598 struct xenbus_device *dev = be->dev;
599 unsigned int num_queues = queue->vif->dev->real_num_tx_queues; 599 unsigned int num_queues = queue->vif->num_queues;
600 unsigned long tx_ring_ref, rx_ring_ref; 600 unsigned long tx_ring_ref, rx_ring_ref;
601 unsigned int tx_evtchn, rx_evtchn; 601 unsigned int tx_evtchn, rx_evtchn;
602 int err; 602 int err;
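
The xen-netback hunks above stop abusing dev->real_num_tx_queues as the bookkeeping variable: queue setup progress is now tracked in vif->num_queues, and the networking core is told the final count only once every queue has been set up. A toy model of that connect path, with init_queue() invented to force a failure part-way through:

#include <stdio.h>

/* Toy model of the xen-netback change: setup records progress in a
 * driver-private counter; netif_set_real_num_*_queues() would run once,
 * after everything succeeded. */
struct vif_model {
	unsigned int num_queues;	/* queues fully set up so far */
};

static int init_queue(unsigned int i)
{
	return i < 3 ? 0 : -1;		/* pretend the 4th queue fails */
}

static int connect(struct vif_model *vif, unsigned int requested)
{
	for (unsigned int i = 0; i < requested; i++) {
		if (init_queue(i) < 0) {
			vif->num_queues = i;	/* tear down only what exists */
			return -1;
		}
	}
	vif->num_queues = requested;
	return 0;
}

int main(void)
{
	struct vif_model vif = { 0 };

	if (connect(&vif, 4) < 0)
		printf("connect failed, %u queues to tear down\n", vif.num_queues);
	return 0;
}
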
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 5a7872ac3566..055222bae6e4 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -1287,7 +1287,7 @@ static irqreturn_t xennet_rx_interrupt(int irq, void *dev_id)
1287 1287
1288 if (likely(netif_carrier_ok(dev) && 1288 if (likely(netif_carrier_ok(dev) &&
1289 RING_HAS_UNCONSUMED_RESPONSES(&queue->rx))) 1289 RING_HAS_UNCONSUMED_RESPONSES(&queue->rx)))
1290 napi_schedule(&queue->napi); 1290 napi_schedule(&queue->napi);
1291 1291
1292 return IRQ_HANDLED; 1292 return IRQ_HANDLED;
1293} 1293}
@@ -1437,16 +1437,12 @@ static void xennet_end_access(int ref, void *page)
1437static void xennet_disconnect_backend(struct netfront_info *info) 1437static void xennet_disconnect_backend(struct netfront_info *info)
1438{ 1438{
1439 unsigned int i = 0; 1439 unsigned int i = 0;
1440 struct netfront_queue *queue = NULL;
1441 unsigned int num_queues = info->netdev->real_num_tx_queues; 1440 unsigned int num_queues = info->netdev->real_num_tx_queues;
1442 1441
1442 netif_carrier_off(info->netdev);
1443
1443 for (i = 0; i < num_queues; ++i) { 1444 for (i = 0; i < num_queues; ++i) {
1444 /* Stop old i/f to prevent errors whilst we rebuild the state. */ 1445 struct netfront_queue *queue = &info->queues[i];
1445 spin_lock_bh(&queue->rx_lock);
1446 spin_lock_irq(&queue->tx_lock);
1447 netif_carrier_off(queue->info->netdev);
1448 spin_unlock_irq(&queue->tx_lock);
1449 spin_unlock_bh(&queue->rx_lock);
1450 1446
1451 if (queue->tx_irq && (queue->tx_irq == queue->rx_irq)) 1447 if (queue->tx_irq && (queue->tx_irq == queue->rx_irq))
1452 unbind_from_irqhandler(queue->tx_irq, queue); 1448 unbind_from_irqhandler(queue->tx_irq, queue);
@@ -1457,6 +1453,8 @@ static void xennet_disconnect_backend(struct netfront_info *info)
1457 queue->tx_evtchn = queue->rx_evtchn = 0; 1453 queue->tx_evtchn = queue->rx_evtchn = 0;
1458 queue->tx_irq = queue->rx_irq = 0; 1454 queue->tx_irq = queue->rx_irq = 0;
1459 1455
1456 napi_synchronize(&queue->napi);
1457
1460 /* End access and free the pages */ 1458 /* End access and free the pages */
1461 xennet_end_access(queue->tx_ring_ref, queue->tx.sring); 1459 xennet_end_access(queue->tx_ring_ref, queue->tx.sring);
1462 xennet_end_access(queue->rx_ring_ref, queue->rx.sring); 1460 xennet_end_access(queue->rx_ring_ref, queue->rx.sring);
@@ -1698,8 +1696,6 @@ static int xennet_init_queue(struct netfront_queue *queue)
1698 goto exit_free_tx; 1696 goto exit_free_tx;
1699 } 1697 }
1700 1698
1701 netif_napi_add(queue->info->netdev, &queue->napi, xennet_poll, 64);
1702
1703 return 0; 1699 return 0;
1704 1700
1705 exit_free_tx: 1701 exit_free_tx:
@@ -1790,6 +1786,70 @@ error:
1790 return err; 1786 return err;
1791} 1787}
1792 1788
1789static void xennet_destroy_queues(struct netfront_info *info)
1790{
1791 unsigned int i;
1792
1793 rtnl_lock();
1794
1795 for (i = 0; i < info->netdev->real_num_tx_queues; i++) {
1796 struct netfront_queue *queue = &info->queues[i];
1797
1798 if (netif_running(info->netdev))
1799 napi_disable(&queue->napi);
1800 netif_napi_del(&queue->napi);
1801 }
1802
1803 rtnl_unlock();
1804
1805 kfree(info->queues);
1806 info->queues = NULL;
1807}
1808
1809static int xennet_create_queues(struct netfront_info *info,
1810 unsigned int num_queues)
1811{
1812 unsigned int i;
1813 int ret;
1814
1815 info->queues = kcalloc(num_queues, sizeof(struct netfront_queue),
1816 GFP_KERNEL);
1817 if (!info->queues)
1818 return -ENOMEM;
1819
1820 rtnl_lock();
1821
1822 for (i = 0; i < num_queues; i++) {
1823 struct netfront_queue *queue = &info->queues[i];
1824
1825 queue->id = i;
1826 queue->info = info;
1827
1828 ret = xennet_init_queue(queue);
1829 if (ret < 0) {
1830 dev_warn(&info->netdev->dev, "only created %d queues\n",
1831 num_queues);
1832 num_queues = i;
1833 break;
1834 }
1835
1836 netif_napi_add(queue->info->netdev, &queue->napi,
1837 xennet_poll, 64);
1838 if (netif_running(info->netdev))
1839 napi_enable(&queue->napi);
1840 }
1841
1842 netif_set_real_num_tx_queues(info->netdev, num_queues);
1843
1844 rtnl_unlock();
1845
1846 if (num_queues == 0) {
1847 dev_err(&info->netdev->dev, "no queues\n");
1848 return -EINVAL;
1849 }
1850 return 0;
1851}
1852
1793/* Common code used when first setting up, and when resuming. */ 1853/* Common code used when first setting up, and when resuming. */
1794static int talk_to_netback(struct xenbus_device *dev, 1854static int talk_to_netback(struct xenbus_device *dev,
1795 struct netfront_info *info) 1855 struct netfront_info *info)
@@ -1826,42 +1886,20 @@ static int talk_to_netback(struct xenbus_device *dev,
1826 goto out; 1886 goto out;
1827 } 1887 }
1828 1888
1829 /* Allocate array of queues */ 1889 if (info->queues)
1830 info->queues = kcalloc(num_queues, sizeof(struct netfront_queue), GFP_KERNEL); 1890 xennet_destroy_queues(info);
1831 if (!info->queues) { 1891
1832 err = -ENOMEM; 1892 err = xennet_create_queues(info, num_queues);
1833 goto out; 1893 if (err < 0)
1834 } 1894 goto destroy_ring;
1835 rtnl_lock();
1836 netif_set_real_num_tx_queues(info->netdev, num_queues);
1837 rtnl_unlock();
1838 1895
1839 /* Create shared ring, alloc event channel -- for each queue */ 1896 /* Create shared ring, alloc event channel -- for each queue */
1840 for (i = 0; i < num_queues; ++i) { 1897 for (i = 0; i < num_queues; ++i) {
1841 queue = &info->queues[i]; 1898 queue = &info->queues[i];
1842 queue->id = i;
1843 queue->info = info;
1844 err = xennet_init_queue(queue);
1845 if (err) {
1846 /* xennet_init_queue() cleans up after itself on failure,
1847 * but we still have to clean up any previously initialised
1848 * queues. If i > 0, set num_queues to i, then goto
1849 * destroy_ring, which calls xennet_disconnect_backend()
1850 * to tidy up.
1851 */
1852 if (i > 0) {
1853 rtnl_lock();
1854 netif_set_real_num_tx_queues(info->netdev, i);
1855 rtnl_unlock();
1856 goto destroy_ring;
1857 } else {
1858 goto out;
1859 }
1860 }
1861 err = setup_netfront(dev, queue, feature_split_evtchn); 1899 err = setup_netfront(dev, queue, feature_split_evtchn);
1862 if (err) { 1900 if (err) {
1863 /* As for xennet_init_queue(), setup_netfront() will tidy 1901 /* setup_netfront() will tidy up the current
1864 * up the current queue on error, but we need to clean up 1902 * queue on error, but we need to clean up
1865 * those already allocated. 1903 * those already allocated.
1866 */ 1904 */
1867 if (i > 0) { 1905 if (i > 0) {
@@ -2005,13 +2043,15 @@ static int xennet_connect(struct net_device *dev)
2005 /* By now, the queue structures have been set up */ 2043 /* By now, the queue structures have been set up */
2006 for (j = 0; j < num_queues; ++j) { 2044 for (j = 0; j < num_queues; ++j) {
2007 queue = &np->queues[j]; 2045 queue = &np->queues[j];
2008 spin_lock_bh(&queue->rx_lock);
2009 spin_lock_irq(&queue->tx_lock);
2010 2046
2011 /* Step 1: Discard all pending TX packet fragments. */ 2047 /* Step 1: Discard all pending TX packet fragments. */
2048 spin_lock_irq(&queue->tx_lock);
2012 xennet_release_tx_bufs(queue); 2049 xennet_release_tx_bufs(queue);
2050 spin_unlock_irq(&queue->tx_lock);
2013 2051
2014 /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */ 2052 /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
2053 spin_lock_bh(&queue->rx_lock);
2054
2015 for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) { 2055 for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
2016 skb_frag_t *frag; 2056 skb_frag_t *frag;
2017 const struct page *page; 2057 const struct page *page;
@@ -2035,6 +2075,8 @@ static int xennet_connect(struct net_device *dev)
2035 } 2075 }
2036 2076
2037 queue->rx.req_prod_pvt = requeue_idx; 2077 queue->rx.req_prod_pvt = requeue_idx;
2078
2079 spin_unlock_bh(&queue->rx_lock);
2038 } 2080 }
2039 2081
2040 /* 2082 /*
@@ -2046,13 +2088,17 @@ static int xennet_connect(struct net_device *dev)
2046 netif_carrier_on(np->netdev); 2088 netif_carrier_on(np->netdev);
2047 for (j = 0; j < num_queues; ++j) { 2089 for (j = 0; j < num_queues; ++j) {
2048 queue = &np->queues[j]; 2090 queue = &np->queues[j];
2091
2049 notify_remote_via_irq(queue->tx_irq); 2092 notify_remote_via_irq(queue->tx_irq);
2050 if (queue->tx_irq != queue->rx_irq) 2093 if (queue->tx_irq != queue->rx_irq)
2051 notify_remote_via_irq(queue->rx_irq); 2094 notify_remote_via_irq(queue->rx_irq);
2052 xennet_tx_buf_gc(queue);
2053 xennet_alloc_rx_buffers(queue);
2054 2095
2096 spin_lock_irq(&queue->tx_lock);
2097 xennet_tx_buf_gc(queue);
2055 spin_unlock_irq(&queue->tx_lock); 2098 spin_unlock_irq(&queue->tx_lock);
2099
2100 spin_lock_bh(&queue->rx_lock);
2101 xennet_alloc_rx_buffers(queue);
2056 spin_unlock_bh(&queue->rx_lock); 2102 spin_unlock_bh(&queue->rx_lock);
2057 } 2103 }
2058 2104
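
The xen-netfront hunks above pull queue allocation into xennet_create_queues(), which stops at the first queue that fails to initialise and treats zero usable queues as a hard error. A condensed user-space model of that allocation pattern, with init_queue() invented to simulate a partial failure:

#include <stdio.h>
#include <stdlib.h>

struct queue { unsigned int id; };

static int init_queue(struct queue *q, unsigned int i)
{
	q->id = i;
	return i < 2 ? 0 : -1;		/* pretend queue 2 fails */
}

/* Toy model of xennet_create_queues(): allocate all queues up front,
 * keep only those that initialised, fail hard if none did. */
static int create_queues(struct queue **queues, unsigned int *num)
{
	*queues = calloc(*num, sizeof(**queues));
	if (!*queues)
		return -12;		/* -ENOMEM */

	for (unsigned int i = 0; i < *num; i++) {
		if (init_queue(&(*queues)[i], i) < 0) {
			*num = i;	/* keep only the queues that work */
			break;
		}
	}
	return *num ? 0 : -22;		/* -EINVAL when nothing came up */
}

int main(void)
{
	struct queue *queues;
	unsigned int num = 4;

	if (create_queues(&queues, &num) == 0)
		printf("running with %u queue(s)\n", num);
	free(queues);
	return 0;
}
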
diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c
index c4cddf0cd96d..b777d8f46bd5 100644
--- a/drivers/of/fdt.c
+++ b/drivers/of/fdt.c
@@ -880,6 +880,21 @@ void __init __weak early_init_dt_add_memory_arch(u64 base, u64 size)
880 const u64 phys_offset = __pa(PAGE_OFFSET); 880 const u64 phys_offset = __pa(PAGE_OFFSET);
881 base &= PAGE_MASK; 881 base &= PAGE_MASK;
882 size &= PAGE_MASK; 882 size &= PAGE_MASK;
883
884 if (sizeof(phys_addr_t) < sizeof(u64)) {
885 if (base > ULONG_MAX) {
886 pr_warning("Ignoring memory block 0x%llx - 0x%llx\n",
887 base, base + size);
888 return;
889 }
890
891 if (base + size > ULONG_MAX) {
892 pr_warning("Ignoring memory range 0x%lx - 0x%llx\n",
893 ULONG_MAX, base + size);
894 size = ULONG_MAX - base;
895 }
896 }
897
883 if (base + size < phys_offset) { 898 if (base + size < phys_offset) {
884 pr_warning("Ignoring memory block 0x%llx - 0x%llx\n", 899 pr_warning("Ignoring memory block 0x%llx - 0x%llx\n",
885 base, base + size); 900 base, base + size);
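
The of/fdt.c hunk above handles device-tree memory nodes that describe addresses a 32-bit phys_addr_t cannot reach: blocks starting above the limit are ignored and blocks crossing it are truncated. A standalone sketch of that clamp, mirroring the arithmetic in the hunk:

#include <stdint.h>
#include <stdio.h>

/* Model of the early_init_dt_add_memory_arch() check for a 32-bit
 * phys_addr_t: drop blocks starting above the addressable limit,
 * truncate blocks that cross it. */
static int clamp_region(uint64_t base, uint64_t *size, uint64_t addr_max)
{
	if (base > addr_max)
		return -1;			/* ignore the whole block */
	if (base + *size > addr_max)
		*size = addr_max - base;	/* keep the reachable part */
	return 0;
}

int main(void)
{
	uint64_t size = 0x100000000ULL;		/* 4 GiB starting at 2 GiB */

	if (clamp_region(0x80000000ULL, &size, 0xffffffffULL) == 0)
		printf("usable size: 0x%llx\n", (unsigned long long)size);
	return 0;
}
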
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index fb4a59830648..401b2453da45 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -182,40 +182,6 @@ int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
182} 182}
183EXPORT_SYMBOL(of_mdiobus_register); 183EXPORT_SYMBOL(of_mdiobus_register);
184 184
185/**
186 * of_mdiobus_link_phydev - Find a device node for a phy
187 * @mdio: pointer to mii_bus structure
188 * @phydev: phydev for which the of_node pointer should be set
189 *
190 * Walk the list of subnodes of a mdio bus and look for a node that matches the
191 * phy's address with its 'reg' property. If found, set the of_node pointer for
192 * the phy. This allows auto-probed pyh devices to be supplied with information
193 * passed in via DT.
194 */
195void of_mdiobus_link_phydev(struct mii_bus *mdio,
196 struct phy_device *phydev)
197{
198 struct device *dev = &phydev->dev;
199 struct device_node *child;
200
201 if (dev->of_node || !mdio->dev.of_node)
202 return;
203
204 for_each_available_child_of_node(mdio->dev.of_node, child) {
205 int addr;
206
207 addr = of_mdio_parse_addr(&mdio->dev, child);
208 if (addr < 0)
209 continue;
210
211 if (addr == phydev->addr) {
212 dev->of_node = child;
213 return;
214 }
215 }
216}
217EXPORT_SYMBOL(of_mdiobus_link_phydev);
218
219/* Helper function for of_phy_find_device */ 185/* Helper function for of_phy_find_device */
220static int of_phy_match(struct device *dev, void *phy_np) 186static int of_phy_match(struct device *dev, void *phy_np)
221{ 187{
@@ -323,11 +289,13 @@ int of_phy_register_fixed_link(struct device_node *np)
323 fixed_link_node = of_get_child_by_name(np, "fixed-link"); 289 fixed_link_node = of_get_child_by_name(np, "fixed-link");
324 if (fixed_link_node) { 290 if (fixed_link_node) {
325 status.link = 1; 291 status.link = 1;
326 status.duplex = of_property_read_bool(np, "full-duplex"); 292 status.duplex = of_property_read_bool(fixed_link_node,
293 "full-duplex");
327 if (of_property_read_u32(fixed_link_node, "speed", &status.speed)) 294 if (of_property_read_u32(fixed_link_node, "speed", &status.speed))
328 return -EINVAL; 295 return -EINVAL;
329 status.pause = of_property_read_bool(np, "pause"); 296 status.pause = of_property_read_bool(fixed_link_node, "pause");
330 status.asym_pause = of_property_read_bool(np, "asym-pause"); 297 status.asym_pause = of_property_read_bool(fixed_link_node,
298 "asym-pause");
331 of_node_put(fixed_link_node); 299 of_node_put(fixed_link_node);
332 return fixed_phy_register(PHY_POLL, &status, np); 300 return fixed_phy_register(PHY_POLL, &status, np);
333 } 301 }
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 63a54a340863..1c8592b0e146 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -3135,8 +3135,13 @@ static int pci_af_flr(struct pci_dev *dev, int probe)
3135 if (probe) 3135 if (probe)
3136 return 0; 3136 return 0;
3137 3137
3138 /* Wait for Transaction Pending bit clean */ 3138 /*
3139 if (pci_wait_for_pending(dev, pos + PCI_AF_STATUS, PCI_AF_STATUS_TP)) 3139 * Wait for Transaction Pending bit to clear. A word-aligned test
 3140 * is used, so we use the control offset rather than status and shift
3141 * the test bit to match.
3142 */
3143 if (pci_wait_for_pending(dev, pos + PCI_AF_CTRL,
3144 PCI_AF_STATUS_TP << 8))
3140 goto clear; 3145 goto clear;
3141 3146
3142 dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n"); 3147 dev_err(&dev->dev, "transaction is not cleared; proceeding with reset anyway\n");
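
The pci_af_flr() hunk above works around pci_wait_for_pending() doing word-aligned 16-bit reads: reading at the control offset returns the (little-endian) status byte in the high half, so the Transaction Pending bit has to be shifted left by 8 to line up. A sketch of just that bit arithmetic, using the standard AF capability offsets:

#include <stdio.h>

#define PCI_AF_CTRL	 4	/* word-aligned offset that gets read */
#define PCI_AF_STATUS	 5	/* status lands in the high byte of that word */
#define PCI_AF_STATUS_TP 0x01

/* The 16-bit read at PCI_AF_CTRL holds control in the low byte and
 * status in the high byte, hence the << 8 on the test bit. */
static int transaction_pending(unsigned short ctrl_word)
{
	return !!(ctrl_word & (PCI_AF_STATUS_TP << 8));
}

int main(void)
{
	unsigned short ctrl_word = 0x0100;	/* TP set in the status byte */

	printf("pending=%d\n", transaction_pending(ctrl_word));
	return 0;
}
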
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index 16a2f067c242..64b98d242ea6 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -112,6 +112,7 @@ config PHY_EXYNOS5250_SATA
112config PHY_SUN4I_USB 112config PHY_SUN4I_USB
113 tristate "Allwinner sunxi SoC USB PHY driver" 113 tristate "Allwinner sunxi SoC USB PHY driver"
114 depends on ARCH_SUNXI && HAS_IOMEM && OF 114 depends on ARCH_SUNXI && HAS_IOMEM && OF
115 depends on RESET_CONTROLLER
115 select GENERIC_PHY 116 select GENERIC_PHY
116 help 117 help
117 Enable this to support the transceiver that is part of Allwinner 118 Enable this to support the transceiver that is part of Allwinner
@@ -122,6 +123,7 @@ config PHY_SUN4I_USB
122 123
123config PHY_SAMSUNG_USB2 124config PHY_SAMSUNG_USB2
124 tristate "Samsung USB 2.0 PHY driver" 125 tristate "Samsung USB 2.0 PHY driver"
126 depends on HAS_IOMEM
125 select GENERIC_PHY 127 select GENERIC_PHY
126 select MFD_SYSCON 128 select MFD_SYSCON
127 help 129 help
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index c64a2f3b2d62..49c446530101 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -614,8 +614,9 @@ struct phy *phy_create(struct device *dev, const struct phy_ops *ops,
614 return phy; 614 return phy;
615 615
616put_dev: 616put_dev:
617 put_device(&phy->dev); 617 put_device(&phy->dev); /* calls phy_release() which frees resources */
618 ida_remove(&phy_ida, phy->id); 618 return ERR_PTR(ret);
619
619free_phy: 620free_phy:
620 kfree(phy); 621 kfree(phy);
621 return ERR_PTR(ret); 622 return ERR_PTR(ret);
@@ -799,7 +800,7 @@ static void phy_release(struct device *dev)
799 800
800 phy = to_phy(dev); 801 phy = to_phy(dev);
801 dev_vdbg(dev, "releasing '%s'\n", dev_name(dev)); 802 dev_vdbg(dev, "releasing '%s'\n", dev_name(dev));
802 ida_remove(&phy_ida, phy->id); 803 ida_simple_remove(&phy_ida, phy->id);
803 kfree(phy); 804 kfree(phy);
804} 805}
805 806
diff --git a/drivers/phy/phy-omap-usb2.c b/drivers/phy/phy-omap-usb2.c
index 7007c11fe07d..34b396146c8a 100644
--- a/drivers/phy/phy-omap-usb2.c
+++ b/drivers/phy/phy-omap-usb2.c
@@ -233,8 +233,8 @@ static int omap_usb2_probe(struct platform_device *pdev)
233 if (phy_data->flags & OMAP_USB2_CALIBRATE_FALSE_DISCONNECT) { 233 if (phy_data->flags & OMAP_USB2_CALIBRATE_FALSE_DISCONNECT) {
234 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 234 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
235 phy->phy_base = devm_ioremap_resource(&pdev->dev, res); 235 phy->phy_base = devm_ioremap_resource(&pdev->dev, res);
236 if (!phy->phy_base) 236 if (IS_ERR(phy->phy_base))
237 return -ENOMEM; 237 return PTR_ERR(phy->phy_base);
238 phy->flags |= OMAP_USB2_CALIBRATE_FALSE_DISCONNECT; 238 phy->flags |= OMAP_USB2_CALIBRATE_FALSE_DISCONNECT;
239 } 239 }
240 240
@@ -262,7 +262,6 @@ static int omap_usb2_probe(struct platform_device *pdev)
262 otg->phy = &phy->phy; 262 otg->phy = &phy->phy;
263 263
264 platform_set_drvdata(pdev, phy); 264 platform_set_drvdata(pdev, phy);
265 pm_runtime_enable(phy->dev);
266 265
267 generic_phy = devm_phy_create(phy->dev, &ops, NULL); 266 generic_phy = devm_phy_create(phy->dev, &ops, NULL);
268 if (IS_ERR(generic_phy)) 267 if (IS_ERR(generic_phy))
@@ -270,10 +269,13 @@ static int omap_usb2_probe(struct platform_device *pdev)
270 269
271 phy_set_drvdata(generic_phy, phy); 270 phy_set_drvdata(generic_phy, phy);
272 271
272 pm_runtime_enable(phy->dev);
273 phy_provider = devm_of_phy_provider_register(phy->dev, 273 phy_provider = devm_of_phy_provider_register(phy->dev,
274 of_phy_simple_xlate); 274 of_phy_simple_xlate);
275 if (IS_ERR(phy_provider)) 275 if (IS_ERR(phy_provider)) {
276 pm_runtime_disable(phy->dev);
276 return PTR_ERR(phy_provider); 277 return PTR_ERR(phy_provider);
278 }
277 279
278 phy->wkupclk = devm_clk_get(phy->dev, "wkupclk"); 280 phy->wkupclk = devm_clk_get(phy->dev, "wkupclk");
279 if (IS_ERR(phy->wkupclk)) { 281 if (IS_ERR(phy->wkupclk)) {
@@ -317,6 +319,7 @@ static int omap_usb2_remove(struct platform_device *pdev)
317 if (!IS_ERR(phy->optclk)) 319 if (!IS_ERR(phy->optclk))
318 clk_unprepare(phy->optclk); 320 clk_unprepare(phy->optclk);
319 usb_remove_phy(&phy->phy); 321 usb_remove_phy(&phy->phy);
322 pm_runtime_disable(phy->dev);
320 323
321 return 0; 324 return 0;
322} 325}
diff --git a/drivers/phy/phy-samsung-usb2.c b/drivers/phy/phy-samsung-usb2.c
index 8a8c6bc8709a..1e69a32c221d 100644
--- a/drivers/phy/phy-samsung-usb2.c
+++ b/drivers/phy/phy-samsung-usb2.c
@@ -107,6 +107,7 @@ static const struct of_device_id samsung_usb2_phy_of_match[] = {
107#endif 107#endif
108 { }, 108 { },
109}; 109};
110MODULE_DEVICE_TABLE(of, samsung_usb2_phy_of_match);
110 111
111static int samsung_usb2_phy_probe(struct platform_device *pdev) 112static int samsung_usb2_phy_probe(struct platform_device *pdev)
112{ 113{
diff --git a/drivers/pinctrl/berlin/berlin.c b/drivers/pinctrl/berlin/berlin.c
index edf5d2fd2b22..86db2235ab00 100644
--- a/drivers/pinctrl/berlin/berlin.c
+++ b/drivers/pinctrl/berlin/berlin.c
@@ -320,7 +320,7 @@ int berlin_pinctrl_probe(struct platform_device *pdev,
320 320
321 regmap = dev_get_regmap(&pdev->dev, NULL); 321 regmap = dev_get_regmap(&pdev->dev, NULL);
322 if (!regmap) 322 if (!regmap)
323 return PTR_ERR(regmap); 323 return -ENODEV;
324 324
325 pctrl = devm_kzalloc(dev, sizeof(*pctrl), GFP_KERNEL); 325 pctrl = devm_kzalloc(dev, sizeof(*pctrl), GFP_KERNEL);
326 if (!pctrl) 326 if (!pctrl)
diff --git a/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index f1ca75e6d7b1..5f38c7f67834 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -211,6 +211,10 @@ static int sunxi_pctrl_dt_node_to_map(struct pinctrl_dev *pctldev,
211 configlen++; 211 configlen++;
212 212
213 pinconfig = kzalloc(configlen * sizeof(*pinconfig), GFP_KERNEL); 213 pinconfig = kzalloc(configlen * sizeof(*pinconfig), GFP_KERNEL);
214 if (!pinconfig) {
215 kfree(*map);
216 return -ENOMEM;
217 }
214 218
215 if (!of_property_read_u32(node, "allwinner,drive", &val)) { 219 if (!of_property_read_u32(node, "allwinner,drive", &val)) {
216 u16 strength = (val + 1) * 10; 220 u16 strength = (val + 1) * 10;
diff --git a/drivers/ptp/Kconfig b/drivers/ptp/Kconfig
index 6aea373547f6..ee3de3421f2d 100644
--- a/drivers/ptp/Kconfig
+++ b/drivers/ptp/Kconfig
@@ -74,7 +74,7 @@ config DP83640_PHY
74 74
75config PTP_1588_CLOCK_PCH 75config PTP_1588_CLOCK_PCH
76 tristate "Intel PCH EG20T as PTP clock" 76 tristate "Intel PCH EG20T as PTP clock"
77 depends on X86 || COMPILE_TEST 77 depends on X86_32 || COMPILE_TEST
78 depends on HAS_IOMEM && NET 78 depends on HAS_IOMEM && NET
79 select PTP_1588_CLOCK 79 select PTP_1588_CLOCK
80 help 80 help
diff --git a/drivers/regulator/bcm590xx-regulator.c b/drivers/regulator/bcm590xx-regulator.c
index 57544e254a78..58ece59367ae 100644
--- a/drivers/regulator/bcm590xx-regulator.c
+++ b/drivers/regulator/bcm590xx-regulator.c
@@ -119,6 +119,10 @@ static const unsigned int ldo_c_table[] = {
119 2900000, 3000000, 3300000, 119 2900000, 3000000, 3300000,
120}; 120};
121 121
122static const unsigned int ldo_vbus[] = {
123 5000000,
124};
125
122/* DCDC group CSR: supported voltages in microvolts */ 126/* DCDC group CSR: supported voltages in microvolts */
123static const struct regulator_linear_range dcdc_csr_ranges[] = { 127static const struct regulator_linear_range dcdc_csr_ranges[] = {
124 REGULATOR_LINEAR_RANGE(860000, 2, 50, 10000), 128 REGULATOR_LINEAR_RANGE(860000, 2, 50, 10000),
@@ -192,6 +196,7 @@ static struct bcm590xx_info bcm590xx_regs[] = {
192 BCM590XX_REG_TABLE(gpldo4, ldo_a_table), 196 BCM590XX_REG_TABLE(gpldo4, ldo_a_table),
193 BCM590XX_REG_TABLE(gpldo5, ldo_a_table), 197 BCM590XX_REG_TABLE(gpldo5, ldo_a_table),
194 BCM590XX_REG_TABLE(gpldo6, ldo_a_table), 198 BCM590XX_REG_TABLE(gpldo6, ldo_a_table),
199 BCM590XX_REG_TABLE(vbus, ldo_vbus),
195}; 200};
196 201
197struct bcm590xx_reg { 202struct bcm590xx_reg {
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
index b982f0ff4e01..93b4ad842901 100644
--- a/drivers/regulator/palmas-regulator.c
+++ b/drivers/regulator/palmas-regulator.c
@@ -325,6 +325,10 @@ static int palmas_set_mode_smps(struct regulator_dev *dev, unsigned int mode)
325 if (rail_enable) 325 if (rail_enable)
326 palmas_smps_write(pmic->palmas, 326 palmas_smps_write(pmic->palmas,
327 palmas_regs_info[id].ctrl_addr, reg); 327 palmas_regs_info[id].ctrl_addr, reg);
328
329 /* Switch the enable value to ensure this is used for enable */
330 pmic->desc[id].enable_val = pmic->current_reg_mode[id];
331
328 return 0; 332 return 0;
329} 333}
330 334
@@ -964,6 +968,14 @@ static int palmas_regulators_probe(struct platform_device *pdev)
964 return ret; 968 return ret;
965 pmic->current_reg_mode[id] = reg & 969 pmic->current_reg_mode[id] = reg &
966 PALMAS_SMPS12_CTRL_MODE_ACTIVE_MASK; 970 PALMAS_SMPS12_CTRL_MODE_ACTIVE_MASK;
971
972 pmic->desc[id].enable_reg =
973 PALMAS_BASE_TO_REG(PALMAS_SMPS_BASE,
974 palmas_regs_info[id].ctrl_addr);
975 pmic->desc[id].enable_mask =
976 PALMAS_SMPS12_CTRL_MODE_ACTIVE_MASK;
977 /* set_mode overrides this value */
978 pmic->desc[id].enable_val = SMPS_CTRL_MODE_ON;
967 } 979 }
968 980
969 pmic->desc[id].type = REGULATOR_VOLTAGE; 981 pmic->desc[id].type = REGULATOR_VOLTAGE;
diff --git a/drivers/regulator/tps65218-regulator.c b/drivers/regulator/tps65218-regulator.c
index 69b4b7750410..9effe48c605e 100644
--- a/drivers/regulator/tps65218-regulator.c
+++ b/drivers/regulator/tps65218-regulator.c
@@ -209,7 +209,7 @@ static const struct regulator_desc regulators[] = {
209 1, -1, -1, TPS65218_REG_ENABLE1, 209 1, -1, -1, TPS65218_REG_ENABLE1,
210 TPS65218_ENABLE1_DC6_EN, NULL, NULL, 0, 0), 210 TPS65218_ENABLE1_DC6_EN, NULL, NULL, 0, 0),
211 TPS65218_REGULATOR("LDO1", TPS65218_LDO_1, tps65218_ldo1_dcdc34_ops, 64, 211 TPS65218_REGULATOR("LDO1", TPS65218_LDO_1, tps65218_ldo1_dcdc34_ops, 64,
212 TPS65218_REG_CONTROL_DCDC4, 212 TPS65218_REG_CONTROL_LDO1,
213 TPS65218_CONTROL_LDO1_MASK, TPS65218_REG_ENABLE2, 213 TPS65218_CONTROL_LDO1_MASK, TPS65218_REG_ENABLE2,
214 TPS65218_ENABLE2_LDO1_EN, NULL, ldo1_dcdc3_ranges, 214 TPS65218_ENABLE2_LDO1_EN, NULL, ldo1_dcdc3_ranges,
215 2, 0), 215 2, 0),
@@ -240,6 +240,7 @@ static int tps65218_regulator_probe(struct platform_device *pdev)
240 config.init_data = init_data; 240 config.init_data = init_data;
241 config.driver_data = tps; 241 config.driver_data = tps;
242 config.regmap = tps->regmap; 242 config.regmap = tps->regmap;
243 config.of_node = pdev->dev.of_node;
243 244
244 rdev = devm_regulator_register(&pdev->dev, &regulators[id], &config); 245 rdev = devm_regulator_register(&pdev->dev, &regulators[id], &config);
245 if (IS_ERR(rdev)) { 246 if (IS_ERR(rdev)) {
diff --git a/drivers/scsi/be2iscsi/be_main.c b/drivers/scsi/be2iscsi/be_main.c
index 554349029628..56467df3d6de 100644
--- a/drivers/scsi/be2iscsi/be_main.c
+++ b/drivers/scsi/be2iscsi/be_main.c
@@ -4198,6 +4198,8 @@ static int hba_setup_cid_tbls(struct beiscsi_hba *phba)
4198 kfree(phba->ep_array); 4198 kfree(phba->ep_array);
4199 phba->ep_array = NULL; 4199 phba->ep_array = NULL;
4200 ret = -ENOMEM; 4200 ret = -ENOMEM;
4201
4202 goto free_memory;
4201 } 4203 }
4202 4204
4203 for (i = 0; i < phba->params.cxns_per_ctrl; i++) { 4205 for (i = 0; i < phba->params.cxns_per_ctrl; i++) {
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 6045aa78986a..07934b0b9ee1 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -1008,10 +1008,8 @@ int mgmt_set_ip(struct beiscsi_hba *phba,
1008 BE2_IPV6 : BE2_IPV4 ; 1008 BE2_IPV6 : BE2_IPV4 ;
1009 1009
1010 rc = mgmt_get_if_info(phba, ip_type, &if_info); 1010 rc = mgmt_get_if_info(phba, ip_type, &if_info);
1011 if (rc) { 1011 if (rc)
1012 kfree(if_info);
1013 return rc; 1012 return rc;
1014 }
1015 1013
1016 if (boot_proto == ISCSI_BOOTPROTO_DHCP) { 1014 if (boot_proto == ISCSI_BOOTPROTO_DHCP) {
1017 if (if_info->dhcp_state) { 1015 if (if_info->dhcp_state) {
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index f54843023466..785d0d71781e 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -516,23 +516,17 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
516 skb_pull(skb, sizeof(struct fcoe_hdr)); 516 skb_pull(skb, sizeof(struct fcoe_hdr));
517 fr_len = skb->len - sizeof(struct fcoe_crc_eof); 517 fr_len = skb->len - sizeof(struct fcoe_crc_eof);
518 518
519 stats = per_cpu_ptr(lport->stats, get_cpu());
520 stats->RxFrames++;
521 stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
522
523 fp = (struct fc_frame *)skb; 519 fp = (struct fc_frame *)skb;
524 fc_frame_init(fp); 520 fc_frame_init(fp);
525 fr_dev(fp) = lport; 521 fr_dev(fp) = lport;
526 fr_sof(fp) = hp->fcoe_sof; 522 fr_sof(fp) = hp->fcoe_sof;
527 if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) { 523 if (skb_copy_bits(skb, fr_len, &crc_eof, sizeof(crc_eof))) {
528 put_cpu();
529 kfree_skb(skb); 524 kfree_skb(skb);
530 return; 525 return;
531 } 526 }
532 fr_eof(fp) = crc_eof.fcoe_eof; 527 fr_eof(fp) = crc_eof.fcoe_eof;
533 fr_crc(fp) = crc_eof.fcoe_crc32; 528 fr_crc(fp) = crc_eof.fcoe_crc32;
534 if (pskb_trim(skb, fr_len)) { 529 if (pskb_trim(skb, fr_len)) {
535 put_cpu();
536 kfree_skb(skb); 530 kfree_skb(skb);
537 return; 531 return;
538 } 532 }
@@ -544,7 +538,6 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
544 port = lport_priv(vn_port); 538 port = lport_priv(vn_port);
545 if (!ether_addr_equal(port->data_src_addr, dest_mac)) { 539 if (!ether_addr_equal(port->data_src_addr, dest_mac)) {
546 BNX2FC_HBA_DBG(lport, "fpma mismatch\n"); 540 BNX2FC_HBA_DBG(lport, "fpma mismatch\n");
547 put_cpu();
548 kfree_skb(skb); 541 kfree_skb(skb);
549 return; 542 return;
550 } 543 }
@@ -552,7 +545,6 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
552 if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA && 545 if (fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA &&
553 fh->fh_type == FC_TYPE_FCP) { 546 fh->fh_type == FC_TYPE_FCP) {
554 /* Drop FCP data. We don't do this in the L2 path */ 547 /* Drop FCP data. We don't do this in the L2 path */
555 put_cpu();
556 kfree_skb(skb); 548 kfree_skb(skb);
557 return; 549 return;
558 } 550 }
@@ -562,7 +554,6 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
562 case ELS_LOGO: 554 case ELS_LOGO:
563 if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI) { 555 if (ntoh24(fh->fh_s_id) == FC_FID_FLOGI) {
564 /* drop non-FIP LOGO */ 556 /* drop non-FIP LOGO */
565 put_cpu();
566 kfree_skb(skb); 557 kfree_skb(skb);
567 return; 558 return;
568 } 559 }
@@ -572,22 +563,23 @@ static void bnx2fc_recv_frame(struct sk_buff *skb)
572 563
573 if (fh->fh_r_ctl == FC_RCTL_BA_ABTS) { 564 if (fh->fh_r_ctl == FC_RCTL_BA_ABTS) {
574 /* Drop incoming ABTS */ 565 /* Drop incoming ABTS */
575 put_cpu();
576 kfree_skb(skb); 566 kfree_skb(skb);
577 return; 567 return;
578 } 568 }
579 569
570 stats = per_cpu_ptr(lport->stats, smp_processor_id());
571 stats->RxFrames++;
572 stats->RxWords += fr_len / FCOE_WORD_TO_BYTE;
573
580 if (le32_to_cpu(fr_crc(fp)) != 574 if (le32_to_cpu(fr_crc(fp)) !=
581 ~crc32(~0, skb->data, fr_len)) { 575 ~crc32(~0, skb->data, fr_len)) {
582 if (stats->InvalidCRCCount < 5) 576 if (stats->InvalidCRCCount < 5)
583 printk(KERN_WARNING PFX "dropping frame with " 577 printk(KERN_WARNING PFX "dropping frame with "
584 "CRC error\n"); 578 "CRC error\n");
585 stats->InvalidCRCCount++; 579 stats->InvalidCRCCount++;
586 put_cpu();
587 kfree_skb(skb); 580 kfree_skb(skb);
588 return; 581 return;
589 } 582 }
590 put_cpu();
591 fc_exch_recv(lport, fp); 583 fc_exch_recv(lport, fp);
592} 584}
593 585
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 32a5e0a2a669..7bc47fc7c686 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -282,6 +282,8 @@ struct bnx2fc_cmd_mgr *bnx2fc_cmd_mgr_alloc(struct bnx2fc_hba *hba)
282 arr_sz, GFP_KERNEL); 282 arr_sz, GFP_KERNEL);
283 if (!cmgr->free_list_lock) { 283 if (!cmgr->free_list_lock) {
284 printk(KERN_ERR PFX "failed to alloc free_list_lock\n"); 284 printk(KERN_ERR PFX "failed to alloc free_list_lock\n");
285 kfree(cmgr->free_list);
286 cmgr->free_list = NULL;
285 goto mem_err; 287 goto mem_err;
286 } 288 }
287 289
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index 2ebfb2bb0f42..7b23f21f22f1 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -185,6 +185,11 @@ static struct viosrp_crq *crq_queue_next_crq(struct crq_queue *queue)
185 if (crq->valid & 0x80) { 185 if (crq->valid & 0x80) {
186 if (++queue->cur == queue->size) 186 if (++queue->cur == queue->size)
187 queue->cur = 0; 187 queue->cur = 0;
188
189 /* Ensure the read of the valid bit occurs before reading any
190 * other bits of the CRQ entry
191 */
192 rmb();
188 } else 193 } else
189 crq = NULL; 194 crq = NULL;
190 spin_unlock_irqrestore(&queue->lock, flags); 195 spin_unlock_irqrestore(&queue->lock, flags);
@@ -203,6 +208,11 @@ static int ibmvscsi_send_crq(struct ibmvscsi_host_data *hostdata,
203{ 208{
204 struct vio_dev *vdev = to_vio_dev(hostdata->dev); 209 struct vio_dev *vdev = to_vio_dev(hostdata->dev);
205 210
211 /*
212 * Ensure the command buffer is flushed to memory before handing it
213 * over to the VIOS to prevent it from fetching any stale data.
214 */
215 mb();
206 return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2); 216 return plpar_hcall_norets(H_SEND_CRQ, vdev->unit_address, word1, word2);
207} 217}
208 218
@@ -797,7 +807,8 @@ static void purge_requests(struct ibmvscsi_host_data *hostdata, int error_code)
797 evt->hostdata->dev); 807 evt->hostdata->dev);
798 if (evt->cmnd_done) 808 if (evt->cmnd_done)
799 evt->cmnd_done(evt->cmnd); 809 evt->cmnd_done(evt->cmnd);
800 } else if (evt->done) 810 } else if (evt->done && evt->crq.format != VIOSRP_MAD_FORMAT &&
811 evt->iu.srp.login_req.opcode != SRP_LOGIN_REQ)
801 evt->done(evt); 812 evt->done(evt);
802 free_event_struct(&evt->hostdata->pool, evt); 813 free_event_struct(&evt->hostdata->pool, evt);
803 spin_lock_irqsave(hostdata->host->host_lock, flags); 814 spin_lock_irqsave(hostdata->host->host_lock, flags);
diff --git a/drivers/scsi/pm8001/pm8001_init.c b/drivers/scsi/pm8001/pm8001_init.c
index c4f31b21feb8..e90c89f1d480 100644
--- a/drivers/scsi/pm8001/pm8001_init.c
+++ b/drivers/scsi/pm8001/pm8001_init.c
@@ -677,7 +677,7 @@ static void pm8001_init_sas_add(struct pm8001_hba_info *pm8001_ha)
677 * pm8001_get_phy_settings_info : Read phy setting values. 677 * pm8001_get_phy_settings_info : Read phy setting values.
678 * @pm8001_ha : our hba. 678 * @pm8001_ha : our hba.
679 */ 679 */
680void pm8001_get_phy_settings_info(struct pm8001_hba_info *pm8001_ha) 680static int pm8001_get_phy_settings_info(struct pm8001_hba_info *pm8001_ha)
681{ 681{
682 682
683#ifdef PM8001_READ_VPD 683#ifdef PM8001_READ_VPD
@@ -691,11 +691,15 @@ void pm8001_get_phy_settings_info(struct pm8001_hba_info *pm8001_ha)
691 payload.offset = 0; 691 payload.offset = 0;
692 payload.length = 4096; 692 payload.length = 4096;
693 payload.func_specific = kzalloc(4096, GFP_KERNEL); 693 payload.func_specific = kzalloc(4096, GFP_KERNEL);
694 if (!payload.func_specific)
695 return -ENOMEM;
694 /* Read phy setting values from flash */ 696 /* Read phy setting values from flash */
695 PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload); 697 PM8001_CHIP_DISP->get_nvmd_req(pm8001_ha, &payload);
696 wait_for_completion(&completion); 698 wait_for_completion(&completion);
697 pm8001_set_phy_profile(pm8001_ha, sizeof(u8), payload.func_specific); 699 pm8001_set_phy_profile(pm8001_ha, sizeof(u8), payload.func_specific);
700 kfree(payload.func_specific);
698#endif 701#endif
702 return 0;
699} 703}
700 704
701#ifdef PM8001_USE_MSIX 705#ifdef PM8001_USE_MSIX
@@ -879,8 +883,11 @@ static int pm8001_pci_probe(struct pci_dev *pdev,
879 pm8001_init_sas_add(pm8001_ha); 883 pm8001_init_sas_add(pm8001_ha);
880 /* phy setting support for motherboard controller */ 884 /* phy setting support for motherboard controller */
881 if (pdev->subsystem_vendor != PCI_VENDOR_ID_ADAPTEC2 && 885 if (pdev->subsystem_vendor != PCI_VENDOR_ID_ADAPTEC2 &&
882 pdev->subsystem_vendor != 0) 886 pdev->subsystem_vendor != 0) {
883 pm8001_get_phy_settings_info(pm8001_ha); 887 rc = pm8001_get_phy_settings_info(pm8001_ha);
888 if (rc)
889 goto err_out_shost;
890 }
884 pm8001_post_sas_ha_init(shost, chip); 891 pm8001_post_sas_ha_init(shost, chip);
885 rc = sas_register_ha(SHOST_TO_SAS_HA(shost)); 892 rc = sas_register_ha(SHOST_TO_SAS_HA(shost));
886 if (rc) 893 if (rc)
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 4b188b0164e9..e632e14180cf 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -1128,7 +1128,7 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
1128 ctio->u.status1.flags = 1128 ctio->u.status1.flags =
1129 __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | 1129 __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
1130 CTIO7_FLAGS_TERMINATE); 1130 CTIO7_FLAGS_TERMINATE);
1131 ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id; 1131 ctio->u.status1.ox_id = cpu_to_le16(entry->fcp_hdr_le.ox_id);
1132 1132
1133 qla2x00_start_iocbs(vha, vha->req); 1133 qla2x00_start_iocbs(vha, vha->req);
1134 1134
@@ -1262,6 +1262,7 @@ static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
1262{ 1262{
1263 struct atio_from_isp *atio = &mcmd->orig_iocb.atio; 1263 struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
1264 struct ctio7_to_24xx *ctio; 1264 struct ctio7_to_24xx *ctio;
1265 uint16_t temp;
1265 1266
1266 ql_dbg(ql_dbg_tgt, ha, 0xe008, 1267 ql_dbg(ql_dbg_tgt, ha, 0xe008,
1267 "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n", 1268 "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x\n",
@@ -1292,7 +1293,8 @@ static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
1292 ctio->u.status1.flags = (atio->u.isp24.attr << 9) | 1293 ctio->u.status1.flags = (atio->u.isp24.attr << 9) |
1293 __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | 1294 __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
1294 CTIO7_FLAGS_SEND_STATUS); 1295 CTIO7_FLAGS_SEND_STATUS);
1295 ctio->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id); 1296 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
1297 ctio->u.status1.ox_id = cpu_to_le16(temp);
1296 ctio->u.status1.scsi_status = 1298 ctio->u.status1.scsi_status =
1297 __constant_cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID); 1299 __constant_cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
1298 ctio->u.status1.response_len = __constant_cpu_to_le16(8); 1300 ctio->u.status1.response_len = __constant_cpu_to_le16(8);
@@ -1513,6 +1515,7 @@ static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
1513 struct ctio7_to_24xx *pkt; 1515 struct ctio7_to_24xx *pkt;
1514 struct qla_hw_data *ha = vha->hw; 1516 struct qla_hw_data *ha = vha->hw;
1515 struct atio_from_isp *atio = &prm->cmd->atio; 1517 struct atio_from_isp *atio = &prm->cmd->atio;
1518 uint16_t temp;
1516 1519
1517 pkt = (struct ctio7_to_24xx *)vha->req->ring_ptr; 1520 pkt = (struct ctio7_to_24xx *)vha->req->ring_ptr;
1518 prm->pkt = pkt; 1521 prm->pkt = pkt;
@@ -1541,13 +1544,13 @@ static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
1541 pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0]; 1544 pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
1542 pkt->exchange_addr = atio->u.isp24.exchange_addr; 1545 pkt->exchange_addr = atio->u.isp24.exchange_addr;
1543 pkt->u.status0.flags |= (atio->u.isp24.attr << 9); 1546 pkt->u.status0.flags |= (atio->u.isp24.attr << 9);
1544 pkt->u.status0.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id); 1547 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
1548 pkt->u.status0.ox_id = cpu_to_le16(temp);
1545 pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset); 1549 pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);
1546 1550
1547 ql_dbg(ql_dbg_tgt, vha, 0xe00c, 1551 ql_dbg(ql_dbg_tgt, vha, 0xe00c,
1548 "qla_target(%d): handle(cmd) -> %08x, timeout %d, ox_id %#x\n", 1552 "qla_target(%d): handle(cmd) -> %08x, timeout %d, ox_id %#x\n",
1549 vha->vp_idx, pkt->handle, QLA_TGT_TIMEOUT, 1553 vha->vp_idx, pkt->handle, QLA_TGT_TIMEOUT, temp);
1550 le16_to_cpu(pkt->u.status0.ox_id));
1551 return 0; 1554 return 0;
1552} 1555}
1553 1556
@@ -2619,6 +2622,7 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
2619 struct qla_hw_data *ha = vha->hw; 2622 struct qla_hw_data *ha = vha->hw;
2620 request_t *pkt; 2623 request_t *pkt;
2621 int ret = 0; 2624 int ret = 0;
2625 uint16_t temp;
2622 2626
2623 ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha); 2627 ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha);
2624 2628
@@ -2655,7 +2659,8 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
2655 ctio24->u.status1.flags = (atio->u.isp24.attr << 9) | 2659 ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
2656 __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 | 2660 __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
2657 CTIO7_FLAGS_TERMINATE); 2661 CTIO7_FLAGS_TERMINATE);
2658 ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id); 2662 temp = be16_to_cpu(atio->u.isp24.fcp_hdr.ox_id);
2663 ctio24->u.status1.ox_id = cpu_to_le16(temp);
2659 2664
2660 /* Most likely, it isn't needed */ 2665 /* Most likely, it isn't needed */
2661 ctio24->u.status1.residual = get_unaligned((uint32_t *) 2666 ctio24->u.status1.residual = get_unaligned((uint32_t *)
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index e0a58fd13f66..d1d24fb0160a 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -443,7 +443,7 @@ struct ctio7_to_24xx {
443 uint16_t reserved1; 443 uint16_t reserved1;
444 __le16 flags; 444 __le16 flags;
445 uint32_t residual; 445 uint32_t residual;
446 uint16_t ox_id; 446 __le16 ox_id;
447 uint16_t scsi_status; 447 uint16_t scsi_status;
448 uint32_t relative_offset; 448 uint32_t relative_offset;
449 uint32_t reserved2; 449 uint32_t reserved2;
@@ -458,7 +458,7 @@ struct ctio7_to_24xx {
458 uint16_t sense_length; 458 uint16_t sense_length;
459 uint16_t flags; 459 uint16_t flags;
460 uint32_t residual; 460 uint32_t residual;
461 uint16_t ox_id; 461 __le16 ox_id;
462 uint16_t scsi_status; 462 uint16_t scsi_status;
463 uint16_t response_len; 463 uint16_t response_len;
464 uint16_t reserved; 464 uint16_t reserved;
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index cbe38e5e7955..7e957918f33f 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -131,7 +131,7 @@ scmd_eh_abort_handler(struct work_struct *work)
131 "aborting command %p\n", scmd)); 131 "aborting command %p\n", scmd));
132 rtn = scsi_try_to_abort_cmd(sdev->host->hostt, scmd); 132 rtn = scsi_try_to_abort_cmd(sdev->host->hostt, scmd);
133 if (rtn == SUCCESS) { 133 if (rtn == SUCCESS) {
134 scmd->result |= DID_TIME_OUT << 16; 134 set_host_byte(scmd, DID_TIME_OUT);
135 if (scsi_host_eh_past_deadline(sdev->host)) { 135 if (scsi_host_eh_past_deadline(sdev->host)) {
136 SCSI_LOG_ERROR_RECOVERY(3, 136 SCSI_LOG_ERROR_RECOVERY(3,
137 scmd_printk(KERN_INFO, scmd, 137 scmd_printk(KERN_INFO, scmd,
@@ -167,7 +167,7 @@ scmd_eh_abort_handler(struct work_struct *work)
167 scmd_printk(KERN_WARNING, scmd, 167 scmd_printk(KERN_WARNING, scmd,
168 "scmd %p terminate " 168 "scmd %p terminate "
169 "aborted command\n", scmd)); 169 "aborted command\n", scmd));
170 scmd->result |= DID_TIME_OUT << 16; 170 set_host_byte(scmd, DID_TIME_OUT);
171 scsi_finish_command(scmd); 171 scsi_finish_command(scmd);
172 } 172 }
173} 173}
@@ -287,15 +287,15 @@ enum blk_eh_timer_return scsi_times_out(struct request *req)
287 else if (host->hostt->eh_timed_out) 287 else if (host->hostt->eh_timed_out)
288 rtn = host->hostt->eh_timed_out(scmd); 288 rtn = host->hostt->eh_timed_out(scmd);
289 289
290 if (rtn == BLK_EH_NOT_HANDLED && !host->hostt->no_async_abort) 290 if (rtn == BLK_EH_NOT_HANDLED) {
291 if (scsi_abort_command(scmd) == SUCCESS) 291 if (!host->hostt->no_async_abort &&
292 scsi_abort_command(scmd) == SUCCESS)
292 return BLK_EH_NOT_HANDLED; 293 return BLK_EH_NOT_HANDLED;
293 294
294 scmd->result |= DID_TIME_OUT << 16; 295 set_host_byte(scmd, DID_TIME_OUT);
295 296 if (!scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))
296 if (unlikely(rtn == BLK_EH_NOT_HANDLED && 297 rtn = BLK_EH_HANDLED;
297 !scsi_eh_scmd_add(scmd, SCSI_EH_CANCEL_CMD))) 298 }
298 rtn = BLK_EH_HANDLED;
299 299
300 return rtn; 300 return rtn;
301} 301}
@@ -1777,7 +1777,7 @@ int scsi_decide_disposition(struct scsi_cmnd *scmd)
1777 break; 1777 break;
1778 case DID_ABORT: 1778 case DID_ABORT:
1779 if (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED) { 1779 if (scmd->eh_eflags & SCSI_EH_ABORT_SCHEDULED) {
1780 scmd->result |= DID_TIME_OUT << 16; 1780 set_host_byte(scmd, DID_TIME_OUT);
1781 return SUCCESS; 1781 return SUCCESS;
1782 } 1782 }
1783 case DID_NO_CONNECT: 1783 case DID_NO_CONNECT:
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index f80908f74ca9..521f5838594b 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -2549,6 +2549,7 @@ fc_rport_final_delete(struct work_struct *work)
2549 fc_flush_devloss(shost); 2549 fc_flush_devloss(shost);
2550 if (!cancel_delayed_work(&rport->dev_loss_work)) 2550 if (!cancel_delayed_work(&rport->dev_loss_work))
2551 fc_flush_devloss(shost); 2551 fc_flush_devloss(shost);
2552 cancel_work_sync(&rport->scan_work);
2552 spin_lock_irqsave(shost->host_lock, flags); 2553 spin_lock_irqsave(shost->host_lock, flags);
2553 rport->flags &= ~FC_RPORT_DEVLOSS_PENDING; 2554 rport->flags &= ~FC_RPORT_DEVLOSS_PENDING;
2554 } 2555 }
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index e9689d57ccb6..6825eda1114a 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2441,7 +2441,10 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer)
2441 } 2441 }
2442 2442
2443 sdkp->DPOFUA = (data.device_specific & 0x10) != 0; 2443 sdkp->DPOFUA = (data.device_specific & 0x10) != 0;
2444 if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) { 2444 if (sdp->broken_fua) {
2445 sd_first_printk(KERN_NOTICE, sdkp, "Disabling FUA\n");
2446 sdkp->DPOFUA = 0;
2447 } else if (sdkp->DPOFUA && !sdkp->device->use_10_for_rw) {
2445 sd_first_printk(KERN_NOTICE, sdkp, 2448 sd_first_printk(KERN_NOTICE, sdkp,
2446 "Uses READ/WRITE(6), disabling FUA\n"); 2449 "Uses READ/WRITE(6), disabling FUA\n");
2447 sdkp->DPOFUA = 0; 2450 sdkp->DPOFUA = 0;
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 89ee5929eb6d..308256b5e4cb 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -237,6 +237,16 @@ static void virtscsi_req_done(struct virtqueue *vq)
237 virtscsi_vq_done(vscsi, req_vq, virtscsi_complete_cmd); 237 virtscsi_vq_done(vscsi, req_vq, virtscsi_complete_cmd);
238}; 238};
239 239
240static void virtscsi_poll_requests(struct virtio_scsi *vscsi)
241{
242 int i, num_vqs;
243
244 num_vqs = vscsi->num_queues;
245 for (i = 0; i < num_vqs; i++)
246 virtscsi_vq_done(vscsi, &vscsi->req_vqs[i],
247 virtscsi_complete_cmd);
248}
249
240static void virtscsi_complete_free(struct virtio_scsi *vscsi, void *buf) 250static void virtscsi_complete_free(struct virtio_scsi *vscsi, void *buf)
241{ 251{
242 struct virtio_scsi_cmd *cmd = buf; 252 struct virtio_scsi_cmd *cmd = buf;
@@ -253,6 +263,8 @@ static void virtscsi_ctrl_done(struct virtqueue *vq)
253 virtscsi_vq_done(vscsi, &vscsi->ctrl_vq, virtscsi_complete_free); 263 virtscsi_vq_done(vscsi, &vscsi->ctrl_vq, virtscsi_complete_free);
254}; 264};
255 265
266static void virtscsi_handle_event(struct work_struct *work);
267
256static int virtscsi_kick_event(struct virtio_scsi *vscsi, 268static int virtscsi_kick_event(struct virtio_scsi *vscsi,
257 struct virtio_scsi_event_node *event_node) 269 struct virtio_scsi_event_node *event_node)
258{ 270{
@@ -260,6 +272,7 @@ static int virtscsi_kick_event(struct virtio_scsi *vscsi,
260 struct scatterlist sg; 272 struct scatterlist sg;
261 unsigned long flags; 273 unsigned long flags;
262 274
275 INIT_WORK(&event_node->work, virtscsi_handle_event);
263 sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event)); 276 sg_init_one(&sg, &event_node->event, sizeof(struct virtio_scsi_event));
264 277
265 spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags); 278 spin_lock_irqsave(&vscsi->event_vq.vq_lock, flags);
@@ -377,7 +390,6 @@ static void virtscsi_complete_event(struct virtio_scsi *vscsi, void *buf)
377{ 390{
378 struct virtio_scsi_event_node *event_node = buf; 391 struct virtio_scsi_event_node *event_node = buf;
379 392
380 INIT_WORK(&event_node->work, virtscsi_handle_event);
381 schedule_work(&event_node->work); 393 schedule_work(&event_node->work);
382} 394}
383 395
@@ -589,6 +601,18 @@ static int virtscsi_tmf(struct virtio_scsi *vscsi, struct virtio_scsi_cmd *cmd)
589 cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED) 601 cmd->resp.tmf.response == VIRTIO_SCSI_S_FUNCTION_SUCCEEDED)
590 ret = SUCCESS; 602 ret = SUCCESS;
591 603
604 /*
605 * The spec guarantees that all requests related to the TMF have
606 * been completed, but the callback might not have run yet if
607 * we're using independent interrupts (e.g. MSI). Poll the
608 * virtqueues once.
609 *
610 * In the abort case, sc->scsi_done will do nothing, because
611 * the block layer must have detected a timeout and as a result
612 * REQ_ATOM_COMPLETE has been set.
613 */
614 virtscsi_poll_requests(vscsi);
615
592out: 616out:
593 mempool_free(cmd, virtscsi_cmd_pool); 617 mempool_free(cmd, virtscsi_cmd_pool);
594 return ret; 618 return ret;
diff --git a/drivers/spi/spi-pxa2xx.c b/drivers/spi/spi-pxa2xx.c
index a98df7eeb42d..fe792106bdc5 100644
--- a/drivers/spi/spi-pxa2xx.c
+++ b/drivers/spi/spi-pxa2xx.c
@@ -118,6 +118,7 @@ static void lpss_ssp_setup(struct driver_data *drv_data)
118 */ 118 */
119 orig = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL); 119 orig = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);
120 120
121 /* Test SPI_CS_CONTROL_SW_MODE bit enabling */
121 value = orig | SPI_CS_CONTROL_SW_MODE; 122 value = orig | SPI_CS_CONTROL_SW_MODE;
122 writel(value, drv_data->ioaddr + offset + SPI_CS_CONTROL); 123 writel(value, drv_data->ioaddr + offset + SPI_CS_CONTROL);
123 value = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL); 124 value = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);
@@ -126,10 +127,13 @@ static void lpss_ssp_setup(struct driver_data *drv_data)
126 goto detection_done; 127 goto detection_done;
127 } 128 }
128 129
129 value &= ~SPI_CS_CONTROL_SW_MODE; 130 orig = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);
131
132 /* Test SPI_CS_CONTROL_SW_MODE bit disabling */
133 value = orig & ~SPI_CS_CONTROL_SW_MODE;
130 writel(value, drv_data->ioaddr + offset + SPI_CS_CONTROL); 134 writel(value, drv_data->ioaddr + offset + SPI_CS_CONTROL);
131 value = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL); 135 value = readl(drv_data->ioaddr + offset + SPI_CS_CONTROL);
132 if (value != orig) { 136 if (value != (orig & ~SPI_CS_CONTROL_SW_MODE)) {
133 offset = 0x800; 137 offset = 0x800;
134 goto detection_done; 138 goto detection_done;
135 } 139 }
diff --git a/drivers/spi/spi-qup.c b/drivers/spi/spi-qup.c
index fc1de86d3c8a..c08da380cb23 100644
--- a/drivers/spi/spi-qup.c
+++ b/drivers/spi/spi-qup.c
@@ -424,31 +424,6 @@ static int spi_qup_io_config(struct spi_device *spi, struct spi_transfer *xfer)
424 return 0; 424 return 0;
425} 425}
426 426
427static void spi_qup_set_cs(struct spi_device *spi, bool enable)
428{
429 struct spi_qup *controller = spi_master_get_devdata(spi->master);
430
431 u32 iocontol, mask;
432
433 iocontol = readl_relaxed(controller->base + SPI_IO_CONTROL);
434
435 /* Disable auto CS toggle and use manual */
436 iocontol &= ~SPI_IO_C_MX_CS_MODE;
437 iocontol |= SPI_IO_C_FORCE_CS;
438
439 iocontol &= ~SPI_IO_C_CS_SELECT_MASK;
440 iocontol |= SPI_IO_C_CS_SELECT(spi->chip_select);
441
442 mask = SPI_IO_C_CS_N_POLARITY_0 << spi->chip_select;
443
444 if (enable)
445 iocontol |= mask;
446 else
447 iocontol &= ~mask;
448
449 writel_relaxed(iocontol, controller->base + SPI_IO_CONTROL);
450}
451
452static int spi_qup_transfer_one(struct spi_master *master, 427static int spi_qup_transfer_one(struct spi_master *master,
453 struct spi_device *spi, 428 struct spi_device *spi,
454 struct spi_transfer *xfer) 429 struct spi_transfer *xfer)
@@ -571,12 +546,16 @@ static int spi_qup_probe(struct platform_device *pdev)
571 return -ENOMEM; 546 return -ENOMEM;
572 } 547 }
573 548
549 /* use num-cs unless not present or out of range */
550 if (of_property_read_u16(dev->of_node, "num-cs",
551 &master->num_chipselect) ||
552 (master->num_chipselect > SPI_NUM_CHIPSELECTS))
553 master->num_chipselect = SPI_NUM_CHIPSELECTS;
554
574 master->bus_num = pdev->id; 555 master->bus_num = pdev->id;
575 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP; 556 master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP;
576 master->num_chipselect = SPI_NUM_CHIPSELECTS;
577 master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32); 557 master->bits_per_word_mask = SPI_BPW_RANGE_MASK(4, 32);
578 master->max_speed_hz = max_freq; 558 master->max_speed_hz = max_freq;
579 master->set_cs = spi_qup_set_cs;
580 master->transfer_one = spi_qup_transfer_one; 559 master->transfer_one = spi_qup_transfer_one;
581 master->dev.of_node = pdev->dev.of_node; 560 master->dev.of_node = pdev->dev.of_node;
582 master->auto_runtime_pm = true; 561 master->auto_runtime_pm = true;
@@ -640,16 +619,19 @@ static int spi_qup_probe(struct platform_device *pdev)
640 if (ret) 619 if (ret)
641 goto error; 620 goto error;
642 621
643 ret = devm_spi_register_master(dev, master);
644 if (ret)
645 goto error;
646
647 pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC); 622 pm_runtime_set_autosuspend_delay(dev, MSEC_PER_SEC);
648 pm_runtime_use_autosuspend(dev); 623 pm_runtime_use_autosuspend(dev);
649 pm_runtime_set_active(dev); 624 pm_runtime_set_active(dev);
650 pm_runtime_enable(dev); 625 pm_runtime_enable(dev);
626
627 ret = devm_spi_register_master(dev, master);
628 if (ret)
629 goto disable_pm;
630
651 return 0; 631 return 0;
652 632
633disable_pm:
634 pm_runtime_disable(&pdev->dev);
653error: 635error:
654 clk_disable_unprepare(cclk); 636 clk_disable_unprepare(cclk);
655 clk_disable_unprepare(iclk); 637 clk_disable_unprepare(iclk);
diff --git a/drivers/spi/spi-sh-sci.c b/drivers/spi/spi-sh-sci.c
index 1f56ef651d1a..b83dd733684c 100644
--- a/drivers/spi/spi-sh-sci.c
+++ b/drivers/spi/spi-sh-sci.c
@@ -175,9 +175,9 @@ static int sh_sci_spi_remove(struct platform_device *dev)
175{ 175{
176 struct sh_sci_spi *sp = platform_get_drvdata(dev); 176 struct sh_sci_spi *sp = platform_get_drvdata(dev);
177 177
178 iounmap(sp->membase);
179 setbits(sp, PIN_INIT, 0);
180 spi_bitbang_stop(&sp->bitbang); 178 spi_bitbang_stop(&sp->bitbang);
179 setbits(sp, PIN_INIT, 0);
180 iounmap(sp->membase);
181 spi_master_put(sp->bitbang.master); 181 spi_master_put(sp->bitbang.master);
182 return 0; 182 return 0;
183} 183}
diff --git a/drivers/staging/iio/adc/ad7291.c b/drivers/staging/iio/adc/ad7291.c
index 357cef2a6f4c..7194bd138762 100644
--- a/drivers/staging/iio/adc/ad7291.c
+++ b/drivers/staging/iio/adc/ad7291.c
@@ -465,7 +465,7 @@ static int ad7291_probe(struct i2c_client *client,
465 struct ad7291_platform_data *pdata = client->dev.platform_data; 465 struct ad7291_platform_data *pdata = client->dev.platform_data;
466 struct ad7291_chip_info *chip; 466 struct ad7291_chip_info *chip;
467 struct iio_dev *indio_dev; 467 struct iio_dev *indio_dev;
468 int ret = 0; 468 int ret;
469 469
470 indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip)); 470 indio_dev = devm_iio_device_alloc(&client->dev, sizeof(*chip));
471 if (!indio_dev) 471 if (!indio_dev)
@@ -475,7 +475,7 @@ static int ad7291_probe(struct i2c_client *client,
475 if (pdata && pdata->use_external_ref) { 475 if (pdata && pdata->use_external_ref) {
476 chip->reg = devm_regulator_get(&client->dev, "vref"); 476 chip->reg = devm_regulator_get(&client->dev, "vref");
477 if (IS_ERR(chip->reg)) 477 if (IS_ERR(chip->reg))
478 return ret; 478 return PTR_ERR(chip->reg);
479 479
480 ret = regulator_enable(chip->reg); 480 ret = regulator_enable(chip->reg);
481 if (ret) 481 if (ret)
diff --git a/drivers/staging/tidspbridge/core/tiomap3430.c b/drivers/staging/tidspbridge/core/tiomap3430.c
index 8945b4e3a2a6..cb50120ed7b5 100644
--- a/drivers/staging/tidspbridge/core/tiomap3430.c
+++ b/drivers/staging/tidspbridge/core/tiomap3430.c
@@ -280,8 +280,10 @@ static int bridge_brd_monitor(struct bridge_dev_context *dev_ctxt)
280 OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL); 280 OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
281 281
282 /* Wait until the state has moved to ON */ 282 /* Wait until the state has moved to ON */
283 while (*pdata->dsp_prm_read(OMAP3430_IVA2_MOD, OMAP2_PM_PWSTST)& 283 while ((*pdata->dsp_prm_read)(OMAP3430_IVA2_MOD,
284 OMAP_INTRANSITION_MASK); 284 OMAP2_PM_PWSTST) &
285 OMAP_INTRANSITION_MASK)
286 ;
285 /* Disable Automatic transition */ 287 /* Disable Automatic transition */
286 (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_DISABLE_AUTO, 288 (*pdata->dsp_cm_write)(OMAP34XX_CLKSTCTRL_DISABLE_AUTO,
287 OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL); 289 OMAP3430_IVA2_MOD, OMAP2_CM_CLKSTCTRL);
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 5663f4d19d02..1f4c794f5fcc 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1309,7 +1309,7 @@ iscsit_check_dataout_hdr(struct iscsi_conn *conn, unsigned char *buf,
1309 if (cmd->data_direction != DMA_TO_DEVICE) { 1309 if (cmd->data_direction != DMA_TO_DEVICE) {
1310 pr_err("Command ITT: 0x%08x received DataOUT for a" 1310 pr_err("Command ITT: 0x%08x received DataOUT for a"
1311 " NON-WRITE command.\n", cmd->init_task_tag); 1311 " NON-WRITE command.\n", cmd->init_task_tag);
1312 return iscsit_reject_cmd(cmd, ISCSI_REASON_PROTOCOL_ERROR, buf); 1312 return iscsit_dump_data_payload(conn, payload_length, 1);
1313 } 1313 }
1314 se_cmd = &cmd->se_cmd; 1314 se_cmd = &cmd->se_cmd;
1315 iscsit_mod_dataout_timer(cmd); 1315 iscsit_mod_dataout_timer(cmd);
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index 19b842c3e0b3..ab4915c0d933 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -174,7 +174,6 @@ static int chap_server_compute_md5(
174 char *nr_out_ptr, 174 char *nr_out_ptr,
175 unsigned int *nr_out_len) 175 unsigned int *nr_out_len)
176{ 176{
177 char *endptr;
178 unsigned long id; 177 unsigned long id;
179 unsigned char id_as_uchar; 178 unsigned char id_as_uchar;
180 unsigned char digest[MD5_SIGNATURE_SIZE]; 179 unsigned char digest[MD5_SIGNATURE_SIZE];
@@ -320,9 +319,14 @@ static int chap_server_compute_md5(
320 } 319 }
321 320
322 if (type == HEX) 321 if (type == HEX)
323 id = simple_strtoul(&identifier[2], &endptr, 0); 322 ret = kstrtoul(&identifier[2], 0, &id);
324 else 323 else
325 id = simple_strtoul(identifier, &endptr, 0); 324 ret = kstrtoul(identifier, 0, &id);
325
326 if (ret < 0) {
327 pr_err("kstrtoul() failed for CHAP identifier: %d\n", ret);
328 goto out;
329 }
326 if (id > 255) { 330 if (id > 255) {
327 pr_err("chap identifier: %lu greater than 255\n", id); 331 pr_err("chap identifier: %lu greater than 255\n", id);
328 goto out; 332 goto out;
@@ -351,6 +355,10 @@ static int chap_server_compute_md5(
351 pr_err("Unable to convert incoming challenge\n"); 355 pr_err("Unable to convert incoming challenge\n");
352 goto out; 356 goto out;
353 } 357 }
358 if (challenge_len > 1024) {
359 pr_err("CHAP_C exceeds maximum binary size of 1024 bytes\n");
360 goto out;
361 }
354 /* 362 /*
355 * During mutual authentication, the CHAP_C generated by the 363 * During mutual authentication, the CHAP_C generated by the
356 * initiator must not match the original CHAP_C generated by 364 * initiator must not match the original CHAP_C generated by
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index fecb69535a15..5e71ac609418 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -1216,7 +1216,7 @@ old_sess_out:
1216static int __iscsi_target_login_thread(struct iscsi_np *np) 1216static int __iscsi_target_login_thread(struct iscsi_np *np)
1217{ 1217{
1218 u8 *buffer, zero_tsih = 0; 1218 u8 *buffer, zero_tsih = 0;
1219 int ret = 0, rc, stop; 1219 int ret = 0, rc;
1220 struct iscsi_conn *conn = NULL; 1220 struct iscsi_conn *conn = NULL;
1221 struct iscsi_login *login; 1221 struct iscsi_login *login;
1222 struct iscsi_portal_group *tpg = NULL; 1222 struct iscsi_portal_group *tpg = NULL;
@@ -1230,6 +1230,9 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
1230 if (np->np_thread_state == ISCSI_NP_THREAD_RESET) { 1230 if (np->np_thread_state == ISCSI_NP_THREAD_RESET) {
1231 np->np_thread_state = ISCSI_NP_THREAD_ACTIVE; 1231 np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
1232 complete(&np->np_restart_comp); 1232 complete(&np->np_restart_comp);
1233 } else if (np->np_thread_state == ISCSI_NP_THREAD_SHUTDOWN) {
1234 spin_unlock_bh(&np->np_thread_lock);
1235 goto exit;
1233 } else { 1236 } else {
1234 np->np_thread_state = ISCSI_NP_THREAD_ACTIVE; 1237 np->np_thread_state = ISCSI_NP_THREAD_ACTIVE;
1235 } 1238 }
@@ -1422,10 +1425,8 @@ old_sess_out:
1422 } 1425 }
1423 1426
1424out: 1427out:
1425 stop = kthread_should_stop(); 1428 return 1;
1426 /* Wait for another socket.. */ 1429
1427 if (!stop)
1428 return 1;
1429exit: 1430exit:
1430 iscsi_stop_login_thread_timer(np); 1431 iscsi_stop_login_thread_timer(np);
1431 spin_lock_bh(&np->np_thread_lock); 1432 spin_lock_bh(&np->np_thread_lock);
@@ -1442,7 +1443,7 @@ int iscsi_target_login_thread(void *arg)
1442 1443
1443 allow_signal(SIGINT); 1444 allow_signal(SIGINT);
1444 1445
1445 while (!kthread_should_stop()) { 1446 while (1) {
1446 ret = __iscsi_target_login_thread(np); 1447 ret = __iscsi_target_login_thread(np);
1447 /* 1448 /*
1448 * We break and exit here unless another sock_accept() call 1449 * We break and exit here unless another sock_accept() call
diff --git a/drivers/target/iscsi/iscsi_target_util.c b/drivers/target/iscsi/iscsi_target_util.c
index 53e157cb8c54..fd90b28f1d94 100644
--- a/drivers/target/iscsi/iscsi_target_util.c
+++ b/drivers/target/iscsi/iscsi_target_util.c
@@ -1295,6 +1295,8 @@ int iscsit_tx_login_rsp(struct iscsi_conn *conn, u8 status_class, u8 status_deta
1295 login->login_failed = 1; 1295 login->login_failed = 1;
1296 iscsit_collect_login_stats(conn, status_class, status_detail); 1296 iscsit_collect_login_stats(conn, status_class, status_detail);
1297 1297
1298 memset(&login->rsp[0], 0, ISCSI_HDR_LEN);
1299
1298 hdr = (struct iscsi_login_rsp *)&login->rsp[0]; 1300 hdr = (struct iscsi_login_rsp *)&login->rsp[0];
1299 hdr->opcode = ISCSI_OP_LOGIN_RSP; 1301 hdr->opcode = ISCSI_OP_LOGIN_RSP;
1300 hdr->status_class = status_class; 1302 hdr->status_class = status_class;
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 6d2f37578b29..8c64b8776a96 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -239,6 +239,7 @@ static void tcm_loop_submission_work(struct work_struct *work)
239 return; 239 return;
240 240
241out_done: 241out_done:
242 kmem_cache_free(tcm_loop_cmd_cache, tl_cmd);
242 sc->scsi_done(sc); 243 sc->scsi_done(sc);
243 return; 244 return;
244} 245}
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 11d26fe65bfb..98da90167159 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -616,6 +616,7 @@ void core_dev_unexport(
616 dev->export_count--; 616 dev->export_count--;
617 spin_unlock(&hba->device_lock); 617 spin_unlock(&hba->device_lock);
618 618
619 lun->lun_sep = NULL;
619 lun->lun_se_dev = NULL; 620 lun->lun_se_dev = NULL;
620} 621}
621 622
diff --git a/drivers/tc/tc.c b/drivers/tc/tc.c
index a8aaf6ac2ae2..946562389ca8 100644
--- a/drivers/tc/tc.c
+++ b/drivers/tc/tc.c
@@ -129,7 +129,10 @@ static void __init tc_bus_add_devices(struct tc_bus *tbus)
129 129
130 tc_device_get_irq(tdev); 130 tc_device_get_irq(tdev);
131 131
132 device_register(&tdev->dev); 132 if (device_register(&tdev->dev)) {
133 put_device(&tdev->dev);
134 goto out_err;
135 }
133 list_add_tail(&tdev->node, &tbus->devices); 136 list_add_tail(&tdev->node, &tbus->devices);
134 137
135out_err: 138out_err:
@@ -148,7 +151,10 @@ static int __init tc_init(void)
148 151
149 INIT_LIST_HEAD(&tc_bus.devices); 152 INIT_LIST_HEAD(&tc_bus.devices);
150 dev_set_name(&tc_bus.dev, "tc"); 153 dev_set_name(&tc_bus.dev, "tc");
151 device_register(&tc_bus.dev); 154 if (device_register(&tc_bus.dev)) {
155 put_device(&tc_bus.dev);
156 return 0;
157 }
152 158
153 if (tc_bus.info.slot_size) { 159 if (tc_bus.info.slot_size) {
154 unsigned int tc_clock = tc_get_speed(&tc_bus) / 100000; 160 unsigned int tc_clock = tc_get_speed(&tc_bus) / 100000;
diff --git a/drivers/thermal/imx_thermal.c b/drivers/thermal/imx_thermal.c
index a99c63152b8d..2c516f2eebed 100644
--- a/drivers/thermal/imx_thermal.c
+++ b/drivers/thermal/imx_thermal.c
@@ -306,7 +306,7 @@ static int imx_get_sensor_data(struct platform_device *pdev)
306{ 306{
307 struct imx_thermal_data *data = platform_get_drvdata(pdev); 307 struct imx_thermal_data *data = platform_get_drvdata(pdev);
308 struct regmap *map; 308 struct regmap *map;
309 int t1, t2, n1, n2; 309 int t1, n1;
310 int ret; 310 int ret;
311 u32 val; 311 u32 val;
312 u64 temp64; 312 u64 temp64;
@@ -333,14 +333,10 @@ static int imx_get_sensor_data(struct platform_device *pdev)
333 /* 333 /*
334 * Sensor data layout: 334 * Sensor data layout:
335 * [31:20] - sensor value @ 25C 335 * [31:20] - sensor value @ 25C
336 * [19:8] - sensor value of hot
337 * [7:0] - hot temperature value
338 * Use universal formula now and only need sensor value @ 25C 336 * Use universal formula now and only need sensor value @ 25C
339 * slope = 0.4297157 - (0.0015976 * 25C fuse) 337 * slope = 0.4297157 - (0.0015976 * 25C fuse)
340 */ 338 */
341 n1 = val >> 20; 339 n1 = val >> 20;
342 n2 = (val & 0xfff00) >> 8;
343 t2 = val & 0xff;
344 t1 = 25; /* t1 always 25C */ 340 t1 = 25; /* t1 always 25C */
345 341
346 /* 342 /*
@@ -366,16 +362,16 @@ static int imx_get_sensor_data(struct platform_device *pdev)
366 data->c2 = n1 * data->c1 + 1000 * t1; 362 data->c2 = n1 * data->c1 + 1000 * t1;
367 363
368 /* 364 /*
369 * Set the default passive cooling trip point to 20 °C below the 365 * Set the default passive cooling trip point,
370 * maximum die temperature. Can be changed from userspace. 366 * can be changed from userspace.
371 */ 367 */
372 data->temp_passive = 1000 * (t2 - 20); 368 data->temp_passive = IMX_TEMP_PASSIVE;
373 369
374 /* 370 /*
375 * The maximum die temperature is t2, let's give 5 °C cushion 371 * The maximum die temperature set to 20 C higher than
376 * for noise and possible temperature rise between measurements. 372 * IMX_TEMP_PASSIVE.
377 */ 373 */
378 data->temp_critical = 1000 * (t2 - 5); 374 data->temp_critical = 1000 * 20 + data->temp_passive;
379 375
380 return 0; 376 return 0;
381} 377}
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index 04b1be7fa018..4b2b999b7611 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -156,8 +156,8 @@ static int of_thermal_bind(struct thermal_zone_device *thermal,
156 156
157 ret = thermal_zone_bind_cooling_device(thermal, 157 ret = thermal_zone_bind_cooling_device(thermal,
158 tbp->trip_id, cdev, 158 tbp->trip_id, cdev,
159 tbp->min, 159 tbp->max,
160 tbp->max); 160 tbp->min);
161 if (ret) 161 if (ret)
162 return ret; 162 return ret;
163 } 163 }
@@ -712,11 +712,12 @@ thermal_of_build_thermal_zone(struct device_node *np)
712 } 712 }
713 713
714 i = 0; 714 i = 0;
715 for_each_child_of_node(child, gchild) 715 for_each_child_of_node(child, gchild) {
716 ret = thermal_of_populate_bind_params(gchild, &tz->tbps[i++], 716 ret = thermal_of_populate_bind_params(gchild, &tz->tbps[i++],
717 tz->trips, tz->ntrips); 717 tz->trips, tz->ntrips);
718 if (ret) 718 if (ret)
719 goto free_tbps; 719 goto free_tbps;
720 }
720 721
721finish: 722finish:
722 of_node_put(child); 723 of_node_put(child);
diff --git a/drivers/thermal/thermal_hwmon.c b/drivers/thermal/thermal_hwmon.c
index fdb07199d9c2..1967bee4f076 100644
--- a/drivers/thermal/thermal_hwmon.c
+++ b/drivers/thermal/thermal_hwmon.c
@@ -140,6 +140,12 @@ thermal_hwmon_lookup_temp(const struct thermal_hwmon_device *hwmon,
140 return NULL; 140 return NULL;
141} 141}
142 142
143static bool thermal_zone_crit_temp_valid(struct thermal_zone_device *tz)
144{
145 unsigned long temp;
146 return tz->ops->get_crit_temp && !tz->ops->get_crit_temp(tz, &temp);
147}
148
143int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz) 149int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
144{ 150{
145 struct thermal_hwmon_device *hwmon; 151 struct thermal_hwmon_device *hwmon;
@@ -189,21 +195,18 @@ int thermal_add_hwmon_sysfs(struct thermal_zone_device *tz)
189 if (result) 195 if (result)
190 goto free_temp_mem; 196 goto free_temp_mem;
191 197
192 if (tz->ops->get_crit_temp) { 198 if (thermal_zone_crit_temp_valid(tz)) {
193 unsigned long temperature; 199 snprintf(temp->temp_crit.name,
194 if (!tz->ops->get_crit_temp(tz, &temperature)) { 200 sizeof(temp->temp_crit.name),
195 snprintf(temp->temp_crit.name,
196 sizeof(temp->temp_crit.name),
197 "temp%d_crit", hwmon->count); 201 "temp%d_crit", hwmon->count);
198 temp->temp_crit.attr.attr.name = temp->temp_crit.name; 202 temp->temp_crit.attr.attr.name = temp->temp_crit.name;
199 temp->temp_crit.attr.attr.mode = 0444; 203 temp->temp_crit.attr.attr.mode = 0444;
200 temp->temp_crit.attr.show = temp_crit_show; 204 temp->temp_crit.attr.show = temp_crit_show;
201 sysfs_attr_init(&temp->temp_crit.attr.attr); 205 sysfs_attr_init(&temp->temp_crit.attr.attr);
202 result = device_create_file(hwmon->device, 206 result = device_create_file(hwmon->device,
203 &temp->temp_crit.attr); 207 &temp->temp_crit.attr);
204 if (result) 208 if (result)
205 goto unregister_input; 209 goto unregister_input;
206 }
207 } 210 }
208 211
209 mutex_lock(&thermal_hwmon_list_lock); 212 mutex_lock(&thermal_hwmon_list_lock);
@@ -250,7 +253,7 @@ void thermal_remove_hwmon_sysfs(struct thermal_zone_device *tz)
250 } 253 }
251 254
252 device_remove_file(hwmon->device, &temp->temp_input.attr); 255 device_remove_file(hwmon->device, &temp->temp_input.attr);
253 if (tz->ops->get_crit_temp) 256 if (thermal_zone_crit_temp_valid(tz))
254 device_remove_file(hwmon->device, &temp->temp_crit.attr); 257 device_remove_file(hwmon->device, &temp->temp_crit.attr);
255 258
256 mutex_lock(&thermal_hwmon_list_lock); 259 mutex_lock(&thermal_hwmon_list_lock);
diff --git a/drivers/thermal/ti-soc-thermal/ti-bandgap.c b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
index a1271b55103a..634b6ce0e63a 100644
--- a/drivers/thermal/ti-soc-thermal/ti-bandgap.c
+++ b/drivers/thermal/ti-soc-thermal/ti-bandgap.c
@@ -1155,7 +1155,7 @@ static struct ti_bandgap *ti_bandgap_build(struct platform_device *pdev)
1155 /* register shadow for context save and restore */ 1155 /* register shadow for context save and restore */
1156 bgp->regval = devm_kzalloc(&pdev->dev, sizeof(*bgp->regval) * 1156 bgp->regval = devm_kzalloc(&pdev->dev, sizeof(*bgp->regval) *
1157 bgp->conf->sensor_count, GFP_KERNEL); 1157 bgp->conf->sensor_count, GFP_KERNEL);
1158 if (!bgp) { 1158 if (!bgp->regval) {
1159 dev_err(&pdev->dev, "Unable to allocate mem for driver ref\n"); 1159 dev_err(&pdev->dev, "Unable to allocate mem for driver ref\n");
1160 return ERR_PTR(-ENOMEM); 1160 return ERR_PTR(-ENOMEM);
1161 } 1161 }
diff --git a/drivers/tty/serial/arc_uart.c b/drivers/tty/serial/arc_uart.c
index c9f5c9dcc15c..008c223eaf26 100644
--- a/drivers/tty/serial/arc_uart.c
+++ b/drivers/tty/serial/arc_uart.c
@@ -177,7 +177,7 @@ static void arc_serial_tx_chars(struct arc_uart_port *uart)
177 uart->port.icount.tx++; 177 uart->port.icount.tx++;
178 uart->port.x_char = 0; 178 uart->port.x_char = 0;
179 sent = 1; 179 sent = 1;
180 } else if (xmit->tail != xmit->head) { /* TODO: uart_circ_empty */ 180 } else if (!uart_circ_empty(xmit)) {
181 ch = xmit->buf[xmit->tail]; 181 ch = xmit->buf[xmit->tail];
182 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); 182 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
183 uart->port.icount.tx++; 183 uart->port.icount.tx++;
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index e2f93874989b..044e86d528ae 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -567,6 +567,9 @@ static void imx_start_tx(struct uart_port *port)
567 struct imx_port *sport = (struct imx_port *)port; 567 struct imx_port *sport = (struct imx_port *)port;
568 unsigned long temp; 568 unsigned long temp;
569 569
570 if (uart_circ_empty(&port->state->xmit))
571 return;
572
570 if (USE_IRDA(sport)) { 573 if (USE_IRDA(sport)) {
571 /* half duplex in IrDA mode; have to disable receive mode */ 574 /* half duplex in IrDA mode; have to disable receive mode */
572 temp = readl(sport->port.membase + UCR4); 575 temp = readl(sport->port.membase + UCR4);
diff --git a/drivers/tty/serial/ip22zilog.c b/drivers/tty/serial/ip22zilog.c
index 1efd4c36ba0c..99b7b8697861 100644
--- a/drivers/tty/serial/ip22zilog.c
+++ b/drivers/tty/serial/ip22zilog.c
@@ -603,6 +603,8 @@ static void ip22zilog_start_tx(struct uart_port *port)
603 } else { 603 } else {
604 struct circ_buf *xmit = &port->state->xmit; 604 struct circ_buf *xmit = &port->state->xmit;
605 605
606 if (uart_circ_empty(xmit))
607 return;
606 writeb(xmit->buf[xmit->tail], &channel->data); 608 writeb(xmit->buf[xmit->tail], &channel->data);
607 ZSDELAY(); 609 ZSDELAY();
608 ZS_WSYNC(channel); 610 ZS_WSYNC(channel);
diff --git a/drivers/tty/serial/m32r_sio.c b/drivers/tty/serial/m32r_sio.c
index 68f2c53e0b54..5702828fb62e 100644
--- a/drivers/tty/serial/m32r_sio.c
+++ b/drivers/tty/serial/m32r_sio.c
@@ -266,9 +266,11 @@ static void m32r_sio_start_tx(struct uart_port *port)
266 if (!(up->ier & UART_IER_THRI)) { 266 if (!(up->ier & UART_IER_THRI)) {
267 up->ier |= UART_IER_THRI; 267 up->ier |= UART_IER_THRI;
268 serial_out(up, UART_IER, up->ier); 268 serial_out(up, UART_IER, up->ier);
269 serial_out(up, UART_TX, xmit->buf[xmit->tail]); 269 if (!uart_circ_empty(xmit)) {
270 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); 270 serial_out(up, UART_TX, xmit->buf[xmit->tail]);
271 up->port.icount.tx++; 271 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
272 up->port.icount.tx++;
273 }
272 } 274 }
273 while((serial_in(up, UART_LSR) & UART_EMPTY) != UART_EMPTY); 275 while((serial_in(up, UART_LSR) & UART_EMPTY) != UART_EMPTY);
274#else 276#else
diff --git a/drivers/tty/serial/msm_serial.c b/drivers/tty/serial/msm_serial.c
index c41aca4dfc43..72000a6d5af0 100644
--- a/drivers/tty/serial/msm_serial.c
+++ b/drivers/tty/serial/msm_serial.c
@@ -991,7 +991,7 @@ static const struct of_device_id msm_uartdm_table[] = {
991 { } 991 { }
992}; 992};
993 993
994static int __init msm_serial_probe(struct platform_device *pdev) 994static int msm_serial_probe(struct platform_device *pdev)
995{ 995{
996 struct msm_port *msm_port; 996 struct msm_port *msm_port;
997 struct resource *resource; 997 struct resource *resource;
diff --git a/drivers/tty/serial/pmac_zilog.c b/drivers/tty/serial/pmac_zilog.c
index 8193635103ee..f7ad5b903055 100644
--- a/drivers/tty/serial/pmac_zilog.c
+++ b/drivers/tty/serial/pmac_zilog.c
@@ -653,6 +653,8 @@ static void pmz_start_tx(struct uart_port *port)
653 } else { 653 } else {
654 struct circ_buf *xmit = &port->state->xmit; 654 struct circ_buf *xmit = &port->state->xmit;
655 655
656 if (uart_circ_empty(xmit))
657 goto out;
656 write_zsdata(uap, xmit->buf[xmit->tail]); 658 write_zsdata(uap, xmit->buf[xmit->tail]);
657 zssync(uap); 659 zssync(uap);
658 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); 660 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
@@ -661,6 +663,7 @@ static void pmz_start_tx(struct uart_port *port)
661 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 663 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
662 uart_write_wakeup(&uap->port); 664 uart_write_wakeup(&uap->port);
663 } 665 }
666 out:
664 pmz_debug("pmz: start_tx() done.\n"); 667 pmz_debug("pmz: start_tx() done.\n");
665} 668}
666 669
diff --git a/drivers/tty/serial/sunsab.c b/drivers/tty/serial/sunsab.c
index 80a58eca785b..2f57df9a71d9 100644
--- a/drivers/tty/serial/sunsab.c
+++ b/drivers/tty/serial/sunsab.c
@@ -427,6 +427,9 @@ static void sunsab_start_tx(struct uart_port *port)
427 struct circ_buf *xmit = &up->port.state->xmit; 427 struct circ_buf *xmit = &up->port.state->xmit;
428 int i; 428 int i;
429 429
430 if (uart_circ_empty(xmit))
431 return;
432
430 up->interrupt_mask1 &= ~(SAB82532_IMR1_ALLS|SAB82532_IMR1_XPR); 433 up->interrupt_mask1 &= ~(SAB82532_IMR1_ALLS|SAB82532_IMR1_XPR);
431 writeb(up->interrupt_mask1, &up->regs->w.imr1); 434 writeb(up->interrupt_mask1, &up->regs->w.imr1);
432 435
diff --git a/drivers/tty/serial/sunzilog.c b/drivers/tty/serial/sunzilog.c
index a85db8b87156..02df3940b95e 100644
--- a/drivers/tty/serial/sunzilog.c
+++ b/drivers/tty/serial/sunzilog.c
@@ -703,6 +703,8 @@ static void sunzilog_start_tx(struct uart_port *port)
703 } else { 703 } else {
704 struct circ_buf *xmit = &port->state->xmit; 704 struct circ_buf *xmit = &port->state->xmit;
705 705
706 if (uart_circ_empty(xmit))
707 return;
706 writeb(xmit->buf[xmit->tail], &channel->data); 708 writeb(xmit->buf[xmit->tail], &channel->data);
707 ZSDELAY(); 709 ZSDELAY();
708 ZS_WSYNC(channel); 710 ZS_WSYNC(channel);
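
Several of the serial hunks above (imx, ip22zilog, m32r_sio, pmac_zilog, sunsab, sunzilog) apply the same fix: return early from start_tx when the transmit circular buffer is empty instead of blindly consuming xmit->buf[xmit->tail], while the arc_uart hunk replaces an open-coded tail/head comparison with the uart_circ_empty() helper. A minimal sketch of the pattern follows; foo_start_tx() and foo_tx_byte() are illustrative names, not taken from any of the patched drivers.

#include <linux/serial_core.h>
#include <linux/circ_buf.h>

/* Illustrative device-specific transmit hook. */
static void foo_tx_byte(struct uart_port *port, unsigned char ch)
{
	/* a real driver would write ch to its TX register here */
}

static void foo_start_tx(struct uart_port *port)
{
	struct circ_buf *xmit = &port->state->xmit;

	/* Nothing queued: reading xmit->buf[xmit->tail] here would send a
	 * stale byte, which is exactly what the hunks above prevent. */
	if (uart_circ_empty(xmit))
		return;

	foo_tx_byte(port, xmit->buf[xmit->tail]);
	xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
	port->icount.tx++;

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);
}
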
diff --git a/drivers/usb/chipidea/udc.c b/drivers/usb/chipidea/udc.c
index 69425b3cb6b7..b8125aa64ad8 100644
--- a/drivers/usb/chipidea/udc.c
+++ b/drivers/usb/chipidea/udc.c
@@ -1169,8 +1169,8 @@ static int ep_enable(struct usb_ep *ep,
1169 1169
1170 if (hwep->type == USB_ENDPOINT_XFER_CONTROL) 1170 if (hwep->type == USB_ENDPOINT_XFER_CONTROL)
1171 cap |= QH_IOS; 1171 cap |= QH_IOS;
1172 if (hwep->num) 1172
1173 cap |= QH_ZLT; 1173 cap |= QH_ZLT;
1174 cap |= (hwep->ep.maxpacket << __ffs(QH_MAX_PKT)) & QH_MAX_PKT; 1174 cap |= (hwep->ep.maxpacket << __ffs(QH_MAX_PKT)) & QH_MAX_PKT;
1175 /* 1175 /*
1176 * For ISO-TX, we set mult at QH as the largest value, and use 1176 * For ISO-TX, we set mult at QH as the largest value, and use
@@ -1321,6 +1321,7 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
1321 struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep); 1321 struct ci_hw_ep *hwep = container_of(ep, struct ci_hw_ep, ep);
1322 struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req); 1322 struct ci_hw_req *hwreq = container_of(req, struct ci_hw_req, req);
1323 unsigned long flags; 1323 unsigned long flags;
1324 struct td_node *node, *tmpnode;
1324 1325
1325 if (ep == NULL || req == NULL || hwreq->req.status != -EALREADY || 1326 if (ep == NULL || req == NULL || hwreq->req.status != -EALREADY ||
1326 hwep->ep.desc == NULL || list_empty(&hwreq->queue) || 1327 hwep->ep.desc == NULL || list_empty(&hwreq->queue) ||
@@ -1331,6 +1332,12 @@ static int ep_dequeue(struct usb_ep *ep, struct usb_request *req)
1331 1332
1332 hw_ep_flush(hwep->ci, hwep->num, hwep->dir); 1333 hw_ep_flush(hwep->ci, hwep->num, hwep->dir);
1333 1334
1335 list_for_each_entry_safe(node, tmpnode, &hwreq->tds, td) {
1336 dma_pool_free(hwep->td_pool, node->ptr, node->dma);
1337 list_del(&node->td);
1338 kfree(node);
1339 }
1340
1334 /* pop request */ 1341 /* pop request */
1335 list_del_init(&hwreq->queue); 1342 list_del_init(&hwreq->queue);
1336 1343
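
The ep_dequeue() hunk above walks the request's TD list with list_for_each_entry_safe() because each iteration deletes and frees the node it is visiting; the plain list_for_each_entry() would have to read the next pointer out of freed memory. A self-contained sketch of that idiom, with a hypothetical demo_node standing in for td_node:

#include <linux/list.h>
#include <linux/slab.h>

struct demo_node {			/* illustrative stand-in for td_node */
	struct list_head entry;
	void *payload;
};

static void demo_free_all(struct list_head *head)
{
	struct demo_node *node, *tmp;

	/* The _safe variant caches the next pointer in tmp, so deleting
	 * and freeing the current node does not break the traversal. */
	list_for_each_entry_safe(node, tmp, head, entry) {
		list_del(&node->entry);
		kfree(node);
	}
}
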
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 21b99b4b4082..0e950ad8cb25 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -889,6 +889,25 @@ static int hub_usb3_port_disable(struct usb_hub *hub, int port1)
889 if (!hub_is_superspeed(hub->hdev)) 889 if (!hub_is_superspeed(hub->hdev))
890 return -EINVAL; 890 return -EINVAL;
891 891
892 ret = hub_port_status(hub, port1, &portstatus, &portchange);
893 if (ret < 0)
894 return ret;
895
896 /*
 897	 * The AMD FCH USB XHCI controller [1022:7814] can report a spurious
 898	 * result that makes a subsequently hot-plugged USB 3.0 device route
 899	 * to the 2.0 root hub and enumerate as a high-speed device if we set
 900	 * the USB 3.0 port link state to Disabled. Since the port is already
 901	 * in the USB_SS_PORT_LS_RX_DETECT state, check the state here to
 902	 * avoid the bug.
903 */
904 if ((portstatus & USB_PORT_STAT_LINK_STATE) ==
905 USB_SS_PORT_LS_RX_DETECT) {
906 dev_dbg(&hub->ports[port1 - 1]->dev,
907 "Not disabling port; link state is RxDetect\n");
908 return ret;
909 }
910
892 ret = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_SS_DISABLED); 911 ret = hub_set_port_link_state(hub, port1, USB_SS_PORT_LS_SS_DISABLED);
893 if (ret) 912 if (ret)
894 return ret; 913 return ret;
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index 8eb996e4f058..261c3b428220 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -45,6 +45,7 @@ comment "Platform Glue Driver Support"
45config USB_DWC3_OMAP 45config USB_DWC3_OMAP
46 tristate "Texas Instruments OMAP5 and similar Platforms" 46 tristate "Texas Instruments OMAP5 and similar Platforms"
47 depends on EXTCON && (ARCH_OMAP2PLUS || COMPILE_TEST) 47 depends on EXTCON && (ARCH_OMAP2PLUS || COMPILE_TEST)
48 depends on OF
48 default USB_DWC3 49 default USB_DWC3
49 help 50 help
50 Some platforms from Texas Instruments like OMAP5, DRA7xxx and 51 Some platforms from Texas Instruments like OMAP5, DRA7xxx and
diff --git a/drivers/usb/dwc3/dwc3-omap.c b/drivers/usb/dwc3/dwc3-omap.c
index 4af4c3567656..07a736acd0f2 100644
--- a/drivers/usb/dwc3/dwc3-omap.c
+++ b/drivers/usb/dwc3/dwc3-omap.c
@@ -322,7 +322,7 @@ static int dwc3_omap_remove_core(struct device *dev, void *c)
322{ 322{
323 struct platform_device *pdev = to_platform_device(dev); 323 struct platform_device *pdev = to_platform_device(dev);
324 324
325 platform_device_unregister(pdev); 325 of_device_unregister(pdev);
326 326
327 return 0; 327 return 0;
328} 328}
@@ -599,7 +599,7 @@ static int dwc3_omap_prepare(struct device *dev)
599{ 599{
600 struct dwc3_omap *omap = dev_get_drvdata(dev); 600 struct dwc3_omap *omap = dev_get_drvdata(dev);
601 601
602 dwc3_omap_disable_irqs(omap); 602 dwc3_omap_write_irqmisc_set(omap, 0x00);
603 603
604 return 0; 604 return 0;
605} 605}
@@ -607,8 +607,19 @@ static int dwc3_omap_prepare(struct device *dev)
607static void dwc3_omap_complete(struct device *dev) 607static void dwc3_omap_complete(struct device *dev)
608{ 608{
609 struct dwc3_omap *omap = dev_get_drvdata(dev); 609 struct dwc3_omap *omap = dev_get_drvdata(dev);
610 u32 reg;
610 611
611 dwc3_omap_enable_irqs(omap); 612 reg = (USBOTGSS_IRQMISC_OEVT |
613 USBOTGSS_IRQMISC_DRVVBUS_RISE |
614 USBOTGSS_IRQMISC_CHRGVBUS_RISE |
615 USBOTGSS_IRQMISC_DISCHRGVBUS_RISE |
616 USBOTGSS_IRQMISC_IDPULLUP_RISE |
617 USBOTGSS_IRQMISC_DRVVBUS_FALL |
618 USBOTGSS_IRQMISC_CHRGVBUS_FALL |
619 USBOTGSS_IRQMISC_DISCHRGVBUS_FALL |
620 USBOTGSS_IRQMISC_IDPULLUP_FALL);
621
622 dwc3_omap_write_irqmisc_set(omap, reg);
612} 623}
613 624
614static int dwc3_omap_suspend(struct device *dev) 625static int dwc3_omap_suspend(struct device *dev)
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 9d64dd02c57e..dab7927d1009 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -828,10 +828,6 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
828 length, last ? " last" : "", 828 length, last ? " last" : "",
829 chain ? " chain" : ""); 829 chain ? " chain" : "");
830 830
831 /* Skip the LINK-TRB on ISOC */
832 if (((dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
833 usb_endpoint_xfer_isoc(dep->endpoint.desc))
834 dep->free_slot++;
835 831
836 trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK]; 832 trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];
837 833
@@ -843,6 +839,10 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
843 } 839 }
844 840
845 dep->free_slot++; 841 dep->free_slot++;
842 /* Skip the LINK-TRB on ISOC */
843 if (((dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
844 usb_endpoint_xfer_isoc(dep->endpoint.desc))
845 dep->free_slot++;
846 846
847 trb->size = DWC3_TRB_SIZE_LENGTH(length); 847 trb->size = DWC3_TRB_SIZE_LENGTH(length);
848 trb->bpl = lower_32_bits(dma); 848 trb->bpl = lower_32_bits(dma);
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 2ddcd635ca2a..97142146eead 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -1145,15 +1145,15 @@ static struct configfs_item_operations interf_item_ops = {
1145 .store_attribute = usb_os_desc_attr_store, 1145 .store_attribute = usb_os_desc_attr_store,
1146}; 1146};
1147 1147
1148static ssize_t rndis_grp_compatible_id_show(struct usb_os_desc *desc, 1148static ssize_t interf_grp_compatible_id_show(struct usb_os_desc *desc,
1149 char *page) 1149 char *page)
1150{ 1150{
1151 memcpy(page, desc->ext_compat_id, 8); 1151 memcpy(page, desc->ext_compat_id, 8);
1152 return 8; 1152 return 8;
1153} 1153}
1154 1154
1155static ssize_t rndis_grp_compatible_id_store(struct usb_os_desc *desc, 1155static ssize_t interf_grp_compatible_id_store(struct usb_os_desc *desc,
1156 const char *page, size_t len) 1156 const char *page, size_t len)
1157{ 1157{
1158 int l; 1158 int l;
1159 1159
@@ -1171,20 +1171,20 @@ static ssize_t rndis_grp_compatible_id_store(struct usb_os_desc *desc,
1171 return len; 1171 return len;
1172} 1172}
1173 1173
1174static struct usb_os_desc_attribute rndis_grp_attr_compatible_id = 1174static struct usb_os_desc_attribute interf_grp_attr_compatible_id =
1175 __CONFIGFS_ATTR(compatible_id, S_IRUGO | S_IWUSR, 1175 __CONFIGFS_ATTR(compatible_id, S_IRUGO | S_IWUSR,
1176 rndis_grp_compatible_id_show, 1176 interf_grp_compatible_id_show,
1177 rndis_grp_compatible_id_store); 1177 interf_grp_compatible_id_store);
1178 1178
1179static ssize_t rndis_grp_sub_compatible_id_show(struct usb_os_desc *desc, 1179static ssize_t interf_grp_sub_compatible_id_show(struct usb_os_desc *desc,
1180 char *page) 1180 char *page)
1181{ 1181{
1182 memcpy(page, desc->ext_compat_id + 8, 8); 1182 memcpy(page, desc->ext_compat_id + 8, 8);
1183 return 8; 1183 return 8;
1184} 1184}
1185 1185
1186static ssize_t rndis_grp_sub_compatible_id_store(struct usb_os_desc *desc, 1186static ssize_t interf_grp_sub_compatible_id_store(struct usb_os_desc *desc,
1187 const char *page, size_t len) 1187 const char *page, size_t len)
1188{ 1188{
1189 int l; 1189 int l;
1190 1190
@@ -1202,20 +1202,21 @@ static ssize_t rndis_grp_sub_compatible_id_store(struct usb_os_desc *desc,
1202 return len; 1202 return len;
1203} 1203}
1204 1204
1205static struct usb_os_desc_attribute rndis_grp_attr_sub_compatible_id = 1205static struct usb_os_desc_attribute interf_grp_attr_sub_compatible_id =
1206 __CONFIGFS_ATTR(sub_compatible_id, S_IRUGO | S_IWUSR, 1206 __CONFIGFS_ATTR(sub_compatible_id, S_IRUGO | S_IWUSR,
1207 rndis_grp_sub_compatible_id_show, 1207 interf_grp_sub_compatible_id_show,
1208 rndis_grp_sub_compatible_id_store); 1208 interf_grp_sub_compatible_id_store);
1209 1209
1210static struct configfs_attribute *interf_grp_attrs[] = { 1210static struct configfs_attribute *interf_grp_attrs[] = {
1211 &rndis_grp_attr_compatible_id.attr, 1211 &interf_grp_attr_compatible_id.attr,
1212 &rndis_grp_attr_sub_compatible_id.attr, 1212 &interf_grp_attr_sub_compatible_id.attr,
1213 NULL 1213 NULL
1214}; 1214};
1215 1215
1216int usb_os_desc_prepare_interf_dir(struct config_group *parent, 1216int usb_os_desc_prepare_interf_dir(struct config_group *parent,
1217 int n_interf, 1217 int n_interf,
1218 struct usb_os_desc **desc, 1218 struct usb_os_desc **desc,
1219 char **names,
1219 struct module *owner) 1220 struct module *owner)
1220{ 1221{
1221 struct config_group **f_default_groups, *os_desc_group, 1222 struct config_group **f_default_groups, *os_desc_group,
@@ -1257,8 +1258,8 @@ int usb_os_desc_prepare_interf_dir(struct config_group *parent,
1257 d = desc[n_interf]; 1258 d = desc[n_interf];
1258 d->owner = owner; 1259 d->owner = owner;
1259 config_group_init_type_name(&d->group, "", interface_type); 1260 config_group_init_type_name(&d->group, "", interface_type);
1260 config_item_set_name(&d->group.cg_item, "interface.%d", 1261 config_item_set_name(&d->group.cg_item, "interface.%s",
1261 n_interf); 1262 names[n_interf]);
1262 interface_groups[n_interf] = &d->group; 1263 interface_groups[n_interf] = &d->group;
1263 } 1264 }
1264 1265
diff --git a/drivers/usb/gadget/configfs.h b/drivers/usb/gadget/configfs.h
index a14ac792c698..36c468c4f5e9 100644
--- a/drivers/usb/gadget/configfs.h
+++ b/drivers/usb/gadget/configfs.h
@@ -8,6 +8,7 @@ void unregister_gadget_item(struct config_item *item);
8int usb_os_desc_prepare_interf_dir(struct config_group *parent, 8int usb_os_desc_prepare_interf_dir(struct config_group *parent,
9 int n_interf, 9 int n_interf,
10 struct usb_os_desc **desc, 10 struct usb_os_desc **desc,
11 char **names,
11 struct module *owner); 12 struct module *owner);
12 13
13static inline struct usb_os_desc *to_usb_os_desc(struct config_item *item) 14static inline struct usb_os_desc *to_usb_os_desc(struct config_item *item)
diff --git a/drivers/usb/gadget/f_fs.c b/drivers/usb/gadget/f_fs.c
index 74202d67f911..8598c27c7d43 100644
--- a/drivers/usb/gadget/f_fs.c
+++ b/drivers/usb/gadget/f_fs.c
@@ -1483,11 +1483,13 @@ static int functionfs_bind(struct ffs_data *ffs, struct usb_composite_dev *cdev)
1483 ffs->ep0req->context = ffs; 1483 ffs->ep0req->context = ffs;
1484 1484
1485 lang = ffs->stringtabs; 1485 lang = ffs->stringtabs;
1486 for (lang = ffs->stringtabs; *lang; ++lang) { 1486 if (lang) {
1487 struct usb_string *str = (*lang)->strings; 1487 for (; *lang; ++lang) {
1488 int id = first_id; 1488 struct usb_string *str = (*lang)->strings;
1489 for (; str->s; ++id, ++str) 1489 int id = first_id;
1490 str->id = id; 1490 for (; str->s; ++id, ++str)
1491 str->id = id;
1492 }
1491 } 1493 }
1492 1494
1493 ffs->gadget = cdev->gadget; 1495 ffs->gadget = cdev->gadget;
diff --git a/drivers/usb/gadget/f_rndis.c b/drivers/usb/gadget/f_rndis.c
index eed3ad878047..9c41e9515b8e 100644
--- a/drivers/usb/gadget/f_rndis.c
+++ b/drivers/usb/gadget/f_rndis.c
@@ -687,7 +687,7 @@ rndis_bind(struct usb_configuration *c, struct usb_function *f)
687 f->os_desc_table = kzalloc(sizeof(*f->os_desc_table), 687 f->os_desc_table = kzalloc(sizeof(*f->os_desc_table),
688 GFP_KERNEL); 688 GFP_KERNEL);
689 if (!f->os_desc_table) 689 if (!f->os_desc_table)
690 return PTR_ERR(f->os_desc_table); 690 return -ENOMEM;
691 f->os_desc_n = 1; 691 f->os_desc_n = 1;
692 f->os_desc_table[0].os_desc = &rndis_opts->rndis_os_desc; 692 f->os_desc_table[0].os_desc = &rndis_opts->rndis_os_desc;
693 } 693 }
@@ -905,6 +905,7 @@ static struct usb_function_instance *rndis_alloc_inst(void)
905{ 905{
906 struct f_rndis_opts *opts; 906 struct f_rndis_opts *opts;
907 struct usb_os_desc *descs[1]; 907 struct usb_os_desc *descs[1];
908 char *names[1];
908 909
909 opts = kzalloc(sizeof(*opts), GFP_KERNEL); 910 opts = kzalloc(sizeof(*opts), GFP_KERNEL);
910 if (!opts) 911 if (!opts)
@@ -922,8 +923,9 @@ static struct usb_function_instance *rndis_alloc_inst(void)
922 INIT_LIST_HEAD(&opts->rndis_os_desc.ext_prop); 923 INIT_LIST_HEAD(&opts->rndis_os_desc.ext_prop);
923 924
924 descs[0] = &opts->rndis_os_desc; 925 descs[0] = &opts->rndis_os_desc;
926 names[0] = "rndis";
925 usb_os_desc_prepare_interf_dir(&opts->func_inst.group, 1, descs, 927 usb_os_desc_prepare_interf_dir(&opts->func_inst.group, 1, descs,
926 THIS_MODULE); 928 names, THIS_MODULE);
927 config_group_init_type_name(&opts->func_inst.group, "", 929 config_group_init_type_name(&opts->func_inst.group, "",
928 &rndis_func_type); 930 &rndis_func_type);
929 931
diff --git a/drivers/usb/gadget/gr_udc.c b/drivers/usb/gadget/gr_udc.c
index 99a37ed03e27..c7004ee89c90 100644
--- a/drivers/usb/gadget/gr_udc.c
+++ b/drivers/usb/gadget/gr_udc.c
@@ -1532,8 +1532,9 @@ static int gr_ep_enable(struct usb_ep *_ep,
1532 "%s mode: multiple trans./microframe not valid\n", 1532 "%s mode: multiple trans./microframe not valid\n",
1533 (mode == 2 ? "Bulk" : "Control")); 1533 (mode == 2 ? "Bulk" : "Control"));
1534 return -EINVAL; 1534 return -EINVAL;
1535 } else if (nt == 0x11) { 1535 } else if (nt == 0x3) {
1536 dev_err(dev->dev, "Invalid value for trans./microframe\n"); 1536 dev_err(dev->dev,
1537 "Invalid value 0x3 for additional trans./microframe\n");
1537 return -EINVAL; 1538 return -EINVAL;
1538 } else if ((nt + 1) * max > buffer_size) { 1539 } else if ((nt + 1) * max > buffer_size) {
1539 dev_err(dev->dev, "Hw buffer size %d < max payload %d * %d\n", 1540 dev_err(dev->dev, "Hw buffer size %d < max payload %d * %d\n",
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index ee6c16416c30..2e4ce7704908 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -1264,8 +1264,13 @@ dev_release (struct inode *inode, struct file *fd)
1264 1264
1265 kfree (dev->buf); 1265 kfree (dev->buf);
1266 dev->buf = NULL; 1266 dev->buf = NULL;
1267 put_dev (dev);
1268 1267
1268 /* other endpoints were all decoupled from this device */
1269 spin_lock_irq(&dev->lock);
1270 dev->state = STATE_DEV_DISABLED;
1271 spin_unlock_irq(&dev->lock);
1272
1273 put_dev (dev);
1269 return 0; 1274 return 0;
1270} 1275}
1271 1276
diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
index 3d78a8844e43..97b027724ee7 100644
--- a/drivers/usb/gadget/u_ether.c
+++ b/drivers/usb/gadget/u_ether.c
@@ -1120,7 +1120,10 @@ void gether_disconnect(struct gether *link)
1120 1120
1121 DBG(dev, "%s\n", __func__); 1121 DBG(dev, "%s\n", __func__);
1122 1122
1123 netif_tx_lock(dev->net);
1123 netif_stop_queue(dev->net); 1124 netif_stop_queue(dev->net);
1125 netif_tx_unlock(dev->net);
1126
1124 netif_carrier_off(dev->net); 1127 netif_carrier_off(dev->net);
1125 1128
1126 /* disable endpoints, forcing (synchronous) completion 1129 /* disable endpoints, forcing (synchronous) completion
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 61b7817bd66b..03314f861bee 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -176,7 +176,7 @@ config USB_EHCI_HCD_AT91
176 176
177config USB_EHCI_MSM 177config USB_EHCI_MSM
178 tristate "Support for Qualcomm QSD/MSM on-chip EHCI USB controller" 178 tristate "Support for Qualcomm QSD/MSM on-chip EHCI USB controller"
179 depends on ARCH_MSM 179 depends on ARCH_MSM || ARCH_QCOM
180 select USB_EHCI_ROOT_HUB_TT 180 select USB_EHCI_ROOT_HUB_TT
181 ---help--- 181 ---help---
182 Enables support for the USB Host controller present on the 182 Enables support for the USB Host controller present on the
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 2b998c60faf2..aa79e8749040 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -22,6 +22,7 @@
22 22
23 23
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/device.h>
25#include <asm/unaligned.h> 26#include <asm/unaligned.h>
26 27
27#include "xhci.h" 28#include "xhci.h"
@@ -1139,7 +1140,9 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
1139 * including the USB 3.0 roothub, but only if CONFIG_PM_RUNTIME 1140 * including the USB 3.0 roothub, but only if CONFIG_PM_RUNTIME
1140 * is enabled, so also enable remote wake here. 1141 * is enabled, so also enable remote wake here.
1141 */ 1142 */
1142 if (hcd->self.root_hub->do_remote_wakeup) { 1143 if (hcd->self.root_hub->do_remote_wakeup
1144 && device_may_wakeup(hcd->self.controller)) {
1145
1143 if (t1 & PORT_CONNECT) { 1146 if (t1 & PORT_CONNECT) {
1144 t2 |= PORT_WKOC_E | PORT_WKDISC_E; 1147 t2 |= PORT_WKOC_E | PORT_WKDISC_E;
1145 t2 &= ~PORT_WKCONN_E; 1148 t2 &= ~PORT_WKCONN_E;
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index d67ff71209f5..749fc68eb5c1 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -1433,8 +1433,11 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
1433 xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code); 1433 xhci_handle_cmd_reset_ep(xhci, slot_id, cmd_trb, cmd_comp_code);
1434 break; 1434 break;
1435 case TRB_RESET_DEV: 1435 case TRB_RESET_DEV:
1436 WARN_ON(slot_id != TRB_TO_SLOT_ID( 1436 /* SLOT_ID field in reset device cmd completion event TRB is 0.
1437 le32_to_cpu(cmd_trb->generic.field[3]))); 1437 * Use the SLOT_ID from the command TRB instead (xhci 4.6.11)
1438 */
1439 slot_id = TRB_TO_SLOT_ID(
1440 le32_to_cpu(cmd_trb->generic.field[3]));
1438 xhci_handle_cmd_reset_dev(xhci, slot_id, event); 1441 xhci_handle_cmd_reset_dev(xhci, slot_id, event);
1439 break; 1442 break;
1440 case TRB_NEC_GET_FW: 1443 case TRB_NEC_GET_FW:
@@ -3534,7 +3537,7 @@ static unsigned int xhci_get_burst_count(struct xhci_hcd *xhci,
3534 return 0; 3537 return 0;
3535 3538
3536 max_burst = urb->ep->ss_ep_comp.bMaxBurst; 3539 max_burst = urb->ep->ss_ep_comp.bMaxBurst;
3537 return roundup(total_packet_count, max_burst + 1) - 1; 3540 return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
3538} 3541}
3539 3542
3540/* 3543/*
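
The burst-count hunk above replaces roundup() with DIV_ROUND_UP(): the old expression rounded total_packet_count up to a multiple of (max_burst + 1) before subtracting one, which over-reports the number of bursts, whereas a ceiling division is what is wanted. A small illustrative helper (not the driver's actual function) showing the corrected arithmetic:

#include <linux/kernel.h>

/*
 * Example with made-up numbers: 3 packets, bursts of up to 2 packets.
 * roundup(3, 2) - 1 == 3, which is wrong, while
 * DIV_ROUND_UP(3, 2) - 1 == 1, i.e. one burst beyond the first.
 */
static unsigned int example_burst_count(unsigned int total_packet_count,
					unsigned int max_burst)
{
	return DIV_ROUND_UP(total_packet_count, max_burst + 1) - 1;
}
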
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 2b8d9a24af09..7436d5f5e67a 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -936,7 +936,7 @@ int xhci_suspend(struct xhci_hcd *xhci)
936 */ 936 */
937int xhci_resume(struct xhci_hcd *xhci, bool hibernated) 937int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
938{ 938{
939 u32 command, temp = 0; 939 u32 command, temp = 0, status;
940 struct usb_hcd *hcd = xhci_to_hcd(xhci); 940 struct usb_hcd *hcd = xhci_to_hcd(xhci);
941 struct usb_hcd *secondary_hcd; 941 struct usb_hcd *secondary_hcd;
942 int retval = 0; 942 int retval = 0;
@@ -1054,8 +1054,12 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
1054 1054
1055 done: 1055 done:
1056 if (retval == 0) { 1056 if (retval == 0) {
1057 usb_hcd_resume_root_hub(hcd); 1057 /* Resume root hubs only when there are pending events. */
1058 usb_hcd_resume_root_hub(xhci->shared_hcd); 1058 status = readl(&xhci->op_regs->status);
1059 if (status & STS_EINT) {
1060 usb_hcd_resume_root_hub(hcd);
1061 usb_hcd_resume_root_hub(xhci->shared_hcd);
1062 }
1059 } 1063 }
1060 1064
1061 /* 1065 /*
diff --git a/drivers/usb/musb/musb_am335x.c b/drivers/usb/musb/musb_am335x.c
index d2353781bd2d..1e58ed2361cc 100644
--- a/drivers/usb/musb/musb_am335x.c
+++ b/drivers/usb/musb/musb_am335x.c
@@ -19,21 +19,6 @@ err:
19 return ret; 19 return ret;
20} 20}
21 21
22static int of_remove_populated_child(struct device *dev, void *d)
23{
24 struct platform_device *pdev = to_platform_device(dev);
25
26 of_device_unregister(pdev);
27 return 0;
28}
29
30static int am335x_child_remove(struct platform_device *pdev)
31{
32 device_for_each_child(&pdev->dev, NULL, of_remove_populated_child);
33 pm_runtime_disable(&pdev->dev);
34 return 0;
35}
36
37static const struct of_device_id am335x_child_of_match[] = { 22static const struct of_device_id am335x_child_of_match[] = {
38 { .compatible = "ti,am33xx-usb" }, 23 { .compatible = "ti,am33xx-usb" },
39 { }, 24 { },
@@ -42,13 +27,17 @@ MODULE_DEVICE_TABLE(of, am335x_child_of_match);
42 27
43static struct platform_driver am335x_child_driver = { 28static struct platform_driver am335x_child_driver = {
44 .probe = am335x_child_probe, 29 .probe = am335x_child_probe,
45 .remove = am335x_child_remove,
46 .driver = { 30 .driver = {
47 .name = "am335x-usb-childs", 31 .name = "am335x-usb-childs",
48 .of_match_table = am335x_child_of_match, 32 .of_match_table = am335x_child_of_match,
49 }, 33 },
50}; 34};
51 35
52module_platform_driver(am335x_child_driver); 36static int __init am335x_child_init(void)
37{
38 return platform_driver_register(&am335x_child_driver);
39}
40module_init(am335x_child_init);
41
53MODULE_DESCRIPTION("AM33xx child devices"); 42MODULE_DESCRIPTION("AM33xx child devices");
54MODULE_LICENSE("GPL v2"); 43MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 61da471b7aed..eff3c5cf84f4 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -849,7 +849,7 @@ b_host:
849 } 849 }
850 850
851 /* handle babble condition */ 851 /* handle babble condition */
852 if (int_usb & MUSB_INTR_BABBLE) 852 if (int_usb & MUSB_INTR_BABBLE && is_host_active(musb))
853 schedule_work(&musb->recover_work); 853 schedule_work(&musb->recover_work);
854 854
855#if 0 855#if 0
diff --git a/drivers/usb/musb/musb_cppi41.c b/drivers/usb/musb/musb_cppi41.c
index 7b8bbf53127e..5341bb223b7c 100644
--- a/drivers/usb/musb/musb_cppi41.c
+++ b/drivers/usb/musb/musb_cppi41.c
@@ -318,7 +318,7 @@ static void cppi41_dma_callback(void *private_data)
318 } 318 }
319 list_add_tail(&cppi41_channel->tx_check, 319 list_add_tail(&cppi41_channel->tx_check,
320 &controller->early_tx_list); 320 &controller->early_tx_list);
321 if (!hrtimer_active(&controller->early_tx)) { 321 if (!hrtimer_is_queued(&controller->early_tx)) {
322 hrtimer_start_range_ns(&controller->early_tx, 322 hrtimer_start_range_ns(&controller->early_tx,
323 ktime_set(0, 140 * NSEC_PER_USEC), 323 ktime_set(0, 140 * NSEC_PER_USEC),
324 40 * NSEC_PER_USEC, 324 40 * NSEC_PER_USEC,
diff --git a/drivers/usb/musb/musb_dsps.c b/drivers/usb/musb/musb_dsps.c
index 51beb13c7e1a..09529f94e72d 100644
--- a/drivers/usb/musb/musb_dsps.c
+++ b/drivers/usb/musb/musb_dsps.c
@@ -494,10 +494,9 @@ static int dsps_musb_set_mode(struct musb *musb, u8 mode)
494 struct dsps_glue *glue = dev_get_drvdata(dev->parent); 494 struct dsps_glue *glue = dev_get_drvdata(dev->parent);
495 const struct dsps_musb_wrapper *wrp = glue->wrp; 495 const struct dsps_musb_wrapper *wrp = glue->wrp;
496 void __iomem *ctrl_base = musb->ctrl_base; 496 void __iomem *ctrl_base = musb->ctrl_base;
497 void __iomem *base = musb->mregs;
498 u32 reg; 497 u32 reg;
499 498
500 reg = dsps_readl(base, wrp->mode); 499 reg = dsps_readl(ctrl_base, wrp->mode);
501 500
502 switch (mode) { 501 switch (mode) {
503 case MUSB_HOST: 502 case MUSB_HOST:
@@ -510,7 +509,7 @@ static int dsps_musb_set_mode(struct musb *musb, u8 mode)
510 */ 509 */
511 reg |= (1 << wrp->iddig_mux); 510 reg |= (1 << wrp->iddig_mux);
512 511
513 dsps_writel(base, wrp->mode, reg); 512 dsps_writel(ctrl_base, wrp->mode, reg);
514 dsps_writel(ctrl_base, wrp->phy_utmi, 0x02); 513 dsps_writel(ctrl_base, wrp->phy_utmi, 0x02);
515 break; 514 break;
516 case MUSB_PERIPHERAL: 515 case MUSB_PERIPHERAL:
@@ -523,10 +522,10 @@ static int dsps_musb_set_mode(struct musb *musb, u8 mode)
523 */ 522 */
524 reg |= (1 << wrp->iddig_mux); 523 reg |= (1 << wrp->iddig_mux);
525 524
526 dsps_writel(base, wrp->mode, reg); 525 dsps_writel(ctrl_base, wrp->mode, reg);
527 break; 526 break;
528 case MUSB_OTG: 527 case MUSB_OTG:
529 dsps_writel(base, wrp->phy_utmi, 0x02); 528 dsps_writel(ctrl_base, wrp->phy_utmi, 0x02);
530 break; 529 break;
531 default: 530 default:
532 dev_err(glue->dev, "unsupported mode %d\n", mode); 531 dev_err(glue->dev, "unsupported mode %d\n", mode);
diff --git a/drivers/usb/musb/ux500.c b/drivers/usb/musb/ux500.c
index c2e45e632723..f202e5088461 100644
--- a/drivers/usb/musb/ux500.c
+++ b/drivers/usb/musb/ux500.c
@@ -274,7 +274,6 @@ static int ux500_probe(struct platform_device *pdev)
274 musb->dev.parent = &pdev->dev; 274 musb->dev.parent = &pdev->dev;
275 musb->dev.dma_mask = &pdev->dev.coherent_dma_mask; 275 musb->dev.dma_mask = &pdev->dev.coherent_dma_mask;
276 musb->dev.coherent_dma_mask = pdev->dev.coherent_dma_mask; 276 musb->dev.coherent_dma_mask = pdev->dev.coherent_dma_mask;
277 musb->dev.of_node = pdev->dev.of_node;
278 277
279 glue->dev = &pdev->dev; 278 glue->dev = &pdev->dev;
280 glue->musb = musb; 279 glue->musb = musb;
diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c
index ced34f39bdd4..c929370cdaa6 100644
--- a/drivers/usb/phy/phy-msm-usb.c
+++ b/drivers/usb/phy/phy-msm-usb.c
@@ -1229,7 +1229,9 @@ static void msm_otg_sm_work(struct work_struct *w)
1229 motg->chg_state = USB_CHG_STATE_UNDEFINED; 1229 motg->chg_state = USB_CHG_STATE_UNDEFINED;
1230 motg->chg_type = USB_INVALID_CHARGER; 1230 motg->chg_type = USB_INVALID_CHARGER;
1231 } 1231 }
1232 pm_runtime_put_sync(otg->phy->dev); 1232
1233 if (otg->phy->state == OTG_STATE_B_IDLE)
1234 pm_runtime_put_sync(otg->phy->dev);
1233 break; 1235 break;
1234 case OTG_STATE_B_PERIPHERAL: 1236 case OTG_STATE_B_PERIPHERAL:
1235 dev_dbg(otg->phy->dev, "OTG_STATE_B_PERIPHERAL state\n"); 1237 dev_dbg(otg->phy->dev, "OTG_STATE_B_PERIPHERAL state\n");
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index d49f9c326035..4fd36530bfa3 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -681,6 +681,14 @@ usbhs_fifo_read_end:
681 usbhs_pipe_number(pipe), 681 usbhs_pipe_number(pipe),
682 pkt->length, pkt->actual, *is_done, pkt->zero); 682 pkt->length, pkt->actual, *is_done, pkt->zero);
683 683
684 /*
685 * Transmission end
686 */
687 if (*is_done) {
688 if (usbhs_pipe_is_dcp(pipe))
689 usbhs_dcp_control_transfer_done(pipe);
690 }
691
684usbhs_fifo_read_busy: 692usbhs_fifo_read_busy:
685 usbhsf_fifo_unselect(pipe, fifo); 693 usbhsf_fifo_unselect(pipe, fifo);
686 694
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 762e4a5f5ae9..330df5ce435b 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -153,6 +153,7 @@ static const struct usb_device_id id_table[] = {
153 { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ 153 { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
154 { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ 154 { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
155 { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ 155 { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
156 { USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
156 { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */ 157 { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */
157 { USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */ 158 { USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */
158 { USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */ 159 { USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index edf3b124583c..8a3813be1b28 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -720,7 +720,8 @@ static const struct usb_device_id id_table_combined[] = {
720 { USB_DEVICE(FTDI_VID, FTDI_ACG_HFDUAL_PID) }, 720 { USB_DEVICE(FTDI_VID, FTDI_ACG_HFDUAL_PID) },
721 { USB_DEVICE(FTDI_VID, FTDI_YEI_SERVOCENTER31_PID) }, 721 { USB_DEVICE(FTDI_VID, FTDI_YEI_SERVOCENTER31_PID) },
722 { USB_DEVICE(FTDI_VID, FTDI_THORLABS_PID) }, 722 { USB_DEVICE(FTDI_VID, FTDI_THORLABS_PID) },
723 { USB_DEVICE(TESTO_VID, TESTO_USB_INTERFACE_PID) }, 723 { USB_DEVICE(TESTO_VID, TESTO_1_PID) },
724 { USB_DEVICE(TESTO_VID, TESTO_3_PID) },
724 { USB_DEVICE(FTDI_VID, FTDI_GAMMA_SCOUT_PID) }, 725 { USB_DEVICE(FTDI_VID, FTDI_GAMMA_SCOUT_PID) },
725 { USB_DEVICE(FTDI_VID, FTDI_TACTRIX_OPENPORT_13M_PID) }, 726 { USB_DEVICE(FTDI_VID, FTDI_TACTRIX_OPENPORT_13M_PID) },
726 { USB_DEVICE(FTDI_VID, FTDI_TACTRIX_OPENPORT_13S_PID) }, 727 { USB_DEVICE(FTDI_VID, FTDI_TACTRIX_OPENPORT_13S_PID) },
@@ -944,6 +945,8 @@ static const struct usb_device_id id_table_combined[] = {
944 { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_2_PID) }, 945 { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_2_PID) },
945 { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_3_PID) }, 946 { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_3_PID) },
946 { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_4_PID) }, 947 { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_842_4_PID) },
948 /* Infineon Devices */
949 { USB_DEVICE_INTERFACE_NUMBER(INFINEON_VID, INFINEON_TRIBOARD_PID, 1) },
947 { } /* Terminating entry */ 950 { } /* Terminating entry */
948}; 951};
949 952
@@ -1566,14 +1569,17 @@ static void ftdi_set_max_packet_size(struct usb_serial_port *port)
1566 struct usb_device *udev = serial->dev; 1569 struct usb_device *udev = serial->dev;
1567 1570
1568 struct usb_interface *interface = serial->interface; 1571 struct usb_interface *interface = serial->interface;
1569 struct usb_endpoint_descriptor *ep_desc = &interface->cur_altsetting->endpoint[1].desc; 1572 struct usb_endpoint_descriptor *ep_desc;
1570 1573
1571 unsigned num_endpoints; 1574 unsigned num_endpoints;
1572 int i; 1575 unsigned i;
1573 1576
1574 num_endpoints = interface->cur_altsetting->desc.bNumEndpoints; 1577 num_endpoints = interface->cur_altsetting->desc.bNumEndpoints;
1575 dev_info(&udev->dev, "Number of endpoints %d\n", num_endpoints); 1578 dev_info(&udev->dev, "Number of endpoints %d\n", num_endpoints);
1576 1579
1580 if (!num_endpoints)
1581 return;
1582
1577 /* NOTE: some customers have programmed FT232R/FT245R devices 1583 /* NOTE: some customers have programmed FT232R/FT245R devices
1578 * with an endpoint size of 0 - not good. In this case, we 1584 * with an endpoint size of 0 - not good. In this case, we
1579 * want to override the endpoint descriptor setting and use a 1585 * want to override the endpoint descriptor setting and use a
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 500474c48f4b..c4777bc6aee0 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -584,6 +584,12 @@
584#define RATOC_PRODUCT_ID_USB60F 0xb020 584#define RATOC_PRODUCT_ID_USB60F 0xb020
585 585
586/* 586/*
587 * Infineon Technologies
588 */
589#define INFINEON_VID 0x058b
590#define INFINEON_TRIBOARD_PID 0x0028 /* DAS JTAG TriBoard TC1798 V1.0 */
591
592/*
587 * Acton Research Corp. 593 * Acton Research Corp.
588 */ 594 */
589#define ACTON_VID 0x0647 /* Vendor ID */ 595#define ACTON_VID 0x0647 /* Vendor ID */
@@ -798,7 +804,8 @@
798 * Submitted by Colin Leroy 804 * Submitted by Colin Leroy
799 */ 805 */
800#define TESTO_VID 0x128D 806#define TESTO_VID 0x128D
801#define TESTO_USB_INTERFACE_PID 0x0001 807#define TESTO_1_PID 0x0001
808#define TESTO_3_PID 0x0003
802 809
803/* 810/*
804 * Mobility Electronics products. 811 * Mobility Electronics products.
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 59c3108cc136..a9688940543d 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -352,6 +352,9 @@ static void option_instat_callback(struct urb *urb);
352/* Zoom */ 352/* Zoom */
353#define ZOOM_PRODUCT_4597 0x9607 353#define ZOOM_PRODUCT_4597 0x9607
354 354
355/* SpeedUp SU9800 usb 3g modem */
356#define SPEEDUP_PRODUCT_SU9800 0x9800
357
355/* Haier products */ 358/* Haier products */
356#define HAIER_VENDOR_ID 0x201e 359#define HAIER_VENDOR_ID 0x201e
357#define HAIER_PRODUCT_CE100 0x2009 360#define HAIER_PRODUCT_CE100 0x2009
@@ -372,8 +375,12 @@ static void option_instat_callback(struct urb *urb);
372/* Olivetti products */ 375/* Olivetti products */
373#define OLIVETTI_VENDOR_ID 0x0b3c 376#define OLIVETTI_VENDOR_ID 0x0b3c
374#define OLIVETTI_PRODUCT_OLICARD100 0xc000 377#define OLIVETTI_PRODUCT_OLICARD100 0xc000
378#define OLIVETTI_PRODUCT_OLICARD120 0xc001
379#define OLIVETTI_PRODUCT_OLICARD140 0xc002
375#define OLIVETTI_PRODUCT_OLICARD145 0xc003 380#define OLIVETTI_PRODUCT_OLICARD145 0xc003
381#define OLIVETTI_PRODUCT_OLICARD155 0xc004
376#define OLIVETTI_PRODUCT_OLICARD200 0xc005 382#define OLIVETTI_PRODUCT_OLICARD200 0xc005
383#define OLIVETTI_PRODUCT_OLICARD160 0xc00a
377#define OLIVETTI_PRODUCT_OLICARD500 0xc00b 384#define OLIVETTI_PRODUCT_OLICARD500 0xc00b
378 385
379/* Celot products */ 386/* Celot products */
@@ -1480,6 +1487,8 @@ static const struct usb_device_id option_ids[] = {
1480 .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, 1487 .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
1481 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1426, 0xff, 0xff, 0xff), /* ZTE MF91 */ 1488 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1426, 0xff, 0xff, 0xff), /* ZTE MF91 */
1482 .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, 1489 .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
1490 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G v2 */
1491 .driver_info = (kernel_ulong_t)&net_intf2_blacklist },
1483 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) }, 1492 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) },
1484 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) }, 1493 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) },
1485 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) }, 1494 { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) },
@@ -1577,6 +1586,7 @@ static const struct usb_device_id option_ids[] = {
1577 { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14), 1586 { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14),
1578 .driver_info = (kernel_ulong_t)&four_g_w14_blacklist 1587 .driver_info = (kernel_ulong_t)&four_g_w14_blacklist
1579 }, 1588 },
1589 { USB_DEVICE_INTERFACE_CLASS(LONGCHEER_VENDOR_ID, SPEEDUP_PRODUCT_SU9800, 0xff) },
1580 { USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) }, 1590 { USB_DEVICE(LONGCHEER_VENDOR_ID, ZOOM_PRODUCT_4597) },
1581 { USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) }, 1591 { USB_DEVICE(LONGCHEER_VENDOR_ID, IBALL_3_5G_CONNECT) },
1582 { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) }, 1592 { USB_DEVICE(HAIER_VENDOR_ID, HAIER_PRODUCT_CE100) },
@@ -1611,15 +1621,21 @@ static const struct usb_device_id option_ids[] = {
1611 { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) }, 1621 { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) },
1612 { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */ 1622 { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */
1613 { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) }, 1623 { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
1614 1624 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100),
1615 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) }, 1625 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1626 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120),
1627 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1628 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD140),
1629 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1616 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) }, 1630 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) },
1631 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD155),
1632 .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
1617 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200), 1633 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200),
1618 .driver_info = (kernel_ulong_t)&net_intf6_blacklist 1634 .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
1619 }, 1635 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD160),
1636 .driver_info = (kernel_ulong_t)&net_intf6_blacklist },
1620 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD500), 1637 { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD500),
1621 .driver_info = (kernel_ulong_t)&net_intf4_blacklist 1638 .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
1622 },
1623 { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */ 1639 { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */
1624 { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/ 1640 { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/
1625 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) }, 1641 { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) },
diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c
index 9d38ddc8da49..866b5df36ed1 100644
--- a/drivers/usb/storage/scsiglue.c
+++ b/drivers/usb/storage/scsiglue.c
@@ -256,6 +256,10 @@ static int slave_configure(struct scsi_device *sdev)
256 if (us->fflags & US_FL_WRITE_CACHE) 256 if (us->fflags & US_FL_WRITE_CACHE)
257 sdev->wce_default_on = 1; 257 sdev->wce_default_on = 1;
258 258
259 /* A few buggy USB-ATA bridges don't understand FUA */
260 if (us->fflags & US_FL_BROKEN_FUA)
261 sdev->broken_fua = 1;
262
259 } else { 263 } else {
260 264
261 /* Non-disk-type devices don't need to blacklist any pages 265 /* Non-disk-type devices don't need to blacklist any pages
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 174a447868cd..80a5b366255f 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1936,6 +1936,13 @@ UNUSUAL_DEV( 0x14cd, 0x6600, 0x0201, 0x0201,
1936 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 1936 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1937 US_FL_IGNORE_RESIDUE ), 1937 US_FL_IGNORE_RESIDUE ),
1938 1938
1939/* Reported by Michael Büsch <m@bues.ch> */
1940UNUSUAL_DEV( 0x152d, 0x0567, 0x0114, 0x0114,
1941 "JMicron",
1942 "USB to ATA/ATAPI Bridge",
1943 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
1944 US_FL_BROKEN_FUA ),
1945
1939/* Reported by Alexandre Oliva <oliva@lsd.ic.unicamp.br> 1946/* Reported by Alexandre Oliva <oliva@lsd.ic.unicamp.br>
1940 * JMicron responds to USN and several other SCSI ioctls with a 1947 * JMicron responds to USN and several other SCSI ioctls with a
1941 * residue that causes subsequent I/O requests to fail. */ 1948 * residue that causes subsequent I/O requests to fail. */
diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
index 971a760af4a1..8dae2f724a35 100644
--- a/drivers/vhost/net.c
+++ b/drivers/vhost/net.c
@@ -700,14 +700,6 @@ static void handle_rx_net(struct vhost_work *work)
700 handle_rx(net); 700 handle_rx(net);
701} 701}
702 702
703static void vhost_net_free(void *addr)
704{
705 if (is_vmalloc_addr(addr))
706 vfree(addr);
707 else
708 kfree(addr);
709}
710
711static int vhost_net_open(struct inode *inode, struct file *f) 703static int vhost_net_open(struct inode *inode, struct file *f)
712{ 704{
713 struct vhost_net *n; 705 struct vhost_net *n;
@@ -723,7 +715,7 @@ static int vhost_net_open(struct inode *inode, struct file *f)
723 } 715 }
724 vqs = kmalloc(VHOST_NET_VQ_MAX * sizeof(*vqs), GFP_KERNEL); 716 vqs = kmalloc(VHOST_NET_VQ_MAX * sizeof(*vqs), GFP_KERNEL);
725 if (!vqs) { 717 if (!vqs) {
726 vhost_net_free(n); 718 kvfree(n);
727 return -ENOMEM; 719 return -ENOMEM;
728 } 720 }
729 721
@@ -840,7 +832,7 @@ static int vhost_net_release(struct inode *inode, struct file *f)
840 * since jobs can re-queue themselves. */ 832 * since jobs can re-queue themselves. */
841 vhost_net_flush(n); 833 vhost_net_flush(n);
842 kfree(n->dev.vqs); 834 kfree(n->dev.vqs);
843 vhost_net_free(n); 835 kvfree(n);
844 return 0; 836 return 0;
845} 837}
846 838
diff --git a/drivers/vhost/scsi.c b/drivers/vhost/scsi.c
index 4f4ffa4c604e..69906cacd04f 100644
--- a/drivers/vhost/scsi.c
+++ b/drivers/vhost/scsi.c
@@ -1503,14 +1503,6 @@ static int vhost_scsi_set_features(struct vhost_scsi *vs, u64 features)
1503 return 0; 1503 return 0;
1504} 1504}
1505 1505
1506static void vhost_scsi_free(struct vhost_scsi *vs)
1507{
1508 if (is_vmalloc_addr(vs))
1509 vfree(vs);
1510 else
1511 kfree(vs);
1512}
1513
1514static int vhost_scsi_open(struct inode *inode, struct file *f) 1506static int vhost_scsi_open(struct inode *inode, struct file *f)
1515{ 1507{
1516 struct vhost_scsi *vs; 1508 struct vhost_scsi *vs;
@@ -1550,7 +1542,7 @@ static int vhost_scsi_open(struct inode *inode, struct file *f)
1550 return 0; 1542 return 0;
1551 1543
1552err_vqs: 1544err_vqs:
1553 vhost_scsi_free(vs); 1545 kvfree(vs);
1554err_vs: 1546err_vs:
1555 return r; 1547 return r;
1556} 1548}
@@ -1569,7 +1561,7 @@ static int vhost_scsi_release(struct inode *inode, struct file *f)
1569 /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */ 1561 /* Jobs can re-queue themselves in evt kick handler. Do extra flush. */
1570 vhost_scsi_flush(vs); 1562 vhost_scsi_flush(vs);
1571 kfree(vs->dev.vqs); 1563 kfree(vs->dev.vqs);
1572 vhost_scsi_free(vs); 1564 kvfree(vs);
1573 return 0; 1565 return 0;
1574} 1566}
1575 1567
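
Both vhost conversions above delete an open-coded helper that picked vfree() or kfree() via is_vmalloc_addr() and call kvfree() instead, which performs exactly that check. A minimal sketch of the allocation/free pairing kvfree() is intended for; struct big_state and its size are illustrative only:

#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>

struct big_state {
	char payload[64 * 1024];	/* illustrative size only */
};

static struct big_state *big_state_alloc(void)
{
	struct big_state *s;

	/* Try the cheap kmalloc path first, fall back to vmalloc for
	 * large or fragmented allocations. */
	s = kzalloc(sizeof(*s), GFP_KERNEL | __GFP_NOWARN);
	if (!s)
		s = vzalloc(sizeof(*s));
	return s;
}

static void big_state_free(struct big_state *s)
{
	/* kvfree() checks is_vmalloc_addr() internally and calls vfree()
	 * or kfree() accordingly, like the helpers removed above did. */
	kvfree(s);
}
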
diff --git a/drivers/video/fbdev/atmel_lcdfb.c b/drivers/video/fbdev/atmel_lcdfb.c
index e683b6ef9594..d36e830d6fc6 100644
--- a/drivers/video/fbdev/atmel_lcdfb.c
+++ b/drivers/video/fbdev/atmel_lcdfb.c
@@ -1057,6 +1057,7 @@ static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo)
1057 goto put_display_node; 1057 goto put_display_node;
1058 } 1058 }
1059 1059
1060 INIT_LIST_HEAD(&pdata->pwr_gpios);
1060 ret = -ENOMEM; 1061 ret = -ENOMEM;
1061 for (i = 0; i < of_gpio_named_count(display_np, "atmel,power-control-gpio"); i++) { 1062 for (i = 0; i < of_gpio_named_count(display_np, "atmel,power-control-gpio"); i++) {
1062 gpio = of_get_named_gpio_flags(display_np, "atmel,power-control-gpio", 1063 gpio = of_get_named_gpio_flags(display_np, "atmel,power-control-gpio",
@@ -1082,6 +1083,7 @@ static int atmel_lcdfb_of_init(struct atmel_lcdfb_info *sinfo)
1082 dev_err(dev, "set direction output gpio %d failed\n", gpio); 1083 dev_err(dev, "set direction output gpio %d failed\n", gpio);
1083 goto put_display_node; 1084 goto put_display_node;
1084 } 1085 }
1086 list_add(&og->list, &pdata->pwr_gpios);
1085 } 1087 }
1086 1088
1087 if (is_gpio_power) 1089 if (is_gpio_power)
diff --git a/drivers/video/fbdev/bfin_adv7393fb.c b/drivers/video/fbdev/bfin_adv7393fb.c
index a54f7f7d763b..8fe41caac38e 100644
--- a/drivers/video/fbdev/bfin_adv7393fb.c
+++ b/drivers/video/fbdev/bfin_adv7393fb.c
@@ -408,7 +408,7 @@ static int bfin_adv7393_fb_probe(struct i2c_client *client,
408 /* Workaround "PPI Does Not Start Properly In Specific Mode" */ 408 /* Workaround "PPI Does Not Start Properly In Specific Mode" */
409 if (ANOMALY_05000400) { 409 if (ANOMALY_05000400) {
410 ret = gpio_request_one(P_IDENT(P_PPI0_FS3), GPIOF_OUT_INIT_LOW, 410 ret = gpio_request_one(P_IDENT(P_PPI0_FS3), GPIOF_OUT_INIT_LOW,
411 "PPI0_FS3") 411 "PPI0_FS3");
412 if (ret) { 412 if (ret) {
413 dev_err(&client->dev, "PPI0_FS3 GPIO request failed\n"); 413 dev_err(&client->dev, "PPI0_FS3 GPIO request failed\n");
414 ret = -EBUSY; 414 ret = -EBUSY;
diff --git a/drivers/video/fbdev/omap2/dss/omapdss-boot-init.c b/drivers/video/fbdev/omap2/dss/omapdss-boot-init.c
index 99af9e88b2d8..2f0822ee3ff9 100644
--- a/drivers/video/fbdev/omap2/dss/omapdss-boot-init.c
+++ b/drivers/video/fbdev/omap2/dss/omapdss-boot-init.c
@@ -121,9 +121,11 @@ static void __init omapdss_add_to_list(struct device_node *node, bool root)
121{ 121{
122 struct dss_conv_node *n = kmalloc(sizeof(struct dss_conv_node), 122 struct dss_conv_node *n = kmalloc(sizeof(struct dss_conv_node),
123 GFP_KERNEL); 123 GFP_KERNEL);
124 n->node = node; 124 if (n) {
125 n->root = root; 125 n->node = node;
126 list_add(&n->list, &dss_conv_list); 126 n->root = root;
127 list_add(&n->list, &dss_conv_list);
128 }
127} 129}
128 130
129static bool __init omapdss_list_contains(const struct device_node *node) 131static bool __init omapdss_list_contains(const struct device_node *node)
diff --git a/drivers/video/fbdev/vt8500lcdfb.c b/drivers/video/fbdev/vt8500lcdfb.c
index a8f2b280f796..a1134c3f6c11 100644
--- a/drivers/video/fbdev/vt8500lcdfb.c
+++ b/drivers/video/fbdev/vt8500lcdfb.c
@@ -474,8 +474,6 @@ static int vt8500lcd_remove(struct platform_device *pdev)
474 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 474 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
475 release_mem_region(res->start, resource_size(res)); 475 release_mem_region(res->start, resource_size(res));
476 476
477 kfree(fbi);
478
479 return 0; 477 return 0;
480} 478}
481 479
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index b7a506f2bb14..5c660c77f03b 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -426,20 +426,18 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
426 * p2m are consistent. 426 * p2m are consistent.
427 */ 427 */
428 if (!xen_feature(XENFEAT_auto_translated_physmap)) { 428 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
429 unsigned long p;
430 struct page *scratch_page = get_balloon_scratch_page();
431
432 if (!PageHighMem(page)) { 429 if (!PageHighMem(page)) {
430 struct page *scratch_page = get_balloon_scratch_page();
431
433 ret = HYPERVISOR_update_va_mapping( 432 ret = HYPERVISOR_update_va_mapping(
434 (unsigned long)__va(pfn << PAGE_SHIFT), 433 (unsigned long)__va(pfn << PAGE_SHIFT),
435 pfn_pte(page_to_pfn(scratch_page), 434 pfn_pte(page_to_pfn(scratch_page),
436 PAGE_KERNEL_RO), 0); 435 PAGE_KERNEL_RO), 0);
437 BUG_ON(ret); 436 BUG_ON(ret);
438 }
439 p = page_to_pfn(scratch_page);
440 __set_phys_to_machine(pfn, pfn_to_mfn(p));
441 437
442 put_balloon_scratch_page(); 438 put_balloon_scratch_page();
439 }
440 __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
443 } 441 }
444#endif 442#endif
445 443
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index c3667b202f2f..5f1e1f3cd186 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -88,7 +88,6 @@ static int xen_suspend(void *data)
88 88
89 if (!si->cancelled) { 89 if (!si->cancelled) {
90 xen_irq_resume(); 90 xen_irq_resume();
91 xen_console_resume();
92 xen_timer_resume(); 91 xen_timer_resume();
93 } 92 }
94 93
@@ -135,6 +134,10 @@ static void do_suspend(void)
135 134
136 err = stop_machine(xen_suspend, &si, cpumask_of(0)); 135 err = stop_machine(xen_suspend, &si, cpumask_of(0));
137 136
137 /* Resume console as early as possible. */
138 if (!si.cancelled)
139 xen_console_resume();
140
138 raw_notifier_call_chain(&xen_resume_notifier, 0, NULL); 141 raw_notifier_call_chain(&xen_resume_notifier, 0, NULL);
139 142
140 dpm_resume_start(si.cancelled ? PMSG_THAW : PMSG_RESTORE); 143 dpm_resume_start(si.cancelled ? PMSG_THAW : PMSG_RESTORE);
diff --git a/firmware/Makefile b/firmware/Makefile
index 5747417069ca..0862d34cf7d1 100644
--- a/firmware/Makefile
+++ b/firmware/Makefile
@@ -219,6 +219,12 @@ $(obj)/%.fw: $(obj)/%.H16 $(ihex2fw_dep)
219obj-y += $(patsubst %,%.gen.o, $(fw-external-y)) 219obj-y += $(patsubst %,%.gen.o, $(fw-external-y))
220obj-$(CONFIG_FIRMWARE_IN_KERNEL) += $(patsubst %,%.gen.o, $(fw-shipped-y)) 220obj-$(CONFIG_FIRMWARE_IN_KERNEL) += $(patsubst %,%.gen.o, $(fw-shipped-y))
221 221
222ifeq ($(KBUILD_SRC),)
223# Makefile.build only creates subdirectories for O= builds, but external
224# firmware might live outside the kernel source tree
225_dummy := $(foreach d,$(addprefix $(obj)/,$(dir $(fw-external-y))), $(shell [ -d $(d) ] || mkdir -p $(d)))
226endif
227
222# Remove .S files and binaries created from ihex 228# Remove .S files and binaries created from ihex
223# (during 'make clean' .config isn't included so they're all in $(fw-shipped-)) 229# (during 'make clean' .config isn't included so they're all in $(fw-shipped-))
224targets := $(fw-shipped-) $(patsubst $(obj)/%,%, \ 230targets := $(fw-shipped-) $(patsubst $(obj)/%,%, \
diff --git a/fs/aio.c b/fs/aio.c
index 4f078c054b41..1c9c5f0a9e2b 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -830,16 +830,20 @@ void exit_aio(struct mm_struct *mm)
 static void put_reqs_available(struct kioctx *ctx, unsigned nr)
 {
 	struct kioctx_cpu *kcpu;
+	unsigned long flags;
 
 	preempt_disable();
 	kcpu = this_cpu_ptr(ctx->cpu);
 
+	local_irq_save(flags);
 	kcpu->reqs_available += nr;
+
 	while (kcpu->reqs_available >= ctx->req_batch * 2) {
 		kcpu->reqs_available -= ctx->req_batch;
 		atomic_add(ctx->req_batch, &ctx->reqs_available);
 	}
 
+	local_irq_restore(flags);
 	preempt_enable();
 }
 
@@ -847,10 +851,12 @@ static bool get_reqs_available(struct kioctx *ctx)
 {
 	struct kioctx_cpu *kcpu;
 	bool ret = false;
+	unsigned long flags;
 
 	preempt_disable();
 	kcpu = this_cpu_ptr(ctx->cpu);
 
+	local_irq_save(flags);
 	if (!kcpu->reqs_available) {
 		int old, avail = atomic_read(&ctx->reqs_available);
 
@@ -869,6 +875,7 @@ static bool get_reqs_available(struct kioctx *ctx)
 	ret = true;
 	kcpu->reqs_available--;
 out:
+	local_irq_restore(flags);
 	preempt_enable();
 	return ret;
 }
@@ -1021,6 +1028,7 @@ void aio_complete(struct kiocb *iocb, long res, long res2)
 
 	/* everything turned out well, dispose of the aiocb. */
 	kiocb_free(iocb);
+	put_reqs_available(ctx, 1);
 
 	/*
 	 * We have to order our ring_info tail store above and test
@@ -1062,6 +1070,9 @@ static long aio_read_events_ring(struct kioctx *ctx,
 	if (head == tail)
 		goto out;
 
+	head %= ctx->nr_events;
+	tail %= ctx->nr_events;
+
 	while (ret < nr) {
 		long avail;
 		struct io_event *ev;
@@ -1100,8 +1111,6 @@ static long aio_read_events_ring(struct kioctx *ctx,
 	flush_dcache_page(ctx->ring_pages[0]);
 
 	pr_debug("%li h%u t%u\n", ret, head, tail);
-
-	put_reqs_available(ctx, ret);
 out:
 	mutex_unlock(&ctx->ring_lock);
 
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
index d7bd395ab586..1c55388ae633 100644
--- a/fs/autofs4/inode.c
+++ b/fs/autofs4/inode.c
@@ -210,7 +210,7 @@ int autofs4_fill_super(struct super_block *s, void *data, int silent)
 	int pipefd;
 	struct autofs_sb_info *sbi;
 	struct autofs_info *ino;
-	int pgrp;
+	int pgrp = 0;
 	bool pgrp_set = false;
 	int ret = -EINVAL;
 
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 92371c414228..1daea0b47187 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -821,7 +821,7 @@ static void free_workspace(int type, struct list_head *workspace)
 
 	spin_lock(workspace_lock);
 	if (*num_workspace < num_online_cpus()) {
-		list_add_tail(workspace, idle_workspace);
+		list_add(workspace, idle_workspace);
 		(*num_workspace)++;
 		spin_unlock(workspace_lock);
 		goto wake;
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 2af6e66fe788..eea26e1b2fda 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -36,6 +36,7 @@
 #include "check-integrity.h"
 #include "rcu-string.h"
 #include "dev-replace.h"
+#include "sysfs.h"
 
 static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
 				       int scrub_ret);
@@ -562,6 +563,10 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info,
 	fs_info->fs_devices->latest_bdev = tgt_device->bdev;
 	list_add(&tgt_device->dev_alloc_list, &fs_info->fs_devices->alloc_list);
 
+	/* replace the sysfs entry */
+	btrfs_kobj_rm_device(fs_info, src_device);
+	btrfs_kobj_add_device(fs_info, tgt_device);
+
 	btrfs_rm_dev_replace_blocked(fs_info);
 
 	btrfs_rm_dev_replace_srcdev(fs_info, src_device);
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 8bb4aa19898f..08e65e9cf2aa 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -369,7 +369,8 @@ static int verify_parent_transid(struct extent_io_tree *io_tree,
 out:
 	unlock_extent_cached(io_tree, eb->start, eb->start + eb->len - 1,
 			     &cached_state, GFP_NOFS);
-	btrfs_tree_read_unlock_blocking(eb);
+	if (need_lock)
+		btrfs_tree_read_unlock_blocking(eb);
 	return ret;
 }
 
@@ -2904,7 +2905,9 @@ retry_root_backup:
 	if (ret)
 		goto fail_qgroup;
 
+	mutex_lock(&fs_info->cleaner_mutex);
 	ret = btrfs_recover_relocation(tree_root);
+	mutex_unlock(&fs_info->cleaner_mutex);
 	if (ret < 0) {
 		printk(KERN_WARNING
 		       "BTRFS: failed to recover relocation\n");
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 99c253918208..813537f362f9 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -5678,7 +5678,6 @@ void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
 	struct btrfs_caching_control *next;
 	struct btrfs_caching_control *caching_ctl;
 	struct btrfs_block_group_cache *cache;
-	struct btrfs_space_info *space_info;
 
 	down_write(&fs_info->commit_root_sem);
 
@@ -5701,9 +5700,6 @@ void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
 
 	up_write(&fs_info->commit_root_sem);
 
-	list_for_each_entry_rcu(space_info, &fs_info->space_info, list)
-		percpu_counter_set(&space_info->total_bytes_pinned, 0);
-
 	update_global_block_rsv(fs_info);
 }
 
@@ -5741,6 +5737,7 @@ static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
 		spin_lock(&cache->lock);
 		cache->pinned -= len;
 		space_info->bytes_pinned -= len;
+		percpu_counter_add(&space_info->total_bytes_pinned, -len);
 		if (cache->ro) {
 			space_info->bytes_readonly += len;
 			readonly = true;
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 0d321c23069a..47aceb494d1d 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -136,19 +136,22 @@ static unsigned int btrfs_flags_to_ioctl(unsigned int flags)
 void btrfs_update_iflags(struct inode *inode)
 {
 	struct btrfs_inode *ip = BTRFS_I(inode);
-
-	inode->i_flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC);
+	unsigned int new_fl = 0;
 
 	if (ip->flags & BTRFS_INODE_SYNC)
-		inode->i_flags |= S_SYNC;
+		new_fl |= S_SYNC;
 	if (ip->flags & BTRFS_INODE_IMMUTABLE)
-		inode->i_flags |= S_IMMUTABLE;
+		new_fl |= S_IMMUTABLE;
 	if (ip->flags & BTRFS_INODE_APPEND)
-		inode->i_flags |= S_APPEND;
+		new_fl |= S_APPEND;
 	if (ip->flags & BTRFS_INODE_NOATIME)
-		inode->i_flags |= S_NOATIME;
+		new_fl |= S_NOATIME;
 	if (ip->flags & BTRFS_INODE_DIRSYNC)
-		inode->i_flags |= S_DIRSYNC;
+		new_fl |= S_DIRSYNC;
+
+	set_mask_bits(&inode->i_flags,
+		      S_SYNC | S_APPEND | S_IMMUTABLE | S_NOATIME | S_DIRSYNC,
+		      new_fl);
 }
 
 /*
@@ -3139,7 +3142,6 @@ out:
 static void clone_update_extent_map(struct inode *inode,
 				    const struct btrfs_trans_handle *trans,
 				    const struct btrfs_path *path,
-				    struct btrfs_file_extent_item *fi,
 				    const u64 hole_offset,
 				    const u64 hole_len)
 {
@@ -3154,7 +3156,11 @@ static void clone_update_extent_map(struct inode *inode,
 		return;
 	}
 
-	if (fi) {
+	if (path) {
+		struct btrfs_file_extent_item *fi;
+
+		fi = btrfs_item_ptr(path->nodes[0], path->slots[0],
+				    struct btrfs_file_extent_item);
 		btrfs_extent_item_to_extent_map(inode, path, fi, false, em);
 		em->generation = -1;
 		if (btrfs_file_extent_type(path->nodes[0], fi) ==
@@ -3508,18 +3514,15 @@ process_slot:
 					    btrfs_item_ptr_offset(leaf, slot),
 					    size);
 			inode_add_bytes(inode, datal);
-			extent = btrfs_item_ptr(leaf, slot,
-						struct btrfs_file_extent_item);
 		}
 
 		/* If we have an implicit hole (NO_HOLES feature). */
 		if (drop_start < new_key.offset)
 			clone_update_extent_map(inode, trans,
-					path, NULL, drop_start,
+					NULL, drop_start,
 					new_key.offset - drop_start);
 
-		clone_update_extent_map(inode, trans, path,
-				extent, 0, 0);
+		clone_update_extent_map(inode, trans, path, 0, 0);
 
 		btrfs_mark_buffer_dirty(leaf);
 		btrfs_release_path(path);
@@ -3562,12 +3565,10 @@ process_slot:
 			btrfs_end_transaction(trans, root);
 			goto out;
 		}
+		clone_update_extent_map(inode, trans, NULL, last_dest_end,
+					destoff + len - last_dest_end);
 		ret = clone_finish_inode_update(trans, inode, destoff + len,
 						destoff, olen);
-		if (ret)
-			goto out;
-		clone_update_extent_map(inode, trans, path, NULL, last_dest_end,
-				destoff + len - last_dest_end);
 	}
 
 out:
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index e12441c7cf1d..7187b14faa6c 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -484,8 +484,19 @@ void btrfs_wait_logged_extents(struct btrfs_root *log, u64 transid)
 					   log_list);
 		list_del_init(&ordered->log_list);
 		spin_unlock_irq(&log->log_extents_lock[index]);
+
+		if (!test_bit(BTRFS_ORDERED_IO_DONE, &ordered->flags) &&
+		    !test_bit(BTRFS_ORDERED_DIRECT, &ordered->flags)) {
+			struct inode *inode = ordered->inode;
+			u64 start = ordered->file_offset;
+			u64 end = ordered->file_offset + ordered->len - 1;
+
+			WARN_ON(!inode);
+			filemap_fdatawrite_range(inode->i_mapping, start, end);
+		}
 		wait_event(ordered->wait, test_bit(BTRFS_ORDERED_IO_DONE,
 						   &ordered->flags));
+
 		btrfs_put_ordered_extent(ordered);
 		spin_lock_irq(&log->log_extents_lock[index]);
 	}
diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c
index 6efd70d3b64f..9626b4ad3b9a 100644
--- a/fs/btrfs/print-tree.c
+++ b/fs/btrfs/print-tree.c
@@ -54,7 +54,7 @@ static void print_extent_data_ref(struct extent_buffer *eb,
 	       btrfs_extent_data_ref_count(eb, ref));
 }
 
-static void print_extent_item(struct extent_buffer *eb, int slot)
+static void print_extent_item(struct extent_buffer *eb, int slot, int type)
 {
 	struct btrfs_extent_item *ei;
 	struct btrfs_extent_inline_ref *iref;
@@ -63,7 +63,6 @@ static void print_extent_item(struct extent_buffer *eb, int slot)
 	struct btrfs_disk_key key;
 	unsigned long end;
 	unsigned long ptr;
-	int type;
 	u32 item_size = btrfs_item_size_nr(eb, slot);
 	u64 flags;
 	u64 offset;
@@ -88,7 +87,8 @@ static void print_extent_item(struct extent_buffer *eb, int slot)
 	       btrfs_extent_refs(eb, ei), btrfs_extent_generation(eb, ei),
 	       flags);
 
-	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
+	if ((type == BTRFS_EXTENT_ITEM_KEY) &&
+	    flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
 		struct btrfs_tree_block_info *info;
 		info = (struct btrfs_tree_block_info *)(ei + 1);
 		btrfs_tree_block_key(eb, info, &key);
@@ -223,7 +223,8 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
 			       btrfs_disk_root_refs(l, ri));
 			break;
 		case BTRFS_EXTENT_ITEM_KEY:
-			print_extent_item(l, i);
+		case BTRFS_METADATA_ITEM_KEY:
+			print_extent_item(l, i, type);
 			break;
 		case BTRFS_TREE_BLOCK_REF_KEY:
 			printk(KERN_INFO "\t\ttree block backref\n");
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index 4055291a523e..4a88f073fdd7 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -1956,9 +1956,10 @@ static int __raid56_parity_recover(struct btrfs_raid_bio *rbio)
 	 * pages are going to be uptodate.
 	 */
 	for (stripe = 0; stripe < bbio->num_stripes; stripe++) {
-		if (rbio->faila == stripe ||
-		    rbio->failb == stripe)
+		if (rbio->faila == stripe || rbio->failb == stripe) {
+			atomic_inc(&rbio->bbio->error);
 			continue;
+		}
 
 		for (pagenr = 0; pagenr < nr_pages; pagenr++) {
 			struct page *p;
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 4662d92a4b73..8e16bca69c56 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -522,9 +522,10 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
 		case Opt_ssd_spread:
 			btrfs_set_and_info(root, SSD_SPREAD,
 					   "use spread ssd allocation scheme");
+			btrfs_set_opt(info->mount_opt, SSD);
 			break;
 		case Opt_nossd:
-			btrfs_clear_and_info(root, NOSSD,
+			btrfs_set_and_info(root, NOSSD,
 					     "not using ssd allocation scheme");
 			btrfs_clear_opt(info->mount_opt, SSD);
 			break;
@@ -1467,7 +1468,9 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
 			goto restore;
 
 		/* recover relocation */
+		mutex_lock(&fs_info->cleaner_mutex);
 		ret = btrfs_recover_relocation(root);
+		mutex_unlock(&fs_info->cleaner_mutex);
 		if (ret)
 			goto restore;
 
@@ -1808,6 +1811,8 @@ static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
 	list_for_each_entry(dev, head, dev_list) {
 		if (dev->missing)
 			continue;
+		if (!dev->name)
+			continue;
 		if (!first_dev || dev->devid < first_dev->devid)
 			first_dev = dev;
 	}
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index df39458f1487..78699364f537 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -605,14 +605,37 @@ static void init_feature_attrs(void)
 	}
 }
 
-static int add_device_membership(struct btrfs_fs_info *fs_info)
+int btrfs_kobj_rm_device(struct btrfs_fs_info *fs_info,
+		struct btrfs_device *one_device)
+{
+	struct hd_struct *disk;
+	struct kobject *disk_kobj;
+
+	if (!fs_info->device_dir_kobj)
+		return -EINVAL;
+
+	if (one_device) {
+		disk = one_device->bdev->bd_part;
+		disk_kobj = &part_to_dev(disk)->kobj;
+
+		sysfs_remove_link(fs_info->device_dir_kobj,
+				  disk_kobj->name);
+	}
+
+	return 0;
+}
+
+int btrfs_kobj_add_device(struct btrfs_fs_info *fs_info,
+		struct btrfs_device *one_device)
 {
 	int error = 0;
 	struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
 	struct btrfs_device *dev;
 
-	fs_info->device_dir_kobj = kobject_create_and_add("devices",
+	if (!fs_info->device_dir_kobj)
+		fs_info->device_dir_kobj = kobject_create_and_add("devices",
 					&fs_info->super_kobj);
+
 	if (!fs_info->device_dir_kobj)
 		return -ENOMEM;
 
@@ -623,6 +646,9 @@ static int add_device_membership(struct btrfs_fs_info *fs_info)
 		if (!dev->bdev)
 			continue;
 
+		if (one_device && one_device != dev)
+			continue;
+
 		disk = dev->bdev->bd_part;
 		disk_kobj = &part_to_dev(disk)->kobj;
 
@@ -666,7 +692,7 @@ int btrfs_sysfs_add_one(struct btrfs_fs_info *fs_info)
 	if (error)
 		goto failure;
 
-	error = add_device_membership(fs_info);
+	error = btrfs_kobj_add_device(fs_info, NULL);
 	if (error)
 		goto failure;
 
diff --git a/fs/btrfs/sysfs.h b/fs/btrfs/sysfs.h
index 9ab576318a84..ac46df37504c 100644
--- a/fs/btrfs/sysfs.h
+++ b/fs/btrfs/sysfs.h
@@ -66,4 +66,8 @@ char *btrfs_printable_features(enum btrfs_feature_set set, u64 flags);
 extern const char * const btrfs_feature_set_names[3];
 extern struct kobj_type space_info_ktype;
 extern struct kobj_type btrfs_raid_ktype;
+int btrfs_kobj_add_device(struct btrfs_fs_info *fs_info,
+		struct btrfs_device *one_device);
+int btrfs_kobj_rm_device(struct btrfs_fs_info *fs_info,
+		struct btrfs_device *one_device);
 #endif /* _BTRFS_SYSFS_H_ */
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 511839c04f11..5f379affdf23 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -386,11 +386,13 @@ start_transaction(struct btrfs_root *root, u64 num_items, unsigned int type,
 	bool reloc_reserved = false;
 	int ret;
 
+	/* Send isn't supposed to start transactions. */
+	ASSERT(current->journal_info != (void *)BTRFS_SEND_TRANS_STUB);
+
 	if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
 		return ERR_PTR(-EROFS);
 
-	if (current->journal_info &&
-	    current->journal_info != (void *)BTRFS_SEND_TRANS_STUB) {
+	if (current->journal_info) {
 		WARN_ON(type & TRANS_EXTWRITERS);
 		h = current->journal_info;
 		h->use_count++;
@@ -491,6 +493,7 @@ again:
 	smp_mb();
 	if (cur_trans->state >= TRANS_STATE_BLOCKED &&
 	    may_wait_transaction(root, type)) {
+		current->journal_info = h;
 		btrfs_commit_transaction(h, root);
 		goto again;
 	}
@@ -1615,11 +1618,6 @@ static int btrfs_flush_all_pending_stuffs(struct btrfs_trans_handle *trans,
 	int ret;
 
 	ret = btrfs_run_delayed_items(trans, root);
-	/*
-	 * running the delayed items may have added new refs. account
-	 * them now so that they hinder processing of more delayed refs
-	 * as little as possible.
-	 */
 	if (ret)
 		return ret;
 
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index c83b24251e53..6cb82f62cb7c 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -40,6 +40,7 @@
 #include "rcu-string.h"
 #include "math.h"
 #include "dev-replace.h"
+#include "sysfs.h"
 
 static int init_first_rw_device(struct btrfs_trans_handle *trans,
 				struct btrfs_root *root,
@@ -554,12 +555,14 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
 	 * This is ok to do without rcu read locked because we hold the
 	 * uuid mutex so nothing we touch in here is going to disappear.
 	 */
-	name = rcu_string_strdup(orig_dev->name->str, GFP_NOFS);
-	if (!name) {
-		kfree(device);
-		goto error;
+	if (orig_dev->name) {
+		name = rcu_string_strdup(orig_dev->name->str, GFP_NOFS);
+		if (!name) {
+			kfree(device);
+			goto error;
+		}
+		rcu_assign_pointer(device->name, name);
 	}
-	rcu_assign_pointer(device->name, name);
 
 	list_add(&device->dev_list, &fs_devices->devices);
 	device->fs_devices = fs_devices;
@@ -1677,8 +1680,11 @@ int btrfs_rm_device(struct btrfs_root *root, char *device_path)
 	if (device->bdev == root->fs_info->fs_devices->latest_bdev)
 		root->fs_info->fs_devices->latest_bdev = next_device->bdev;
 
-	if (device->bdev)
+	if (device->bdev) {
 		device->fs_devices->open_devices--;
+		/* remove sysfs entry */
+		btrfs_kobj_rm_device(root->fs_info, device);
+	}
 
 	call_rcu(&device->rcu, free_device);
 
@@ -2143,9 +2149,14 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
 	total_bytes = btrfs_super_num_devices(root->fs_info->super_copy);
 	btrfs_set_super_num_devices(root->fs_info->super_copy,
 				    total_bytes + 1);
+
+	/* add sysfs device entry */
+	btrfs_kobj_add_device(root->fs_info, device);
+
 	mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
 
 	if (seeding_dev) {
+		char fsid_buf[BTRFS_UUID_UNPARSED_SIZE];
 		ret = init_first_rw_device(trans, root, device);
 		if (ret) {
 			btrfs_abort_transaction(trans, root, ret);
@@ -2156,6 +2167,14 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
 			btrfs_abort_transaction(trans, root, ret);
 			goto error_trans;
 		}
+
+		/* Sprouting would change fsid of the mounted root,
+		 * so rename the fsid on the sysfs
+		 */
+		snprintf(fsid_buf, BTRFS_UUID_UNPARSED_SIZE, "%pU",
+						root->fs_info->fsid);
+		if (kobject_rename(&root->fs_info->super_kobj, fsid_buf))
+			goto error_trans;
 	} else {
 		ret = btrfs_add_device(trans, root, device);
 		if (ret) {
@@ -2205,6 +2224,7 @@ error_trans:
 	unlock_chunks(root);
 	btrfs_end_transaction(trans, root);
 	rcu_string_free(device->name);
+	btrfs_kobj_rm_device(root->fs_info, device);
 	kfree(device);
 error:
 	blkdev_put(bdev, FMODE_EXCL);
diff --git a/fs/btrfs/zlib.c b/fs/btrfs/zlib.c
index 4f196314c0c1..b67d8fc81277 100644
--- a/fs/btrfs/zlib.c
+++ b/fs/btrfs/zlib.c
@@ -136,7 +136,7 @@ static int zlib_compress_pages(struct list_head *ws,
 		if (workspace->def_strm.total_in > 8192 &&
 		    workspace->def_strm.total_in <
 		    workspace->def_strm.total_out) {
-			ret = -EIO;
+			ret = -E2BIG;
 			goto out;
 		}
 		/* we need another page for writing out. Test this
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
index 0227b45ef00a..15e9505aa35f 100644
--- a/fs/cifs/cifs_unicode.c
+++ b/fs/cifs/cifs_unicode.c
@@ -290,7 +290,8 @@ int
 cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
 		 const struct nls_table *cp, int mapChars)
 {
-	int i, j, charlen;
+	int i, charlen;
+	int j = 0;
 	char src_char;
 	__le16 dst_char;
 	wchar_t tmp;
@@ -298,12 +299,11 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
 	if (!mapChars)
 		return cifs_strtoUTF16(target, source, PATH_MAX, cp);
 
-	for (i = 0, j = 0; i < srclen; j++) {
+	for (i = 0; i < srclen; j++) {
 		src_char = source[i];
 		charlen = 1;
 		switch (src_char) {
 		case 0:
-			put_unaligned(0, &target[j]);
 			goto ctoUTF16_out;
 		case ':':
 			dst_char = cpu_to_le16(UNI_COLON);
@@ -350,6 +350,7 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
 	}
 
 ctoUTF16_out:
+	put_unaligned(0, &target[j]); /* Null terminate target unicode string */
 	return j;
 }
 
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 2c90d07c0b3a..888398067420 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -725,6 +725,19 @@ out_nls:
 	goto out;
 }
 
+static ssize_t
+cifs_loose_read_iter(struct kiocb *iocb, struct iov_iter *iter)
+{
+	ssize_t rc;
+	struct inode *inode = file_inode(iocb->ki_filp);
+
+	rc = cifs_revalidate_mapping(inode);
+	if (rc)
+		return rc;
+
+	return generic_file_read_iter(iocb, iter);
+}
+
 static ssize_t cifs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 {
 	struct inode *inode = file_inode(iocb->ki_filp);
@@ -881,7 +894,7 @@ const struct inode_operations cifs_symlink_inode_ops = {
 const struct file_operations cifs_file_ops = {
 	.read = new_sync_read,
 	.write = new_sync_write,
-	.read_iter = generic_file_read_iter,
+	.read_iter = cifs_loose_read_iter,
 	.write_iter = cifs_file_write_iter,
 	.open = cifs_open,
 	.release = cifs_close,
@@ -939,7 +952,7 @@ const struct file_operations cifs_file_direct_ops = {
 const struct file_operations cifs_file_nobrl_ops = {
 	.read = new_sync_read,
 	.write = new_sync_write,
-	.read_iter = generic_file_read_iter,
+	.read_iter = cifs_loose_read_iter,
 	.write_iter = cifs_file_write_iter,
 	.open = cifs_open,
 	.release = cifs_close,
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index 264ece71bdb2..68559fd557fb 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -374,7 +374,7 @@ cifs_create_mf_symlink(unsigned int xid, struct cifs_tcon *tcon,
 	oparms.cifs_sb = cifs_sb;
 	oparms.desired_access = GENERIC_WRITE;
 	oparms.create_options = create_options;
-	oparms.disposition = FILE_OPEN;
+	oparms.disposition = FILE_CREATE;
 	oparms.path = path;
 	oparms.fid = &fid;
 	oparms.reconnect = false;
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 0762d143e252..fca382037ddd 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -194,7 +194,16 @@ static void ext4_init_block_bitmap(struct super_block *sb,
 	if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
 		ext4_error(sb, "Checksum bad for group %u", block_group);
 		grp = ext4_get_group_info(sb, block_group);
+		if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
+			percpu_counter_sub(&sbi->s_freeclusters_counter,
+					   grp->bb_free);
 		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
+		if (!EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
+			int count;
+			count = ext4_free_inodes_count(sb, gdp);
+			percpu_counter_sub(&sbi->s_freeinodes_counter,
+					   count);
+		}
 		set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
 		return;
 	}
@@ -359,6 +368,7 @@ static void ext4_validate_block_bitmap(struct super_block *sb,
 {
 	ext4_fsblk_t blk;
 	struct ext4_group_info *grp = ext4_get_group_info(sb, block_group);
+	struct ext4_sb_info *sbi = EXT4_SB(sb);
 
 	if (buffer_verified(bh))
 		return;
@@ -369,6 +379,9 @@ static void ext4_validate_block_bitmap(struct super_block *sb,
 		ext4_unlock_group(sb, block_group);
 		ext4_error(sb, "bg %u: block %llu: invalid block bitmap",
 			   block_group, blk);
+		if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
+			percpu_counter_sub(&sbi->s_freeclusters_counter,
+					   grp->bb_free);
 		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
 		return;
 	}
@@ -376,6 +389,9 @@ static void ext4_validate_block_bitmap(struct super_block *sb,
 			desc, bh))) {
 		ext4_unlock_group(sb, block_group);
 		ext4_error(sb, "bg %u: bad block bitmap checksum", block_group);
+		if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
+			percpu_counter_sub(&sbi->s_freeclusters_counter,
+					   grp->bb_free);
 		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
 		return;
 	}
diff --git a/fs/ext4/extents_status.c b/fs/ext4/extents_status.c
index 3f5c188953a4..0b7e28e7eaa4 100644
--- a/fs/ext4/extents_status.c
+++ b/fs/ext4/extents_status.c
@@ -966,10 +966,10 @@ retry:
 			continue;
 		}
 
-		if (ei->i_es_lru_nr == 0 || ei == locked_ei)
+		if (ei->i_es_lru_nr == 0 || ei == locked_ei ||
+		    !write_trylock(&ei->i_es_lock))
 			continue;
 
-		write_lock(&ei->i_es_lock);
 		shrunk = __es_try_to_reclaim_extents(ei, nr_to_scan);
 		if (ei->i_es_lru_nr == 0)
 			list_del_init(&ei->i_es_lru);
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 0ee59a6644e2..5b87fc36aab8 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -71,6 +71,7 @@ static unsigned ext4_init_inode_bitmap(struct super_block *sb,
 				       struct ext4_group_desc *gdp)
 {
 	struct ext4_group_info *grp;
+	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	J_ASSERT_BH(bh, buffer_locked(bh));
 
 	/* If checksum is bad mark all blocks and inodes use to prevent
@@ -78,7 +79,16 @@ static unsigned ext4_init_inode_bitmap(struct super_block *sb,
 	if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
 		ext4_error(sb, "Checksum bad for group %u", block_group);
 		grp = ext4_get_group_info(sb, block_group);
+		if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
+			percpu_counter_sub(&sbi->s_freeclusters_counter,
+					   grp->bb_free);
 		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
+		if (!EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
+			int count;
+			count = ext4_free_inodes_count(sb, gdp);
+			percpu_counter_sub(&sbi->s_freeinodes_counter,
+					   count);
+		}
 		set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
 		return 0;
 	}
@@ -116,6 +126,7 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
 	struct buffer_head *bh = NULL;
 	ext4_fsblk_t bitmap_blk;
 	struct ext4_group_info *grp;
+	struct ext4_sb_info *sbi = EXT4_SB(sb);
 
 	desc = ext4_get_group_desc(sb, block_group, NULL);
 	if (!desc)
@@ -185,6 +196,12 @@ verify:
 		ext4_error(sb, "Corrupt inode bitmap - block_group = %u, "
 			   "inode_bitmap = %llu", block_group, bitmap_blk);
 		grp = ext4_get_group_info(sb, block_group);
+		if (!EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
+			int count;
+			count = ext4_free_inodes_count(sb, desc);
+			percpu_counter_sub(&sbi->s_freeinodes_counter,
+					   count);
+		}
 		set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
 		return NULL;
 	}
@@ -321,6 +338,12 @@ out:
 			fatal = err;
 	} else {
 		ext4_error(sb, "bit already cleared for inode %lu", ino);
+		if (gdp && !EXT4_MB_GRP_IBITMAP_CORRUPT(grp)) {
+			int count;
+			count = ext4_free_inodes_count(sb, gdp);
+			percpu_counter_sub(&sbi->s_freeinodes_counter,
+					   count);
+		}
 		set_bit(EXT4_GROUP_INFO_IBITMAP_CORRUPT_BIT, &grp->bb_state);
 	}
 
@@ -851,6 +874,13 @@ got:
 		goto out;
 	}
 
+	BUFFER_TRACE(group_desc_bh, "get_write_access");
+	err = ext4_journal_get_write_access(handle, group_desc_bh);
+	if (err) {
+		ext4_std_error(sb, err);
+		goto out;
+	}
+
 	/* We may have to initialize the block bitmap if it isn't already */
 	if (ext4_has_group_desc_csum(sb) &&
 	    gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
@@ -887,13 +917,6 @@ got:
 		}
 	}
 
-	BUFFER_TRACE(group_desc_bh, "get_write_access");
-	err = ext4_journal_get_write_access(handle, group_desc_bh);
-	if (err) {
-		ext4_std_error(sb, err);
-		goto out;
-	}
-
 	/* Update the relevant bg descriptor fields */
 	if (ext4_has_group_desc_csum(sb)) {
 		int free;
diff --git a/fs/ext4/indirect.c b/fs/ext4/indirect.c
index 8a57e9fcd1b9..fd69da194826 100644
--- a/fs/ext4/indirect.c
+++ b/fs/ext4/indirect.c
@@ -389,7 +389,13 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
 		return 0;
 failed:
 	for (; i >= 0; i--) {
-		if (i != indirect_blks && branch[i].bh)
+		/*
+		 * We want to ext4_forget() only freshly allocated indirect
+		 * blocks. Buffer for new_blocks[i-1] is at branch[i].bh and
+		 * buffer at branch[0].bh is indirect block / inode already
+		 * existing before ext4_alloc_branch() was called.
+		 */
+		if (i > 0 && i != indirect_blks && branch[i].bh)
 			ext4_forget(handle, 1, inode, branch[i].bh,
 				    branch[i].bh->b_blocknr);
 		ext4_free_blocks(handle, inode, NULL, new_blocks[i],
@@ -1310,16 +1316,24 @@ static int free_hole_blocks(handle_t *handle, struct inode *inode,
 		blk = *i_data;
 		if (level > 0) {
 			ext4_lblk_t first2;
+			ext4_lblk_t count2;
+
 			bh = sb_bread(inode->i_sb, le32_to_cpu(blk));
 			if (!bh) {
 				EXT4_ERROR_INODE_BLOCK(inode, le32_to_cpu(blk),
 						       "Read failure");
 				return -EIO;
 			}
-			first2 = (first > offset) ? first - offset : 0;
+			if (first > offset) {
+				first2 = first - offset;
+				count2 = count;
+			} else {
+				first2 = 0;
+				count2 = count - (offset - first);
+			}
 			ret = free_hole_blocks(handle, inode, bh,
 					       (__le32 *)bh->b_data, level - 1,
-					       first2, count - offset,
+					       first2, count2,
 					       inode->i_sb->s_blocksize >> 2);
 			if (ret) {
 				brelse(bh);
@@ -1329,8 +1343,8 @@ static int free_hole_blocks(handle_t *handle, struct inode *inode,
 		if (level == 0 ||
 		    (bh && all_zeroes((__le32 *)bh->b_data,
 				      (__le32 *)bh->b_data + addr_per_block))) {
-			ext4_free_data(handle, inode, parent_bh, &blk, &blk+1);
-			*i_data = 0;
+			ext4_free_data(handle, inode, parent_bh,
+				       i_data, i_data + 1);
 		}
 		brelse(bh);
 		bh = NULL;
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 59e31622cc6e..2dcb936be90e 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -722,6 +722,7 @@ void ext4_mb_generate_buddy(struct super_block *sb,
 				void *buddy, void *bitmap, ext4_group_t group)
 {
 	struct ext4_group_info *grp = ext4_get_group_info(sb, group);
+	struct ext4_sb_info *sbi = EXT4_SB(sb);
 	ext4_grpblk_t max = EXT4_CLUSTERS_PER_GROUP(sb);
 	ext4_grpblk_t i = 0;
 	ext4_grpblk_t first;
@@ -751,14 +752,17 @@ void ext4_mb_generate_buddy(struct super_block *sb,
 
 	if (free != grp->bb_free) {
 		ext4_grp_locked_error(sb, group, 0, 0,
-				      "%u clusters in bitmap, %u in gd; "
-				      "block bitmap corrupt.",
+				      "block bitmap and bg descriptor "
+				      "inconsistent: %u vs %u free clusters",
 				      free, grp->bb_free);
 		/*
 		 * If we intend to continue, we consider group descriptor
 		 * corrupt and update bb_free using bitmap value
 		 */
 		grp->bb_free = free;
+		if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
+			percpu_counter_sub(&sbi->s_freeclusters_counter,
+					   grp->bb_free);
 		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT, &grp->bb_state);
 	}
 	mb_set_largest_free_order(sb, grp);
@@ -1431,6 +1435,7 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
 	right_is_free = !mb_test_bit(last + 1, e4b->bd_bitmap);
 
 	if (unlikely(block != -1)) {
+		struct ext4_sb_info *sbi = EXT4_SB(sb);
 		ext4_fsblk_t blocknr;
 
 		blocknr = ext4_group_first_block_no(sb, e4b->bd_group);
@@ -1441,6 +1446,9 @@ static void mb_free_blocks(struct inode *inode, struct ext4_buddy *e4b,
 				      "freeing already freed block "
 				      "(bit %u); block bitmap corrupt.",
 				      block);
+		if (!EXT4_MB_GRP_BBITMAP_CORRUPT(e4b->bd_info))
+			percpu_counter_sub(&sbi->s_freeclusters_counter,
+					   e4b->bd_info->bb_free);
 		/* Mark the block group as corrupt. */
 		set_bit(EXT4_GROUP_INFO_BBITMAP_CORRUPT_BIT,
 			&e4b->bd_info->bb_state);
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index b9b9aabfb4d2..6df7bc611dbd 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1525,8 +1525,6 @@ static int handle_mount_opt(struct super_block *sb, char *opt, int token,
 			arg = JBD2_DEFAULT_MAX_COMMIT_AGE;
 		sbi->s_commit_interval = HZ * arg;
 	} else if (token == Opt_max_batch_time) {
-		if (arg == 0)
-			arg = EXT4_DEF_MAX_BATCH_TIME;
 		sbi->s_max_batch_time = arg;
 	} else if (token == Opt_min_batch_time) {
 		sbi->s_min_batch_time = arg;
@@ -2809,10 +2807,11 @@ static void print_daily_error_info(unsigned long arg)
 	es = sbi->s_es;
 
 	if (es->s_error_count)
-		ext4_msg(sb, KERN_NOTICE, "error count: %u",
+		/* fsck newer than v1.41.13 is needed to clean this condition. */
+		ext4_msg(sb, KERN_NOTICE, "error count since last fsck: %u",
			 le32_to_cpu(es->s_error_count));
 	if (es->s_first_error_time) {
-		printk(KERN_NOTICE "EXT4-fs (%s): initial error at %u: %.*s:%d",
+		printk(KERN_NOTICE "EXT4-fs (%s): initial error at time %u: %.*s:%d",
 		       sb->s_id, le32_to_cpu(es->s_first_error_time),
 		       (int) sizeof(es->s_first_error_func),
 		       es->s_first_error_func,
@@ -2826,7 +2825,7 @@ static void print_daily_error_info(unsigned long arg)
 		printk("\n");
 	}
 	if (es->s_last_error_time) {
-		printk(KERN_NOTICE "EXT4-fs (%s): last error at %u: %.*s:%d",
+		printk(KERN_NOTICE "EXT4-fs (%s): last error at time %u: %.*s:%d",
 		       sb->s_id, le32_to_cpu(es->s_last_error_time),
 		       (int) sizeof(es->s_last_error_func),
 		       es->s_last_error_func,
@@ -3880,38 +3879,19 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 			goto failed_mount2;
 		}
 	}
-
-	/*
-	 * set up enough so that it can read an inode,
-	 * and create new inode for buddy allocator
-	 */
-	sbi->s_gdb_count = db_count;
-	if (!test_opt(sb, NOLOAD) &&
-	    EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL))
-		sb->s_op = &ext4_sops;
-	else
-		sb->s_op = &ext4_nojournal_sops;
-
-	ext4_ext_init(sb);
-	err = ext4_mb_init(sb);
-	if (err) {
-		ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)",
-			 err);
-		goto failed_mount2;
-	}
-
 	if (!ext4_check_descriptors(sb, &first_not_zeroed)) {
 		ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
-		goto failed_mount2a;
+		goto failed_mount2;
 	}
 	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG))
 		if (!ext4_fill_flex_info(sb)) {
 			ext4_msg(sb, KERN_ERR,
 				 "unable to initialize "
 				 "flex_bg meta info!");
-			goto failed_mount2a;
+			goto failed_mount2;
 		}
 
+	sbi->s_gdb_count = db_count;
 	get_random_bytes(&sbi->s_next_generation, sizeof(u32));
 	spin_lock_init(&sbi->s_next_gen_lock);
 
@@ -3946,6 +3926,14 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
 	sbi->s_stripe = ext4_get_stripe_size(sbi);
 	sbi->s_extent_max_zeroout_kb = 32;
 
+	/*
+	 * set up enough so that it can read an inode
+	 */
+	if (!test_opt(sb, NOLOAD) &&
+	    EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL))
+		sb->s_op = &ext4_sops;
+	else
+		sb->s_op = &ext4_nojournal_sops;
 	sb->s_export_op = &ext4_export_ops;
 	sb->s_xattr = ext4_xattr_handlers;
 #ifdef CONFIG_QUOTA
@@ -4135,13 +4123,21 @@ no_journal:
 	if (err) {
 		ext4_msg(sb, KERN_ERR, "failed to reserve %llu clusters for "
			 "reserved pool", ext4_calculate_resv_clusters(sb));
-		goto failed_mount5;
+		goto failed_mount4a;
 	}
 
 	err = ext4_setup_system_zone(sb);
 	if (err) {
 		ext4_msg(sb, KERN_ERR, "failed to initialize system "
			 "zone (%d)", err);
+		goto failed_mount4a;
+	}
+
+	ext4_ext_init(sb);
+	err = ext4_mb_init(sb);
+	if (err) {
+		ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)",
+			 err);
 		goto failed_mount5;
 	}
 
@@ -4218,8 +4214,11 @@ failed_mount8:
 failed_mount7:
 	ext4_unregister_li_request(sb);
 failed_mount6:
-	ext4_release_system_zone(sb);
+	ext4_mb_release(sb);
 failed_mount5:
+	ext4_ext_release(sb);
+	ext4_release_system_zone(sb);
+failed_mount4a:
 	dput(sb->s_root);
 	sb->s_root = NULL;
 failed_mount4:
@@ -4243,14 +4242,11 @@ failed_mount3:
 	percpu_counter_destroy(&sbi->s_extent_cache_cnt);
 	if (sbi->s_mmp_tsk)
 		kthread_stop(sbi->s_mmp_tsk);
-failed_mount2a:
-	ext4_mb_release(sb);
 failed_mount2:
 	for (i = 0; i < db_count; i++)
 		brelse(sbi->s_group_desc[i]);
 	ext4_kvfree(sbi->s_group_desc);
 failed_mount:
-	ext4_ext_release(sb);
 	if (sbi->s_chksum_driver)
 		crypto_free_shash(sbi->s_chksum_driver);
 	if (sbi->s_proc) {
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 0924521306b4..f8cf619edb5f 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -608,8 +608,8 @@ static int __allocate_data_block(struct dnode_of_data *dn)
  * b. do not use extent cache for better performance
  * c. give the block addresses to blockdev
  */
-static int get_data_block(struct inode *inode, sector_t iblock,
-			struct buffer_head *bh_result, int create)
+static int __get_data_block(struct inode *inode, sector_t iblock,
+			struct buffer_head *bh_result, int create, bool fiemap)
 {
 	struct f2fs_sb_info *sbi = F2FS_SB(inode->i_sb);
 	unsigned int blkbits = inode->i_sb->s_blocksize_bits;
@@ -637,7 +637,7 @@ static int get_data_block(struct inode *inode, sector_t iblock,
 		err = 0;
 		goto unlock_out;
 	}
-	if (dn.data_blkaddr == NEW_ADDR)
+	if (dn.data_blkaddr == NEW_ADDR && !fiemap)
 		goto put_out;
 
 	if (dn.data_blkaddr != NULL_ADDR) {
@@ -671,7 +671,7 @@ get_next:
 			err = 0;
 			goto unlock_out;
 		}
-		if (dn.data_blkaddr == NEW_ADDR)
+		if (dn.data_blkaddr == NEW_ADDR && !fiemap)
 			goto put_out;
 
 		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
@@ -708,10 +708,23 @@ out:
 	return err;
 }
 
+static int get_data_block(struct inode *inode, sector_t iblock,
+			struct buffer_head *bh_result, int create)
+{
+	return __get_data_block(inode, iblock, bh_result, create, false);
+}
+
+static int get_data_block_fiemap(struct inode *inode, sector_t iblock,
+			struct buffer_head *bh_result, int create)
+{
+	return __get_data_block(inode, iblock, bh_result, create, true);
+}
+
 int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
 		u64 start, u64 len)
 {
-	return generic_block_fiemap(inode, fieinfo, start, len, get_data_block);
+	return generic_block_fiemap(inode, fieinfo,
+				start, len, get_data_block_fiemap);
 }
 
 static int f2fs_read_data_page(struct file *file, struct page *page)
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 966acb039e3b..a4addd72ebbd 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -376,11 +376,11 @@ static struct page *init_inode_metadata(struct inode *inode,
 
 put_error:
 	f2fs_put_page(page, 1);
+error:
 	/* once the failed inode becomes a bad inode, i_mode is S_IFREG */
 	truncate_inode_pages(&inode->i_data, 0);
 	truncate_blocks(inode, 0);
 	remove_dirty_dir_inode(inode);
-error:
 	remove_inode_page(inode);
 	return ERR_PTR(err);
 }
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index e51c732b0dd9..58df97e174d0 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -342,9 +342,6 @@ struct f2fs_sm_info {
 	struct dirty_seglist_info *dirty_info;	/* dirty segment information */
 	struct curseg_info *curseg_array;	/* active segment information */
 
-	struct list_head wblist_head;	/* list of under-writeback pages */
-	spinlock_t wblist_lock;		/* lock for checkpoint */
-
 	block_t seg0_blkaddr;		/* block address of 0'th segment */
 	block_t main_blkaddr;		/* start block address of main area */
 	block_t ssa_blkaddr;		/* start block address of SSA area */
@@ -644,7 +641,8 @@ static inline void f2fs_unlock_all(struct f2fs_sb_info *sbi)
  */
 static inline int check_nid_range(struct f2fs_sb_info *sbi, nid_t nid)
 {
-	WARN_ON((nid >= NM_I(sbi)->max_nid));
+	if (unlikely(nid < F2FS_ROOT_INO(sbi)))
+		return -EINVAL;
 	if (unlikely(nid >= NM_I(sbi)->max_nid))
 		return -EINVAL;
 	return 0;
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index c58e33075719..7d8b96275092 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -659,16 +659,19 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
 	off_start = offset & (PAGE_CACHE_SIZE - 1);
 	off_end = (offset + len) & (PAGE_CACHE_SIZE - 1);
 
+	f2fs_lock_op(sbi);
+
 	for (index = pg_start; index <= pg_end; index++) {
 		struct dnode_of_data dn;
 
-		f2fs_lock_op(sbi);
+		if (index == pg_end && !off_end)
+			goto noalloc;
+
 		set_new_dnode(&dn, inode, NULL, NULL, 0);
 		ret = f2fs_reserve_block(&dn, index);
-		f2fs_unlock_op(sbi);
 		if (ret)
 			break;
-
+noalloc:
 		if (pg_start == pg_end)
 			new_size = offset + len;
 		else if (index == pg_start && off_start)
@@ -683,8 +686,9 @@ static int expand_inode_data(struct inode *inode, loff_t offset,
 			i_size_read(inode) < new_size) {
 			i_size_write(inode, new_size);
 			mark_inode_dirty(inode);
-			f2fs_write_inode(inode, NULL);
+			update_inode_page(inode);
 		}
+	f2fs_unlock_op(sbi);
 
 	return ret;
 }
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index adc622c6bdce..2cf6962f6cc8 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -78,6 +78,7 @@ static int do_read_inode(struct inode *inode)
 	if (check_nid_range(sbi, inode->i_ino)) {
 		f2fs_msg(inode->i_sb, KERN_ERR, "bad inode number: %lu",
 			 (unsigned long) inode->i_ino);
+		WARN_ON(1);
 		return -EINVAL;
 	}
 
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 9138c32aa698..a6bdddc33ce2 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -417,9 +417,6 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
 		}
 
 		f2fs_set_link(new_dir, new_entry, new_page, old_inode);
-		down_write(&F2FS_I(old_inode)->i_sem);
-		F2FS_I(old_inode)->i_pino = new_dir->i_ino;
-		up_write(&F2FS_I(old_inode)->i_sem);
 
 		new_inode->i_ctime = CURRENT_TIME;
 		down_write(&F2FS_I(new_inode)->i_sem);
@@ -448,6 +445,10 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
 		}
 	}
 
+	down_write(&F2FS_I(old_inode)->i_sem);
+	file_lost_pino(old_inode);
+	up_write(&F2FS_I(old_inode)->i_sem);
+
 	old_inode->i_ctime = CURRENT_TIME;
 	mark_inode_dirty(old_inode);
 
@@ -457,9 +458,6 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
 	if (old_dir != new_dir) {
 		f2fs_set_link(old_inode, old_dir_entry,
 					old_dir_page, new_dir);
-		down_write(&F2FS_I(old_inode)->i_sem);
-		F2FS_I(old_inode)->i_pino = new_dir->i_ino;
-		up_write(&F2FS_I(old_inode)->i_sem);
 		update_inode_page(old_inode);
 	} else {
 		kunmap(old_dir_page);
@@ -474,7 +472,8 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
 	return 0;
 
 put_out_dir:
-	f2fs_put_page(new_page, 1);
+	kunmap(new_page);
+	f2fs_put_page(new_page, 0);
 out_dir:
 	if (old_dir_entry) {
 		kunmap(old_dir_page);
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index 9dfb9a042fd2..4b697ccc9b0c 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -42,6 +42,8 @@ bool available_free_memory(struct f2fs_sb_info *sbi, int type)
42 mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >> 12; 42 mem_size = (nm_i->nat_cnt * sizeof(struct nat_entry)) >> 12;
43 res = mem_size < ((val.totalram * nm_i->ram_thresh / 100) >> 2); 43 res = mem_size < ((val.totalram * nm_i->ram_thresh / 100) >> 2);
44 } else if (type == DIRTY_DENTS) { 44 } else if (type == DIRTY_DENTS) {
45 if (sbi->sb->s_bdi->dirty_exceeded)
46 return false;
45 mem_size = get_pages(sbi, F2FS_DIRTY_DENTS); 47 mem_size = get_pages(sbi, F2FS_DIRTY_DENTS);
46 res = mem_size < ((val.totalram * nm_i->ram_thresh / 100) >> 1); 48 res = mem_size < ((val.totalram * nm_i->ram_thresh / 100) >> 1);
47 } 49 }
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index f25f0e07e26f..d04613df710a 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -272,14 +272,15 @@ int create_flush_cmd_control(struct f2fs_sb_info *sbi)
272 return -ENOMEM; 272 return -ENOMEM;
273 spin_lock_init(&fcc->issue_lock); 273 spin_lock_init(&fcc->issue_lock);
274 init_waitqueue_head(&fcc->flush_wait_queue); 274 init_waitqueue_head(&fcc->flush_wait_queue);
275 sbi->sm_info->cmd_control_info = fcc;
275 fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi, 276 fcc->f2fs_issue_flush = kthread_run(issue_flush_thread, sbi,
276 "f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev)); 277 "f2fs_flush-%u:%u", MAJOR(dev), MINOR(dev));
277 if (IS_ERR(fcc->f2fs_issue_flush)) { 278 if (IS_ERR(fcc->f2fs_issue_flush)) {
278 err = PTR_ERR(fcc->f2fs_issue_flush); 279 err = PTR_ERR(fcc->f2fs_issue_flush);
279 kfree(fcc); 280 kfree(fcc);
281 sbi->sm_info->cmd_control_info = NULL;
280 return err; 282 return err;
281 } 283 }
282 sbi->sm_info->cmd_control_info = fcc;
283 284
284 return err; 285 return err;
285} 286}
@@ -1885,8 +1886,6 @@ int build_segment_manager(struct f2fs_sb_info *sbi)
1885 1886
1886 /* init sm info */ 1887 /* init sm info */
1887 sbi->sm_info = sm_info; 1888 sbi->sm_info = sm_info;
1888 INIT_LIST_HEAD(&sm_info->wblist_head);
1889 spin_lock_init(&sm_info->wblist_lock);
1890 sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr); 1889 sm_info->seg0_blkaddr = le32_to_cpu(raw_super->segment0_blkaddr);
1891 sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr); 1890 sm_info->main_blkaddr = le32_to_cpu(raw_super->main_blkaddr);
1892 sm_info->segment_count = le32_to_cpu(raw_super->segment_count); 1891 sm_info->segment_count = le32_to_cpu(raw_super->segment_count);
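A note on the reordering in create_flush_cmd_control() above: cmd_control_info is now published before kthread_run() because the flush thread may dereference it as soon as it is scheduled, and it is cleared again if thread creation fails. A rough userspace sketch of that publish-before-start pattern, with invented names and pthreads standing in for kthreads:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct flush_ctl {
	int pending;
};

static struct flush_ctl *global_ctl;	/* read by the worker thread */

static void *flush_worker(void *arg)
{
	(void)arg;
	/* Safe only because global_ctl was set before the thread started. */
	printf("worker sees %d pending requests\n", global_ctl->pending);
	return NULL;
}

static int start_flush_thread(pthread_t *tid)
{
	struct flush_ctl *ctl = calloc(1, sizeof(*ctl));

	if (!ctl)
		return -1;
	global_ctl = ctl;			/* publish before starting the thread */
	if (pthread_create(tid, NULL, flush_worker, NULL)) {
		free(ctl);
		global_ctl = NULL;		/* undo the publish on failure */
		return -1;
	}
	return 0;
}

int main(void)
{
	pthread_t tid;

	if (start_flush_thread(&tid) == 0)
		pthread_join(tid, NULL);
	return 0;
}
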
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index b2b18637cb9e..8f96d9372ade 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -689,9 +689,7 @@ static struct inode *f2fs_nfs_get_inode(struct super_block *sb,
689 struct f2fs_sb_info *sbi = F2FS_SB(sb); 689 struct f2fs_sb_info *sbi = F2FS_SB(sb);
690 struct inode *inode; 690 struct inode *inode;
691 691
692 if (unlikely(ino < F2FS_ROOT_INO(sbi))) 692 if (check_nid_range(sbi, ino))
693 return ERR_PTR(-ESTALE);
694 if (unlikely(ino >= NM_I(sbi)->max_nid))
695 return ERR_PTR(-ESTALE); 693 return ERR_PTR(-ESTALE);
696 694
697 /* 695 /*
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 098f97bdcf1b..ca887314aba9 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -643,9 +643,8 @@ struct fuse_copy_state {
643 unsigned long seglen; 643 unsigned long seglen;
644 unsigned long addr; 644 unsigned long addr;
645 struct page *pg; 645 struct page *pg;
646 void *mapaddr;
647 void *buf;
648 unsigned len; 646 unsigned len;
647 unsigned offset;
649 unsigned move_pages:1; 648 unsigned move_pages:1;
650}; 649};
651 650
@@ -666,23 +665,17 @@ static void fuse_copy_finish(struct fuse_copy_state *cs)
666 if (cs->currbuf) { 665 if (cs->currbuf) {
667 struct pipe_buffer *buf = cs->currbuf; 666 struct pipe_buffer *buf = cs->currbuf;
668 667
669 if (!cs->write) { 668 if (cs->write)
670 kunmap_atomic(cs->mapaddr);
671 } else {
672 kunmap_atomic(cs->mapaddr);
673 buf->len = PAGE_SIZE - cs->len; 669 buf->len = PAGE_SIZE - cs->len;
674 }
675 cs->currbuf = NULL; 670 cs->currbuf = NULL;
676 cs->mapaddr = NULL; 671 } else if (cs->pg) {
677 } else if (cs->mapaddr) {
678 kunmap_atomic(cs->mapaddr);
679 if (cs->write) { 672 if (cs->write) {
680 flush_dcache_page(cs->pg); 673 flush_dcache_page(cs->pg);
681 set_page_dirty_lock(cs->pg); 674 set_page_dirty_lock(cs->pg);
682 } 675 }
683 put_page(cs->pg); 676 put_page(cs->pg);
684 cs->mapaddr = NULL;
685 } 677 }
678 cs->pg = NULL;
686} 679}
687 680
688/* 681/*
@@ -691,7 +684,7 @@ static void fuse_copy_finish(struct fuse_copy_state *cs)
691 */ 684 */
692static int fuse_copy_fill(struct fuse_copy_state *cs) 685static int fuse_copy_fill(struct fuse_copy_state *cs)
693{ 686{
694 unsigned long offset; 687 struct page *page;
695 int err; 688 int err;
696 689
697 unlock_request(cs->fc, cs->req); 690 unlock_request(cs->fc, cs->req);
@@ -706,14 +699,12 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
706 699
707 BUG_ON(!cs->nr_segs); 700 BUG_ON(!cs->nr_segs);
708 cs->currbuf = buf; 701 cs->currbuf = buf;
709 cs->mapaddr = kmap_atomic(buf->page); 702 cs->pg = buf->page;
703 cs->offset = buf->offset;
710 cs->len = buf->len; 704 cs->len = buf->len;
711 cs->buf = cs->mapaddr + buf->offset;
712 cs->pipebufs++; 705 cs->pipebufs++;
713 cs->nr_segs--; 706 cs->nr_segs--;
714 } else { 707 } else {
715 struct page *page;
716
717 if (cs->nr_segs == cs->pipe->buffers) 708 if (cs->nr_segs == cs->pipe->buffers)
718 return -EIO; 709 return -EIO;
719 710
@@ -726,8 +717,8 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
726 buf->len = 0; 717 buf->len = 0;
727 718
728 cs->currbuf = buf; 719 cs->currbuf = buf;
729 cs->mapaddr = kmap_atomic(page); 720 cs->pg = page;
730 cs->buf = cs->mapaddr; 721 cs->offset = 0;
731 cs->len = PAGE_SIZE; 722 cs->len = PAGE_SIZE;
732 cs->pipebufs++; 723 cs->pipebufs++;
733 cs->nr_segs++; 724 cs->nr_segs++;
@@ -740,14 +731,13 @@ static int fuse_copy_fill(struct fuse_copy_state *cs)
740 cs->iov++; 731 cs->iov++;
741 cs->nr_segs--; 732 cs->nr_segs--;
742 } 733 }
743 err = get_user_pages_fast(cs->addr, 1, cs->write, &cs->pg); 734 err = get_user_pages_fast(cs->addr, 1, cs->write, &page);
744 if (err < 0) 735 if (err < 0)
745 return err; 736 return err;
746 BUG_ON(err != 1); 737 BUG_ON(err != 1);
747 offset = cs->addr % PAGE_SIZE; 738 cs->pg = page;
748 cs->mapaddr = kmap_atomic(cs->pg); 739 cs->offset = cs->addr % PAGE_SIZE;
749 cs->buf = cs->mapaddr + offset; 740 cs->len = min(PAGE_SIZE - cs->offset, cs->seglen);
750 cs->len = min(PAGE_SIZE - offset, cs->seglen);
751 cs->seglen -= cs->len; 741 cs->seglen -= cs->len;
752 cs->addr += cs->len; 742 cs->addr += cs->len;
753 } 743 }
@@ -760,15 +750,20 @@ static int fuse_copy_do(struct fuse_copy_state *cs, void **val, unsigned *size)
760{ 750{
761 unsigned ncpy = min(*size, cs->len); 751 unsigned ncpy = min(*size, cs->len);
762 if (val) { 752 if (val) {
753 void *pgaddr = kmap_atomic(cs->pg);
754 void *buf = pgaddr + cs->offset;
755
763 if (cs->write) 756 if (cs->write)
764 memcpy(cs->buf, *val, ncpy); 757 memcpy(buf, *val, ncpy);
765 else 758 else
766 memcpy(*val, cs->buf, ncpy); 759 memcpy(*val, buf, ncpy);
760
761 kunmap_atomic(pgaddr);
767 *val += ncpy; 762 *val += ncpy;
768 } 763 }
769 *size -= ncpy; 764 *size -= ncpy;
770 cs->len -= ncpy; 765 cs->len -= ncpy;
771 cs->buf += ncpy; 766 cs->offset += ncpy;
772 return ncpy; 767 return ncpy;
773} 768}
774 769
@@ -874,8 +869,8 @@ static int fuse_try_move_page(struct fuse_copy_state *cs, struct page **pagep)
874out_fallback_unlock: 869out_fallback_unlock:
875 unlock_page(newpage); 870 unlock_page(newpage);
876out_fallback: 871out_fallback:
877 cs->mapaddr = kmap_atomic(buf->page); 872 cs->pg = buf->page;
878 cs->buf = cs->mapaddr + buf->offset; 873 cs->offset = buf->offset;
879 874
880 err = lock_request(cs->fc, cs->req); 875 err = lock_request(cs->fc, cs->req);
881 if (err) 876 if (err)
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index 42198359fa1b..0c6048247a34 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -198,7 +198,8 @@ static int fuse_dentry_revalidate(struct dentry *entry, unsigned int flags)
198 inode = ACCESS_ONCE(entry->d_inode); 198 inode = ACCESS_ONCE(entry->d_inode);
199 if (inode && is_bad_inode(inode)) 199 if (inode && is_bad_inode(inode))
200 goto invalid; 200 goto invalid;
201 else if (fuse_dentry_time(entry) < get_jiffies_64()) { 201 else if (time_before64(fuse_dentry_time(entry), get_jiffies_64()) ||
202 (flags & LOOKUP_REVAL)) {
202 int err; 203 int err;
203 struct fuse_entry_out outarg; 204 struct fuse_entry_out outarg;
204 struct fuse_req *req; 205 struct fuse_req *req;
@@ -814,13 +815,6 @@ static int fuse_rename_common(struct inode *olddir, struct dentry *oldent,
814 return err; 815 return err;
815} 816}
816 817
817static int fuse_rename(struct inode *olddir, struct dentry *oldent,
818 struct inode *newdir, struct dentry *newent)
819{
820 return fuse_rename_common(olddir, oldent, newdir, newent, 0,
821 FUSE_RENAME, sizeof(struct fuse_rename_in));
822}
823
824static int fuse_rename2(struct inode *olddir, struct dentry *oldent, 818static int fuse_rename2(struct inode *olddir, struct dentry *oldent,
825 struct inode *newdir, struct dentry *newent, 819 struct inode *newdir, struct dentry *newent,
826 unsigned int flags) 820 unsigned int flags)
@@ -831,17 +825,30 @@ static int fuse_rename2(struct inode *olddir, struct dentry *oldent,
831 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE)) 825 if (flags & ~(RENAME_NOREPLACE | RENAME_EXCHANGE))
832 return -EINVAL; 826 return -EINVAL;
833 827
834 if (fc->no_rename2 || fc->minor < 23) 828 if (flags) {
835 return -EINVAL; 829 if (fc->no_rename2 || fc->minor < 23)
830 return -EINVAL;
836 831
837 err = fuse_rename_common(olddir, oldent, newdir, newent, flags, 832 err = fuse_rename_common(olddir, oldent, newdir, newent, flags,
838 FUSE_RENAME2, sizeof(struct fuse_rename2_in)); 833 FUSE_RENAME2,
839 if (err == -ENOSYS) { 834 sizeof(struct fuse_rename2_in));
840 fc->no_rename2 = 1; 835 if (err == -ENOSYS) {
841 err = -EINVAL; 836 fc->no_rename2 = 1;
837 err = -EINVAL;
838 }
839 } else {
840 err = fuse_rename_common(olddir, oldent, newdir, newent, 0,
841 FUSE_RENAME,
842 sizeof(struct fuse_rename_in));
842 } 843 }
844
843 return err; 845 return err;
846}
844 847
848static int fuse_rename(struct inode *olddir, struct dentry *oldent,
849 struct inode *newdir, struct dentry *newent)
850{
851 return fuse_rename2(olddir, oldent, newdir, newent, 0);
845} 852}
846 853
847static int fuse_link(struct dentry *entry, struct inode *newdir, 854static int fuse_link(struct dentry *entry, struct inode *newdir,
@@ -985,7 +992,7 @@ int fuse_update_attributes(struct inode *inode, struct kstat *stat,
985 int err; 992 int err;
986 bool r; 993 bool r;
987 994
988 if (fi->i_time < get_jiffies_64()) { 995 if (time_before64(fi->i_time, get_jiffies_64())) {
989 r = true; 996 r = true;
990 err = fuse_do_getattr(inode, stat, file); 997 err = fuse_do_getattr(inode, stat, file);
991 } else { 998 } else {
@@ -1171,7 +1178,7 @@ static int fuse_permission(struct inode *inode, int mask)
1171 ((mask & MAY_EXEC) && S_ISREG(inode->i_mode))) { 1178 ((mask & MAY_EXEC) && S_ISREG(inode->i_mode))) {
1172 struct fuse_inode *fi = get_fuse_inode(inode); 1179 struct fuse_inode *fi = get_fuse_inode(inode);
1173 1180
1174 if (fi->i_time < get_jiffies_64()) { 1181 if (time_before64(fi->i_time, get_jiffies_64())) {
1175 refreshed = true; 1182 refreshed = true;
1176 1183
1177 err = fuse_perm_getattr(inode, mask); 1184 err = fuse_perm_getattr(inode, mask);
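The revalidation checks above switch plain '<' comparisons of 64-bit jiffies values to time_before64(). A minimal userspace sketch, with an assumed helper name, of why the signed-difference form stays correct across counter wraparound while the plain comparison does not:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Same trick as the kernel's time_before64(): compare via signed difference. */
static bool before64(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) < 0;
}

int main(void)
{
	uint64_t deadline = UINT64_MAX - 5;	/* timeout armed just before the wrap */
	uint64_t now = 10;			/* counter has since wrapped */

	printf("plain now < deadline  : %d\n", now < deadline);	  /* 1: wrongly "not expired" */
	printf("before64(deadline,now): %d\n", before64(deadline, now)); /* 1: correctly expired */
	return 0;
}
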
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 6e16dad13e9b..40ac2628ddcf 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1687,7 +1687,7 @@ static int fuse_writepage_locked(struct page *page)
1687 error = -EIO; 1687 error = -EIO;
1688 req->ff = fuse_write_file_get(fc, fi); 1688 req->ff = fuse_write_file_get(fc, fi);
1689 if (!req->ff) 1689 if (!req->ff)
1690 goto err_free; 1690 goto err_nofile;
1691 1691
1692 fuse_write_fill(req, req->ff, page_offset(page), 0); 1692 fuse_write_fill(req, req->ff, page_offset(page), 0);
1693 1693
@@ -1715,6 +1715,8 @@ static int fuse_writepage_locked(struct page *page)
1715 1715
1716 return 0; 1716 return 0;
1717 1717
1718err_nofile:
1719 __free_page(tmp_page);
1718err_free: 1720err_free:
1719 fuse_request_free(req); 1721 fuse_request_free(req);
1720err: 1722err:
@@ -1955,8 +1957,8 @@ static int fuse_writepages(struct address_space *mapping,
1955 data.ff = NULL; 1957 data.ff = NULL;
1956 1958
1957 err = -ENOMEM; 1959 err = -ENOMEM;
1958 data.orig_pages = kzalloc(sizeof(struct page *) * 1960 data.orig_pages = kcalloc(FUSE_MAX_PAGES_PER_REQ,
1959 FUSE_MAX_PAGES_PER_REQ, 1961 sizeof(struct page *),
1960 GFP_NOFS); 1962 GFP_NOFS);
1961 if (!data.orig_pages) 1963 if (!data.orig_pages)
1962 goto out; 1964 goto out;
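The allocation above moves from kzalloc(count * size) to kcalloc(count, size), whose multiplication is overflow-checked. A small userspace illustration of the difference (the count here is chosen purely to force the wrap):

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	size_t n = (size_t)-1 / sizeof(int) + 2;	/* n * sizeof(int) wraps around */

	void *a = malloc(n * sizeof(int));	/* wrapped size: a tiny buffer "succeeds" */
	void *b = calloc(n, sizeof(int));	/* overflow detected: returns NULL */

	printf("malloc(n * size) = %p\n", a);
	printf("calloc(n, size)  = %p\n", b);
	free(a);
	free(b);
	return 0;
}
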
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 754dcf23de8a..8474028d7848 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -478,6 +478,17 @@ static const match_table_t tokens = {
478 {OPT_ERR, NULL} 478 {OPT_ERR, NULL}
479}; 479};
480 480
481static int fuse_match_uint(substring_t *s, unsigned int *res)
482{
483 int err = -ENOMEM;
484 char *buf = match_strdup(s);
485 if (buf) {
486 err = kstrtouint(buf, 10, res);
487 kfree(buf);
488 }
489 return err;
490}
491
481static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev) 492static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
482{ 493{
483 char *p; 494 char *p;
@@ -488,6 +499,7 @@ static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
488 while ((p = strsep(&opt, ",")) != NULL) { 499 while ((p = strsep(&opt, ",")) != NULL) {
489 int token; 500 int token;
490 int value; 501 int value;
502 unsigned uv;
491 substring_t args[MAX_OPT_ARGS]; 503 substring_t args[MAX_OPT_ARGS];
492 if (!*p) 504 if (!*p)
493 continue; 505 continue;
@@ -511,18 +523,18 @@ static int parse_fuse_opt(char *opt, struct fuse_mount_data *d, int is_bdev)
511 break; 523 break;
512 524
513 case OPT_USER_ID: 525 case OPT_USER_ID:
514 if (match_int(&args[0], &value)) 526 if (fuse_match_uint(&args[0], &uv))
515 return 0; 527 return 0;
516 d->user_id = make_kuid(current_user_ns(), value); 528 d->user_id = make_kuid(current_user_ns(), uv);
517 if (!uid_valid(d->user_id)) 529 if (!uid_valid(d->user_id))
518 return 0; 530 return 0;
519 d->user_id_present = 1; 531 d->user_id_present = 1;
520 break; 532 break;
521 533
522 case OPT_GROUP_ID: 534 case OPT_GROUP_ID:
523 if (match_int(&args[0], &value)) 535 if (fuse_match_uint(&args[0], &uv))
524 return 0; 536 return 0;
525 d->group_id = make_kgid(current_user_ns(), value); 537 d->group_id = make_kgid(current_user_ns(), uv);
526 if (!gid_valid(d->group_id)) 538 if (!gid_valid(d->group_id))
527 return 0; 539 return 0;
528 d->group_id_present = 1; 540 d->group_id_present = 1;
@@ -1006,7 +1018,7 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
1006 1018
1007 sb->s_flags &= ~(MS_NOSEC | MS_I_VERSION); 1019 sb->s_flags &= ~(MS_NOSEC | MS_I_VERSION);
1008 1020
1009 if (!parse_fuse_opt((char *) data, &d, is_bdev)) 1021 if (!parse_fuse_opt(data, &d, is_bdev))
1010 goto err; 1022 goto err;
1011 1023
1012 if (is_bdev) { 1024 if (is_bdev) {
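fuse_match_uint() above parses user_id and group_id with kstrtouint() rather than the signed match_int(), so ids above INT_MAX are no longer rejected. A rough userspace equivalent of such an unsigned parse, with an invented helper name:

#include <errno.h>
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

static int parse_uint(const char *s, unsigned int *res)
{
	char *end;
	unsigned long val;

	errno = 0;
	val = strtoul(s, &end, 10);
	if (errno || end == s || *end != '\0' || val > UINT_MAX)
		return -EINVAL;		/* reject junk, empty strings and overflow */
	*res = (unsigned int)val;
	return 0;
}

int main(void)
{
	unsigned int uv;

	if (parse_uint("4294967294", &uv) == 0)
		printf("user_id=%u\n", uv);	/* accepted; a signed parse would fail */
	return 0;
}
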
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index 4fc3a3046174..26b3f952e6b1 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -981,7 +981,7 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
981 int error = 0; 981 int error = 0;
982 982
983 state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED; 983 state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
984 flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE; 984 flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT;
985 985
986 mutex_lock(&fp->f_fl_mutex); 986 mutex_lock(&fp->f_fl_mutex);
987 987
@@ -991,7 +991,7 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
991 goto out; 991 goto out;
992 flock_lock_file_wait(file, 992 flock_lock_file_wait(file,
993 &(struct file_lock){.fl_type = F_UNLCK}); 993 &(struct file_lock){.fl_type = F_UNLCK});
994 gfs2_glock_dq_wait(fl_gh); 994 gfs2_glock_dq(fl_gh);
995 gfs2_holder_reinit(state, flags, fl_gh); 995 gfs2_holder_reinit(state, flags, fl_gh);
996 } else { 996 } else {
997 error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr, 997 error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index c355f7320e44..ee4e04fe60fc 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -731,14 +731,14 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
731 cachep = gfs2_glock_aspace_cachep; 731 cachep = gfs2_glock_aspace_cachep;
732 else 732 else
733 cachep = gfs2_glock_cachep; 733 cachep = gfs2_glock_cachep;
734 gl = kmem_cache_alloc(cachep, GFP_KERNEL); 734 gl = kmem_cache_alloc(cachep, GFP_NOFS);
735 if (!gl) 735 if (!gl)
736 return -ENOMEM; 736 return -ENOMEM;
737 737
738 memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb)); 738 memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
739 739
740 if (glops->go_flags & GLOF_LVB) { 740 if (glops->go_flags & GLOF_LVB) {
741 gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_KERNEL); 741 gl->gl_lksb.sb_lvbptr = kzalloc(GFS2_MIN_LVB_SIZE, GFP_NOFS);
742 if (!gl->gl_lksb.sb_lvbptr) { 742 if (!gl->gl_lksb.sb_lvbptr) {
743 kmem_cache_free(cachep, gl); 743 kmem_cache_free(cachep, gl);
744 return -ENOMEM; 744 return -ENOMEM;
@@ -1404,12 +1404,16 @@ __acquires(&lru_lock)
1404 gl = list_entry(list->next, struct gfs2_glock, gl_lru); 1404 gl = list_entry(list->next, struct gfs2_glock, gl_lru);
1405 list_del_init(&gl->gl_lru); 1405 list_del_init(&gl->gl_lru);
1406 if (!spin_trylock(&gl->gl_spin)) { 1406 if (!spin_trylock(&gl->gl_spin)) {
1407add_back_to_lru:
1407 list_add(&gl->gl_lru, &lru_list); 1408 list_add(&gl->gl_lru, &lru_list);
1408 atomic_inc(&lru_count); 1409 atomic_inc(&lru_count);
1409 continue; 1410 continue;
1410 } 1411 }
1412 if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
1413 spin_unlock(&gl->gl_spin);
1414 goto add_back_to_lru;
1415 }
1411 clear_bit(GLF_LRU, &gl->gl_flags); 1416 clear_bit(GLF_LRU, &gl->gl_flags);
1412 spin_unlock(&lru_lock);
1413 gl->gl_lockref.count++; 1417 gl->gl_lockref.count++;
1414 if (demote_ok(gl)) 1418 if (demote_ok(gl))
1415 handle_callback(gl, LM_ST_UNLOCKED, 0, false); 1419 handle_callback(gl, LM_ST_UNLOCKED, 0, false);
@@ -1417,7 +1421,7 @@ __acquires(&lru_lock)
1417 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0) 1421 if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
1418 gl->gl_lockref.count--; 1422 gl->gl_lockref.count--;
1419 spin_unlock(&gl->gl_spin); 1423 spin_unlock(&gl->gl_spin);
1420 spin_lock(&lru_lock); 1424 cond_resched_lock(&lru_lock);
1421 } 1425 }
1422} 1426}
1423 1427
@@ -1442,7 +1446,7 @@ static long gfs2_scan_glock_lru(int nr)
1442 gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru); 1446 gl = list_entry(lru_list.next, struct gfs2_glock, gl_lru);
1443 1447
1444 /* Test for being demotable */ 1448 /* Test for being demotable */
1445 if (!test_and_set_bit(GLF_LOCK, &gl->gl_flags)) { 1449 if (!test_bit(GLF_LOCK, &gl->gl_flags)) {
1446 list_move(&gl->gl_lru, &dispose); 1450 list_move(&gl->gl_lru, &dispose);
1447 atomic_dec(&lru_count); 1451 atomic_dec(&lru_count);
1448 freed++; 1452 freed++;
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index fc1100781bbc..2ffc67dce87f 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -234,8 +234,8 @@ static void inode_go_sync(struct gfs2_glock *gl)
234 * inode_go_inval - prepare a inode glock to be released 234 * inode_go_inval - prepare a inode glock to be released
235 * @gl: the glock 235 * @gl: the glock
236 * @flags: 236 * @flags:
237 * 237 *
238 * Normally we invlidate everything, but if we are moving into 238 * Normally we invalidate everything, but if we are moving into
239 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we 239 * LM_ST_DEFERRED from LM_ST_SHARED or LM_ST_EXCLUSIVE then we
240 * can keep hold of the metadata, since it won't have changed. 240 * can keep hold of the metadata, since it won't have changed.
241 * 241 *
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
index 91f274de1246..4fafea1c9ecf 100644
--- a/fs/gfs2/lock_dlm.c
+++ b/fs/gfs2/lock_dlm.c
@@ -1036,8 +1036,8 @@ static int set_recover_size(struct gfs2_sbd *sdp, struct dlm_slot *slots,
1036 1036
1037 new_size = old_size + RECOVER_SIZE_INC; 1037 new_size = old_size + RECOVER_SIZE_INC;
1038 1038
1039 submit = kzalloc(new_size * sizeof(uint32_t), GFP_NOFS); 1039 submit = kcalloc(new_size, sizeof(uint32_t), GFP_NOFS);
1040 result = kzalloc(new_size * sizeof(uint32_t), GFP_NOFS); 1040 result = kcalloc(new_size, sizeof(uint32_t), GFP_NOFS);
1041 if (!submit || !result) { 1041 if (!submit || !result) {
1042 kfree(submit); 1042 kfree(submit);
1043 kfree(result); 1043 kfree(result);
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index db629d1bd1bd..f4cb9c0d6bbd 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -337,7 +337,7 @@ static bool gfs2_unaligned_extlen(struct gfs2_rbm *rbm, u32 n_unaligned, u32 *le
337 337
338/** 338/**
339 * gfs2_free_extlen - Return extent length of free blocks 339 * gfs2_free_extlen - Return extent length of free blocks
340 * @rbm: Starting position 340 * @rrbm: Starting position
341 * @len: Max length to check 341 * @len: Max length to check
342 * 342 *
343 * Starting at the block specified by the rbm, see how many free blocks 343 * Starting at the block specified by the rbm, see how many free blocks
@@ -2522,7 +2522,7 @@ void gfs2_rlist_alloc(struct gfs2_rgrp_list *rlist, unsigned int state)
2522 2522
2523/** 2523/**
2524 * gfs2_rlist_free - free a resource group list 2524 * gfs2_rlist_free - free a resource group list
2525 * @list: the list of resource groups 2525 * @rlist: the list of resource groups
2526 * 2526 *
2527 */ 2527 */
2528 2528
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index 38cfcf5f6fce..6f0f590cc5a3 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -1588,9 +1588,12 @@ int jbd2_journal_stop(handle_t *handle)
1588 * to perform a synchronous write. We do this to detect the 1588 * to perform a synchronous write. We do this to detect the
1589 * case where a single process is doing a stream of sync 1589 * case where a single process is doing a stream of sync
1590 * writes. No point in waiting for joiners in that case. 1590 * writes. No point in waiting for joiners in that case.
1591 *
1592 * Setting max_batch_time to 0 disables this completely.
1591 */ 1593 */
1592 pid = current->pid; 1594 pid = current->pid;
1593 if (handle->h_sync && journal->j_last_sync_writer != pid) { 1595 if (handle->h_sync && journal->j_last_sync_writer != pid &&
1596 journal->j_max_batch_time) {
1594 u64 commit_time, trans_time; 1597 u64 commit_time, trans_time;
1595 1598
1596 journal->j_last_sync_writer = pid; 1599 journal->j_last_sync_writer = pid;
diff --git a/fs/kernfs/file.c b/fs/kernfs/file.c
index e3d37f607f97..d895b4b7b661 100644
--- a/fs/kernfs/file.c
+++ b/fs/kernfs/file.c
@@ -39,6 +39,19 @@ struct kernfs_open_node {
39 struct list_head files; /* goes through kernfs_open_file.list */ 39 struct list_head files; /* goes through kernfs_open_file.list */
40}; 40};
41 41
42/*
43 * kernfs_notify() may be called from any context and bounces notifications
44 * through a work item. To minimize space overhead in kernfs_node, the
45 * pending queue is implemented as a singly linked list of kernfs_nodes.
46 * The list is terminated with the self pointer so that whether a
47 * kernfs_node is on the list or not can be determined by testing the next
48 * pointer for NULL.
49 */
50#define KERNFS_NOTIFY_EOL ((void *)&kernfs_notify_list)
51
52static DEFINE_SPINLOCK(kernfs_notify_lock);
53static struct kernfs_node *kernfs_notify_list = KERNFS_NOTIFY_EOL;
54
42static struct kernfs_open_file *kernfs_of(struct file *file) 55static struct kernfs_open_file *kernfs_of(struct file *file)
43{ 56{
44 return ((struct seq_file *)file->private_data)->private; 57 return ((struct seq_file *)file->private_data)->private;
@@ -783,24 +796,25 @@ static unsigned int kernfs_fop_poll(struct file *filp, poll_table *wait)
783 return DEFAULT_POLLMASK|POLLERR|POLLPRI; 796 return DEFAULT_POLLMASK|POLLERR|POLLPRI;
784} 797}
785 798
786/** 799static void kernfs_notify_workfn(struct work_struct *work)
787 * kernfs_notify - notify a kernfs file
788 * @kn: file to notify
789 *
790 * Notify @kn such that poll(2) on @kn wakes up.
791 */
792void kernfs_notify(struct kernfs_node *kn)
793{ 800{
794 struct kernfs_root *root = kernfs_root(kn); 801 struct kernfs_node *kn;
795 struct kernfs_open_node *on; 802 struct kernfs_open_node *on;
796 struct kernfs_super_info *info; 803 struct kernfs_super_info *info;
797 unsigned long flags; 804repeat:
798 805 /* pop one off the notify_list */
799 if (WARN_ON(kernfs_type(kn) != KERNFS_FILE)) 806 spin_lock_irq(&kernfs_notify_lock);
807 kn = kernfs_notify_list;
808 if (kn == KERNFS_NOTIFY_EOL) {
809 spin_unlock_irq(&kernfs_notify_lock);
800 return; 810 return;
811 }
812 kernfs_notify_list = kn->attr.notify_next;
813 kn->attr.notify_next = NULL;
814 spin_unlock_irq(&kernfs_notify_lock);
801 815
802 /* kick poll */ 816 /* kick poll */
803 spin_lock_irqsave(&kernfs_open_node_lock, flags); 817 spin_lock_irq(&kernfs_open_node_lock);
804 818
805 on = kn->attr.open; 819 on = kn->attr.open;
806 if (on) { 820 if (on) {
@@ -808,12 +822,12 @@ void kernfs_notify(struct kernfs_node *kn)
808 wake_up_interruptible(&on->poll); 822 wake_up_interruptible(&on->poll);
809 } 823 }
810 824
811 spin_unlock_irqrestore(&kernfs_open_node_lock, flags); 825 spin_unlock_irq(&kernfs_open_node_lock);
812 826
813 /* kick fsnotify */ 827 /* kick fsnotify */
814 mutex_lock(&kernfs_mutex); 828 mutex_lock(&kernfs_mutex);
815 829
816 list_for_each_entry(info, &root->supers, node) { 830 list_for_each_entry(info, &kernfs_root(kn)->supers, node) {
817 struct inode *inode; 831 struct inode *inode;
818 struct dentry *dentry; 832 struct dentry *dentry;
819 833
@@ -833,6 +847,33 @@ void kernfs_notify(struct kernfs_node *kn)
833 } 847 }
834 848
835 mutex_unlock(&kernfs_mutex); 849 mutex_unlock(&kernfs_mutex);
850 kernfs_put(kn);
851 goto repeat;
852}
853
854/**
855 * kernfs_notify - notify a kernfs file
856 * @kn: file to notify
857 *
858 * Notify @kn such that poll(2) on @kn wakes up. May be called from any
859 * context.
860 */
861void kernfs_notify(struct kernfs_node *kn)
862{
863 static DECLARE_WORK(kernfs_notify_work, kernfs_notify_workfn);
864 unsigned long flags;
865
866 if (WARN_ON(kernfs_type(kn) != KERNFS_FILE))
867 return;
868
869 spin_lock_irqsave(&kernfs_notify_lock, flags);
870 if (!kn->attr.notify_next) {
871 kernfs_get(kn);
872 kn->attr.notify_next = kernfs_notify_list;
873 kernfs_notify_list = kn;
874 schedule_work(&kernfs_notify_work);
875 }
876 spin_unlock_irqrestore(&kernfs_notify_lock, flags);
836} 877}
837EXPORT_SYMBOL_GPL(kernfs_notify); 878EXPORT_SYMBOL_GPL(kernfs_notify);
838 879
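The reworked kernfs_notify() above queues nodes on a singly linked list terminated by a pointer to the list head itself, so a NULL next pointer means "not queued". A small userspace sketch of the same idiom, with made-up names and without the spinlock the real code needs:

#include <stdio.h>

struct node {
	int id;
	struct node *next;	/* NULL = not queued */
};

static struct node *pending_list;
#define PENDING_EOL ((struct node *)&pending_list)	/* self-pointer terminator */

static void enqueue(struct node *n)
{
	if (!n->next) {			/* a node is only queued once */
		n->next = pending_list;
		pending_list = n;
	}
}

static struct node *dequeue(void)
{
	struct node *n = pending_list;

	if (n == PENDING_EOL)
		return NULL;		/* list is empty */
	pending_list = n->next;
	n->next = NULL;			/* mark as no longer queued */
	return n;
}

int main(void)
{
	struct node a = { .id = 1 }, b = { .id = 2 };
	struct node *n;

	pending_list = PENDING_EOL;
	enqueue(&a);
	enqueue(&b);
	enqueue(&a);			/* duplicate enqueue is a no-op */
	while ((n = dequeue()) != NULL)	/* drained in LIFO order, like the list above */
		printf("drained node %d\n", n->id);
	return 0;
}
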
diff --git a/fs/kernfs/mount.c b/fs/kernfs/mount.c
index d171b98a6cdd..f973ae9b05f1 100644
--- a/fs/kernfs/mount.c
+++ b/fs/kernfs/mount.c
@@ -211,6 +211,36 @@ void kernfs_kill_sb(struct super_block *sb)
211 kernfs_put(root_kn); 211 kernfs_put(root_kn);
212} 212}
213 213
214/**
215 * kernfs_pin_sb - try to pin the superblock associated with a kernfs_root
216 * @root: the kernfs_root in question
217 * @ns: the namespace tag
218 *
219 * Pin the superblock so the superblock won't be destroyed in subsequent
220 * operations. This can be used to block ->kill_sb() which may be useful
221 * for kernfs users which dynamically manage superblocks.
222 *
223 * Returns NULL if there's no superblock associated with this kernfs_root, or
224 * -EINVAL if the superblock is being freed.
225 */
226struct super_block *kernfs_pin_sb(struct kernfs_root *root, const void *ns)
227{
228 struct kernfs_super_info *info;
229 struct super_block *sb = NULL;
230
231 mutex_lock(&kernfs_mutex);
232 list_for_each_entry(info, &root->supers, node) {
233 if (info->ns == ns) {
234 sb = info->sb;
235 if (!atomic_inc_not_zero(&info->sb->s_active))
236 sb = ERR_PTR(-EINVAL);
237 break;
238 }
239 }
240 mutex_unlock(&kernfs_mutex);
241 return sb;
242}
243
214void __init kernfs_init(void) 244void __init kernfs_init(void)
215{ 245{
216 kernfs_node_cache = kmem_cache_create("kernfs_node_cache", 246 kernfs_node_cache = kmem_cache_create("kernfs_node_cache",
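kernfs_pin_sb() above takes a reference only when atomic_inc_not_zero() succeeds, i.e. when the superblock is not already being torn down. A userspace sketch of that "increment unless already zero" pattern using C11 atomics, with illustrative names:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct object {
	atomic_int refs;	/* 0 means teardown has started */
};

/* Take a reference only if the count has not already dropped to zero. */
static bool try_pin(struct object *obj)
{
	int old = atomic_load(&obj->refs);

	while (old != 0)
		if (atomic_compare_exchange_weak(&obj->refs, &old, old + 1))
			return true;
	return false;
}

int main(void)
{
	struct object live = { .refs = 1 }, dying = { .refs = 0 };

	printf("live : %s\n", try_pin(&live) ? "pinned" : "refused");
	printf("dying: %s\n", try_pin(&dying) ? "pinned" : "refused");
	return 0;
}
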
diff --git a/fs/mbcache.c b/fs/mbcache.c
index bf166e388f0d..187477ded6b3 100644
--- a/fs/mbcache.c
+++ b/fs/mbcache.c
@@ -73,6 +73,7 @@
73#include <linux/mbcache.h> 73#include <linux/mbcache.h>
74#include <linux/init.h> 74#include <linux/init.h>
75#include <linux/blockgroup_lock.h> 75#include <linux/blockgroup_lock.h>
76#include <linux/log2.h>
76 77
77#ifdef MB_CACHE_DEBUG 78#ifdef MB_CACHE_DEBUG
78# define mb_debug(f...) do { \ 79# define mb_debug(f...) do { \
@@ -93,7 +94,7 @@
93 94
94#define MB_CACHE_WRITER ((unsigned short)~0U >> 1) 95#define MB_CACHE_WRITER ((unsigned short)~0U >> 1)
95 96
96#define MB_CACHE_ENTRY_LOCK_BITS __builtin_log2(NR_BG_LOCKS) 97#define MB_CACHE_ENTRY_LOCK_BITS ilog2(NR_BG_LOCKS)
97#define MB_CACHE_ENTRY_LOCK_INDEX(ce) \ 98#define MB_CACHE_ENTRY_LOCK_INDEX(ce) \
98 (hash_long((unsigned long)ce, MB_CACHE_ENTRY_LOCK_BITS)) 99 (hash_long((unsigned long)ce, MB_CACHE_ENTRY_LOCK_BITS))
99 100
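The mbcache fix above replaces __builtin_log2(), which is the double-precision log2() builtin, with the kernel's integer ilog2(). A userspace sketch of an integer log2 with equivalent results, under an invented name:

#include <stdio.h>

/* Integer log2: index of the highest set bit, i.e. floor(log2(n)) for n > 0. */
static unsigned int int_log2(unsigned long n)
{
	unsigned int bits = 0;

	while (n >>= 1)
		bits++;
	return bits;
}

int main(void)
{
	printf("int_log2(128) = %u\n", int_log2(128));	/* 7 */
	printf("int_log2(100) = %u\n", int_log2(100));	/* 6 */
	return 0;
}
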
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 8f98138cbc43..f11b9eed0de1 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -756,7 +756,6 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
756 spin_unlock(&dreq->lock); 756 spin_unlock(&dreq->lock);
757 757
758 while (!list_empty(&hdr->pages)) { 758 while (!list_empty(&hdr->pages)) {
759 bool do_destroy = true;
760 759
761 req = nfs_list_entry(hdr->pages.next); 760 req = nfs_list_entry(hdr->pages.next);
762 nfs_list_remove_request(req); 761 nfs_list_remove_request(req);
@@ -765,7 +764,6 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
765 case NFS_IOHDR_NEED_COMMIT: 764 case NFS_IOHDR_NEED_COMMIT:
766 kref_get(&req->wb_kref); 765 kref_get(&req->wb_kref);
767 nfs_mark_request_commit(req, hdr->lseg, &cinfo); 766 nfs_mark_request_commit(req, hdr->lseg, &cinfo);
768 do_destroy = false;
769 } 767 }
770 nfs_unlock_and_release_request(req); 768 nfs_unlock_and_release_request(req);
771 } 769 }
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index c496f8a74639..9927913c97c2 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -147,6 +147,17 @@ int nfs_sync_mapping(struct address_space *mapping)
147 return ret; 147 return ret;
148} 148}
149 149
150static void nfs_set_cache_invalid(struct inode *inode, unsigned long flags)
151{
152 struct nfs_inode *nfsi = NFS_I(inode);
153
154 if (inode->i_mapping->nrpages == 0)
155 flags &= ~NFS_INO_INVALID_DATA;
156 nfsi->cache_validity |= flags;
157 if (flags & NFS_INO_INVALID_DATA)
158 nfs_fscache_invalidate(inode);
159}
160
150/* 161/*
151 * Invalidate the local caches 162 * Invalidate the local caches
152 */ 163 */
@@ -162,17 +173,16 @@ static void nfs_zap_caches_locked(struct inode *inode)
162 173
163 memset(NFS_I(inode)->cookieverf, 0, sizeof(NFS_I(inode)->cookieverf)); 174 memset(NFS_I(inode)->cookieverf, 0, sizeof(NFS_I(inode)->cookieverf));
164 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) { 175 if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode)) {
165 nfs_fscache_invalidate(inode); 176 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR
166 nfsi->cache_validity |= NFS_INO_INVALID_ATTR
167 | NFS_INO_INVALID_DATA 177 | NFS_INO_INVALID_DATA
168 | NFS_INO_INVALID_ACCESS 178 | NFS_INO_INVALID_ACCESS
169 | NFS_INO_INVALID_ACL 179 | NFS_INO_INVALID_ACL
170 | NFS_INO_REVAL_PAGECACHE; 180 | NFS_INO_REVAL_PAGECACHE);
171 } else 181 } else
172 nfsi->cache_validity |= NFS_INO_INVALID_ATTR 182 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR
173 | NFS_INO_INVALID_ACCESS 183 | NFS_INO_INVALID_ACCESS
174 | NFS_INO_INVALID_ACL 184 | NFS_INO_INVALID_ACL
175 | NFS_INO_REVAL_PAGECACHE; 185 | NFS_INO_REVAL_PAGECACHE);
176 nfs_zap_label_cache_locked(nfsi); 186 nfs_zap_label_cache_locked(nfsi);
177} 187}
178 188
@@ -187,8 +197,7 @@ void nfs_zap_mapping(struct inode *inode, struct address_space *mapping)
187{ 197{
188 if (mapping->nrpages != 0) { 198 if (mapping->nrpages != 0) {
189 spin_lock(&inode->i_lock); 199 spin_lock(&inode->i_lock);
190 NFS_I(inode)->cache_validity |= NFS_INO_INVALID_DATA; 200 nfs_set_cache_invalid(inode, NFS_INO_INVALID_DATA);
191 nfs_fscache_invalidate(inode);
192 spin_unlock(&inode->i_lock); 201 spin_unlock(&inode->i_lock);
193 } 202 }
194} 203}
@@ -209,7 +218,7 @@ EXPORT_SYMBOL_GPL(nfs_zap_acl_cache);
209void nfs_invalidate_atime(struct inode *inode) 218void nfs_invalidate_atime(struct inode *inode)
210{ 219{
211 spin_lock(&inode->i_lock); 220 spin_lock(&inode->i_lock);
212 NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ATIME; 221 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATIME);
213 spin_unlock(&inode->i_lock); 222 spin_unlock(&inode->i_lock);
214} 223}
215EXPORT_SYMBOL_GPL(nfs_invalidate_atime); 224EXPORT_SYMBOL_GPL(nfs_invalidate_atime);
@@ -369,7 +378,7 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st
369 inode->i_mode = fattr->mode; 378 inode->i_mode = fattr->mode;
370 if ((fattr->valid & NFS_ATTR_FATTR_MODE) == 0 379 if ((fattr->valid & NFS_ATTR_FATTR_MODE) == 0
371 && nfs_server_capable(inode, NFS_CAP_MODE)) 380 && nfs_server_capable(inode, NFS_CAP_MODE))
372 nfsi->cache_validity |= NFS_INO_INVALID_ATTR; 381 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
373 /* Why so? Because we want revalidate for devices/FIFOs, and 382 /* Why so? Because we want revalidate for devices/FIFOs, and
374 * that's precisely what we have in nfs_file_inode_operations. 383 * that's precisely what we have in nfs_file_inode_operations.
375 */ 384 */
@@ -415,36 +424,36 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st
415 if (fattr->valid & NFS_ATTR_FATTR_ATIME) 424 if (fattr->valid & NFS_ATTR_FATTR_ATIME)
416 inode->i_atime = fattr->atime; 425 inode->i_atime = fattr->atime;
417 else if (nfs_server_capable(inode, NFS_CAP_ATIME)) 426 else if (nfs_server_capable(inode, NFS_CAP_ATIME))
418 nfsi->cache_validity |= NFS_INO_INVALID_ATTR; 427 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
419 if (fattr->valid & NFS_ATTR_FATTR_MTIME) 428 if (fattr->valid & NFS_ATTR_FATTR_MTIME)
420 inode->i_mtime = fattr->mtime; 429 inode->i_mtime = fattr->mtime;
421 else if (nfs_server_capable(inode, NFS_CAP_MTIME)) 430 else if (nfs_server_capable(inode, NFS_CAP_MTIME))
422 nfsi->cache_validity |= NFS_INO_INVALID_ATTR; 431 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
423 if (fattr->valid & NFS_ATTR_FATTR_CTIME) 432 if (fattr->valid & NFS_ATTR_FATTR_CTIME)
424 inode->i_ctime = fattr->ctime; 433 inode->i_ctime = fattr->ctime;
425 else if (nfs_server_capable(inode, NFS_CAP_CTIME)) 434 else if (nfs_server_capable(inode, NFS_CAP_CTIME))
426 nfsi->cache_validity |= NFS_INO_INVALID_ATTR; 435 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
427 if (fattr->valid & NFS_ATTR_FATTR_CHANGE) 436 if (fattr->valid & NFS_ATTR_FATTR_CHANGE)
428 inode->i_version = fattr->change_attr; 437 inode->i_version = fattr->change_attr;
429 else if (nfs_server_capable(inode, NFS_CAP_CHANGE_ATTR)) 438 else if (nfs_server_capable(inode, NFS_CAP_CHANGE_ATTR))
430 nfsi->cache_validity |= NFS_INO_INVALID_ATTR; 439 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
431 if (fattr->valid & NFS_ATTR_FATTR_SIZE) 440 if (fattr->valid & NFS_ATTR_FATTR_SIZE)
432 inode->i_size = nfs_size_to_loff_t(fattr->size); 441 inode->i_size = nfs_size_to_loff_t(fattr->size);
433 else 442 else
434 nfsi->cache_validity |= NFS_INO_INVALID_ATTR 443 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR
435 | NFS_INO_REVAL_PAGECACHE; 444 | NFS_INO_REVAL_PAGECACHE);
436 if (fattr->valid & NFS_ATTR_FATTR_NLINK) 445 if (fattr->valid & NFS_ATTR_FATTR_NLINK)
437 set_nlink(inode, fattr->nlink); 446 set_nlink(inode, fattr->nlink);
438 else if (nfs_server_capable(inode, NFS_CAP_NLINK)) 447 else if (nfs_server_capable(inode, NFS_CAP_NLINK))
439 nfsi->cache_validity |= NFS_INO_INVALID_ATTR; 448 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
440 if (fattr->valid & NFS_ATTR_FATTR_OWNER) 449 if (fattr->valid & NFS_ATTR_FATTR_OWNER)
441 inode->i_uid = fattr->uid; 450 inode->i_uid = fattr->uid;
442 else if (nfs_server_capable(inode, NFS_CAP_OWNER)) 451 else if (nfs_server_capable(inode, NFS_CAP_OWNER))
443 nfsi->cache_validity |= NFS_INO_INVALID_ATTR; 452 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
444 if (fattr->valid & NFS_ATTR_FATTR_GROUP) 453 if (fattr->valid & NFS_ATTR_FATTR_GROUP)
445 inode->i_gid = fattr->gid; 454 inode->i_gid = fattr->gid;
446 else if (nfs_server_capable(inode, NFS_CAP_OWNER_GROUP)) 455 else if (nfs_server_capable(inode, NFS_CAP_OWNER_GROUP))
447 nfsi->cache_validity |= NFS_INO_INVALID_ATTR; 456 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
448 if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED) 457 if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED)
449 inode->i_blocks = fattr->du.nfs2.blocks; 458 inode->i_blocks = fattr->du.nfs2.blocks;
450 if (fattr->valid & NFS_ATTR_FATTR_SPACE_USED) { 459 if (fattr->valid & NFS_ATTR_FATTR_SPACE_USED) {
@@ -550,6 +559,9 @@ static int nfs_vmtruncate(struct inode * inode, loff_t offset)
550 559
551 spin_lock(&inode->i_lock); 560 spin_lock(&inode->i_lock);
552 i_size_write(inode, offset); 561 i_size_write(inode, offset);
562 /* Optimisation */
563 if (offset == 0)
564 NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_DATA;
553 spin_unlock(&inode->i_lock); 565 spin_unlock(&inode->i_lock);
554 566
555 truncate_pagecache(inode, offset); 567 truncate_pagecache(inode, offset);
@@ -578,7 +590,8 @@ void nfs_setattr_update_inode(struct inode *inode, struct iattr *attr)
578 inode->i_uid = attr->ia_uid; 590 inode->i_uid = attr->ia_uid;
579 if ((attr->ia_valid & ATTR_GID) != 0) 591 if ((attr->ia_valid & ATTR_GID) != 0)
580 inode->i_gid = attr->ia_gid; 592 inode->i_gid = attr->ia_gid;
581 NFS_I(inode)->cache_validity |= NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL; 593 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ACCESS
594 | NFS_INO_INVALID_ACL);
582 spin_unlock(&inode->i_lock); 595 spin_unlock(&inode->i_lock);
583 } 596 }
584 if ((attr->ia_valid & ATTR_SIZE) != 0) { 597 if ((attr->ia_valid & ATTR_SIZE) != 0) {
@@ -1101,7 +1114,7 @@ static unsigned long nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr
1101 && inode->i_version == fattr->pre_change_attr) { 1114 && inode->i_version == fattr->pre_change_attr) {
1102 inode->i_version = fattr->change_attr; 1115 inode->i_version = fattr->change_attr;
1103 if (S_ISDIR(inode->i_mode)) 1116 if (S_ISDIR(inode->i_mode))
1104 nfsi->cache_validity |= NFS_INO_INVALID_DATA; 1117 nfs_set_cache_invalid(inode, NFS_INO_INVALID_DATA);
1105 ret |= NFS_INO_INVALID_ATTR; 1118 ret |= NFS_INO_INVALID_ATTR;
1106 } 1119 }
1107 /* If we have atomic WCC data, we may update some attributes */ 1120 /* If we have atomic WCC data, we may update some attributes */
@@ -1117,7 +1130,7 @@ static unsigned long nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr
1117 && timespec_equal(&inode->i_mtime, &fattr->pre_mtime)) { 1130 && timespec_equal(&inode->i_mtime, &fattr->pre_mtime)) {
1118 memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime)); 1131 memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime));
1119 if (S_ISDIR(inode->i_mode)) 1132 if (S_ISDIR(inode->i_mode))
1120 nfsi->cache_validity |= NFS_INO_INVALID_DATA; 1133 nfs_set_cache_invalid(inode, NFS_INO_INVALID_DATA);
1121 ret |= NFS_INO_INVALID_ATTR; 1134 ret |= NFS_INO_INVALID_ATTR;
1122 } 1135 }
1123 if ((fattr->valid & NFS_ATTR_FATTR_PRESIZE) 1136 if ((fattr->valid & NFS_ATTR_FATTR_PRESIZE)
@@ -1128,9 +1141,6 @@ static unsigned long nfs_wcc_update_inode(struct inode *inode, struct nfs_fattr
1128 ret |= NFS_INO_INVALID_ATTR; 1141 ret |= NFS_INO_INVALID_ATTR;
1129 } 1142 }
1130 1143
1131 if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
1132 nfs_fscache_invalidate(inode);
1133
1134 return ret; 1144 return ret;
1135} 1145}
1136 1146
@@ -1189,7 +1199,7 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
1189 invalid |= NFS_INO_INVALID_ATIME; 1199 invalid |= NFS_INO_INVALID_ATIME;
1190 1200
1191 if (invalid != 0) 1201 if (invalid != 0)
1192 nfsi->cache_validity |= invalid; 1202 nfs_set_cache_invalid(inode, invalid);
1193 1203
1194 nfsi->read_cache_jiffies = fattr->time_start; 1204 nfsi->read_cache_jiffies = fattr->time_start;
1195 return 0; 1205 return 0;
@@ -1402,13 +1412,11 @@ EXPORT_SYMBOL_GPL(nfs_refresh_inode);
1402 1412
1403static int nfs_post_op_update_inode_locked(struct inode *inode, struct nfs_fattr *fattr) 1413static int nfs_post_op_update_inode_locked(struct inode *inode, struct nfs_fattr *fattr)
1404{ 1414{
1405 struct nfs_inode *nfsi = NFS_I(inode); 1415 unsigned long invalid = NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE;
1406 1416
1407 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE; 1417 if (S_ISDIR(inode->i_mode))
1408 if (S_ISDIR(inode->i_mode)) { 1418 invalid |= NFS_INO_INVALID_DATA;
1409 nfsi->cache_validity |= NFS_INO_INVALID_DATA; 1419 nfs_set_cache_invalid(inode, invalid);
1410 nfs_fscache_invalidate(inode);
1411 }
1412 if ((fattr->valid & NFS_ATTR_FATTR) == 0) 1420 if ((fattr->valid & NFS_ATTR_FATTR) == 0)
1413 return 0; 1421 return 0;
1414 return nfs_refresh_inode_locked(inode, fattr); 1422 return nfs_refresh_inode_locked(inode, fattr);
@@ -1601,6 +1609,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
1601 if ((nfsi->npages == 0) || new_isize > cur_isize) { 1609 if ((nfsi->npages == 0) || new_isize > cur_isize) {
1602 i_size_write(inode, new_isize); 1610 i_size_write(inode, new_isize);
1603 invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA; 1611 invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
1612 invalid &= ~NFS_INO_REVAL_PAGECACHE;
1604 } 1613 }
1605 dprintk("NFS: isize change on server for file %s/%ld " 1614 dprintk("NFS: isize change on server for file %s/%ld "
1606 "(%Ld to %Ld)\n", 1615 "(%Ld to %Ld)\n",
@@ -1702,10 +1711,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
1702 invalid &= ~NFS_INO_INVALID_DATA; 1711 invalid &= ~NFS_INO_INVALID_DATA;
1703 if (!NFS_PROTO(inode)->have_delegation(inode, FMODE_READ) || 1712 if (!NFS_PROTO(inode)->have_delegation(inode, FMODE_READ) ||
1704 (save_cache_validity & NFS_INO_REVAL_FORCED)) 1713 (save_cache_validity & NFS_INO_REVAL_FORCED))
1705 nfsi->cache_validity |= invalid; 1714 nfs_set_cache_invalid(inode, invalid);
1706
1707 if (invalid & NFS_INO_INVALID_DATA)
1708 nfs_fscache_invalidate(inode);
1709 1715
1710 return 0; 1716 return 0;
1711 out_err: 1717 out_err:
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 82ddbf46660e..f415cbf9f6c3 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -244,6 +244,7 @@ void nfs_pgio_data_release(struct nfs_pgio_data *);
244int nfs_generic_pgio(struct nfs_pageio_descriptor *, struct nfs_pgio_header *); 244int nfs_generic_pgio(struct nfs_pageio_descriptor *, struct nfs_pgio_header *);
245int nfs_initiate_pgio(struct rpc_clnt *, struct nfs_pgio_data *, 245int nfs_initiate_pgio(struct rpc_clnt *, struct nfs_pgio_data *,
246 const struct rpc_call_ops *, int, int); 246 const struct rpc_call_ops *, int, int);
247void nfs_free_request(struct nfs_page *req);
247 248
248static inline void nfs_iocounter_init(struct nfs_io_counter *c) 249static inline void nfs_iocounter_init(struct nfs_io_counter *c)
249{ 250{
diff --git a/fs/nfs/nfs3acl.c b/fs/nfs/nfs3acl.c
index 871d6eda8dba..8f854dde4150 100644
--- a/fs/nfs/nfs3acl.c
+++ b/fs/nfs/nfs3acl.c
@@ -247,3 +247,46 @@ const struct xattr_handler *nfs3_xattr_handlers[] = {
247 &posix_acl_default_xattr_handler, 247 &posix_acl_default_xattr_handler,
248 NULL, 248 NULL,
249}; 249};
250
251static int
252nfs3_list_one_acl(struct inode *inode, int type, const char *name, void *data,
253 size_t size, ssize_t *result)
254{
255 struct posix_acl *acl;
256 char *p = data + *result;
257
258 acl = get_acl(inode, type);
259 if (!acl)
260 return 0;
261
262 posix_acl_release(acl);
263
264 *result += strlen(name);
265 *result += 1;
266 if (!size)
267 return 0;
268 if (*result > size)
269 return -ERANGE;
270
271 strcpy(p, name);
272 return 0;
273}
274
275ssize_t
276nfs3_listxattr(struct dentry *dentry, char *data, size_t size)
277{
278 struct inode *inode = dentry->d_inode;
279 ssize_t result = 0;
280 int error;
281
282 error = nfs3_list_one_acl(inode, ACL_TYPE_ACCESS,
283 POSIX_ACL_XATTR_ACCESS, data, size, &result);
284 if (error)
285 return error;
286
287 error = nfs3_list_one_acl(inode, ACL_TYPE_DEFAULT,
288 POSIX_ACL_XATTR_DEFAULT, data, size, &result);
289 if (error)
290 return error;
291 return result;
292}
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index e7daa42bbc86..f0afa291fd58 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -885,7 +885,7 @@ static const struct inode_operations nfs3_dir_inode_operations = {
885 .getattr = nfs_getattr, 885 .getattr = nfs_getattr,
886 .setattr = nfs_setattr, 886 .setattr = nfs_setattr,
887#ifdef CONFIG_NFS_V3_ACL 887#ifdef CONFIG_NFS_V3_ACL
888 .listxattr = generic_listxattr, 888 .listxattr = nfs3_listxattr,
889 .getxattr = generic_getxattr, 889 .getxattr = generic_getxattr,
890 .setxattr = generic_setxattr, 890 .setxattr = generic_setxattr,
891 .removexattr = generic_removexattr, 891 .removexattr = generic_removexattr,
@@ -899,7 +899,7 @@ static const struct inode_operations nfs3_file_inode_operations = {
899 .getattr = nfs_getattr, 899 .getattr = nfs_getattr,
900 .setattr = nfs_setattr, 900 .setattr = nfs_setattr,
901#ifdef CONFIG_NFS_V3_ACL 901#ifdef CONFIG_NFS_V3_ACL
902 .listxattr = generic_listxattr, 902 .listxattr = nfs3_listxattr,
903 .getxattr = generic_getxattr, 903 .getxattr = generic_getxattr,
904 .setxattr = generic_setxattr, 904 .setxattr = generic_setxattr,
905 .removexattr = generic_removexattr, 905 .removexattr = generic_removexattr,
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index f63cb87cd730..ba2affa51941 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -230,7 +230,7 @@ int nfs_atomic_open(struct inode *, struct dentry *, struct file *,
230extern struct file_system_type nfs4_fs_type; 230extern struct file_system_type nfs4_fs_type;
231 231
232/* nfs4namespace.c */ 232/* nfs4namespace.c */
233struct rpc_clnt *nfs4_create_sec_client(struct rpc_clnt *, struct inode *, struct qstr *); 233struct rpc_clnt *nfs4_negotiate_security(struct rpc_clnt *, struct inode *, struct qstr *);
234struct vfsmount *nfs4_submount(struct nfs_server *, struct dentry *, 234struct vfsmount *nfs4_submount(struct nfs_server *, struct dentry *,
235 struct nfs_fh *, struct nfs_fattr *); 235 struct nfs_fh *, struct nfs_fattr *);
236int nfs4_replace_transport(struct nfs_server *server, 236int nfs4_replace_transport(struct nfs_server *server,
diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c
index 3d5dbf80d46a..3d83cb1fdc70 100644
--- a/fs/nfs/nfs4namespace.c
+++ b/fs/nfs/nfs4namespace.c
@@ -139,16 +139,22 @@ static size_t nfs_parse_server_name(char *string, size_t len,
139 * @server: NFS server struct 139 * @server: NFS server struct
140 * @flavors: List of security tuples returned by SECINFO procedure 140 * @flavors: List of security tuples returned by SECINFO procedure
141 * 141 *
142 * Return the pseudoflavor of the first security mechanism in 142 * Return an rpc client that uses the first security mechanism in
143 * "flavors" that is locally supported. Return RPC_AUTH_UNIX if 143 * "flavors" that is locally supported. The "flavors" array
144 * no matching flavor is found in the array. The "flavors" array
145 * is searched in the order returned from the server, per RFC 3530 144 * is searched in the order returned from the server, per RFC 3530
146 * recommendation. 145 * recommendation and each flavor is checked for membership in the
146 * sec= mount option list if it exists.
147 *
148 * Return -EPERM if no matching flavor is found in the array.
149 *
150 * Please call rpc_shutdown_client() when you are done with this rpc client.
151 *
147 */ 152 */
148static rpc_authflavor_t nfs_find_best_sec(struct nfs_server *server, 153static struct rpc_clnt *nfs_find_best_sec(struct rpc_clnt *clnt,
154 struct nfs_server *server,
149 struct nfs4_secinfo_flavors *flavors) 155 struct nfs4_secinfo_flavors *flavors)
150{ 156{
151 rpc_authflavor_t pseudoflavor; 157 rpc_authflavor_t pflavor;
152 struct nfs4_secinfo4 *secinfo; 158 struct nfs4_secinfo4 *secinfo;
153 unsigned int i; 159 unsigned int i;
154 160
@@ -159,62 +165,73 @@ static rpc_authflavor_t nfs_find_best_sec(struct nfs_server *server,
159 case RPC_AUTH_NULL: 165 case RPC_AUTH_NULL:
160 case RPC_AUTH_UNIX: 166 case RPC_AUTH_UNIX:
161 case RPC_AUTH_GSS: 167 case RPC_AUTH_GSS:
162 pseudoflavor = rpcauth_get_pseudoflavor(secinfo->flavor, 168 pflavor = rpcauth_get_pseudoflavor(secinfo->flavor,
163 &secinfo->flavor_info); 169 &secinfo->flavor_info);
164 /* make sure pseudoflavor matches sec= mount opt */ 170 /* does the pseudoflavor match a sec= mount opt? */
165 if (pseudoflavor != RPC_AUTH_MAXFLAVOR && 171 if (pflavor != RPC_AUTH_MAXFLAVOR &&
166 nfs_auth_info_match(&server->auth_info, 172 nfs_auth_info_match(&server->auth_info, pflavor)) {
167 pseudoflavor)) 173 struct rpc_clnt *new;
168 return pseudoflavor; 174 struct rpc_cred *cred;
169 break; 175
176 /* Cloning creates an rpc_auth for the flavor */
177 new = rpc_clone_client_set_auth(clnt, pflavor);
178 if (IS_ERR(new))
179 continue;
180 /**
181 * Check that the user actually can use the
182 * flavor. This is mostly for RPC_AUTH_GSS
183 * where cr_init obtains a gss context
184 */
185 cred = rpcauth_lookupcred(new->cl_auth, 0);
186 if (IS_ERR(cred)) {
187 rpc_shutdown_client(new);
188 continue;
189 }
190 put_rpccred(cred);
191 return new;
192 }
170 } 193 }
171 } 194 }
172 195 return ERR_PTR(-EPERM);
173 /* if there were any sec= options then nothing matched */
174 if (server->auth_info.flavor_len > 0)
175 return -EPERM;
176
177 return RPC_AUTH_UNIX;
178} 196}
179 197
180static rpc_authflavor_t nfs4_negotiate_security(struct inode *inode, struct qstr *name) 198/**
199 * nfs4_negotiate_security - in response to an NFS4ERR_WRONGSEC on lookup,
200 * return an rpc_clnt that uses the best available security flavor with
201 * respect to the secinfo flavor list and the sec= mount options.
202 *
203 * @clnt: RPC client to clone
204 * @inode: directory inode
205 * @name: lookup name
206 *
207 * Please call rpc_shutdown_client() when you are done with this rpc client.
208 */
209struct rpc_clnt *
210nfs4_negotiate_security(struct rpc_clnt *clnt, struct inode *inode,
211 struct qstr *name)
181{ 212{
182 struct page *page; 213 struct page *page;
183 struct nfs4_secinfo_flavors *flavors; 214 struct nfs4_secinfo_flavors *flavors;
184 rpc_authflavor_t flavor; 215 struct rpc_clnt *new;
185 int err; 216 int err;
186 217
187 page = alloc_page(GFP_KERNEL); 218 page = alloc_page(GFP_KERNEL);
188 if (!page) 219 if (!page)
189 return -ENOMEM; 220 return ERR_PTR(-ENOMEM);
221
190 flavors = page_address(page); 222 flavors = page_address(page);
191 223
192 err = nfs4_proc_secinfo(inode, name, flavors); 224 err = nfs4_proc_secinfo(inode, name, flavors);
193 if (err < 0) { 225 if (err < 0) {
194 flavor = err; 226 new = ERR_PTR(err);
195 goto out; 227 goto out;
196 } 228 }
197 229
198 flavor = nfs_find_best_sec(NFS_SERVER(inode), flavors); 230 new = nfs_find_best_sec(clnt, NFS_SERVER(inode), flavors);
199 231
200out: 232out:
201 put_page(page); 233 put_page(page);
202 return flavor; 234 return new;
203}
204
205/*
206 * Please call rpc_shutdown_client() when you are done with this client.
207 */
208struct rpc_clnt *nfs4_create_sec_client(struct rpc_clnt *clnt, struct inode *inode,
209 struct qstr *name)
210{
211 rpc_authflavor_t flavor;
212
213 flavor = nfs4_negotiate_security(inode, name);
214 if ((int)flavor < 0)
215 return ERR_PTR((int)flavor);
216
217 return rpc_clone_client_set_auth(clnt, flavor);
218} 235}
219 236
220static struct vfsmount *try_location(struct nfs_clone_mount *mountdata, 237static struct vfsmount *try_location(struct nfs_clone_mount *mountdata,
@@ -397,11 +414,6 @@ struct vfsmount *nfs4_submount(struct nfs_server *server, struct dentry *dentry,
397 414
398 if (client->cl_auth->au_flavor != flavor) 415 if (client->cl_auth->au_flavor != flavor)
399 flavor = client->cl_auth->au_flavor; 416 flavor = client->cl_auth->au_flavor;
400 else {
401 rpc_authflavor_t new = nfs4_negotiate_security(dir, name);
402 if ((int)new >= 0)
403 flavor = new;
404 }
405 mnt = nfs_do_submount(dentry, fh, fattr, flavor); 417 mnt = nfs_do_submount(dentry, fh, fattr, flavor);
406out: 418out:
407 rpc_shutdown_client(client); 419 rpc_shutdown_client(client);
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 285ad5334018..4bf3d97cc5a0 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -3247,7 +3247,7 @@ static int nfs4_proc_lookup_common(struct rpc_clnt **clnt, struct inode *dir,
3247 err = -EPERM; 3247 err = -EPERM;
3248 if (client != *clnt) 3248 if (client != *clnt)
3249 goto out; 3249 goto out;
3250 client = nfs4_create_sec_client(client, dir, name); 3250 client = nfs4_negotiate_security(client, dir, name);
3251 if (IS_ERR(client)) 3251 if (IS_ERR(client))
3252 return PTR_ERR(client); 3252 return PTR_ERR(client);
3253 3253
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index b6ee3a6ee96d..17fab89f6358 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -29,8 +29,6 @@
29static struct kmem_cache *nfs_page_cachep; 29static struct kmem_cache *nfs_page_cachep;
30static const struct rpc_call_ops nfs_pgio_common_ops; 30static const struct rpc_call_ops nfs_pgio_common_ops;
31 31
32static void nfs_free_request(struct nfs_page *);
33
34static bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount) 32static bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount)
35{ 33{
36 p->npages = pagecount; 34 p->npages = pagecount;
@@ -239,20 +237,28 @@ nfs_page_group_init(struct nfs_page *req, struct nfs_page *prev)
239 WARN_ON_ONCE(prev == req); 237 WARN_ON_ONCE(prev == req);
240 238
241 if (!prev) { 239 if (!prev) {
240 /* a head request */
242 req->wb_head = req; 241 req->wb_head = req;
243 req->wb_this_page = req; 242 req->wb_this_page = req;
244 } else { 243 } else {
244 /* a subrequest */
245 WARN_ON_ONCE(prev->wb_this_page != prev->wb_head); 245 WARN_ON_ONCE(prev->wb_this_page != prev->wb_head);
246 WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &prev->wb_head->wb_flags)); 246 WARN_ON_ONCE(!test_bit(PG_HEADLOCK, &prev->wb_head->wb_flags));
247 req->wb_head = prev->wb_head; 247 req->wb_head = prev->wb_head;
248 req->wb_this_page = prev->wb_this_page; 248 req->wb_this_page = prev->wb_this_page;
249 prev->wb_this_page = req; 249 prev->wb_this_page = req;
250 250
251 /* All subrequests take a ref on the head request until
252 * nfs_page_group_destroy is called */
253 kref_get(&req->wb_head->wb_kref);
254
251 /* grab extra ref if head request has extra ref from 255 /* grab extra ref if head request has extra ref from
252 * the write/commit path to handle handoff between write 256 * the write/commit path to handle handoff between write
253 * and commit lists */ 257 * and commit lists */
254 if (test_bit(PG_INODE_REF, &prev->wb_head->wb_flags)) 258 if (test_bit(PG_INODE_REF, &prev->wb_head->wb_flags)) {
259 set_bit(PG_INODE_REF, &req->wb_flags);
255 kref_get(&req->wb_kref); 260 kref_get(&req->wb_kref);
261 }
256 } 262 }
257} 263}
258 264
@@ -269,6 +275,10 @@ nfs_page_group_destroy(struct kref *kref)
269 struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref); 275 struct nfs_page *req = container_of(kref, struct nfs_page, wb_kref);
270 struct nfs_page *tmp, *next; 276 struct nfs_page *tmp, *next;
271 277
278 /* subrequests must release the ref on the head request */
279 if (req->wb_head != req)
280 nfs_release_request(req->wb_head);
281
272 if (!nfs_page_group_sync_on_bit(req, PG_TEARDOWN)) 282 if (!nfs_page_group_sync_on_bit(req, PG_TEARDOWN))
273 return; 283 return;
274 284
@@ -394,7 +404,7 @@ static void nfs_clear_request(struct nfs_page *req)
394 * 404 *
395 * Note: Should never be called with the spinlock held! 405 * Note: Should never be called with the spinlock held!
396 */ 406 */
397static void nfs_free_request(struct nfs_page *req) 407void nfs_free_request(struct nfs_page *req)
398{ 408{
399 WARN_ON_ONCE(req->wb_this_page != req); 409 WARN_ON_ONCE(req->wb_this_page != req);
400 410
@@ -925,7 +935,6 @@ static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
925 nfs_pageio_doio(desc); 935 nfs_pageio_doio(desc);
926 if (desc->pg_error < 0) 936 if (desc->pg_error < 0)
927 return 0; 937 return 0;
928 desc->pg_moreio = 0;
929 if (desc->pg_recoalesce) 938 if (desc->pg_recoalesce)
930 return 0; 939 return 0;
931 /* retry add_request for this subreq */ 940 /* retry add_request for this subreq */
@@ -972,6 +981,7 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
972 desc->pg_count = 0; 981 desc->pg_count = 0;
973 desc->pg_base = 0; 982 desc->pg_base = 0;
974 desc->pg_recoalesce = 0; 983 desc->pg_recoalesce = 0;
984 desc->pg_moreio = 0;
975 985
976 while (!list_empty(&head)) { 986 while (!list_empty(&head)) {
977 struct nfs_page *req; 987 struct nfs_page *req;
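
The fs/nfs/pagelist.c hunks above make every subrequest in a page group take a kref on its head request when it is linked in, and drop that reference from nfs_page_group_destroy(), so the head (and the group bookkeeping it carries) cannot be freed while any subrequest is still alive. A minimal sketch of the same kref idiom, using made-up types rather than the real nfs_page structures:

	#include <linux/kref.h>
	#include <linux/slab.h>

	struct demo_req {
		struct kref ref;		/* initialised with kref_init() at allocation */
		struct demo_req *head;		/* group head, or self for the head itself */
	};

	static void demo_req_free(struct kref *kref)
	{
		struct demo_req *req = container_of(kref, struct demo_req, ref);

		/* a dying subrequest releases its pin on the head */
		if (req->head != req)
			kref_put(&req->head->ref, demo_req_free);
		kfree(req);
	}

	/* linking a subrequest into a group pins the head for the subrequest's lifetime */
	static void demo_group_link(struct demo_req *head, struct demo_req *sub)
	{
		sub->head = head;
		kref_get(&head->ref);
	}
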
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 3ee5af4e738e..5e2f10304548 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -46,6 +46,7 @@ static const struct rpc_call_ops nfs_commit_ops;
46static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops; 46static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops;
47static const struct nfs_commit_completion_ops nfs_commit_completion_ops; 47static const struct nfs_commit_completion_ops nfs_commit_completion_ops;
48static const struct nfs_rw_ops nfs_rw_write_ops; 48static const struct nfs_rw_ops nfs_rw_write_ops;
49static void nfs_clear_request_commit(struct nfs_page *req);
49 50
50static struct kmem_cache *nfs_wdata_cachep; 51static struct kmem_cache *nfs_wdata_cachep;
51static mempool_t *nfs_wdata_mempool; 52static mempool_t *nfs_wdata_mempool;
@@ -91,8 +92,15 @@ static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error)
91 set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags); 92 set_bit(NFS_CONTEXT_ERROR_WRITE, &ctx->flags);
92} 93}
93 94
95/*
96 * nfs_page_find_head_request_locked - find head request associated with @page
97 *
98 * must be called while holding the inode lock.
99 *
100 * returns matching head request with reference held, or NULL if not found.
101 */
94static struct nfs_page * 102static struct nfs_page *
95nfs_page_find_request_locked(struct nfs_inode *nfsi, struct page *page) 103nfs_page_find_head_request_locked(struct nfs_inode *nfsi, struct page *page)
96{ 104{
97 struct nfs_page *req = NULL; 105 struct nfs_page *req = NULL;
98 106
@@ -104,25 +112,33 @@ nfs_page_find_request_locked(struct nfs_inode *nfsi, struct page *page)
104 /* Linearly search the commit list for the correct req */ 112 /* Linearly search the commit list for the correct req */
105 list_for_each_entry_safe(freq, t, &nfsi->commit_info.list, wb_list) { 113 list_for_each_entry_safe(freq, t, &nfsi->commit_info.list, wb_list) {
106 if (freq->wb_page == page) { 114 if (freq->wb_page == page) {
107 req = freq; 115 req = freq->wb_head;
108 break; 116 break;
109 } 117 }
110 } 118 }
111 } 119 }
112 120
113 if (req) 121 if (req) {
122 WARN_ON_ONCE(req->wb_head != req);
123
114 kref_get(&req->wb_kref); 124 kref_get(&req->wb_kref);
125 }
115 126
116 return req; 127 return req;
117} 128}
118 129
119static struct nfs_page *nfs_page_find_request(struct page *page) 130/*
131 * nfs_page_find_head_request - find head request associated with @page
132 *
133 * returns matching head request with reference held, or NULL if not found.
134 */
135static struct nfs_page *nfs_page_find_head_request(struct page *page)
120{ 136{
121 struct inode *inode = page_file_mapping(page)->host; 137 struct inode *inode = page_file_mapping(page)->host;
122 struct nfs_page *req = NULL; 138 struct nfs_page *req = NULL;
123 139
124 spin_lock(&inode->i_lock); 140 spin_lock(&inode->i_lock);
125 req = nfs_page_find_request_locked(NFS_I(inode), page); 141 req = nfs_page_find_head_request_locked(NFS_I(inode), page);
126 spin_unlock(&inode->i_lock); 142 spin_unlock(&inode->i_lock);
127 return req; 143 return req;
128} 144}
@@ -274,36 +290,246 @@ static void nfs_end_page_writeback(struct nfs_page *req)
274 clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC); 290 clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);
275} 291}
276 292
277static struct nfs_page *nfs_find_and_lock_request(struct page *page, bool nonblock) 293
294/* nfs_page_group_clear_bits
295 * @req - an nfs request
296 * clears all page group related bits from @req
297 */
298static void
299nfs_page_group_clear_bits(struct nfs_page *req)
300{
301 clear_bit(PG_TEARDOWN, &req->wb_flags);
302 clear_bit(PG_UNLOCKPAGE, &req->wb_flags);
303 clear_bit(PG_UPTODATE, &req->wb_flags);
304 clear_bit(PG_WB_END, &req->wb_flags);
305 clear_bit(PG_REMOVE, &req->wb_flags);
306}
307
308
309/*
310 * nfs_unroll_locks_and_wait - unlock all newly locked reqs and wait on @req
311 *
312 * this is a helper function for nfs_lock_and_join_requests
313 *
314 * @inode - inode associated with request page group, must be holding inode lock
315 * @head - head request of page group, must be holding head lock
316 * @req - request that couldn't lock and needs to wait on the req bit lock
317 * @nonblock - if true, don't actually wait
318 *
319 * NOTE: this must be called holding page_group bit lock and inode spin lock
320 * and BOTH will be released before returning.
321 *
322 * returns 0 on success, < 0 on error.
323 */
324static int
325nfs_unroll_locks_and_wait(struct inode *inode, struct nfs_page *head,
326 struct nfs_page *req, bool nonblock)
327 __releases(&inode->i_lock)
328{
329 struct nfs_page *tmp;
330 int ret;
331
332 /* relinquish all the locks successfully grabbed this run */
333 for (tmp = head ; tmp != req; tmp = tmp->wb_this_page)
334 nfs_unlock_request(tmp);
335
336 WARN_ON_ONCE(test_bit(PG_TEARDOWN, &req->wb_flags));
337
338 /* grab a ref on the request that will be waited on */
339 kref_get(&req->wb_kref);
340
341 nfs_page_group_unlock(head);
342 spin_unlock(&inode->i_lock);
343
344 /* release ref from nfs_page_find_head_request_locked */
345 nfs_release_request(head);
346
347 if (!nonblock)
348 ret = nfs_wait_on_request(req);
349 else
350 ret = -EAGAIN;
351 nfs_release_request(req);
352
353 return ret;
354}
355
356/*
357 * nfs_destroy_unlinked_subrequests - destroy recently unlinked subrequests
358 *
359 * @destroy_list - request list (using wb_this_page) terminated by @old_head
360 * @old_head - the old head of the list
361 *
362 * All subrequests must be locked and removed from all lists, so at this point
363 * they are only "active" in this function, and possibly in nfs_wait_on_request
364 * with a reference held by some other context.
365 */
366static void
367nfs_destroy_unlinked_subrequests(struct nfs_page *destroy_list,
368 struct nfs_page *old_head)
369{
370 while (destroy_list) {
371 struct nfs_page *subreq = destroy_list;
372
373 destroy_list = (subreq->wb_this_page == old_head) ?
374 NULL : subreq->wb_this_page;
375
376 WARN_ON_ONCE(old_head != subreq->wb_head);
377
378 /* make sure old group is not used */
379 subreq->wb_head = subreq;
380 subreq->wb_this_page = subreq;
381
382 nfs_clear_request_commit(subreq);
383
384 /* subreq is now totally disconnected from page group or any
385 * write / commit lists. last chance to wake any waiters */
386 nfs_unlock_request(subreq);
387
388 if (!test_bit(PG_TEARDOWN, &subreq->wb_flags)) {
389 /* release ref on old head request */
390 nfs_release_request(old_head);
391
392 nfs_page_group_clear_bits(subreq);
393
394 /* release the PG_INODE_REF reference */
395 if (test_and_clear_bit(PG_INODE_REF, &subreq->wb_flags))
396 nfs_release_request(subreq);
397 else
398 WARN_ON_ONCE(1);
399 } else {
400 WARN_ON_ONCE(test_bit(PG_CLEAN, &subreq->wb_flags));
401 /* zombie requests have already released the last
402 * reference and were waiting on the rest of the
403 * group to complete. Since it's no longer part of a
404 * group, simply free the request */
405 nfs_page_group_clear_bits(subreq);
406 nfs_free_request(subreq);
407 }
408 }
409}
410
411/*
412 * nfs_lock_and_join_requests - join all subreqs to the head req and return
413 * a locked reference, cancelling any pending
414 * operations for this page.
415 *
416 * @page - the page used to lookup the "page group" of nfs_page structures
417 * @nonblock - if true, don't block waiting for request locks
418 *
419 * This function joins all sub requests to the head request by first
420 * locking all requests in the group, cancelling any pending operations
421 * and finally updating the head request to cover the whole range covered by
422 * the (former) group. All subrequests are removed from any write or commit
423 * lists, unlinked from the group and destroyed.
424 *
425 * Returns a locked, referenced pointer to the head request - which after
426 * this call is guaranteed to be the only request associated with the page.
427 * Returns NULL if no requests are found for @page, or a ERR_PTR if an
428 * error was encountered.
429 */
430static struct nfs_page *
431nfs_lock_and_join_requests(struct page *page, bool nonblock)
278{ 432{
279 struct inode *inode = page_file_mapping(page)->host; 433 struct inode *inode = page_file_mapping(page)->host;
280 struct nfs_page *req; 434 struct nfs_page *head, *subreq;
435 struct nfs_page *destroy_list = NULL;
436 unsigned int total_bytes;
281 int ret; 437 int ret;
282 438
439try_again:
440 total_bytes = 0;
441
442 WARN_ON_ONCE(destroy_list);
443
283 spin_lock(&inode->i_lock); 444 spin_lock(&inode->i_lock);
284 for (;;) { 445
285 req = nfs_page_find_request_locked(NFS_I(inode), page); 446 /*
286 if (req == NULL) 447 * A reference is taken only on the head request which acts as a
287 break; 448 * reference to the whole page group - the group will not be destroyed
288 if (nfs_lock_request(req)) 449 * until the head reference is released.
289 break; 450 */
290 /* Note: If we hold the page lock, as is the case in nfs_writepage, 451 head = nfs_page_find_head_request_locked(NFS_I(inode), page);
291 * then the call to nfs_lock_request() will always 452
292 * succeed provided that someone hasn't already marked the 453 if (!head) {
293 * request as dirty (in which case we don't care).
294 */
295 spin_unlock(&inode->i_lock); 454 spin_unlock(&inode->i_lock);
296 if (!nonblock) 455 return NULL;
297 ret = nfs_wait_on_request(req); 456 }
298 else 457
299 ret = -EAGAIN; 458 /* lock each request in the page group */
300 nfs_release_request(req); 459 nfs_page_group_lock(head);
301 if (ret != 0) 460 subreq = head;
461 do {
462 /*
463 * Subrequests are always contiguous, non overlapping
464 * and in order. If not, it's a programming error.
465 */
466 WARN_ON_ONCE(subreq->wb_offset !=
467 (head->wb_offset + total_bytes));
468
469 /* keep track of how many bytes this group covers */
470 total_bytes += subreq->wb_bytes;
471
472 if (!nfs_lock_request(subreq)) {
473 /* releases page group bit lock and
474 * inode spin lock and all references */
475 ret = nfs_unroll_locks_and_wait(inode, head,
476 subreq, nonblock);
477
478 if (ret == 0)
479 goto try_again;
480
302 return ERR_PTR(ret); 481 return ERR_PTR(ret);
303 spin_lock(&inode->i_lock); 482 }
483
484 subreq = subreq->wb_this_page;
485 } while (subreq != head);
486
487 /* Now that all requests are locked, make sure they aren't on any list.
488 * Commit list removal accounting is done after locks are dropped */
489 subreq = head;
490 do {
491 nfs_list_remove_request(subreq);
492 subreq = subreq->wb_this_page;
493 } while (subreq != head);
494
495 /* unlink subrequests from head, destroy them later */
496 if (head->wb_this_page != head) {
497 /* destroy list will be terminated by head */
498 destroy_list = head->wb_this_page;
499 head->wb_this_page = head;
500
501 /* change head request to cover whole range that
502 * the former page group covered */
503 head->wb_bytes = total_bytes;
304 } 504 }
505
506 /*
507 * prepare head request to be added to new pgio descriptor
508 */
509 nfs_page_group_clear_bits(head);
510
511 /*
512 * some part of the group was still on the inode list - otherwise
513 * the group wouldn't be involved in async write.
514 * grab a reference for the head request, iff it needs one.
515 */
516 if (!test_and_set_bit(PG_INODE_REF, &head->wb_flags))
517 kref_get(&head->wb_kref);
518
519 nfs_page_group_unlock(head);
520
521 /* drop lock to clear_request_commit the head req and clean up
522 * requests on destroy list */
305 spin_unlock(&inode->i_lock); 523 spin_unlock(&inode->i_lock);
306 return req; 524
525 nfs_destroy_unlinked_subrequests(destroy_list, head);
526
527 /* clean up commit list state */
528 nfs_clear_request_commit(head);
529
530 /* still holds ref on head from nfs_page_find_head_request_locked
531 * and still has lock on head from lock loop */
532 return head;
307} 533}
308 534
309/* 535/*
@@ -316,7 +542,7 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
316 struct nfs_page *req; 542 struct nfs_page *req;
317 int ret = 0; 543 int ret = 0;
318 544
319 req = nfs_find_and_lock_request(page, nonblock); 545 req = nfs_lock_and_join_requests(page, nonblock);
320 if (!req) 546 if (!req)
321 goto out; 547 goto out;
322 ret = PTR_ERR(req); 548 ret = PTR_ERR(req);
@@ -448,7 +674,9 @@ static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
448 set_page_private(req->wb_page, (unsigned long)req); 674 set_page_private(req->wb_page, (unsigned long)req);
449 } 675 }
450 nfsi->npages++; 676 nfsi->npages++;
451 set_bit(PG_INODE_REF, &req->wb_flags); 677 /* this a head request for a page group - mark it as having an
678 * extra reference so sub groups can follow suit */
679 WARN_ON(test_and_set_bit(PG_INODE_REF, &req->wb_flags));
452 kref_get(&req->wb_kref); 680 kref_get(&req->wb_kref);
453 spin_unlock(&inode->i_lock); 681 spin_unlock(&inode->i_lock);
454} 682}
@@ -474,7 +702,9 @@ static void nfs_inode_remove_request(struct nfs_page *req)
474 nfsi->npages--; 702 nfsi->npages--;
475 spin_unlock(&inode->i_lock); 703 spin_unlock(&inode->i_lock);
476 } 704 }
477 nfs_release_request(req); 705
706 if (test_and_clear_bit(PG_INODE_REF, &req->wb_flags))
707 nfs_release_request(req);
478} 708}
479 709
480static void 710static void
@@ -638,7 +868,6 @@ static void nfs_write_completion(struct nfs_pgio_header *hdr)
638{ 868{
639 struct nfs_commit_info cinfo; 869 struct nfs_commit_info cinfo;
640 unsigned long bytes = 0; 870 unsigned long bytes = 0;
641 bool do_destroy;
642 871
643 if (test_bit(NFS_IOHDR_REDO, &hdr->flags)) 872 if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
644 goto out; 873 goto out;
@@ -668,7 +897,6 @@ remove_req:
668next: 897next:
669 nfs_unlock_request(req); 898 nfs_unlock_request(req);
670 nfs_end_page_writeback(req); 899 nfs_end_page_writeback(req);
671 do_destroy = !test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags);
672 nfs_release_request(req); 900 nfs_release_request(req);
673 } 901 }
674out: 902out:
@@ -769,7 +997,7 @@ static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
769 spin_lock(&inode->i_lock); 997 spin_lock(&inode->i_lock);
770 998
771 for (;;) { 999 for (;;) {
772 req = nfs_page_find_request_locked(NFS_I(inode), page); 1000 req = nfs_page_find_head_request_locked(NFS_I(inode), page);
773 if (req == NULL) 1001 if (req == NULL)
774 goto out_unlock; 1002 goto out_unlock;
775 1003
@@ -877,7 +1105,7 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
877 * dropped page. 1105 * dropped page.
878 */ 1106 */
879 do { 1107 do {
880 req = nfs_page_find_request(page); 1108 req = nfs_page_find_head_request(page);
881 if (req == NULL) 1109 if (req == NULL)
882 return 0; 1110 return 0;
883 l_ctx = req->wb_lock_context; 1111 l_ctx = req->wb_lock_context;
@@ -934,12 +1162,14 @@ static bool nfs_write_pageuptodate(struct page *page, struct inode *inode)
934 1162
935 if (nfs_have_delegated_attributes(inode)) 1163 if (nfs_have_delegated_attributes(inode))
936 goto out; 1164 goto out;
937 if (nfsi->cache_validity & (NFS_INO_INVALID_DATA|NFS_INO_REVAL_PAGECACHE)) 1165 if (nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE)
938 return false; 1166 return false;
939 smp_rmb(); 1167 smp_rmb();
940 if (test_bit(NFS_INO_INVALIDATING, &nfsi->flags)) 1168 if (test_bit(NFS_INO_INVALIDATING, &nfsi->flags))
941 return false; 1169 return false;
942out: 1170out:
1171 if (nfsi->cache_validity & NFS_INO_INVALID_DATA)
1172 return false;
943 return PageUptodate(page) != 0; 1173 return PageUptodate(page) != 0;
944} 1174}
945 1175
@@ -1567,27 +1797,28 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page)
1567 struct nfs_page *req; 1797 struct nfs_page *req;
1568 int ret = 0; 1798 int ret = 0;
1569 1799
1570 for (;;) { 1800 wait_on_page_writeback(page);
1571 wait_on_page_writeback(page); 1801
1572 req = nfs_page_find_request(page); 1802 /* blocking call to cancel all requests and join to a single (head)
1573 if (req == NULL) 1803 * request */
1574 break; 1804 req = nfs_lock_and_join_requests(page, false);
1575 if (nfs_lock_request(req)) { 1805
1576 nfs_clear_request_commit(req); 1806 if (IS_ERR(req)) {
1577 nfs_inode_remove_request(req); 1807 ret = PTR_ERR(req);
1578 /* 1808 } else if (req) {
1579 * In case nfs_inode_remove_request has marked the 1809 /* all requests from this page have been cancelled by
1580 * page as being dirty 1810 * nfs_lock_and_join_requests, so just remove the head
1581 */ 1811 * request from the inode / page_private pointer and
1582 cancel_dirty_page(page, PAGE_CACHE_SIZE); 1812 * release it */
1583 nfs_unlock_and_release_request(req); 1813 nfs_inode_remove_request(req);
1584 break; 1814 /*
1585 } 1815 * In case nfs_inode_remove_request has marked the
1586 ret = nfs_wait_on_request(req); 1816 * page as being dirty
1587 nfs_release_request(req); 1817 */
1588 if (ret < 0) 1818 cancel_dirty_page(page, PAGE_CACHE_SIZE);
1589 break; 1819 nfs_unlock_and_release_request(req);
1590 } 1820 }
1821
1591 return ret; 1822 return ret;
1592} 1823}
1593 1824
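
The new nfs_lock_and_join_requests() above walks the page group and trylocks every request in order; if any trylock fails it unlocks everything taken so far, waits for the contended request, and restarts the whole pass. A generic sketch of that lock-all-or-unroll-and-retry idiom over a circular group (demo_trylock/demo_unlock/demo_wait are placeholders, not the NFS helpers):

	#include <stdbool.h>

	struct node { struct node *next; };	/* circular list, head included */

	bool demo_trylock(struct node *n);
	void demo_unlock(struct node *n);
	void demo_wait(struct node *n);

	static void demo_lock_group(struct node *head)
	{
		struct node *n, *t;

	retry:
		n = head;
		do {
			if (!demo_trylock(n)) {
				/* unroll: drop every lock taken on this pass */
				for (t = head; t != n; t = t->next)
					demo_unlock(t);
				demo_wait(n);	/* block until the current holder is done */
				goto retry;
			}
			n = n->next;
		} while (n != head);
		/* all members are now locked; safe to merge or tear down the group */
	}
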
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 6851b003f2a4..8f029db5d271 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -617,15 +617,6 @@ nfsd4_create(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
617 617
618 switch (create->cr_type) { 618 switch (create->cr_type) {
619 case NF4LNK: 619 case NF4LNK:
620 /* ugh! we have to null-terminate the linktext, or
621 * vfs_symlink() will choke. it is always safe to
622 * null-terminate by brute force, since at worst we
623 * will overwrite the first byte of the create namelen
624 * in the XDR buffer, which has already been extracted
625 * during XDR decode.
626 */
627 create->cr_linkname[create->cr_linklen] = 0;
628
629 status = nfsd_symlink(rqstp, &cstate->current_fh, 620 status = nfsd_symlink(rqstp, &cstate->current_fh,
630 create->cr_name, create->cr_namelen, 621 create->cr_name, create->cr_namelen,
631 create->cr_linkname, create->cr_linklen, 622 create->cr_linkname, create->cr_linklen,
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 83baf2bfe9e9..b56b1cc02718 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -600,7 +600,18 @@ nfsd4_decode_create(struct nfsd4_compoundargs *argp, struct nfsd4_create *create
600 READ_BUF(4); 600 READ_BUF(4);
601 create->cr_linklen = be32_to_cpup(p++); 601 create->cr_linklen = be32_to_cpup(p++);
602 READ_BUF(create->cr_linklen); 602 READ_BUF(create->cr_linklen);
603 SAVEMEM(create->cr_linkname, create->cr_linklen); 603 /*
604 * The VFS will want a null-terminated string, and
605 * null-terminating in place isn't safe since this might
606 * end on a page boundary:
607 */
608 create->cr_linkname =
609 kmalloc(create->cr_linklen + 1, GFP_KERNEL);
610 if (!create->cr_linkname)
611 return nfserr_jukebox;
612 memcpy(create->cr_linkname, p, create->cr_linklen);
613 create->cr_linkname[create->cr_linklen] = '\0';
614 defer_free(argp, kfree, create->cr_linkname);
604 break; 615 break;
605 case NF4BLK: 616 case NF4BLK:
606 case NF4CHR: 617 case NF4CHR:
@@ -2630,7 +2641,7 @@ nfsd4_encode_rdattr_error(struct xdr_stream *xdr, __be32 nfserr)
2630{ 2641{
2631 __be32 *p; 2642 __be32 *p;
2632 2643
2633 p = xdr_reserve_space(xdr, 6); 2644 p = xdr_reserve_space(xdr, 20);
2634 if (!p) 2645 if (!p)
2635 return NULL; 2646 return NULL;
2636 *p++ = htonl(2); 2647 *p++ = htonl(2);
@@ -3267,7 +3278,7 @@ nfsd4_encode_readlink(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd
3267 3278
3268 wire_count = htonl(maxcount); 3279 wire_count = htonl(maxcount);
3269 write_bytes_to_xdr_buf(xdr->buf, length_offset, &wire_count, 4); 3280 write_bytes_to_xdr_buf(xdr->buf, length_offset, &wire_count, 4);
3270 xdr_truncate_encode(xdr, length_offset + 4 + maxcount); 3281 xdr_truncate_encode(xdr, length_offset + 4 + ALIGN(maxcount, 4));
3271 if (maxcount & 3) 3282 if (maxcount & 3)
3272 write_bytes_to_xdr_buf(xdr->buf, length_offset + 4 + maxcount, 3283 write_bytes_to_xdr_buf(xdr->buf, length_offset + 4 + maxcount,
3273 &zero, 4 - (maxcount&3)); 3284 &zero, 4 - (maxcount&3));
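
The nfsd4_decode_create() change above stops NUL-terminating the symlink text inside the XDR buffer (the old trick wrote one byte past the decoded data, which is unsafe when the text ends exactly on a page boundary) and instead copies it into a private buffer sized one byte larger. The core of that pattern, with the variable names changed:

	/* copy a length-delimited string out of a protocol buffer and terminate the copy */
	char *link = kmalloc(linklen + 1, GFP_KERNEL);
	if (!link)
		return nfserr_jukebox;		/* ask the client to retry later */
	memcpy(link, p, linklen);
	link[linklen] = '\0';			/* terminator lands in our own buffer */
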
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index a106b3f2b22a..fae17c640df3 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -331,6 +331,7 @@ struct dlm_lock_resource
331 u16 state; 331 u16 state;
332 char lvb[DLM_LVB_LEN]; 332 char lvb[DLM_LVB_LEN];
333 unsigned int inflight_locks; 333 unsigned int inflight_locks;
334 unsigned int inflight_assert_workers;
334 unsigned long refmap[BITS_TO_LONGS(O2NM_MAX_NODES)]; 335 unsigned long refmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
335}; 336};
336 337
@@ -910,6 +911,9 @@ void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
910void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm, 911void dlm_lockres_grab_inflight_ref(struct dlm_ctxt *dlm,
911 struct dlm_lock_resource *res); 912 struct dlm_lock_resource *res);
912 913
914void __dlm_lockres_grab_inflight_worker(struct dlm_ctxt *dlm,
915 struct dlm_lock_resource *res);
916
913void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock); 917void dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
914void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock); 918void dlm_queue_bast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
915void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock); 919void __dlm_queue_ast(struct dlm_ctxt *dlm, struct dlm_lock *lock);
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 3087a21d32f9..82abf0cc9a12 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -581,6 +581,7 @@ static void dlm_init_lockres(struct dlm_ctxt *dlm,
581 atomic_set(&res->asts_reserved, 0); 581 atomic_set(&res->asts_reserved, 0);
582 res->migration_pending = 0; 582 res->migration_pending = 0;
583 res->inflight_locks = 0; 583 res->inflight_locks = 0;
584 res->inflight_assert_workers = 0;
584 585
585 res->dlm = dlm; 586 res->dlm = dlm;
586 587
@@ -683,6 +684,43 @@ void dlm_lockres_drop_inflight_ref(struct dlm_ctxt *dlm,
683 wake_up(&res->wq); 684 wake_up(&res->wq);
684} 685}
685 686
687void __dlm_lockres_grab_inflight_worker(struct dlm_ctxt *dlm,
688 struct dlm_lock_resource *res)
689{
690 assert_spin_locked(&res->spinlock);
691 res->inflight_assert_workers++;
692 mlog(0, "%s:%.*s: inflight assert worker++: now %u\n",
693 dlm->name, res->lockname.len, res->lockname.name,
694 res->inflight_assert_workers);
695}
696
697static void dlm_lockres_grab_inflight_worker(struct dlm_ctxt *dlm,
698 struct dlm_lock_resource *res)
699{
700 spin_lock(&res->spinlock);
701 __dlm_lockres_grab_inflight_worker(dlm, res);
702 spin_unlock(&res->spinlock);
703}
704
705static void __dlm_lockres_drop_inflight_worker(struct dlm_ctxt *dlm,
706 struct dlm_lock_resource *res)
707{
708 assert_spin_locked(&res->spinlock);
709 BUG_ON(res->inflight_assert_workers == 0);
710 res->inflight_assert_workers--;
711 mlog(0, "%s:%.*s: inflight assert worker--: now %u\n",
712 dlm->name, res->lockname.len, res->lockname.name,
713 res->inflight_assert_workers);
714}
715
716static void dlm_lockres_drop_inflight_worker(struct dlm_ctxt *dlm,
717 struct dlm_lock_resource *res)
718{
719 spin_lock(&res->spinlock);
720 __dlm_lockres_drop_inflight_worker(dlm, res);
721 spin_unlock(&res->spinlock);
722}
723
686/* 724/*
687 * lookup a lock resource by name. 725 * lookup a lock resource by name.
688 * may already exist in the hashtable. 726 * may already exist in the hashtable.
@@ -1603,7 +1641,8 @@ send_response:
1603 mlog(ML_ERROR, "failed to dispatch assert master work\n"); 1641 mlog(ML_ERROR, "failed to dispatch assert master work\n");
1604 response = DLM_MASTER_RESP_ERROR; 1642 response = DLM_MASTER_RESP_ERROR;
1605 dlm_lockres_put(res); 1643 dlm_lockres_put(res);
1606 } 1644 } else
1645 dlm_lockres_grab_inflight_worker(dlm, res);
1607 } else { 1646 } else {
1608 if (res) 1647 if (res)
1609 dlm_lockres_put(res); 1648 dlm_lockres_put(res);
@@ -2118,6 +2157,8 @@ static void dlm_assert_master_worker(struct dlm_work_item *item, void *data)
2118 dlm_lockres_release_ast(dlm, res); 2157 dlm_lockres_release_ast(dlm, res);
2119 2158
2120put: 2159put:
2160 dlm_lockres_drop_inflight_worker(dlm, res);
2161
2121 dlm_lockres_put(res); 2162 dlm_lockres_put(res);
2122 2163
2123 mlog(0, "finished with dlm_assert_master_worker\n"); 2164 mlog(0, "finished with dlm_assert_master_worker\n");
@@ -3088,11 +3129,15 @@ static int dlm_add_migration_mle(struct dlm_ctxt *dlm,
3088 /* remove it so that only one mle will be found */ 3129 /* remove it so that only one mle will be found */
3089 __dlm_unlink_mle(dlm, tmp); 3130 __dlm_unlink_mle(dlm, tmp);
3090 __dlm_mle_detach_hb_events(dlm, tmp); 3131 __dlm_mle_detach_hb_events(dlm, tmp);
3091 ret = DLM_MIGRATE_RESPONSE_MASTERY_REF; 3132 if (tmp->type == DLM_MLE_MASTER) {
3092 mlog(0, "%s:%.*s: master=%u, newmaster=%u, " 3133 ret = DLM_MIGRATE_RESPONSE_MASTERY_REF;
3093 "telling master to get ref for cleared out mle " 3134 mlog(0, "%s:%.*s: master=%u, newmaster=%u, "
3094 "during migration\n", dlm->name, namelen, name, 3135 "telling master to get ref "
3095 master, new_master); 3136 "for cleared out mle during "
3137 "migration\n", dlm->name,
3138 namelen, name, master,
3139 new_master);
3140 }
3096 } 3141 }
3097 spin_unlock(&tmp->spinlock); 3142 spin_unlock(&tmp->spinlock);
3098 } 3143 }
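
The inflight_assert_workers counter added above acts as a pin on the lock resource: it is bumped under res->spinlock whenever an assert-master work item is queued, dropped when the worker finishes, and dlm_run_purge_list() in dlmthread.c now refuses to purge a resource while the count is non-zero. A stripped-down sketch of that "don't free while async work is outstanding" pattern, with hypothetical names:

	#include <linux/spinlock.h>
	#include <linux/types.h>

	struct demo_res {
		spinlock_t lock;
		unsigned int inflight_workers;
	};

	static void demo_grab_worker(struct demo_res *res)
	{
		spin_lock(&res->lock);
		res->inflight_workers++;	/* pin: queued work still references res */
		spin_unlock(&res->lock);
	}

	static void demo_drop_worker(struct demo_res *res)
	{
		spin_lock(&res->lock);
		res->inflight_workers--;	/* worker finished, unpin */
		spin_unlock(&res->lock);
	}

	static bool demo_can_purge(struct demo_res *res)
	{
		/* caller holds res->lock; skip purge while workers are in flight */
		return res->inflight_workers == 0;
	}
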
diff --git a/fs/ocfs2/dlm/dlmrecovery.c b/fs/ocfs2/dlm/dlmrecovery.c
index 5de019437ea5..45067faf5695 100644
--- a/fs/ocfs2/dlm/dlmrecovery.c
+++ b/fs/ocfs2/dlm/dlmrecovery.c
@@ -1708,7 +1708,8 @@ int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data,
1708 mlog_errno(-ENOMEM); 1708 mlog_errno(-ENOMEM);
1709 /* retry!? */ 1709 /* retry!? */
1710 BUG(); 1710 BUG();
1711 } 1711 } else
1712 __dlm_lockres_grab_inflight_worker(dlm, res);
1712 } else /* put.. incase we are not the master */ 1713 } else /* put.. incase we are not the master */
1713 dlm_lockres_put(res); 1714 dlm_lockres_put(res);
1714 spin_unlock(&res->spinlock); 1715 spin_unlock(&res->spinlock);
diff --git a/fs/ocfs2/dlm/dlmthread.c b/fs/ocfs2/dlm/dlmthread.c
index 9db869de829d..69aac6f088ad 100644
--- a/fs/ocfs2/dlm/dlmthread.c
+++ b/fs/ocfs2/dlm/dlmthread.c
@@ -259,12 +259,15 @@ static void dlm_run_purge_list(struct dlm_ctxt *dlm,
259 * refs on it. */ 259 * refs on it. */
260 unused = __dlm_lockres_unused(lockres); 260 unused = __dlm_lockres_unused(lockres);
261 if (!unused || 261 if (!unused ||
262 (lockres->state & DLM_LOCK_RES_MIGRATING)) { 262 (lockres->state & DLM_LOCK_RES_MIGRATING) ||
263 (lockres->inflight_assert_workers != 0)) {
263 mlog(0, "%s: res %.*s is in use or being remastered, " 264 mlog(0, "%s: res %.*s is in use or being remastered, "
264 "used %d, state %d\n", dlm->name, 265 "used %d, state %d, assert master workers %u\n",
265 lockres->lockname.len, lockres->lockname.name, 266 dlm->name, lockres->lockname.len,
266 !unused, lockres->state); 267 lockres->lockname.name,
267 list_move_tail(&dlm->purge_list, &lockres->purge); 268 !unused, lockres->state,
269 lockres->inflight_assert_workers);
270 list_move_tail(&lockres->purge, &dlm->purge_list);
268 spin_unlock(&lockres->spinlock); 271 spin_unlock(&lockres->spinlock);
269 continue; 272 continue;
270 } 273 }
diff --git a/fs/ocfs2/dlm/dlmunlock.c b/fs/ocfs2/dlm/dlmunlock.c
index 5698b52cf5c9..2e3c9dbab68c 100644
--- a/fs/ocfs2/dlm/dlmunlock.c
+++ b/fs/ocfs2/dlm/dlmunlock.c
@@ -191,7 +191,9 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
191 DLM_UNLOCK_CLEAR_CONVERT_TYPE); 191 DLM_UNLOCK_CLEAR_CONVERT_TYPE);
192 } else if (status == DLM_RECOVERING || 192 } else if (status == DLM_RECOVERING ||
193 status == DLM_MIGRATING || 193 status == DLM_MIGRATING ||
194 status == DLM_FORWARD) { 194 status == DLM_FORWARD ||
195 status == DLM_NOLOCKMGR
196 ) {
195 /* must clear the actions because this unlock 197 /* must clear the actions because this unlock
196 * is about to be retried. cannot free or do 198 * is about to be retried. cannot free or do
197 * any list manipulation. */ 199 * any list manipulation. */
@@ -200,7 +202,8 @@ static enum dlm_status dlmunlock_common(struct dlm_ctxt *dlm,
200 res->lockname.name, 202 res->lockname.name,
201 status==DLM_RECOVERING?"recovering": 203 status==DLM_RECOVERING?"recovering":
202 (status==DLM_MIGRATING?"migrating": 204 (status==DLM_MIGRATING?"migrating":
203 "forward")); 205 (status == DLM_FORWARD ? "forward" :
206 "nolockmanager")));
204 actions = 0; 207 actions = 0;
205 } 208 }
206 if (flags & LKM_CANCEL) 209 if (flags & LKM_CANCEL)
@@ -364,7 +367,10 @@ static enum dlm_status dlm_send_remote_unlock_request(struct dlm_ctxt *dlm,
364 * updated state to the recovery master. this thread 367 * updated state to the recovery master. this thread
365 * just needs to finish out the operation and call 368 * just needs to finish out the operation and call
366 * the unlockast. */ 369 * the unlockast. */
367 ret = DLM_NORMAL; 370 if (dlm_is_node_dead(dlm, owner))
371 ret = DLM_NORMAL;
372 else
373 ret = DLM_NOLOCKMGR;
368 } else { 374 } else {
369 /* something bad. this will BUG in ocfs2 */ 375 /* something bad. this will BUG in ocfs2 */
370 ret = dlm_err_to_dlm_status(tmpret); 376 ret = dlm_err_to_dlm_status(tmpret);
@@ -638,7 +644,9 @@ retry:
638 644
639 if (status == DLM_RECOVERING || 645 if (status == DLM_RECOVERING ||
640 status == DLM_MIGRATING || 646 status == DLM_MIGRATING ||
641 status == DLM_FORWARD) { 647 status == DLM_FORWARD ||
648 status == DLM_NOLOCKMGR) {
649
642 /* We want to go away for a tiny bit to allow recovery 650 /* We want to go away for a tiny bit to allow recovery
643 * / migration to complete on this resource. I don't 651 * / migration to complete on this resource. I don't
644 * know of any wait queue we could sleep on as this 652 * know of any wait queue we could sleep on as this
@@ -650,7 +658,7 @@ retry:
650 msleep(50); 658 msleep(50);
651 659
652 mlog(0, "retrying unlock due to pending recovery/" 660 mlog(0, "retrying unlock due to pending recovery/"
653 "migration/in-progress\n"); 661 "migration/in-progress/reconnect\n");
654 goto retry; 662 goto retry;
655 } 663 }
656 664
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index 2060fc398445..8add6f1030d7 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -205,6 +205,21 @@ static struct inode *ocfs2_get_init_inode(struct inode *dir, umode_t mode)
205 return inode; 205 return inode;
206} 206}
207 207
208static void ocfs2_cleanup_add_entry_failure(struct ocfs2_super *osb,
209 struct dentry *dentry, struct inode *inode)
210{
211 struct ocfs2_dentry_lock *dl = dentry->d_fsdata;
212
213 ocfs2_simple_drop_lockres(osb, &dl->dl_lockres);
214 ocfs2_lock_res_free(&dl->dl_lockres);
215 BUG_ON(dl->dl_count != 1);
216 spin_lock(&dentry_attach_lock);
217 dentry->d_fsdata = NULL;
218 spin_unlock(&dentry_attach_lock);
219 kfree(dl);
220 iput(inode);
221}
222
208static int ocfs2_mknod(struct inode *dir, 223static int ocfs2_mknod(struct inode *dir,
209 struct dentry *dentry, 224 struct dentry *dentry,
210 umode_t mode, 225 umode_t mode,
@@ -231,6 +246,7 @@ static int ocfs2_mknod(struct inode *dir,
231 sigset_t oldset; 246 sigset_t oldset;
232 int did_block_signals = 0; 247 int did_block_signals = 0;
233 struct posix_acl *default_acl = NULL, *acl = NULL; 248 struct posix_acl *default_acl = NULL, *acl = NULL;
249 struct ocfs2_dentry_lock *dl = NULL;
234 250
235 trace_ocfs2_mknod(dir, dentry, dentry->d_name.len, dentry->d_name.name, 251 trace_ocfs2_mknod(dir, dentry, dentry->d_name.len, dentry->d_name.name,
236 (unsigned long long)OCFS2_I(dir)->ip_blkno, 252 (unsigned long long)OCFS2_I(dir)->ip_blkno,
@@ -423,6 +439,8 @@ static int ocfs2_mknod(struct inode *dir,
423 goto leave; 439 goto leave;
424 } 440 }
425 441
442 dl = dentry->d_fsdata;
443
426 status = ocfs2_add_entry(handle, dentry, inode, 444 status = ocfs2_add_entry(handle, dentry, inode,
427 OCFS2_I(inode)->ip_blkno, parent_fe_bh, 445 OCFS2_I(inode)->ip_blkno, parent_fe_bh,
428 &lookup); 446 &lookup);
@@ -469,6 +487,9 @@ leave:
469 * ocfs2_delete_inode will mutex_lock again. 487 * ocfs2_delete_inode will mutex_lock again.
470 */ 488 */
471 if ((status < 0) && inode) { 489 if ((status < 0) && inode) {
490 if (dl)
491 ocfs2_cleanup_add_entry_failure(osb, dentry, inode);
492
472 OCFS2_I(inode)->ip_flags |= OCFS2_INODE_SKIP_ORPHAN_DIR; 493 OCFS2_I(inode)->ip_flags |= OCFS2_INODE_SKIP_ORPHAN_DIR;
473 clear_nlink(inode); 494 clear_nlink(inode);
474 iput(inode); 495 iput(inode);
@@ -991,6 +1012,65 @@ leave:
991 return status; 1012 return status;
992} 1013}
993 1014
1015static int ocfs2_check_if_ancestor(struct ocfs2_super *osb,
1016 u64 src_inode_no, u64 dest_inode_no)
1017{
1018 int ret = 0, i = 0;
1019 u64 parent_inode_no = 0;
1020 u64 child_inode_no = src_inode_no;
1021 struct inode *child_inode;
1022
1023#define MAX_LOOKUP_TIMES 32
1024 while (1) {
1025 child_inode = ocfs2_iget(osb, child_inode_no, 0, 0);
1026 if (IS_ERR(child_inode)) {
1027 ret = PTR_ERR(child_inode);
1028 break;
1029 }
1030
1031 ret = ocfs2_inode_lock(child_inode, NULL, 0);
1032 if (ret < 0) {
1033 iput(child_inode);
1034 if (ret != -ENOENT)
1035 mlog_errno(ret);
1036 break;
1037 }
1038
1039 ret = ocfs2_lookup_ino_from_name(child_inode, "..", 2,
1040 &parent_inode_no);
1041 ocfs2_inode_unlock(child_inode, 0);
1042 iput(child_inode);
1043 if (ret < 0) {
1044 ret = -ENOENT;
1045 break;
1046 }
1047
1048 if (parent_inode_no == dest_inode_no) {
1049 ret = 1;
1050 break;
1051 }
1052
1053 if (parent_inode_no == osb->root_inode->i_ino) {
1054 ret = 0;
1055 break;
1056 }
1057
1058 child_inode_no = parent_inode_no;
1059
1060 if (++i >= MAX_LOOKUP_TIMES) {
1061 mlog(ML_NOTICE, "max lookup times reached, filesystem "
1062 "may have nested directories, "
1063 "src inode: %llu, dest inode: %llu.\n",
1064 (unsigned long long)src_inode_no,
1065 (unsigned long long)dest_inode_no);
1066 ret = 0;
1067 break;
1068 }
1069 }
1070
1071 return ret;
1072}
1073
994/* 1074/*
995 * The only place this should be used is rename! 1075 * The only place this should be used is rename!
996 * if they have the same id, then the 1st one is the only one locked. 1076 * if they have the same id, then the 1st one is the only one locked.
@@ -1002,6 +1082,7 @@ static int ocfs2_double_lock(struct ocfs2_super *osb,
1002 struct inode *inode2) 1082 struct inode *inode2)
1003{ 1083{
1004 int status; 1084 int status;
1085 int inode1_is_ancestor, inode2_is_ancestor;
1005 struct ocfs2_inode_info *oi1 = OCFS2_I(inode1); 1086 struct ocfs2_inode_info *oi1 = OCFS2_I(inode1);
1006 struct ocfs2_inode_info *oi2 = OCFS2_I(inode2); 1087 struct ocfs2_inode_info *oi2 = OCFS2_I(inode2);
1007 struct buffer_head **tmpbh; 1088 struct buffer_head **tmpbh;
@@ -1015,9 +1096,26 @@ static int ocfs2_double_lock(struct ocfs2_super *osb,
1015 if (*bh2) 1096 if (*bh2)
1016 *bh2 = NULL; 1097 *bh2 = NULL;
1017 1098
1018 /* we always want to lock the one with the lower lockid first. */ 1099 /* we always want to lock the one with the lower lockid first.
1100 * and if they are nested, we lock ancestor first */
1019 if (oi1->ip_blkno != oi2->ip_blkno) { 1101 if (oi1->ip_blkno != oi2->ip_blkno) {
1020 if (oi1->ip_blkno < oi2->ip_blkno) { 1102 inode1_is_ancestor = ocfs2_check_if_ancestor(osb, oi2->ip_blkno,
1103 oi1->ip_blkno);
1104 if (inode1_is_ancestor < 0) {
1105 status = inode1_is_ancestor;
1106 goto bail;
1107 }
1108
1109 inode2_is_ancestor = ocfs2_check_if_ancestor(osb, oi1->ip_blkno,
1110 oi2->ip_blkno);
1111 if (inode2_is_ancestor < 0) {
1112 status = inode2_is_ancestor;
1113 goto bail;
1114 }
1115
1116 if ((inode1_is_ancestor == 1) ||
1117 (oi1->ip_blkno < oi2->ip_blkno &&
1118 inode2_is_ancestor == 0)) {
1021 /* switch id1 and id2 around */ 1119 /* switch id1 and id2 around */
1022 tmpbh = bh2; 1120 tmpbh = bh2;
1023 bh2 = bh1; 1121 bh2 = bh1;
@@ -1098,6 +1196,7 @@ static int ocfs2_rename(struct inode *old_dir,
1098 struct ocfs2_dir_lookup_result old_entry_lookup = { NULL, }; 1196 struct ocfs2_dir_lookup_result old_entry_lookup = { NULL, };
1099 struct ocfs2_dir_lookup_result orphan_insert = { NULL, }; 1197 struct ocfs2_dir_lookup_result orphan_insert = { NULL, };
1100 struct ocfs2_dir_lookup_result target_insert = { NULL, }; 1198 struct ocfs2_dir_lookup_result target_insert = { NULL, };
1199 bool should_add_orphan = false;
1101 1200
1102 /* At some point it might be nice to break this function up a 1201 /* At some point it might be nice to break this function up a
1103 * bit. */ 1202 * bit. */
@@ -1134,6 +1233,21 @@ static int ocfs2_rename(struct inode *old_dir,
1134 goto bail; 1233 goto bail;
1135 } 1234 }
1136 rename_lock = 1; 1235 rename_lock = 1;
1236
1237 /* here we cannot guarantee the inodes haven't just been
1238 * changed, so check if they are nested again */
1239 status = ocfs2_check_if_ancestor(osb, new_dir->i_ino,
1240 old_inode->i_ino);
1241 if (status < 0) {
1242 mlog_errno(status);
1243 goto bail;
1244 } else if (status == 1) {
1245 status = -EPERM;
1246 trace_ocfs2_rename_not_permitted(
1247 (unsigned long long)old_inode->i_ino,
1248 (unsigned long long)new_dir->i_ino);
1249 goto bail;
1250 }
1137 } 1251 }
1138 1252
1139 /* if old and new are the same, this'll just do one lock. */ 1253 /* if old and new are the same, this'll just do one lock. */
@@ -1304,6 +1418,7 @@ static int ocfs2_rename(struct inode *old_dir,
1304 mlog_errno(status); 1418 mlog_errno(status);
1305 goto bail; 1419 goto bail;
1306 } 1420 }
1421 should_add_orphan = true;
1307 } 1422 }
1308 } else { 1423 } else {
1309 BUG_ON(new_dentry->d_parent->d_inode != new_dir); 1424 BUG_ON(new_dentry->d_parent->d_inode != new_dir);
@@ -1348,17 +1463,6 @@ static int ocfs2_rename(struct inode *old_dir,
1348 goto bail; 1463 goto bail;
1349 } 1464 }
1350 1465
1351 if (S_ISDIR(new_inode->i_mode) ||
1352 (ocfs2_read_links_count(newfe) == 1)) {
1353 status = ocfs2_orphan_add(osb, handle, new_inode,
1354 newfe_bh, orphan_name,
1355 &orphan_insert, orphan_dir);
1356 if (status < 0) {
1357 mlog_errno(status);
1358 goto bail;
1359 }
1360 }
1361
1362 /* change the dirent to point to the correct inode */ 1466 /* change the dirent to point to the correct inode */
1363 status = ocfs2_update_entry(new_dir, handle, &target_lookup_res, 1467 status = ocfs2_update_entry(new_dir, handle, &target_lookup_res,
1364 old_inode); 1468 old_inode);
@@ -1373,6 +1477,15 @@ static int ocfs2_rename(struct inode *old_dir,
1373 else 1477 else
1374 ocfs2_add_links_count(newfe, -1); 1478 ocfs2_add_links_count(newfe, -1);
1375 ocfs2_journal_dirty(handle, newfe_bh); 1479 ocfs2_journal_dirty(handle, newfe_bh);
1480 if (should_add_orphan) {
1481 status = ocfs2_orphan_add(osb, handle, new_inode,
1482 newfe_bh, orphan_name,
1483 &orphan_insert, orphan_dir);
1484 if (status < 0) {
1485 mlog_errno(status);
1486 goto bail;
1487 }
1488 }
1376 } else { 1489 } else {
1377 /* if the name was not found in new_dir, add it now */ 1490 /* if the name was not found in new_dir, add it now */
1378 status = ocfs2_add_entry(handle, new_dentry, old_inode, 1491 status = ocfs2_add_entry(handle, new_dentry, old_inode,
@@ -1642,6 +1755,7 @@ static int ocfs2_symlink(struct inode *dir,
1642 struct ocfs2_dir_lookup_result lookup = { NULL, }; 1755 struct ocfs2_dir_lookup_result lookup = { NULL, };
1643 sigset_t oldset; 1756 sigset_t oldset;
1644 int did_block_signals = 0; 1757 int did_block_signals = 0;
1758 struct ocfs2_dentry_lock *dl = NULL;
1645 1759
1646 trace_ocfs2_symlink_begin(dir, dentry, symname, 1760 trace_ocfs2_symlink_begin(dir, dentry, symname,
1647 dentry->d_name.len, dentry->d_name.name); 1761 dentry->d_name.len, dentry->d_name.name);
@@ -1830,6 +1944,8 @@ static int ocfs2_symlink(struct inode *dir,
1830 goto bail; 1944 goto bail;
1831 } 1945 }
1832 1946
1947 dl = dentry->d_fsdata;
1948
1833 status = ocfs2_add_entry(handle, dentry, inode, 1949 status = ocfs2_add_entry(handle, dentry, inode,
1834 le64_to_cpu(fe->i_blkno), parent_fe_bh, 1950 le64_to_cpu(fe->i_blkno), parent_fe_bh,
1835 &lookup); 1951 &lookup);
@@ -1864,6 +1980,9 @@ bail:
1864 if (xattr_ac) 1980 if (xattr_ac)
1865 ocfs2_free_alloc_context(xattr_ac); 1981 ocfs2_free_alloc_context(xattr_ac);
1866 if ((status < 0) && inode) { 1982 if ((status < 0) && inode) {
1983 if (dl)
1984 ocfs2_cleanup_add_entry_failure(osb, dentry, inode);
1985
1867 OCFS2_I(inode)->ip_flags |= OCFS2_INODE_SKIP_ORPHAN_DIR; 1986 OCFS2_I(inode)->ip_flags |= OCFS2_INODE_SKIP_ORPHAN_DIR;
1868 clear_nlink(inode); 1987 clear_nlink(inode);
1869 iput(inode); 1988 iput(inode);
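
The ocfs2/namei.c changes above address a cross-directory rename deadlock: ocfs2_check_if_ancestor() walks ".." entries upward (capped at 32 hops) to learn whether one inode is an ancestor of the other, ocfs2_double_lock() then takes the ancestor's cluster lock first rather than ordering purely by block number, and ocfs2_rename() re-checks the relationship after taking the rename lock, returning -EPERM if the source turns out to be an ancestor of the destination directory. The lock-ordering decision reduces to something like this (hedged sketch, not the ocfs2 code):

	#include <linux/types.h>

	/* lock inode1 before inode2 if inode1 is the ancestor, or if neither is
	 * an ancestor and inode1 simply has the lower block number */
	static bool demo_lock_inode1_first(u64 blkno1, u64 blkno2,
					   bool inode1_is_ancestor,
					   bool inode2_is_ancestor)
	{
		if (inode1_is_ancestor)
			return true;
		if (inode2_is_ancestor)
			return false;
		return blkno1 < blkno2;
	}
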
diff --git a/fs/ocfs2/ocfs2_trace.h b/fs/ocfs2/ocfs2_trace.h
index 1b60c62aa9d6..6cb019b7c6a8 100644
--- a/fs/ocfs2/ocfs2_trace.h
+++ b/fs/ocfs2/ocfs2_trace.h
@@ -2292,6 +2292,8 @@ TRACE_EVENT(ocfs2_rename,
2292 __entry->new_len, __get_str(new_name)) 2292 __entry->new_len, __get_str(new_name))
2293); 2293);
2294 2294
2295DEFINE_OCFS2_ULL_ULL_EVENT(ocfs2_rename_not_permitted);
2296
2295TRACE_EVENT(ocfs2_rename_target_exists, 2297TRACE_EVENT(ocfs2_rename_target_exists,
2296 TP_PROTO(int new_len, const char *new_name), 2298 TP_PROTO(int new_len, const char *new_name),
2297 TP_ARGS(new_len, new_name), 2299 TP_ARGS(new_len, new_name),
diff --git a/fs/ocfs2/refcounttree.c b/fs/ocfs2/refcounttree.c
index 714e53b9cc66..636aab69ead5 100644
--- a/fs/ocfs2/refcounttree.c
+++ b/fs/ocfs2/refcounttree.c
@@ -4288,9 +4288,16 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
4288 goto out; 4288 goto out;
4289 } 4289 }
4290 4290
4291 error = ocfs2_rw_lock(inode, 1);
4292 if (error) {
4293 mlog_errno(error);
4294 goto out;
4295 }
4296
4291 error = ocfs2_inode_lock(inode, &old_bh, 1); 4297 error = ocfs2_inode_lock(inode, &old_bh, 1);
4292 if (error) { 4298 if (error) {
4293 mlog_errno(error); 4299 mlog_errno(error);
4300 ocfs2_rw_unlock(inode, 1);
4294 goto out; 4301 goto out;
4295 } 4302 }
4296 4303
@@ -4302,6 +4309,7 @@ static int ocfs2_reflink(struct dentry *old_dentry, struct inode *dir,
4302 up_write(&OCFS2_I(inode)->ip_xattr_sem); 4309 up_write(&OCFS2_I(inode)->ip_xattr_sem);
4303 4310
4304 ocfs2_inode_unlock(inode, 1); 4311 ocfs2_inode_unlock(inode, 1);
4312 ocfs2_rw_unlock(inode, 1);
4305 brelse(old_bh); 4313 brelse(old_bh);
4306 4314
4307 if (error) { 4315 if (error) {
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index c7a89cea5c5d..ddb662b32447 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1925,15 +1925,11 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
1925 1925
1926 ocfs2_shutdown_local_alloc(osb); 1926 ocfs2_shutdown_local_alloc(osb);
1927 1927
1928 ocfs2_truncate_log_shutdown(osb);
1929
1928 /* This will disable recovery and flush any recovery work. */ 1930 /* This will disable recovery and flush any recovery work. */
1929 ocfs2_recovery_exit(osb); 1931 ocfs2_recovery_exit(osb);
1930 1932
1931 /*
1932 * During dismount, when it recovers another node it will call
1933 * ocfs2_recover_orphans and queue delayed work osb_truncate_log_wq.
1934 */
1935 ocfs2_truncate_log_shutdown(osb);
1936
1937 ocfs2_journal_shutdown(osb); 1933 ocfs2_journal_shutdown(osb);
1938 1934
1939 ocfs2_sync_blockdev(sb); 1935 ocfs2_sync_blockdev(sb);
diff --git a/fs/proc/stat.c b/fs/proc/stat.c
index 9d231e9e5f0e..bf2d03f8fd3e 100644
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
@@ -184,29 +184,11 @@ static int show_stat(struct seq_file *p, void *v)
184 184
185static int stat_open(struct inode *inode, struct file *file) 185static int stat_open(struct inode *inode, struct file *file)
186{ 186{
187 size_t size = 1024 + 128 * num_possible_cpus(); 187 size_t size = 1024 + 128 * num_online_cpus();
188 char *buf;
189 struct seq_file *m;
190 int res;
191 188
192 /* minimum size to display an interrupt count : 2 bytes */ 189 /* minimum size to display an interrupt count : 2 bytes */
193 size += 2 * nr_irqs; 190 size += 2 * nr_irqs;
194 191 return single_open_size(file, show_stat, NULL, size);
195 /* don't ask for more than the kmalloc() max size */
196 if (size > KMALLOC_MAX_SIZE)
197 size = KMALLOC_MAX_SIZE;
198 buf = kmalloc(size, GFP_KERNEL);
199 if (!buf)
200 return -ENOMEM;
201
202 res = single_open(file, show_stat, NULL);
203 if (!res) {
204 m = file->private_data;
205 m->buf = buf;
206 m->size = ksize(buf);
207 } else
208 kfree(buf);
209 return res;
210} 192}
211 193
212static const struct file_operations proc_stat_operations = { 194static const struct file_operations proc_stat_operations = {
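
The /proc/stat change above drops the hand-rolled buffer management in favour of single_open_size(), which lets the caller pass an estimated output size so the seq_file buffer is allocated once up front instead of being grown by repeated doubling (the seq_file core still regrows it if the estimate falls short); the sizing heuristic also switches from num_possible_cpus() to num_online_cpus(), matching the per-CPU lines actually emitted. A minimal sketch of a proc file using the same helper (demo names, not from the patch):

	#include <linux/fs.h>
	#include <linux/seq_file.h>

	static int demo_show(struct seq_file *m, void *v)
	{
		seq_printf(m, "lines that should fit the preallocated buffer\n");
		return 0;
	}

	static int demo_open(struct inode *inode, struct file *file)
	{
		/* size the buffer up front for what one pass of demo_show() emits */
		return single_open_size(file, demo_show, NULL, 4096);
	}
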
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index 9cd5f63715c0..7f30bdc57d13 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -702,6 +702,7 @@ dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
702 struct dquot *dquot; 702 struct dquot *dquot;
703 unsigned long freed = 0; 703 unsigned long freed = 0;
704 704
705 spin_lock(&dq_list_lock);
705 head = free_dquots.prev; 706 head = free_dquots.prev;
706 while (head != &free_dquots && sc->nr_to_scan) { 707 while (head != &free_dquots && sc->nr_to_scan) {
707 dquot = list_entry(head, struct dquot, dq_free); 708 dquot = list_entry(head, struct dquot, dq_free);
@@ -713,6 +714,7 @@ dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
713 freed++; 714 freed++;
714 head = free_dquots.prev; 715 head = free_dquots.prev;
715 } 716 }
717 spin_unlock(&dq_list_lock);
716 return freed; 718 return freed;
717} 719}
718 720
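
The dquot change above is a straightforward locking fix: the shrinker's scan callback walks and edits the global free_dquots list, so the walk must run under dq_list_lock like every other user of that list. The general shape of a shrinker scan reclaiming from a lock-protected free list looks like this (hypothetical names, not the quota code):

	#include <linux/list.h>
	#include <linux/shrinker.h>
	#include <linux/spinlock.h>

	struct demo_obj { struct list_head lru; };

	static LIST_HEAD(demo_free_list);
	static DEFINE_SPINLOCK(demo_list_lock);

	static unsigned long demo_shrink_scan(struct shrinker *shrink,
					      struct shrink_control *sc)
	{
		unsigned long freed = 0;

		spin_lock(&demo_list_lock);
		while (!list_empty(&demo_free_list) && sc->nr_to_scan) {
			struct demo_obj *obj = list_first_entry(&demo_free_list,
								struct demo_obj, lru);

			list_del_init(&obj->lru);
			/* actual freeing of obj would happen here (or be deferred) */
			freed++;
			sc->nr_to_scan--;
		}
		spin_unlock(&demo_list_lock);

		return freed;
	}
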
diff --git a/fs/seq_file.c b/fs/seq_file.c
index 1d641bb108d2..3857b720cb1b 100644
--- a/fs/seq_file.c
+++ b/fs/seq_file.c
@@ -8,8 +8,10 @@
8#include <linux/fs.h> 8#include <linux/fs.h>
9#include <linux/export.h> 9#include <linux/export.h>
10#include <linux/seq_file.h> 10#include <linux/seq_file.h>
11#include <linux/vmalloc.h>
11#include <linux/slab.h> 12#include <linux/slab.h>
12#include <linux/cred.h> 13#include <linux/cred.h>
14#include <linux/mm.h>
13 15
14#include <asm/uaccess.h> 16#include <asm/uaccess.h>
15#include <asm/page.h> 17#include <asm/page.h>
@@ -30,6 +32,16 @@ static void seq_set_overflow(struct seq_file *m)
30 m->count = m->size; 32 m->count = m->size;
31} 33}
32 34
35static void *seq_buf_alloc(unsigned long size)
36{
37 void *buf;
38
39 buf = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);
40 if (!buf && size > PAGE_SIZE)
41 buf = vmalloc(size);
42 return buf;
43}
44
33/** 45/**
34 * seq_open - initialize sequential file 46 * seq_open - initialize sequential file
35 * @file: file we initialize 47 * @file: file we initialize
@@ -96,7 +108,7 @@ static int traverse(struct seq_file *m, loff_t offset)
96 return 0; 108 return 0;
97 } 109 }
98 if (!m->buf) { 110 if (!m->buf) {
99 m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL); 111 m->buf = seq_buf_alloc(m->size = PAGE_SIZE);
100 if (!m->buf) 112 if (!m->buf)
101 return -ENOMEM; 113 return -ENOMEM;
102 } 114 }
@@ -135,9 +147,9 @@ static int traverse(struct seq_file *m, loff_t offset)
135 147
136Eoverflow: 148Eoverflow:
137 m->op->stop(m, p); 149 m->op->stop(m, p);
138 kfree(m->buf); 150 kvfree(m->buf);
139 m->count = 0; 151 m->count = 0;
140 m->buf = kmalloc(m->size <<= 1, GFP_KERNEL); 152 m->buf = seq_buf_alloc(m->size <<= 1);
141 return !m->buf ? -ENOMEM : -EAGAIN; 153 return !m->buf ? -ENOMEM : -EAGAIN;
142} 154}
143 155
@@ -192,7 +204,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
192 204
193 /* grab buffer if we didn't have one */ 205 /* grab buffer if we didn't have one */
194 if (!m->buf) { 206 if (!m->buf) {
195 m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL); 207 m->buf = seq_buf_alloc(m->size = PAGE_SIZE);
196 if (!m->buf) 208 if (!m->buf)
197 goto Enomem; 209 goto Enomem;
198 } 210 }
@@ -232,9 +244,9 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
232 if (m->count < m->size) 244 if (m->count < m->size)
233 goto Fill; 245 goto Fill;
234 m->op->stop(m, p); 246 m->op->stop(m, p);
235 kfree(m->buf); 247 kvfree(m->buf);
236 m->count = 0; 248 m->count = 0;
237 m->buf = kmalloc(m->size <<= 1, GFP_KERNEL); 249 m->buf = seq_buf_alloc(m->size <<= 1);
238 if (!m->buf) 250 if (!m->buf)
239 goto Enomem; 251 goto Enomem;
240 m->version = 0; 252 m->version = 0;
@@ -350,7 +362,7 @@ EXPORT_SYMBOL(seq_lseek);
350int seq_release(struct inode *inode, struct file *file) 362int seq_release(struct inode *inode, struct file *file)
351{ 363{
352 struct seq_file *m = file->private_data; 364 struct seq_file *m = file->private_data;
353 kfree(m->buf); 365 kvfree(m->buf);
354 kfree(m); 366 kfree(m);
355 return 0; 367 return 0;
356} 368}
@@ -605,13 +617,13 @@ EXPORT_SYMBOL(single_open);
605int single_open_size(struct file *file, int (*show)(struct seq_file *, void *), 617int single_open_size(struct file *file, int (*show)(struct seq_file *, void *),
606 void *data, size_t size) 618 void *data, size_t size)
607{ 619{
608 char *buf = kmalloc(size, GFP_KERNEL); 620 char *buf = seq_buf_alloc(size);
609 int ret; 621 int ret;
610 if (!buf) 622 if (!buf)
611 return -ENOMEM; 623 return -ENOMEM;
612 ret = single_open(file, show, data); 624 ret = single_open(file, show, data);
613 if (ret) { 625 if (ret) {
614 kfree(buf); 626 kvfree(buf);
615 return ret; 627 return ret;
616 } 628 }
617 ((struct seq_file *)file->private_data)->buf = buf; 629 ((struct seq_file *)file->private_data)->buf = buf;
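
The seq_file.c change above routes every buffer allocation through seq_buf_alloc(), which tries kmalloc() with __GFP_NOWARN and falls back to vmalloc() for buffers larger than a page, and pairs every free with kvfree(), which releases either kind of memory. Later kernels expose this same idiom as kvmalloc()/kvfree(); the open-coded helper here is equivalent for this purpose. In sketch form:

	static void *demo_buf_alloc(size_t size)
	{
		void *buf = kmalloc(size, GFP_KERNEL | __GFP_NOWARN);	/* try slab quietly */

		if (!buf && size > PAGE_SIZE)
			buf = vmalloc(size);	/* large: virtually contiguous is enough */
		return buf;
	}
	/* later: kvfree(buf) is correct whether kmalloc() or vmalloc() succeeded */
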
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 96175df211b1..75c3fe5f3d9d 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -4298,8 +4298,8 @@ xfs_bmapi_delay(
4298} 4298}
4299 4299
4300 4300
4301int 4301static int
4302__xfs_bmapi_allocate( 4302xfs_bmapi_allocate(
4303 struct xfs_bmalloca *bma) 4303 struct xfs_bmalloca *bma)
4304{ 4304{
4305 struct xfs_mount *mp = bma->ip->i_mount; 4305 struct xfs_mount *mp = bma->ip->i_mount;
@@ -4578,9 +4578,6 @@ xfs_bmapi_write(
4578 bma.flist = flist; 4578 bma.flist = flist;
4579 bma.firstblock = firstblock; 4579 bma.firstblock = firstblock;
4580 4580
4581 if (flags & XFS_BMAPI_STACK_SWITCH)
4582 bma.stack_switch = 1;
4583
4584 while (bno < end && n < *nmap) { 4581 while (bno < end && n < *nmap) {
4585 inhole = eof || bma.got.br_startoff > bno; 4582 inhole = eof || bma.got.br_startoff > bno;
4586 wasdelay = !inhole && isnullstartblock(bma.got.br_startblock); 4583 wasdelay = !inhole && isnullstartblock(bma.got.br_startblock);
diff --git a/fs/xfs/xfs_bmap.h b/fs/xfs/xfs_bmap.h
index 38ba36e9b2f0..b879ca56a64c 100644
--- a/fs/xfs/xfs_bmap.h
+++ b/fs/xfs/xfs_bmap.h
@@ -77,7 +77,6 @@ typedef struct xfs_bmap_free
77 * from written to unwritten, otherwise convert from unwritten to written. 77 * from written to unwritten, otherwise convert from unwritten to written.
78 */ 78 */
79#define XFS_BMAPI_CONVERT 0x040 79#define XFS_BMAPI_CONVERT 0x040
80#define XFS_BMAPI_STACK_SWITCH 0x080
81 80
82#define XFS_BMAPI_FLAGS \ 81#define XFS_BMAPI_FLAGS \
83 { XFS_BMAPI_ENTIRE, "ENTIRE" }, \ 82 { XFS_BMAPI_ENTIRE, "ENTIRE" }, \
@@ -86,8 +85,7 @@ typedef struct xfs_bmap_free
86 { XFS_BMAPI_PREALLOC, "PREALLOC" }, \ 85 { XFS_BMAPI_PREALLOC, "PREALLOC" }, \
87 { XFS_BMAPI_IGSTATE, "IGSTATE" }, \ 86 { XFS_BMAPI_IGSTATE, "IGSTATE" }, \
88 { XFS_BMAPI_CONTIG, "CONTIG" }, \ 87 { XFS_BMAPI_CONTIG, "CONTIG" }, \
89 { XFS_BMAPI_CONVERT, "CONVERT" }, \ 88 { XFS_BMAPI_CONVERT, "CONVERT" }
90 { XFS_BMAPI_STACK_SWITCH, "STACK_SWITCH" }
91 89
92 90
93static inline int xfs_bmapi_aflag(int w) 91static inline int xfs_bmapi_aflag(int w)
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 703b3ec1796c..64731ef3324d 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -249,59 +249,6 @@ xfs_bmap_rtalloc(
249} 249}
250 250
251/* 251/*
252 * Stack switching interfaces for allocation
253 */
254static void
255xfs_bmapi_allocate_worker(
256 struct work_struct *work)
257{
258 struct xfs_bmalloca *args = container_of(work,
259 struct xfs_bmalloca, work);
260 unsigned long pflags;
261 unsigned long new_pflags = PF_FSTRANS;
262
263 /*
264 * we are in a transaction context here, but may also be doing work
265 * in kswapd context, and hence we may need to inherit that state
266 * temporarily to ensure that we don't block waiting for memory reclaim
267 * in any way.
268 */
269 if (args->kswapd)
270 new_pflags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
271
272 current_set_flags_nested(&pflags, new_pflags);
273
274 args->result = __xfs_bmapi_allocate(args);
275 complete(args->done);
276
277 current_restore_flags_nested(&pflags, new_pflags);
278}
279
280/*
281 * Some allocation requests often come in with little stack to work on. Push
282 * them off to a worker thread so there is lots of stack to use. Otherwise just
283 * call directly to avoid the context switch overhead here.
284 */
285int
286xfs_bmapi_allocate(
287 struct xfs_bmalloca *args)
288{
289 DECLARE_COMPLETION_ONSTACK(done);
290
291 if (!args->stack_switch)
292 return __xfs_bmapi_allocate(args);
293
294
295 args->done = &done;
296 args->kswapd = current_is_kswapd();
297 INIT_WORK_ONSTACK(&args->work, xfs_bmapi_allocate_worker);
298 queue_work(xfs_alloc_wq, &args->work);
299 wait_for_completion(&done);
300 destroy_work_on_stack(&args->work);
301 return args->result;
302}
303
304/*
305 * Check if the endoff is outside the last extent. If so the caller will grow 252 * Check if the endoff is outside the last extent. If so the caller will grow
306 * the allocation to a stripe unit boundary. All offsets are considered outside 253 * the allocation to a stripe unit boundary. All offsets are considered outside
307 * the end of file for an empty fork, so 1 is returned in *eof in that case. 254 * the end of file for an empty fork, so 1 is returned in *eof in that case.
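
The xfs_bmap_util.c hunk above removes the generic "allocate on a worker thread" shim; as the xfs_btree.c diff further down shows, the stack switch now happens only for BMBT block splits, where the deep stack usage actually occurs. The underlying idiom is unchanged: package the arguments, queue the work, and wait on an on-stack completion so the operation runs with a fresh worker stack. A trimmed-down sketch of that idiom (demo names; the real code also propagates kswapd/PF_FSTRANS state into the worker):

	#include <linux/completion.h>
	#include <linux/workqueue.h>

	struct demo_args {
		int result;
		struct completion *done;
		struct work_struct work;
	};

	static void demo_worker(struct work_struct *work)
	{
		struct demo_args *args = container_of(work, struct demo_args, work);

		args->result = 0;		/* the stack-hungry operation runs here */
		complete(args->done);
	}

	static int demo_run_with_fresh_stack(void)
	{
		DECLARE_COMPLETION_ONSTACK(done);
		struct demo_args args = { .done = &done };

		INIT_WORK_ONSTACK(&args.work, demo_worker);
		queue_work(system_unbound_wq, &args.work);	/* any workqueue will do */
		wait_for_completion(&done);
		destroy_work_on_stack(&args.work);
		return args.result;
	}
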
diff --git a/fs/xfs/xfs_bmap_util.h b/fs/xfs/xfs_bmap_util.h
index 075f72232a64..2fdb72d2c908 100644
--- a/fs/xfs/xfs_bmap_util.h
+++ b/fs/xfs/xfs_bmap_util.h
@@ -55,8 +55,6 @@ struct xfs_bmalloca {
55 bool userdata;/* set if is user data */ 55 bool userdata;/* set if is user data */
56 bool aeof; /* allocated space at eof */ 56 bool aeof; /* allocated space at eof */
57 bool conv; /* overwriting unwritten extents */ 57 bool conv; /* overwriting unwritten extents */
58 bool stack_switch;
59 bool kswapd; /* allocation in kswapd context */
60 int flags; 58 int flags;
61 struct completion *done; 59 struct completion *done;
62 struct work_struct work; 60 struct work_struct work;
@@ -66,8 +64,6 @@ struct xfs_bmalloca {
66int xfs_bmap_finish(struct xfs_trans **tp, struct xfs_bmap_free *flist, 64int xfs_bmap_finish(struct xfs_trans **tp, struct xfs_bmap_free *flist,
67 int *committed); 65 int *committed);
68int xfs_bmap_rtalloc(struct xfs_bmalloca *ap); 66int xfs_bmap_rtalloc(struct xfs_bmalloca *ap);
69int xfs_bmapi_allocate(struct xfs_bmalloca *args);
70int __xfs_bmapi_allocate(struct xfs_bmalloca *args);
71int xfs_bmap_eof(struct xfs_inode *ip, xfs_fileoff_t endoff, 67int xfs_bmap_eof(struct xfs_inode *ip, xfs_fileoff_t endoff,
72 int whichfork, int *eof); 68 int whichfork, int *eof);
73int xfs_bmap_count_blocks(struct xfs_trans *tp, struct xfs_inode *ip, 69int xfs_bmap_count_blocks(struct xfs_trans *tp, struct xfs_inode *ip,
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c
index bf810c6baf2b..cf893bc1e373 100644
--- a/fs/xfs/xfs_btree.c
+++ b/fs/xfs/xfs_btree.c
@@ -33,6 +33,7 @@
33#include "xfs_error.h" 33#include "xfs_error.h"
34#include "xfs_trace.h" 34#include "xfs_trace.h"
35#include "xfs_cksum.h" 35#include "xfs_cksum.h"
36#include "xfs_alloc.h"
36 37
37/* 38/*
38 * Cursor allocation zone. 39 * Cursor allocation zone.
@@ -2323,7 +2324,7 @@ error1:
2323 * record (to be inserted into parent). 2324 * record (to be inserted into parent).
2324 */ 2325 */
2325STATIC int /* error */ 2326STATIC int /* error */
2326xfs_btree_split( 2327__xfs_btree_split(
2327 struct xfs_btree_cur *cur, 2328 struct xfs_btree_cur *cur,
2328 int level, 2329 int level,
2329 union xfs_btree_ptr *ptrp, 2330 union xfs_btree_ptr *ptrp,
@@ -2503,6 +2504,85 @@ error0:
2503 return error; 2504 return error;
2504} 2505}
2505 2506
2507struct xfs_btree_split_args {
2508 struct xfs_btree_cur *cur;
2509 int level;
2510 union xfs_btree_ptr *ptrp;
2511 union xfs_btree_key *key;
2512 struct xfs_btree_cur **curp;
2513 int *stat; /* success/failure */
2514 int result;
2515 bool kswapd; /* allocation in kswapd context */
2516 struct completion *done;
2517 struct work_struct work;
2518};
2519
2520/*
2521 * Stack switching interfaces for allocation
2522 */
2523static void
2524xfs_btree_split_worker(
2525 struct work_struct *work)
2526{
2527 struct xfs_btree_split_args *args = container_of(work,
2528 struct xfs_btree_split_args, work);
2529 unsigned long pflags;
2530 unsigned long new_pflags = PF_FSTRANS;
2531
2532 /*
2533 * we are in a transaction context here, but may also be doing work
2534 * in kswapd context, and hence we may need to inherit that state
2535 * temporarily to ensure that we don't block waiting for memory reclaim
2536 * in any way.
2537 */
2538 if (args->kswapd)
2539 new_pflags |= PF_MEMALLOC | PF_SWAPWRITE | PF_KSWAPD;
2540
2541 current_set_flags_nested(&pflags, new_pflags);
2542
2543 args->result = __xfs_btree_split(args->cur, args->level, args->ptrp,
2544 args->key, args->curp, args->stat);
2545 complete(args->done);
2546
2547 current_restore_flags_nested(&pflags, new_pflags);
2548}
2549
2550/*
2551 * BMBT split requests often come in with little stack to work on. Push
2552 * them off to a worker thread so there is lots of stack to use. For the other
2553 * btree types, just call directly to avoid the context switch overhead here.
2554 */
2555STATIC int /* error */
2556xfs_btree_split(
2557 struct xfs_btree_cur *cur,
2558 int level,
2559 union xfs_btree_ptr *ptrp,
2560 union xfs_btree_key *key,
2561 struct xfs_btree_cur **curp,
2562 int *stat) /* success/failure */
2563{
2564 struct xfs_btree_split_args args;
2565 DECLARE_COMPLETION_ONSTACK(done);
2566
2567 if (cur->bc_btnum != XFS_BTNUM_BMAP)
2568 return __xfs_btree_split(cur, level, ptrp, key, curp, stat);
2569
2570 args.cur = cur;
2571 args.level = level;
2572 args.ptrp = ptrp;
2573 args.key = key;
2574 args.curp = curp;
2575 args.stat = stat;
2576 args.done = &done;
2577 args.kswapd = current_is_kswapd();
2578 INIT_WORK_ONSTACK(&args.work, xfs_btree_split_worker);
2579 queue_work(xfs_alloc_wq, &args.work);
2580 wait_for_completion(&done);
2581 destroy_work_on_stack(&args.work);
2582 return args.result;
2583}
2584
2585
2506/* 2586/*
2507 * Copy the old inode root contents into a real block and make the 2587 * Copy the old inode root contents into a real block and make the
2508 * broot point to it. 2588 * broot point to it.
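Editor's note: the new __xfs_btree_split()/xfs_btree_split() pair reuses the stack-switching idiom that the xfs_bmap_util.c hunk above removes: pack the arguments into an on-stack struct, queue it on xfs_alloc_wq so the heavy lifting runs on a worker thread with a fresh stack, and wait on an on-stack completion. A minimal sketch of that idiom with hypothetical names (my_args, my_do_work and my_wq are not part of this patch):

#include <linux/workqueue.h>
#include <linux/completion.h>

extern struct workqueue_struct *my_wq;	/* hypothetical dedicated workqueue */
int my_do_work(int input);		/* hypothetical stack-hungry helper */

struct my_args {
	int			input;
	int			result;
	struct completion	*done;
	struct work_struct	work;
};

static void my_worker(struct work_struct *work)
{
	struct my_args *args = container_of(work, struct my_args, work);

	/* Runs on the workqueue thread, i.e. with a nearly empty stack. */
	args->result = my_do_work(args->input);
	complete(args->done);
}

static int my_do_work_offloaded(int input)
{
	struct my_args args;
	DECLARE_COMPLETION_ONSTACK(done);

	args.input = input;
	args.done = &done;
	INIT_WORK_ONSTACK(&args.work, my_worker);
	queue_work(my_wq, &args.work);
	wait_for_completion(&done);
	destroy_work_on_stack(&args.work);
	return args.result;
}
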
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 6c5eb4c551e3..6d3ec2b6ee29 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -749,8 +749,7 @@ xfs_iomap_write_allocate(
749 * pointer that the caller gave to us. 749 * pointer that the caller gave to us.
750 */ 750 */
751 error = xfs_bmapi_write(tp, ip, map_start_fsb, 751 error = xfs_bmapi_write(tp, ip, map_start_fsb,
752 count_fsb, 752 count_fsb, 0,
753 XFS_BMAPI_STACK_SWITCH,
754 &first_block, 1, 753 &first_block, 1,
755 imap, &nimaps, &free_list); 754 imap, &nimaps, &free_list);
756 if (error) 755 if (error)
diff --git a/fs/xfs/xfs_sb.c b/fs/xfs/xfs_sb.c
index c3453b11f563..7703fa6770ff 100644
--- a/fs/xfs/xfs_sb.c
+++ b/fs/xfs/xfs_sb.c
@@ -483,10 +483,16 @@ xfs_sb_quota_to_disk(
483 } 483 }
484 484
485 /* 485 /*
486 * GQUOTINO and PQUOTINO cannot be used together in versions 486 * GQUOTINO and PQUOTINO cannot be used together in versions of
487 * of superblock that do not have pquotino. from->sb_flags 487 * superblock that do not have pquotino. from->sb_flags tells us which
488 * tells us which quota is active and should be copied to 488 * quota is active and should be copied to disk. If neither are active,
489 * disk. 489 * make sure we write NULLFSINO to the sb_gquotino field as a quota
490 * inode value of "0" is invalid when the XFS_SB_VERSION_QUOTA feature
491 * bit is set.
492 *
493 * Note that we don't need to handle the sb_uquotino or sb_pquotino here
494 * as they do not require any translation. Hence the main sb field loop
495 * will write them appropriately from the in-core superblock.
490 */ 496 */
491 if ((*fields & XFS_SB_GQUOTINO) && 497 if ((*fields & XFS_SB_GQUOTINO) &&
492 (from->sb_qflags & XFS_GQUOTA_ACCT)) 498 (from->sb_qflags & XFS_GQUOTA_ACCT))
@@ -494,6 +500,17 @@ xfs_sb_quota_to_disk(
494 else if ((*fields & XFS_SB_PQUOTINO) && 500 else if ((*fields & XFS_SB_PQUOTINO) &&
495 (from->sb_qflags & XFS_PQUOTA_ACCT)) 501 (from->sb_qflags & XFS_PQUOTA_ACCT))
496 to->sb_gquotino = cpu_to_be64(from->sb_pquotino); 502 to->sb_gquotino = cpu_to_be64(from->sb_pquotino);
503 else {
504 /*
505 * We can't rely on just the fields being logged to tell us
506 * that it is safe to write NULLFSINO - we should only do that
507 * if quotas are not actually enabled. Hence only write
508 * NULLFSINO if both in-core quota inodes are NULL.
509 */
510 if (from->sb_gquotino == NULLFSINO &&
511 from->sb_pquotino == NULLFSINO)
512 to->sb_gquotino = cpu_to_be64(NULLFSINO);
513 }
497 514
498 *fields &= ~(XFS_SB_PQUOTINO | XFS_SB_GQUOTINO); 515 *fields &= ~(XFS_SB_PQUOTINO | XFS_SB_GQUOTINO);
499} 516}
diff --git a/include/acpi/apei.h b/include/acpi/apei.h
index 04f349d8da73..76284bb560a6 100644
--- a/include/acpi/apei.h
+++ b/include/acpi/apei.h
@@ -42,5 +42,9 @@ ssize_t erst_read(u64 record_id, struct cper_record_header *record,
42 size_t buflen); 42 size_t buflen);
43int erst_clear(u64 record_id); 43int erst_clear(u64 record_id);
44 44
45int arch_apei_enable_cmcff(struct acpi_hest_header *hest_hdr, void *data);
46void arch_apei_report_mem_error(int sev, struct cper_sec_mem_err *mem_err);
47void arch_apei_flush_tlb_one(unsigned long addr);
48
45#endif 49#endif
46#endif 50#endif
diff --git a/include/acpi/video.h b/include/acpi/video.h
index ea4c7bbded4d..843ef1adfbfa 100644
--- a/include/acpi/video.h
+++ b/include/acpi/video.h
@@ -22,6 +22,7 @@ extern void acpi_video_unregister(void);
22extern void acpi_video_unregister_backlight(void); 22extern void acpi_video_unregister_backlight(void);
23extern int acpi_video_get_edid(struct acpi_device *device, int type, 23extern int acpi_video_get_edid(struct acpi_device *device, int type,
24 int device_id, void **edid); 24 int device_id, void **edid);
25extern bool acpi_video_verify_backlight_support(void);
25#else 26#else
26static inline int acpi_video_register(void) { return 0; } 27static inline int acpi_video_register(void) { return 0; }
27static inline void acpi_video_unregister(void) { return; } 28static inline void acpi_video_unregister(void) { return; }
@@ -31,6 +32,7 @@ static inline int acpi_video_get_edid(struct acpi_device *device, int type,
31{ 32{
32 return -ENODEV; 33 return -ENODEV;
33} 34}
35static inline bool acpi_video_verify_backlight_support(void) { return false; }
34#endif 36#endif
35 37
36#endif 38#endif
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 471ba48c7ae4..c1c0b0cf39b4 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -693,7 +693,7 @@
693 . = ALIGN(PAGE_SIZE); \ 693 . = ALIGN(PAGE_SIZE); \
694 *(.data..percpu..page_aligned) \ 694 *(.data..percpu..page_aligned) \
695 . = ALIGN(cacheline); \ 695 . = ALIGN(cacheline); \
696 *(.data..percpu..readmostly) \ 696 *(.data..percpu..read_mostly) \
697 . = ALIGN(cacheline); \ 697 . = ALIGN(cacheline); \
698 *(.data..percpu) \ 698 *(.data..percpu) \
699 *(.data..percpu..shared_aligned) \ 699 *(.data..percpu..shared_aligned) \
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index 0572035673f3..a70d45647898 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -237,13 +237,21 @@
237#define INTEL_BDW_GT3D_IDS(info) \ 237#define INTEL_BDW_GT3D_IDS(info) \
238 _INTEL_BDW_D_IDS(3, info) 238 _INTEL_BDW_D_IDS(3, info)
239 239
240#define INTEL_BDW_RSVDM_IDS(info) \
241 _INTEL_BDW_M_IDS(4, info)
242
243#define INTEL_BDW_RSVDD_IDS(info) \
244 _INTEL_BDW_D_IDS(4, info)
245
240#define INTEL_BDW_M_IDS(info) \ 246#define INTEL_BDW_M_IDS(info) \
241 INTEL_BDW_GT12M_IDS(info), \ 247 INTEL_BDW_GT12M_IDS(info), \
242 INTEL_BDW_GT3M_IDS(info) 248 INTEL_BDW_GT3M_IDS(info), \
249 INTEL_BDW_RSVDM_IDS(info)
243 250
244#define INTEL_BDW_D_IDS(info) \ 251#define INTEL_BDW_D_IDS(info) \
245 INTEL_BDW_GT12D_IDS(info), \ 252 INTEL_BDW_GT12D_IDS(info), \
246 INTEL_BDW_GT3D_IDS(info) 253 INTEL_BDW_GT3D_IDS(info), \
254 INTEL_BDW_RSVDD_IDS(info)
247 255
248#define INTEL_CHV_IDS(info) \ 256#define INTEL_CHV_IDS(info) \
249 INTEL_VGA_DEVICE(0x22b0, info), \ 257 INTEL_VGA_DEVICE(0x22b0, info), \
diff --git a/include/drm/i915_powerwell.h b/include/drm/i915_powerwell.h
index 2baba9996094..baa6f11b1837 100644
--- a/include/drm/i915_powerwell.h
+++ b/include/drm/i915_powerwell.h
@@ -32,5 +32,6 @@
32/* For use by hda_i915 driver */ 32/* For use by hda_i915 driver */
33extern int i915_request_power_well(void); 33extern int i915_request_power_well(void);
34extern int i915_release_power_well(void); 34extern int i915_release_power_well(void);
35extern int i915_get_cdclk_freq(void);
35 36
36#endif /* _I915_POWERWELL_H_ */ 37#endif /* _I915_POWERWELL_H_ */
diff --git a/include/dt-bindings/clock/exynos5420.h b/include/dt-bindings/clock/exynos5420.h
index 97dcb89d37d3..21d51ae1d242 100644
--- a/include/dt-bindings/clock/exynos5420.h
+++ b/include/dt-bindings/clock/exynos5420.h
@@ -63,7 +63,6 @@
63#define CLK_SCLK_MPHY_IXTAL24 161 63#define CLK_SCLK_MPHY_IXTAL24 161
64 64
65/* gate clocks */ 65/* gate clocks */
66#define CLK_ACLK66_PERIC 256
67#define CLK_UART0 257 66#define CLK_UART0 257
68#define CLK_UART1 258 67#define CLK_UART1 258
69#define CLK_UART2 259 68#define CLK_UART2 259
@@ -203,6 +202,8 @@
203#define CLK_MOUT_G3D 641 202#define CLK_MOUT_G3D 641
204#define CLK_MOUT_VPLL 642 203#define CLK_MOUT_VPLL 642
205#define CLK_MOUT_MAUDIO0 643 204#define CLK_MOUT_MAUDIO0 643
205#define CLK_MOUT_USER_ACLK333 644
206#define CLK_MOUT_SW_ACLK333 645
206 207
207/* divider clocks */ 208/* divider clocks */
208#define CLK_DOUT_PIXEL 768 209#define CLK_DOUT_PIXEL 768
diff --git a/include/dt-bindings/clock/imx6sl-clock.h b/include/dt-bindings/clock/imx6sl-clock.h
index 7cf5c9969336..b91dd462ba85 100644
--- a/include/dt-bindings/clock/imx6sl-clock.h
+++ b/include/dt-bindings/clock/imx6sl-clock.h
@@ -145,6 +145,7 @@
145#define IMX6SL_CLK_USDHC4 132 145#define IMX6SL_CLK_USDHC4 132
146#define IMX6SL_CLK_PLL4_AUDIO_DIV 133 146#define IMX6SL_CLK_PLL4_AUDIO_DIV 133
147#define IMX6SL_CLK_SPBA 134 147#define IMX6SL_CLK_SPBA 134
148#define IMX6SL_CLK_END 135 148#define IMX6SL_CLK_ENET 135
149#define IMX6SL_CLK_END 136
149 150
150#endif /* __DT_BINDINGS_CLOCK_IMX6SL_H */ 151#endif /* __DT_BINDINGS_CLOCK_IMX6SL_H */
diff --git a/include/dt-bindings/clock/stih415-clks.h b/include/dt-bindings/clock/stih415-clks.h
index 0d2c7397e028..d80caa68aebd 100644
--- a/include/dt-bindings/clock/stih415-clks.h
+++ b/include/dt-bindings/clock/stih415-clks.h
@@ -10,6 +10,7 @@
10#define CLK_ETH1_PHY 4 10#define CLK_ETH1_PHY 4
11 11
12/* CLOCKGEN A1 */ 12/* CLOCKGEN A1 */
13#define CLK_ICN_IF_2 0
13#define CLK_GMAC0_PHY 3 14#define CLK_GMAC0_PHY 3
14 15
15#endif 16#endif
diff --git a/include/dt-bindings/clock/stih416-clks.h b/include/dt-bindings/clock/stih416-clks.h
index 552c779eb6af..f9bdbd13568d 100644
--- a/include/dt-bindings/clock/stih416-clks.h
+++ b/include/dt-bindings/clock/stih416-clks.h
@@ -10,6 +10,7 @@
10#define CLK_ETH1_PHY 4 10#define CLK_ETH1_PHY 4
11 11
12/* CLOCKGEN A1 */ 12/* CLOCKGEN A1 */
13#define CLK_ICN_IF_2 0
13#define CLK_GMAC0_PHY 3 14#define CLK_GMAC0_PHY 3
14 15
15#endif 16#endif
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 5a645769f020..d2633ee099d9 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -186,6 +186,15 @@ static inline void *bio_data(struct bio *bio)
186#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \ 186#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
187 __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q))) 187 __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
188 188
189/*
190 * Check if adding a bio_vec after bprv with offset would create a gap in
191 * the SG list. Most drivers don't care about this, but some do.
192 */
193static inline bool bvec_gap_to_prev(struct bio_vec *bprv, unsigned int offset)
194{
195 return offset || ((bprv->bv_offset + bprv->bv_len) & (PAGE_SIZE - 1));
196}
197
189#define bio_io_error(bio) bio_endio((bio), -EIO) 198#define bio_io_error(bio) bio_endio((bio), -EIO)
190 199
191/* 200/*
@@ -644,10 +653,6 @@ struct biovec_slab {
644 653
645#if defined(CONFIG_BLK_DEV_INTEGRITY) 654#if defined(CONFIG_BLK_DEV_INTEGRITY)
646 655
647
648
649#define bip_vec_idx(bip, idx) (&(bip->bip_vec[(idx)]))
650
651#define bip_for_each_vec(bvl, bip, iter) \ 656#define bip_for_each_vec(bvl, bip, iter) \
652 for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter) 657 for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)
653 658
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 713f8b62b435..8699bcf5f099 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -512,6 +512,7 @@ struct request_queue {
512#define QUEUE_FLAG_DEAD 19 /* queue tear-down finished */ 512#define QUEUE_FLAG_DEAD 19 /* queue tear-down finished */
513#define QUEUE_FLAG_INIT_DONE 20 /* queue is initialized */ 513#define QUEUE_FLAG_INIT_DONE 20 /* queue is initialized */
514#define QUEUE_FLAG_NO_SG_MERGE 21 /* don't attempt to merge SG segments*/ 514#define QUEUE_FLAG_NO_SG_MERGE 21 /* don't attempt to merge SG segments*/
515#define QUEUE_FLAG_SG_GAPS 22 /* queue doesn't support SG gaps */
515 516
516#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \ 517#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
517 (1 << QUEUE_FLAG_STACKABLE) | \ 518 (1 << QUEUE_FLAG_STACKABLE) | \
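Editor's note: read together with the bio.h hunk above, the new QUEUE_FLAG_SG_GAPS bit and bvec_gap_to_prev() let merge code reject a bio_vec whose placement would leave a hole in the scatter list of a device that cannot tolerate one. A hedged sketch of such a check (the helper name is made up; real merge code walks the bio with iterators):

#include <linux/blkdev.h>
#include <linux/bio.h>

/* Sketch: refuse a merge that would create an SG gap on a queue that has
 * QUEUE_FLAG_SG_GAPS set. */
static bool sketch_bvec_may_follow(struct request_queue *q,
				   struct bio_vec *prev, struct bio_vec *next)
{
	if (test_bit(QUEUE_FLAG_SG_GAPS, &q->queue_flags) &&
	    bvec_gap_to_prev(prev, next->bv_offset))
		return false;
	return true;
}
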
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index ec4112d257bc..8f8ae95c6e27 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -482,8 +482,8 @@ extern struct cpufreq_governor cpufreq_gov_conservative;
482 *********************************************************************/ 482 *********************************************************************/
483 483
484/* Special Values of .frequency field */ 484/* Special Values of .frequency field */
485#define CPUFREQ_ENTRY_INVALID ~0 485#define CPUFREQ_ENTRY_INVALID ~0u
486#define CPUFREQ_TABLE_END ~1 486#define CPUFREQ_TABLE_END ~1u
487/* Special Values of .flags field */ 487/* Special Values of .flags field */
488#define CPUFREQ_BOOST_FREQ (1 << 0) 488#define CPUFREQ_BOOST_FREQ (1 << 0)
489 489
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index e2a6bd7fb133..45a91474487d 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -143,7 +143,7 @@ extern void elv_drain_elevator(struct request_queue *);
143 * io scheduler registration 143 * io scheduler registration
144 */ 144 */
145extern void __init load_default_elevator_module(void); 145extern void __init load_default_elevator_module(void);
146extern int __init elv_register(struct elevator_type *); 146extern int elv_register(struct elevator_type *);
147extern void elv_unregister(struct elevator_type *); 147extern void elv_unregister(struct elevator_type *);
148 148
149/* 149/*
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index 17aa1cce6f8e..30faf797c2c3 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -91,6 +91,7 @@ struct kernfs_elem_attr {
91 const struct kernfs_ops *ops; 91 const struct kernfs_ops *ops;
92 struct kernfs_open_node *open; 92 struct kernfs_open_node *open;
93 loff_t size; 93 loff_t size;
94 struct kernfs_node *notify_next; /* for kernfs_notify() */
94}; 95};
95 96
96/* 97/*
@@ -304,6 +305,7 @@ struct dentry *kernfs_mount_ns(struct file_system_type *fs_type, int flags,
304 struct kernfs_root *root, unsigned long magic, 305 struct kernfs_root *root, unsigned long magic,
305 bool *new_sb_created, const void *ns); 306 bool *new_sb_created, const void *ns);
306void kernfs_kill_sb(struct super_block *sb); 307void kernfs_kill_sb(struct super_block *sb);
308struct super_block *kernfs_pin_sb(struct kernfs_root *root, const void *ns);
307 309
308void kernfs_init(void); 310void kernfs_init(void);
309 311
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index b12f4bbd064c..35b51e7af886 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -578,8 +578,6 @@ struct mlx4_cq {
578 u32 cons_index; 578 u32 cons_index;
579 579
580 u16 irq; 580 u16 irq;
581 bool irq_affinity_change;
582
583 __be32 *set_ci_db; 581 __be32 *set_ci_db;
584 __be32 *arm_db; 582 __be32 *arm_db;
585 int arm_sn; 583 int arm_sn;
@@ -1167,6 +1165,8 @@ int mlx4_assign_eq(struct mlx4_dev *dev, char *name, struct cpu_rmap *rmap,
1167 int *vector); 1165 int *vector);
1168void mlx4_release_eq(struct mlx4_dev *dev, int vec); 1166void mlx4_release_eq(struct mlx4_dev *dev, int vec);
1169 1167
1168int mlx4_eq_get_irq(struct mlx4_dev *dev, int vec);
1169
1170int mlx4_get_phys_port_id(struct mlx4_dev *dev); 1170int mlx4_get_phys_port_id(struct mlx4_dev *dev);
1171int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port); 1171int mlx4_wol_read(struct mlx4_dev *dev, u64 *config, int port);
1172int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port); 1172int mlx4_wol_write(struct mlx4_dev *dev, u64 config, int port);
diff --git a/include/linux/mutex.h b/include/linux/mutex.h
index 11692dea18aa..42aa9b9ecd5f 100644
--- a/include/linux/mutex.h
+++ b/include/linux/mutex.h
@@ -17,6 +17,7 @@
17#include <linux/lockdep.h> 17#include <linux/lockdep.h>
18#include <linux/atomic.h> 18#include <linux/atomic.h>
19#include <asm/processor.h> 19#include <asm/processor.h>
20#include <linux/osq_lock.h>
20 21
21/* 22/*
22 * Simple, straightforward mutexes with strict semantics: 23 * Simple, straightforward mutexes with strict semantics:
@@ -46,7 +47,6 @@
46 * - detects multi-task circular deadlocks and prints out all affected 47 * - detects multi-task circular deadlocks and prints out all affected
47 * locks and tasks (and only those tasks) 48 * locks and tasks (and only those tasks)
48 */ 49 */
49struct optimistic_spin_queue;
50struct mutex { 50struct mutex {
51 /* 1: unlocked, 0: locked, negative: locked, possible waiters */ 51 /* 1: unlocked, 0: locked, negative: locked, possible waiters */
52 atomic_t count; 52 atomic_t count;
@@ -56,7 +56,7 @@ struct mutex {
56 struct task_struct *owner; 56 struct task_struct *owner;
57#endif 57#endif
58#ifdef CONFIG_MUTEX_SPIN_ON_OWNER 58#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
59 struct optimistic_spin_queue *osq; /* Spinner MCS lock */ 59 struct optimistic_spin_queue osq; /* Spinner MCS lock */
60#endif 60#endif
61#ifdef CONFIG_DEBUG_MUTEXES 61#ifdef CONFIG_DEBUG_MUTEXES
62 const char *name; 62 const char *name;
diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index 6a45fb583ff1..1d2a6ab6b8bb 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -32,15 +32,24 @@ static inline void touch_nmi_watchdog(void)
32#ifdef arch_trigger_all_cpu_backtrace 32#ifdef arch_trigger_all_cpu_backtrace
33static inline bool trigger_all_cpu_backtrace(void) 33static inline bool trigger_all_cpu_backtrace(void)
34{ 34{
35 arch_trigger_all_cpu_backtrace(); 35 arch_trigger_all_cpu_backtrace(true);
36 36
37 return true; 37 return true;
38} 38}
39static inline bool trigger_allbutself_cpu_backtrace(void)
40{
41 arch_trigger_all_cpu_backtrace(false);
42 return true;
43}
39#else 44#else
40static inline bool trigger_all_cpu_backtrace(void) 45static inline bool trigger_all_cpu_backtrace(void)
41{ 46{
42 return false; 47 return false;
43} 48}
49static inline bool trigger_allbutself_cpu_backtrace(void)
50{
51 return false;
52}
44#endif 53#endif
45 54
46#ifdef CONFIG_LOCKUP_DETECTOR 55#ifdef CONFIG_LOCKUP_DETECTOR
@@ -48,9 +57,14 @@ int hw_nmi_is_cpu_stuck(struct pt_regs *);
48u64 hw_nmi_get_sample_period(int watchdog_thresh); 57u64 hw_nmi_get_sample_period(int watchdog_thresh);
49extern int watchdog_user_enabled; 58extern int watchdog_user_enabled;
50extern int watchdog_thresh; 59extern int watchdog_thresh;
60extern int sysctl_softlockup_all_cpu_backtrace;
51struct ctl_table; 61struct ctl_table;
52extern int proc_dowatchdog(struct ctl_table *, int , 62extern int proc_dowatchdog(struct ctl_table *, int ,
53 void __user *, size_t *, loff_t *); 63 void __user *, size_t *, loff_t *);
54#endif 64#endif
55 65
66#ifdef CONFIG_HAVE_ACPI_APEI_NMI
67#include <asm/nmi.h>
68#endif
69
56#endif 70#endif
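Editor's note: the allbutself variant exists so a CPU that already knows it is stuck (for example the softlockup watchdog, gated by the new sysctl_softlockup_all_cpu_backtrace knob) can dump every other CPU's stack without re-triggering itself. A minimal, assumed usage sketch, presuming CONFIG_LOCKUP_DETECTOR:

#include <linux/nmi.h>

/* Sketch: from the detecting CPU, dump all remote stacks if the sysctl asks
 * for it; this is a no-op when the arch provides no backtrace hook. */
static void sketch_dump_remote_stacks(void)
{
	if (sysctl_softlockup_all_cpu_backtrace)
		trigger_allbutself_cpu_backtrace();
}
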
diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h
index a70c9493d55a..d449018d0726 100644
--- a/include/linux/of_mdio.h
+++ b/include/linux/of_mdio.h
@@ -25,9 +25,6 @@ struct phy_device *of_phy_attach(struct net_device *dev,
25 25
26extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np); 26extern struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np);
27 27
28extern void of_mdiobus_link_phydev(struct mii_bus *mdio,
29 struct phy_device *phydev);
30
31#else /* CONFIG_OF */ 28#else /* CONFIG_OF */
32static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np) 29static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
33{ 30{
@@ -63,11 +60,6 @@ static inline struct mii_bus *of_mdio_find_bus(struct device_node *mdio_np)
63{ 60{
64 return NULL; 61 return NULL;
65} 62}
66
67static inline void of_mdiobus_link_phydev(struct mii_bus *mdio,
68 struct phy_device *phydev)
69{
70}
71#endif /* CONFIG_OF */ 63#endif /* CONFIG_OF */
72 64
73#if defined(CONFIG_OF) && defined(CONFIG_FIXED_PHY) 65#if defined(CONFIG_OF) && defined(CONFIG_FIXED_PHY)
diff --git a/include/linux/osq_lock.h b/include/linux/osq_lock.h
new file mode 100644
index 000000000000..90230d5811c5
--- /dev/null
+++ b/include/linux/osq_lock.h
@@ -0,0 +1,27 @@
1#ifndef __LINUX_OSQ_LOCK_H
2#define __LINUX_OSQ_LOCK_H
3
4/*
5 * An MCS like lock especially tailored for optimistic spinning for sleeping
6 * lock implementations (mutex, rwsem, etc).
7 */
8
9#define OSQ_UNLOCKED_VAL (0)
10
11struct optimistic_spin_queue {
12 /*
13 * Stores an encoded value of the CPU # of the tail node in the queue.
14 * If the queue is empty, then it's set to OSQ_UNLOCKED_VAL.
15 */
16 atomic_t tail;
17};
18
19/* Init macro and function. */
20#define OSQ_LOCK_UNLOCKED { ATOMIC_INIT(OSQ_UNLOCKED_VAL) }
21
22static inline void osq_lock_init(struct optimistic_spin_queue *lock)
23{
24 atomic_set(&lock->tail, OSQ_UNLOCKED_VAL);
25}
26
27#endif
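Editor's note: because struct optimistic_spin_queue is now just an atomic_t holding the encoded tail CPU, it can be embedded by value in any sleeping lock instead of being carried as a pointer, which is exactly what the mutex.h and rwsem.h hunks elsewhere in this series do. A sketch of embedding and initialising it in a hypothetical lock type:

#include <linux/atomic.h>
#include <linux/osq_lock.h>

/* Sketch: a made-up sleeping lock that embeds the OSQ tail word. */
struct my_sleeping_lock {
	atomic_t			count;
	struct optimistic_spin_queue	osq;	/* spinner MCS queue */
};

/* Static initialisation ... */
#define MY_SLEEPING_LOCK_INIT(name) \
	{ .count = ATOMIC_INIT(1), .osq = OSQ_LOCK_UNLOCKED }

/* ... or at runtime. */
static inline void my_sleeping_lock_init(struct my_sleeping_lock *lock)
{
	atomic_set(&lock->count, 1);
	osq_lock_init(&lock->osq);
}
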
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 3c545b48aeab..8304959ad336 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -360,6 +360,9 @@ static inline void ClearPageCompound(struct page *page)
360 ClearPageHead(page); 360 ClearPageHead(page);
361} 361}
362#endif 362#endif
363
364#define PG_head_mask ((1L << PG_head))
365
363#else 366#else
364/* 367/*
365 * Reduce page flag use as much as possible by overlapping 368 * Reduce page flag use as much as possible by overlapping
diff --git a/include/linux/percpu-defs.h b/include/linux/percpu-defs.h
index a5fc7d01aad6..dec01d6c3f80 100644
--- a/include/linux/percpu-defs.h
+++ b/include/linux/percpu-defs.h
@@ -146,10 +146,10 @@
146 * Declaration/definition used for per-CPU variables that must be read mostly. 146 * Declaration/definition used for per-CPU variables that must be read mostly.
147 */ 147 */
148#define DECLARE_PER_CPU_READ_MOSTLY(type, name) \ 148#define DECLARE_PER_CPU_READ_MOSTLY(type, name) \
149 DECLARE_PER_CPU_SECTION(type, name, "..readmostly") 149 DECLARE_PER_CPU_SECTION(type, name, "..read_mostly")
150 150
151#define DEFINE_PER_CPU_READ_MOSTLY(type, name) \ 151#define DEFINE_PER_CPU_READ_MOSTLY(type, name) \
152 DEFINE_PER_CPU_SECTION(type, name, "..readmostly") 152 DEFINE_PER_CPU_SECTION(type, name, "..read_mostly")
153 153
154/* 154/*
155 * Intermodule exports for per-CPU variables. sparse forgets about 155 * Intermodule exports for per-CPU variables. sparse forgets about
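Editor's note: the rename only matters because the macros and the linker script (see the vmlinux.lds.h hunk above) must agree on the section name; usage is unchanged. A small assumed example:

#include <linux/percpu.h>

/* Sketch: a per-CPU variable placed in the read-mostly per-CPU section. */
static DEFINE_PER_CPU_READ_MOSTLY(unsigned int, sketch_feature_flags);

static unsigned int sketch_get_flags(void)
{
	return this_cpu_read(sketch_feature_flags);
}
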
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 864ddafad8cc..68041446c450 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -536,6 +536,15 @@ struct phy_driver {
536 /* See set_wol, but for checking whether Wake on LAN is enabled. */ 536 /* See set_wol, but for checking whether Wake on LAN is enabled. */
537 void (*get_wol)(struct phy_device *dev, struct ethtool_wolinfo *wol); 537 void (*get_wol)(struct phy_device *dev, struct ethtool_wolinfo *wol);
538 538
539 /*
540 * Called to inform a PHY device driver when the core is about to
541 * change the link state. This callback is supposed to be used as
542 * fixup hook for drivers that need to take action when the link
543 * state changes. Drivers are by no means allowed to mess with the
544 * PHY device structure in their implementations.
545 */
546 void (*link_change_notify)(struct phy_device *dev);
547
539 struct device_driver driver; 548 struct device_driver driver;
540}; 549};
541#define to_phy_driver(d) container_of(d, struct phy_driver, driver) 550#define to_phy_driver(d) container_of(d, struct phy_driver, driver)
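Editor's note: a driver opts into the new hook simply by filling the callback in its struct phy_driver; as the comment above stresses, the hook must not modify the phy_device itself. A hedged sketch with made-up IDs and names:

#include <linux/phy.h>

static void sketch_link_change_notify(struct phy_device *phydev)
{
	/* e.g. apply a vendor-specific fixup before the core acts on the new
	 * link state; do not touch the phy_device structure itself. */
}

static struct phy_driver sketch_phy_driver = {
	.phy_id		= 0x00112233,		/* hypothetical PHY ID */
	.phy_id_mask	= 0xffffffff,
	.name		= "sketch-phy",
	.link_change_notify = sketch_link_change_notify,
};
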
diff --git a/include/linux/ptrace.h b/include/linux/ptrace.h
index 077904c8b70d..cc79eff4a1ad 100644
--- a/include/linux/ptrace.h
+++ b/include/linux/ptrace.h
@@ -334,6 +334,9 @@ static inline void user_single_step_siginfo(struct task_struct *tsk,
334 * calling arch_ptrace_stop() when it would be superfluous. For example, 334 * calling arch_ptrace_stop() when it would be superfluous. For example,
335 * if the thread has not been back to user mode since the last stop, the 335 * if the thread has not been back to user mode since the last stop, the
336 * thread state might indicate that nothing needs to be done. 336 * thread state might indicate that nothing needs to be done.
337 *
338 * This is guaranteed to be invoked once before a task stops for ptrace and
339 * may include arch-specific operations necessary prior to a ptrace stop.
337 */ 340 */
338#define arch_ptrace_stop_needed(code, info) (0) 341#define arch_ptrace_stop_needed(code, info) (0)
339#endif 342#endif
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 5a75d19aa661..6a94cc8b1ca0 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -44,7 +44,6 @@
44#include <linux/debugobjects.h> 44#include <linux/debugobjects.h>
45#include <linux/bug.h> 45#include <linux/bug.h>
46#include <linux/compiler.h> 46#include <linux/compiler.h>
47#include <linux/percpu.h>
48#include <asm/barrier.h> 47#include <asm/barrier.h>
49 48
50extern int rcu_expedited; /* for sysctl */ 49extern int rcu_expedited; /* for sysctl */
@@ -300,41 +299,6 @@ bool __rcu_is_watching(void);
300#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */ 299#endif /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) || defined(CONFIG_SMP) */
301 300
302/* 301/*
303 * Hooks for cond_resched() and friends to avoid RCU CPU stall warnings.
304 */
305
306#define RCU_COND_RESCHED_LIM 256 /* ms vs. 100s of ms. */
307DECLARE_PER_CPU(int, rcu_cond_resched_count);
308void rcu_resched(void);
309
310/*
311 * Is it time to report RCU quiescent states?
312 *
313 * Note unsynchronized access to rcu_cond_resched_count. Yes, we might
314 * increment some random CPU's count, and possibly also load the result from
315 * yet another CPU's count. We might even clobber some other CPU's attempt
316 * to zero its counter. This is all OK because the goal is not precision,
317 * but rather reasonable amortization of rcu_note_context_switch() overhead
318 * and extremely high probability of avoiding RCU CPU stall warnings.
319 * Note that this function has to be preempted in just the wrong place,
320 * many thousands of times in a row, for anything bad to happen.
321 */
322static inline bool rcu_should_resched(void)
323{
324 return raw_cpu_inc_return(rcu_cond_resched_count) >=
325 RCU_COND_RESCHED_LIM;
326}
327
328/*
329 * Report quiscent states to RCU if it is time to do so.
330 */
331static inline void rcu_cond_resched(void)
332{
333 if (unlikely(rcu_should_resched()))
334 rcu_resched();
335}
336
337/*
338 * Infrastructure to implement the synchronize_() primitives in 302 * Infrastructure to implement the synchronize_() primitives in
339 * TREE_RCU and rcu_barrier_() primitives in TINY_RCU. 303 * TREE_RCU and rcu_barrier_() primitives in TINY_RCU.
340 */ 304 */
@@ -358,9 +322,19 @@ void wait_rcu_gp(call_rcu_func_t crf);
358 * initialization. 322 * initialization.
359 */ 323 */
360#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD 324#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
325void init_rcu_head(struct rcu_head *head);
326void destroy_rcu_head(struct rcu_head *head);
361void init_rcu_head_on_stack(struct rcu_head *head); 327void init_rcu_head_on_stack(struct rcu_head *head);
362void destroy_rcu_head_on_stack(struct rcu_head *head); 328void destroy_rcu_head_on_stack(struct rcu_head *head);
363#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */ 329#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
330static inline void init_rcu_head(struct rcu_head *head)
331{
332}
333
334static inline void destroy_rcu_head(struct rcu_head *head)
335{
336}
337
364static inline void init_rcu_head_on_stack(struct rcu_head *head) 338static inline void init_rcu_head_on_stack(struct rcu_head *head)
365{ 339{
366} 340}
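Editor's note: the newly exported init_rcu_head()/destroy_rcu_head() pair is for rcu_head fields in dynamically allocated objects, mirroring the existing *_on_stack() helpers; both compile away unless CONFIG_DEBUG_OBJECTS_RCU_HEAD is set. A hedged usage sketch (sketch_obj is made up):

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct sketch_obj {
	struct rcu_head	rcu;
	int		payload;
};

static struct sketch_obj *sketch_alloc(void)
{
	struct sketch_obj *p = kzalloc(sizeof(*p), GFP_KERNEL);

	if (p)
		init_rcu_head(&p->rcu);		/* register with debugobjects */
	return p;
}

static void sketch_free_rcu(struct rcu_head *head)
{
	struct sketch_obj *p = container_of(head, struct sketch_obj, rcu);

	destroy_rcu_head(&p->rcu);		/* unregister before freeing */
	kfree(p);
}

static void sketch_release(struct sketch_obj *p)
{
	call_rcu(&p->rcu, sketch_free_rcu);
}
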
diff --git a/include/linux/rwsem-spinlock.h b/include/linux/rwsem-spinlock.h
index d5b13bc07a0b..561e8615528d 100644
--- a/include/linux/rwsem-spinlock.h
+++ b/include/linux/rwsem-spinlock.h
@@ -15,13 +15,13 @@
15#ifdef __KERNEL__ 15#ifdef __KERNEL__
16/* 16/*
17 * the rw-semaphore definition 17 * the rw-semaphore definition
18 * - if activity is 0 then there are no active readers or writers 18 * - if count is 0 then there are no active readers or writers
19 * - if activity is +ve then that is the number of active readers 19 * - if count is +ve then that is the number of active readers
20 * - if activity is -1 then there is one active writer 20 * - if count is -1 then there is one active writer
21 * - if wait_list is not empty, then there are processes waiting for the semaphore 21 * - if wait_list is not empty, then there are processes waiting for the semaphore
22 */ 22 */
23struct rw_semaphore { 23struct rw_semaphore {
24 __s32 activity; 24 __s32 count;
25 raw_spinlock_t wait_lock; 25 raw_spinlock_t wait_lock;
26 struct list_head wait_list; 26 struct list_head wait_list;
27#ifdef CONFIG_DEBUG_LOCK_ALLOC 27#ifdef CONFIG_DEBUG_LOCK_ALLOC
diff --git a/include/linux/rwsem.h b/include/linux/rwsem.h
index 8d79708146aa..035d3c57fc8a 100644
--- a/include/linux/rwsem.h
+++ b/include/linux/rwsem.h
@@ -13,10 +13,11 @@
13#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/list.h> 14#include <linux/list.h>
15#include <linux/spinlock.h> 15#include <linux/spinlock.h>
16
17#include <linux/atomic.h> 16#include <linux/atomic.h>
17#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
18#include <linux/osq_lock.h>
19#endif
18 20
19struct optimistic_spin_queue;
20struct rw_semaphore; 21struct rw_semaphore;
21 22
22#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK 23#ifdef CONFIG_RWSEM_GENERIC_SPINLOCK
@@ -25,15 +26,15 @@ struct rw_semaphore;
25/* All arch specific implementations share the same struct */ 26/* All arch specific implementations share the same struct */
26struct rw_semaphore { 27struct rw_semaphore {
27 long count; 28 long count;
28 raw_spinlock_t wait_lock;
29 struct list_head wait_list; 29 struct list_head wait_list;
30#ifdef CONFIG_SMP 30 raw_spinlock_t wait_lock;
31#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
32 struct optimistic_spin_queue osq; /* spinner MCS lock */
31 /* 33 /*
32 * Write owner. Used as a speculative check to see 34 * Write owner. Used as a speculative check to see
33 * if the owner is running on the cpu. 35 * if the owner is running on the cpu.
34 */ 36 */
35 struct task_struct *owner; 37 struct task_struct *owner;
36 struct optimistic_spin_queue *osq; /* spinner MCS lock */
37#endif 38#endif
38#ifdef CONFIG_DEBUG_LOCK_ALLOC 39#ifdef CONFIG_DEBUG_LOCK_ALLOC
39 struct lockdep_map dep_map; 40 struct lockdep_map dep_map;
@@ -64,22 +65,19 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
64# define __RWSEM_DEP_MAP_INIT(lockname) 65# define __RWSEM_DEP_MAP_INIT(lockname)
65#endif 66#endif
66 67
67#if defined(CONFIG_SMP) && !defined(CONFIG_RWSEM_GENERIC_SPINLOCK) 68#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
68#define __RWSEM_INITIALIZER(name) \ 69#define __RWSEM_OPT_INIT(lockname) , .osq = OSQ_LOCK_UNLOCKED, .owner = NULL
69 { RWSEM_UNLOCKED_VALUE, \
70 __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
71 LIST_HEAD_INIT((name).wait_list), \
72 NULL, /* owner */ \
73 NULL /* mcs lock */ \
74 __RWSEM_DEP_MAP_INIT(name) }
75#else 70#else
76#define __RWSEM_INITIALIZER(name) \ 71#define __RWSEM_OPT_INIT(lockname)
77 { RWSEM_UNLOCKED_VALUE, \
78 __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock), \
79 LIST_HEAD_INIT((name).wait_list) \
80 __RWSEM_DEP_MAP_INIT(name) }
81#endif 72#endif
82 73
74#define __RWSEM_INITIALIZER(name) \
75 { .count = RWSEM_UNLOCKED_VALUE, \
76 .wait_list = LIST_HEAD_INIT((name).wait_list), \
77 .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock) \
78 __RWSEM_OPT_INIT(name) \
79 __RWSEM_DEP_MAP_INIT(name) }
80
83#define DECLARE_RWSEM(name) \ 81#define DECLARE_RWSEM(name) \
84 struct rw_semaphore name = __RWSEM_INITIALIZER(name) 82 struct rw_semaphore name = __RWSEM_INITIALIZER(name)
85 83
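Editor's note: none of this initializer churn is visible to users; DECLARE_RWSEM() and the locking API behave as before, and __RWSEM_OPT_INIT only contributes the osq/owner fields when CONFIG_RWSEM_SPIN_ON_OWNER is enabled. A short usage sketch:

#include <linux/rwsem.h>

static DECLARE_RWSEM(sketch_sem);
static int sketch_value;

static int sketch_read_value(void)
{
	int v;

	down_read(&sketch_sem);
	v = sketch_value;
	up_read(&sketch_sem);
	return v;
}

static void sketch_write_value(int v)
{
	down_write(&sketch_sem);
	sketch_value = v;
	up_write(&sketch_sem);
}
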
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 306f4f0c987a..0376b054a0d0 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -872,21 +872,21 @@ enum cpu_idle_type {
872#define SD_NUMA 0x4000 /* cross-node balancing */ 872#define SD_NUMA 0x4000 /* cross-node balancing */
873 873
874#ifdef CONFIG_SCHED_SMT 874#ifdef CONFIG_SCHED_SMT
875static inline const int cpu_smt_flags(void) 875static inline int cpu_smt_flags(void)
876{ 876{
877 return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES; 877 return SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES;
878} 878}
879#endif 879#endif
880 880
881#ifdef CONFIG_SCHED_MC 881#ifdef CONFIG_SCHED_MC
882static inline const int cpu_core_flags(void) 882static inline int cpu_core_flags(void)
883{ 883{
884 return SD_SHARE_PKG_RESOURCES; 884 return SD_SHARE_PKG_RESOURCES;
885} 885}
886#endif 886#endif
887 887
888#ifdef CONFIG_NUMA 888#ifdef CONFIG_NUMA
889static inline const int cpu_numa_flags(void) 889static inline int cpu_numa_flags(void)
890{ 890{
891 return SD_NUMA; 891 return SD_NUMA;
892} 892}
@@ -999,7 +999,7 @@ void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
999bool cpus_share_cache(int this_cpu, int that_cpu); 999bool cpus_share_cache(int this_cpu, int that_cpu);
1000 1000
1001typedef const struct cpumask *(*sched_domain_mask_f)(int cpu); 1001typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
1002typedef const int (*sched_domain_flags_f)(void); 1002typedef int (*sched_domain_flags_f)(void);
1003 1003
1004#define SDTL_OVERLAP 0x01 1004#define SDTL_OVERLAP 0x01
1005 1005
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 8e98297f1388..ec538fc287a6 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -305,8 +305,6 @@ struct ucred {
305/* IPX options */ 305/* IPX options */
306#define IPX_TYPE 1 306#define IPX_TYPE 1
307 307
308extern int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
309 int offset, int len);
310extern int csum_partial_copy_fromiovecend(unsigned char *kdata, 308extern int csum_partial_copy_fromiovecend(unsigned char *kdata,
311 struct iovec *iov, 309 struct iovec *iov,
312 int offset, 310 int offset,
@@ -315,8 +313,6 @@ extern unsigned long iov_pages(const struct iovec *iov, int offset,
315 unsigned long nr_segs); 313 unsigned long nr_segs);
316 314
317extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *address, int mode); 315extern int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *address, int mode);
318extern int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
319 int offset, int len);
320extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr); 316extern int move_addr_to_kernel(void __user *uaddr, int ulen, struct sockaddr_storage *kaddr);
321extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data); 317extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data);
322 318
diff --git a/include/linux/uio.h b/include/linux/uio.h
index e2231e47cec1..09a7cffc224e 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -94,8 +94,20 @@ static inline size_t iov_iter_count(struct iov_iter *i)
94 return i->count; 94 return i->count;
95} 95}
96 96
97static inline void iov_iter_truncate(struct iov_iter *i, size_t count) 97/*
98 * Cap the iov_iter by given limit; note that the second argument is
99 * *not* the new size - it's upper limit for such. Passing it a value
100 * greater than the amount of data in iov_iter is fine - it'll just do
101 * nothing in that case.
102 */
103static inline void iov_iter_truncate(struct iov_iter *i, u64 count)
98{ 104{
105 /*
106 * count doesn't have to fit in size_t - comparison extends both
107 * operands to u64 here and any value that would be truncated by
107 * conversion in assignment is by definition greater than all
109 * values of size_t, including old i->count.
110 */
99 if (i->count > count) 111 if (i->count > count)
100 i->count = count; 112 i->count = count;
101} 113}
@@ -111,6 +123,9 @@ static inline void iov_iter_reexpand(struct iov_iter *i, size_t count)
111 123
112int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len); 124int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len);
113int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len); 125int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len);
114 126int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
127 int offset, int len);
128int memcpy_toiovecend(const struct iovec *v, unsigned char *kdata,
129 int offset, int len);
115 130
116#endif 131#endif
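Editor's note: widening the limit to u64 lets callers pass byte counts that may not fit in size_t on 32-bit (an loff_t difference, for instance) and still get the cap-only behaviour described in the new comment. A hedged sketch of such a caller:

#include <linux/uio.h>

/* Sketch: cap an iov_iter to the bytes remaining before a file-size limit.
 * On 32-bit the subtraction can exceed SIZE_MAX; with the u64 argument that
 * simply means "no truncation" rather than a wrapped, bogus cap. */
static void sketch_cap_iter(struct iov_iter *iter, loff_t pos, loff_t limit)
{
	if (pos >= limit)
		iov_iter_truncate(iter, 0);
	else
		iov_iter_truncate(iter, limit - pos);
}
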
diff --git a/include/linux/usb_usual.h b/include/linux/usb_usual.h
index 1a64b26046ed..9b7de1b46437 100644
--- a/include/linux/usb_usual.h
+++ b/include/linux/usb_usual.h
@@ -70,7 +70,9 @@
70 US_FLAG(NEEDS_CAP16, 0x00400000) \ 70 US_FLAG(NEEDS_CAP16, 0x00400000) \
71 /* cannot handle READ_CAPACITY_10 */ \ 71 /* cannot handle READ_CAPACITY_10 */ \
72 US_FLAG(IGNORE_UAS, 0x00800000) \ 72 US_FLAG(IGNORE_UAS, 0x00800000) \
73 /* Device advertises UAS but it is broken */ 73 /* Device advertises UAS but it is broken */ \
74 US_FLAG(BROKEN_FUA, 0x01000000) \
75 /* Cannot handle FUA in WRITE or READ CDBs */ \
74 76
75#define US_FLAG(name, value) US_FL_##name = value , 77#define US_FLAG(name, value) US_FL_##name = value ,
76enum { US_DO_ALL_FLAGS }; 78enum { US_DO_ALL_FLAGS };
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 7277caf3743d..47f425464f84 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -203,7 +203,6 @@ struct neigh_table {
203 void (*proxy_redo)(struct sk_buff *skb); 203 void (*proxy_redo)(struct sk_buff *skb);
204 char *id; 204 char *id;
205 struct neigh_parms parms; 205 struct neigh_parms parms;
206 /* HACK. gc_* should follow parms without a gap! */
207 int gc_interval; 206 int gc_interval;
208 int gc_thresh1; 207 int gc_thresh1;
209 int gc_thresh2; 208 int gc_thresh2;
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 7ee6ce6564ae..713b0b88bd5a 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -503,9 +503,9 @@ enum nft_chain_flags {
503 * @net: net namespace that this chain belongs to 503 * @net: net namespace that this chain belongs to
504 * @table: table that this chain belongs to 504 * @table: table that this chain belongs to
505 * @handle: chain handle 505 * @handle: chain handle
506 * @flags: bitmask of enum nft_chain_flags
507 * @use: number of jump references to this chain 506 * @use: number of jump references to this chain
508 * @level: length of longest path to this chain 507 * @level: length of longest path to this chain
508 * @flags: bitmask of enum nft_chain_flags
509 * @name: name of the chain 509 * @name: name of the chain
510 */ 510 */
511struct nft_chain { 511struct nft_chain {
@@ -514,9 +514,9 @@ struct nft_chain {
514 struct net *net; 514 struct net *net;
515 struct nft_table *table; 515 struct nft_table *table;
516 u64 handle; 516 u64 handle;
517 u8 flags; 517 u32 use;
518 u16 use;
519 u16 level; 518 u16 level;
519 u8 flags;
520 char name[NFT_CHAIN_MAXNAMELEN]; 520 char name[NFT_CHAIN_MAXNAMELEN];
521}; 521};
522 522
diff --git a/include/net/netns/ieee802154_6lowpan.h b/include/net/netns/ieee802154_6lowpan.h
index 079030c853d8..e2070960bac0 100644
--- a/include/net/netns/ieee802154_6lowpan.h
+++ b/include/net/netns/ieee802154_6lowpan.h
@@ -16,7 +16,7 @@ struct netns_sysctl_lowpan {
16struct netns_ieee802154_lowpan { 16struct netns_ieee802154_lowpan {
17 struct netns_sysctl_lowpan sysctl; 17 struct netns_sysctl_lowpan sysctl;
18 struct netns_frags frags; 18 struct netns_frags frags;
19 u16 max_dsize; 19 int max_dsize;
20}; 20};
21 21
22#endif 22#endif
diff --git a/include/net/sock.h b/include/net/sock.h
index 07b7fcd60d80..156350745700 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -1730,8 +1730,8 @@ sk_dst_get(struct sock *sk)
1730 1730
1731 rcu_read_lock(); 1731 rcu_read_lock();
1732 dst = rcu_dereference(sk->sk_dst_cache); 1732 dst = rcu_dereference(sk->sk_dst_cache);
1733 if (dst) 1733 if (dst && !atomic_inc_not_zero(&dst->__refcnt))
1734 dst_hold(dst); 1734 dst = NULL;
1735 rcu_read_unlock(); 1735 rcu_read_unlock();
1736 return dst; 1736 return dst;
1737} 1737}
@@ -1768,9 +1768,11 @@ __sk_dst_set(struct sock *sk, struct dst_entry *dst)
1768static inline void 1768static inline void
1769sk_dst_set(struct sock *sk, struct dst_entry *dst) 1769sk_dst_set(struct sock *sk, struct dst_entry *dst)
1770{ 1770{
1771 spin_lock(&sk->sk_dst_lock); 1771 struct dst_entry *old_dst;
1772 __sk_dst_set(sk, dst); 1772
1773 spin_unlock(&sk->sk_dst_lock); 1773 sk_tx_queue_clear(sk);
1774 old_dst = xchg((__force struct dst_entry **)&sk->sk_dst_cache, dst);
1775 dst_release(old_dst);
1774} 1776}
1775 1777
1776static inline void 1778static inline void
@@ -1782,9 +1784,7 @@ __sk_dst_reset(struct sock *sk)
1782static inline void 1784static inline void
1783sk_dst_reset(struct sock *sk) 1785sk_dst_reset(struct sock *sk)
1784{ 1786{
1785 spin_lock(&sk->sk_dst_lock); 1787 sk_dst_set(sk, NULL);
1786 __sk_dst_reset(sk);
1787 spin_unlock(&sk->sk_dst_lock);
1788} 1788}
1789 1789
1790struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie); 1790struct dst_entry *__sk_dst_check(struct sock *sk, u32 cookie);
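Editor's note: sk_dst_set() now publishes the new dst with xchg() and releases the old one, and sk_dst_get() only takes a reference when the refcount is still live, so neither side needs sk_dst_lock any more. A hedged sketch of the same publish-and-retire idiom on a generic refcounted pointer (my_obj and my_obj_put are placeholders):

#include <linux/rcupdate.h>

struct my_obj;				/* hypothetical refcounted object */
void my_obj_put(struct my_obj *obj);	/* hypothetical reference release */

/* Sketch: lockless pointer replacement as done for sk->sk_dst_cache. */
static void sketch_publish(struct my_obj __rcu **slot, struct my_obj *new)
{
	struct my_obj *old;

	old = xchg((__force struct my_obj **)slot, new);
	if (old)
		my_obj_put(old);
}
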
diff --git a/include/scsi/scsi_cmnd.h b/include/scsi/scsi_cmnd.h
index 42ed789ebafc..e0ae71098144 100644
--- a/include/scsi/scsi_cmnd.h
+++ b/include/scsi/scsi_cmnd.h
@@ -318,7 +318,7 @@ static inline void set_driver_byte(struct scsi_cmnd *cmd, char status)
318 318
319static inline unsigned scsi_transfer_length(struct scsi_cmnd *scmd) 319static inline unsigned scsi_transfer_length(struct scsi_cmnd *scmd)
320{ 320{
321 unsigned int xfer_len = blk_rq_bytes(scmd->request); 321 unsigned int xfer_len = scsi_out(scmd)->length;
322 unsigned int prot_op = scsi_get_prot_op(scmd); 322 unsigned int prot_op = scsi_get_prot_op(scmd);
323 unsigned int sector_size = scmd->device->sector_size; 323 unsigned int sector_size = scmd->device->sector_size;
324 324
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 5853c913d2b0..27ab31017f09 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -173,6 +173,7 @@ struct scsi_device {
173 unsigned is_visible:1; /* is the device visible in sysfs */ 173 unsigned is_visible:1; /* is the device visible in sysfs */
174 unsigned wce_default_on:1; /* Cache is ON by default */ 174 unsigned wce_default_on:1; /* Cache is ON by default */
175 unsigned no_dif:1; /* T10 PI (DIF) should be disabled */ 175 unsigned no_dif:1; /* T10 PI (DIF) should be disabled */
176 unsigned broken_fua:1; /* Don't set FUA bit */
176 177
177 atomic_t disk_events_disable_depth; /* disable depth for disk events */ 178 atomic_t disk_events_disable_depth; /* disable depth for disk events */
178 179
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index 6f9c38ce45c7..2f47824e7a36 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -38,6 +38,7 @@ struct btrfs_ioctl_vol_args {
38#define BTRFS_SUBVOL_QGROUP_INHERIT (1ULL << 2) 38#define BTRFS_SUBVOL_QGROUP_INHERIT (1ULL << 2)
39#define BTRFS_FSID_SIZE 16 39#define BTRFS_FSID_SIZE 16
40#define BTRFS_UUID_SIZE 16 40#define BTRFS_UUID_SIZE 16
41#define BTRFS_UUID_UNPARSED_SIZE 37
41 42
42#define BTRFS_QGROUP_INHERIT_SET_LIMITS (1ULL << 0) 43#define BTRFS_QGROUP_INHERIT_SET_LIMITS (1ULL << 0)
43 44
diff --git a/include/uapi/linux/usb/functionfs.h b/include/uapi/linux/usb/functionfs.h
index 2a4b4a72a4f9..24b68c59dcf8 100644
--- a/include/uapi/linux/usb/functionfs.h
+++ b/include/uapi/linux/usb/functionfs.h
@@ -33,6 +33,13 @@ struct usb_endpoint_descriptor_no_audio {
33 __u8 bInterval; 33 __u8 bInterval;
34} __attribute__((packed)); 34} __attribute__((packed));
35 35
36/* Legacy format, deprecated as of 3.14. */
37struct usb_functionfs_descs_head {
38 __le32 magic;
39 __le32 length;
40 __le32 fs_count;
41 __le32 hs_count;
42} __attribute__((packed, deprecated));
36 43
37/* 44/*
38 * Descriptors format: 45 * Descriptors format:
diff --git a/include/uapi/sound/compress_offload.h b/include/uapi/sound/compress_offload.h
index 21eed488783f..1964026b5e09 100644
--- a/include/uapi/sound/compress_offload.h
+++ b/include/uapi/sound/compress_offload.h
@@ -39,7 +39,7 @@
39struct snd_compressed_buffer { 39struct snd_compressed_buffer {
40 __u32 fragment_size; 40 __u32 fragment_size;
41 __u32 fragments; 41 __u32 fragments;
42}; 42} __attribute__((packed, aligned(4)));
43 43
44/** 44/**
45 * struct snd_compr_params: compressed stream params 45 * struct snd_compr_params: compressed stream params
@@ -51,7 +51,7 @@ struct snd_compr_params {
51 struct snd_compressed_buffer buffer; 51 struct snd_compressed_buffer buffer;
52 struct snd_codec codec; 52 struct snd_codec codec;
53 __u8 no_wake_mode; 53 __u8 no_wake_mode;
54}; 54} __attribute__((packed, aligned(4)));
55 55
56/** 56/**
57 * struct snd_compr_tstamp: timestamp descriptor 57 * struct snd_compr_tstamp: timestamp descriptor
@@ -70,7 +70,7 @@ struct snd_compr_tstamp {
70 __u32 pcm_frames; 70 __u32 pcm_frames;
71 __u32 pcm_io_frames; 71 __u32 pcm_io_frames;
72 __u32 sampling_rate; 72 __u32 sampling_rate;
73}; 73} __attribute__((packed, aligned(4)));
74 74
75/** 75/**
76 * struct snd_compr_avail: avail descriptor 76 * struct snd_compr_avail: avail descriptor
@@ -80,7 +80,7 @@ struct snd_compr_tstamp {
80struct snd_compr_avail { 80struct snd_compr_avail {
81 __u64 avail; 81 __u64 avail;
82 struct snd_compr_tstamp tstamp; 82 struct snd_compr_tstamp tstamp;
83} __attribute__((packed)); 83} __attribute__((packed, aligned(4)));
84 84
85enum snd_compr_direction { 85enum snd_compr_direction {
86 SND_COMPRESS_PLAYBACK = 0, 86 SND_COMPRESS_PLAYBACK = 0,
@@ -107,7 +107,7 @@ struct snd_compr_caps {
107 __u32 max_fragments; 107 __u32 max_fragments;
108 __u32 codecs[MAX_NUM_CODECS]; 108 __u32 codecs[MAX_NUM_CODECS];
109 __u32 reserved[11]; 109 __u32 reserved[11];
110}; 110} __attribute__((packed, aligned(4)));
111 111
112/** 112/**
113 * struct snd_compr_codec_caps: query capability of codec 113 * struct snd_compr_codec_caps: query capability of codec
@@ -119,7 +119,7 @@ struct snd_compr_codec_caps {
119 __u32 codec; 119 __u32 codec;
120 __u32 num_descriptors; 120 __u32 num_descriptors;
121 struct snd_codec_desc descriptor[MAX_NUM_CODEC_DESCRIPTORS]; 121 struct snd_codec_desc descriptor[MAX_NUM_CODEC_DESCRIPTORS];
122}; 122} __attribute__((packed, aligned(4)));
123 123
124/** 124/**
125 * @SNDRV_COMPRESS_ENCODER_PADDING: no of samples appended by the encoder at the 125 * @SNDRV_COMPRESS_ENCODER_PADDING: no of samples appended by the encoder at the
@@ -140,7 +140,7 @@ enum {
140struct snd_compr_metadata { 140struct snd_compr_metadata {
141 __u32 key; 141 __u32 key;
142 __u32 value[8]; 142 __u32 value[8];
143}; 143} __attribute__((packed, aligned(4)));
144 144
145/** 145/**
146 * compress path ioctl definitions 146 * compress path ioctl definitions
diff --git a/include/uapi/sound/compress_params.h b/include/uapi/sound/compress_params.h
index 165e7059de75..d9bd9ca0d5b0 100644
--- a/include/uapi/sound/compress_params.h
+++ b/include/uapi/sound/compress_params.h
@@ -268,7 +268,7 @@ struct snd_enc_vorbis {
268 __u32 max_bit_rate; 268 __u32 max_bit_rate;
269 __u32 min_bit_rate; 269 __u32 min_bit_rate;
270 __u32 downmix; 270 __u32 downmix;
271}; 271} __attribute__((packed, aligned(4)));
272 272
273 273
274/** 274/**
@@ -284,7 +284,7 @@ struct snd_enc_real {
284 __u32 quant_bits; 284 __u32 quant_bits;
285 __u32 start_region; 285 __u32 start_region;
286 __u32 num_regions; 286 __u32 num_regions;
287}; 287} __attribute__((packed, aligned(4)));
288 288
289/** 289/**
290 * struct snd_enc_flac 290 * struct snd_enc_flac
@@ -308,12 +308,12 @@ struct snd_enc_real {
308struct snd_enc_flac { 308struct snd_enc_flac {
309 __u32 num; 309 __u32 num;
310 __u32 gain; 310 __u32 gain;
311}; 311} __attribute__((packed, aligned(4)));
312 312
313struct snd_enc_generic { 313struct snd_enc_generic {
314 __u32 bw; /* encoder bandwidth */ 314 __u32 bw; /* encoder bandwidth */
315 __s32 reserved[15]; 315 __s32 reserved[15];
316}; 316} __attribute__((packed, aligned(4)));
317 317
318union snd_codec_options { 318union snd_codec_options {
319 struct snd_enc_wma wma; 319 struct snd_enc_wma wma;
@@ -321,7 +321,7 @@ union snd_codec_options {
321 struct snd_enc_real real; 321 struct snd_enc_real real;
322 struct snd_enc_flac flac; 322 struct snd_enc_flac flac;
323 struct snd_enc_generic generic; 323 struct snd_enc_generic generic;
324}; 324} __attribute__((packed, aligned(4)));
325 325
326/** struct snd_codec_desc - description of codec capabilities 326/** struct snd_codec_desc - description of codec capabilities
327 * @max_ch: Maximum number of audio channels 327 * @max_ch: Maximum number of audio channels
@@ -358,7 +358,7 @@ struct snd_codec_desc {
358 __u32 formats; 358 __u32 formats;
359 __u32 min_buffer; 359 __u32 min_buffer;
360 __u32 reserved[15]; 360 __u32 reserved[15];
361}; 361} __attribute__((packed, aligned(4)));
362 362
363/** struct snd_codec 363/** struct snd_codec
364 * @id: Identifies the supported audio encoder/decoder. 364 * @id: Identifies the supported audio encoder/decoder.
@@ -399,6 +399,6 @@ struct snd_codec {
399 __u32 align; 399 __u32 align;
400 union snd_codec_options options; 400 union snd_codec_options options;
401 __u32 reserved[3]; 401 __u32 reserved[3];
402}; 402} __attribute__((packed, aligned(4)));
403 403
404#endif 404#endif
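Editor's note: all of these UAPI structures gain packed, aligned(4) so that 32-bit and 64-bit user space agree on their layout; without the attributes a struct containing a __u64 is 8-byte aligned (and tail padded) on x86_64 but only 4-byte aligned on i386. A hedged illustration with a made-up struct:

#include <linux/types.h>

/* Sketch: pinning a UAPI struct's ABI across 32/64-bit builds. */
struct sketch_uapi_avail {
	__u64 avail;
	__u32 frames;
} __attribute__((packed, aligned(4)));

/* 12 bytes, 4-byte aligned everywhere; the unattributed struct would be
 * 16 bytes on x86_64 but 12 on i386. */
_Static_assert(sizeof(struct sketch_uapi_avail) == 12,
	       "unexpected UAPI layout");
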
diff --git a/kernel/Kconfig.locks b/kernel/Kconfig.locks
index 35536d9c0964..76768ee812b2 100644
--- a/kernel/Kconfig.locks
+++ b/kernel/Kconfig.locks
@@ -220,9 +220,16 @@ config INLINE_WRITE_UNLOCK_IRQRESTORE
220 220
221endif 221endif
222 222
223config ARCH_SUPPORTS_ATOMIC_RMW
224 bool
225
223config MUTEX_SPIN_ON_OWNER 226config MUTEX_SPIN_ON_OWNER
224 def_bool y 227 def_bool y
225 depends on SMP && !DEBUG_MUTEXES 228 depends on SMP && !DEBUG_MUTEXES && ARCH_SUPPORTS_ATOMIC_RMW
229
230config RWSEM_SPIN_ON_OWNER
231 def_bool y
232 depends on SMP && RWSEM_XCHGADD_ALGORITHM && ARCH_SUPPORTS_ATOMIC_RMW
226 233
227config ARCH_USE_QUEUE_RWLOCK 234config ARCH_USE_QUEUE_RWLOCK
228 bool 235 bool
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 7868fc3c0bc5..70776aec2562 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -1648,10 +1648,13 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
1648 int flags, const char *unused_dev_name, 1648 int flags, const char *unused_dev_name,
1649 void *data) 1649 void *data)
1650{ 1650{
1651 struct super_block *pinned_sb = NULL;
1652 struct cgroup_subsys *ss;
1651 struct cgroup_root *root; 1653 struct cgroup_root *root;
1652 struct cgroup_sb_opts opts; 1654 struct cgroup_sb_opts opts;
1653 struct dentry *dentry; 1655 struct dentry *dentry;
1654 int ret; 1656 int ret;
1657 int i;
1655 bool new_sb; 1658 bool new_sb;
1656 1659
1657 /* 1660 /*
@@ -1677,6 +1680,27 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
1677 goto out_unlock; 1680 goto out_unlock;
1678 } 1681 }
1679 1682
1683 /*
1684 * Destruction of cgroup root is asynchronous, so subsystems may
1685 * still be dying after the previous unmount. Let's drain the
1686 * dying subsystems. We just need to ensure that the ones
1687 * unmounted previously finish dying and don't care about new ones
1688 * starting. Testing ref liveliness is good enough.
1689 */
1690 for_each_subsys(ss, i) {
1691 if (!(opts.subsys_mask & (1 << i)) ||
1692 ss->root == &cgrp_dfl_root)
1693 continue;
1694
1695 if (!percpu_ref_tryget_live(&ss->root->cgrp.self.refcnt)) {
1696 mutex_unlock(&cgroup_mutex);
1697 msleep(10);
1698 ret = restart_syscall();
1699 goto out_free;
1700 }
1701 cgroup_put(&ss->root->cgrp);
1702 }
1703
1680 for_each_root(root) { 1704 for_each_root(root) {
1681 bool name_match = false; 1705 bool name_match = false;
1682 1706
@@ -1717,15 +1741,23 @@ static struct dentry *cgroup_mount(struct file_system_type *fs_type,
1717 } 1741 }
1718 1742
1719 /* 1743 /*
1720 * A root's lifetime is governed by its root cgroup. 1744 * We want to reuse @root whose lifetime is governed by its
1721 * tryget_live failure indicate that the root is being 1745 * ->cgrp. Let's check whether @root is alive and keep it
1722 * destroyed. Wait for destruction to complete so that the 1746 * that way. As cgroup_kill_sb() can happen anytime, we
1723 * subsystems are free. We can use wait_queue for the wait 1747 * want to block it by pinning the sb so that @root doesn't
1724 * but this path is super cold. Let's just sleep for a bit 1748 * get killed before mount is complete.
1725 * and retry. 1749 *
1750 * With the sb pinned, tryget_live can reliably indicate
1751 * whether @root can be reused. If it's being killed,
1752 * drain it. We can use wait_queue for the wait but this
1753 * path is super cold. Let's just sleep a bit and retry.
1726 */ 1754 */
1727 if (!percpu_ref_tryget_live(&root->cgrp.self.refcnt)) { 1755 pinned_sb = kernfs_pin_sb(root->kf_root, NULL);
1756 if (IS_ERR(pinned_sb) ||
1757 !percpu_ref_tryget_live(&root->cgrp.self.refcnt)) {
1728 mutex_unlock(&cgroup_mutex); 1758 mutex_unlock(&cgroup_mutex);
1759 if (!IS_ERR_OR_NULL(pinned_sb))
1760 deactivate_super(pinned_sb);
1729 msleep(10); 1761 msleep(10);
1730 ret = restart_syscall(); 1762 ret = restart_syscall();
1731 goto out_free; 1763 goto out_free;
@@ -1770,6 +1802,16 @@ out_free:
1770 CGROUP_SUPER_MAGIC, &new_sb); 1802 CGROUP_SUPER_MAGIC, &new_sb);
1771 if (IS_ERR(dentry) || !new_sb) 1803 if (IS_ERR(dentry) || !new_sb)
1772 cgroup_put(&root->cgrp); 1804 cgroup_put(&root->cgrp);
1805
1806 /*
1807 * If @pinned_sb, we're reusing an existing root and holding an
1808 * extra ref on its sb. Mount is complete. Put the extra ref.
1809 */
1810 if (pinned_sb) {
1811 WARN_ON(new_sb);
1812 deactivate_super(pinned_sb);
1813 }
1814
1773 return dentry; 1815 return dentry;
1774} 1816}
1775 1817
@@ -3328,7 +3370,7 @@ bool css_has_online_children(struct cgroup_subsys_state *css)
3328 3370
3329 rcu_read_lock(); 3371 rcu_read_lock();
3330 css_for_each_child(child, css) { 3372 css_for_each_child(child, css) {
3331 if (css->flags & CSS_ONLINE) { 3373 if (child->flags & CSS_ONLINE) {
3332 ret = true; 3374 ret = true;
3333 break; 3375 break;
3334 } 3376 }
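The two retry sites in the cgroup_mount() hunks above share one pattern: take a reference on the root only if it is still live, and if it is dying (or the superblock cannot be pinned), drop every lock, sleep briefly and restart the syscall so the dying root can finish draining. Below is a minimal userspace sketch of that pattern, using a plain C11 atomic with a "dying" bit in place of percpu_ref and kernfs_pin_sb(); the names and the retry shape are illustrative only, not the kernel implementation.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

#define REF_DYING (1u << 31)

struct ref { _Atomic unsigned int count; };

/* Succeeds only while the object has not been marked dying. */
static bool ref_tryget_live(struct ref *r)
{
        unsigned int old = atomic_load(&r->count);

        while (!(old & REF_DYING)) {
                if (atomic_compare_exchange_weak(&r->count, &old, old + 1))
                        return true;
        }
        return false;
}

static void ref_put(struct ref *r)
{
        atomic_fetch_sub(&r->count, 1);
}

static void ref_kill(struct ref *r)
{
        atomic_fetch_or(&r->count, REF_DYING);
}

/* Mount-style caller: reuse the root if it is live, otherwise back off
 * and report that the whole operation should be restarted. */
static bool try_reuse_root(struct ref *root)
{
        if (!ref_tryget_live(root)) {
                usleep(10 * 1000);  /* analogous to msleep(10) + restart_syscall() */
                return false;
        }
        /* ... reuse the root here ... */
        ref_put(root);
        return true;
}

int main(void)
{
        struct ref root = { .count = 1 };

        printf("reuse while live: %d\n", try_reuse_root(&root));
        ref_kill(&root);
        printf("reuse after kill: %d\n", try_reuse_root(&root));
        return 0;
}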
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index f6b33c696224..116a4164720a 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -1181,7 +1181,13 @@ done:
1181 1181
1182int current_cpuset_is_being_rebound(void) 1182int current_cpuset_is_being_rebound(void)
1183{ 1183{
1184 return task_cs(current) == cpuset_being_rebound; 1184 int ret;
1185
1186 rcu_read_lock();
1187 ret = task_cs(current) == cpuset_being_rebound;
1188 rcu_read_unlock();
1189
1190 return ret;
1185} 1191}
1186 1192
1187static int update_relax_domain_level(struct cpuset *cs, s64 val) 1193static int update_relax_domain_level(struct cpuset *cs, s64 val)
@@ -1617,7 +1623,17 @@ static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
1617 * resources, wait for the previously scheduled operations before 1623 * resources, wait for the previously scheduled operations before
1618 * proceeding, so that we don't end up keep removing tasks added 1624 * proceeding, so that we don't end up keep removing tasks added
1619 * after execution capability is restored. 1625 * after execution capability is restored.
1626 *
1627 * cpuset_hotplug_work calls back into cgroup core via
1628 * cgroup_transfer_tasks() and waiting for it from a cgroupfs
1629 * operation like this one can lead to a deadlock through kernfs
1630 * active_ref protection. Let's break the protection. Losing the
1631 * protection is okay as we check whether @cs is online after
1632 * grabbing cpuset_mutex anyway. This only happens on the legacy
1633 * hierarchies.
1620 */ 1634 */
1635 css_get(&cs->css);
1636 kernfs_break_active_protection(of->kn);
1621 flush_work(&cpuset_hotplug_work); 1637 flush_work(&cpuset_hotplug_work);
1622 1638
1623 mutex_lock(&cpuset_mutex); 1639 mutex_lock(&cpuset_mutex);
@@ -1645,6 +1661,8 @@ static ssize_t cpuset_write_resmask(struct kernfs_open_file *of,
1645 free_trial_cpuset(trialcs); 1661 free_trial_cpuset(trialcs);
1646out_unlock: 1662out_unlock:
1647 mutex_unlock(&cpuset_mutex); 1663 mutex_unlock(&cpuset_mutex);
1664 kernfs_unbreak_active_protection(of->kn);
1665 css_put(&cs->css);
1648 return retval ?: nbytes; 1666 return retval ?: nbytes;
1649} 1667}
1650 1668
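Both cpuset hunks above are about lifetime: the rebound check now reads task_cs() under rcu_read_lock(), and cpuset_write_resmask() pins the css and drops kernfs active protection before flush_work(), so the hotplug work it waits for cannot deadlock against it; once cpuset_mutex is retaken, the code re-checks that the cpuset is still online. A hedged userspace sketch of that second shape follows, with POSIX threads and invented names standing in for the css/kernfs APIs.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct cs {
        _Atomic int refs;       /* css_get()/css_put() analogue */
        bool online;            /* protected by big_lock        */
};

static pthread_mutex_t big_lock = PTHREAD_MUTEX_INITIALIZER;

/* Stand-in for flush_work(): may itself need the protection we dropped. */
static void flush_pending_work(void)
{
}

static int write_op(struct cs *c)
{
        int ret = 0;

        atomic_fetch_add(&c->refs, 1);  /* keep the object alive           */
        /* <-- drop the "active protection" here, before waiting           */
        flush_pending_work();

        pthread_mutex_lock(&big_lock);
        if (!c->online) {               /* it may have died while we waited */
                ret = -1;
                goto out;
        }
        /* ... apply the update ... */
out:
        pthread_mutex_unlock(&big_lock);
        /* <-- restore the "active protection" here                        */
        atomic_fetch_sub(&c->refs, 1);
        return ret;
}

int main(void)
{
        struct cs c = { .refs = 1, .online = true };

        printf("write while online:  %d\n", write_op(&c));
        c.online = false;
        printf("write after offline: %d\n", write_op(&c));
        return 0;
}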
diff --git a/kernel/events/core.c b/kernel/events/core.c
index a33d9a2bcbd7..b0c95f0f06fd 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2320,7 +2320,7 @@ static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
2320 next_parent = rcu_dereference(next_ctx->parent_ctx); 2320 next_parent = rcu_dereference(next_ctx->parent_ctx);
2321 2321
2322 /* If neither context have a parent context; they cannot be clones. */ 2322 /* If neither context have a parent context; they cannot be clones. */
2323 if (!parent && !next_parent) 2323 if (!parent || !next_parent)
2324 goto unlock; 2324 goto unlock;
2325 2325
2326 if (next_parent == ctx || next_ctx == parent || next_parent == parent) { 2326 if (next_parent == ctx || next_ctx == parent || next_parent == parent) {
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index c445e392e93f..6f3254e8c137 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -846,7 +846,7 @@ static void __uprobe_unregister(struct uprobe *uprobe, struct uprobe_consumer *u
846{ 846{
847 int err; 847 int err;
848 848
849 if (!consumer_del(uprobe, uc)) /* WARN? */ 849 if (WARN_ON(!consumer_del(uprobe, uc)))
850 return; 850 return;
851 851
852 err = register_for_each_vma(uprobe, NULL); 852 err = register_for_each_vma(uprobe, NULL);
@@ -927,7 +927,7 @@ int uprobe_apply(struct inode *inode, loff_t offset,
927 int ret = -ENOENT; 927 int ret = -ENOENT;
928 928
929 uprobe = find_uprobe(inode, offset); 929 uprobe = find_uprobe(inode, offset);
930 if (!uprobe) 930 if (WARN_ON(!uprobe))
931 return ret; 931 return ret;
932 932
933 down_write(&uprobe->register_rwsem); 933 down_write(&uprobe->register_rwsem);
@@ -952,7 +952,7 @@ void uprobe_unregister(struct inode *inode, loff_t offset, struct uprobe_consume
952 struct uprobe *uprobe; 952 struct uprobe *uprobe;
953 953
954 uprobe = find_uprobe(inode, offset); 954 uprobe = find_uprobe(inode, offset);
955 if (!uprobe) 955 if (WARN_ON(!uprobe))
956 return; 956 return;
957 957
958 down_write(&uprobe->register_rwsem); 958 down_write(&uprobe->register_rwsem);
diff --git a/kernel/irq/irqdesc.c b/kernel/irq/irqdesc.c
index 7339e42a85ab..1487a123db5c 100644
--- a/kernel/irq/irqdesc.c
+++ b/kernel/irq/irqdesc.c
@@ -455,9 +455,9 @@ EXPORT_SYMBOL_GPL(irq_alloc_hwirqs);
455 */ 455 */
456void irq_free_hwirqs(unsigned int from, int cnt) 456void irq_free_hwirqs(unsigned int from, int cnt)
457{ 457{
458 int i; 458 int i, j;
459 459
460 for (i = from; cnt > 0; i++, cnt--) { 460 for (i = from, j = cnt; j > 0; i++, j--) {
461 irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE); 461 irq_set_status_flags(i, _IRQ_NOREQUEST | _IRQ_NOPROBE);
462 arch_teardown_hwirq(i); 462 arch_teardown_hwirq(i);
463 } 463 }
diff --git a/kernel/kexec.c b/kernel/kexec.c
index 6748688813d0..369f41a94124 100644
--- a/kernel/kexec.c
+++ b/kernel/kexec.c
@@ -1617,6 +1617,7 @@ static int __init crash_save_vmcoreinfo_init(void)
1617#ifdef CONFIG_MEMORY_FAILURE 1617#ifdef CONFIG_MEMORY_FAILURE
1618 VMCOREINFO_NUMBER(PG_hwpoison); 1618 VMCOREINFO_NUMBER(PG_hwpoison);
1619#endif 1619#endif
1620 VMCOREINFO_NUMBER(PG_head_mask);
1620 VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE); 1621 VMCOREINFO_NUMBER(PAGE_BUDDY_MAPCOUNT_VALUE);
1621 1622
1622 arch_crash_save_vmcoreinfo(); 1623 arch_crash_save_vmcoreinfo();
diff --git a/kernel/locking/mcs_spinlock.c b/kernel/locking/mcs_spinlock.c
index 838dc9e00669..be9ee1559fca 100644
--- a/kernel/locking/mcs_spinlock.c
+++ b/kernel/locking/mcs_spinlock.c
@@ -14,21 +14,47 @@
14 * called from interrupt context and we have preemption disabled while 14 * called from interrupt context and we have preemption disabled while
15 * spinning. 15 * spinning.
16 */ 16 */
17static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_queue, osq_node); 17static DEFINE_PER_CPU_SHARED_ALIGNED(struct optimistic_spin_node, osq_node);
18
19/*
20 * We use the value 0 to represent "no CPU", thus the encoded value
21 * will be the CPU number incremented by 1.
22 */
23static inline int encode_cpu(int cpu_nr)
24{
25 return cpu_nr + 1;
26}
27
28static inline struct optimistic_spin_node *decode_cpu(int encoded_cpu_val)
29{
30 int cpu_nr = encoded_cpu_val - 1;
31
32 return per_cpu_ptr(&osq_node, cpu_nr);
33}
18 34
19/* 35/*
20 * Get a stable @node->next pointer, either for unlock() or unqueue() purposes. 36 * Get a stable @node->next pointer, either for unlock() or unqueue() purposes.
21 * Can return NULL in case we were the last queued and we updated @lock instead. 37 * Can return NULL in case we were the last queued and we updated @lock instead.
22 */ 38 */
23static inline struct optimistic_spin_queue * 39static inline struct optimistic_spin_node *
24osq_wait_next(struct optimistic_spin_queue **lock, 40osq_wait_next(struct optimistic_spin_queue *lock,
25 struct optimistic_spin_queue *node, 41 struct optimistic_spin_node *node,
26 struct optimistic_spin_queue *prev) 42 struct optimistic_spin_node *prev)
27{ 43{
28 struct optimistic_spin_queue *next = NULL; 44 struct optimistic_spin_node *next = NULL;
45 int curr = encode_cpu(smp_processor_id());
46 int old;
47
48 /*
49 * If there is a prev node in queue, then the 'old' value will be
50 * the prev node's CPU #, else it's set to OSQ_UNLOCKED_VAL since if
51 * we're currently last in queue, then the queue will then become empty.
52 */
53 old = prev ? prev->cpu : OSQ_UNLOCKED_VAL;
29 54
30 for (;;) { 55 for (;;) {
31 if (*lock == node && cmpxchg(lock, node, prev) == node) { 56 if (atomic_read(&lock->tail) == curr &&
57 atomic_cmpxchg(&lock->tail, curr, old) == curr) {
32 /* 58 /*
33 * We were the last queued, we moved @lock back. @prev 59 * We were the last queued, we moved @lock back. @prev
34 * will now observe @lock and will complete its 60 * will now observe @lock and will complete its
@@ -59,18 +85,23 @@ osq_wait_next(struct optimistic_spin_queue **lock,
59 return next; 85 return next;
60} 86}
61 87
62bool osq_lock(struct optimistic_spin_queue **lock) 88bool osq_lock(struct optimistic_spin_queue *lock)
63{ 89{
64 struct optimistic_spin_queue *node = this_cpu_ptr(&osq_node); 90 struct optimistic_spin_node *node = this_cpu_ptr(&osq_node);
65 struct optimistic_spin_queue *prev, *next; 91 struct optimistic_spin_node *prev, *next;
92 int curr = encode_cpu(smp_processor_id());
93 int old;
66 94
67 node->locked = 0; 95 node->locked = 0;
68 node->next = NULL; 96 node->next = NULL;
97 node->cpu = curr;
69 98
70 node->prev = prev = xchg(lock, node); 99 old = atomic_xchg(&lock->tail, curr);
71 if (likely(prev == NULL)) 100 if (old == OSQ_UNLOCKED_VAL)
72 return true; 101 return true;
73 102
103 prev = decode_cpu(old);
104 node->prev = prev;
74 ACCESS_ONCE(prev->next) = node; 105 ACCESS_ONCE(prev->next) = node;
75 106
76 /* 107 /*
@@ -149,20 +180,21 @@ unqueue:
149 return false; 180 return false;
150} 181}
151 182
152void osq_unlock(struct optimistic_spin_queue **lock) 183void osq_unlock(struct optimistic_spin_queue *lock)
153{ 184{
154 struct optimistic_spin_queue *node = this_cpu_ptr(&osq_node); 185 struct optimistic_spin_node *node, *next;
155 struct optimistic_spin_queue *next; 186 int curr = encode_cpu(smp_processor_id());
156 187
157 /* 188 /*
158 * Fast path for the uncontended case. 189 * Fast path for the uncontended case.
159 */ 190 */
160 if (likely(cmpxchg(lock, node, NULL) == node)) 191 if (likely(atomic_cmpxchg(&lock->tail, curr, OSQ_UNLOCKED_VAL) == curr))
161 return; 192 return;
162 193
163 /* 194 /*
164 * Second most likely case. 195 * Second most likely case.
165 */ 196 */
197 node = this_cpu_ptr(&osq_node);
166 next = xchg(&node->next, NULL); 198 next = xchg(&node->next, NULL);
167 if (next) { 199 if (next) {
168 ACCESS_ONCE(next->locked) = 1; 200 ACCESS_ONCE(next->locked) = 1;
diff --git a/kernel/locking/mcs_spinlock.h b/kernel/locking/mcs_spinlock.h
index a2dbac4aca6b..74356dc0ce29 100644
--- a/kernel/locking/mcs_spinlock.h
+++ b/kernel/locking/mcs_spinlock.h
@@ -118,12 +118,13 @@ void mcs_spin_unlock(struct mcs_spinlock **lock, struct mcs_spinlock *node)
118 * mutex_lock()/rwsem_down_{read,write}() etc. 118 * mutex_lock()/rwsem_down_{read,write}() etc.
119 */ 119 */
120 120
121struct optimistic_spin_queue { 121struct optimistic_spin_node {
122 struct optimistic_spin_queue *next, *prev; 122 struct optimistic_spin_node *next, *prev;
123 int locked; /* 1 if lock acquired */ 123 int locked; /* 1 if lock acquired */
124 int cpu; /* encoded CPU # value */
124}; 125};
125 126
126extern bool osq_lock(struct optimistic_spin_queue **lock); 127extern bool osq_lock(struct optimistic_spin_queue *lock);
127extern void osq_unlock(struct optimistic_spin_queue **lock); 128extern void osq_unlock(struct optimistic_spin_queue *lock);
128 129
129#endif /* __LINUX_MCS_SPINLOCK_H */ 130#endif /* __LINUX_MCS_SPINLOCK_H */
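The net effect of the two files above is that the OSQ tail stops being a pointer and becomes an atomic int carrying an encoded CPU number (cpu + 1, with 0 meaning unlocked), while each CPU keeps one statically allocated optimistic_spin_node; every mutex and rwsem that embeds the queue shrinks accordingly. The sketch below reproduces that encoding in userspace with C11 atomics and an array of per-"CPU" nodes; it keeps only the lock/unlock paths and deliberately omits the kernel's unqueue-on-need_resched logic, so it is an illustration rather than the real osq code.

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define NR_CPUS 8
#define OSQ_UNLOCKED_VAL 0

struct osq_node {
        struct osq_node *_Atomic next;
        _Atomic int locked;                     /* set by our predecessor */
};

struct osq { _Atomic int tail; };               /* encoded CPU #, 0 = free */

static struct osq_node osq_nodes[NR_CPUS];

static int encode_cpu(int cpu)                  { return cpu + 1; }
static struct osq_node *decode_cpu(int val)     { return &osq_nodes[val - 1]; }

static void osq_lock(struct osq *lock, int cpu)
{
        struct osq_node *node = &osq_nodes[cpu];
        int curr = encode_cpu(cpu);
        int old;

        atomic_store(&node->locked, 0);
        atomic_store(&node->next, NULL);

        old = atomic_exchange(&lock->tail, curr);       /* publish ourselves */
        if (old == OSQ_UNLOCKED_VAL)
                return;                                  /* uncontended       */

        atomic_store(&decode_cpu(old)->next, node);      /* link behind prev  */
        while (!atomic_load(&node->locked))              /* wait for handoff  */
                ;
}

static void osq_unlock(struct osq *lock, int cpu)
{
        struct osq_node *node = &osq_nodes[cpu];
        struct osq_node *next;
        int curr = encode_cpu(cpu);

        /* Fast path: still the tail, nobody queued behind us. */
        if (atomic_compare_exchange_strong(&lock->tail, &curr, OSQ_UNLOCKED_VAL))
                return;

        do {                                             /* successor is linking */
                next = atomic_load(&node->next);
        } while (next == NULL);
        atomic_store(&next->locked, 1);
}

static struct osq test_lock;
static long counter;

static void *worker(void *arg)
{
        int cpu = (int)(size_t)arg;

        for (int i = 0; i < 100000; i++) {
                osq_lock(&test_lock, cpu);
                counter++;
                osq_unlock(&test_lock, cpu);
        }
        return NULL;
}

int main(void)
{
        pthread_t t[4];

        for (int i = 0; i < 4; i++)
                pthread_create(&t[i], NULL, worker, (void *)(size_t)i);
        for (int i = 0; i < 4; i++)
                pthread_join(t[i], NULL);
        printf("counter = %ld (expect 400000)\n", counter);
        return 0;
}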
diff --git a/kernel/locking/mutex.c b/kernel/locking/mutex.c
index bc73d33c6760..acca2c1a3c5e 100644
--- a/kernel/locking/mutex.c
+++ b/kernel/locking/mutex.c
@@ -60,7 +60,7 @@ __mutex_init(struct mutex *lock, const char *name, struct lock_class_key *key)
60 INIT_LIST_HEAD(&lock->wait_list); 60 INIT_LIST_HEAD(&lock->wait_list);
61 mutex_clear_owner(lock); 61 mutex_clear_owner(lock);
62#ifdef CONFIG_MUTEX_SPIN_ON_OWNER 62#ifdef CONFIG_MUTEX_SPIN_ON_OWNER
63 lock->osq = NULL; 63 osq_lock_init(&lock->osq);
64#endif 64#endif
65 65
66 debug_mutex_init(lock, name, key); 66 debug_mutex_init(lock, name, key);
diff --git a/kernel/locking/rwsem-spinlock.c b/kernel/locking/rwsem-spinlock.c
index 9be8a9144978..2c93571162cb 100644
--- a/kernel/locking/rwsem-spinlock.c
+++ b/kernel/locking/rwsem-spinlock.c
@@ -26,7 +26,7 @@ int rwsem_is_locked(struct rw_semaphore *sem)
26 unsigned long flags; 26 unsigned long flags;
27 27
28 if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) { 28 if (raw_spin_trylock_irqsave(&sem->wait_lock, flags)) {
29 ret = (sem->activity != 0); 29 ret = (sem->count != 0);
30 raw_spin_unlock_irqrestore(&sem->wait_lock, flags); 30 raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
31 } 31 }
32 return ret; 32 return ret;
@@ -46,7 +46,7 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
46 debug_check_no_locks_freed((void *)sem, sizeof(*sem)); 46 debug_check_no_locks_freed((void *)sem, sizeof(*sem));
47 lockdep_init_map(&sem->dep_map, name, key, 0); 47 lockdep_init_map(&sem->dep_map, name, key, 0);
48#endif 48#endif
49 sem->activity = 0; 49 sem->count = 0;
50 raw_spin_lock_init(&sem->wait_lock); 50 raw_spin_lock_init(&sem->wait_lock);
51 INIT_LIST_HEAD(&sem->wait_list); 51 INIT_LIST_HEAD(&sem->wait_list);
52} 52}
@@ -95,7 +95,7 @@ __rwsem_do_wake(struct rw_semaphore *sem, int wakewrite)
95 waiter = list_entry(next, struct rwsem_waiter, list); 95 waiter = list_entry(next, struct rwsem_waiter, list);
96 } while (waiter->type != RWSEM_WAITING_FOR_WRITE); 96 } while (waiter->type != RWSEM_WAITING_FOR_WRITE);
97 97
98 sem->activity += woken; 98 sem->count += woken;
99 99
100 out: 100 out:
101 return sem; 101 return sem;
@@ -126,9 +126,9 @@ void __sched __down_read(struct rw_semaphore *sem)
126 126
127 raw_spin_lock_irqsave(&sem->wait_lock, flags); 127 raw_spin_lock_irqsave(&sem->wait_lock, flags);
128 128
129 if (sem->activity >= 0 && list_empty(&sem->wait_list)) { 129 if (sem->count >= 0 && list_empty(&sem->wait_list)) {
130 /* granted */ 130 /* granted */
131 sem->activity++; 131 sem->count++;
132 raw_spin_unlock_irqrestore(&sem->wait_lock, flags); 132 raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
133 goto out; 133 goto out;
134 } 134 }
@@ -170,9 +170,9 @@ int __down_read_trylock(struct rw_semaphore *sem)
170 170
171 raw_spin_lock_irqsave(&sem->wait_lock, flags); 171 raw_spin_lock_irqsave(&sem->wait_lock, flags);
172 172
173 if (sem->activity >= 0 && list_empty(&sem->wait_list)) { 173 if (sem->count >= 0 && list_empty(&sem->wait_list)) {
174 /* granted */ 174 /* granted */
175 sem->activity++; 175 sem->count++;
176 ret = 1; 176 ret = 1;
177 } 177 }
178 178
@@ -206,7 +206,7 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
206 * itself into sleep and waiting for system woke it or someone 206 * itself into sleep and waiting for system woke it or someone
207 * else in the head of the wait list up. 207 * else in the head of the wait list up.
208 */ 208 */
209 if (sem->activity == 0) 209 if (sem->count == 0)
210 break; 210 break;
211 set_task_state(tsk, TASK_UNINTERRUPTIBLE); 211 set_task_state(tsk, TASK_UNINTERRUPTIBLE);
212 raw_spin_unlock_irqrestore(&sem->wait_lock, flags); 212 raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
@@ -214,7 +214,7 @@ void __sched __down_write_nested(struct rw_semaphore *sem, int subclass)
214 raw_spin_lock_irqsave(&sem->wait_lock, flags); 214 raw_spin_lock_irqsave(&sem->wait_lock, flags);
215 } 215 }
216 /* got the lock */ 216 /* got the lock */
217 sem->activity = -1; 217 sem->count = -1;
218 list_del(&waiter.list); 218 list_del(&waiter.list);
219 219
220 raw_spin_unlock_irqrestore(&sem->wait_lock, flags); 220 raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
@@ -235,9 +235,9 @@ int __down_write_trylock(struct rw_semaphore *sem)
235 235
236 raw_spin_lock_irqsave(&sem->wait_lock, flags); 236 raw_spin_lock_irqsave(&sem->wait_lock, flags);
237 237
238 if (sem->activity == 0) { 238 if (sem->count == 0) {
239 /* got the lock */ 239 /* got the lock */
240 sem->activity = -1; 240 sem->count = -1;
241 ret = 1; 241 ret = 1;
242 } 242 }
243 243
@@ -255,7 +255,7 @@ void __up_read(struct rw_semaphore *sem)
255 255
256 raw_spin_lock_irqsave(&sem->wait_lock, flags); 256 raw_spin_lock_irqsave(&sem->wait_lock, flags);
257 257
258 if (--sem->activity == 0 && !list_empty(&sem->wait_list)) 258 if (--sem->count == 0 && !list_empty(&sem->wait_list))
259 sem = __rwsem_wake_one_writer(sem); 259 sem = __rwsem_wake_one_writer(sem);
260 260
261 raw_spin_unlock_irqrestore(&sem->wait_lock, flags); 261 raw_spin_unlock_irqrestore(&sem->wait_lock, flags);
@@ -270,7 +270,7 @@ void __up_write(struct rw_semaphore *sem)
270 270
271 raw_spin_lock_irqsave(&sem->wait_lock, flags); 271 raw_spin_lock_irqsave(&sem->wait_lock, flags);
272 272
273 sem->activity = 0; 273 sem->count = 0;
274 if (!list_empty(&sem->wait_list)) 274 if (!list_empty(&sem->wait_list))
275 sem = __rwsem_do_wake(sem, 1); 275 sem = __rwsem_do_wake(sem, 1);
276 276
@@ -287,7 +287,7 @@ void __downgrade_write(struct rw_semaphore *sem)
287 287
288 raw_spin_lock_irqsave(&sem->wait_lock, flags); 288 raw_spin_lock_irqsave(&sem->wait_lock, flags);
289 289
290 sem->activity = 1; 290 sem->count = 1;
291 if (!list_empty(&sem->wait_list)) 291 if (!list_empty(&sem->wait_list))
292 sem = __rwsem_do_wake(sem, 0); 292 sem = __rwsem_do_wake(sem, 0);
293 293
diff --git a/kernel/locking/rwsem-xadd.c b/kernel/locking/rwsem-xadd.c
index dacc32142fcc..a2391ac135c8 100644
--- a/kernel/locking/rwsem-xadd.c
+++ b/kernel/locking/rwsem-xadd.c
@@ -82,9 +82,9 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
82 sem->count = RWSEM_UNLOCKED_VALUE; 82 sem->count = RWSEM_UNLOCKED_VALUE;
83 raw_spin_lock_init(&sem->wait_lock); 83 raw_spin_lock_init(&sem->wait_lock);
84 INIT_LIST_HEAD(&sem->wait_list); 84 INIT_LIST_HEAD(&sem->wait_list);
85#ifdef CONFIG_SMP 85#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
86 sem->owner = NULL; 86 sem->owner = NULL;
87 sem->osq = NULL; 87 osq_lock_init(&sem->osq);
88#endif 88#endif
89} 89}
90 90
@@ -262,7 +262,7 @@ static inline bool rwsem_try_write_lock(long count, struct rw_semaphore *sem)
262 return false; 262 return false;
263} 263}
264 264
265#ifdef CONFIG_SMP 265#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
266/* 266/*
267 * Try to acquire write lock before the writer has been put on wait queue. 267 * Try to acquire write lock before the writer has been put on wait queue.
268 */ 268 */
@@ -285,10 +285,10 @@ static inline bool rwsem_try_write_lock_unqueued(struct rw_semaphore *sem)
285static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem) 285static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
286{ 286{
287 struct task_struct *owner; 287 struct task_struct *owner;
288 bool on_cpu = true; 288 bool on_cpu = false;
289 289
290 if (need_resched()) 290 if (need_resched())
291 return 0; 291 return false;
292 292
293 rcu_read_lock(); 293 rcu_read_lock();
294 owner = ACCESS_ONCE(sem->owner); 294 owner = ACCESS_ONCE(sem->owner);
@@ -297,9 +297,9 @@ static inline bool rwsem_can_spin_on_owner(struct rw_semaphore *sem)
297 rcu_read_unlock(); 297 rcu_read_unlock();
298 298
299 /* 299 /*
300 * If sem->owner is not set, the rwsem owner may have 300 * If sem->owner is not set, yet we have just recently entered the
301 * just acquired it and not set the owner yet or the rwsem 301 * slowpath, then there is a possibility reader(s) may have the lock.
302 * has been released. 302 * To be safe, avoid spinning in these situations.
303 */ 303 */
304 return on_cpu; 304 return on_cpu;
305} 305}
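The rwsem_can_spin_on_owner() change above flips the default from "spin" to "don't spin": readers never record themselves in sem->owner, so an unset owner can mean the lock is read-held and spinning would be wasted work. A small plain-C illustration of that decision follows, using invented structures rather than the kernel's.

#include <stdbool.h>
#include <stdio.h>

struct task { bool on_cpu; };

struct rwsem_like {
        struct task *owner;     /* NULL: free, or held by readers */
};

static bool can_spin_on_owner(const struct rwsem_like *sem, bool need_resched)
{
        bool spin = false;      /* was effectively "true" before the fix */

        if (need_resched)
                return false;

        if (sem->owner)                         /* a writer we can watch: */
                spin = sem->owner->on_cpu;      /* spin while it runs     */

        return spin;
}

int main(void)
{
        struct task writer = { .on_cpu = true };
        struct rwsem_like by_writer = { .owner = &writer };
        struct rwsem_like unknown   = { .owner = NULL };

        printf("writer on a cpu -> spin: %d\n", can_spin_on_owner(&by_writer, false));
        printf("owner unknown   -> spin: %d\n", can_spin_on_owner(&unknown, false));
        return 0;
}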
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index 42f806de49d4..e2d3bc7f03b4 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -12,7 +12,7 @@
12 12
13#include <linux/atomic.h> 13#include <linux/atomic.h>
14 14
15#if defined(CONFIG_SMP) && defined(CONFIG_RWSEM_XCHGADD_ALGORITHM) 15#ifdef CONFIG_RWSEM_SPIN_ON_OWNER
16static inline void rwsem_set_owner(struct rw_semaphore *sem) 16static inline void rwsem_set_owner(struct rw_semaphore *sem)
17{ 17{
18 sem->owner = current; 18 sem->owner = current;
diff --git a/kernel/power/process.c b/kernel/power/process.c
index 0ca8d83e2369..4ee194eb524b 100644
--- a/kernel/power/process.c
+++ b/kernel/power/process.c
@@ -186,6 +186,7 @@ void thaw_processes(void)
186 186
187 printk("Restarting tasks ... "); 187 printk("Restarting tasks ... ");
188 188
189 __usermodehelper_set_disable_depth(UMH_FREEZING);
189 thaw_workqueues(); 190 thaw_workqueues();
190 191
191 read_lock(&tasklist_lock); 192 read_lock(&tasklist_lock);
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index 4dd8822f732a..ed35a4790afe 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -306,7 +306,7 @@ int suspend_devices_and_enter(suspend_state_t state)
306 error = suspend_ops->begin(state); 306 error = suspend_ops->begin(state);
307 if (error) 307 if (error)
308 goto Close; 308 goto Close;
309 } else if (state == PM_SUSPEND_FREEZE && freeze_ops->begin) { 309 } else if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->begin) {
310 error = freeze_ops->begin(); 310 error = freeze_ops->begin();
311 if (error) 311 if (error)
312 goto Close; 312 goto Close;
@@ -335,7 +335,7 @@ int suspend_devices_and_enter(suspend_state_t state)
335 Close: 335 Close:
336 if (need_suspend_ops(state) && suspend_ops->end) 336 if (need_suspend_ops(state) && suspend_ops->end)
337 suspend_ops->end(); 337 suspend_ops->end();
338 else if (state == PM_SUSPEND_FREEZE && freeze_ops->end) 338 else if (state == PM_SUSPEND_FREEZE && freeze_ops && freeze_ops->end)
339 freeze_ops->end(); 339 freeze_ops->end();
340 340
341 return error; 341 return error;
diff --git a/kernel/printk/printk.c b/kernel/printk/printk.c
index ea2d5f6962ed..13e839dbca07 100644
--- a/kernel/printk/printk.c
+++ b/kernel/printk/printk.c
@@ -1416,9 +1416,10 @@ static int have_callable_console(void)
1416/* 1416/*
1417 * Can we actually use the console at this time on this cpu? 1417 * Can we actually use the console at this time on this cpu?
1418 * 1418 *
1419 * Console drivers may assume that per-cpu resources have been allocated. So 1419 * Console drivers may assume that per-cpu resources have
1420 * unless they're explicitly marked as being able to cope (CON_ANYTIME) don't 1420 * been allocated. So unless they're explicitly marked as
1421 * call them until this CPU is officially up. 1421 * being able to cope (CON_ANYTIME) don't call them until
1422 * this CPU is officially up.
1422 */ 1423 */
1423static inline int can_use_console(unsigned int cpu) 1424static inline int can_use_console(unsigned int cpu)
1424{ 1425{
@@ -1431,10 +1432,8 @@ static inline int can_use_console(unsigned int cpu)
1431 * console_lock held, and 'console_locked' set) if it 1432 * console_lock held, and 'console_locked' set) if it
1432 * is successful, false otherwise. 1433 * is successful, false otherwise.
1433 */ 1434 */
1434static int console_trylock_for_printk(void) 1435static int console_trylock_for_printk(unsigned int cpu)
1435{ 1436{
1436 unsigned int cpu = smp_processor_id();
1437
1438 if (!console_trylock()) 1437 if (!console_trylock())
1439 return 0; 1438 return 0;
1440 /* 1439 /*
@@ -1609,8 +1608,7 @@ asmlinkage int vprintk_emit(int facility, int level,
1609 */ 1608 */
1610 if (!oops_in_progress && !lockdep_recursing(current)) { 1609 if (!oops_in_progress && !lockdep_recursing(current)) {
1611 recursion_bug = 1; 1610 recursion_bug = 1;
1612 local_irq_restore(flags); 1611 goto out_restore_irqs;
1613 return 0;
1614 } 1612 }
1615 zap_locks(); 1613 zap_locks();
1616 } 1614 }
@@ -1718,27 +1716,21 @@ asmlinkage int vprintk_emit(int facility, int level,
1718 1716
1719 logbuf_cpu = UINT_MAX; 1717 logbuf_cpu = UINT_MAX;
1720 raw_spin_unlock(&logbuf_lock); 1718 raw_spin_unlock(&logbuf_lock);
1721 lockdep_on();
1722 local_irq_restore(flags);
1723 1719
1724 /* If called from the scheduler, we can not call up(). */ 1720 /* If called from the scheduler, we can not call up(). */
1725 if (in_sched) 1721 if (!in_sched) {
1726 return printed_len; 1722 /*
1727 1723 * Try to acquire and then immediately release the console
1728 /* 1724 * semaphore. The release will print out buffers and wake up
1729 * Disable preemption to avoid being preempted while holding 1725 * /dev/kmsg and syslog() users.
1730 * console_sem which would prevent anyone from printing to console 1726 */
1731 */ 1727 if (console_trylock_for_printk(this_cpu))
1732 preempt_disable(); 1728 console_unlock();
1733 /* 1729 }
1734 * Try to acquire and then immediately release the console semaphore.
1735 * The release will print out buffers and wake up /dev/kmsg and syslog()
1736 * users.
1737 */
1738 if (console_trylock_for_printk())
1739 console_unlock();
1740 preempt_enable();
1741 1730
1731 lockdep_on();
1732out_restore_irqs:
1733 local_irq_restore(flags);
1742 return printed_len; 1734 return printed_len;
1743} 1735}
1744EXPORT_SYMBOL(vprintk_emit); 1736EXPORT_SYMBOL(vprintk_emit);
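The vprintk_emit() restructuring above keeps the long-standing printk contract: every caller appends to the log buffer under logbuf_lock, then merely tries to take the console semaphore; whichever caller wins flushes everything queued so far, and the others return immediately. The userspace sketch below shows that contract with POSIX mutexes and stdio as the "console"; it omits the re-check loop that console_unlock() uses to close the window where a loser queues text just after the winner's final flush.

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t log_lock    = PTHREAD_MUTEX_INITIALIZER; /* logbuf_lock */
static pthread_mutex_t console_sem = PTHREAD_MUTEX_INITIALIZER; /* console_sem */

static char   logbuf[4096];
static size_t log_len;

/* Called with console_sem held: drain whatever is queued right now. */
static void console_flush(void)
{
        pthread_mutex_lock(&log_lock);
        fwrite(logbuf, 1, log_len, stdout);
        log_len = 0;
        pthread_mutex_unlock(&log_lock);
}

static void emit(const char *msg)
{
        size_t n = strlen(msg);

        pthread_mutex_lock(&log_lock);
        if (log_len + n <= sizeof(logbuf)) {
                memcpy(logbuf + log_len, msg, n);
                log_len += n;
        }
        pthread_mutex_unlock(&log_lock);

        /* Try to become the flusher; if the console is busy, whoever holds
         * it (or the next holder) prints our text for us. */
        if (pthread_mutex_trylock(&console_sem) == 0) {
                console_flush();
                pthread_mutex_unlock(&console_sem);
        }
}

int main(void)
{
        emit("hello ");
        emit("world\n");
        return 0;
}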
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index f1ba77363fbb..625d0b0cd75a 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -206,6 +206,70 @@ void rcu_bh_qs(int cpu)
206 rdp->passed_quiesce = 1; 206 rdp->passed_quiesce = 1;
207} 207}
208 208
209static DEFINE_PER_CPU(int, rcu_sched_qs_mask);
210
211static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
212 .dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
213 .dynticks = ATOMIC_INIT(1),
214#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
215 .dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE,
216 .dynticks_idle = ATOMIC_INIT(1),
217#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
218};
219
220/*
221 * Let the RCU core know that this CPU has gone through the scheduler,
222 * which is a quiescent state. This is called when the need for a
223 * quiescent state is urgent, so we burn an atomic operation and full
224 * memory barriers to let the RCU core know about it, regardless of what
225 * this CPU might (or might not) do in the near future.
226 *
227 * We inform the RCU core by emulating a zero-duration dyntick-idle
228 * period, which we in turn do by incrementing the ->dynticks counter
229 * by two.
230 */
231static void rcu_momentary_dyntick_idle(void)
232{
233 unsigned long flags;
234 struct rcu_data *rdp;
235 struct rcu_dynticks *rdtp;
236 int resched_mask;
237 struct rcu_state *rsp;
238
239 local_irq_save(flags);
240
241 /*
242 * Yes, we can lose flag-setting operations. This is OK, because
243 * the flag will be set again after some delay.
244 */
245 resched_mask = raw_cpu_read(rcu_sched_qs_mask);
246 raw_cpu_write(rcu_sched_qs_mask, 0);
247
248 /* Find the flavor that needs a quiescent state. */
249 for_each_rcu_flavor(rsp) {
250 rdp = raw_cpu_ptr(rsp->rda);
251 if (!(resched_mask & rsp->flavor_mask))
252 continue;
253 smp_mb(); /* rcu_sched_qs_mask before cond_resched_completed. */
254 if (ACCESS_ONCE(rdp->mynode->completed) !=
255 ACCESS_ONCE(rdp->cond_resched_completed))
256 continue;
257
258 /*
259 * Pretend to be momentarily idle for the quiescent state.
260 * This allows the grace-period kthread to record the
261 * quiescent state, with no need for this CPU to do anything
262 * further.
263 */
264 rdtp = this_cpu_ptr(&rcu_dynticks);
265 smp_mb__before_atomic(); /* Earlier stuff before QS. */
266 atomic_add(2, &rdtp->dynticks); /* QS. */
267 smp_mb__after_atomic(); /* Later stuff after QS. */
268 break;
269 }
270 local_irq_restore(flags);
271}
272
209/* 273/*
210 * Note a context switch. This is a quiescent state for RCU-sched, 274 * Note a context switch. This is a quiescent state for RCU-sched,
211 * and requires special handling for preemptible RCU. 275 * and requires special handling for preemptible RCU.
@@ -216,19 +280,12 @@ void rcu_note_context_switch(int cpu)
216 trace_rcu_utilization(TPS("Start context switch")); 280 trace_rcu_utilization(TPS("Start context switch"));
217 rcu_sched_qs(cpu); 281 rcu_sched_qs(cpu);
218 rcu_preempt_note_context_switch(cpu); 282 rcu_preempt_note_context_switch(cpu);
283 if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
284 rcu_momentary_dyntick_idle();
219 trace_rcu_utilization(TPS("End context switch")); 285 trace_rcu_utilization(TPS("End context switch"));
220} 286}
221EXPORT_SYMBOL_GPL(rcu_note_context_switch); 287EXPORT_SYMBOL_GPL(rcu_note_context_switch);
222 288
223static DEFINE_PER_CPU(struct rcu_dynticks, rcu_dynticks) = {
224 .dynticks_nesting = DYNTICK_TASK_EXIT_IDLE,
225 .dynticks = ATOMIC_INIT(1),
226#ifdef CONFIG_NO_HZ_FULL_SYSIDLE
227 .dynticks_idle_nesting = DYNTICK_TASK_NEST_VALUE,
228 .dynticks_idle = ATOMIC_INIT(1),
229#endif /* #ifdef CONFIG_NO_HZ_FULL_SYSIDLE */
230};
231
232static long blimit = 10; /* Maximum callbacks per rcu_do_batch. */ 289static long blimit = 10; /* Maximum callbacks per rcu_do_batch. */
233static long qhimark = 10000; /* If this many pending, ignore blimit. */ 290static long qhimark = 10000; /* If this many pending, ignore blimit. */
234static long qlowmark = 100; /* Once only this many pending, use blimit. */ 291static long qlowmark = 100; /* Once only this many pending, use blimit. */
@@ -243,6 +300,13 @@ static ulong jiffies_till_next_fqs = ULONG_MAX;
243module_param(jiffies_till_first_fqs, ulong, 0644); 300module_param(jiffies_till_first_fqs, ulong, 0644);
244module_param(jiffies_till_next_fqs, ulong, 0644); 301module_param(jiffies_till_next_fqs, ulong, 0644);
245 302
303/*
304 * How long the grace period must be before we start recruiting
305 * quiescent-state help from rcu_note_context_switch().
306 */
307static ulong jiffies_till_sched_qs = HZ / 20;
308module_param(jiffies_till_sched_qs, ulong, 0644);
309
246static bool rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp, 310static bool rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
247 struct rcu_data *rdp); 311 struct rcu_data *rdp);
248static void force_qs_rnp(struct rcu_state *rsp, 312static void force_qs_rnp(struct rcu_state *rsp,
@@ -853,6 +917,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
853 bool *isidle, unsigned long *maxj) 917 bool *isidle, unsigned long *maxj)
854{ 918{
855 unsigned int curr; 919 unsigned int curr;
920 int *rcrmp;
856 unsigned int snap; 921 unsigned int snap;
857 922
858 curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks); 923 curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
@@ -893,27 +958,43 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
893 } 958 }
894 959
895 /* 960 /*
896 * There is a possibility that a CPU in adaptive-ticks state 961 * A CPU running for an extended time within the kernel can
897 * might run in the kernel with the scheduling-clock tick disabled 962 * delay RCU grace periods. When the CPU is in NO_HZ_FULL mode,
898 * for an extended time period. Invoke rcu_kick_nohz_cpu() to 963 * even context-switching back and forth between a pair of
899 * force the CPU to restart the scheduling-clock tick in this 964 * in-kernel CPU-bound tasks cannot advance grace periods.
900 * CPU is in this state. 965 * So if the grace period is old enough, make the CPU pay attention.
901 */ 966 * Note that the unsynchronized assignments to the per-CPU
902 rcu_kick_nohz_cpu(rdp->cpu); 967 * rcu_sched_qs_mask variable are safe. Yes, setting of
903 968 * bits can be lost, but they will be set again on the next
904 /* 969 * force-quiescent-state pass. So lost bit sets do not result
905 * Alternatively, the CPU might be running in the kernel 970 * in incorrect behavior, merely in a grace period lasting
906 * for an extended period of time without a quiescent state. 971 * a few jiffies longer than it might otherwise. Because
907 * Attempt to force the CPU through the scheduler to gain the 972 * there are at most four threads involved, and because the
908 * needed quiescent state, but only if the grace period has gone 973 * updates are only once every few jiffies, the probability of
909 * on for an uncommonly long time. If there are many stuck CPUs, 974 * lossage (and thus of slight grace-period extension) is
910 * we will beat on the first one until it gets unstuck, then move 975 * quite low.
911 * to the next. Only do this for the primary flavor of RCU. 976 *
977 * Note that if the jiffies_till_sched_qs boot/sysfs parameter
978 * is set too high, we override with half of the RCU CPU stall
979 * warning delay.
912 */ 980 */
913 if (rdp->rsp == rcu_state_p && 981 rcrmp = &per_cpu(rcu_sched_qs_mask, rdp->cpu);
982 if (ULONG_CMP_GE(jiffies,
983 rdp->rsp->gp_start + jiffies_till_sched_qs) ||
914 ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) { 984 ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
915 rdp->rsp->jiffies_resched += 5; 985 if (!(ACCESS_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {
916 resched_cpu(rdp->cpu); 986 ACCESS_ONCE(rdp->cond_resched_completed) =
987 ACCESS_ONCE(rdp->mynode->completed);
988 smp_mb(); /* ->cond_resched_completed before *rcrmp. */
989 ACCESS_ONCE(*rcrmp) =
990 ACCESS_ONCE(*rcrmp) + rdp->rsp->flavor_mask;
991 resched_cpu(rdp->cpu); /* Force CPU into scheduler. */
992 rdp->rsp->jiffies_resched += 5; /* Enable beating. */
993 } else if (ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
994 /* Time to beat on that CPU again! */
995 resched_cpu(rdp->cpu); /* Force CPU into scheduler. */
996 rdp->rsp->jiffies_resched += 5; /* Re-enable beating. */
997 }
917 } 998 }
918 999
919 return 0; 1000 return 0;
@@ -3491,6 +3572,7 @@ static void __init rcu_init_one(struct rcu_state *rsp,
3491 "rcu_node_fqs_1", 3572 "rcu_node_fqs_1",
3492 "rcu_node_fqs_2", 3573 "rcu_node_fqs_2",
3493 "rcu_node_fqs_3" }; /* Match MAX_RCU_LVLS */ 3574 "rcu_node_fqs_3" }; /* Match MAX_RCU_LVLS */
3575 static u8 fl_mask = 0x1;
3494 int cpustride = 1; 3576 int cpustride = 1;
3495 int i; 3577 int i;
3496 int j; 3578 int j;
@@ -3509,6 +3591,8 @@ static void __init rcu_init_one(struct rcu_state *rsp,
3509 for (i = 1; i < rcu_num_lvls; i++) 3591 for (i = 1; i < rcu_num_lvls; i++)
3510 rsp->level[i] = rsp->level[i - 1] + rsp->levelcnt[i - 1]; 3592 rsp->level[i] = rsp->level[i - 1] + rsp->levelcnt[i - 1];
3511 rcu_init_levelspread(rsp); 3593 rcu_init_levelspread(rsp);
3594 rsp->flavor_mask = fl_mask;
3595 fl_mask <<= 1;
3512 3596
3513 /* Initialize the elements themselves, starting from the leaves. */ 3597 /* Initialize the elements themselves, starting from the leaves. */
3514 3598
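rcu_momentary_dyntick_idle() works because of how ->dynticks is read by the grace-period machinery: the counter starts odd ("not idle"), is bumped on every idle entry and exit, and a later check declares the CPU quiescent if the value is even (idle right now) or has moved by at least two since the snapshot (it went through idle in between). Adding two in one step therefore reports a zero-duration idle period. A simplified userspace sketch of that bookkeeping follows; the constants and helper names are illustrative, not the kernel's.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static _Atomic unsigned int dynticks = 1;   /* odd: CPU considered active */

static void idle_enter(void) { atomic_fetch_add(&dynticks, 1); }  /* -> even */
static void idle_exit(void)  { atomic_fetch_add(&dynticks, 1); }  /* -> odd  */

/* Report a quiescent state without actually going idle. */
static void momentary_idle(void) { atomic_fetch_add(&dynticks, 2); }

/* Observer side: take a snapshot, then later decide whether the CPU
 * has been in (or passed through) an idle period since the snapshot. */
static unsigned int snapshot(void) { return atomic_load(&dynticks); }

static bool in_or_through_idle(unsigned int snap)
{
        unsigned int curr = atomic_load(&dynticks);

        return (curr & 1) == 0 || curr - snap >= 2;
}

int main(void)
{
        unsigned int snap = snapshot();

        (void)idle_enter; (void)idle_exit;          /* unused in this demo */
        printf("quiescent yet? %d\n", in_or_through_idle(snap));   /* 0 */
        momentary_idle();
        printf("after +2:      %d\n", in_or_through_idle(snap));   /* 1 */
        return 0;
}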
diff --git a/kernel/rcu/tree.h b/kernel/rcu/tree.h
index bf2c1e669691..0f69a79c5b7d 100644
--- a/kernel/rcu/tree.h
+++ b/kernel/rcu/tree.h
@@ -307,6 +307,9 @@ struct rcu_data {
307 /* 4) reasons this CPU needed to be kicked by force_quiescent_state */ 307 /* 4) reasons this CPU needed to be kicked by force_quiescent_state */
308 unsigned long dynticks_fqs; /* Kicked due to dynticks idle. */ 308 unsigned long dynticks_fqs; /* Kicked due to dynticks idle. */
309 unsigned long offline_fqs; /* Kicked due to being offline. */ 309 unsigned long offline_fqs; /* Kicked due to being offline. */
310 unsigned long cond_resched_completed;
311 /* Grace period that needs help */
312 /* from cond_resched(). */
310 313
311 /* 5) __rcu_pending() statistics. */ 314 /* 5) __rcu_pending() statistics. */
312 unsigned long n_rcu_pending; /* rcu_pending() calls since boot. */ 315 unsigned long n_rcu_pending; /* rcu_pending() calls since boot. */
@@ -392,6 +395,7 @@ struct rcu_state {
392 struct rcu_node *level[RCU_NUM_LVLS]; /* Hierarchy levels. */ 395 struct rcu_node *level[RCU_NUM_LVLS]; /* Hierarchy levels. */
393 u32 levelcnt[MAX_RCU_LVLS + 1]; /* # nodes in each level. */ 396 u32 levelcnt[MAX_RCU_LVLS + 1]; /* # nodes in each level. */
394 u8 levelspread[RCU_NUM_LVLS]; /* kids/node in each level. */ 397 u8 levelspread[RCU_NUM_LVLS]; /* kids/node in each level. */
398 u8 flavor_mask; /* bit in flavor mask. */
395 struct rcu_data __percpu *rda; /* pointer of percu rcu_data. */ 399 struct rcu_data __percpu *rda; /* pointer of percu rcu_data. */
396 void (*call)(struct rcu_head *head, /* call_rcu() flavor. */ 400 void (*call)(struct rcu_head *head, /* call_rcu() flavor. */
397 void (*func)(struct rcu_head *head)); 401 void (*func)(struct rcu_head *head));
@@ -563,7 +567,7 @@ static bool rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp);
563static void do_nocb_deferred_wakeup(struct rcu_data *rdp); 567static void do_nocb_deferred_wakeup(struct rcu_data *rdp);
564static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp); 568static void rcu_boot_init_nocb_percpu_data(struct rcu_data *rdp);
565static void rcu_spawn_nocb_kthreads(struct rcu_state *rsp); 569static void rcu_spawn_nocb_kthreads(struct rcu_state *rsp);
566static void rcu_kick_nohz_cpu(int cpu); 570static void __maybe_unused rcu_kick_nohz_cpu(int cpu);
567static bool init_nocb_callback_list(struct rcu_data *rdp); 571static bool init_nocb_callback_list(struct rcu_data *rdp);
568static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq); 572static void rcu_sysidle_enter(struct rcu_dynticks *rdtp, int irq);
569static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq); 573static void rcu_sysidle_exit(struct rcu_dynticks *rdtp, int irq);
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index cbc2c45265e2..02ac0fb186b8 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -2404,7 +2404,7 @@ static bool init_nocb_callback_list(struct rcu_data *rdp)
2404 * if an adaptive-ticks CPU is failing to respond to the current grace 2404 * if an adaptive-ticks CPU is failing to respond to the current grace
2405 * period and has not be idle from an RCU perspective, kick it. 2405 * period and has not be idle from an RCU perspective, kick it.
2406 */ 2406 */
2407static void rcu_kick_nohz_cpu(int cpu) 2407static void __maybe_unused rcu_kick_nohz_cpu(int cpu)
2408{ 2408{
2409#ifdef CONFIG_NO_HZ_FULL 2409#ifdef CONFIG_NO_HZ_FULL
2410 if (tick_nohz_full_cpu(cpu)) 2410 if (tick_nohz_full_cpu(cpu))
diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
index a2aeb4df0f60..bc7883570530 100644
--- a/kernel/rcu/update.c
+++ b/kernel/rcu/update.c
@@ -200,12 +200,12 @@ void wait_rcu_gp(call_rcu_func_t crf)
200EXPORT_SYMBOL_GPL(wait_rcu_gp); 200EXPORT_SYMBOL_GPL(wait_rcu_gp);
201 201
202#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD 202#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
203static inline void debug_init_rcu_head(struct rcu_head *head) 203void init_rcu_head(struct rcu_head *head)
204{ 204{
205 debug_object_init(head, &rcuhead_debug_descr); 205 debug_object_init(head, &rcuhead_debug_descr);
206} 206}
207 207
208static inline void debug_rcu_head_free(struct rcu_head *head) 208void destroy_rcu_head(struct rcu_head *head)
209{ 209{
210 debug_object_free(head, &rcuhead_debug_descr); 210 debug_object_free(head, &rcuhead_debug_descr);
211} 211}
@@ -350,21 +350,3 @@ static int __init check_cpu_stall_init(void)
350early_initcall(check_cpu_stall_init); 350early_initcall(check_cpu_stall_init);
351 351
352#endif /* #ifdef CONFIG_RCU_STALL_COMMON */ 352#endif /* #ifdef CONFIG_RCU_STALL_COMMON */
353
354/*
355 * Hooks for cond_resched() and friends to avoid RCU CPU stall warnings.
356 */
357
358DEFINE_PER_CPU(int, rcu_cond_resched_count);
359
360/*
361 * Report a set of RCU quiescent states, for use by cond_resched()
362 * and friends. Out of line due to being called infrequently.
363 */
364void rcu_resched(void)
365{
366 preempt_disable();
367 __this_cpu_write(rcu_cond_resched_count, 0);
368 rcu_note_context_switch(smp_processor_id());
369 preempt_enable();
370}
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 3bdf01b494fe..bc1638b33449 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4147,7 +4147,6 @@ static void __cond_resched(void)
4147 4147
4148int __sched _cond_resched(void) 4148int __sched _cond_resched(void)
4149{ 4149{
4150 rcu_cond_resched();
4151 if (should_resched()) { 4150 if (should_resched()) {
4152 __cond_resched(); 4151 __cond_resched();
4153 return 1; 4152 return 1;
@@ -4166,18 +4165,15 @@ EXPORT_SYMBOL(_cond_resched);
4166 */ 4165 */
4167int __cond_resched_lock(spinlock_t *lock) 4166int __cond_resched_lock(spinlock_t *lock)
4168{ 4167{
4169 bool need_rcu_resched = rcu_should_resched();
4170 int resched = should_resched(); 4168 int resched = should_resched();
4171 int ret = 0; 4169 int ret = 0;
4172 4170
4173 lockdep_assert_held(lock); 4171 lockdep_assert_held(lock);
4174 4172
4175 if (spin_needbreak(lock) || resched || need_rcu_resched) { 4173 if (spin_needbreak(lock) || resched) {
4176 spin_unlock(lock); 4174 spin_unlock(lock);
4177 if (resched) 4175 if (resched)
4178 __cond_resched(); 4176 __cond_resched();
4179 else if (unlikely(need_rcu_resched))
4180 rcu_resched();
4181 else 4177 else
4182 cpu_relax(); 4178 cpu_relax();
4183 ret = 1; 4179 ret = 1;
@@ -4191,7 +4187,6 @@ int __sched __cond_resched_softirq(void)
4191{ 4187{
4192 BUG_ON(!in_softirq()); 4188 BUG_ON(!in_softirq());
4193 4189
4194 rcu_cond_resched(); /* BH disabled OK, just recording QSes. */
4195 if (should_resched()) { 4190 if (should_resched()) {
4196 local_bh_enable(); 4191 local_bh_enable();
4197 __cond_resched(); 4192 __cond_resched();
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 695f9773bb60..627b3c34b821 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -608,7 +608,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
608 608
609 avg_atom = p->se.sum_exec_runtime; 609 avg_atom = p->se.sum_exec_runtime;
610 if (nr_switches) 610 if (nr_switches)
611 do_div(avg_atom, nr_switches); 611 avg_atom = div64_ul(avg_atom, nr_switches);
612 else 612 else
613 avg_atom = -1LL; 613 avg_atom = -1LL;
614 614
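The one-liner above matters because do_div() divides a 64-bit value by a 32-bit divisor, while nr_switches is 64-bit; div64_ul() performs the full 64-by-64 division. A userspace illustration of the truncation the old code risked (plain C, not the kernel macros):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t runtime  = 10000000000ull;     /* sum_exec_runtime        */
        uint64_t switches = 0x100000001ull;     /* divisor wider than 32 bits */

        uint64_t wrong = runtime / (uint32_t)switches;  /* divisor truncated to 1 */
        uint64_t right = runtime / switches;            /* full 64-bit divide     */

        printf("truncated divisor: %" PRIu64 "\n", wrong);
        printf("64-bit divide:     %" PRIu64 "\n", right);
        return 0;
}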
diff --git a/kernel/smp.c b/kernel/smp.c
index 306f8180b0d5..80c33f8de14f 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -29,6 +29,8 @@ static DEFINE_PER_CPU_SHARED_ALIGNED(struct call_function_data, cfd_data);
29 29
30static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue); 30static DEFINE_PER_CPU_SHARED_ALIGNED(struct llist_head, call_single_queue);
31 31
32static void flush_smp_call_function_queue(bool warn_cpu_offline);
33
32static int 34static int
33hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu) 35hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
34{ 36{
@@ -51,12 +53,27 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
51#ifdef CONFIG_HOTPLUG_CPU 53#ifdef CONFIG_HOTPLUG_CPU
52 case CPU_UP_CANCELED: 54 case CPU_UP_CANCELED:
53 case CPU_UP_CANCELED_FROZEN: 55 case CPU_UP_CANCELED_FROZEN:
56 /* Fall-through to the CPU_DEAD[_FROZEN] case. */
54 57
55 case CPU_DEAD: 58 case CPU_DEAD:
56 case CPU_DEAD_FROZEN: 59 case CPU_DEAD_FROZEN:
57 free_cpumask_var(cfd->cpumask); 60 free_cpumask_var(cfd->cpumask);
58 free_percpu(cfd->csd); 61 free_percpu(cfd->csd);
59 break; 62 break;
63
64 case CPU_DYING:
65 case CPU_DYING_FROZEN:
66 /*
67 * The IPIs for the smp-call-function callbacks queued by other
68 * CPUs might arrive late, either due to hardware latencies or
69 * because this CPU disabled interrupts (inside stop-machine)
70 * before the IPIs were sent. So flush out any pending callbacks
71 * explicitly (without waiting for the IPIs to arrive), to
72 * ensure that the outgoing CPU doesn't go offline with work
73 * still pending.
74 */
75 flush_smp_call_function_queue(false);
76 break;
60#endif 77#endif
61 }; 78 };
62 79
@@ -177,23 +194,47 @@ static int generic_exec_single(int cpu, struct call_single_data *csd,
177 return 0; 194 return 0;
178} 195}
179 196
180/* 197/**
181 * Invoked by arch to handle an IPI for call function single. Must be 198 * generic_smp_call_function_single_interrupt - Execute SMP IPI callbacks
182 * called from the arch with interrupts disabled. 199 *
200 * Invoked by arch to handle an IPI for call function single.
201 * Must be called with interrupts disabled.
183 */ 202 */
184void generic_smp_call_function_single_interrupt(void) 203void generic_smp_call_function_single_interrupt(void)
185{ 204{
205 flush_smp_call_function_queue(true);
206}
207
208/**
209 * flush_smp_call_function_queue - Flush pending smp-call-function callbacks
210 *
211 * @warn_cpu_offline: If set to 'true', warn if callbacks were queued on an
212 * offline CPU. Skip this check if set to 'false'.
213 *
214 * Flush any pending smp-call-function callbacks queued on this CPU. This is
215 * invoked by the generic IPI handler, as well as by a CPU about to go offline,
216 * to ensure that all pending IPI callbacks are run before it goes completely
217 * offline.
218 *
219 * Loop through the call_single_queue and run all the queued callbacks.
220 * Must be called with interrupts disabled.
221 */
222static void flush_smp_call_function_queue(bool warn_cpu_offline)
223{
224 struct llist_head *head;
186 struct llist_node *entry; 225 struct llist_node *entry;
187 struct call_single_data *csd, *csd_next; 226 struct call_single_data *csd, *csd_next;
188 static bool warned; 227 static bool warned;
189 228
190 entry = llist_del_all(&__get_cpu_var(call_single_queue)); 229 WARN_ON(!irqs_disabled());
230
231 head = &__get_cpu_var(call_single_queue);
232 entry = llist_del_all(head);
191 entry = llist_reverse_order(entry); 233 entry = llist_reverse_order(entry);
192 234
193 /* 235 /* There shouldn't be any pending callbacks on an offline CPU. */
194 * Shouldn't receive this interrupt on a cpu that is not yet online. 236 if (unlikely(warn_cpu_offline && !cpu_online(smp_processor_id()) &&
195 */ 237 !warned && !llist_empty(head))) {
196 if (unlikely(!cpu_online(smp_processor_id()) && !warned)) {
197 warned = true; 238 warned = true;
198 WARN(1, "IPI on offline CPU %d\n", smp_processor_id()); 239 WARN(1, "IPI on offline CPU %d\n", smp_processor_id());
199 240
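flush_smp_call_function_queue() is built on the llist pattern: producers push call_single_data entries onto a per-CPU lock-free list with a cmpxchg on the head, and the consumer detaches the whole list with a single exchange, restores FIFO order, and runs each callback; running it once more from CPU_DYING guarantees nothing is left behind when the CPU goes offline. A self-contained userspace sketch of that pop-all-and-run step, using C11 atomics and invented names:

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct csd {
        struct csd *next;
        void (*func)(void *info);
        void *info;
};

static struct csd *_Atomic call_queue;          /* per-CPU in the kernel */

static void csd_push(struct csd *csd)
{
        struct csd *old = atomic_load(&call_queue);

        do {
                csd->next = old;
        } while (!atomic_compare_exchange_weak(&call_queue, &old, csd));
}

static void flush_call_queue(void)
{
        /* Detach everything that is currently queued in one shot. */
        struct csd *entry = atomic_exchange(&call_queue, NULL);
        struct csd *rev = NULL;

        /* Push order is LIFO; reverse to run callbacks in FIFO order. */
        while (entry) {
                struct csd *next = entry->next;

                entry->next = rev;
                rev = entry;
                entry = next;
        }
        for (; rev; rev = rev->next)
                rev->func(rev->info);
}

static void say(void *info) { printf("%s\n", (const char *)info); }

int main(void)
{
        struct csd a = { .func = say, .info = "first" };
        struct csd b = { .func = say, .info = "second" };

        csd_push(&a);
        csd_push(&b);
        flush_call_queue();             /* prints "first" then "second" */
        return 0;
}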
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 7de6555cfea0..75b22e22a72c 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -136,7 +136,6 @@ static unsigned long dirty_bytes_min = 2 * PAGE_SIZE;
136/* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */ 136/* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */
137static int maxolduid = 65535; 137static int maxolduid = 65535;
138static int minolduid; 138static int minolduid;
139static int min_percpu_pagelist_fract = 8;
140 139
141static int ngroups_max = NGROUPS_MAX; 140static int ngroups_max = NGROUPS_MAX;
142static const int cap_last_cap = CAP_LAST_CAP; 141static const int cap_last_cap = CAP_LAST_CAP;
@@ -861,6 +860,17 @@ static struct ctl_table kern_table[] = {
861 .extra1 = &zero, 860 .extra1 = &zero,
862 .extra2 = &one, 861 .extra2 = &one,
863 }, 862 },
863#ifdef CONFIG_SMP
864 {
865 .procname = "softlockup_all_cpu_backtrace",
866 .data = &sysctl_softlockup_all_cpu_backtrace,
867 .maxlen = sizeof(int),
868 .mode = 0644,
869 .proc_handler = proc_dointvec_minmax,
870 .extra1 = &zero,
871 .extra2 = &one,
872 },
873#endif /* CONFIG_SMP */
864 { 874 {
865 .procname = "nmi_watchdog", 875 .procname = "nmi_watchdog",
866 .data = &watchdog_user_enabled, 876 .data = &watchdog_user_enabled,
@@ -1317,7 +1327,7 @@ static struct ctl_table vm_table[] = {
1317 .maxlen = sizeof(percpu_pagelist_fraction), 1327 .maxlen = sizeof(percpu_pagelist_fraction),
1318 .mode = 0644, 1328 .mode = 0644,
1319 .proc_handler = percpu_pagelist_fraction_sysctl_handler, 1329 .proc_handler = percpu_pagelist_fraction_sysctl_handler,
1320 .extra1 = &min_percpu_pagelist_fract, 1330 .extra1 = &zero,
1321 }, 1331 },
1322#ifdef CONFIG_MMU 1332#ifdef CONFIG_MMU
1323 { 1333 {
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 88c9c65a430d..fe75444ae7ec 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -585,9 +585,14 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
585 struct itimerspec *new_setting, 585 struct itimerspec *new_setting,
586 struct itimerspec *old_setting) 586 struct itimerspec *old_setting)
587{ 587{
588 ktime_t exp;
589
588 if (!rtcdev) 590 if (!rtcdev)
589 return -ENOTSUPP; 591 return -ENOTSUPP;
590 592
593 if (flags & ~TIMER_ABSTIME)
594 return -EINVAL;
595
591 if (old_setting) 596 if (old_setting)
592 alarm_timer_get(timr, old_setting); 597 alarm_timer_get(timr, old_setting);
593 598
@@ -597,8 +602,16 @@ static int alarm_timer_set(struct k_itimer *timr, int flags,
597 602
598 /* start the timer */ 603 /* start the timer */
599 timr->it.alarm.interval = timespec_to_ktime(new_setting->it_interval); 604 timr->it.alarm.interval = timespec_to_ktime(new_setting->it_interval);
600 alarm_start(&timr->it.alarm.alarmtimer, 605 exp = timespec_to_ktime(new_setting->it_value);
601 timespec_to_ktime(new_setting->it_value)); 606 /* Convert (if necessary) to absolute time */
607 if (flags != TIMER_ABSTIME) {
608 ktime_t now;
609
610 now = alarm_bases[timr->it.alarm.alarmtimer.type].gettime();
611 exp = ktime_add(now, exp);
612 }
613
614 alarm_start(&timr->it.alarm.alarmtimer, exp);
602 return 0; 615 return 0;
603} 616}
604 617
@@ -730,6 +743,9 @@ static int alarm_timer_nsleep(const clockid_t which_clock, int flags,
730 if (!alarmtimer_get_rtcdev()) 743 if (!alarmtimer_get_rtcdev())
731 return -ENOTSUPP; 744 return -ENOTSUPP;
732 745
746 if (flags & ~TIMER_ABSTIME)
747 return -EINVAL;
748
733 if (!capable(CAP_WAKE_ALARM)) 749 if (!capable(CAP_WAKE_ALARM))
734 return -EPERM; 750 return -EPERM;
735 751
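Besides rejecting unknown flags, the alarmtimer hunk converts a relative it_value into an absolute expiry by adding the current reading of the alarm's clock base before arming. A small userspace sketch of that conversion, using the POSIX TIMER_ABSTIME flag and CLOCK_REALTIME as a stand-in for the alarm base:

#include <stdio.h>
#include <time.h>

#define NSEC_PER_SEC 1000000000L

static struct timespec timespec_add(struct timespec a, struct timespec b)
{
        struct timespec r = { a.tv_sec + b.tv_sec, a.tv_nsec + b.tv_nsec };

        if (r.tv_nsec >= NSEC_PER_SEC) {
                r.tv_sec++;
                r.tv_nsec -= NSEC_PER_SEC;
        }
        return r;
}

static struct timespec to_absolute(struct timespec value, int flags)
{
        struct timespec now;

        if (flags & TIMER_ABSTIME)
                return value;                   /* already absolute      */

        clock_gettime(CLOCK_REALTIME, &now);    /* "alarm base" stand-in */
        return timespec_add(now, value);
}

int main(void)
{
        struct timespec in = { .tv_sec = 5, .tv_nsec = 0 };
        struct timespec exp = to_absolute(in, 0);

        printf("expires at %ld.%09ld\n", (long)exp.tv_sec, exp.tv_nsec);
        return 0;
}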
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 5b372e3ed675..ac9d1dad630b 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -265,12 +265,12 @@ static void update_ftrace_function(void)
265 func = ftrace_ops_list_func; 265 func = ftrace_ops_list_func;
266 } 266 }
267 267
268 update_function_graph_func();
269
268 /* If there's no change, then do nothing more here */ 270 /* If there's no change, then do nothing more here */
269 if (ftrace_trace_function == func) 271 if (ftrace_trace_function == func)
270 return; 272 return;
271 273
272 update_function_graph_func();
273
274 /* 274 /*
275 * If we are using the list function, it doesn't care 275 * If we are using the list function, it doesn't care
276 * about the function_trace_ops. 276 * about the function_trace_ops.
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index 7c56c3d06943..ff7027199a9a 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -616,10 +616,6 @@ int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu,
616 struct ring_buffer_per_cpu *cpu_buffer; 616 struct ring_buffer_per_cpu *cpu_buffer;
617 struct rb_irq_work *work; 617 struct rb_irq_work *work;
618 618
619 if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) ||
620 (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu)))
621 return POLLIN | POLLRDNORM;
622
623 if (cpu == RING_BUFFER_ALL_CPUS) 619 if (cpu == RING_BUFFER_ALL_CPUS)
624 work = &buffer->irq_work; 620 work = &buffer->irq_work;
625 else { 621 else {
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 384ede311717..bda9621638cc 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -466,6 +466,12 @@ int __trace_puts(unsigned long ip, const char *str, int size)
466 struct print_entry *entry; 466 struct print_entry *entry;
467 unsigned long irq_flags; 467 unsigned long irq_flags;
468 int alloc; 468 int alloc;
469 int pc;
470
471 if (!(trace_flags & TRACE_ITER_PRINTK))
472 return 0;
473
474 pc = preempt_count();
469 475
470 if (unlikely(tracing_selftest_running || tracing_disabled)) 476 if (unlikely(tracing_selftest_running || tracing_disabled))
471 return 0; 477 return 0;
@@ -475,7 +481,7 @@ int __trace_puts(unsigned long ip, const char *str, int size)
475 local_save_flags(irq_flags); 481 local_save_flags(irq_flags);
476 buffer = global_trace.trace_buffer.buffer; 482 buffer = global_trace.trace_buffer.buffer;
477 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc, 483 event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, alloc,
478 irq_flags, preempt_count()); 484 irq_flags, pc);
479 if (!event) 485 if (!event)
480 return 0; 486 return 0;
481 487
@@ -492,6 +498,7 @@ int __trace_puts(unsigned long ip, const char *str, int size)
492 entry->buf[size] = '\0'; 498 entry->buf[size] = '\0';
493 499
494 __buffer_unlock_commit(buffer, event); 500 __buffer_unlock_commit(buffer, event);
501 ftrace_trace_stack(buffer, irq_flags, 4, pc);
495 502
496 return size; 503 return size;
497} 504}
@@ -509,6 +516,12 @@ int __trace_bputs(unsigned long ip, const char *str)
509 struct bputs_entry *entry; 516 struct bputs_entry *entry;
510 unsigned long irq_flags; 517 unsigned long irq_flags;
511 int size = sizeof(struct bputs_entry); 518 int size = sizeof(struct bputs_entry);
519 int pc;
520
521 if (!(trace_flags & TRACE_ITER_PRINTK))
522 return 0;
523
524 pc = preempt_count();
512 525
513 if (unlikely(tracing_selftest_running || tracing_disabled)) 526 if (unlikely(tracing_selftest_running || tracing_disabled))
514 return 0; 527 return 0;
@@ -516,7 +529,7 @@ int __trace_bputs(unsigned long ip, const char *str)
516 local_save_flags(irq_flags); 529 local_save_flags(irq_flags);
517 buffer = global_trace.trace_buffer.buffer; 530 buffer = global_trace.trace_buffer.buffer;
518 event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size, 531 event = trace_buffer_lock_reserve(buffer, TRACE_BPUTS, size,
519 irq_flags, preempt_count()); 532 irq_flags, pc);
520 if (!event) 533 if (!event)
521 return 0; 534 return 0;
522 535
@@ -525,6 +538,7 @@ int __trace_bputs(unsigned long ip, const char *str)
525 entry->str = str; 538 entry->str = str;
526 539
527 __buffer_unlock_commit(buffer, event); 540 __buffer_unlock_commit(buffer, event);
541 ftrace_trace_stack(buffer, irq_flags, 4, pc);
528 542
529 return 1; 543 return 1;
530} 544}
@@ -1396,7 +1410,6 @@ void tracing_start(void)
1396 1410
1397 arch_spin_unlock(&global_trace.max_lock); 1411 arch_spin_unlock(&global_trace.max_lock);
1398 1412
1399 ftrace_start();
1400 out: 1413 out:
1401 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags); 1414 raw_spin_unlock_irqrestore(&global_trace.start_lock, flags);
1402} 1415}
@@ -1443,7 +1456,6 @@ void tracing_stop(void)
1443 struct ring_buffer *buffer; 1456 struct ring_buffer *buffer;
1444 unsigned long flags; 1457 unsigned long flags;
1445 1458
1446 ftrace_stop();
1447 raw_spin_lock_irqsave(&global_trace.start_lock, flags); 1459 raw_spin_lock_irqsave(&global_trace.start_lock, flags);
1448 if (global_trace.stop_count++) 1460 if (global_trace.stop_count++)
1449 goto out; 1461 goto out;
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index f99e0b3bca8c..2de53628689f 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -470,6 +470,7 @@ static void remove_event_file_dir(struct ftrace_event_file *file)
470 470
471 list_del(&file->list); 471 list_del(&file->list);
472 remove_subsystem(file->system); 472 remove_subsystem(file->system);
473 free_event_filter(file->filter);
473 kmem_cache_free(file_cachep, file); 474 kmem_cache_free(file_cachep, file);
474} 475}
475 476
diff --git a/kernel/trace/trace_uprobe.c b/kernel/trace/trace_uprobe.c
index 04fdb5de823c..3c9b97e6b1f4 100644
--- a/kernel/trace/trace_uprobe.c
+++ b/kernel/trace/trace_uprobe.c
@@ -893,6 +893,9 @@ probe_event_enable(struct trace_uprobe *tu, struct ftrace_event_file *file,
893 int ret; 893 int ret;
894 894
895 if (file) { 895 if (file) {
896 if (tu->tp.flags & TP_FLAG_PROFILE)
897 return -EINTR;
898
896 link = kmalloc(sizeof(*link), GFP_KERNEL); 899 link = kmalloc(sizeof(*link), GFP_KERNEL);
897 if (!link) 900 if (!link)
898 return -ENOMEM; 901 return -ENOMEM;
@@ -901,29 +904,40 @@ probe_event_enable(struct trace_uprobe *tu, struct ftrace_event_file *file,
901 list_add_tail_rcu(&link->list, &tu->tp.files); 904 list_add_tail_rcu(&link->list, &tu->tp.files);
902 905
903 tu->tp.flags |= TP_FLAG_TRACE; 906 tu->tp.flags |= TP_FLAG_TRACE;
904 } else 907 } else {
905 tu->tp.flags |= TP_FLAG_PROFILE; 908 if (tu->tp.flags & TP_FLAG_TRACE)
909 return -EINTR;
906 910
907 ret = uprobe_buffer_enable(); 911 tu->tp.flags |= TP_FLAG_PROFILE;
908 if (ret < 0) 912 }
909 return ret;
910 913
911 WARN_ON(!uprobe_filter_is_empty(&tu->filter)); 914 WARN_ON(!uprobe_filter_is_empty(&tu->filter));
912 915
913 if (enabled) 916 if (enabled)
914 return 0; 917 return 0;
915 918
919 ret = uprobe_buffer_enable();
920 if (ret)
921 goto err_flags;
922
916 tu->consumer.filter = filter; 923 tu->consumer.filter = filter;
917 ret = uprobe_register(tu->inode, tu->offset, &tu->consumer); 924 ret = uprobe_register(tu->inode, tu->offset, &tu->consumer);
918 if (ret) { 925 if (ret)
919 if (file) { 926 goto err_buffer;
920 list_del(&link->list);
921 kfree(link);
922 tu->tp.flags &= ~TP_FLAG_TRACE;
923 } else
924 tu->tp.flags &= ~TP_FLAG_PROFILE;
925 }
926 927
928 return 0;
929
930 err_buffer:
931 uprobe_buffer_disable();
932
933 err_flags:
934 if (file) {
935 list_del(&link->list);
936 kfree(link);
937 tu->tp.flags &= ~TP_FLAG_TRACE;
938 } else {
939 tu->tp.flags &= ~TP_FLAG_PROFILE;
940 }
927 return ret; 941 return ret;
928} 942}
929 943
@@ -1201,12 +1215,6 @@ static int uprobe_dispatcher(struct uprobe_consumer *con, struct pt_regs *regs)
1201 1215
1202 current->utask->vaddr = (unsigned long) &udd; 1216 current->utask->vaddr = (unsigned long) &udd;
1203 1217
1204#ifdef CONFIG_PERF_EVENTS
1205 if ((tu->tp.flags & TP_FLAG_TRACE) == 0 &&
1206 !uprobe_perf_filter(&tu->consumer, 0, current->mm))
1207 return UPROBE_HANDLER_REMOVE;
1208#endif
1209
1210 if (WARN_ON_ONCE(!uprobe_cpu_buffer)) 1218 if (WARN_ON_ONCE(!uprobe_cpu_buffer))
1211 return 0; 1219 return 0;
1212 1220
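
The reworked probe_event_enable() above replaces per-branch cleanup with the usual goto-unwind ordering: set the mode flag, enable the buffer, register the probe, and on a late failure release each earlier step in reverse. The following is a rough userspace sketch of that pattern only; struct probe, enable_probe() and the malloc'd stand-ins are invented for illustration and are not the tracer's real types.

#include <stdio.h>
#include <stdlib.h>

struct probe {
	char *link;	/* per-file link (only in "trace" mode) */
	char *buffer;	/* per-cpu buffer */
	int flags;
};

/* Sketch of the reworked probe_event_enable() flow: set up state in
 * order, and on a late failure unwind it in reverse via goto labels.
 * 'fail_register' simulates uprobe_register() returning an error. */
static int enable_probe(struct probe *p, int fail_register)
{
	p->link = malloc(32);
	if (!p->link)
		return -1;
	p->flags |= 0x1;			/* TP_FLAG_TRACE */

	p->buffer = malloc(64);			/* uprobe_buffer_enable() */
	if (!p->buffer)
		goto err_flags;

	if (fail_register)			/* uprobe_register() */
		goto err_buffer;

	return 0;				/* success: keep everything */

err_buffer:
	free(p->buffer);			/* uprobe_buffer_disable() */
	p->buffer = NULL;
err_flags:
	p->flags &= ~0x1;
	free(p->link);
	p->link = NULL;
	return -1;
}

int main(void)
{
	struct probe p = { 0 };

	printf("register fails -> %d (flags=%d)\n", enable_probe(&p, 1), p.flags);
	printf("register works -> %d (flags=%d)\n", enable_probe(&p, 0), p.flags);
	free(p.buffer);
	free(p.link);
	return 0;
}
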
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 516203e665fc..c3319bd1b040 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -31,6 +31,12 @@
31 31
32int watchdog_user_enabled = 1; 32int watchdog_user_enabled = 1;
33int __read_mostly watchdog_thresh = 10; 33int __read_mostly watchdog_thresh = 10;
34#ifdef CONFIG_SMP
35int __read_mostly sysctl_softlockup_all_cpu_backtrace;
36#else
37#define sysctl_softlockup_all_cpu_backtrace 0
38#endif
39
34static int __read_mostly watchdog_running; 40static int __read_mostly watchdog_running;
35static u64 __read_mostly sample_period; 41static u64 __read_mostly sample_period;
36 42
@@ -47,6 +53,7 @@ static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
47static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved); 53static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
48static DEFINE_PER_CPU(struct perf_event *, watchdog_ev); 54static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
49#endif 55#endif
56static unsigned long soft_lockup_nmi_warn;
50 57
51/* boot commands */ 58/* boot commands */
52/* 59/*
@@ -95,6 +102,15 @@ static int __init nosoftlockup_setup(char *str)
95} 102}
96__setup("nosoftlockup", nosoftlockup_setup); 103__setup("nosoftlockup", nosoftlockup_setup);
97/* */ 104/* */
105#ifdef CONFIG_SMP
106static int __init softlockup_all_cpu_backtrace_setup(char *str)
107{
108 sysctl_softlockup_all_cpu_backtrace =
109 !!simple_strtol(str, NULL, 0);
110 return 1;
111}
112__setup("softlockup_all_cpu_backtrace=", softlockup_all_cpu_backtrace_setup);
113#endif
98 114
99/* 115/*
100 * Hard-lockup warnings should be triggered after just a few seconds. Soft- 116 * Hard-lockup warnings should be triggered after just a few seconds. Soft-
@@ -271,6 +287,7 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
271 unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts); 287 unsigned long touch_ts = __this_cpu_read(watchdog_touch_ts);
272 struct pt_regs *regs = get_irq_regs(); 288 struct pt_regs *regs = get_irq_regs();
273 int duration; 289 int duration;
290 int softlockup_all_cpu_backtrace = sysctl_softlockup_all_cpu_backtrace;
274 291
275 /* kick the hardlockup detector */ 292 /* kick the hardlockup detector */
276 watchdog_interrupt_count(); 293 watchdog_interrupt_count();
@@ -317,6 +334,17 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
317 if (__this_cpu_read(soft_watchdog_warn) == true) 334 if (__this_cpu_read(soft_watchdog_warn) == true)
318 return HRTIMER_RESTART; 335 return HRTIMER_RESTART;
319 336
337 if (softlockup_all_cpu_backtrace) {
338 /* Prevent multiple soft-lockup reports if one cpu is already
339 * engaged in dumping cpu back traces
340 */
341 if (test_and_set_bit(0, &soft_lockup_nmi_warn)) {
342 /* Someone else will report us. Let's give up */
343 __this_cpu_write(soft_watchdog_warn, true);
344 return HRTIMER_RESTART;
345 }
346 }
347
320 printk(KERN_EMERG "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n", 348 printk(KERN_EMERG "BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
321 smp_processor_id(), duration, 349 smp_processor_id(), duration,
322 current->comm, task_pid_nr(current)); 350 current->comm, task_pid_nr(current));
@@ -327,6 +355,17 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
327 else 355 else
328 dump_stack(); 356 dump_stack();
329 357
358 if (softlockup_all_cpu_backtrace) {
359 /* Avoid generating two back traces for current
360 * given that one is already made above
361 */
362 trigger_allbutself_cpu_backtrace();
363
364 clear_bit(0, &soft_lockup_nmi_warn);
365 /* Barrier to sync with other cpus */
366 smp_mb__after_atomic();
367 }
368
330 if (softlockup_panic) 369 if (softlockup_panic)
331 panic("softlockup: hung tasks"); 370 panic("softlockup: hung tasks");
332 __this_cpu_write(soft_watchdog_warn, true); 371 __this_cpu_write(soft_watchdog_warn, true);
@@ -527,10 +566,8 @@ static void update_timers_all_cpus(void)
527 int cpu; 566 int cpu;
528 567
529 get_online_cpus(); 568 get_online_cpus();
530 preempt_disable();
531 for_each_online_cpu(cpu) 569 for_each_online_cpu(cpu)
532 update_timers(cpu); 570 update_timers(cpu);
533 preempt_enable();
534 put_online_cpus(); 571 put_online_cpus();
535} 572}
536 573
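
softlockup_all_cpu_backtrace works by letting the first CPU that trips the detector claim a shared bit with test_and_set_bit(); every other CPU that fires while the bit is held marks itself warned and returns. Below is a hedged userspace analogue of that single-reporter gate, with a C11 atomic_flag and pthreads standing in for the kernel's bit ops and per-CPU timers.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Userspace analogue: only the thread that wins the test-and-set prints
 * the expensive all-CPU report; the rest back off, like the
 * soft_lockup_nmi_warn bit in the patch above. */
static atomic_flag report_in_progress = ATOMIC_FLAG_INIT;

static void *cpu_detects_lockup(void *arg)
{
	int cpu = *(int *)arg;

	if (atomic_flag_test_and_set(&report_in_progress)) {
		/* Someone else is reporting; just remember that we warned. */
		printf("cpu%d: lockup seen, another cpu reports\n", cpu);
		return NULL;
	}
	printf("cpu%d: BUG: soft lockup - dumping all backtraces\n", cpu);
	/* trigger_allbutself_cpu_backtrace() would run here */
	atomic_flag_clear(&report_in_progress);		/* clear_bit() */
	return NULL;
}

int main(void)
{
	pthread_t t[4];
	int id[4];

	for (int i = 0; i < 4; i++) {
		id[i] = i;
		pthread_create(&t[i], NULL, cpu_detects_lockup, &id[i]);
	}
	for (int i = 0; i < 4; i++)
		pthread_join(t[i], NULL);
	return 0;
}
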
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 6203d2900877..35974ac69600 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3284,6 +3284,7 @@ int workqueue_sysfs_register(struct workqueue_struct *wq)
3284 } 3284 }
3285 } 3285 }
3286 3286
3287 dev_set_uevent_suppress(&wq_dev->dev, false);
3287 kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD); 3288 kobject_uevent(&wq_dev->dev.kobj, KOBJ_ADD);
3288 return 0; 3289 return 0;
3289} 3290}
@@ -4879,7 +4880,7 @@ static void __init wq_numa_init(void)
4879 BUG_ON(!tbl); 4880 BUG_ON(!tbl);
4880 4881
4881 for_each_node(node) 4882 for_each_node(node)
4882 BUG_ON(!alloc_cpumask_var_node(&tbl[node], GFP_KERNEL, 4883 BUG_ON(!zalloc_cpumask_var_node(&tbl[node], GFP_KERNEL,
4883 node_online(node) ? node : NUMA_NO_NODE)); 4884 node_online(node) ? node : NUMA_NO_NODE));
4884 4885
4885 for_each_possible_cpu(cpu) { 4886 for_each_possible_cpu(cpu) {
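
The switch to zalloc_cpumask_var_node() matters because wq_numa_init() then populates the per-node masks bit by bit; that accumulation is only correct if the mask starts out all-zero. A tiny illustration of the point, with calloc standing in for the zeroing allocator (NR_CPUS and the bit layout here are made up for the example):

#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 8	/* toy value for the sketch */

int main(void)
{
	/* ORing bits into a freshly allocated mask is only correct if the
	 * mask starts zeroed, hence calloc (the zalloc analogue) rather
	 * than malloc with its indeterminate contents. */
	unsigned char *mask = calloc(1, NR_CPUS / 8 + 1);
	if (!mask)
		return 1;

	mask[0] |= 1u << 2;	/* "cpumask_set_cpu(2, mask)" */
	mask[0] |= 1u << 5;

	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		if (mask[cpu / 8] & (1u << (cpu % 8)))
			printf("cpu%d set\n", cpu);

	free(mask);
	return 0;
}
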
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index 7cfcc1b8e101..7a638aa3545b 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -930,7 +930,7 @@ config LOCKDEP
930 bool 930 bool
931 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT 931 depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
932 select STACKTRACE 932 select STACKTRACE
933 select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE && !ARC 933 select FRAME_POINTER if !MIPS && !PPC && !ARM_UNWIND && !S390 && !MICROBLAZE && !ARC && !SCORE
934 select KALLSYMS 934 select KALLSYMS
935 select KALLSYMS_ALL 935 select KALLSYMS_ALL
936 936
@@ -1408,7 +1408,7 @@ config FAULT_INJECTION_STACKTRACE_FILTER
1408 depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT 1408 depends on FAULT_INJECTION_DEBUG_FS && STACKTRACE_SUPPORT
1409 depends on !X86_64 1409 depends on !X86_64
1410 select STACKTRACE 1410 select STACKTRACE
1411 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC 1411 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND && !ARC && !SCORE
1412 help 1412 help
1413 Provide stacktrace filter for fault-injection capabilities 1413 Provide stacktrace filter for fault-injection capabilities
1414 1414
diff --git a/lib/cpumask.c b/lib/cpumask.c
index c101230658eb..b6513a9f2892 100644
--- a/lib/cpumask.c
+++ b/lib/cpumask.c
@@ -191,7 +191,7 @@ int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp)
191 191
192 i %= num_online_cpus(); 192 i %= num_online_cpus();
193 193
194 if (!cpumask_of_node(numa_node)) { 194 if (numa_node == -1 || !cpumask_of_node(numa_node)) {
195 /* Use all online cpu's for non numa aware system */ 195 /* Use all online cpu's for non numa aware system */
196 cpumask_copy(mask, cpu_online_mask); 196 cpumask_copy(mask, cpu_online_mask);
197 } else { 197 } else {
diff --git a/lib/iovec.c b/lib/iovec.c
index 454baa88bf27..7a7c2da4cddf 100644
--- a/lib/iovec.c
+++ b/lib/iovec.c
@@ -51,3 +51,58 @@ int memcpy_toiovec(struct iovec *iov, unsigned char *kdata, int len)
51 return 0; 51 return 0;
52} 52}
53EXPORT_SYMBOL(memcpy_toiovec); 53EXPORT_SYMBOL(memcpy_toiovec);
54
55/*
56 * Copy kernel to iovec. Returns -EFAULT on error.
57 */
58
59int memcpy_toiovecend(const struct iovec *iov, unsigned char *kdata,
60 int offset, int len)
61{
62 int copy;
63 for (; len > 0; ++iov) {
64 /* Skip over the finished iovecs */
65 if (unlikely(offset >= iov->iov_len)) {
66 offset -= iov->iov_len;
67 continue;
68 }
69 copy = min_t(unsigned int, iov->iov_len - offset, len);
70 if (copy_to_user(iov->iov_base + offset, kdata, copy))
71 return -EFAULT;
72 offset = 0;
73 kdata += copy;
74 len -= copy;
75 }
76
77 return 0;
78}
79EXPORT_SYMBOL(memcpy_toiovecend);
80
81/*
82 * Copy iovec to kernel. Returns -EFAULT on error.
83 */
84
85int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
86 int offset, int len)
87{
88 /* Skip over the finished iovecs */
89 while (offset >= iov->iov_len) {
90 offset -= iov->iov_len;
91 iov++;
92 }
93
94 while (len > 0) {
95 u8 __user *base = iov->iov_base + offset;
96 int copy = min_t(unsigned int, len, iov->iov_len - offset);
97
98 offset = 0;
99 if (copy_from_user(kdata, base, copy))
100 return -EFAULT;
101 len -= copy;
102 kdata += copy;
103 iov++;
104 }
105
106 return 0;
107}
108EXPORT_SYMBOL(memcpy_fromiovecend);
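
Both helpers walk the iovec the same way: first skip whole entries that the offset already covers, then copy min(space left in this entry, len) per iteration, resetting offset to zero after the first partial entry. A hedged userspace rendering of memcpy_toiovecend(), with plain memcpy in place of copy_to_user() (copy_to_iovec_end is an invented name):

#include <stdio.h>
#include <string.h>
#include <sys/uio.h>

/* Userspace model of memcpy_toiovecend(): copy kdata into the iovec
 * starting 'offset' bytes in, spanning entries as needed.  The caller
 * must guarantee the iovec has room, as the kernel callers do. */
static int copy_to_iovec_end(const struct iovec *iov, const char *kdata,
			     size_t offset, size_t len)
{
	for (; len > 0; ++iov) {
		if (offset >= iov->iov_len) {	/* skip finished entries */
			offset -= iov->iov_len;
			continue;
		}
		size_t copy = iov->iov_len - offset;
		if (copy > len)
			copy = len;
		memcpy((char *)iov->iov_base + offset, kdata, copy);
		offset = 0;
		kdata += copy;
		len -= copy;
	}
	return 0;
}

int main(void)
{
	char a[4], b[8];
	struct iovec iov[2] = {
		{ .iov_base = a, .iov_len = sizeof(a) },
		{ .iov_base = b, .iov_len = sizeof(b) },
	};

	memset(a, '.', sizeof(a));
	memset(b, '.', sizeof(b));
	copy_to_iovec_end(iov, "HELLO", 2, 5);	/* spans both buffers */
	printf("%.4s|%.8s\n", a, b);		/* ..HE|LLO..... */
	return 0;
}
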
diff --git a/lib/lz4/lz4_decompress.c b/lib/lz4/lz4_decompress.c
index df6839e3ce08..7a85967060a5 100644
--- a/lib/lz4/lz4_decompress.c
+++ b/lib/lz4/lz4_decompress.c
@@ -72,6 +72,8 @@ static int lz4_uncompress(const char *source, char *dest, int osize)
72 len = *ip++; 72 len = *ip++;
73 for (; len == 255; length += 255) 73 for (; len == 255; length += 255)
74 len = *ip++; 74 len = *ip++;
75 if (unlikely(length > (size_t)(length + len)))
76 goto _output_error;
75 length += len; 77 length += len;
76 } 78 }
77 79
@@ -106,6 +108,8 @@ static int lz4_uncompress(const char *source, char *dest, int osize)
106 if (length == ML_MASK) { 108 if (length == ML_MASK) {
107 for (; *ip == 255; length += 255) 109 for (; *ip == 255; length += 255)
108 ip++; 110 ip++;
111 if (unlikely(length > (size_t)(length + *ip)))
112 goto _output_error;
109 length += *ip++; 113 length += *ip++;
110 } 114 }
111 115
@@ -155,7 +159,7 @@ static int lz4_uncompress(const char *source, char *dest, int osize)
155 159
156 /* write overflow error detected */ 160 /* write overflow error detected */
157_output_error: 161_output_error:
158 return (int) (-(((char *)ip) - source)); 162 return -1;
159} 163}
160 164
161static int lz4_uncompress_unknownoutputsize(const char *source, char *dest, 165static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
@@ -188,6 +192,8 @@ static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
188 int s = 255; 192 int s = 255;
189 while ((ip < iend) && (s == 255)) { 193 while ((ip < iend) && (s == 255)) {
190 s = *ip++; 194 s = *ip++;
195 if (unlikely(length > (size_t)(length + s)))
196 goto _output_error;
191 length += s; 197 length += s;
192 } 198 }
193 } 199 }
@@ -228,6 +234,8 @@ static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
228 if (length == ML_MASK) { 234 if (length == ML_MASK) {
229 while (ip < iend) { 235 while (ip < iend) {
230 int s = *ip++; 236 int s = *ip++;
237 if (unlikely(length > (size_t)(length + s)))
238 goto _output_error;
231 length += s; 239 length += s;
232 if (s == 255) 240 if (s == 255)
233 continue; 241 continue;
@@ -280,7 +288,7 @@ static int lz4_uncompress_unknownoutputsize(const char *source, char *dest,
280 288
281 /* write overflow error detected */ 289 /* write overflow error detected */
282_output_error: 290_output_error:
283 return (int) (-(((char *) ip) - source)); 291 return -1;
284} 292}
285 293
286int lz4_decompress(const unsigned char *src, size_t *src_len, 294int lz4_decompress(const unsigned char *src, size_t *src_len,
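
The new checks catch size_t wraparound while run lengths are accumulated: once length + len overflows, the sum is smaller than length, so comparing the two exposes the overflow before the corrupted length reaches the copy loops. A standalone demonstration of the idiom (add_len_checked is illustrative, not the lz4 code):

#include <stdint.h>
#include <stdio.h>

/* Accumulate 'add' into 'len', failing if the unsigned sum wraps around.
 * Same idiom as the lz4 fix: after wraparound, len + add < len. */
static int add_len_checked(size_t *len, size_t add)
{
	if (*len > (size_t)(*len + add))
		return -1;		/* overflow detected */
	*len += add;
	return 0;
}

int main(void)
{
	size_t len = 1000;

	printf("small add: %d\n", add_len_checked(&len, 255));	/* 0: fine */

	len = SIZE_MAX - 10;
	printf("huge add:  %d\n", add_len_checked(&len, 255));	/* -1: wraps */
	return 0;
}
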
diff --git a/lib/lzo/lzo1x_decompress_safe.c b/lib/lzo/lzo1x_decompress_safe.c
index 569985d522d5..8563081e8da3 100644
--- a/lib/lzo/lzo1x_decompress_safe.c
+++ b/lib/lzo/lzo1x_decompress_safe.c
@@ -19,11 +19,31 @@
19#include <linux/lzo.h> 19#include <linux/lzo.h>
20#include "lzodefs.h" 20#include "lzodefs.h"
21 21
22#define HAVE_IP(x) ((size_t)(ip_end - ip) >= (size_t)(x)) 22#define HAVE_IP(t, x) \
23#define HAVE_OP(x) ((size_t)(op_end - op) >= (size_t)(x)) 23 (((size_t)(ip_end - ip) >= (size_t)(t + x)) && \
24#define NEED_IP(x) if (!HAVE_IP(x)) goto input_overrun 24 (((t + x) >= t) && ((t + x) >= x)))
25#define NEED_OP(x) if (!HAVE_OP(x)) goto output_overrun 25
26#define TEST_LB(m_pos) if ((m_pos) < out) goto lookbehind_overrun 26#define HAVE_OP(t, x) \
27 (((size_t)(op_end - op) >= (size_t)(t + x)) && \
28 (((t + x) >= t) && ((t + x) >= x)))
29
30#define NEED_IP(t, x) \
31 do { \
32 if (!HAVE_IP(t, x)) \
33 goto input_overrun; \
34 } while (0)
35
36#define NEED_OP(t, x) \
37 do { \
38 if (!HAVE_OP(t, x)) \
39 goto output_overrun; \
40 } while (0)
41
42#define TEST_LB(m_pos) \
43 do { \
44 if ((m_pos) < out) \
45 goto lookbehind_overrun; \
46 } while (0)
27 47
28int lzo1x_decompress_safe(const unsigned char *in, size_t in_len, 48int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
29 unsigned char *out, size_t *out_len) 49 unsigned char *out, size_t *out_len)
@@ -58,14 +78,14 @@ int lzo1x_decompress_safe(const unsigned char *in, size_t in_len,
58 while (unlikely(*ip == 0)) { 78 while (unlikely(*ip == 0)) {
59 t += 255; 79 t += 255;
60 ip++; 80 ip++;
61 NEED_IP(1); 81 NEED_IP(1, 0);
62 } 82 }
63 t += 15 + *ip++; 83 t += 15 + *ip++;
64 } 84 }
65 t += 3; 85 t += 3;
66copy_literal_run: 86copy_literal_run:
67#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) 87#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
68 if (likely(HAVE_IP(t + 15) && HAVE_OP(t + 15))) { 88 if (likely(HAVE_IP(t, 15) && HAVE_OP(t, 15))) {
69 const unsigned char *ie = ip + t; 89 const unsigned char *ie = ip + t;
70 unsigned char *oe = op + t; 90 unsigned char *oe = op + t;
71 do { 91 do {
@@ -81,8 +101,8 @@ copy_literal_run:
81 } else 101 } else
82#endif 102#endif
83 { 103 {
84 NEED_OP(t); 104 NEED_OP(t, 0);
85 NEED_IP(t + 3); 105 NEED_IP(t, 3);
86 do { 106 do {
87 *op++ = *ip++; 107 *op++ = *ip++;
88 } while (--t > 0); 108 } while (--t > 0);
@@ -95,7 +115,7 @@ copy_literal_run:
95 m_pos -= t >> 2; 115 m_pos -= t >> 2;
96 m_pos -= *ip++ << 2; 116 m_pos -= *ip++ << 2;
97 TEST_LB(m_pos); 117 TEST_LB(m_pos);
98 NEED_OP(2); 118 NEED_OP(2, 0);
99 op[0] = m_pos[0]; 119 op[0] = m_pos[0];
100 op[1] = m_pos[1]; 120 op[1] = m_pos[1];
101 op += 2; 121 op += 2;
@@ -119,10 +139,10 @@ copy_literal_run:
119 while (unlikely(*ip == 0)) { 139 while (unlikely(*ip == 0)) {
120 t += 255; 140 t += 255;
121 ip++; 141 ip++;
122 NEED_IP(1); 142 NEED_IP(1, 0);
123 } 143 }
124 t += 31 + *ip++; 144 t += 31 + *ip++;
125 NEED_IP(2); 145 NEED_IP(2, 0);
126 } 146 }
127 m_pos = op - 1; 147 m_pos = op - 1;
128 next = get_unaligned_le16(ip); 148 next = get_unaligned_le16(ip);
@@ -137,10 +157,10 @@ copy_literal_run:
137 while (unlikely(*ip == 0)) { 157 while (unlikely(*ip == 0)) {
138 t += 255; 158 t += 255;
139 ip++; 159 ip++;
140 NEED_IP(1); 160 NEED_IP(1, 0);
141 } 161 }
142 t += 7 + *ip++; 162 t += 7 + *ip++;
143 NEED_IP(2); 163 NEED_IP(2, 0);
144 } 164 }
145 next = get_unaligned_le16(ip); 165 next = get_unaligned_le16(ip);
146 ip += 2; 166 ip += 2;
@@ -154,7 +174,7 @@ copy_literal_run:
154#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) 174#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
155 if (op - m_pos >= 8) { 175 if (op - m_pos >= 8) {
156 unsigned char *oe = op + t; 176 unsigned char *oe = op + t;
157 if (likely(HAVE_OP(t + 15))) { 177 if (likely(HAVE_OP(t, 15))) {
158 do { 178 do {
159 COPY8(op, m_pos); 179 COPY8(op, m_pos);
160 op += 8; 180 op += 8;
@@ -164,7 +184,7 @@ copy_literal_run:
164 m_pos += 8; 184 m_pos += 8;
165 } while (op < oe); 185 } while (op < oe);
166 op = oe; 186 op = oe;
167 if (HAVE_IP(6)) { 187 if (HAVE_IP(6, 0)) {
168 state = next; 188 state = next;
169 COPY4(op, ip); 189 COPY4(op, ip);
170 op += next; 190 op += next;
@@ -172,7 +192,7 @@ copy_literal_run:
172 continue; 192 continue;
173 } 193 }
174 } else { 194 } else {
175 NEED_OP(t); 195 NEED_OP(t, 0);
176 do { 196 do {
177 *op++ = *m_pos++; 197 *op++ = *m_pos++;
178 } while (op < oe); 198 } while (op < oe);
@@ -181,7 +201,7 @@ copy_literal_run:
181#endif 201#endif
182 { 202 {
183 unsigned char *oe = op + t; 203 unsigned char *oe = op + t;
184 NEED_OP(t); 204 NEED_OP(t, 0);
185 op[0] = m_pos[0]; 205 op[0] = m_pos[0];
186 op[1] = m_pos[1]; 206 op[1] = m_pos[1];
187 op += 2; 207 op += 2;
@@ -194,15 +214,15 @@ match_next:
194 state = next; 214 state = next;
195 t = next; 215 t = next;
196#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) 216#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
197 if (likely(HAVE_IP(6) && HAVE_OP(4))) { 217 if (likely(HAVE_IP(6, 0) && HAVE_OP(4, 0))) {
198 COPY4(op, ip); 218 COPY4(op, ip);
199 op += t; 219 op += t;
200 ip += t; 220 ip += t;
201 } else 221 } else
202#endif 222#endif
203 { 223 {
204 NEED_IP(t + 3); 224 NEED_IP(t, 3);
205 NEED_OP(t); 225 NEED_OP(t, 0);
206 while (t > 0) { 226 while (t > 0) {
207 *op++ = *ip++; 227 *op++ = *ip++;
208 t--; 228 t--;
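
Splitting HAVE_IP(x) into HAVE_IP(t, x) lets the macro check two things on attacker-controlled lengths: that t + x does not wrap, and that the un-wrapped sum fits in the remaining buffer. A hedged standalone version of that check (have_bytes is an invented helper, not the lzo macro itself):

#include <stddef.h>
#include <stdio.h>

/* Overflow-safe "do we have t + x bytes left?" check, mirroring the new
 * HAVE_IP(t, x): the sum must not wrap, and must fit in what remains. */
static int have_bytes(const unsigned char *cur, const unsigned char *end,
		      size_t t, size_t x)
{
	size_t need = t + x;

	if (need < t || need < x)		/* t + x wrapped around */
		return 0;
	return (size_t)(end - cur) >= need;
}

int main(void)
{
	unsigned char buf[64];

	printf("%d\n", have_bytes(buf, buf + 64, 10, 15));	   /* 1 */
	printf("%d\n", have_bytes(buf, buf + 64, 60, 15));	   /* 0: too long */
	printf("%d\n", have_bytes(buf, buf + 64, (size_t)-8, 15)); /* 0: wraps */
	return 0;
}
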
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 649d097853a1..4abda074ea45 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -86,6 +86,7 @@ static unsigned int io_tlb_index;
86 * We need to save away the original address corresponding to a mapped entry 86 * We need to save away the original address corresponding to a mapped entry
87 * for the sync operations. 87 * for the sync operations.
88 */ 88 */
89#define INVALID_PHYS_ADDR (~(phys_addr_t)0)
89static phys_addr_t *io_tlb_orig_addr; 90static phys_addr_t *io_tlb_orig_addr;
90 91
91/* 92/*
@@ -188,12 +189,14 @@ int __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
188 io_tlb_list = memblock_virt_alloc( 189 io_tlb_list = memblock_virt_alloc(
189 PAGE_ALIGN(io_tlb_nslabs * sizeof(int)), 190 PAGE_ALIGN(io_tlb_nslabs * sizeof(int)),
190 PAGE_SIZE); 191 PAGE_SIZE);
191 for (i = 0; i < io_tlb_nslabs; i++)
192 io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
193 io_tlb_index = 0;
194 io_tlb_orig_addr = memblock_virt_alloc( 192 io_tlb_orig_addr = memblock_virt_alloc(
195 PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)), 193 PAGE_ALIGN(io_tlb_nslabs * sizeof(phys_addr_t)),
196 PAGE_SIZE); 194 PAGE_SIZE);
195 for (i = 0; i < io_tlb_nslabs; i++) {
196 io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
197 io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
198 }
199 io_tlb_index = 0;
197 200
198 if (verbose) 201 if (verbose)
199 swiotlb_print_info(); 202 swiotlb_print_info();
@@ -313,10 +316,6 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
313 if (!io_tlb_list) 316 if (!io_tlb_list)
314 goto cleanup3; 317 goto cleanup3;
315 318
316 for (i = 0; i < io_tlb_nslabs; i++)
317 io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
318 io_tlb_index = 0;
319
320 io_tlb_orig_addr = (phys_addr_t *) 319 io_tlb_orig_addr = (phys_addr_t *)
321 __get_free_pages(GFP_KERNEL, 320 __get_free_pages(GFP_KERNEL,
322 get_order(io_tlb_nslabs * 321 get_order(io_tlb_nslabs *
@@ -324,7 +323,11 @@ swiotlb_late_init_with_tbl(char *tlb, unsigned long nslabs)
324 if (!io_tlb_orig_addr) 323 if (!io_tlb_orig_addr)
325 goto cleanup4; 324 goto cleanup4;
326 325
327 memset(io_tlb_orig_addr, 0, io_tlb_nslabs * sizeof(phys_addr_t)); 326 for (i = 0; i < io_tlb_nslabs; i++) {
327 io_tlb_list[i] = IO_TLB_SEGSIZE - OFFSET(i, IO_TLB_SEGSIZE);
328 io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
329 }
330 io_tlb_index = 0;
328 331
329 swiotlb_print_info(); 332 swiotlb_print_info();
330 333
@@ -556,7 +559,8 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
556 /* 559 /*
557 * First, sync the memory before unmapping the entry 560 * First, sync the memory before unmapping the entry
558 */ 561 */
559 if (orig_addr && ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL))) 562 if (orig_addr != INVALID_PHYS_ADDR &&
563 ((dir == DMA_FROM_DEVICE) || (dir == DMA_BIDIRECTIONAL)))
560 swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE); 564 swiotlb_bounce(orig_addr, tlb_addr, size, DMA_FROM_DEVICE);
561 565
562 /* 566 /*
@@ -573,8 +577,10 @@ void swiotlb_tbl_unmap_single(struct device *hwdev, phys_addr_t tlb_addr,
573 * Step 1: return the slots to the free list, merging the 577 * Step 1: return the slots to the free list, merging the
574 * slots with superceeding slots 578 * slots with superceeding slots
575 */ 579 */
576 for (i = index + nslots - 1; i >= index; i--) 580 for (i = index + nslots - 1; i >= index; i--) {
577 io_tlb_list[i] = ++count; 581 io_tlb_list[i] = ++count;
582 io_tlb_orig_addr[i] = INVALID_PHYS_ADDR;
583 }
578 /* 584 /*
579 * Step 2: merge the returned slots with the preceding slots, 585 * Step 2: merge the returned slots with the preceding slots,
580 * if available (non zero) 586 * if available (non zero)
@@ -593,6 +599,8 @@ void swiotlb_tbl_sync_single(struct device *hwdev, phys_addr_t tlb_addr,
593 int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT; 599 int index = (tlb_addr - io_tlb_start) >> IO_TLB_SHIFT;
594 phys_addr_t orig_addr = io_tlb_orig_addr[index]; 600 phys_addr_t orig_addr = io_tlb_orig_addr[index];
595 601
602 if (orig_addr == INVALID_PHYS_ADDR)
603 return;
596 orig_addr += (unsigned long)tlb_addr & ((1 << IO_TLB_SHIFT) - 1); 604 orig_addr += (unsigned long)tlb_addr & ((1 << IO_TLB_SHIFT) - 1);
597 605
598 switch (target) { 606 switch (target) {
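
The patch replaces the implicit "orig_addr == 0 means unused slot" assumption with an explicit all-ones INVALID_PHYS_ADDR sentinel, since physical address zero can be a legitimate mapping; slots are reset to the sentinel when freed and sync is skipped for sentinel slots. A small sketch of the sentinel pattern (the slot table and function names below are invented):

#include <stdint.h>
#include <stdio.h>

typedef uint64_t phys_addr_t;
#define INVALID_PHYS_ADDR (~(phys_addr_t)0)
#define NSLOTS 4

/* Toy slot table, not the real swiotlb arrays. */
static phys_addr_t orig_addr[NSLOTS];

static void slots_init(void)
{
	for (int i = 0; i < NSLOTS; i++)
		orig_addr[i] = INVALID_PHYS_ADDR;	/* nothing mapped yet */
}

static void slot_sync(int i)
{
	/* Skip slots that were never mapped.  Address 0 is NOT the marker,
	 * so a bounce buffer for physical page 0 still syncs correctly. */
	if (orig_addr[i] == INVALID_PHYS_ADDR) {
		printf("slot %d: unused, skip\n", i);
		return;
	}
	printf("slot %d: sync back to %#llx\n", i,
	       (unsigned long long)orig_addr[i]);
}

int main(void)
{
	slots_init();
	orig_addr[1] = 0x0;		/* valid mapping at physical 0 */
	orig_addr[2] = 0x1000;
	for (int i = 0; i < NSLOTS; i++)
		slot_sync(i);
	return 0;
}
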
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index e60837dc785c..33514d88fef9 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -941,6 +941,37 @@ unlock:
941 spin_unlock(ptl); 941 spin_unlock(ptl);
942} 942}
943 943
944/*
945 * Save CONFIG_DEBUG_PAGEALLOC from faulting falsely on tail pages
946 * during copy_user_huge_page()'s copy_page_rep(): in the case when
947 * the source page gets split and a tail freed before copy completes.
948 * Called under pmd_lock of checked pmd, so safe from splitting itself.
949 */
950static void get_user_huge_page(struct page *page)
951{
952 if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC)) {
953 struct page *endpage = page + HPAGE_PMD_NR;
954
955 atomic_add(HPAGE_PMD_NR, &page->_count);
956 while (++page < endpage)
957 get_huge_page_tail(page);
958 } else {
959 get_page(page);
960 }
961}
962
963static void put_user_huge_page(struct page *page)
964{
965 if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC)) {
966 struct page *endpage = page + HPAGE_PMD_NR;
967
968 while (page < endpage)
969 put_page(page++);
970 } else {
971 put_page(page);
972 }
973}
974
944static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm, 975static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
945 struct vm_area_struct *vma, 976 struct vm_area_struct *vma,
946 unsigned long address, 977 unsigned long address,
@@ -1074,7 +1105,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
1074 ret |= VM_FAULT_WRITE; 1105 ret |= VM_FAULT_WRITE;
1075 goto out_unlock; 1106 goto out_unlock;
1076 } 1107 }
1077 get_page(page); 1108 get_user_huge_page(page);
1078 spin_unlock(ptl); 1109 spin_unlock(ptl);
1079alloc: 1110alloc:
1080 if (transparent_hugepage_enabled(vma) && 1111 if (transparent_hugepage_enabled(vma) &&
@@ -1095,7 +1126,7 @@ alloc:
1095 split_huge_page(page); 1126 split_huge_page(page);
1096 ret |= VM_FAULT_FALLBACK; 1127 ret |= VM_FAULT_FALLBACK;
1097 } 1128 }
1098 put_page(page); 1129 put_user_huge_page(page);
1099 } 1130 }
1100 count_vm_event(THP_FAULT_FALLBACK); 1131 count_vm_event(THP_FAULT_FALLBACK);
1101 goto out; 1132 goto out;
@@ -1105,7 +1136,7 @@ alloc:
1105 put_page(new_page); 1136 put_page(new_page);
1106 if (page) { 1137 if (page) {
1107 split_huge_page(page); 1138 split_huge_page(page);
1108 put_page(page); 1139 put_user_huge_page(page);
1109 } else 1140 } else
1110 split_huge_page_pmd(vma, address, pmd); 1141 split_huge_page_pmd(vma, address, pmd);
1111 ret |= VM_FAULT_FALLBACK; 1142 ret |= VM_FAULT_FALLBACK;
@@ -1127,7 +1158,7 @@ alloc:
1127 1158
1128 spin_lock(ptl); 1159 spin_lock(ptl);
1129 if (page) 1160 if (page)
1130 put_page(page); 1161 put_user_huge_page(page);
1131 if (unlikely(!pmd_same(*pmd, orig_pmd))) { 1162 if (unlikely(!pmd_same(*pmd, orig_pmd))) {
1132 spin_unlock(ptl); 1163 spin_unlock(ptl);
1133 mem_cgroup_uncharge_page(new_page); 1164 mem_cgroup_uncharge_page(new_page);
@@ -2392,8 +2423,6 @@ static void collapse_huge_page(struct mm_struct *mm,
2392 pmd = mm_find_pmd(mm, address); 2423 pmd = mm_find_pmd(mm, address);
2393 if (!pmd) 2424 if (!pmd)
2394 goto out; 2425 goto out;
2395 if (pmd_trans_huge(*pmd))
2396 goto out;
2397 2426
2398 anon_vma_lock_write(vma->anon_vma); 2427 anon_vma_lock_write(vma->anon_vma);
2399 2428
@@ -2492,8 +2521,6 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
2492 pmd = mm_find_pmd(mm, address); 2521 pmd = mm_find_pmd(mm, address);
2493 if (!pmd) 2522 if (!pmd)
2494 goto out; 2523 goto out;
2495 if (pmd_trans_huge(*pmd))
2496 goto out;
2497 2524
2498 memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load)); 2525 memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
2499 pte = pte_offset_map_lock(mm, pmd, address, &ptl); 2526 pte = pte_offset_map_lock(mm, pmd, address, &ptl);
@@ -2846,12 +2873,22 @@ void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address,
2846static void split_huge_page_address(struct mm_struct *mm, 2873static void split_huge_page_address(struct mm_struct *mm,
2847 unsigned long address) 2874 unsigned long address)
2848{ 2875{
2876 pgd_t *pgd;
2877 pud_t *pud;
2849 pmd_t *pmd; 2878 pmd_t *pmd;
2850 2879
2851 VM_BUG_ON(!(address & ~HPAGE_PMD_MASK)); 2880 VM_BUG_ON(!(address & ~HPAGE_PMD_MASK));
2852 2881
2853 pmd = mm_find_pmd(mm, address); 2882 pgd = pgd_offset(mm, address);
2854 if (!pmd) 2883 if (!pgd_present(*pgd))
2884 return;
2885
2886 pud = pud_offset(pgd, address);
2887 if (!pud_present(*pud))
2888 return;
2889
2890 pmd = pmd_offset(pud, address);
2891 if (!pmd_present(*pmd))
2855 return; 2892 return;
2856 /* 2893 /*
2857 * Caller holds the mmap_sem write mode, so a huge pmd cannot 2894 * Caller holds the mmap_sem write mode, so a huge pmd cannot
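
get_user_huge_page()/put_user_huge_page() use IS_ENABLED() so the DEBUG_PAGEALLOC-only behaviour (pin every tail page for the duration of the copy) is always compiled and type-checked, yet folds away to the plain get_page()/put_page() path when the option is off. A userspace sketch of that shape, with a stand-in IS_ENABLED and a toy refcount array (HPAGE_NR and all names here are illustrative):

#include <stdio.h>

/* Stand-in for the kernel's IS_ENABLED(): here a plain constant; in the
 * kernel it expands CONFIG_* symbols to 0 or 1 at compile time. */
#define IS_ENABLED(x) (x)
#define CONFIG_DEBUG_PAGEALLOC_SKETCH 1
#define HPAGE_NR 8			/* toy huge page: 8 subpages */

static int refcount[HPAGE_NR];

/* Debug builds pin every tail page so a concurrent split cannot free a
 * tail mid-copy; normal builds pin only the head. */
static void get_user_huge_page_sketch(void)
{
	if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_SKETCH)) {
		for (int i = 0; i < HPAGE_NR; i++)
			refcount[i]++;
	} else {
		refcount[0]++;
	}
}

static void put_user_huge_page_sketch(void)
{
	if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC_SKETCH)) {
		for (int i = 0; i < HPAGE_NR; i++)
			refcount[i]--;
	} else {
		refcount[0]--;
	}
}

int main(void)
{
	get_user_huge_page_sketch();
	printf("head=%d tail1=%d\n", refcount[0], refcount[1]);
	put_user_huge_page_sketch();
	printf("head=%d tail1=%d\n", refcount[0], refcount[1]);
	return 0;
}
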
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 226910cb7c9b..2024bbd573d2 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2520,6 +2520,31 @@ static void set_huge_ptep_writable(struct vm_area_struct *vma,
2520 update_mmu_cache(vma, address, ptep); 2520 update_mmu_cache(vma, address, ptep);
2521} 2521}
2522 2522
2523static int is_hugetlb_entry_migration(pte_t pte)
2524{
2525 swp_entry_t swp;
2526
2527 if (huge_pte_none(pte) || pte_present(pte))
2528 return 0;
2529 swp = pte_to_swp_entry(pte);
2530 if (non_swap_entry(swp) && is_migration_entry(swp))
2531 return 1;
2532 else
2533 return 0;
2534}
2535
2536static int is_hugetlb_entry_hwpoisoned(pte_t pte)
2537{
2538 swp_entry_t swp;
2539
2540 if (huge_pte_none(pte) || pte_present(pte))
2541 return 0;
2542 swp = pte_to_swp_entry(pte);
2543 if (non_swap_entry(swp) && is_hwpoison_entry(swp))
2544 return 1;
2545 else
2546 return 0;
2547}
2523 2548
2524int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src, 2549int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
2525 struct vm_area_struct *vma) 2550 struct vm_area_struct *vma)
@@ -2559,10 +2584,26 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
2559 dst_ptl = huge_pte_lock(h, dst, dst_pte); 2584 dst_ptl = huge_pte_lock(h, dst, dst_pte);
2560 src_ptl = huge_pte_lockptr(h, src, src_pte); 2585 src_ptl = huge_pte_lockptr(h, src, src_pte);
2561 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); 2586 spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING);
2562 if (!huge_pte_none(huge_ptep_get(src_pte))) { 2587 entry = huge_ptep_get(src_pte);
2588 if (huge_pte_none(entry)) { /* skip none entry */
2589 ;
2590 } else if (unlikely(is_hugetlb_entry_migration(entry) ||
2591 is_hugetlb_entry_hwpoisoned(entry))) {
2592 swp_entry_t swp_entry = pte_to_swp_entry(entry);
2593
2594 if (is_write_migration_entry(swp_entry) && cow) {
2595 /*
2596 * COW mappings require pages in both
2597 * parent and child to be set to read.
2598 */
2599 make_migration_entry_read(&swp_entry);
2600 entry = swp_entry_to_pte(swp_entry);
2601 set_huge_pte_at(src, addr, src_pte, entry);
2602 }
2603 set_huge_pte_at(dst, addr, dst_pte, entry);
2604 } else {
2563 if (cow) 2605 if (cow)
2564 huge_ptep_set_wrprotect(src, addr, src_pte); 2606 huge_ptep_set_wrprotect(src, addr, src_pte);
2565 entry = huge_ptep_get(src_pte);
2566 ptepage = pte_page(entry); 2607 ptepage = pte_page(entry);
2567 get_page(ptepage); 2608 get_page(ptepage);
2568 page_dup_rmap(ptepage); 2609 page_dup_rmap(ptepage);
@@ -2578,32 +2619,6 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
2578 return ret; 2619 return ret;
2579} 2620}
2580 2621
2581static int is_hugetlb_entry_migration(pte_t pte)
2582{
2583 swp_entry_t swp;
2584
2585 if (huge_pte_none(pte) || pte_present(pte))
2586 return 0;
2587 swp = pte_to_swp_entry(pte);
2588 if (non_swap_entry(swp) && is_migration_entry(swp))
2589 return 1;
2590 else
2591 return 0;
2592}
2593
2594static int is_hugetlb_entry_hwpoisoned(pte_t pte)
2595{
2596 swp_entry_t swp;
2597
2598 if (huge_pte_none(pte) || pte_present(pte))
2599 return 0;
2600 swp = pte_to_swp_entry(pte);
2601 if (non_swap_entry(swp) && is_hwpoison_entry(swp))
2602 return 1;
2603 else
2604 return 0;
2605}
2606
2607void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma, 2622void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
2608 unsigned long start, unsigned long end, 2623 unsigned long start, unsigned long end,
2609 struct page *ref_page) 2624 struct page *ref_page)
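
copy_hugetlb_page_range() now branches on what the source entry actually is: none entries are skipped, migration and hwpoison swap entries are copied verbatim (with a writable migration entry downgraded to read-only when the mapping is COW), and only genuinely present entries take a page reference. The toy below models just that decision tree; the bit encoding and names are invented and bear no relation to the real huge PTE layout.

#include <stdio.h>

/* Invented toy encoding: bit 0 = present, bit 1 = migration entry,
 * bit 2 = hwpoison entry, bit 3 = writable. */
enum { P_PRESENT = 1, P_MIGRATION = 2, P_HWPOISON = 4, P_WRITE = 8 };

static void copy_entry(unsigned int src, int cow, unsigned int *dst)
{
	if (src == 0) {				/* none: nothing to copy */
		*dst = 0;
	} else if (src & (P_MIGRATION | P_HWPOISON)) {
		unsigned int e = src;

		/* COW requires both sides read-only, so strip write from a
		 * writable migration entry before duplicating it. */
		if ((e & P_MIGRATION) && (e & P_WRITE) && cow)
			e &= ~P_WRITE;
		*dst = e;
	} else {				/* present page */
		unsigned int e = src;

		if (cow)
			e &= ~P_WRITE;		/* write-protect both sides */
		*dst = e;
		/* get_page()/page_dup_rmap() would happen here */
	}
}

int main(void)
{
	unsigned int dst;

	copy_entry(P_MIGRATION | P_WRITE, 1, &dst);
	printf("migration under cow -> %#x (write bit cleared)\n", dst);

	copy_entry(P_PRESENT | P_WRITE, 1, &dst);
	printf("present under cow   -> %#x\n", dst);
	return 0;
}
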
diff --git a/mm/ksm.c b/mm/ksm.c
index 68710e80994a..346ddc9e4c0d 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -945,7 +945,6 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
945 pmd = mm_find_pmd(mm, addr); 945 pmd = mm_find_pmd(mm, addr);
946 if (!pmd) 946 if (!pmd)
947 goto out; 947 goto out;
948 BUG_ON(pmd_trans_huge(*pmd));
949 948
950 mmun_start = addr; 949 mmun_start = addr;
951 mmun_end = addr + PAGE_SIZE; 950 mmun_end = addr + PAGE_SIZE;
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index cd8989c1027e..c6399e328931 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -895,7 +895,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
895 struct page *hpage = *hpagep; 895 struct page *hpage = *hpagep;
896 struct page *ppage; 896 struct page *ppage;
897 897
898 if (PageReserved(p) || PageSlab(p)) 898 if (PageReserved(p) || PageSlab(p) || !PageLRU(p))
899 return SWAP_SUCCESS; 899 return SWAP_SUCCESS;
900 900
901 /* 901 /*
@@ -1159,9 +1159,6 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
1159 action_result(pfn, "free buddy, 2nd try", DELAYED); 1159 action_result(pfn, "free buddy, 2nd try", DELAYED);
1160 return 0; 1160 return 0;
1161 } 1161 }
1162 action_result(pfn, "non LRU", IGNORED);
1163 put_page(p);
1164 return -EBUSY;
1165 } 1162 }
1166 } 1163 }
1167 1164
@@ -1194,6 +1191,9 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
1194 return 0; 1191 return 0;
1195 } 1192 }
1196 1193
1194 if (!PageHuge(p) && !PageTransTail(p) && !PageLRU(p))
1195 goto identify_page_state;
1196
1197 /* 1197 /*
1198 * For error on the tail page, we should set PG_hwpoison 1198 * For error on the tail page, we should set PG_hwpoison
1199 * on the head page to show that the hugepage is hwpoisoned 1199 * on the head page to show that the hugepage is hwpoisoned
@@ -1243,6 +1243,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
1243 goto out; 1243 goto out;
1244 } 1244 }
1245 1245
1246identify_page_state:
1246 res = -EBUSY; 1247 res = -EBUSY;
1247 /* 1248 /*
1248 * The first check uses the current page flags which may not have any 1249 * The first check uses the current page flags which may not have any
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 284974230459..8f5330d74f47 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -656,19 +656,18 @@ static unsigned long change_prot_numa(struct vm_area_struct *vma,
656 * @nodes and @flags,) it's isolated and queued to the pagelist which is 656 * @nodes and @flags,) it's isolated and queued to the pagelist which is
657 * passed via @private.) 657 * passed via @private.)
658 */ 658 */
659static struct vm_area_struct * 659static int
660queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end, 660queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
661 const nodemask_t *nodes, unsigned long flags, void *private) 661 const nodemask_t *nodes, unsigned long flags, void *private)
662{ 662{
663 int err; 663 int err = 0;
664 struct vm_area_struct *first, *vma, *prev; 664 struct vm_area_struct *vma, *prev;
665
666 665
667 first = find_vma(mm, start); 666 vma = find_vma(mm, start);
668 if (!first) 667 if (!vma)
669 return ERR_PTR(-EFAULT); 668 return -EFAULT;
670 prev = NULL; 669 prev = NULL;
671 for (vma = first; vma && vma->vm_start < end; vma = vma->vm_next) { 670 for (; vma && vma->vm_start < end; vma = vma->vm_next) {
672 unsigned long endvma = vma->vm_end; 671 unsigned long endvma = vma->vm_end;
673 672
674 if (endvma > end) 673 if (endvma > end)
@@ -678,9 +677,9 @@ queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
678 677
679 if (!(flags & MPOL_MF_DISCONTIG_OK)) { 678 if (!(flags & MPOL_MF_DISCONTIG_OK)) {
680 if (!vma->vm_next && vma->vm_end < end) 679 if (!vma->vm_next && vma->vm_end < end)
681 return ERR_PTR(-EFAULT); 680 return -EFAULT;
682 if (prev && prev->vm_end < vma->vm_start) 681 if (prev && prev->vm_end < vma->vm_start)
683 return ERR_PTR(-EFAULT); 682 return -EFAULT;
684 } 683 }
685 684
686 if (flags & MPOL_MF_LAZY) { 685 if (flags & MPOL_MF_LAZY) {
@@ -694,15 +693,13 @@ queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
694 693
695 err = queue_pages_pgd_range(vma, start, endvma, nodes, 694 err = queue_pages_pgd_range(vma, start, endvma, nodes,
696 flags, private); 695 flags, private);
697 if (err) { 696 if (err)
698 first = ERR_PTR(err);
699 break; 697 break;
700 }
701 } 698 }
702next: 699next:
703 prev = vma; 700 prev = vma;
704 } 701 }
705 return first; 702 return err;
706} 703}
707 704
708/* 705/*
@@ -1156,16 +1153,17 @@ out:
1156 1153
1157/* 1154/*
1158 * Allocate a new page for page migration based on vma policy. 1155 * Allocate a new page for page migration based on vma policy.
1159 * Start assuming that page is mapped by vma pointed to by @private. 1156 * Start by assuming the page is mapped by the same vma as contains @start.
1160 * Search forward from there, if not. N.B., this assumes that the 1157 * Search forward from there, if not. N.B., this assumes that the
1161 * list of pages handed to migrate_pages()--which is how we get here-- 1158 * list of pages handed to migrate_pages()--which is how we get here--
1162 * is in virtual address order. 1159 * is in virtual address order.
1163 */ 1160 */
1164static struct page *new_vma_page(struct page *page, unsigned long private, int **x) 1161static struct page *new_page(struct page *page, unsigned long start, int **x)
1165{ 1162{
1166 struct vm_area_struct *vma = (struct vm_area_struct *)private; 1163 struct vm_area_struct *vma;
1167 unsigned long uninitialized_var(address); 1164 unsigned long uninitialized_var(address);
1168 1165
1166 vma = find_vma(current->mm, start);
1169 while (vma) { 1167 while (vma) {
1170 address = page_address_in_vma(page, vma); 1168 address = page_address_in_vma(page, vma);
1171 if (address != -EFAULT) 1169 if (address != -EFAULT)
@@ -1195,7 +1193,7 @@ int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1195 return -ENOSYS; 1193 return -ENOSYS;
1196} 1194}
1197 1195
1198static struct page *new_vma_page(struct page *page, unsigned long private, int **x) 1196static struct page *new_page(struct page *page, unsigned long start, int **x)
1199{ 1197{
1200 return NULL; 1198 return NULL;
1201} 1199}
@@ -1205,7 +1203,6 @@ static long do_mbind(unsigned long start, unsigned long len,
1205 unsigned short mode, unsigned short mode_flags, 1203 unsigned short mode, unsigned short mode_flags,
1206 nodemask_t *nmask, unsigned long flags) 1204 nodemask_t *nmask, unsigned long flags)
1207{ 1205{
1208 struct vm_area_struct *vma;
1209 struct mm_struct *mm = current->mm; 1206 struct mm_struct *mm = current->mm;
1210 struct mempolicy *new; 1207 struct mempolicy *new;
1211 unsigned long end; 1208 unsigned long end;
@@ -1271,11 +1268,9 @@ static long do_mbind(unsigned long start, unsigned long len,
1271 if (err) 1268 if (err)
1272 goto mpol_out; 1269 goto mpol_out;
1273 1270
1274 vma = queue_pages_range(mm, start, end, nmask, 1271 err = queue_pages_range(mm, start, end, nmask,
1275 flags | MPOL_MF_INVERT, &pagelist); 1272 flags | MPOL_MF_INVERT, &pagelist);
1276 1273 if (!err)
1277 err = PTR_ERR(vma); /* maybe ... */
1278 if (!IS_ERR(vma))
1279 err = mbind_range(mm, start, end, new); 1274 err = mbind_range(mm, start, end, new);
1280 1275
1281 if (!err) { 1276 if (!err) {
@@ -1283,9 +1278,8 @@ static long do_mbind(unsigned long start, unsigned long len,
1283 1278
1284 if (!list_empty(&pagelist)) { 1279 if (!list_empty(&pagelist)) {
1285 WARN_ON_ONCE(flags & MPOL_MF_LAZY); 1280 WARN_ON_ONCE(flags & MPOL_MF_LAZY);
1286 nr_failed = migrate_pages(&pagelist, new_vma_page, 1281 nr_failed = migrate_pages(&pagelist, new_page, NULL,
1287 NULL, (unsigned long)vma, 1282 start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
1288 MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
1289 if (nr_failed) 1283 if (nr_failed)
1290 putback_movable_pages(&pagelist); 1284 putback_movable_pages(&pagelist);
1291 } 1285 }
@@ -2145,7 +2139,6 @@ struct mempolicy *__mpol_dup(struct mempolicy *old)
2145 } else 2139 } else
2146 *new = *old; 2140 *new = *old;
2147 2141
2148 rcu_read_lock();
2149 if (current_cpuset_is_being_rebound()) { 2142 if (current_cpuset_is_being_rebound()) {
2150 nodemask_t mems = cpuset_mems_allowed(current); 2143 nodemask_t mems = cpuset_mems_allowed(current);
2151 if (new->flags & MPOL_F_REBINDING) 2144 if (new->flags & MPOL_F_REBINDING)
@@ -2153,7 +2146,6 @@ struct mempolicy *__mpol_dup(struct mempolicy *old)
2153 else 2146 else
2154 mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE); 2147 mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
2155 } 2148 }
2156 rcu_read_unlock();
2157 atomic_set(&new->refcnt, 1); 2149 atomic_set(&new->refcnt, 1);
2158 return new; 2150 return new;
2159} 2151}
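
The mbind() change stops passing a vm_area_struct pointer through migrate_pages() (mbind_range() may merge or free that vma in the meantime) and instead passes the start address, from which new_page() re-resolves the vma with find_vma(). Below is a hedged sketch of that "hand the callback a lookup key, not a raw pointer" idea, using an invented range table rather than real vmas.

#include <stdio.h>

struct range { unsigned long start, end; const char *name; };

/* Toy "mm": a table that may be rebuilt between queueing and migration,
 * which is why callbacks get a key (an address) instead of a pointer. */
static struct range table[3];

static struct range *find_range(unsigned long addr)
{
	for (int i = 0; i < 3; i++)
		if (addr >= table[i].start && addr < table[i].end)
			return &table[i];
	return NULL;
}

/* Equivalent of new_page(page, start, ...): re-resolve from the key. */
static void alloc_for(unsigned long start)
{
	struct range *r = find_range(start);

	printf("allocating near %s\n", r ? r->name : "(nowhere)");
}

int main(void)
{
	unsigned long start = 0x2400;

	table[0] = (struct range){ 0x1000, 0x2000, "vma A" };
	table[1] = (struct range){ 0x2000, 0x5000, "vma B" };

	/* ... meanwhile the table is edited (ranges split/merged) ... */
	table[1] = (struct range){ 0x2000, 0x3000, "vma B'" };
	table[2] = (struct range){ 0x3000, 0x5000, "vma C" };

	alloc_for(start);	/* still resolves correctly: vma B' */
	return 0;
}
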
diff --git a/mm/migrate.c b/mm/migrate.c
index 63f0cd559999..9e0beaa91845 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -120,8 +120,6 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma,
120 pmd = mm_find_pmd(mm, addr); 120 pmd = mm_find_pmd(mm, addr);
121 if (!pmd) 121 if (!pmd)
122 goto out; 122 goto out;
123 if (pmd_trans_huge(*pmd))
124 goto out;
125 123
126 ptep = pte_offset_map(pmd, addr); 124 ptep = pte_offset_map(pmd, addr);
127 125
diff --git a/mm/msync.c b/mm/msync.c
index a5c673669ca6..992a1673d488 100644
--- a/mm/msync.c
+++ b/mm/msync.c
@@ -78,7 +78,8 @@ SYSCALL_DEFINE3(msync, unsigned long, start, size_t, len, int, flags)
78 goto out_unlock; 78 goto out_unlock;
79 } 79 }
80 file = vma->vm_file; 80 file = vma->vm_file;
81 fstart = start + ((loff_t)vma->vm_pgoff << PAGE_SHIFT); 81 fstart = (start - vma->vm_start) +
82 ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
82 fend = fstart + (min(end, vma->vm_end) - start) - 1; 83 fend = fstart + (min(end, vma->vm_end) - start) - 1;
83 start = vma->vm_end; 84 start = vma->vm_end;
84 if ((flags & MS_SYNC) && file && 85 if ((flags & MS_SYNC) && file &&
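
The msync fix derives the file offset from the position inside the mapping, not from the raw virtual address: fstart = (start - vma->vm_start) + (vm_pgoff << PAGE_SHIFT). A short worked example with made-up numbers (4 KiB pages assumed):

#include <stdio.h>

#define PAGE_SHIFT 12	/* 4 KiB pages assumed */

int main(void)
{
	unsigned long vm_start = 0x40000000UL;		/* mapping base (made up) */
	unsigned long vm_pgoff = 3;			/* vma starts at file page 3 */
	unsigned long start    = vm_start + 0x5000;	/* address passed to msync */

	/* Pre-fix formula: the virtual address leaks into the file offset. */
	unsigned long bad  = start + (vm_pgoff << PAGE_SHIFT);

	/* Fixed formula: offset of 'start' within the vma, plus where the
	 * vma begins in the file: 0x5000 + 3 * 0x1000 = 0x8000. */
	unsigned long good = (start - vm_start) + (vm_pgoff << PAGE_SHIFT);

	printf("bad  fstart = %#lx\n", bad);	/* 0x40008000 */
	printf("good fstart = %#lx\n", good);	/* 0x8000 */
	return 0;
}
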
diff --git a/mm/nommu.c b/mm/nommu.c
index b78e3a8f5ee7..4a852f6c5709 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -786,7 +786,7 @@ static void delete_vma_from_mm(struct vm_area_struct *vma)
786 for (i = 0; i < VMACACHE_SIZE; i++) { 786 for (i = 0; i < VMACACHE_SIZE; i++) {
787 /* if the vma is cached, invalidate the entire cache */ 787 /* if the vma is cached, invalidate the entire cache */
788 if (curr->vmacache[i] == vma) { 788 if (curr->vmacache[i] == vma) {
789 vmacache_invalidate(curr->mm); 789 vmacache_invalidate(mm);
790 break; 790 break;
791 } 791 }
792 } 792 }
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 4f59fa29eda8..0ea758b898fd 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -69,6 +69,7 @@
69 69
70/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */ 70/* prevent >1 _updater_ of zone percpu pageset ->high and ->batch fields */
71static DEFINE_MUTEX(pcp_batch_high_lock); 71static DEFINE_MUTEX(pcp_batch_high_lock);
72#define MIN_PERCPU_PAGELIST_FRACTION (8)
72 73
73#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID 74#ifdef CONFIG_USE_PERCPU_NUMA_NODE_ID
74DEFINE_PER_CPU(int, numa_node); 75DEFINE_PER_CPU(int, numa_node);
@@ -815,9 +816,21 @@ void __init init_cma_reserved_pageblock(struct page *page)
815 set_page_count(p, 0); 816 set_page_count(p, 0);
816 } while (++p, --i); 817 } while (++p, --i);
817 818
818 set_page_refcounted(page);
819 set_pageblock_migratetype(page, MIGRATE_CMA); 819 set_pageblock_migratetype(page, MIGRATE_CMA);
820 __free_pages(page, pageblock_order); 820
821 if (pageblock_order >= MAX_ORDER) {
822 i = pageblock_nr_pages;
823 p = page;
824 do {
825 set_page_refcounted(p);
826 __free_pages(p, MAX_ORDER - 1);
827 p += MAX_ORDER_NR_PAGES;
828 } while (i -= MAX_ORDER_NR_PAGES);
829 } else {
830 set_page_refcounted(page);
831 __free_pages(page, pageblock_order);
832 }
833
821 adjust_managed_page_count(page, pageblock_nr_pages); 834 adjust_managed_page_count(page, pageblock_nr_pages);
822} 835}
823#endif 836#endif
@@ -4145,7 +4158,7 @@ static void __meminit zone_init_free_lists(struct zone *zone)
4145 memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY) 4158 memmap_init_zone((size), (nid), (zone), (start_pfn), MEMMAP_EARLY)
4146#endif 4159#endif
4147 4160
4148static int __meminit zone_batchsize(struct zone *zone) 4161static int zone_batchsize(struct zone *zone)
4149{ 4162{
4150#ifdef CONFIG_MMU 4163#ifdef CONFIG_MMU
4151 int batch; 4164 int batch;
@@ -4261,8 +4274,8 @@ static void pageset_set_high(struct per_cpu_pageset *p,
4261 pageset_update(&p->pcp, high, batch); 4274 pageset_update(&p->pcp, high, batch);
4262} 4275}
4263 4276
4264static void __meminit pageset_set_high_and_batch(struct zone *zone, 4277static void pageset_set_high_and_batch(struct zone *zone,
4265 struct per_cpu_pageset *pcp) 4278 struct per_cpu_pageset *pcp)
4266{ 4279{
4267 if (percpu_pagelist_fraction) 4280 if (percpu_pagelist_fraction)
4268 pageset_set_high(pcp, 4281 pageset_set_high(pcp,
@@ -5881,23 +5894,38 @@ int percpu_pagelist_fraction_sysctl_handler(struct ctl_table *table, int write,
5881 void __user *buffer, size_t *length, loff_t *ppos) 5894 void __user *buffer, size_t *length, loff_t *ppos)
5882{ 5895{
5883 struct zone *zone; 5896 struct zone *zone;
5884 unsigned int cpu; 5897 int old_percpu_pagelist_fraction;
5885 int ret; 5898 int ret;
5886 5899
5900 mutex_lock(&pcp_batch_high_lock);
5901 old_percpu_pagelist_fraction = percpu_pagelist_fraction;
5902
5887 ret = proc_dointvec_minmax(table, write, buffer, length, ppos); 5903 ret = proc_dointvec_minmax(table, write, buffer, length, ppos);
5888 if (!write || (ret < 0)) 5904 if (!write || ret < 0)
5889 return ret; 5905 goto out;
5906
5907 /* Sanity checking to avoid pcp imbalance */
5908 if (percpu_pagelist_fraction &&
5909 percpu_pagelist_fraction < MIN_PERCPU_PAGELIST_FRACTION) {
5910 percpu_pagelist_fraction = old_percpu_pagelist_fraction;
5911 ret = -EINVAL;
5912 goto out;
5913 }
5914
5915 /* No change? */
5916 if (percpu_pagelist_fraction == old_percpu_pagelist_fraction)
5917 goto out;
5890 5918
5891 mutex_lock(&pcp_batch_high_lock);
5892 for_each_populated_zone(zone) { 5919 for_each_populated_zone(zone) {
5893 unsigned long high; 5920 unsigned int cpu;
5894 high = zone->managed_pages / percpu_pagelist_fraction; 5921
5895 for_each_possible_cpu(cpu) 5922 for_each_possible_cpu(cpu)
5896 pageset_set_high(per_cpu_ptr(zone->pageset, cpu), 5923 pageset_set_high_and_batch(zone,
5897 high); 5924 per_cpu_ptr(zone->pageset, cpu));
5898 } 5925 }
5926out:
5899 mutex_unlock(&pcp_batch_high_lock); 5927 mutex_unlock(&pcp_batch_high_lock);
5900 return 0; 5928 return ret;
5901} 5929}
5902 5930
5903int hashdist = HASHDIST_DEFAULT; 5931int hashdist = HASHDIST_DEFAULT;
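
The rewritten percpu_pagelist_fraction handler takes the mutex first, snapshots the old value, lets proc_dointvec_minmax() write the new one, restores the snapshot if the value is nonzero but below MIN_PERCPU_PAGELIST_FRACTION, and only rebuilds the per-cpu pagesets when the value actually changed. A simplified single-threaded model of that validate-and-rollback flow (set_fraction is an invented stand-in for the handler):

#include <stdio.h>

#define MIN_FRACTION 8

static int fraction;		/* the "sysctl" value, 0 = use defaults */

/* Simplified model of the handler: write the new value, validate it, roll
 * back to the snapshot on failure, and do nothing if it did not change. */
static int set_fraction(int new_value)
{
	int old = fraction;

	fraction = new_value;		/* proc_dointvec_minmax wrote it */

	if (fraction && fraction < MIN_FRACTION) {
		fraction = old;		/* reject: restore the snapshot */
		return -22;		/* -EINVAL */
	}
	if (fraction == old)
		return 0;		/* no change: skip the rebuild */

	printf("rebuilding per-cpu pagesets for fraction=%d\n", fraction);
	return 0;
}

int main(void)
{
	printf("set 16 -> %d (fraction=%d)\n", set_fraction(16), fraction);
	printf("set 4  -> %d (fraction=%d)\n", set_fraction(4), fraction);
	printf("set 16 -> %d (fraction=%d)\n", set_fraction(16), fraction);
	return 0;
}
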
diff --git a/mm/rmap.c b/mm/rmap.c
index bf05fc872ae8..b7e94ebbd09e 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -569,6 +569,7 @@ pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
569 pgd_t *pgd; 569 pgd_t *pgd;
570 pud_t *pud; 570 pud_t *pud;
571 pmd_t *pmd = NULL; 571 pmd_t *pmd = NULL;
572 pmd_t pmde;
572 573
573 pgd = pgd_offset(mm, address); 574 pgd = pgd_offset(mm, address);
574 if (!pgd_present(*pgd)) 575 if (!pgd_present(*pgd))
@@ -579,7 +580,13 @@ pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address)
579 goto out; 580 goto out;
580 581
581 pmd = pmd_offset(pud, address); 582 pmd = pmd_offset(pud, address);
582 if (!pmd_present(*pmd)) 583 /*
584 * Some THP functions use the sequence pmdp_clear_flush(), set_pmd_at()
585 * without holding anon_vma lock for write. So when looking for a
586 * genuine pmde (in which to find pte), test present and !THP together.
587 */
588 pmde = ACCESS_ONCE(*pmd);
589 if (!pmd_present(pmde) || pmd_trans_huge(pmde))
583 pmd = NULL; 590 pmd = NULL;
584out: 591out:
585 return pmd; 592 return pmd;
@@ -615,9 +622,6 @@ pte_t *__page_check_address(struct page *page, struct mm_struct *mm,
615 if (!pmd) 622 if (!pmd)
616 return NULL; 623 return NULL;
617 624
618 if (pmd_trans_huge(*pmd))
619 return NULL;
620
621 pte = pte_offset_map(pmd, address); 625 pte = pte_offset_map(pmd, address);
622 /* Make a quick check before getting the lock */ 626 /* Make a quick check before getting the lock */
623 if (!sync && !pte_present(*pte)) { 627 if (!sync && !pte_present(*pte)) {
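
mm_find_pmd() now loads *pmd once, via ACCESS_ONCE, into a local pmde and runs both the present and the trans-huge test on that single snapshot, so a racing pmdp_clear_flush()/set_pmd_at() pair cannot make the two tests see different values. A userspace analogue of the "read once, test the copy" rule (the flag bits and names are invented):

#include <stdio.h>

#define FLAG_PRESENT 1u
#define FLAG_HUGE    2u

/* Shared word another thread may rewrite at any time (toy stand-in for a
 * pmd entry); volatile forces a real load on every read of it. */
static volatile unsigned int shared_entry = FLAG_PRESENT;

static int usable_entry(void)
{
	/* Read ONCE, then test the private copy twice.  Testing
	 * shared_entry directly in both conditions could see a value that
	 * is present in the first test and huge in the second. */
	unsigned int snap = shared_entry;

	return (snap & FLAG_PRESENT) && !(snap & FLAG_HUGE);
}

int main(void)
{
	printf("usable: %d\n", usable_entry());
	shared_entry = FLAG_PRESENT | FLAG_HUGE;
	printf("usable: %d\n", usable_entry());
	return 0;
}
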
diff --git a/mm/shmem.c b/mm/shmem.c
index f484c276e994..1140f49b6ded 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -80,11 +80,12 @@ static struct vfsmount *shm_mnt;
80#define SHORT_SYMLINK_LEN 128 80#define SHORT_SYMLINK_LEN 128
81 81
82/* 82/*
83 * shmem_fallocate and shmem_writepage communicate via inode->i_private 83 * shmem_fallocate communicates with shmem_fault or shmem_writepage via
84 * (with i_mutex making sure that it has only one user at a time): 84 * inode->i_private (with i_mutex making sure that it has only one user at
85 * we would prefer not to enlarge the shmem inode just for that. 85 * a time): we would prefer not to enlarge the shmem inode just for that.
86 */ 86 */
87struct shmem_falloc { 87struct shmem_falloc {
88 int mode; /* FALLOC_FL mode currently operating */
88 pgoff_t start; /* start of range currently being fallocated */ 89 pgoff_t start; /* start of range currently being fallocated */
89 pgoff_t next; /* the next page offset to be fallocated */ 90 pgoff_t next; /* the next page offset to be fallocated */
90 pgoff_t nr_falloced; /* how many new pages have been fallocated */ 91 pgoff_t nr_falloced; /* how many new pages have been fallocated */
@@ -759,6 +760,7 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
759 spin_lock(&inode->i_lock); 760 spin_lock(&inode->i_lock);
760 shmem_falloc = inode->i_private; 761 shmem_falloc = inode->i_private;
761 if (shmem_falloc && 762 if (shmem_falloc &&
763 !shmem_falloc->mode &&
762 index >= shmem_falloc->start && 764 index >= shmem_falloc->start &&
763 index < shmem_falloc->next) 765 index < shmem_falloc->next)
764 shmem_falloc->nr_unswapped++; 766 shmem_falloc->nr_unswapped++;
@@ -1027,6 +1029,9 @@ repeat:
1027 goto failed; 1029 goto failed;
1028 } 1030 }
1029 1031
1032 if (page && sgp == SGP_WRITE)
1033 mark_page_accessed(page);
1034
1030 /* fallocated page? */ 1035 /* fallocated page? */
1031 if (page && !PageUptodate(page)) { 1036 if (page && !PageUptodate(page)) {
1032 if (sgp != SGP_READ) 1037 if (sgp != SGP_READ)
@@ -1108,6 +1113,9 @@ repeat:
1108 shmem_recalc_inode(inode); 1113 shmem_recalc_inode(inode);
1109 spin_unlock(&info->lock); 1114 spin_unlock(&info->lock);
1110 1115
1116 if (sgp == SGP_WRITE)
1117 mark_page_accessed(page);
1118
1111 delete_from_swap_cache(page); 1119 delete_from_swap_cache(page);
1112 set_page_dirty(page); 1120 set_page_dirty(page);
1113 swap_free(swap); 1121 swap_free(swap);
@@ -1134,6 +1142,9 @@ repeat:
1134 1142
1135 __SetPageSwapBacked(page); 1143 __SetPageSwapBacked(page);
1136 __set_page_locked(page); 1144 __set_page_locked(page);
1145 if (sgp == SGP_WRITE)
1146 init_page_accessed(page);
1147
1137 error = mem_cgroup_charge_file(page, current->mm, 1148 error = mem_cgroup_charge_file(page, current->mm,
1138 gfp & GFP_RECLAIM_MASK); 1149 gfp & GFP_RECLAIM_MASK);
1139 if (error) 1150 if (error)
@@ -1233,6 +1244,44 @@ static int shmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1233 int error; 1244 int error;
1234 int ret = VM_FAULT_LOCKED; 1245 int ret = VM_FAULT_LOCKED;
1235 1246
1247 /*
1248 * Trinity finds that probing a hole which tmpfs is punching can
1249 * prevent the hole-punch from ever completing: which in turn
1250 * locks writers out with its hold on i_mutex. So refrain from
1251 * faulting pages into the hole while it's being punched, and
1252 * wait on i_mutex to be released if vmf->flags permits.
1253 */
1254 if (unlikely(inode->i_private)) {
1255 struct shmem_falloc *shmem_falloc;
1256
1257 spin_lock(&inode->i_lock);
1258 shmem_falloc = inode->i_private;
1259 if (!shmem_falloc ||
1260 shmem_falloc->mode != FALLOC_FL_PUNCH_HOLE ||
1261 vmf->pgoff < shmem_falloc->start ||
1262 vmf->pgoff >= shmem_falloc->next)
1263 shmem_falloc = NULL;
1264 spin_unlock(&inode->i_lock);
1265 /*
1266 * i_lock has protected us from taking shmem_falloc seriously
1267 * once return from shmem_fallocate() went back up that stack.
1268 * i_lock does not serialize with i_mutex at all, but it does
1269 * not matter if sometimes we wait unnecessarily, or sometimes
1270 * miss out on waiting: we just need to make those cases rare.
1271 */
1272 if (shmem_falloc) {
1273 if ((vmf->flags & FAULT_FLAG_ALLOW_RETRY) &&
1274 !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) {
1275 up_read(&vma->vm_mm->mmap_sem);
1276 mutex_lock(&inode->i_mutex);
1277 mutex_unlock(&inode->i_mutex);
1278 return VM_FAULT_RETRY;
1279 }
1280 /* cond_resched? Leave that to GUP or return to user */
1281 return VM_FAULT_NOPAGE;
1282 }
1283 }
1284
1236 error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret); 1285 error = shmem_getpage(inode, vmf->pgoff, &vmf->page, SGP_CACHE, &ret);
1237 if (error) 1286 if (error)
1238 return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS); 1287 return ((error == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS);
@@ -1372,13 +1421,9 @@ shmem_write_begin(struct file *file, struct address_space *mapping,
1372 loff_t pos, unsigned len, unsigned flags, 1421 loff_t pos, unsigned len, unsigned flags,
1373 struct page **pagep, void **fsdata) 1422 struct page **pagep, void **fsdata)
1374{ 1423{
1375 int ret;
1376 struct inode *inode = mapping->host; 1424 struct inode *inode = mapping->host;
1377 pgoff_t index = pos >> PAGE_CACHE_SHIFT; 1425 pgoff_t index = pos >> PAGE_CACHE_SHIFT;
1378 ret = shmem_getpage(inode, index, pagep, SGP_WRITE, NULL); 1426 return shmem_getpage(inode, index, pagep, SGP_WRITE, NULL);
1379 if (ret == 0 && *pagep)
1380 init_page_accessed(*pagep);
1381 return ret;
1382} 1427}
1383 1428
1384static int 1429static int
@@ -1724,20 +1769,31 @@ static long shmem_fallocate(struct file *file, int mode, loff_t offset,
1724 pgoff_t start, index, end; 1769 pgoff_t start, index, end;
1725 int error; 1770 int error;
1726 1771
1772 if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
1773 return -EOPNOTSUPP;
1774
1727 mutex_lock(&inode->i_mutex); 1775 mutex_lock(&inode->i_mutex);
1728 1776
1777 shmem_falloc.mode = mode & ~FALLOC_FL_KEEP_SIZE;
1778
1729 if (mode & FALLOC_FL_PUNCH_HOLE) { 1779 if (mode & FALLOC_FL_PUNCH_HOLE) {
1730 struct address_space *mapping = file->f_mapping; 1780 struct address_space *mapping = file->f_mapping;
1731 loff_t unmap_start = round_up(offset, PAGE_SIZE); 1781 loff_t unmap_start = round_up(offset, PAGE_SIZE);
1732 loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1; 1782 loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
1733 1783
1784 shmem_falloc.start = unmap_start >> PAGE_SHIFT;
1785 shmem_falloc.next = (unmap_end + 1) >> PAGE_SHIFT;
1786 spin_lock(&inode->i_lock);
1787 inode->i_private = &shmem_falloc;
1788 spin_unlock(&inode->i_lock);
1789
1734 if ((u64)unmap_end > (u64)unmap_start) 1790 if ((u64)unmap_end > (u64)unmap_start)
1735 unmap_mapping_range(mapping, unmap_start, 1791 unmap_mapping_range(mapping, unmap_start,
1736 1 + unmap_end - unmap_start, 0); 1792 1 + unmap_end - unmap_start, 0);
1737 shmem_truncate_range(inode, offset, offset + len - 1); 1793 shmem_truncate_range(inode, offset, offset + len - 1);
1738 /* No need to unmap again: hole-punching leaves COWed pages */ 1794 /* No need to unmap again: hole-punching leaves COWed pages */
1739 error = 0; 1795 error = 0;
1740 goto out; 1796 goto undone;
1741 } 1797 }
1742 1798
1743 /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */ 1799 /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
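
The shmem change publishes the range being hole-punched through inode->i_private (set under i_lock), so concurrent faults over that range can back off, either returning NOPAGE or dropping mmap_sem and waiting, instead of re-instantiating pages as fast as the punch removes them. A much-simplified sketch of the "publish the active range, other paths check it and back off" shape; struct active_punch and both functions are invented, and a plain mutex stands in for i_lock.

#include <pthread.h>
#include <stdio.h>

struct active_punch { long start, next; };

/* Toy model: a mutex-protected pointer advertises the punch in progress. */
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct active_punch *current_punch;	/* NULL when no punch runs */

/* Fault path: back off (the caller would retry later) if the offset falls
 * inside a punch that is currently being carried out. */
static int fault_should_retry(long pgoff)
{
	int retry = 0;

	pthread_mutex_lock(&lock);
	if (current_punch &&
	    pgoff >= current_punch->start && pgoff < current_punch->next)
		retry = 1;
	pthread_mutex_unlock(&lock);
	return retry;
}

/* Punch path: publish the range for its duration, then clear it. */
static void punch_hole(long start, long next)
{
	struct active_punch p = { start, next };

	pthread_mutex_lock(&lock);
	current_punch = &p;
	pthread_mutex_unlock(&lock);

	/* ... unmap + truncate the range here ... */
	printf("punching [%ld, %ld): fault at 5 retries? %d\n",
	       start, next, fault_should_retry(5));

	pthread_mutex_lock(&lock);
	current_punch = NULL;
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	printf("no punch: fault at 5 retries? %d\n", fault_should_retry(5));
	punch_hole(0, 16);
	return 0;
}
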
diff --git a/mm/slab.c b/mm/slab.c
index 9ca3b87edabc..3070b929a1bf 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -386,6 +386,39 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
386 386
387#endif 387#endif
388 388
389#define OBJECT_FREE (0)
390#define OBJECT_ACTIVE (1)
391
392#ifdef CONFIG_DEBUG_SLAB_LEAK
393
394static void set_obj_status(struct page *page, int idx, int val)
395{
396 int freelist_size;
397 char *status;
398 struct kmem_cache *cachep = page->slab_cache;
399
400 freelist_size = cachep->num * sizeof(freelist_idx_t);
401 status = (char *)page->freelist + freelist_size;
402 status[idx] = val;
403}
404
405static inline unsigned int get_obj_status(struct page *page, int idx)
406{
407 int freelist_size;
408 char *status;
409 struct kmem_cache *cachep = page->slab_cache;
410
411 freelist_size = cachep->num * sizeof(freelist_idx_t);
412 status = (char *)page->freelist + freelist_size;
413
414 return status[idx];
415}
416
417#else
418static inline void set_obj_status(struct page *page, int idx, int val) {}
419
420#endif
421
389/* 422/*
390 * Do not go above this order unless 0 objects fit into the slab or 423 * Do not go above this order unless 0 objects fit into the slab or
391 * overridden on the command line. 424 * overridden on the command line.
@@ -576,12 +609,30 @@ static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
576 return cachep->array[smp_processor_id()]; 609 return cachep->array[smp_processor_id()];
577} 610}
578 611
612static size_t calculate_freelist_size(int nr_objs, size_t align)
613{
614 size_t freelist_size;
615
616 freelist_size = nr_objs * sizeof(freelist_idx_t);
617 if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
618 freelist_size += nr_objs * sizeof(char);
619
620 if (align)
621 freelist_size = ALIGN(freelist_size, align);
622
623 return freelist_size;
624}
625
579static int calculate_nr_objs(size_t slab_size, size_t buffer_size, 626static int calculate_nr_objs(size_t slab_size, size_t buffer_size,
580 size_t idx_size, size_t align) 627 size_t idx_size, size_t align)
581{ 628{
582 int nr_objs; 629 int nr_objs;
630 size_t remained_size;
583 size_t freelist_size; 631 size_t freelist_size;
632 int extra_space = 0;
584 633
634 if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
635 extra_space = sizeof(char);
585 /* 636 /*
586 * Ignore padding for the initial guess. The padding 637 * Ignore padding for the initial guess. The padding
587 * is at most @align-1 bytes, and @buffer_size is at 638 * is at most @align-1 bytes, and @buffer_size is at
@@ -590,14 +641,15 @@ static int calculate_nr_objs(size_t slab_size, size_t buffer_size,
590 * into the memory allocation when taking the padding 641 * into the memory allocation when taking the padding
591 * into account. 642 * into account.
592 */ 643 */
593 nr_objs = slab_size / (buffer_size + idx_size); 644 nr_objs = slab_size / (buffer_size + idx_size + extra_space);
594 645
595 /* 646 /*
596 * This calculated number will be either the right 647 * This calculated number will be either the right
597 * amount, or one greater than what we want. 648 * amount, or one greater than what we want.
598 */ 649 */
599 freelist_size = slab_size - nr_objs * buffer_size; 650 remained_size = slab_size - nr_objs * buffer_size;
600 if (freelist_size < ALIGN(nr_objs * idx_size, align)) 651 freelist_size = calculate_freelist_size(nr_objs, align);
652 if (remained_size < freelist_size)
601 nr_objs--; 653 nr_objs--;
602 654
603 return nr_objs; 655 return nr_objs;
@@ -635,7 +687,7 @@ static void cache_estimate(unsigned long gfporder, size_t buffer_size,
635 } else { 687 } else {
636 nr_objs = calculate_nr_objs(slab_size, buffer_size, 688 nr_objs = calculate_nr_objs(slab_size, buffer_size,
637 sizeof(freelist_idx_t), align); 689 sizeof(freelist_idx_t), align);
638 mgmt_size = ALIGN(nr_objs * sizeof(freelist_idx_t), align); 690 mgmt_size = calculate_freelist_size(nr_objs, align);
639 } 691 }
640 *num = nr_objs; 692 *num = nr_objs;
641 *left_over = slab_size - nr_objs*buffer_size - mgmt_size; 693 *left_over = slab_size - nr_objs*buffer_size - mgmt_size;
@@ -2041,13 +2093,16 @@ static size_t calculate_slab_order(struct kmem_cache *cachep,
2041 break; 2093 break;
2042 2094
2043 if (flags & CFLGS_OFF_SLAB) { 2095 if (flags & CFLGS_OFF_SLAB) {
2096 size_t freelist_size_per_obj = sizeof(freelist_idx_t);
2044 /* 2097 /*
2045 * Max number of objs-per-slab for caches which 2098 * Max number of objs-per-slab for caches which
2046 * use off-slab slabs. Needed to avoid a possible 2099 * use off-slab slabs. Needed to avoid a possible
2047 * looping condition in cache_grow(). 2100 * looping condition in cache_grow().
2048 */ 2101 */
2102 if (IS_ENABLED(CONFIG_DEBUG_SLAB_LEAK))
2103 freelist_size_per_obj += sizeof(char);
2049 offslab_limit = size; 2104 offslab_limit = size;
2050 offslab_limit /= sizeof(freelist_idx_t); 2105 offslab_limit /= freelist_size_per_obj;
2051 2106
2052 if (num > offslab_limit) 2107 if (num > offslab_limit)
2053 break; 2108 break;
@@ -2294,8 +2349,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
2294 if (!cachep->num) 2349 if (!cachep->num)
2295 return -E2BIG; 2350 return -E2BIG;
2296 2351
2297 freelist_size = 2352 freelist_size = calculate_freelist_size(cachep->num, cachep->align);
2298 ALIGN(cachep->num * sizeof(freelist_idx_t), cachep->align);
2299 2353
2300 /* 2354 /*
2301 * If the slab has been placed off-slab, and we have enough space then 2355 * If the slab has been placed off-slab, and we have enough space then
@@ -2308,7 +2362,7 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
2308 2362
2309 if (flags & CFLGS_OFF_SLAB) { 2363 if (flags & CFLGS_OFF_SLAB) {
2310 /* really off slab. No need for manual alignment */ 2364 /* really off slab. No need for manual alignment */
2311 freelist_size = cachep->num * sizeof(freelist_idx_t); 2365 freelist_size = calculate_freelist_size(cachep->num, 0);
2312 2366
2313#ifdef CONFIG_PAGE_POISONING 2367#ifdef CONFIG_PAGE_POISONING
2314 /* If we're going to use the generic kernel_map_pages() 2368 /* If we're going to use the generic kernel_map_pages()
@@ -2612,6 +2666,7 @@ static void cache_init_objs(struct kmem_cache *cachep,
2612 if (cachep->ctor) 2666 if (cachep->ctor)
2613 cachep->ctor(objp); 2667 cachep->ctor(objp);
2614#endif 2668#endif
2669 set_obj_status(page, i, OBJECT_FREE);
2615 set_free_obj(page, i, i); 2670 set_free_obj(page, i, i);
2616 } 2671 }
2617} 2672}
@@ -2820,6 +2875,7 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
2820 BUG_ON(objnr >= cachep->num); 2875 BUG_ON(objnr >= cachep->num);
2821 BUG_ON(objp != index_to_obj(cachep, page, objnr)); 2876 BUG_ON(objp != index_to_obj(cachep, page, objnr));
2822 2877
2878 set_obj_status(page, objnr, OBJECT_FREE);
2823 if (cachep->flags & SLAB_POISON) { 2879 if (cachep->flags & SLAB_POISON) {
2824#ifdef CONFIG_DEBUG_PAGEALLOC 2880#ifdef CONFIG_DEBUG_PAGEALLOC
2825 if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) { 2881 if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
@@ -2953,6 +3009,8 @@ static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
2953static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep, 3009static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
2954 gfp_t flags, void *objp, unsigned long caller) 3010 gfp_t flags, void *objp, unsigned long caller)
2955{ 3011{
3012 struct page *page;
3013
2956 if (!objp) 3014 if (!objp)
2957 return objp; 3015 return objp;
2958 if (cachep->flags & SLAB_POISON) { 3016 if (cachep->flags & SLAB_POISON) {
@@ -2983,6 +3041,9 @@ static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
2983 *dbg_redzone1(cachep, objp) = RED_ACTIVE; 3041 *dbg_redzone1(cachep, objp) = RED_ACTIVE;
2984 *dbg_redzone2(cachep, objp) = RED_ACTIVE; 3042 *dbg_redzone2(cachep, objp) = RED_ACTIVE;
2985 } 3043 }
3044
3045 page = virt_to_head_page(objp);
3046 set_obj_status(page, obj_to_index(cachep, page, objp), OBJECT_ACTIVE);
2986 objp += obj_offset(cachep); 3047 objp += obj_offset(cachep);
2987 if (cachep->ctor && cachep->flags & SLAB_POISON) 3048 if (cachep->ctor && cachep->flags & SLAB_POISON)
2988 cachep->ctor(objp); 3049 cachep->ctor(objp);
@@ -4219,21 +4280,12 @@ static void handle_slab(unsigned long *n, struct kmem_cache *c,
4219 struct page *page) 4280 struct page *page)
4220{ 4281{
4221 void *p; 4282 void *p;
4222 int i, j; 4283 int i;
4223 4284
4224 if (n[0] == n[1]) 4285 if (n[0] == n[1])
4225 return; 4286 return;
4226 for (i = 0, p = page->s_mem; i < c->num; i++, p += c->size) { 4287 for (i = 0, p = page->s_mem; i < c->num; i++, p += c->size) {
4227 bool active = true; 4288 if (get_obj_status(page, i) != OBJECT_ACTIVE)
4228
4229 for (j = page->active; j < c->num; j++) {
4230 /* Skip freed item */
4231 if (get_free_obj(page, j) == i) {
4232 active = false;
4233 break;
4234 }
4235 }
4236 if (!active)
4237 continue; 4289 continue;
4238 4290
4239 if (!add_caller(n, (unsigned long)*dbg_userword(c, p))) 4291 if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
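[Editor's note] The slab changes above size the on-page freelist as one index per object, plus one status byte per object when CONFIG_DEBUG_SLAB_LEAK is enabled, rounded up to the cache alignment. The following is a minimal userspace sketch of that arithmetic only; uint8_t stands in for freelist_idx_t and all names are illustrative, not the kernel's.

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

typedef uint8_t freelist_idx_t;            /* stand-in for the kernel type */

static size_t align_up(size_t n, size_t a)
{
	return a ? (n + a - 1) / a * a : n;
}

/* One freelist index per object, plus one status byte per object when
 * leak tracking is enabled, rounded up to the slab alignment. */
static size_t calc_freelist_size(int nr_objs, size_t align, int leak_tracking)
{
	size_t size = nr_objs * sizeof(freelist_idx_t);

	if (leak_tracking)
		size += nr_objs * sizeof(char);

	return align_up(size, align);
}

int main(void)
{
	printf("%zu\n", calc_freelist_size(30, 8, 0));   /* 30 bytes -> 32 */
	printf("%zu\n", calc_freelist_size(30, 8, 1));   /* 60 bytes -> 64 */
	return 0;
}
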
diff --git a/mm/slub.c b/mm/slub.c
index b2b047327d76..73004808537e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1881,7 +1881,7 @@ redo:
1881 1881
1882 new.frozen = 0; 1882 new.frozen = 0;
1883 1883
1884 if (!new.inuse && n->nr_partial > s->min_partial) 1884 if (!new.inuse && n->nr_partial >= s->min_partial)
1885 m = M_FREE; 1885 m = M_FREE;
1886 else if (new.freelist) { 1886 else if (new.freelist) {
1887 m = M_PARTIAL; 1887 m = M_PARTIAL;
@@ -1992,7 +1992,7 @@ static void unfreeze_partials(struct kmem_cache *s,
1992 new.freelist, new.counters, 1992 new.freelist, new.counters,
1993 "unfreezing slab")); 1993 "unfreezing slab"));
1994 1994
1995 if (unlikely(!new.inuse && n->nr_partial > s->min_partial)) { 1995 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) {
1996 page->next = discard_page; 1996 page->next = discard_page;
1997 discard_page = page; 1997 discard_page = page;
1998 } else { 1998 } else {
@@ -2620,7 +2620,7 @@ static void __slab_free(struct kmem_cache *s, struct page *page,
2620 return; 2620 return;
2621 } 2621 }
2622 2622
2623 if (unlikely(!new.inuse && n->nr_partial > s->min_partial)) 2623 if (unlikely(!new.inuse && n->nr_partial >= s->min_partial))
2624 goto slab_empty; 2624 goto slab_empty;
2625 2625
2626 /* 2626 /*
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 9012b1c922b6..75d427763992 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -114,8 +114,11 @@ EXPORT_SYMBOL(vlan_dev_vlan_proto);
114 114
115static struct sk_buff *vlan_reorder_header(struct sk_buff *skb) 115static struct sk_buff *vlan_reorder_header(struct sk_buff *skb)
116{ 116{
117 if (skb_cow(skb, skb_headroom(skb)) < 0) 117 if (skb_cow(skb, skb_headroom(skb)) < 0) {
118 kfree_skb(skb);
118 return NULL; 119 return NULL;
120 }
121
119 memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN); 122 memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
120 skb->mac_header += VLAN_HLEN; 123 skb->mac_header += VLAN_HLEN;
121 return skb; 124 return skb;
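[Editor's note] The vlan_core change makes the error path consume the buffer: when skb_cow() fails, the skb is now freed before returning NULL, so callers cannot leak it. Below is a hypothetical userspace sketch of the same consume-on-failure convention; the names are made up for illustration.

#include <stdlib.h>

struct buf {
	char  *data;
	size_t len;
};

/* On failure, free the buffer and return NULL so the caller never has to
 * guess who owns it; on success, return the (possibly reallocated) buffer. */
static struct buf *grow_headroom(struct buf *b, size_t extra)
{
	char *bigger = realloc(b->data, b->len + extra);

	if (!bigger) {
		free(b->data);
		free(b);
		return NULL;
	}
	b->data = bigger;
	b->len += extra;
	return b;
}

int main(void)
{
	struct buf *b = calloc(1, sizeof(*b));

	if (b)
		b = grow_headroom(b, 64);
	if (b) {
		free(b->data);
		free(b);
	}
	return 0;
}
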
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index ad2ac3c00398..dd11f612e03e 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -627,8 +627,6 @@ static void vlan_dev_uninit(struct net_device *dev)
627 struct vlan_dev_priv *vlan = vlan_dev_priv(dev); 627 struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
628 int i; 628 int i;
629 629
630 free_percpu(vlan->vlan_pcpu_stats);
631 vlan->vlan_pcpu_stats = NULL;
632 for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) { 630 for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) {
633 while ((pm = vlan->egress_priority_map[i]) != NULL) { 631 while ((pm = vlan->egress_priority_map[i]) != NULL) {
634 vlan->egress_priority_map[i] = pm->next; 632 vlan->egress_priority_map[i] = pm->next;
@@ -785,6 +783,15 @@ static const struct net_device_ops vlan_netdev_ops = {
785 .ndo_get_lock_subclass = vlan_dev_get_lock_subclass, 783 .ndo_get_lock_subclass = vlan_dev_get_lock_subclass,
786}; 784};
787 785
786static void vlan_dev_free(struct net_device *dev)
787{
788 struct vlan_dev_priv *vlan = vlan_dev_priv(dev);
789
790 free_percpu(vlan->vlan_pcpu_stats);
791 vlan->vlan_pcpu_stats = NULL;
792 free_netdev(dev);
793}
794
788void vlan_setup(struct net_device *dev) 795void vlan_setup(struct net_device *dev)
789{ 796{
790 ether_setup(dev); 797 ether_setup(dev);
@@ -794,7 +801,7 @@ void vlan_setup(struct net_device *dev)
794 dev->tx_queue_len = 0; 801 dev->tx_queue_len = 0;
795 802
796 dev->netdev_ops = &vlan_netdev_ops; 803 dev->netdev_ops = &vlan_netdev_ops;
797 dev->destructor = free_netdev; 804 dev->destructor = vlan_dev_free;
798 dev->ethtool_ops = &vlan_ethtool_ops; 805 dev->ethtool_ops = &vlan_ethtool_ops;
799 806
800 memset(dev->broadcast, 0, ETH_ALEN); 807 memset(dev->broadcast, 0, ETH_ALEN);
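[Editor's note] The vlan_dev change moves freeing of vlan_pcpu_stats out of the uninit path and into a destructor that runs only when the device is finally released, so statistics readers racing with unregistration never touch freed memory. A rough userspace sketch of that ownership split follows; all names are hypothetical and the concurrency itself is not modelled.

#include <stdlib.h>

struct stats { long rx_packets, tx_packets; };

struct device {
	struct stats *pcpu_stats;
	void (*destructor)(struct device *dev);
};

static void device_uninit(struct device *dev)
{
	/* Tear down configuration only; readers may still be looking at
	 * the statistics, so they stay allocated here. */
	(void)dev;
}

static void device_free(struct device *dev)
{
	/* Last step of teardown: nothing can reach the device anymore. */
	free(dev->pcpu_stats);
	dev->pcpu_stats = NULL;
	free(dev);
}

int main(void)
{
	struct device *dev = calloc(1, sizeof(*dev));

	if (!dev)
		return 1;
	dev->pcpu_stats = calloc(1, sizeof(*dev->pcpu_stats));
	dev->destructor = device_free;

	device_uninit(dev);      /* stats are still valid after this point */
	dev->destructor(dev);    /* stats released together with the device */
	return 0;
}
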
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 01a1082e02b3..bfcf6be1d665 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1489,8 +1489,6 @@ static int atalk_rcv(struct sk_buff *skb, struct net_device *dev,
1489 goto drop; 1489 goto drop;
1490 1490
1491 /* Queue packet (standard) */ 1491 /* Queue packet (standard) */
1492 skb->sk = sock;
1493
1494 if (sock_queue_rcv_skb(sock, skb) < 0) 1492 if (sock_queue_rcv_skb(sock, skb) < 0)
1495 goto drop; 1493 goto drop;
1496 1494
@@ -1644,7 +1642,6 @@ static int atalk_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr
1644 if (!skb) 1642 if (!skb)
1645 goto out; 1643 goto out;
1646 1644
1647 skb->sk = sk;
1648 skb_reserve(skb, ddp_dl->header_length); 1645 skb_reserve(skb, ddp_dl->header_length);
1649 skb_reserve(skb, dev->hard_header_len); 1646 skb_reserve(skb, dev->hard_header_len);
1650 skb->dev = dev; 1647 skb->dev = dev;
diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c
index 8671bc79a35b..a7a27bc2c0b1 100644
--- a/net/bluetooth/hci_conn.c
+++ b/net/bluetooth/hci_conn.c
@@ -289,10 +289,20 @@ static void hci_conn_timeout(struct work_struct *work)
289{ 289{
290 struct hci_conn *conn = container_of(work, struct hci_conn, 290 struct hci_conn *conn = container_of(work, struct hci_conn,
291 disc_work.work); 291 disc_work.work);
292 int refcnt = atomic_read(&conn->refcnt);
292 293
293 BT_DBG("hcon %p state %s", conn, state_to_string(conn->state)); 294 BT_DBG("hcon %p state %s", conn, state_to_string(conn->state));
294 295
295 if (atomic_read(&conn->refcnt)) 296 WARN_ON(refcnt < 0);
297
298 /* FIXME: It was observed that in a failed pairing scenario, refcnt
299 * drops below 0. Probably this is because l2cap_conn_del calls
300 * l2cap_chan_del for each channel, and inside l2cap_chan_del conn is
301 * dropped. After that loop hci_chan_del is called which also drops
302 * conn. For now make sure that ACL is alive if refcnt is higher than 0,
303 * otherwise drop it.
304 */
305 if (refcnt > 0)
296 return; 306 return;
297 307
298 switch (conn->state) { 308 switch (conn->state) {
@@ -610,11 +620,6 @@ static void hci_req_add_le_create_conn(struct hci_request *req,
610 if (hci_update_random_address(req, false, &own_addr_type)) 620 if (hci_update_random_address(req, false, &own_addr_type))
611 return; 621 return;
612 622
613 /* Save the address type used for this connnection attempt so we able
614 * to retrieve this information if we need it.
615 */
616 conn->src_type = own_addr_type;
617
618 cp.scan_interval = cpu_to_le16(hdev->le_scan_interval); 623 cp.scan_interval = cpu_to_le16(hdev->le_scan_interval);
619 cp.scan_window = cpu_to_le16(hdev->le_scan_window); 624 cp.scan_window = cpu_to_le16(hdev->le_scan_window);
620 bacpy(&cp.peer_addr, &conn->dst); 625 bacpy(&cp.peer_addr, &conn->dst);
@@ -894,7 +899,7 @@ static int hci_conn_auth(struct hci_conn *conn, __u8 sec_level, __u8 auth_type)
894 /* If we're already encrypted set the REAUTH_PEND flag, 899 /* If we're already encrypted set the REAUTH_PEND flag,
895 * otherwise set the ENCRYPT_PEND. 900 * otherwise set the ENCRYPT_PEND.
896 */ 901 */
897 if (conn->key_type != 0xff) 902 if (conn->link_mode & HCI_LM_ENCRYPT)
898 set_bit(HCI_CONN_REAUTH_PEND, &conn->flags); 903 set_bit(HCI_CONN_REAUTH_PEND, &conn->flags);
899 else 904 else
900 set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags); 905 set_bit(HCI_CONN_ENCRYPT_PEND, &conn->flags);
diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c
index 21e5913d12e0..640c54ec1bd2 100644
--- a/net/bluetooth/hci_event.c
+++ b/net/bluetooth/hci_event.c
@@ -48,6 +48,10 @@ static void hci_cc_inquiry_cancel(struct hci_dev *hdev, struct sk_buff *skb)
48 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */ 48 smp_mb__after_atomic(); /* wake_up_bit advises about this barrier */
49 wake_up_bit(&hdev->flags, HCI_INQUIRY); 49 wake_up_bit(&hdev->flags, HCI_INQUIRY);
50 50
51 hci_dev_lock(hdev);
52 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
53 hci_dev_unlock(hdev);
54
51 hci_conn_check_pending(hdev); 55 hci_conn_check_pending(hdev);
52} 56}
53 57
@@ -3537,7 +3541,11 @@ static void hci_io_capa_request_evt(struct hci_dev *hdev, struct sk_buff *skb)
3537 cp.authentication = conn->auth_type; 3541 cp.authentication = conn->auth_type;
3538 3542
3539 /* Request MITM protection if our IO caps allow it 3543 /* Request MITM protection if our IO caps allow it
3540 * except for the no-bonding case 3544 * except for the no-bonding case.
3545 * conn->auth_type is not updated here since
3546 * that might cause the user confirmation to be
3547 * rejected in case the remote doesn't have the
3548 * IO capabilities for MITM.
3541 */ 3549 */
3542 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT && 3550 if (conn->io_capability != HCI_IO_NO_INPUT_OUTPUT &&
3543 cp.authentication != HCI_AT_NO_BONDING) 3551 cp.authentication != HCI_AT_NO_BONDING)
@@ -3628,8 +3636,11 @@ static void hci_user_confirm_request_evt(struct hci_dev *hdev,
3628 3636
3629 /* If we're not the initiators request authorization to 3637 /* If we're not the initiators request authorization to
3630 * proceed from user space (mgmt_user_confirm with 3638 * proceed from user space (mgmt_user_confirm with
3631 * confirm_hint set to 1). */ 3639 * confirm_hint set to 1). The exception is if neither
3632 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags)) { 3640 * side had MITM in which case we do auto-accept.
3641 */
3642 if (!test_bit(HCI_CONN_AUTH_PEND, &conn->flags) &&
3643 (loc_mitm || rem_mitm)) {
3633 BT_DBG("Confirming auto-accept as acceptor"); 3644 BT_DBG("Confirming auto-accept as acceptor");
3634 confirm_hint = 1; 3645 confirm_hint = 1;
3635 goto confirm; 3646 goto confirm;
diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
index 6eabbe05fe54..323f23cd2c37 100644
--- a/net/bluetooth/l2cap_core.c
+++ b/net/bluetooth/l2cap_core.c
@@ -1663,7 +1663,13 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err)
1663 kfree_skb(conn->rx_skb); 1663 kfree_skb(conn->rx_skb);
1664 1664
1665 skb_queue_purge(&conn->pending_rx); 1665 skb_queue_purge(&conn->pending_rx);
1666 flush_work(&conn->pending_rx_work); 1666
1667 /* We cannot call flush_work(&conn->pending_rx_work) here since we
1668 * might block if we are running on a worker from the same workqueue
1669 * pending_rx_work is waiting on.
1670 */
1671 if (work_pending(&conn->pending_rx_work))
1672 cancel_work_sync(&conn->pending_rx_work);
1667 1673
1668 l2cap_unregister_all_users(conn); 1674 l2cap_unregister_all_users(conn);
1669 1675
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index ade3fb4c23bc..e1378693cc90 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -787,11 +787,6 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
787 787
788 /*change security for LE channels */ 788 /*change security for LE channels */
789 if (chan->scid == L2CAP_CID_ATT) { 789 if (chan->scid == L2CAP_CID_ATT) {
790 if (!conn->hcon->out) {
791 err = -EINVAL;
792 break;
793 }
794
795 if (smp_conn_security(conn->hcon, sec.level)) 790 if (smp_conn_security(conn->hcon, sec.level))
796 break; 791 break;
797 sk->sk_state = BT_CONFIG; 792 sk->sk_state = BT_CONFIG;
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 0fce54412ffd..af8e0a6243b7 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -1047,6 +1047,43 @@ static void clean_up_hci_complete(struct hci_dev *hdev, u8 status)
1047 } 1047 }
1048} 1048}
1049 1049
1050static void hci_stop_discovery(struct hci_request *req)
1051{
1052 struct hci_dev *hdev = req->hdev;
1053 struct hci_cp_remote_name_req_cancel cp;
1054 struct inquiry_entry *e;
1055
1056 switch (hdev->discovery.state) {
1057 case DISCOVERY_FINDING:
1058 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
1059 hci_req_add(req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
1060 } else {
1061 cancel_delayed_work(&hdev->le_scan_disable);
1062 hci_req_add_le_scan_disable(req);
1063 }
1064
1065 break;
1066
1067 case DISCOVERY_RESOLVING:
1068 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY,
1069 NAME_PENDING);
1070 if (!e)
1071 return;
1072
1073 bacpy(&cp.bdaddr, &e->data.bdaddr);
1074 hci_req_add(req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
1075 &cp);
1076
1077 break;
1078
1079 default:
1080 /* Passive scanning */
1081 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1082 hci_req_add_le_scan_disable(req);
1083 break;
1084 }
1085}
1086
1050static int clean_up_hci_state(struct hci_dev *hdev) 1087static int clean_up_hci_state(struct hci_dev *hdev)
1051{ 1088{
1052 struct hci_request req; 1089 struct hci_request req;
@@ -1063,9 +1100,7 @@ static int clean_up_hci_state(struct hci_dev *hdev)
1063 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags)) 1100 if (test_bit(HCI_ADVERTISING, &hdev->dev_flags))
1064 disable_advertising(&req); 1101 disable_advertising(&req);
1065 1102
1066 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags)) { 1103 hci_stop_discovery(&req);
1067 hci_req_add_le_scan_disable(&req);
1068 }
1069 1104
1070 list_for_each_entry(conn, &hdev->conn_hash.list, list) { 1105 list_for_each_entry(conn, &hdev->conn_hash.list, list) {
1071 struct hci_cp_disconnect dc; 1106 struct hci_cp_disconnect dc;
@@ -2996,8 +3031,13 @@ static int user_pairing_resp(struct sock *sk, struct hci_dev *hdev,
2996 } 3031 }
2997 3032
2998 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) { 3033 if (addr->type == BDADDR_LE_PUBLIC || addr->type == BDADDR_LE_RANDOM) {
2999 /* Continue with pairing via SMP */ 3034 /* Continue with pairing via SMP. The hdev lock must be
3035 * released as SMP may try to reacquire it for crypto
3036 * purposes.
3037 */
3038 hci_dev_unlock(hdev);
3000 err = smp_user_confirm_reply(conn, mgmt_op, passkey); 3039 err = smp_user_confirm_reply(conn, mgmt_op, passkey);
3040 hci_dev_lock(hdev);
3001 3041
3002 if (!err) 3042 if (!err)
3003 err = cmd_complete(sk, hdev->id, mgmt_op, 3043 err = cmd_complete(sk, hdev->id, mgmt_op,
@@ -3574,8 +3614,6 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
3574{ 3614{
3575 struct mgmt_cp_stop_discovery *mgmt_cp = data; 3615 struct mgmt_cp_stop_discovery *mgmt_cp = data;
3576 struct pending_cmd *cmd; 3616 struct pending_cmd *cmd;
3577 struct hci_cp_remote_name_req_cancel cp;
3578 struct inquiry_entry *e;
3579 struct hci_request req; 3617 struct hci_request req;
3580 int err; 3618 int err;
3581 3619
@@ -3605,52 +3643,22 @@ static int stop_discovery(struct sock *sk, struct hci_dev *hdev, void *data,
3605 3643
3606 hci_req_init(&req, hdev); 3644 hci_req_init(&req, hdev);
3607 3645
3608 switch (hdev->discovery.state) { 3646 hci_stop_discovery(&req);
3609 case DISCOVERY_FINDING:
3610 if (test_bit(HCI_INQUIRY, &hdev->flags)) {
3611 hci_req_add(&req, HCI_OP_INQUIRY_CANCEL, 0, NULL);
3612 } else {
3613 cancel_delayed_work(&hdev->le_scan_disable);
3614
3615 hci_req_add_le_scan_disable(&req);
3616 }
3617
3618 break;
3619 3647
3620 case DISCOVERY_RESOLVING: 3648 err = hci_req_run(&req, stop_discovery_complete);
3621 e = hci_inquiry_cache_lookup_resolve(hdev, BDADDR_ANY, 3649 if (!err) {
3622 NAME_PENDING); 3650 hci_discovery_set_state(hdev, DISCOVERY_STOPPING);
3623 if (!e) {
3624 mgmt_pending_remove(cmd);
3625 err = cmd_complete(sk, hdev->id,
3626 MGMT_OP_STOP_DISCOVERY, 0,
3627 &mgmt_cp->type,
3628 sizeof(mgmt_cp->type));
3629 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3630 goto unlock;
3631 }
3632
3633 bacpy(&cp.bdaddr, &e->data.bdaddr);
3634 hci_req_add(&req, HCI_OP_REMOTE_NAME_REQ_CANCEL, sizeof(cp),
3635 &cp);
3636
3637 break;
3638
3639 default:
3640 BT_DBG("unknown discovery state %u", hdev->discovery.state);
3641
3642 mgmt_pending_remove(cmd);
3643 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY,
3644 MGMT_STATUS_FAILED, &mgmt_cp->type,
3645 sizeof(mgmt_cp->type));
3646 goto unlock; 3651 goto unlock;
3647 } 3652 }
3648 3653
3649 err = hci_req_run(&req, stop_discovery_complete); 3654 mgmt_pending_remove(cmd);
3650 if (err < 0) 3655
3651 mgmt_pending_remove(cmd); 3656 /* If no HCI commands were sent we're done */
3652 else 3657 if (err == -ENODATA) {
3653 hci_discovery_set_state(hdev, DISCOVERY_STOPPING); 3658 err = cmd_complete(sk, hdev->id, MGMT_OP_STOP_DISCOVERY, 0,
3659 &mgmt_cp->type, sizeof(mgmt_cp->type));
3660 hci_discovery_set_state(hdev, DISCOVERY_STOPPED);
3661 }
3654 3662
3655unlock: 3663unlock:
3656 hci_dev_unlock(hdev); 3664 hci_dev_unlock(hdev);
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 3d1cc164557d..e33a982161c1 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -385,6 +385,16 @@ static const u8 gen_method[5][5] = {
385 { CFM_PASSKEY, CFM_PASSKEY, REQ_PASSKEY, JUST_WORKS, OVERLAP }, 385 { CFM_PASSKEY, CFM_PASSKEY, REQ_PASSKEY, JUST_WORKS, OVERLAP },
386}; 386};
387 387
388static u8 get_auth_method(struct smp_chan *smp, u8 local_io, u8 remote_io)
389{
390 /* If either side has unknown io_caps, use JUST WORKS */
391 if (local_io > SMP_IO_KEYBOARD_DISPLAY ||
392 remote_io > SMP_IO_KEYBOARD_DISPLAY)
393 return JUST_WORKS;
394
395 return gen_method[remote_io][local_io];
396}
397
388static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth, 398static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
389 u8 local_io, u8 remote_io) 399 u8 local_io, u8 remote_io)
390{ 400{
@@ -401,14 +411,11 @@ static int tk_request(struct l2cap_conn *conn, u8 remote_oob, u8 auth,
401 BT_DBG("tk_request: auth:%d lcl:%d rem:%d", auth, local_io, remote_io); 411 BT_DBG("tk_request: auth:%d lcl:%d rem:%d", auth, local_io, remote_io);
402 412
403 /* If neither side wants MITM, use JUST WORKS */ 413 /* If neither side wants MITM, use JUST WORKS */
404 /* If either side has unknown io_caps, use JUST WORKS */
405 /* Otherwise, look up method from the table */ 414 /* Otherwise, look up method from the table */
406 if (!(auth & SMP_AUTH_MITM) || 415 if (!(auth & SMP_AUTH_MITM))
407 local_io > SMP_IO_KEYBOARD_DISPLAY ||
408 remote_io > SMP_IO_KEYBOARD_DISPLAY)
409 method = JUST_WORKS; 416 method = JUST_WORKS;
410 else 417 else
411 method = gen_method[remote_io][local_io]; 418 method = get_auth_method(smp, local_io, remote_io);
412 419
413 /* If not bonding, don't ask user to confirm a Zero TK */ 420 /* If not bonding, don't ask user to confirm a Zero TK */
414 if (!(auth & SMP_AUTH_BONDING) && method == JUST_CFM) 421 if (!(auth & SMP_AUTH_BONDING) && method == JUST_CFM)
@@ -544,7 +551,7 @@ static u8 smp_random(struct smp_chan *smp)
544 hci_le_start_enc(hcon, ediv, rand, stk); 551 hci_le_start_enc(hcon, ediv, rand, stk);
545 hcon->enc_key_size = smp->enc_key_size; 552 hcon->enc_key_size = smp->enc_key_size;
546 } else { 553 } else {
547 u8 stk[16]; 554 u8 stk[16], auth;
548 __le64 rand = 0; 555 __le64 rand = 0;
549 __le16 ediv = 0; 556 __le16 ediv = 0;
550 557
@@ -556,8 +563,13 @@ static u8 smp_random(struct smp_chan *smp)
556 memset(stk + smp->enc_key_size, 0, 563 memset(stk + smp->enc_key_size, 0,
557 SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size); 564 SMP_MAX_ENC_KEY_SIZE - smp->enc_key_size);
558 565
566 if (hcon->pending_sec_level == BT_SECURITY_HIGH)
567 auth = 1;
568 else
569 auth = 0;
570
559 hci_add_ltk(hcon->hdev, &hcon->dst, hcon->dst_type, 571 hci_add_ltk(hcon->hdev, &hcon->dst, hcon->dst_type,
560 HCI_SMP_STK_SLAVE, 0, stk, smp->enc_key_size, 572 HCI_SMP_STK_SLAVE, auth, stk, smp->enc_key_size,
561 ediv, rand); 573 ediv, rand);
562 } 574 }
563 575
@@ -664,7 +676,7 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
664{ 676{
665 struct smp_cmd_pairing rsp, *req = (void *) skb->data; 677 struct smp_cmd_pairing rsp, *req = (void *) skb->data;
666 struct smp_chan *smp; 678 struct smp_chan *smp;
667 u8 key_size, auth; 679 u8 key_size, auth, sec_level;
668 int ret; 680 int ret;
669 681
670 BT_DBG("conn %p", conn); 682 BT_DBG("conn %p", conn);
@@ -690,7 +702,19 @@ static u8 smp_cmd_pairing_req(struct l2cap_conn *conn, struct sk_buff *skb)
690 /* We didn't start the pairing, so match remote */ 702 /* We didn't start the pairing, so match remote */
691 auth = req->auth_req; 703 auth = req->auth_req;
692 704
693 conn->hcon->pending_sec_level = authreq_to_seclevel(auth); 705 sec_level = authreq_to_seclevel(auth);
706 if (sec_level > conn->hcon->pending_sec_level)
707 conn->hcon->pending_sec_level = sec_level;
708
709 /* If we need MITM, check that it can be achieved */
710 if (conn->hcon->pending_sec_level >= BT_SECURITY_HIGH) {
711 u8 method;
712
713 method = get_auth_method(smp, conn->hcon->io_capability,
714 req->io_capability);
715 if (method == JUST_WORKS || method == JUST_CFM)
716 return SMP_AUTH_REQUIREMENTS;
717 }
694 718
695 build_pairing_cmd(conn, req, &rsp, auth); 719 build_pairing_cmd(conn, req, &rsp, auth);
696 720
@@ -738,6 +762,16 @@ static u8 smp_cmd_pairing_rsp(struct l2cap_conn *conn, struct sk_buff *skb)
738 if (check_enc_key_size(conn, key_size)) 762 if (check_enc_key_size(conn, key_size))
739 return SMP_ENC_KEY_SIZE; 763 return SMP_ENC_KEY_SIZE;
740 764
765 /* If we need MITM, check that it can be achieved */
766 if (conn->hcon->pending_sec_level >= BT_SECURITY_HIGH) {
767 u8 method;
768
769 method = get_auth_method(smp, req->io_capability,
770 rsp->io_capability);
771 if (method == JUST_WORKS || method == JUST_CFM)
772 return SMP_AUTH_REQUIREMENTS;
773 }
774
741 get_random_bytes(smp->prnd, sizeof(smp->prnd)); 775 get_random_bytes(smp->prnd, sizeof(smp->prnd));
742 776
743 smp->prsp[0] = SMP_CMD_PAIRING_RSP; 777 smp->prsp[0] = SMP_CMD_PAIRING_RSP;
@@ -833,6 +867,7 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
833 struct smp_cmd_pairing cp; 867 struct smp_cmd_pairing cp;
834 struct hci_conn *hcon = conn->hcon; 868 struct hci_conn *hcon = conn->hcon;
835 struct smp_chan *smp; 869 struct smp_chan *smp;
870 u8 sec_level;
836 871
837 BT_DBG("conn %p", conn); 872 BT_DBG("conn %p", conn);
838 873
@@ -842,7 +877,9 @@ static u8 smp_cmd_security_req(struct l2cap_conn *conn, struct sk_buff *skb)
842 if (!(conn->hcon->link_mode & HCI_LM_MASTER)) 877 if (!(conn->hcon->link_mode & HCI_LM_MASTER))
843 return SMP_CMD_NOTSUPP; 878 return SMP_CMD_NOTSUPP;
844 879
845 hcon->pending_sec_level = authreq_to_seclevel(rp->auth_req); 880 sec_level = authreq_to_seclevel(rp->auth_req);
881 if (sec_level > hcon->pending_sec_level)
882 hcon->pending_sec_level = sec_level;
846 883
847 if (smp_ltk_encrypt(conn, hcon->pending_sec_level)) 884 if (smp_ltk_encrypt(conn, hcon->pending_sec_level))
848 return 0; 885 return 0;
@@ -896,9 +933,12 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
896 if (smp_sufficient_security(hcon, sec_level)) 933 if (smp_sufficient_security(hcon, sec_level))
897 return 1; 934 return 1;
898 935
936 if (sec_level > hcon->pending_sec_level)
937 hcon->pending_sec_level = sec_level;
938
899 if (hcon->link_mode & HCI_LM_MASTER) 939 if (hcon->link_mode & HCI_LM_MASTER)
900 if (smp_ltk_encrypt(conn, sec_level)) 940 if (smp_ltk_encrypt(conn, hcon->pending_sec_level))
901 goto done; 941 return 0;
902 942
903 if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) 943 if (test_and_set_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags))
904 return 0; 944 return 0;
@@ -913,7 +953,7 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
913 * requires it. 953 * requires it.
914 */ 954 */
915 if (hcon->io_capability != HCI_IO_NO_INPUT_OUTPUT || 955 if (hcon->io_capability != HCI_IO_NO_INPUT_OUTPUT ||
916 sec_level > BT_SECURITY_MEDIUM) 956 hcon->pending_sec_level > BT_SECURITY_MEDIUM)
917 authreq |= SMP_AUTH_MITM; 957 authreq |= SMP_AUTH_MITM;
918 958
919 if (hcon->link_mode & HCI_LM_MASTER) { 959 if (hcon->link_mode & HCI_LM_MASTER) {
@@ -932,9 +972,6 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
932 972
933 set_bit(SMP_FLAG_INITIATOR, &smp->flags); 973 set_bit(SMP_FLAG_INITIATOR, &smp->flags);
934 974
935done:
936 hcon->pending_sec_level = sec_level;
937
938 return 0; 975 return 0;
939} 976}
940 977
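[Editor's note] The SMP changes funnel all pairing-method lookups through a helper that falls back to Just Works whenever either side reports an IO capability outside the gen_method table, instead of indexing past its bounds. A small standalone sketch of that guard follows; the table contents are abbreviated and the constants are assumptions made only for the sketch.

#include <stdio.h>
#include <stdint.h>

enum { JUST_WORKS, JUST_CFM, REQ_PASSKEY, CFM_PASSKEY, OVERLAP };

#define IO_KEYBOARD_DISPLAY 4      /* assumed highest valid IO capability */

static const uint8_t gen_method[5][5] = {
	[0][0] = JUST_WORKS,       /* the real table has all 25 entries filled */
};

static uint8_t get_auth_method(uint8_t local_io, uint8_t remote_io)
{
	/* Unknown IO capabilities never index the table. */
	if (local_io > IO_KEYBOARD_DISPLAY || remote_io > IO_KEYBOARD_DISPLAY)
		return JUST_WORKS;

	return gen_method[remote_io][local_io];
}

int main(void)
{
	printf("%u\n", get_auth_method(7, 1));   /* out of range -> JUST_WORKS */
	printf("%u\n", get_auth_method(0, 0));   /* in range -> table entry    */
	return 0;
}
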
diff --git a/net/core/dev.c b/net/core/dev.c
index 30eedf677913..7990984ca364 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -148,6 +148,9 @@ struct list_head ptype_all __read_mostly; /* Taps */
148static struct list_head offload_base __read_mostly; 148static struct list_head offload_base __read_mostly;
149 149
150static int netif_rx_internal(struct sk_buff *skb); 150static int netif_rx_internal(struct sk_buff *skb);
151static int call_netdevice_notifiers_info(unsigned long val,
152 struct net_device *dev,
153 struct netdev_notifier_info *info);
151 154
152/* 155/*
153 * The @dev_base_head list is protected by @dev_base_lock and the rtnl 156 * The @dev_base_head list is protected by @dev_base_lock and the rtnl
@@ -1207,7 +1210,11 @@ EXPORT_SYMBOL(netdev_features_change);
1207void netdev_state_change(struct net_device *dev) 1210void netdev_state_change(struct net_device *dev)
1208{ 1211{
1209 if (dev->flags & IFF_UP) { 1212 if (dev->flags & IFF_UP) {
1210 call_netdevice_notifiers(NETDEV_CHANGE, dev); 1213 struct netdev_notifier_change_info change_info;
1214
1215 change_info.flags_changed = 0;
1216 call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
1217 &change_info.info);
1211 rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL); 1218 rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
1212 } 1219 }
1213} 1220}
@@ -4227,9 +4234,8 @@ static int process_backlog(struct napi_struct *napi, int quota)
4227#endif 4234#endif
4228 napi->weight = weight_p; 4235 napi->weight = weight_p;
4229 local_irq_disable(); 4236 local_irq_disable();
4230 while (work < quota) { 4237 while (1) {
4231 struct sk_buff *skb; 4238 struct sk_buff *skb;
4232 unsigned int qlen;
4233 4239
4234 while ((skb = __skb_dequeue(&sd->process_queue))) { 4240 while ((skb = __skb_dequeue(&sd->process_queue))) {
4235 local_irq_enable(); 4241 local_irq_enable();
@@ -4243,24 +4249,24 @@ static int process_backlog(struct napi_struct *napi, int quota)
4243 } 4249 }
4244 4250
4245 rps_lock(sd); 4251 rps_lock(sd);
4246 qlen = skb_queue_len(&sd->input_pkt_queue); 4252 if (skb_queue_empty(&sd->input_pkt_queue)) {
4247 if (qlen)
4248 skb_queue_splice_tail_init(&sd->input_pkt_queue,
4249 &sd->process_queue);
4250
4251 if (qlen < quota - work) {
4252 /* 4253 /*
4253 * Inline a custom version of __napi_complete(). 4254 * Inline a custom version of __napi_complete().
4254 * only current cpu owns and manipulates this napi, 4255 * only current cpu owns and manipulates this napi,
4255 * and NAPI_STATE_SCHED is the only possible flag set on backlog. 4256 * and NAPI_STATE_SCHED is the only possible flag set
4256 * we can use a plain write instead of clear_bit(), 4257 * on backlog.
4258 * We can use a plain write instead of clear_bit(),
4257 * and we dont need an smp_mb() memory barrier. 4259 * and we dont need an smp_mb() memory barrier.
4258 */ 4260 */
4259 list_del(&napi->poll_list); 4261 list_del(&napi->poll_list);
4260 napi->state = 0; 4262 napi->state = 0;
4263 rps_unlock(sd);
4261 4264
4262 quota = work + qlen; 4265 break;
4263 } 4266 }
4267
4268 skb_queue_splice_tail_init(&sd->input_pkt_queue,
4269 &sd->process_queue);
4264 rps_unlock(sd); 4270 rps_unlock(sd);
4265 } 4271 }
4266 local_irq_enable(); 4272 local_irq_enable();
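[Editor's note] The process_backlog rewrite replaces the quota-based splice heuristic with a simpler structure: drain the private process queue, and stop only once the producer-facing input queue is observed empty under the lock; otherwise splice it over and go around again. The following is a simplified single-threaded sketch of that loop shape, without the RPS locking, IRQ handling, or NAPI accounting; all names are hypothetical.

#include <stdio.h>
#include <stdlib.h>

struct node  { int v; struct node *next; };
struct queue { struct node *head, *tail; };

static void enqueue(struct queue *q, int v)
{
	struct node *n = malloc(sizeof(*n));

	if (!n)
		exit(1);
	n->v = v;
	n->next = NULL;
	if (q->tail)
		q->tail->next = n;
	else
		q->head = n;
	q->tail = n;
}

/* Move everything from @src to the tail of @dst and leave @src empty,
 * mirroring skb_queue_splice_tail_init(). */
static void splice_tail_init(struct queue *src, struct queue *dst)
{
	if (!src->head)
		return;
	if (dst->tail)
		dst->tail->next = src->head;
	else
		dst->head = src->head;
	dst->tail = src->tail;
	src->head = src->tail = NULL;
}

int main(void)
{
	struct queue input = { 0 }, process = { 0 };
	int i;

	for (i = 1; i <= 3; i++)
		enqueue(&input, i);

	while (1) {
		struct node *n;

		/* First drain whatever was spliced over previously. */
		while ((n = process.head)) {
			process.head = n->next;
			if (!process.head)
				process.tail = NULL;
			printf("handled %d\n", n->v);
			free(n);
		}

		/* Stop only once the producer-facing queue is seen empty. */
		if (!input.head)
			break;
		splice_tail_init(&input, &process);
	}
	return 0;
}
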
diff --git a/net/core/dst.c b/net/core/dst.c
index 80d6286c8b62..a028409ee438 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -269,6 +269,15 @@ again:
269} 269}
270EXPORT_SYMBOL(dst_destroy); 270EXPORT_SYMBOL(dst_destroy);
271 271
272static void dst_destroy_rcu(struct rcu_head *head)
273{
274 struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);
275
276 dst = dst_destroy(dst);
277 if (dst)
278 __dst_free(dst);
279}
280
272void dst_release(struct dst_entry *dst) 281void dst_release(struct dst_entry *dst)
273{ 282{
274 if (dst) { 283 if (dst) {
@@ -276,11 +285,8 @@ void dst_release(struct dst_entry *dst)
276 285
277 newrefcnt = atomic_dec_return(&dst->__refcnt); 286 newrefcnt = atomic_dec_return(&dst->__refcnt);
278 WARN_ON(newrefcnt < 0); 287 WARN_ON(newrefcnt < 0);
279 if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt) { 288 if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt)
280 dst = dst_destroy(dst); 289 call_rcu(&dst->rcu_head, dst_destroy_rcu);
281 if (dst)
282 __dst_free(dst);
283 }
284 } 290 }
285} 291}
286EXPORT_SYMBOL(dst_release); 292EXPORT_SYMBOL(dst_release);
diff --git a/net/core/filter.c b/net/core/filter.c
index 735fad897496..1dbf6462f766 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -840,11 +840,11 @@ int sk_convert_filter(struct sock_filter *prog, int len,
840 BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK); 840 BUILD_BUG_ON(BPF_MEMWORDS * sizeof(u32) > MAX_BPF_STACK);
841 BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG); 841 BUILD_BUG_ON(BPF_REG_FP + 1 != MAX_BPF_REG);
842 842
843 if (len <= 0 || len >= BPF_MAXINSNS) 843 if (len <= 0 || len > BPF_MAXINSNS)
844 return -EINVAL; 844 return -EINVAL;
845 845
846 if (new_prog) { 846 if (new_prog) {
847 addrs = kzalloc(len * sizeof(*addrs), GFP_KERNEL); 847 addrs = kcalloc(len, sizeof(*addrs), GFP_KERNEL);
848 if (!addrs) 848 if (!addrs)
849 return -ENOMEM; 849 return -ENOMEM;
850 } 850 }
@@ -1101,7 +1101,7 @@ static int check_load_and_stores(struct sock_filter *filter, int flen)
1101 1101
1102 BUILD_BUG_ON(BPF_MEMWORDS > 16); 1102 BUILD_BUG_ON(BPF_MEMWORDS > 16);
1103 1103
1104 masks = kmalloc(flen * sizeof(*masks), GFP_KERNEL); 1104 masks = kmalloc_array(flen, sizeof(*masks), GFP_KERNEL);
1105 if (!masks) 1105 if (!masks)
1106 return -ENOMEM; 1106 return -ENOMEM;
1107 1107
@@ -1382,7 +1382,7 @@ static struct sk_filter *__sk_migrate_realloc(struct sk_filter *fp,
1382 fp_new = sock_kmalloc(sk, len, GFP_KERNEL); 1382 fp_new = sock_kmalloc(sk, len, GFP_KERNEL);
1383 if (fp_new) { 1383 if (fp_new) {
1384 *fp_new = *fp; 1384 *fp_new = *fp;
1385 /* As we're kepping orig_prog in fp_new along, 1385 /* As we're keeping orig_prog in fp_new along,
1386 * we need to make sure we're not evicting it 1386 * we need to make sure we're not evicting it
1387 * from the old fp. 1387 * from the old fp.
1388 */ 1388 */
@@ -1524,8 +1524,8 @@ static struct sk_filter *__sk_prepare_filter(struct sk_filter *fp,
1524 1524
1525/** 1525/**
1526 * sk_unattached_filter_create - create an unattached filter 1526 * sk_unattached_filter_create - create an unattached filter
1527 * @fprog: the filter program
1528 * @pfp: the unattached filter that is created 1527 * @pfp: the unattached filter that is created
1528 * @fprog: the filter program
1529 * 1529 *
1530 * Create a filter independent of any socket. We first run some 1530 * Create a filter independent of any socket. We first run some
1531 * sanity checks on it to make sure it does not explode on us later. 1531 * sanity checks on it to make sure it does not explode on us later.
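[Editor's note] Replacing kzalloc(len * sizeof(*addrs), ...) with kcalloc(len, sizeof(*addrs), ...) lets the allocator reject a multiplication that would overflow instead of silently allocating a short buffer. The same contrast exists in userspace between malloc() and calloc(); on typical implementations calloc detects the overflow and returns NULL.

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

int main(void)
{
	size_t n = SIZE_MAX / 2 + 2;            /* n * 4 wraps around */

	uint32_t *a = malloc(n * sizeof(*a));   /* product wraps: tiny buffer */
	uint32_t *b = calloc(n, sizeof(*b));    /* overflow detected: NULL    */

	printf("malloc gave %s, calloc gave %s\n",
	       a ? "a (too small) buffer" : "NULL",
	       b ? "a buffer" : "NULL");
	free(a);
	free(b);
	return 0;
}
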
diff --git a/net/core/iovec.c b/net/core/iovec.c
index b61869429f4c..827dd6beb49c 100644
--- a/net/core/iovec.c
+++ b/net/core/iovec.c
@@ -75,61 +75,6 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
75} 75}
76 76
77/* 77/*
78 * Copy kernel to iovec. Returns -EFAULT on error.
79 */
80
81int memcpy_toiovecend(const struct iovec *iov, unsigned char *kdata,
82 int offset, int len)
83{
84 int copy;
85 for (; len > 0; ++iov) {
86 /* Skip over the finished iovecs */
87 if (unlikely(offset >= iov->iov_len)) {
88 offset -= iov->iov_len;
89 continue;
90 }
91 copy = min_t(unsigned int, iov->iov_len - offset, len);
92 if (copy_to_user(iov->iov_base + offset, kdata, copy))
93 return -EFAULT;
94 offset = 0;
95 kdata += copy;
96 len -= copy;
97 }
98
99 return 0;
100}
101EXPORT_SYMBOL(memcpy_toiovecend);
102
103/*
104 * Copy iovec to kernel. Returns -EFAULT on error.
105 */
106
107int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov,
108 int offset, int len)
109{
110 /* Skip over the finished iovecs */
111 while (offset >= iov->iov_len) {
112 offset -= iov->iov_len;
113 iov++;
114 }
115
116 while (len > 0) {
117 u8 __user *base = iov->iov_base + offset;
118 int copy = min_t(unsigned int, len, iov->iov_len - offset);
119
120 offset = 0;
121 if (copy_from_user(kdata, base, copy))
122 return -EFAULT;
123 len -= copy;
124 kdata += copy;
125 iov++;
126 }
127
128 return 0;
129}
130EXPORT_SYMBOL(memcpy_fromiovecend);
131
132/*
133 * And now for the all-in-one: copy and checksum from a user iovec 78 * And now for the all-in-one: copy and checksum from a user iovec
134 * directly to a datagram 79 * directly to a datagram
135 * Calls to csum_partial but the last must be in 32 bit chunks 80 * Calls to csum_partial but the last must be in 32 bit chunks
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 32d872eec7f5..559890b0f0a2 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -3059,11 +3059,12 @@ int neigh_sysctl_register(struct net_device *dev, struct neigh_parms *p,
3059 memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0, 3059 memset(&t->neigh_vars[NEIGH_VAR_GC_INTERVAL], 0,
3060 sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL])); 3060 sizeof(t->neigh_vars[NEIGH_VAR_GC_INTERVAL]));
3061 } else { 3061 } else {
3062 struct neigh_table *tbl = p->tbl;
3062 dev_name_source = "default"; 3063 dev_name_source = "default";
3063 t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = (int *)(p + 1); 3064 t->neigh_vars[NEIGH_VAR_GC_INTERVAL].data = &tbl->gc_interval;
3064 t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = (int *)(p + 1) + 1; 3065 t->neigh_vars[NEIGH_VAR_GC_THRESH1].data = &tbl->gc_thresh1;
3065 t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = (int *)(p + 1) + 2; 3066 t->neigh_vars[NEIGH_VAR_GC_THRESH2].data = &tbl->gc_thresh2;
3066 t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = (int *)(p + 1) + 3; 3067 t->neigh_vars[NEIGH_VAR_GC_THRESH3].data = &tbl->gc_thresh3;
3067 } 3068 }
3068 3069
3069 if (handler) { 3070 if (handler) {
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 9cd5344fad73..c1a33033cbe2 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -2993,7 +2993,7 @@ struct sk_buff *skb_segment(struct sk_buff *head_skb,
2993 skb_put(nskb, len), 2993 skb_put(nskb, len),
2994 len, 0); 2994 len, 0);
2995 SKB_GSO_CB(nskb)->csum_start = 2995 SKB_GSO_CB(nskb)->csum_start =
2996 skb_headroom(nskb) + offset; 2996 skb_headroom(nskb) + doffset;
2997 continue; 2997 continue;
2998 } 2998 }
2999 2999
diff --git a/net/ipv4/gre_demux.c b/net/ipv4/gre_demux.c
index 4e9619bca732..0485bf7f8f03 100644
--- a/net/ipv4/gre_demux.c
+++ b/net/ipv4/gre_demux.c
@@ -68,6 +68,7 @@ void gre_build_header(struct sk_buff *skb, const struct tnl_ptk_info *tpi,
68 68
69 skb_push(skb, hdr_len); 69 skb_push(skb, hdr_len);
70 70
71 skb_reset_transport_header(skb);
71 greh = (struct gre_base_hdr *)skb->data; 72 greh = (struct gre_base_hdr *)skb->data;
72 greh->flags = tnl_flags_to_gre_flags(tpi->flags); 73 greh->flags = tnl_flags_to_gre_flags(tpi->flags);
73 greh->protocol = tpi->proto; 74 greh->protocol = tpi->proto;
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 79c3d947a481..42b7bcf8045b 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -739,8 +739,6 @@ static void icmp_unreach(struct sk_buff *skb)
739 /* fall through */ 739 /* fall through */
740 case 0: 740 case 0:
741 info = ntohs(icmph->un.frag.mtu); 741 info = ntohs(icmph->un.frag.mtu);
742 if (!info)
743 goto out;
744 } 742 }
745 break; 743 break;
746 case ICMP_SR_FAILED: 744 case ICMP_SR_FAILED:
diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c
index 6748d420f714..db710b059bab 100644
--- a/net/ipv4/igmp.c
+++ b/net/ipv4/igmp.c
@@ -1944,6 +1944,10 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
1944 1944
1945 rtnl_lock(); 1945 rtnl_lock();
1946 in_dev = ip_mc_find_dev(net, imr); 1946 in_dev = ip_mc_find_dev(net, imr);
1947 if (!in_dev) {
1948 ret = -ENODEV;
1949 goto out;
1950 }
1947 ifindex = imr->imr_ifindex; 1951 ifindex = imr->imr_ifindex;
1948 for (imlp = &inet->mc_list; 1952 for (imlp = &inet->mc_list;
1949 (iml = rtnl_dereference(*imlp)) != NULL; 1953 (iml = rtnl_dereference(*imlp)) != NULL;
@@ -1961,16 +1965,14 @@ int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
1961 1965
1962 *imlp = iml->next_rcu; 1966 *imlp = iml->next_rcu;
1963 1967
1964 if (in_dev) 1968 ip_mc_dec_group(in_dev, group);
1965 ip_mc_dec_group(in_dev, group);
1966 rtnl_unlock(); 1969 rtnl_unlock();
1967 /* decrease mem now to avoid the memleak warning */ 1970 /* decrease mem now to avoid the memleak warning */
1968 atomic_sub(sizeof(*iml), &sk->sk_omem_alloc); 1971 atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
1969 kfree_rcu(iml, rcu); 1972 kfree_rcu(iml, rcu);
1970 return 0; 1973 return 0;
1971 } 1974 }
1972 if (!in_dev) 1975out:
1973 ret = -ENODEV;
1974 rtnl_unlock(); 1976 rtnl_unlock();
1975 return ret; 1977 return ret;
1976} 1978}
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index 097b3e7c1e8f..6f9de61dce5f 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -73,12 +73,7 @@ static void __tunnel_dst_set(struct ip_tunnel_dst *idst,
73{ 73{
74 struct dst_entry *old_dst; 74 struct dst_entry *old_dst;
75 75
76 if (dst) { 76 dst_clone(dst);
77 if (dst->flags & DST_NOCACHE)
78 dst = NULL;
79 else
80 dst_clone(dst);
81 }
82 old_dst = xchg((__force struct dst_entry **)&idst->dst, dst); 77 old_dst = xchg((__force struct dst_entry **)&idst->dst, dst);
83 dst_release(old_dst); 78 dst_release(old_dst);
84} 79}
@@ -108,13 +103,14 @@ static struct rtable *tunnel_rtable_get(struct ip_tunnel *t, u32 cookie)
108 103
109 rcu_read_lock(); 104 rcu_read_lock();
110 dst = rcu_dereference(this_cpu_ptr(t->dst_cache)->dst); 105 dst = rcu_dereference(this_cpu_ptr(t->dst_cache)->dst);
106 if (dst && !atomic_inc_not_zero(&dst->__refcnt))
107 dst = NULL;
111 if (dst) { 108 if (dst) {
112 if (dst->obsolete && dst->ops->check(dst, cookie) == NULL) { 109 if (dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
113 rcu_read_unlock();
114 tunnel_dst_reset(t); 110 tunnel_dst_reset(t);
115 return NULL; 111 dst_release(dst);
112 dst = NULL;
116 } 113 }
117 dst_hold(dst);
118 } 114 }
119 rcu_read_unlock(); 115 rcu_read_unlock();
120 return (struct rtable *)dst; 116 return (struct rtable *)dst;
@@ -173,6 +169,7 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
173 169
174 hlist_for_each_entry_rcu(t, head, hash_node) { 170 hlist_for_each_entry_rcu(t, head, hash_node) {
175 if (remote != t->parms.iph.daddr || 171 if (remote != t->parms.iph.daddr ||
172 t->parms.iph.saddr != 0 ||
176 !(t->dev->flags & IFF_UP)) 173 !(t->dev->flags & IFF_UP))
177 continue; 174 continue;
178 175
@@ -189,10 +186,11 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
189 head = &itn->tunnels[hash]; 186 head = &itn->tunnels[hash];
190 187
191 hlist_for_each_entry_rcu(t, head, hash_node) { 188 hlist_for_each_entry_rcu(t, head, hash_node) {
192 if ((local != t->parms.iph.saddr && 189 if ((local != t->parms.iph.saddr || t->parms.iph.daddr != 0) &&
193 (local != t->parms.iph.daddr || 190 (local != t->parms.iph.daddr || !ipv4_is_multicast(local)))
194 !ipv4_is_multicast(local))) || 191 continue;
195 !(t->dev->flags & IFF_UP)) 192
193 if (!(t->dev->flags & IFF_UP))
196 continue; 194 continue;
197 195
198 if (!ip_tunnel_key_match(&t->parms, flags, key)) 196 if (!ip_tunnel_key_match(&t->parms, flags, key))
@@ -209,6 +207,8 @@ struct ip_tunnel *ip_tunnel_lookup(struct ip_tunnel_net *itn,
209 207
210 hlist_for_each_entry_rcu(t, head, hash_node) { 208 hlist_for_each_entry_rcu(t, head, hash_node) {
211 if (t->parms.i_key != key || 209 if (t->parms.i_key != key ||
210 t->parms.iph.saddr != 0 ||
211 t->parms.iph.daddr != 0 ||
212 !(t->dev->flags & IFF_UP)) 212 !(t->dev->flags & IFF_UP))
213 continue; 213 continue;
214 214
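[Editor's note] tunnel_rtable_get now takes its reference with atomic_inc_not_zero() while still inside rcu_read_lock(), so a cached dst whose refcount has already reached zero is treated as gone rather than resurrected. Below is a standalone C11 sketch of the inc-not-zero idiom itself, not kernel code.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Take a reference only if the object still has one, i.e. never move the
 * counter from 0 back to 1. */
static bool refcount_inc_not_zero(atomic_int *refcnt)
{
	int old = atomic_load(refcnt);

	while (old != 0) {
		if (atomic_compare_exchange_weak(refcnt, &old, old + 1))
			return true;
		/* old was refreshed by the failed CAS; try again */
	}
	return false;
}

int main(void)
{
	atomic_int live = 1, dying = 0;

	printf("live:  %d\n", refcount_inc_not_zero(&live));    /* 1 */
	printf("dying: %d\n", refcount_inc_not_zero(&dying));   /* 0 */
	return 0;
}
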
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 082239ffe34a..3162ea923ded 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1010,7 +1010,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
1010 const struct iphdr *iph = (const struct iphdr *) skb->data; 1010 const struct iphdr *iph = (const struct iphdr *) skb->data;
1011 struct flowi4 fl4; 1011 struct flowi4 fl4;
1012 struct rtable *rt; 1012 struct rtable *rt;
1013 struct dst_entry *dst; 1013 struct dst_entry *odst = NULL;
1014 bool new = false; 1014 bool new = false;
1015 1015
1016 bh_lock_sock(sk); 1016 bh_lock_sock(sk);
@@ -1018,16 +1018,17 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
1018 if (!ip_sk_accept_pmtu(sk)) 1018 if (!ip_sk_accept_pmtu(sk))
1019 goto out; 1019 goto out;
1020 1020
1021 rt = (struct rtable *) __sk_dst_get(sk); 1021 odst = sk_dst_get(sk);
1022 1022
1023 if (sock_owned_by_user(sk) || !rt) { 1023 if (sock_owned_by_user(sk) || !odst) {
1024 __ipv4_sk_update_pmtu(skb, sk, mtu); 1024 __ipv4_sk_update_pmtu(skb, sk, mtu);
1025 goto out; 1025 goto out;
1026 } 1026 }
1027 1027
1028 __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0); 1028 __build_flow_key(&fl4, sk, iph, 0, 0, 0, 0, 0);
1029 1029
1030 if (!__sk_dst_check(sk, 0)) { 1030 rt = (struct rtable *)odst;
1031 if (odst->obsolete && odst->ops->check(odst, 0) == NULL) {
1031 rt = ip_route_output_flow(sock_net(sk), &fl4, sk); 1032 rt = ip_route_output_flow(sock_net(sk), &fl4, sk);
1032 if (IS_ERR(rt)) 1033 if (IS_ERR(rt))
1033 goto out; 1034 goto out;
@@ -1037,8 +1038,7 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
1037 1038
1038 __ip_rt_update_pmtu((struct rtable *) rt->dst.path, &fl4, mtu); 1039 __ip_rt_update_pmtu((struct rtable *) rt->dst.path, &fl4, mtu);
1039 1040
1040 dst = dst_check(&rt->dst, 0); 1041 if (!dst_check(&rt->dst, 0)) {
1041 if (!dst) {
1042 if (new) 1042 if (new)
1043 dst_release(&rt->dst); 1043 dst_release(&rt->dst);
1044 1044
@@ -1050,10 +1050,11 @@ void ipv4_sk_update_pmtu(struct sk_buff *skb, struct sock *sk, u32 mtu)
1050 } 1050 }
1051 1051
1052 if (new) 1052 if (new)
1053 __sk_dst_set(sk, &rt->dst); 1053 sk_dst_set(sk, &rt->dst);
1054 1054
1055out: 1055out:
1056 bh_unlock_sock(sk); 1056 bh_unlock_sock(sk);
1057 dst_release(odst);
1057} 1058}
1058EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu); 1059EXPORT_SYMBOL_GPL(ipv4_sk_update_pmtu);
1059 1060
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index eb1dde37e678..9d2118e5fbc7 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1108,7 +1108,7 @@ int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1108 if (unlikely(tp->repair)) { 1108 if (unlikely(tp->repair)) {
1109 if (tp->repair_queue == TCP_RECV_QUEUE) { 1109 if (tp->repair_queue == TCP_RECV_QUEUE) {
1110 copied = tcp_send_rcvq(sk, msg, size); 1110 copied = tcp_send_rcvq(sk, msg, size);
1111 goto out; 1111 goto out_nopush;
1112 } 1112 }
1113 1113
1114 err = -EINVAL; 1114 err = -EINVAL;
@@ -1282,6 +1282,7 @@ wait_for_memory:
1282out: 1282out:
1283 if (copied) 1283 if (copied)
1284 tcp_push(sk, flags, mss_now, tp->nonagle, size_goal); 1284 tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
1285out_nopush:
1285 release_sock(sk); 1286 release_sock(sk);
1286 return copied + copied_syn; 1287 return copied + copied_syn;
1287 1288
diff --git a/net/ipv4/tcp_fastopen.c b/net/ipv4/tcp_fastopen.c
index 62e48cf84e60..9771563ab564 100644
--- a/net/ipv4/tcp_fastopen.c
+++ b/net/ipv4/tcp_fastopen.c
@@ -131,7 +131,7 @@ static bool tcp_fastopen_create_child(struct sock *sk,
131 struct dst_entry *dst, 131 struct dst_entry *dst,
132 struct request_sock *req) 132 struct request_sock *req)
133{ 133{
134 struct tcp_sock *tp = tcp_sk(sk); 134 struct tcp_sock *tp;
135 struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue; 135 struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
136 struct sock *child; 136 struct sock *child;
137 137
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 40661fc1e233..40639c288dc2 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1106,7 +1106,7 @@ static bool tcp_check_dsack(struct sock *sk, const struct sk_buff *ack_skb,
1106 } 1106 }
1107 1107
1108 /* D-SACK for already forgotten data... Do dumb counting. */ 1108 /* D-SACK for already forgotten data... Do dumb counting. */
1109 if (dup_sack && tp->undo_marker && tp->undo_retrans && 1109 if (dup_sack && tp->undo_marker && tp->undo_retrans > 0 &&
1110 !after(end_seq_0, prior_snd_una) && 1110 !after(end_seq_0, prior_snd_una) &&
1111 after(end_seq_0, tp->undo_marker)) 1111 after(end_seq_0, tp->undo_marker))
1112 tp->undo_retrans--; 1112 tp->undo_retrans--;
@@ -1162,7 +1162,7 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb,
1162 unsigned int new_len = (pkt_len / mss) * mss; 1162 unsigned int new_len = (pkt_len / mss) * mss;
1163 if (!in_sack && new_len < pkt_len) { 1163 if (!in_sack && new_len < pkt_len) {
1164 new_len += mss; 1164 new_len += mss;
1165 if (new_len > skb->len) 1165 if (new_len >= skb->len)
1166 return 0; 1166 return 0;
1167 } 1167 }
1168 pkt_len = new_len; 1168 pkt_len = new_len;
@@ -1187,7 +1187,7 @@ static u8 tcp_sacktag_one(struct sock *sk,
1187 1187
1188 /* Account D-SACK for retransmitted packet. */ 1188 /* Account D-SACK for retransmitted packet. */
1189 if (dup_sack && (sacked & TCPCB_RETRANS)) { 1189 if (dup_sack && (sacked & TCPCB_RETRANS)) {
1190 if (tp->undo_marker && tp->undo_retrans && 1190 if (tp->undo_marker && tp->undo_retrans > 0 &&
1191 after(end_seq, tp->undo_marker)) 1191 after(end_seq, tp->undo_marker))
1192 tp->undo_retrans--; 1192 tp->undo_retrans--;
1193 if (sacked & TCPCB_SACKED_ACKED) 1193 if (sacked & TCPCB_SACKED_ACKED)
@@ -1893,7 +1893,7 @@ static void tcp_clear_retrans_partial(struct tcp_sock *tp)
1893 tp->lost_out = 0; 1893 tp->lost_out = 0;
1894 1894
1895 tp->undo_marker = 0; 1895 tp->undo_marker = 0;
1896 tp->undo_retrans = 0; 1896 tp->undo_retrans = -1;
1897} 1897}
1898 1898
1899void tcp_clear_retrans(struct tcp_sock *tp) 1899void tcp_clear_retrans(struct tcp_sock *tp)
@@ -2665,7 +2665,7 @@ static void tcp_enter_recovery(struct sock *sk, bool ece_ack)
2665 2665
2666 tp->prior_ssthresh = 0; 2666 tp->prior_ssthresh = 0;
2667 tp->undo_marker = tp->snd_una; 2667 tp->undo_marker = tp->snd_una;
2668 tp->undo_retrans = tp->retrans_out; 2668 tp->undo_retrans = tp->retrans_out ? : -1;
2669 2669
2670 if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) { 2670 if (inet_csk(sk)->icsk_ca_state < TCP_CA_CWR) {
2671 if (!ece_ack) 2671 if (!ece_ack)
diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c
index d92bce0ea24e..179b51e6bda3 100644
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
@@ -2525,8 +2525,6 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
2525 if (!tp->retrans_stamp) 2525 if (!tp->retrans_stamp)
2526 tp->retrans_stamp = TCP_SKB_CB(skb)->when; 2526 tp->retrans_stamp = TCP_SKB_CB(skb)->when;
2527 2527
2528 tp->undo_retrans += tcp_skb_pcount(skb);
2529
2530 /* snd_nxt is stored to detect loss of retransmitted segment, 2528 /* snd_nxt is stored to detect loss of retransmitted segment,
2531 * see tcp_input.c tcp_sacktag_write_queue(). 2529 * see tcp_input.c tcp_sacktag_write_queue().
2532 */ 2530 */
@@ -2534,6 +2532,10 @@ int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb)
2534 } else if (err != -EBUSY) { 2532 } else if (err != -EBUSY) {
2535 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL); 2533 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPRETRANSFAIL);
2536 } 2534 }
2535
2536 if (tp->undo_retrans < 0)
2537 tp->undo_retrans = 0;
2538 tp->undo_retrans += tcp_skb_pcount(skb);
2537 return err; 2539 return err;
2538} 2540}
2539 2541
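[Editor's note] The TCP changes turn undo_retrans into a counter with a sentinel: it is reset to -1 rather than 0 when there is nothing to undo, tests become "undo_retrans > 0", and the accounting on retransmit first normalises -1 back to 0. A tiny illustrative sketch of that convention, with made-up names:

#include <stdio.h>

static int undo_retrans = -1;   /* -1: nothing accounted yet, >0: pending */

static void account_retransmit(int pcount)
{
	if (undo_retrans < 0)
		undo_retrans = 0;   /* leave the sentinel before counting */
	undo_retrans += pcount;
}

int main(void)
{
	printf("pending before: %d\n", undo_retrans > 0);   /* 0 */
	account_retransmit(2);
	account_retransmit(1);
	printf("pending after:  %d\n", undo_retrans > 0);   /* 1 */
	return 0;
}
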
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index d92f94b7e402..7d5a8661df76 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1588,8 +1588,11 @@ int udp_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
1588 goto csum_error; 1588 goto csum_error;
1589 1589
1590 1590
1591 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) 1591 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
1592 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
1593 is_udplite);
1592 goto drop; 1594 goto drop;
1595 }
1593 1596
1594 rc = 0; 1597 rc = 0;
1595 1598
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 08b367c6b9cf..617f0958e164 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1301,8 +1301,17 @@ int igmp6_event_query(struct sk_buff *skb)
1301 len = ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr); 1301 len = ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr);
1302 len -= skb_network_header_len(skb); 1302 len -= skb_network_header_len(skb);
1303 1303
1304 /* Drop queries with not link local source */ 1304 /* RFC3810 6.2
1305 if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL)) 1305 * Upon reception of an MLD message that contains a Query, the node
1306 * checks if the source address of the message is a valid link-local
1307 * address, if the Hop Limit is set to 1, and if the Router Alert
1308 * option is present in the Hop-By-Hop Options header of the IPv6
1309 * packet. If any of these checks fails, the packet is dropped.
1310 */
1311 if (!(ipv6_addr_type(&ipv6_hdr(skb)->saddr) & IPV6_ADDR_LINKLOCAL) ||
1312 ipv6_hdr(skb)->hop_limit != 1 ||
1313 !(IP6CB(skb)->flags & IP6SKB_ROUTERALERT) ||
1314 IP6CB(skb)->ra != htons(IPV6_OPT_ROUTERALERT_MLD))
1306 return -EINVAL; 1315 return -EINVAL;
1307 1316
1308 idev = __in6_dev_get(skb->dev); 1317 idev = __in6_dev_get(skb->dev);
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 95c834799288..7092ff78fd84 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -674,8 +674,11 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
674 goto csum_error; 674 goto csum_error;
675 } 675 }
676 676
677 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) 677 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
678 UDP6_INC_STATS_BH(sock_net(sk),
679 UDP_MIB_RCVBUFERRORS, is_udplite);
678 goto drop; 680 goto drop;
681 }
679 682
680 skb_dst_drop(skb); 683 skb_dst_drop(skb);
681 684
@@ -690,6 +693,7 @@ int udpv6_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
690 bh_unlock_sock(sk); 693 bh_unlock_sock(sk);
691 694
692 return rc; 695 return rc;
696
693csum_error: 697csum_error:
694 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite); 698 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
695drop: 699drop:
diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
index 950909f04ee6..13752d96275e 100644
--- a/net/l2tp/l2tp_ppp.c
+++ b/net/l2tp/l2tp_ppp.c
@@ -1365,7 +1365,7 @@ static int pppol2tp_setsockopt(struct socket *sock, int level, int optname,
1365 int err; 1365 int err;
1366 1366
1367 if (level != SOL_PPPOL2TP) 1367 if (level != SOL_PPPOL2TP)
1368 return udp_prot.setsockopt(sk, level, optname, optval, optlen); 1368 return -EINVAL;
1369 1369
1370 if (optlen < sizeof(int)) 1370 if (optlen < sizeof(int))
1371 return -EINVAL; 1371 return -EINVAL;
@@ -1491,7 +1491,7 @@ static int pppol2tp_getsockopt(struct socket *sock, int level, int optname,
1491 struct pppol2tp_session *ps; 1491 struct pppol2tp_session *ps;
1492 1492
1493 if (level != SOL_PPPOL2TP) 1493 if (level != SOL_PPPOL2TP)
1494 return udp_prot.getsockopt(sk, level, optname, optval, optlen); 1494 return -EINVAL;
1495 1495
1496 if (get_user(len, optlen)) 1496 if (get_user(len, optlen))
1497 return -EFAULT; 1497 return -EFAULT;
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 6886601afe1c..a6cda52ed920 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1096,11 +1096,12 @@ void ieee80211_send_auth(struct ieee80211_sub_if_data *sdata,
1096 int err; 1096 int err;
1097 1097
1098 /* 24 + 6 = header + auth_algo + auth_transaction + status_code */ 1098 /* 24 + 6 = header + auth_algo + auth_transaction + status_code */
1099 skb = dev_alloc_skb(local->hw.extra_tx_headroom + 24 + 6 + extra_len); 1099 skb = dev_alloc_skb(local->hw.extra_tx_headroom + IEEE80211_WEP_IV_LEN +
1100 24 + 6 + extra_len + IEEE80211_WEP_ICV_LEN);
1100 if (!skb) 1101 if (!skb)
1101 return; 1102 return;
1102 1103
1103 skb_reserve(skb, local->hw.extra_tx_headroom); 1104 skb_reserve(skb, local->hw.extra_tx_headroom + IEEE80211_WEP_IV_LEN);
1104 1105
1105 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24 + 6); 1106 mgmt = (struct ieee80211_mgmt *) skb_put(skb, 24 + 6);
1106 memset(mgmt, 0, 24 + 6); 1107 memset(mgmt, 0, 24 + 6);
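A rough buffer-budget model (not kernel code) for the mac80211 hunk above: shared-key auth frames may be WEP-encrypted, so the allocation now leaves headroom for the IV in front of the frame and tailroom for the ICV behind it. The 4-byte IV/ICV sizes are the usual IEEE80211_WEP_* values and are assumed here; the headroom value is an arbitrary example.

#include <stdio.h>

#define WEP_IV_LEN   4	/* assumed IEEE80211_WEP_IV_LEN */
#define WEP_ICV_LEN  4	/* assumed IEEE80211_WEP_ICV_LEN */
#define MGMT_HDR_LEN 24	/* 802.11 management header */
#define AUTH_FIXED   6	/* auth_algo + auth_transaction + status_code */

int main(void)
{
	unsigned int extra_tx_headroom = 16;	/* driver-specific, example value */
	unsigned int extra_len = 0;		/* challenge text, if any */

	unsigned int alloc = extra_tx_headroom + WEP_IV_LEN +
			     MGMT_HDR_LEN + AUTH_FIXED + extra_len + WEP_ICV_LEN;
	unsigned int reserved = extra_tx_headroom + WEP_IV_LEN;
	unsigned int payload = MGMT_HDR_LEN + AUTH_FIXED + extra_len;

	printf("alloc %u bytes, reserve %u, payload %u, tailroom %u left for the ICV\n",
	       alloc, reserved, payload, alloc - reserved - payload);
	return 0;
}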
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index c42e83d2751c..581a6584ed0c 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -3778,6 +3778,7 @@ static void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net)
3778 cancel_delayed_work_sync(&ipvs->defense_work); 3778 cancel_delayed_work_sync(&ipvs->defense_work);
3779 cancel_work_sync(&ipvs->defense_work.work); 3779 cancel_work_sync(&ipvs->defense_work.work);
3780 unregister_net_sysctl_table(ipvs->sysctl_hdr); 3780 unregister_net_sysctl_table(ipvs->sysctl_hdr);
3781 ip_vs_stop_estimator(net, &ipvs->tot_stats);
3781} 3782}
3782 3783
3783#else 3784#else
@@ -3840,7 +3841,6 @@ void __net_exit ip_vs_control_net_cleanup(struct net *net)
3840 struct netns_ipvs *ipvs = net_ipvs(net); 3841 struct netns_ipvs *ipvs = net_ipvs(net);
3841 3842
3842 ip_vs_trash_cleanup(net); 3843 ip_vs_trash_cleanup(net);
3843 ip_vs_stop_estimator(net, &ipvs->tot_stats);
3844 ip_vs_control_net_cleanup_sysctl(net); 3844 ip_vs_control_net_cleanup_sysctl(net);
3845 remove_proc_entry("ip_vs_stats_percpu", net->proc_net); 3845 remove_proc_entry("ip_vs_stats_percpu", net->proc_net);
3846 remove_proc_entry("ip_vs_stats", net->proc_net); 3846 remove_proc_entry("ip_vs_stats", net->proc_net);
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index 58579634427d..300ed1eec729 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -597,6 +597,9 @@ ctnetlink_nlmsg_size(const struct nf_conn *ct)
597#ifdef CONFIG_NF_CONNTRACK_MARK 597#ifdef CONFIG_NF_CONNTRACK_MARK
598 + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */ 598 + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
599#endif 599#endif
600#ifdef CONFIG_NF_CONNTRACK_ZONES
601 + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE */
602#endif
600 + ctnetlink_proto_size(ct) 603 + ctnetlink_proto_size(ct)
601 + ctnetlink_label_size(ct) 604 + ctnetlink_label_size(ct)
602 ; 605 ;
@@ -1150,7 +1153,7 @@ static int ctnetlink_done_list(struct netlink_callback *cb)
1150static int 1153static int
1151ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb, bool dying) 1154ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb, bool dying)
1152{ 1155{
1153 struct nf_conn *ct, *last = NULL; 1156 struct nf_conn *ct, *last;
1154 struct nf_conntrack_tuple_hash *h; 1157 struct nf_conntrack_tuple_hash *h;
1155 struct hlist_nulls_node *n; 1158 struct hlist_nulls_node *n;
1156 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh); 1159 struct nfgenmsg *nfmsg = nlmsg_data(cb->nlh);
@@ -1163,8 +1166,7 @@ ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb, bool dying
1163 if (cb->args[2]) 1166 if (cb->args[2])
1164 return 0; 1167 return 0;
1165 1168
1166 if (cb->args[0] == nr_cpu_ids) 1169 last = (struct nf_conn *)cb->args[1];
1167 return 0;
1168 1170
1169 for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) { 1171 for (cpu = cb->args[0]; cpu < nr_cpu_ids; cpu++) {
1170 struct ct_pcpu *pcpu; 1172 struct ct_pcpu *pcpu;
@@ -1174,7 +1176,6 @@ ctnetlink_dump_list(struct sk_buff *skb, struct netlink_callback *cb, bool dying
1174 1176
1175 pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu); 1177 pcpu = per_cpu_ptr(net->ct.pcpu_lists, cpu);
1176 spin_lock_bh(&pcpu->lock); 1178 spin_lock_bh(&pcpu->lock);
1177 last = (struct nf_conn *)cb->args[1];
1178 list = dying ? &pcpu->dying : &pcpu->unconfirmed; 1179 list = dying ? &pcpu->dying : &pcpu->unconfirmed;
1179restart: 1180restart:
1180 hlist_nulls_for_each_entry(h, n, list, hnnode) { 1181 hlist_nulls_for_each_entry(h, n, list, hnnode) {
@@ -1193,7 +1194,9 @@ restart:
1193 ct); 1194 ct);
1194 rcu_read_unlock(); 1195 rcu_read_unlock();
1195 if (res < 0) { 1196 if (res < 0) {
1196 nf_conntrack_get(&ct->ct_general); 1197 if (!atomic_inc_not_zero(&ct->ct_general.use))
1198 continue;
1199 cb->args[0] = cpu;
1197 cb->args[1] = (unsigned long)ct; 1200 cb->args[1] = (unsigned long)ct;
1198 spin_unlock_bh(&pcpu->lock); 1201 spin_unlock_bh(&pcpu->lock);
1199 goto out; 1202 goto out;
@@ -1202,10 +1205,10 @@ restart:
1202 if (cb->args[1]) { 1205 if (cb->args[1]) {
1203 cb->args[1] = 0; 1206 cb->args[1] = 0;
1204 goto restart; 1207 goto restart;
1205 } else 1208 }
1206 cb->args[2] = 1;
1207 spin_unlock_bh(&pcpu->lock); 1209 spin_unlock_bh(&pcpu->lock);
1208 } 1210 }
1211 cb->args[2] = 1;
1209out: 1212out:
1210 if (last) 1213 if (last)
1211 nf_ct_put(last); 1214 nf_ct_put(last);
@@ -2040,6 +2043,9 @@ ctnetlink_nfqueue_build_size(const struct nf_conn *ct)
2040#ifdef CONFIG_NF_CONNTRACK_MARK 2043#ifdef CONFIG_NF_CONNTRACK_MARK
2041 + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */ 2044 + nla_total_size(sizeof(u_int32_t)) /* CTA_MARK */
2042#endif 2045#endif
2046#ifdef CONFIG_NF_CONNTRACK_ZONES
2047 + nla_total_size(sizeof(u_int16_t)) /* CTA_ZONE */
2048#endif
2043 + ctnetlink_proto_size(ct) 2049 + ctnetlink_proto_size(ct)
2044 ; 2050 ;
2045} 2051}
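A userspace sketch (not kernel code) of the dump-resume pattern used in the ctnetlink_dump_list hunks above: when the dump skb fills up, the current entry is remembered as a cursor, but a reference is only taken if the conntrack is still alive (refcount non-zero); a dying entry is skipped instead of being pinned.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct entry {
	atomic_int use;		/* models ct->ct_general.use */
	int id;
};

/* models atomic_inc_not_zero(): take a reference unless already dying */
static bool get_ref_not_zero(struct entry *e)
{
	int old = atomic_load(&e->use);

	while (old != 0)
		if (atomic_compare_exchange_weak(&e->use, &old, old + 1))
			return true;
	return false;
}

int main(void)
{
	struct entry alive = { .use = 1, .id = 1 };
	struct entry dying = { .use = 0, .id = 2 };

	printf("alive: %s, dying: %s\n",
	       get_ref_not_zero(&alive) ? "saved as cursor" : "skipped",
	       get_ref_not_zero(&dying) ? "saved as cursor" : "skipped");
	return 0;
}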
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 09096a670c45..a49907b1dabc 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -525,6 +525,39 @@ static int nf_nat_proto_remove(struct nf_conn *i, void *data)
525 return i->status & IPS_NAT_MASK ? 1 : 0; 525 return i->status & IPS_NAT_MASK ? 1 : 0;
526} 526}
527 527
528static int nf_nat_proto_clean(struct nf_conn *ct, void *data)
529{
530 struct nf_conn_nat *nat = nfct_nat(ct);
531
532 if (nf_nat_proto_remove(ct, data))
533 return 1;
534
535 if (!nat || !nat->ct)
536 return 0;
537
538 /* This netns is being destroyed, and conntrack has nat null binding.
539 * Remove it from bysource hash, as the table will be freed soon.
540 *
541 * Else, when the conntrack is destroyed, nf_nat_cleanup_conntrack()
542 * will delete entry from already-freed table.
543 */
544 if (!del_timer(&ct->timeout))
545 return 1;
546
547 spin_lock_bh(&nf_nat_lock);
548 hlist_del_rcu(&nat->bysource);
549 ct->status &= ~IPS_NAT_DONE_MASK;
550 nat->ct = NULL;
551 spin_unlock_bh(&nf_nat_lock);
552
553 add_timer(&ct->timeout);
554
555 /* don't delete conntrack. Although that would make things a lot
556 * simpler, we'd end up flushing all conntracks on nat rmmod.
557 */
558 return 0;
559}
560
528static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto) 561static void nf_nat_l4proto_clean(u8 l3proto, u8 l4proto)
529{ 562{
530 struct nf_nat_proto_clean clean = { 563 struct nf_nat_proto_clean clean = {
@@ -795,7 +828,7 @@ static void __net_exit nf_nat_net_exit(struct net *net)
795{ 828{
796 struct nf_nat_proto_clean clean = {}; 829 struct nf_nat_proto_clean clean = {};
797 830
798 nf_ct_iterate_cleanup(net, &nf_nat_proto_remove, &clean, 0, 0); 831 nf_ct_iterate_cleanup(net, nf_nat_proto_clean, &clean, 0, 0);
799 synchronize_rcu(); 832 synchronize_rcu();
800 nf_ct_free_hashtable(net->ct.nat_bysource, net->ct.nat_htable_size); 833 nf_ct_free_hashtable(net->ct.nat_bysource, net->ct.nat_htable_size);
801} 834}
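A userspace sketch (not kernel code) of the del_timer()/add_timer() dance in nf_nat_proto_clean() above: del_timer() returns nonzero only when the timer was still pending, so it doubles as a check that the conntrack is not already on its way out; only then is the NAT binding unhashed, and the timer is re-armed so the entry ages out normally. The struct and helpers below are stand-ins for the real conntrack objects.

#include <stdbool.h>
#include <stdio.h>

struct fake_ct {
	bool timer_pending;	/* models the ct->timeout timer */
	bool in_bysource_hash;
};

static bool fake_del_timer(struct fake_ct *ct)
{
	bool was_pending = ct->timer_pending;

	ct->timer_pending = false;
	return was_pending;
}

static int proto_clean(struct fake_ct *ct)
{
	if (!fake_del_timer(ct))
		return 1;		/* already dying, leave it alone */

	/* nf_nat_lock would be held here in the kernel */
	ct->in_bysource_hash = false;	/* hlist_del_rcu(&nat->bysource) */

	ct->timer_pending = true;	/* add_timer(&ct->timeout) */
	return 0;			/* keep the conntrack itself */
}

int main(void)
{
	struct fake_ct live = { true, true }, dying = { false, true };

	printf("live: %d (unhashed=%d), dying: %d (unhashed=%d)\n",
	       proto_clean(&live), !live.in_bysource_hash,
	       proto_clean(&dying), !dying.in_bysource_hash);
	return 0;
}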
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 624e083125b9..ab4566cfcbe4 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -1730,6 +1730,9 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
1730 if (!create || nlh->nlmsg_flags & NLM_F_REPLACE) 1730 if (!create || nlh->nlmsg_flags & NLM_F_REPLACE)
1731 return -EINVAL; 1731 return -EINVAL;
1732 handle = nf_tables_alloc_handle(table); 1732 handle = nf_tables_alloc_handle(table);
1733
1734 if (chain->use == UINT_MAX)
1735 return -EOVERFLOW;
1733 } 1736 }
1734 1737
1735 if (nla[NFTA_RULE_POSITION]) { 1738 if (nla[NFTA_RULE_POSITION]) {
@@ -1789,14 +1792,15 @@ static int nf_tables_newrule(struct sock *nlsk, struct sk_buff *skb,
1789 1792
1790 if (nlh->nlmsg_flags & NLM_F_REPLACE) { 1793 if (nlh->nlmsg_flags & NLM_F_REPLACE) {
1791 if (nft_rule_is_active_next(net, old_rule)) { 1794 if (nft_rule_is_active_next(net, old_rule)) {
1792 trans = nft_trans_rule_add(&ctx, NFT_MSG_NEWRULE, 1795 trans = nft_trans_rule_add(&ctx, NFT_MSG_DELRULE,
1793 old_rule); 1796 old_rule);
1794 if (trans == NULL) { 1797 if (trans == NULL) {
1795 err = -ENOMEM; 1798 err = -ENOMEM;
1796 goto err2; 1799 goto err2;
1797 } 1800 }
1798 nft_rule_disactivate_next(net, old_rule); 1801 nft_rule_disactivate_next(net, old_rule);
1799 list_add_tail(&rule->list, &old_rule->list); 1802 chain->use--;
1803 list_add_tail_rcu(&rule->list, &old_rule->list);
1800 } else { 1804 } else {
1801 err = -ENOENT; 1805 err = -ENOENT;
1802 goto err2; 1806 goto err2;
@@ -1826,6 +1830,7 @@ err3:
1826 list_del_rcu(&nft_trans_rule(trans)->list); 1830 list_del_rcu(&nft_trans_rule(trans)->list);
1827 nft_rule_clear(net, nft_trans_rule(trans)); 1831 nft_rule_clear(net, nft_trans_rule(trans));
1828 nft_trans_destroy(trans); 1832 nft_trans_destroy(trans);
1833 chain->use++;
1829 } 1834 }
1830err2: 1835err2:
1831 nf_tables_rule_destroy(&ctx, rule); 1836 nf_tables_rule_destroy(&ctx, rule);
@@ -2845,7 +2850,7 @@ static int nf_tables_dump_set(struct sk_buff *skb, struct netlink_callback *cb)
2845 goto nla_put_failure; 2850 goto nla_put_failure;
2846 2851
2847 nfmsg = nlmsg_data(nlh); 2852 nfmsg = nlmsg_data(nlh);
2848 nfmsg->nfgen_family = NFPROTO_UNSPEC; 2853 nfmsg->nfgen_family = ctx.afi->family;
2849 nfmsg->version = NFNETLINK_V0; 2854 nfmsg->version = NFNETLINK_V0;
2850 nfmsg->res_id = 0; 2855 nfmsg->res_id = 0;
2851 2856
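A standalone sketch (not kernel code) of the chain->use bookkeeping added in the nf_tables_api.c hunks above: refuse a new rule once the 32-bit use counter would overflow, drop the count when an old rule is queued for deletion on replace, and restore it if the transaction has to be unwound. The helper names below are illustrative only.

#include <limits.h>
#include <stdio.h>

struct chain { unsigned int use; };

static int rule_add(struct chain *c)
{
	if (c->use == UINT_MAX)
		return -1;	/* kernel returns -EOVERFLOW */
	c->use++;
	return 0;
}

static void rule_replace_old(struct chain *c) { c->use--; }	/* DELRULE queued */
static void rule_add_unwind(struct chain *c)  { c->use++; }	/* err3 path */

int main(void)
{
	struct chain c = { .use = UINT_MAX - 1 };

	printf("add #1: %d (use=%u)\n", rule_add(&c), c.use);
	printf("add #2: %d (use=%u)\n", rule_add(&c), c.use);	/* rejected */
	rule_replace_old(&c);
	rule_add_unwind(&c);
	printf("after replace + unwind: use=%u\n", c.use);
	return 0;
}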
diff --git a/net/netfilter/nft_compat.c b/net/netfilter/nft_compat.c
index 8a779be832fb..1840989092ed 100644
--- a/net/netfilter/nft_compat.c
+++ b/net/netfilter/nft_compat.c
@@ -195,6 +195,15 @@ static void
195nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) 195nft_target_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
196{ 196{
197 struct xt_target *target = expr->ops->data; 197 struct xt_target *target = expr->ops->data;
198 void *info = nft_expr_priv(expr);
199 struct xt_tgdtor_param par;
200
201 par.net = ctx->net;
202 par.target = target;
203 par.targinfo = info;
204 par.family = ctx->afi->family;
205 if (par.target->destroy != NULL)
206 par.target->destroy(&par);
198 207
199 module_put(target->me); 208 module_put(target->me);
200} 209}
@@ -382,6 +391,15 @@ static void
382nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr) 391nft_match_destroy(const struct nft_ctx *ctx, const struct nft_expr *expr)
383{ 392{
384 struct xt_match *match = expr->ops->data; 393 struct xt_match *match = expr->ops->data;
394 void *info = nft_expr_priv(expr);
395 struct xt_mtdtor_param par;
396
397 par.net = ctx->net;
398 par.match = match;
399 par.matchinfo = info;
400 par.family = ctx->afi->family;
401 if (par.match->destroy != NULL)
402 par.match->destroy(&par);
385 403
386 module_put(match->me); 404 module_put(match->me);
387} 405}
diff --git a/net/netfilter/nft_nat.c b/net/netfilter/nft_nat.c
index a0195d28bcfc..79ff58cd36dc 100644
--- a/net/netfilter/nft_nat.c
+++ b/net/netfilter/nft_nat.c
@@ -175,12 +175,14 @@ static int nft_nat_dump(struct sk_buff *skb, const struct nft_expr *expr)
175 if (nla_put_be32(skb, 175 if (nla_put_be32(skb,
176 NFTA_NAT_REG_ADDR_MAX, htonl(priv->sreg_addr_max))) 176 NFTA_NAT_REG_ADDR_MAX, htonl(priv->sreg_addr_max)))
177 goto nla_put_failure; 177 goto nla_put_failure;
178 if (nla_put_be32(skb, 178 if (priv->sreg_proto_min) {
179 NFTA_NAT_REG_PROTO_MIN, htonl(priv->sreg_proto_min))) 179 if (nla_put_be32(skb, NFTA_NAT_REG_PROTO_MIN,
180 goto nla_put_failure; 180 htonl(priv->sreg_proto_min)))
181 if (nla_put_be32(skb, 181 goto nla_put_failure;
182 NFTA_NAT_REG_PROTO_MAX, htonl(priv->sreg_proto_max))) 182 if (nla_put_be32(skb, NFTA_NAT_REG_PROTO_MAX,
183 goto nla_put_failure; 183 htonl(priv->sreg_proto_max)))
184 goto nla_put_failure;
185 }
184 return 0; 186 return 0;
185 187
186nla_put_failure: 188nla_put_failure:
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 15c731f03fa6..e6fac7e3db52 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -636,7 +636,7 @@ static unsigned int netlink_poll(struct file *file, struct socket *sock,
636 while (nlk->cb_running && netlink_dump_space(nlk)) { 636 while (nlk->cb_running && netlink_dump_space(nlk)) {
637 err = netlink_dump(sk); 637 err = netlink_dump(sk);
638 if (err < 0) { 638 if (err < 0) {
639 sk->sk_err = err; 639 sk->sk_err = -err;
640 sk->sk_error_report(sk); 640 sk->sk_error_report(sk);
641 break; 641 break;
642 } 642 }
@@ -2483,7 +2483,7 @@ static int netlink_recvmsg(struct kiocb *kiocb, struct socket *sock,
2483 atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) { 2483 atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf / 2) {
2484 ret = netlink_dump(sk); 2484 ret = netlink_dump(sk);
2485 if (ret) { 2485 if (ret) {
2486 sk->sk_err = ret; 2486 sk->sk_err = -ret;
2487 sk->sk_error_report(sk); 2487 sk->sk_error_report(sk);
2488 } 2488 }
2489 } 2489 }
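A small userspace illustration (not kernel code) of why the sign flip in the af_netlink.c hunks above matters: netlink_dump() returns a negative errno, but sk->sk_err is expected to hold a positive errno, which is what SO_ERROR and the error report path ultimately hand back to user space.

#include <errno.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	int err = -ENOBUFS;		/* what netlink_dump() returns on failure */

	int sk_err_old = err;		/* old code: sk->sk_err = err  (wrong sign) */
	int sk_err_new = -err;		/* new code: sk->sk_err = -err (positive errno) */

	printf("old: SO_ERROR=%d (%s)\n", sk_err_old, strerror(sk_err_old));
	printf("new: SO_ERROR=%d (%s)\n", sk_err_new, strerror(sk_err_new));
	return 0;
}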
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index c36856a457ca..e70d8b18e962 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -551,6 +551,8 @@ static int do_execute_actions(struct datapath *dp, struct sk_buff *skb,
551 551
552 case OVS_ACTION_ATTR_SAMPLE: 552 case OVS_ACTION_ATTR_SAMPLE:
553 err = sample(dp, skb, a); 553 err = sample(dp, skb, a);
554 if (unlikely(err)) /* skb already freed. */
555 return err;
554 break; 556 break;
555 } 557 }
556 558
diff --git a/net/openvswitch/datapath.c b/net/openvswitch/datapath.c
index 0d407bca81e3..9db4bf6740d1 100644
--- a/net/openvswitch/datapath.c
+++ b/net/openvswitch/datapath.c
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2007-2013 Nicira, Inc. 2 * Copyright (c) 2007-2014 Nicira, Inc.
3 * 3 *
4 * This program is free software; you can redistribute it and/or 4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public 5 * modify it under the terms of version 2 of the GNU General Public
@@ -276,7 +276,7 @@ void ovs_dp_process_received_packet(struct vport *p, struct sk_buff *skb)
276 OVS_CB(skb)->flow = flow; 276 OVS_CB(skb)->flow = flow;
277 OVS_CB(skb)->pkt_key = &key; 277 OVS_CB(skb)->pkt_key = &key;
278 278
279 ovs_flow_stats_update(OVS_CB(skb)->flow, skb); 279 ovs_flow_stats_update(OVS_CB(skb)->flow, key.tp.flags, skb);
280 ovs_execute_actions(dp, skb); 280 ovs_execute_actions(dp, skb);
281 stats_counter = &stats->n_hit; 281 stats_counter = &stats->n_hit;
282 282
@@ -889,8 +889,11 @@ static int ovs_flow_cmd_new(struct sk_buff *skb, struct genl_info *info)
889 } 889 }
890 /* The unmasked key has to be the same for flow updates. */ 890 /* The unmasked key has to be the same for flow updates. */
891 if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) { 891 if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) {
892 error = -EEXIST; 892 flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
893 goto err_unlock_ovs; 893 if (!flow) {
894 error = -ENOENT;
895 goto err_unlock_ovs;
896 }
894 } 897 }
895 /* Update actions. */ 898 /* Update actions. */
896 old_acts = ovsl_dereference(flow->sf_acts); 899 old_acts = ovsl_dereference(flow->sf_acts);
@@ -981,16 +984,12 @@ static int ovs_flow_cmd_set(struct sk_buff *skb, struct genl_info *info)
981 goto err_unlock_ovs; 984 goto err_unlock_ovs;
982 } 985 }
983 /* Check that the flow exists. */ 986 /* Check that the flow exists. */
984 flow = ovs_flow_tbl_lookup(&dp->table, &key); 987 flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
985 if (unlikely(!flow)) { 988 if (unlikely(!flow)) {
986 error = -ENOENT; 989 error = -ENOENT;
987 goto err_unlock_ovs; 990 goto err_unlock_ovs;
988 } 991 }
989 /* The unmasked key has to be the same for flow updates. */ 992
990 if (unlikely(!ovs_flow_cmp_unmasked_key(flow, &match))) {
991 error = -EEXIST;
992 goto err_unlock_ovs;
993 }
994 /* Update actions, if present. */ 993 /* Update actions, if present. */
995 if (likely(acts)) { 994 if (likely(acts)) {
996 old_acts = ovsl_dereference(flow->sf_acts); 995 old_acts = ovsl_dereference(flow->sf_acts);
@@ -1063,8 +1062,8 @@ static int ovs_flow_cmd_get(struct sk_buff *skb, struct genl_info *info)
1063 goto unlock; 1062 goto unlock;
1064 } 1063 }
1065 1064
1066 flow = ovs_flow_tbl_lookup(&dp->table, &key); 1065 flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
1067 if (!flow || !ovs_flow_cmp_unmasked_key(flow, &match)) { 1066 if (!flow) {
1068 err = -ENOENT; 1067 err = -ENOENT;
1069 goto unlock; 1068 goto unlock;
1070 } 1069 }
@@ -1113,8 +1112,8 @@ static int ovs_flow_cmd_del(struct sk_buff *skb, struct genl_info *info)
1113 goto unlock; 1112 goto unlock;
1114 } 1113 }
1115 1114
1116 flow = ovs_flow_tbl_lookup(&dp->table, &key); 1115 flow = ovs_flow_tbl_lookup_exact(&dp->table, &match);
1117 if (unlikely(!flow || !ovs_flow_cmp_unmasked_key(flow, &match))) { 1116 if (unlikely(!flow)) {
1118 err = -ENOENT; 1117 err = -ENOENT;
1119 goto unlock; 1118 goto unlock;
1120 } 1119 }
diff --git a/net/openvswitch/flow.c b/net/openvswitch/flow.c
index 334751cb1528..d07ab538fc9d 100644
--- a/net/openvswitch/flow.c
+++ b/net/openvswitch/flow.c
@@ -61,10 +61,10 @@ u64 ovs_flow_used_time(unsigned long flow_jiffies)
61 61
62#define TCP_FLAGS_BE16(tp) (*(__be16 *)&tcp_flag_word(tp) & htons(0x0FFF)) 62#define TCP_FLAGS_BE16(tp) (*(__be16 *)&tcp_flag_word(tp) & htons(0x0FFF))
63 63
64void ovs_flow_stats_update(struct sw_flow *flow, struct sk_buff *skb) 64void ovs_flow_stats_update(struct sw_flow *flow, __be16 tcp_flags,
65 struct sk_buff *skb)
65{ 66{
66 struct flow_stats *stats; 67 struct flow_stats *stats;
67 __be16 tcp_flags = flow->key.tp.flags;
68 int node = numa_node_id(); 68 int node = numa_node_id();
69 69
70 stats = rcu_dereference(flow->stats[node]); 70 stats = rcu_dereference(flow->stats[node]);
diff --git a/net/openvswitch/flow.h b/net/openvswitch/flow.h
index ac395d2cd821..5e5aaed3a85b 100644
--- a/net/openvswitch/flow.h
+++ b/net/openvswitch/flow.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * Copyright (c) 2007-2013 Nicira, Inc. 2 * Copyright (c) 2007-2014 Nicira, Inc.
3 * 3 *
4 * This program is free software; you can redistribute it and/or 4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of version 2 of the GNU General Public 5 * modify it under the terms of version 2 of the GNU General Public
@@ -180,7 +180,8 @@ struct arp_eth_header {
180 unsigned char ar_tip[4]; /* target IP address */ 180 unsigned char ar_tip[4]; /* target IP address */
181} __packed; 181} __packed;
182 182
183void ovs_flow_stats_update(struct sw_flow *, struct sk_buff *); 183void ovs_flow_stats_update(struct sw_flow *, __be16 tcp_flags,
184 struct sk_buff *);
184void ovs_flow_stats_get(const struct sw_flow *, struct ovs_flow_stats *, 185void ovs_flow_stats_get(const struct sw_flow *, struct ovs_flow_stats *,
185 unsigned long *used, __be16 *tcp_flags); 186 unsigned long *used, __be16 *tcp_flags);
186void ovs_flow_stats_clear(struct sw_flow *); 187void ovs_flow_stats_clear(struct sw_flow *);
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index 574c3abc9b30..cf2d853646f0 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -456,6 +456,22 @@ struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *tbl,
456 return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit); 456 return ovs_flow_tbl_lookup_stats(tbl, key, &n_mask_hit);
457} 457}
458 458
459struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
460 struct sw_flow_match *match)
461{
462 struct table_instance *ti = rcu_dereference_ovsl(tbl->ti);
463 struct sw_flow_mask *mask;
464 struct sw_flow *flow;
465
466 /* Always called under ovs-mutex. */
467 list_for_each_entry(mask, &tbl->mask_list, list) {
468 flow = masked_flow_lookup(ti, match->key, mask);
469 if (flow && ovs_flow_cmp_unmasked_key(flow, match)) /* Found */
470 return flow;
471 }
472 return NULL;
473}
474
459int ovs_flow_tbl_num_masks(const struct flow_table *table) 475int ovs_flow_tbl_num_masks(const struct flow_table *table)
460{ 476{
461 struct sw_flow_mask *mask; 477 struct sw_flow_mask *mask;
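A userspace model (not kernel code) of why the openvswitch control path above needs ovs_flow_tbl_lookup_exact(): the datapath lookup masks the key before matching, so two different unmasked keys can hit the same flow, while OVS_FLOW_CMD_SET/GET/DEL must only act on a flow whose full unmasked key matches. The key layout below is a toy stand-in.

#include <stdbool.h>
#include <stdio.h>

struct key { unsigned int src_ip, dst_ip; };

static bool masked_match(struct key a, struct key b, struct key mask)
{
	return ((a.src_ip & mask.src_ip) == (b.src_ip & mask.src_ip)) &&
	       ((a.dst_ip & mask.dst_ip) == (b.dst_ip & mask.dst_ip));
}

static bool exact_match(struct key a, struct key b)
{
	return a.src_ip == b.src_ip && a.dst_ip == b.dst_ip;
}

int main(void)
{
	struct key flow = { 0x0a000001, 0x0a000002 };	/* installed flow     */
	struct key req  = { 0x0a0000ff, 0x0a000002 };	/* userspace request  */
	struct key mask = { 0x00000000, 0xffffffff };	/* source wildcarded  */

	printf("masked lookup matches: %d, exact lookup matches: %d\n",
	       masked_match(flow, req, mask), exact_match(flow, req));
	return 0;
}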
diff --git a/net/openvswitch/flow_table.h b/net/openvswitch/flow_table.h
index ca8a5820f615..5918bff7f3f6 100644
--- a/net/openvswitch/flow_table.h
+++ b/net/openvswitch/flow_table.h
@@ -76,7 +76,8 @@ struct sw_flow *ovs_flow_tbl_lookup_stats(struct flow_table *,
76 u32 *n_mask_hit); 76 u32 *n_mask_hit);
77struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *, 77struct sw_flow *ovs_flow_tbl_lookup(struct flow_table *,
78 const struct sw_flow_key *); 78 const struct sw_flow_key *);
79 79struct sw_flow *ovs_flow_tbl_lookup_exact(struct flow_table *tbl,
80 struct sw_flow_match *match);
80bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow, 81bool ovs_flow_cmp_unmasked_key(const struct sw_flow *flow,
81 struct sw_flow_match *match); 82 struct sw_flow_match *match);
82 83
diff --git a/net/openvswitch/vport-gre.c b/net/openvswitch/vport-gre.c
index 35ec4fed09e2..f49148a07da2 100644
--- a/net/openvswitch/vport-gre.c
+++ b/net/openvswitch/vport-gre.c
@@ -110,6 +110,22 @@ static int gre_rcv(struct sk_buff *skb,
110 return PACKET_RCVD; 110 return PACKET_RCVD;
111} 111}
112 112
113/* Called with rcu_read_lock and BH disabled. */
114static int gre_err(struct sk_buff *skb, u32 info,
115 const struct tnl_ptk_info *tpi)
116{
117 struct ovs_net *ovs_net;
118 struct vport *vport;
119
120 ovs_net = net_generic(dev_net(skb->dev), ovs_net_id);
121 vport = rcu_dereference(ovs_net->vport_net.gre_vport);
122
123 if (unlikely(!vport))
124 return PACKET_REJECT;
125 else
126 return PACKET_RCVD;
127}
128
113static int gre_tnl_send(struct vport *vport, struct sk_buff *skb) 129static int gre_tnl_send(struct vport *vport, struct sk_buff *skb)
114{ 130{
115 struct net *net = ovs_dp_get_net(vport->dp); 131 struct net *net = ovs_dp_get_net(vport->dp);
@@ -186,6 +202,7 @@ error:
186 202
187static struct gre_cisco_protocol gre_protocol = { 203static struct gre_cisco_protocol gre_protocol = {
188 .handler = gre_rcv, 204 .handler = gre_rcv,
205 .err_handler = gre_err,
189 .priority = 1, 206 .priority = 1,
190}; 207};
191 208
diff --git a/net/sctp/sysctl.c b/net/sctp/sysctl.c
index dcb19592761e..12c7e01c2677 100644
--- a/net/sctp/sysctl.c
+++ b/net/sctp/sysctl.c
@@ -321,41 +321,40 @@ static int proc_sctp_do_hmac_alg(struct ctl_table *ctl, int write,
321 loff_t *ppos) 321 loff_t *ppos)
322{ 322{
323 struct net *net = current->nsproxy->net_ns; 323 struct net *net = current->nsproxy->net_ns;
324 char tmp[8];
325 struct ctl_table tbl; 324 struct ctl_table tbl;
326 int ret; 325 bool changed = false;
327 int changed = 0;
328 char *none = "none"; 326 char *none = "none";
327 char tmp[8];
328 int ret;
329 329
330 memset(&tbl, 0, sizeof(struct ctl_table)); 330 memset(&tbl, 0, sizeof(struct ctl_table));
331 331
332 if (write) { 332 if (write) {
333 tbl.data = tmp; 333 tbl.data = tmp;
334 tbl.maxlen = 8; 334 tbl.maxlen = sizeof(tmp);
335 } else { 335 } else {
336 tbl.data = net->sctp.sctp_hmac_alg ? : none; 336 tbl.data = net->sctp.sctp_hmac_alg ? : none;
337 tbl.maxlen = strlen(tbl.data); 337 tbl.maxlen = strlen(tbl.data);
338 } 338 }
339 ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
340 339
341 if (write) { 340 ret = proc_dostring(&tbl, write, buffer, lenp, ppos);
341 if (write && ret == 0) {
342#ifdef CONFIG_CRYPTO_MD5 342#ifdef CONFIG_CRYPTO_MD5
343 if (!strncmp(tmp, "md5", 3)) { 343 if (!strncmp(tmp, "md5", 3)) {
344 net->sctp.sctp_hmac_alg = "md5"; 344 net->sctp.sctp_hmac_alg = "md5";
345 changed = 1; 345 changed = true;
346 } 346 }
347#endif 347#endif
348#ifdef CONFIG_CRYPTO_SHA1 348#ifdef CONFIG_CRYPTO_SHA1
349 if (!strncmp(tmp, "sha1", 4)) { 349 if (!strncmp(tmp, "sha1", 4)) {
350 net->sctp.sctp_hmac_alg = "sha1"; 350 net->sctp.sctp_hmac_alg = "sha1";
351 changed = 1; 351 changed = true;
352 } 352 }
353#endif 353#endif
354 if (!strncmp(tmp, "none", 4)) { 354 if (!strncmp(tmp, "none", 4)) {
355 net->sctp.sctp_hmac_alg = NULL; 355 net->sctp.sctp_hmac_alg = NULL;
356 changed = 1; 356 changed = true;
357 } 357 }
358
359 if (!changed) 358 if (!changed)
360 ret = -EINVAL; 359 ret = -EINVAL;
361 } 360 }
@@ -368,11 +367,10 @@ static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write,
368 loff_t *ppos) 367 loff_t *ppos)
369{ 368{
370 struct net *net = current->nsproxy->net_ns; 369 struct net *net = current->nsproxy->net_ns;
371 int new_value;
372 struct ctl_table tbl;
373 unsigned int min = *(unsigned int *) ctl->extra1; 370 unsigned int min = *(unsigned int *) ctl->extra1;
374 unsigned int max = *(unsigned int *) ctl->extra2; 371 unsigned int max = *(unsigned int *) ctl->extra2;
375 int ret; 372 struct ctl_table tbl;
373 int ret, new_value;
376 374
377 memset(&tbl, 0, sizeof(struct ctl_table)); 375 memset(&tbl, 0, sizeof(struct ctl_table));
378 tbl.maxlen = sizeof(unsigned int); 376 tbl.maxlen = sizeof(unsigned int);
@@ -381,12 +379,15 @@ static int proc_sctp_do_rto_min(struct ctl_table *ctl, int write,
381 tbl.data = &new_value; 379 tbl.data = &new_value;
382 else 380 else
383 tbl.data = &net->sctp.rto_min; 381 tbl.data = &net->sctp.rto_min;
382
384 ret = proc_dointvec(&tbl, write, buffer, lenp, ppos); 383 ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
385 if (write) { 384 if (write && ret == 0) {
386 if (ret || new_value > max || new_value < min) 385 if (new_value > max || new_value < min)
387 return -EINVAL; 386 return -EINVAL;
387
388 net->sctp.rto_min = new_value; 388 net->sctp.rto_min = new_value;
389 } 389 }
390
390 return ret; 391 return ret;
391} 392}
392 393
@@ -395,11 +396,10 @@ static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
395 loff_t *ppos) 396 loff_t *ppos)
396{ 397{
397 struct net *net = current->nsproxy->net_ns; 398 struct net *net = current->nsproxy->net_ns;
398 int new_value;
399 struct ctl_table tbl;
400 unsigned int min = *(unsigned int *) ctl->extra1; 399 unsigned int min = *(unsigned int *) ctl->extra1;
401 unsigned int max = *(unsigned int *) ctl->extra2; 400 unsigned int max = *(unsigned int *) ctl->extra2;
402 int ret; 401 struct ctl_table tbl;
402 int ret, new_value;
403 403
404 memset(&tbl, 0, sizeof(struct ctl_table)); 404 memset(&tbl, 0, sizeof(struct ctl_table));
405 tbl.maxlen = sizeof(unsigned int); 405 tbl.maxlen = sizeof(unsigned int);
@@ -408,12 +408,15 @@ static int proc_sctp_do_rto_max(struct ctl_table *ctl, int write,
408 tbl.data = &new_value; 408 tbl.data = &new_value;
409 else 409 else
410 tbl.data = &net->sctp.rto_max; 410 tbl.data = &net->sctp.rto_max;
411
411 ret = proc_dointvec(&tbl, write, buffer, lenp, ppos); 412 ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
412 if (write) { 413 if (write && ret == 0) {
413 if (ret || new_value > max || new_value < min) 414 if (new_value > max || new_value < min)
414 return -EINVAL; 415 return -EINVAL;
416
415 net->sctp.rto_max = new_value; 417 net->sctp.rto_max = new_value;
416 } 418 }
419
417 return ret; 420 return ret;
418} 421}
419 422
@@ -444,8 +447,7 @@ static int proc_sctp_do_auth(struct ctl_table *ctl, int write,
444 tbl.data = &net->sctp.auth_enable; 447 tbl.data = &net->sctp.auth_enable;
445 448
446 ret = proc_dointvec(&tbl, write, buffer, lenp, ppos); 449 ret = proc_dointvec(&tbl, write, buffer, lenp, ppos);
447 450 if (write && ret == 0) {
448 if (write) {
449 struct sock *sk = net->sctp.ctl_sock; 451 struct sock *sk = net->sctp.ctl_sock;
450 452
451 net->sctp.auth_enable = new_value; 453 net->sctp.auth_enable = new_value;
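A standalone sketch (not kernel code) of the pattern the sctp sysctl hunks above converge on: parse the user input first, and only commit the new value when parsing succeeded (ret == 0) and the value lies inside the [min, max] bounds taken from ctl->extra1/extra2. The function below is a simplified stand-in for proc_sctp_do_rto_min().

#include <stdio.h>

static int do_rto_min(int write, int parsed_ok, int new_value,
		      int min, int max, int *rto_min)
{
	int ret = parsed_ok ? 0 : -1;	/* models the proc_dointvec() result */

	if (write && ret == 0) {
		if (new_value > max || new_value < min)
			return -22;	/* -EINVAL */
		*rto_min = new_value;
	}
	return ret;
}

int main(void)
{
	int rto_min = 1000, min = 1, max = 5000;

	printf("valid write: %d, rto_min=%d\n",
	       do_rto_min(1, 1, 200, min, max, &rto_min), rto_min);
	printf("out of range: %d, rto_min=%d\n",
	       do_rto_min(1, 1, 9000, min, max, &rto_min), rto_min);
	printf("parse error: %d, rto_min=%d\n",
	       do_rto_min(1, 0, 300, min, max, &rto_min), rto_min);
	return 0;
}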
diff --git a/net/sctp/ulpevent.c b/net/sctp/ulpevent.c
index 85c64658bd0b..b6842fdb53d4 100644
--- a/net/sctp/ulpevent.c
+++ b/net/sctp/ulpevent.c
@@ -366,9 +366,10 @@ fail:
366 * specification [SCTP] and any extensions for a list of possible 366 * specification [SCTP] and any extensions for a list of possible
367 * error formats. 367 * error formats.
368 */ 368 */
369struct sctp_ulpevent *sctp_ulpevent_make_remote_error( 369struct sctp_ulpevent *
370 const struct sctp_association *asoc, struct sctp_chunk *chunk, 370sctp_ulpevent_make_remote_error(const struct sctp_association *asoc,
371 __u16 flags, gfp_t gfp) 371 struct sctp_chunk *chunk, __u16 flags,
372 gfp_t gfp)
372{ 373{
373 struct sctp_ulpevent *event; 374 struct sctp_ulpevent *event;
374 struct sctp_remote_error *sre; 375 struct sctp_remote_error *sre;
@@ -387,8 +388,7 @@ struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
387 /* Copy the skb to a new skb with room for us to prepend 388 /* Copy the skb to a new skb with room for us to prepend
388 * notification with. 389 * notification with.
389 */ 390 */
390 skb = skb_copy_expand(chunk->skb, sizeof(struct sctp_remote_error), 391 skb = skb_copy_expand(chunk->skb, sizeof(*sre), 0, gfp);
391 0, gfp);
392 392
393 /* Pull off the rest of the cause TLV from the chunk. */ 393 /* Pull off the rest of the cause TLV from the chunk. */
394 skb_pull(chunk->skb, elen); 394 skb_pull(chunk->skb, elen);
@@ -399,62 +399,21 @@ struct sctp_ulpevent *sctp_ulpevent_make_remote_error(
399 event = sctp_skb2event(skb); 399 event = sctp_skb2event(skb);
400 sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize); 400 sctp_ulpevent_init(event, MSG_NOTIFICATION, skb->truesize);
401 401
402 sre = (struct sctp_remote_error *) 402 sre = (struct sctp_remote_error *) skb_push(skb, sizeof(*sre));
403 skb_push(skb, sizeof(struct sctp_remote_error));
404 403
405 /* Trim the buffer to the right length. */ 404 /* Trim the buffer to the right length. */
406 skb_trim(skb, sizeof(struct sctp_remote_error) + elen); 405 skb_trim(skb, sizeof(*sre) + elen);
407 406
408 /* Socket Extensions for SCTP 407 /* RFC6458, Section 6.1.3. SCTP_REMOTE_ERROR */
409 * 5.3.1.3 SCTP_REMOTE_ERROR 408 memset(sre, 0, sizeof(*sre));
410 *
411 * sre_type:
412 * It should be SCTP_REMOTE_ERROR.
413 */
414 sre->sre_type = SCTP_REMOTE_ERROR; 409 sre->sre_type = SCTP_REMOTE_ERROR;
415
416 /*
417 * Socket Extensions for SCTP
418 * 5.3.1.3 SCTP_REMOTE_ERROR
419 *
420 * sre_flags: 16 bits (unsigned integer)
421 * Currently unused.
422 */
423 sre->sre_flags = 0; 410 sre->sre_flags = 0;
424
425 /* Socket Extensions for SCTP
426 * 5.3.1.3 SCTP_REMOTE_ERROR
427 *
428 * sre_length: sizeof (__u32)
429 *
430 * This field is the total length of the notification data,
431 * including the notification header.
432 */
433 sre->sre_length = skb->len; 411 sre->sre_length = skb->len;
434
435 /* Socket Extensions for SCTP
436 * 5.3.1.3 SCTP_REMOTE_ERROR
437 *
438 * sre_error: 16 bits (unsigned integer)
439 * This value represents one of the Operational Error causes defined in
440 * the SCTP specification, in network byte order.
441 */
442 sre->sre_error = cause; 412 sre->sre_error = cause;
443
444 /* Socket Extensions for SCTP
445 * 5.3.1.3 SCTP_REMOTE_ERROR
446 *
447 * sre_assoc_id: sizeof (sctp_assoc_t)
448 *
449 * The association id field, holds the identifier for the association.
450 * All notifications for a given association have the same association
451 * identifier. For TCP style socket, this field is ignored.
452 */
453 sctp_ulpevent_set_owner(event, asoc); 413 sctp_ulpevent_set_owner(event, asoc);
454 sre->sre_assoc_id = sctp_assoc2id(asoc); 414 sre->sre_assoc_id = sctp_assoc2id(asoc);
455 415
456 return event; 416 return event;
457
458fail: 417fail:
459 return NULL; 418 return NULL;
460} 419}
@@ -899,7 +858,9 @@ __u16 sctp_ulpevent_get_notification_type(const struct sctp_ulpevent *event)
899 return notification->sn_header.sn_type; 858 return notification->sn_header.sn_type;
900} 859}
901 860
902/* Copy out the sndrcvinfo into a msghdr. */ 861/* RFC6458, Section 5.3.2. SCTP Header Information Structure
862 * (SCTP_SNDRCV, DEPRECATED)
863 */
903void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event, 864void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
904 struct msghdr *msghdr) 865 struct msghdr *msghdr)
905{ 866{
@@ -908,74 +869,21 @@ void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
908 if (sctp_ulpevent_is_notification(event)) 869 if (sctp_ulpevent_is_notification(event))
909 return; 870 return;
910 871
911 /* Sockets API Extensions for SCTP 872 memset(&sinfo, 0, sizeof(sinfo));
912 * Section 5.2.2 SCTP Header Information Structure (SCTP_SNDRCV)
913 *
914 * sinfo_stream: 16 bits (unsigned integer)
915 *
916 * For recvmsg() the SCTP stack places the message's stream number in
917 * this value.
918 */
919 sinfo.sinfo_stream = event->stream; 873 sinfo.sinfo_stream = event->stream;
920 /* sinfo_ssn: 16 bits (unsigned integer)
921 *
922 * For recvmsg() this value contains the stream sequence number that
923 * the remote endpoint placed in the DATA chunk. For fragmented
924 * messages this is the same number for all deliveries of the message
925 * (if more than one recvmsg() is needed to read the message).
926 */
927 sinfo.sinfo_ssn = event->ssn; 874 sinfo.sinfo_ssn = event->ssn;
928 /* sinfo_ppid: 32 bits (unsigned integer)
929 *
930 * In recvmsg() this value is
931 * the same information that was passed by the upper layer in the peer
932 * application. Please note that byte order issues are NOT accounted
933 * for and this information is passed opaquely by the SCTP stack from
934 * one end to the other.
935 */
936 sinfo.sinfo_ppid = event->ppid; 875 sinfo.sinfo_ppid = event->ppid;
937 /* sinfo_flags: 16 bits (unsigned integer)
938 *
939 * This field may contain any of the following flags and is composed of
940 * a bitwise OR of these values.
941 *
942 * recvmsg() flags:
943 *
944 * SCTP_UNORDERED - This flag is present when the message was sent
945 * non-ordered.
946 */
947 sinfo.sinfo_flags = event->flags; 876 sinfo.sinfo_flags = event->flags;
948 /* sinfo_tsn: 32 bit (unsigned integer)
949 *
950 * For the receiving side, this field holds a TSN that was
951 * assigned to one of the SCTP Data Chunks.
952 */
953 sinfo.sinfo_tsn = event->tsn; 877 sinfo.sinfo_tsn = event->tsn;
954 /* sinfo_cumtsn: 32 bit (unsigned integer)
955 *
956 * This field will hold the current cumulative TSN as
957 * known by the underlying SCTP layer. Note this field is
958 * ignored when sending and only valid for a receive
959 * operation when sinfo_flags are set to SCTP_UNORDERED.
960 */
961 sinfo.sinfo_cumtsn = event->cumtsn; 878 sinfo.sinfo_cumtsn = event->cumtsn;
962 /* sinfo_assoc_id: sizeof (sctp_assoc_t)
963 *
964 * The association handle field, sinfo_assoc_id, holds the identifier
965 * for the association announced in the COMMUNICATION_UP notification.
966 * All notifications for a given association have the same identifier.
967 * Ignored for one-to-one style sockets.
968 */
969 sinfo.sinfo_assoc_id = sctp_assoc2id(event->asoc); 879 sinfo.sinfo_assoc_id = sctp_assoc2id(event->asoc);
970 880 /* Context value that is set via SCTP_CONTEXT socket option. */
971 /* context value that is set via SCTP_CONTEXT socket option. */
972 sinfo.sinfo_context = event->asoc->default_rcv_context; 881 sinfo.sinfo_context = event->asoc->default_rcv_context;
973
974 /* These fields are not used while receiving. */ 882 /* These fields are not used while receiving. */
975 sinfo.sinfo_timetolive = 0; 883 sinfo.sinfo_timetolive = 0;
976 884
977 put_cmsg(msghdr, IPPROTO_SCTP, SCTP_SNDRCV, 885 put_cmsg(msghdr, IPPROTO_SCTP, SCTP_SNDRCV,
978 sizeof(struct sctp_sndrcvinfo), (void *)&sinfo); 886 sizeof(sinfo), &sinfo);
979} 887}
980 888
981/* Do accounting for bytes received and hold a reference to the association 889/* Do accounting for bytes received and hold a reference to the association
diff --git a/net/sunrpc/auth.c b/net/sunrpc/auth.c
index 247e973544bf..f77366717420 100644
--- a/net/sunrpc/auth.c
+++ b/net/sunrpc/auth.c
@@ -592,6 +592,7 @@ rpcauth_lookupcred(struct rpc_auth *auth, int flags)
592 put_group_info(acred.group_info); 592 put_group_info(acred.group_info);
593 return ret; 593 return ret;
594} 594}
595EXPORT_SYMBOL_GPL(rpcauth_lookupcred);
595 596
596void 597void
597rpcauth_init_cred(struct rpc_cred *cred, const struct auth_cred *acred, 598rpcauth_init_cred(struct rpc_cred *cred, const struct auth_cred *acred,
diff --git a/net/tipc/bcast.c b/net/tipc/bcast.c
index 26631679a1fa..55c6c9d3e1ce 100644
--- a/net/tipc/bcast.c
+++ b/net/tipc/bcast.c
@@ -559,6 +559,7 @@ receive:
559 559
560 buf = node->bclink.deferred_head; 560 buf = node->bclink.deferred_head;
561 node->bclink.deferred_head = buf->next; 561 node->bclink.deferred_head = buf->next;
562 buf->next = NULL;
562 node->bclink.deferred_size--; 563 node->bclink.deferred_size--;
563 goto receive; 564 goto receive;
564 } 565 }
diff --git a/net/tipc/msg.c b/net/tipc/msg.c
index 8be6e94a1ca9..0a37a472c29f 100644
--- a/net/tipc/msg.c
+++ b/net/tipc/msg.c
@@ -101,9 +101,11 @@ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect,
101} 101}
102 102
103/* tipc_buf_append(): Append a buffer to the fragment list of another buffer 103/* tipc_buf_append(): Append a buffer to the fragment list of another buffer
104 * Let first buffer become head buffer 104 * @*headbuf: in: NULL for first frag, otherwise value returned from prev call
105 * Returns 1 and sets *buf to headbuf if chain is complete, otherwise 0 105 * out: set when successful non-complete reassembly, otherwise NULL
106 * Leaves headbuf pointer at NULL if failure 106 * @*buf: in: the buffer to append. Always defined
107 * out: head buf after successful complete reassembly, otherwise NULL
108 * Returns 1 when reassembly complete, otherwise 0
107 */ 109 */
108int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf) 110int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
109{ 111{
@@ -122,6 +124,7 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
122 goto out_free; 124 goto out_free;
123 head = *headbuf = frag; 125 head = *headbuf = frag;
124 skb_frag_list_init(head); 126 skb_frag_list_init(head);
127 *buf = NULL;
125 return 0; 128 return 0;
126 } 129 }
127 if (!head) 130 if (!head)
@@ -150,5 +153,7 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf)
150out_free: 153out_free:
151 pr_warn_ratelimited("Unable to build fragment list\n"); 154 pr_warn_ratelimited("Unable to build fragment list\n");
152 kfree_skb(*buf); 155 kfree_skb(*buf);
156 kfree_skb(*headbuf);
157 *buf = *headbuf = NULL;
153 return 0; 158 return 0;
154} 159}
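A userspace model (not kernel code) of the reworked tipc_buf_append() contract documented in the tipc/msg.c hunk above: *head is NULL before the first fragment, the callee consumes *frag on every call, the caller only uses *frag after a return of 1 (reassembly complete), and on failure both pointers come back NULL so nothing leaks. Buffers are plain heap blocks here instead of sk_buffs.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf { size_t len, want; char data[64]; };

static int buf_append(struct buf **head, struct buf **frag)
{
	struct buf *f = *frag;

	if (!*head) {			/* first fragment becomes the head */
		*head = f;
		*frag = NULL;
		return 0;
	}
	if ((*head)->len + f->len > sizeof((*head)->data)) {
		free(f);		/* kfree_skb(*buf) */
		free(*head);		/* kfree_skb(*headbuf) */
		*head = *frag = NULL;	/* failure: both pointers NULLed */
		return 0;
	}
	memcpy((*head)->data + (*head)->len, f->data, f->len);
	(*head)->len += f->len;
	free(f);
	if ((*head)->len >= (*head)->want) {
		*frag = *head;		/* hand the completed buffer back */
		*head = NULL;
		return 1;
	}
	*frag = NULL;
	return 0;
}

int main(void)
{
	struct buf *head = NULL, *frag;
	const char *pieces[] = { "hello ", "tipc ", "world" };

	for (int i = 0; i < 3; i++) {
		frag = calloc(1, sizeof(*frag));
		frag->want = 16;
		frag->len = strlen(pieces[i]);
		memcpy(frag->data, pieces[i], frag->len);
		if (buf_append(&head, &frag)) {
			printf("complete: \"%.*s\"\n", (int)frag->len, frag->data);
			free(frag);
		}
	}
	return 0;
}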
diff --git a/net/wireless/core.h b/net/wireless/core.h
index e9afbf10e756..7e3a3cef7df9 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -424,7 +424,7 @@ static inline unsigned int elapsed_jiffies_msecs(unsigned long start)
424 if (end >= start) 424 if (end >= start)
425 return jiffies_to_msecs(end - start); 425 return jiffies_to_msecs(end - start);
426 426
427 return jiffies_to_msecs(end + (MAX_JIFFY_OFFSET - start) + 1); 427 return jiffies_to_msecs(end + (ULONG_MAX - start) + 1);
428} 428}
429 429
430void 430void
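A standalone check (not kernel code) of the wraparound arithmetic fixed in the wireless/core.h hunk above: when the jiffies counter wraps, the elapsed tick count is end + (ULONG_MAX - start) + 1, which is exactly what plain unsigned subtraction gives; using MAX_JIFFY_OFFSET instead of ULONG_MAX under-counted the interval.

#include <limits.h>
#include <stdio.h>

int main(void)
{
	unsigned long start = ULONG_MAX - 5;	/* just before the wrap */
	unsigned long end = 10;			/* just after the wrap  */

	unsigned long fixed = end + (ULONG_MAX - start) + 1;
	unsigned long modular = end - start;	/* same value, modulo word size */

	printf("elapsed ticks: fixed=%lu modular=%lu\n", fixed, modular);
	return 0;
}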
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index ba4f1723c83a..6668daf69326 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -1497,18 +1497,17 @@ static int nl80211_send_wiphy(struct cfg80211_registered_device *rdev,
1497 } 1497 }
1498 CMD(start_p2p_device, START_P2P_DEVICE); 1498 CMD(start_p2p_device, START_P2P_DEVICE);
1499 CMD(set_mcast_rate, SET_MCAST_RATE); 1499 CMD(set_mcast_rate, SET_MCAST_RATE);
1500#ifdef CONFIG_NL80211_TESTMODE
1501 CMD(testmode_cmd, TESTMODE);
1502#endif
1500 if (state->split) { 1503 if (state->split) {
1501 CMD(crit_proto_start, CRIT_PROTOCOL_START); 1504 CMD(crit_proto_start, CRIT_PROTOCOL_START);
1502 CMD(crit_proto_stop, CRIT_PROTOCOL_STOP); 1505 CMD(crit_proto_stop, CRIT_PROTOCOL_STOP);
1503 if (rdev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH) 1506 if (rdev->wiphy.flags & WIPHY_FLAG_HAS_CHANNEL_SWITCH)
1504 CMD(channel_switch, CHANNEL_SWITCH); 1507 CMD(channel_switch, CHANNEL_SWITCH);
1508 CMD(set_qos_map, SET_QOS_MAP);
1505 } 1509 }
1506 CMD(set_qos_map, SET_QOS_MAP); 1510 /* add into the if now */
1507
1508#ifdef CONFIG_NL80211_TESTMODE
1509 CMD(testmode_cmd, TESTMODE);
1510#endif
1511
1512#undef CMD 1511#undef CMD
1513 1512
1514 if (rdev->ops->connect || rdev->ops->auth) { 1513 if (rdev->ops->connect || rdev->ops->auth) {
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index 558b0e3a02d8..1afdf45db38f 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -935,7 +935,7 @@ freq_reg_info_regd(struct wiphy *wiphy, u32 center_freq,
935 if (!band_rule_found) 935 if (!band_rule_found)
936 band_rule_found = freq_in_rule_band(fr, center_freq); 936 band_rule_found = freq_in_rule_band(fr, center_freq);
937 937
938 bw_fits = reg_does_bw_fit(fr, center_freq, MHZ_TO_KHZ(5)); 938 bw_fits = reg_does_bw_fit(fr, center_freq, MHZ_TO_KHZ(20));
939 939
940 if (band_rule_found && bw_fits) 940 if (band_rule_found && bw_fits)
941 return rr; 941 return rr;
@@ -1019,10 +1019,10 @@ static void chan_reg_rule_print_dbg(const struct ieee80211_regdomain *regd,
1019} 1019}
1020#endif 1020#endif
1021 1021
1022/* Find an ieee80211_reg_rule such that a 5MHz channel with frequency 1022/*
1023 * chan->center_freq fits there. 1023 * Note that right now we assume the desired channel bandwidth
1024 * If there is no such reg_rule, disable the channel, otherwise set the 1024 * is always 20 MHz for each individual channel (HT40 uses 20 MHz
1025 * flags corresponding to the bandwidths allowed in the particular reg_rule 1025 * per channel, the primary and the extension channel).
1026 */ 1026 */
1027static void handle_channel(struct wiphy *wiphy, 1027static void handle_channel(struct wiphy *wiphy,
1028 enum nl80211_reg_initiator initiator, 1028 enum nl80211_reg_initiator initiator,
@@ -1083,12 +1083,8 @@ static void handle_channel(struct wiphy *wiphy,
1083 if (reg_rule->flags & NL80211_RRF_AUTO_BW) 1083 if (reg_rule->flags & NL80211_RRF_AUTO_BW)
1084 max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule); 1084 max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule);
1085 1085
1086 if (max_bandwidth_khz < MHZ_TO_KHZ(10))
1087 bw_flags = IEEE80211_CHAN_NO_10MHZ;
1088 if (max_bandwidth_khz < MHZ_TO_KHZ(20))
1089 bw_flags |= IEEE80211_CHAN_NO_20MHZ;
1090 if (max_bandwidth_khz < MHZ_TO_KHZ(40)) 1086 if (max_bandwidth_khz < MHZ_TO_KHZ(40))
1091 bw_flags |= IEEE80211_CHAN_NO_HT40; 1087 bw_flags = IEEE80211_CHAN_NO_HT40;
1092 if (max_bandwidth_khz < MHZ_TO_KHZ(80)) 1088 if (max_bandwidth_khz < MHZ_TO_KHZ(80))
1093 bw_flags |= IEEE80211_CHAN_NO_80MHZ; 1089 bw_flags |= IEEE80211_CHAN_NO_80MHZ;
1094 if (max_bandwidth_khz < MHZ_TO_KHZ(160)) 1090 if (max_bandwidth_khz < MHZ_TO_KHZ(160))
@@ -1522,12 +1518,8 @@ static void handle_channel_custom(struct wiphy *wiphy,
1522 if (reg_rule->flags & NL80211_RRF_AUTO_BW) 1518 if (reg_rule->flags & NL80211_RRF_AUTO_BW)
1523 max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule); 1519 max_bandwidth_khz = reg_get_max_bandwidth(regd, reg_rule);
1524 1520
1525 if (max_bandwidth_khz < MHZ_TO_KHZ(10))
1526 bw_flags = IEEE80211_CHAN_NO_10MHZ;
1527 if (max_bandwidth_khz < MHZ_TO_KHZ(20))
1528 bw_flags |= IEEE80211_CHAN_NO_20MHZ;
1529 if (max_bandwidth_khz < MHZ_TO_KHZ(40)) 1521 if (max_bandwidth_khz < MHZ_TO_KHZ(40))
1530 bw_flags |= IEEE80211_CHAN_NO_HT40; 1522 bw_flags = IEEE80211_CHAN_NO_HT40;
1531 if (max_bandwidth_khz < MHZ_TO_KHZ(80)) 1523 if (max_bandwidth_khz < MHZ_TO_KHZ(80))
1532 bw_flags |= IEEE80211_CHAN_NO_80MHZ; 1524 bw_flags |= IEEE80211_CHAN_NO_80MHZ;
1533 if (max_bandwidth_khz < MHZ_TO_KHZ(160)) 1525 if (max_bandwidth_khz < MHZ_TO_KHZ(160))
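A standalone sketch (not kernel code) of the flag derivation that remains after the reg.c hunks above: with every individual channel assumed to be 20 MHz wide, only the HT40/80/160 restrictions are derived from the rule's maximum bandwidth. The flag values below are illustrative and are not the real IEEE80211_CHAN_* bit positions.

#include <stdio.h>

#define CHAN_NO_HT40	0x1
#define CHAN_NO_80MHZ	0x2
#define CHAN_NO_160MHZ	0x4
#define MHZ_TO_KHZ(x)	((x) * 1000)

static unsigned int bw_flags_for(unsigned int max_bandwidth_khz)
{
	unsigned int flags = 0;

	if (max_bandwidth_khz < MHZ_TO_KHZ(40))
		flags = CHAN_NO_HT40;
	if (max_bandwidth_khz < MHZ_TO_KHZ(80))
		flags |= CHAN_NO_80MHZ;
	if (max_bandwidth_khz < MHZ_TO_KHZ(160))
		flags |= CHAN_NO_160MHZ;
	return flags;
}

int main(void)
{
	printf("20 MHz rule -> 0x%x, 40 MHz rule -> 0x%x, 160 MHz rule -> 0x%x\n",
	       bw_flags_for(MHZ_TO_KHZ(20)), bw_flags_for(MHZ_TO_KHZ(40)),
	       bw_flags_for(MHZ_TO_KHZ(160)));
	return 0;
}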
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 010b18ef4ea0..182be0f12407 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -3476,12 +3476,17 @@ sub process {
3476 } 3476 }
3477 } 3477 }
3478 3478
3479# unnecessary return in a void function? (a single leading tab, then return;) 3479# unnecessary return in a void function
3480 if ($sline =~ /^\+\treturn\s*;\s*$/ && 3480# at end-of-function, with the previous line a single leading tab, then return;
3481 $prevline =~ /^\+/) { 3481# and the line before that not a goto label target like "out:"
3482 if ($sline =~ /^[ \+]}\s*$/ &&
3483 $prevline =~ /^\+\treturn\s*;\s*$/ &&
3484 $linenr >= 3 &&
3485 $lines[$linenr - 3] =~ /^[ +]/ &&
3486 $lines[$linenr - 3] !~ /^[ +]\s*$Ident\s*:/) {
3482 WARN("RETURN_VOID", 3487 WARN("RETURN_VOID",
3483 "void function return statements are not generally useful\n" . $herecurr); 3488 "void function return statements are not generally useful\n" . $hereprev);
3484 } 3489 }
3485 3490
3486# if statements using unnecessary parentheses - ie: if ((foo == bar)) 3491# if statements using unnecessary parentheses - ie: if ((foo == bar))
3487 if ($^V && $^V ge 5.10.0 && 3492 if ($^V && $^V ge 5.10.0 &&
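An illustration in C (the script itself is Perl) of what the tightened RETURN_VOID check above is aiming at: warn only about a bare "return;" that is the very last statement of a void function, and leave alone a "return;" that follows a goto label, where it is needed as the label's statement. Both functions below are made up for the example.

#include <stdio.h>

static void warned_case(int x)
{
	if (x)
		printf("x set\n");
	return;			/* flagged: the function ends right after this */
}

static void not_warned_case(int x)
{
	if (!x)
		return;		/* early return in the middle: not flagged */
	if (x < 0)
		goto out;
	printf("positive\n");
out:
	return;			/* needed as the label's statement: not flagged */
}

int main(void)
{
	warned_case(1);
	not_warned_case(-1);
	return 0;
}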
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index da058da413e7..16a07cfa4d34 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -2073,6 +2073,7 @@ sub check_return_section {
2073sub dump_function($$) { 2073sub dump_function($$) {
2074 my $prototype = shift; 2074 my $prototype = shift;
2075 my $file = shift; 2075 my $file = shift;
2076 my $noret = 0;
2076 2077
2077 $prototype =~ s/^static +//; 2078 $prototype =~ s/^static +//;
2078 $prototype =~ s/^extern +//; 2079 $prototype =~ s/^extern +//;
@@ -2086,7 +2087,7 @@ sub dump_function($$) {
2086 $prototype =~ s/__init_or_module +//; 2087 $prototype =~ s/__init_or_module +//;
2087 $prototype =~ s/__must_check +//; 2088 $prototype =~ s/__must_check +//;
2088 $prototype =~ s/__weak +//; 2089 $prototype =~ s/__weak +//;
2089 $prototype =~ s/^#\s*define\s+//; #ak added 2090 my $define = $prototype =~ s/^#\s*define\s+//; #ak added
2090 $prototype =~ s/__attribute__\s*\(\([a-z,]*\)\)//; 2091 $prototype =~ s/__attribute__\s*\(\([a-z,]*\)\)//;
2091 2092
2092 # Yes, this truly is vile. We are looking for: 2093 # Yes, this truly is vile. We are looking for:
@@ -2105,7 +2106,15 @@ sub dump_function($$) {
2105 # - atomic_set (macro) 2106 # - atomic_set (macro)
2106 # - pci_match_device, __copy_to_user (long return type) 2107 # - pci_match_device, __copy_to_user (long return type)
2107 2108
2108 if ($prototype =~ m/^()([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ || 2109 if ($define && $prototype =~ m/^()([a-zA-Z0-9_~:]+)\s+/) {
2110 # This is an object-like macro, it has no return type and no parameter
2111 # list.
2112 # Function-like macros are not allowed to have spaces between
2113 # declaration_name and opening parenthesis (notice the \s+).
2114 $return_type = $1;
2115 $declaration_name = $2;
2116 $noret = 1;
2117 } elsif ($prototype =~ m/^()([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ ||
2109 $prototype =~ m/^(\w+)\s+([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ || 2118 $prototype =~ m/^(\w+)\s+([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ ||
2110 $prototype =~ m/^(\w+\s*\*)\s*([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ || 2119 $prototype =~ m/^(\w+\s*\*)\s*([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ ||
2111 $prototype =~ m/^(\w+\s+\w+)\s+([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ || 2120 $prototype =~ m/^(\w+\s+\w+)\s+([a-zA-Z0-9_~:]+)\s*\(([^\(]*)\)/ ||
@@ -2140,7 +2149,7 @@ sub dump_function($$) {
2140 # of warnings goes sufficiently down, the check is only performed in 2149 # of warnings goes sufficiently down, the check is only performed in
2141 # verbose mode. 2150 # verbose mode.
2142 # TODO: always perform the check. 2151 # TODO: always perform the check.
2143 if ($verbose) { 2152 if ($verbose && !$noret) {
2144 check_return_section($file, $declaration_name, $return_type); 2153 check_return_section($file, $declaration_name, $return_type);
2145 } 2154 }
2146 2155
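A C-side illustration of the two macro shapes the kernel-doc change above distinguishes: an object-like macro has no parameter list and no return value, so the "Return:" section check is now skipped for it, while a function-like macro must have the '(' glued to its name. The macros below are invented purely for the example.

#include <stdio.h>

/**
 * BUF_SIZE - object-like macro: just a name and a replacement value
 */
#define BUF_SIZE 64

/**
 * SQUARE - function-like macro: '(' follows the name with no space
 * @x: value to square
 *
 * Return: x * x
 */
#define SQUARE(x) ((x) * (x))

int main(void)
{
	char buf[BUF_SIZE];

	(void)buf;
	printf("SQUARE(7) = %d\n", SQUARE(7));
	return 0;
}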
diff --git a/scripts/recordmcount.h b/scripts/recordmcount.h
index 9d1421e63ff8..49b582a225b0 100644
--- a/scripts/recordmcount.h
+++ b/scripts/recordmcount.h
@@ -163,11 +163,11 @@ static int mcount_adjust = 0;
163 163
164static int MIPS_is_fake_mcount(Elf_Rel const *rp) 164static int MIPS_is_fake_mcount(Elf_Rel const *rp)
165{ 165{
166 static Elf_Addr old_r_offset; 166 static Elf_Addr old_r_offset = ~(Elf_Addr)0;
167 Elf_Addr current_r_offset = _w(rp->r_offset); 167 Elf_Addr current_r_offset = _w(rp->r_offset);
168 int is_fake; 168 int is_fake;
169 169
170 is_fake = old_r_offset && 170 is_fake = (old_r_offset != ~(Elf_Addr)0) &&
171 (current_r_offset - old_r_offset == MIPS_FAKEMCOUNT_OFFSET); 171 (current_r_offset - old_r_offset == MIPS_FAKEMCOUNT_OFFSET);
172 old_r_offset = current_r_offset; 172 old_r_offset = current_r_offset;
173 173
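A standalone sketch (not the build-tool code) of the sentinel fix in the recordmcount.h hunk above: the previous relocation offset is remembered in a static, and 0 is a legitimate offset, so "no previous record" has to be encoded as ~0 rather than 0, otherwise a record at offset 0 would poison the detection of the following fake mcount entry.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FAKEMCOUNT_OFFSET 4	/* example value */

static bool is_fake_mcount(uint64_t current_r_offset)
{
	static uint64_t old_r_offset = ~(uint64_t)0;	/* "no previous record" */
	bool is_fake;

	is_fake = (old_r_offset != ~(uint64_t)0) &&
		  (current_r_offset - old_r_offset == FAKEMCOUNT_OFFSET);
	old_r_offset = current_r_offset;
	return is_fake;
}

int main(void)
{
	/* a first record at offset 0 must not hide the fake entry that follows */
	printf("%d %d %d\n", is_fake_mcount(0), is_fake_mcount(4), is_fake_mcount(100));
	return 0;
}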
diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c
index b684c6e4f301..dabe41975a9d 100644
--- a/sound/pci/hda/hda_auto_parser.c
+++ b/sound/pci/hda/hda_auto_parser.c
@@ -898,6 +898,7 @@ void snd_hda_pick_fixup(struct hda_codec *codec,
898 if (!strcmp(codec->modelname, models->name)) { 898 if (!strcmp(codec->modelname, models->name)) {
899 codec->fixup_id = models->id; 899 codec->fixup_id = models->id;
900 codec->fixup_name = models->name; 900 codec->fixup_name = models->name;
901 codec->fixup_list = fixlist;
901 codec->fixup_forced = 1; 902 codec->fixup_forced = 1;
902 return; 903 return;
903 } 904 }
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
index 480bbddbd801..6df04d91c93c 100644
--- a/sound/pci/hda/hda_controller.c
+++ b/sound/pci/hda/hda_controller.c
@@ -193,7 +193,8 @@ azx_assign_device(struct azx *chip, struct snd_pcm_substream *substream)
193 dsp_unlock(azx_dev); 193 dsp_unlock(azx_dev);
194 return azx_dev; 194 return azx_dev;
195 } 195 }
196 if (!res) 196 if (!res ||
197 (chip->driver_caps & AZX_DCAPS_REVERSE_ASSIGN))
197 res = azx_dev; 198 res = azx_dev;
198 } 199 }
199 dsp_unlock(azx_dev); 200 dsp_unlock(azx_dev);
diff --git a/sound/pci/hda/hda_i915.c b/sound/pci/hda/hda_i915.c
index e9e8a4a4a9a1..8b4940ba33d6 100644
--- a/sound/pci/hda/hda_i915.c
+++ b/sound/pci/hda/hda_i915.c
@@ -20,10 +20,20 @@
20#include <linux/module.h> 20#include <linux/module.h>
21#include <sound/core.h> 21#include <sound/core.h>
22#include <drm/i915_powerwell.h> 22#include <drm/i915_powerwell.h>
23#include "hda_priv.h"
23#include "hda_i915.h" 24#include "hda_i915.h"
24 25
26/* Intel HSW/BDW display HDA controller Extended Mode registers.
27 * EM4 (M value) and EM5 (N Value) are used to convert CDClk (Core Display
28 * Clock) to 24MHz BCLK: BCLK = CDCLK * M / N
29 * The values will be lost when the display power well is disabled.
30 */
31#define ICH6_REG_EM4 0x100c
32#define ICH6_REG_EM5 0x1010
33
25static int (*get_power)(void); 34static int (*get_power)(void);
26static int (*put_power)(void); 35static int (*put_power)(void);
36static int (*get_cdclk)(void);
27 37
28int hda_display_power(bool enable) 38int hda_display_power(bool enable)
29{ 39{
@@ -38,6 +48,43 @@ int hda_display_power(bool enable)
38 return put_power(); 48 return put_power();
39} 49}
40 50
51void haswell_set_bclk(struct azx *chip)
52{
53 int cdclk_freq;
54 unsigned int bclk_m, bclk_n;
55
56 if (!get_cdclk)
57 return;
58
59 cdclk_freq = get_cdclk();
60 switch (cdclk_freq) {
61 case 337500:
62 bclk_m = 16;
63 bclk_n = 225;
64 break;
65
66 case 450000:
67 default: /* default CDCLK 450MHz */
68 bclk_m = 4;
69 bclk_n = 75;
70 break;
71
72 case 540000:
73 bclk_m = 4;
74 bclk_n = 90;
75 break;
76
77 case 675000:
78 bclk_m = 8;
79 bclk_n = 225;
80 break;
81 }
82
83 azx_writew(chip, EM4, bclk_m);
84 azx_writew(chip, EM5, bclk_n);
85}
86
87
41int hda_i915_init(void) 88int hda_i915_init(void)
42{ 89{
43 int err = 0; 90 int err = 0;
@@ -55,6 +102,10 @@ int hda_i915_init(void)
55 return -ENODEV; 102 return -ENODEV;
56 } 103 }
57 104
105 get_cdclk = symbol_request(i915_get_cdclk_freq);
106 if (!get_cdclk) /* may have abnormal BCLK and audio playback rate */
107 pr_warn("hda-i915: get_cdclk symbol get fail\n");
108
58 pr_debug("HDA driver get symbol successfully from i915 module\n"); 109 pr_debug("HDA driver get symbol successfully from i915 module\n");
59 110
60 return err; 111 return err;
@@ -70,6 +121,10 @@ int hda_i915_exit(void)
70 symbol_put(i915_release_power_well); 121 symbol_put(i915_release_power_well);
71 put_power = NULL; 122 put_power = NULL;
72 } 123 }
124 if (get_cdclk) {
125 symbol_put(i915_get_cdclk_freq);
126 get_cdclk = NULL;
127 }
73 128
74 return 0; 129 return 0;
75} 130}
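A standalone check (not driver code) of the M/N table added in haswell_set_bclk() above: every pair should convert the display core clock back into the 24 MHz HDA bit clock, BCLK = CDCLK * M / N, with all frequencies in kHz.

#include <stdio.h>

int main(void)
{
	struct { unsigned int cdclk_khz, m, n; } tab[] = {
		{ 337500, 16, 225 },
		{ 450000,  4,  75 },
		{ 540000,  4,  90 },
		{ 675000,  8, 225 },
	};

	for (unsigned int i = 0; i < sizeof(tab) / sizeof(tab[0]); i++)
		printf("CDCLK %6u kHz * %2u / %3u = %u kHz BCLK\n",
		       tab[i].cdclk_khz, tab[i].m, tab[i].n,
		       tab[i].cdclk_khz * tab[i].m / tab[i].n);
	return 0;
}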
diff --git a/sound/pci/hda/hda_i915.h b/sound/pci/hda/hda_i915.h
index bfd835f8f1aa..e6072c627583 100644
--- a/sound/pci/hda/hda_i915.h
+++ b/sound/pci/hda/hda_i915.h
@@ -18,10 +18,12 @@
18 18
19#ifdef CONFIG_SND_HDA_I915 19#ifdef CONFIG_SND_HDA_I915
20int hda_display_power(bool enable); 20int hda_display_power(bool enable);
21void haswell_set_bclk(struct azx *chip);
21int hda_i915_init(void); 22int hda_i915_init(void);
22int hda_i915_exit(void); 23int hda_i915_exit(void);
23#else 24#else
24static inline int hda_display_power(bool enable) { return 0; } 25static inline int hda_display_power(bool enable) { return 0; }
26static inline void haswell_set_bclk(struct azx *chip) { return; }
25static inline int hda_i915_init(void) 27static inline int hda_i915_init(void)
26{ 28{
27 return -ENODEV; 29 return -ENODEV;
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 23fd6b9aecca..83cd19017cf3 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -62,9 +62,9 @@
62#include <linux/vga_switcheroo.h> 62#include <linux/vga_switcheroo.h>
63#include <linux/firmware.h> 63#include <linux/firmware.h>
64#include "hda_codec.h" 64#include "hda_codec.h"
65#include "hda_i915.h"
66#include "hda_controller.h" 65#include "hda_controller.h"
67#include "hda_priv.h" 66#include "hda_priv.h"
67#include "hda_i915.h"
68 68
69 69
70static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; 70static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX;
@@ -227,7 +227,7 @@ enum {
227/* quirks for Intel PCH */ 227/* quirks for Intel PCH */
228#define AZX_DCAPS_INTEL_PCH_NOPM \ 228#define AZX_DCAPS_INTEL_PCH_NOPM \
229 (AZX_DCAPS_SCH_SNOOP | AZX_DCAPS_BUFSIZE | \ 229 (AZX_DCAPS_SCH_SNOOP | AZX_DCAPS_BUFSIZE | \
230 AZX_DCAPS_COUNT_LPIB_DELAY) 230 AZX_DCAPS_COUNT_LPIB_DELAY | AZX_DCAPS_REVERSE_ASSIGN)
231 231
232#define AZX_DCAPS_INTEL_PCH \ 232#define AZX_DCAPS_INTEL_PCH \
233 (AZX_DCAPS_INTEL_PCH_NOPM | AZX_DCAPS_PM_RUNTIME) 233 (AZX_DCAPS_INTEL_PCH_NOPM | AZX_DCAPS_PM_RUNTIME)
@@ -288,6 +288,11 @@ static char *driver_short_names[] = {
288 [AZX_DRIVER_GENERIC] = "HD-Audio Generic", 288 [AZX_DRIVER_GENERIC] = "HD-Audio Generic",
289}; 289};
290 290
291struct hda_intel {
292 struct azx chip;
293};
294
295
291#ifdef CONFIG_X86 296#ifdef CONFIG_X86
292static void __mark_pages_wc(struct azx *chip, struct snd_dma_buffer *dmab, bool on) 297static void __mark_pages_wc(struct azx *chip, struct snd_dma_buffer *dmab, bool on)
293{ 298{
@@ -591,7 +596,7 @@ static int azx_suspend(struct device *dev)
591 struct azx *chip = card->private_data; 596 struct azx *chip = card->private_data;
592 struct azx_pcm *p; 597 struct azx_pcm *p;
593 598
594 if (chip->disabled) 599 if (chip->disabled || chip->init_failed)
595 return 0; 600 return 0;
596 601
597 snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); 602 snd_power_change_state(card, SNDRV_CTL_POWER_D3hot);
@@ -606,6 +611,7 @@ static int azx_suspend(struct device *dev)
606 free_irq(chip->irq, chip); 611 free_irq(chip->irq, chip);
607 chip->irq = -1; 612 chip->irq = -1;
608 } 613 }
614
609 if (chip->msi) 615 if (chip->msi)
610 pci_disable_msi(chip->pci); 616 pci_disable_msi(chip->pci);
611 pci_disable_device(pci); 617 pci_disable_device(pci);
@@ -622,11 +628,13 @@ static int azx_resume(struct device *dev)
622 struct snd_card *card = dev_get_drvdata(dev); 628 struct snd_card *card = dev_get_drvdata(dev);
623 struct azx *chip = card->private_data; 629 struct azx *chip = card->private_data;
624 630
625 if (chip->disabled) 631 if (chip->disabled || chip->init_failed)
626 return 0; 632 return 0;
627 633
628 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) 634 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
629 hda_display_power(true); 635 hda_display_power(true);
636 haswell_set_bclk(chip);
637 }
630 pci_set_power_state(pci, PCI_D0); 638 pci_set_power_state(pci, PCI_D0);
631 pci_restore_state(pci); 639 pci_restore_state(pci);
632 if (pci_enable_device(pci) < 0) { 640 if (pci_enable_device(pci) < 0) {
@@ -657,7 +665,7 @@ static int azx_runtime_suspend(struct device *dev)
657 struct snd_card *card = dev_get_drvdata(dev); 665 struct snd_card *card = dev_get_drvdata(dev);
658 struct azx *chip = card->private_data; 666 struct azx *chip = card->private_data;
659 667
660 if (chip->disabled) 668 if (chip->disabled || chip->init_failed)
661 return 0; 669 return 0;
662 670
663 if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME)) 671 if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
@@ -672,6 +680,7 @@ static int azx_runtime_suspend(struct device *dev)
672 azx_clear_irq_pending(chip); 680 azx_clear_irq_pending(chip);
673 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) 681 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
674 hda_display_power(false); 682 hda_display_power(false);
683
675 return 0; 684 return 0;
676} 685}
677 686
@@ -683,14 +692,16 @@ static int azx_runtime_resume(struct device *dev)
683 struct hda_codec *codec; 692 struct hda_codec *codec;
684 int status; 693 int status;
685 694
686 if (chip->disabled) 695 if (chip->disabled || chip->init_failed)
687 return 0; 696 return 0;
688 697
689 if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME)) 698 if (!(chip->driver_caps & AZX_DCAPS_PM_RUNTIME))
690 return 0; 699 return 0;
691 700
692 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) 701 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
693 hda_display_power(true); 702 hda_display_power(true);
703 haswell_set_bclk(chip);
704 }
694 705
695 /* Read STATESTS before controller reset */ 706 /* Read STATESTS before controller reset */
696 status = azx_readw(chip, STATESTS); 707 status = azx_readw(chip, STATESTS);
@@ -718,7 +729,7 @@ static int azx_runtime_idle(struct device *dev)
718 struct snd_card *card = dev_get_drvdata(dev); 729 struct snd_card *card = dev_get_drvdata(dev);
719 struct azx *chip = card->private_data; 730 struct azx *chip = card->private_data;
720 731
721 if (chip->disabled) 732 if (chip->disabled || chip->init_failed)
722 return 0; 733 return 0;
723 734
724 if (!power_save_controller || 735 if (!power_save_controller ||
@@ -883,6 +894,8 @@ static int register_vga_switcheroo(struct azx *chip)
883static int azx_free(struct azx *chip) 894static int azx_free(struct azx *chip)
884{ 895{
885 struct pci_dev *pci = chip->pci; 896 struct pci_dev *pci = chip->pci;
897 struct hda_intel *hda = container_of(chip, struct hda_intel, chip);
898
886 int i; 899 int i;
887 900
888 if ((chip->driver_caps & AZX_DCAPS_PM_RUNTIME) 901 if ((chip->driver_caps & AZX_DCAPS_PM_RUNTIME)
@@ -930,7 +943,7 @@ static int azx_free(struct azx *chip)
930 hda_display_power(false); 943 hda_display_power(false);
931 hda_i915_exit(); 944 hda_i915_exit();
932 } 945 }
933 kfree(chip); 946 kfree(hda);
934 947
935 return 0; 948 return 0;
936} 949}
@@ -1174,6 +1187,7 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci,
1174 static struct snd_device_ops ops = { 1187 static struct snd_device_ops ops = {
1175 .dev_free = azx_dev_free, 1188 .dev_free = azx_dev_free,
1176 }; 1189 };
1190 struct hda_intel *hda;
1177 struct azx *chip; 1191 struct azx *chip;
1178 int err; 1192 int err;
1179 1193
@@ -1183,13 +1197,14 @@ static int azx_create(struct snd_card *card, struct pci_dev *pci,
1183 if (err < 0) 1197 if (err < 0)
1184 return err; 1198 return err;
1185 1199
1186 chip = kzalloc(sizeof(*chip), GFP_KERNEL); 1200 hda = kzalloc(sizeof(*hda), GFP_KERNEL);
1187 if (!chip) { 1201 if (!hda) {
1188 dev_err(card->dev, "Cannot allocate chip\n"); 1202 dev_err(card->dev, "Cannot allocate hda\n");
1189 pci_disable_device(pci); 1203 pci_disable_device(pci);
1190 return -ENOMEM; 1204 return -ENOMEM;
1191 } 1205 }
1192 1206
1207 chip = &hda->chip;
1193 spin_lock_init(&chip->reg_lock); 1208 spin_lock_init(&chip->reg_lock);
1194 mutex_init(&chip->open_mutex); 1209 mutex_init(&chip->open_mutex);
1195 chip->card = card; 1210 chip->card = card;
@@ -1375,6 +1390,10 @@ static int azx_first_init(struct azx *chip)
1375 1390
1376 /* initialize chip */ 1391 /* initialize chip */
1377 azx_init_pci(chip); 1392 azx_init_pci(chip);
1393
1394 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
1395 haswell_set_bclk(chip);
1396
1378 azx_init_chip(chip, (probe_only[dev] & 2) == 0); 1397 azx_init_chip(chip, (probe_only[dev] & 2) == 0);
1379 1398
1380 /* codec detection */ 1399 /* codec detection */
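The hda_intel.c hunks above embed the shared struct azx inside a bus-specific struct hda_intel: azx_create() allocates the wrapper and hands callers a pointer to the embedded member, and azx_free() recovers the wrapper with container_of() so kfree() releases the outer object. A minimal user-space sketch of that embed-and-recover pattern follows; the types and helpers are simplified stand-ins, not the driver's real layout.

    /*
     * Sketch of the embed-and-recover pattern used above.  The wrapper is
     * allocated, callers only ever see a pointer to the embedded member,
     * and container_of() recovers the wrapper when it is time to free it.
     * All names here are illustrative, not the driver's real layout.
     */
    #include <stdlib.h>
    #include <stddef.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct azx { int irq; };                /* shared controller state */
    struct hda_intel { struct azx chip; };  /* bus-specific wrapper */

    static struct azx *chip_create(void)
    {
        struct hda_intel *hda = calloc(1, sizeof(*hda));

        return hda ? &hda->chip : NULL;     /* callers only see struct azx */
    }

    static void chip_free(struct azx *chip)
    {
        struct hda_intel *hda = container_of(chip, struct hda_intel, chip);

        free(hda);                          /* free the wrapper, not chip */
    }

    int main(void)
    {
        struct azx *chip = chip_create();

        if (chip)
            chip_free(chip);
        return 0;
    }

The same hunks also short-circuit every suspend/resume path when chip->init_failed is set and call haswell_set_bclk() whenever the i915 power well comes back up, so the HDA link clock is reprogrammed before the controller is reinitialized.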
diff --git a/sound/pci/hda/hda_local.h b/sound/pci/hda/hda_local.h
index ebd1fa6f015c..4e2d4863daa1 100644
--- a/sound/pci/hda/hda_local.h
+++ b/sound/pci/hda/hda_local.h
@@ -417,6 +417,27 @@ struct snd_hda_pin_quirk {
417 int value; /* quirk value */ 417 int value; /* quirk value */
418}; 418};
419 419
420#ifdef CONFIG_SND_DEBUG_VERBOSE
421
422#define SND_HDA_PIN_QUIRK(_codec, _subvendor, _name, _value, _pins...) \
423 { .codec = _codec,\
424 .subvendor = _subvendor,\
425 .name = _name,\
426 .value = _value,\
427 .pins = (const struct hda_pintbl[]) { _pins } \
428 }
429#else
430
431#define SND_HDA_PIN_QUIRK(_codec, _subvendor, _name, _value, _pins...) \
432 { .codec = _codec,\
433 .subvendor = _subvendor,\
434 .value = _value,\
435 .pins = (const struct hda_pintbl[]) { _pins } \
436 }
437
438#endif
439
440
420/* fixup types */ 441/* fixup types */
421enum { 442enum {
422 HDA_FIXUP_INVALID, 443 HDA_FIXUP_INVALID,
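The new SND_HDA_PIN_QUIRK macro collapses each pin-quirk table entry into a single invocation: the variadic pin list becomes a compound-literal array, and the .name string is only compiled in when CONFIG_SND_DEBUG_VERBOSE is set. The patch_realtek.c hunks later in this diff convert the ALC269 and ALC662 tables to it. Below is a stand-alone sketch of the same compound-literal technique, with simplified types and made-up values.

    /*
     * Sketch of the compound-literal technique behind SND_HDA_PIN_QUIRK:
     * the variadic pin list becomes an anonymous array, so each table
     * entry is a single macro invocation.  Types and values below are
     * simplified stand-ins; the named-variadic "_pins..." form is the
     * GNU extension the kernel header also relies on.
     */
    #include <stdio.h>

    struct pintbl { unsigned int nid, val; };

    struct pin_quirk {
        unsigned int codec, subvendor, value;
        const struct pintbl *pins;
    };

    #define PIN_QUIRK(_codec, _subvendor, _value, _pins...) \
        { .codec = _codec, .subvendor = _subvendor, .value = _value, \
          .pins = (const struct pintbl[]) { _pins } }

    static const struct pin_quirk table[] = {
        PIN_QUIRK(0x10ec0255, 0x1028, 42,
                  {0x12, 0x90a60140}, {0x14, 0x90170110}),
    };

    int main(void)
    {
        printf("first pin: nid 0x%x -> 0x%x\n",
               table[0].pins[0].nid, table[0].pins[0].val);
        return 0;
    }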
diff --git a/sound/pci/hda/hda_priv.h b/sound/pci/hda/hda_priv.h
index 4a7cb01fa912..e9d1a5762a55 100644
--- a/sound/pci/hda/hda_priv.h
+++ b/sound/pci/hda/hda_priv.h
@@ -186,6 +186,7 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
186#define AZX_DCAPS_BUFSIZE (1 << 21) /* no buffer size alignment */ 186#define AZX_DCAPS_BUFSIZE (1 << 21) /* no buffer size alignment */
187#define AZX_DCAPS_ALIGN_BUFSIZE (1 << 22) /* buffer size alignment */ 187#define AZX_DCAPS_ALIGN_BUFSIZE (1 << 22) /* buffer size alignment */
188#define AZX_DCAPS_4K_BDLE_BOUNDARY (1 << 23) /* BDLE in 4k boundary */ 188#define AZX_DCAPS_4K_BDLE_BOUNDARY (1 << 23) /* BDLE in 4k boundary */
189#define AZX_DCAPS_REVERSE_ASSIGN (1 << 24) /* Assign devices in reverse order */
189#define AZX_DCAPS_COUNT_LPIB_DELAY (1 << 25) /* Take LPIB as delay */ 190#define AZX_DCAPS_COUNT_LPIB_DELAY (1 << 25) /* Take LPIB as delay */
190#define AZX_DCAPS_PM_RUNTIME (1 << 26) /* runtime PM support */ 191#define AZX_DCAPS_PM_RUNTIME (1 << 26) /* runtime PM support */
191#define AZX_DCAPS_I915_POWERWELL (1 << 27) /* HSW i915 powerwell support */ 192#define AZX_DCAPS_I915_POWERWELL (1 << 27) /* HSW i915 powerwell support */
diff --git a/sound/pci/hda/hda_tegra.c b/sound/pci/hda/hda_tegra.c
index a366ba9293a8..358414da6418 100644
--- a/sound/pci/hda/hda_tegra.c
+++ b/sound/pci/hda/hda_tegra.c
@@ -236,6 +236,7 @@ disable_hda:
236 return rc; 236 return rc;
237} 237}
238 238
239#ifdef CONFIG_PM_SLEEP
239static void hda_tegra_disable_clocks(struct hda_tegra *data) 240static void hda_tegra_disable_clocks(struct hda_tegra *data)
240{ 241{
241 clk_disable_unprepare(data->hda2hdmi_clk); 242 clk_disable_unprepare(data->hda2hdmi_clk);
@@ -243,7 +244,6 @@ static void hda_tegra_disable_clocks(struct hda_tegra *data)
243 clk_disable_unprepare(data->hda_clk); 244 clk_disable_unprepare(data->hda_clk);
244} 245}
245 246
246#ifdef CONFIG_PM_SLEEP
247/* 247/*
248 * power management 248 * power management
249 */ 249 */
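Moving hda_tegra_disable_clocks() under the existing CONFIG_PM_SLEEP guard keeps builds without sleep support from warning about a defined-but-unused function, since its only callers are the suspend/resume handlers inside that same guard. A tiny sketch of the pattern, treating CONFIG_PM_SLEEP as a hypothetical -D define:

    /*
     * Sketch: helpers that are only called from suspend/resume code are
     * guarded by the same #ifdef as their callers, otherwise configs
     * without sleep support warn about a defined-but-unused function.
     * CONFIG_PM_SLEEP here is just a hypothetical -D define.
     */
    #include <stdio.h>

    #ifdef CONFIG_PM_SLEEP
    static void disable_clocks(void)     /* only referenced by suspend() */
    {
        puts("clocks off");
    }

    static int suspend(void)
    {
        disable_clocks();
        return 0;
    }
    #endif

    int main(void)
    {
    #ifdef CONFIG_PM_SLEEP
        return suspend();
    #else
        return 0;                        /* builds cleanly either way */
    #endif
    }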
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 3e4417b0ddbe..ba4ca52072ff 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -2204,7 +2204,7 @@ static int generic_hdmi_resume(struct hda_codec *codec)
2204 struct hdmi_spec *spec = codec->spec; 2204 struct hdmi_spec *spec = codec->spec;
2205 int pin_idx; 2205 int pin_idx;
2206 2206
2207 generic_hdmi_init(codec); 2207 codec->patch_ops.init(codec);
2208 snd_hda_codec_resume_amp(codec); 2208 snd_hda_codec_resume_amp(codec);
2209 snd_hda_codec_resume_cache(codec); 2209 snd_hda_codec_resume_cache(codec);
2210 2210
@@ -3337,6 +3337,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
3337{ .id = 0x10de0051, .name = "GPU 51 HDMI/DP", .patch = patch_nvhdmi }, 3337{ .id = 0x10de0051, .name = "GPU 51 HDMI/DP", .patch = patch_nvhdmi },
3338{ .id = 0x10de0060, .name = "GPU 60 HDMI/DP", .patch = patch_nvhdmi }, 3338{ .id = 0x10de0060, .name = "GPU 60 HDMI/DP", .patch = patch_nvhdmi },
3339{ .id = 0x10de0067, .name = "MCP67 HDMI", .patch = patch_nvhdmi_2ch }, 3339{ .id = 0x10de0067, .name = "MCP67 HDMI", .patch = patch_nvhdmi_2ch },
3340{ .id = 0x10de0070, .name = "GPU 70 HDMI/DP", .patch = patch_nvhdmi },
3340{ .id = 0x10de0071, .name = "GPU 71 HDMI/DP", .patch = patch_nvhdmi }, 3341{ .id = 0x10de0071, .name = "GPU 71 HDMI/DP", .patch = patch_nvhdmi },
3341{ .id = 0x10de8001, .name = "MCP73 HDMI", .patch = patch_nvhdmi_2ch }, 3342{ .id = 0x10de8001, .name = "MCP73 HDMI", .patch = patch_nvhdmi_2ch },
3342{ .id = 0x11069f80, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi }, 3343{ .id = 0x11069f80, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi },
@@ -3394,6 +3395,7 @@ MODULE_ALIAS("snd-hda-codec-id:10de0044");
3394MODULE_ALIAS("snd-hda-codec-id:10de0051"); 3395MODULE_ALIAS("snd-hda-codec-id:10de0051");
3395MODULE_ALIAS("snd-hda-codec-id:10de0060"); 3396MODULE_ALIAS("snd-hda-codec-id:10de0060");
3396MODULE_ALIAS("snd-hda-codec-id:10de0067"); 3397MODULE_ALIAS("snd-hda-codec-id:10de0067");
3398MODULE_ALIAS("snd-hda-codec-id:10de0070");
3397MODULE_ALIAS("snd-hda-codec-id:10de0071"); 3399MODULE_ALIAS("snd-hda-codec-id:10de0071");
3398MODULE_ALIAS("snd-hda-codec-id:10de8001"); 3400MODULE_ALIAS("snd-hda-codec-id:10de8001");
3399MODULE_ALIAS("snd-hda-codec-id:11069f80"); 3401MODULE_ALIAS("snd-hda-codec-id:11069f80");
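In generic_hdmi_resume() the codec is now reinitialized through codec->patch_ops.init() rather than by calling generic_hdmi_init() directly, so variants that install their own init callback keep their overrides across resume; the hunk also adds the 0x10de0070 NVIDIA codec ID and its module alias. A small sketch of why resuming through the ops table matters, with made-up names:

    /*
     * Sketch of resuming through the ops table: a codec variant may have
     * installed its own init callback, and calling the generic function
     * directly would skip that override on resume.  Names are made up.
     */
    #include <stdio.h>

    struct codec_ops { int (*init)(void); };
    struct codec { struct codec_ops ops; };

    static int generic_init(void) { puts("generic init"); return 0; }
    static int vendor_init(void)  { puts("vendor init");  return generic_init(); }

    static int resume(struct codec *c)
    {
        return c->ops.init();            /* honours the variant's override */
    }

    int main(void)
    {
        struct codec c = { .ops = { .init = vendor_init } };

        return resume(&c);
    }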
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index af76995fa966..b60824e90408 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -4880,6 +4880,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
4880 SND_PCI_QUIRK(0x17aa, 0x2208, "Thinkpad T431s", ALC269_FIXUP_LENOVO_DOCK), 4880 SND_PCI_QUIRK(0x17aa, 0x2208, "Thinkpad T431s", ALC269_FIXUP_LENOVO_DOCK),
4881 SND_PCI_QUIRK(0x17aa, 0x220c, "Thinkpad T440s", ALC292_FIXUP_TPT440_DOCK), 4881 SND_PCI_QUIRK(0x17aa, 0x220c, "Thinkpad T440s", ALC292_FIXUP_TPT440_DOCK),
4882 SND_PCI_QUIRK(0x17aa, 0x220e, "Thinkpad T440p", ALC292_FIXUP_TPT440_DOCK), 4882 SND_PCI_QUIRK(0x17aa, 0x220e, "Thinkpad T440p", ALC292_FIXUP_TPT440_DOCK),
4883 SND_PCI_QUIRK(0x17aa, 0x2210, "Thinkpad T540p", ALC292_FIXUP_TPT440_DOCK),
4883 SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 4884 SND_PCI_QUIRK(0x17aa, 0x2212, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
4884 SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 4885 SND_PCI_QUIRK(0x17aa, 0x2214, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
4885 SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST), 4886 SND_PCI_QUIRK(0x17aa, 0x2215, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -4962,228 +4963,141 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
4962}; 4963};
4963 4964
4964static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = { 4965static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
4965 { 4966 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
4966 .codec = 0x10ec0255, 4967 {0x12, 0x90a60140},
4967 .subvendor = 0x1028, 4968 {0x14, 0x90170110},
4968#ifdef CONFIG_SND_DEBUG_VERBOSE 4969 {0x17, 0x40000000},
4969 .name = "Dell", 4970 {0x18, 0x411111f0},
4970#endif 4971 {0x19, 0x411111f0},
4971 .pins = (const struct hda_pintbl[]) { 4972 {0x1a, 0x411111f0},
4972 {0x12, 0x90a60140}, 4973 {0x1b, 0x411111f0},
4973 {0x14, 0x90170110}, 4974 {0x1d, 0x40700001},
4974 {0x17, 0x40000000}, 4975 {0x1e, 0x411111f0},
4975 {0x18, 0x411111f0}, 4976 {0x21, 0x02211020}),
4976 {0x19, 0x411111f0}, 4977 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
4977 {0x1a, 0x411111f0}, 4978 {0x12, 0x90a60160},
4978 {0x1b, 0x411111f0}, 4979 {0x14, 0x90170120},
4979 {0x1d, 0x40700001}, 4980 {0x17, 0x40000000},
4980 {0x1e, 0x411111f0}, 4981 {0x18, 0x411111f0},
4981 {0x21, 0x02211020}, 4982 {0x19, 0x411111f0},
4982 }, 4983 {0x1a, 0x411111f0},
4983 .value = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, 4984 {0x1b, 0x411111f0},
4984 }, 4985 {0x1d, 0x40700001},
4985 { 4986 {0x1e, 0x411111f0},
4986 .codec = 0x10ec0255, 4987 {0x21, 0x02211030}),
4987 .subvendor = 0x1028, 4988 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
4988#ifdef CONFIG_SND_DEBUG_VERBOSE 4989 {0x12, 0x90a60160},
4989 .name = "Dell", 4990 {0x14, 0x90170120},
4990#endif 4991 {0x17, 0x90170140},
4991 .pins = (const struct hda_pintbl[]) { 4992 {0x18, 0x40000000},
4992 {0x12, 0x90a60160}, 4993 {0x19, 0x411111f0},
4993 {0x14, 0x90170120}, 4994 {0x1a, 0x411111f0},
4994 {0x17, 0x40000000}, 4995 {0x1b, 0x411111f0},
4995 {0x18, 0x411111f0}, 4996 {0x1d, 0x41163b05},
4996 {0x19, 0x411111f0}, 4997 {0x1e, 0x411111f0},
4997 {0x1a, 0x411111f0}, 4998 {0x21, 0x0321102f}),
4998 {0x1b, 0x411111f0}, 4999 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
4999 {0x1d, 0x40700001}, 5000 {0x12, 0x90a60160},
5000 {0x1e, 0x411111f0}, 5001 {0x14, 0x90170130},
5001 {0x21, 0x02211030}, 5002 {0x17, 0x40000000},
5002 }, 5003 {0x18, 0x411111f0},
5003 .value = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, 5004 {0x19, 0x411111f0},
5004 }, 5005 {0x1a, 0x411111f0},
5005 { 5006 {0x1b, 0x411111f0},
5006 .codec = 0x10ec0255, 5007 {0x1d, 0x40700001},
5007 .subvendor = 0x1028, 5008 {0x1e, 0x411111f0},
5008#ifdef CONFIG_SND_DEBUG_VERBOSE 5009 {0x21, 0x02211040}),
5009 .name = "Dell", 5010 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5010#endif 5011 {0x12, 0x90a60160},
5011 .pins = (const struct hda_pintbl[]) { 5012 {0x14, 0x90170140},
5012 {0x12, 0x90a60160}, 5013 {0x17, 0x40000000},
5013 {0x14, 0x90170120}, 5014 {0x18, 0x411111f0},
5014 {0x17, 0x90170140}, 5015 {0x19, 0x411111f0},
5015 {0x18, 0x40000000}, 5016 {0x1a, 0x411111f0},
5016 {0x19, 0x411111f0}, 5017 {0x1b, 0x411111f0},
5017 {0x1a, 0x411111f0}, 5018 {0x1d, 0x40700001},
5018 {0x1b, 0x411111f0}, 5019 {0x1e, 0x411111f0},
5019 {0x1d, 0x41163b05}, 5020 {0x21, 0x02211050}),
5020 {0x1e, 0x411111f0}, 5021 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5021 {0x21, 0x0321102f}, 5022 {0x12, 0x90a60170},
5022 }, 5023 {0x14, 0x90170120},
5023 .value = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, 5024 {0x17, 0x40000000},
5024 }, 5025 {0x18, 0x411111f0},
5025 { 5026 {0x19, 0x411111f0},
5026 .codec = 0x10ec0255, 5027 {0x1a, 0x411111f0},
5027 .subvendor = 0x1028, 5028 {0x1b, 0x411111f0},
5028#ifdef CONFIG_SND_DEBUG_VERBOSE 5029 {0x1d, 0x40700001},
5029 .name = "Dell", 5030 {0x1e, 0x411111f0},
5030#endif 5031 {0x21, 0x02211030}),
5031 .pins = (const struct hda_pintbl[]) { 5032 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5032 {0x12, 0x90a60160}, 5033 {0x12, 0x90a60170},
5033 {0x14, 0x90170130}, 5034 {0x14, 0x90170130},
5034 {0x17, 0x40000000}, 5035 {0x17, 0x40000000},
5035 {0x18, 0x411111f0}, 5036 {0x18, 0x411111f0},
5036 {0x19, 0x411111f0}, 5037 {0x19, 0x411111f0},
5037 {0x1a, 0x411111f0}, 5038 {0x1a, 0x411111f0},
5038 {0x1b, 0x411111f0}, 5039 {0x1b, 0x411111f0},
5039 {0x1d, 0x40700001}, 5040 {0x1d, 0x40700001},
5040 {0x1e, 0x411111f0}, 5041 {0x1e, 0x411111f0},
5041 {0x21, 0x02211040}, 5042 {0x21, 0x02211040}),
5042 }, 5043 SND_HDA_PIN_QUIRK(0x10ec0283, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
5043 .value = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, 5044 {0x12, 0x90a60130},
5044 }, 5045 {0x14, 0x90170110},
5045 { 5046 {0x17, 0x40020008},
5046 .codec = 0x10ec0255, 5047 {0x18, 0x411111f0},
5047 .subvendor = 0x1028, 5048 {0x19, 0x411111f0},
5048#ifdef CONFIG_SND_DEBUG_VERBOSE 5049 {0x1a, 0x411111f0},
5049 .name = "Dell", 5050 {0x1b, 0x411111f0},
5050#endif 5051 {0x1d, 0x40e00001},
5051 .pins = (const struct hda_pintbl[]) { 5052 {0x1e, 0x411111f0},
5052 {0x12, 0x90a60160}, 5053 {0x21, 0x0321101f}),
5053 {0x14, 0x90170140}, 5054 SND_HDA_PIN_QUIRK(0x10ec0283, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
5054 {0x17, 0x40000000}, 5055 {0x12, 0x90a60160},
5055 {0x18, 0x411111f0}, 5056 {0x14, 0x90170120},
5056 {0x19, 0x411111f0}, 5057 {0x17, 0x40000000},
5057 {0x1a, 0x411111f0}, 5058 {0x18, 0x411111f0},
5058 {0x1b, 0x411111f0}, 5059 {0x19, 0x411111f0},
5059 {0x1d, 0x40700001}, 5060 {0x1a, 0x411111f0},
5060 {0x1e, 0x411111f0}, 5061 {0x1b, 0x411111f0},
5061 {0x21, 0x02211050}, 5062 {0x1d, 0x40700001},
5062 }, 5063 {0x1e, 0x411111f0},
5063 .value = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, 5064 {0x21, 0x02211030}),
5064 }, 5065 SND_HDA_PIN_QUIRK(0x10ec0292, 0x1028, "Dell", ALC269_FIXUP_DELL3_MIC_NO_PRESENCE,
5065 { 5066 {0x12, 0x90a60140},
5066 .codec = 0x10ec0255, 5067 {0x13, 0x411111f0},
5067 .subvendor = 0x1028, 5068 {0x14, 0x90170110},
5068#ifdef CONFIG_SND_DEBUG_VERBOSE 5069 {0x15, 0x0221401f},
5069 .name = "Dell", 5070 {0x16, 0x411111f0},
5070#endif 5071 {0x18, 0x411111f0},
5071 .pins = (const struct hda_pintbl[]) { 5072 {0x19, 0x411111f0},
5072 {0x12, 0x90a60170}, 5073 {0x1a, 0x411111f0},
5073 {0x14, 0x90170120}, 5074 {0x1b, 0x411111f0},
5074 {0x17, 0x40000000}, 5075 {0x1d, 0x40700001},
5075 {0x18, 0x411111f0}, 5076 {0x1e, 0x411111f0}),
5076 {0x19, 0x411111f0}, 5077 SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
5077 {0x1a, 0x411111f0}, 5078 {0x12, 0x40000000},
5078 {0x1b, 0x411111f0}, 5079 {0x13, 0x90a60140},
5079 {0x1d, 0x40700001}, 5080 {0x14, 0x90170110},
5080 {0x1e, 0x411111f0}, 5081 {0x15, 0x0221401f},
5081 {0x21, 0x02211030}, 5082 {0x16, 0x21014020},
5082 }, 5083 {0x18, 0x411111f0},
5083 .value = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, 5084 {0x19, 0x21a19030},
5084 }, 5085 {0x1a, 0x411111f0},
5085 { 5086 {0x1b, 0x411111f0},
5086 .codec = 0x10ec0255, 5087 {0x1d, 0x40700001},
5087 .subvendor = 0x1028, 5088 {0x1e, 0x411111f0}),
5088#ifdef CONFIG_SND_DEBUG_VERBOSE 5089 SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
5089 .name = "Dell", 5090 {0x12, 0x40000000},
5090#endif 5091 {0x13, 0x90a60140},
5091 .pins = (const struct hda_pintbl[]) { 5092 {0x14, 0x90170110},
5092 {0x12, 0x90a60170}, 5093 {0x15, 0x0221401f},
5093 {0x14, 0x90170130}, 5094 {0x16, 0x411111f0},
5094 {0x17, 0x40000000}, 5095 {0x18, 0x411111f0},
5095 {0x18, 0x411111f0}, 5096 {0x19, 0x411111f0},
5096 {0x19, 0x411111f0}, 5097 {0x1a, 0x411111f0},
5097 {0x1a, 0x411111f0}, 5098 {0x1b, 0x411111f0},
5098 {0x1b, 0x411111f0}, 5099 {0x1d, 0x40700001},
5099 {0x1d, 0x40700001}, 5100 {0x1e, 0x411111f0}),
5100 {0x1e, 0x411111f0},
5101 {0x21, 0x02211040},
5102 },
5103 .value = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5104 },
5105 {
5106 .codec = 0x10ec0283,
5107 .subvendor = 0x1028,
5108#ifdef CONFIG_SND_DEBUG_VERBOSE
5109 .name = "Dell",
5110#endif
5111 .pins = (const struct hda_pintbl[]) {
5112 {0x12, 0x90a60130},
5113 {0x14, 0x90170110},
5114 {0x17, 0x40020008},
5115 {0x18, 0x411111f0},
5116 {0x19, 0x411111f0},
5117 {0x1a, 0x411111f0},
5118 {0x1b, 0x411111f0},
5119 {0x1d, 0x40e00001},
5120 {0x1e, 0x411111f0},
5121 {0x21, 0x0321101f},
5122 },
5123 .value = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
5124 },
5125 {
5126 .codec = 0x10ec0283,
5127 .subvendor = 0x1028,
5128#ifdef CONFIG_SND_DEBUG_VERBOSE
5129 .name = "Dell",
5130#endif
5131 .pins = (const struct hda_pintbl[]) {
5132 {0x12, 0x90a60160},
5133 {0x14, 0x90170120},
5134 {0x17, 0x40000000},
5135 {0x18, 0x411111f0},
5136 {0x19, 0x411111f0},
5137 {0x1a, 0x411111f0},
5138 {0x1b, 0x411111f0},
5139 {0x1d, 0x40700001},
5140 {0x1e, 0x411111f0},
5141 {0x21, 0x02211030},
5142 },
5143 .value = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
5144 },
5145 {
5146 .codec = 0x10ec0292,
5147 .subvendor = 0x1028,
5148#ifdef CONFIG_SND_DEBUG_VERBOSE
5149 .name = "Dell",
5150#endif
5151 .pins = (const struct hda_pintbl[]) {
5152 {0x12, 0x90a60140},
5153 {0x13, 0x411111f0},
5154 {0x14, 0x90170110},
5155 {0x15, 0x0221401f},
5156 {0x16, 0x411111f0},
5157 {0x18, 0x411111f0},
5158 {0x19, 0x411111f0},
5159 {0x1a, 0x411111f0},
5160 {0x1b, 0x411111f0},
5161 {0x1d, 0x40700001},
5162 {0x1e, 0x411111f0},
5163 },
5164 .value = ALC269_FIXUP_DELL3_MIC_NO_PRESENCE,
5165 },
5166 {
5167 .codec = 0x10ec0293,
5168 .subvendor = 0x1028,
5169#ifdef CONFIG_SND_DEBUG_VERBOSE
5170 .name = "Dell",
5171#endif
5172 .pins = (const struct hda_pintbl[]) {
5173 {0x12, 0x40000000},
5174 {0x13, 0x90a60140},
5175 {0x14, 0x90170110},
5176 {0x15, 0x0221401f},
5177 {0x16, 0x21014020},
5178 {0x18, 0x411111f0},
5179 {0x19, 0x21a19030},
5180 {0x1a, 0x411111f0},
5181 {0x1b, 0x411111f0},
5182 {0x1d, 0x40700001},
5183 {0x1e, 0x411111f0},
5184 },
5185 .value = ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
5186 },
5187 {} 5101 {}
5188}; 5102};
5189 5103
@@ -6039,90 +5953,66 @@ static const struct hda_model_fixup alc662_fixup_models[] = {
6039}; 5953};
6040 5954
6041static const struct snd_hda_pin_quirk alc662_pin_fixup_tbl[] = { 5955static const struct snd_hda_pin_quirk alc662_pin_fixup_tbl[] = {
6042 { 5956 SND_HDA_PIN_QUIRK(0x10ec0668, 0x1028, "Dell", ALC668_FIXUP_AUTO_MUTE,
6043 .codec = 0x10ec0668, 5957 {0x12, 0x99a30130},
6044 .subvendor = 0x1028, 5958 {0x14, 0x90170110},
6045#ifdef CONFIG_SND_DEBUG_VERBOSE 5959 {0x15, 0x0321101f},
6046 .name = "Dell", 5960 {0x16, 0x03011020},
6047#endif 5961 {0x18, 0x40000008},
6048 .pins = (const struct hda_pintbl[]) { 5962 {0x19, 0x411111f0},
6049 {0x12, 0x99a30130}, 5963 {0x1a, 0x411111f0},
6050 {0x14, 0x90170110}, 5964 {0x1b, 0x411111f0},
6051 {0x15, 0x0321101f}, 5965 {0x1d, 0x41000001},
6052 {0x16, 0x03011020}, 5966 {0x1e, 0x411111f0},
6053 {0x18, 0x40000008}, 5967 {0x1f, 0x411111f0}),
6054 {0x19, 0x411111f0}, 5968 SND_HDA_PIN_QUIRK(0x10ec0668, 0x1028, "Dell", ALC668_FIXUP_AUTO_MUTE,
6055 {0x1a, 0x411111f0}, 5969 {0x12, 0x99a30140},
6056 {0x1b, 0x411111f0}, 5970 {0x14, 0x90170110},
6057 {0x1d, 0x41000001}, 5971 {0x15, 0x0321101f},
6058 {0x1e, 0x411111f0}, 5972 {0x16, 0x03011020},
6059 {0x1f, 0x411111f0}, 5973 {0x18, 0x40000008},
6060 }, 5974 {0x19, 0x411111f0},
6061 .value = ALC668_FIXUP_AUTO_MUTE, 5975 {0x1a, 0x411111f0},
6062 }, 5976 {0x1b, 0x411111f0},
6063 { 5977 {0x1d, 0x41000001},
6064 .codec = 0x10ec0668, 5978 {0x1e, 0x411111f0},
6065 .subvendor = 0x1028, 5979 {0x1f, 0x411111f0}),
6066#ifdef CONFIG_SND_DEBUG_VERBOSE 5980 SND_HDA_PIN_QUIRK(0x10ec0668, 0x1028, "Dell", ALC668_FIXUP_AUTO_MUTE,
6067 .name = "Dell", 5981 {0x12, 0x99a30150},
6068#endif 5982 {0x14, 0x90170110},
6069 .pins = (const struct hda_pintbl[]) { 5983 {0x15, 0x0321101f},
6070 {0x12, 0x99a30140}, 5984 {0x16, 0x03011020},
6071 {0x14, 0x90170110}, 5985 {0x18, 0x40000008},
6072 {0x15, 0x0321101f}, 5986 {0x19, 0x411111f0},
6073 {0x16, 0x03011020}, 5987 {0x1a, 0x411111f0},
6074 {0x18, 0x40000008}, 5988 {0x1b, 0x411111f0},
6075 {0x19, 0x411111f0}, 5989 {0x1d, 0x41000001},
6076 {0x1a, 0x411111f0}, 5990 {0x1e, 0x411111f0},
6077 {0x1b, 0x411111f0}, 5991 {0x1f, 0x411111f0}),
6078 {0x1d, 0x41000001}, 5992 SND_HDA_PIN_QUIRK(0x10ec0668, 0x1028, "Dell", ALC668_FIXUP_AUTO_MUTE,
6079 {0x1e, 0x411111f0}, 5993 {0x12, 0x411111f0},
6080 {0x1f, 0x411111f0}, 5994 {0x14, 0x90170110},
6081 }, 5995 {0x15, 0x0321101f},
6082 .value = ALC668_FIXUP_AUTO_MUTE, 5996 {0x16, 0x03011020},
6083 }, 5997 {0x18, 0x40000008},
6084 { 5998 {0x19, 0x411111f0},
6085 .codec = 0x10ec0668, 5999 {0x1a, 0x411111f0},
6086 .subvendor = 0x1028, 6000 {0x1b, 0x411111f0},
6087#ifdef CONFIG_SND_DEBUG_VERBOSE 6001 {0x1d, 0x41000001},
6088 .name = "Dell", 6002 {0x1e, 0x411111f0},
6089#endif 6003 {0x1f, 0x411111f0}),
6090 .pins = (const struct hda_pintbl[]) { 6004 SND_HDA_PIN_QUIRK(0x10ec0668, 0x1028, "Dell XPS 15", ALC668_FIXUP_AUTO_MUTE,
6091 {0x12, 0x99a30150}, 6005 {0x12, 0x90a60130},
6092 {0x14, 0x90170110}, 6006 {0x14, 0x90170110},
6093 {0x15, 0x0321101f}, 6007 {0x15, 0x0321101f},
6094 {0x16, 0x03011020}, 6008 {0x16, 0x40000000},
6095 {0x18, 0x40000008}, 6009 {0x18, 0x411111f0},
6096 {0x19, 0x411111f0}, 6010 {0x19, 0x411111f0},
6097 {0x1a, 0x411111f0}, 6011 {0x1a, 0x411111f0},
6098 {0x1b, 0x411111f0}, 6012 {0x1b, 0x411111f0},
6099 {0x1d, 0x41000001}, 6013 {0x1d, 0x40d6832d},
6100 {0x1e, 0x411111f0}, 6014 {0x1e, 0x411111f0},
6101 {0x1f, 0x411111f0}, 6015 {0x1f, 0x411111f0}),
6102 },
6103 .value = ALC668_FIXUP_AUTO_MUTE,
6104 },
6105 {
6106 .codec = 0x10ec0668,
6107 .subvendor = 0x1028,
6108#ifdef CONFIG_SND_DEBUG_VERBOSE
6109 .name = "Dell",
6110#endif
6111 .pins = (const struct hda_pintbl[]) {
6112 {0x12, 0x411111f0},
6113 {0x14, 0x90170110},
6114 {0x15, 0x0321101f},
6115 {0x16, 0x03011020},
6116 {0x18, 0x40000008},
6117 {0x19, 0x411111f0},
6118 {0x1a, 0x411111f0},
6119 {0x1b, 0x411111f0},
6120 {0x1d, 0x41000001},
6121 {0x1e, 0x411111f0},
6122 {0x1f, 0x411111f0},
6123 },
6124 .value = ALC668_FIXUP_AUTO_MUTE,
6125 },
6126 {} 6016 {}
6127}; 6017};
6128 6018
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 7f40a150899c..3744ea4e843d 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -122,6 +122,12 @@ enum {
122}; 122};
123 123
124enum { 124enum {
125 STAC_92HD95_HP_LED,
126 STAC_92HD95_HP_BASS,
127 STAC_92HD95_MODELS
128};
129
130enum {
125 STAC_925x_REF, 131 STAC_925x_REF,
126 STAC_M1, 132 STAC_M1,
127 STAC_M1_2, 133 STAC_M1_2,
@@ -4128,6 +4134,48 @@ static const struct snd_pci_quirk stac9205_fixup_tbl[] = {
4128 {} /* terminator */ 4134 {} /* terminator */
4129}; 4135};
4130 4136
4137static void stac92hd95_fixup_hp_led(struct hda_codec *codec,
4138 const struct hda_fixup *fix, int action)
4139{
4140 struct sigmatel_spec *spec = codec->spec;
4141
4142 if (action != HDA_FIXUP_ACT_PRE_PROBE)
4143 return;
4144
4145 if (find_mute_led_cfg(codec, spec->default_polarity))
4146 codec_dbg(codec, "mute LED gpio %d polarity %d\n",
4147 spec->gpio_led,
4148 spec->gpio_led_polarity);
4149}
4150
4151static const struct hda_fixup stac92hd95_fixups[] = {
4152 [STAC_92HD95_HP_LED] = {
4153 .type = HDA_FIXUP_FUNC,
4154 .v.func = stac92hd95_fixup_hp_led,
4155 },
4156 [STAC_92HD95_HP_BASS] = {
4157 .type = HDA_FIXUP_VERBS,
4158 .v.verbs = (const struct hda_verb[]) {
4159 {0x1a, 0x795, 0x00}, /* HPF to 100Hz */
4160 {}
4161 },
4162 .chained = true,
4163 .chain_id = STAC_92HD95_HP_LED,
4164 },
4165};
4166
4167static const struct snd_pci_quirk stac92hd95_fixup_tbl[] = {
4168 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1911, "HP Spectre 13", STAC_92HD95_HP_BASS),
4169 {} /* terminator */
4170};
4171
4172static const struct hda_model_fixup stac92hd95_models[] = {
4173 { .id = STAC_92HD95_HP_LED, .name = "hp-led" },
4174 { .id = STAC_92HD95_HP_BASS, .name = "hp-bass" },
4175 {}
4176};
4177
4178
4131static int stac_parse_auto_config(struct hda_codec *codec) 4179static int stac_parse_auto_config(struct hda_codec *codec)
4132{ 4180{
4133 struct sigmatel_spec *spec = codec->spec; 4181 struct sigmatel_spec *spec = codec->spec;
@@ -4580,10 +4628,16 @@ static int patch_stac92hd95(struct hda_codec *codec)
4580 spec->gen.beep_nid = 0x19; /* digital beep */ 4628 spec->gen.beep_nid = 0x19; /* digital beep */
4581 spec->pwr_nids = stac92hd95_pwr_nids; 4629 spec->pwr_nids = stac92hd95_pwr_nids;
4582 spec->num_pwrs = ARRAY_SIZE(stac92hd95_pwr_nids); 4630 spec->num_pwrs = ARRAY_SIZE(stac92hd95_pwr_nids);
4583 spec->default_polarity = -1; /* no default cfg */ 4631 spec->default_polarity = 0;
4584 4632
4585 codec->patch_ops = stac_patch_ops; 4633 codec->patch_ops = stac_patch_ops;
4586 4634
4635 snd_hda_pick_fixup(codec, stac92hd95_models, stac92hd95_fixup_tbl,
4636 stac92hd95_fixups);
4637 snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PRE_PROBE);
4638
4639 stac_setup_gpio(codec);
4640
4587 err = stac_parse_auto_config(codec); 4641 err = stac_parse_auto_config(codec);
4588 if (err < 0) { 4642 if (err < 0) {
4589 stac_free(codec); 4643 stac_free(codec);
@@ -4592,6 +4646,8 @@ static int patch_stac92hd95(struct hda_codec *codec)
4592 4646
4593 codec->proc_widget_hook = stac92hd_proc_hook; 4647 codec->proc_widget_hook = stac92hd_proc_hook;
4594 4648
4649 snd_hda_apply_fixup(codec, HDA_FIXUP_ACT_PROBE);
4650
4595 return 0; 4651 return 0;
4596} 4652}
4597 4653
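The patch_sigmatel.c hunks introduce proper fixup tables for the 92HD95: a PCI quirk table selects a fixup, snd_hda_pick_fixup() records the choice, and snd_hda_apply_fixup() runs its hooks at the pre-probe and probe stages. A compact sketch of that pick-then-apply staging, with hypothetical IDs and a single fixup:

    /*
     * Sketch of the pick-then-apply staging: a quirk table maps a PCI
     * subsystem ID to a fixup, the fixup is chosen once, and its hook runs
     * at distinct probe stages.  IDs, names and stages are illustrative.
     */
    #include <stdio.h>

    enum { ACT_PRE_PROBE, ACT_PROBE };

    struct fixup { void (*func)(int action); };

    static void hp_led_fixup(int action)
    {
        if (action == ACT_PRE_PROBE)
            puts("configure mute LED before parsing pins");
    }

    static const struct fixup fixups[] = { { .func = hp_led_fixup } };
    static const struct fixup *picked;

    static void pick_fixup(unsigned int subsys)
    {
        if (subsys == 0x103c1911)        /* hypothetical quirk-table match */
            picked = &fixups[0];
    }

    static void apply_fixup(int action)
    {
        if (picked && picked->func)
            picked->func(action);
    }

    int main(void)
    {
        pick_fixup(0x103c1911);
        apply_fixup(ACT_PRE_PROBE);      /* before the auto-config parse */
        apply_fixup(ACT_PROBE);          /* after the widgets are set up */
        return 0;
    }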
diff --git a/sound/soc/fsl/imx-pcm-dma.c b/sound/soc/fsl/imx-pcm-dma.c
index 0849b7b83f0a..0db94f492e97 100644
--- a/sound/soc/fsl/imx-pcm-dma.c
+++ b/sound/soc/fsl/imx-pcm-dma.c
@@ -59,7 +59,6 @@ int imx_pcm_dma_init(struct platform_device *pdev)
59{ 59{
60 return devm_snd_dmaengine_pcm_register(&pdev->dev, 60 return devm_snd_dmaengine_pcm_register(&pdev->dev,
61 &imx_dmaengine_pcm_config, 61 &imx_dmaengine_pcm_config,
62 SND_DMAENGINE_PCM_FLAG_NO_RESIDUE |
63 SND_DMAENGINE_PCM_FLAG_COMPAT); 62 SND_DMAENGINE_PCM_FLAG_COMPAT);
64} 63}
65EXPORT_SYMBOL_GPL(imx_pcm_dma_init); 64EXPORT_SYMBOL_GPL(imx_pcm_dma_init);
diff --git a/sound/usb/card.c b/sound/usb/card.c
index c3b5b7dca1c3..a09e5f3519e3 100644
--- a/sound/usb/card.c
+++ b/sound/usb/card.c
@@ -307,6 +307,11 @@ static int snd_usb_create_streams(struct snd_usb_audio *chip, int ctrlif)
307 307
308static int snd_usb_audio_free(struct snd_usb_audio *chip) 308static int snd_usb_audio_free(struct snd_usb_audio *chip)
309{ 309{
310 struct list_head *p, *n;
311
312 list_for_each_safe(p, n, &chip->ep_list)
313 snd_usb_endpoint_free(p);
314
310 mutex_destroy(&chip->mutex); 315 mutex_destroy(&chip->mutex);
311 kfree(chip); 316 kfree(chip);
312 return 0; 317 return 0;
@@ -585,7 +590,7 @@ static void snd_usb_audio_disconnect(struct usb_device *dev,
585 struct snd_usb_audio *chip) 590 struct snd_usb_audio *chip)
586{ 591{
587 struct snd_card *card; 592 struct snd_card *card;
588 struct list_head *p, *n; 593 struct list_head *p;
589 594
590 if (chip == (void *)-1L) 595 if (chip == (void *)-1L)
591 return; 596 return;
@@ -598,14 +603,16 @@ static void snd_usb_audio_disconnect(struct usb_device *dev,
598 mutex_lock(&register_mutex); 603 mutex_lock(&register_mutex);
599 chip->num_interfaces--; 604 chip->num_interfaces--;
600 if (chip->num_interfaces <= 0) { 605 if (chip->num_interfaces <= 0) {
606 struct snd_usb_endpoint *ep;
607
601 snd_card_disconnect(card); 608 snd_card_disconnect(card);
602 /* release the pcm resources */ 609 /* release the pcm resources */
603 list_for_each(p, &chip->pcm_list) { 610 list_for_each(p, &chip->pcm_list) {
604 snd_usb_stream_disconnect(p); 611 snd_usb_stream_disconnect(p);
605 } 612 }
606 /* release the endpoint resources */ 613 /* release the endpoint resources */
607 list_for_each_safe(p, n, &chip->ep_list) { 614 list_for_each_entry(ep, &chip->ep_list, list) {
608 snd_usb_endpoint_free(p); 615 snd_usb_endpoint_release(ep);
609 } 616 }
610 /* release the midi resources */ 617 /* release the midi resources */
611 list_for_each(p, &chip->midi_list) { 618 list_for_each(p, &chip->midi_list) {
diff --git a/sound/usb/endpoint.c b/sound/usb/endpoint.c
index 289f582c9130..114e3e7ff511 100644
--- a/sound/usb/endpoint.c
+++ b/sound/usb/endpoint.c
@@ -987,19 +987,30 @@ void snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep)
987} 987}
988 988
989/** 989/**
990 * snd_usb_endpoint_release: Tear down an snd_usb_endpoint
991 *
992 * @ep: the endpoint to release
993 *
994 * This function does not care for the endpoint's use count but will tear
995 * down all the streaming URBs immediately.
996 */
997void snd_usb_endpoint_release(struct snd_usb_endpoint *ep)
998{
999 release_urbs(ep, 1);
1000}
1001
1002/**
990 * snd_usb_endpoint_free: Free the resources of an snd_usb_endpoint 1003 * snd_usb_endpoint_free: Free the resources of an snd_usb_endpoint
991 * 1004 *
992 * @ep: the list header of the endpoint to free 1005 * @ep: the list header of the endpoint to free
993 * 1006 *
994 * This function does not care for the endpoint's use count but will tear 1007 * This free all resources of the given ep.
995 * down all the streaming URBs immediately and free all resources.
996 */ 1008 */
997void snd_usb_endpoint_free(struct list_head *head) 1009void snd_usb_endpoint_free(struct list_head *head)
998{ 1010{
999 struct snd_usb_endpoint *ep; 1011 struct snd_usb_endpoint *ep;
1000 1012
1001 ep = list_entry(head, struct snd_usb_endpoint, list); 1013 ep = list_entry(head, struct snd_usb_endpoint, list);
1002 release_urbs(ep, 1);
1003 kfree(ep); 1014 kfree(ep);
1004} 1015}
1005 1016
diff --git a/sound/usb/endpoint.h b/sound/usb/endpoint.h
index 1c7e8ee48abc..e61ee5c356a3 100644
--- a/sound/usb/endpoint.h
+++ b/sound/usb/endpoint.h
@@ -23,6 +23,7 @@ void snd_usb_endpoint_stop(struct snd_usb_endpoint *ep);
23void snd_usb_endpoint_sync_pending_stop(struct snd_usb_endpoint *ep); 23void snd_usb_endpoint_sync_pending_stop(struct snd_usb_endpoint *ep);
24int snd_usb_endpoint_activate(struct snd_usb_endpoint *ep); 24int snd_usb_endpoint_activate(struct snd_usb_endpoint *ep);
25void snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep); 25void snd_usb_endpoint_deactivate(struct snd_usb_endpoint *ep);
26void snd_usb_endpoint_release(struct snd_usb_endpoint *ep);
26void snd_usb_endpoint_free(struct list_head *head); 27void snd_usb_endpoint_free(struct list_head *head);
27 28
28int snd_usb_endpoint_implicit_feedback_sink(struct snd_usb_endpoint *ep); 29int snd_usb_endpoint_implicit_feedback_sink(struct snd_usb_endpoint *ep);
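Across card.c, endpoint.c and endpoint.h the endpoint teardown is split in two: snd_usb_endpoint_release() drops the streaming URBs as soon as the device disconnects, while snd_usb_endpoint_free() releases the memory only when the card itself is freed, so code that still holds endpoint pointers after a surprise disconnect no longer touches freed memory. A simplified sketch of the two-phase teardown, using plain C stand-ins rather than the USB structures:

    /*
     * Sketch of the two-phase teardown: hardware resources go away at
     * disconnect time, the bookkeeping struct only when the last card
     * reference is dropped.  Plain C stand-ins, not the USB structures.
     */
    #include <stdio.h>
    #include <stdlib.h>

    struct endpoint { int urbs_allocated; };

    static void endpoint_release(struct endpoint *ep)  /* at disconnect */
    {
        ep->urbs_allocated = 0;          /* stop and free the streaming URBs */
    }

    static void endpoint_free(struct endpoint *ep)     /* at card free */
    {
        free(ep);
    }

    int main(void)
    {
        struct endpoint *ep = calloc(1, sizeof(*ep));

        if (!ep)
            return 1;
        ep->urbs_allocated = 8;
        endpoint_release(ep);            /* phase 1: the device is gone */
        printf("urbs left: %d\n", ep->urbs_allocated);
        endpoint_free(ep);               /* phase 2: card is being freed */
        return 0;
    }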
diff --git a/tools/lib/lockdep/include/liblockdep/mutex.h b/tools/lib/lockdep/include/liblockdep/mutex.h
index c342f7087147..ee53a42818ca 100644
--- a/tools/lib/lockdep/include/liblockdep/mutex.h
+++ b/tools/lib/lockdep/include/liblockdep/mutex.h
@@ -35,7 +35,7 @@ static inline int __mutex_init(liblockdep_pthread_mutex_t *lock,
35 35
36static inline int liblockdep_pthread_mutex_lock(liblockdep_pthread_mutex_t *lock) 36static inline int liblockdep_pthread_mutex_lock(liblockdep_pthread_mutex_t *lock)
37{ 37{
38 lock_acquire(&lock->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_); 38 lock_acquire(&lock->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
39 return pthread_mutex_lock(&lock->mutex); 39 return pthread_mutex_lock(&lock->mutex);
40} 40}
41 41
@@ -47,7 +47,7 @@ static inline int liblockdep_pthread_mutex_unlock(liblockdep_pthread_mutex_t *lo
47 47
48static inline int liblockdep_pthread_mutex_trylock(liblockdep_pthread_mutex_t *lock) 48static inline int liblockdep_pthread_mutex_trylock(liblockdep_pthread_mutex_t *lock)
49{ 49{
50 lock_acquire(&lock->dep_map, 0, 1, 0, 2, NULL, (unsigned long)_RET_IP_); 50 lock_acquire(&lock->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
51 return pthread_mutex_trylock(&lock->mutex) == 0 ? 1 : 0; 51 return pthread_mutex_trylock(&lock->mutex) == 0 ? 1 : 0;
52} 52}
53 53
diff --git a/tools/lib/lockdep/include/liblockdep/rwlock.h b/tools/lib/lockdep/include/liblockdep/rwlock.h
index a680ab8c2e36..4ec03f861551 100644
--- a/tools/lib/lockdep/include/liblockdep/rwlock.h
+++ b/tools/lib/lockdep/include/liblockdep/rwlock.h
@@ -36,7 +36,7 @@ static inline int __rwlock_init(liblockdep_pthread_rwlock_t *lock,
36 36
37static inline int liblockdep_pthread_rwlock_rdlock(liblockdep_pthread_rwlock_t *lock) 37static inline int liblockdep_pthread_rwlock_rdlock(liblockdep_pthread_rwlock_t *lock)
38{ 38{
39 lock_acquire(&lock->dep_map, 0, 0, 2, 2, NULL, (unsigned long)_RET_IP_); 39 lock_acquire(&lock->dep_map, 0, 0, 2, 1, NULL, (unsigned long)_RET_IP_);
40 return pthread_rwlock_rdlock(&lock->rwlock); 40 return pthread_rwlock_rdlock(&lock->rwlock);
41 41
42} 42}
@@ -49,19 +49,19 @@ static inline int liblockdep_pthread_rwlock_unlock(liblockdep_pthread_rwlock_t *
49 49
50static inline int liblockdep_pthread_rwlock_wrlock(liblockdep_pthread_rwlock_t *lock) 50static inline int liblockdep_pthread_rwlock_wrlock(liblockdep_pthread_rwlock_t *lock)
51{ 51{
52 lock_acquire(&lock->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_); 52 lock_acquire(&lock->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
53 return pthread_rwlock_wrlock(&lock->rwlock); 53 return pthread_rwlock_wrlock(&lock->rwlock);
54} 54}
55 55
56static inline int liblockdep_pthread_rwlock_tryrdlock(liblockdep_pthread_rwlock_t *lock) 56static inline int liblockdep_pthread_rwlock_tryrdlock(liblockdep_pthread_rwlock_t *lock)
57{ 57{
58 lock_acquire(&lock->dep_map, 0, 1, 2, 2, NULL, (unsigned long)_RET_IP_); 58 lock_acquire(&lock->dep_map, 0, 1, 2, 1, NULL, (unsigned long)_RET_IP_);
59 return pthread_rwlock_tryrdlock(&lock->rwlock) == 0 ? 1 : 0; 59 return pthread_rwlock_tryrdlock(&lock->rwlock) == 0 ? 1 : 0;
60} 60}
61 61
62static inline int liblockdep_pthread_rwlock_trywlock(liblockdep_pthread_rwlock_t *lock) 62static inline int liblockdep_pthread_rwlock_trywlock(liblockdep_pthread_rwlock_t *lock)
63{ 63{
64 lock_acquire(&lock->dep_map, 0, 1, 0, 2, NULL, (unsigned long)_RET_IP_); 64 lock_acquire(&lock->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
65 return pthread_rwlock_trywlock(&lock->rwlock) == 0 ? 1 : 0; 65 return pthread_rwlock_trywlock(&lock->rwlock) == 0 ? 1 : 0;
66} 66}
67 67
diff --git a/tools/lib/lockdep/preload.c b/tools/lib/lockdep/preload.c
index 23bd69cb5ade..6f803609e498 100644
--- a/tools/lib/lockdep/preload.c
+++ b/tools/lib/lockdep/preload.c
@@ -92,7 +92,7 @@ enum { none, prepare, done, } __init_state;
92static void init_preload(void); 92static void init_preload(void);
93static void try_init_preload(void) 93static void try_init_preload(void)
94{ 94{
95 if (!__init_state != done) 95 if (__init_state != done)
96 init_preload(); 96 init_preload();
97} 97}
98 98
@@ -252,7 +252,7 @@ int pthread_mutex_lock(pthread_mutex_t *mutex)
252 252
253 try_init_preload(); 253 try_init_preload();
254 254
255 lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 2, NULL, 255 lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 1, NULL,
256 (unsigned long)_RET_IP_); 256 (unsigned long)_RET_IP_);
257 /* 257 /*
258 * Here's the thing with pthread mutexes: unlike the kernel variant, 258 * Here's the thing with pthread mutexes: unlike the kernel variant,
@@ -281,7 +281,7 @@ int pthread_mutex_trylock(pthread_mutex_t *mutex)
281 281
282 try_init_preload(); 282 try_init_preload();
283 283
284 lock_acquire(&__get_lock(mutex)->dep_map, 0, 1, 0, 2, NULL, (unsigned long)_RET_IP_); 284 lock_acquire(&__get_lock(mutex)->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
285 r = ll_pthread_mutex_trylock(mutex); 285 r = ll_pthread_mutex_trylock(mutex);
286 if (r) 286 if (r)
287 lock_release(&__get_lock(mutex)->dep_map, 0, (unsigned long)_RET_IP_); 287 lock_release(&__get_lock(mutex)->dep_map, 0, (unsigned long)_RET_IP_);
@@ -303,7 +303,7 @@ int pthread_mutex_unlock(pthread_mutex_t *mutex)
303 */ 303 */
304 r = ll_pthread_mutex_unlock(mutex); 304 r = ll_pthread_mutex_unlock(mutex);
305 if (r) 305 if (r)
306 lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_); 306 lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
307 307
308 return r; 308 return r;
309} 309}
@@ -352,7 +352,7 @@ int pthread_rwlock_rdlock(pthread_rwlock_t *rwlock)
352 352
353 init_preload(); 353 init_preload();
354 354
355 lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 2, 2, NULL, (unsigned long)_RET_IP_); 355 lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 2, 1, NULL, (unsigned long)_RET_IP_);
356 r = ll_pthread_rwlock_rdlock(rwlock); 356 r = ll_pthread_rwlock_rdlock(rwlock);
357 if (r) 357 if (r)
358 lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_); 358 lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
@@ -366,7 +366,7 @@ int pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
366 366
367 init_preload(); 367 init_preload();
368 368
369 lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 2, 2, NULL, (unsigned long)_RET_IP_); 369 lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 2, 1, NULL, (unsigned long)_RET_IP_);
370 r = ll_pthread_rwlock_tryrdlock(rwlock); 370 r = ll_pthread_rwlock_tryrdlock(rwlock);
371 if (r) 371 if (r)
372 lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_); 372 lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
@@ -380,7 +380,7 @@ int pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock)
380 380
381 init_preload(); 381 init_preload();
382 382
383 lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 0, 2, NULL, (unsigned long)_RET_IP_); 383 lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
384 r = ll_pthread_rwlock_trywrlock(rwlock); 384 r = ll_pthread_rwlock_trywrlock(rwlock);
385 if (r) 385 if (r)
386 lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_); 386 lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
@@ -394,7 +394,7 @@ int pthread_rwlock_wrlock(pthread_rwlock_t *rwlock)
394 394
395 init_preload(); 395 init_preload();
396 396
397 lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_); 397 lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
398 r = ll_pthread_rwlock_wrlock(rwlock); 398 r = ll_pthread_rwlock_wrlock(rwlock);
399 if (r) 399 if (r)
400 lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_); 400 lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
@@ -411,7 +411,7 @@ int pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
411 lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_); 411 lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
412 r = ll_pthread_rwlock_unlock(rwlock); 412 r = ll_pthread_rwlock_unlock(rwlock);
413 if (r) 413 if (r)
414 lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 2, NULL, (unsigned long)_RET_IP_); 414 lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
415 415
416 return r; 416 return r;
417} 417}
@@ -439,8 +439,6 @@ __attribute__((constructor)) static void init_preload(void)
439 ll_pthread_rwlock_unlock = dlsym(RTLD_NEXT, "pthread_rwlock_unlock"); 439 ll_pthread_rwlock_unlock = dlsym(RTLD_NEXT, "pthread_rwlock_unlock");
440#endif 440#endif
441 441
442 printf("%p\n", ll_pthread_mutex_trylock);fflush(stdout);
443
444 lockdep_init(); 442 lockdep_init();
445 443
446 __init_state = done; 444 __init_state = done;
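The liblockdep changes pass 1 instead of 2 as the check argument to lock_acquire(), in line with lockdep treating check as a boolean, drop a leftover debug printf, and fix an operator-precedence bug in try_init_preload(): '!' binds tighter than '!=', so the old condition negated the state first and compared that 0/1 result against the enum value. A sketch of that precedence bug:

    /*
     * Sketch of the precedence bug fixed in try_init_preload(): '!' binds
     * tighter than '!=', so the old test negated the state first and then
     * compared that 0/1 result against the enum value.
     */
    #include <stdio.h>

    enum { none, prepare, done } state = done;

    int main(void)
    {
        printf("old test fires: %d\n", !state != done); /* 0 != 2 -> 1 */
        printf("new test fires: %d\n",  state != done); /* correctly 0 */
        return 0;
    }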
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index 52c03fbbba17..04a229aa5c0f 100644
--- a/tools/perf/ui/browsers/hists.c
+++ b/tools/perf/ui/browsers/hists.c
@@ -17,6 +17,7 @@
17#include "../util.h" 17#include "../util.h"
18#include "../ui.h" 18#include "../ui.h"
19#include "map.h" 19#include "map.h"
20#include "annotate.h"
20 21
21struct hist_browser { 22struct hist_browser {
22 struct ui_browser b; 23 struct ui_browser b;
@@ -1593,13 +1594,18 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
1593 bi->to.sym->name) > 0) 1594 bi->to.sym->name) > 0)
1594 annotate_t = nr_options++; 1595 annotate_t = nr_options++;
1595 } else { 1596 } else {
1596
1597 if (browser->selection != NULL && 1597 if (browser->selection != NULL &&
1598 browser->selection->sym != NULL && 1598 browser->selection->sym != NULL &&
1599 !browser->selection->map->dso->annotate_warned && 1599 !browser->selection->map->dso->annotate_warned) {
1600 asprintf(&options[nr_options], "Annotate %s", 1600 struct annotation *notes;
1601 browser->selection->sym->name) > 0) 1601
1602 annotate = nr_options++; 1602 notes = symbol__annotation(browser->selection->sym);
1603
1604 if (notes->src &&
1605 asprintf(&options[nr_options], "Annotate %s",
1606 browser->selection->sym->name) > 0)
1607 annotate = nr_options++;
1608 }
1603 } 1609 }
1604 1610
1605 if (thread != NULL && 1611 if (thread != NULL &&
@@ -1656,6 +1662,7 @@ retry_popup_menu:
1656 1662
1657 if (choice == annotate || choice == annotate_t || choice == annotate_f) { 1663 if (choice == annotate || choice == annotate_t || choice == annotate_f) {
1658 struct hist_entry *he; 1664 struct hist_entry *he;
1665 struct annotation *notes;
1659 int err; 1666 int err;
1660do_annotate: 1667do_annotate:
1661 if (!objdump_path && perf_session_env__lookup_objdump(env)) 1668 if (!objdump_path && perf_session_env__lookup_objdump(env))
@@ -1679,6 +1686,10 @@ do_annotate:
1679 he->ms.map = he->branch_info->to.map; 1686 he->ms.map = he->branch_info->to.map;
1680 } 1687 }
1681 1688
1689 notes = symbol__annotation(he->ms.sym);
1690 if (!notes->src)
1691 continue;
1692
1682 /* 1693 /*
1683 * Don't let this be freed, say, by hists__decay_entry. 1694 * Don't let this be freed, say, by hists__decay_entry.
1684 */ 1695 */
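The hists browser now offers and runs the Annotate action only when the selected symbol actually has annotation data (notes->src), instead of failing later when the annotation is dereferenced. A minimal sketch of the guard, with simplified types:

    /*
     * Sketch of the guard added around the annotate action: both the menu
     * entry and the action check that annotation data exists before using
     * it.  Types are simplified stand-ins for the perf structures.
     */
    #include <stdio.h>
    #include <stddef.h>

    struct annotation { const char *src; };
    struct symbol     { struct annotation notes; };

    static void annotate(const struct symbol *sym)
    {
        if (sym == NULL || sym->notes.src == NULL) {
            puts("no annotation source, skipping");
            return;
        }
        printf("annotating %s\n", sym->notes.src);
    }

    int main(void)
    {
        struct symbol bare = { .notes = { .src = NULL } };
        struct symbol full = { .notes = { .src = "main.c" } };

        annotate(&bare);
        annotate(&full);
        return 0;
    }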
diff --git a/tools/perf/util/machine.c b/tools/perf/util/machine.c
index 0e5fea95d596..c73e1fc12e53 100644
--- a/tools/perf/util/machine.c
+++ b/tools/perf/util/machine.c
@@ -496,18 +496,6 @@ struct process_args {
496 u64 start; 496 u64 start;
497}; 497};
498 498
499static int symbol__in_kernel(void *arg, const char *name,
500 char type __maybe_unused, u64 start)
501{
502 struct process_args *args = arg;
503
504 if (strchr(name, '['))
505 return 0;
506
507 args->start = start;
508 return 1;
509}
510
511static void machine__get_kallsyms_filename(struct machine *machine, char *buf, 499static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
512 size_t bufsz) 500 size_t bufsz)
513{ 501{
@@ -517,27 +505,41 @@ static void machine__get_kallsyms_filename(struct machine *machine, char *buf,
517 scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir); 505 scnprintf(buf, bufsz, "%s/proc/kallsyms", machine->root_dir);
518} 506}
519 507
520/* Figure out the start address of kernel map from /proc/kallsyms */ 508const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
521static u64 machine__get_kernel_start_addr(struct machine *machine) 509
510/* Figure out the start address of kernel map from /proc/kallsyms.
511 * Returns the name of the start symbol in *symbol_name. Pass in NULL as
512 * symbol_name if it's not that important.
513 */
514static u64 machine__get_kernel_start_addr(struct machine *machine,
515 const char **symbol_name)
522{ 516{
523 char filename[PATH_MAX]; 517 char filename[PATH_MAX];
524 struct process_args args; 518 int i;
519 const char *name;
520 u64 addr = 0;
525 521
526 machine__get_kallsyms_filename(machine, filename, PATH_MAX); 522 machine__get_kallsyms_filename(machine, filename, PATH_MAX);
527 523
528 if (symbol__restricted_filename(filename, "/proc/kallsyms")) 524 if (symbol__restricted_filename(filename, "/proc/kallsyms"))
529 return 0; 525 return 0;
530 526
531 if (kallsyms__parse(filename, &args, symbol__in_kernel) <= 0) 527 for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
532 return 0; 528 addr = kallsyms__get_function_start(filename, name);
529 if (addr)
530 break;
531 }
532
533 if (symbol_name)
534 *symbol_name = name;
533 535
534 return args.start; 536 return addr;
535} 537}
536 538
537int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel) 539int __machine__create_kernel_maps(struct machine *machine, struct dso *kernel)
538{ 540{
539 enum map_type type; 541 enum map_type type;
540 u64 start = machine__get_kernel_start_addr(machine); 542 u64 start = machine__get_kernel_start_addr(machine, NULL);
541 543
542 for (type = 0; type < MAP__NR_TYPES; ++type) { 544 for (type = 0; type < MAP__NR_TYPES; ++type) {
543 struct kmap *kmap; 545 struct kmap *kmap;
@@ -852,23 +854,11 @@ static int machine__create_modules(struct machine *machine)
852 return 0; 854 return 0;
853} 855}
854 856
855const char *ref_reloc_sym_names[] = {"_text", "_stext", NULL};
856
857int machine__create_kernel_maps(struct machine *machine) 857int machine__create_kernel_maps(struct machine *machine)
858{ 858{
859 struct dso *kernel = machine__get_kernel(machine); 859 struct dso *kernel = machine__get_kernel(machine);
860 char filename[PATH_MAX];
861 const char *name; 860 const char *name;
862 u64 addr = 0; 861 u64 addr = machine__get_kernel_start_addr(machine, &name);
863 int i;
864
865 machine__get_kallsyms_filename(machine, filename, PATH_MAX);
866
867 for (i = 0; (name = ref_reloc_sym_names[i]) != NULL; i++) {
868 addr = kallsyms__get_function_start(filename, name);
869 if (addr)
870 break;
871 }
872 if (!addr) 862 if (!addr)
873 return -1; 863 return -1;
874 864
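machine.c now finds the kernel start address by probing the candidate reference symbols (_text, then _stext) with kallsyms__get_function_start(), replacing the old kallsyms__parse() callback, and machine__get_kernel_start_addr() can report which symbol matched so machine__create_kernel_maps() no longer duplicates the loop. A self-contained sketch of that fallback lookup; lookup() stands in for the real kallsyms parser and the address is invented:

    /*
     * Sketch of the fallback lookup now used for the kernel start address:
     * try the candidate reference symbols in order and report which one
     * matched.  lookup() stands in for kallsyms__get_function_start() and
     * the address below is invented.
     */
    #include <stdio.h>
    #include <string.h>

    static unsigned long lookup(const char *name)
    {
        /* hypothetical kallsyms where only _stext is present */
        return strcmp(name, "_stext") == 0 ? 0xc0008000UL : 0;
    }

    static const char *candidates[] = { "_text", "_stext", NULL };

    static unsigned long kernel_start(const char **symbol_name)
    {
        unsigned long addr = 0;
        const char *name = NULL;
        int i;

        for (i = 0; (name = candidates[i]) != NULL; i++) {
            addr = lookup(name);
            if (addr)
                break;
        }
        if (symbol_name)
            *symbol_name = name;
        return addr;
    }

    int main(void)
    {
        const char *name;
        unsigned long addr = kernel_start(&name);

        printf("%s at %#lx\n", name ? name : "(none)", addr);
        return 0;
    }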
diff --git a/tools/testing/selftests/cpu-hotplug/Makefile b/tools/testing/selftests/cpu-hotplug/Makefile
index ae5faf9aade2..790c23a9db44 100644
--- a/tools/testing/selftests/cpu-hotplug/Makefile
+++ b/tools/testing/selftests/cpu-hotplug/Makefile
@@ -1,6 +1,6 @@
1all: 1all:
2 2
3run_tests: 3run_tests:
4 @/bin/sh ./on-off-test.sh || echo "cpu-hotplug selftests: [FAIL]" 4 @/bin/bash ./on-off-test.sh || echo "cpu-hotplug selftests: [FAIL]"
5 5
6clean: 6clean:
diff --git a/tools/testing/selftests/ipc/msgque.c b/tools/testing/selftests/ipc/msgque.c
index aa290c0de6f5..552f0810bffb 100644
--- a/tools/testing/selftests/ipc/msgque.c
+++ b/tools/testing/selftests/ipc/msgque.c
@@ -193,6 +193,11 @@ int main(int argc, char **argv)
193 int msg, pid, err; 193 int msg, pid, err;
194 struct msgque_data msgque; 194 struct msgque_data msgque;
195 195
196 if (getuid() != 0) {
197 printf("Please run the test as root - Exiting.\n");
198 exit(1);
199 }
200
196 msgque.key = ftok(argv[0], 822155650); 201 msgque.key = ftok(argv[0], 822155650);
197 if (msgque.key == -1) { 202 if (msgque.key == -1) {
198 printf("Can't make key\n"); 203 printf("Can't make key\n");
diff --git a/tools/testing/selftests/memory-hotplug/Makefile b/tools/testing/selftests/memory-hotplug/Makefile
index 350bfeda3aa8..058c76f5d102 100644
--- a/tools/testing/selftests/memory-hotplug/Makefile
+++ b/tools/testing/selftests/memory-hotplug/Makefile
@@ -1,6 +1,6 @@
1all: 1all:
2 2
3run_tests: 3run_tests:
4 @/bin/sh ./on-off-test.sh || echo "memory-hotplug selftests: [FAIL]" 4 @/bin/bash ./on-off-test.sh || echo "memory-hotplug selftests: [FAIL]"
5 5
6clean: 6clean:
diff --git a/tools/testing/selftests/powerpc/tm/Makefile b/tools/testing/selftests/powerpc/tm/Makefile
index 51267f4184a6..2cede239a074 100644
--- a/tools/testing/selftests/powerpc/tm/Makefile
+++ b/tools/testing/selftests/powerpc/tm/Makefile
@@ -2,7 +2,7 @@ PROGS := tm-resched-dscr
2 2
3all: $(PROGS) 3all: $(PROGS)
4 4
5$(PROGS): 5$(PROGS): ../harness.c
6 6
7run_tests: all 7run_tests: all
8 @-for PROG in $(PROGS); do \ 8 @-for PROG in $(PROGS); do \
diff --git a/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c b/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c
index ee98e3886af2..42d4c8caad81 100644
--- a/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c
+++ b/tools/testing/selftests/powerpc/tm/tm-resched-dscr.c
@@ -28,6 +28,8 @@
28#include <assert.h> 28#include <assert.h>
29#include <asm/tm.h> 29#include <asm/tm.h>
30 30
31#include "utils.h"
32
31#define TBEGIN ".long 0x7C00051D ;" 33#define TBEGIN ".long 0x7C00051D ;"
32#define TEND ".long 0x7C00055D ;" 34#define TEND ".long 0x7C00055D ;"
33#define TCHECK ".long 0x7C00059C ;" 35#define TCHECK ".long 0x7C00059C ;"
@@ -36,7 +38,8 @@
36#define SPRN_TEXASR 0x82 38#define SPRN_TEXASR 0x82
37#define SPRN_DSCR 0x03 39#define SPRN_DSCR 0x03
38 40
39int main(void) { 41int test_body(void)
42{
40 uint64_t rv, dscr1 = 1, dscr2, texasr; 43 uint64_t rv, dscr1 = 1, dscr2, texasr;
41 44
42 printf("Check DSCR TM context switch: "); 45 printf("Check DSCR TM context switch: ");
@@ -81,10 +84,15 @@ int main(void) {
81 } 84 }
82 if (dscr2 != dscr1) { 85 if (dscr2 != dscr1) {
83 printf(" FAIL\n"); 86 printf(" FAIL\n");
84 exit(EXIT_FAILURE); 87 return 1;
85 } else { 88 } else {
86 printf(" OK\n"); 89 printf(" OK\n");
87 exit(EXIT_SUCCESS); 90 return 0;
88 } 91 }
89 } 92 }
90} 93}
94
95int main(void)
96{
97 return test_harness(test_body, "tm_resched_dscr");
98}
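The powerpc TM selftest is reworked to the common selftest harness: the body returns pass/fail instead of calling exit(), main() hands it to test_harness(), and the Makefile links ../harness.c so the runner owns reporting. A toy sketch of the pattern, with a trivial stand-in for the real test_harness():

    /*
     * Sketch of the harness pattern the selftest now follows: the body
     * returns pass/fail and a shared runner owns exit codes and reporting.
     * test_harness() below is a trivial stand-in for the real one in
     * tools/testing/selftests/powerpc/harness.c.
     */
    #include <stdio.h>
    #include <stdlib.h>

    static int test_harness(int (*fn)(void), const char *name)
    {
        int rc = fn();

        printf("%s: %s\n", rc ? "failure" : "success", name);
        return rc ? EXIT_FAILURE : EXIT_SUCCESS;
    }

    static int test_body(void)
    {
        return 0;                        /* 0 = pass, non-zero = fail */
    }

    int main(void)
    {
        return test_harness(test_body, "tm_resched_dscr");
    }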
diff --git a/tools/thermal/tmon/Makefile b/tools/thermal/tmon/Makefile
index 447321104ec0..e775adcbd29f 100644
--- a/tools/thermal/tmon/Makefile
+++ b/tools/thermal/tmon/Makefile
@@ -21,7 +21,7 @@ OBJS = tmon.o tui.o sysfs.o pid.o
21OBJS += 21OBJS +=
22 22
23tmon: $(OBJS) Makefile tmon.h 23tmon: $(OBJS) Makefile tmon.h
24 $(CC) ${CFLAGS} $(LDFLAGS) $(OBJS) -o $(TARGET) -lm -lpanel -lncursesw -lpthread 24 $(CC) ${CFLAGS} $(LDFLAGS) $(OBJS) -o $(TARGET) -lm -lpanel -lncursesw -ltinfo -lpthread
25 25
26valgrind: tmon 26valgrind: tmon
27 sudo valgrind -v --track-origins=yes --tool=memcheck --leak-check=yes --show-reachable=yes --num-callers=20 --track-fds=yes ./$(TARGET) 1> /dev/null 27 sudo valgrind -v --track-origins=yes --tool=memcheck --leak-check=yes --show-reachable=yes --num-callers=20 --track-fds=yes ./$(TARGET) 1> /dev/null
diff --git a/tools/thermal/tmon/tmon.c b/tools/thermal/tmon/tmon.c
index b30f531173e4..09b7c3218334 100644
--- a/tools/thermal/tmon/tmon.c
+++ b/tools/thermal/tmon/tmon.c
@@ -142,6 +142,7 @@ static void start_syslog(void)
142static void prepare_logging(void) 142static void prepare_logging(void)
143{ 143{
144 int i; 144 int i;
145 struct stat logstat;
145 146
146 if (!logging) 147 if (!logging)
147 return; 148 return;
@@ -152,6 +153,29 @@ static void prepare_logging(void)
152 return; 153 return;
153 } 154 }
154 155
156 if (lstat(TMON_LOG_FILE, &logstat) < 0) {
157 syslog(LOG_ERR, "Unable to stat log file %s\n", TMON_LOG_FILE);
158 fclose(tmon_log);
159 tmon_log = NULL;
160 return;
161 }
162
163 /* The log file must be a regular file owned by us */
164 if (S_ISLNK(logstat.st_mode)) {
165 syslog(LOG_ERR, "Log file is a symlink. Will not log\n");
166 fclose(tmon_log);
167 tmon_log = NULL;
168 return;
169 }
170
171 if (logstat.st_uid != getuid()) {
172 syslog(LOG_ERR, "We don't own the log file. Not logging\n");
173 fclose(tmon_log);
174 tmon_log = NULL;
175 return;
176 }
177
178
155 fprintf(tmon_log, "#----------- THERMAL SYSTEM CONFIG -------------\n"); 179 fprintf(tmon_log, "#----------- THERMAL SYSTEM CONFIG -------------\n");
156 for (i = 0; i < ptdata.nr_tz_sensor; i++) { 180 for (i = 0; i < ptdata.nr_tz_sensor; i++) {
157 char binding_str[33]; /* size of long + 1 */ 181 char binding_str[33]; /* size of long + 1 */
@@ -331,7 +355,7 @@ static void start_daemon_mode()
331 disable_tui(); 355 disable_tui();
332 356
333 /* change the file mode mask */ 357 /* change the file mode mask */
334 umask(0); 358 umask(S_IWGRP | S_IWOTH);
335 359
336 /* new SID for the daemon process */ 360 /* new SID for the daemon process */
337 sid = setsid(); 361 sid = setsid();
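tmon's logging path is hardened: after opening the log it lstat()s the file, refuses symlinks and files not owned by the current user, and the daemon umask is tightened from 0 to S_IWGRP | S_IWOTH so group and other cannot write. A small sketch of those checks in the same order as the patch; the path is illustrative:

    /*
     * Sketch of the log-file checks, in the same order as the patch:
     * open, lstat, refuse symlinks, refuse files owned by someone else.
     * The path is illustrative.
     */
    #include <stdio.h>
    #include <sys/stat.h>
    #include <unistd.h>

    static FILE *open_log_safely(const char *path)
    {
        struct stat st;
        FILE *f = fopen(path, "w+");

        if (!f)
            return NULL;
        if (lstat(path, &st) < 0 ||      /* lstat so a symlink shows as one */
            S_ISLNK(st.st_mode) ||
            st.st_uid != getuid()) {
            fclose(f);
            return NULL;
        }
        return f;
    }

    int main(void)
    {
        FILE *f = open_log_safely("/tmp/demo_tmon.log");

        if (f) {
            fputs("logging enabled\n", f);
            fclose(f);
        }
        return 0;
    }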
diff --git a/tools/usb/ffs-test.c b/tools/usb/ffs-test.c
index fe1e66b6ef40..a87e99f37c52 100644
--- a/tools/usb/ffs-test.c
+++ b/tools/usb/ffs-test.c
@@ -116,8 +116,8 @@ static const struct {
116 .header = { 116 .header = {
117 .magic = cpu_to_le32(FUNCTIONFS_DESCRIPTORS_MAGIC), 117 .magic = cpu_to_le32(FUNCTIONFS_DESCRIPTORS_MAGIC),
118 .length = cpu_to_le32(sizeof descriptors), 118 .length = cpu_to_le32(sizeof descriptors),
119 .fs_count = 3, 119 .fs_count = cpu_to_le32(3),
120 .hs_count = 3, 120 .hs_count = cpu_to_le32(3),
121 }, 121 },
122 .fs_descs = { 122 .fs_descs = {
123 .intf = { 123 .intf = {