aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorDave Airlie <airlied@redhat.com>2015-08-17 00:13:53 -0400
committerDave Airlie <airlied@redhat.com>2015-08-17 00:13:53 -0400
commit4eebf60b7452fbd551fd7dece855ba7825a49cbc (patch)
tree490b4d194ba09c90e10201ab7fc084a0bda0ed27
parent8f9cb50789e76f3e224e8861adf650e55c747af4 (diff)
parent2c6625cd545bdd66acff14f3394865d43920a5c7 (diff)
Merge tag 'v4.2-rc7' into drm-next
Linux 4.2-rc7 Backmerge master for i915 fixes
-rw-r--r--.mailmap1
-rw-r--r--Documentation/devicetree/bindings/arm/cpus.txt1
-rw-r--r--Documentation/devicetree/bindings/dma/apm-xgene-dma.txt2
-rw-r--r--Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt3
-rw-r--r--Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt2
-rw-r--r--Documentation/devicetree/bindings/phy/ti-phy.txt16
-rw-r--r--Documentation/devicetree/bindings/sound/mt8173-max98090.txt2
-rw-r--r--Documentation/devicetree/bindings/sound/mt8173-rt5650-rt5676.txt2
-rw-r--r--Documentation/devicetree/bindings/spi/spi-ath79.txt6
-rw-r--r--Documentation/hwmon/nct79044
-rw-r--r--Documentation/input/alps.txt6
-rwxr-xr-xDocumentation/target/tcm_mod_builder.py21
-rw-r--r--MAINTAINERS31
-rw-r--r--Makefile11
-rw-r--r--arch/arc/Kconfig13
-rw-r--r--arch/arc/Makefile10
-rw-r--r--arch/arc/include/asm/arcregs.h7
-rw-r--r--arch/arc/include/asm/atomic.h78
-rw-r--r--arch/arc/include/asm/ptrace.h54
-rw-r--r--arch/arc/include/asm/spinlock.h538
-rw-r--r--arch/arc/include/asm/spinlock_types.h2
-rw-r--r--arch/arc/include/uapi/asm/ptrace.h20
-rw-r--r--arch/arc/kernel/setup.c12
-rw-r--r--arch/arc/kernel/time.c40
-rw-r--r--arch/arc/lib/memcpy-archs.S2
-rw-r--r--arch/arc/lib/memset-archs.S43
-rw-r--r--arch/arc/plat-axs10x/axs10x.c15
-rw-r--r--arch/arm/boot/dts/dra7.dtsi3
-rw-r--r--arch/arm/boot/dts/exynos3250.dtsi2
-rw-r--r--arch/arm/boot/dts/exynos4210-origen.dts4
-rw-r--r--arch/arm/boot/dts/exynos4210-trats.dts4
-rw-r--r--arch/arm/boot/dts/exynos4210-universal_c210.dts4
-rw-r--r--arch/arm/boot/dts/exynos4210.dtsi12
-rw-r--r--arch/arm/boot/dts/imx25-pdk.dts5
-rw-r--r--arch/arm/boot/dts/imx35.dtsi8
-rw-r--r--arch/arm/boot/dts/imx51-apf51dev.dts2
-rw-r--r--arch/arm/boot/dts/imx53-ard.dts4
-rw-r--r--arch/arm/boot/dts/imx53-m53evk.dts4
-rw-r--r--arch/arm/boot/dts/imx53-qsb-common.dtsi4
-rw-r--r--arch/arm/boot/dts/imx53-smd.dts4
-rw-r--r--arch/arm/boot/dts/imx53-tqma53.dtsi4
-rw-r--r--arch/arm/boot/dts/imx53-tx53.dtsi4
-rw-r--r--arch/arm/boot/dts/imx53-voipac-bsb.dts4
-rw-r--r--arch/arm/boot/dts/imx6dl-riotboard.dts8
-rw-r--r--arch/arm/boot/dts/imx6q-arm2.dts5
-rw-r--r--arch/arm/boot/dts/imx6q-gk802.dts3
-rw-r--r--arch/arm/boot/dts/imx6q-tbs2910.dts4
-rw-r--r--arch/arm/boot/dts/imx6qdl-aristainetos.dtsi4
-rw-r--r--arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi4
-rw-r--r--arch/arm/boot/dts/imx6qdl-cubox-i.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi4
-rw-r--r--arch/arm/boot/dts/imx6qdl-gw52xx.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6qdl-gw53xx.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6qdl-gw54xx.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6qdl-hummingboard.dtsi2
-rw-r--r--arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi4
-rw-r--r--arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi8
-rw-r--r--arch/arm/boot/dts/imx6qdl-rex.dtsi4
-rw-r--r--arch/arm/boot/dts/imx6qdl-sabreauto.dtsi4
-rw-r--r--arch/arm/boot/dts/imx6qdl-sabrelite.dtsi6
-rw-r--r--arch/arm/boot/dts/imx6qdl-sabresd.dtsi8
-rw-r--r--arch/arm/boot/dts/imx6qdl-tx6.dtsi4
-rw-r--r--arch/arm/boot/dts/imx6qdl-wandboard.dtsi6
-rw-r--r--arch/arm/boot/dts/imx6qdl.dtsi8
-rw-r--r--arch/arm/boot/dts/imx6sl-evk.dts10
-rw-r--r--arch/arm/boot/dts/imx6sx-sabreauto.dts4
-rw-r--r--arch/arm/boot/dts/imx6sx-sdb.dtsi4
-rw-r--r--arch/arm/boot/dts/imx7d-sdb.dts4
-rw-r--r--arch/arm/boot/dts/k2e-clocks.dtsi5
-rw-r--r--arch/arm/boot/dts/k2e.dtsi15
-rw-r--r--arch/arm/boot/dts/k2hk-clocks.dtsi5
-rw-r--r--arch/arm/boot/dts/k2hk.dtsi11
-rw-r--r--arch/arm/boot/dts/k2l-clocks.dtsi5
-rw-r--r--arch/arm/boot/dts/k2l.dtsi16
-rw-r--r--arch/arm/boot/dts/keystone.dtsi11
-rw-r--r--arch/arm/boot/dts/omap2430.dtsi3
-rw-r--r--arch/arm/boot/dts/omap4.dtsi3
-rw-r--r--arch/arm/boot/dts/omap5.dtsi3
-rw-r--r--arch/arm/boot/dts/ste-dbx5x0.dtsi53
-rw-r--r--arch/arm/boot/dts/ste-nomadik-nhk15.dts1
-rw-r--r--arch/arm/boot/dts/ste-nomadik-s8815.dts4
-rw-r--r--arch/arm/boot/dts/ste-nomadik-stn8815.dtsi1
-rw-r--r--arch/arm/kernel/entry-common.S1
-rw-r--r--arch/arm/kernel/head.S3
-rw-r--r--arch/arm/kernel/vdso.c7
-rw-r--r--arch/arm/mach-exynos/pm_domains.c3
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c24
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_7xx_data.c5
-rw-r--r--arch/arm/net/bpf_jit_32.c57
-rw-r--r--arch/arm/vdso/Makefile2
-rw-r--r--arch/arm64/boot/dts/apm/apm-storm.dtsi2
-rw-r--r--arch/arm64/kernel/efi.c4
-rw-r--r--arch/arm64/kernel/entry.S5
-rw-r--r--arch/arm64/kernel/irq.c4
-rw-r--r--arch/arm64/kernel/signal32.c5
-rw-r--r--arch/arm64/kernel/vdso.c7
-rw-r--r--arch/avr32/kernel/time.c65
-rw-r--r--arch/avr32/mach-at32ap/clock.c20
-rw-r--r--arch/m32r/include/asm/io.h5
-rw-r--r--arch/mips/Kconfig1
-rw-r--r--arch/mips/ath79/setup.c1
-rw-r--r--arch/mips/cavium-octeon/smp.c2
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/dma-coherence.h10
-rw-r--r--arch/mips/include/asm/pgtable.h31
-rw-r--r--arch/mips/include/asm/smp.h2
-rw-r--r--arch/mips/include/asm/stackframe.h25
-rw-r--r--arch/mips/kernel/mips-mt-fpaff.c5
-rw-r--r--arch/mips/kernel/prom.c2
-rw-r--r--arch/mips/kernel/relocate_kernel.S8
-rw-r--r--arch/mips/kernel/scall64-64.S2
-rw-r--r--arch/mips/kernel/scall64-n32.S2
-rw-r--r--arch/mips/kernel/signal32.c2
-rw-r--r--arch/mips/kernel/smp-bmips.c4
-rw-r--r--arch/mips/kernel/smp.c10
-rw-r--r--arch/mips/kernel/traps.c13
-rw-r--r--arch/mips/kernel/unaligned.c2
-rw-r--r--arch/mips/lantiq/irq.c3
-rw-r--r--arch/mips/loongson64/loongson-3/smp.c7
-rw-r--r--arch/mips/mm/cache.c8
-rw-r--r--arch/mips/mm/fault.c3
-rw-r--r--arch/mips/mti-malta/malta-int.c2
-rw-r--r--arch/mips/mti-malta/malta-time.c16
-rw-r--r--arch/mips/mti-sead3/sead3-time.c1
-rw-r--r--arch/mips/netlogic/common/smp.c2
-rw-r--r--arch/mips/paravirt/paravirt-smp.c2
-rw-r--r--arch/mips/pistachio/time.c1
-rw-r--r--arch/mips/pmcs-msp71xx/msp_smp.c2
-rw-r--r--arch/mips/ralink/irq.c1
-rw-r--r--arch/mips/sgi-ip27/ip27-irq.c8
-rw-r--r--arch/mips/sibyte/bcm1480/smp.c9
-rw-r--r--arch/mips/sibyte/sb1250/smp.c7
-rw-r--r--arch/powerpc/kernel/signal_32.c2
-rw-r--r--arch/powerpc/platforms/powernv/eeh-powernv.c2
-rw-r--r--arch/powerpc/platforms/powernv/pci-ioda.c11
-rw-r--r--arch/s390/kernel/asm-offsets.c15
-rw-r--r--arch/s390/kernel/cache.c2
-rw-r--r--arch/s390/kernel/entry.S13
-rw-r--r--arch/s390/kernel/traps.c4
-rw-r--r--arch/s390/kvm/kvm-s390.c4
-rw-r--r--arch/s390/net/bpf_jit_comp.c14
-rw-r--r--arch/sparc/include/asm/visasm.h16
-rw-r--r--arch/sparc/lib/NG4memcpy.S5
-rw-r--r--arch/sparc/lib/VISsave.S67
-rw-r--r--arch/sparc/lib/ksyms.c4
-rw-r--r--arch/tile/kernel/compat_signal.c2
-rw-r--r--arch/tile/kernel/setup.c2
-rw-r--r--arch/x86/boot/compressed/eboot.c4
-rw-r--r--arch/x86/entry/entry_64_compat.S17
-rw-r--r--arch/x86/include/asm/desc.h15
-rw-r--r--arch/x86/include/asm/mmu.h3
-rw-r--r--arch/x86/include/asm/mmu_context.h54
-rw-r--r--arch/x86/include/asm/sigcontext.h6
-rw-r--r--arch/x86/include/uapi/asm/kvm.h4
-rw-r--r--arch/x86/include/uapi/asm/sigcontext.h21
-rw-r--r--arch/x86/kernel/apic/io_apic.c2
-rw-r--r--arch/x86/kernel/cpu/common.c4
-rw-r--r--arch/x86/kernel/cpu/perf_event.c12
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c23
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_cqm.c16
-rw-r--r--arch/x86/kernel/fpu/init.c6
-rw-r--r--arch/x86/kernel/ldt.c262
-rw-r--r--arch/x86/kernel/process_64.c4
-rw-r--r--arch/x86/kernel/signal.c26
-rw-r--r--arch/x86/kernel/step.c8
-rw-r--r--arch/x86/kvm/lapic.c2
-rw-r--r--arch/x86/kvm/mtrr.c40
-rw-r--r--arch/x86/kvm/svm.c2
-rw-r--r--arch/x86/kvm/vmx.c5
-rw-r--r--arch/x86/kvm/x86.c7
-rw-r--r--arch/x86/kvm/x86.h5
-rw-r--r--arch/x86/math-emu/fpu_entry.c3
-rw-r--r--arch/x86/math-emu/fpu_system.h21
-rw-r--r--arch/x86/math-emu/get_address.c3
-rw-r--r--arch/x86/mm/ioremap.c23
-rw-r--r--arch/x86/mm/mmap.c7
-rw-r--r--arch/x86/mm/mpx.c24
-rw-r--r--arch/x86/mm/tlb.c2
-rw-r--r--arch/x86/net/bpf_jit_comp.c8
-rw-r--r--arch/x86/platform/efi/efi.c5
-rw-r--r--arch/x86/power/cpu.c3
-rw-r--r--arch/x86/xen/Makefile4
-rw-r--r--arch/x86/xen/enlighten.c40
-rw-r--r--arch/x86/xen/xen-ops.h6
-rw-r--r--block/bio.c17
-rw-r--r--block/blk-cgroup.c6
-rw-r--r--block/blk-settings.c4
-rw-r--r--drivers/acpi/device_pm.c2
-rw-r--r--drivers/ata/libata-core.c21
-rw-r--r--drivers/ata/libata-pmp.c7
-rw-r--r--drivers/ata/libata-scsi.c3
-rw-r--r--drivers/ata/libata-transport.c2
-rw-r--r--drivers/base/regmap/regcache-rbtree.c19
-rw-r--r--drivers/block/null_blk.c18
-rw-r--r--drivers/block/rbd.c22
-rw-r--r--drivers/block/xen-blkback/blkback.c4
-rw-r--r--drivers/block/xen-blkfront.c128
-rw-r--r--drivers/block/zram/zram_drv.c6
-rw-r--r--drivers/bluetooth/btbcm.c11
-rw-r--r--drivers/char/hw_random/core.c2
-rw-r--r--drivers/clk/pxa/clk-pxa3xx.c2
-rw-r--r--drivers/clocksource/sh_cmt.c6
-rw-r--r--drivers/cpufreq/cpufreq.c108
-rw-r--r--drivers/cpufreq/intel_pstate.c1
-rw-r--r--drivers/cpufreq/loongson2_cpufreq.c2
-rw-r--r--drivers/crypto/ixp4xx_crypto.c1
-rw-r--r--drivers/crypto/qat/qat_common/qat_algs.c24
-rw-r--r--drivers/dma/at_hdmac.c132
-rw-r--r--drivers/dma/at_hdmac_regs.h3
-rw-r--r--drivers/dma/at_xdmac.c26
-rw-r--r--drivers/dma/mv_xor.c9
-rw-r--r--drivers/dma/pl330.c3
-rw-r--r--drivers/dma/virt-dma.c19
-rw-r--r--drivers/dma/virt-dma.h13
-rw-r--r--drivers/dma/xgene-dma.c3
-rw-r--r--drivers/edac/ppc4xx_edac.c2
-rw-r--r--drivers/extcon/extcon-palmas.c13
-rw-r--r--drivers/extcon/extcon.c61
-rw-r--r--drivers/firmware/efi/cper.c15
-rw-r--r--drivers/firmware/efi/efi.c5
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu.h16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_device.c38
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c8
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c16
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c20
-rw-r--r--drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c17
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cik_sdma.c1
-rw-r--r--drivers/gpu/drm/amd/amdgpu/cz_dpm.c70
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v10_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/dce_v11_0.c4
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c54
-rw-r--r--drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c85
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c6
-rw-r--r--drivers/gpu/drm/amd/amdgpu/vce_v3_0.c48
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c1
-rw-r--r--drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c12
-rw-r--r--drivers/gpu/drm/drm_atomic_helper.c10
-rw-r--r--drivers/gpu/drm/drm_crtc.c5
-rw-r--r--drivers/gpu/drm/drm_dp_mst_topology.c1
-rw-r--r--drivers/gpu/drm/drm_irq.c2
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fimc.c1
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gsc.c6
-rw-r--r--drivers/gpu/drm/exynos/exynos_hdmi.c7
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c21
-rw-r--r--drivers/gpu/drm/i2c/tda998x_drv.c4
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h17
-rw-r--r--drivers/gpu/drm/i915/i915_gem_gtt.c11
-rw-r--r--drivers/gpu/drm/i915/i915_gem_tiling.c5
-rw-r--r--drivers/gpu/drm/i915/intel_bios.c27
-rw-r--r--drivers/gpu/drm/i915/intel_dp_mst.c11
-rw-r--r--drivers/gpu/drm/i915/intel_uncore.c26
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c4
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c13
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h2
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c33
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c87
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h1
-rw-r--r--drivers/gpu/drm/msm/msm_atomic.c8
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c13
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h4
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c2
-rw-r--r--drivers/gpu/drm/msm/msm_gem_prime.c8
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_drm.c3
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_platform.c16
-rw-r--r--drivers/gpu/drm/nouveau/nouveau_ttm.c9
-rw-r--r--drivers/gpu/drm/nouveau/nv04_fbcon.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_display.c2
-rw-r--r--drivers/gpu/drm/nouveau/nv50_fbcon.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvc0_fbcon.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c2
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c39
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h1
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c14
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c40
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c3
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c8
-rw-r--r--drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c8
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c3
-rw-r--r--drivers/gpu/drm/radeon/dce6_afmt.c62
-rw-r--r--drivers/gpu/drm/radeon/radeon_audio.c204
-rw-r--r--drivers/gpu/drm/radeon/radeon_audio.h3
-rw-r--r--drivers/gpu/drm/radeon/radeon_combios.c7
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c18
-rw-r--r--drivers/gpu/drm/radeon/radeon_mode.h2
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_util.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c4
-rw-r--r--drivers/hid/hid-apple.c6
-rw-r--r--drivers/hid/hid-core.c6
-rw-r--r--drivers/hid/hid-cp2112.c2
-rw-r--r--drivers/hid/hid-ids.h3
-rw-r--r--drivers/hid/hid-input.c7
-rw-r--r--drivers/hid/hid-multitouch.c7
-rw-r--r--drivers/hid/hid-uclogic.c2
-rw-r--r--drivers/hid/usbhid/hid-quirks.c3
-rw-r--r--drivers/hid/wacom_sys.c76
-rw-r--r--drivers/hid/wacom_wac.c3
-rw-r--r--drivers/hwmon/dell-smm-hwmon.c18
-rw-r--r--drivers/hwmon/g762.c1
-rw-r--r--drivers/hwmon/nct7802.c2
-rw-r--r--drivers/hwmon/nct7904.c58
-rw-r--r--drivers/i2c/busses/i2c-bfin-twi.c4
-rw-r--r--drivers/i2c/busses/i2c-omap.c11
-rw-r--r--drivers/i2c/i2c-core.c24
-rw-r--r--drivers/i2c/i2c-slave-eeprom.c6
-rw-r--r--drivers/iio/accel/mma8452.c8
-rw-r--r--drivers/iio/adc/mcp320x.c2
-rw-r--r--drivers/iio/adc/vf610_adc.c2
-rw-r--r--drivers/iio/light/stk3310.c26
-rw-r--r--drivers/iio/magnetometer/Kconfig1
-rw-r--r--drivers/iio/magnetometer/bmc150_magn.c4
-rw-r--r--drivers/iio/magnetometer/mmc35240.c12
-rw-r--r--drivers/iio/temperature/mlx90614.c2
-rw-r--r--drivers/infiniband/hw/cxgb3/iwch_provider.c4
-rw-r--r--drivers/infiniband/hw/ipath/ipath_driver.c6
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma.h53
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_abi.h53
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.c53
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_ah.h53
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.c53
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.h53
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_main.c55
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_sli.h53
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_stats.c53
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_stats.h53
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c53
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.h53
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_verbs.c3
-rw-r--r--drivers/infiniband/ulp/isert/ib_isert.c16
-rw-r--r--drivers/input/input-leds.c16
-rw-r--r--drivers/input/joystick/turbografx.c2
-rw-r--r--drivers/input/misc/axp20x-pek.c1
-rw-r--r--drivers/input/misc/twl4030-vibra.c3
-rw-r--r--drivers/input/mouse/alps.c8
-rw-r--r--drivers/input/mouse/bcm5974.c165
-rw-r--r--drivers/input/mouse/elantech.c35
-rw-r--r--drivers/input/mouse/elantech.h1
-rw-r--r--drivers/input/mouse/synaptics.c4
-rw-r--r--drivers/input/touchscreen/goodix.c36
-rw-r--r--drivers/input/touchscreen/usbtouchscreen.c3
-rw-r--r--drivers/iommu/amd_iommu.c98
-rw-r--r--drivers/iommu/amd_iommu_init.c10
-rw-r--r--drivers/iommu/amd_iommu_v2.c24
-rw-r--r--drivers/iommu/arm-smmu-v3.c60
-rw-r--r--drivers/iommu/intel-iommu.c9
-rw-r--r--drivers/irqchip/irq-mips-gic.c2
-rw-r--r--drivers/isdn/gigaset/ser-gigaset.c35
-rw-r--r--drivers/macintosh/ans-lcd.c2
-rw-r--r--drivers/md/Kconfig2
-rw-r--r--drivers/md/bitmap.c28
-rw-r--r--drivers/md/dm-cache-policy-mq.c2
-rw-r--r--drivers/md/dm-cache-policy-smq.c4
-rw-r--r--drivers/md/dm-cache-target.c7
-rw-r--r--drivers/md/dm-thin-metadata.c4
-rw-r--r--drivers/md/dm-thin.c11
-rw-r--r--drivers/md/dm.c27
-rw-r--r--drivers/md/md-cluster.c12
-rw-r--r--drivers/md/md-cluster.h2
-rw-r--r--drivers/md/md.c6
-rw-r--r--drivers/md/persistent-data/dm-btree-internal.h6
-rw-r--r--drivers/md/persistent-data/dm-btree-remove.c17
-rw-r--r--drivers/md/persistent-data/dm-btree-spine.c37
-rw-r--r--drivers/md/persistent-data/dm-btree.c7
-rw-r--r--drivers/md/raid1.c19
-rw-r--r--drivers/md/raid10.c5
-rw-r--r--drivers/md/raid5.c38
-rw-r--r--drivers/md/raid5.h3
-rw-r--r--drivers/media/pci/ivtv/ivtvfb.c15
-rw-r--r--drivers/memory/omap-gpmc.c6
-rw-r--r--drivers/mfd/Kconfig2
-rw-r--r--drivers/mfd/arizona-core.c16
-rw-r--r--drivers/misc/eeprom/at24.c3
-rw-r--r--drivers/misc/mei/main.c2
-rw-r--r--drivers/misc/mic/scif/scif_nodeqp.c15
-rw-r--r--drivers/mmc/card/block.c2
-rw-r--r--drivers/mmc/host/Kconfig1
-rw-r--r--drivers/mmc/host/omap_hsmmc.c11
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c210
-rw-r--r--drivers/mmc/host/sdhci-esdhc.h2
-rw-r--r--drivers/mmc/host/sdhci-pxav3.c1
-rw-r--r--drivers/mmc/host/sdhci.c16
-rw-r--r--drivers/net/bonding/bond_main.c35
-rw-r--r--drivers/net/can/at91_can.c8
-rw-r--r--drivers/net/can/bfin_can.c6
-rw-r--r--drivers/net/can/cc770/cc770.c4
-rw-r--r--drivers/net/can/flexcan.c7
-rw-r--r--drivers/net/can/grcan.c3
-rw-r--r--drivers/net/can/sja1000/sja1000.c6
-rw-r--r--drivers/net/can/slcan.c2
-rw-r--r--drivers/net/can/spi/mcp251x.c17
-rw-r--r--drivers/net/can/ti_hecc.c2
-rw-r--r--drivers/net/can/usb/ems_usb.c6
-rw-r--r--drivers/net/can/usb/esd_usb2.c6
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb.c7
-rw-r--r--drivers/net/can/usb/peak_usb/pcan_usb_pro.c4
-rw-r--r--drivers/net/can/usb/usb_8dev.c6
-rw-r--r--drivers/net/dsa/bcm_sf2.c15
-rw-r--r--drivers/net/dsa/mv88e6xxx.c2
-rw-r--r--drivers/net/ethernet/3com/3c59x.c17
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c2
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c16
-rw-r--r--drivers/net/ethernet/brocade/bna/bnad.c2
-rw-r--r--drivers/net/ethernet/cadence/macb.c125
-rw-r--r--drivers/net/ethernet/cadence/macb.h34
-rw-r--r--drivers/net/ethernet/cavium/Kconfig3
-rw-r--r--drivers/net/ethernet/cavium/thunder/nic.h12
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c26
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_main.c55
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.c17
-rw-r--r--drivers/net/ethernet/cavium/thunder/nicvf_queues.h14
-rw-r--r--drivers/net/ethernet/cavium/thunder/thunder_bgx.c5
-rw-r--r--drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c3
-rw-r--r--drivers/net/ethernet/emulex/benet/be_cmds.h5
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c187
-rw-r--r--drivers/net/ethernet/freescale/fec.h1
-rw-r--r--drivers/net/ethernet/freescale/fec_main.c19
-rw-r--r--drivers/net/ethernet/freescale/fec_ptp.c10
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c3
-rw-r--r--drivers/net/ethernet/freescale/fs_enet/mac-fec.c2
-rw-r--r--drivers/net/ethernet/freescale/gianfar.c104
-rw-r--r--drivers/net/ethernet/freescale/gianfar.h3
-rw-r--r--drivers/net/ethernet/freescale/gianfar_ethtool.c350
-rw-r--r--drivers/net/ethernet/marvell/mvneta.c22
-rw-r--r--drivers/net/ethernet/marvell/mvpp2.c244
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c17
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_rx.c1
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c5
-rw-r--r--drivers/net/ethernet/mellanox/mlx5/core/main.c2
-rw-r--r--drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169.c4
-rw-r--r--drivers/net/ethernet/renesas/ravb_main.c74
-rw-r--r--drivers/net/ethernet/rocker/rocker.c1
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c2
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c4
-rw-r--r--drivers/net/ethernet/sun/niu.c4
-rw-r--r--drivers/net/ethernet/ti/cpsw.c9
-rw-r--r--drivers/net/ethernet/ti/netcp.h2
-rw-r--r--drivers/net/ethernet/ti/netcp_core.c51
-rw-r--r--drivers/net/ethernet/ti/netcp_ethss.c67
-rw-r--r--drivers/net/ethernet/ti/netcp_sgmii.c30
-rw-r--r--drivers/net/hamradio/mkiss.c7
-rw-r--r--drivers/net/ipvlan/ipvlan.h9
-rw-r--r--drivers/net/ipvlan/ipvlan_core.c6
-rw-r--r--drivers/net/ipvlan/ipvlan_main.c42
-rw-r--r--drivers/net/macvtap.c7
-rw-r--r--drivers/net/ntb_netdev.c9
-rw-r--r--drivers/net/phy/dp83867.c2
-rw-r--r--drivers/net/phy/mdio_bus.c19
-rw-r--r--drivers/net/usb/qmi_wwan.c1
-rw-r--r--drivers/net/usb/r8152.c189
-rw-r--r--drivers/net/virtio_net.c7
-rw-r--r--drivers/net/wan/cosa.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c1
-rw-r--r--drivers/net/wireless/b43/tables_nphy.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-fh.h6
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-nvm-parse.c12
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h3
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/scan.c5
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/sta.c3
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/time-event.c2
-rw-r--r--drivers/net/wireless/iwlwifi/mvm/tx.c2
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/drv.c5
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/internal.h51
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/rx.c414
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/trans.c74
-rw-r--r--drivers/net/wireless/iwlwifi/pcie/tx.c15
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_sdio_ops.c8
-rw-r--r--drivers/net/wireless/rsi/rsi_91x_usb_ops.c4
-rw-r--r--drivers/net/wireless/rtlwifi/core.c7
-rw-r--r--drivers/net/wireless/rtlwifi/rtl8723be/sw.c1
-rw-r--r--drivers/net/xen-netback/interface.c6
-rw-r--r--drivers/net/xen-netback/netback.c68
-rw-r--r--drivers/ntb/ntb.c2
-rw-r--r--drivers/ntb/ntb_transport.c201
-rw-r--r--drivers/nvdimm/region_devs.c5
-rw-r--r--drivers/of/Kconfig2
-rw-r--r--drivers/of/unittest.c3
-rw-r--r--drivers/parport/share.c11
-rw-r--r--drivers/phy/Kconfig2
-rw-r--r--drivers/phy/phy-berlin-usb.c4
-rw-r--r--drivers/phy/phy-sun4i-usb.c1
-rw-r--r--drivers/phy/phy-ti-pipe3.c217
-rw-r--r--drivers/pinctrl/bcm/pinctrl-bcm2835.c2
-rw-r--r--drivers/pinctrl/freescale/pinctrl-imx1-core.c3
-rw-r--r--drivers/pinctrl/nomadik/pinctrl-abx500.c1
-rw-r--r--drivers/pinctrl/pinctrl-lpc18xx.c4
-rw-r--r--drivers/pinctrl/pinctrl-single.c3
-rw-r--r--drivers/pinctrl/samsung/pinctrl-samsung.c5
-rw-r--r--drivers/pinctrl/sh-pfc/sh_pfc.h2
-rw-r--r--drivers/platform/chrome/Kconfig1
-rw-r--r--drivers/regulator/88pm800.c2
-rw-r--r--drivers/regulator/core.c19
-rw-r--r--drivers/regulator/max8973-regulator.c2
-rw-r--r--drivers/regulator/s2mps11.c14
-rw-r--r--drivers/s390/Makefile2
-rw-r--r--drivers/s390/virtio/Makefile (renamed from drivers/s390/kvm/Makefile)0
-rw-r--r--drivers/s390/virtio/kvm_virtio.c (renamed from drivers/s390/kvm/kvm_virtio.c)0
-rw-r--r--drivers/s390/virtio/virtio_ccw.c (renamed from drivers/s390/kvm/virtio_ccw.c)0
-rw-r--r--drivers/scsi/ipr.c28
-rw-r--r--drivers/scsi/ipr.h1
-rw-r--r--drivers/scsi/libfc/fc_exch.c8
-rw-r--r--drivers/scsi/libfc/fc_fcp.c19
-rw-r--r--drivers/scsi/libiscsi.c25
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c6
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h20
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c190
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c3
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c7
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c12
-rw-r--r--drivers/scsi/qla2xxx/qla_sup.c2
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c763
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h72
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c28
-rw-r--r--drivers/scsi/scsi_error.c2
-rw-r--r--drivers/scsi/scsi_lib.c6
-rw-r--r--drivers/scsi/sd.c6
-rw-r--r--drivers/scsi/virtio_scsi.c4
-rw-r--r--drivers/spi/Kconfig2
-rw-r--r--drivers/spi/spi-img-spfi.c2
-rw-r--r--drivers/spi/spi-imx.c5
-rw-r--r--drivers/spi/spi-zynqmp-gqspi.c1
-rw-r--r--drivers/spi/spidev.c1
-rw-r--r--drivers/staging/comedi/drivers/das1800.c1
-rw-r--r--drivers/staging/lustre/lustre/obdclass/debug.c2
-rw-r--r--drivers/staging/vt6655/device_main.c5
-rw-r--r--drivers/target/iscsi/iscsi_target.c48
-rw-r--r--drivers/target/iscsi/iscsi_target_login.c45
-rw-r--r--drivers/target/iscsi/iscsi_target_login.h3
-rw-r--r--drivers/target/iscsi/iscsi_target_nego.c34
-rw-r--r--drivers/target/target_core_configfs.c40
-rw-r--r--drivers/target/target_core_pr.c2
-rw-r--r--drivers/target/target_core_rd.c1
-rw-r--r--drivers/target/target_core_spc.c9
-rw-r--r--drivers/thermal/hisi_thermal.c1
-rw-r--r--drivers/thermal/power_allocator.c26
-rw-r--r--drivers/thermal/samsung/Kconfig2
-rw-r--r--drivers/thermal/samsung/exynos_tmu.c5
-rw-r--r--drivers/thermal/thermal_core.c1
-rw-r--r--drivers/tty/n_tty.c16
-rw-r--r--drivers/tty/serial/Kconfig2
-rw-r--r--drivers/tty/serial/amba-pl011.c4
-rw-r--r--drivers/tty/serial/etraxfs-uart.c2
-rw-r--r--drivers/tty/serial/imx.c15
-rw-r--r--drivers/tty/serial/sc16is7xx.c30
-rw-r--r--drivers/tty/serial/serial_core.c3
-rw-r--r--drivers/tty/vt/selection.c1
-rw-r--r--drivers/tty/vt/vt.c2
-rw-r--r--drivers/usb/chipidea/core.c13
-rw-r--r--drivers/usb/chipidea/host.c7
-rw-r--r--drivers/usb/chipidea/host.h6
-rw-r--r--drivers/usb/class/cdc-acm.c1
-rw-r--r--drivers/usb/common/ulpi.c2
-rw-r--r--drivers/usb/core/hcd.c7
-rw-r--r--drivers/usb/core/hub.c2
-rw-r--r--drivers/usb/core/usb.h1
-rw-r--r--drivers/usb/dwc3/ep0.c4
-rw-r--r--drivers/usb/gadget/function/f_hid.c4
-rw-r--r--drivers/usb/gadget/function/f_printer.c10
-rw-r--r--drivers/usb/gadget/function/f_uac2.c4
-rw-r--r--drivers/usb/gadget/udc/bdc/bdc_ep.c2
-rw-r--r--drivers/usb/gadget/udc/mv_udc_core.c2
-rw-r--r--drivers/usb/gadget/udc/udc-core.c15
-rw-r--r--drivers/usb/host/ohci-q.c7
-rw-r--r--drivers/usb/host/ohci-tmio.c2
-rw-r--r--drivers/usb/host/xhci-hub.c22
-rw-r--r--drivers/usb/host/xhci-mem.c5
-rw-r--r--drivers/usb/host/xhci-pci.c57
-rw-r--r--drivers/usb/host/xhci-ring.c5
-rw-r--r--drivers/usb/host/xhci.c3
-rw-r--r--drivers/usb/host/xhci.h1
-rw-r--r--drivers/usb/serial/option.c2
-rw-r--r--drivers/usb/serial/qcserial.c2
-rw-r--r--drivers/usb/serial/sierra.c1
-rw-r--r--drivers/usb/storage/unusual_devs.h23
-rw-r--r--drivers/vfio/vfio.c91
-rw-r--r--drivers/vhost/vhost.c65
-rw-r--r--drivers/video/console/fbcon.c3
-rw-r--r--drivers/video/fbdev/Kconfig2
-rw-r--r--drivers/video/fbdev/omap2/dss/dss-of.c4
-rw-r--r--drivers/video/fbdev/pxa3xx-gcu.c4
-rw-r--r--drivers/video/of_videomode.c4
-rw-r--r--drivers/virtio/virtio_input.c4
-rw-r--r--drivers/xen/balloon.c15
-rw-r--r--drivers/xen/gntdev.c2
-rw-r--r--drivers/xen/xenbus/xenbus_client.c4
-rw-r--r--fs/btrfs/dev-replace.c2
-rw-r--r--fs/btrfs/disk-io.c3
-rw-r--r--fs/btrfs/extent-tree.c18
-rw-r--r--fs/btrfs/qgroup.c5
-rw-r--r--fs/btrfs/transaction.c3
-rw-r--r--fs/ceph/caps.c22
-rw-r--r--fs/ceph/locks.c2
-rw-r--r--fs/ceph/super.h1
-rw-r--r--fs/dax.c14
-rw-r--r--fs/dcache.c13
-rw-r--r--fs/f2fs/data.c2
-rw-r--r--fs/f2fs/file.c7
-rw-r--r--fs/f2fs/gc.c30
-rw-r--r--fs/f2fs/inline.c2
-rw-r--r--fs/f2fs/segment.c1
-rw-r--r--fs/file_table.c24
-rw-r--r--fs/fs-writeback.c1
-rw-r--r--fs/fuse/dev.c10
-rw-r--r--fs/hugetlbfs/inode.c2
-rw-r--r--fs/namei.c9
-rw-r--r--fs/namespace.c42
-rw-r--r--fs/nfs/client.c2
-rw-r--r--fs/nfs/flexfilelayout/flexfilelayout.c2
-rw-r--r--fs/nfs/inode.c15
-rw-r--r--fs/nfs/internal.h21
-rw-r--r--fs/nfs/nfs42proc.c19
-rw-r--r--fs/nfs/nfs4proc.c36
-rw-r--r--fs/nfs/nfs4state.c29
-rw-r--r--fs/nfs/pagelist.c7
-rw-r--r--fs/nfs/pnfs.c101
-rw-r--r--fs/nfs/write.c15
-rw-r--r--fs/nfsd/nfs4layouts.c1
-rw-r--r--fs/nfsd/nfs4state.c12
-rw-r--r--fs/nfsd/nfs4xdr.c11
-rw-r--r--fs/notify/mark.c64
-rw-r--r--fs/ocfs2/aops.c4
-rw-r--r--fs/ocfs2/dlmglue.c10
-rw-r--r--fs/pnode.h2
-rw-r--r--fs/signalfd.c5
-rw-r--r--fs/udf/inode.c19
-rw-r--r--fs/xfs/libxfs/xfs_attr_remote.c44
-rw-r--r--fs/xfs/xfs_file.c21
-rw-r--r--fs/xfs/xfs_log_recover.c11
-rw-r--r--include/drm/drmP.h2
-rw-r--r--include/drm/drm_crtc_helper.h3
-rw-r--r--include/drm/drm_pciids.h1
-rw-r--r--include/linux/ata.h1
-rw-r--r--include/linux/cper.h22
-rw-r--r--include/linux/cpufreq.h1
-rw-r--r--include/linux/fs.h5
-rw-r--r--include/linux/ftrace.h3
-rw-r--r--include/linux/libata.h2
-rw-r--r--include/linux/mtd/nand.h10
-rw-r--r--include/linux/nfs_fs.h7
-rw-r--r--include/linux/nfs_fs_sb.h2
-rw-r--r--include/linux/of_device.h2
-rw-r--r--include/linux/page-flags.h10
-rw-r--r--include/linux/platform_data/macb.h14
-rw-r--r--include/linux/platform_data/mmc-esdhc-imx.h1
-rw-r--r--include/linux/skbuff.h6
-rw-r--r--include/net/act_api.h8
-rw-r--r--include/net/cfg80211.h17
-rw-r--r--include/net/inet_frag.h17
-rw-r--r--include/net/ip.h1
-rw-r--r--include/net/ip_fib.h3
-rw-r--r--include/net/netfilter/nf_conntrack.h2
-rw-r--r--include/net/netns/conntrack.h1
-rw-r--r--include/net/sock.h2
-rw-r--r--include/target/iscsi/iscsi_target_core.h1
-rw-r--r--include/uapi/drm/amdgpu_drm.h4
-rw-r--r--include/uapi/drm/i915_drm.h8
-rw-r--r--include/uapi/drm/radeon_drm.h2
-rw-r--r--include/uapi/linux/pci_regs.h1
-rw-r--r--include/uapi/linux/virtio_net.h16
-rw-r--r--include/uapi/linux/virtio_pci.h6
-rw-r--r--include/uapi/linux/virtio_ring.h5
-rw-r--r--include/uapi/sound/asoc.h39
-rw-r--r--init/main.c2
-rw-r--r--ipc/mqueue.c5
-rw-r--r--ipc/sem.c47
-rw-r--r--ipc/shm.c2
-rw-r--r--kernel/events/core.c91
-rw-r--r--kernel/events/ring_buffer.c10
-rw-r--r--kernel/kthread.c4
-rw-r--r--kernel/locking/qspinlock_paravirt.h11
-rw-r--r--kernel/module.c8
-rw-r--r--kernel/resource.c6
-rw-r--r--kernel/signal.c13
-rw-r--r--kernel/trace/ftrace.c52
-rw-r--r--lib/iommu-common.c2
-rw-r--r--mm/cma.h2
-rw-r--r--mm/huge_memory.c7
-rw-r--r--mm/kasan/kasan.c2
-rw-r--r--mm/kasan/report.c2
-rw-r--r--mm/memory-failure.c54
-rw-r--r--mm/memory_hotplug.c13
-rw-r--r--mm/migrate.c8
-rw-r--r--mm/page-writeback.c4
-rw-r--r--mm/page_alloc.c53
-rw-r--r--mm/shmem.c4
-rw-r--r--mm/slab_common.c3
-rw-r--r--mm/vmscan.c16
-rw-r--r--net/9p/trans_virtio.c1
-rw-r--r--net/ax25/ax25_subr.c1
-rw-r--r--net/batman-adv/distributed-arp-table.c18
-rw-r--r--net/batman-adv/gateway_client.c2
-rw-r--r--net/batman-adv/soft-interface.c3
-rw-r--r--net/batman-adv/translation-table.c29
-rw-r--r--net/bluetooth/mgmt.c2
-rw-r--r--net/bluetooth/smp.c4
-rw-r--r--net/bridge/br_forward.c29
-rw-r--r--net/bridge/br_mdb.c2
-rw-r--r--net/bridge/br_multicast.c87
-rw-r--r--net/bridge/br_netlink.c14
-rw-r--r--net/bridge/br_stp.c5
-rw-r--r--net/bridge/br_stp_if.c13
-rw-r--r--net/bridge/br_stp_timer.c4
-rw-r--r--net/caif/caif_socket.c19
-rw-r--r--net/core/datagram.c57
-rw-r--r--net/core/dst.c4
-rw-r--r--net/core/netclassid_cgroup.c3
-rw-r--r--net/core/pktgen.c3
-rw-r--r--net/core/request_sock.c8
-rw-r--r--net/core/rtnetlink.c11
-rw-r--r--net/core/sock.c8
-rw-r--r--net/dccp/proto.c2
-rw-r--r--net/dsa/slave.c3
-rw-r--r--net/ieee802154/6lowpan/reassembly.c6
-rw-r--r--net/ipv4/arp.c16
-rw-r--r--net/ipv4/datagram.c16
-rw-r--r--net/ipv4/devinet.c1
-rw-r--r--net/ipv4/fib_lookup.h1
-rw-r--r--net/ipv4/fib_semantics.c41
-rw-r--r--net/ipv4/fib_trie.c7
-rw-r--r--net/ipv4/inet_connection_sock.c2
-rw-r--r--net/ipv4/inet_fragment.c40
-rw-r--r--net/ipv4/inet_hashtables.c11
-rw-r--r--net/ipv4/ip_fragment.c18
-rw-r--r--net/ipv4/netfilter/ipt_SYNPROXY.c3
-rw-r--r--net/ipv4/route.c2
-rw-r--r--net/ipv4/tcp.c11
-rw-r--r--net/ipv4/tcp_input.c3
-rw-r--r--net/ipv4/tcp_ipv4.c2
-rw-r--r--net/ipv4/udp.c13
-rw-r--r--net/ipv6/datagram.c20
-rw-r--r--net/ipv6/ip6_offload.c2
-rw-r--r--net/ipv6/ndisc.c6
-rw-r--r--net/ipv6/netfilter/ip6t_SYNPROXY.c19
-rw-r--r--net/ipv6/netfilter/nf_conntrack_reasm.c6
-rw-r--r--net/ipv6/reassembly.c8
-rw-r--r--net/ipv6/route.c6
-rw-r--r--net/ipv6/tcp_ipv6.c2
-rw-r--r--net/llc/af_llc.c4
-rw-r--r--net/mac80211/debugfs_netdev.c1
-rw-r--r--net/mac80211/iface.c25
-rw-r--r--net/mac80211/mesh_plink.c5
-rw-r--r--net/mac80211/pm.c16
-rw-r--r--net/mac80211/tdls.c6
-rw-r--r--net/mac80211/tx.c4
-rw-r--r--net/netfilter/ipvs/ip_vs_core.c16
-rw-r--r--net/netfilter/ipvs/ip_vs_ctl.c78
-rw-r--r--net/netfilter/ipvs/ip_vs_sched.c12
-rw-r--r--net/netfilter/ipvs/ip_vs_sync.c2
-rw-r--r--net/netfilter/ipvs/ip_vs_xmit.c41
-rw-r--r--net/netfilter/nf_conntrack_core.c71
-rw-r--r--net/netfilter/nf_conntrack_expect.c3
-rw-r--r--net/netfilter/nf_conntrack_netlink.c5
-rw-r--r--net/netfilter/nf_synproxy_core.c11
-rw-r--r--net/netfilter/xt_CT.c13
-rw-r--r--net/netfilter/xt_IDLETIMER.c1
-rw-r--r--net/netlink/af_netlink.c84
-rw-r--r--net/openvswitch/actions.c16
-rw-r--r--net/openvswitch/flow_table.c2
-rw-r--r--net/packet/af_packet.c11
-rw-r--r--net/rds/info.c2
-rw-r--r--net/sched/act_api.c11
-rw-r--r--net/sched/act_bpf.c50
-rw-r--r--net/sched/act_mirred.c2
-rw-r--r--net/sched/act_pedit.c5
-rw-r--r--net/sched/cls_bpf.c2
-rw-r--r--net/sched/cls_flow.c5
-rw-r--r--net/sched/cls_flower.c2
-rw-r--r--net/sched/sch_choke.c13
-rw-r--r--net/sched/sch_fq_codel.c35
-rw-r--r--net/sched/sch_plug.c1
-rw-r--r--net/sched/sch_sfq.c2
-rw-r--r--net/sctp/socket.c6
-rw-r--r--net/sunrpc/backchannel_rqst.c6
-rw-r--r--net/sunrpc/clnt.c5
-rw-r--r--net/sunrpc/xprtsock.c25
-rw-r--r--net/wireless/chan.c45
-rw-r--r--net/wireless/nl80211.c14
-rw-r--r--net/wireless/reg.c8
-rw-r--r--net/wireless/trace.h11
-rw-r--r--samples/trace_events/trace-events-sample.h7
-rwxr-xr-xscripts/kconfig/streamline_config.pl2
-rw-r--r--security/keys/keyring.c8
-rw-r--r--security/yama/yama_lsm.c1
-rw-r--r--sound/core/pcm_native.c2
-rw-r--r--sound/firewire/amdtp.c5
-rw-r--r--sound/firewire/amdtp.h2
-rw-r--r--sound/firewire/fireworks/fireworks.c8
-rw-r--r--sound/firewire/fireworks/fireworks.h1
-rw-r--r--sound/firewire/fireworks/fireworks_stream.c9
-rw-r--r--sound/hda/ext/hdac_ext_controller.c6
-rw-r--r--sound/hda/ext/hdac_ext_stream.c2
-rw-r--r--sound/hda/hdac_i915.c5
-rw-r--r--sound/pci/hda/hda_intel.c32
-rw-r--r--sound/pci/hda/patch_cirrus.c4
-rw-r--r--sound/pci/hda/patch_hdmi.c2
-rw-r--r--sound/pci/hda/patch_realtek.c80
-rw-r--r--sound/pci/hda/patch_sigmatel.c3
-rw-r--r--sound/pci/oxygen/oxygen_mixer.c2
-rw-r--r--sound/soc/codecs/cs4265.c10
-rw-r--r--sound/soc/codecs/pcm1681.c2
-rw-r--r--sound/soc/codecs/rt5645.c5
-rw-r--r--sound/soc/codecs/rt5645.h4
-rw-r--r--sound/soc/codecs/sgtl5000.h2
-rw-r--r--sound/soc/codecs/ssm4567.c8
-rw-r--r--sound/soc/fsl/fsl_ssi.c2
-rw-r--r--sound/soc/intel/Makefile2
-rw-r--r--sound/soc/intel/atom/sst/sst_drv_interface.c14
-rw-r--r--sound/soc/intel/baytrail/sst-baytrail-ipc.c2
-rw-r--r--sound/soc/intel/boards/cht_bsw_max98090_ti.c4
-rw-r--r--sound/soc/intel/haswell/sst-haswell-ipc.c2
-rw-r--r--sound/soc/mediatek/mt8173-max98090.c17
-rw-r--r--sound/soc/mediatek/mt8173-rt5650-rt5676.c19
-rw-r--r--sound/soc/mediatek/mtk-afe-pcm.c2
-rw-r--r--sound/soc/soc-core.c1
-rw-r--r--sound/soc/soc-dapm.c35
-rw-r--r--sound/soc/soc-topology.c62
-rw-r--r--sound/soc/zte/zx296702-i2s.c4
-rw-r--r--sound/soc/zte/zx296702-spdif.c4
-rw-r--r--sound/sparc/amd7930.c1
-rw-r--r--sound/usb/mixer_maps.c24
-rw-r--r--tools/perf/config/Makefile2
-rw-r--r--tools/perf/util/stat-shadow.c8
-rw-r--r--tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c2
825 files changed, 9343 insertions, 5246 deletions
diff --git a/.mailmap b/.mailmap
index b4091b7a78fe..4b31af54ccd5 100644
--- a/.mailmap
+++ b/.mailmap
@@ -17,6 +17,7 @@ Aleksey Gorelov <aleksey_gorelov@phoenix.com>
17Al Viro <viro@ftp.linux.org.uk> 17Al Viro <viro@ftp.linux.org.uk>
18Al Viro <viro@zenIV.linux.org.uk> 18Al Viro <viro@zenIV.linux.org.uk>
19Andreas Herrmann <aherrman@de.ibm.com> 19Andreas Herrmann <aherrman@de.ibm.com>
20Andrey Ryabinin <ryabinin.a.a@gmail.com> <a.ryabinin@samsung.com>
20Andrew Morton <akpm@linux-foundation.org> 21Andrew Morton <akpm@linux-foundation.org>
21Andrew Vasquez <andrew.vasquez@qlogic.com> 22Andrew Vasquez <andrew.vasquez@qlogic.com>
22Andy Adamson <andros@citi.umich.edu> 23Andy Adamson <andros@citi.umich.edu>
diff --git a/Documentation/devicetree/bindings/arm/cpus.txt b/Documentation/devicetree/bindings/arm/cpus.txt
index d6b794cef0b8..91e6e5c478d0 100644
--- a/Documentation/devicetree/bindings/arm/cpus.txt
+++ b/Documentation/devicetree/bindings/arm/cpus.txt
@@ -199,6 +199,7 @@ nodes to be present and contain the properties described below.
199 "qcom,kpss-acc-v1" 199 "qcom,kpss-acc-v1"
200 "qcom,kpss-acc-v2" 200 "qcom,kpss-acc-v2"
201 "rockchip,rk3066-smp" 201 "rockchip,rk3066-smp"
202 "ste,dbx500-smp"
202 203
203 - cpu-release-addr 204 - cpu-release-addr
204 Usage: required for systems that have an "enable-method" 205 Usage: required for systems that have an "enable-method"
diff --git a/Documentation/devicetree/bindings/dma/apm-xgene-dma.txt b/Documentation/devicetree/bindings/dma/apm-xgene-dma.txt
index d3058768b23d..c53e0b08032f 100644
--- a/Documentation/devicetree/bindings/dma/apm-xgene-dma.txt
+++ b/Documentation/devicetree/bindings/dma/apm-xgene-dma.txt
@@ -35,7 +35,7 @@ Example:
35 device_type = "dma"; 35 device_type = "dma";
36 reg = <0x0 0x1f270000 0x0 0x10000>, 36 reg = <0x0 0x1f270000 0x0 0x10000>,
37 <0x0 0x1f200000 0x0 0x10000>, 37 <0x0 0x1f200000 0x0 0x10000>,
38 <0x0 0x1b008000 0x0 0x2000>, 38 <0x0 0x1b000000 0x0 0x400000>,
39 <0x0 0x1054a000 0x0 0x100>; 39 <0x0 0x1054a000 0x0 0x100>;
40 interrupts = <0x0 0x82 0x4>, 40 interrupts = <0x0 0x82 0x4>,
41 <0x0 0xb8 0x4>, 41 <0x0 0xb8 0x4>,
diff --git a/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt b/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt
index c03eec116872..3443e0f838df 100644
--- a/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt
+++ b/Documentation/devicetree/bindings/iommu/arm,smmu-v3.txt
@@ -35,3 +35,6 @@ the PCIe specification.
35 35
36 NOTE: this only applies to the SMMU itself, not 36 NOTE: this only applies to the SMMU itself, not
37 masters connected upstream of the SMMU. 37 masters connected upstream of the SMMU.
38
39- hisilicon,broken-prefetch-cmd
40 : Avoid sending CMD_PREFETCH_* commands to the SMMU.
diff --git a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
index 5d0376b8f202..211e7785f4d2 100644
--- a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
+++ b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
@@ -17,7 +17,6 @@ Required properties:
17 "fsl,imx6sx-usdhc" 17 "fsl,imx6sx-usdhc"
18 18
19Optional properties: 19Optional properties:
20- fsl,cd-controller : Indicate to use controller internal card detection
21- fsl,wp-controller : Indicate to use controller internal write protection 20- fsl,wp-controller : Indicate to use controller internal write protection
22- fsl,delay-line : Specify the number of delay cells for override mode. 21- fsl,delay-line : Specify the number of delay cells for override mode.
23 This is used to set the clock delay for DLL(Delay Line) on override mode 22 This is used to set the clock delay for DLL(Delay Line) on override mode
@@ -35,7 +34,6 @@ esdhc@70004000 {
35 compatible = "fsl,imx51-esdhc"; 34 compatible = "fsl,imx51-esdhc";
36 reg = <0x70004000 0x4000>; 35 reg = <0x70004000 0x4000>;
37 interrupts = <1>; 36 interrupts = <1>;
38 fsl,cd-controller;
39 fsl,wp-controller; 37 fsl,wp-controller;
40}; 38};
41 39
diff --git a/Documentation/devicetree/bindings/phy/ti-phy.txt b/Documentation/devicetree/bindings/phy/ti-phy.txt
index 305e3df3d9b1..9cf9446eaf2e 100644
--- a/Documentation/devicetree/bindings/phy/ti-phy.txt
+++ b/Documentation/devicetree/bindings/phy/ti-phy.txt
@@ -82,6 +82,9 @@ Optional properties:
82 - id: If there are multiple instance of the same type, in order to 82 - id: If there are multiple instance of the same type, in order to
83 differentiate between each instance "id" can be used (e.g., multi-lane PCIe 83 differentiate between each instance "id" can be used (e.g., multi-lane PCIe
84 PHY). If "id" is not provided, it is set to default value of '1'. 84 PHY). If "id" is not provided, it is set to default value of '1'.
85 - syscon-pllreset: Handle to system control region that contains the
86 CTRL_CORE_SMA_SW_0 register and register offset to the CTRL_CORE_SMA_SW_0
87 register that contains the SATA_PLL_SOFT_RESET bit. Only valid for sata_phy.
85 88
86This is usually a subnode of ocp2scp to which it is connected. 89This is usually a subnode of ocp2scp to which it is connected.
87 90
@@ -100,3 +103,16 @@ usb3phy@4a084400 {
100 "sysclk", 103 "sysclk",
101 "refclk"; 104 "refclk";
102}; 105};
106
107sata_phy: phy@4A096000 {
108 compatible = "ti,phy-pipe3-sata";
109 reg = <0x4A096000 0x80>, /* phy_rx */
110 <0x4A096400 0x64>, /* phy_tx */
111 <0x4A096800 0x40>; /* pll_ctrl */
112 reg-names = "phy_rx", "phy_tx", "pll_ctrl";
113 ctrl-module = <&omap_control_sata>;
114 clocks = <&sys_clkin1>, <&sata_ref_clk>;
115 clock-names = "sysclk", "refclk";
116 syscon-pllreset = <&scm_conf 0x3fc>;
117 #phy-cells = <0>;
118};
diff --git a/Documentation/devicetree/bindings/sound/mt8173-max98090.txt b/Documentation/devicetree/bindings/sound/mt8173-max98090.txt
index 829bd26d17f8..519e97c8f1b8 100644
--- a/Documentation/devicetree/bindings/sound/mt8173-max98090.txt
+++ b/Documentation/devicetree/bindings/sound/mt8173-max98090.txt
@@ -3,11 +3,13 @@ MT8173 with MAX98090 CODEC
3Required properties: 3Required properties:
4- compatible : "mediatek,mt8173-max98090" 4- compatible : "mediatek,mt8173-max98090"
5- mediatek,audio-codec: the phandle of the MAX98090 audio codec 5- mediatek,audio-codec: the phandle of the MAX98090 audio codec
6- mediatek,platform: the phandle of MT8173 ASoC platform
6 7
7Example: 8Example:
8 9
9 sound { 10 sound {
10 compatible = "mediatek,mt8173-max98090"; 11 compatible = "mediatek,mt8173-max98090";
11 mediatek,audio-codec = <&max98090>; 12 mediatek,audio-codec = <&max98090>;
13 mediatek,platform = <&afe>;
12 }; 14 };
13 15
diff --git a/Documentation/devicetree/bindings/sound/mt8173-rt5650-rt5676.txt b/Documentation/devicetree/bindings/sound/mt8173-rt5650-rt5676.txt
index 61e98c976bd4..f205ce9e31dd 100644
--- a/Documentation/devicetree/bindings/sound/mt8173-rt5650-rt5676.txt
+++ b/Documentation/devicetree/bindings/sound/mt8173-rt5650-rt5676.txt
@@ -3,11 +3,13 @@ MT8173 with RT5650 RT5676 CODECS
3Required properties: 3Required properties:
4- compatible : "mediatek,mt8173-rt5650-rt5676" 4- compatible : "mediatek,mt8173-rt5650-rt5676"
5- mediatek,audio-codec: the phandles of rt5650 and rt5676 codecs 5- mediatek,audio-codec: the phandles of rt5650 and rt5676 codecs
6- mediatek,platform: the phandle of MT8173 ASoC platform
6 7
7Example: 8Example:
8 9
9 sound { 10 sound {
10 compatible = "mediatek,mt8173-rt5650-rt5676"; 11 compatible = "mediatek,mt8173-rt5650-rt5676";
11 mediatek,audio-codec = <&rt5650 &rt5676>; 12 mediatek,audio-codec = <&rt5650 &rt5676>;
13 mediatek,platform = <&afe>;
12 }; 14 };
13 15
diff --git a/Documentation/devicetree/bindings/spi/spi-ath79.txt b/Documentation/devicetree/bindings/spi/spi-ath79.txt
index f1ad9c367532..9c696fa66f81 100644
--- a/Documentation/devicetree/bindings/spi/spi-ath79.txt
+++ b/Documentation/devicetree/bindings/spi/spi-ath79.txt
@@ -3,7 +3,7 @@ Binding for Qualcomm Atheros AR7xxx/AR9xxx SPI controller
3Required properties: 3Required properties:
4- compatible: has to be "qca,<soc-type>-spi", "qca,ar7100-spi" as fallback. 4- compatible: has to be "qca,<soc-type>-spi", "qca,ar7100-spi" as fallback.
5- reg: Base address and size of the controllers memory area 5- reg: Base address and size of the controllers memory area
6- clocks: phandle to the AHB clock. 6- clocks: phandle of the AHB clock.
7- clock-names: has to be "ahb". 7- clock-names: has to be "ahb".
8- #address-cells: <1>, as required by generic SPI binding. 8- #address-cells: <1>, as required by generic SPI binding.
9- #size-cells: <0>, also as required by generic SPI binding. 9- #size-cells: <0>, also as required by generic SPI binding.
@@ -12,9 +12,9 @@ Child nodes as per the generic SPI binding.
12 12
13Example: 13Example:
14 14
15 spi@1F000000 { 15 spi@1f000000 {
16 compatible = "qca,ar9132-spi", "qca,ar7100-spi"; 16 compatible = "qca,ar9132-spi", "qca,ar7100-spi";
17 reg = <0x1F000000 0x10>; 17 reg = <0x1f000000 0x10>;
18 18
19 clocks = <&pll 2>; 19 clocks = <&pll 2>;
20 clock-names = "ahb"; 20 clock-names = "ahb";
diff --git a/Documentation/hwmon/nct7904 b/Documentation/hwmon/nct7904
index 014f112e2a14..57fffe33ebfc 100644
--- a/Documentation/hwmon/nct7904
+++ b/Documentation/hwmon/nct7904
@@ -35,11 +35,11 @@ temp1_input Local temperature (1/1000 degree,
35temp[2-9]_input CPU temperatures (1/1000 degree, 35temp[2-9]_input CPU temperatures (1/1000 degree,
36 0.125 degree resolution) 36 0.125 degree resolution)
37 37
38fan[1-4]_mode R/W, 0/1 for manual or SmartFan mode 38pwm[1-4]_enable R/W, 1/2 for manual or SmartFan mode
39 Setting SmartFan mode is supported only if it has been 39 Setting SmartFan mode is supported only if it has been
40 previously configured by BIOS (or configuration EEPROM) 40 previously configured by BIOS (or configuration EEPROM)
41 41
42fan[1-4]_pwm R/O in SmartFan mode, R/W in manual control mode 42pwm[1-4] R/O in SmartFan mode, R/W in manual control mode
43 43
44The driver checks sensor control registers and does not export the sensors 44The driver checks sensor control registers and does not export the sensors
45that are not enabled. Anyway, a sensor that is enabled may actually be not 45that are not enabled. Anyway, a sensor that is enabled may actually be not
diff --git a/Documentation/input/alps.txt b/Documentation/input/alps.txt
index c86f2f1ae4f6..1fec1135791d 100644
--- a/Documentation/input/alps.txt
+++ b/Documentation/input/alps.txt
@@ -119,8 +119,10 @@ ALPS Absolute Mode - Protocol Version 2
119 byte 5: 0 z6 z5 z4 z3 z2 z1 z0 119 byte 5: 0 z6 z5 z4 z3 z2 z1 z0
120 120
121Protocol Version 2 DualPoint devices send standard PS/2 mouse packets for 121Protocol Version 2 DualPoint devices send standard PS/2 mouse packets for
122the DualPoint Stick. For non interleaved dualpoint devices the pointingstick 122the DualPoint Stick. The M, R and L bits signal the combined status of both
123buttons get reported separately in the PSM, PSR and PSL bits. 123the pointingstick and touchpad buttons, except for Dell dualpoint devices
124where the pointingstick buttons get reported separately in the PSM, PSR
125and PSL bits.
124 126
125Dualpoint device -- interleaved packet format 127Dualpoint device -- interleaved packet format
126--------------------------------------------- 128---------------------------------------------
diff --git a/Documentation/target/tcm_mod_builder.py b/Documentation/target/tcm_mod_builder.py
index 949de191fcdc..cda56df9b8a7 100755
--- a/Documentation/target/tcm_mod_builder.py
+++ b/Documentation/target/tcm_mod_builder.py
@@ -199,7 +199,8 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
199 buf += "#include <linux/string.h>\n" 199 buf += "#include <linux/string.h>\n"
200 buf += "#include <linux/configfs.h>\n" 200 buf += "#include <linux/configfs.h>\n"
201 buf += "#include <linux/ctype.h>\n" 201 buf += "#include <linux/ctype.h>\n"
202 buf += "#include <asm/unaligned.h>\n\n" 202 buf += "#include <asm/unaligned.h>\n"
203 buf += "#include <scsi/scsi_proto.h>\n\n"
203 buf += "#include <target/target_core_base.h>\n" 204 buf += "#include <target/target_core_base.h>\n"
204 buf += "#include <target/target_core_fabric.h>\n" 205 buf += "#include <target/target_core_fabric.h>\n"
205 buf += "#include <target/target_core_fabric_configfs.h>\n" 206 buf += "#include <target/target_core_fabric_configfs.h>\n"
@@ -230,8 +231,14 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
230 buf += " }\n" 231 buf += " }\n"
231 buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n" 232 buf += " tpg->" + fabric_mod_port + " = " + fabric_mod_port + ";\n"
232 buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n" 233 buf += " tpg->" + fabric_mod_port + "_tpgt = tpgt;\n\n"
233 buf += " ret = core_tpg_register(&" + fabric_mod_name + "_ops, wwn,\n" 234
234 buf += " &tpg->se_tpg, SCSI_PROTOCOL_SAS);\n" 235 if proto_ident == "FC":
236 buf += " ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_FCP);\n"
237 elif proto_ident == "SAS":
238 buf += " ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SAS);\n"
239 elif proto_ident == "iSCSI":
240 buf += " ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_ISCSI);\n"
241
235 buf += " if (ret < 0) {\n" 242 buf += " if (ret < 0) {\n"
236 buf += " kfree(tpg);\n" 243 buf += " kfree(tpg);\n"
237 buf += " return NULL;\n" 244 buf += " return NULL;\n"
@@ -292,7 +299,7 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
292 299
293 buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n" 300 buf += "static const struct target_core_fabric_ops " + fabric_mod_name + "_ops = {\n"
294 buf += " .module = THIS_MODULE,\n" 301 buf += " .module = THIS_MODULE,\n"
295 buf += " .name = " + fabric_mod_name + ",\n" 302 buf += " .name = \"" + fabric_mod_name + "\",\n"
296 buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n" 303 buf += " .get_fabric_name = " + fabric_mod_name + "_get_fabric_name,\n"
297 buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n" 304 buf += " .tpg_get_wwn = " + fabric_mod_name + "_get_fabric_wwn,\n"
298 buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n" 305 buf += " .tpg_get_tag = " + fabric_mod_name + "_get_tag,\n"
@@ -322,17 +329,17 @@ def tcm_mod_build_configfs(proto_ident, fabric_mod_dir_var, fabric_mod_name):
322 buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n" 329 buf += " .fabric_make_tpg = " + fabric_mod_name + "_make_tpg,\n"
323 buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n" 330 buf += " .fabric_drop_tpg = " + fabric_mod_name + "_drop_tpg,\n"
324 buf += "\n" 331 buf += "\n"
325 buf += " .tfc_wwn_attrs = " + fabric_mod_name + "_wwn_attrs;\n" 332 buf += " .tfc_wwn_attrs = " + fabric_mod_name + "_wwn_attrs,\n"
326 buf += "};\n\n" 333 buf += "};\n\n"
327 334
328 buf += "static int __init " + fabric_mod_name + "_init(void)\n" 335 buf += "static int __init " + fabric_mod_name + "_init(void)\n"
329 buf += "{\n" 336 buf += "{\n"
330 buf += " return target_register_template(" + fabric_mod_name + "_ops);\n" 337 buf += " return target_register_template(&" + fabric_mod_name + "_ops);\n"
331 buf += "};\n\n" 338 buf += "};\n\n"
332 339
333 buf += "static void __exit " + fabric_mod_name + "_exit(void)\n" 340 buf += "static void __exit " + fabric_mod_name + "_exit(void)\n"
334 buf += "{\n" 341 buf += "{\n"
335 buf += " target_unregister_template(" + fabric_mod_name + "_ops);\n" 342 buf += " target_unregister_template(&" + fabric_mod_name + "_ops);\n"
336 buf += "};\n\n" 343 buf += "};\n\n"
337 344
338 buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n" 345 buf += "MODULE_DESCRIPTION(\"" + fabric_mod_name.upper() + " series fabric driver\");\n"
diff --git a/MAINTAINERS b/MAINTAINERS
index 9c9dd5fc7aff..4118b13983c2 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -3592,6 +3592,15 @@ S: Maintained
3592F: drivers/gpu/drm/rockchip/ 3592F: drivers/gpu/drm/rockchip/
3593F: Documentation/devicetree/bindings/video/rockchip* 3593F: Documentation/devicetree/bindings/video/rockchip*
3594 3594
3595DRM DRIVERS FOR STI
3596M: Benjamin Gaignard <benjamin.gaignard@linaro.org>
3597M: Vincent Abriou <vincent.abriou@st.com>
3598L: dri-devel@lists.freedesktop.org
3599T: git http://git.linaro.org/people/benjamin.gaignard/kernel.git
3600S: Maintained
3601F: drivers/gpu/drm/sti
3602F: Documentation/devicetree/bindings/gpu/st,stih4xx.txt
3603
3595DSBR100 USB FM RADIO DRIVER 3604DSBR100 USB FM RADIO DRIVER
3596M: Alexey Klimov <klimov.linux@gmail.com> 3605M: Alexey Klimov <klimov.linux@gmail.com>
3597L: linux-media@vger.kernel.org 3606L: linux-media@vger.kernel.org
@@ -5605,6 +5614,7 @@ F: kernel/irq/
5605IRQCHIP DRIVERS 5614IRQCHIP DRIVERS
5606M: Thomas Gleixner <tglx@linutronix.de> 5615M: Thomas Gleixner <tglx@linutronix.de>
5607M: Jason Cooper <jason@lakedaemon.net> 5616M: Jason Cooper <jason@lakedaemon.net>
5617M: Marc Zyngier <marc.zyngier@arm.com>
5608L: linux-kernel@vger.kernel.org 5618L: linux-kernel@vger.kernel.org
5609S: Maintained 5619S: Maintained
5610T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core 5620T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
@@ -5613,11 +5623,14 @@ F: Documentation/devicetree/bindings/interrupt-controller/
5613F: drivers/irqchip/ 5623F: drivers/irqchip/
5614 5624
5615IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY) 5625IRQ DOMAINS (IRQ NUMBER MAPPING LIBRARY)
5616M: Benjamin Herrenschmidt <benh@kernel.crashing.org> 5626M: Jiang Liu <jiang.liu@linux.intel.com>
5627M: Marc Zyngier <marc.zyngier@arm.com>
5617S: Maintained 5628S: Maintained
5629T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git irq/core
5618F: Documentation/IRQ-domain.txt 5630F: Documentation/IRQ-domain.txt
5619F: include/linux/irqdomain.h 5631F: include/linux/irqdomain.h
5620F: kernel/irq/irqdomain.c 5632F: kernel/irq/irqdomain.c
5633F: kernel/irq/msi.c
5621 5634
5622ISAPNP 5635ISAPNP
5623M: Jaroslav Kysela <perex@perex.cz> 5636M: Jaroslav Kysela <perex@perex.cz>
@@ -5904,7 +5917,6 @@ S: Supported
5904F: Documentation/s390/kvm.txt 5917F: Documentation/s390/kvm.txt
5905F: arch/s390/include/asm/kvm* 5918F: arch/s390/include/asm/kvm*
5906F: arch/s390/kvm/ 5919F: arch/s390/kvm/
5907F: drivers/s390/kvm/
5908 5920
5909KERNEL VIRTUAL MACHINE (KVM) FOR ARM 5921KERNEL VIRTUAL MACHINE (KVM) FOR ARM
5910M: Christoffer Dall <christoffer.dall@linaro.org> 5922M: Christoffer Dall <christoffer.dall@linaro.org>
@@ -6844,6 +6856,12 @@ T: git git://linuxtv.org/anttip/media_tree.git
6844S: Maintained 6856S: Maintained
6845F: drivers/media/usb/msi2500/ 6857F: drivers/media/usb/msi2500/
6846 6858
6859MSYSTEMS DISKONCHIP G3 MTD DRIVER
6860M: Robert Jarzmik <robert.jarzmik@free.fr>
6861L: linux-mtd@lists.infradead.org
6862S: Maintained
6863F: drivers/mtd/devices/docg3*
6864
6847MT9M032 APTINA SENSOR DRIVER 6865MT9M032 APTINA SENSOR DRIVER
6848M: Laurent Pinchart <laurent.pinchart@ideasonboard.com> 6866M: Laurent Pinchart <laurent.pinchart@ideasonboard.com>
6849L: linux-media@vger.kernel.org 6867L: linux-media@vger.kernel.org
@@ -10901,6 +10919,15 @@ F: drivers/block/virtio_blk.c
10901F: include/linux/virtio_*.h 10919F: include/linux/virtio_*.h
10902F: include/uapi/linux/virtio_*.h 10920F: include/uapi/linux/virtio_*.h
10903 10921
10922VIRTIO DRIVERS FOR S390
10923M: Christian Borntraeger <borntraeger@de.ibm.com>
10924M: Cornelia Huck <cornelia.huck@de.ibm.com>
10925L: linux-s390@vger.kernel.org
10926L: virtualization@lists.linux-foundation.org
10927L: kvm@vger.kernel.org
10928S: Supported
10929F: drivers/s390/virtio/
10930
10904VIRTIO GPU DRIVER 10931VIRTIO GPU DRIVER
10905M: David Airlie <airlied@linux.ie> 10932M: David Airlie <airlied@linux.ie>
10906M: Gerd Hoffmann <kraxel@redhat.com> 10933M: Gerd Hoffmann <kraxel@redhat.com>
diff --git a/Makefile b/Makefile
index a9ad4908e870..6e88c371b32f 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 4 1VERSION = 4
2PATCHLEVEL = 2 2PATCHLEVEL = 2
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = -rc3 4EXTRAVERSION = -rc7
5NAME = Hurr durr I'ma sheep 5NAME = Hurr durr I'ma sheep
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
@@ -597,6 +597,11 @@ endif # $(dot-config)
597# Defaults to vmlinux, but the arch makefile usually adds further targets 597# Defaults to vmlinux, but the arch makefile usually adds further targets
598all: vmlinux 598all: vmlinux
599 599
600# The arch Makefile can set ARCH_{CPP,A,C}FLAGS to override the default
601# values of the respective KBUILD_* variables
602ARCH_CPPFLAGS :=
603ARCH_AFLAGS :=
604ARCH_CFLAGS :=
600include arch/$(SRCARCH)/Makefile 605include arch/$(SRCARCH)/Makefile
601 606
602KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,) 607KBUILD_CFLAGS += $(call cc-option,-fno-delete-null-pointer-checks,)
@@ -848,10 +853,10 @@ export mod_strip_cmd
848mod_compress_cmd = true 853mod_compress_cmd = true
849ifdef CONFIG_MODULE_COMPRESS 854ifdef CONFIG_MODULE_COMPRESS
850 ifdef CONFIG_MODULE_COMPRESS_GZIP 855 ifdef CONFIG_MODULE_COMPRESS_GZIP
851 mod_compress_cmd = gzip -n 856 mod_compress_cmd = gzip -n -f
852 endif # CONFIG_MODULE_COMPRESS_GZIP 857 endif # CONFIG_MODULE_COMPRESS_GZIP
853 ifdef CONFIG_MODULE_COMPRESS_XZ 858 ifdef CONFIG_MODULE_COMPRESS_XZ
854 mod_compress_cmd = xz 859 mod_compress_cmd = xz -f
855 endif # CONFIG_MODULE_COMPRESS_XZ 860 endif # CONFIG_MODULE_COMPRESS_XZ
856endif # CONFIG_MODULE_COMPRESS 861endif # CONFIG_MODULE_COMPRESS
857export mod_compress_cmd 862export mod_compress_cmd
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 91cf4055acab..bd4670d1b89b 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -313,11 +313,11 @@ config ARC_PAGE_SIZE_8K
313 313
314config ARC_PAGE_SIZE_16K 314config ARC_PAGE_SIZE_16K
315 bool "16KB" 315 bool "16KB"
316 depends on ARC_MMU_V3 316 depends on ARC_MMU_V3 || ARC_MMU_V4
317 317
318config ARC_PAGE_SIZE_4K 318config ARC_PAGE_SIZE_4K
319 bool "4KB" 319 bool "4KB"
320 depends on ARC_MMU_V3 320 depends on ARC_MMU_V3 || ARC_MMU_V4
321 321
322endchoice 322endchoice
323 323
@@ -365,6 +365,11 @@ config ARC_HAS_LLSC
365 default y 365 default y
366 depends on !ARC_CANT_LLSC 366 depends on !ARC_CANT_LLSC
367 367
368config ARC_STAR_9000923308
369 bool "Workaround for llock/scond livelock"
370 default y
371 depends on ISA_ARCV2 && SMP && ARC_HAS_LLSC
372
368config ARC_HAS_SWAPE 373config ARC_HAS_SWAPE
369 bool "Insn: SWAPE (endian-swap)" 374 bool "Insn: SWAPE (endian-swap)"
370 default y 375 default y
@@ -379,6 +384,10 @@ config ARC_HAS_LL64
379 dest operands with 2 possible source operands. 384 dest operands with 2 possible source operands.
380 default y 385 default y
381 386
387config ARC_HAS_DIV_REM
388 bool "Insn: div, divu, rem, remu"
389 default y
390
382config ARC_HAS_RTC 391config ARC_HAS_RTC
383 bool "Local 64-bit r/o cycle counter" 392 bool "Local 64-bit r/o cycle counter"
384 default n 393 default n
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index 46d87310220d..8a27a48304a4 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -36,8 +36,16 @@ cflags-$(atleast_gcc44) += -fsection-anchors
36cflags-$(CONFIG_ARC_HAS_LLSC) += -mlock 36cflags-$(CONFIG_ARC_HAS_LLSC) += -mlock
37cflags-$(CONFIG_ARC_HAS_SWAPE) += -mswape 37cflags-$(CONFIG_ARC_HAS_SWAPE) += -mswape
38 38
39ifdef CONFIG_ISA_ARCV2
40
39ifndef CONFIG_ARC_HAS_LL64 41ifndef CONFIG_ARC_HAS_LL64
40cflags-$(CONFIG_ISA_ARCV2) += -mno-ll64 42cflags-y += -mno-ll64
43endif
44
45ifndef CONFIG_ARC_HAS_DIV_REM
46cflags-y += -mno-div-rem
47endif
48
41endif 49endif
42 50
43cflags-$(CONFIG_ARC_DW2_UNWIND) += -fasynchronous-unwind-tables 51cflags-$(CONFIG_ARC_DW2_UNWIND) += -fasynchronous-unwind-tables
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index 070f58827a5c..c8f57b8449dc 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -89,11 +89,10 @@
89#define ECR_C_BIT_DTLB_LD_MISS 8 89#define ECR_C_BIT_DTLB_LD_MISS 8
90#define ECR_C_BIT_DTLB_ST_MISS 9 90#define ECR_C_BIT_DTLB_ST_MISS 9
91 91
92
93/* Auxiliary registers */ 92/* Auxiliary registers */
94#define AUX_IDENTITY 4 93#define AUX_IDENTITY 4
95#define AUX_INTR_VEC_BASE 0x25 94#define AUX_INTR_VEC_BASE 0x25
96 95#define AUX_NON_VOL 0x5e
97 96
98/* 97/*
99 * Floating Pt Registers 98 * Floating Pt Registers
@@ -240,9 +239,9 @@ struct bcr_extn_xymem {
240 239
241struct bcr_perip { 240struct bcr_perip {
242#ifdef CONFIG_CPU_BIG_ENDIAN 241#ifdef CONFIG_CPU_BIG_ENDIAN
243 unsigned int start:8, pad2:8, sz:8, pad:8; 242 unsigned int start:8, pad2:8, sz:8, ver:8;
244#else 243#else
245 unsigned int pad:8, sz:8, pad2:8, start:8; 244 unsigned int ver:8, sz:8, pad2:8, start:8;
246#endif 245#endif
247}; 246};
248 247
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 03484cb4d16d..87d18ae53115 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -23,33 +23,60 @@
23 23
24#define atomic_set(v, i) (((v)->counter) = (i)) 24#define atomic_set(v, i) (((v)->counter) = (i))
25 25
26#ifdef CONFIG_ISA_ARCV2 26#ifdef CONFIG_ARC_STAR_9000923308
27#define PREFETCHW " prefetchw [%1] \n" 27
28#else 28#define SCOND_FAIL_RETRY_VAR_DEF \
29#define PREFETCHW 29 unsigned int delay = 1, tmp; \
30
31#define SCOND_FAIL_RETRY_ASM \
32 " bz 4f \n" \
33 " ; --- scond fail delay --- \n" \
34 " mov %[tmp], %[delay] \n" /* tmp = delay */ \
35 "2: brne.d %[tmp], 0, 2b \n" /* while (tmp != 0) */ \
36 " sub %[tmp], %[tmp], 1 \n" /* tmp-- */ \
37 " rol %[delay], %[delay] \n" /* delay *= 2 */ \
38 " b 1b \n" /* start over */ \
39 "4: ; --- success --- \n" \
40
41#define SCOND_FAIL_RETRY_VARS \
42 ,[delay] "+&r" (delay),[tmp] "=&r" (tmp) \
43
44#else /* !CONFIG_ARC_STAR_9000923308 */
45
46#define SCOND_FAIL_RETRY_VAR_DEF
47
48#define SCOND_FAIL_RETRY_ASM \
49 " bnz 1b \n" \
50
51#define SCOND_FAIL_RETRY_VARS
52
30#endif 53#endif
31 54
32#define ATOMIC_OP(op, c_op, asm_op) \ 55#define ATOMIC_OP(op, c_op, asm_op) \
33static inline void atomic_##op(int i, atomic_t *v) \ 56static inline void atomic_##op(int i, atomic_t *v) \
34{ \ 57{ \
35 unsigned int temp; \ 58 unsigned int val; \
59 SCOND_FAIL_RETRY_VAR_DEF \
36 \ 60 \
37 __asm__ __volatile__( \ 61 __asm__ __volatile__( \
38 "1: \n" \ 62 "1: llock %[val], [%[ctr]] \n" \
39 PREFETCHW \ 63 " " #asm_op " %[val], %[val], %[i] \n" \
40 " llock %0, [%1] \n" \ 64 " scond %[val], [%[ctr]] \n" \
41 " " #asm_op " %0, %0, %2 \n" \ 65 " \n" \
42 " scond %0, [%1] \n" \ 66 SCOND_FAIL_RETRY_ASM \
43 " bnz 1b \n" \ 67 \
44 : "=&r"(temp) /* Early clobber, to prevent reg reuse */ \ 68 : [val] "=&r" (val) /* Early clobber to prevent reg reuse */ \
45 : "r"(&v->counter), "ir"(i) \ 69 SCOND_FAIL_RETRY_VARS \
70 : [ctr] "r" (&v->counter), /* Not "m": llock only supports reg direct addr mode */ \
71 [i] "ir" (i) \
46 : "cc"); \ 72 : "cc"); \
47} \ 73} \
48 74
49#define ATOMIC_OP_RETURN(op, c_op, asm_op) \ 75#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
50static inline int atomic_##op##_return(int i, atomic_t *v) \ 76static inline int atomic_##op##_return(int i, atomic_t *v) \
51{ \ 77{ \
52 unsigned int temp; \ 78 unsigned int val; \
79 SCOND_FAIL_RETRY_VAR_DEF \
53 \ 80 \
54 /* \ 81 /* \
55 * Explicit full memory barrier needed before/after as \ 82 * Explicit full memory barrier needed before/after as \
@@ -58,19 +85,21 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
58 smp_mb(); \ 85 smp_mb(); \
59 \ 86 \
60 __asm__ __volatile__( \ 87 __asm__ __volatile__( \
61 "1: \n" \ 88 "1: llock %[val], [%[ctr]] \n" \
62 PREFETCHW \ 89 " " #asm_op " %[val], %[val], %[i] \n" \
63 " llock %0, [%1] \n" \ 90 " scond %[val], [%[ctr]] \n" \
64 " " #asm_op " %0, %0, %2 \n" \ 91 " \n" \
65 " scond %0, [%1] \n" \ 92 SCOND_FAIL_RETRY_ASM \
66 " bnz 1b \n" \ 93 \
67 : "=&r"(temp) \ 94 : [val] "=&r" (val) \
68 : "r"(&v->counter), "ir"(i) \ 95 SCOND_FAIL_RETRY_VARS \
96 : [ctr] "r" (&v->counter), \
97 [i] "ir" (i) \
69 : "cc"); \ 98 : "cc"); \
70 \ 99 \
71 smp_mb(); \ 100 smp_mb(); \
72 \ 101 \
73 return temp; \ 102 return val; \
74} 103}
75 104
76#else /* !CONFIG_ARC_HAS_LLSC */ 105#else /* !CONFIG_ARC_HAS_LLSC */
@@ -150,6 +179,9 @@ ATOMIC_OP(and, &=, and)
150#undef ATOMIC_OPS 179#undef ATOMIC_OPS
151#undef ATOMIC_OP_RETURN 180#undef ATOMIC_OP_RETURN
152#undef ATOMIC_OP 181#undef ATOMIC_OP
182#undef SCOND_FAIL_RETRY_VAR_DEF
183#undef SCOND_FAIL_RETRY_ASM
184#undef SCOND_FAIL_RETRY_VARS
153 185
154/** 186/**
155 * __atomic_add_unless - add unless the number is a given value 187 * __atomic_add_unless - add unless the number is a given value
diff --git a/arch/arc/include/asm/ptrace.h b/arch/arc/include/asm/ptrace.h
index 91694ec1ce95..69095da1fcfd 100644
--- a/arch/arc/include/asm/ptrace.h
+++ b/arch/arc/include/asm/ptrace.h
@@ -20,20 +20,20 @@
20struct pt_regs { 20struct pt_regs {
21 21
22 /* Real registers */ 22 /* Real registers */
23 long bta; /* bta_l1, bta_l2, erbta */ 23 unsigned long bta; /* bta_l1, bta_l2, erbta */
24 24
25 long lp_start, lp_end, lp_count; 25 unsigned long lp_start, lp_end, lp_count;
26 26
27 long status32; /* status32_l1, status32_l2, erstatus */ 27 unsigned long status32; /* status32_l1, status32_l2, erstatus */
28 long ret; /* ilink1, ilink2 or eret */ 28 unsigned long ret; /* ilink1, ilink2 or eret */
29 long blink; 29 unsigned long blink;
30 long fp; 30 unsigned long fp;
31 long r26; /* gp */ 31 unsigned long r26; /* gp */
32 32
33 long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0; 33 unsigned long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
34 34
35 long sp; /* user/kernel sp depending on where we came from */ 35 unsigned long sp; /* User/Kernel depending on where we came from */
36 long orig_r0; 36 unsigned long orig_r0;
37 37
38 /* 38 /*
39 * To distinguish bet excp, syscall, irq 39 * To distinguish bet excp, syscall, irq
@@ -55,13 +55,13 @@ struct pt_regs {
55 unsigned long event; 55 unsigned long event;
56 }; 56 };
57 57
58 long user_r25; 58 unsigned long user_r25;
59}; 59};
60#else 60#else
61 61
62struct pt_regs { 62struct pt_regs {
63 63
64 long orig_r0; 64 unsigned long orig_r0;
65 65
66 union { 66 union {
67 struct { 67 struct {
@@ -76,26 +76,26 @@ struct pt_regs {
76 unsigned long event; 76 unsigned long event;
77 }; 77 };
78 78
79 long bta; /* bta_l1, bta_l2, erbta */ 79 unsigned long bta; /* bta_l1, bta_l2, erbta */
80 80
81 long user_r25; 81 unsigned long user_r25;
82 82
83 long r26; /* gp */ 83 unsigned long r26; /* gp */
84 long fp; 84 unsigned long fp;
85 long sp; /* user/kernel sp depending on where we came from */ 85 unsigned long sp; /* user/kernel sp depending on where we came from */
86 86
87 long r12; 87 unsigned long r12;
88 88
89 /*------- Below list auto saved by h/w -----------*/ 89 /*------- Below list auto saved by h/w -----------*/
90 long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11; 90 unsigned long r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11;
91 91
92 long blink; 92 unsigned long blink;
93 long lp_end, lp_start, lp_count; 93 unsigned long lp_end, lp_start, lp_count;
94 94
95 long ei, ldi, jli; 95 unsigned long ei, ldi, jli;
96 96
97 long ret; 97 unsigned long ret;
98 long status32; 98 unsigned long status32;
99}; 99};
100 100
101#endif 101#endif
@@ -103,10 +103,10 @@ struct pt_regs {
103/* Callee saved registers - need to be saved only when you are scheduled out */ 103/* Callee saved registers - need to be saved only when you are scheduled out */
104 104
105struct callee_regs { 105struct callee_regs {
106 long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13; 106 unsigned long r25, r24, r23, r22, r21, r20, r19, r18, r17, r16, r15, r14, r13;
107}; 107};
108 108
109#define instruction_pointer(regs) (unsigned long)((regs)->ret) 109#define instruction_pointer(regs) ((regs)->ret)
110#define profile_pc(regs) instruction_pointer(regs) 110#define profile_pc(regs) instruction_pointer(regs)
111 111
112/* return 1 if user mode or 0 if kernel mode */ 112/* return 1 if user mode or 0 if kernel mode */
@@ -142,7 +142,7 @@ struct callee_regs {
142 142
143static inline long regs_return_value(struct pt_regs *regs) 143static inline long regs_return_value(struct pt_regs *regs)
144{ 144{
145 return regs->r0; 145 return (long)regs->r0;
146} 146}
147 147
148#endif /* !__ASSEMBLY__ */ 148#endif /* !__ASSEMBLY__ */
diff --git a/arch/arc/include/asm/spinlock.h b/arch/arc/include/asm/spinlock.h
index e1651df6a93d..db8c59d1eaeb 100644
--- a/arch/arc/include/asm/spinlock.h
+++ b/arch/arc/include/asm/spinlock.h
@@ -18,9 +18,518 @@
18#define arch_spin_unlock_wait(x) \ 18#define arch_spin_unlock_wait(x) \
19 do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0) 19 do { while (arch_spin_is_locked(x)) cpu_relax(); } while (0)
20 20
21#ifdef CONFIG_ARC_HAS_LLSC
22
23/*
24 * A normal LLOCK/SCOND based system, w/o need for livelock workaround
25 */
26#ifndef CONFIG_ARC_STAR_9000923308
27
21static inline void arch_spin_lock(arch_spinlock_t *lock) 28static inline void arch_spin_lock(arch_spinlock_t *lock)
22{ 29{
23 unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__; 30 unsigned int val;
31
32 smp_mb();
33
34 __asm__ __volatile__(
35 "1: llock %[val], [%[slock]] \n"
36 " breq %[val], %[LOCKED], 1b \n" /* spin while LOCKED */
37 " scond %[LOCKED], [%[slock]] \n" /* acquire */
38 " bnz 1b \n"
39 " \n"
40 : [val] "=&r" (val)
41 : [slock] "r" (&(lock->slock)),
42 [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
43 : "memory", "cc");
44
45 smp_mb();
46}
47
48/* 1 - lock taken successfully */
49static inline int arch_spin_trylock(arch_spinlock_t *lock)
50{
51 unsigned int val, got_it = 0;
52
53 smp_mb();
54
55 __asm__ __volatile__(
56 "1: llock %[val], [%[slock]] \n"
57 " breq %[val], %[LOCKED], 4f \n" /* already LOCKED, just bail */
58 " scond %[LOCKED], [%[slock]] \n" /* acquire */
59 " bnz 1b \n"
60 " mov %[got_it], 1 \n"
61 "4: \n"
62 " \n"
63 : [val] "=&r" (val),
64 [got_it] "+&r" (got_it)
65 : [slock] "r" (&(lock->slock)),
66 [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
67 : "memory", "cc");
68
69 smp_mb();
70
71 return got_it;
72}
73
74static inline void arch_spin_unlock(arch_spinlock_t *lock)
75{
76 smp_mb();
77
78 lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
79
80 smp_mb();
81}
82
83/*
84 * Read-write spinlocks, allowing multiple readers but only one writer.
85 * Unfair locking as Writers could be starved indefinitely by Reader(s)
86 */
87
88static inline void arch_read_lock(arch_rwlock_t *rw)
89{
90 unsigned int val;
91
92 smp_mb();
93
94 /*
95 * zero means writer holds the lock exclusively, deny Reader.
96 * Otherwise grant lock to first/subseq reader
97 *
98 * if (rw->counter > 0) {
99 * rw->counter--;
100 * ret = 1;
101 * }
102 */
103
104 __asm__ __volatile__(
105 "1: llock %[val], [%[rwlock]] \n"
106 " brls %[val], %[WR_LOCKED], 1b\n" /* <= 0: spin while write locked */
107 " sub %[val], %[val], 1 \n" /* reader lock */
108 " scond %[val], [%[rwlock]] \n"
109 " bnz 1b \n"
110 " \n"
111 : [val] "=&r" (val)
112 : [rwlock] "r" (&(rw->counter)),
113 [WR_LOCKED] "ir" (0)
114 : "memory", "cc");
115
116 smp_mb();
117}
118
119/* 1 - lock taken successfully */
120static inline int arch_read_trylock(arch_rwlock_t *rw)
121{
122 unsigned int val, got_it = 0;
123
124 smp_mb();
125
126 __asm__ __volatile__(
127 "1: llock %[val], [%[rwlock]] \n"
128 " brls %[val], %[WR_LOCKED], 4f\n" /* <= 0: already write locked, bail */
129 " sub %[val], %[val], 1 \n" /* counter-- */
130 " scond %[val], [%[rwlock]] \n"
131 " bnz 1b \n" /* retry if collided with someone */
132 " mov %[got_it], 1 \n"
133 " \n"
134 "4: ; --- done --- \n"
135
136 : [val] "=&r" (val),
137 [got_it] "+&r" (got_it)
138 : [rwlock] "r" (&(rw->counter)),
139 [WR_LOCKED] "ir" (0)
140 : "memory", "cc");
141
142 smp_mb();
143
144 return got_it;
145}
146
147static inline void arch_write_lock(arch_rwlock_t *rw)
148{
149 unsigned int val;
150
151 smp_mb();
152
153 /*
154 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
155 * deny writer. Otherwise if unlocked grant to writer
156 * Hence the claim that Linux rwlocks are unfair to writers.
157 * (can be starved for an indefinite time by readers).
158 *
159 * if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
160 * rw->counter = 0;
161 * ret = 1;
162 * }
163 */
164
165 __asm__ __volatile__(
166 "1: llock %[val], [%[rwlock]] \n"
167 " brne %[val], %[UNLOCKED], 1b \n" /* while !UNLOCKED spin */
168 " mov %[val], %[WR_LOCKED] \n"
169 " scond %[val], [%[rwlock]] \n"
170 " bnz 1b \n"
171 " \n"
172 : [val] "=&r" (val)
173 : [rwlock] "r" (&(rw->counter)),
174 [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
175 [WR_LOCKED] "ir" (0)
176 : "memory", "cc");
177
178 smp_mb();
179}
180
181/* 1 - lock taken successfully */
182static inline int arch_write_trylock(arch_rwlock_t *rw)
183{
184 unsigned int val, got_it = 0;
185
186 smp_mb();
187
188 __asm__ __volatile__(
189 "1: llock %[val], [%[rwlock]] \n"
190 " brne %[val], %[UNLOCKED], 4f \n" /* !UNLOCKED, bail */
191 " mov %[val], %[WR_LOCKED] \n"
192 " scond %[val], [%[rwlock]] \n"
193 " bnz 1b \n" /* retry if collided with someone */
194 " mov %[got_it], 1 \n"
195 " \n"
196 "4: ; --- done --- \n"
197
198 : [val] "=&r" (val),
199 [got_it] "+&r" (got_it)
200 : [rwlock] "r" (&(rw->counter)),
201 [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
202 [WR_LOCKED] "ir" (0)
203 : "memory", "cc");
204
205 smp_mb();
206
207 return got_it;
208}
209
210static inline void arch_read_unlock(arch_rwlock_t *rw)
211{
212 unsigned int val;
213
214 smp_mb();
215
216 /*
217 * rw->counter++;
218 */
219 __asm__ __volatile__(
220 "1: llock %[val], [%[rwlock]] \n"
221 " add %[val], %[val], 1 \n"
222 " scond %[val], [%[rwlock]] \n"
223 " bnz 1b \n"
224 " \n"
225 : [val] "=&r" (val)
226 : [rwlock] "r" (&(rw->counter))
227 : "memory", "cc");
228
229 smp_mb();
230}
231
232static inline void arch_write_unlock(arch_rwlock_t *rw)
233{
234 smp_mb();
235
236 rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
237
238 smp_mb();
239}
240
241#else /* CONFIG_ARC_STAR_9000923308 */
242
243/*
244 * HS38x4 could get into a LLOCK/SCOND livelock in case of multiple overlapping
245 * coherency transactions in the SCU. The exclusive line state keeps rotating
246 * among contenting cores leading to a never ending cycle. So break the cycle
247 * by deferring the retry of failed exclusive access (SCOND). The actual delay
248 * needed is function of number of contending cores as well as the unrelated
249 * coherency traffic from other cores. To keep the code simple, start off with
250 * small delay of 1 which would suffice most cases and in case of contention
251 * double the delay. Eventually the delay is sufficient such that the coherency
252 * pipeline is drained, thus a subsequent exclusive access would succeed.
253 */
254
255#define SCOND_FAIL_RETRY_VAR_DEF \
256 unsigned int delay, tmp; \
257
258#define SCOND_FAIL_RETRY_ASM \
259 " ; --- scond fail delay --- \n" \
260 " mov %[tmp], %[delay] \n" /* tmp = delay */ \
261 "2: brne.d %[tmp], 0, 2b \n" /* while (tmp != 0) */ \
262 " sub %[tmp], %[tmp], 1 \n" /* tmp-- */ \
263 " rol %[delay], %[delay] \n" /* delay *= 2 */ \
264 " b 1b \n" /* start over */ \
265 " \n" \
266 "4: ; --- done --- \n" \
267
268#define SCOND_FAIL_RETRY_VARS \
269 ,[delay] "=&r" (delay), [tmp] "=&r" (tmp) \
270
271static inline void arch_spin_lock(arch_spinlock_t *lock)
272{
273 unsigned int val;
274 SCOND_FAIL_RETRY_VAR_DEF;
275
276 smp_mb();
277
278 __asm__ __volatile__(
279 "0: mov %[delay], 1 \n"
280 "1: llock %[val], [%[slock]] \n"
281 " breq %[val], %[LOCKED], 0b \n" /* spin while LOCKED */
282 " scond %[LOCKED], [%[slock]] \n" /* acquire */
283 " bz 4f \n" /* done */
284 " \n"
285 SCOND_FAIL_RETRY_ASM
286
287 : [val] "=&r" (val)
288 SCOND_FAIL_RETRY_VARS
289 : [slock] "r" (&(lock->slock)),
290 [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
291 : "memory", "cc");
292
293 smp_mb();
294}
295
296/* 1 - lock taken successfully */
297static inline int arch_spin_trylock(arch_spinlock_t *lock)
298{
299 unsigned int val, got_it = 0;
300 SCOND_FAIL_RETRY_VAR_DEF;
301
302 smp_mb();
303
304 __asm__ __volatile__(
305 "0: mov %[delay], 1 \n"
306 "1: llock %[val], [%[slock]] \n"
307 " breq %[val], %[LOCKED], 4f \n" /* already LOCKED, just bail */
308 " scond %[LOCKED], [%[slock]] \n" /* acquire */
309 " bz.d 4f \n"
310 " mov.z %[got_it], 1 \n" /* got it */
311 " \n"
312 SCOND_FAIL_RETRY_ASM
313
314 : [val] "=&r" (val),
315 [got_it] "+&r" (got_it)
316 SCOND_FAIL_RETRY_VARS
317 : [slock] "r" (&(lock->slock)),
318 [LOCKED] "r" (__ARCH_SPIN_LOCK_LOCKED__)
319 : "memory", "cc");
320
321 smp_mb();
322
323 return got_it;
324}
325
326static inline void arch_spin_unlock(arch_spinlock_t *lock)
327{
328 smp_mb();
329
330 lock->slock = __ARCH_SPIN_LOCK_UNLOCKED__;
331
332 smp_mb();
333}
334
335/*
336 * Read-write spinlocks, allowing multiple readers but only one writer.
337 * Unfair locking as Writers could be starved indefinitely by Reader(s)
338 */
339
340static inline void arch_read_lock(arch_rwlock_t *rw)
341{
342 unsigned int val;
343 SCOND_FAIL_RETRY_VAR_DEF;
344
345 smp_mb();
346
347 /*
348 * zero means writer holds the lock exclusively, deny Reader.
349 * Otherwise grant lock to first/subseq reader
350 *
351 * if (rw->counter > 0) {
352 * rw->counter--;
353 * ret = 1;
354 * }
355 */
356
357 __asm__ __volatile__(
358 "0: mov %[delay], 1 \n"
359 "1: llock %[val], [%[rwlock]] \n"
360 " brls %[val], %[WR_LOCKED], 0b\n" /* <= 0: spin while write locked */
361 " sub %[val], %[val], 1 \n" /* reader lock */
362 " scond %[val], [%[rwlock]] \n"
363 " bz 4f \n" /* done */
364 " \n"
365 SCOND_FAIL_RETRY_ASM
366
367 : [val] "=&r" (val)
368 SCOND_FAIL_RETRY_VARS
369 : [rwlock] "r" (&(rw->counter)),
370 [WR_LOCKED] "ir" (0)
371 : "memory", "cc");
372
373 smp_mb();
374}
375
376/* 1 - lock taken successfully */
377static inline int arch_read_trylock(arch_rwlock_t *rw)
378{
379 unsigned int val, got_it = 0;
380 SCOND_FAIL_RETRY_VAR_DEF;
381
382 smp_mb();
383
384 __asm__ __volatile__(
385 "0: mov %[delay], 1 \n"
386 "1: llock %[val], [%[rwlock]] \n"
387 " brls %[val], %[WR_LOCKED], 4f\n" /* <= 0: already write locked, bail */
388 " sub %[val], %[val], 1 \n" /* counter-- */
389 " scond %[val], [%[rwlock]] \n"
390 " bz.d 4f \n"
391 " mov.z %[got_it], 1 \n" /* got it */
392 " \n"
393 SCOND_FAIL_RETRY_ASM
394
395 : [val] "=&r" (val),
396 [got_it] "+&r" (got_it)
397 SCOND_FAIL_RETRY_VARS
398 : [rwlock] "r" (&(rw->counter)),
399 [WR_LOCKED] "ir" (0)
400 : "memory", "cc");
401
402 smp_mb();
403
404 return got_it;
405}
406
407static inline void arch_write_lock(arch_rwlock_t *rw)
408{
409 unsigned int val;
410 SCOND_FAIL_RETRY_VAR_DEF;
411
412 smp_mb();
413
414 /*
415 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
416 * deny writer. Otherwise if unlocked grant to writer
417 * Hence the claim that Linux rwlocks are unfair to writers.
418 * (can be starved for an indefinite time by readers).
419 *
420 * if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
421 * rw->counter = 0;
422 * ret = 1;
423 * }
424 */
425
426 __asm__ __volatile__(
427 "0: mov %[delay], 1 \n"
428 "1: llock %[val], [%[rwlock]] \n"
429 " brne %[val], %[UNLOCKED], 0b \n" /* while !UNLOCKED spin */
430 " mov %[val], %[WR_LOCKED] \n"
431 " scond %[val], [%[rwlock]] \n"
432 " bz 4f \n"
433 " \n"
434 SCOND_FAIL_RETRY_ASM
435
436 : [val] "=&r" (val)
437 SCOND_FAIL_RETRY_VARS
438 : [rwlock] "r" (&(rw->counter)),
439 [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
440 [WR_LOCKED] "ir" (0)
441 : "memory", "cc");
442
443 smp_mb();
444}
445
446/* 1 - lock taken successfully */
447static inline int arch_write_trylock(arch_rwlock_t *rw)
448{
449 unsigned int val, got_it = 0;
450 SCOND_FAIL_RETRY_VAR_DEF;
451
452 smp_mb();
453
454 __asm__ __volatile__(
455 "0: mov %[delay], 1 \n"
456 "1: llock %[val], [%[rwlock]] \n"
457 " brne %[val], %[UNLOCKED], 4f \n" /* !UNLOCKED, bail */
458 " mov %[val], %[WR_LOCKED] \n"
459 " scond %[val], [%[rwlock]] \n"
460 " bz.d 4f \n"
461 " mov.z %[got_it], 1 \n" /* got it */
462 " \n"
463 SCOND_FAIL_RETRY_ASM
464
465 : [val] "=&r" (val),
466 [got_it] "+&r" (got_it)
467 SCOND_FAIL_RETRY_VARS
468 : [rwlock] "r" (&(rw->counter)),
469 [UNLOCKED] "ir" (__ARCH_RW_LOCK_UNLOCKED__),
470 [WR_LOCKED] "ir" (0)
471 : "memory", "cc");
472
473 smp_mb();
474
475 return got_it;
476}
477
478static inline void arch_read_unlock(arch_rwlock_t *rw)
479{
480 unsigned int val;
481
482 smp_mb();
483
484 /*
485 * rw->counter++;
486 */
487 __asm__ __volatile__(
488 "1: llock %[val], [%[rwlock]] \n"
489 " add %[val], %[val], 1 \n"
490 " scond %[val], [%[rwlock]] \n"
491 " bnz 1b \n"
492 " \n"
493 : [val] "=&r" (val)
494 : [rwlock] "r" (&(rw->counter))
495 : "memory", "cc");
496
497 smp_mb();
498}
499
500static inline void arch_write_unlock(arch_rwlock_t *rw)
501{
502 unsigned int val;
503
504 smp_mb();
505
506 /*
507 * rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
508 */
509 __asm__ __volatile__(
510 "1: llock %[val], [%[rwlock]] \n"
511 " scond %[UNLOCKED], [%[rwlock]]\n"
512 " bnz 1b \n"
513 " \n"
514 : [val] "=&r" (val)
515 : [rwlock] "r" (&(rw->counter)),
516 [UNLOCKED] "r" (__ARCH_RW_LOCK_UNLOCKED__)
517 : "memory", "cc");
518
519 smp_mb();
520}
521
522#undef SCOND_FAIL_RETRY_VAR_DEF
523#undef SCOND_FAIL_RETRY_ASM
524#undef SCOND_FAIL_RETRY_VARS
525
526#endif /* CONFIG_ARC_STAR_9000923308 */
527
528#else /* !CONFIG_ARC_HAS_LLSC */
529
530static inline void arch_spin_lock(arch_spinlock_t *lock)
531{
532 unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;
24 533
25 /* 534 /*
26 * This smp_mb() is technically superfluous, we only need the one 535 * This smp_mb() is technically superfluous, we only need the one
@@ -33,7 +542,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
33 __asm__ __volatile__( 542 __asm__ __volatile__(
34 "1: ex %0, [%1] \n" 543 "1: ex %0, [%1] \n"
35 " breq %0, %2, 1b \n" 544 " breq %0, %2, 1b \n"
36 : "+&r" (tmp) 545 : "+&r" (val)
37 : "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__) 546 : "r"(&(lock->slock)), "ir"(__ARCH_SPIN_LOCK_LOCKED__)
38 : "memory"); 547 : "memory");
39 548
@@ -48,26 +557,27 @@ static inline void arch_spin_lock(arch_spinlock_t *lock)
48 smp_mb(); 557 smp_mb();
49} 558}
50 559
560/* 1 - lock taken successfully */
51static inline int arch_spin_trylock(arch_spinlock_t *lock) 561static inline int arch_spin_trylock(arch_spinlock_t *lock)
52{ 562{
53 unsigned int tmp = __ARCH_SPIN_LOCK_LOCKED__; 563 unsigned int val = __ARCH_SPIN_LOCK_LOCKED__;
54 564
55 smp_mb(); 565 smp_mb();
56 566
57 __asm__ __volatile__( 567 __asm__ __volatile__(
58 "1: ex %0, [%1] \n" 568 "1: ex %0, [%1] \n"
59 : "+r" (tmp) 569 : "+r" (val)
60 : "r"(&(lock->slock)) 570 : "r"(&(lock->slock))
61 : "memory"); 571 : "memory");
62 572
63 smp_mb(); 573 smp_mb();
64 574
65 return (tmp == __ARCH_SPIN_LOCK_UNLOCKED__); 575 return (val == __ARCH_SPIN_LOCK_UNLOCKED__);
66} 576}
67 577
68static inline void arch_spin_unlock(arch_spinlock_t *lock) 578static inline void arch_spin_unlock(arch_spinlock_t *lock)
69{ 579{
70 unsigned int tmp = __ARCH_SPIN_LOCK_UNLOCKED__; 580 unsigned int val = __ARCH_SPIN_LOCK_UNLOCKED__;
71 581
72 /* 582 /*
73 * RELEASE barrier: given the instructions avail on ARCv2, full barrier 583 * RELEASE barrier: given the instructions avail on ARCv2, full barrier
@@ -77,7 +587,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
77 587
78 __asm__ __volatile__( 588 __asm__ __volatile__(
79 " ex %0, [%1] \n" 589 " ex %0, [%1] \n"
80 : "+r" (tmp) 590 : "+r" (val)
81 : "r"(&(lock->slock)) 591 : "r"(&(lock->slock))
82 : "memory"); 592 : "memory");
83 593
@@ -90,19 +600,12 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
90 600
91/* 601/*
92 * Read-write spinlocks, allowing multiple readers but only one writer. 602 * Read-write spinlocks, allowing multiple readers but only one writer.
603 * Unfair locking as Writers could be starved indefinitely by Reader(s)
93 * 604 *
94 * The spinlock itself is contained in @counter and access to it is 605 * The spinlock itself is contained in @counter and access to it is
95 * serialized with @lock_mutex. 606 * serialized with @lock_mutex.
96 *
97 * Unfair locking as Writers could be starved indefinitely by Reader(s)
98 */ 607 */
99 608
100/* Would read_trylock() succeed? */
101#define arch_read_can_lock(x) ((x)->counter > 0)
102
103/* Would write_trylock() succeed? */
104#define arch_write_can_lock(x) ((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)
105
106/* 1 - lock taken successfully */ 609/* 1 - lock taken successfully */
107static inline int arch_read_trylock(arch_rwlock_t *rw) 610static inline int arch_read_trylock(arch_rwlock_t *rw)
108{ 611{
@@ -173,6 +676,11 @@ static inline void arch_write_unlock(arch_rwlock_t *rw)
173 arch_spin_unlock(&(rw->lock_mutex)); 676 arch_spin_unlock(&(rw->lock_mutex));
174} 677}
175 678
679#endif
680
681#define arch_read_can_lock(x) ((x)->counter > 0)
682#define arch_write_can_lock(x) ((x)->counter == __ARCH_RW_LOCK_UNLOCKED__)
683
176#define arch_read_lock_flags(lock, flags) arch_read_lock(lock) 684#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
177#define arch_write_lock_flags(lock, flags) arch_write_lock(lock) 685#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
178 686
diff --git a/arch/arc/include/asm/spinlock_types.h b/arch/arc/include/asm/spinlock_types.h
index 662627ced4f2..4e1ef5f650c6 100644
--- a/arch/arc/include/asm/spinlock_types.h
+++ b/arch/arc/include/asm/spinlock_types.h
@@ -26,7 +26,9 @@ typedef struct {
26 */ 26 */
27typedef struct { 27typedef struct {
28 volatile unsigned int counter; 28 volatile unsigned int counter;
29#ifndef CONFIG_ARC_HAS_LLSC
29 arch_spinlock_t lock_mutex; 30 arch_spinlock_t lock_mutex;
31#endif
30} arch_rwlock_t; 32} arch_rwlock_t;
31 33
32#define __ARCH_RW_LOCK_UNLOCKED__ 0x01000000 34#define __ARCH_RW_LOCK_UNLOCKED__ 0x01000000
diff --git a/arch/arc/include/uapi/asm/ptrace.h b/arch/arc/include/uapi/asm/ptrace.h
index 76a7739aab1c..0b3ef63d4a03 100644
--- a/arch/arc/include/uapi/asm/ptrace.h
+++ b/arch/arc/include/uapi/asm/ptrace.h
@@ -32,20 +32,20 @@
32*/ 32*/
33struct user_regs_struct { 33struct user_regs_struct {
34 34
35 long pad; 35 unsigned long pad;
36 struct { 36 struct {
37 long bta, lp_start, lp_end, lp_count; 37 unsigned long bta, lp_start, lp_end, lp_count;
38 long status32, ret, blink, fp, gp; 38 unsigned long status32, ret, blink, fp, gp;
39 long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0; 39 unsigned long r12, r11, r10, r9, r8, r7, r6, r5, r4, r3, r2, r1, r0;
40 long sp; 40 unsigned long sp;
41 } scratch; 41 } scratch;
42 long pad2; 42 unsigned long pad2;
43 struct { 43 struct {
44 long r25, r24, r23, r22, r21, r20; 44 unsigned long r25, r24, r23, r22, r21, r20;
45 long r19, r18, r17, r16, r15, r14, r13; 45 unsigned long r19, r18, r17, r16, r15, r14, r13;
46 } callee; 46 } callee;
47 long efa; /* break pt addr, for break points in delay slots */ 47 unsigned long efa; /* break pt addr, for break points in delay slots */
48 long stop_pc; /* give dbg stop_pc after ensuring brkpt trap */ 48 unsigned long stop_pc; /* give dbg stop_pc after ensuring brkpt trap */
49}; 49};
50#endif /* !__ASSEMBLY__ */ 50#endif /* !__ASSEMBLY__ */
51 51
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index 18cc01591c96..cabde9dc0696 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -47,6 +47,7 @@ static void read_arc_build_cfg_regs(void)
47 struct bcr_perip uncached_space; 47 struct bcr_perip uncached_space;
48 struct bcr_generic bcr; 48 struct bcr_generic bcr;
49 struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()]; 49 struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
50 unsigned long perip_space;
50 FIX_PTR(cpu); 51 FIX_PTR(cpu);
51 52
52 READ_BCR(AUX_IDENTITY, cpu->core); 53 READ_BCR(AUX_IDENTITY, cpu->core);
@@ -56,7 +57,12 @@ static void read_arc_build_cfg_regs(void)
56 cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE); 57 cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE);
57 58
58 READ_BCR(ARC_REG_D_UNCACH_BCR, uncached_space); 59 READ_BCR(ARC_REG_D_UNCACH_BCR, uncached_space);
59 BUG_ON((uncached_space.start << 24) != ARC_UNCACHED_ADDR_SPACE); 60 if (uncached_space.ver < 3)
61 perip_space = uncached_space.start << 24;
62 else
63 perip_space = read_aux_reg(AUX_NON_VOL) & 0xF0000000;
64
65 BUG_ON(perip_space != ARC_UNCACHED_ADDR_SPACE);
60 66
61 READ_BCR(ARC_REG_MUL_BCR, cpu->extn_mpy); 67 READ_BCR(ARC_REG_MUL_BCR, cpu->extn_mpy);
62 68
@@ -330,6 +336,10 @@ static void arc_chk_core_config(void)
330 pr_warn("CONFIG_ARC_FPU_SAVE_RESTORE needed for working apps\n"); 336 pr_warn("CONFIG_ARC_FPU_SAVE_RESTORE needed for working apps\n");
331 else if (!cpu->extn.fpu_dp && fpu_enabled) 337 else if (!cpu->extn.fpu_dp && fpu_enabled)
332 panic("FPU non-existent, disable CONFIG_ARC_FPU_SAVE_RESTORE\n"); 338 panic("FPU non-existent, disable CONFIG_ARC_FPU_SAVE_RESTORE\n");
339
340 if (is_isa_arcv2() && IS_ENABLED(CONFIG_SMP) && cpu->isa.atomic &&
341 !IS_ENABLED(CONFIG_ARC_STAR_9000923308))
342 panic("llock/scond livelock workaround missing\n");
333} 343}
334 344
335/* 345/*
diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c
index 3364d2bbc515..4294761a2b3e 100644
--- a/arch/arc/kernel/time.c
+++ b/arch/arc/kernel/time.c
@@ -203,34 +203,24 @@ static int arc_clkevent_set_next_event(unsigned long delta,
203 return 0; 203 return 0;
204} 204}
205 205
206static void arc_clkevent_set_mode(enum clock_event_mode mode, 206static int arc_clkevent_set_periodic(struct clock_event_device *dev)
207 struct clock_event_device *dev)
208{ 207{
209 switch (mode) { 208 /*
210 case CLOCK_EVT_MODE_PERIODIC: 209 * At X Hz, 1 sec = 1000ms -> X cycles;
211 /* 210 * 10ms -> X / 100 cycles
212 * At X Hz, 1 sec = 1000ms -> X cycles; 211 */
213 * 10ms -> X / 100 cycles 212 arc_timer_event_setup(arc_get_core_freq() / HZ);
214 */ 213 return 0;
215 arc_timer_event_setup(arc_get_core_freq() / HZ);
216 break;
217 case CLOCK_EVT_MODE_ONESHOT:
218 break;
219 default:
220 break;
221 }
222
223 return;
224} 214}
225 215
226static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = { 216static DEFINE_PER_CPU(struct clock_event_device, arc_clockevent_device) = {
227 .name = "ARC Timer0", 217 .name = "ARC Timer0",
228 .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC, 218 .features = CLOCK_EVT_FEAT_ONESHOT |
229 .mode = CLOCK_EVT_MODE_UNUSED, 219 CLOCK_EVT_FEAT_PERIODIC,
230 .rating = 300, 220 .rating = 300,
231 .irq = TIMER0_IRQ, /* hardwired, no need for resources */ 221 .irq = TIMER0_IRQ, /* hardwired, no need for resources */
232 .set_next_event = arc_clkevent_set_next_event, 222 .set_next_event = arc_clkevent_set_next_event,
233 .set_mode = arc_clkevent_set_mode, 223 .set_state_periodic = arc_clkevent_set_periodic,
234}; 224};
235 225
236static irqreturn_t timer_irq_handler(int irq, void *dev_id) 226static irqreturn_t timer_irq_handler(int irq, void *dev_id)
@@ -240,7 +230,7 @@ static irqreturn_t timer_irq_handler(int irq, void *dev_id)
240 * irq_set_chip_and_handler() asked for handle_percpu_devid_irq() 230 * irq_set_chip_and_handler() asked for handle_percpu_devid_irq()
241 */ 231 */
242 struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device); 232 struct clock_event_device *evt = this_cpu_ptr(&arc_clockevent_device);
243 int irq_reenable = evt->mode == CLOCK_EVT_MODE_PERIODIC; 233 int irq_reenable = clockevent_state_periodic(evt);
244 234
245 /* 235 /*
246 * Any write to CTRL reg ACks the interrupt, we rewrite the 236 * Any write to CTRL reg ACks the interrupt, we rewrite the
diff --git a/arch/arc/lib/memcpy-archs.S b/arch/arc/lib/memcpy-archs.S
index 1b2b3acfed52..0cab0b8a57c5 100644
--- a/arch/arc/lib/memcpy-archs.S
+++ b/arch/arc/lib/memcpy-archs.S
@@ -206,7 +206,7 @@ unalignedOffby3:
206 ld.ab r6, [r1, 4] 206 ld.ab r6, [r1, 4]
207 prefetch [r1, 28] ;Prefetch the next read location 207 prefetch [r1, 28] ;Prefetch the next read location
208 ld.ab r8, [r1,4] 208 ld.ab r8, [r1,4]
209 prefetch [r3, 32] ;Prefetch the next write location 209 prefetchw [r3, 32] ;Prefetch the next write location
210 210
211 SHIFT_1 (r7, r6, 8) 211 SHIFT_1 (r7, r6, 8)
212 or r7, r7, r5 212 or r7, r7, r5
diff --git a/arch/arc/lib/memset-archs.S b/arch/arc/lib/memset-archs.S
index 92d573c734b5..365b18364815 100644
--- a/arch/arc/lib/memset-archs.S
+++ b/arch/arc/lib/memset-archs.S
@@ -10,12 +10,6 @@
10 10
11#undef PREALLOC_NOT_AVAIL 11#undef PREALLOC_NOT_AVAIL
12 12
13#ifdef PREALLOC_NOT_AVAIL
14#define PREWRITE(A,B) prefetchw [(A),(B)]
15#else
16#define PREWRITE(A,B) prealloc [(A),(B)]
17#endif
18
19ENTRY(memset) 13ENTRY(memset)
20 prefetchw [r0] ; Prefetch the write location 14 prefetchw [r0] ; Prefetch the write location
21 mov.f 0, r2 15 mov.f 0, r2
@@ -51,9 +45,15 @@ ENTRY(memset)
51 45
52;;; Convert len to Dwords, unfold x8 46;;; Convert len to Dwords, unfold x8
53 lsr.f lp_count, lp_count, 6 47 lsr.f lp_count, lp_count, 6
48
54 lpnz @.Lset64bytes 49 lpnz @.Lset64bytes
55 ;; LOOP START 50 ;; LOOP START
56 PREWRITE(r3, 64) ;Prefetch the next write location 51#ifdef PREALLOC_NOT_AVAIL
52 prefetchw [r3, 64] ;Prefetch the next write location
53#else
54 prealloc [r3, 64]
55#endif
56#ifdef CONFIG_ARC_HAS_LL64
57 std.ab r4, [r3, 8] 57 std.ab r4, [r3, 8]
58 std.ab r4, [r3, 8] 58 std.ab r4, [r3, 8]
59 std.ab r4, [r3, 8] 59 std.ab r4, [r3, 8]
@@ -62,16 +62,45 @@ ENTRY(memset)
62 std.ab r4, [r3, 8] 62 std.ab r4, [r3, 8]
63 std.ab r4, [r3, 8] 63 std.ab r4, [r3, 8]
64 std.ab r4, [r3, 8] 64 std.ab r4, [r3, 8]
65#else
66 st.ab r4, [r3, 4]
67 st.ab r4, [r3, 4]
68 st.ab r4, [r3, 4]
69 st.ab r4, [r3, 4]
70 st.ab r4, [r3, 4]
71 st.ab r4, [r3, 4]
72 st.ab r4, [r3, 4]
73 st.ab r4, [r3, 4]
74 st.ab r4, [r3, 4]
75 st.ab r4, [r3, 4]
76 st.ab r4, [r3, 4]
77 st.ab r4, [r3, 4]
78 st.ab r4, [r3, 4]
79 st.ab r4, [r3, 4]
80 st.ab r4, [r3, 4]
81 st.ab r4, [r3, 4]
82#endif
65.Lset64bytes: 83.Lset64bytes:
66 84
67 lsr.f lp_count, r2, 5 ;Last remaining max 124 bytes 85 lsr.f lp_count, r2, 5 ;Last remaining max 124 bytes
68 lpnz .Lset32bytes 86 lpnz .Lset32bytes
69 ;; LOOP START 87 ;; LOOP START
70 prefetchw [r3, 32] ;Prefetch the next write location 88 prefetchw [r3, 32] ;Prefetch the next write location
89#ifdef CONFIG_ARC_HAS_LL64
71 std.ab r4, [r3, 8] 90 std.ab r4, [r3, 8]
72 std.ab r4, [r3, 8] 91 std.ab r4, [r3, 8]
73 std.ab r4, [r3, 8] 92 std.ab r4, [r3, 8]
74 std.ab r4, [r3, 8] 93 std.ab r4, [r3, 8]
94#else
95 st.ab r4, [r3, 4]
96 st.ab r4, [r3, 4]
97 st.ab r4, [r3, 4]
98 st.ab r4, [r3, 4]
99 st.ab r4, [r3, 4]
100 st.ab r4, [r3, 4]
101 st.ab r4, [r3, 4]
102 st.ab r4, [r3, 4]
103#endif
75.Lset32bytes: 104.Lset32bytes:
76 105
77 and.f lp_count, r2, 0x1F ;Last remaining 31 bytes 106 and.f lp_count, r2, 0x1F ;Last remaining 31 bytes
diff --git a/arch/arc/plat-axs10x/axs10x.c b/arch/arc/plat-axs10x/axs10x.c
index 99f7da513a48..e7769c3ab5f2 100644
--- a/arch/arc/plat-axs10x/axs10x.c
+++ b/arch/arc/plat-axs10x/axs10x.c
@@ -389,6 +389,21 @@ axs103_set_freq(unsigned int id, unsigned int fd, unsigned int od)
389 389
390static void __init axs103_early_init(void) 390static void __init axs103_early_init(void)
391{ 391{
392 /*
393 * AXS103 configurations for SMP/QUAD configurations share device tree
394 * which defaults to 90 MHz. However recent failures of Quad config
395 * revealed P&R timing violations so clamp it down to safe 50 MHz
396 * Instead of duplicating defconfig/DT for SMP/QUAD, add a small hack
397 *
398 * This hack is really hacky as of now. Fix it properly by getting the
399 * number of cores as return value of platform's early SMP callback
400 */
401#ifdef CONFIG_ARC_MCIP
402 unsigned int num_cores = (read_aux_reg(ARC_REG_MCIP_BCR) >> 16) & 0x3F;
403 if (num_cores > 2)
404 arc_set_core_freq(50 * 1000000);
405#endif
406
392 switch (arc_get_core_freq()/1000000) { 407 switch (arc_get_core_freq()/1000000) {
393 case 33: 408 case 33:
394 axs103_set_freq(1, 1, 1); 409 axs103_set_freq(1, 1, 1);
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index 8f1e25bcecbd..1e29ccf77ea2 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -116,7 +116,7 @@
116 ranges = <0 0x2000 0x2000>; 116 ranges = <0 0x2000 0x2000>;
117 117
118 scm_conf: scm_conf@0 { 118 scm_conf: scm_conf@0 {
119 compatible = "syscon"; 119 compatible = "syscon", "simple-bus";
120 reg = <0x0 0x1400>; 120 reg = <0x0 0x1400>;
121 #address-cells = <1>; 121 #address-cells = <1>;
122 #size-cells = <1>; 122 #size-cells = <1>;
@@ -1140,6 +1140,7 @@
1140 ctrl-module = <&omap_control_sata>; 1140 ctrl-module = <&omap_control_sata>;
1141 clocks = <&sys_clkin1>, <&sata_ref_clk>; 1141 clocks = <&sys_clkin1>, <&sata_ref_clk>;
1142 clock-names = "sysclk", "refclk"; 1142 clock-names = "sysclk", "refclk";
1143 syscon-pllreset = <&scm_conf 0x3fc>;
1143 #phy-cells = <0>; 1144 #phy-cells = <0>;
1144 }; 1145 };
1145 1146
diff --git a/arch/arm/boot/dts/exynos3250.dtsi b/arch/arm/boot/dts/exynos3250.dtsi
index d7201333e3bc..2db99433e17f 100644
--- a/arch/arm/boot/dts/exynos3250.dtsi
+++ b/arch/arm/boot/dts/exynos3250.dtsi
@@ -138,8 +138,8 @@
138 138
139 mipi_phy: video-phy@10020710 { 139 mipi_phy: video-phy@10020710 {
140 compatible = "samsung,s5pv210-mipi-video-phy"; 140 compatible = "samsung,s5pv210-mipi-video-phy";
141 reg = <0x10020710 8>;
142 #phy-cells = <1>; 141 #phy-cells = <1>;
142 syscon = <&pmu_system_controller>;
143 }; 143 };
144 144
145 pd_cam: cam-power-domain@10023C00 { 145 pd_cam: cam-power-domain@10023C00 {
diff --git a/arch/arm/boot/dts/exynos4210-origen.dts b/arch/arm/boot/dts/exynos4210-origen.dts
index e0abfc3324d1..e050d85cdacd 100644
--- a/arch/arm/boot/dts/exynos4210-origen.dts
+++ b/arch/arm/boot/dts/exynos4210-origen.dts
@@ -127,6 +127,10 @@
127 }; 127 };
128}; 128};
129 129
130&cpu0 {
131 cpu0-supply = <&buck1_reg>;
132};
133
130&fimd { 134&fimd {
131 pinctrl-0 = <&lcd_en &lcd_clk &lcd_data24 &pwm0_out>; 135 pinctrl-0 = <&lcd_en &lcd_clk &lcd_data24 &pwm0_out>;
132 pinctrl-names = "default"; 136 pinctrl-names = "default";
diff --git a/arch/arm/boot/dts/exynos4210-trats.dts b/arch/arm/boot/dts/exynos4210-trats.dts
index 98f3ce65cb9a..ba34886f8b65 100644
--- a/arch/arm/boot/dts/exynos4210-trats.dts
+++ b/arch/arm/boot/dts/exynos4210-trats.dts
@@ -188,6 +188,10 @@
188 }; 188 };
189}; 189};
190 190
191&cpu0 {
192 cpu0-supply = <&varm_breg>;
193};
194
191&dsi_0 { 195&dsi_0 {
192 vddcore-supply = <&vusb_reg>; 196 vddcore-supply = <&vusb_reg>;
193 vddio-supply = <&vmipi_reg>; 197 vddio-supply = <&vmipi_reg>;
diff --git a/arch/arm/boot/dts/exynos4210-universal_c210.dts b/arch/arm/boot/dts/exynos4210-universal_c210.dts
index d4f2b11319dd..775892b2cc6a 100644
--- a/arch/arm/boot/dts/exynos4210-universal_c210.dts
+++ b/arch/arm/boot/dts/exynos4210-universal_c210.dts
@@ -548,6 +548,10 @@
548 }; 548 };
549}; 549};
550 550
551&cpu0 {
552 cpu0-supply = <&vdd_arm_reg>;
553};
554
551&pinctrl_1 { 555&pinctrl_1 {
552 hdmi_hpd: hdmi-hpd { 556 hdmi_hpd: hdmi-hpd {
553 samsung,pins = "gpx3-7"; 557 samsung,pins = "gpx3-7";
diff --git a/arch/arm/boot/dts/exynos4210.dtsi b/arch/arm/boot/dts/exynos4210.dtsi
index 10d3c173396e..3e5ba665d200 100644
--- a/arch/arm/boot/dts/exynos4210.dtsi
+++ b/arch/arm/boot/dts/exynos4210.dtsi
@@ -40,6 +40,18 @@
40 device_type = "cpu"; 40 device_type = "cpu";
41 compatible = "arm,cortex-a9"; 41 compatible = "arm,cortex-a9";
42 reg = <0x900>; 42 reg = <0x900>;
43 clocks = <&clock CLK_ARM_CLK>;
44 clock-names = "cpu";
45 clock-latency = <160000>;
46
47 operating-points = <
48 1200000 1250000
49 1000000 1150000
50 800000 1075000
51 500000 975000
52 400000 975000
53 200000 950000
54 >;
43 cooling-min-level = <4>; 55 cooling-min-level = <4>;
44 cooling-max-level = <2>; 56 cooling-max-level = <2>;
45 #cooling-cells = <2>; /* min followed by max */ 57 #cooling-cells = <2>; /* min followed by max */
diff --git a/arch/arm/boot/dts/imx25-pdk.dts b/arch/arm/boot/dts/imx25-pdk.dts
index dd45e6971bc3..9351296356dc 100644
--- a/arch/arm/boot/dts/imx25-pdk.dts
+++ b/arch/arm/boot/dts/imx25-pdk.dts
@@ -10,6 +10,7 @@
10 */ 10 */
11 11
12/dts-v1/; 12/dts-v1/;
13#include <dt-bindings/gpio/gpio.h>
13#include <dt-bindings/input/input.h> 14#include <dt-bindings/input/input.h>
14#include "imx25.dtsi" 15#include "imx25.dtsi"
15 16
@@ -114,8 +115,8 @@
114&esdhc1 { 115&esdhc1 {
115 pinctrl-names = "default"; 116 pinctrl-names = "default";
116 pinctrl-0 = <&pinctrl_esdhc1>; 117 pinctrl-0 = <&pinctrl_esdhc1>;
117 cd-gpios = <&gpio2 1 0>; 118 cd-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>;
118 wp-gpios = <&gpio2 0 0>; 119 wp-gpios = <&gpio2 0 GPIO_ACTIVE_HIGH>;
119 status = "okay"; 120 status = "okay";
120}; 121};
121 122
diff --git a/arch/arm/boot/dts/imx35.dtsi b/arch/arm/boot/dts/imx35.dtsi
index b6478e97d6a7..e6540b5cfa4c 100644
--- a/arch/arm/boot/dts/imx35.dtsi
+++ b/arch/arm/boot/dts/imx35.dtsi
@@ -286,8 +286,8 @@
286 can1: can@53fe4000 { 286 can1: can@53fe4000 {
287 compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan"; 287 compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan";
288 reg = <0x53fe4000 0x1000>; 288 reg = <0x53fe4000 0x1000>;
289 clocks = <&clks 33>; 289 clocks = <&clks 33>, <&clks 33>;
290 clock-names = "ipg"; 290 clock-names = "ipg", "per";
291 interrupts = <43>; 291 interrupts = <43>;
292 status = "disabled"; 292 status = "disabled";
293 }; 293 };
@@ -295,8 +295,8 @@
295 can2: can@53fe8000 { 295 can2: can@53fe8000 {
296 compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan"; 296 compatible = "fsl,imx35-flexcan", "fsl,p1010-flexcan";
297 reg = <0x53fe8000 0x1000>; 297 reg = <0x53fe8000 0x1000>;
298 clocks = <&clks 34>; 298 clocks = <&clks 34>, <&clks 34>;
299 clock-names = "ipg"; 299 clock-names = "ipg", "per";
300 interrupts = <44>; 300 interrupts = <44>;
301 status = "disabled"; 301 status = "disabled";
302 }; 302 };
diff --git a/arch/arm/boot/dts/imx51-apf51dev.dts b/arch/arm/boot/dts/imx51-apf51dev.dts
index 93d3ea12328c..0f3fe29b816e 100644
--- a/arch/arm/boot/dts/imx51-apf51dev.dts
+++ b/arch/arm/boot/dts/imx51-apf51dev.dts
@@ -98,7 +98,7 @@
98&esdhc1 { 98&esdhc1 {
99 pinctrl-names = "default"; 99 pinctrl-names = "default";
100 pinctrl-0 = <&pinctrl_esdhc1>; 100 pinctrl-0 = <&pinctrl_esdhc1>;
101 cd-gpios = <&gpio2 29 GPIO_ACTIVE_HIGH>; 101 cd-gpios = <&gpio2 29 GPIO_ACTIVE_LOW>;
102 bus-width = <4>; 102 bus-width = <4>;
103 status = "okay"; 103 status = "okay";
104}; 104};
diff --git a/arch/arm/boot/dts/imx53-ard.dts b/arch/arm/boot/dts/imx53-ard.dts
index e9337ad52f59..3bc18835fb4b 100644
--- a/arch/arm/boot/dts/imx53-ard.dts
+++ b/arch/arm/boot/dts/imx53-ard.dts
@@ -103,8 +103,8 @@
103&esdhc1 { 103&esdhc1 {
104 pinctrl-names = "default"; 104 pinctrl-names = "default";
105 pinctrl-0 = <&pinctrl_esdhc1>; 105 pinctrl-0 = <&pinctrl_esdhc1>;
106 cd-gpios = <&gpio1 1 0>; 106 cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
107 wp-gpios = <&gpio1 9 0>; 107 wp-gpios = <&gpio1 9 GPIO_ACTIVE_HIGH>;
108 status = "okay"; 108 status = "okay";
109}; 109};
110 110
diff --git a/arch/arm/boot/dts/imx53-m53evk.dts b/arch/arm/boot/dts/imx53-m53evk.dts
index d0e0f57eb432..53f40885c530 100644
--- a/arch/arm/boot/dts/imx53-m53evk.dts
+++ b/arch/arm/boot/dts/imx53-m53evk.dts
@@ -124,8 +124,8 @@
124&esdhc1 { 124&esdhc1 {
125 pinctrl-names = "default"; 125 pinctrl-names = "default";
126 pinctrl-0 = <&pinctrl_esdhc1>; 126 pinctrl-0 = <&pinctrl_esdhc1>;
127 cd-gpios = <&gpio1 1 0>; 127 cd-gpios = <&gpio1 1 GPIO_ACTIVE_LOW>;
128 wp-gpios = <&gpio1 9 0>; 128 wp-gpios = <&gpio1 9 GPIO_ACTIVE_HIGH>;
129 status = "okay"; 129 status = "okay";
130}; 130};
131 131
diff --git a/arch/arm/boot/dts/imx53-qsb-common.dtsi b/arch/arm/boot/dts/imx53-qsb-common.dtsi
index ab4ba39f2ed9..b0d5542ac829 100644
--- a/arch/arm/boot/dts/imx53-qsb-common.dtsi
+++ b/arch/arm/boot/dts/imx53-qsb-common.dtsi
@@ -147,8 +147,8 @@
147&esdhc3 { 147&esdhc3 {
148 pinctrl-names = "default"; 148 pinctrl-names = "default";
149 pinctrl-0 = <&pinctrl_esdhc3>; 149 pinctrl-0 = <&pinctrl_esdhc3>;
150 cd-gpios = <&gpio3 11 0>; 150 cd-gpios = <&gpio3 11 GPIO_ACTIVE_LOW>;
151 wp-gpios = <&gpio3 12 0>; 151 wp-gpios = <&gpio3 12 GPIO_ACTIVE_HIGH>;
152 bus-width = <8>; 152 bus-width = <8>;
153 status = "okay"; 153 status = "okay";
154}; 154};
diff --git a/arch/arm/boot/dts/imx53-smd.dts b/arch/arm/boot/dts/imx53-smd.dts
index 1d325576bcc0..fc89ce1e5763 100644
--- a/arch/arm/boot/dts/imx53-smd.dts
+++ b/arch/arm/boot/dts/imx53-smd.dts
@@ -41,8 +41,8 @@
41&esdhc1 { 41&esdhc1 {
42 pinctrl-names = "default"; 42 pinctrl-names = "default";
43 pinctrl-0 = <&pinctrl_esdhc1>; 43 pinctrl-0 = <&pinctrl_esdhc1>;
44 cd-gpios = <&gpio3 13 0>; 44 cd-gpios = <&gpio3 13 GPIO_ACTIVE_LOW>;
45 wp-gpios = <&gpio4 11 0>; 45 wp-gpios = <&gpio4 11 GPIO_ACTIVE_HIGH>;
46 status = "okay"; 46 status = "okay";
47}; 47};
48 48
diff --git a/arch/arm/boot/dts/imx53-tqma53.dtsi b/arch/arm/boot/dts/imx53-tqma53.dtsi
index 4f1f0e2868bf..e03373a58760 100644
--- a/arch/arm/boot/dts/imx53-tqma53.dtsi
+++ b/arch/arm/boot/dts/imx53-tqma53.dtsi
@@ -41,8 +41,8 @@
41 pinctrl-0 = <&pinctrl_esdhc2>, 41 pinctrl-0 = <&pinctrl_esdhc2>,
42 <&pinctrl_esdhc2_cdwp>; 42 <&pinctrl_esdhc2_cdwp>;
43 vmmc-supply = <&reg_3p3v>; 43 vmmc-supply = <&reg_3p3v>;
44 wp-gpios = <&gpio1 2 0>; 44 wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
45 cd-gpios = <&gpio1 4 0>; 45 cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
46 status = "disabled"; 46 status = "disabled";
47}; 47};
48 48
diff --git a/arch/arm/boot/dts/imx53-tx53.dtsi b/arch/arm/boot/dts/imx53-tx53.dtsi
index 704bd72cbfec..d3e50b22064f 100644
--- a/arch/arm/boot/dts/imx53-tx53.dtsi
+++ b/arch/arm/boot/dts/imx53-tx53.dtsi
@@ -183,7 +183,7 @@
183}; 183};
184 184
185&esdhc1 { 185&esdhc1 {
186 cd-gpios = <&gpio3 24 GPIO_ACTIVE_HIGH>; 186 cd-gpios = <&gpio3 24 GPIO_ACTIVE_LOW>;
187 fsl,wp-controller; 187 fsl,wp-controller;
188 pinctrl-names = "default"; 188 pinctrl-names = "default";
189 pinctrl-0 = <&pinctrl_esdhc1>; 189 pinctrl-0 = <&pinctrl_esdhc1>;
@@ -191,7 +191,7 @@
191}; 191};
192 192
193&esdhc2 { 193&esdhc2 {
194 cd-gpios = <&gpio3 25 GPIO_ACTIVE_HIGH>; 194 cd-gpios = <&gpio3 25 GPIO_ACTIVE_LOW>;
195 fsl,wp-controller; 195 fsl,wp-controller;
196 pinctrl-names = "default"; 196 pinctrl-names = "default";
197 pinctrl-0 = <&pinctrl_esdhc2>; 197 pinctrl-0 = <&pinctrl_esdhc2>;
diff --git a/arch/arm/boot/dts/imx53-voipac-bsb.dts b/arch/arm/boot/dts/imx53-voipac-bsb.dts
index c17d3ad6dba5..fc51b87ad208 100644
--- a/arch/arm/boot/dts/imx53-voipac-bsb.dts
+++ b/arch/arm/boot/dts/imx53-voipac-bsb.dts
@@ -119,8 +119,8 @@
119&esdhc2 { 119&esdhc2 {
120 pinctrl-names = "default"; 120 pinctrl-names = "default";
121 pinctrl-0 = <&pinctrl_esdhc2>; 121 pinctrl-0 = <&pinctrl_esdhc2>;
122 cd-gpios = <&gpio3 25 0>; 122 cd-gpios = <&gpio3 25 GPIO_ACTIVE_LOW>;
123 wp-gpios = <&gpio2 19 0>; 123 wp-gpios = <&gpio2 19 GPIO_ACTIVE_HIGH>;
124 vmmc-supply = <&reg_3p3v>; 124 vmmc-supply = <&reg_3p3v>;
125 status = "okay"; 125 status = "okay";
126}; 126};
diff --git a/arch/arm/boot/dts/imx6dl-riotboard.dts b/arch/arm/boot/dts/imx6dl-riotboard.dts
index 43cb3fd76be7..5111f5170d53 100644
--- a/arch/arm/boot/dts/imx6dl-riotboard.dts
+++ b/arch/arm/boot/dts/imx6dl-riotboard.dts
@@ -305,8 +305,8 @@
305&usdhc2 { 305&usdhc2 {
306 pinctrl-names = "default"; 306 pinctrl-names = "default";
307 pinctrl-0 = <&pinctrl_usdhc2>; 307 pinctrl-0 = <&pinctrl_usdhc2>;
308 cd-gpios = <&gpio1 4 0>; 308 cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
309 wp-gpios = <&gpio1 2 0>; 309 wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
310 vmmc-supply = <&reg_3p3v>; 310 vmmc-supply = <&reg_3p3v>;
311 status = "okay"; 311 status = "okay";
312}; 312};
@@ -314,8 +314,8 @@
314&usdhc3 { 314&usdhc3 {
315 pinctrl-names = "default"; 315 pinctrl-names = "default";
316 pinctrl-0 = <&pinctrl_usdhc3>; 316 pinctrl-0 = <&pinctrl_usdhc3>;
317 cd-gpios = <&gpio7 0 0>; 317 cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
318 wp-gpios = <&gpio7 1 0>; 318 wp-gpios = <&gpio7 1 GPIO_ACTIVE_HIGH>;
319 vmmc-supply = <&reg_3p3v>; 319 vmmc-supply = <&reg_3p3v>;
320 status = "okay"; 320 status = "okay";
321}; 321};
diff --git a/arch/arm/boot/dts/imx6q-arm2.dts b/arch/arm/boot/dts/imx6q-arm2.dts
index 78df05e9d1ce..d6515f7a56c4 100644
--- a/arch/arm/boot/dts/imx6q-arm2.dts
+++ b/arch/arm/boot/dts/imx6q-arm2.dts
@@ -11,6 +11,7 @@
11 */ 11 */
12 12
13/dts-v1/; 13/dts-v1/;
14#include <dt-bindings/gpio/gpio.h>
14#include "imx6q.dtsi" 15#include "imx6q.dtsi"
15 16
16/ { 17/ {
@@ -196,8 +197,8 @@
196}; 197};
197 198
198&usdhc3 { 199&usdhc3 {
199 cd-gpios = <&gpio6 11 0>; 200 cd-gpios = <&gpio6 11 GPIO_ACTIVE_LOW>;
200 wp-gpios = <&gpio6 14 0>; 201 wp-gpios = <&gpio6 14 GPIO_ACTIVE_HIGH>;
201 vmmc-supply = <&reg_3p3v>; 202 vmmc-supply = <&reg_3p3v>;
202 pinctrl-names = "default"; 203 pinctrl-names = "default";
203 pinctrl-0 = <&pinctrl_usdhc3 204 pinctrl-0 = <&pinctrl_usdhc3
diff --git a/arch/arm/boot/dts/imx6q-gk802.dts b/arch/arm/boot/dts/imx6q-gk802.dts
index 703539cf36d3..00bd63e63d0c 100644
--- a/arch/arm/boot/dts/imx6q-gk802.dts
+++ b/arch/arm/boot/dts/imx6q-gk802.dts
@@ -7,6 +7,7 @@
7 */ 7 */
8 8
9/dts-v1/; 9/dts-v1/;
10#include <dt-bindings/gpio/gpio.h>
10#include "imx6q.dtsi" 11#include "imx6q.dtsi"
11 12
12/ { 13/ {
@@ -161,7 +162,7 @@
161 pinctrl-names = "default"; 162 pinctrl-names = "default";
162 pinctrl-0 = <&pinctrl_usdhc3>; 163 pinctrl-0 = <&pinctrl_usdhc3>;
163 bus-width = <4>; 164 bus-width = <4>;
164 cd-gpios = <&gpio6 11 0>; 165 cd-gpios = <&gpio6 11 GPIO_ACTIVE_LOW>;
165 vmmc-supply = <&reg_3p3v>; 166 vmmc-supply = <&reg_3p3v>;
166 status = "okay"; 167 status = "okay";
167}; 168};
diff --git a/arch/arm/boot/dts/imx6q-tbs2910.dts b/arch/arm/boot/dts/imx6q-tbs2910.dts
index a43abfa21e33..5645d52850a7 100644
--- a/arch/arm/boot/dts/imx6q-tbs2910.dts
+++ b/arch/arm/boot/dts/imx6q-tbs2910.dts
@@ -251,7 +251,7 @@
251 pinctrl-names = "default"; 251 pinctrl-names = "default";
252 pinctrl-0 = <&pinctrl_usdhc2>; 252 pinctrl-0 = <&pinctrl_usdhc2>;
253 bus-width = <4>; 253 bus-width = <4>;
254 cd-gpios = <&gpio2 2 GPIO_ACTIVE_HIGH>; 254 cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
255 vmmc-supply = <&reg_3p3v>; 255 vmmc-supply = <&reg_3p3v>;
256 status = "okay"; 256 status = "okay";
257}; 257};
@@ -260,7 +260,7 @@
260 pinctrl-names = "default"; 260 pinctrl-names = "default";
261 pinctrl-0 = <&pinctrl_usdhc3>; 261 pinctrl-0 = <&pinctrl_usdhc3>;
262 bus-width = <4>; 262 bus-width = <4>;
263 cd-gpios = <&gpio2 0 GPIO_ACTIVE_HIGH>; 263 cd-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>;
264 wp-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>; 264 wp-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
265 vmmc-supply = <&reg_3p3v>; 265 vmmc-supply = <&reg_3p3v>;
266 status = "okay"; 266 status = "okay";
diff --git a/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi b/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi
index e6d9195a1da7..f4d6ae564ead 100644
--- a/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-aristainetos.dtsi
@@ -173,7 +173,7 @@
173 pinctrl-names = "default"; 173 pinctrl-names = "default";
174 pinctrl-0 = <&pinctrl_usdhc1>; 174 pinctrl-0 = <&pinctrl_usdhc1>;
175 vmmc-supply = <&reg_3p3v>; 175 vmmc-supply = <&reg_3p3v>;
176 cd-gpios = <&gpio4 7 GPIO_ACTIVE_HIGH>; 176 cd-gpios = <&gpio4 7 GPIO_ACTIVE_LOW>;
177 status = "okay"; 177 status = "okay";
178}; 178};
179 179
@@ -181,7 +181,7 @@
181 pinctrl-names = "default"; 181 pinctrl-names = "default";
182 pinctrl-0 = <&pinctrl_usdhc2>; 182 pinctrl-0 = <&pinctrl_usdhc2>;
183 vmmc-supply = <&reg_3p3v>; 183 vmmc-supply = <&reg_3p3v>;
184 cd-gpios = <&gpio4 8 GPIO_ACTIVE_HIGH>; 184 cd-gpios = <&gpio4 8 GPIO_ACTIVE_LOW>;
185 status = "okay"; 185 status = "okay";
186}; 186};
187 187
diff --git a/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi b/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi
index 1d85de2befb3..a47a0399a172 100644
--- a/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-aristainetos2.dtsi
@@ -392,7 +392,7 @@
392&usdhc1 { 392&usdhc1 {
393 pinctrl-names = "default"; 393 pinctrl-names = "default";
394 pinctrl-0 = <&pinctrl_usdhc1>; 394 pinctrl-0 = <&pinctrl_usdhc1>;
395 cd-gpios = <&gpio1 27 GPIO_ACTIVE_HIGH>; 395 cd-gpios = <&gpio1 27 GPIO_ACTIVE_LOW>;
396 no-1-8-v; 396 no-1-8-v;
397 status = "okay"; 397 status = "okay";
398}; 398};
@@ -400,7 +400,7 @@
400&usdhc2 { 400&usdhc2 {
401 pinctrl-names = "default"; 401 pinctrl-names = "default";
402 pinctrl-0 = <&pinctrl_usdhc2>; 402 pinctrl-0 = <&pinctrl_usdhc2>;
403 cd-gpios = <&gpio4 5 GPIO_ACTIVE_HIGH>; 403 cd-gpios = <&gpio4 5 GPIO_ACTIVE_LOW>;
404 wp-gpios = <&gpio2 10 GPIO_ACTIVE_HIGH>; 404 wp-gpios = <&gpio2 10 GPIO_ACTIVE_HIGH>;
405 no-1-8-v; 405 no-1-8-v;
406 status = "okay"; 406 status = "okay";
diff --git a/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi b/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
index 59e5d15e3ec4..ff41f83551de 100644
--- a/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-cubox-i.dtsi
@@ -258,6 +258,6 @@
258 pinctrl-names = "default"; 258 pinctrl-names = "default";
259 pinctrl-0 = <&pinctrl_cubox_i_usdhc2_aux &pinctrl_cubox_i_usdhc2>; 259 pinctrl-0 = <&pinctrl_cubox_i_usdhc2_aux &pinctrl_cubox_i_usdhc2>;
260 vmmc-supply = <&reg_3p3v>; 260 vmmc-supply = <&reg_3p3v>;
261 cd-gpios = <&gpio1 4 0>; 261 cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
262 status = "okay"; 262 status = "okay";
263}; 263};
diff --git a/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi b/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi
index 2c253d6d20bd..45e7c39e80d5 100644
--- a/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-dfi-fs700-m60.dtsi
@@ -1,3 +1,5 @@
1#include <dt-bindings/gpio/gpio.h>
2
1/ { 3/ {
2 regulators { 4 regulators {
3 compatible = "simple-bus"; 5 compatible = "simple-bus";
@@ -181,7 +183,7 @@
181&usdhc2 { /* module slot */ 183&usdhc2 { /* module slot */
182 pinctrl-names = "default"; 184 pinctrl-names = "default";
183 pinctrl-0 = <&pinctrl_usdhc2>; 185 pinctrl-0 = <&pinctrl_usdhc2>;
184 cd-gpios = <&gpio2 2 0>; 186 cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
185 status = "okay"; 187 status = "okay";
186}; 188};
187 189
diff --git a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
index b5756c21ea1d..4493f6e99330 100644
--- a/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw52xx.dtsi
@@ -318,7 +318,7 @@
318&usdhc3 { 318&usdhc3 {
319 pinctrl-names = "default"; 319 pinctrl-names = "default";
320 pinctrl-0 = <&pinctrl_usdhc3>; 320 pinctrl-0 = <&pinctrl_usdhc3>;
321 cd-gpios = <&gpio7 0 GPIO_ACTIVE_HIGH>; 321 cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
322 vmmc-supply = <&reg_3p3v>; 322 vmmc-supply = <&reg_3p3v>;
323 status = "okay"; 323 status = "okay";
324}; 324};
diff --git a/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
index 86f03c1b147c..a857d1294609 100644
--- a/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw53xx.dtsi
@@ -324,7 +324,7 @@
324&usdhc3 { 324&usdhc3 {
325 pinctrl-names = "default"; 325 pinctrl-names = "default";
326 pinctrl-0 = <&pinctrl_usdhc3>; 326 pinctrl-0 = <&pinctrl_usdhc3>;
327 cd-gpios = <&gpio7 0 GPIO_ACTIVE_HIGH>; 327 cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
328 vmmc-supply = <&reg_3p3v>; 328 vmmc-supply = <&reg_3p3v>;
329 status = "okay"; 329 status = "okay";
330}; 330};
diff --git a/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi b/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
index 4a8d97f47759..1afe3385e2d2 100644
--- a/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-gw54xx.dtsi
@@ -417,7 +417,7 @@
417&usdhc3 { 417&usdhc3 {
418 pinctrl-names = "default"; 418 pinctrl-names = "default";
419 pinctrl-0 = <&pinctrl_usdhc3>; 419 pinctrl-0 = <&pinctrl_usdhc3>;
420 cd-gpios = <&gpio7 0 GPIO_ACTIVE_HIGH>; 420 cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
421 vmmc-supply = <&reg_3p3v>; 421 vmmc-supply = <&reg_3p3v>;
422 status = "okay"; 422 status = "okay";
423}; 423};
diff --git a/arch/arm/boot/dts/imx6qdl-hummingboard.dtsi b/arch/arm/boot/dts/imx6qdl-hummingboard.dtsi
index 62a82f3eba88..6dd0b764e036 100644
--- a/arch/arm/boot/dts/imx6qdl-hummingboard.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-hummingboard.dtsi
@@ -299,6 +299,6 @@
299 &pinctrl_hummingboard_usdhc2 299 &pinctrl_hummingboard_usdhc2
300 >; 300 >;
301 vmmc-supply = <&reg_3p3v>; 301 vmmc-supply = <&reg_3p3v>;
302 cd-gpios = <&gpio1 4 0>; 302 cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
303 status = "okay"; 303 status = "okay";
304}; 304};
diff --git a/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi b/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
index 3af16dfe417b..d7fe6672d00c 100644
--- a/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-nitrogen6x.dtsi
@@ -453,7 +453,7 @@
453&usdhc3 { 453&usdhc3 {
454 pinctrl-names = "default"; 454 pinctrl-names = "default";
455 pinctrl-0 = <&pinctrl_usdhc3>; 455 pinctrl-0 = <&pinctrl_usdhc3>;
456 cd-gpios = <&gpio7 0 0>; 456 cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
457 vmmc-supply = <&reg_3p3v>; 457 vmmc-supply = <&reg_3p3v>;
458 status = "okay"; 458 status = "okay";
459}; 459};
@@ -461,7 +461,7 @@
461&usdhc4 { 461&usdhc4 {
462 pinctrl-names = "default"; 462 pinctrl-names = "default";
463 pinctrl-0 = <&pinctrl_usdhc4>; 463 pinctrl-0 = <&pinctrl_usdhc4>;
464 cd-gpios = <&gpio2 6 0>; 464 cd-gpios = <&gpio2 6 GPIO_ACTIVE_LOW>;
465 vmmc-supply = <&reg_3p3v>; 465 vmmc-supply = <&reg_3p3v>;
466 status = "okay"; 466 status = "okay";
467}; 467};
diff --git a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
index 1ce6133b67f5..9e6ecd99b472 100644
--- a/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
@@ -409,8 +409,8 @@
409&usdhc2 { 409&usdhc2 {
410 pinctrl-names = "default"; 410 pinctrl-names = "default";
411 pinctrl-0 = <&pinctrl_usdhc2>; 411 pinctrl-0 = <&pinctrl_usdhc2>;
412 cd-gpios = <&gpio1 4 0>; 412 cd-gpios = <&gpio1 4 GPIO_ACTIVE_LOW>;
413 wp-gpios = <&gpio1 2 0>; 413 wp-gpios = <&gpio1 2 GPIO_ACTIVE_HIGH>;
414 status = "disabled"; 414 status = "disabled";
415}; 415};
416 416
@@ -418,7 +418,7 @@
418 pinctrl-names = "default"; 418 pinctrl-names = "default";
419 pinctrl-0 = <&pinctrl_usdhc3 419 pinctrl-0 = <&pinctrl_usdhc3
420 &pinctrl_usdhc3_cdwp>; 420 &pinctrl_usdhc3_cdwp>;
421 cd-gpios = <&gpio1 27 0>; 421 cd-gpios = <&gpio1 27 GPIO_ACTIVE_LOW>;
422 wp-gpios = <&gpio1 29 0>; 422 wp-gpios = <&gpio1 29 GPIO_ACTIVE_HIGH>;
423 status = "disabled"; 423 status = "disabled";
424}; 424};
diff --git a/arch/arm/boot/dts/imx6qdl-rex.dtsi b/arch/arm/boot/dts/imx6qdl-rex.dtsi
index 488a640796ac..3373fd958e95 100644
--- a/arch/arm/boot/dts/imx6qdl-rex.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-rex.dtsi
@@ -342,7 +342,7 @@
342 pinctrl-0 = <&pinctrl_usdhc2>; 342 pinctrl-0 = <&pinctrl_usdhc2>;
343 bus-width = <4>; 343 bus-width = <4>;
344 cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>; 344 cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
345 wp-gpios = <&gpio2 3 GPIO_ACTIVE_LOW>; 345 wp-gpios = <&gpio2 3 GPIO_ACTIVE_HIGH>;
346 status = "okay"; 346 status = "okay";
347}; 347};
348 348
@@ -351,6 +351,6 @@
351 pinctrl-0 = <&pinctrl_usdhc3>; 351 pinctrl-0 = <&pinctrl_usdhc3>;
352 bus-width = <4>; 352 bus-width = <4>;
353 cd-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>; 353 cd-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>;
354 wp-gpios = <&gpio2 1 GPIO_ACTIVE_LOW>; 354 wp-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
355 status = "okay"; 355 status = "okay";
356}; 356};
diff --git a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
index 3b24b12651b2..e329ca5c3322 100644
--- a/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
@@ -467,8 +467,8 @@
467 pinctrl-0 = <&pinctrl_usdhc3>; 467 pinctrl-0 = <&pinctrl_usdhc3>;
468 pinctrl-1 = <&pinctrl_usdhc3_100mhz>; 468 pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
469 pinctrl-2 = <&pinctrl_usdhc3_200mhz>; 469 pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
470 cd-gpios = <&gpio6 15 0>; 470 cd-gpios = <&gpio6 15 GPIO_ACTIVE_LOW>;
471 wp-gpios = <&gpio1 13 0>; 471 wp-gpios = <&gpio1 13 GPIO_ACTIVE_HIGH>;
472 status = "okay"; 472 status = "okay";
473}; 473};
474 474
diff --git a/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi b/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi
index e00c44f6a0df..782379320517 100644
--- a/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabrelite.dtsi
@@ -448,8 +448,8 @@
448&usdhc3 { 448&usdhc3 {
449 pinctrl-names = "default"; 449 pinctrl-names = "default";
450 pinctrl-0 = <&pinctrl_usdhc3>; 450 pinctrl-0 = <&pinctrl_usdhc3>;
451 cd-gpios = <&gpio7 0 0>; 451 cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>;
452 wp-gpios = <&gpio7 1 0>; 452 wp-gpios = <&gpio7 1 GPIO_ACTIVE_HIGH>;
453 vmmc-supply = <&reg_3p3v>; 453 vmmc-supply = <&reg_3p3v>;
454 status = "okay"; 454 status = "okay";
455}; 455};
@@ -457,7 +457,7 @@
457&usdhc4 { 457&usdhc4 {
458 pinctrl-names = "default"; 458 pinctrl-names = "default";
459 pinctrl-0 = <&pinctrl_usdhc4>; 459 pinctrl-0 = <&pinctrl_usdhc4>;
460 cd-gpios = <&gpio2 6 0>; 460 cd-gpios = <&gpio2 6 GPIO_ACTIVE_LOW>;
461 vmmc-supply = <&reg_3p3v>; 461 vmmc-supply = <&reg_3p3v>;
462 status = "okay"; 462 status = "okay";
463}; 463};
diff --git a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
index a626e6dd8022..944eb81cb2b8 100644
--- a/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-sabresd.dtsi
@@ -562,8 +562,8 @@
562 pinctrl-names = "default"; 562 pinctrl-names = "default";
563 pinctrl-0 = <&pinctrl_usdhc2>; 563 pinctrl-0 = <&pinctrl_usdhc2>;
564 bus-width = <8>; 564 bus-width = <8>;
565 cd-gpios = <&gpio2 2 0>; 565 cd-gpios = <&gpio2 2 GPIO_ACTIVE_LOW>;
566 wp-gpios = <&gpio2 3 0>; 566 wp-gpios = <&gpio2 3 GPIO_ACTIVE_HIGH>;
567 status = "okay"; 567 status = "okay";
568}; 568};
569 569
@@ -571,8 +571,8 @@
571 pinctrl-names = "default"; 571 pinctrl-names = "default";
572 pinctrl-0 = <&pinctrl_usdhc3>; 572 pinctrl-0 = <&pinctrl_usdhc3>;
573 bus-width = <8>; 573 bus-width = <8>;
574 cd-gpios = <&gpio2 0 0>; 574 cd-gpios = <&gpio2 0 GPIO_ACTIVE_LOW>;
575 wp-gpios = <&gpio2 1 0>; 575 wp-gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
576 status = "okay"; 576 status = "okay";
577}; 577};
578 578
diff --git a/arch/arm/boot/dts/imx6qdl-tx6.dtsi b/arch/arm/boot/dts/imx6qdl-tx6.dtsi
index f02b80b41d4f..da08de324e9e 100644
--- a/arch/arm/boot/dts/imx6qdl-tx6.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-tx6.dtsi
@@ -680,7 +680,7 @@
680 pinctrl-0 = <&pinctrl_usdhc1>; 680 pinctrl-0 = <&pinctrl_usdhc1>;
681 bus-width = <4>; 681 bus-width = <4>;
682 no-1-8-v; 682 no-1-8-v;
683 cd-gpios = <&gpio7 2 0>; 683 cd-gpios = <&gpio7 2 GPIO_ACTIVE_LOW>;
684 fsl,wp-controller; 684 fsl,wp-controller;
685 status = "okay"; 685 status = "okay";
686}; 686};
@@ -690,7 +690,7 @@
690 pinctrl-0 = <&pinctrl_usdhc2>; 690 pinctrl-0 = <&pinctrl_usdhc2>;
691 bus-width = <4>; 691 bus-width = <4>;
692 no-1-8-v; 692 no-1-8-v;
693 cd-gpios = <&gpio7 3 0>; 693 cd-gpios = <&gpio7 3 GPIO_ACTIVE_LOW>;
694 fsl,wp-controller; 694 fsl,wp-controller;
695 status = "okay"; 695 status = "okay";
696}; 696};
diff --git a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
index 5fb091675582..9e096d811bed 100644
--- a/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
+++ b/arch/arm/boot/dts/imx6qdl-wandboard.dtsi
@@ -9,6 +9,8 @@
9 * 9 *
10 */ 10 */
11 11
12#include <dt-bindings/gpio/gpio.h>
13
12/ { 14/ {
13 regulators { 15 regulators {
14 compatible = "simple-bus"; 16 compatible = "simple-bus";
@@ -250,13 +252,13 @@
250&usdhc1 { 252&usdhc1 {
251 pinctrl-names = "default"; 253 pinctrl-names = "default";
252 pinctrl-0 = <&pinctrl_usdhc1>; 254 pinctrl-0 = <&pinctrl_usdhc1>;
253 cd-gpios = <&gpio1 2 0>; 255 cd-gpios = <&gpio1 2 GPIO_ACTIVE_LOW>;
254 status = "okay"; 256 status = "okay";
255}; 257};
256 258
257&usdhc3 { 259&usdhc3 {
258 pinctrl-names = "default"; 260 pinctrl-names = "default";
259 pinctrl-0 = <&pinctrl_usdhc3>; 261 pinctrl-0 = <&pinctrl_usdhc3>;
260 cd-gpios = <&gpio3 9 0>; 262 cd-gpios = <&gpio3 9 GPIO_ACTIVE_LOW>;
261 status = "okay"; 263 status = "okay";
262}; 264};
diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
index e6d13592080d..b57033e8c633 100644
--- a/arch/arm/boot/dts/imx6qdl.dtsi
+++ b/arch/arm/boot/dts/imx6qdl.dtsi
@@ -181,10 +181,10 @@
181 interrupt-names = "msi"; 181 interrupt-names = "msi";
182 #interrupt-cells = <1>; 182 #interrupt-cells = <1>;
183 interrupt-map-mask = <0 0 0 0x7>; 183 interrupt-map-mask = <0 0 0 0x7>;
184 interrupt-map = <0 0 0 1 &intc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>, 184 interrupt-map = <0 0 0 1 &gpc GIC_SPI 123 IRQ_TYPE_LEVEL_HIGH>,
185 <0 0 0 2 &intc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>, 185 <0 0 0 2 &gpc GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>,
186 <0 0 0 3 &intc GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>, 186 <0 0 0 3 &gpc GIC_SPI 121 IRQ_TYPE_LEVEL_HIGH>,
187 <0 0 0 4 &intc GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>; 187 <0 0 0 4 &gpc GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>;
188 clocks = <&clks IMX6QDL_CLK_PCIE_AXI>, 188 clocks = <&clks IMX6QDL_CLK_PCIE_AXI>,
189 <&clks IMX6QDL_CLK_LVDS1_GATE>, 189 <&clks IMX6QDL_CLK_LVDS1_GATE>,
190 <&clks IMX6QDL_CLK_PCIE_REF_125M>; 190 <&clks IMX6QDL_CLK_PCIE_REF_125M>;
diff --git a/arch/arm/boot/dts/imx6sl-evk.dts b/arch/arm/boot/dts/imx6sl-evk.dts
index 945887d3fdb3..b84dff2e94ea 100644
--- a/arch/arm/boot/dts/imx6sl-evk.dts
+++ b/arch/arm/boot/dts/imx6sl-evk.dts
@@ -617,8 +617,8 @@
617 pinctrl-1 = <&pinctrl_usdhc1_100mhz>; 617 pinctrl-1 = <&pinctrl_usdhc1_100mhz>;
618 pinctrl-2 = <&pinctrl_usdhc1_200mhz>; 618 pinctrl-2 = <&pinctrl_usdhc1_200mhz>;
619 bus-width = <8>; 619 bus-width = <8>;
620 cd-gpios = <&gpio4 7 0>; 620 cd-gpios = <&gpio4 7 GPIO_ACTIVE_LOW>;
621 wp-gpios = <&gpio4 6 0>; 621 wp-gpios = <&gpio4 6 GPIO_ACTIVE_HIGH>;
622 status = "okay"; 622 status = "okay";
623}; 623};
624 624
@@ -627,8 +627,8 @@
627 pinctrl-0 = <&pinctrl_usdhc2>; 627 pinctrl-0 = <&pinctrl_usdhc2>;
628 pinctrl-1 = <&pinctrl_usdhc2_100mhz>; 628 pinctrl-1 = <&pinctrl_usdhc2_100mhz>;
629 pinctrl-2 = <&pinctrl_usdhc2_200mhz>; 629 pinctrl-2 = <&pinctrl_usdhc2_200mhz>;
630 cd-gpios = <&gpio5 0 0>; 630 cd-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
631 wp-gpios = <&gpio4 29 0>; 631 wp-gpios = <&gpio4 29 GPIO_ACTIVE_HIGH>;
632 status = "okay"; 632 status = "okay";
633}; 633};
634 634
@@ -637,6 +637,6 @@
637 pinctrl-0 = <&pinctrl_usdhc3>; 637 pinctrl-0 = <&pinctrl_usdhc3>;
638 pinctrl-1 = <&pinctrl_usdhc3_100mhz>; 638 pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
639 pinctrl-2 = <&pinctrl_usdhc3_200mhz>; 639 pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
640 cd-gpios = <&gpio3 22 0>; 640 cd-gpios = <&gpio3 22 GPIO_ACTIVE_LOW>;
641 status = "okay"; 641 status = "okay";
642}; 642};
diff --git a/arch/arm/boot/dts/imx6sx-sabreauto.dts b/arch/arm/boot/dts/imx6sx-sabreauto.dts
index e3c0b63c2205..115f3fd78971 100644
--- a/arch/arm/boot/dts/imx6sx-sabreauto.dts
+++ b/arch/arm/boot/dts/imx6sx-sabreauto.dts
@@ -49,7 +49,7 @@
49 pinctrl-1 = <&pinctrl_usdhc3_100mhz>; 49 pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
50 pinctrl-2 = <&pinctrl_usdhc3_200mhz>; 50 pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
51 bus-width = <8>; 51 bus-width = <8>;
52 cd-gpios = <&gpio7 10 GPIO_ACTIVE_HIGH>; 52 cd-gpios = <&gpio7 10 GPIO_ACTIVE_LOW>;
53 wp-gpios = <&gpio3 19 GPIO_ACTIVE_HIGH>; 53 wp-gpios = <&gpio3 19 GPIO_ACTIVE_HIGH>;
54 keep-power-in-suspend; 54 keep-power-in-suspend;
55 enable-sdio-wakeup; 55 enable-sdio-wakeup;
@@ -61,7 +61,7 @@
61 pinctrl-names = "default"; 61 pinctrl-names = "default";
62 pinctrl-0 = <&pinctrl_usdhc4>; 62 pinctrl-0 = <&pinctrl_usdhc4>;
63 bus-width = <8>; 63 bus-width = <8>;
64 cd-gpios = <&gpio7 11 GPIO_ACTIVE_HIGH>; 64 cd-gpios = <&gpio7 11 GPIO_ACTIVE_LOW>;
65 no-1-8-v; 65 no-1-8-v;
66 keep-power-in-suspend; 66 keep-power-in-suspend;
67 enable-sdio-wakup; 67 enable-sdio-wakup;
diff --git a/arch/arm/boot/dts/imx6sx-sdb.dtsi b/arch/arm/boot/dts/imx6sx-sdb.dtsi
index cef04cef3a80..ac88c3467078 100644
--- a/arch/arm/boot/dts/imx6sx-sdb.dtsi
+++ b/arch/arm/boot/dts/imx6sx-sdb.dtsi
@@ -293,7 +293,7 @@
293 pinctrl-1 = <&pinctrl_usdhc3_100mhz>; 293 pinctrl-1 = <&pinctrl_usdhc3_100mhz>;
294 pinctrl-2 = <&pinctrl_usdhc3_200mhz>; 294 pinctrl-2 = <&pinctrl_usdhc3_200mhz>;
295 bus-width = <8>; 295 bus-width = <8>;
296 cd-gpios = <&gpio2 10 GPIO_ACTIVE_HIGH>; 296 cd-gpios = <&gpio2 10 GPIO_ACTIVE_LOW>;
297 wp-gpios = <&gpio2 15 GPIO_ACTIVE_HIGH>; 297 wp-gpios = <&gpio2 15 GPIO_ACTIVE_HIGH>;
298 keep-power-in-suspend; 298 keep-power-in-suspend;
299 enable-sdio-wakeup; 299 enable-sdio-wakeup;
@@ -304,7 +304,7 @@
304&usdhc4 { 304&usdhc4 {
305 pinctrl-names = "default"; 305 pinctrl-names = "default";
306 pinctrl-0 = <&pinctrl_usdhc4>; 306 pinctrl-0 = <&pinctrl_usdhc4>;
307 cd-gpios = <&gpio6 21 GPIO_ACTIVE_HIGH>; 307 cd-gpios = <&gpio6 21 GPIO_ACTIVE_LOW>;
308 wp-gpios = <&gpio6 20 GPIO_ACTIVE_HIGH>; 308 wp-gpios = <&gpio6 20 GPIO_ACTIVE_HIGH>;
309 status = "okay"; 309 status = "okay";
310}; 310};
diff --git a/arch/arm/boot/dts/imx7d-sdb.dts b/arch/arm/boot/dts/imx7d-sdb.dts
index 4d1a4b977d84..fdd1d7c9a5cc 100644
--- a/arch/arm/boot/dts/imx7d-sdb.dts
+++ b/arch/arm/boot/dts/imx7d-sdb.dts
@@ -234,8 +234,8 @@
234&usdhc1 { 234&usdhc1 {
235 pinctrl-names = "default"; 235 pinctrl-names = "default";
236 pinctrl-0 = <&pinctrl_usdhc1>; 236 pinctrl-0 = <&pinctrl_usdhc1>;
237 cd-gpios = <&gpio5 0 0>; 237 cd-gpios = <&gpio5 0 GPIO_ACTIVE_LOW>;
238 wp-gpios = <&gpio5 1 0>; 238 wp-gpios = <&gpio5 1 GPIO_ACTIVE_HIGH>;
239 enable-sdio-wakeup; 239 enable-sdio-wakeup;
240 keep-power-in-suspend; 240 keep-power-in-suspend;
241 status = "okay"; 241 status = "okay";
diff --git a/arch/arm/boot/dts/k2e-clocks.dtsi b/arch/arm/boot/dts/k2e-clocks.dtsi
index 4773d6af66a0..d56d68fe7ffc 100644
--- a/arch/arm/boot/dts/k2e-clocks.dtsi
+++ b/arch/arm/boot/dts/k2e-clocks.dtsi
@@ -13,9 +13,8 @@ clocks {
13 #clock-cells = <0>; 13 #clock-cells = <0>;
14 compatible = "ti,keystone,main-pll-clock"; 14 compatible = "ti,keystone,main-pll-clock";
15 clocks = <&refclksys>; 15 clocks = <&refclksys>;
16 reg = <0x02620350 4>, <0x02310110 4>; 16 reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
17 reg-names = "control", "multiplier"; 17 reg-names = "control", "multiplier", "post-divider";
18 fixed-postdiv = <2>;
19 }; 18 };
20 19
21 papllclk: papllclk@2620358 { 20 papllclk: papllclk@2620358 {
diff --git a/arch/arm/boot/dts/k2e.dtsi b/arch/arm/boot/dts/k2e.dtsi
index 1b6494fbdb91..675fb8e492c6 100644
--- a/arch/arm/boot/dts/k2e.dtsi
+++ b/arch/arm/boot/dts/k2e.dtsi
@@ -131,10 +131,17 @@
131 <GIC_SPI 376 IRQ_TYPE_EDGE_RISING>; 131 <GIC_SPI 376 IRQ_TYPE_EDGE_RISING>;
132 }; 132 };
133 }; 133 };
134
135 mdio: mdio@24200f00 {
136 compatible = "ti,keystone_mdio", "ti,davinci_mdio";
137 #address-cells = <1>;
138 #size-cells = <0>;
139 reg = <0x24200f00 0x100>;
140 status = "disabled";
141 clocks = <&clkcpgmac>;
142 clock-names = "fck";
143 bus_freq = <2500000>;
144 };
134 /include/ "k2e-netcp.dtsi" 145 /include/ "k2e-netcp.dtsi"
135 }; 146 };
136}; 147};
137
138&mdio {
139 reg = <0x24200f00 0x100>;
140};
diff --git a/arch/arm/boot/dts/k2hk-clocks.dtsi b/arch/arm/boot/dts/k2hk-clocks.dtsi
index d5adee3c0067..af9b7190533a 100644
--- a/arch/arm/boot/dts/k2hk-clocks.dtsi
+++ b/arch/arm/boot/dts/k2hk-clocks.dtsi
@@ -22,9 +22,8 @@ clocks {
22 #clock-cells = <0>; 22 #clock-cells = <0>;
23 compatible = "ti,keystone,main-pll-clock"; 23 compatible = "ti,keystone,main-pll-clock";
24 clocks = <&refclksys>; 24 clocks = <&refclksys>;
25 reg = <0x02620350 4>, <0x02310110 4>; 25 reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
26 reg-names = "control", "multiplier"; 26 reg-names = "control", "multiplier", "post-divider";
27 fixed-postdiv = <2>;
28 }; 27 };
29 28
30 papllclk: papllclk@2620358 { 29 papllclk: papllclk@2620358 {
diff --git a/arch/arm/boot/dts/k2hk.dtsi b/arch/arm/boot/dts/k2hk.dtsi
index ae6472407b22..d0810a5f2968 100644
--- a/arch/arm/boot/dts/k2hk.dtsi
+++ b/arch/arm/boot/dts/k2hk.dtsi
@@ -98,6 +98,17 @@
98 #gpio-cells = <2>; 98 #gpio-cells = <2>;
99 gpio,syscon-dev = <&devctrl 0x25c>; 99 gpio,syscon-dev = <&devctrl 0x25c>;
100 }; 100 };
101
102 mdio: mdio@02090300 {
103 compatible = "ti,keystone_mdio", "ti,davinci_mdio";
104 #address-cells = <1>;
105 #size-cells = <0>;
106 reg = <0x02090300 0x100>;
107 status = "disabled";
108 clocks = <&clkcpgmac>;
109 clock-names = "fck";
110 bus_freq = <2500000>;
111 };
101 /include/ "k2hk-netcp.dtsi" 112 /include/ "k2hk-netcp.dtsi"
102 }; 113 };
103}; 114};
diff --git a/arch/arm/boot/dts/k2l-clocks.dtsi b/arch/arm/boot/dts/k2l-clocks.dtsi
index eb1e3e29f073..ef8464bb11ff 100644
--- a/arch/arm/boot/dts/k2l-clocks.dtsi
+++ b/arch/arm/boot/dts/k2l-clocks.dtsi
@@ -22,9 +22,8 @@ clocks {
22 #clock-cells = <0>; 22 #clock-cells = <0>;
23 compatible = "ti,keystone,main-pll-clock"; 23 compatible = "ti,keystone,main-pll-clock";
24 clocks = <&refclksys>; 24 clocks = <&refclksys>;
25 reg = <0x02620350 4>, <0x02310110 4>; 25 reg = <0x02620350 4>, <0x02310110 4>, <0x02310108 4>;
26 reg-names = "control", "multiplier"; 26 reg-names = "control", "multiplier", "post-divider";
27 fixed-postdiv = <2>;
28 }; 27 };
29 28
30 papllclk: papllclk@2620358 { 29 papllclk: papllclk@2620358 {
diff --git a/arch/arm/boot/dts/k2l.dtsi b/arch/arm/boot/dts/k2l.dtsi
index 0e007483615e..49fd414f680c 100644
--- a/arch/arm/boot/dts/k2l.dtsi
+++ b/arch/arm/boot/dts/k2l.dtsi
@@ -29,7 +29,6 @@
29 }; 29 };
30 30
31 soc { 31 soc {
32
33 /include/ "k2l-clocks.dtsi" 32 /include/ "k2l-clocks.dtsi"
34 33
35 uart2: serial@02348400 { 34 uart2: serial@02348400 {
@@ -79,6 +78,17 @@
79 #gpio-cells = <2>; 78 #gpio-cells = <2>;
80 gpio,syscon-dev = <&devctrl 0x24c>; 79 gpio,syscon-dev = <&devctrl 0x24c>;
81 }; 80 };
81
82 mdio: mdio@26200f00 {
83 compatible = "ti,keystone_mdio", "ti,davinci_mdio";
84 #address-cells = <1>;
85 #size-cells = <0>;
86 reg = <0x26200f00 0x100>;
87 status = "disabled";
88 clocks = <&clkcpgmac>;
89 clock-names = "fck";
90 bus_freq = <2500000>;
91 };
82 /include/ "k2l-netcp.dtsi" 92 /include/ "k2l-netcp.dtsi"
83 }; 93 };
84}; 94};
@@ -96,7 +106,3 @@
96 /* Pin muxed. Enabled and configured by Bootloader */ 106 /* Pin muxed. Enabled and configured by Bootloader */
97 status = "disabled"; 107 status = "disabled";
98}; 108};
99
100&mdio {
101 reg = <0x26200f00 0x100>;
102};
diff --git a/arch/arm/boot/dts/keystone.dtsi b/arch/arm/boot/dts/keystone.dtsi
index e7a6f6deabb6..72816d65f7ec 100644
--- a/arch/arm/boot/dts/keystone.dtsi
+++ b/arch/arm/boot/dts/keystone.dtsi
@@ -267,17 +267,6 @@
267 1 0 0x21000A00 0x00000100>; 267 1 0 0x21000A00 0x00000100>;
268 }; 268 };
269 269
270 mdio: mdio@02090300 {
271 compatible = "ti,keystone_mdio", "ti,davinci_mdio";
272 #address-cells = <1>;
273 #size-cells = <0>;
274 reg = <0x02090300 0x100>;
275 status = "disabled";
276 clocks = <&clkpa>;
277 clock-names = "fck";
278 bus_freq = <2500000>;
279 };
280
281 kirq0: keystone_irq@26202a0 { 270 kirq0: keystone_irq@26202a0 {
282 compatible = "ti,keystone-irq"; 271 compatible = "ti,keystone-irq";
283 interrupts = <GIC_SPI 4 IRQ_TYPE_EDGE_RISING>; 272 interrupts = <GIC_SPI 4 IRQ_TYPE_EDGE_RISING>;
diff --git a/arch/arm/boot/dts/omap2430.dtsi b/arch/arm/boot/dts/omap2430.dtsi
index 11a7963be003..2390f387c271 100644
--- a/arch/arm/boot/dts/omap2430.dtsi
+++ b/arch/arm/boot/dts/omap2430.dtsi
@@ -51,7 +51,8 @@
51 }; 51 };
52 52
53 scm_conf: scm_conf@270 { 53 scm_conf: scm_conf@270 {
54 compatible = "syscon"; 54 compatible = "syscon",
55 "simple-bus";
55 reg = <0x270 0x240>; 56 reg = <0x270 0x240>;
56 #address-cells = <1>; 57 #address-cells = <1>;
57 #size-cells = <1>; 58 #size-cells = <1>;
diff --git a/arch/arm/boot/dts/omap4.dtsi b/arch/arm/boot/dts/omap4.dtsi
index 7d31c6ff246f..abc4473e6f8a 100644
--- a/arch/arm/boot/dts/omap4.dtsi
+++ b/arch/arm/boot/dts/omap4.dtsi
@@ -191,7 +191,8 @@
191 }; 191 };
192 192
193 omap4_padconf_global: omap4_padconf_global@5a0 { 193 omap4_padconf_global: omap4_padconf_global@5a0 {
194 compatible = "syscon"; 194 compatible = "syscon",
195 "simple-bus";
195 reg = <0x5a0 0x170>; 196 reg = <0x5a0 0x170>;
196 #address-cells = <1>; 197 #address-cells = <1>;
197 #size-cells = <1>; 198 #size-cells = <1>;
diff --git a/arch/arm/boot/dts/omap5.dtsi b/arch/arm/boot/dts/omap5.dtsi
index c8fd648a7108..b1a1263e6001 100644
--- a/arch/arm/boot/dts/omap5.dtsi
+++ b/arch/arm/boot/dts/omap5.dtsi
@@ -180,7 +180,8 @@
180 }; 180 };
181 181
182 omap5_padconf_global: omap5_padconf_global@5a0 { 182 omap5_padconf_global: omap5_padconf_global@5a0 {
183 compatible = "syscon"; 183 compatible = "syscon",
184 "simple-bus";
184 reg = <0x5a0 0xec>; 185 reg = <0x5a0 0xec>;
185 #address-cells = <1>; 186 #address-cells = <1>;
186 #size-cells = <1>; 187 #size-cells = <1>;
diff --git a/arch/arm/boot/dts/ste-dbx5x0.dtsi b/arch/arm/boot/dts/ste-dbx5x0.dtsi
index a75f3289e653..b8f81fb418ce 100644
--- a/arch/arm/boot/dts/ste-dbx5x0.dtsi
+++ b/arch/arm/boot/dts/ste-dbx5x0.dtsi
@@ -15,6 +15,33 @@
15#include "skeleton.dtsi" 15#include "skeleton.dtsi"
16 16
17/ { 17/ {
18 cpus {
19 #address-cells = <1>;
20 #size-cells = <0>;
21 enable-method = "ste,dbx500-smp";
22
23 cpu-map {
24 cluster0 {
25 core0 {
26 cpu = <&CPU0>;
27 };
28 core1 {
29 cpu = <&CPU1>;
30 };
31 };
32 };
33 CPU0: cpu@300 {
34 device_type = "cpu";
35 compatible = "arm,cortex-a9";
36 reg = <0x300>;
37 };
38 CPU1: cpu@301 {
39 device_type = "cpu";
40 compatible = "arm,cortex-a9";
41 reg = <0x301>;
42 };
43 };
44
18 soc { 45 soc {
19 #address-cells = <1>; 46 #address-cells = <1>;
20 #size-cells = <1>; 47 #size-cells = <1>;
@@ -22,32 +49,6 @@
22 interrupt-parent = <&intc>; 49 interrupt-parent = <&intc>;
23 ranges; 50 ranges;
24 51
25 cpus {
26 #address-cells = <1>;
27 #size-cells = <0>;
28
29 cpu-map {
30 cluster0 {
31 core0 {
32 cpu = <&CPU0>;
33 };
34 core1 {
35 cpu = <&CPU1>;
36 };
37 };
38 };
39 CPU0: cpu@0 {
40 device_type = "cpu";
41 compatible = "arm,cortex-a9";
42 reg = <0>;
43 };
44 CPU1: cpu@1 {
45 device_type = "cpu";
46 compatible = "arm,cortex-a9";
47 reg = <1>;
48 };
49 };
50
51 ptm@801ae000 { 52 ptm@801ae000 {
52 compatible = "arm,coresight-etm3x", "arm,primecell"; 53 compatible = "arm,coresight-etm3x", "arm,primecell";
53 reg = <0x801ae000 0x1000>; 54 reg = <0x801ae000 0x1000>;
diff --git a/arch/arm/boot/dts/ste-nomadik-nhk15.dts b/arch/arm/boot/dts/ste-nomadik-nhk15.dts
index 3d0b8755caee..3d25dba143a5 100644
--- a/arch/arm/boot/dts/ste-nomadik-nhk15.dts
+++ b/arch/arm/boot/dts/ste-nomadik-nhk15.dts
@@ -17,6 +17,7 @@
17 }; 17 };
18 18
19 aliases { 19 aliases {
20 serial1 = &uart1;
20 stmpe-i2c0 = &stmpe0; 21 stmpe-i2c0 = &stmpe0;
21 stmpe-i2c1 = &stmpe1; 22 stmpe-i2c1 = &stmpe1;
22 }; 23 };
diff --git a/arch/arm/boot/dts/ste-nomadik-s8815.dts b/arch/arm/boot/dts/ste-nomadik-s8815.dts
index 85d3b95dfdba..3c140d05f796 100644
--- a/arch/arm/boot/dts/ste-nomadik-s8815.dts
+++ b/arch/arm/boot/dts/ste-nomadik-s8815.dts
@@ -15,6 +15,10 @@
15 bootargs = "root=/dev/ram0 console=ttyAMA1,115200n8 earlyprintk"; 15 bootargs = "root=/dev/ram0 console=ttyAMA1,115200n8 earlyprintk";
16 }; 16 };
17 17
18 aliases {
19 serial1 = &uart1;
20 };
21
18 src@101e0000 { 22 src@101e0000 {
19 /* These chrystal drivers are not used on this board */ 23 /* These chrystal drivers are not used on this board */
20 disable-sxtalo; 24 disable-sxtalo;
diff --git a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
index 9a5f2ba139b7..ef794a33b4dc 100644
--- a/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
+++ b/arch/arm/boot/dts/ste-nomadik-stn8815.dtsi
@@ -757,6 +757,7 @@
757 clock-names = "uartclk", "apb_pclk"; 757 clock-names = "uartclk", "apb_pclk";
758 pinctrl-names = "default"; 758 pinctrl-names = "default";
759 pinctrl-0 = <&uart0_default_mux>; 759 pinctrl-0 = <&uart0_default_mux>;
760 status = "disabled";
760 }; 761 };
761 762
762 uart1: uart@101fb000 { 763 uart1: uart@101fb000 {
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 92828a1dec80..b48dd4f37f80 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -61,6 +61,7 @@ work_pending:
61 movlt scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE) 61 movlt scno, #(__NR_restart_syscall - __NR_SYSCALL_BASE)
62 ldmia sp, {r0 - r6} @ have to reload r0 - r6 62 ldmia sp, {r0 - r6} @ have to reload r0 - r6
63 b local_restart @ ... and off we go 63 b local_restart @ ... and off we go
64ENDPROC(ret_fast_syscall)
64 65
65/* 66/*
66 * "slow" syscall return path. "why" tells us if this was a real syscall. 67 * "slow" syscall return path. "why" tells us if this was a real syscall.
diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
index bd755d97e459..29e2991465cb 100644
--- a/arch/arm/kernel/head.S
+++ b/arch/arm/kernel/head.S
@@ -399,6 +399,9 @@ ENTRY(secondary_startup)
399 sub lr, r4, r5 @ mmu has been enabled 399 sub lr, r4, r5 @ mmu has been enabled
400 add r3, r7, lr 400 add r3, r7, lr
401 ldrd r4, [r3, #0] @ get secondary_data.pgdir 401 ldrd r4, [r3, #0] @ get secondary_data.pgdir
402ARM_BE8(eor r4, r4, r5) @ Swap r5 and r4 in BE:
403ARM_BE8(eor r5, r4, r5) @ it can be done in 3 steps
404ARM_BE8(eor r4, r4, r5) @ without using a temp reg.
402 ldr r8, [r3, #8] @ get secondary_data.swapper_pg_dir 405 ldr r8, [r3, #8] @ get secondary_data.swapper_pg_dir
403 badr lr, __enable_mmu @ return address 406 badr lr, __enable_mmu @ return address
404 mov r13, r12 @ __secondary_switched address 407 mov r13, r12 @ __secondary_switched address
diff --git a/arch/arm/kernel/vdso.c b/arch/arm/kernel/vdso.c
index efe17dd9b921..54a5aeab988d 100644
--- a/arch/arm/kernel/vdso.c
+++ b/arch/arm/kernel/vdso.c
@@ -296,7 +296,6 @@ static bool tk_is_cntvct(const struct timekeeper *tk)
296 */ 296 */
297void update_vsyscall(struct timekeeper *tk) 297void update_vsyscall(struct timekeeper *tk)
298{ 298{
299 struct timespec xtime_coarse;
300 struct timespec64 *wtm = &tk->wall_to_monotonic; 299 struct timespec64 *wtm = &tk->wall_to_monotonic;
301 300
302 if (!cntvct_ok) { 301 if (!cntvct_ok) {
@@ -308,10 +307,10 @@ void update_vsyscall(struct timekeeper *tk)
308 307
309 vdso_write_begin(vdso_data); 308 vdso_write_begin(vdso_data);
310 309
311 xtime_coarse = __current_kernel_time();
312 vdso_data->tk_is_cntvct = tk_is_cntvct(tk); 310 vdso_data->tk_is_cntvct = tk_is_cntvct(tk);
313 vdso_data->xtime_coarse_sec = xtime_coarse.tv_sec; 311 vdso_data->xtime_coarse_sec = tk->xtime_sec;
314 vdso_data->xtime_coarse_nsec = xtime_coarse.tv_nsec; 312 vdso_data->xtime_coarse_nsec = (u32)(tk->tkr_mono.xtime_nsec >>
313 tk->tkr_mono.shift);
315 vdso_data->wtm_clock_sec = wtm->tv_sec; 314 vdso_data->wtm_clock_sec = wtm->tv_sec;
316 vdso_data->wtm_clock_nsec = wtm->tv_nsec; 315 vdso_data->wtm_clock_nsec = wtm->tv_nsec;
317 316
diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c
index 6001f1c9d136..4a87e86dec45 100644
--- a/arch/arm/mach-exynos/pm_domains.c
+++ b/arch/arm/mach-exynos/pm_domains.c
@@ -146,9 +146,8 @@ static __init int exynos4_pm_init_power_domain(void)
146 pd->base = of_iomap(np, 0); 146 pd->base = of_iomap(np, 0);
147 if (!pd->base) { 147 if (!pd->base) {
148 pr_warn("%s: failed to map memory\n", __func__); 148 pr_warn("%s: failed to map memory\n", __func__);
149 kfree(pd->pd.name); 149 kfree_const(pd->pd.name);
150 kfree(pd); 150 kfree(pd);
151 of_node_put(np);
152 continue; 151 continue;
153 } 152 }
154 153
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index d78c12e7cb5e..486cc4ded190 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -2373,6 +2373,9 @@ static int of_dev_hwmod_lookup(struct device_node *np,
2373 * registers. This address is needed early so the OCP registers that 2373 * registers. This address is needed early so the OCP registers that
2374 * are part of the device's address space can be ioremapped properly. 2374 * are part of the device's address space can be ioremapped properly.
2375 * 2375 *
2376 * If SYSC access is not needed, the registers will not be remapped
2377 * and non-availability of MPU access is not treated as an error.
2378 *
2376 * Returns 0 on success, -EINVAL if an invalid hwmod is passed, and 2379 * Returns 0 on success, -EINVAL if an invalid hwmod is passed, and
2377 * -ENXIO on absent or invalid register target address space. 2380 * -ENXIO on absent or invalid register target address space.
2378 */ 2381 */
@@ -2387,6 +2390,11 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data,
2387 2390
2388 _save_mpu_port_index(oh); 2391 _save_mpu_port_index(oh);
2389 2392
2393 /* if we don't need sysc access we don't need to ioremap */
2394 if (!oh->class->sysc)
2395 return 0;
2396
2397 /* we can't continue without MPU PORT if we need sysc access */
2390 if (oh->_int_flags & _HWMOD_NO_MPU_PORT) 2398 if (oh->_int_flags & _HWMOD_NO_MPU_PORT)
2391 return -ENXIO; 2399 return -ENXIO;
2392 2400
@@ -2396,8 +2404,10 @@ static int __init _init_mpu_rt_base(struct omap_hwmod *oh, void *data,
2396 oh->name); 2404 oh->name);
2397 2405
2398 /* Extract the IO space from device tree blob */ 2406 /* Extract the IO space from device tree blob */
2399 if (!np) 2407 if (!np) {
2408 pr_err("omap_hwmod: %s: no dt node\n", oh->name);
2400 return -ENXIO; 2409 return -ENXIO;
2410 }
2401 2411
2402 va_start = of_iomap(np, index + oh->mpu_rt_idx); 2412 va_start = of_iomap(np, index + oh->mpu_rt_idx);
2403 } else { 2413 } else {
@@ -2456,13 +2466,11 @@ static int __init _init(struct omap_hwmod *oh, void *data)
2456 oh->name, np->name); 2466 oh->name, np->name);
2457 } 2467 }
2458 2468
2459 if (oh->class->sysc) { 2469 r = _init_mpu_rt_base(oh, NULL, index, np);
2460 r = _init_mpu_rt_base(oh, NULL, index, np); 2470 if (r < 0) {
2461 if (r < 0) { 2471 WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n",
2462 WARN(1, "omap_hwmod: %s: doesn't have mpu register target base\n", 2472 oh->name);
2463 oh->name); 2473 return 0;
2464 return 0;
2465 }
2466 } 2474 }
2467 2475
2468 r = _init_clocks(oh, NULL); 2476 r = _init_clocks(oh, NULL);
diff --git a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
index 2606c6608bd8..562247bced49 100644
--- a/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_7xx_data.c
@@ -827,8 +827,7 @@ static struct omap_hwmod_class_sysconfig dra7xx_gpmc_sysc = {
827 .syss_offs = 0x0014, 827 .syss_offs = 0x0014,
828 .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_SIDLEMODE | 828 .sysc_flags = (SYSC_HAS_AUTOIDLE | SYSC_HAS_SIDLEMODE |
829 SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS), 829 SYSC_HAS_SOFTRESET | SYSS_HAS_RESET_STATUS),
830 .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | 830 .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART),
831 SIDLE_SMART_WKUP),
832 .sysc_fields = &omap_hwmod_sysc_type1, 831 .sysc_fields = &omap_hwmod_sysc_type1,
833}; 832};
834 833
@@ -844,7 +843,7 @@ static struct omap_hwmod dra7xx_gpmc_hwmod = {
844 .class = &dra7xx_gpmc_hwmod_class, 843 .class = &dra7xx_gpmc_hwmod_class,
845 .clkdm_name = "l3main1_clkdm", 844 .clkdm_name = "l3main1_clkdm",
846 /* Skip reset for CONFIG_OMAP_GPMC_DEBUG for bootloader timings */ 845 /* Skip reset for CONFIG_OMAP_GPMC_DEBUG for bootloader timings */
847 .flags = HWMOD_SWSUP_SIDLE | DEBUG_OMAP_GPMC_HWMOD_FLAGS, 846 .flags = DEBUG_OMAP_GPMC_HWMOD_FLAGS,
848 .main_clk = "l3_iclk_div", 847 .main_clk = "l3_iclk_div",
849 .prcm = { 848 .prcm = {
850 .omap4 = { 849 .omap4 = {
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 4550d247e308..c011e2296cb1 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -74,32 +74,52 @@ struct jit_ctx {
74 74
75int bpf_jit_enable __read_mostly; 75int bpf_jit_enable __read_mostly;
76 76
77static u64 jit_get_skb_b(struct sk_buff *skb, unsigned offset) 77static inline int call_neg_helper(struct sk_buff *skb, int offset, void *ret,
78 unsigned int size)
79{
80 void *ptr = bpf_internal_load_pointer_neg_helper(skb, offset, size);
81
82 if (!ptr)
83 return -EFAULT;
84 memcpy(ret, ptr, size);
85 return 0;
86}
87
88static u64 jit_get_skb_b(struct sk_buff *skb, int offset)
78{ 89{
79 u8 ret; 90 u8 ret;
80 int err; 91 int err;
81 92
82 err = skb_copy_bits(skb, offset, &ret, 1); 93 if (offset < 0)
94 err = call_neg_helper(skb, offset, &ret, 1);
95 else
96 err = skb_copy_bits(skb, offset, &ret, 1);
83 97
84 return (u64)err << 32 | ret; 98 return (u64)err << 32 | ret;
85} 99}
86 100
87static u64 jit_get_skb_h(struct sk_buff *skb, unsigned offset) 101static u64 jit_get_skb_h(struct sk_buff *skb, int offset)
88{ 102{
89 u16 ret; 103 u16 ret;
90 int err; 104 int err;
91 105
92 err = skb_copy_bits(skb, offset, &ret, 2); 106 if (offset < 0)
107 err = call_neg_helper(skb, offset, &ret, 2);
108 else
109 err = skb_copy_bits(skb, offset, &ret, 2);
93 110
94 return (u64)err << 32 | ntohs(ret); 111 return (u64)err << 32 | ntohs(ret);
95} 112}
96 113
97static u64 jit_get_skb_w(struct sk_buff *skb, unsigned offset) 114static u64 jit_get_skb_w(struct sk_buff *skb, int offset)
98{ 115{
99 u32 ret; 116 u32 ret;
100 int err; 117 int err;
101 118
102 err = skb_copy_bits(skb, offset, &ret, 4); 119 if (offset < 0)
120 err = call_neg_helper(skb, offset, &ret, 4);
121 else
122 err = skb_copy_bits(skb, offset, &ret, 4);
103 123
104 return (u64)err << 32 | ntohl(ret); 124 return (u64)err << 32 | ntohl(ret);
105} 125}
@@ -536,9 +556,6 @@ static int build_body(struct jit_ctx *ctx)
536 case BPF_LD | BPF_B | BPF_ABS: 556 case BPF_LD | BPF_B | BPF_ABS:
537 load_order = 0; 557 load_order = 0;
538load: 558load:
539 /* the interpreter will deal with the negative K */
540 if ((int)k < 0)
541 return -ENOTSUPP;
542 emit_mov_i(r_off, k, ctx); 559 emit_mov_i(r_off, k, ctx);
543load_common: 560load_common:
544 ctx->seen |= SEEN_DATA | SEEN_CALL; 561 ctx->seen |= SEEN_DATA | SEEN_CALL;
@@ -547,12 +564,24 @@ load_common:
547 emit(ARM_SUB_I(r_scratch, r_skb_hl, 564 emit(ARM_SUB_I(r_scratch, r_skb_hl,
548 1 << load_order), ctx); 565 1 << load_order), ctx);
549 emit(ARM_CMP_R(r_scratch, r_off), ctx); 566 emit(ARM_CMP_R(r_scratch, r_off), ctx);
550 condt = ARM_COND_HS; 567 condt = ARM_COND_GE;
551 } else { 568 } else {
552 emit(ARM_CMP_R(r_skb_hl, r_off), ctx); 569 emit(ARM_CMP_R(r_skb_hl, r_off), ctx);
553 condt = ARM_COND_HI; 570 condt = ARM_COND_HI;
554 } 571 }
555 572
573 /*
574 * test for negative offset, only if we are
575 * currently scheduled to take the fast
576 * path. this will update the flags so that
577 * the slowpath instruction are ignored if the
578 * offset is negative.
579 *
580 * for loard_order == 0 the HI condition will
581 * make loads at offset 0 take the slow path too.
582 */
583 _emit(condt, ARM_CMP_I(r_off, 0), ctx);
584
556 _emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data), 585 _emit(condt, ARM_ADD_R(r_scratch, r_off, r_skb_data),
557 ctx); 586 ctx);
558 587
@@ -860,9 +889,11 @@ b_epilogue:
860 off = offsetof(struct sk_buff, vlan_tci); 889 off = offsetof(struct sk_buff, vlan_tci);
861 emit(ARM_LDRH_I(r_A, r_skb, off), ctx); 890 emit(ARM_LDRH_I(r_A, r_skb, off), ctx);
862 if (code == (BPF_ANC | SKF_AD_VLAN_TAG)) 891 if (code == (BPF_ANC | SKF_AD_VLAN_TAG))
863 OP_IMM3(ARM_AND, r_A, r_A, VLAN_VID_MASK, ctx); 892 OP_IMM3(ARM_AND, r_A, r_A, ~VLAN_TAG_PRESENT, ctx);
864 else 893 else {
865 OP_IMM3(ARM_AND, r_A, r_A, VLAN_TAG_PRESENT, ctx); 894 OP_IMM3(ARM_LSR, r_A, r_A, 12, ctx);
895 OP_IMM3(ARM_AND, r_A, r_A, 0x1, ctx);
896 }
866 break; 897 break;
867 case BPF_ANC | SKF_AD_QUEUE: 898 case BPF_ANC | SKF_AD_QUEUE:
868 ctx->seen |= SEEN_SKB; 899 ctx->seen |= SEEN_SKB;
diff --git a/arch/arm/vdso/Makefile b/arch/arm/vdso/Makefile
index 9d259d94e429..1160434eece0 100644
--- a/arch/arm/vdso/Makefile
+++ b/arch/arm/vdso/Makefile
@@ -14,7 +14,7 @@ VDSO_LDFLAGS += -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096
14VDSO_LDFLAGS += -nostdlib -shared 14VDSO_LDFLAGS += -nostdlib -shared
15VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) 15VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
16VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--build-id) 16VDSO_LDFLAGS += $(call cc-ldoption, -Wl$(comma)--build-id)
17VDSO_LDFLAGS += $(call cc-option, -fuse-ld=bfd) 17VDSO_LDFLAGS += $(call cc-ldoption, -fuse-ld=bfd)
18 18
19obj-$(CONFIG_VDSO) += vdso.o 19obj-$(CONFIG_VDSO) += vdso.o
20extra-$(CONFIG_VDSO) += vdso.lds 20extra-$(CONFIG_VDSO) += vdso.lds
diff --git a/arch/arm64/boot/dts/apm/apm-storm.dtsi b/arch/arm64/boot/dts/apm/apm-storm.dtsi
index 0689c3fb56e3..58093edeea2e 100644
--- a/arch/arm64/boot/dts/apm/apm-storm.dtsi
+++ b/arch/arm64/boot/dts/apm/apm-storm.dtsi
@@ -823,7 +823,7 @@
823 device_type = "dma"; 823 device_type = "dma";
824 reg = <0x0 0x1f270000 0x0 0x10000>, 824 reg = <0x0 0x1f270000 0x0 0x10000>,
825 <0x0 0x1f200000 0x0 0x10000>, 825 <0x0 0x1f200000 0x0 0x10000>,
826 <0x0 0x1b008000 0x0 0x2000>, 826 <0x0 0x1b000000 0x0 0x400000>,
827 <0x0 0x1054a000 0x0 0x100>; 827 <0x0 0x1054a000 0x0 0x100>;
828 interrupts = <0x0 0x82 0x4>, 828 interrupts = <0x0 0x82 0x4>,
829 <0x0 0xb8 0x4>, 829 <0x0 0xb8 0x4>,
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index 9d4aa18f2a82..e8ca6eaedd02 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -122,12 +122,12 @@ static int __init uefi_init(void)
122 122
123 /* Show what we know for posterity */ 123 /* Show what we know for posterity */
124 c16 = early_memremap(efi_to_phys(efi.systab->fw_vendor), 124 c16 = early_memremap(efi_to_phys(efi.systab->fw_vendor),
125 sizeof(vendor)); 125 sizeof(vendor) * sizeof(efi_char16_t));
126 if (c16) { 126 if (c16) {
127 for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i) 127 for (i = 0; i < (int) sizeof(vendor) - 1 && *c16; ++i)
128 vendor[i] = c16[i]; 128 vendor[i] = c16[i];
129 vendor[i] = '\0'; 129 vendor[i] = '\0';
130 early_memunmap(c16, sizeof(vendor)); 130 early_memunmap(c16, sizeof(vendor) * sizeof(efi_char16_t));
131 } 131 }
132 132
133 pr_info("EFI v%u.%.02u by %s\n", 133 pr_info("EFI v%u.%.02u by %s\n",
diff --git a/arch/arm64/kernel/entry.S b/arch/arm64/kernel/entry.S
index f860bfda454a..e16351819fed 100644
--- a/arch/arm64/kernel/entry.S
+++ b/arch/arm64/kernel/entry.S
@@ -585,7 +585,8 @@ ENDPROC(el0_irq)
585 * 585 *
586 */ 586 */
587ENTRY(cpu_switch_to) 587ENTRY(cpu_switch_to)
588 add x8, x0, #THREAD_CPU_CONTEXT 588 mov x10, #THREAD_CPU_CONTEXT
589 add x8, x0, x10
589 mov x9, sp 590 mov x9, sp
590 stp x19, x20, [x8], #16 // store callee-saved registers 591 stp x19, x20, [x8], #16 // store callee-saved registers
591 stp x21, x22, [x8], #16 592 stp x21, x22, [x8], #16
@@ -594,7 +595,7 @@ ENTRY(cpu_switch_to)
594 stp x27, x28, [x8], #16 595 stp x27, x28, [x8], #16
595 stp x29, x9, [x8], #16 596 stp x29, x9, [x8], #16
596 str lr, [x8] 597 str lr, [x8]
597 add x8, x1, #THREAD_CPU_CONTEXT 598 add x8, x1, x10
598 ldp x19, x20, [x8], #16 // restore callee-saved registers 599 ldp x19, x20, [x8], #16 // restore callee-saved registers
599 ldp x21, x22, [x8], #16 600 ldp x21, x22, [x8], #16
600 ldp x23, x24, [x8], #16 601 ldp x23, x24, [x8], #16
diff --git a/arch/arm64/kernel/irq.c b/arch/arm64/kernel/irq.c
index 240b75c0e94f..463fa2e7e34c 100644
--- a/arch/arm64/kernel/irq.c
+++ b/arch/arm64/kernel/irq.c
@@ -61,7 +61,7 @@ void __init init_IRQ(void)
61static bool migrate_one_irq(struct irq_desc *desc) 61static bool migrate_one_irq(struct irq_desc *desc)
62{ 62{
63 struct irq_data *d = irq_desc_get_irq_data(desc); 63 struct irq_data *d = irq_desc_get_irq_data(desc);
64 const struct cpumask *affinity = d->affinity; 64 const struct cpumask *affinity = irq_data_get_affinity_mask(d);
65 struct irq_chip *c; 65 struct irq_chip *c;
66 bool ret = false; 66 bool ret = false;
67 67
@@ -81,7 +81,7 @@ static bool migrate_one_irq(struct irq_desc *desc)
81 if (!c->irq_set_affinity) 81 if (!c->irq_set_affinity)
82 pr_debug("IRQ%u: unable to set affinity\n", d->irq); 82 pr_debug("IRQ%u: unable to set affinity\n", d->irq);
83 else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret) 83 else if (c->irq_set_affinity(d, affinity, false) == IRQ_SET_MASK_OK && ret)
84 cpumask_copy(d->affinity, affinity); 84 cpumask_copy(irq_data_get_affinity_mask(d), affinity);
85 85
86 return ret; 86 return ret;
87} 87}
diff --git a/arch/arm64/kernel/signal32.c b/arch/arm64/kernel/signal32.c
index 1670f15ef69e..948f0ad2de23 100644
--- a/arch/arm64/kernel/signal32.c
+++ b/arch/arm64/kernel/signal32.c
@@ -168,7 +168,8 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
168 * Other callers might not initialize the si_lsb field, 168 * Other callers might not initialize the si_lsb field,
169 * so check explicitely for the right codes here. 169 * so check explicitely for the right codes here.
170 */ 170 */
171 if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO) 171 if (from->si_signo == SIGBUS &&
172 (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
172 err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb); 173 err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
173#endif 174#endif
174 break; 175 break;
@@ -201,8 +202,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
201 202
202int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) 203int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
203{ 204{
204 memset(to, 0, sizeof *to);
205
206 if (copy_from_user(to, from, __ARCH_SI_PREAMBLE_SIZE) || 205 if (copy_from_user(to, from, __ARCH_SI_PREAMBLE_SIZE) ||
207 copy_from_user(to->_sifields._pad, 206 copy_from_user(to->_sifields._pad,
208 from->_sifields._pad, SI_PAD_SIZE)) 207 from->_sifields._pad, SI_PAD_SIZE))
diff --git a/arch/arm64/kernel/vdso.c b/arch/arm64/kernel/vdso.c
index ec37ab3f524f..97bc68f4c689 100644
--- a/arch/arm64/kernel/vdso.c
+++ b/arch/arm64/kernel/vdso.c
@@ -199,16 +199,15 @@ up_fail:
199 */ 199 */
200void update_vsyscall(struct timekeeper *tk) 200void update_vsyscall(struct timekeeper *tk)
201{ 201{
202 struct timespec xtime_coarse;
203 u32 use_syscall = strcmp(tk->tkr_mono.clock->name, "arch_sys_counter"); 202 u32 use_syscall = strcmp(tk->tkr_mono.clock->name, "arch_sys_counter");
204 203
205 ++vdso_data->tb_seq_count; 204 ++vdso_data->tb_seq_count;
206 smp_wmb(); 205 smp_wmb();
207 206
208 xtime_coarse = __current_kernel_time();
209 vdso_data->use_syscall = use_syscall; 207 vdso_data->use_syscall = use_syscall;
210 vdso_data->xtime_coarse_sec = xtime_coarse.tv_sec; 208 vdso_data->xtime_coarse_sec = tk->xtime_sec;
211 vdso_data->xtime_coarse_nsec = xtime_coarse.tv_nsec; 209 vdso_data->xtime_coarse_nsec = tk->tkr_mono.xtime_nsec >>
210 tk->tkr_mono.shift;
212 vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec; 211 vdso_data->wtm_clock_sec = tk->wall_to_monotonic.tv_sec;
213 vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec; 212 vdso_data->wtm_clock_nsec = tk->wall_to_monotonic.tv_nsec;
214 213
diff --git a/arch/avr32/kernel/time.c b/arch/avr32/kernel/time.c
index d0f771be9e96..a124c55733db 100644
--- a/arch/avr32/kernel/time.c
+++ b/arch/avr32/kernel/time.c
@@ -18,6 +18,7 @@
18 18
19#include <mach/pm.h> 19#include <mach/pm.h>
20 20
21static bool disable_cpu_idle_poll;
21 22
22static cycle_t read_cycle_count(struct clocksource *cs) 23static cycle_t read_cycle_count(struct clocksource *cs)
23{ 24{
@@ -80,45 +81,45 @@ static int comparator_next_event(unsigned long delta,
80 return 0; 81 return 0;
81} 82}
82 83
83static void comparator_mode(enum clock_event_mode mode, 84static int comparator_shutdown(struct clock_event_device *evdev)
84 struct clock_event_device *evdev)
85{ 85{
86 switch (mode) { 86 pr_debug("%s: %s\n", __func__, evdev->name);
87 case CLOCK_EVT_MODE_ONESHOT: 87 sysreg_write(COMPARE, 0);
88 pr_debug("%s: start\n", evdev->name); 88
89 /* FALLTHROUGH */ 89 if (disable_cpu_idle_poll) {
90 case CLOCK_EVT_MODE_RESUME: 90 disable_cpu_idle_poll = false;
91 /* 91 /*
92 * If we're using the COUNT and COMPARE registers we 92 * Only disable idle poll if we have forced that
93 * need to force idle poll. 93 * in a previous call.
94 */ 94 */
95 cpu_idle_poll_ctrl(true); 95 cpu_idle_poll_ctrl(false);
96 break;
97 case CLOCK_EVT_MODE_UNUSED:
98 case CLOCK_EVT_MODE_SHUTDOWN:
99 sysreg_write(COMPARE, 0);
100 pr_debug("%s: stop\n", evdev->name);
101 if (evdev->mode == CLOCK_EVT_MODE_ONESHOT ||
102 evdev->mode == CLOCK_EVT_MODE_RESUME) {
103 /*
104 * Only disable idle poll if we have forced that
105 * in a previous call.
106 */
107 cpu_idle_poll_ctrl(false);
108 }
109 break;
110 default:
111 BUG();
112 } 96 }
97 return 0;
98}
99
100static int comparator_set_oneshot(struct clock_event_device *evdev)
101{
102 pr_debug("%s: %s\n", __func__, evdev->name);
103
104 disable_cpu_idle_poll = true;
105 /*
106 * If we're using the COUNT and COMPARE registers we
107 * need to force idle poll.
108 */
109 cpu_idle_poll_ctrl(true);
110
111 return 0;
113} 112}
114 113
115static struct clock_event_device comparator = { 114static struct clock_event_device comparator = {
116 .name = "avr32_comparator", 115 .name = "avr32_comparator",
117 .features = CLOCK_EVT_FEAT_ONESHOT, 116 .features = CLOCK_EVT_FEAT_ONESHOT,
118 .shift = 16, 117 .shift = 16,
119 .rating = 50, 118 .rating = 50,
120 .set_next_event = comparator_next_event, 119 .set_next_event = comparator_next_event,
121 .set_mode = comparator_mode, 120 .set_state_shutdown = comparator_shutdown,
121 .set_state_oneshot = comparator_set_oneshot,
122 .tick_resume = comparator_set_oneshot,
122}; 123};
123 124
124void read_persistent_clock(struct timespec *ts) 125void read_persistent_clock(struct timespec *ts)
diff --git a/arch/avr32/mach-at32ap/clock.c b/arch/avr32/mach-at32ap/clock.c
index 23b1a97fae7a..52c179bec0cc 100644
--- a/arch/avr32/mach-at32ap/clock.c
+++ b/arch/avr32/mach-at32ap/clock.c
@@ -80,6 +80,9 @@ int clk_enable(struct clk *clk)
80{ 80{
81 unsigned long flags; 81 unsigned long flags;
82 82
83 if (!clk)
84 return 0;
85
83 spin_lock_irqsave(&clk_lock, flags); 86 spin_lock_irqsave(&clk_lock, flags);
84 __clk_enable(clk); 87 __clk_enable(clk);
85 spin_unlock_irqrestore(&clk_lock, flags); 88 spin_unlock_irqrestore(&clk_lock, flags);
@@ -106,6 +109,9 @@ void clk_disable(struct clk *clk)
106{ 109{
107 unsigned long flags; 110 unsigned long flags;
108 111
112 if (IS_ERR_OR_NULL(clk))
113 return;
114
109 spin_lock_irqsave(&clk_lock, flags); 115 spin_lock_irqsave(&clk_lock, flags);
110 __clk_disable(clk); 116 __clk_disable(clk);
111 spin_unlock_irqrestore(&clk_lock, flags); 117 spin_unlock_irqrestore(&clk_lock, flags);
@@ -117,6 +123,9 @@ unsigned long clk_get_rate(struct clk *clk)
117 unsigned long flags; 123 unsigned long flags;
118 unsigned long rate; 124 unsigned long rate;
119 125
126 if (!clk)
127 return 0;
128
120 spin_lock_irqsave(&clk_lock, flags); 129 spin_lock_irqsave(&clk_lock, flags);
121 rate = clk->get_rate(clk); 130 rate = clk->get_rate(clk);
122 spin_unlock_irqrestore(&clk_lock, flags); 131 spin_unlock_irqrestore(&clk_lock, flags);
@@ -129,6 +138,9 @@ long clk_round_rate(struct clk *clk, unsigned long rate)
129{ 138{
130 unsigned long flags, actual_rate; 139 unsigned long flags, actual_rate;
131 140
141 if (!clk)
142 return 0;
143
132 if (!clk->set_rate) 144 if (!clk->set_rate)
133 return -ENOSYS; 145 return -ENOSYS;
134 146
@@ -145,6 +157,9 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
145 unsigned long flags; 157 unsigned long flags;
146 long ret; 158 long ret;
147 159
160 if (!clk)
161 return 0;
162
148 if (!clk->set_rate) 163 if (!clk->set_rate)
149 return -ENOSYS; 164 return -ENOSYS;
150 165
@@ -161,6 +176,9 @@ int clk_set_parent(struct clk *clk, struct clk *parent)
161 unsigned long flags; 176 unsigned long flags;
162 int ret; 177 int ret;
163 178
179 if (!clk)
180 return 0;
181
164 if (!clk->set_parent) 182 if (!clk->set_parent)
165 return -ENOSYS; 183 return -ENOSYS;
166 184
@@ -174,7 +192,7 @@ EXPORT_SYMBOL(clk_set_parent);
174 192
175struct clk *clk_get_parent(struct clk *clk) 193struct clk *clk_get_parent(struct clk *clk)
176{ 194{
177 return clk->parent; 195 return !clk ? NULL : clk->parent;
178} 196}
179EXPORT_SYMBOL(clk_get_parent); 197EXPORT_SYMBOL(clk_get_parent);
180 198
diff --git a/arch/m32r/include/asm/io.h b/arch/m32r/include/asm/io.h
index 0c3f25ee3381..f8de767ce2bc 100644
--- a/arch/m32r/include/asm/io.h
+++ b/arch/m32r/include/asm/io.h
@@ -174,6 +174,11 @@ static inline void _writel(unsigned long l, unsigned long addr)
174#define iowrite16 writew 174#define iowrite16 writew
175#define iowrite32 writel 175#define iowrite32 writel
176 176
177#define ioread16be(addr) be16_to_cpu(readw(addr))
178#define ioread32be(addr) be32_to_cpu(readl(addr))
179#define iowrite16be(v, addr) writew(cpu_to_be16(v), (addr))
180#define iowrite32be(v, addr) writel(cpu_to_be32(v), (addr))
181
177#define mmiowb() 182#define mmiowb()
178 183
179#define flush_write_buffers() do { } while (0) /* M32R_FIXME */ 184#define flush_write_buffers() do { } while (0) /* M32R_FIXME */
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index cee5f93e5712..199a8357838c 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -151,7 +151,6 @@ config BMIPS_GENERIC
151 select BCM7120_L2_IRQ 151 select BCM7120_L2_IRQ
152 select BRCMSTB_L2_IRQ 152 select BRCMSTB_L2_IRQ
153 select IRQ_MIPS_CPU 153 select IRQ_MIPS_CPU
154 select RAW_IRQ_ACCESSORS
155 select DMA_NONCOHERENT 154 select DMA_NONCOHERENT
156 select SYS_SUPPORTS_32BIT_KERNEL 155 select SYS_SUPPORTS_32BIT_KERNEL
157 select SYS_SUPPORTS_LITTLE_ENDIAN 156 select SYS_SUPPORTS_LITTLE_ENDIAN
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
index 01a644f174dd..1ba21204ebe0 100644
--- a/arch/mips/ath79/setup.c
+++ b/arch/mips/ath79/setup.c
@@ -190,6 +190,7 @@ int get_c0_perfcount_int(void)
190{ 190{
191 return ATH79_MISC_IRQ(5); 191 return ATH79_MISC_IRQ(5);
192} 192}
193EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
193 194
194unsigned int get_c0_compare_int(void) 195unsigned int get_c0_compare_int(void)
195{ 196{
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index 56f5d080ef9d..b7fa9ae28c36 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -42,7 +42,7 @@ static irqreturn_t mailbox_interrupt(int irq, void *dev_id)
42 cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), action); 42 cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), action);
43 43
44 if (action & SMP_CALL_FUNCTION) 44 if (action & SMP_CALL_FUNCTION)
45 smp_call_function_interrupt(); 45 generic_smp_call_function_interrupt();
46 if (action & SMP_RESCHEDULE_YOURSELF) 46 if (action & SMP_RESCHEDULE_YOURSELF)
47 scheduler_ipi(); 47 scheduler_ipi();
48 48
diff --git a/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h b/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h
deleted file mode 100644
index 11d3b572b1b3..000000000000
--- a/arch/mips/include/asm/mach-bcm63xx/dma-coherence.h
+++ /dev/null
@@ -1,10 +0,0 @@
1#ifndef __ASM_MACH_BCM63XX_DMA_COHERENCE_H
2#define __ASM_MACH_BCM63XX_DMA_COHERENCE_H
3
4#include <asm/bmips.h>
5
6#define plat_post_dma_flush bmips_post_dma_flush
7
8#include <asm/mach-generic/dma-coherence.h>
9
10#endif /* __ASM_MACH_BCM63XX_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 9d8106758142..ae8569475264 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -182,8 +182,39 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
182 * Make sure the buddy is global too (if it's !none, 182 * Make sure the buddy is global too (if it's !none,
183 * it better already be global) 183 * it better already be global)
184 */ 184 */
185#ifdef CONFIG_SMP
186 /*
187 * For SMP, multiple CPUs can race, so we need to do
188 * this atomically.
189 */
190#ifdef CONFIG_64BIT
191#define LL_INSN "lld"
192#define SC_INSN "scd"
193#else /* CONFIG_32BIT */
194#define LL_INSN "ll"
195#define SC_INSN "sc"
196#endif
197 unsigned long page_global = _PAGE_GLOBAL;
198 unsigned long tmp;
199
200 __asm__ __volatile__ (
201 " .set push\n"
202 " .set noreorder\n"
203 "1: " LL_INSN " %[tmp], %[buddy]\n"
204 " bnez %[tmp], 2f\n"
205 " or %[tmp], %[tmp], %[global]\n"
206 " " SC_INSN " %[tmp], %[buddy]\n"
207 " beqz %[tmp], 1b\n"
208 " nop\n"
209 "2:\n"
210 " .set pop"
211 : [buddy] "+m" (buddy->pte),
212 [tmp] "=&r" (tmp)
213 : [global] "r" (page_global));
214#else /* !CONFIG_SMP */
185 if (pte_none(*buddy)) 215 if (pte_none(*buddy))
186 pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL; 216 pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
217#endif /* CONFIG_SMP */
187 } 218 }
188#endif 219#endif
189} 220}
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index 16f1ea9ab191..03722d4326a1 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -83,8 +83,6 @@ static inline void __cpu_die(unsigned int cpu)
83extern void play_dead(void); 83extern void play_dead(void);
84#endif 84#endif
85 85
86extern asmlinkage void smp_call_function_interrupt(void);
87
88static inline void arch_send_call_function_single_ipi(int cpu) 86static inline void arch_send_call_function_single_ipi(int cpu)
89{ 87{
90 extern struct plat_smp_ops *mp_ops; /* private */ 88 extern struct plat_smp_ops *mp_ops; /* private */
diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
index 28d6d9364bd1..a71da576883c 100644
--- a/arch/mips/include/asm/stackframe.h
+++ b/arch/mips/include/asm/stackframe.h
@@ -152,6 +152,31 @@
152 .set noreorder 152 .set noreorder
153 bltz k0, 8f 153 bltz k0, 8f
154 move k1, sp 154 move k1, sp
155#ifdef CONFIG_EVA
156 /*
157 * Flush interAptiv's Return Prediction Stack (RPS) by writing
158 * EntryHi. Toggling Config7.RPS is slower and less portable.
159 *
160 * The RPS isn't automatically flushed when exceptions are
161 * taken, which can result in kernel mode speculative accesses
162 * to user addresses if the RPS mispredicts. That's harmless
163 * when user and kernel share the same address space, but with
164 * EVA the same user segments may be unmapped to kernel mode,
165 * even containing sensitive MMIO regions or invalid memory.
166 *
167 * This can happen when the kernel sets the return address to
168 * ret_from_* and jr's to the exception handler, which looks
169 * more like a tail call than a function call. If nested calls
170 * don't evict the last user address in the RPS, it will
171 * mispredict the return and fetch from a user controlled
172 * address into the icache.
173 *
174 * More recent EVA-capable cores with MAAR to restrict
175 * speculative accesses aren't affected.
176 */
177 MFC0 k0, CP0_ENTRYHI
178 MTC0 k0, CP0_ENTRYHI
179#endif
155 .set reorder 180 .set reorder
156 /* Called from user mode, new stack. */ 181 /* Called from user mode, new stack. */
157 get_saved_sp 182 get_saved_sp
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
index 3e4491aa6d6b..789d7bf4fef3 100644
--- a/arch/mips/kernel/mips-mt-fpaff.c
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -154,7 +154,7 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
154 unsigned long __user *user_mask_ptr) 154 unsigned long __user *user_mask_ptr)
155{ 155{
156 unsigned int real_len; 156 unsigned int real_len;
157 cpumask_t mask; 157 cpumask_t allowed, mask;
158 int retval; 158 int retval;
159 struct task_struct *p; 159 struct task_struct *p;
160 160
@@ -173,7 +173,8 @@ asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
173 if (retval) 173 if (retval)
174 goto out_unlock; 174 goto out_unlock;
175 175
176 cpumask_and(&mask, &p->thread.user_cpus_allowed, cpu_possible_mask); 176 cpumask_or(&allowed, &p->thread.user_cpus_allowed, &p->cpus_allowed);
177 cpumask_and(&mask, &allowed, cpu_active_mask);
177 178
178out_unlock: 179out_unlock:
179 read_unlock(&tasklist_lock); 180 read_unlock(&tasklist_lock);
diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c
index b130033838ba..5fcec3032f38 100644
--- a/arch/mips/kernel/prom.c
+++ b/arch/mips/kernel/prom.c
@@ -38,7 +38,7 @@ char *mips_get_machine_name(void)
38 return mips_machine_name; 38 return mips_machine_name;
39} 39}
40 40
41#ifdef CONFIG_OF 41#ifdef CONFIG_USE_OF
42void __init early_init_dt_add_memory_arch(u64 base, u64 size) 42void __init early_init_dt_add_memory_arch(u64 base, u64 size)
43{ 43{
44 return add_memory_region(base, size, BOOT_MEM_RAM); 44 return add_memory_region(base, size, BOOT_MEM_RAM);
diff --git a/arch/mips/kernel/relocate_kernel.S b/arch/mips/kernel/relocate_kernel.S
index 74bab9ddd0e1..c6bbf2165051 100644
--- a/arch/mips/kernel/relocate_kernel.S
+++ b/arch/mips/kernel/relocate_kernel.S
@@ -24,7 +24,7 @@ LEAF(relocate_new_kernel)
24 24
25process_entry: 25process_entry:
26 PTR_L s2, (s0) 26 PTR_L s2, (s0)
27 PTR_ADD s0, s0, SZREG 27 PTR_ADDIU s0, s0, SZREG
28 28
29 /* 29 /*
30 * In case of a kdump/crash kernel, the indirection page is not 30 * In case of a kdump/crash kernel, the indirection page is not
@@ -61,9 +61,9 @@ copy_word:
61 /* copy page word by word */ 61 /* copy page word by word */
62 REG_L s5, (s2) 62 REG_L s5, (s2)
63 REG_S s5, (s4) 63 REG_S s5, (s4)
64 PTR_ADD s4, s4, SZREG 64 PTR_ADDIU s4, s4, SZREG
65 PTR_ADD s2, s2, SZREG 65 PTR_ADDIU s2, s2, SZREG
66 LONG_SUB s6, s6, 1 66 LONG_ADDIU s6, s6, -1
67 beq s6, zero, process_entry 67 beq s6, zero, process_entry
68 b copy_word 68 b copy_word
69 b process_entry 69 b process_entry
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index ad4d44635c76..a6f6b762c47a 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -80,7 +80,7 @@ syscall_trace_entry:
80 SAVE_STATIC 80 SAVE_STATIC
81 move s0, t2 81 move s0, t2
82 move a0, sp 82 move a0, sp
83 daddiu a1, v0, __NR_64_Linux 83 move a1, v0
84 jal syscall_trace_enter 84 jal syscall_trace_enter
85 85
86 bltz v0, 2f # seccomp failed? Skip syscall 86 bltz v0, 2f # seccomp failed? Skip syscall
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 446cc654da56..4b2010654c46 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -72,7 +72,7 @@ n32_syscall_trace_entry:
72 SAVE_STATIC 72 SAVE_STATIC
73 move s0, t2 73 move s0, t2
74 move a0, sp 74 move a0, sp
75 daddiu a1, v0, __NR_N32_Linux 75 move a1, v0
76 jal syscall_trace_enter 76 jal syscall_trace_enter
77 77
78 bltz v0, 2f # seccomp failed? Skip syscall 78 bltz v0, 2f # seccomp failed? Skip syscall
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index 19a7705f2a01..5d7f2634996f 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -409,8 +409,6 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
409 409
410int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from) 410int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
411{ 411{
412 memset(to, 0, sizeof *to);
413
414 if (copy_from_user(to, from, 3*sizeof(int)) || 412 if (copy_from_user(to, from, 3*sizeof(int)) ||
415 copy_from_user(to->_sifields._pad, 413 copy_from_user(to->_sifields._pad,
416 from->_sifields._pad, SI_PAD_SIZE32)) 414 from->_sifields._pad, SI_PAD_SIZE32))
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index 336708ae5c5b..78cf8c2f1de0 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -284,7 +284,7 @@ static irqreturn_t bmips5000_ipi_interrupt(int irq, void *dev_id)
284 if (action == 0) 284 if (action == 0)
285 scheduler_ipi(); 285 scheduler_ipi();
286 else 286 else
287 smp_call_function_interrupt(); 287 generic_smp_call_function_interrupt();
288 288
289 return IRQ_HANDLED; 289 return IRQ_HANDLED;
290} 290}
@@ -336,7 +336,7 @@ static irqreturn_t bmips43xx_ipi_interrupt(int irq, void *dev_id)
336 if (action & SMP_RESCHEDULE_YOURSELF) 336 if (action & SMP_RESCHEDULE_YOURSELF)
337 scheduler_ipi(); 337 scheduler_ipi();
338 if (action & SMP_CALL_FUNCTION) 338 if (action & SMP_CALL_FUNCTION)
339 smp_call_function_interrupt(); 339 generic_smp_call_function_interrupt();
340 340
341 return IRQ_HANDLED; 341 return IRQ_HANDLED;
342} 342}
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index d0744cc77ea7..a31896c33716 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -192,16 +192,6 @@ asmlinkage void start_secondary(void)
192 cpu_startup_entry(CPUHP_ONLINE); 192 cpu_startup_entry(CPUHP_ONLINE);
193} 193}
194 194
195/*
196 * Call into both interrupt handlers, as we share the IPI for them
197 */
198void __irq_entry smp_call_function_interrupt(void)
199{
200 irq_enter();
201 generic_smp_call_function_interrupt();
202 irq_exit();
203}
204
205static void stop_this_cpu(void *dummy) 195static void stop_this_cpu(void *dummy)
206{ 196{
207 /* 197 /*
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index e207a43b5f8f..8ea28e6ab37d 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -192,6 +192,7 @@ static void show_stacktrace(struct task_struct *task,
192void show_stack(struct task_struct *task, unsigned long *sp) 192void show_stack(struct task_struct *task, unsigned long *sp)
193{ 193{
194 struct pt_regs regs; 194 struct pt_regs regs;
195 mm_segment_t old_fs = get_fs();
195 if (sp) { 196 if (sp) {
196 regs.regs[29] = (unsigned long)sp; 197 regs.regs[29] = (unsigned long)sp;
197 regs.regs[31] = 0; 198 regs.regs[31] = 0;
@@ -210,7 +211,13 @@ void show_stack(struct task_struct *task, unsigned long *sp)
210 prepare_frametrace(&regs); 211 prepare_frametrace(&regs);
211 } 212 }
212 } 213 }
214 /*
215 * show_stack() deals exclusively with kernel mode, so be sure to access
216 * the stack in the kernel (not user) address space.
217 */
218 set_fs(KERNEL_DS);
213 show_stacktrace(task, &regs); 219 show_stacktrace(task, &regs);
220 set_fs(old_fs);
214} 221}
215 222
216static void show_code(unsigned int __user *pc) 223static void show_code(unsigned int __user *pc)
@@ -1519,6 +1526,7 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
1519 const int field = 2 * sizeof(unsigned long); 1526 const int field = 2 * sizeof(unsigned long);
1520 int multi_match = regs->cp0_status & ST0_TS; 1527 int multi_match = regs->cp0_status & ST0_TS;
1521 enum ctx_state prev_state; 1528 enum ctx_state prev_state;
1529 mm_segment_t old_fs = get_fs();
1522 1530
1523 prev_state = exception_enter(); 1531 prev_state = exception_enter();
1524 show_regs(regs); 1532 show_regs(regs);
@@ -1540,8 +1548,13 @@ asmlinkage void do_mcheck(struct pt_regs *regs)
1540 dump_tlb_all(); 1548 dump_tlb_all();
1541 } 1549 }
1542 1550
1551 if (!user_mode(regs))
1552 set_fs(KERNEL_DS);
1553
1543 show_code((unsigned int __user *) regs->cp0_epc); 1554 show_code((unsigned int __user *) regs->cp0_epc);
1544 1555
1556 set_fs(old_fs);
1557
1545 /* 1558 /*
1546 * Some chips may have other causes of machine check (e.g. SB1 1559 * Some chips may have other causes of machine check (e.g. SB1
1547 * graduation timer) 1560 * graduation timer)
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index af84bef0c90d..eb3efd137fd1 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -438,7 +438,7 @@ do { \
438 : "memory"); \ 438 : "memory"); \
439} while(0) 439} while(0)
440 440
441#define StoreDW(addr, value, res) \ 441#define _StoreDW(addr, value, res) \
442do { \ 442do { \
443 __asm__ __volatile__ ( \ 443 __asm__ __volatile__ ( \
444 ".set\tpush\n\t" \ 444 ".set\tpush\n\t" \
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
index 6ab10573490d..2c218c3bbca5 100644
--- a/arch/mips/lantiq/irq.c
+++ b/arch/mips/lantiq/irq.c
@@ -293,7 +293,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
293 293
294static irqreturn_t ipi_call_interrupt(int irq, void *dev_id) 294static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
295{ 295{
296 smp_call_function_interrupt(); 296 generic_smp_call_function_interrupt();
297 return IRQ_HANDLED; 297 return IRQ_HANDLED;
298} 298}
299 299
@@ -466,6 +466,7 @@ int get_c0_perfcount_int(void)
466{ 466{
467 return ltq_perfcount_irq; 467 return ltq_perfcount_irq;
468} 468}
469EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
469 470
470unsigned int get_c0_compare_int(void) 471unsigned int get_c0_compare_int(void)
471{ 472{
diff --git a/arch/mips/loongson64/loongson-3/smp.c b/arch/mips/loongson64/loongson-3/smp.c
index 509877c6e9d9..1a4738a8f2d3 100644
--- a/arch/mips/loongson64/loongson-3/smp.c
+++ b/arch/mips/loongson64/loongson-3/smp.c
@@ -266,8 +266,11 @@ void loongson3_ipi_interrupt(struct pt_regs *regs)
266 if (action & SMP_RESCHEDULE_YOURSELF) 266 if (action & SMP_RESCHEDULE_YOURSELF)
267 scheduler_ipi(); 267 scheduler_ipi();
268 268
269 if (action & SMP_CALL_FUNCTION) 269 if (action & SMP_CALL_FUNCTION) {
270 smp_call_function_interrupt(); 270 irq_enter();
271 generic_smp_call_function_interrupt();
272 irq_exit();
273 }
271 274
272 if (action & SMP_ASK_C0COUNT) { 275 if (action & SMP_ASK_C0COUNT) {
273 BUG_ON(cpu != 0); 276 BUG_ON(cpu != 0);
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 77d96db8253c..aab218c36e0d 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -160,18 +160,18 @@ static inline void setup_protection_map(void)
160 protection_map[1] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC); 160 protection_map[1] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
161 protection_map[2] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ); 161 protection_map[2] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
162 protection_map[3] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC); 162 protection_map[3] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
163 protection_map[4] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ); 163 protection_map[4] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
164 protection_map[5] = __pgprot(_page_cachable_default | _PAGE_PRESENT); 164 protection_map[5] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
165 protection_map[6] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ); 165 protection_map[6] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
166 protection_map[7] = __pgprot(_page_cachable_default | _PAGE_PRESENT); 166 protection_map[7] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
167 167
168 protection_map[8] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ); 168 protection_map[8] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
169 protection_map[9] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC); 169 protection_map[9] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
170 protection_map[10] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ); 170 protection_map[10] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ);
171 protection_map[11] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE); 171 protection_map[11] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
172 protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_READ); 172 protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
173 protection_map[13] = __pgprot(_page_cachable_default | _PAGE_PRESENT); 173 protection_map[13] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
174 protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE | _PAGE_NO_READ); 174 protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
175 protection_map[15] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE); 175 protection_map[15] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
176 176
177 } else { 177 } else {
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 36c0f26fac6b..852a41c6da45 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -133,7 +133,8 @@ good_area:
133#endif 133#endif
134 goto bad_area; 134 goto bad_area;
135 } 135 }
136 if (!(vma->vm_flags & VM_READ)) { 136 if (!(vma->vm_flags & VM_READ) &&
137 exception_epc(regs) != address) {
137#if 0 138#if 0
138 pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] RI violation\n", 139 pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] RI violation\n",
139 raw_smp_processor_id(), 140 raw_smp_processor_id(),
diff --git a/arch/mips/mti-malta/malta-int.c b/arch/mips/mti-malta/malta-int.c
index d1392f8f5811..fa8f591f3713 100644
--- a/arch/mips/mti-malta/malta-int.c
+++ b/arch/mips/mti-malta/malta-int.c
@@ -222,7 +222,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
222 222
223static irqreturn_t ipi_call_interrupt(int irq, void *dev_id) 223static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
224{ 224{
225 smp_call_function_interrupt(); 225 generic_smp_call_function_interrupt();
226 226
227 return IRQ_HANDLED; 227 return IRQ_HANDLED;
228} 228}
diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c
index 5625b190edc0..b7bf721eabf5 100644
--- a/arch/mips/mti-malta/malta-time.c
+++ b/arch/mips/mti-malta/malta-time.c
@@ -154,6 +154,7 @@ int get_c0_perfcount_int(void)
154 154
155 return mips_cpu_perf_irq; 155 return mips_cpu_perf_irq;
156} 156}
157EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
157 158
158unsigned int get_c0_compare_int(void) 159unsigned int get_c0_compare_int(void)
159{ 160{
@@ -171,14 +172,17 @@ unsigned int get_c0_compare_int(void)
171 172
172static void __init init_rtc(void) 173static void __init init_rtc(void)
173{ 174{
174 /* stop the clock whilst setting it up */ 175 unsigned char freq, ctrl;
175 CMOS_WRITE(RTC_SET | RTC_24H, RTC_CONTROL);
176 176
177 /* 32KHz time base */ 177 /* Set 32KHz time base if not already set */
178 CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT); 178 freq = CMOS_READ(RTC_FREQ_SELECT);
179 if ((freq & RTC_DIV_CTL) != RTC_REF_CLCK_32KHZ)
180 CMOS_WRITE(RTC_REF_CLCK_32KHZ, RTC_FREQ_SELECT);
179 181
180 /* start the clock */ 182 /* Ensure SET bit is clear so RTC can run */
181 CMOS_WRITE(RTC_24H, RTC_CONTROL); 183 ctrl = CMOS_READ(RTC_CONTROL);
184 if (ctrl & RTC_SET)
185 CMOS_WRITE(ctrl & ~RTC_SET, RTC_CONTROL);
182} 186}
183 187
184void __init plat_time_init(void) 188void __init plat_time_init(void)
diff --git a/arch/mips/mti-sead3/sead3-time.c b/arch/mips/mti-sead3/sead3-time.c
index e1d69895fb1d..a120b7a5a8fe 100644
--- a/arch/mips/mti-sead3/sead3-time.c
+++ b/arch/mips/mti-sead3/sead3-time.c
@@ -77,6 +77,7 @@ int get_c0_perfcount_int(void)
77 return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq; 77 return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
78 return -1; 78 return -1;
79} 79}
80EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
80 81
81unsigned int get_c0_compare_int(void) 82unsigned int get_c0_compare_int(void)
82{ 83{
diff --git a/arch/mips/netlogic/common/smp.c b/arch/mips/netlogic/common/smp.c
index dc3e327fbbac..f5fff228b347 100644
--- a/arch/mips/netlogic/common/smp.c
+++ b/arch/mips/netlogic/common/smp.c
@@ -86,7 +86,7 @@ void nlm_smp_function_ipi_handler(unsigned int irq, struct irq_desc *desc)
86{ 86{
87 clear_c0_eimr(irq); 87 clear_c0_eimr(irq);
88 ack_c0_eirr(irq); 88 ack_c0_eirr(irq);
89 smp_call_function_interrupt(); 89 generic_smp_call_function_interrupt();
90 set_c0_eimr(irq); 90 set_c0_eimr(irq);
91} 91}
92 92
diff --git a/arch/mips/paravirt/paravirt-smp.c b/arch/mips/paravirt/paravirt-smp.c
index 42181c7105df..f8d3e081b2eb 100644
--- a/arch/mips/paravirt/paravirt-smp.c
+++ b/arch/mips/paravirt/paravirt-smp.c
@@ -114,7 +114,7 @@ static irqreturn_t paravirt_reched_interrupt(int irq, void *dev_id)
114 114
115static irqreturn_t paravirt_function_interrupt(int irq, void *dev_id) 115static irqreturn_t paravirt_function_interrupt(int irq, void *dev_id)
116{ 116{
117 smp_call_function_interrupt(); 117 generic_smp_call_function_interrupt();
118 return IRQ_HANDLED; 118 return IRQ_HANDLED;
119} 119}
120 120
diff --git a/arch/mips/pistachio/time.c b/arch/mips/pistachio/time.c
index 7c73fcb92a10..8a377346f0ca 100644
--- a/arch/mips/pistachio/time.c
+++ b/arch/mips/pistachio/time.c
@@ -26,6 +26,7 @@ int get_c0_perfcount_int(void)
26{ 26{
27 return gic_get_c0_perfcount_int(); 27 return gic_get_c0_perfcount_int();
28} 28}
29EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
29 30
30int get_c0_fdc_int(void) 31int get_c0_fdc_int(void)
31{ 32{
diff --git a/arch/mips/pmcs-msp71xx/msp_smp.c b/arch/mips/pmcs-msp71xx/msp_smp.c
index 10170580a2de..ffa0f7101a97 100644
--- a/arch/mips/pmcs-msp71xx/msp_smp.c
+++ b/arch/mips/pmcs-msp71xx/msp_smp.c
@@ -44,7 +44,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
44 44
45static irqreturn_t ipi_call_interrupt(int irq, void *dev_id) 45static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
46{ 46{
47 smp_call_function_interrupt(); 47 generic_smp_call_function_interrupt();
48 48
49 return IRQ_HANDLED; 49 return IRQ_HANDLED;
50} 50}
diff --git a/arch/mips/ralink/irq.c b/arch/mips/ralink/irq.c
index 53707aacc0f8..8c624a8b9ea2 100644
--- a/arch/mips/ralink/irq.c
+++ b/arch/mips/ralink/irq.c
@@ -89,6 +89,7 @@ int get_c0_perfcount_int(void)
89{ 89{
90 return rt_perfcount_irq; 90 return rt_perfcount_irq;
91} 91}
92EXPORT_SYMBOL_GPL(get_c0_perfcount_int);
92 93
93unsigned int get_c0_compare_int(void) 94unsigned int get_c0_compare_int(void)
94{ 95{
diff --git a/arch/mips/sgi-ip27/ip27-irq.c b/arch/mips/sgi-ip27/ip27-irq.c
index 3fbaef97a1b8..16ec4e12daa3 100644
--- a/arch/mips/sgi-ip27/ip27-irq.c
+++ b/arch/mips/sgi-ip27/ip27-irq.c
@@ -107,10 +107,14 @@ static void ip27_do_irq_mask0(void)
107 scheduler_ipi(); 107 scheduler_ipi();
108 } else if (pend0 & (1UL << CPU_CALL_A_IRQ)) { 108 } else if (pend0 & (1UL << CPU_CALL_A_IRQ)) {
109 LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ); 109 LOCAL_HUB_CLR_INTR(CPU_CALL_A_IRQ);
110 smp_call_function_interrupt(); 110 irq_enter();
111 generic_smp_call_function_interrupt();
112 irq_exit();
111 } else if (pend0 & (1UL << CPU_CALL_B_IRQ)) { 113 } else if (pend0 & (1UL << CPU_CALL_B_IRQ)) {
112 LOCAL_HUB_CLR_INTR(CPU_CALL_B_IRQ); 114 LOCAL_HUB_CLR_INTR(CPU_CALL_B_IRQ);
113 smp_call_function_interrupt(); 115 irq_enter();
116 generic_smp_call_function_interrupt();
117 irq_exit();
114 } else 118 } else
115#endif 119#endif
116 { 120 {
diff --git a/arch/mips/sibyte/bcm1480/smp.c b/arch/mips/sibyte/bcm1480/smp.c
index af7d44edd9a8..4c71aea25663 100644
--- a/arch/mips/sibyte/bcm1480/smp.c
+++ b/arch/mips/sibyte/bcm1480/smp.c
@@ -29,8 +29,6 @@
29#include <asm/sibyte/bcm1480_regs.h> 29#include <asm/sibyte/bcm1480_regs.h>
30#include <asm/sibyte/bcm1480_int.h> 30#include <asm/sibyte/bcm1480_int.h>
31 31
32extern void smp_call_function_interrupt(void);
33
34/* 32/*
35 * These are routines for dealing with the bcm1480 smp capabilities 33 * These are routines for dealing with the bcm1480 smp capabilities
36 * independent of board/firmware 34 * independent of board/firmware
@@ -184,6 +182,9 @@ void bcm1480_mailbox_interrupt(void)
184 if (action & SMP_RESCHEDULE_YOURSELF) 182 if (action & SMP_RESCHEDULE_YOURSELF)
185 scheduler_ipi(); 183 scheduler_ipi();
186 184
187 if (action & SMP_CALL_FUNCTION) 185 if (action & SMP_CALL_FUNCTION) {
188 smp_call_function_interrupt(); 186 irq_enter();
187 generic_smp_call_function_interrupt();
188 irq_exit();
189 }
189} 190}
diff --git a/arch/mips/sibyte/sb1250/smp.c b/arch/mips/sibyte/sb1250/smp.c
index c0c4b3f88a08..1cf66f5ff23d 100644
--- a/arch/mips/sibyte/sb1250/smp.c
+++ b/arch/mips/sibyte/sb1250/smp.c
@@ -172,6 +172,9 @@ void sb1250_mailbox_interrupt(void)
172 if (action & SMP_RESCHEDULE_YOURSELF) 172 if (action & SMP_RESCHEDULE_YOURSELF)
173 scheduler_ipi(); 173 scheduler_ipi();
174 174
175 if (action & SMP_CALL_FUNCTION) 175 if (action & SMP_CALL_FUNCTION) {
176 smp_call_function_interrupt(); 176 irq_enter();
177 generic_smp_call_function_interrupt();
178 irq_exit();
179 }
177} 180}
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index d3a831ac0f92..da50e0c9c57e 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -966,8 +966,6 @@ int copy_siginfo_to_user32(struct compat_siginfo __user *d, const siginfo_t *s)
966 966
967int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from) 967int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
968{ 968{
969 memset(to, 0, sizeof *to);
970
971 if (copy_from_user(to, from, 3*sizeof(int)) || 969 if (copy_from_user(to, from, 3*sizeof(int)) ||
972 copy_from_user(to->_sifields._pad, 970 copy_from_user(to->_sifields._pad,
973 from->_sifields._pad, SI_PAD_SIZE32)) 971 from->_sifields._pad, SI_PAD_SIZE32))
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index 5cf5e6ea213b..7cf0df859d05 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -1478,7 +1478,7 @@ static int pnv_eeh_next_error(struct eeh_pe **pe)
1478 } 1478 }
1479 1479
1480 /* Unmask the event */ 1480 /* Unmask the event */
1481 if (eeh_enabled()) 1481 if (ret == EEH_NEXT_ERR_NONE && eeh_enabled())
1482 enable_irq(eeh_event_irq); 1482 enable_irq(eeh_event_irq);
1483 1483
1484 return ret; 1484 return ret;
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 5738d315248b..85cbc96eff6c 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -2220,7 +2220,7 @@ static void pnv_pci_ioda_setup_opal_tce_kill(struct pnv_phb *phb)
2220 2220
2221static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift, 2221static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift,
2222 unsigned levels, unsigned long limit, 2222 unsigned levels, unsigned long limit,
2223 unsigned long *current_offset) 2223 unsigned long *current_offset, unsigned long *total_allocated)
2224{ 2224{
2225 struct page *tce_mem = NULL; 2225 struct page *tce_mem = NULL;
2226 __be64 *addr, *tmp; 2226 __be64 *addr, *tmp;
@@ -2236,6 +2236,7 @@ static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift,
2236 } 2236 }
2237 addr = page_address(tce_mem); 2237 addr = page_address(tce_mem);
2238 memset(addr, 0, allocated); 2238 memset(addr, 0, allocated);
2239 *total_allocated += allocated;
2239 2240
2240 --levels; 2241 --levels;
2241 if (!levels) { 2242 if (!levels) {
@@ -2245,7 +2246,7 @@ static __be64 *pnv_pci_ioda2_table_do_alloc_pages(int nid, unsigned shift,
2245 2246
2246 for (i = 0; i < entries; ++i) { 2247 for (i = 0; i < entries; ++i) {
2247 tmp = pnv_pci_ioda2_table_do_alloc_pages(nid, shift, 2248 tmp = pnv_pci_ioda2_table_do_alloc_pages(nid, shift,
2248 levels, limit, current_offset); 2249 levels, limit, current_offset, total_allocated);
2249 if (!tmp) 2250 if (!tmp)
2250 break; 2251 break;
2251 2252
@@ -2267,7 +2268,7 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
2267 struct iommu_table *tbl) 2268 struct iommu_table *tbl)
2268{ 2269{
2269 void *addr; 2270 void *addr;
2270 unsigned long offset = 0, level_shift; 2271 unsigned long offset = 0, level_shift, total_allocated = 0;
2271 const unsigned window_shift = ilog2(window_size); 2272 const unsigned window_shift = ilog2(window_size);
2272 unsigned entries_shift = window_shift - page_shift; 2273 unsigned entries_shift = window_shift - page_shift;
2273 unsigned table_shift = max_t(unsigned, entries_shift + 3, PAGE_SHIFT); 2274 unsigned table_shift = max_t(unsigned, entries_shift + 3, PAGE_SHIFT);
@@ -2286,7 +2287,7 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
2286 2287
2287 /* Allocate TCE table */ 2288 /* Allocate TCE table */
2288 addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift, 2289 addr = pnv_pci_ioda2_table_do_alloc_pages(nid, level_shift,
2289 levels, tce_table_size, &offset); 2290 levels, tce_table_size, &offset, &total_allocated);
2290 2291
2291 /* addr==NULL means that the first level allocation failed */ 2292 /* addr==NULL means that the first level allocation failed */
2292 if (!addr) 2293 if (!addr)
@@ -2308,7 +2309,7 @@ static long pnv_pci_ioda2_table_alloc_pages(int nid, __u64 bus_offset,
2308 page_shift); 2309 page_shift);
2309 tbl->it_level_size = 1ULL << (level_shift - 3); 2310 tbl->it_level_size = 1ULL << (level_shift - 3);
2310 tbl->it_indirect_levels = levels - 1; 2311 tbl->it_indirect_levels = levels - 1;
2311 tbl->it_allocated_size = offset; 2312 tbl->it_allocated_size = total_allocated;
2312 2313
2313 pr_devel("Created TCE table: ws=%08llx ts=%lx @%08llx\n", 2314 pr_devel("Created TCE table: ws=%08llx ts=%lx @%08llx\n",
2314 window_size, tce_table_size, bus_offset); 2315 window_size, tce_table_size, bus_offset);
diff --git a/arch/s390/kernel/asm-offsets.c b/arch/s390/kernel/asm-offsets.c
index c7d1b9d09011..a2da259d9327 100644
--- a/arch/s390/kernel/asm-offsets.c
+++ b/arch/s390/kernel/asm-offsets.c
@@ -23,15 +23,15 @@
23 23
24int main(void) 24int main(void)
25{ 25{
26 DEFINE(__THREAD_info, offsetof(struct task_struct, stack)); 26 DEFINE(__TASK_thread_info, offsetof(struct task_struct, stack));
27 DEFINE(__THREAD_ksp, offsetof(struct task_struct, thread.ksp)); 27 DEFINE(__TASK_thread, offsetof(struct task_struct, thread));
28 DEFINE(__THREAD_mm_segment, offsetof(struct task_struct, thread.mm_segment));
29 BLANK();
30 DEFINE(__TASK_pid, offsetof(struct task_struct, pid)); 28 DEFINE(__TASK_pid, offsetof(struct task_struct, pid));
31 BLANK(); 29 BLANK();
32 DEFINE(__THREAD_per_cause, offsetof(struct task_struct, thread.per_event.cause)); 30 DEFINE(__THREAD_ksp, offsetof(struct thread_struct, ksp));
33 DEFINE(__THREAD_per_address, offsetof(struct task_struct, thread.per_event.address)); 31 DEFINE(__THREAD_per_cause, offsetof(struct thread_struct, per_event.cause));
34 DEFINE(__THREAD_per_paid, offsetof(struct task_struct, thread.per_event.paid)); 32 DEFINE(__THREAD_per_address, offsetof(struct thread_struct, per_event.address));
33 DEFINE(__THREAD_per_paid, offsetof(struct thread_struct, per_event.paid));
34 DEFINE(__THREAD_trap_tdb, offsetof(struct thread_struct, trap_tdb));
35 BLANK(); 35 BLANK();
36 DEFINE(__TI_task, offsetof(struct thread_info, task)); 36 DEFINE(__TI_task, offsetof(struct thread_info, task));
37 DEFINE(__TI_flags, offsetof(struct thread_info, flags)); 37 DEFINE(__TI_flags, offsetof(struct thread_info, flags));
@@ -176,7 +176,6 @@ int main(void)
176 DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data)); 176 DEFINE(__LC_VDSO_PER_CPU, offsetof(struct _lowcore, vdso_per_cpu_data));
177 DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap)); 177 DEFINE(__LC_GMAP, offsetof(struct _lowcore, gmap));
178 DEFINE(__LC_PGM_TDB, offsetof(struct _lowcore, pgm_tdb)); 178 DEFINE(__LC_PGM_TDB, offsetof(struct _lowcore, pgm_tdb));
179 DEFINE(__THREAD_trap_tdb, offsetof(struct task_struct, thread.trap_tdb));
180 DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce)); 179 DEFINE(__GMAP_ASCE, offsetof(struct gmap, asce));
181 DEFINE(__SIE_PROG0C, offsetof(struct kvm_s390_sie_block, prog0c)); 180 DEFINE(__SIE_PROG0C, offsetof(struct kvm_s390_sie_block, prog0c));
182 DEFINE(__SIE_PROG20, offsetof(struct kvm_s390_sie_block, prog20)); 181 DEFINE(__SIE_PROG20, offsetof(struct kvm_s390_sie_block, prog20));
diff --git a/arch/s390/kernel/cache.c b/arch/s390/kernel/cache.c
index bff5e3b6d822..8ba32436effe 100644
--- a/arch/s390/kernel/cache.c
+++ b/arch/s390/kernel/cache.c
@@ -138,6 +138,8 @@ int init_cache_level(unsigned int cpu)
138 union cache_topology ct; 138 union cache_topology ct;
139 enum cache_type ctype; 139 enum cache_type ctype;
140 140
141 if (!test_facility(34))
142 return -EOPNOTSUPP;
141 if (!this_cpu_ci) 143 if (!this_cpu_ci)
142 return -EINVAL; 144 return -EINVAL;
143 ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0); 145 ct.raw = ecag(EXTRACT_TOPOLOGY, 0, 0);
diff --git a/arch/s390/kernel/entry.S b/arch/s390/kernel/entry.S
index 3238893c9d4f..84062e7a77da 100644
--- a/arch/s390/kernel/entry.S
+++ b/arch/s390/kernel/entry.S
@@ -178,17 +178,21 @@ _PIF_WORK = (_PIF_PER_TRAP)
178 */ 178 */
179ENTRY(__switch_to) 179ENTRY(__switch_to)
180 stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task 180 stmg %r6,%r15,__SF_GPRS(%r15) # store gprs of prev task
181 stg %r15,__THREAD_ksp(%r2) # store kernel stack of prev 181 lgr %r1,%r2
182 lg %r4,__THREAD_info(%r2) # get thread_info of prev 182 aghi %r1,__TASK_thread # thread_struct of prev task
183 lg %r5,__THREAD_info(%r3) # get thread_info of next 183 lg %r4,__TASK_thread_info(%r2) # get thread_info of prev
184 lg %r5,__TASK_thread_info(%r3) # get thread_info of next
185 stg %r15,__THREAD_ksp(%r1) # store kernel stack of prev
186 lgr %r1,%r3
187 aghi %r1,__TASK_thread # thread_struct of next task
184 lgr %r15,%r5 188 lgr %r15,%r5
185 aghi %r15,STACK_INIT # end of kernel stack of next 189 aghi %r15,STACK_INIT # end of kernel stack of next
186 stg %r3,__LC_CURRENT # store task struct of next 190 stg %r3,__LC_CURRENT # store task struct of next
187 stg %r5,__LC_THREAD_INFO # store thread info of next 191 stg %r5,__LC_THREAD_INFO # store thread info of next
188 stg %r15,__LC_KERNEL_STACK # store end of kernel stack 192 stg %r15,__LC_KERNEL_STACK # store end of kernel stack
193 lg %r15,__THREAD_ksp(%r1) # load kernel stack of next
189 lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4 194 lctl %c4,%c4,__TASK_pid(%r3) # load pid to control reg. 4
190 mvc __LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next 195 mvc __LC_CURRENT_PID+4(4,%r0),__TASK_pid(%r3) # store pid of next
191 lg %r15,__THREAD_ksp(%r3) # load kernel stack of next
192 lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task 196 lmg %r6,%r15,__SF_GPRS(%r15) # load gprs of next task
193 br %r14 197 br %r14
194 198
@@ -417,6 +421,7 @@ ENTRY(pgm_check_handler)
417 LAST_BREAK %r14 421 LAST_BREAK %r14
418 lg %r15,__LC_KERNEL_STACK 422 lg %r15,__LC_KERNEL_STACK
419 lg %r14,__TI_task(%r12) 423 lg %r14,__TI_task(%r12)
424 aghi %r14,__TASK_thread # pointer to thread_struct
420 lghi %r13,__LC_PGM_TDB 425 lghi %r13,__LC_PGM_TDB
421 tm __LC_PGM_ILC+2,0x02 # check for transaction abort 426 tm __LC_PGM_ILC+2,0x02 # check for transaction abort
422 jz 2f 427 jz 2f
diff --git a/arch/s390/kernel/traps.c b/arch/s390/kernel/traps.c
index 4d96c9f53455..7bea81d8a363 100644
--- a/arch/s390/kernel/traps.c
+++ b/arch/s390/kernel/traps.c
@@ -259,7 +259,7 @@ void vector_exception(struct pt_regs *regs)
259 } 259 }
260 260
261 /* get vector interrupt code from fpc */ 261 /* get vector interrupt code from fpc */
262 asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc)); 262 asm volatile("stfpc %0" : "=Q" (current->thread.fp_regs.fpc));
263 vic = (current->thread.fp_regs.fpc & 0xf00) >> 8; 263 vic = (current->thread.fp_regs.fpc & 0xf00) >> 8;
264 switch (vic) { 264 switch (vic) {
265 case 1: /* invalid vector operation */ 265 case 1: /* invalid vector operation */
@@ -297,7 +297,7 @@ void data_exception(struct pt_regs *regs)
297 297
298 location = get_trap_ip(regs); 298 location = get_trap_ip(regs);
299 299
300 asm volatile("stfpc %0" : "=m" (current->thread.fp_regs.fpc)); 300 asm volatile("stfpc %0" : "=Q" (current->thread.fp_regs.fpc));
301 /* Check for vector register enablement */ 301 /* Check for vector register enablement */
302 if (MACHINE_HAS_VX && !current->thread.vxrs && 302 if (MACHINE_HAS_VX && !current->thread.vxrs &&
303 (current->thread.fp_regs.fpc & FPC_DXC_MASK) == 0xfe00) { 303 (current->thread.fp_regs.fpc & FPC_DXC_MASK) == 0xfe00) {
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 2078f92d15ac..f32f843a3631 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -1742,10 +1742,10 @@ static bool ibs_enabled(struct kvm_vcpu *vcpu)
1742 1742
1743static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu) 1743static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
1744{ 1744{
1745 if (!vcpu->requests)
1746 return 0;
1747retry: 1745retry:
1748 kvm_s390_vcpu_request_handled(vcpu); 1746 kvm_s390_vcpu_request_handled(vcpu);
1747 if (!vcpu->requests)
1748 return 0;
1749 /* 1749 /*
1750 * We use MMU_RELOAD just to re-arm the ipte notifier for the 1750 * We use MMU_RELOAD just to re-arm the ipte notifier for the
1751 * guest prefix page. gmap_ipte_notify will wait on the ptl lock. 1751 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
diff --git a/arch/s390/net/bpf_jit_comp.c b/arch/s390/net/bpf_jit_comp.c
index fee782acc2ee..8d2e5165865f 100644
--- a/arch/s390/net/bpf_jit_comp.c
+++ b/arch/s390/net/bpf_jit_comp.c
@@ -448,13 +448,13 @@ static void bpf_jit_prologue(struct bpf_jit *jit)
448 EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0, 448 EMIT6_DISP_LH(0xe3000000, 0x0004, REG_SKB_DATA, REG_0,
449 BPF_REG_1, offsetof(struct sk_buff, data)); 449 BPF_REG_1, offsetof(struct sk_buff, data));
450 } 450 }
451 /* BPF compatibility: clear A (%b7) and X (%b8) registers */ 451 /* BPF compatibility: clear A (%b0) and X (%b7) registers */
452 if (REG_SEEN(BPF_REG_7)) 452 if (REG_SEEN(BPF_REG_A))
453 /* lghi %b7,0 */ 453 /* lghi %ba,0 */
454 EMIT4_IMM(0xa7090000, BPF_REG_7, 0); 454 EMIT4_IMM(0xa7090000, BPF_REG_A, 0);
455 if (REG_SEEN(BPF_REG_8)) 455 if (REG_SEEN(BPF_REG_X))
456 /* lghi %b8,0 */ 456 /* lghi %bx,0 */
457 EMIT4_IMM(0xa7090000, BPF_REG_8, 0); 457 EMIT4_IMM(0xa7090000, BPF_REG_X, 0);
458} 458}
459 459
460/* 460/*
diff --git a/arch/sparc/include/asm/visasm.h b/arch/sparc/include/asm/visasm.h
index 1f0aa2024e94..6424249d5f78 100644
--- a/arch/sparc/include/asm/visasm.h
+++ b/arch/sparc/include/asm/visasm.h
@@ -28,16 +28,10 @@
28 * Must preserve %o5 between VISEntryHalf and VISExitHalf */ 28 * Must preserve %o5 between VISEntryHalf and VISExitHalf */
29 29
30#define VISEntryHalf \ 30#define VISEntryHalf \
31 rd %fprs, %o5; \ 31 VISEntry
32 andcc %o5, FPRS_FEF, %g0; \ 32
33 be,pt %icc, 297f; \ 33#define VISExitHalf \
34 sethi %hi(298f), %g7; \ 34 VISExit
35 sethi %hi(VISenterhalf), %g1; \
36 jmpl %g1 + %lo(VISenterhalf), %g0; \
37 or %g7, %lo(298f), %g7; \
38 clr %o5; \
39297: wr %o5, FPRS_FEF, %fprs; \
40298:
41 35
42#define VISEntryHalfFast(fail_label) \ 36#define VISEntryHalfFast(fail_label) \
43 rd %fprs, %o5; \ 37 rd %fprs, %o5; \
@@ -47,7 +41,7 @@
47 ba,a,pt %xcc, fail_label; \ 41 ba,a,pt %xcc, fail_label; \
48297: wr %o5, FPRS_FEF, %fprs; 42297: wr %o5, FPRS_FEF, %fprs;
49 43
50#define VISExitHalf \ 44#define VISExitHalfFast \
51 wr %o5, 0, %fprs; 45 wr %o5, 0, %fprs;
52 46
53#ifndef __ASSEMBLY__ 47#ifndef __ASSEMBLY__
diff --git a/arch/sparc/lib/NG4memcpy.S b/arch/sparc/lib/NG4memcpy.S
index 140527a20e7d..83aeeb1dffdb 100644
--- a/arch/sparc/lib/NG4memcpy.S
+++ b/arch/sparc/lib/NG4memcpy.S
@@ -240,8 +240,11 @@ FUNC_NAME: /* %o0=dst, %o1=src, %o2=len */
240 add %o0, 0x40, %o0 240 add %o0, 0x40, %o0
241 bne,pt %icc, 1b 241 bne,pt %icc, 1b
242 LOAD(prefetch, %g1 + 0x200, #n_reads_strong) 242 LOAD(prefetch, %g1 + 0x200, #n_reads_strong)
243#ifdef NON_USER_COPY
244 VISExitHalfFast
245#else
243 VISExitHalf 246 VISExitHalf
244 247#endif
245 brz,pn %o2, .Lexit 248 brz,pn %o2, .Lexit
246 cmp %o2, 19 249 cmp %o2, 19
247 ble,pn %icc, .Lsmall_unaligned 250 ble,pn %icc, .Lsmall_unaligned
diff --git a/arch/sparc/lib/VISsave.S b/arch/sparc/lib/VISsave.S
index b320ae9e2e2e..a063d84336d6 100644
--- a/arch/sparc/lib/VISsave.S
+++ b/arch/sparc/lib/VISsave.S
@@ -44,9 +44,8 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3
44 44
45 stx %g3, [%g6 + TI_GSR] 45 stx %g3, [%g6 + TI_GSR]
462: add %g6, %g1, %g3 462: add %g6, %g1, %g3
47 cmp %o5, FPRS_DU 47 mov FPRS_DU | FPRS_DL | FPRS_FEF, %o5
48 be,pn %icc, 6f 48 sll %g1, 3, %g1
49 sll %g1, 3, %g1
50 stb %o5, [%g3 + TI_FPSAVED] 49 stb %o5, [%g3 + TI_FPSAVED]
51 rd %gsr, %g2 50 rd %gsr, %g2
52 add %g6, %g1, %g3 51 add %g6, %g1, %g3
@@ -80,65 +79,3 @@ vis1: ldub [%g6 + TI_FPSAVED], %g3
80 .align 32 79 .align 32
8180: jmpl %g7 + %g0, %g0 8080: jmpl %g7 + %g0, %g0
82 nop 81 nop
83
846: ldub [%g3 + TI_FPSAVED], %o5
85 or %o5, FPRS_DU, %o5
86 add %g6, TI_FPREGS+0x80, %g2
87 stb %o5, [%g3 + TI_FPSAVED]
88
89 sll %g1, 5, %g1
90 add %g6, TI_FPREGS+0xc0, %g3
91 wr %g0, FPRS_FEF, %fprs
92 membar #Sync
93 stda %f32, [%g2 + %g1] ASI_BLK_P
94 stda %f48, [%g3 + %g1] ASI_BLK_P
95 membar #Sync
96 ba,pt %xcc, 80f
97 nop
98
99 .align 32
10080: jmpl %g7 + %g0, %g0
101 nop
102
103 .align 32
104VISenterhalf:
105 ldub [%g6 + TI_FPDEPTH], %g1
106 brnz,a,pn %g1, 1f
107 cmp %g1, 1
108 stb %g0, [%g6 + TI_FPSAVED]
109 stx %fsr, [%g6 + TI_XFSR]
110 clr %o5
111 jmpl %g7 + %g0, %g0
112 wr %g0, FPRS_FEF, %fprs
113
1141: bne,pn %icc, 2f
115 srl %g1, 1, %g1
116 ba,pt %xcc, vis1
117 sub %g7, 8, %g7
1182: addcc %g6, %g1, %g3
119 sll %g1, 3, %g1
120 andn %o5, FPRS_DU, %g2
121 stb %g2, [%g3 + TI_FPSAVED]
122
123 rd %gsr, %g2
124 add %g6, %g1, %g3
125 stx %g2, [%g3 + TI_GSR]
126 add %g6, %g1, %g2
127 stx %fsr, [%g2 + TI_XFSR]
128 sll %g1, 5, %g1
1293: andcc %o5, FPRS_DL, %g0
130 be,pn %icc, 4f
131 add %g6, TI_FPREGS, %g2
132
133 add %g6, TI_FPREGS+0x40, %g3
134 membar #Sync
135 stda %f0, [%g2 + %g1] ASI_BLK_P
136 stda %f16, [%g3 + %g1] ASI_BLK_P
137 membar #Sync
138 ba,pt %xcc, 4f
139 nop
140
141 .align 32
1424: and %o5, FPRS_DU, %o5
143 jmpl %g7 + %g0, %g0
144 wr %o5, FPRS_FEF, %fprs
diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
index 1d649a95660c..8069ce12f20b 100644
--- a/arch/sparc/lib/ksyms.c
+++ b/arch/sparc/lib/ksyms.c
@@ -135,10 +135,6 @@ EXPORT_SYMBOL(copy_user_page);
135void VISenter(void); 135void VISenter(void);
136EXPORT_SYMBOL(VISenter); 136EXPORT_SYMBOL(VISenter);
137 137
138/* CRYPTO code needs this */
139void VISenterhalf(void);
140EXPORT_SYMBOL(VISenterhalf);
141
142extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *); 138extern void xor_vis_2(unsigned long, unsigned long *, unsigned long *);
143extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *, 139extern void xor_vis_3(unsigned long, unsigned long *, unsigned long *,
144 unsigned long *); 140 unsigned long *);
diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c
index e8c2c04143cd..c667e104a0c2 100644
--- a/arch/tile/kernel/compat_signal.c
+++ b/arch/tile/kernel/compat_signal.c
@@ -113,8 +113,6 @@ int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
113 if (!access_ok(VERIFY_READ, from, sizeof(struct compat_siginfo))) 113 if (!access_ok(VERIFY_READ, from, sizeof(struct compat_siginfo)))
114 return -EFAULT; 114 return -EFAULT;
115 115
116 memset(to, 0, sizeof(*to));
117
118 err = __get_user(to->si_signo, &from->si_signo); 116 err = __get_user(to->si_signo, &from->si_signo);
119 err |= __get_user(to->si_errno, &from->si_errno); 117 err |= __get_user(to->si_errno, &from->si_errno);
120 err |= __get_user(to->si_code, &from->si_code); 118 err |= __get_user(to->si_code, &from->si_code);
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 99c9ff87e018..6b755d125783 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -1139,7 +1139,7 @@ static void __init load_hv_initrd(void)
1139 1139
1140void __init free_initrd_mem(unsigned long begin, unsigned long end) 1140void __init free_initrd_mem(unsigned long begin, unsigned long end)
1141{ 1141{
1142 free_bootmem(__pa(begin), end - begin); 1142 free_bootmem_late(__pa(begin), end - begin);
1143} 1143}
1144 1144
1145static int __init setup_initrd(char *str) 1145static int __init setup_initrd(char *str)
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index 2c82bd150d43..7d69afd8b6fa 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -1193,6 +1193,10 @@ static efi_status_t setup_e820(struct boot_params *params,
1193 unsigned int e820_type = 0; 1193 unsigned int e820_type = 0;
1194 unsigned long m = efi->efi_memmap; 1194 unsigned long m = efi->efi_memmap;
1195 1195
1196#ifdef CONFIG_X86_64
1197 m |= (u64)efi->efi_memmap_hi << 32;
1198#endif
1199
1196 d = (efi_memory_desc_t *)(m + (i * efi->efi_memdesc_size)); 1200 d = (efi_memory_desc_t *)(m + (i * efi->efi_memdesc_size));
1197 switch (d->type) { 1201 switch (d->type) {
1198 case EFI_RESERVED_TYPE: 1202 case EFI_RESERVED_TYPE:
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index bb187a6a877c..a7e257d9cb90 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -140,6 +140,7 @@ sysexit_from_sys_call:
140 */ 140 */
141 andl $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS) 141 andl $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
142 movl RIP(%rsp), %ecx /* User %eip */ 142 movl RIP(%rsp), %ecx /* User %eip */
143 movq RAX(%rsp), %rax
143 RESTORE_RSI_RDI 144 RESTORE_RSI_RDI
144 xorl %edx, %edx /* Do not leak kernel information */ 145 xorl %edx, %edx /* Do not leak kernel information */
145 xorq %r8, %r8 146 xorq %r8, %r8
@@ -205,7 +206,6 @@ sysexit_from_sys_call:
205 movl RDX(%rsp), %edx /* arg3 */ 206 movl RDX(%rsp), %edx /* arg3 */
206 movl RSI(%rsp), %ecx /* arg4 */ 207 movl RSI(%rsp), %ecx /* arg4 */
207 movl RDI(%rsp), %r8d /* arg5 */ 208 movl RDI(%rsp), %r8d /* arg5 */
208 movl %ebp, %r9d /* arg6 */
209 .endm 209 .endm
210 210
211 .macro auditsys_exit exit 211 .macro auditsys_exit exit
@@ -220,7 +220,6 @@ sysexit_from_sys_call:
2201: setbe %al /* 1 if error, 0 if not */ 2201: setbe %al /* 1 if error, 0 if not */
221 movzbl %al, %edi /* zero-extend that into %edi */ 221 movzbl %al, %edi /* zero-extend that into %edi */
222 call __audit_syscall_exit 222 call __audit_syscall_exit
223 movq RAX(%rsp), %rax /* reload syscall return value */
224 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %edi 223 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %edi
225 DISABLE_INTERRUPTS(CLBR_NONE) 224 DISABLE_INTERRUPTS(CLBR_NONE)
226 TRACE_IRQS_OFF 225 TRACE_IRQS_OFF
@@ -236,6 +235,7 @@ sysexit_from_sys_call:
236 235
237sysenter_auditsys: 236sysenter_auditsys:
238 auditsys_entry_common 237 auditsys_entry_common
238 movl %ebp, %r9d /* reload 6th syscall arg */
239 jmp sysenter_dispatch 239 jmp sysenter_dispatch
240 240
241sysexit_audit: 241sysexit_audit:
@@ -336,7 +336,7 @@ ENTRY(entry_SYSCALL_compat)
336 * 32-bit zero extended: 336 * 32-bit zero extended:
337 */ 337 */
338 ASM_STAC 338 ASM_STAC
3391: movl (%r8), %ebp 3391: movl (%r8), %r9d
340 _ASM_EXTABLE(1b, ia32_badarg) 340 _ASM_EXTABLE(1b, ia32_badarg)
341 ASM_CLAC 341 ASM_CLAC
342 orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS) 342 orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
@@ -346,7 +346,7 @@ ENTRY(entry_SYSCALL_compat)
346cstar_do_call: 346cstar_do_call:
347 /* 32-bit syscall -> 64-bit C ABI argument conversion */ 347 /* 32-bit syscall -> 64-bit C ABI argument conversion */
348 movl %edi, %r8d /* arg5 */ 348 movl %edi, %r8d /* arg5 */
349 movl %ebp, %r9d /* arg6 */ 349 /* r9 already loaded */ /* arg6 */
350 xchg %ecx, %esi /* rsi:arg2, rcx:arg4 */ 350 xchg %ecx, %esi /* rsi:arg2, rcx:arg4 */
351 movl %ebx, %edi /* arg1 */ 351 movl %ebx, %edi /* arg1 */
352 movl %edx, %edx /* arg3 (zero extension) */ 352 movl %edx, %edx /* arg3 (zero extension) */
@@ -358,7 +358,6 @@ cstar_dispatch:
358 call *ia32_sys_call_table(, %rax, 8) 358 call *ia32_sys_call_table(, %rax, 8)
359 movq %rax, RAX(%rsp) 359 movq %rax, RAX(%rsp)
3601: 3601:
361 movl RCX(%rsp), %ebp
362 DISABLE_INTERRUPTS(CLBR_NONE) 361 DISABLE_INTERRUPTS(CLBR_NONE)
363 TRACE_IRQS_OFF 362 TRACE_IRQS_OFF
364 testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS) 363 testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
@@ -369,6 +368,7 @@ sysretl_from_sys_call:
369 RESTORE_RSI_RDI_RDX 368 RESTORE_RSI_RDI_RDX
370 movl RIP(%rsp), %ecx 369 movl RIP(%rsp), %ecx
371 movl EFLAGS(%rsp), %r11d 370 movl EFLAGS(%rsp), %r11d
371 movq RAX(%rsp), %rax
372 xorq %r10, %r10 372 xorq %r10, %r10
373 xorq %r9, %r9 373 xorq %r9, %r9
374 xorq %r8, %r8 374 xorq %r8, %r8
@@ -392,7 +392,9 @@ sysretl_from_sys_call:
392 392
393#ifdef CONFIG_AUDITSYSCALL 393#ifdef CONFIG_AUDITSYSCALL
394cstar_auditsys: 394cstar_auditsys:
395 movl %r9d, R9(%rsp) /* register to be clobbered by call */
395 auditsys_entry_common 396 auditsys_entry_common
397 movl R9(%rsp), %r9d /* reload 6th syscall arg */
396 jmp cstar_dispatch 398 jmp cstar_dispatch
397 399
398sysretl_audit: 400sysretl_audit:
@@ -404,14 +406,16 @@ cstar_tracesys:
404 testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS) 406 testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
405 jz cstar_auditsys 407 jz cstar_auditsys
406#endif 408#endif
409 xchgl %r9d, %ebp
407 SAVE_EXTRA_REGS 410 SAVE_EXTRA_REGS
408 xorl %eax, %eax /* Do not leak kernel information */ 411 xorl %eax, %eax /* Do not leak kernel information */
409 movq %rax, R11(%rsp) 412 movq %rax, R11(%rsp)
410 movq %rax, R10(%rsp) 413 movq %rax, R10(%rsp)
411 movq %rax, R9(%rsp) 414 movq %r9, R9(%rsp)
412 movq %rax, R8(%rsp) 415 movq %rax, R8(%rsp)
413 movq %rsp, %rdi /* &pt_regs -> arg1 */ 416 movq %rsp, %rdi /* &pt_regs -> arg1 */
414 call syscall_trace_enter 417 call syscall_trace_enter
418 movl R9(%rsp), %r9d
415 419
416 /* Reload arg registers from stack. (see sysenter_tracesys) */ 420 /* Reload arg registers from stack. (see sysenter_tracesys) */
417 movl RCX(%rsp), %ecx 421 movl RCX(%rsp), %ecx
@@ -421,6 +425,7 @@ cstar_tracesys:
421 movl %eax, %eax /* zero extension */ 425 movl %eax, %eax /* zero extension */
422 426
423 RESTORE_EXTRA_REGS 427 RESTORE_EXTRA_REGS
428 xchgl %ebp, %r9d
424 jmp cstar_do_call 429 jmp cstar_do_call
425END(entry_SYSCALL_compat) 430END(entry_SYSCALL_compat)
426 431
diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
index a0bf89fd2647..4e10d73cf018 100644
--- a/arch/x86/include/asm/desc.h
+++ b/arch/x86/include/asm/desc.h
@@ -280,21 +280,6 @@ static inline void clear_LDT(void)
280 set_ldt(NULL, 0); 280 set_ldt(NULL, 0);
281} 281}
282 282
283/*
284 * load one particular LDT into the current CPU
285 */
286static inline void load_LDT_nolock(mm_context_t *pc)
287{
288 set_ldt(pc->ldt, pc->size);
289}
290
291static inline void load_LDT(mm_context_t *pc)
292{
293 preempt_disable();
294 load_LDT_nolock(pc);
295 preempt_enable();
296}
297
298static inline unsigned long get_desc_base(const struct desc_struct *desc) 283static inline unsigned long get_desc_base(const struct desc_struct *desc)
299{ 284{
300 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24)); 285 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
index 09b9620a73b4..364d27481a52 100644
--- a/arch/x86/include/asm/mmu.h
+++ b/arch/x86/include/asm/mmu.h
@@ -9,8 +9,7 @@
9 * we put the segment information here. 9 * we put the segment information here.
10 */ 10 */
11typedef struct { 11typedef struct {
12 void *ldt; 12 struct ldt_struct *ldt;
13 int size;
14 13
15#ifdef CONFIG_X86_64 14#ifdef CONFIG_X86_64
16 /* True if mm supports a task running in 32 bit compatibility mode. */ 15 /* True if mm supports a task running in 32 bit compatibility mode. */
diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
index 804a3a6030ca..984abfe47edc 100644
--- a/arch/x86/include/asm/mmu_context.h
+++ b/arch/x86/include/asm/mmu_context.h
@@ -34,6 +34,50 @@ static inline void load_mm_cr4(struct mm_struct *mm) {}
34#endif 34#endif
35 35
36/* 36/*
37 * ldt_structs can be allocated, used, and freed, but they are never
38 * modified while live.
39 */
40struct ldt_struct {
41 /*
42 * Xen requires page-aligned LDTs with special permissions. This is
43 * needed to prevent us from installing evil descriptors such as
44 * call gates. On native, we could merge the ldt_struct and LDT
45 * allocations, but it's not worth trying to optimize.
46 */
47 struct desc_struct *entries;
48 int size;
49};
50
51static inline void load_mm_ldt(struct mm_struct *mm)
52{
53 struct ldt_struct *ldt;
54
55 /* lockless_dereference synchronizes with smp_store_release */
56 ldt = lockless_dereference(mm->context.ldt);
57
58 /*
59 * Any change to mm->context.ldt is followed by an IPI to all
60 * CPUs with the mm active. The LDT will not be freed until
61 * after the IPI is handled by all such CPUs. This means that,
62 * if the ldt_struct changes before we return, the values we see
63 * will be safe, and the new values will be loaded before we run
64 * any user code.
65 *
66 * NB: don't try to convert this to use RCU without extreme care.
67 * We would still need IRQs off, because we don't want to change
68 * the local LDT after an IPI loaded a newer value than the one
69 * that we can see.
70 */
71
72 if (unlikely(ldt))
73 set_ldt(ldt->entries, ldt->size);
74 else
75 clear_LDT();
76
77 DEBUG_LOCKS_WARN_ON(preemptible());
78}
79
80/*
37 * Used for LDT copy/destruction. 81 * Used for LDT copy/destruction.
38 */ 82 */
39int init_new_context(struct task_struct *tsk, struct mm_struct *mm); 83int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
@@ -78,12 +122,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
78 * was called and then modify_ldt changed 122 * was called and then modify_ldt changed
79 * prev->context.ldt but suppressed an IPI to this CPU. 123 * prev->context.ldt but suppressed an IPI to this CPU.
80 * In this case, prev->context.ldt != NULL, because we 124 * In this case, prev->context.ldt != NULL, because we
81 * never free an LDT while the mm still exists. That 125 * never set context.ldt to NULL while the mm still
82 * means that next->context.ldt != prev->context.ldt, 126 * exists. That means that next->context.ldt !=
83 * because mms never share an LDT. 127 * prev->context.ldt, because mms never share an LDT.
84 */ 128 */
85 if (unlikely(prev->context.ldt != next->context.ldt)) 129 if (unlikely(prev->context.ldt != next->context.ldt))
86 load_LDT_nolock(&next->context); 130 load_mm_ldt(next);
87 } 131 }
88#ifdef CONFIG_SMP 132#ifdef CONFIG_SMP
89 else { 133 else {
@@ -106,7 +150,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
106 load_cr3(next->pgd); 150 load_cr3(next->pgd);
107 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL); 151 trace_tlb_flush(TLB_FLUSH_ON_TASK_SWITCH, TLB_FLUSH_ALL);
108 load_mm_cr4(next); 152 load_mm_cr4(next);
109 load_LDT_nolock(&next->context); 153 load_mm_ldt(next);
110 } 154 }
111 } 155 }
112#endif 156#endif
diff --git a/arch/x86/include/asm/sigcontext.h b/arch/x86/include/asm/sigcontext.h
index 6fe6b182c998..9dfce4e0417d 100644
--- a/arch/x86/include/asm/sigcontext.h
+++ b/arch/x86/include/asm/sigcontext.h
@@ -57,9 +57,9 @@ struct sigcontext {
57 unsigned long ip; 57 unsigned long ip;
58 unsigned long flags; 58 unsigned long flags;
59 unsigned short cs; 59 unsigned short cs;
60 unsigned short __pad2; /* Was called gs, but was always zero. */ 60 unsigned short gs;
61 unsigned short __pad1; /* Was called fs, but was always zero. */ 61 unsigned short fs;
62 unsigned short ss; 62 unsigned short __pad0;
63 unsigned long err; 63 unsigned long err;
64 unsigned long trapno; 64 unsigned long trapno;
65 unsigned long oldmask; 65 unsigned long oldmask;
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index a4ae82eb82aa..cd54147cb365 100644
--- a/arch/x86/include/uapi/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
@@ -354,7 +354,7 @@ struct kvm_xcrs {
354struct kvm_sync_regs { 354struct kvm_sync_regs {
355}; 355};
356 356
357#define KVM_QUIRK_LINT0_REENABLED (1 << 0) 357#define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0)
358#define KVM_QUIRK_CD_NW_CLEARED (1 << 1) 358#define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1)
359 359
360#endif /* _ASM_X86_KVM_H */ 360#endif /* _ASM_X86_KVM_H */
diff --git a/arch/x86/include/uapi/asm/sigcontext.h b/arch/x86/include/uapi/asm/sigcontext.h
index 0e8a973de9ee..40836a9a7250 100644
--- a/arch/x86/include/uapi/asm/sigcontext.h
+++ b/arch/x86/include/uapi/asm/sigcontext.h
@@ -177,24 +177,9 @@ struct sigcontext {
177 __u64 rip; 177 __u64 rip;
178 __u64 eflags; /* RFLAGS */ 178 __u64 eflags; /* RFLAGS */
179 __u16 cs; 179 __u16 cs;
180 180 __u16 gs;
181 /* 181 __u16 fs;
182 * Prior to 2.5.64 ("[PATCH] x86-64 updates for 2.5.64-bk3"), 182 __u16 __pad0;
183 * Linux saved and restored fs and gs in these slots. This
184 * was counterproductive, as fsbase and gsbase were never
185 * saved, so arch_prctl was presumably unreliable.
186 *
187 * If these slots are ever needed for any other purpose, there
188 * is some risk that very old 64-bit binaries could get
189 * confused. I doubt that many such binaries still work,
190 * though, since the same patch in 2.5.64 also removed the
191 * 64-bit set_thread_area syscall, so it appears that there is
192 * no TLS API that works in both pre- and post-2.5.64 kernels.
193 */
194 __u16 __pad2; /* Was gs. */
195 __u16 __pad1; /* Was fs. */
196
197 __u16 ss;
198 __u64 err; 183 __u64 err;
199 __u64 trapno; 184 __u64 trapno;
200 __u64 oldmask; 185 __u64 oldmask;
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index 845dc0df2002..206052e55517 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -943,7 +943,7 @@ static bool mp_check_pin_attr(int irq, struct irq_alloc_info *info)
943 */ 943 */
944 if (irq < nr_legacy_irqs() && data->count == 1) { 944 if (irq < nr_legacy_irqs() && data->count == 1) {
945 if (info->ioapic_trigger != data->trigger) 945 if (info->ioapic_trigger != data->trigger)
946 mp_register_handler(irq, data->trigger); 946 mp_register_handler(irq, info->ioapic_trigger);
947 data->entry.trigger = data->trigger = info->ioapic_trigger; 947 data->entry.trigger = data->trigger = info->ioapic_trigger;
948 data->entry.polarity = data->polarity = info->ioapic_polarity; 948 data->entry.polarity = data->polarity = info->ioapic_polarity;
949 } 949 }
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 922c5e0cea4c..cb9e5df42dd2 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1410,7 +1410,7 @@ void cpu_init(void)
1410 load_sp0(t, &current->thread); 1410 load_sp0(t, &current->thread);
1411 set_tss_desc(cpu, t); 1411 set_tss_desc(cpu, t);
1412 load_TR_desc(); 1412 load_TR_desc();
1413 load_LDT(&init_mm.context); 1413 load_mm_ldt(&init_mm);
1414 1414
1415 clear_all_debug_regs(); 1415 clear_all_debug_regs();
1416 dbg_restore_debug_regs(); 1416 dbg_restore_debug_regs();
@@ -1459,7 +1459,7 @@ void cpu_init(void)
1459 load_sp0(t, thread); 1459 load_sp0(t, thread);
1460 set_tss_desc(cpu, t); 1460 set_tss_desc(cpu, t);
1461 load_TR_desc(); 1461 load_TR_desc();
1462 load_LDT(&init_mm.context); 1462 load_mm_ldt(&init_mm);
1463 1463
1464 t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap); 1464 t->x86_tss.io_bitmap_base = offsetof(struct tss_struct, io_bitmap);
1465 1465
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 3658de47900f..9469dfa55607 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -2179,21 +2179,25 @@ static unsigned long get_segment_base(unsigned int segment)
2179 int idx = segment >> 3; 2179 int idx = segment >> 3;
2180 2180
2181 if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) { 2181 if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) {
2182 struct ldt_struct *ldt;
2183
2182 if (idx > LDT_ENTRIES) 2184 if (idx > LDT_ENTRIES)
2183 return 0; 2185 return 0;
2184 2186
2185 if (idx > current->active_mm->context.size) 2187 /* IRQs are off, so this synchronizes with smp_store_release */
2188 ldt = lockless_dereference(current->active_mm->context.ldt);
2189 if (!ldt || idx > ldt->size)
2186 return 0; 2190 return 0;
2187 2191
2188 desc = current->active_mm->context.ldt; 2192 desc = &ldt->entries[idx];
2189 } else { 2193 } else {
2190 if (idx > GDT_ENTRIES) 2194 if (idx > GDT_ENTRIES)
2191 return 0; 2195 return 0;
2192 2196
2193 desc = raw_cpu_ptr(gdt_page.gdt); 2197 desc = raw_cpu_ptr(gdt_page.gdt) + idx;
2194 } 2198 }
2195 2199
2196 return get_desc_base(desc + idx); 2200 return get_desc_base(desc);
2197} 2201}
2198 2202
2199#ifdef CONFIG_COMPAT 2203#ifdef CONFIG_COMPAT
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index b9826a981fb2..6326ae24e4d5 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -2534,7 +2534,7 @@ static int intel_pmu_cpu_prepare(int cpu)
2534 if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) { 2534 if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
2535 cpuc->shared_regs = allocate_shared_regs(cpu); 2535 cpuc->shared_regs = allocate_shared_regs(cpu);
2536 if (!cpuc->shared_regs) 2536 if (!cpuc->shared_regs)
2537 return NOTIFY_BAD; 2537 goto err;
2538 } 2538 }
2539 2539
2540 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) { 2540 if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
@@ -2542,18 +2542,27 @@ static int intel_pmu_cpu_prepare(int cpu)
2542 2542
2543 cpuc->constraint_list = kzalloc(sz, GFP_KERNEL); 2543 cpuc->constraint_list = kzalloc(sz, GFP_KERNEL);
2544 if (!cpuc->constraint_list) 2544 if (!cpuc->constraint_list)
2545 return NOTIFY_BAD; 2545 goto err_shared_regs;
2546 2546
2547 cpuc->excl_cntrs = allocate_excl_cntrs(cpu); 2547 cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
2548 if (!cpuc->excl_cntrs) { 2548 if (!cpuc->excl_cntrs)
2549 kfree(cpuc->constraint_list); 2549 goto err_constraint_list;
2550 kfree(cpuc->shared_regs); 2550
2551 return NOTIFY_BAD;
2552 }
2553 cpuc->excl_thread_id = 0; 2551 cpuc->excl_thread_id = 0;
2554 } 2552 }
2555 2553
2556 return NOTIFY_OK; 2554 return NOTIFY_OK;
2555
2556err_constraint_list:
2557 kfree(cpuc->constraint_list);
2558 cpuc->constraint_list = NULL;
2559
2560err_shared_regs:
2561 kfree(cpuc->shared_regs);
2562 cpuc->shared_regs = NULL;
2563
2564err:
2565 return NOTIFY_BAD;
2557} 2566}
2558 2567
2559static void intel_pmu_cpu_starting(int cpu) 2568static void intel_pmu_cpu_starting(int cpu)
diff --git a/arch/x86/kernel/cpu/perf_event_intel_cqm.c b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
index 188076161c1b..377e8f8ed391 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_cqm.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_cqm.c
@@ -952,6 +952,14 @@ static u64 intel_cqm_event_count(struct perf_event *event)
952 return 0; 952 return 0;
953 953
954 /* 954 /*
955 * Getting up-to-date values requires an SMP IPI which is not
956 * possible if we're being called in interrupt context. Return
957 * the cached values instead.
958 */
959 if (unlikely(in_interrupt()))
960 goto out;
961
962 /*
955 * Notice that we don't perform the reading of an RMID 963 * Notice that we don't perform the reading of an RMID
956 * atomically, because we can't hold a spin lock across the 964 * atomically, because we can't hold a spin lock across the
957 * IPIs. 965 * IPIs.
@@ -1247,7 +1255,7 @@ static inline void cqm_pick_event_reader(int cpu)
1247 cpumask_set_cpu(cpu, &cqm_cpumask); 1255 cpumask_set_cpu(cpu, &cqm_cpumask);
1248} 1256}
1249 1257
1250static void intel_cqm_cpu_prepare(unsigned int cpu) 1258static void intel_cqm_cpu_starting(unsigned int cpu)
1251{ 1259{
1252 struct intel_pqr_state *state = &per_cpu(pqr_state, cpu); 1260 struct intel_pqr_state *state = &per_cpu(pqr_state, cpu);
1253 struct cpuinfo_x86 *c = &cpu_data(cpu); 1261 struct cpuinfo_x86 *c = &cpu_data(cpu);
@@ -1288,13 +1296,11 @@ static int intel_cqm_cpu_notifier(struct notifier_block *nb,
1288 unsigned int cpu = (unsigned long)hcpu; 1296 unsigned int cpu = (unsigned long)hcpu;
1289 1297
1290 switch (action & ~CPU_TASKS_FROZEN) { 1298 switch (action & ~CPU_TASKS_FROZEN) {
1291 case CPU_UP_PREPARE:
1292 intel_cqm_cpu_prepare(cpu);
1293 break;
1294 case CPU_DOWN_PREPARE: 1299 case CPU_DOWN_PREPARE:
1295 intel_cqm_cpu_exit(cpu); 1300 intel_cqm_cpu_exit(cpu);
1296 break; 1301 break;
1297 case CPU_STARTING: 1302 case CPU_STARTING:
1303 intel_cqm_cpu_starting(cpu);
1298 cqm_pick_event_reader(cpu); 1304 cqm_pick_event_reader(cpu);
1299 break; 1305 break;
1300 } 1306 }
@@ -1365,7 +1371,7 @@ static int __init intel_cqm_init(void)
1365 goto out; 1371 goto out;
1366 1372
1367 for_each_online_cpu(i) { 1373 for_each_online_cpu(i) {
1368 intel_cqm_cpu_prepare(i); 1374 intel_cqm_cpu_starting(i);
1369 cqm_pick_event_reader(i); 1375 cqm_pick_event_reader(i);
1370 } 1376 }
1371 1377
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
index 0b39173dd971..1e173f6285c7 100644
--- a/arch/x86/kernel/fpu/init.c
+++ b/arch/x86/kernel/fpu/init.c
@@ -351,9 +351,15 @@ static int __init x86_noxsave_setup(char *s)
351 351
352 setup_clear_cpu_cap(X86_FEATURE_XSAVE); 352 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
353 setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT); 353 setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
354 setup_clear_cpu_cap(X86_FEATURE_XSAVEC);
354 setup_clear_cpu_cap(X86_FEATURE_XSAVES); 355 setup_clear_cpu_cap(X86_FEATURE_XSAVES);
355 setup_clear_cpu_cap(X86_FEATURE_AVX); 356 setup_clear_cpu_cap(X86_FEATURE_AVX);
356 setup_clear_cpu_cap(X86_FEATURE_AVX2); 357 setup_clear_cpu_cap(X86_FEATURE_AVX2);
358 setup_clear_cpu_cap(X86_FEATURE_AVX512F);
359 setup_clear_cpu_cap(X86_FEATURE_AVX512PF);
360 setup_clear_cpu_cap(X86_FEATURE_AVX512ER);
361 setup_clear_cpu_cap(X86_FEATURE_AVX512CD);
362 setup_clear_cpu_cap(X86_FEATURE_MPX);
357 363
358 return 1; 364 return 1;
359} 365}
diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
index c37886d759cc..2bcc0525f1c1 100644
--- a/arch/x86/kernel/ldt.c
+++ b/arch/x86/kernel/ldt.c
@@ -12,6 +12,7 @@
12#include <linux/string.h> 12#include <linux/string.h>
13#include <linux/mm.h> 13#include <linux/mm.h>
14#include <linux/smp.h> 14#include <linux/smp.h>
15#include <linux/slab.h>
15#include <linux/vmalloc.h> 16#include <linux/vmalloc.h>
16#include <linux/uaccess.h> 17#include <linux/uaccess.h>
17 18
@@ -20,82 +21,82 @@
20#include <asm/mmu_context.h> 21#include <asm/mmu_context.h>
21#include <asm/syscalls.h> 22#include <asm/syscalls.h>
22 23
23#ifdef CONFIG_SMP 24/* context.lock is held for us, so we don't need any locking. */
24static void flush_ldt(void *current_mm) 25static void flush_ldt(void *current_mm)
25{ 26{
26 if (current->active_mm == current_mm) 27 mm_context_t *pc;
27 load_LDT(&current->active_mm->context); 28
29 if (current->active_mm != current_mm)
30 return;
31
32 pc = &current->active_mm->context;
33 set_ldt(pc->ldt->entries, pc->ldt->size);
28} 34}
29#endif
30 35
31static int alloc_ldt(mm_context_t *pc, int mincount, int reload) 36/* The caller must call finalize_ldt_struct on the result. LDT starts zeroed. */
37static struct ldt_struct *alloc_ldt_struct(int size)
32{ 38{
33 void *oldldt, *newldt; 39 struct ldt_struct *new_ldt;
34 int oldsize; 40 int alloc_size;
35 41
36 if (mincount <= pc->size) 42 if (size > LDT_ENTRIES)
37 return 0; 43 return NULL;
38 oldsize = pc->size; 44
39 mincount = (mincount + (PAGE_SIZE / LDT_ENTRY_SIZE - 1)) & 45 new_ldt = kmalloc(sizeof(struct ldt_struct), GFP_KERNEL);
40 (~(PAGE_SIZE / LDT_ENTRY_SIZE - 1)); 46 if (!new_ldt)
41 if (mincount * LDT_ENTRY_SIZE > PAGE_SIZE) 47 return NULL;
42 newldt = vmalloc(mincount * LDT_ENTRY_SIZE); 48
49 BUILD_BUG_ON(LDT_ENTRY_SIZE != sizeof(struct desc_struct));
50 alloc_size = size * LDT_ENTRY_SIZE;
51
52 /*
53 * Xen is very picky: it requires a page-aligned LDT that has no
54 * trailing nonzero bytes in any page that contains LDT descriptors.
55 * Keep it simple: zero the whole allocation and never allocate less
56 * than PAGE_SIZE.
57 */
58 if (alloc_size > PAGE_SIZE)
59 new_ldt->entries = vzalloc(alloc_size);
43 else 60 else
44 newldt = (void *)__get_free_page(GFP_KERNEL); 61 new_ldt->entries = kzalloc(PAGE_SIZE, GFP_KERNEL);
45
46 if (!newldt)
47 return -ENOMEM;
48 62
49 if (oldsize) 63 if (!new_ldt->entries) {
50 memcpy(newldt, pc->ldt, oldsize * LDT_ENTRY_SIZE); 64 kfree(new_ldt);
51 oldldt = pc->ldt; 65 return NULL;
52 memset(newldt + oldsize * LDT_ENTRY_SIZE, 0, 66 }
53 (mincount - oldsize) * LDT_ENTRY_SIZE);
54 67
55 paravirt_alloc_ldt(newldt, mincount); 68 new_ldt->size = size;
69 return new_ldt;
70}
56 71
57#ifdef CONFIG_X86_64 72/* After calling this, the LDT is immutable. */
58 /* CHECKME: Do we really need this ? */ 73static void finalize_ldt_struct(struct ldt_struct *ldt)
59 wmb(); 74{
60#endif 75 paravirt_alloc_ldt(ldt->entries, ldt->size);
61 pc->ldt = newldt;
62 wmb();
63 pc->size = mincount;
64 wmb();
65
66 if (reload) {
67#ifdef CONFIG_SMP
68 preempt_disable();
69 load_LDT(pc);
70 if (!cpumask_equal(mm_cpumask(current->mm),
71 cpumask_of(smp_processor_id())))
72 smp_call_function(flush_ldt, current->mm, 1);
73 preempt_enable();
74#else
75 load_LDT(pc);
76#endif
77 }
78 if (oldsize) {
79 paravirt_free_ldt(oldldt, oldsize);
80 if (oldsize * LDT_ENTRY_SIZE > PAGE_SIZE)
81 vfree(oldldt);
82 else
83 put_page(virt_to_page(oldldt));
84 }
85 return 0;
86} 76}
87 77
88static inline int copy_ldt(mm_context_t *new, mm_context_t *old) 78/* context.lock is held */
79static void install_ldt(struct mm_struct *current_mm,
80 struct ldt_struct *ldt)
89{ 81{
90 int err = alloc_ldt(new, old->size, 0); 82 /* Synchronizes with lockless_dereference in load_mm_ldt. */
91 int i; 83 smp_store_release(&current_mm->context.ldt, ldt);
84
85 /* Activate the LDT for all CPUs using current_mm. */
86 on_each_cpu_mask(mm_cpumask(current_mm), flush_ldt, current_mm, true);
87}
92 88
93 if (err < 0) 89static void free_ldt_struct(struct ldt_struct *ldt)
94 return err; 90{
91 if (likely(!ldt))
92 return;
95 93
96 for (i = 0; i < old->size; i++) 94 paravirt_free_ldt(ldt->entries, ldt->size);
97 write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE); 95 if (ldt->size * LDT_ENTRY_SIZE > PAGE_SIZE)
98 return 0; 96 vfree(ldt->entries);
97 else
98 kfree(ldt->entries);
99 kfree(ldt);
99} 100}
100 101
101/* 102/*
@@ -104,17 +105,37 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
104 */ 105 */
105int init_new_context(struct task_struct *tsk, struct mm_struct *mm) 106int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
106{ 107{
108 struct ldt_struct *new_ldt;
107 struct mm_struct *old_mm; 109 struct mm_struct *old_mm;
108 int retval = 0; 110 int retval = 0;
109 111
110 mutex_init(&mm->context.lock); 112 mutex_init(&mm->context.lock);
111 mm->context.size = 0;
112 old_mm = current->mm; 113 old_mm = current->mm;
113 if (old_mm && old_mm->context.size > 0) { 114 if (!old_mm) {
114 mutex_lock(&old_mm->context.lock); 115 mm->context.ldt = NULL;
115 retval = copy_ldt(&mm->context, &old_mm->context); 116 return 0;
116 mutex_unlock(&old_mm->context.lock);
117 } 117 }
118
119 mutex_lock(&old_mm->context.lock);
120 if (!old_mm->context.ldt) {
121 mm->context.ldt = NULL;
122 goto out_unlock;
123 }
124
125 new_ldt = alloc_ldt_struct(old_mm->context.ldt->size);
126 if (!new_ldt) {
127 retval = -ENOMEM;
128 goto out_unlock;
129 }
130
131 memcpy(new_ldt->entries, old_mm->context.ldt->entries,
132 new_ldt->size * LDT_ENTRY_SIZE);
133 finalize_ldt_struct(new_ldt);
134
135 mm->context.ldt = new_ldt;
136
137out_unlock:
138 mutex_unlock(&old_mm->context.lock);
118 return retval; 139 return retval;
119} 140}
120 141
@@ -125,53 +146,47 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
125 */ 146 */
126void destroy_context(struct mm_struct *mm) 147void destroy_context(struct mm_struct *mm)
127{ 148{
128 if (mm->context.size) { 149 free_ldt_struct(mm->context.ldt);
129#ifdef CONFIG_X86_32 150 mm->context.ldt = NULL;
130 /* CHECKME: Can this ever happen ? */
131 if (mm == current->active_mm)
132 clear_LDT();
133#endif
134 paravirt_free_ldt(mm->context.ldt, mm->context.size);
135 if (mm->context.size * LDT_ENTRY_SIZE > PAGE_SIZE)
136 vfree(mm->context.ldt);
137 else
138 put_page(virt_to_page(mm->context.ldt));
139 mm->context.size = 0;
140 }
141} 151}
142 152
143static int read_ldt(void __user *ptr, unsigned long bytecount) 153static int read_ldt(void __user *ptr, unsigned long bytecount)
144{ 154{
145 int err; 155 int retval;
146 unsigned long size; 156 unsigned long size;
147 struct mm_struct *mm = current->mm; 157 struct mm_struct *mm = current->mm;
148 158
149 if (!mm->context.size) 159 mutex_lock(&mm->context.lock);
150 return 0; 160
161 if (!mm->context.ldt) {
162 retval = 0;
163 goto out_unlock;
164 }
165
151 if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES) 166 if (bytecount > LDT_ENTRY_SIZE * LDT_ENTRIES)
152 bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES; 167 bytecount = LDT_ENTRY_SIZE * LDT_ENTRIES;
153 168
154 mutex_lock(&mm->context.lock); 169 size = mm->context.ldt->size * LDT_ENTRY_SIZE;
155 size = mm->context.size * LDT_ENTRY_SIZE;
156 if (size > bytecount) 170 if (size > bytecount)
157 size = bytecount; 171 size = bytecount;
158 172
159 err = 0; 173 if (copy_to_user(ptr, mm->context.ldt->entries, size)) {
160 if (copy_to_user(ptr, mm->context.ldt, size)) 174 retval = -EFAULT;
161 err = -EFAULT; 175 goto out_unlock;
162 mutex_unlock(&mm->context.lock); 176 }
163 if (err < 0) 177
164 goto error_return;
165 if (size != bytecount) { 178 if (size != bytecount) {
166 /* zero-fill the rest */ 179 /* Zero-fill the rest and pretend we read bytecount bytes. */
167 if (clear_user(ptr + size, bytecount - size) != 0) { 180 if (clear_user(ptr + size, bytecount - size)) {
168 err = -EFAULT; 181 retval = -EFAULT;
169 goto error_return; 182 goto out_unlock;
170 } 183 }
171 } 184 }
172 return bytecount; 185 retval = bytecount;
173error_return: 186
174 return err; 187out_unlock:
188 mutex_unlock(&mm->context.lock);
189 return retval;
175} 190}
176 191
177static int read_default_ldt(void __user *ptr, unsigned long bytecount) 192static int read_default_ldt(void __user *ptr, unsigned long bytecount)
@@ -195,6 +210,8 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
195 struct desc_struct ldt; 210 struct desc_struct ldt;
196 int error; 211 int error;
197 struct user_desc ldt_info; 212 struct user_desc ldt_info;
213 int oldsize, newsize;
214 struct ldt_struct *new_ldt, *old_ldt;
198 215
199 error = -EINVAL; 216 error = -EINVAL;
200 if (bytecount != sizeof(ldt_info)) 217 if (bytecount != sizeof(ldt_info))
@@ -213,34 +230,39 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
213 goto out; 230 goto out;
214 } 231 }
215 232
216 mutex_lock(&mm->context.lock); 233 if ((oldmode && !ldt_info.base_addr && !ldt_info.limit) ||
217 if (ldt_info.entry_number >= mm->context.size) { 234 LDT_empty(&ldt_info)) {
218 error = alloc_ldt(&current->mm->context, 235 /* The user wants to clear the entry. */
219 ldt_info.entry_number + 1, 1); 236 memset(&ldt, 0, sizeof(ldt));
220 if (error < 0) 237 } else {
221 goto out_unlock; 238 if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) {
222 } 239 error = -EINVAL;
223 240 goto out;
224 /* Allow LDTs to be cleared by the user. */
225 if (ldt_info.base_addr == 0 && ldt_info.limit == 0) {
226 if (oldmode || LDT_empty(&ldt_info)) {
227 memset(&ldt, 0, sizeof(ldt));
228 goto install;
229 } 241 }
242
243 fill_ldt(&ldt, &ldt_info);
244 if (oldmode)
245 ldt.avl = 0;
230 } 246 }
231 247
232 if (!IS_ENABLED(CONFIG_X86_16BIT) && !ldt_info.seg_32bit) { 248 mutex_lock(&mm->context.lock);
233 error = -EINVAL; 249
250 old_ldt = mm->context.ldt;
251 oldsize = old_ldt ? old_ldt->size : 0;
252 newsize = max((int)(ldt_info.entry_number + 1), oldsize);
253
254 error = -ENOMEM;
255 new_ldt = alloc_ldt_struct(newsize);
256 if (!new_ldt)
234 goto out_unlock; 257 goto out_unlock;
235 }
236 258
237 fill_ldt(&ldt, &ldt_info); 259 if (old_ldt)
238 if (oldmode) 260 memcpy(new_ldt->entries, old_ldt->entries, oldsize * LDT_ENTRY_SIZE);
239 ldt.avl = 0; 261 new_ldt->entries[ldt_info.entry_number] = ldt;
262 finalize_ldt_struct(new_ldt);
240 263
241 /* Install the new entry ... */ 264 install_ldt(mm, new_ldt);
242install: 265 free_ldt_struct(old_ldt);
243 write_ldt_entry(mm->context.ldt, ldt_info.entry_number, &ldt);
244 error = 0; 266 error = 0;
245 267
246out_unlock: 268out_unlock:
diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
index 71d7849a07f7..f6b916387590 100644
--- a/arch/x86/kernel/process_64.c
+++ b/arch/x86/kernel/process_64.c
@@ -121,11 +121,11 @@ void __show_regs(struct pt_regs *regs, int all)
121void release_thread(struct task_struct *dead_task) 121void release_thread(struct task_struct *dead_task)
122{ 122{
123 if (dead_task->mm) { 123 if (dead_task->mm) {
124 if (dead_task->mm->context.size) { 124 if (dead_task->mm->context.ldt) {
125 pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n", 125 pr_warn("WARNING: dead process %s still has LDT? <%p/%d>\n",
126 dead_task->comm, 126 dead_task->comm,
127 dead_task->mm->context.ldt, 127 dead_task->mm->context.ldt,
128 dead_task->mm->context.size); 128 dead_task->mm->context.ldt->size);
129 BUG(); 129 BUG();
130 } 130 }
131 } 131 }
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 206996c1669d..71820c42b6ce 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -93,8 +93,15 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
93 COPY(r15); 93 COPY(r15);
94#endif /* CONFIG_X86_64 */ 94#endif /* CONFIG_X86_64 */
95 95
96#ifdef CONFIG_X86_32
96 COPY_SEG_CPL3(cs); 97 COPY_SEG_CPL3(cs);
97 COPY_SEG_CPL3(ss); 98 COPY_SEG_CPL3(ss);
99#else /* !CONFIG_X86_32 */
100 /* Kernel saves and restores only the CS segment register on signals,
101 * which is the bare minimum needed to allow mixed 32/64-bit code.
102 * App's signal handler can save/restore other segments if needed. */
103 COPY_SEG_CPL3(cs);
104#endif /* CONFIG_X86_32 */
98 105
99 get_user_ex(tmpflags, &sc->flags); 106 get_user_ex(tmpflags, &sc->flags);
100 regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS); 107 regs->flags = (regs->flags & ~FIX_EFLAGS) | (tmpflags & FIX_EFLAGS);
@@ -154,9 +161,8 @@ int setup_sigcontext(struct sigcontext __user *sc, void __user *fpstate,
154#else /* !CONFIG_X86_32 */ 161#else /* !CONFIG_X86_32 */
155 put_user_ex(regs->flags, &sc->flags); 162 put_user_ex(regs->flags, &sc->flags);
156 put_user_ex(regs->cs, &sc->cs); 163 put_user_ex(regs->cs, &sc->cs);
157 put_user_ex(0, &sc->__pad2); 164 put_user_ex(0, &sc->gs);
158 put_user_ex(0, &sc->__pad1); 165 put_user_ex(0, &sc->fs);
159 put_user_ex(regs->ss, &sc->ss);
160#endif /* CONFIG_X86_32 */ 166#endif /* CONFIG_X86_32 */
161 167
162 put_user_ex(fpstate, &sc->fpstate); 168 put_user_ex(fpstate, &sc->fpstate);
@@ -451,19 +457,9 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
451 457
452 regs->sp = (unsigned long)frame; 458 regs->sp = (unsigned long)frame;
453 459
454 /* 460 /* Set up the CS register to run signal handlers in 64-bit mode,
455 * Set up the CS and SS registers to run signal handlers in 461 even if the handler happens to be interrupting 32-bit code. */
456 * 64-bit mode, even if the handler happens to be interrupting
457 * 32-bit or 16-bit code.
458 *
459 * SS is subtle. In 64-bit mode, we don't need any particular
460 * SS descriptor, but we do need SS to be valid. It's possible
461 * that the old SS is entirely bogus -- this can happen if the
462 * signal we're trying to deliver is #GP or #SS caused by a bad
463 * SS value.
464 */
465 regs->cs = __USER_CS; 462 regs->cs = __USER_CS;
466 regs->ss = __USER_DS;
467 463
468 return 0; 464 return 0;
469} 465}
diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
index 9b4d51d0c0d0..0ccb53a9fcd9 100644
--- a/arch/x86/kernel/step.c
+++ b/arch/x86/kernel/step.c
@@ -5,6 +5,7 @@
5#include <linux/mm.h> 5#include <linux/mm.h>
6#include <linux/ptrace.h> 6#include <linux/ptrace.h>
7#include <asm/desc.h> 7#include <asm/desc.h>
8#include <asm/mmu_context.h>
8 9
9unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs) 10unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *regs)
10{ 11{
@@ -27,13 +28,14 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
27 struct desc_struct *desc; 28 struct desc_struct *desc;
28 unsigned long base; 29 unsigned long base;
29 30
30 seg &= ~7UL; 31 seg >>= 3;
31 32
32 mutex_lock(&child->mm->context.lock); 33 mutex_lock(&child->mm->context.lock);
33 if (unlikely((seg >> 3) >= child->mm->context.size)) 34 if (unlikely(!child->mm->context.ldt ||
35 seg >= child->mm->context.ldt->size))
34 addr = -1L; /* bogus selector, access would fault */ 36 addr = -1L; /* bogus selector, access would fault */
35 else { 37 else {
36 desc = child->mm->context.ldt + seg; 38 desc = &child->mm->context.ldt->entries[seg];
37 base = get_desc_base(desc); 39 base = get_desc_base(desc);
38 40
39 /* 16-bit code segment? */ 41 /* 16-bit code segment? */
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 954e98a8c2e3..2a5ca97c263b 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1595,7 +1595,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
1595 for (i = 0; i < APIC_LVT_NUM; i++) 1595 for (i = 0; i < APIC_LVT_NUM; i++)
1596 apic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED); 1596 apic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
1597 apic_update_lvtt(apic); 1597 apic_update_lvtt(apic);
1598 if (!(vcpu->kvm->arch.disabled_quirks & KVM_QUIRK_LINT0_REENABLED)) 1598 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_LINT0_REENABLED))
1599 apic_set_reg(apic, APIC_LVT0, 1599 apic_set_reg(apic, APIC_LVT0,
1600 SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT)); 1600 SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
1601 apic_manage_nmi_watchdog(apic, kvm_apic_get_reg(apic, APIC_LVT0)); 1601 apic_manage_nmi_watchdog(apic, kvm_apic_get_reg(apic, APIC_LVT0));
diff --git a/arch/x86/kvm/mtrr.c b/arch/x86/kvm/mtrr.c
index de1d2d8062e2..9e8bf13572e6 100644
--- a/arch/x86/kvm/mtrr.c
+++ b/arch/x86/kvm/mtrr.c
@@ -120,6 +120,16 @@ static u8 mtrr_default_type(struct kvm_mtrr *mtrr_state)
120 return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK; 120 return mtrr_state->deftype & IA32_MTRR_DEF_TYPE_TYPE_MASK;
121} 121}
122 122
123static u8 mtrr_disabled_type(void)
124{
125 /*
126 * Intel SDM 11.11.2.2: all MTRRs are disabled when
127 * IA32_MTRR_DEF_TYPE.E bit is cleared, and the UC
128 * memory type is applied to all of physical memory.
129 */
130 return MTRR_TYPE_UNCACHABLE;
131}
132
123/* 133/*
124* Three terms are used in the following code: 134* Three terms are used in the following code:
125* - segment, it indicates the address segments covered by fixed MTRRs. 135* - segment, it indicates the address segments covered by fixed MTRRs.
@@ -434,6 +444,8 @@ struct mtrr_iter {
434 444
435 /* output fields. */ 445 /* output fields. */
436 int mem_type; 446 int mem_type;
447 /* mtrr is completely disabled? */
448 bool mtrr_disabled;
437 /* [start, end) is not fully covered in MTRRs? */ 449 /* [start, end) is not fully covered in MTRRs? */
438 bool partial_map; 450 bool partial_map;
439 451
@@ -549,7 +561,7 @@ static void mtrr_lookup_var_next(struct mtrr_iter *iter)
549static void mtrr_lookup_start(struct mtrr_iter *iter) 561static void mtrr_lookup_start(struct mtrr_iter *iter)
550{ 562{
551 if (!mtrr_is_enabled(iter->mtrr_state)) { 563 if (!mtrr_is_enabled(iter->mtrr_state)) {
552 iter->partial_map = true; 564 iter->mtrr_disabled = true;
553 return; 565 return;
554 } 566 }
555 567
@@ -563,6 +575,7 @@ static void mtrr_lookup_init(struct mtrr_iter *iter,
563 iter->mtrr_state = mtrr_state; 575 iter->mtrr_state = mtrr_state;
564 iter->start = start; 576 iter->start = start;
565 iter->end = end; 577 iter->end = end;
578 iter->mtrr_disabled = false;
566 iter->partial_map = false; 579 iter->partial_map = false;
567 iter->fixed = false; 580 iter->fixed = false;
568 iter->range = NULL; 581 iter->range = NULL;
@@ -656,15 +669,19 @@ u8 kvm_mtrr_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn)
656 return MTRR_TYPE_WRBACK; 669 return MTRR_TYPE_WRBACK;
657 } 670 }
658 671
659 /* It is not covered by MTRRs. */ 672 if (iter.mtrr_disabled)
660 if (iter.partial_map) { 673 return mtrr_disabled_type();
661 /* 674
662 * We just check one page, partially covered by MTRRs is 675 /* not contained in any MTRRs. */
663 * impossible. 676 if (type == -1)
664 */ 677 return mtrr_default_type(mtrr_state);
665 WARN_ON(type != -1); 678
666 type = mtrr_default_type(mtrr_state); 679 /*
667 } 680 * We just check one page, partially covered by MTRRs is
681 * impossible.
682 */
683 WARN_ON(iter.partial_map);
684
668 return type; 685 return type;
669} 686}
670EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type); 687EXPORT_SYMBOL_GPL(kvm_mtrr_get_guest_memory_type);
@@ -689,6 +706,9 @@ bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
689 return false; 706 return false;
690 } 707 }
691 708
709 if (iter.mtrr_disabled)
710 return true;
711
692 if (!iter.partial_map) 712 if (!iter.partial_map)
693 return true; 713 return true;
694 714
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index bbc678a66b18..8e0c0844c6b9 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1672,7 +1672,7 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
1672 * does not do it - this results in some delay at 1672 * does not do it - this results in some delay at
1673 * reboot 1673 * reboot
1674 */ 1674 */
1675 if (!(vcpu->kvm->arch.disabled_quirks & KVM_QUIRK_CD_NW_CLEARED)) 1675 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
1676 cr0 &= ~(X86_CR0_CD | X86_CR0_NW); 1676 cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
1677 svm->vmcb->save.cr0 = cr0; 1677 svm->vmcb->save.cr0 = cr0;
1678 mark_dirty(svm->vmcb, VMCB_CR); 1678 mark_dirty(svm->vmcb, VMCB_CR);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 5b4e9384717a..83b7b5cd75d5 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -8650,7 +8650,10 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
8650 8650
8651 if (kvm_read_cr0(vcpu) & X86_CR0_CD) { 8651 if (kvm_read_cr0(vcpu) & X86_CR0_CD) {
8652 ipat = VMX_EPT_IPAT_BIT; 8652 ipat = VMX_EPT_IPAT_BIT;
8653 cache = MTRR_TYPE_UNCACHABLE; 8653 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
8654 cache = MTRR_TYPE_WRBACK;
8655 else
8656 cache = MTRR_TYPE_UNCACHABLE;
8654 goto exit; 8657 goto exit;
8655 } 8658 }
8656 8659
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 5ef2560075bf..8f0f6eca69da 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2105,7 +2105,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2105 if (guest_cpuid_has_tsc_adjust(vcpu)) { 2105 if (guest_cpuid_has_tsc_adjust(vcpu)) {
2106 if (!msr_info->host_initiated) { 2106 if (!msr_info->host_initiated) {
2107 s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr; 2107 s64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
2108 kvm_x86_ops->adjust_tsc_offset(vcpu, adj, true); 2108 adjust_tsc_offset_guest(vcpu, adj);
2109 } 2109 }
2110 vcpu->arch.ia32_tsc_adjust_msr = data; 2110 vcpu->arch.ia32_tsc_adjust_msr = data;
2111 } 2111 }
@@ -6327,6 +6327,7 @@ static void process_smi_save_state_64(struct kvm_vcpu *vcpu, char *buf)
6327static void process_smi(struct kvm_vcpu *vcpu) 6327static void process_smi(struct kvm_vcpu *vcpu)
6328{ 6328{
6329 struct kvm_segment cs, ds; 6329 struct kvm_segment cs, ds;
6330 struct desc_ptr dt;
6330 char buf[512]; 6331 char buf[512];
6331 u32 cr0; 6332 u32 cr0;
6332 6333
@@ -6359,6 +6360,10 @@ static void process_smi(struct kvm_vcpu *vcpu)
6359 6360
6360 kvm_x86_ops->set_cr4(vcpu, 0); 6361 kvm_x86_ops->set_cr4(vcpu, 0);
6361 6362
6363 /* Undocumented: IDT limit is set to zero on entry to SMM. */
6364 dt.address = dt.size = 0;
6365 kvm_x86_ops->set_idt(vcpu, &dt);
6366
6362 __kvm_set_dr(vcpu, 7, DR7_FIXED_1); 6367 __kvm_set_dr(vcpu, 7, DR7_FIXED_1);
6363 6368
6364 cs.selector = (vcpu->arch.smbase >> 4) & 0xffff; 6369 cs.selector = (vcpu->arch.smbase >> 4) & 0xffff;
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index edc8cdcd786b..0ca2f3e4803c 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -147,6 +147,11 @@ static inline void kvm_register_writel(struct kvm_vcpu *vcpu,
147 return kvm_register_write(vcpu, reg, val); 147 return kvm_register_write(vcpu, reg, val);
148} 148}
149 149
150static inline bool kvm_check_has_quirk(struct kvm *kvm, u64 quirk)
151{
152 return !(kvm->arch.disabled_quirks & quirk);
153}
154
150void kvm_before_handle_nmi(struct kvm_vcpu *vcpu); 155void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
151void kvm_after_handle_nmi(struct kvm_vcpu *vcpu); 156void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
152void kvm_set_pending_timer(struct kvm_vcpu *vcpu); 157void kvm_set_pending_timer(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/math-emu/fpu_entry.c b/arch/x86/math-emu/fpu_entry.c
index f37e84ab49f3..3d8f2e421466 100644
--- a/arch/x86/math-emu/fpu_entry.c
+++ b/arch/x86/math-emu/fpu_entry.c
@@ -29,7 +29,6 @@
29 29
30#include <asm/uaccess.h> 30#include <asm/uaccess.h>
31#include <asm/traps.h> 31#include <asm/traps.h>
32#include <asm/desc.h>
33#include <asm/user.h> 32#include <asm/user.h>
34#include <asm/fpu/internal.h> 33#include <asm/fpu/internal.h>
35 34
@@ -181,7 +180,7 @@ void math_emulate(struct math_emu_info *info)
181 math_abort(FPU_info, SIGILL); 180 math_abort(FPU_info, SIGILL);
182 } 181 }
183 182
184 code_descriptor = LDT_DESCRIPTOR(FPU_CS); 183 code_descriptor = FPU_get_ldt_descriptor(FPU_CS);
185 if (SEG_D_SIZE(code_descriptor)) { 184 if (SEG_D_SIZE(code_descriptor)) {
186 /* The above test may be wrong, the book is not clear */ 185 /* The above test may be wrong, the book is not clear */
187 /* Segmented 32 bit protected mode */ 186 /* Segmented 32 bit protected mode */
diff --git a/arch/x86/math-emu/fpu_system.h b/arch/x86/math-emu/fpu_system.h
index 9ccecb61a4fa..5e044d506b7a 100644
--- a/arch/x86/math-emu/fpu_system.h
+++ b/arch/x86/math-emu/fpu_system.h
@@ -16,9 +16,24 @@
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/mm.h> 17#include <linux/mm.h>
18 18
19/* s is always from a cpu register, and the cpu does bounds checking 19#include <asm/desc.h>
20 * during register load --> no further bounds checks needed */ 20#include <asm/mmu_context.h>
21#define LDT_DESCRIPTOR(s) (((struct desc_struct *)current->mm->context.ldt)[(s) >> 3]) 21
22static inline struct desc_struct FPU_get_ldt_descriptor(unsigned seg)
23{
24 static struct desc_struct zero_desc;
25 struct desc_struct ret = zero_desc;
26
27#ifdef CONFIG_MODIFY_LDT_SYSCALL
28 seg >>= 3;
29 mutex_lock(&current->mm->context.lock);
30 if (current->mm->context.ldt && seg < current->mm->context.ldt->size)
31 ret = current->mm->context.ldt->entries[seg];
32 mutex_unlock(&current->mm->context.lock);
33#endif
34 return ret;
35}
36
22#define SEG_D_SIZE(x) ((x).b & (3 << 21)) 37#define SEG_D_SIZE(x) ((x).b & (3 << 21))
23#define SEG_G_BIT(x) ((x).b & (1 << 23)) 38#define SEG_G_BIT(x) ((x).b & (1 << 23))
24#define SEG_GRANULARITY(x) (((x).b & (1 << 23)) ? 4096 : 1) 39#define SEG_GRANULARITY(x) (((x).b & (1 << 23)) ? 4096 : 1)
diff --git a/arch/x86/math-emu/get_address.c b/arch/x86/math-emu/get_address.c
index 6ef5e99380f9..8300db71c2a6 100644
--- a/arch/x86/math-emu/get_address.c
+++ b/arch/x86/math-emu/get_address.c
@@ -20,7 +20,6 @@
20#include <linux/stddef.h> 20#include <linux/stddef.h>
21 21
22#include <asm/uaccess.h> 22#include <asm/uaccess.h>
23#include <asm/desc.h>
24 23
25#include "fpu_system.h" 24#include "fpu_system.h"
26#include "exception.h" 25#include "exception.h"
@@ -158,7 +157,7 @@ static long pm_address(u_char FPU_modrm, u_char segment,
158 addr->selector = PM_REG_(segment); 157 addr->selector = PM_REG_(segment);
159 } 158 }
160 159
161 descriptor = LDT_DESCRIPTOR(PM_REG_(segment)); 160 descriptor = FPU_get_ldt_descriptor(addr->selector);
162 base_address = SEG_BASE_ADDR(descriptor); 161 base_address = SEG_BASE_ADDR(descriptor);
163 address = base_address + offset; 162 address = base_address + offset;
164 limit = base_address 163 limit = base_address
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index cc5ccc415cc0..b9c78f3bcd67 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -63,8 +63,6 @@ static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
63 !PageReserved(pfn_to_page(start_pfn + i))) 63 !PageReserved(pfn_to_page(start_pfn + i)))
64 return 1; 64 return 1;
65 65
66 WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);
67
68 return 0; 66 return 0;
69} 67}
70 68
@@ -94,7 +92,6 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
94 pgprot_t prot; 92 pgprot_t prot;
95 int retval; 93 int retval;
96 void __iomem *ret_addr; 94 void __iomem *ret_addr;
97 int ram_region;
98 95
99 /* Don't allow wraparound or zero size */ 96 /* Don't allow wraparound or zero size */
100 last_addr = phys_addr + size - 1; 97 last_addr = phys_addr + size - 1;
@@ -117,23 +114,15 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
117 /* 114 /*
118 * Don't allow anybody to remap normal RAM that we're using.. 115 * Don't allow anybody to remap normal RAM that we're using..
119 */ 116 */
120 /* First check if whole region can be identified as RAM or not */ 117 pfn = phys_addr >> PAGE_SHIFT;
121 ram_region = region_is_ram(phys_addr, size); 118 last_pfn = last_addr >> PAGE_SHIFT;
122 if (ram_region > 0) { 119 if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
123 WARN_ONCE(1, "ioremap on RAM at 0x%lx - 0x%lx\n", 120 __ioremap_check_ram) == 1) {
124 (unsigned long int)phys_addr, 121 WARN_ONCE(1, "ioremap on RAM at %pa - %pa\n",
125 (unsigned long int)last_addr); 122 &phys_addr, &last_addr);
126 return NULL; 123 return NULL;
127 } 124 }
128 125
129 /* If could not be identified(-1), check page by page */
130 if (ram_region < 0) {
131 pfn = phys_addr >> PAGE_SHIFT;
132 last_pfn = last_addr >> PAGE_SHIFT;
133 if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
134 __ioremap_check_ram) == 1)
135 return NULL;
136 }
137 /* 126 /*
138 * Mappings have to be page-aligned 127 * Mappings have to be page-aligned
139 */ 128 */
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 9d518d693b4b..844b06d67df4 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -126,3 +126,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
126 mm->get_unmapped_area = arch_get_unmapped_area_topdown; 126 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
127 } 127 }
128} 128}
129
130const char *arch_vma_name(struct vm_area_struct *vma)
131{
132 if (vma->vm_flags & VM_MPX)
133 return "[mpx]";
134 return NULL;
135}
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index 7a657f58bbea..db1b0bc5017c 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -20,20 +20,6 @@
20#define CREATE_TRACE_POINTS 20#define CREATE_TRACE_POINTS
21#include <asm/trace/mpx.h> 21#include <asm/trace/mpx.h>
22 22
23static const char *mpx_mapping_name(struct vm_area_struct *vma)
24{
25 return "[mpx]";
26}
27
28static struct vm_operations_struct mpx_vma_ops = {
29 .name = mpx_mapping_name,
30};
31
32static int is_mpx_vma(struct vm_area_struct *vma)
33{
34 return (vma->vm_ops == &mpx_vma_ops);
35}
36
37static inline unsigned long mpx_bd_size_bytes(struct mm_struct *mm) 23static inline unsigned long mpx_bd_size_bytes(struct mm_struct *mm)
38{ 24{
39 if (is_64bit_mm(mm)) 25 if (is_64bit_mm(mm))
@@ -53,9 +39,6 @@ static inline unsigned long mpx_bt_size_bytes(struct mm_struct *mm)
53/* 39/*
54 * This is really a simplified "vm_mmap". it only handles MPX 40 * This is really a simplified "vm_mmap". it only handles MPX
55 * bounds tables (the bounds directory is user-allocated). 41 * bounds tables (the bounds directory is user-allocated).
56 *
57 * Later on, we use the vma->vm_ops to uniquely identify these
58 * VMAs.
59 */ 42 */
60static unsigned long mpx_mmap(unsigned long len) 43static unsigned long mpx_mmap(unsigned long len)
61{ 44{
@@ -101,7 +84,6 @@ static unsigned long mpx_mmap(unsigned long len)
101 ret = -ENOMEM; 84 ret = -ENOMEM;
102 goto out; 85 goto out;
103 } 86 }
104 vma->vm_ops = &mpx_vma_ops;
105 87
106 if (vm_flags & VM_LOCKED) { 88 if (vm_flags & VM_LOCKED) {
107 up_write(&mm->mmap_sem); 89 up_write(&mm->mmap_sem);
@@ -812,7 +794,7 @@ static noinline int zap_bt_entries_mapping(struct mm_struct *mm,
812 * so stop immediately and return an error. This 794 * so stop immediately and return an error. This
813 * probably results in a SIGSEGV. 795 * probably results in a SIGSEGV.
814 */ 796 */
815 if (!is_mpx_vma(vma)) 797 if (!(vma->vm_flags & VM_MPX))
816 return -EINVAL; 798 return -EINVAL;
817 799
818 len = min(vma->vm_end, end) - addr; 800 len = min(vma->vm_end, end) - addr;
@@ -945,9 +927,9 @@ static int try_unmap_single_bt(struct mm_struct *mm,
945 * lots of tables even though we have no actual table 927 * lots of tables even though we have no actual table
946 * entries in use. 928 * entries in use.
947 */ 929 */
948 while (next && is_mpx_vma(next)) 930 while (next && (next->vm_flags & VM_MPX))
949 next = next->vm_next; 931 next = next->vm_next;
950 while (prev && is_mpx_vma(prev)) 932 while (prev && (prev->vm_flags & VM_MPX))
951 prev = prev->vm_prev; 933 prev = prev->vm_prev;
952 /* 934 /*
953 * We know 'start' and 'end' lie within an area controlled 935 * We know 'start' and 'end' lie within an area controlled
diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
index 3250f2371aea..90b924acd982 100644
--- a/arch/x86/mm/tlb.c
+++ b/arch/x86/mm/tlb.c
@@ -117,7 +117,7 @@ static void flush_tlb_func(void *info)
117 } else { 117 } else {
118 unsigned long addr; 118 unsigned long addr;
119 unsigned long nr_pages = 119 unsigned long nr_pages =
120 f->flush_end - f->flush_start / PAGE_SIZE; 120 (f->flush_end - f->flush_start) / PAGE_SIZE;
121 addr = f->flush_start; 121 addr = f->flush_start;
122 while (addr < f->flush_end) { 122 while (addr < f->flush_end) {
123 __flush_tlb_single(addr); 123 __flush_tlb_single(addr);
diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
index 579a8fd74be0..be2e7a2b10d7 100644
--- a/arch/x86/net/bpf_jit_comp.c
+++ b/arch/x86/net/bpf_jit_comp.c
@@ -269,7 +269,7 @@ static void emit_bpf_tail_call(u8 **pprog)
269 EMIT4(0x48, 0x8B, 0x46, /* mov rax, qword ptr [rsi + 16] */ 269 EMIT4(0x48, 0x8B, 0x46, /* mov rax, qword ptr [rsi + 16] */
270 offsetof(struct bpf_array, map.max_entries)); 270 offsetof(struct bpf_array, map.max_entries));
271 EMIT3(0x48, 0x39, 0xD0); /* cmp rax, rdx */ 271 EMIT3(0x48, 0x39, 0xD0); /* cmp rax, rdx */
272#define OFFSET1 44 /* number of bytes to jump */ 272#define OFFSET1 47 /* number of bytes to jump */
273 EMIT2(X86_JBE, OFFSET1); /* jbe out */ 273 EMIT2(X86_JBE, OFFSET1); /* jbe out */
274 label1 = cnt; 274 label1 = cnt;
275 275
@@ -278,15 +278,15 @@ static void emit_bpf_tail_call(u8 **pprog)
278 */ 278 */
279 EMIT2_off32(0x8B, 0x85, -STACKSIZE + 36); /* mov eax, dword ptr [rbp - 516] */ 279 EMIT2_off32(0x8B, 0x85, -STACKSIZE + 36); /* mov eax, dword ptr [rbp - 516] */
280 EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */ 280 EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */
281#define OFFSET2 33 281#define OFFSET2 36
282 EMIT2(X86_JA, OFFSET2); /* ja out */ 282 EMIT2(X86_JA, OFFSET2); /* ja out */
283 label2 = cnt; 283 label2 = cnt;
284 EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */ 284 EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */
285 EMIT2_off32(0x89, 0x85, -STACKSIZE + 36); /* mov dword ptr [rbp - 516], eax */ 285 EMIT2_off32(0x89, 0x85, -STACKSIZE + 36); /* mov dword ptr [rbp - 516], eax */
286 286
287 /* prog = array->prog[index]; */ 287 /* prog = array->prog[index]; */
288 EMIT4(0x48, 0x8D, 0x44, 0xD6); /* lea rax, [rsi + rdx * 8 + 0x50] */ 288 EMIT4_off32(0x48, 0x8D, 0x84, 0xD6, /* lea rax, [rsi + rdx * 8 + offsetof(...)] */
289 EMIT1(offsetof(struct bpf_array, prog)); 289 offsetof(struct bpf_array, prog));
290 EMIT3(0x48, 0x8B, 0x00); /* mov rax, qword ptr [rax] */ 290 EMIT3(0x48, 0x8B, 0x00); /* mov rax, qword ptr [rax] */
291 291
292 /* if (prog == NULL) 292 /* if (prog == NULL)
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
index cfba30f27392..e4308fe6afe8 100644
--- a/arch/x86/platform/efi/efi.c
+++ b/arch/x86/platform/efi/efi.c
@@ -972,6 +972,11 @@ u64 efi_mem_attributes(unsigned long phys_addr)
972 972
973static int __init arch_parse_efi_cmdline(char *str) 973static int __init arch_parse_efi_cmdline(char *str)
974{ 974{
975 if (!str) {
976 pr_warn("need at least one option\n");
977 return -EINVAL;
978 }
979
975 if (parse_option_str(str, "old_map")) 980 if (parse_option_str(str, "old_map"))
976 set_bit(EFI_OLD_MEMMAP, &efi.flags); 981 set_bit(EFI_OLD_MEMMAP, &efi.flags);
977 if (parse_option_str(str, "debug")) 982 if (parse_option_str(str, "debug"))
diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
index 0d7dd1f5ac36..9ab52791fed5 100644
--- a/arch/x86/power/cpu.c
+++ b/arch/x86/power/cpu.c
@@ -22,6 +22,7 @@
22#include <asm/fpu/internal.h> 22#include <asm/fpu/internal.h>
23#include <asm/debugreg.h> 23#include <asm/debugreg.h>
24#include <asm/cpu.h> 24#include <asm/cpu.h>
25#include <asm/mmu_context.h>
25 26
26#ifdef CONFIG_X86_32 27#ifdef CONFIG_X86_32
27__visible unsigned long saved_context_ebx; 28__visible unsigned long saved_context_ebx;
@@ -153,7 +154,7 @@ static void fix_processor_context(void)
153 syscall_init(); /* This sets MSR_*STAR and related */ 154 syscall_init(); /* This sets MSR_*STAR and related */
154#endif 155#endif
155 load_TR_desc(); /* This does ltr */ 156 load_TR_desc(); /* This does ltr */
156 load_LDT(&current->active_mm->context); /* This does lldt */ 157 load_mm_ldt(current->active_mm); /* This does lldt */
157 158
158 fpu__resume_cpu(); 159 fpu__resume_cpu();
159} 160}
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile
index 7322755f337a..4b6e29ac0968 100644
--- a/arch/x86/xen/Makefile
+++ b/arch/x86/xen/Makefile
@@ -13,13 +13,13 @@ CFLAGS_mmu.o := $(nostackp)
13obj-y := enlighten.o setup.o multicalls.o mmu.o irq.o \ 13obj-y := enlighten.o setup.o multicalls.o mmu.o irq.o \
14 time.o xen-asm.o xen-asm_$(BITS).o \ 14 time.o xen-asm.o xen-asm_$(BITS).o \
15 grant-table.o suspend.o platform-pci-unplug.o \ 15 grant-table.o suspend.o platform-pci-unplug.o \
16 p2m.o 16 p2m.o apic.o
17 17
18obj-$(CONFIG_EVENT_TRACING) += trace.o 18obj-$(CONFIG_EVENT_TRACING) += trace.o
19 19
20obj-$(CONFIG_SMP) += smp.o 20obj-$(CONFIG_SMP) += smp.o
21obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o 21obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o
22obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o 22obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o
23obj-$(CONFIG_XEN_DOM0) += apic.o vga.o 23obj-$(CONFIG_XEN_DOM0) += vga.o
24obj-$(CONFIG_SWIOTLB_XEN) += pci-swiotlb-xen.o 24obj-$(CONFIG_SWIOTLB_XEN) += pci-swiotlb-xen.o
25obj-$(CONFIG_XEN_EFI) += efi.o 25obj-$(CONFIG_XEN_EFI) += efi.o
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 0b95c9b8283f..11d6fb4e8483 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -483,6 +483,7 @@ static void set_aliased_prot(void *v, pgprot_t prot)
483 pte_t pte; 483 pte_t pte;
484 unsigned long pfn; 484 unsigned long pfn;
485 struct page *page; 485 struct page *page;
486 unsigned char dummy;
486 487
487 ptep = lookup_address((unsigned long)v, &level); 488 ptep = lookup_address((unsigned long)v, &level);
488 BUG_ON(ptep == NULL); 489 BUG_ON(ptep == NULL);
@@ -492,6 +493,32 @@ static void set_aliased_prot(void *v, pgprot_t prot)
492 493
493 pte = pfn_pte(pfn, prot); 494 pte = pfn_pte(pfn, prot);
494 495
496 /*
497 * Careful: update_va_mapping() will fail if the virtual address
498 * we're poking isn't populated in the page tables. We don't
499 * need to worry about the direct map (that's always in the page
500 * tables), but we need to be careful about vmap space. In
501 * particular, the top level page table can lazily propagate
502 * entries between processes, so if we've switched mms since we
503 * vmapped the target in the first place, we might not have the
504 * top-level page table entry populated.
505 *
506 * We disable preemption because we want the same mm active when
507 * we probe the target and when we issue the hypercall. We'll
508 * have the same nominal mm, but if we're a kernel thread, lazy
509 * mm dropping could change our pgd.
510 *
511 * Out of an abundance of caution, this uses __get_user() to fault
512 * in the target address just in case there's some obscure case
513 * in which the target address isn't readable.
514 */
515
516 preempt_disable();
517
518 pagefault_disable(); /* Avoid warnings due to being atomic. */
519 __get_user(dummy, (unsigned char __user __force *)v);
520 pagefault_enable();
521
495 if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0)) 522 if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0))
496 BUG(); 523 BUG();
497 524
@@ -503,6 +530,8 @@ static void set_aliased_prot(void *v, pgprot_t prot)
503 BUG(); 530 BUG();
504 } else 531 } else
505 kmap_flush_unused(); 532 kmap_flush_unused();
533
534 preempt_enable();
506} 535}
507 536
508static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries) 537static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
@@ -510,6 +539,17 @@ static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries)
510 const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE; 539 const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE;
511 int i; 540 int i;
512 541
542 /*
543 * We need to mark the all aliases of the LDT pages RO. We
544 * don't need to call vm_flush_aliases(), though, since that's
545 * only responsible for flushing aliases out the TLBs, not the
546 * page tables, and Xen will flush the TLB for us if needed.
547 *
548 * To avoid confusing future readers: none of this is necessary
549 * to load the LDT. The hypervisor only checks this when the
550 * LDT is faulted in due to subsequent descriptor access.
551 */
552
513 for(i = 0; i < entries; i += entries_per_page) 553 for(i = 0; i < entries; i += entries_per_page)
514 set_aliased_prot(ldt + i, PAGE_KERNEL_RO); 554 set_aliased_prot(ldt + i, PAGE_KERNEL_RO);
515} 555}
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index c20fe29e65f4..2292721b1d10 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -101,17 +101,15 @@ struct dom0_vga_console_info;
101 101
102#ifdef CONFIG_XEN_DOM0 102#ifdef CONFIG_XEN_DOM0
103void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size); 103void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size);
104void __init xen_init_apic(void);
105#else 104#else
106static inline void __init xen_init_vga(const struct dom0_vga_console_info *info, 105static inline void __init xen_init_vga(const struct dom0_vga_console_info *info,
107 size_t size) 106 size_t size)
108{ 107{
109} 108}
110static inline void __init xen_init_apic(void)
111{
112}
113#endif 109#endif
114 110
111void __init xen_init_apic(void);
112
115#ifdef CONFIG_XEN_EFI 113#ifdef CONFIG_XEN_EFI
116extern void xen_efi_init(void); 114extern void xen_efi_init(void);
117#else 115#else
diff --git a/block/bio.c b/block/bio.c
index 2a00d349cd68..d6e5ba3399f0 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1831,8 +1831,9 @@ EXPORT_SYMBOL(bio_endio);
1831 * Allocates and returns a new bio which represents @sectors from the start of 1831 * Allocates and returns a new bio which represents @sectors from the start of
1832 * @bio, and updates @bio to represent the remaining sectors. 1832 * @bio, and updates @bio to represent the remaining sectors.
1833 * 1833 *
1834 * The newly allocated bio will point to @bio's bi_io_vec; it is the caller's 1834 * Unless this is a discard request the newly allocated bio will point
1835 * responsibility to ensure that @bio is not freed before the split. 1835 * to @bio's bi_io_vec; it is the caller's responsibility to ensure that
1836 * @bio is not freed before the split.
1836 */ 1837 */
1837struct bio *bio_split(struct bio *bio, int sectors, 1838struct bio *bio_split(struct bio *bio, int sectors,
1838 gfp_t gfp, struct bio_set *bs) 1839 gfp_t gfp, struct bio_set *bs)
@@ -1842,7 +1843,15 @@ struct bio *bio_split(struct bio *bio, int sectors,
1842 BUG_ON(sectors <= 0); 1843 BUG_ON(sectors <= 0);
1843 BUG_ON(sectors >= bio_sectors(bio)); 1844 BUG_ON(sectors >= bio_sectors(bio));
1844 1845
1845 split = bio_clone_fast(bio, gfp, bs); 1846 /*
1847 * Discards need a mutable bio_vec to accommodate the payload
1848 * required by the DSM TRIM and UNMAP commands.
1849 */
1850 if (bio->bi_rw & REQ_DISCARD)
1851 split = bio_clone_bioset(bio, gfp, bs);
1852 else
1853 split = bio_clone_fast(bio, gfp, bs);
1854
1846 if (!split) 1855 if (!split)
1847 return NULL; 1856 return NULL;
1848 1857
@@ -2009,6 +2018,7 @@ int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css)
2009 bio->bi_css = blkcg_css; 2018 bio->bi_css = blkcg_css;
2010 return 0; 2019 return 0;
2011} 2020}
2021EXPORT_SYMBOL_GPL(bio_associate_blkcg);
2012 2022
2013/** 2023/**
2014 * bio_associate_current - associate a bio with %current 2024 * bio_associate_current - associate a bio with %current
@@ -2039,6 +2049,7 @@ int bio_associate_current(struct bio *bio)
2039 bio->bi_css = task_get_css(current, blkio_cgrp_id); 2049 bio->bi_css = task_get_css(current, blkio_cgrp_id);
2040 return 0; 2050 return 0;
2041} 2051}
2052EXPORT_SYMBOL_GPL(bio_associate_current);
2042 2053
2043/** 2054/**
2044 * bio_disassociate_task - undo bio_associate_current() 2055 * bio_disassociate_task - undo bio_associate_current()
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 9da02c021ebe..d6283b3f5db5 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -718,8 +718,12 @@ int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
718 return -EINVAL; 718 return -EINVAL;
719 719
720 disk = get_gendisk(MKDEV(major, minor), &part); 720 disk = get_gendisk(MKDEV(major, minor), &part);
721 if (!disk || part) 721 if (!disk)
722 return -EINVAL; 722 return -EINVAL;
723 if (part) {
724 put_disk(disk);
725 return -EINVAL;
726 }
723 727
724 rcu_read_lock(); 728 rcu_read_lock();
725 spin_lock_irq(disk->queue->queue_lock); 729 spin_lock_irq(disk->queue->queue_lock);
diff --git a/block/blk-settings.c b/block/blk-settings.c
index 12600bfffca9..e0057d035200 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -241,8 +241,8 @@ EXPORT_SYMBOL(blk_queue_bounce_limit);
241 * Description: 241 * Description:
242 * Enables a low level driver to set a hard upper limit, 242 * Enables a low level driver to set a hard upper limit,
243 * max_hw_sectors, on the size of requests. max_hw_sectors is set by 243 * max_hw_sectors, on the size of requests. max_hw_sectors is set by
244 * the device driver based upon the combined capabilities of I/O 244 * the device driver based upon the capabilities of the I/O
245 * controller and storage device. 245 * controller.
246 * 246 *
247 * max_sectors is a soft limit imposed by the block layer for 247 * max_sectors is a soft limit imposed by the block layer for
248 * filesystem type requests. This value can be overridden on a 248 * filesystem type requests. This value can be overridden on a
diff --git a/drivers/acpi/device_pm.c b/drivers/acpi/device_pm.c
index 717afcdb5f4a..88dbbb115285 100644
--- a/drivers/acpi/device_pm.c
+++ b/drivers/acpi/device_pm.c
@@ -231,7 +231,7 @@ int acpi_device_set_power(struct acpi_device *device, int state)
231 dev_warn(&device->dev, "Failed to change power state to %s\n", 231 dev_warn(&device->dev, "Failed to change power state to %s\n",
232 acpi_power_state_string(state)); 232 acpi_power_state_string(state));
233 } else { 233 } else {
234 device->power.state = state; 234 device->power.state = target_state;
235 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 235 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
236 "Device [%s] transitioned to %s\n", 236 "Device [%s] transitioned to %s\n",
237 device->pnp.bus_id, 237 device->pnp.bus_id,
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index e83fc3d0da9c..db5d9f79a247 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -2478,6 +2478,10 @@ int ata_dev_configure(struct ata_device *dev)
2478 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128, 2478 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128,
2479 dev->max_sectors); 2479 dev->max_sectors);
2480 2480
2481 if (dev->horkage & ATA_HORKAGE_MAX_SEC_1024)
2482 dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_1024,
2483 dev->max_sectors);
2484
2481 if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48) 2485 if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48)
2482 dev->max_sectors = ATA_MAX_SECTORS_LBA48; 2486 dev->max_sectors = ATA_MAX_SECTORS_LBA48;
2483 2487
@@ -4146,6 +4150,12 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4146 { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 }, 4150 { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
4147 { "Slimtype DVD A DS8A9SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 }, 4151 { "Slimtype DVD A DS8A9SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 },
4148 4152
4153 /*
4154 * Causes silent data corruption with higher max sects.
4155 * http://lkml.kernel.org/g/x49wpy40ysk.fsf@segfault.boston.devel.redhat.com
4156 */
4157 { "ST380013AS", "3.20", ATA_HORKAGE_MAX_SEC_1024 },
4158
4149 /* Devices we expect to fail diagnostics */ 4159 /* Devices we expect to fail diagnostics */
4150 4160
4151 /* Devices where NCQ should be avoided */ 4161 /* Devices where NCQ should be avoided */
@@ -4174,9 +4184,10 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4174 { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | 4184 { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ |
4175 ATA_HORKAGE_FIRMWARE_WARN }, 4185 ATA_HORKAGE_FIRMWARE_WARN },
4176 4186
4177 /* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */ 4187 /* drives which fail FPDMA_AA activation (some may freeze afterwards) */
4178 { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA }, 4188 { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA },
4179 { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA }, 4189 { "ST1000LM024 HN-M101MBB", "2BA30001", ATA_HORKAGE_BROKEN_FPDMA_AA },
4190 { "VB0250EAVER", "HPG7", ATA_HORKAGE_BROKEN_FPDMA_AA },
4180 4191
4181 /* Blacklist entries taken from Silicon Image 3124/3132 4192 /* Blacklist entries taken from Silicon Image 3124/3132
4182 Windows driver .inf file - also several Linux problem reports */ 4193 Windows driver .inf file - also several Linux problem reports */
@@ -4229,7 +4240,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4229 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4240 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4230 { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | 4241 { "Crucial_CT*M500*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4231 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4242 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4232 { "Micron_M5[15]0*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | 4243 { "Micron_M5[15]0_*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4233 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4244 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4234 { "Crucial_CT*M550*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM | 4245 { "Crucial_CT*M550*", "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
4235 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4246 ATA_HORKAGE_ZERO_AFTER_TRIM, },
@@ -4238,6 +4249,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
4238 { "Samsung SSD 8*", NULL, ATA_HORKAGE_NO_NCQ_TRIM | 4249 { "Samsung SSD 8*", NULL, ATA_HORKAGE_NO_NCQ_TRIM |
4239 ATA_HORKAGE_ZERO_AFTER_TRIM, }, 4250 ATA_HORKAGE_ZERO_AFTER_TRIM, },
4240 4251
4252 /* devices that don't properly handle TRIM commands */
4253 { "SuperSSpeed S238*", NULL, ATA_HORKAGE_NOTRIM, },
4254
4241 /* 4255 /*
4242 * As defined, the DRAT (Deterministic Read After Trim) and RZAT 4256 * As defined, the DRAT (Deterministic Read After Trim) and RZAT
4243 * (Return Zero After Trim) flags in the ATA Command Set are 4257 * (Return Zero After Trim) flags in the ATA Command Set are
@@ -4501,7 +4515,8 @@ static unsigned int ata_dev_set_xfermode(struct ata_device *dev)
4501 else /* In the ancient relic department - skip all of this */ 4515 else /* In the ancient relic department - skip all of this */
4502 return 0; 4516 return 0;
4503 4517
4504 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 0); 4518 /* On some disks, this command causes spin-up, so we need longer timeout */
4519 err_mask = ata_exec_internal(dev, &tf, NULL, DMA_NONE, NULL, 0, 15000);
4505 4520
4506 DPRINTK("EXIT, err_mask=%x\n", err_mask); 4521 DPRINTK("EXIT, err_mask=%x\n", err_mask);
4507 return err_mask; 4522 return err_mask;
diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c
index 7ccc084bf1df..85aa76116a30 100644
--- a/drivers/ata/libata-pmp.c
+++ b/drivers/ata/libata-pmp.c
@@ -460,6 +460,13 @@ static void sata_pmp_quirks(struct ata_port *ap)
460 ATA_LFLAG_NO_SRST | 460 ATA_LFLAG_NO_SRST |
461 ATA_LFLAG_ASSUME_ATA; 461 ATA_LFLAG_ASSUME_ATA;
462 } 462 }
463 } else if (vendor == 0x11ab && devid == 0x4140) {
464 /* Marvell 4140 quirks */
465 ata_for_each_link(link, ap, EDGE) {
466 /* port 4 is for SEMB device and it doesn't like SRST */
467 if (link->pmp == 4)
468 link->flags |= ATA_LFLAG_DISABLED;
469 }
463 } 470 }
464} 471}
465 472
diff --git a/drivers/ata/libata-scsi.c b/drivers/ata/libata-scsi.c
index 3131adcc1f87..641a61a59e89 100644
--- a/drivers/ata/libata-scsi.c
+++ b/drivers/ata/libata-scsi.c
@@ -2568,7 +2568,8 @@ static unsigned int ata_scsiop_read_cap(struct ata_scsi_args *args, u8 *rbuf)
2568 rbuf[14] = (lowest_aligned >> 8) & 0x3f; 2568 rbuf[14] = (lowest_aligned >> 8) & 0x3f;
2569 rbuf[15] = lowest_aligned; 2569 rbuf[15] = lowest_aligned;
2570 2570
2571 if (ata_id_has_trim(args->id)) { 2571 if (ata_id_has_trim(args->id) &&
2572 !(dev->horkage & ATA_HORKAGE_NOTRIM)) {
2572 rbuf[14] |= 0x80; /* LBPME */ 2573 rbuf[14] |= 0x80; /* LBPME */
2573 2574
2574 if (ata_id_has_zero_after_trim(args->id) && 2575 if (ata_id_has_zero_after_trim(args->id) &&
diff --git a/drivers/ata/libata-transport.c b/drivers/ata/libata-transport.c
index d6c37bcd416d..e2d94972962d 100644
--- a/drivers/ata/libata-transport.c
+++ b/drivers/ata/libata-transport.c
@@ -569,6 +569,8 @@ show_ata_dev_trim(struct device *dev,
569 569
570 if (!ata_id_has_trim(ata_dev->id)) 570 if (!ata_id_has_trim(ata_dev->id))
571 mode = "unsupported"; 571 mode = "unsupported";
572 else if (ata_dev->horkage & ATA_HORKAGE_NOTRIM)
573 mode = "forced_unsupported";
572 else if (ata_dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM) 574 else if (ata_dev->horkage & ATA_HORKAGE_NO_NCQ_TRIM)
573 mode = "forced_unqueued"; 575 mode = "forced_unqueued";
574 else if (ata_fpdma_dsm_supported(ata_dev)) 576 else if (ata_fpdma_dsm_supported(ata_dev))
diff --git a/drivers/base/regmap/regcache-rbtree.c b/drivers/base/regmap/regcache-rbtree.c
index 81751a49d8bf..56486d92c4e7 100644
--- a/drivers/base/regmap/regcache-rbtree.c
+++ b/drivers/base/regmap/regcache-rbtree.c
@@ -296,11 +296,20 @@ static int regcache_rbtree_insert_to_block(struct regmap *map,
296 if (!blk) 296 if (!blk)
297 return -ENOMEM; 297 return -ENOMEM;
298 298
299 present = krealloc(rbnode->cache_present, 299 if (BITS_TO_LONGS(blklen) > BITS_TO_LONGS(rbnode->blklen)) {
300 BITS_TO_LONGS(blklen) * sizeof(*present), GFP_KERNEL); 300 present = krealloc(rbnode->cache_present,
301 if (!present) { 301 BITS_TO_LONGS(blklen) * sizeof(*present),
302 kfree(blk); 302 GFP_KERNEL);
303 return -ENOMEM; 303 if (!present) {
304 kfree(blk);
305 return -ENOMEM;
306 }
307
308 memset(present + BITS_TO_LONGS(rbnode->blklen), 0,
309 (BITS_TO_LONGS(blklen) - BITS_TO_LONGS(rbnode->blklen))
310 * sizeof(*present));
311 } else {
312 present = rbnode->cache_present;
304 } 313 }
305 314
306 /* insert the register value in the correct place in the rbnode block */ 315 /* insert the register value in the correct place in the rbnode block */
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 69de41a87b74..3177b245d2bd 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -240,19 +240,19 @@ static enum hrtimer_restart null_cmd_timer_expired(struct hrtimer *timer)
240 while ((entry = llist_del_all(&cq->list)) != NULL) { 240 while ((entry = llist_del_all(&cq->list)) != NULL) {
241 entry = llist_reverse_order(entry); 241 entry = llist_reverse_order(entry);
242 do { 242 do {
243 struct request_queue *q = NULL;
244
243 cmd = container_of(entry, struct nullb_cmd, ll_list); 245 cmd = container_of(entry, struct nullb_cmd, ll_list);
244 entry = entry->next; 246 entry = entry->next;
247 if (cmd->rq)
248 q = cmd->rq->q;
245 end_cmd(cmd); 249 end_cmd(cmd);
246 250
247 if (cmd->rq) { 251 if (q && !q->mq_ops && blk_queue_stopped(q)) {
248 struct request_queue *q = cmd->rq->q; 252 spin_lock(q->queue_lock);
249 253 if (blk_queue_stopped(q))
250 if (!q->mq_ops && blk_queue_stopped(q)) { 254 blk_start_queue(q);
251 spin_lock(q->queue_lock); 255 spin_unlock(q->queue_lock);
252 if (blk_queue_stopped(q))
253 blk_start_queue(q);
254 spin_unlock(q->queue_lock);
255 }
256 } 256 }
257 } while (entry); 257 } while (entry);
258 } 258 }
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index d94529d5c8e9..bc67a93aa4f4 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -523,6 +523,7 @@ void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
523# define rbd_assert(expr) ((void) 0) 523# define rbd_assert(expr) ((void) 0)
524#endif /* !RBD_DEBUG */ 524#endif /* !RBD_DEBUG */
525 525
526static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request);
526static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request); 527static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
527static void rbd_img_parent_read(struct rbd_obj_request *obj_request); 528static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
528static void rbd_dev_remove_parent(struct rbd_device *rbd_dev); 529static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
@@ -1818,6 +1819,16 @@ static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
1818 obj_request_done_set(obj_request); 1819 obj_request_done_set(obj_request);
1819} 1820}
1820 1821
1822static void rbd_osd_call_callback(struct rbd_obj_request *obj_request)
1823{
1824 dout("%s: obj %p\n", __func__, obj_request);
1825
1826 if (obj_request_img_data_test(obj_request))
1827 rbd_osd_copyup_callback(obj_request);
1828 else
1829 obj_request_done_set(obj_request);
1830}
1831
1821static void rbd_osd_req_callback(struct ceph_osd_request *osd_req, 1832static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
1822 struct ceph_msg *msg) 1833 struct ceph_msg *msg)
1823{ 1834{
@@ -1866,6 +1877,8 @@ static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
1866 rbd_osd_discard_callback(obj_request); 1877 rbd_osd_discard_callback(obj_request);
1867 break; 1878 break;
1868 case CEPH_OSD_OP_CALL: 1879 case CEPH_OSD_OP_CALL:
1880 rbd_osd_call_callback(obj_request);
1881 break;
1869 case CEPH_OSD_OP_NOTIFY_ACK: 1882 case CEPH_OSD_OP_NOTIFY_ACK:
1870 case CEPH_OSD_OP_WATCH: 1883 case CEPH_OSD_OP_WATCH:
1871 rbd_osd_trivial_callback(obj_request); 1884 rbd_osd_trivial_callback(obj_request);
@@ -2530,13 +2543,15 @@ out_unwind:
2530} 2543}
2531 2544
2532static void 2545static void
2533rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request) 2546rbd_osd_copyup_callback(struct rbd_obj_request *obj_request)
2534{ 2547{
2535 struct rbd_img_request *img_request; 2548 struct rbd_img_request *img_request;
2536 struct rbd_device *rbd_dev; 2549 struct rbd_device *rbd_dev;
2537 struct page **pages; 2550 struct page **pages;
2538 u32 page_count; 2551 u32 page_count;
2539 2552
2553 dout("%s: obj %p\n", __func__, obj_request);
2554
2540 rbd_assert(obj_request->type == OBJ_REQUEST_BIO || 2555 rbd_assert(obj_request->type == OBJ_REQUEST_BIO ||
2541 obj_request->type == OBJ_REQUEST_NODATA); 2556 obj_request->type == OBJ_REQUEST_NODATA);
2542 rbd_assert(obj_request_img_data_test(obj_request)); 2557 rbd_assert(obj_request_img_data_test(obj_request));
@@ -2563,9 +2578,7 @@ rbd_img_obj_copyup_callback(struct rbd_obj_request *obj_request)
2563 if (!obj_request->result) 2578 if (!obj_request->result)
2564 obj_request->xferred = obj_request->length; 2579 obj_request->xferred = obj_request->length;
2565 2580
2566 /* Finish up with the normal image object callback */ 2581 obj_request_done_set(obj_request);
2567
2568 rbd_img_obj_callback(obj_request);
2569} 2582}
2570 2583
2571static void 2584static void
@@ -2650,7 +2663,6 @@ rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
2650 2663
2651 /* All set, send it off. */ 2664 /* All set, send it off. */
2652 2665
2653 orig_request->callback = rbd_img_obj_copyup_callback;
2654 osdc = &rbd_dev->rbd_client->client->osdc; 2666 osdc = &rbd_dev->rbd_client->client->osdc;
2655 img_result = rbd_obj_request_submit(osdc, orig_request); 2667 img_result = rbd_obj_request_submit(osdc, orig_request);
2656 if (!img_result) 2668 if (!img_result)
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index ced96777b677..954c0029fb3b 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -369,8 +369,8 @@ static void purge_persistent_gnt(struct xen_blkif *blkif)
369 return; 369 return;
370 } 370 }
371 371
372 if (work_pending(&blkif->persistent_purge_work)) { 372 if (work_busy(&blkif->persistent_purge_work)) {
373 pr_alert_ratelimited("Scheduled work from previous purge is still pending, cannot purge list\n"); 373 pr_alert_ratelimited("Scheduled work from previous purge is still busy, cannot purge list\n");
374 return; 374 return;
375 } 375 }
376 376
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 6d89ed35d80c..7a8a73f1fc04 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -179,6 +179,7 @@ static DEFINE_SPINLOCK(minor_lock);
179 ((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME) 179 ((_segs + SEGS_PER_INDIRECT_FRAME - 1)/SEGS_PER_INDIRECT_FRAME)
180 180
181static int blkfront_setup_indirect(struct blkfront_info *info); 181static int blkfront_setup_indirect(struct blkfront_info *info);
182static int blkfront_gather_backend_features(struct blkfront_info *info);
182 183
183static int get_id_from_freelist(struct blkfront_info *info) 184static int get_id_from_freelist(struct blkfront_info *info)
184{ 185{
@@ -1128,8 +1129,10 @@ static void blkif_completion(struct blk_shadow *s, struct blkfront_info *info,
1128 * Add the used indirect page back to the list of 1129 * Add the used indirect page back to the list of
1129 * available pages for indirect grefs. 1130 * available pages for indirect grefs.
1130 */ 1131 */
1131 indirect_page = pfn_to_page(s->indirect_grants[i]->pfn); 1132 if (!info->feature_persistent) {
1132 list_add(&indirect_page->lru, &info->indirect_pages); 1133 indirect_page = pfn_to_page(s->indirect_grants[i]->pfn);
1134 list_add(&indirect_page->lru, &info->indirect_pages);
1135 }
1133 s->indirect_grants[i]->gref = GRANT_INVALID_REF; 1136 s->indirect_grants[i]->gref = GRANT_INVALID_REF;
1134 list_add_tail(&s->indirect_grants[i]->node, &info->grants); 1137 list_add_tail(&s->indirect_grants[i]->node, &info->grants);
1135 } 1138 }
@@ -1519,7 +1522,7 @@ static int blkif_recover(struct blkfront_info *info)
1519 info->shadow_free = info->ring.req_prod_pvt; 1522 info->shadow_free = info->ring.req_prod_pvt;
1520 info->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff; 1523 info->shadow[BLK_RING_SIZE(info)-1].req.u.rw.id = 0x0fffffff;
1521 1524
1522 rc = blkfront_setup_indirect(info); 1525 rc = blkfront_gather_backend_features(info);
1523 if (rc) { 1526 if (rc) {
1524 kfree(copy); 1527 kfree(copy);
1525 return rc; 1528 return rc;
@@ -1720,20 +1723,13 @@ static void blkfront_setup_discard(struct blkfront_info *info)
1720 1723
1721static int blkfront_setup_indirect(struct blkfront_info *info) 1724static int blkfront_setup_indirect(struct blkfront_info *info)
1722{ 1725{
1723 unsigned int indirect_segments, segs; 1726 unsigned int segs;
1724 int err, i; 1727 int err, i;
1725 1728
1726 err = xenbus_gather(XBT_NIL, info->xbdev->otherend, 1729 if (info->max_indirect_segments == 0)
1727 "feature-max-indirect-segments", "%u", &indirect_segments,
1728 NULL);
1729 if (err) {
1730 info->max_indirect_segments = 0;
1731 segs = BLKIF_MAX_SEGMENTS_PER_REQUEST; 1730 segs = BLKIF_MAX_SEGMENTS_PER_REQUEST;
1732 } else { 1731 else
1733 info->max_indirect_segments = min(indirect_segments,
1734 xen_blkif_max_segments);
1735 segs = info->max_indirect_segments; 1732 segs = info->max_indirect_segments;
1736 }
1737 1733
1738 err = fill_grant_buffer(info, (segs + INDIRECT_GREFS(segs)) * BLK_RING_SIZE(info)); 1734 err = fill_grant_buffer(info, (segs + INDIRECT_GREFS(segs)) * BLK_RING_SIZE(info));
1739 if (err) 1735 if (err)
@@ -1797,6 +1793,68 @@ out_of_memory:
1797} 1793}
1798 1794
1799/* 1795/*
1796 * Gather all backend feature-*
1797 */
1798static int blkfront_gather_backend_features(struct blkfront_info *info)
1799{
1800 int err;
1801 int barrier, flush, discard, persistent;
1802 unsigned int indirect_segments;
1803
1804 info->feature_flush = 0;
1805
1806 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1807 "feature-barrier", "%d", &barrier,
1808 NULL);
1809
1810 /*
1811 * If there's no "feature-barrier" defined, then it means
1812 * we're dealing with a very old backend which writes
1813 * synchronously; nothing to do.
1814 *
1815 * If there are barriers, then we use flush.
1816 */
1817 if (!err && barrier)
1818 info->feature_flush = REQ_FLUSH | REQ_FUA;
1819 /*
1820 * And if there is "feature-flush-cache" use that above
1821 * barriers.
1822 */
1823 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1824 "feature-flush-cache", "%d", &flush,
1825 NULL);
1826
1827 if (!err && flush)
1828 info->feature_flush = REQ_FLUSH;
1829
1830 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1831 "feature-discard", "%d", &discard,
1832 NULL);
1833
1834 if (!err && discard)
1835 blkfront_setup_discard(info);
1836
1837 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1838 "feature-persistent", "%u", &persistent,
1839 NULL);
1840 if (err)
1841 info->feature_persistent = 0;
1842 else
1843 info->feature_persistent = persistent;
1844
1845 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1846 "feature-max-indirect-segments", "%u", &indirect_segments,
1847 NULL);
1848 if (err)
1849 info->max_indirect_segments = 0;
1850 else
1851 info->max_indirect_segments = min(indirect_segments,
1852 xen_blkif_max_segments);
1853
1854 return blkfront_setup_indirect(info);
1855}
1856
1857/*
1800 * Invoked when the backend is finally 'ready' (and has told produced 1858 * Invoked when the backend is finally 'ready' (and has told produced
1801 * the details about the physical device - #sectors, size, etc). 1859 * the details about the physical device - #sectors, size, etc).
1802 */ 1860 */
@@ -1807,7 +1865,6 @@ static void blkfront_connect(struct blkfront_info *info)
1807 unsigned int physical_sector_size; 1865 unsigned int physical_sector_size;
1808 unsigned int binfo; 1866 unsigned int binfo;
1809 int err; 1867 int err;
1810 int barrier, flush, discard, persistent;
1811 1868
1812 switch (info->connected) { 1869 switch (info->connected) {
1813 case BLKIF_STATE_CONNECTED: 1870 case BLKIF_STATE_CONNECTED:
@@ -1864,48 +1921,7 @@ static void blkfront_connect(struct blkfront_info *info)
1864 if (err != 1) 1921 if (err != 1)
1865 physical_sector_size = sector_size; 1922 physical_sector_size = sector_size;
1866 1923
1867 info->feature_flush = 0; 1924 err = blkfront_gather_backend_features(info);
1868
1869 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1870 "feature-barrier", "%d", &barrier,
1871 NULL);
1872
1873 /*
1874 * If there's no "feature-barrier" defined, then it means
1875 * we're dealing with a very old backend which writes
1876 * synchronously; nothing to do.
1877 *
1878 * If there are barriers, then we use flush.
1879 */
1880 if (!err && barrier)
1881 info->feature_flush = REQ_FLUSH | REQ_FUA;
1882 /*
1883 * And if there is "feature-flush-cache" use that above
1884 * barriers.
1885 */
1886 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1887 "feature-flush-cache", "%d", &flush,
1888 NULL);
1889
1890 if (!err && flush)
1891 info->feature_flush = REQ_FLUSH;
1892
1893 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1894 "feature-discard", "%d", &discard,
1895 NULL);
1896
1897 if (!err && discard)
1898 blkfront_setup_discard(info);
1899
1900 err = xenbus_gather(XBT_NIL, info->xbdev->otherend,
1901 "feature-persistent", "%u", &persistent,
1902 NULL);
1903 if (err)
1904 info->feature_persistent = 0;
1905 else
1906 info->feature_persistent = persistent;
1907
1908 err = blkfront_setup_indirect(info);
1909 if (err) { 1925 if (err) {
1910 xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s", 1926 xenbus_dev_fatal(info->xbdev, err, "setup_indirect at %s",
1911 info->xbdev->otherend); 1927 info->xbdev->otherend);
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index fb655e8d1e3b..763301c7828c 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -496,10 +496,9 @@ static void zram_meta_free(struct zram_meta *meta, u64 disksize)
496 kfree(meta); 496 kfree(meta);
497} 497}
498 498
499static struct zram_meta *zram_meta_alloc(int device_id, u64 disksize) 499static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize)
500{ 500{
501 size_t num_pages; 501 size_t num_pages;
502 char pool_name[8];
503 struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL); 502 struct zram_meta *meta = kmalloc(sizeof(*meta), GFP_KERNEL);
504 503
505 if (!meta) 504 if (!meta)
@@ -512,7 +511,6 @@ static struct zram_meta *zram_meta_alloc(int device_id, u64 disksize)
512 goto out_error; 511 goto out_error;
513 } 512 }
514 513
515 snprintf(pool_name, sizeof(pool_name), "zram%d", device_id);
516 meta->mem_pool = zs_create_pool(pool_name, GFP_NOIO | __GFP_HIGHMEM); 514 meta->mem_pool = zs_create_pool(pool_name, GFP_NOIO | __GFP_HIGHMEM);
517 if (!meta->mem_pool) { 515 if (!meta->mem_pool) {
518 pr_err("Error creating memory pool\n"); 516 pr_err("Error creating memory pool\n");
@@ -1031,7 +1029,7 @@ static ssize_t disksize_store(struct device *dev,
1031 return -EINVAL; 1029 return -EINVAL;
1032 1030
1033 disksize = PAGE_ALIGN(disksize); 1031 disksize = PAGE_ALIGN(disksize);
1034 meta = zram_meta_alloc(zram->disk->first_minor, disksize); 1032 meta = zram_meta_alloc(zram->disk->disk_name, disksize);
1035 if (!meta) 1033 if (!meta)
1036 return -ENOMEM; 1034 return -ENOMEM;
1037 1035
diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c
index 1e1a4323a71f..9ceb8ac68fdc 100644
--- a/drivers/bluetooth/btbcm.c
+++ b/drivers/bluetooth/btbcm.c
@@ -472,12 +472,11 @@ int btbcm_setup_apple(struct hci_dev *hdev)
472 472
473 /* Read Verbose Config Version Info */ 473 /* Read Verbose Config Version Info */
474 skb = btbcm_read_verbose_config(hdev); 474 skb = btbcm_read_verbose_config(hdev);
475 if (IS_ERR(skb)) 475 if (!IS_ERR(skb)) {
476 return PTR_ERR(skb); 476 BT_INFO("%s: BCM: chip id %u build %4.4u", hdev->name, skb->data[1],
477 477 get_unaligned_le16(skb->data + 5));
478 BT_INFO("%s: BCM: chip id %u build %4.4u", hdev->name, skb->data[1], 478 kfree_skb(skb);
479 get_unaligned_le16(skb->data + 5)); 479 }
480 kfree_skb(skb);
481 480
482 set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks); 481 set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
483 482
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index da8faf78536a..5643b65cee20 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -429,7 +429,7 @@ static int hwrng_fillfn(void *unused)
429static void start_khwrngd(void) 429static void start_khwrngd(void)
430{ 430{
431 hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng"); 431 hwrng_fill = kthread_run(hwrng_fillfn, NULL, "hwrng");
432 if (hwrng_fill == ERR_PTR(-ENOMEM)) { 432 if (IS_ERR(hwrng_fill)) {
433 pr_err("hwrng_fill thread creation failed"); 433 pr_err("hwrng_fill thread creation failed");
434 hwrng_fill = NULL; 434 hwrng_fill = NULL;
435 } 435 }
diff --git a/drivers/clk/pxa/clk-pxa3xx.c b/drivers/clk/pxa/clk-pxa3xx.c
index 4b93a1efb36d..ac03ba49e9d1 100644
--- a/drivers/clk/pxa/clk-pxa3xx.c
+++ b/drivers/clk/pxa/clk-pxa3xx.c
@@ -126,7 +126,7 @@ PARENTS(pxa3xx_ac97_bus) = { "ring_osc_60mhz", "ac97" };
126PARENTS(pxa3xx_sbus) = { "ring_osc_60mhz", "system_bus" }; 126PARENTS(pxa3xx_sbus) = { "ring_osc_60mhz", "system_bus" };
127PARENTS(pxa3xx_smemcbus) = { "ring_osc_60mhz", "smemc" }; 127PARENTS(pxa3xx_smemcbus) = { "ring_osc_60mhz", "smemc" };
128 128
129#define CKEN_AB(bit) ((CKEN_ ## bit > 31) ? &CKENA : &CKENB) 129#define CKEN_AB(bit) ((CKEN_ ## bit > 31) ? &CKENB : &CKENA)
130#define PXA3XX_CKEN(dev_id, con_id, parents, mult_lp, div_lp, mult_hp, \ 130#define PXA3XX_CKEN(dev_id, con_id, parents, mult_lp, div_lp, mult_hp, \
131 div_hp, bit, is_lp, flags) \ 131 div_hp, bit, is_lp, flags) \
132 PXA_CKEN(dev_id, con_id, bit, parents, mult_lp, div_lp, \ 132 PXA_CKEN(dev_id, con_id, bit, parents, mult_lp, div_lp, \
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index b8ff3c64cc45..c96de14036a0 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -661,6 +661,9 @@ static void sh_cmt_clocksource_suspend(struct clocksource *cs)
661{ 661{
662 struct sh_cmt_channel *ch = cs_to_sh_cmt(cs); 662 struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
663 663
664 if (!ch->cs_enabled)
665 return;
666
664 sh_cmt_stop(ch, FLAG_CLOCKSOURCE); 667 sh_cmt_stop(ch, FLAG_CLOCKSOURCE);
665 pm_genpd_syscore_poweroff(&ch->cmt->pdev->dev); 668 pm_genpd_syscore_poweroff(&ch->cmt->pdev->dev);
666} 669}
@@ -669,6 +672,9 @@ static void sh_cmt_clocksource_resume(struct clocksource *cs)
669{ 672{
670 struct sh_cmt_channel *ch = cs_to_sh_cmt(cs); 673 struct sh_cmt_channel *ch = cs_to_sh_cmt(cs);
671 674
675 if (!ch->cs_enabled)
676 return;
677
672 pm_genpd_syscore_poweron(&ch->cmt->pdev->dev); 678 pm_genpd_syscore_poweron(&ch->cmt->pdev->dev);
673 sh_cmt_start(ch, FLAG_CLOCKSOURCE); 679 sh_cmt_start(ch, FLAG_CLOCKSOURCE);
674} 680}
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 26063afb3eba..7a3c30c4336f 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -1002,7 +1002,7 @@ static int cpufreq_add_dev_symlink(struct cpufreq_policy *policy)
1002 int ret = 0; 1002 int ret = 0;
1003 1003
1004 /* Some related CPUs might not be present (physically hotplugged) */ 1004 /* Some related CPUs might not be present (physically hotplugged) */
1005 for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) { 1005 for_each_cpu(j, policy->real_cpus) {
1006 if (j == policy->kobj_cpu) 1006 if (j == policy->kobj_cpu)
1007 continue; 1007 continue;
1008 1008
@@ -1019,7 +1019,7 @@ static void cpufreq_remove_dev_symlink(struct cpufreq_policy *policy)
1019 unsigned int j; 1019 unsigned int j;
1020 1020
1021 /* Some related CPUs might not be present (physically hotplugged) */ 1021 /* Some related CPUs might not be present (physically hotplugged) */
1022 for_each_cpu_and(j, policy->related_cpus, cpu_present_mask) { 1022 for_each_cpu(j, policy->real_cpus) {
1023 if (j == policy->kobj_cpu) 1023 if (j == policy->kobj_cpu)
1024 continue; 1024 continue;
1025 1025
@@ -1163,11 +1163,14 @@ static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev)
1163 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL)) 1163 if (!zalloc_cpumask_var(&policy->related_cpus, GFP_KERNEL))
1164 goto err_free_cpumask; 1164 goto err_free_cpumask;
1165 1165
1166 if (!zalloc_cpumask_var(&policy->real_cpus, GFP_KERNEL))
1167 goto err_free_rcpumask;
1168
1166 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &dev->kobj, 1169 ret = kobject_init_and_add(&policy->kobj, &ktype_cpufreq, &dev->kobj,
1167 "cpufreq"); 1170 "cpufreq");
1168 if (ret) { 1171 if (ret) {
1169 pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret); 1172 pr_err("%s: failed to init policy->kobj: %d\n", __func__, ret);
1170 goto err_free_rcpumask; 1173 goto err_free_real_cpus;
1171 } 1174 }
1172 1175
1173 INIT_LIST_HEAD(&policy->policy_list); 1176 INIT_LIST_HEAD(&policy->policy_list);
@@ -1184,6 +1187,8 @@ static struct cpufreq_policy *cpufreq_policy_alloc(struct device *dev)
1184 1187
1185 return policy; 1188 return policy;
1186 1189
1190err_free_real_cpus:
1191 free_cpumask_var(policy->real_cpus);
1187err_free_rcpumask: 1192err_free_rcpumask:
1188 free_cpumask_var(policy->related_cpus); 1193 free_cpumask_var(policy->related_cpus);
1189err_free_cpumask: 1194err_free_cpumask:
@@ -1234,6 +1239,7 @@ static void cpufreq_policy_free(struct cpufreq_policy *policy, bool notify)
1234 write_unlock_irqrestore(&cpufreq_driver_lock, flags); 1239 write_unlock_irqrestore(&cpufreq_driver_lock, flags);
1235 1240
1236 cpufreq_policy_put_kobj(policy, notify); 1241 cpufreq_policy_put_kobj(policy, notify);
1242 free_cpumask_var(policy->real_cpus);
1237 free_cpumask_var(policy->related_cpus); 1243 free_cpumask_var(policy->related_cpus);
1238 free_cpumask_var(policy->cpus); 1244 free_cpumask_var(policy->cpus);
1239 kfree(policy); 1245 kfree(policy);
@@ -1258,14 +1264,17 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1258 1264
1259 pr_debug("adding CPU %u\n", cpu); 1265 pr_debug("adding CPU %u\n", cpu);
1260 1266
1261 /* 1267 if (cpu_is_offline(cpu)) {
1262 * Only possible if 'cpu' wasn't physically present earlier and we are 1268 /*
1263 * here from subsys_interface add callback. A hotplug notifier will 1269 * Only possible if we are here from the subsys_interface add
1264 * follow and we will handle it like logical CPU hotplug then. For now, 1270 * callback. A hotplug notifier will follow and we will handle
1265 * just create the sysfs link. 1271 * it as CPU online then. For now, just create the sysfs link,
1266 */ 1272 * unless there is no policy or the link is already present.
1267 if (cpu_is_offline(cpu)) 1273 */
1268 return add_cpu_dev_symlink(per_cpu(cpufreq_cpu_data, cpu), cpu); 1274 policy = per_cpu(cpufreq_cpu_data, cpu);
1275 return policy && !cpumask_test_and_set_cpu(cpu, policy->real_cpus)
1276 ? add_cpu_dev_symlink(policy, cpu) : 0;
1277 }
1269 1278
1270 if (!down_read_trylock(&cpufreq_rwsem)) 1279 if (!down_read_trylock(&cpufreq_rwsem))
1271 return 0; 1280 return 0;
@@ -1307,6 +1316,10 @@ static int cpufreq_add_dev(struct device *dev, struct subsys_interface *sif)
1307 /* related cpus should atleast have policy->cpus */ 1316 /* related cpus should atleast have policy->cpus */
1308 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus); 1317 cpumask_or(policy->related_cpus, policy->related_cpus, policy->cpus);
1309 1318
1319 /* Remember which CPUs have been present at the policy creation time. */
1320 if (!recover_policy)
1321 cpumask_and(policy->real_cpus, policy->cpus, cpu_present_mask);
1322
1310 /* 1323 /*
1311 * affected cpus must always be the one, which are online. We aren't 1324 * affected cpus must always be the one, which are online. We aren't
1312 * managing offline cpus here. 1325 * managing offline cpus here.
@@ -1420,8 +1433,7 @@ nomem_out:
1420 return ret; 1433 return ret;
1421} 1434}
1422 1435
1423static int __cpufreq_remove_dev_prepare(struct device *dev, 1436static int __cpufreq_remove_dev_prepare(struct device *dev)
1424 struct subsys_interface *sif)
1425{ 1437{
1426 unsigned int cpu = dev->id; 1438 unsigned int cpu = dev->id;
1427 int ret = 0; 1439 int ret = 0;
@@ -1437,10 +1449,8 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
1437 1449
1438 if (has_target()) { 1450 if (has_target()) {
1439 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP); 1451 ret = __cpufreq_governor(policy, CPUFREQ_GOV_STOP);
1440 if (ret) { 1452 if (ret)
1441 pr_err("%s: Failed to stop governor\n", __func__); 1453 pr_err("%s: Failed to stop governor\n", __func__);
1442 return ret;
1443 }
1444 } 1454 }
1445 1455
1446 down_write(&policy->rwsem); 1456 down_write(&policy->rwsem);
@@ -1473,8 +1483,7 @@ static int __cpufreq_remove_dev_prepare(struct device *dev,
1473 return ret; 1483 return ret;
1474} 1484}
1475 1485
1476static int __cpufreq_remove_dev_finish(struct device *dev, 1486static int __cpufreq_remove_dev_finish(struct device *dev)
1477 struct subsys_interface *sif)
1478{ 1487{
1479 unsigned int cpu = dev->id; 1488 unsigned int cpu = dev->id;
1480 int ret; 1489 int ret;
@@ -1492,10 +1501,8 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
1492 /* If cpu is last user of policy, free policy */ 1501 /* If cpu is last user of policy, free policy */
1493 if (has_target()) { 1502 if (has_target()) {
1494 ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT); 1503 ret = __cpufreq_governor(policy, CPUFREQ_GOV_POLICY_EXIT);
1495 if (ret) { 1504 if (ret)
1496 pr_err("%s: Failed to exit governor\n", __func__); 1505 pr_err("%s: Failed to exit governor\n", __func__);
1497 return ret;
1498 }
1499 } 1506 }
1500 1507
1501 /* 1508 /*
@@ -1506,10 +1513,6 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
1506 if (cpufreq_driver->exit) 1513 if (cpufreq_driver->exit)
1507 cpufreq_driver->exit(policy); 1514 cpufreq_driver->exit(policy);
1508 1515
1509 /* Free the policy only if the driver is getting removed. */
1510 if (sif)
1511 cpufreq_policy_free(policy, true);
1512
1513 return 0; 1516 return 0;
1514} 1517}
1515 1518
@@ -1521,42 +1524,41 @@ static int __cpufreq_remove_dev_finish(struct device *dev,
1521static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif) 1524static int cpufreq_remove_dev(struct device *dev, struct subsys_interface *sif)
1522{ 1525{
1523 unsigned int cpu = dev->id; 1526 unsigned int cpu = dev->id;
1524 int ret; 1527 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1525
1526 /*
1527 * Only possible if 'cpu' is getting physically removed now. A hotplug
1528 * notifier should have already been called and we just need to remove
1529 * link or free policy here.
1530 */
1531 if (cpu_is_offline(cpu)) {
1532 struct cpufreq_policy *policy = per_cpu(cpufreq_cpu_data, cpu);
1533 struct cpumask mask;
1534 1528
1535 if (!policy) 1529 if (!policy)
1536 return 0; 1530 return 0;
1537 1531
1538 cpumask_copy(&mask, policy->related_cpus); 1532 if (cpu_online(cpu)) {
1539 cpumask_clear_cpu(cpu, &mask); 1533 __cpufreq_remove_dev_prepare(dev);
1534 __cpufreq_remove_dev_finish(dev);
1535 }
1540 1536
1541 /* 1537 cpumask_clear_cpu(cpu, policy->real_cpus);
1542 * Free policy only if all policy->related_cpus are removed
1543 * physically.
1544 */
1545 if (cpumask_intersects(&mask, cpu_present_mask)) {
1546 remove_cpu_dev_symlink(policy, cpu);
1547 return 0;
1548 }
1549 1538
1539 if (cpumask_empty(policy->real_cpus)) {
1550 cpufreq_policy_free(policy, true); 1540 cpufreq_policy_free(policy, true);
1551 return 0; 1541 return 0;
1552 } 1542 }
1553 1543
1554 ret = __cpufreq_remove_dev_prepare(dev, sif); 1544 if (cpu != policy->kobj_cpu) {
1545 remove_cpu_dev_symlink(policy, cpu);
1546 } else {
1547 /*
1548 * The CPU owning the policy object is going away. Move it to
1549 * another suitable CPU.
1550 */
1551 unsigned int new_cpu = cpumask_first(policy->real_cpus);
1552 struct device *new_dev = get_cpu_device(new_cpu);
1553
1554 dev_dbg(dev, "%s: Moving policy object to CPU%u\n", __func__, new_cpu);
1555 1555
1556 if (!ret) 1556 sysfs_remove_link(&new_dev->kobj, "cpufreq");
1557 ret = __cpufreq_remove_dev_finish(dev, sif); 1557 policy->kobj_cpu = new_cpu;
1558 WARN_ON(kobject_move(&policy->kobj, &new_dev->kobj));
1559 }
1558 1560
1559 return ret; 1561 return 0;
1560} 1562}
1561 1563
1562static void handle_update(struct work_struct *work) 1564static void handle_update(struct work_struct *work)
@@ -2395,11 +2397,11 @@ static int cpufreq_cpu_callback(struct notifier_block *nfb,
2395 break; 2397 break;
2396 2398
2397 case CPU_DOWN_PREPARE: 2399 case CPU_DOWN_PREPARE:
2398 __cpufreq_remove_dev_prepare(dev, NULL); 2400 __cpufreq_remove_dev_prepare(dev);
2399 break; 2401 break;
2400 2402
2401 case CPU_POST_DEAD: 2403 case CPU_POST_DEAD:
2402 __cpufreq_remove_dev_finish(dev, NULL); 2404 __cpufreq_remove_dev_finish(dev);
2403 break; 2405 break;
2404 2406
2405 case CPU_DOWN_FAILED: 2407 case CPU_DOWN_FAILED:
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 15ada47bb720..fcb929ec5304 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -681,6 +681,7 @@ static struct cpu_defaults knl_params = {
681 .get_max = core_get_max_pstate, 681 .get_max = core_get_max_pstate,
682 .get_min = core_get_min_pstate, 682 .get_min = core_get_min_pstate,
683 .get_turbo = knl_get_turbo_pstate, 683 .get_turbo = knl_get_turbo_pstate,
684 .get_scaling = core_get_scaling,
684 .set = core_set_pstate, 685 .set = core_set_pstate,
685 }, 686 },
686}; 687};
diff --git a/drivers/cpufreq/loongson2_cpufreq.c b/drivers/cpufreq/loongson2_cpufreq.c
index e362860c2b50..cd593c1f66dc 100644
--- a/drivers/cpufreq/loongson2_cpufreq.c
+++ b/drivers/cpufreq/loongson2_cpufreq.c
@@ -20,7 +20,7 @@
20#include <asm/clock.h> 20#include <asm/clock.h>
21#include <asm/idle.h> 21#include <asm/idle.h>
22 22
23#include <asm/mach-loongson/loongson.h> 23#include <asm/mach-loongson64/loongson.h>
24 24
25static uint nowait; 25static uint nowait;
26 26
diff --git a/drivers/crypto/ixp4xx_crypto.c b/drivers/crypto/ixp4xx_crypto.c
index 7ba495f75370..402631a19a11 100644
--- a/drivers/crypto/ixp4xx_crypto.c
+++ b/drivers/crypto/ixp4xx_crypto.c
@@ -905,7 +905,6 @@ static int ablk_perform(struct ablkcipher_request *req, int encrypt)
905 crypt->mode |= NPE_OP_NOT_IN_PLACE; 905 crypt->mode |= NPE_OP_NOT_IN_PLACE;
906 /* This was never tested by Intel 906 /* This was never tested by Intel
907 * for more than one dst buffer, I think. */ 907 * for more than one dst buffer, I think. */
908 BUG_ON(req->dst->length < nbytes);
909 req_ctx->dst = NULL; 908 req_ctx->dst = NULL;
910 if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook, 909 if (!chainup_buffers(dev, req->dst, nbytes, &dst_hook,
911 flags, DMA_FROM_DEVICE)) 910 flags, DMA_FROM_DEVICE))
diff --git a/drivers/crypto/qat/qat_common/qat_algs.c b/drivers/crypto/qat/qat_common/qat_algs.c
index 067402c7c2a9..df427c0e9e7b 100644
--- a/drivers/crypto/qat/qat_common/qat_algs.c
+++ b/drivers/crypto/qat/qat_common/qat_algs.c
@@ -73,7 +73,8 @@
73 ICP_QAT_HW_CIPHER_KEY_CONVERT, \ 73 ICP_QAT_HW_CIPHER_KEY_CONVERT, \
74 ICP_QAT_HW_CIPHER_DECRYPT) 74 ICP_QAT_HW_CIPHER_DECRYPT)
75 75
76static atomic_t active_dev; 76static DEFINE_MUTEX(algs_lock);
77static unsigned int active_devs;
77 78
78struct qat_alg_buf { 79struct qat_alg_buf {
79 uint32_t len; 80 uint32_t len;
@@ -1280,7 +1281,10 @@ static struct crypto_alg qat_algs[] = { {
1280 1281
1281int qat_algs_register(void) 1282int qat_algs_register(void)
1282{ 1283{
1283 if (atomic_add_return(1, &active_dev) == 1) { 1284 int ret = 0;
1285
1286 mutex_lock(&algs_lock);
1287 if (++active_devs == 1) {
1284 int i; 1288 int i;
1285 1289
1286 for (i = 0; i < ARRAY_SIZE(qat_algs); i++) 1290 for (i = 0; i < ARRAY_SIZE(qat_algs); i++)
@@ -1289,21 +1293,25 @@ int qat_algs_register(void)
1289 CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC : 1293 CRYPTO_ALG_TYPE_AEAD | CRYPTO_ALG_ASYNC :
1290 CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC; 1294 CRYPTO_ALG_TYPE_ABLKCIPHER | CRYPTO_ALG_ASYNC;
1291 1295
1292 return crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs)); 1296 ret = crypto_register_algs(qat_algs, ARRAY_SIZE(qat_algs));
1293 } 1297 }
1294 return 0; 1298 mutex_unlock(&algs_lock);
1299 return ret;
1295} 1300}
1296 1301
1297int qat_algs_unregister(void) 1302int qat_algs_unregister(void)
1298{ 1303{
1299 if (atomic_sub_return(1, &active_dev) == 0) 1304 int ret = 0;
1300 return crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs)); 1305
1301 return 0; 1306 mutex_lock(&algs_lock);
1307 if (--active_devs == 0)
1308 ret = crypto_unregister_algs(qat_algs, ARRAY_SIZE(qat_algs));
1309 mutex_unlock(&algs_lock);
1310 return ret;
1302} 1311}
1303 1312
1304int qat_algs_init(void) 1313int qat_algs_init(void)
1305{ 1314{
1306 atomic_set(&active_dev, 0);
1307 crypto_get_default_rng(); 1315 crypto_get_default_rng();
1308 return 0; 1316 return 0;
1309} 1317}
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 59892126d175..d3629b7482dd 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -48,6 +48,8 @@
48 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\ 48 BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) |\
49 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES)) 49 BIT(DMA_SLAVE_BUSWIDTH_4_BYTES))
50 50
51#define ATC_MAX_DSCR_TRIALS 10
52
51/* 53/*
52 * Initial number of descriptors to allocate for each channel. This could 54 * Initial number of descriptors to allocate for each channel. This could
53 * be increased during dma usage. 55 * be increased during dma usage.
@@ -285,28 +287,19 @@ static struct at_desc *atc_get_desc_by_cookie(struct at_dma_chan *atchan,
285 * 287 *
286 * @current_len: the number of bytes left before reading CTRLA 288 * @current_len: the number of bytes left before reading CTRLA
287 * @ctrla: the value of CTRLA 289 * @ctrla: the value of CTRLA
288 * @desc: the descriptor containing the transfer width
289 */ 290 */
290static inline int atc_calc_bytes_left(int current_len, u32 ctrla, 291static inline int atc_calc_bytes_left(int current_len, u32 ctrla)
291 struct at_desc *desc)
292{ 292{
293 return current_len - ((ctrla & ATC_BTSIZE_MAX) << desc->tx_width); 293 u32 btsize = (ctrla & ATC_BTSIZE_MAX);
294} 294 u32 src_width = ATC_REG_TO_SRC_WIDTH(ctrla);
295 295
296/** 296 /*
297 * atc_calc_bytes_left_from_reg - calculates the number of bytes left according 297 * According to the datasheet, when reading the Control A Register
298 * to the current value of CTRLA. 298 * (ctrla), the Buffer Transfer Size (btsize) bitfield refers to the
299 * 299 * number of transfers completed on the Source Interface.
300 * @current_len: the number of bytes left before reading CTRLA 300 * So btsize is always a number of source width transfers.
301 * @atchan: the channel to read CTRLA for 301 */
302 * @desc: the descriptor containing the transfer width 302 return current_len - (btsize << src_width);
303 */
304static inline int atc_calc_bytes_left_from_reg(int current_len,
305 struct at_dma_chan *atchan, struct at_desc *desc)
306{
307 u32 ctrla = channel_readl(atchan, CTRLA);
308
309 return atc_calc_bytes_left(current_len, ctrla, desc);
310} 303}
311 304
312/** 305/**
@@ -320,7 +313,7 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
320 struct at_desc *desc_first = atc_first_active(atchan); 313 struct at_desc *desc_first = atc_first_active(atchan);
321 struct at_desc *desc; 314 struct at_desc *desc;
322 int ret; 315 int ret;
323 u32 ctrla, dscr; 316 u32 ctrla, dscr, trials;
324 317
325 /* 318 /*
326 * If the cookie doesn't match to the currently running transfer then 319 * If the cookie doesn't match to the currently running transfer then
@@ -346,15 +339,82 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
346 * the channel's DSCR register and compare it against the value 339 * the channel's DSCR register and compare it against the value
347 * of the hardware linked list structure of each child 340 * of the hardware linked list structure of each child
348 * descriptor. 341 * descriptor.
342 *
343 * The CTRLA register provides us with the amount of data
344 * already read from the source for the current child
345 * descriptor. So we can compute a more accurate residue by also
346 * removing the number of bytes corresponding to this amount of
347 * data.
348 *
349 * However, the DSCR and CTRLA registers cannot be read both
350 * atomically. Hence a race condition may occur: the first read
351 * register may refer to one child descriptor whereas the second
352 * read may refer to a later child descriptor in the list
353 * because of the DMA transfer progression inbetween the two
354 * reads.
355 *
356 * One solution could have been to pause the DMA transfer, read
357 * the DSCR and CTRLA then resume the DMA transfer. Nonetheless,
358 * this approach presents some drawbacks:
359 * - If the DMA transfer is paused, RX overruns or TX underruns
360 * are more likey to occur depending on the system latency.
361 * Taking the USART driver as an example, it uses a cyclic DMA
362 * transfer to read data from the Receive Holding Register
363 * (RHR) to avoid RX overruns since the RHR is not protected
364 * by any FIFO on most Atmel SoCs. So pausing the DMA transfer
365 * to compute the residue would break the USART driver design.
366 * - The atc_pause() function masks interrupts but we'd rather
367 * avoid to do so for system latency purpose.
368 *
369 * Then we'd rather use another solution: the DSCR is read a
370 * first time, the CTRLA is read in turn, next the DSCR is read
371 * a second time. If the two consecutive read values of the DSCR
372 * are the same then we assume both refers to the very same
373 * child descriptor as well as the CTRLA value read inbetween
374 * does. For cyclic tranfers, the assumption is that a full loop
375 * is "not so fast".
376 * If the two DSCR values are different, we read again the CTRLA
377 * then the DSCR till two consecutive read values from DSCR are
378 * equal or till the maxium trials is reach.
379 * This algorithm is very unlikely not to find a stable value for
380 * DSCR.
349 */ 381 */
350 382
351 ctrla = channel_readl(atchan, CTRLA);
352 rmb(); /* ensure CTRLA is read before DSCR */
353 dscr = channel_readl(atchan, DSCR); 383 dscr = channel_readl(atchan, DSCR);
384 rmb(); /* ensure DSCR is read before CTRLA */
385 ctrla = channel_readl(atchan, CTRLA);
386 for (trials = 0; trials < ATC_MAX_DSCR_TRIALS; ++trials) {
387 u32 new_dscr;
388
389 rmb(); /* ensure DSCR is read after CTRLA */
390 new_dscr = channel_readl(atchan, DSCR);
391
392 /*
393 * If the DSCR register value has not changed inside the
394 * DMA controller since the previous read, we assume
395 * that both the dscr and ctrla values refers to the
396 * very same descriptor.
397 */
398 if (likely(new_dscr == dscr))
399 break;
400
401 /*
402 * DSCR has changed inside the DMA controller, so the
403 * previouly read value of CTRLA may refer to an already
404 * processed descriptor hence could be outdated.
405 * We need to update ctrla to match the current
406 * descriptor.
407 */
408 dscr = new_dscr;
409 rmb(); /* ensure DSCR is read before CTRLA */
410 ctrla = channel_readl(atchan, CTRLA);
411 }
412 if (unlikely(trials >= ATC_MAX_DSCR_TRIALS))
413 return -ETIMEDOUT;
354 414
355 /* for the first descriptor we can be more accurate */ 415 /* for the first descriptor we can be more accurate */
356 if (desc_first->lli.dscr == dscr) 416 if (desc_first->lli.dscr == dscr)
357 return atc_calc_bytes_left(ret, ctrla, desc_first); 417 return atc_calc_bytes_left(ret, ctrla);
358 418
359 ret -= desc_first->len; 419 ret -= desc_first->len;
360 list_for_each_entry(desc, &desc_first->tx_list, desc_node) { 420 list_for_each_entry(desc, &desc_first->tx_list, desc_node) {
@@ -365,16 +425,14 @@ static int atc_get_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
365 } 425 }
366 426
367 /* 427 /*
368 * For the last descriptor in the chain we can calculate 428 * For the current descriptor in the chain we can calculate
369 * the remaining bytes using the channel's register. 429 * the remaining bytes using the channel's register.
370 * Note that the transfer width of the first and last
371 * descriptor may differ.
372 */ 430 */
373 if (!desc->lli.dscr) 431 ret = atc_calc_bytes_left(ret, ctrla);
374 ret = atc_calc_bytes_left_from_reg(ret, atchan, desc);
375 } else { 432 } else {
376 /* single transfer */ 433 /* single transfer */
377 ret = atc_calc_bytes_left_from_reg(ret, atchan, desc_first); 434 ctrla = channel_readl(atchan, CTRLA);
435 ret = atc_calc_bytes_left(ret, ctrla);
378 } 436 }
379 437
380 return ret; 438 return ret;
@@ -726,7 +784,6 @@ atc_prep_dma_interleaved(struct dma_chan *chan,
726 784
727 desc->txd.cookie = -EBUSY; 785 desc->txd.cookie = -EBUSY;
728 desc->total_len = desc->len = len; 786 desc->total_len = desc->len = len;
729 desc->tx_width = dwidth;
730 787
731 /* set end-of-link to the last link descriptor of list*/ 788 /* set end-of-link to the last link descriptor of list*/
732 set_desc_eol(desc); 789 set_desc_eol(desc);
@@ -804,10 +861,6 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
804 first->txd.cookie = -EBUSY; 861 first->txd.cookie = -EBUSY;
805 first->total_len = len; 862 first->total_len = len;
806 863
807 /* set transfer width for the calculation of the residue */
808 first->tx_width = src_width;
809 prev->tx_width = src_width;
810
811 /* set end-of-link to the last link descriptor of list*/ 864 /* set end-of-link to the last link descriptor of list*/
812 set_desc_eol(desc); 865 set_desc_eol(desc);
813 866
@@ -956,10 +1009,6 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
956 first->txd.cookie = -EBUSY; 1009 first->txd.cookie = -EBUSY;
957 first->total_len = total_len; 1010 first->total_len = total_len;
958 1011
959 /* set transfer width for the calculation of the residue */
960 first->tx_width = reg_width;
961 prev->tx_width = reg_width;
962
963 /* first link descriptor of list is responsible of flags */ 1012 /* first link descriptor of list is responsible of flags */
964 first->txd.flags = flags; /* client is in control of this ack */ 1013 first->txd.flags = flags; /* client is in control of this ack */
965 1014
@@ -1077,12 +1126,6 @@ atc_prep_dma_sg(struct dma_chan *chan,
1077 desc->txd.cookie = 0; 1126 desc->txd.cookie = 0;
1078 desc->len = len; 1127 desc->len = len;
1079 1128
1080 /*
1081 * Although we only need the transfer width for the first and
1082 * the last descriptor, its easier to set it to all descriptors.
1083 */
1084 desc->tx_width = src_width;
1085
1086 atc_desc_chain(&first, &prev, desc); 1129 atc_desc_chain(&first, &prev, desc);
1087 1130
1088 /* update the lengths and addresses for the next loop cycle */ 1131 /* update the lengths and addresses for the next loop cycle */
@@ -1256,7 +1299,6 @@ atc_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
1256 /* First descriptor of the chain embedds additional information */ 1299 /* First descriptor of the chain embedds additional information */
1257 first->txd.cookie = -EBUSY; 1300 first->txd.cookie = -EBUSY;
1258 first->total_len = buf_len; 1301 first->total_len = buf_len;
1259 first->tx_width = reg_width;
1260 1302
1261 return &first->txd; 1303 return &first->txd;
1262 1304
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index bc8d5ebedd19..7f5a08230f76 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -112,6 +112,7 @@
112#define ATC_SRC_WIDTH_BYTE (0x0 << 24) 112#define ATC_SRC_WIDTH_BYTE (0x0 << 24)
113#define ATC_SRC_WIDTH_HALFWORD (0x1 << 24) 113#define ATC_SRC_WIDTH_HALFWORD (0x1 << 24)
114#define ATC_SRC_WIDTH_WORD (0x2 << 24) 114#define ATC_SRC_WIDTH_WORD (0x2 << 24)
115#define ATC_REG_TO_SRC_WIDTH(r) (((r) >> 24) & 0x3)
115#define ATC_DST_WIDTH_MASK (0x3 << 28) /* Destination Single Transfer Size */ 116#define ATC_DST_WIDTH_MASK (0x3 << 28) /* Destination Single Transfer Size */
116#define ATC_DST_WIDTH(x) ((x) << 28) 117#define ATC_DST_WIDTH(x) ((x) << 28)
117#define ATC_DST_WIDTH_BYTE (0x0 << 28) 118#define ATC_DST_WIDTH_BYTE (0x0 << 28)
@@ -182,7 +183,6 @@ struct at_lli {
182 * @txd: support for the async_tx api 183 * @txd: support for the async_tx api
183 * @desc_node: node on the channed descriptors list 184 * @desc_node: node on the channed descriptors list
184 * @len: descriptor byte count 185 * @len: descriptor byte count
185 * @tx_width: transfer width
186 * @total_len: total transaction byte count 186 * @total_len: total transaction byte count
187 */ 187 */
188struct at_desc { 188struct at_desc {
@@ -194,7 +194,6 @@ struct at_desc {
194 struct dma_async_tx_descriptor txd; 194 struct dma_async_tx_descriptor txd;
195 struct list_head desc_node; 195 struct list_head desc_node;
196 size_t len; 196 size_t len;
197 u32 tx_width;
198 size_t total_len; 197 size_t total_len;
199 198
200 /* Interleaved data */ 199 /* Interleaved data */
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index cf1213de7865..40afa2a16cfc 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -359,18 +359,19 @@ static void at_xdmac_start_xfer(struct at_xdmac_chan *atchan,
359 * descriptor view 2 since some fields of the configuration register 359 * descriptor view 2 since some fields of the configuration register
360 * depend on transfer size and src/dest addresses. 360 * depend on transfer size and src/dest addresses.
361 */ 361 */
362 if (at_xdmac_chan_is_cyclic(atchan)) { 362 if (at_xdmac_chan_is_cyclic(atchan))
363 reg = AT_XDMAC_CNDC_NDVIEW_NDV1; 363 reg = AT_XDMAC_CNDC_NDVIEW_NDV1;
364 at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg); 364 else if (first->lld.mbr_ubc & AT_XDMAC_MBR_UBC_NDV3)
365 } else if (first->lld.mbr_ubc & AT_XDMAC_MBR_UBC_NDV3) {
366 reg = AT_XDMAC_CNDC_NDVIEW_NDV3; 365 reg = AT_XDMAC_CNDC_NDVIEW_NDV3;
367 } else { 366 else
368 /*
369 * No need to write AT_XDMAC_CC reg, it will be done when the
370 * descriptor is fecthed.
371 */
372 reg = AT_XDMAC_CNDC_NDVIEW_NDV2; 367 reg = AT_XDMAC_CNDC_NDVIEW_NDV2;
373 } 368 /*
369 * Even if the register will be updated from the configuration in the
370 * descriptor when using view 2 or higher, the PROT bit won't be set
371 * properly. This bit can be modified only by using the channel
372 * configuration register.
373 */
374 at_xdmac_chan_write(atchan, AT_XDMAC_CC, first->lld.mbr_cfg);
374 375
375 reg |= AT_XDMAC_CNDC_NDDUP 376 reg |= AT_XDMAC_CNDC_NDDUP
376 | AT_XDMAC_CNDC_NDSUP 377 | AT_XDMAC_CNDC_NDSUP
@@ -681,15 +682,16 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
681 desc->lld.mbr_sa = mem; 682 desc->lld.mbr_sa = mem;
682 desc->lld.mbr_da = atchan->sconfig.dst_addr; 683 desc->lld.mbr_da = atchan->sconfig.dst_addr;
683 } 684 }
684 desc->lld.mbr_cfg = atchan->cfg; 685 dwidth = at_xdmac_get_dwidth(atchan->cfg);
685 dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
686 fixed_dwidth = IS_ALIGNED(len, 1 << dwidth) 686 fixed_dwidth = IS_ALIGNED(len, 1 << dwidth)
687 ? at_xdmac_get_dwidth(desc->lld.mbr_cfg) 687 ? dwidth
688 : AT_XDMAC_CC_DWIDTH_BYTE; 688 : AT_XDMAC_CC_DWIDTH_BYTE;
689 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2 /* next descriptor view */ 689 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV2 /* next descriptor view */
690 | AT_XDMAC_MBR_UBC_NDEN /* next descriptor dst parameter update */ 690 | AT_XDMAC_MBR_UBC_NDEN /* next descriptor dst parameter update */
691 | AT_XDMAC_MBR_UBC_NSEN /* next descriptor src parameter update */ 691 | AT_XDMAC_MBR_UBC_NSEN /* next descriptor src parameter update */
692 | (len >> fixed_dwidth); /* microblock length */ 692 | (len >> fixed_dwidth); /* microblock length */
693 desc->lld.mbr_cfg = (atchan->cfg & ~AT_XDMAC_CC_DWIDTH_MASK) |
694 AT_XDMAC_CC_DWIDTH(fixed_dwidth);
693 dev_dbg(chan2dev(chan), 695 dev_dbg(chan2dev(chan),
694 "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n", 696 "%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x\n",
695 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc); 697 __func__, &desc->lld.mbr_sa, &desc->lld.mbr_da, desc->lld.mbr_ubc);
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index fbaf1ead2597..f1325f62563e 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -162,10 +162,11 @@ static void mv_chan_set_mode(struct mv_xor_chan *chan,
162 config &= ~0x7; 162 config &= ~0x7;
163 config |= op_mode; 163 config |= op_mode;
164 164
165 if (IS_ENABLED(__BIG_ENDIAN)) 165#if defined(__BIG_ENDIAN)
166 config |= XOR_DESCRIPTOR_SWAP; 166 config |= XOR_DESCRIPTOR_SWAP;
167 else 167#else
168 config &= ~XOR_DESCRIPTOR_SWAP; 168 config &= ~XOR_DESCRIPTOR_SWAP;
169#endif
169 170
170 writel_relaxed(config, XOR_CONFIG(chan)); 171 writel_relaxed(config, XOR_CONFIG(chan));
171 chan->current_type = type; 172 chan->current_type = type;
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index f513f77b1d85..ecab4ea059b4 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -2328,7 +2328,7 @@ static dma_cookie_t pl330_tx_submit(struct dma_async_tx_descriptor *tx)
2328 desc->txd.callback = last->txd.callback; 2328 desc->txd.callback = last->txd.callback;
2329 desc->txd.callback_param = last->txd.callback_param; 2329 desc->txd.callback_param = last->txd.callback_param;
2330 } 2330 }
2331 last->last = false; 2331 desc->last = false;
2332 2332
2333 dma_cookie_assign(&desc->txd); 2333 dma_cookie_assign(&desc->txd);
2334 2334
@@ -2623,6 +2623,7 @@ pl330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
2623 desc->rqcfg.brst_len = 1; 2623 desc->rqcfg.brst_len = 1;
2624 2624
2625 desc->rqcfg.brst_len = get_burst_len(desc, len); 2625 desc->rqcfg.brst_len = get_burst_len(desc, len);
2626 desc->bytes_requested = len;
2626 2627
2627 desc->txd.flags = flags; 2628 desc->txd.flags = flags;
2628 2629
diff --git a/drivers/dma/virt-dma.c b/drivers/dma/virt-dma.c
index 7d2c17d8d30f..6f80432a3f0a 100644
--- a/drivers/dma/virt-dma.c
+++ b/drivers/dma/virt-dma.c
@@ -29,7 +29,7 @@ dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
29 spin_lock_irqsave(&vc->lock, flags); 29 spin_lock_irqsave(&vc->lock, flags);
30 cookie = dma_cookie_assign(tx); 30 cookie = dma_cookie_assign(tx);
31 31
32 list_move_tail(&vd->node, &vc->desc_submitted); 32 list_add_tail(&vd->node, &vc->desc_submitted);
33 spin_unlock_irqrestore(&vc->lock, flags); 33 spin_unlock_irqrestore(&vc->lock, flags);
34 34
35 dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n", 35 dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n",
@@ -83,10 +83,8 @@ static void vchan_complete(unsigned long arg)
83 cb_data = vd->tx.callback_param; 83 cb_data = vd->tx.callback_param;
84 84
85 list_del(&vd->node); 85 list_del(&vd->node);
86 if (async_tx_test_ack(&vd->tx)) 86
87 list_add(&vd->node, &vc->desc_allocated); 87 vc->desc_free(vd);
88 else
89 vc->desc_free(vd);
90 88
91 if (cb) 89 if (cb)
92 cb(cb_data); 90 cb(cb_data);
@@ -98,13 +96,9 @@ void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
98 while (!list_empty(head)) { 96 while (!list_empty(head)) {
99 struct virt_dma_desc *vd = list_first_entry(head, 97 struct virt_dma_desc *vd = list_first_entry(head,
100 struct virt_dma_desc, node); 98 struct virt_dma_desc, node);
101 if (async_tx_test_ack(&vd->tx)) { 99 list_del(&vd->node);
102 list_move_tail(&vd->node, &vc->desc_allocated); 100 dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
103 } else { 101 vc->desc_free(vd);
104 dev_dbg(vc->chan.device->dev, "txd %p: freeing\n", vd);
105 list_del(&vd->node);
106 vc->desc_free(vd);
107 }
108 } 102 }
109} 103}
110EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list); 104EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);
@@ -114,7 +108,6 @@ void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
114 dma_cookie_init(&vc->chan); 108 dma_cookie_init(&vc->chan);
115 109
116 spin_lock_init(&vc->lock); 110 spin_lock_init(&vc->lock);
117 INIT_LIST_HEAD(&vc->desc_allocated);
118 INIT_LIST_HEAD(&vc->desc_submitted); 111 INIT_LIST_HEAD(&vc->desc_submitted);
119 INIT_LIST_HEAD(&vc->desc_issued); 112 INIT_LIST_HEAD(&vc->desc_issued);
120 INIT_LIST_HEAD(&vc->desc_completed); 113 INIT_LIST_HEAD(&vc->desc_completed);
diff --git a/drivers/dma/virt-dma.h b/drivers/dma/virt-dma.h
index 189e75dbcb15..181b95267866 100644
--- a/drivers/dma/virt-dma.h
+++ b/drivers/dma/virt-dma.h
@@ -29,7 +29,6 @@ struct virt_dma_chan {
29 spinlock_t lock; 29 spinlock_t lock;
30 30
31 /* protected by vc.lock */ 31 /* protected by vc.lock */
32 struct list_head desc_allocated;
33 struct list_head desc_submitted; 32 struct list_head desc_submitted;
34 struct list_head desc_issued; 33 struct list_head desc_issued;
35 struct list_head desc_completed; 34 struct list_head desc_completed;
@@ -56,16 +55,11 @@ static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan
56 struct virt_dma_desc *vd, unsigned long tx_flags) 55 struct virt_dma_desc *vd, unsigned long tx_flags)
57{ 56{
58 extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *); 57 extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);
59 unsigned long flags;
60 58
61 dma_async_tx_descriptor_init(&vd->tx, &vc->chan); 59 dma_async_tx_descriptor_init(&vd->tx, &vc->chan);
62 vd->tx.flags = tx_flags; 60 vd->tx.flags = tx_flags;
63 vd->tx.tx_submit = vchan_tx_submit; 61 vd->tx.tx_submit = vchan_tx_submit;
64 62
65 spin_lock_irqsave(&vc->lock, flags);
66 list_add_tail(&vd->node, &vc->desc_allocated);
67 spin_unlock_irqrestore(&vc->lock, flags);
68
69 return &vd->tx; 63 return &vd->tx;
70} 64}
71 65
@@ -128,8 +122,7 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
128} 122}
129 123
130/** 124/**
131 * vchan_get_all_descriptors - obtain all allocated, submitted and issued 125 * vchan_get_all_descriptors - obtain all submitted and issued descriptors
132 * descriptors
133 * vc: virtual channel to get descriptors from 126 * vc: virtual channel to get descriptors from
134 * head: list of descriptors found 127 * head: list of descriptors found
135 * 128 *
@@ -141,7 +134,6 @@ static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
141static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc, 134static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
142 struct list_head *head) 135 struct list_head *head)
143{ 136{
144 list_splice_tail_init(&vc->desc_allocated, head);
145 list_splice_tail_init(&vc->desc_submitted, head); 137 list_splice_tail_init(&vc->desc_submitted, head);
146 list_splice_tail_init(&vc->desc_issued, head); 138 list_splice_tail_init(&vc->desc_issued, head);
147 list_splice_tail_init(&vc->desc_completed, head); 139 list_splice_tail_init(&vc->desc_completed, head);
@@ -149,14 +141,11 @@ static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
149 141
150static inline void vchan_free_chan_resources(struct virt_dma_chan *vc) 142static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
151{ 143{
152 struct virt_dma_desc *vd;
153 unsigned long flags; 144 unsigned long flags;
154 LIST_HEAD(head); 145 LIST_HEAD(head);
155 146
156 spin_lock_irqsave(&vc->lock, flags); 147 spin_lock_irqsave(&vc->lock, flags);
157 vchan_get_all_descriptors(vc, &head); 148 vchan_get_all_descriptors(vc, &head);
158 list_for_each_entry(vd, &head, node)
159 async_tx_clear_ack(&vd->tx);
160 spin_unlock_irqrestore(&vc->lock, flags); 149 spin_unlock_irqrestore(&vc->lock, flags);
161 150
162 vchan_dma_desc_free_list(vc, &head); 151 vchan_dma_desc_free_list(vc, &head);
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
index 620fd55ec766..dff22ab01851 100644
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -111,6 +111,7 @@
111#define XGENE_DMA_MEM_RAM_SHUTDOWN 0xD070 111#define XGENE_DMA_MEM_RAM_SHUTDOWN 0xD070
112#define XGENE_DMA_BLK_MEM_RDY 0xD074 112#define XGENE_DMA_BLK_MEM_RDY 0xD074
113#define XGENE_DMA_BLK_MEM_RDY_VAL 0xFFFFFFFF 113#define XGENE_DMA_BLK_MEM_RDY_VAL 0xFFFFFFFF
114#define XGENE_DMA_RING_CMD_SM_OFFSET 0x8000
114 115
115/* X-Gene SoC EFUSE csr register and bit defination */ 116/* X-Gene SoC EFUSE csr register and bit defination */
116#define XGENE_SOC_JTAG1_SHADOW 0x18 117#define XGENE_SOC_JTAG1_SHADOW 0x18
@@ -1887,6 +1888,8 @@ static int xgene_dma_get_resources(struct platform_device *pdev,
1887 return -ENOMEM; 1888 return -ENOMEM;
1888 } 1889 }
1889 1890
1891 pdma->csr_ring_cmd += XGENE_DMA_RING_CMD_SM_OFFSET;
1892
1890 /* Get efuse csr region */ 1893 /* Get efuse csr region */
1891 res = platform_get_resource(pdev, IORESOURCE_MEM, 3); 1894 res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
1892 if (!res) { 1895 if (!res) {
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index 3515b381c131..711d8ad74f11 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -920,7 +920,7 @@ static int ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
920 */ 920 */
921 921
922 for (row = 0; row < mci->nr_csrows; row++) { 922 for (row = 0; row < mci->nr_csrows; row++) {
923 struct csrow_info *csi = &mci->csrows[row]; 923 struct csrow_info *csi = mci->csrows[row];
924 924
925 /* 925 /*
926 * Get the configuration settings for this 926 * Get the configuration settings for this
diff --git a/drivers/extcon/extcon-palmas.c b/drivers/extcon/extcon-palmas.c
index 080d5cc27055..eebdf2a33bfe 100644
--- a/drivers/extcon/extcon-palmas.c
+++ b/drivers/extcon/extcon-palmas.c
@@ -200,7 +200,6 @@ static int palmas_usb_probe(struct platform_device *pdev)
200 status = devm_extcon_dev_register(&pdev->dev, palmas_usb->edev); 200 status = devm_extcon_dev_register(&pdev->dev, palmas_usb->edev);
201 if (status) { 201 if (status) {
202 dev_err(&pdev->dev, "failed to register extcon device\n"); 202 dev_err(&pdev->dev, "failed to register extcon device\n");
203 kfree(palmas_usb->edev->name);
204 return status; 203 return status;
205 } 204 }
206 205
@@ -214,7 +213,6 @@ static int palmas_usb_probe(struct platform_device *pdev)
214 if (status < 0) { 213 if (status < 0) {
215 dev_err(&pdev->dev, "can't get IRQ %d, err %d\n", 214 dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
216 palmas_usb->id_irq, status); 215 palmas_usb->id_irq, status);
217 kfree(palmas_usb->edev->name);
218 return status; 216 return status;
219 } 217 }
220 } 218 }
@@ -229,7 +227,6 @@ static int palmas_usb_probe(struct platform_device *pdev)
229 if (status < 0) { 227 if (status < 0) {
230 dev_err(&pdev->dev, "can't get IRQ %d, err %d\n", 228 dev_err(&pdev->dev, "can't get IRQ %d, err %d\n",
231 palmas_usb->vbus_irq, status); 229 palmas_usb->vbus_irq, status);
232 kfree(palmas_usb->edev->name);
233 return status; 230 return status;
234 } 231 }
235 } 232 }
@@ -239,15 +236,6 @@ static int palmas_usb_probe(struct platform_device *pdev)
239 return 0; 236 return 0;
240} 237}
241 238
242static int palmas_usb_remove(struct platform_device *pdev)
243{
244 struct palmas_usb *palmas_usb = platform_get_drvdata(pdev);
245
246 kfree(palmas_usb->edev->name);
247
248 return 0;
249}
250
251#ifdef CONFIG_PM_SLEEP 239#ifdef CONFIG_PM_SLEEP
252static int palmas_usb_suspend(struct device *dev) 240static int palmas_usb_suspend(struct device *dev)
253{ 241{
@@ -288,7 +276,6 @@ static const struct of_device_id of_palmas_match_tbl[] = {
288 276
289static struct platform_driver palmas_usb_driver = { 277static struct platform_driver palmas_usb_driver = {
290 .probe = palmas_usb_probe, 278 .probe = palmas_usb_probe,
291 .remove = palmas_usb_remove,
292 .driver = { 279 .driver = {
293 .name = "palmas-usb", 280 .name = "palmas-usb",
294 .of_match_table = of_palmas_match_tbl, 281 .of_match_table = of_palmas_match_tbl,
diff --git a/drivers/extcon/extcon.c b/drivers/extcon/extcon.c
index 76157ab9faf3..43b57b02d050 100644
--- a/drivers/extcon/extcon.c
+++ b/drivers/extcon/extcon.c
@@ -124,25 +124,35 @@ static int find_cable_index_by_id(struct extcon_dev *edev, const unsigned int id
124 return -EINVAL; 124 return -EINVAL;
125} 125}
126 126
127static int find_cable_index_by_name(struct extcon_dev *edev, const char *name) 127static int find_cable_id_by_name(struct extcon_dev *edev, const char *name)
128{ 128{
129 unsigned int id = EXTCON_NONE; 129 unsigned int id = -EINVAL;
130 int i = 0; 130 int i = 0;
131 131
132 if (edev->max_supported == 0) 132 /* Find the id of extcon cable */
133 return -EINVAL;
134
135 /* Find the the number of extcon cable */
136 while (extcon_name[i]) { 133 while (extcon_name[i]) {
137 if (!strncmp(extcon_name[i], name, CABLE_NAME_MAX)) { 134 if (!strncmp(extcon_name[i], name, CABLE_NAME_MAX)) {
138 id = i; 135 id = i;
139 break; 136 break;
140 } 137 }
138 i++;
141 } 139 }
142 140
143 if (id == EXTCON_NONE) 141 return id;
142}
143
144static int find_cable_index_by_name(struct extcon_dev *edev, const char *name)
145{
146 unsigned int id;
147
148 if (edev->max_supported == 0)
144 return -EINVAL; 149 return -EINVAL;
145 150
151 /* Find the the number of extcon cable */
152 id = find_cable_id_by_name(edev, name);
153 if (id < 0)
154 return id;
155
146 return find_cable_index_by_id(edev, id); 156 return find_cable_index_by_id(edev, id);
147} 157}
148 158
@@ -228,9 +238,11 @@ static ssize_t cable_state_show(struct device *dev,
228 struct extcon_cable *cable = container_of(attr, struct extcon_cable, 238 struct extcon_cable *cable = container_of(attr, struct extcon_cable,
229 attr_state); 239 attr_state);
230 240
241 int i = cable->cable_index;
242
231 return sprintf(buf, "%d\n", 243 return sprintf(buf, "%d\n",
232 extcon_get_cable_state_(cable->edev, 244 extcon_get_cable_state_(cable->edev,
233 cable->cable_index)); 245 cable->edev->supported_cable[i]));
234} 246}
235 247
236/** 248/**
@@ -263,20 +275,25 @@ int extcon_update_state(struct extcon_dev *edev, u32 mask, u32 state)
263 spin_lock_irqsave(&edev->lock, flags); 275 spin_lock_irqsave(&edev->lock, flags);
264 276
265 if (edev->state != ((edev->state & ~mask) | (state & mask))) { 277 if (edev->state != ((edev->state & ~mask) | (state & mask))) {
278 u32 old_state;
279
266 if (check_mutually_exclusive(edev, (edev->state & ~mask) | 280 if (check_mutually_exclusive(edev, (edev->state & ~mask) |
267 (state & mask))) { 281 (state & mask))) {
268 spin_unlock_irqrestore(&edev->lock, flags); 282 spin_unlock_irqrestore(&edev->lock, flags);
269 return -EPERM; 283 return -EPERM;
270 } 284 }
271 285
272 for (index = 0; index < edev->max_supported; index++) { 286 old_state = edev->state;
273 if (is_extcon_changed(edev->state, state, index, &attached))
274 raw_notifier_call_chain(&edev->nh[index], attached, edev);
275 }
276
277 edev->state &= ~mask; 287 edev->state &= ~mask;
278 edev->state |= state & mask; 288 edev->state |= state & mask;
279 289
290 for (index = 0; index < edev->max_supported; index++) {
291 if (is_extcon_changed(old_state, edev->state, index,
292 &attached))
293 raw_notifier_call_chain(&edev->nh[index],
294 attached, edev);
295 }
296
280 /* This could be in interrupt handler */ 297 /* This could be in interrupt handler */
281 prop_buf = (char *)get_zeroed_page(GFP_ATOMIC); 298 prop_buf = (char *)get_zeroed_page(GFP_ATOMIC);
282 if (prop_buf) { 299 if (prop_buf) {
@@ -361,8 +378,13 @@ EXPORT_SYMBOL_GPL(extcon_get_cable_state_);
361 */ 378 */
362int extcon_get_cable_state(struct extcon_dev *edev, const char *cable_name) 379int extcon_get_cable_state(struct extcon_dev *edev, const char *cable_name)
363{ 380{
364 return extcon_get_cable_state_(edev, find_cable_index_by_name 381 unsigned int id;
365 (edev, cable_name)); 382
383 id = find_cable_id_by_name(edev, cable_name);
384 if (id < 0)
385 return id;
386
387 return extcon_get_cable_state_(edev, id);
366} 388}
367EXPORT_SYMBOL_GPL(extcon_get_cable_state); 389EXPORT_SYMBOL_GPL(extcon_get_cable_state);
368 390
@@ -404,8 +426,13 @@ EXPORT_SYMBOL_GPL(extcon_set_cable_state_);
404int extcon_set_cable_state(struct extcon_dev *edev, 426int extcon_set_cable_state(struct extcon_dev *edev,
405 const char *cable_name, bool cable_state) 427 const char *cable_name, bool cable_state)
406{ 428{
407 return extcon_set_cable_state_(edev, find_cable_index_by_name 429 unsigned int id;
408 (edev, cable_name), cable_state); 430
431 id = find_cable_id_by_name(edev, cable_name);
432 if (id < 0)
433 return id;
434
435 return extcon_set_cable_state_(edev, id, cable_state);
409} 436}
410EXPORT_SYMBOL_GPL(extcon_set_cable_state); 437EXPORT_SYMBOL_GPL(extcon_set_cable_state);
411 438
diff --git a/drivers/firmware/efi/cper.c b/drivers/firmware/efi/cper.c
index 4fd9961d552e..d42537425438 100644
--- a/drivers/firmware/efi/cper.c
+++ b/drivers/firmware/efi/cper.c
@@ -305,10 +305,17 @@ const char *cper_mem_err_unpack(struct trace_seq *p,
305 return ret; 305 return ret;
306} 306}
307 307
308static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem) 308static void cper_print_mem(const char *pfx, const struct cper_sec_mem_err *mem,
309 int len)
309{ 310{
310 struct cper_mem_err_compact cmem; 311 struct cper_mem_err_compact cmem;
311 312
313 /* Don't trust UEFI 2.1/2.2 structure with bad validation bits */
314 if (len == sizeof(struct cper_sec_mem_err_old) &&
315 (mem->validation_bits & ~(CPER_MEM_VALID_RANK_NUMBER - 1))) {
316 pr_err(FW_WARN "valid bits set for fields beyond structure\n");
317 return;
318 }
312 if (mem->validation_bits & CPER_MEM_VALID_ERROR_STATUS) 319 if (mem->validation_bits & CPER_MEM_VALID_ERROR_STATUS)
313 printk("%s""error_status: 0x%016llx\n", pfx, mem->error_status); 320 printk("%s""error_status: 0x%016llx\n", pfx, mem->error_status);
314 if (mem->validation_bits & CPER_MEM_VALID_PA) 321 if (mem->validation_bits & CPER_MEM_VALID_PA)
@@ -405,8 +412,10 @@ static void cper_estatus_print_section(
405 } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PLATFORM_MEM)) { 412 } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PLATFORM_MEM)) {
406 struct cper_sec_mem_err *mem_err = (void *)(gdata + 1); 413 struct cper_sec_mem_err *mem_err = (void *)(gdata + 1);
407 printk("%s""section_type: memory error\n", newpfx); 414 printk("%s""section_type: memory error\n", newpfx);
408 if (gdata->error_data_length >= sizeof(*mem_err)) 415 if (gdata->error_data_length >=
409 cper_print_mem(newpfx, mem_err); 416 sizeof(struct cper_sec_mem_err_old))
417 cper_print_mem(newpfx, mem_err,
418 gdata->error_data_length);
410 else 419 else
411 goto err_section_too_small; 420 goto err_section_too_small;
412 } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PCIE)) { 421 } else if (!uuid_le_cmp(*sec_type, CPER_SEC_PCIE)) {
diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
index 9fa8084a7c8d..d6144e3b97c5 100644
--- a/drivers/firmware/efi/efi.c
+++ b/drivers/firmware/efi/efi.c
@@ -58,6 +58,11 @@ bool efi_runtime_disabled(void)
58 58
59static int __init parse_efi_cmdline(char *str) 59static int __init parse_efi_cmdline(char *str)
60{ 60{
61 if (!str) {
62 pr_warn("need at least one option\n");
63 return -EINVAL;
64 }
65
61 if (parse_option_str(str, "noruntime")) 66 if (parse_option_str(str, "noruntime"))
62 disable_runtime = true; 67 disable_runtime = true;
63 68
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index f3791e0d27d4..baefa635169a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -1130,6 +1130,9 @@ struct amdgpu_gfx {
1130 uint32_t me_feature_version; 1130 uint32_t me_feature_version;
1131 uint32_t ce_feature_version; 1131 uint32_t ce_feature_version;
1132 uint32_t pfp_feature_version; 1132 uint32_t pfp_feature_version;
1133 uint32_t rlc_feature_version;
1134 uint32_t mec_feature_version;
1135 uint32_t mec2_feature_version;
1133 struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS]; 1136 struct amdgpu_ring gfx_ring[AMDGPU_MAX_GFX_RINGS];
1134 unsigned num_gfx_rings; 1137 unsigned num_gfx_rings;
1135 struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS]; 1138 struct amdgpu_ring compute_ring[AMDGPU_MAX_COMPUTE_RINGS];
@@ -1614,6 +1617,9 @@ struct amdgpu_uvd {
1614#define AMDGPU_MAX_VCE_HANDLES 16 1617#define AMDGPU_MAX_VCE_HANDLES 16
1615#define AMDGPU_VCE_FIRMWARE_OFFSET 256 1618#define AMDGPU_VCE_FIRMWARE_OFFSET 256
1616 1619
1620#define AMDGPU_VCE_HARVEST_VCE0 (1 << 0)
1621#define AMDGPU_VCE_HARVEST_VCE1 (1 << 1)
1622
1617struct amdgpu_vce { 1623struct amdgpu_vce {
1618 struct amdgpu_bo *vcpu_bo; 1624 struct amdgpu_bo *vcpu_bo;
1619 uint64_t gpu_addr; 1625 uint64_t gpu_addr;
@@ -1626,6 +1632,7 @@ struct amdgpu_vce {
1626 const struct firmware *fw; /* VCE firmware */ 1632 const struct firmware *fw; /* VCE firmware */
1627 struct amdgpu_ring ring[AMDGPU_MAX_VCE_RINGS]; 1633 struct amdgpu_ring ring[AMDGPU_MAX_VCE_RINGS];
1628 struct amdgpu_irq_src irq; 1634 struct amdgpu_irq_src irq;
1635 unsigned harvest_config;
1629}; 1636};
1630 1637
1631/* 1638/*
@@ -1635,6 +1642,7 @@ struct amdgpu_sdma {
1635 /* SDMA firmware */ 1642 /* SDMA firmware */
1636 const struct firmware *fw; 1643 const struct firmware *fw;
1637 uint32_t fw_version; 1644 uint32_t fw_version;
1645 uint32_t feature_version;
1638 1646
1639 struct amdgpu_ring ring; 1647 struct amdgpu_ring ring;
1640}; 1648};
@@ -1862,6 +1870,12 @@ typedef void (*amdgpu_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
1862typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t); 1870typedef uint32_t (*amdgpu_block_rreg_t)(struct amdgpu_device*, uint32_t, uint32_t);
1863typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t); 1871typedef void (*amdgpu_block_wreg_t)(struct amdgpu_device*, uint32_t, uint32_t, uint32_t);
1864 1872
1873struct amdgpu_ip_block_status {
1874 bool valid;
1875 bool sw;
1876 bool hw;
1877};
1878
1865struct amdgpu_device { 1879struct amdgpu_device {
1866 struct device *dev; 1880 struct device *dev;
1867 struct drm_device *ddev; 1881 struct drm_device *ddev;
@@ -2004,7 +2018,7 @@ struct amdgpu_device {
2004 2018
2005 const struct amdgpu_ip_block_version *ip_blocks; 2019 const struct amdgpu_ip_block_version *ip_blocks;
2006 int num_ip_blocks; 2020 int num_ip_blocks;
2007 bool *ip_block_enabled; 2021 struct amdgpu_ip_block_status *ip_block_status;
2008 struct mutex mn_lock; 2022 struct mutex mn_lock;
2009 DECLARE_HASHTABLE(mn_hash, 7); 2023 DECLARE_HASHTABLE(mn_hash, 7);
2010 2024
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index d79009b65867..99f158e1baff 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1191,8 +1191,9 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
1191 return -EINVAL; 1191 return -EINVAL;
1192 } 1192 }
1193 1193
1194 adev->ip_block_enabled = kcalloc(adev->num_ip_blocks, sizeof(bool), GFP_KERNEL); 1194 adev->ip_block_status = kcalloc(adev->num_ip_blocks,
1195 if (adev->ip_block_enabled == NULL) 1195 sizeof(struct amdgpu_ip_block_status), GFP_KERNEL);
1196 if (adev->ip_block_status == NULL)
1196 return -ENOMEM; 1197 return -ENOMEM;
1197 1198
1198 if (adev->ip_blocks == NULL) { 1199 if (adev->ip_blocks == NULL) {
@@ -1203,18 +1204,18 @@ static int amdgpu_early_init(struct amdgpu_device *adev)
1203 for (i = 0; i < adev->num_ip_blocks; i++) { 1204 for (i = 0; i < adev->num_ip_blocks; i++) {
1204 if ((amdgpu_ip_block_mask & (1 << i)) == 0) { 1205 if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
1205 DRM_ERROR("disabled ip block: %d\n", i); 1206 DRM_ERROR("disabled ip block: %d\n", i);
1206 adev->ip_block_enabled[i] = false; 1207 adev->ip_block_status[i].valid = false;
1207 } else { 1208 } else {
1208 if (adev->ip_blocks[i].funcs->early_init) { 1209 if (adev->ip_blocks[i].funcs->early_init) {
1209 r = adev->ip_blocks[i].funcs->early_init((void *)adev); 1210 r = adev->ip_blocks[i].funcs->early_init((void *)adev);
1210 if (r == -ENOENT) 1211 if (r == -ENOENT)
1211 adev->ip_block_enabled[i] = false; 1212 adev->ip_block_status[i].valid = false;
1212 else if (r) 1213 else if (r)
1213 return r; 1214 return r;
1214 else 1215 else
1215 adev->ip_block_enabled[i] = true; 1216 adev->ip_block_status[i].valid = true;
1216 } else { 1217 } else {
1217 adev->ip_block_enabled[i] = true; 1218 adev->ip_block_status[i].valid = true;
1218 } 1219 }
1219 } 1220 }
1220 } 1221 }
@@ -1227,11 +1228,12 @@ static int amdgpu_init(struct amdgpu_device *adev)
1227 int i, r; 1228 int i, r;
1228 1229
1229 for (i = 0; i < adev->num_ip_blocks; i++) { 1230 for (i = 0; i < adev->num_ip_blocks; i++) {
1230 if (!adev->ip_block_enabled[i]) 1231 if (!adev->ip_block_status[i].valid)
1231 continue; 1232 continue;
1232 r = adev->ip_blocks[i].funcs->sw_init((void *)adev); 1233 r = adev->ip_blocks[i].funcs->sw_init((void *)adev);
1233 if (r) 1234 if (r)
1234 return r; 1235 return r;
1236 adev->ip_block_status[i].sw = true;
1235 /* need to do gmc hw init early so we can allocate gpu mem */ 1237 /* need to do gmc hw init early so we can allocate gpu mem */
1236 if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) { 1238 if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
1237 r = amdgpu_vram_scratch_init(adev); 1239 r = amdgpu_vram_scratch_init(adev);
@@ -1243,11 +1245,12 @@ static int amdgpu_init(struct amdgpu_device *adev)
1243 r = amdgpu_wb_init(adev); 1245 r = amdgpu_wb_init(adev);
1244 if (r) 1246 if (r)
1245 return r; 1247 return r;
1248 adev->ip_block_status[i].hw = true;
1246 } 1249 }
1247 } 1250 }
1248 1251
1249 for (i = 0; i < adev->num_ip_blocks; i++) { 1252 for (i = 0; i < adev->num_ip_blocks; i++) {
1250 if (!adev->ip_block_enabled[i]) 1253 if (!adev->ip_block_status[i].sw)
1251 continue; 1254 continue;
1252 /* gmc hw init is done early */ 1255 /* gmc hw init is done early */
1253 if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) 1256 if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC)
@@ -1255,6 +1258,7 @@ static int amdgpu_init(struct amdgpu_device *adev)
1255 r = adev->ip_blocks[i].funcs->hw_init((void *)adev); 1258 r = adev->ip_blocks[i].funcs->hw_init((void *)adev);
1256 if (r) 1259 if (r)
1257 return r; 1260 return r;
1261 adev->ip_block_status[i].hw = true;
1258 } 1262 }
1259 1263
1260 return 0; 1264 return 0;
@@ -1265,7 +1269,7 @@ static int amdgpu_late_init(struct amdgpu_device *adev)
1265 int i = 0, r; 1269 int i = 0, r;
1266 1270
1267 for (i = 0; i < adev->num_ip_blocks; i++) { 1271 for (i = 0; i < adev->num_ip_blocks; i++) {
1268 if (!adev->ip_block_enabled[i]) 1272 if (!adev->ip_block_status[i].valid)
1269 continue; 1273 continue;
1270 /* enable clockgating to save power */ 1274 /* enable clockgating to save power */
1271 r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, 1275 r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
@@ -1287,7 +1291,7 @@ static int amdgpu_fini(struct amdgpu_device *adev)
1287 int i, r; 1291 int i, r;
1288 1292
1289 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 1293 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1290 if (!adev->ip_block_enabled[i]) 1294 if (!adev->ip_block_status[i].hw)
1291 continue; 1295 continue;
1292 if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) { 1296 if (adev->ip_blocks[i].type == AMD_IP_BLOCK_TYPE_GMC) {
1293 amdgpu_wb_fini(adev); 1297 amdgpu_wb_fini(adev);
@@ -1300,14 +1304,16 @@ static int amdgpu_fini(struct amdgpu_device *adev)
1300 return r; 1304 return r;
1301 r = adev->ip_blocks[i].funcs->hw_fini((void *)adev); 1305 r = adev->ip_blocks[i].funcs->hw_fini((void *)adev);
1302 /* XXX handle errors */ 1306 /* XXX handle errors */
1307 adev->ip_block_status[i].hw = false;
1303 } 1308 }
1304 1309
1305 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 1310 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1306 if (!adev->ip_block_enabled[i]) 1311 if (!adev->ip_block_status[i].sw)
1307 continue; 1312 continue;
1308 r = adev->ip_blocks[i].funcs->sw_fini((void *)adev); 1313 r = adev->ip_blocks[i].funcs->sw_fini((void *)adev);
1309 /* XXX handle errors */ 1314 /* XXX handle errors */
1310 adev->ip_block_enabled[i] = false; 1315 adev->ip_block_status[i].sw = false;
1316 adev->ip_block_status[i].valid = false;
1311 } 1317 }
1312 1318
1313 return 0; 1319 return 0;
@@ -1318,7 +1324,7 @@ static int amdgpu_suspend(struct amdgpu_device *adev)
1318 int i, r; 1324 int i, r;
1319 1325
1320 for (i = adev->num_ip_blocks - 1; i >= 0; i--) { 1326 for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
1321 if (!adev->ip_block_enabled[i]) 1327 if (!adev->ip_block_status[i].valid)
1322 continue; 1328 continue;
1323 /* ungate blocks so that suspend can properly shut them down */ 1329 /* ungate blocks so that suspend can properly shut them down */
1324 r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev, 1330 r = adev->ip_blocks[i].funcs->set_clockgating_state((void *)adev,
@@ -1336,7 +1342,7 @@ static int amdgpu_resume(struct amdgpu_device *adev)
1336 int i, r; 1342 int i, r;
1337 1343
1338 for (i = 0; i < adev->num_ip_blocks; i++) { 1344 for (i = 0; i < adev->num_ip_blocks; i++) {
1339 if (!adev->ip_block_enabled[i]) 1345 if (!adev->ip_block_status[i].valid)
1340 continue; 1346 continue;
1341 r = adev->ip_blocks[i].funcs->resume(adev); 1347 r = adev->ip_blocks[i].funcs->resume(adev);
1342 if (r) 1348 if (r)
@@ -1582,8 +1588,8 @@ void amdgpu_device_fini(struct amdgpu_device *adev)
1582 amdgpu_fence_driver_fini(adev); 1588 amdgpu_fence_driver_fini(adev);
1583 amdgpu_fbdev_fini(adev); 1589 amdgpu_fbdev_fini(adev);
1584 r = amdgpu_fini(adev); 1590 r = amdgpu_fini(adev);
1585 kfree(adev->ip_block_enabled); 1591 kfree(adev->ip_block_status);
1586 adev->ip_block_enabled = NULL; 1592 adev->ip_block_status = NULL;
1587 adev->accel_working = false; 1593 adev->accel_working = false;
1588 /* free i2c buses */ 1594 /* free i2c buses */
1589 amdgpu_i2c_fini(adev); 1595 amdgpu_i2c_fini(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index ae43b58c9733..4afc507820c0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -449,7 +449,7 @@ out:
449 * vital here, so they are not reported back to userspace. 449 * vital here, so they are not reported back to userspace.
450 */ 450 */
451static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev, 451static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
452 struct amdgpu_bo_va *bo_va) 452 struct amdgpu_bo_va *bo_va, uint32_t operation)
453{ 453{
454 struct ttm_validate_buffer tv, *entry; 454 struct ttm_validate_buffer tv, *entry;
455 struct amdgpu_bo_list_entry *vm_bos; 455 struct amdgpu_bo_list_entry *vm_bos;
@@ -485,7 +485,9 @@ static void amdgpu_gem_va_update_vm(struct amdgpu_device *adev,
485 if (r) 485 if (r)
486 goto error_unlock; 486 goto error_unlock;
487 487
488 r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem); 488
489 if (operation == AMDGPU_VA_OP_MAP)
490 r = amdgpu_vm_bo_update(adev, bo_va, &bo_va->bo->tbo.mem);
489 491
490error_unlock: 492error_unlock:
491 mutex_unlock(&bo_va->vm->mutex); 493 mutex_unlock(&bo_va->vm->mutex);
@@ -580,7 +582,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
580 } 582 }
581 583
582 if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE)) 584 if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE))
583 amdgpu_gem_va_update_vm(adev, bo_va); 585 amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
584 586
585 drm_gem_object_unreference_unlocked(gobj); 587 drm_gem_object_unreference_unlocked(gobj);
586 return r; 588 return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 52dff75aac6f..bc0fac618a3f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -180,16 +180,16 @@ int amdgpu_ib_schedule(struct amdgpu_device *adev, unsigned num_ibs,
180 if (vm) { 180 if (vm) {
181 /* do context switch */ 181 /* do context switch */
182 amdgpu_vm_flush(ring, vm, ib->sync.last_vm_update); 182 amdgpu_vm_flush(ring, vm, ib->sync.last_vm_update);
183 }
184 183
185 if (vm && ring->funcs->emit_gds_switch) 184 if (ring->funcs->emit_gds_switch)
186 amdgpu_ring_emit_gds_switch(ring, ib->vm->ids[ring->idx].id, 185 amdgpu_ring_emit_gds_switch(ring, ib->vm->ids[ring->idx].id,
187 ib->gds_base, ib->gds_size, 186 ib->gds_base, ib->gds_size,
188 ib->gws_base, ib->gws_size, 187 ib->gws_base, ib->gws_size,
189 ib->oa_base, ib->oa_size); 188 ib->oa_base, ib->oa_size);
190 189
191 if (ring->funcs->emit_hdp_flush) 190 if (ring->funcs->emit_hdp_flush)
192 amdgpu_ring_emit_hdp_flush(ring); 191 amdgpu_ring_emit_hdp_flush(ring);
192 }
193 193
194 old_ctx = ring->current_ctx; 194 old_ctx = ring->current_ctx;
195 for (i = 0; i < num_ibs; ++i) { 195 for (i = 0; i < num_ibs; ++i) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 8c40a9671b9f..93000af92283 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -242,7 +242,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
242 242
243 for (i = 0; i < adev->num_ip_blocks; i++) { 243 for (i = 0; i < adev->num_ip_blocks; i++) {
244 if (adev->ip_blocks[i].type == type && 244 if (adev->ip_blocks[i].type == type &&
245 adev->ip_block_enabled[i]) { 245 adev->ip_block_status[i].valid) {
246 ip.hw_ip_version_major = adev->ip_blocks[i].major; 246 ip.hw_ip_version_major = adev->ip_blocks[i].major;
247 ip.hw_ip_version_minor = adev->ip_blocks[i].minor; 247 ip.hw_ip_version_minor = adev->ip_blocks[i].minor;
248 ip.capabilities_flags = 0; 248 ip.capabilities_flags = 0;
@@ -281,7 +281,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
281 281
282 for (i = 0; i < adev->num_ip_blocks; i++) 282 for (i = 0; i < adev->num_ip_blocks; i++)
283 if (adev->ip_blocks[i].type == type && 283 if (adev->ip_blocks[i].type == type &&
284 adev->ip_block_enabled[i] && 284 adev->ip_block_status[i].valid &&
285 count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT) 285 count < AMDGPU_HW_IP_INSTANCE_MAX_COUNT)
286 count++; 286 count++;
287 287
@@ -324,16 +324,17 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
324 break; 324 break;
325 case AMDGPU_INFO_FW_GFX_RLC: 325 case AMDGPU_INFO_FW_GFX_RLC:
326 fw_info.ver = adev->gfx.rlc_fw_version; 326 fw_info.ver = adev->gfx.rlc_fw_version;
327 fw_info.feature = 0; 327 fw_info.feature = adev->gfx.rlc_feature_version;
328 break; 328 break;
329 case AMDGPU_INFO_FW_GFX_MEC: 329 case AMDGPU_INFO_FW_GFX_MEC:
330 if (info->query_fw.index == 0) 330 if (info->query_fw.index == 0) {
331 fw_info.ver = adev->gfx.mec_fw_version; 331 fw_info.ver = adev->gfx.mec_fw_version;
332 else if (info->query_fw.index == 1) 332 fw_info.feature = adev->gfx.mec_feature_version;
333 } else if (info->query_fw.index == 1) {
333 fw_info.ver = adev->gfx.mec2_fw_version; 334 fw_info.ver = adev->gfx.mec2_fw_version;
334 else 335 fw_info.feature = adev->gfx.mec2_feature_version;
336 } else
335 return -EINVAL; 337 return -EINVAL;
336 fw_info.feature = 0;
337 break; 338 break;
338 case AMDGPU_INFO_FW_SMC: 339 case AMDGPU_INFO_FW_SMC:
339 fw_info.ver = adev->pm.fw_version; 340 fw_info.ver = adev->pm.fw_version;
@@ -343,7 +344,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
343 if (info->query_fw.index >= 2) 344 if (info->query_fw.index >= 2)
344 return -EINVAL; 345 return -EINVAL;
345 fw_info.ver = adev->sdma[info->query_fw.index].fw_version; 346 fw_info.ver = adev->sdma[info->query_fw.index].fw_version;
346 fw_info.feature = 0; 347 fw_info.feature = adev->sdma[info->query_fw.index].feature_version;
347 break; 348 break;
348 default: 349 default:
349 return -EINVAL; 350 return -EINVAL;
@@ -423,7 +424,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
423 return n ? -EFAULT : 0; 424 return n ? -EFAULT : 0;
424 } 425 }
425 case AMDGPU_INFO_DEV_INFO: { 426 case AMDGPU_INFO_DEV_INFO: {
426 struct drm_amdgpu_info_device dev_info; 427 struct drm_amdgpu_info_device dev_info = {};
427 struct amdgpu_cu_info cu_info; 428 struct amdgpu_cu_info cu_info;
428 429
429 dev_info.device_id = dev->pdev->device; 430 dev_info.device_id = dev->pdev->device;
@@ -466,6 +467,7 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
466 memcpy(&dev_info.cu_bitmap[0], &cu_info.bitmap[0], sizeof(cu_info.bitmap)); 467 memcpy(&dev_info.cu_bitmap[0], &cu_info.bitmap[0], sizeof(cu_info.bitmap));
467 dev_info.vram_type = adev->mc.vram_type; 468 dev_info.vram_type = adev->mc.vram_type;
468 dev_info.vram_bit_width = adev->mc.vram_width; 469 dev_info.vram_bit_width = adev->mc.vram_width;
470 dev_info.vce_harvest_config = adev->vce.harvest_config;
469 471
470 return copy_to_user(out, &dev_info, 472 return copy_to_user(out, &dev_info,
471 min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0; 473 min((size_t)size, sizeof(dev_info))) ? -EFAULT : 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 2f7a5efa21c2..f5c22556ec2c 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -374,7 +374,7 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
374 unsigned height_in_mb = ALIGN(height / 16, 2); 374 unsigned height_in_mb = ALIGN(height / 16, 2);
375 unsigned fs_in_mb = width_in_mb * height_in_mb; 375 unsigned fs_in_mb = width_in_mb * height_in_mb;
376 376
377 unsigned image_size, tmp, min_dpb_size, num_dpb_buffer; 377 unsigned image_size, tmp, min_dpb_size, num_dpb_buffer, min_ctx_size;
378 378
379 image_size = width * height; 379 image_size = width * height;
380 image_size += image_size / 2; 380 image_size += image_size / 2;
@@ -466,6 +466,8 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
466 466
467 num_dpb_buffer = (le32_to_cpu(msg[59]) & 0xff) + 2; 467 num_dpb_buffer = (le32_to_cpu(msg[59]) & 0xff) + 2;
468 min_dpb_size = image_size * num_dpb_buffer; 468 min_dpb_size = image_size * num_dpb_buffer;
469 min_ctx_size = ((width + 255) / 16) * ((height + 255) / 16)
470 * 16 * num_dpb_buffer + 52 * 1024;
469 break; 471 break;
470 472
471 default: 473 default:
@@ -486,6 +488,7 @@ static int amdgpu_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
486 488
487 buf_sizes[0x1] = dpb_size; 489 buf_sizes[0x1] = dpb_size;
488 buf_sizes[0x2] = image_size; 490 buf_sizes[0x2] = image_size;
491 buf_sizes[0x4] = min_ctx_size;
489 return 0; 492 return 0;
490} 493}
491 494
@@ -628,6 +631,13 @@ static int amdgpu_uvd_cs_pass2(struct amdgpu_uvd_cs_ctx *ctx)
628 return -EINVAL; 631 return -EINVAL;
629 } 632 }
630 633
634 } else if (cmd == 0x206) {
635 if ((end - start) < ctx->buf_sizes[4]) {
636 DRM_ERROR("buffer (%d) to small (%d / %d)!\n", cmd,
637 (unsigned)(end - start),
638 ctx->buf_sizes[4]);
639 return -EINVAL;
640 }
631 } else if ((cmd != 0x100) && (cmd != 0x204)) { 641 } else if ((cmd != 0x100) && (cmd != 0x204)) {
632 DRM_ERROR("invalid UVD command %X!\n", cmd); 642 DRM_ERROR("invalid UVD command %X!\n", cmd);
633 return -EINVAL; 643 return -EINVAL;
@@ -755,9 +765,10 @@ int amdgpu_uvd_ring_parse_cs(struct amdgpu_cs_parser *parser, uint32_t ib_idx)
755 struct amdgpu_uvd_cs_ctx ctx = {}; 765 struct amdgpu_uvd_cs_ctx ctx = {};
756 unsigned buf_sizes[] = { 766 unsigned buf_sizes[] = {
757 [0x00000000] = 2048, 767 [0x00000000] = 2048,
758 [0x00000001] = 32 * 1024 * 1024, 768 [0x00000001] = 0xFFFFFFFF,
759 [0x00000002] = 2048 * 1152 * 3, 769 [0x00000002] = 0xFFFFFFFF,
760 [0x00000003] = 2048, 770 [0x00000003] = 2048,
771 [0x00000004] = 0xFFFFFFFF,
761 }; 772 };
762 struct amdgpu_ib *ib = &parser->ibs[ib_idx]; 773 struct amdgpu_ib *ib = &parser->ibs[ib_idx];
763 int r; 774 int r;
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index ab83cc1ca4cc..15df46c93f0a 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -500,6 +500,7 @@ static int cik_sdma_load_microcode(struct amdgpu_device *adev)
500 amdgpu_ucode_print_sdma_hdr(&hdr->header); 500 amdgpu_ucode_print_sdma_hdr(&hdr->header);
501 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; 501 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
502 adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version); 502 adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
503 adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
503 fw_data = (const __le32 *) 504 fw_data = (const __le32 *)
504 (adev->sdma[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 505 (adev->sdma[i].fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));
505 WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0); 506 WREG32(mmSDMA0_UCODE_ADDR + sdma_offsets[i], 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index 1a2d419cbf16..ace870afc7d4 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -494,29 +494,67 @@ static void cz_dpm_fini(struct amdgpu_device *adev)
494 amdgpu_free_extended_power_table(adev); 494 amdgpu_free_extended_power_table(adev);
495} 495}
496 496
497#define ixSMUSVI_NB_CURRENTVID 0xD8230044
498#define CURRENT_NB_VID_MASK 0xff000000
499#define CURRENT_NB_VID__SHIFT 24
500#define ixSMUSVI_GFX_CURRENTVID 0xD8230048
501#define CURRENT_GFX_VID_MASK 0xff000000
502#define CURRENT_GFX_VID__SHIFT 24
503
497static void 504static void
498cz_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev, 505cz_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
499 struct seq_file *m) 506 struct seq_file *m)
500{ 507{
508 struct cz_power_info *pi = cz_get_pi(adev);
501 struct amdgpu_clock_voltage_dependency_table *table = 509 struct amdgpu_clock_voltage_dependency_table *table =
502 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; 510 &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk;
503 u32 current_index = 511 struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table =
504 (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) & 512 &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table;
505 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >> 513 struct amdgpu_vce_clock_voltage_dependency_table *vce_table =
506 TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT; 514 &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
507 u32 sclk, tmp; 515 u32 sclk_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX),
508 u16 vddc; 516 TARGET_AND_CURRENT_PROFILE_INDEX, CURR_SCLK_INDEX);
509 517 u32 uvd_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
510 if (current_index >= NUM_SCLK_LEVELS) { 518 TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_UVD_INDEX);
511 seq_printf(m, "invalid dpm profile %d\n", current_index); 519 u32 vce_index = REG_GET_FIELD(RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX_2),
520 TARGET_AND_CURRENT_PROFILE_INDEX_2, CURR_VCE_INDEX);
521 u32 sclk, vclk, dclk, ecclk, tmp;
522 u16 vddnb, vddgfx;
523
524 if (sclk_index >= NUM_SCLK_LEVELS) {
525 seq_printf(m, "invalid sclk dpm profile %d\n", sclk_index);
512 } else { 526 } else {
513 sclk = table->entries[current_index].clk; 527 sclk = table->entries[sclk_index].clk;
514 tmp = (RREG32_SMC(ixSMU_VOLTAGE_STATUS) & 528 seq_printf(m, "%u sclk: %u\n", sclk_index, sclk);
515 SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL_MASK) >> 529 }
516 SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL__SHIFT; 530
517 vddc = cz_convert_8bit_index_to_voltage(adev, (u16)tmp); 531 tmp = (RREG32_SMC(ixSMUSVI_NB_CURRENTVID) &
518 seq_printf(m, "power level %d sclk: %u vddc: %u\n", 532 CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT;
519 current_index, sclk, vddc); 533 vddnb = cz_convert_8bit_index_to_voltage(adev, (u16)tmp);
534 tmp = (RREG32_SMC(ixSMUSVI_GFX_CURRENTVID) &
535 CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT;
536 vddgfx = cz_convert_8bit_index_to_voltage(adev, (u16)tmp);
537 seq_printf(m, "vddnb: %u vddgfx: %u\n", vddnb, vddgfx);
538
539 seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en");
540 if (!pi->uvd_power_gated) {
541 if (uvd_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
542 seq_printf(m, "invalid uvd dpm level %d\n", uvd_index);
543 } else {
544 vclk = uvd_table->entries[uvd_index].vclk;
545 dclk = uvd_table->entries[uvd_index].dclk;
546 seq_printf(m, "%u uvd vclk: %u dclk: %u\n", uvd_index, vclk, dclk);
547 }
548 }
549
550 seq_printf(m, "vce %sabled\n", pi->vce_power_gated ? "dis" : "en");
551 if (!pi->vce_power_gated) {
552 if (vce_index >= CZ_MAX_HARDWARE_POWERLEVELS) {
553 seq_printf(m, "invalid vce dpm level %d\n", vce_index);
554 } else {
555 ecclk = vce_table->entries[vce_index].ecclk;
556 seq_printf(m, "%u vce ecclk: %u\n", vce_index, ecclk);
557 }
520 } 558 }
521} 559}
522 560
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
index 6e77964f1b64..e70a26f587a0 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c
@@ -2632,6 +2632,7 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2632 struct drm_device *dev = crtc->dev; 2632 struct drm_device *dev = crtc->dev;
2633 struct amdgpu_device *adev = dev->dev_private; 2633 struct amdgpu_device *adev = dev->dev_private;
2634 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2634 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2635 unsigned type;
2635 2636
2636 switch (mode) { 2637 switch (mode) {
2637 case DRM_MODE_DPMS_ON: 2638 case DRM_MODE_DPMS_ON:
@@ -2640,6 +2641,9 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2640 dce_v10_0_vga_enable(crtc, true); 2641 dce_v10_0_vga_enable(crtc, true);
2641 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); 2642 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
2642 dce_v10_0_vga_enable(crtc, false); 2643 dce_v10_0_vga_enable(crtc, false);
2644 /* Make sure VBLANK interrupt is still enabled */
2645 type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
2646 amdgpu_irq_update(adev, &adev->crtc_irq, type);
2643 drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id); 2647 drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
2644 dce_v10_0_crtc_load_lut(crtc); 2648 dce_v10_0_crtc_load_lut(crtc);
2645 break; 2649 break;
diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
index 7f7abb0e0be5..dcb402ee048a 100644
--- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c
@@ -2631,6 +2631,7 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2631 struct drm_device *dev = crtc->dev; 2631 struct drm_device *dev = crtc->dev;
2632 struct amdgpu_device *adev = dev->dev_private; 2632 struct amdgpu_device *adev = dev->dev_private;
2633 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc); 2633 struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
2634 unsigned type;
2634 2635
2635 switch (mode) { 2636 switch (mode) {
2636 case DRM_MODE_DPMS_ON: 2637 case DRM_MODE_DPMS_ON:
@@ -2639,6 +2640,9 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode)
2639 dce_v11_0_vga_enable(crtc, true); 2640 dce_v11_0_vga_enable(crtc, true);
2640 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE); 2641 amdgpu_atombios_crtc_blank(crtc, ATOM_DISABLE);
2641 dce_v11_0_vga_enable(crtc, false); 2642 dce_v11_0_vga_enable(crtc, false);
2643 /* Make sure VBLANK interrupt is still enabled */
2644 type = amdgpu_crtc_idx_to_irq_type(adev, amdgpu_crtc->crtc_id);
2645 amdgpu_irq_update(adev, &adev->crtc_irq, type);
2642 drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id); 2646 drm_vblank_post_modeset(dev, amdgpu_crtc->crtc_id);
2643 dce_v11_0_crtc_load_lut(crtc); 2647 dce_v11_0_crtc_load_lut(crtc);
2644 break; 2648 break;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 2c188fb9fd22..0d8bf2cb1956 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -2561,7 +2561,7 @@ static bool gfx_v7_0_ring_emit_semaphore(struct amdgpu_ring *ring,
2561 * sheduling on the ring. This function schedules the IB 2561 * sheduling on the ring. This function schedules the IB
2562 * on the gfx ring for execution by the GPU. 2562 * on the gfx ring for execution by the GPU.
2563 */ 2563 */
2564static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring, 2564static void gfx_v7_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
2565 struct amdgpu_ib *ib) 2565 struct amdgpu_ib *ib)
2566{ 2566{
2567 bool need_ctx_switch = ring->current_ctx != ib->ctx; 2567 bool need_ctx_switch = ring->current_ctx != ib->ctx;
@@ -2569,15 +2569,10 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
2569 u32 next_rptr = ring->wptr + 5; 2569 u32 next_rptr = ring->wptr + 5;
2570 2570
2571 /* drop the CE preamble IB for the same context */ 2571 /* drop the CE preamble IB for the same context */
2572 if ((ring->type == AMDGPU_RING_TYPE_GFX) && 2572 if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && !need_ctx_switch)
2573 (ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
2574 !need_ctx_switch)
2575 return; 2573 return;
2576 2574
2577 if (ring->type == AMDGPU_RING_TYPE_COMPUTE) 2575 if (need_ctx_switch)
2578 control |= INDIRECT_BUFFER_VALID;
2579
2580 if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX)
2581 next_rptr += 2; 2576 next_rptr += 2;
2582 2577
2583 next_rptr += 4; 2578 next_rptr += 4;
@@ -2588,7 +2583,7 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
2588 amdgpu_ring_write(ring, next_rptr); 2583 amdgpu_ring_write(ring, next_rptr);
2589 2584
2590 /* insert SWITCH_BUFFER packet before first IB in the ring frame */ 2585 /* insert SWITCH_BUFFER packet before first IB in the ring frame */
2591 if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX) { 2586 if (need_ctx_switch) {
2592 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); 2587 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
2593 amdgpu_ring_write(ring, 0); 2588 amdgpu_ring_write(ring, 0);
2594 } 2589 }
@@ -2611,6 +2606,35 @@ static void gfx_v7_0_ring_emit_ib(struct amdgpu_ring *ring,
2611 amdgpu_ring_write(ring, control); 2606 amdgpu_ring_write(ring, control);
2612} 2607}
2613 2608
2609static void gfx_v7_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
2610 struct amdgpu_ib *ib)
2611{
2612 u32 header, control = 0;
2613 u32 next_rptr = ring->wptr + 5;
2614
2615 control |= INDIRECT_BUFFER_VALID;
2616 next_rptr += 4;
2617 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
2618 amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
2619 amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
2620 amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
2621 amdgpu_ring_write(ring, next_rptr);
2622
2623 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
2624
2625 control |= ib->length_dw |
2626 (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
2627
2628 amdgpu_ring_write(ring, header);
2629 amdgpu_ring_write(ring,
2630#ifdef __BIG_ENDIAN
2631 (2 << 0) |
2632#endif
2633 (ib->gpu_addr & 0xFFFFFFFC));
2634 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
2635 amdgpu_ring_write(ring, control);
2636}
2637
2614/** 2638/**
2615 * gfx_v7_0_ring_test_ib - basic ring IB test 2639 * gfx_v7_0_ring_test_ib - basic ring IB test
2616 * 2640 *
@@ -3056,6 +3080,8 @@ static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev)
3056 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; 3080 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
3057 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header); 3081 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
3058 adev->gfx.mec_fw_version = le32_to_cpu(mec_hdr->header.ucode_version); 3082 adev->gfx.mec_fw_version = le32_to_cpu(mec_hdr->header.ucode_version);
3083 adev->gfx.mec_feature_version = le32_to_cpu(
3084 mec_hdr->ucode_feature_version);
3059 3085
3060 gfx_v7_0_cp_compute_enable(adev, false); 3086 gfx_v7_0_cp_compute_enable(adev, false);
3061 3087
@@ -3078,6 +3104,8 @@ static int gfx_v7_0_cp_compute_load_microcode(struct amdgpu_device *adev)
3078 mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data; 3104 mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
3079 amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header); 3105 amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header);
3080 adev->gfx.mec2_fw_version = le32_to_cpu(mec2_hdr->header.ucode_version); 3106 adev->gfx.mec2_fw_version = le32_to_cpu(mec2_hdr->header.ucode_version);
3107 adev->gfx.mec2_feature_version = le32_to_cpu(
3108 mec2_hdr->ucode_feature_version);
3081 3109
3082 /* MEC2 */ 3110 /* MEC2 */
3083 fw_data = (const __le32 *) 3111 fw_data = (const __le32 *)
@@ -4042,6 +4070,8 @@ static int gfx_v7_0_rlc_resume(struct amdgpu_device *adev)
4042 hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data; 4070 hdr = (const struct rlc_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
4043 amdgpu_ucode_print_rlc_hdr(&hdr->header); 4071 amdgpu_ucode_print_rlc_hdr(&hdr->header);
4044 adev->gfx.rlc_fw_version = le32_to_cpu(hdr->header.ucode_version); 4072 adev->gfx.rlc_fw_version = le32_to_cpu(hdr->header.ucode_version);
4073 adev->gfx.rlc_feature_version = le32_to_cpu(
4074 hdr->ucode_feature_version);
4045 4075
4046 gfx_v7_0_rlc_stop(adev); 4076 gfx_v7_0_rlc_stop(adev);
4047 4077
@@ -5098,7 +5128,7 @@ static void gfx_v7_0_print_status(void *handle)
5098 dev_info(adev->dev, " CP_HPD_EOP_CONTROL=0x%08X\n", 5128 dev_info(adev->dev, " CP_HPD_EOP_CONTROL=0x%08X\n",
5099 RREG32(mmCP_HPD_EOP_CONTROL)); 5129 RREG32(mmCP_HPD_EOP_CONTROL));
5100 5130
5101 for (queue = 0; queue < 8; i++) { 5131 for (queue = 0; queue < 8; queue++) {
5102 cik_srbm_select(adev, me, pipe, queue, 0); 5132 cik_srbm_select(adev, me, pipe, queue, 0);
5103 dev_info(adev->dev, " queue: %d\n", queue); 5133 dev_info(adev->dev, " queue: %d\n", queue);
5104 dev_info(adev->dev, " CP_PQ_WPTR_POLL_CNTL=0x%08X\n", 5134 dev_info(adev->dev, " CP_PQ_WPTR_POLL_CNTL=0x%08X\n",
@@ -5555,7 +5585,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_gfx = {
5555 .get_wptr = gfx_v7_0_ring_get_wptr_gfx, 5585 .get_wptr = gfx_v7_0_ring_get_wptr_gfx,
5556 .set_wptr = gfx_v7_0_ring_set_wptr_gfx, 5586 .set_wptr = gfx_v7_0_ring_set_wptr_gfx,
5557 .parse_cs = NULL, 5587 .parse_cs = NULL,
5558 .emit_ib = gfx_v7_0_ring_emit_ib, 5588 .emit_ib = gfx_v7_0_ring_emit_ib_gfx,
5559 .emit_fence = gfx_v7_0_ring_emit_fence_gfx, 5589 .emit_fence = gfx_v7_0_ring_emit_fence_gfx,
5560 .emit_semaphore = gfx_v7_0_ring_emit_semaphore, 5590 .emit_semaphore = gfx_v7_0_ring_emit_semaphore,
5561 .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush, 5591 .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
@@ -5571,7 +5601,7 @@ static const struct amdgpu_ring_funcs gfx_v7_0_ring_funcs_compute = {
5571 .get_wptr = gfx_v7_0_ring_get_wptr_compute, 5601 .get_wptr = gfx_v7_0_ring_get_wptr_compute,
5572 .set_wptr = gfx_v7_0_ring_set_wptr_compute, 5602 .set_wptr = gfx_v7_0_ring_set_wptr_compute,
5573 .parse_cs = NULL, 5603 .parse_cs = NULL,
5574 .emit_ib = gfx_v7_0_ring_emit_ib, 5604 .emit_ib = gfx_v7_0_ring_emit_ib_compute,
5575 .emit_fence = gfx_v7_0_ring_emit_fence_compute, 5605 .emit_fence = gfx_v7_0_ring_emit_fence_compute,
5576 .emit_semaphore = gfx_v7_0_ring_emit_semaphore, 5606 .emit_semaphore = gfx_v7_0_ring_emit_semaphore,
5577 .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush, 5607 .emit_vm_flush = gfx_v7_0_ring_emit_vm_flush,
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 1c7c992dea37..20e2cfd521d5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -587,6 +587,7 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
587 int err; 587 int err;
588 struct amdgpu_firmware_info *info = NULL; 588 struct amdgpu_firmware_info *info = NULL;
589 const struct common_firmware_header *header = NULL; 589 const struct common_firmware_header *header = NULL;
590 const struct gfx_firmware_header_v1_0 *cp_hdr;
590 591
591 DRM_DEBUG("\n"); 592 DRM_DEBUG("\n");
592 593
@@ -611,6 +612,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
611 err = amdgpu_ucode_validate(adev->gfx.pfp_fw); 612 err = amdgpu_ucode_validate(adev->gfx.pfp_fw);
612 if (err) 613 if (err)
613 goto out; 614 goto out;
615 cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
616 adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
617 adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
614 618
615 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name); 619 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_me.bin", chip_name);
616 err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev); 620 err = request_firmware(&adev->gfx.me_fw, fw_name, adev->dev);
@@ -619,6 +623,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
619 err = amdgpu_ucode_validate(adev->gfx.me_fw); 623 err = amdgpu_ucode_validate(adev->gfx.me_fw);
620 if (err) 624 if (err)
621 goto out; 625 goto out;
626 cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
627 adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
628 adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
622 629
623 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name); 630 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name);
624 err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev); 631 err = request_firmware(&adev->gfx.ce_fw, fw_name, adev->dev);
@@ -627,12 +634,18 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
627 err = amdgpu_ucode_validate(adev->gfx.ce_fw); 634 err = amdgpu_ucode_validate(adev->gfx.ce_fw);
628 if (err) 635 if (err)
629 goto out; 636 goto out;
637 cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
638 adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
639 adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
630 640
631 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name); 641 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_rlc.bin", chip_name);
632 err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev); 642 err = request_firmware(&adev->gfx.rlc_fw, fw_name, adev->dev);
633 if (err) 643 if (err)
634 goto out; 644 goto out;
635 err = amdgpu_ucode_validate(adev->gfx.rlc_fw); 645 err = amdgpu_ucode_validate(adev->gfx.rlc_fw);
646 cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.rlc_fw->data;
647 adev->gfx.rlc_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
648 adev->gfx.rlc_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
636 649
637 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name); 650 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec.bin", chip_name);
638 err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev); 651 err = request_firmware(&adev->gfx.mec_fw, fw_name, adev->dev);
@@ -641,6 +654,9 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
641 err = amdgpu_ucode_validate(adev->gfx.mec_fw); 654 err = amdgpu_ucode_validate(adev->gfx.mec_fw);
642 if (err) 655 if (err)
643 goto out; 656 goto out;
657 cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
658 adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
659 adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
644 660
645 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name); 661 snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mec2.bin", chip_name);
646 err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev); 662 err = request_firmware(&adev->gfx.mec2_fw, fw_name, adev->dev);
@@ -648,6 +664,12 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev)
648 err = amdgpu_ucode_validate(adev->gfx.mec2_fw); 664 err = amdgpu_ucode_validate(adev->gfx.mec2_fw);
649 if (err) 665 if (err)
650 goto out; 666 goto out;
667 cp_hdr = (const struct gfx_firmware_header_v1_0 *)
668 adev->gfx.mec2_fw->data;
669 adev->gfx.mec2_fw_version = le32_to_cpu(
670 cp_hdr->header.ucode_version);
671 adev->gfx.mec2_feature_version = le32_to_cpu(
672 cp_hdr->ucode_feature_version);
651 } else { 673 } else {
652 err = 0; 674 err = 0;
653 adev->gfx.mec2_fw = NULL; 675 adev->gfx.mec2_fw = NULL;
@@ -1983,6 +2005,7 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
1983 adev->gfx.config.max_shader_engines = 1; 2005 adev->gfx.config.max_shader_engines = 1;
1984 adev->gfx.config.max_tile_pipes = 2; 2006 adev->gfx.config.max_tile_pipes = 2;
1985 adev->gfx.config.max_sh_per_se = 1; 2007 adev->gfx.config.max_sh_per_se = 1;
2008 adev->gfx.config.max_backends_per_se = 2;
1986 2009
1987 switch (adev->pdev->revision) { 2010 switch (adev->pdev->revision) {
1988 case 0xc4: 2011 case 0xc4:
@@ -1991,7 +2014,6 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
1991 case 0xcc: 2014 case 0xcc:
1992 /* B10 */ 2015 /* B10 */
1993 adev->gfx.config.max_cu_per_sh = 8; 2016 adev->gfx.config.max_cu_per_sh = 8;
1994 adev->gfx.config.max_backends_per_se = 2;
1995 break; 2017 break;
1996 case 0xc5: 2018 case 0xc5:
1997 case 0x81: 2019 case 0x81:
@@ -2000,14 +2022,12 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
2000 case 0xcd: 2022 case 0xcd:
2001 /* B8 */ 2023 /* B8 */
2002 adev->gfx.config.max_cu_per_sh = 6; 2024 adev->gfx.config.max_cu_per_sh = 6;
2003 adev->gfx.config.max_backends_per_se = 2;
2004 break; 2025 break;
2005 case 0xc6: 2026 case 0xc6:
2006 case 0xca: 2027 case 0xca:
2007 case 0xce: 2028 case 0xce:
2008 /* B6 */ 2029 /* B6 */
2009 adev->gfx.config.max_cu_per_sh = 6; 2030 adev->gfx.config.max_cu_per_sh = 6;
2010 adev->gfx.config.max_backends_per_se = 2;
2011 break; 2031 break;
2012 case 0xc7: 2032 case 0xc7:
2013 case 0x87: 2033 case 0x87:
@@ -2015,7 +2035,6 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
2015 default: 2035 default:
2016 /* B4 */ 2036 /* B4 */
2017 adev->gfx.config.max_cu_per_sh = 4; 2037 adev->gfx.config.max_cu_per_sh = 4;
2018 adev->gfx.config.max_backends_per_se = 1;
2019 break; 2038 break;
2020 } 2039 }
2021 2040
@@ -2275,7 +2294,6 @@ static int gfx_v8_0_rlc_load_microcode(struct amdgpu_device *adev)
2275 2294
2276 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data; 2295 hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
2277 amdgpu_ucode_print_rlc_hdr(&hdr->header); 2296 amdgpu_ucode_print_rlc_hdr(&hdr->header);
2278 adev->gfx.rlc_fw_version = le32_to_cpu(hdr->header.ucode_version);
2279 2297
2280 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data + 2298 fw_data = (const __le32 *)(adev->gfx.rlc_fw->data +
2281 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 2299 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
@@ -2361,12 +2379,6 @@ static int gfx_v8_0_cp_gfx_load_microcode(struct amdgpu_device *adev)
2361 amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header); 2379 amdgpu_ucode_print_gfx_hdr(&pfp_hdr->header);
2362 amdgpu_ucode_print_gfx_hdr(&ce_hdr->header); 2380 amdgpu_ucode_print_gfx_hdr(&ce_hdr->header);
2363 amdgpu_ucode_print_gfx_hdr(&me_hdr->header); 2381 amdgpu_ucode_print_gfx_hdr(&me_hdr->header);
2364 adev->gfx.pfp_fw_version = le32_to_cpu(pfp_hdr->header.ucode_version);
2365 adev->gfx.ce_fw_version = le32_to_cpu(ce_hdr->header.ucode_version);
2366 adev->gfx.me_fw_version = le32_to_cpu(me_hdr->header.ucode_version);
2367 adev->gfx.me_feature_version = le32_to_cpu(me_hdr->ucode_feature_version);
2368 adev->gfx.ce_feature_version = le32_to_cpu(ce_hdr->ucode_feature_version);
2369 adev->gfx.pfp_feature_version = le32_to_cpu(pfp_hdr->ucode_feature_version);
2370 2382
2371 gfx_v8_0_cp_gfx_enable(adev, false); 2383 gfx_v8_0_cp_gfx_enable(adev, false);
2372 2384
@@ -2622,7 +2634,6 @@ static int gfx_v8_0_cp_compute_load_microcode(struct amdgpu_device *adev)
2622 2634
2623 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data; 2635 mec_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
2624 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header); 2636 amdgpu_ucode_print_gfx_hdr(&mec_hdr->header);
2625 adev->gfx.mec_fw_version = le32_to_cpu(mec_hdr->header.ucode_version);
2626 2637
2627 fw_data = (const __le32 *) 2638 fw_data = (const __le32 *)
2628 (adev->gfx.mec_fw->data + 2639 (adev->gfx.mec_fw->data +
@@ -2641,7 +2652,6 @@ static int gfx_v8_0_cp_compute_load_microcode(struct amdgpu_device *adev)
2641 2652
2642 mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data; 2653 mec2_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec2_fw->data;
2643 amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header); 2654 amdgpu_ucode_print_gfx_hdr(&mec2_hdr->header);
2644 adev->gfx.mec2_fw_version = le32_to_cpu(mec2_hdr->header.ucode_version);
2645 2655
2646 fw_data = (const __le32 *) 2656 fw_data = (const __le32 *)
2647 (adev->gfx.mec2_fw->data + 2657 (adev->gfx.mec2_fw->data +
@@ -3125,7 +3135,7 @@ static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev)
3125 WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER, 3135 WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER,
3126 AMDGPU_DOORBELL_KIQ << 2); 3136 AMDGPU_DOORBELL_KIQ << 2);
3127 WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER, 3137 WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER,
3128 0x7FFFF << 2); 3138 AMDGPU_DOORBELL_MEC_RING7 << 2);
3129 } 3139 }
3130 tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL); 3140 tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL);
3131 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, 3141 tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL,
@@ -3753,7 +3763,7 @@ static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring)
3753 amdgpu_ring_write(ring, 0x20); /* poll interval */ 3763 amdgpu_ring_write(ring, 0x20); /* poll interval */
3754} 3764}
3755 3765
3756static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring, 3766static void gfx_v8_0_ring_emit_ib_gfx(struct amdgpu_ring *ring,
3757 struct amdgpu_ib *ib) 3767 struct amdgpu_ib *ib)
3758{ 3768{
3759 bool need_ctx_switch = ring->current_ctx != ib->ctx; 3769 bool need_ctx_switch = ring->current_ctx != ib->ctx;
@@ -3761,15 +3771,10 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring,
3761 u32 next_rptr = ring->wptr + 5; 3771 u32 next_rptr = ring->wptr + 5;
3762 3772
3763 /* drop the CE preamble IB for the same context */ 3773 /* drop the CE preamble IB for the same context */
3764 if ((ring->type == AMDGPU_RING_TYPE_GFX) && 3774 if ((ib->flags & AMDGPU_IB_FLAG_PREAMBLE) && !need_ctx_switch)
3765 (ib->flags & AMDGPU_IB_FLAG_PREAMBLE) &&
3766 !need_ctx_switch)
3767 return; 3775 return;
3768 3776
3769 if (ring->type == AMDGPU_RING_TYPE_COMPUTE) 3777 if (need_ctx_switch)
3770 control |= INDIRECT_BUFFER_VALID;
3771
3772 if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX)
3773 next_rptr += 2; 3778 next_rptr += 2;
3774 3779
3775 next_rptr += 4; 3780 next_rptr += 4;
@@ -3780,7 +3785,7 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring,
3780 amdgpu_ring_write(ring, next_rptr); 3785 amdgpu_ring_write(ring, next_rptr);
3781 3786
3782 /* insert SWITCH_BUFFER packet before first IB in the ring frame */ 3787 /* insert SWITCH_BUFFER packet before first IB in the ring frame */
3783 if (need_ctx_switch && ring->type == AMDGPU_RING_TYPE_GFX) { 3788 if (need_ctx_switch) {
3784 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); 3789 amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0));
3785 amdgpu_ring_write(ring, 0); 3790 amdgpu_ring_write(ring, 0);
3786 } 3791 }
@@ -3803,6 +3808,36 @@ static void gfx_v8_0_ring_emit_ib(struct amdgpu_ring *ring,
3803 amdgpu_ring_write(ring, control); 3808 amdgpu_ring_write(ring, control);
3804} 3809}
3805 3810
3811static void gfx_v8_0_ring_emit_ib_compute(struct amdgpu_ring *ring,
3812 struct amdgpu_ib *ib)
3813{
3814 u32 header, control = 0;
3815 u32 next_rptr = ring->wptr + 5;
3816
3817 control |= INDIRECT_BUFFER_VALID;
3818
3819 next_rptr += 4;
3820 amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3));
3821 amdgpu_ring_write(ring, WRITE_DATA_DST_SEL(5) | WR_CONFIRM);
3822 amdgpu_ring_write(ring, ring->next_rptr_gpu_addr & 0xfffffffc);
3823 amdgpu_ring_write(ring, upper_32_bits(ring->next_rptr_gpu_addr) & 0xffffffff);
3824 amdgpu_ring_write(ring, next_rptr);
3825
3826 header = PACKET3(PACKET3_INDIRECT_BUFFER, 2);
3827
3828 control |= ib->length_dw |
3829 (ib->vm ? (ib->vm->ids[ring->idx].id << 24) : 0);
3830
3831 amdgpu_ring_write(ring, header);
3832 amdgpu_ring_write(ring,
3833#ifdef __BIG_ENDIAN
3834 (2 << 0) |
3835#endif
3836 (ib->gpu_addr & 0xFFFFFFFC));
3837 amdgpu_ring_write(ring, upper_32_bits(ib->gpu_addr) & 0xFFFF);
3838 amdgpu_ring_write(ring, control);
3839}
3840
3806static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr, 3841static void gfx_v8_0_ring_emit_fence_gfx(struct amdgpu_ring *ring, u64 addr,
3807 u64 seq, unsigned flags) 3842 u64 seq, unsigned flags)
3808{ 3843{
@@ -4224,7 +4259,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = {
4224 .get_wptr = gfx_v8_0_ring_get_wptr_gfx, 4259 .get_wptr = gfx_v8_0_ring_get_wptr_gfx,
4225 .set_wptr = gfx_v8_0_ring_set_wptr_gfx, 4260 .set_wptr = gfx_v8_0_ring_set_wptr_gfx,
4226 .parse_cs = NULL, 4261 .parse_cs = NULL,
4227 .emit_ib = gfx_v8_0_ring_emit_ib, 4262 .emit_ib = gfx_v8_0_ring_emit_ib_gfx,
4228 .emit_fence = gfx_v8_0_ring_emit_fence_gfx, 4263 .emit_fence = gfx_v8_0_ring_emit_fence_gfx,
4229 .emit_semaphore = gfx_v8_0_ring_emit_semaphore, 4264 .emit_semaphore = gfx_v8_0_ring_emit_semaphore,
4230 .emit_vm_flush = gfx_v8_0_ring_emit_vm_flush, 4265 .emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
@@ -4240,7 +4275,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = {
4240 .get_wptr = gfx_v8_0_ring_get_wptr_compute, 4275 .get_wptr = gfx_v8_0_ring_get_wptr_compute,
4241 .set_wptr = gfx_v8_0_ring_set_wptr_compute, 4276 .set_wptr = gfx_v8_0_ring_set_wptr_compute,
4242 .parse_cs = NULL, 4277 .parse_cs = NULL,
4243 .emit_ib = gfx_v8_0_ring_emit_ib, 4278 .emit_ib = gfx_v8_0_ring_emit_ib_compute,
4244 .emit_fence = gfx_v8_0_ring_emit_fence_compute, 4279 .emit_fence = gfx_v8_0_ring_emit_fence_compute,
4245 .emit_semaphore = gfx_v8_0_ring_emit_semaphore, 4280 .emit_semaphore = gfx_v8_0_ring_emit_semaphore,
4246 .emit_vm_flush = gfx_v8_0_ring_emit_vm_flush, 4281 .emit_vm_flush = gfx_v8_0_ring_emit_vm_flush,
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
index d7895885fe0c..a988dfb1d394 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c
@@ -121,6 +121,7 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
121 int err, i; 121 int err, i;
122 struct amdgpu_firmware_info *info = NULL; 122 struct amdgpu_firmware_info *info = NULL;
123 const struct common_firmware_header *header = NULL; 123 const struct common_firmware_header *header = NULL;
124 const struct sdma_firmware_header_v1_0 *hdr;
124 125
125 DRM_DEBUG("\n"); 126 DRM_DEBUG("\n");
126 127
@@ -142,6 +143,9 @@ static int sdma_v2_4_init_microcode(struct amdgpu_device *adev)
142 err = amdgpu_ucode_validate(adev->sdma[i].fw); 143 err = amdgpu_ucode_validate(adev->sdma[i].fw);
143 if (err) 144 if (err)
144 goto out; 145 goto out;
146 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
147 adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
148 adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
145 149
146 if (adev->firmware.smu_load) { 150 if (adev->firmware.smu_load) {
147 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i]; 151 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
@@ -541,8 +545,6 @@ static int sdma_v2_4_load_microcode(struct amdgpu_device *adev)
541 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data; 545 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
542 amdgpu_ucode_print_sdma_hdr(&hdr->header); 546 amdgpu_ucode_print_sdma_hdr(&hdr->header);
543 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; 547 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
544 adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
545
546 fw_data = (const __le32 *) 548 fw_data = (const __le32 *)
547 (adev->sdma[i].fw->data + 549 (adev->sdma[i].fw->data +
548 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 550 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
index 7bb37b93993f..2b86569b18d3 100644
--- a/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c
@@ -159,6 +159,7 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
159 int err, i; 159 int err, i;
160 struct amdgpu_firmware_info *info = NULL; 160 struct amdgpu_firmware_info *info = NULL;
161 const struct common_firmware_header *header = NULL; 161 const struct common_firmware_header *header = NULL;
162 const struct sdma_firmware_header_v1_0 *hdr;
162 163
163 DRM_DEBUG("\n"); 164 DRM_DEBUG("\n");
164 165
@@ -183,6 +184,9 @@ static int sdma_v3_0_init_microcode(struct amdgpu_device *adev)
183 err = amdgpu_ucode_validate(adev->sdma[i].fw); 184 err = amdgpu_ucode_validate(adev->sdma[i].fw);
184 if (err) 185 if (err)
185 goto out; 186 goto out;
187 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
188 adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
189 adev->sdma[i].feature_version = le32_to_cpu(hdr->ucode_feature_version);
186 190
187 if (adev->firmware.smu_load) { 191 if (adev->firmware.smu_load) {
188 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i]; 192 info = &adev->firmware.ucode[AMDGPU_UCODE_ID_SDMA0 + i];
@@ -630,8 +634,6 @@ static int sdma_v3_0_load_microcode(struct amdgpu_device *adev)
630 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data; 634 hdr = (const struct sdma_firmware_header_v1_0 *)adev->sdma[i].fw->data;
631 amdgpu_ucode_print_sdma_hdr(&hdr->header); 635 amdgpu_ucode_print_sdma_hdr(&hdr->header);
632 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4; 636 fw_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
633 adev->sdma[i].fw_version = le32_to_cpu(hdr->header.ucode_version);
634
635 fw_data = (const __le32 *) 637 fw_data = (const __le32 *)
636 (adev->sdma[i].fw->data + 638 (adev->sdma[i].fw->data +
637 le32_to_cpu(hdr->header.ucode_array_offset_bytes)); 639 le32_to_cpu(hdr->header.ucode_array_offset_bytes));
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index d62c4002e39c..d1064ca3670e 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -35,6 +35,8 @@
35#include "oss/oss_2_0_d.h" 35#include "oss/oss_2_0_d.h"
36#include "oss/oss_2_0_sh_mask.h" 36#include "oss/oss_2_0_sh_mask.h"
37#include "gca/gfx_8_0_d.h" 37#include "gca/gfx_8_0_d.h"
38#include "smu/smu_7_1_2_d.h"
39#include "smu/smu_7_1_2_sh_mask.h"
38 40
39#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04 41#define GRBM_GFX_INDEX__VCE_INSTANCE__SHIFT 0x04
40#define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10 42#define GRBM_GFX_INDEX__VCE_INSTANCE_MASK 0x10
@@ -112,6 +114,10 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
112 114
113 mutex_lock(&adev->grbm_idx_mutex); 115 mutex_lock(&adev->grbm_idx_mutex);
114 for (idx = 0; idx < 2; ++idx) { 116 for (idx = 0; idx < 2; ++idx) {
117
118 if (adev->vce.harvest_config & (1 << idx))
119 continue;
120
115 if(idx == 0) 121 if(idx == 0)
116 WREG32_P(mmGRBM_GFX_INDEX, 0, 122 WREG32_P(mmGRBM_GFX_INDEX, 0,
117 ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK); 123 ~GRBM_GFX_INDEX__VCE_INSTANCE_MASK);
@@ -190,10 +196,52 @@ static int vce_v3_0_start(struct amdgpu_device *adev)
190 return 0; 196 return 0;
191} 197}
192 198
199#define ixVCE_HARVEST_FUSE_MACRO__ADDRESS 0xC0014074
200#define VCE_HARVEST_FUSE_MACRO__SHIFT 27
201#define VCE_HARVEST_FUSE_MACRO__MASK 0x18000000
202
203static unsigned vce_v3_0_get_harvest_config(struct amdgpu_device *adev)
204{
205 u32 tmp;
206 unsigned ret;
207
208 if (adev->flags & AMDGPU_IS_APU)
209 tmp = (RREG32_SMC(ixVCE_HARVEST_FUSE_MACRO__ADDRESS) &
210 VCE_HARVEST_FUSE_MACRO__MASK) >>
211 VCE_HARVEST_FUSE_MACRO__SHIFT;
212 else
213 tmp = (RREG32_SMC(ixCC_HARVEST_FUSES) &
214 CC_HARVEST_FUSES__VCE_DISABLE_MASK) >>
215 CC_HARVEST_FUSES__VCE_DISABLE__SHIFT;
216
217 switch (tmp) {
218 case 1:
219 ret = AMDGPU_VCE_HARVEST_VCE0;
220 break;
221 case 2:
222 ret = AMDGPU_VCE_HARVEST_VCE1;
223 break;
224 case 3:
225 ret = AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1;
226 break;
227 default:
228 ret = 0;
229 }
230
231 return ret;
232}
233
193static int vce_v3_0_early_init(void *handle) 234static int vce_v3_0_early_init(void *handle)
194{ 235{
195 struct amdgpu_device *adev = (struct amdgpu_device *)handle; 236 struct amdgpu_device *adev = (struct amdgpu_device *)handle;
196 237
238 adev->vce.harvest_config = vce_v3_0_get_harvest_config(adev);
239
240 if ((adev->vce.harvest_config &
241 (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1)) ==
242 (AMDGPU_VCE_HARVEST_VCE0 | AMDGPU_VCE_HARVEST_VCE1))
243 return -ENOENT;
244
197 vce_v3_0_set_ring_funcs(adev); 245 vce_v3_0_set_ring_funcs(adev);
198 vce_v3_0_set_irq_funcs(adev); 246 vce_v3_0_set_irq_funcs(adev);
199 247
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index 8b8fe3762ca9..9f6e234e7029 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -357,6 +357,7 @@ int atmel_hlcdc_crtc_create(struct drm_device *dev)
357 planes->overlays[i]->base.possible_crtcs = 1 << crtc->id; 357 planes->overlays[i]->base.possible_crtcs = 1 << crtc->id;
358 358
359 drm_crtc_helper_add(&crtc->base, &lcdc_crtc_helper_funcs); 359 drm_crtc_helper_add(&crtc->base, &lcdc_crtc_helper_funcs);
360 drm_crtc_vblank_reset(&crtc->base);
360 361
361 dc->crtc = &crtc->base; 362 dc->crtc = &crtc->base;
362 363
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
index 60b0c13d7ff5..6fad1f9648f3 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c
@@ -313,20 +313,20 @@ static int atmel_hlcdc_dc_load(struct drm_device *dev)
313 313
314 pm_runtime_enable(dev->dev); 314 pm_runtime_enable(dev->dev);
315 315
316 ret = atmel_hlcdc_dc_modeset_init(dev); 316 ret = drm_vblank_init(dev, 1);
317 if (ret < 0) { 317 if (ret < 0) {
318 dev_err(dev->dev, "failed to initialize mode setting\n"); 318 dev_err(dev->dev, "failed to initialize vblank\n");
319 goto err_periph_clk_disable; 319 goto err_periph_clk_disable;
320 } 320 }
321 321
322 drm_mode_config_reset(dev); 322 ret = atmel_hlcdc_dc_modeset_init(dev);
323
324 ret = drm_vblank_init(dev, 1);
325 if (ret < 0) { 323 if (ret < 0) {
326 dev_err(dev->dev, "failed to initialize vblank\n"); 324 dev_err(dev->dev, "failed to initialize mode setting\n");
327 goto err_periph_clk_disable; 325 goto err_periph_clk_disable;
328 } 326 }
329 327
328 drm_mode_config_reset(dev);
329
330 pm_runtime_get_sync(dev->dev); 330 pm_runtime_get_sync(dev->dev);
331 ret = drm_irq_install(dev, dc->hlcdc->irq); 331 ret = drm_irq_install(dev, dc->hlcdc->irq);
332 pm_runtime_put_sync(dev->dev); 332 pm_runtime_put_sync(dev->dev);
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 52dbeedcdcc8..d432348837a5 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -196,7 +196,12 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
196 } 196 }
197 197
198 funcs = connector->helper_private; 198 funcs = connector->helper_private;
199 new_encoder = funcs->best_encoder(connector); 199
200 if (funcs->atomic_best_encoder)
201 new_encoder = funcs->atomic_best_encoder(connector,
202 connector_state);
203 else
204 new_encoder = funcs->best_encoder(connector);
200 205
201 if (!new_encoder) { 206 if (!new_encoder) {
202 DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n", 207 DRM_DEBUG_ATOMIC("No suitable encoder found for [CONNECTOR:%d:%s]\n",
@@ -229,6 +234,9 @@ update_connector_routing(struct drm_atomic_state *state, int conn_idx)
229 } 234 }
230 } 235 }
231 236
237 if (WARN_ON(!connector_state->crtc))
238 return -EINVAL;
239
232 connector_state->best_encoder = new_encoder; 240 connector_state->best_encoder = new_encoder;
233 idx = drm_crtc_index(connector_state->crtc); 241 idx = drm_crtc_index(connector_state->crtc);
234 242
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index 21f1f5ce2d60..33d877c65ced 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -5274,12 +5274,9 @@ void drm_mode_config_reset(struct drm_device *dev)
5274 encoder->funcs->reset(encoder); 5274 encoder->funcs->reset(encoder);
5275 5275
5276 mutex_lock(&dev->mode_config.mutex); 5276 mutex_lock(&dev->mode_config.mutex);
5277 drm_for_each_connector(connector, dev) { 5277 drm_for_each_connector(connector, dev)
5278 connector->status = connector_status_unknown;
5279
5280 if (connector->funcs->reset) 5278 if (connector->funcs->reset)
5281 connector->funcs->reset(connector); 5279 connector->funcs->reset(connector);
5282 }
5283 mutex_unlock(&dev->mode_config.mutex); 5280 mutex_unlock(&dev->mode_config.mutex);
5284} 5281}
5285EXPORT_SYMBOL(drm_mode_config_reset); 5282EXPORT_SYMBOL(drm_mode_config_reset);
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 778bbb6425b8..b0487c9f018c 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1294,7 +1294,6 @@ retry:
1294 goto retry; 1294 goto retry;
1295 } 1295 }
1296 DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret); 1296 DRM_DEBUG_KMS("failed to dpcd write %d %d\n", tosend, ret);
1297 WARN(1, "fail\n");
1298 1297
1299 return -EIO; 1298 return -EIO;
1300 } 1299 }
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index 05b939e8da41..22d207e211e7 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -75,7 +75,7 @@ module_param_named(timestamp_precision_usec, drm_timestamp_precision, int, 0600)
75module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600); 75module_param_named(timestamp_monotonic, drm_timestamp_monotonic, int, 0600);
76 76
77static void store_vblank(struct drm_device *dev, int crtc, 77static void store_vblank(struct drm_device *dev, int crtc,
78 unsigned vblank_count_inc, 78 u32 vblank_count_inc,
79 struct timeval *t_vblank) 79 struct timeval *t_vblank)
80{ 80{
81 struct drm_vblank_crtc *vblank = &dev->vblank[crtc]; 81 struct drm_vblank_crtc *vblank = &dev->vblank[crtc];
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 842d6b8dc3c4..2a652359af64 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1745,7 +1745,6 @@ static int fimc_probe(struct platform_device *pdev)
1745 spin_lock_init(&ctx->lock); 1745 spin_lock_init(&ctx->lock);
1746 platform_set_drvdata(pdev, ctx); 1746 platform_set_drvdata(pdev, ctx);
1747 1747
1748 pm_runtime_set_active(dev);
1749 pm_runtime_enable(dev); 1748 pm_runtime_enable(dev);
1750 1749
1751 ret = exynos_drm_ippdrv_register(ippdrv); 1750 ret = exynos_drm_ippdrv_register(ippdrv);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 8040ed2a831f..f1c6b76c127f 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -593,8 +593,7 @@ static int gsc_src_set_transf(struct device *dev,
593 593
594 gsc_write(cfg, GSC_IN_CON); 594 gsc_write(cfg, GSC_IN_CON);
595 595
596 ctx->rotation = cfg & 596 ctx->rotation = (cfg & GSC_IN_ROT_90) ? 1 : 0;
597 (GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
598 *swap = ctx->rotation; 597 *swap = ctx->rotation;
599 598
600 return 0; 599 return 0;
@@ -857,8 +856,7 @@ static int gsc_dst_set_transf(struct device *dev,
857 856
858 gsc_write(cfg, GSC_IN_CON); 857 gsc_write(cfg, GSC_IN_CON);
859 858
860 ctx->rotation = cfg & 859 ctx->rotation = (cfg & GSC_IN_ROT_90) ? 1 : 0;
861 (GSC_IN_ROT_90 | GSC_IN_ROT_270) ? 1 : 0;
862 *swap = ctx->rotation; 860 *swap = ctx->rotation;
863 861
864 return 0; 862 return 0;
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 99e286489031..4a00990e4ae4 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -1064,6 +1064,7 @@ static int hdmi_get_modes(struct drm_connector *connector)
1064{ 1064{
1065 struct hdmi_context *hdata = ctx_from_connector(connector); 1065 struct hdmi_context *hdata = ctx_from_connector(connector);
1066 struct edid *edid; 1066 struct edid *edid;
1067 int ret;
1067 1068
1068 if (!hdata->ddc_adpt) 1069 if (!hdata->ddc_adpt)
1069 return -ENODEV; 1070 return -ENODEV;
@@ -1079,7 +1080,11 @@ static int hdmi_get_modes(struct drm_connector *connector)
1079 1080
1080 drm_mode_connector_update_edid_property(connector, edid); 1081 drm_mode_connector_update_edid_property(connector, edid);
1081 1082
1082 return drm_add_edid_modes(connector, edid); 1083 ret = drm_add_edid_modes(connector, edid);
1084
1085 kfree(edid);
1086
1087 return ret;
1083} 1088}
1084 1089
1085static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock) 1090static int hdmi_find_phy_conf(struct hdmi_context *hdata, u32 pixel_clock)
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index cae98db33062..4706b56902b4 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -718,6 +718,10 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
718 718
719 /* handling VSYNC */ 719 /* handling VSYNC */
720 if (val & MXR_INT_STATUS_VSYNC) { 720 if (val & MXR_INT_STATUS_VSYNC) {
721 /* vsync interrupt use different bit for read and clear */
722 val |= MXR_INT_CLEAR_VSYNC;
723 val &= ~MXR_INT_STATUS_VSYNC;
724
721 /* interlace scan need to check shadow register */ 725 /* interlace scan need to check shadow register */
722 if (ctx->interlace) { 726 if (ctx->interlace) {
723 base = mixer_reg_read(res, MXR_GRAPHIC_BASE(0)); 727 base = mixer_reg_read(res, MXR_GRAPHIC_BASE(0));
@@ -743,11 +747,6 @@ static irqreturn_t mixer_irq_handler(int irq, void *arg)
743 747
744out: 748out:
745 /* clear interrupts */ 749 /* clear interrupts */
746 if (~val & MXR_INT_EN_VSYNC) {
747 /* vsync interrupt use different bit for read and clear */
748 val &= ~MXR_INT_EN_VSYNC;
749 val |= MXR_INT_CLEAR_VSYNC;
750 }
751 mixer_reg_write(res, MXR_INT_STATUS, val); 750 mixer_reg_write(res, MXR_INT_STATUS, val);
752 751
753 spin_unlock(&res->reg_slock); 752 spin_unlock(&res->reg_slock);
@@ -907,8 +906,8 @@ static int mixer_enable_vblank(struct exynos_drm_crtc *crtc)
907 } 906 }
908 907
909 /* enable vsync interrupt */ 908 /* enable vsync interrupt */
910 mixer_reg_writemask(res, MXR_INT_EN, MXR_INT_EN_VSYNC, 909 mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
911 MXR_INT_EN_VSYNC); 910 mixer_reg_writemask(res, MXR_INT_EN, ~0, MXR_INT_EN_VSYNC);
912 911
913 return 0; 912 return 0;
914} 913}
@@ -918,7 +917,13 @@ static void mixer_disable_vblank(struct exynos_drm_crtc *crtc)
918 struct mixer_context *mixer_ctx = crtc->ctx; 917 struct mixer_context *mixer_ctx = crtc->ctx;
919 struct mixer_resources *res = &mixer_ctx->mixer_res; 918 struct mixer_resources *res = &mixer_ctx->mixer_res;
920 919
920 if (!mixer_ctx->powered) {
921 mixer_ctx->int_en &= MXR_INT_EN_VSYNC;
922 return;
923 }
924
921 /* disable vsync interrupt */ 925 /* disable vsync interrupt */
926 mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
922 mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC); 927 mixer_reg_writemask(res, MXR_INT_EN, 0, MXR_INT_EN_VSYNC);
923} 928}
924 929
@@ -1047,6 +1052,8 @@ static void mixer_enable(struct exynos_drm_crtc *crtc)
1047 1052
1048 mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET); 1053 mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_SOFT_RESET);
1049 1054
1055 if (ctx->int_en & MXR_INT_EN_VSYNC)
1056 mixer_reg_writemask(res, MXR_INT_STATUS, ~0, MXR_INT_CLEAR_VSYNC);
1050 mixer_reg_write(res, MXR_INT_EN, ctx->int_en); 1057 mixer_reg_write(res, MXR_INT_EN, ctx->int_en);
1051 mixer_win_reset(ctx); 1058 mixer_win_reset(ctx);
1052} 1059}
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index fe1599d75f14..424228be79ae 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -606,8 +606,6 @@ static void
606tda998x_write_if(struct tda998x_priv *priv, uint8_t bit, uint16_t addr, 606tda998x_write_if(struct tda998x_priv *priv, uint8_t bit, uint16_t addr,
607 uint8_t *buf, size_t size) 607 uint8_t *buf, size_t size)
608{ 608{
609 buf[PB(0)] = tda998x_cksum(buf, size);
610
611 reg_clear(priv, REG_DIP_IF_FLAGS, bit); 609 reg_clear(priv, REG_DIP_IF_FLAGS, bit);
612 reg_write_range(priv, addr, buf, size); 610 reg_write_range(priv, addr, buf, size);
613 reg_set(priv, REG_DIP_IF_FLAGS, bit); 611 reg_set(priv, REG_DIP_IF_FLAGS, bit);
@@ -627,6 +625,8 @@ tda998x_write_aif(struct tda998x_priv *priv, struct tda998x_encoder_params *p)
627 buf[PB(4)] = p->audio_frame[4]; 625 buf[PB(4)] = p->audio_frame[4];
628 buf[PB(5)] = p->audio_frame[5] & 0xf8; /* DM_INH + LSV */ 626 buf[PB(5)] = p->audio_frame[5] & 0xf8; /* DM_INH + LSV */
629 627
628 buf[PB(0)] = tda998x_cksum(buf, sizeof(buf));
629
630 tda998x_write_if(priv, DIP_IF_FLAGS_IF4, REG_IF4_HB0, buf, 630 tda998x_write_if(priv, DIP_IF_FLAGS_IF4, REG_IF4_HB0, buf,
631 sizeof(buf)); 631 sizeof(buf));
632} 632}
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index e9d2befbcaf3..574d0f1c26bf 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -3359,15 +3359,14 @@ int intel_freq_opcode(struct drm_i915_private *dev_priv, int val);
3359#define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true) 3359#define I915_READ64(reg) dev_priv->uncore.funcs.mmio_readq(dev_priv, (reg), true)
3360 3360
3361#define I915_READ64_2x32(lower_reg, upper_reg) ({ \ 3361#define I915_READ64_2x32(lower_reg, upper_reg) ({ \
3362 u32 upper = I915_READ(upper_reg); \ 3362 u32 upper, lower, tmp; \
3363 u32 lower = I915_READ(lower_reg); \ 3363 tmp = I915_READ(upper_reg); \
3364 u32 tmp = I915_READ(upper_reg); \ 3364 do { \
3365 if (upper != tmp) { \ 3365 upper = tmp; \
3366 upper = tmp; \ 3366 lower = I915_READ(lower_reg); \
3367 lower = I915_READ(lower_reg); \ 3367 tmp = I915_READ(upper_reg); \
3368 WARN_ON(I915_READ(upper_reg) != upper); \ 3368 } while (upper != tmp); \
3369 } \ 3369 (u64)upper << 32 | lower; })
3370 (u64)upper << 32 | lower; })
3371 3370
3372#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg) 3371#define POSTING_READ(reg) (void)I915_READ_NOTRACE(reg)
3373#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg) 3372#define POSTING_READ16(reg) (void)I915_READ16_NOTRACE(reg)
diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c
index c2a291e09bd9..96054a560f4f 100644
--- a/drivers/gpu/drm/i915/i915_gem_gtt.c
+++ b/drivers/gpu/drm/i915/i915_gem_gtt.c
@@ -2003,6 +2003,17 @@ static int ggtt_bind_vma(struct i915_vma *vma,
2003 vma->vm->insert_entries(vma->vm, pages, 2003 vma->vm->insert_entries(vma->vm, pages,
2004 vma->node.start, 2004 vma->node.start,
2005 cache_level, pte_flags); 2005 cache_level, pte_flags);
2006
2007 /* Note the inconsistency here is due to absence of the
2008 * aliasing ppgtt on gen4 and earlier. Though we always
2009 * request PIN_USER for execbuffer (translated to LOCAL_BIND),
2010 * without the appgtt, we cannot honour that request and so
2011 * must substitute it with a global binding. Since we do this
2012 * behind the upper layers back, we need to explicitly set
2013 * the bound flag ourselves.
2014 */
2015 vma->bound |= GLOBAL_BIND;
2016
2006 } 2017 }
2007 2018
2008 if (dev_priv->mm.aliasing_ppgtt && flags & LOCAL_BIND) { 2019 if (dev_priv->mm.aliasing_ppgtt && flags & LOCAL_BIND) {
diff --git a/drivers/gpu/drm/i915/i915_gem_tiling.c b/drivers/gpu/drm/i915/i915_gem_tiling.c
index 633bd1fcab69..d19c9db5e18c 100644
--- a/drivers/gpu/drm/i915/i915_gem_tiling.c
+++ b/drivers/gpu/drm/i915/i915_gem_tiling.c
@@ -464,7 +464,10 @@ i915_gem_get_tiling(struct drm_device *dev, void *data,
464 } 464 }
465 465
466 /* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */ 466 /* Hide bit 17 from the user -- see comment in i915_gem_set_tiling */
467 args->phys_swizzle_mode = args->swizzle_mode; 467 if (dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES)
468 args->phys_swizzle_mode = I915_BIT_6_SWIZZLE_UNKNOWN;
469 else
470 args->phys_swizzle_mode = args->swizzle_mode;
468 if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17) 471 if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_17)
469 args->swizzle_mode = I915_BIT_6_SWIZZLE_9; 472 args->swizzle_mode = I915_BIT_6_SWIZZLE_9;
470 if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17) 473 if (args->swizzle_mode == I915_BIT_6_SWIZZLE_9_10_17)
diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c
index 2ff9eb00fdec..31b1079cbd1b 100644
--- a/drivers/gpu/drm/i915/intel_bios.c
+++ b/drivers/gpu/drm/i915/intel_bios.c
@@ -1015,15 +1015,34 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
1015 const union child_device_config *p_child; 1015 const union child_device_config *p_child;
1016 union child_device_config *child_dev_ptr; 1016 union child_device_config *child_dev_ptr;
1017 int i, child_device_num, count; 1017 int i, child_device_num, count;
1018 u16 block_size; 1018 u8 expected_size;
1019 u16 block_size;
1019 1020
1020 p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS); 1021 p_defs = find_section(bdb, BDB_GENERAL_DEFINITIONS);
1021 if (!p_defs) { 1022 if (!p_defs) {
1022 DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n"); 1023 DRM_DEBUG_KMS("No general definition block is found, no devices defined.\n");
1023 return; 1024 return;
1024 } 1025 }
1025 if (p_defs->child_dev_size < sizeof(*p_child)) { 1026 if (bdb->version < 195) {
1026 DRM_ERROR("General definiton block child device size is too small.\n"); 1027 expected_size = 33;
1028 } else if (bdb->version == 195) {
1029 expected_size = 37;
1030 } else if (bdb->version <= 197) {
1031 expected_size = 38;
1032 } else {
1033 expected_size = 38;
1034 DRM_DEBUG_DRIVER("Expected child_device_config size for BDB version %u not known; assuming %u\n",
1035 expected_size, bdb->version);
1036 }
1037
1038 if (expected_size > sizeof(*p_child)) {
1039 DRM_ERROR("child_device_config cannot fit in p_child\n");
1040 return;
1041 }
1042
1043 if (p_defs->child_dev_size != expected_size) {
1044 DRM_ERROR("Size mismatch; child_device_config size=%u (expected %u); bdb->version: %u\n",
1045 p_defs->child_dev_size, expected_size, bdb->version);
1027 return; 1046 return;
1028 } 1047 }
1029 /* get the block size of general definitions */ 1048 /* get the block size of general definitions */
@@ -1070,7 +1089,7 @@ parse_device_mapping(struct drm_i915_private *dev_priv,
1070 1089
1071 child_dev_ptr = dev_priv->vbt.child_dev + count; 1090 child_dev_ptr = dev_priv->vbt.child_dev + count;
1072 count++; 1091 count++;
1073 memcpy(child_dev_ptr, p_child, sizeof(*p_child)); 1092 memcpy(child_dev_ptr, p_child, p_defs->child_dev_size);
1074 } 1093 }
1075 return; 1094 return;
1076} 1095}
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index efc8cf53f0f3..a61df29918ed 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -357,6 +357,16 @@ intel_dp_mst_mode_valid(struct drm_connector *connector,
357 return MODE_OK; 357 return MODE_OK;
358} 358}
359 359
360static struct drm_encoder *intel_mst_atomic_best_encoder(struct drm_connector *connector,
361 struct drm_connector_state *state)
362{
363 struct intel_connector *intel_connector = to_intel_connector(connector);
364 struct intel_dp *intel_dp = intel_connector->mst_port;
365 struct intel_crtc *crtc = to_intel_crtc(state->crtc);
366
367 return &intel_dp->mst_encoders[crtc->pipe]->base.base;
368}
369
360static struct drm_encoder *intel_mst_best_encoder(struct drm_connector *connector) 370static struct drm_encoder *intel_mst_best_encoder(struct drm_connector *connector)
361{ 371{
362 struct intel_connector *intel_connector = to_intel_connector(connector); 372 struct intel_connector *intel_connector = to_intel_connector(connector);
@@ -367,6 +377,7 @@ static struct drm_encoder *intel_mst_best_encoder(struct drm_connector *connecto
367static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = { 377static const struct drm_connector_helper_funcs intel_dp_mst_connector_helper_funcs = {
368 .get_modes = intel_dp_mst_get_modes, 378 .get_modes = intel_dp_mst_get_modes,
369 .mode_valid = intel_dp_mst_mode_valid, 379 .mode_valid = intel_dp_mst_mode_valid,
380 .atomic_best_encoder = intel_mst_atomic_best_encoder,
370 .best_encoder = intel_mst_best_encoder, 381 .best_encoder = intel_mst_best_encoder,
371}; 382};
372 383
diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c
index 45285a9178fe..9d3c2e420d2b 100644
--- a/drivers/gpu/drm/i915/intel_uncore.c
+++ b/drivers/gpu/drm/i915/intel_uncore.c
@@ -1274,10 +1274,12 @@ int i915_reg_read_ioctl(struct drm_device *dev,
1274 struct drm_i915_private *dev_priv = dev->dev_private; 1274 struct drm_i915_private *dev_priv = dev->dev_private;
1275 struct drm_i915_reg_read *reg = data; 1275 struct drm_i915_reg_read *reg = data;
1276 struct register_whitelist const *entry = whitelist; 1276 struct register_whitelist const *entry = whitelist;
1277 unsigned size;
1278 u64 offset;
1277 int i, ret = 0; 1279 int i, ret = 0;
1278 1280
1279 for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) { 1281 for (i = 0; i < ARRAY_SIZE(whitelist); i++, entry++) {
1280 if (entry->offset == reg->offset && 1282 if (entry->offset == (reg->offset & -entry->size) &&
1281 (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask)) 1283 (1 << INTEL_INFO(dev)->gen & entry->gen_bitmask))
1282 break; 1284 break;
1283 } 1285 }
@@ -1285,23 +1287,33 @@ int i915_reg_read_ioctl(struct drm_device *dev,
1285 if (i == ARRAY_SIZE(whitelist)) 1287 if (i == ARRAY_SIZE(whitelist))
1286 return -EINVAL; 1288 return -EINVAL;
1287 1289
1290 /* We use the low bits to encode extra flags as the register should
1291 * be naturally aligned (and those that are not so aligned merely
1292 * limit the available flags for that register).
1293 */
1294 offset = entry->offset;
1295 size = entry->size;
1296 size |= reg->offset ^ offset;
1297
1288 intel_runtime_pm_get(dev_priv); 1298 intel_runtime_pm_get(dev_priv);
1289 1299
1290 switch (entry->size) { 1300 switch (size) {
1301 case 8 | 1:
1302 reg->val = I915_READ64_2x32(offset, offset+4);
1303 break;
1291 case 8: 1304 case 8:
1292 reg->val = I915_READ64(reg->offset); 1305 reg->val = I915_READ64(offset);
1293 break; 1306 break;
1294 case 4: 1307 case 4:
1295 reg->val = I915_READ(reg->offset); 1308 reg->val = I915_READ(offset);
1296 break; 1309 break;
1297 case 2: 1310 case 2:
1298 reg->val = I915_READ16(reg->offset); 1311 reg->val = I915_READ16(offset);
1299 break; 1312 break;
1300 case 1: 1313 case 1:
1301 reg->val = I915_READ8(reg->offset); 1314 reg->val = I915_READ8(offset);
1302 break; 1315 break;
1303 default: 1316 default:
1304 MISSING_CASE(entry->size);
1305 ret = -EINVAL; 1317 ret = -EINVAL;
1306 goto out; 1318 goto out;
1307 } 1319 }
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index 0d1dbb737933..247a424445f7 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -220,13 +220,15 @@ static int mdp4_plane_mode_set(struct drm_plane *plane,
220 uint32_t op_mode = 0; 220 uint32_t op_mode = 0;
221 uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT; 221 uint32_t phasex_step = MDP4_VG_PHASE_STEP_DEFAULT;
222 uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT; 222 uint32_t phasey_step = MDP4_VG_PHASE_STEP_DEFAULT;
223 enum mdp4_frame_format frame_type = mdp4_get_frame_format(fb); 223 enum mdp4_frame_format frame_type;
224 224
225 if (!(crtc && fb)) { 225 if (!(crtc && fb)) {
226 DBG("%s: disabled!", mdp4_plane->name); 226 DBG("%s: disabled!", mdp4_plane->name);
227 return 0; 227 return 0;
228 } 228 }
229 229
230 frame_type = mdp4_get_frame_format(fb);
231
230 /* src values are in Q16 fixed point, convert to integer: */ 232 /* src values are in Q16 fixed point, convert to integer: */
231 src_x = src_x >> 16; 233 src_x = src_x >> 16;
232 src_y = src_y >> 16; 234 src_y = src_y >> 16;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index 206f758f7d64..e253db5de5aa 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -76,7 +76,20 @@ static void mdp5_prepare_commit(struct msm_kms *kms, struct drm_atomic_state *st
76 76
77static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state) 77static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *state)
78{ 78{
79 int i;
79 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); 80 struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
81 int nplanes = mdp5_kms->dev->mode_config.num_total_plane;
82
83 for (i = 0; i < nplanes; i++) {
84 struct drm_plane *plane = state->planes[i];
85 struct drm_plane_state *plane_state = state->plane_states[i];
86
87 if (!plane)
88 continue;
89
90 mdp5_plane_complete_commit(plane, plane_state);
91 }
92
80 mdp5_disable(mdp5_kms); 93 mdp5_disable(mdp5_kms);
81} 94}
82 95
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index e0eb24587c84..e79ac09b7216 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -227,6 +227,8 @@ void mdp5_plane_install_properties(struct drm_plane *plane,
227 struct drm_mode_object *obj); 227 struct drm_mode_object *obj);
228uint32_t mdp5_plane_get_flush(struct drm_plane *plane); 228uint32_t mdp5_plane_get_flush(struct drm_plane *plane);
229void mdp5_plane_complete_flip(struct drm_plane *plane); 229void mdp5_plane_complete_flip(struct drm_plane *plane);
230void mdp5_plane_complete_commit(struct drm_plane *plane,
231 struct drm_plane_state *state);
230enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane); 232enum mdp5_pipe mdp5_plane_pipe(struct drm_plane *plane);
231struct drm_plane *mdp5_plane_init(struct drm_device *dev, 233struct drm_plane *mdp5_plane_init(struct drm_device *dev,
232 enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset); 234 enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 57b8f56ae9d0..22275568ab8b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -31,8 +31,6 @@ struct mdp5_plane {
31 31
32 uint32_t nformats; 32 uint32_t nformats;
33 uint32_t formats[32]; 33 uint32_t formats[32];
34
35 bool enabled;
36}; 34};
37#define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base) 35#define to_mdp5_plane(x) container_of(x, struct mdp5_plane, base)
38 36
@@ -56,22 +54,6 @@ static bool plane_enabled(struct drm_plane_state *state)
56 return state->fb && state->crtc; 54 return state->fb && state->crtc;
57} 55}
58 56
59static int mdp5_plane_disable(struct drm_plane *plane)
60{
61 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
62 struct mdp5_kms *mdp5_kms = get_kms(plane);
63 enum mdp5_pipe pipe = mdp5_plane->pipe;
64
65 DBG("%s: disable", mdp5_plane->name);
66
67 if (mdp5_kms) {
68 /* Release the memory we requested earlier from the SMP: */
69 mdp5_smp_release(mdp5_kms->smp, pipe);
70 }
71
72 return 0;
73}
74
75static void mdp5_plane_destroy(struct drm_plane *plane) 57static void mdp5_plane_destroy(struct drm_plane *plane)
76{ 58{
77 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane); 59 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
@@ -224,7 +206,6 @@ static void mdp5_plane_atomic_update(struct drm_plane *plane,
224 206
225 if (!plane_enabled(state)) { 207 if (!plane_enabled(state)) {
226 to_mdp5_plane_state(state)->pending = true; 208 to_mdp5_plane_state(state)->pending = true;
227 mdp5_plane_disable(plane);
228 } else if (to_mdp5_plane_state(state)->mode_changed) { 209 } else if (to_mdp5_plane_state(state)->mode_changed) {
229 int ret; 210 int ret;
230 to_mdp5_plane_state(state)->pending = true; 211 to_mdp5_plane_state(state)->pending = true;
@@ -602,6 +583,20 @@ uint32_t mdp5_plane_get_flush(struct drm_plane *plane)
602 return mdp5_plane->flush_mask; 583 return mdp5_plane->flush_mask;
603} 584}
604 585
586/* called after vsync in thread context */
587void mdp5_plane_complete_commit(struct drm_plane *plane,
588 struct drm_plane_state *state)
589{
590 struct mdp5_kms *mdp5_kms = get_kms(plane);
591 struct mdp5_plane *mdp5_plane = to_mdp5_plane(plane);
592 enum mdp5_pipe pipe = mdp5_plane->pipe;
593
594 if (!plane_enabled(plane->state)) {
595 DBG("%s: free SMP", mdp5_plane->name);
596 mdp5_smp_release(mdp5_kms->smp, pipe);
597 }
598}
599
605/* initialize plane */ 600/* initialize plane */
606struct drm_plane *mdp5_plane_init(struct drm_device *dev, 601struct drm_plane *mdp5_plane_init(struct drm_device *dev,
607 enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset) 602 enum mdp5_pipe pipe, bool private_plane, uint32_t reg_offset)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
index 16702aecf0df..64a27d86f2f5 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.c
@@ -34,22 +34,44 @@
34 * and CANNOT be re-allocated (eg: MMB0 and MMB1 both tied to RGB0). 34 * and CANNOT be re-allocated (eg: MMB0 and MMB1 both tied to RGB0).
35 * 35 *
36 * For each block that can be dynamically allocated, it can be either 36 * For each block that can be dynamically allocated, it can be either
37 * free, or pending/in-use by a client. The updates happen in three steps: 37 * free:
38 * The block is free.
39 *
40 * pending:
41 * The block is allocated to some client and not free.
42 *
43 * configured:
44 * The block is allocated to some client, and assigned to that
45 * client in MDP5_MDP_SMP_ALLOC registers.
46 *
47 * inuse:
48 * The block is being actively used by a client.
49 *
50 * The updates happen in the following steps:
38 * 51 *
39 * 1) mdp5_smp_request(): 52 * 1) mdp5_smp_request():
40 * When plane scanout is setup, calculate required number of 53 * When plane scanout is setup, calculate required number of
41 * blocks needed per client, and request. Blocks not inuse or 54 * blocks needed per client, and request. Blocks neither inuse nor
42 * pending by any other client are added to client's pending 55 * configured nor pending by any other client are added to client's
43 * set. 56 * pending set.
57 * For shrinking, blocks in pending but not in configured can be freed
58 * directly, but those already in configured will be freed later by
59 * mdp5_smp_commit.
44 * 60 *
45 * 2) mdp5_smp_configure(): 61 * 2) mdp5_smp_configure():
46 * As hw is programmed, before FLUSH, MDP5_MDP_SMP_ALLOC registers 62 * As hw is programmed, before FLUSH, MDP5_MDP_SMP_ALLOC registers
47 * are configured for the union(pending, inuse) 63 * are configured for the union(pending, inuse)
64 * Current pending is copied to configured.
65 * It is assumed that mdp5_smp_request and mdp5_smp_configure not run
66 * concurrently for the same pipe.
48 * 67 *
49 * 3) mdp5_smp_commit(): 68 * 3) mdp5_smp_commit():
50 * After next vblank, copy pending -> inuse. Optionally update 69 * After next vblank, copy configured -> inuse. Optionally update
51 * MDP5_SMP_ALLOC registers if there are newly unused blocks 70 * MDP5_SMP_ALLOC registers if there are newly unused blocks
52 * 71 *
72 * 4) mdp5_smp_release():
73 * Must be called after the pipe is disabled and no longer uses any SMB
74 *
53 * On the next vblank after changes have been committed to hw, the 75 * On the next vblank after changes have been committed to hw, the
54 * client's pending blocks become it's in-use blocks (and no-longer 76 * client's pending blocks become it's in-use blocks (and no-longer
55 * in-use blocks become available to other clients). 77 * in-use blocks become available to other clients).
@@ -77,6 +99,9 @@ struct mdp5_smp {
77 struct mdp5_client_smp_state client_state[MAX_CLIENTS]; 99 struct mdp5_client_smp_state client_state[MAX_CLIENTS];
78}; 100};
79 101
102static void update_smp_state(struct mdp5_smp *smp,
103 u32 cid, mdp5_smp_state_t *assigned);
104
80static inline 105static inline
81struct mdp5_kms *get_kms(struct mdp5_smp *smp) 106struct mdp5_kms *get_kms(struct mdp5_smp *smp)
82{ 107{
@@ -149,7 +174,12 @@ static int smp_request_block(struct mdp5_smp *smp,
149 for (i = cur_nblks; i > nblks; i--) { 174 for (i = cur_nblks; i > nblks; i--) {
150 int blk = find_first_bit(ps->pending, cnt); 175 int blk = find_first_bit(ps->pending, cnt);
151 clear_bit(blk, ps->pending); 176 clear_bit(blk, ps->pending);
152 /* don't clear in global smp_state until _commit() */ 177
178 /* clear in global smp_state if not in configured
179 * otherwise until _commit()
180 */
181 if (!test_bit(blk, ps->configured))
182 clear_bit(blk, smp->state);
153 } 183 }
154 } 184 }
155 185
@@ -223,10 +253,33 @@ int mdp5_smp_request(struct mdp5_smp *smp, enum mdp5_pipe pipe, u32 fmt, u32 wid
223/* Release SMP blocks for all clients of the pipe */ 253/* Release SMP blocks for all clients of the pipe */
224void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe) 254void mdp5_smp_release(struct mdp5_smp *smp, enum mdp5_pipe pipe)
225{ 255{
226 int i, nblks; 256 int i;
257 unsigned long flags;
258 int cnt = smp->blk_cnt;
259
260 for (i = 0; i < pipe2nclients(pipe); i++) {
261 mdp5_smp_state_t assigned;
262 u32 cid = pipe2client(pipe, i);
263 struct mdp5_client_smp_state *ps = &smp->client_state[cid];
264
265 spin_lock_irqsave(&smp->state_lock, flags);
266
267 /* clear hw assignment */
268 bitmap_or(assigned, ps->inuse, ps->configured, cnt);
269 update_smp_state(smp, CID_UNUSED, &assigned);
270
271 /* free to global pool */
272 bitmap_andnot(smp->state, smp->state, ps->pending, cnt);
273 bitmap_andnot(smp->state, smp->state, assigned, cnt);
274
275 /* clear client's infor */
276 bitmap_zero(ps->pending, cnt);
277 bitmap_zero(ps->configured, cnt);
278 bitmap_zero(ps->inuse, cnt);
279
280 spin_unlock_irqrestore(&smp->state_lock, flags);
281 }
227 282
228 for (i = 0, nblks = 0; i < pipe2nclients(pipe); i++)
229 smp_request_block(smp, pipe2client(pipe, i), 0);
230 set_fifo_thresholds(smp, pipe, 0); 283 set_fifo_thresholds(smp, pipe, 0);
231} 284}
232 285
@@ -274,12 +327,20 @@ void mdp5_smp_configure(struct mdp5_smp *smp, enum mdp5_pipe pipe)
274 u32 cid = pipe2client(pipe, i); 327 u32 cid = pipe2client(pipe, i);
275 struct mdp5_client_smp_state *ps = &smp->client_state[cid]; 328 struct mdp5_client_smp_state *ps = &smp->client_state[cid];
276 329
277 bitmap_or(assigned, ps->inuse, ps->pending, cnt); 330 /*
331 * if vblank has not happened since last smp_configure
332 * skip the configure for now
333 */
334 if (!bitmap_equal(ps->inuse, ps->configured, cnt))
335 continue;
336
337 bitmap_copy(ps->configured, ps->pending, cnt);
338 bitmap_or(assigned, ps->inuse, ps->configured, cnt);
278 update_smp_state(smp, cid, &assigned); 339 update_smp_state(smp, cid, &assigned);
279 } 340 }
280} 341}
281 342
282/* step #3: after vblank, copy pending -> inuse: */ 343/* step #3: after vblank, copy configured -> inuse: */
283void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe) 344void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
284{ 345{
285 int cnt = smp->blk_cnt; 346 int cnt = smp->blk_cnt;
@@ -295,7 +356,7 @@ void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
295 * using, which can be released and made available to other 356 * using, which can be released and made available to other
296 * clients: 357 * clients:
297 */ 358 */
298 if (bitmap_andnot(released, ps->inuse, ps->pending, cnt)) { 359 if (bitmap_andnot(released, ps->inuse, ps->configured, cnt)) {
299 unsigned long flags; 360 unsigned long flags;
300 361
301 spin_lock_irqsave(&smp->state_lock, flags); 362 spin_lock_irqsave(&smp->state_lock, flags);
@@ -306,7 +367,7 @@ void mdp5_smp_commit(struct mdp5_smp *smp, enum mdp5_pipe pipe)
306 update_smp_state(smp, CID_UNUSED, &released); 367 update_smp_state(smp, CID_UNUSED, &released);
307 } 368 }
308 369
309 bitmap_copy(ps->inuse, ps->pending, cnt); 370 bitmap_copy(ps->inuse, ps->configured, cnt);
310 } 371 }
311} 372}
312 373
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
index e47179f63585..5b6c2363f592 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_smp.h
@@ -23,6 +23,7 @@
23 23
24struct mdp5_client_smp_state { 24struct mdp5_client_smp_state {
25 mdp5_smp_state_t inuse; 25 mdp5_smp_state_t inuse;
26 mdp5_smp_state_t configured;
26 mdp5_smp_state_t pending; 27 mdp5_smp_state_t pending;
27}; 28};
28 29
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 1b22d8bfe142..1ceb4f22dd89 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -283,12 +283,8 @@ int msm_atomic_commit(struct drm_device *dev,
283 283
284 timeout = ktime_add_ms(ktime_get(), 1000); 284 timeout = ktime_add_ms(ktime_get(), 1000);
285 285
286 ret = msm_wait_fence_interruptable(dev, c->fence, &timeout); 286 /* uninterruptible wait */
287 if (ret) { 287 msm_wait_fence(dev, c->fence, &timeout, false);
288 WARN_ON(ret); // TODO unswap state back? or??
289 commit_destroy(c);
290 return ret;
291 }
292 288
293 complete_commit(c); 289 complete_commit(c);
294 290
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index b7ef56ed8d1c..d3467b115e04 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -637,8 +637,8 @@ static void msm_debugfs_cleanup(struct drm_minor *minor)
637 * Fences: 637 * Fences:
638 */ 638 */
639 639
640int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence, 640int msm_wait_fence(struct drm_device *dev, uint32_t fence,
641 ktime_t *timeout) 641 ktime_t *timeout , bool interruptible)
642{ 642{
643 struct msm_drm_private *priv = dev->dev_private; 643 struct msm_drm_private *priv = dev->dev_private;
644 int ret; 644 int ret;
@@ -667,7 +667,12 @@ int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence,
667 remaining_jiffies = timespec_to_jiffies(&ts); 667 remaining_jiffies = timespec_to_jiffies(&ts);
668 } 668 }
669 669
670 ret = wait_event_interruptible_timeout(priv->fence_event, 670 if (interruptible)
671 ret = wait_event_interruptible_timeout(priv->fence_event,
672 fence_completed(dev, fence),
673 remaining_jiffies);
674 else
675 ret = wait_event_timeout(priv->fence_event,
671 fence_completed(dev, fence), 676 fence_completed(dev, fence),
672 remaining_jiffies); 677 remaining_jiffies);
673 678
@@ -853,7 +858,7 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
853 return -EINVAL; 858 return -EINVAL;
854 } 859 }
855 860
856 return msm_wait_fence_interruptable(dev, args->fence, &timeout); 861 return msm_wait_fence(dev, args->fence, &timeout, true);
857} 862}
858 863
859static const struct drm_ioctl_desc msm_ioctls[] = { 864static const struct drm_ioctl_desc msm_ioctls[] = {
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index e7c5ea125d45..4ff0ec9c994b 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -164,8 +164,8 @@ int msm_atomic_commit(struct drm_device *dev,
164 164
165int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu); 165int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
166 166
167int msm_wait_fence_interruptable(struct drm_device *dev, uint32_t fence, 167int msm_wait_fence(struct drm_device *dev, uint32_t fence,
168 ktime_t *timeout); 168 ktime_t *timeout, bool interruptible);
169int msm_queue_fence_cb(struct drm_device *dev, 169int msm_queue_fence_cb(struct drm_device *dev,
170 struct msm_fence_cb *cb, uint32_t fence); 170 struct msm_fence_cb *cb, uint32_t fence);
171void msm_update_fence(struct drm_device *dev, uint32_t fence); 171void msm_update_fence(struct drm_device *dev, uint32_t fence);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index f211b80e3a1e..c76cc853b08a 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -460,7 +460,7 @@ int msm_gem_cpu_prep(struct drm_gem_object *obj, uint32_t op, ktime_t *timeout)
460 if (op & MSM_PREP_NOSYNC) 460 if (op & MSM_PREP_NOSYNC)
461 timeout = NULL; 461 timeout = NULL;
462 462
463 ret = msm_wait_fence_interruptable(dev, fence, timeout); 463 ret = msm_wait_fence(dev, fence, timeout, true);
464 } 464 }
465 465
466 /* TODO cache maintenance */ 466 /* TODO cache maintenance */
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index dd7a7ab603e2..831461bc98a5 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -23,8 +23,12 @@
23struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj) 23struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
24{ 24{
25 struct msm_gem_object *msm_obj = to_msm_bo(obj); 25 struct msm_gem_object *msm_obj = to_msm_bo(obj);
26 BUG_ON(!msm_obj->sgt); /* should have already pinned! */ 26 int npages = obj->size >> PAGE_SHIFT;
27 return msm_obj->sgt; 27
28 if (WARN_ON(!msm_obj->pages)) /* should have already pinned! */
29 return NULL;
30
31 return drm_prime_pages_to_sg(msm_obj->pages, npages);
28} 32}
29 33
30void *msm_gem_prime_vmap(struct drm_gem_object *obj) 34void *msm_gem_prime_vmap(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index 36b40c9252b5..109b8262dc85 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -128,6 +128,7 @@ nouveau_cli_destroy(struct nouveau_cli *cli)
128 nvkm_vm_ref(NULL, &nvxx_client(&cli->base)->vm, NULL); 128 nvkm_vm_ref(NULL, &nvxx_client(&cli->base)->vm, NULL);
129 nvif_client_fini(&cli->base); 129 nvif_client_fini(&cli->base);
130 usif_client_fini(cli); 130 usif_client_fini(cli);
131 kfree(cli);
131} 132}
132 133
133static void 134static void
@@ -865,8 +866,10 @@ nouveau_drm_preclose(struct drm_device *dev, struct drm_file *fpriv)
865 866
866 pm_runtime_get_sync(dev->dev); 867 pm_runtime_get_sync(dev->dev);
867 868
869 mutex_lock(&cli->mutex);
868 if (cli->abi16) 870 if (cli->abi16)
869 nouveau_abi16_fini(cli->abi16); 871 nouveau_abi16_fini(cli->abi16);
872 mutex_unlock(&cli->mutex);
870 873
871 mutex_lock(&drm->client.mutex); 874 mutex_lock(&drm->client.mutex);
872 list_del(&cli->head); 875 list_del(&cli->head);
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
index 775277f1edb0..dcfbbfaf1739 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -92,6 +92,8 @@ static int nouveau_platform_power_down(struct nouveau_platform_gpu *gpu)
92 return 0; 92 return 0;
93} 93}
94 94
95#if IS_ENABLED(CONFIG_IOMMU_API)
96
95static void nouveau_platform_probe_iommu(struct device *dev, 97static void nouveau_platform_probe_iommu(struct device *dev,
96 struct nouveau_platform_gpu *gpu) 98 struct nouveau_platform_gpu *gpu)
97{ 99{
@@ -158,6 +160,20 @@ static void nouveau_platform_remove_iommu(struct device *dev,
158 } 160 }
159} 161}
160 162
163#else
164
165static void nouveau_platform_probe_iommu(struct device *dev,
166 struct nouveau_platform_gpu *gpu)
167{
168}
169
170static void nouveau_platform_remove_iommu(struct device *dev,
171 struct nouveau_platform_gpu *gpu)
172{
173}
174
175#endif
176
161static int nouveau_platform_probe(struct platform_device *pdev) 177static int nouveau_platform_probe(struct platform_device *pdev)
162{ 178{
163 struct nouveau_platform_gpu *gpu; 179 struct nouveau_platform_gpu *gpu;
diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c
index 1f8ec0e2156c..737e8f976a98 100644
--- a/drivers/gpu/drm/nouveau/nouveau_ttm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c
@@ -175,15 +175,24 @@ nouveau_gart_manager_new(struct ttm_mem_type_manager *man,
175 node->page_shift = 12; 175 node->page_shift = 12;
176 176
177 switch (drm->device.info.family) { 177 switch (drm->device.info.family) {
178 case NV_DEVICE_INFO_V0_TNT:
179 case NV_DEVICE_INFO_V0_CELSIUS:
180 case NV_DEVICE_INFO_V0_KELVIN:
181 case NV_DEVICE_INFO_V0_RANKINE:
182 case NV_DEVICE_INFO_V0_CURIE:
183 break;
178 case NV_DEVICE_INFO_V0_TESLA: 184 case NV_DEVICE_INFO_V0_TESLA:
179 if (drm->device.info.chipset != 0x50) 185 if (drm->device.info.chipset != 0x50)
180 node->memtype = (nvbo->tile_flags & 0x7f00) >> 8; 186 node->memtype = (nvbo->tile_flags & 0x7f00) >> 8;
181 break; 187 break;
182 case NV_DEVICE_INFO_V0_FERMI: 188 case NV_DEVICE_INFO_V0_FERMI:
183 case NV_DEVICE_INFO_V0_KEPLER: 189 case NV_DEVICE_INFO_V0_KEPLER:
190 case NV_DEVICE_INFO_V0_MAXWELL:
184 node->memtype = (nvbo->tile_flags & 0xff00) >> 8; 191 node->memtype = (nvbo->tile_flags & 0xff00) >> 8;
185 break; 192 break;
186 default: 193 default:
194 NV_WARN(drm, "%s: unhandled family type %x\n", __func__,
195 drm->device.info.family);
187 break; 196 break;
188 } 197 }
189 198
diff --git a/drivers/gpu/drm/nouveau/nv04_fbcon.c b/drivers/gpu/drm/nouveau/nv04_fbcon.c
index 4ef602c5469d..495c57644ced 100644
--- a/drivers/gpu/drm/nouveau/nv04_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv04_fbcon.c
@@ -203,7 +203,7 @@ nv04_fbcon_accel_init(struct fb_info *info)
203 if (ret) 203 if (ret)
204 return ret; 204 return ret;
205 205
206 if (RING_SPACE(chan, 49)) { 206 if (RING_SPACE(chan, 49 + (device->info.chipset >= 0x11 ? 4 : 0))) {
207 nouveau_fbcon_gpu_lockup(info); 207 nouveau_fbcon_gpu_lockup(info);
208 return 0; 208 return 0;
209 } 209 }
diff --git a/drivers/gpu/drm/nouveau/nv50_display.c b/drivers/gpu/drm/nouveau/nv50_display.c
index 7da7958556a3..981342d142ff 100644
--- a/drivers/gpu/drm/nouveau/nv50_display.c
+++ b/drivers/gpu/drm/nouveau/nv50_display.c
@@ -979,7 +979,7 @@ nv50_crtc_cursor_show_hide(struct nouveau_crtc *nv_crtc, bool show, bool update)
979{ 979{
980 struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev); 980 struct nv50_mast *mast = nv50_mast(nv_crtc->base.dev);
981 981
982 if (show && nv_crtc->cursor.nvbo) 982 if (show && nv_crtc->cursor.nvbo && nv_crtc->base.enabled)
983 nv50_crtc_cursor_show(nv_crtc); 983 nv50_crtc_cursor_show(nv_crtc);
984 else 984 else
985 nv50_crtc_cursor_hide(nv_crtc); 985 nv50_crtc_cursor_hide(nv_crtc);
diff --git a/drivers/gpu/drm/nouveau/nv50_fbcon.c b/drivers/gpu/drm/nouveau/nv50_fbcon.c
index 394c89abcc97..901130b06072 100644
--- a/drivers/gpu/drm/nouveau/nv50_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nv50_fbcon.c
@@ -188,7 +188,7 @@ nv50_fbcon_accel_init(struct fb_info *info)
188 if (ret) 188 if (ret)
189 return ret; 189 return ret;
190 190
191 ret = RING_SPACE(chan, 59); 191 ret = RING_SPACE(chan, 58);
192 if (ret) { 192 if (ret) {
193 nouveau_fbcon_gpu_lockup(info); 193 nouveau_fbcon_gpu_lockup(info);
194 return ret; 194 return ret;
@@ -252,6 +252,7 @@ nv50_fbcon_accel_init(struct fb_info *info)
252 OUT_RING(chan, info->var.yres_virtual); 252 OUT_RING(chan, info->var.yres_virtual);
253 OUT_RING(chan, upper_32_bits(fb->vma.offset)); 253 OUT_RING(chan, upper_32_bits(fb->vma.offset));
254 OUT_RING(chan, lower_32_bits(fb->vma.offset)); 254 OUT_RING(chan, lower_32_bits(fb->vma.offset));
255 FIRE_RING(chan);
255 256
256 return 0; 257 return 0;
257} 258}
diff --git a/drivers/gpu/drm/nouveau/nvc0_fbcon.c b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
index 61246677e8dc..fcd2e5f27bb9 100644
--- a/drivers/gpu/drm/nouveau/nvc0_fbcon.c
+++ b/drivers/gpu/drm/nouveau/nvc0_fbcon.c
@@ -188,7 +188,7 @@ nvc0_fbcon_accel_init(struct fb_info *info)
188 return -EINVAL; 188 return -EINVAL;
189 } 189 }
190 190
191 ret = RING_SPACE(chan, 60); 191 ret = RING_SPACE(chan, 58);
192 if (ret) { 192 if (ret) {
193 WARN_ON(1); 193 WARN_ON(1);
194 nouveau_fbcon_gpu_lockup(info); 194 nouveau_fbcon_gpu_lockup(info);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c
index 9ef6728c528d..7f2f05f78cc8 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/gf110.c
@@ -809,7 +809,7 @@ exec_lookup(struct nv50_disp_priv *priv, int head, int or, u32 ctrl,
809 case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break; 809 case 0x00000900: type = DCB_OUTPUT_DP; mask = 2; break;
810 default: 810 default:
811 nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl); 811 nv_error(priv, "unknown SOR mc 0x%08x\n", ctrl);
812 return 0x0000; 812 return NULL;
813 } 813 }
814 } 814 }
815 815
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
index 5606c25e5d02..ca11ddb6ed46 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.c
@@ -663,6 +663,37 @@ gf100_gr_zbc_init(struct gf100_gr_priv *priv)
663 gf100_gr_zbc_clear_depth(priv, index); 663 gf100_gr_zbc_clear_depth(priv, index);
664} 664}
665 665
666/**
667 * Wait until GR goes idle. GR is considered idle if it is disabled by the
668 * MC (0x200) register, or GR is not busy and a context switch is not in
669 * progress.
670 */
671int
672gf100_gr_wait_idle(struct gf100_gr_priv *priv)
673{
674 unsigned long end_jiffies = jiffies + msecs_to_jiffies(2000);
675 bool gr_enabled, ctxsw_active, gr_busy;
676
677 do {
678 /*
679 * required to make sure FIFO_ENGINE_STATUS (0x2640) is
680 * up-to-date
681 */
682 nv_rd32(priv, 0x400700);
683
684 gr_enabled = nv_rd32(priv, 0x200) & 0x1000;
685 ctxsw_active = nv_rd32(priv, 0x2640) & 0x8000;
686 gr_busy = nv_rd32(priv, 0x40060c) & 0x1;
687
688 if (!gr_enabled || (!gr_busy && !ctxsw_active))
689 return 0;
690 } while (time_before(jiffies, end_jiffies));
691
692 nv_error(priv, "wait for idle timeout (en: %d, ctxsw: %d, busy: %d)\n",
693 gr_enabled, ctxsw_active, gr_busy);
694 return -EAGAIN;
695}
696
666void 697void
667gf100_gr_mmio(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p) 698gf100_gr_mmio(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p)
668{ 699{
@@ -699,7 +730,13 @@ gf100_gr_icmd(struct gf100_gr_priv *priv, const struct gf100_gr_pack *p)
699 730
700 while (addr < next) { 731 while (addr < next) {
701 nv_wr32(priv, 0x400200, addr); 732 nv_wr32(priv, 0x400200, addr);
702 nv_wait(priv, 0x400700, 0x00000002, 0x00000000); 733 /**
734 * Wait for GR to go idle after submitting a
735 * GO_IDLE bundle
736 */
737 if ((addr & 0xffff) == 0xe100)
738 gf100_gr_wait_idle(priv);
739 nv_wait(priv, 0x400700, 0x00000004, 0x00000000);
703 addr += init->pitch; 740 addr += init->pitch;
704 } 741 }
705 } 742 }
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
index 8af1a89eda84..c9533fdac4fc 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/gr/gf100.h
@@ -181,6 +181,7 @@ struct gf100_gr_oclass {
181 int ppc_nr; 181 int ppc_nr;
182}; 182};
183 183
184int gf100_gr_wait_idle(struct gf100_gr_priv *);
184void gf100_gr_mmio(struct gf100_gr_priv *, const struct gf100_gr_pack *); 185void gf100_gr_mmio(struct gf100_gr_priv *, const struct gf100_gr_pack *);
185void gf100_gr_icmd(struct gf100_gr_priv *, const struct gf100_gr_pack *); 186void gf100_gr_icmd(struct gf100_gr_priv *, const struct gf100_gr_pack *);
186void gf100_gr_mthd(struct gf100_gr_priv *, const struct gf100_gr_pack *); 187void gf100_gr_mthd(struct gf100_gr_priv *, const struct gf100_gr_pack *);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
index 2006c445938d..4cf36a3aa814 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/pm/base.c
@@ -332,9 +332,12 @@ static void
332nvkm_perfctx_dtor(struct nvkm_object *object) 332nvkm_perfctx_dtor(struct nvkm_object *object)
333{ 333{
334 struct nvkm_pm *ppm = (void *)object->engine; 334 struct nvkm_pm *ppm = (void *)object->engine;
335 struct nvkm_perfctx *ctx = (void *)object;
336
335 mutex_lock(&nv_subdev(ppm)->mutex); 337 mutex_lock(&nv_subdev(ppm)->mutex);
336 nvkm_engctx_destroy(&ppm->context->base); 338 nvkm_engctx_destroy(&ctx->base);
337 ppm->context = NULL; 339 if (ppm->context == ctx)
340 ppm->context = NULL;
338 mutex_unlock(&nv_subdev(ppm)->mutex); 341 mutex_unlock(&nv_subdev(ppm)->mutex);
339} 342}
340 343
@@ -355,12 +358,11 @@ nvkm_perfctx_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
355 mutex_lock(&nv_subdev(ppm)->mutex); 358 mutex_lock(&nv_subdev(ppm)->mutex);
356 if (ppm->context == NULL) 359 if (ppm->context == NULL)
357 ppm->context = ctx; 360 ppm->context = ctx;
358 mutex_unlock(&nv_subdev(ppm)->mutex);
359
360 if (ctx != ppm->context) 361 if (ctx != ppm->context)
361 return -EBUSY; 362 ret = -EBUSY;
363 mutex_unlock(&nv_subdev(ppm)->mutex);
362 364
363 return 0; 365 return ret;
364} 366}
365 367
366struct nvkm_oclass 368struct nvkm_oclass
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
index f67cdae1e90a..f4611e3f0971 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/init.c
@@ -1285,6 +1285,44 @@ init_zm_reg_sequence(struct nvbios_init *init)
1285} 1285}
1286 1286
1287/** 1287/**
1288 * INIT_PLL_INDIRECT - opcode 0x59
1289 *
1290 */
1291static void
1292init_pll_indirect(struct nvbios_init *init)
1293{
1294 struct nvkm_bios *bios = init->bios;
1295 u32 reg = nv_ro32(bios, init->offset + 1);
1296 u16 addr = nv_ro16(bios, init->offset + 5);
1297 u32 freq = (u32)nv_ro16(bios, addr) * 1000;
1298
1299 trace("PLL_INDIRECT\tR[0x%06x] =PLL= VBIOS[%04x] = %dkHz\n",
1300 reg, addr, freq);
1301 init->offset += 7;
1302
1303 init_prog_pll(init, reg, freq);
1304}
1305
1306/**
1307 * INIT_ZM_REG_INDIRECT - opcode 0x5a
1308 *
1309 */
1310static void
1311init_zm_reg_indirect(struct nvbios_init *init)
1312{
1313 struct nvkm_bios *bios = init->bios;
1314 u32 reg = nv_ro32(bios, init->offset + 1);
1315 u16 addr = nv_ro16(bios, init->offset + 5);
1316 u32 data = nv_ro32(bios, addr);
1317
1318 trace("ZM_REG_INDIRECT\tR[0x%06x] = VBIOS[0x%04x] = 0x%08x\n",
1319 reg, addr, data);
1320 init->offset += 7;
1321
1322 init_wr32(init, addr, data);
1323}
1324
1325/**
1288 * INIT_SUB_DIRECT - opcode 0x5b 1326 * INIT_SUB_DIRECT - opcode 0x5b
1289 * 1327 *
1290 */ 1328 */
@@ -2145,6 +2183,8 @@ static struct nvbios_init_opcode {
2145 [0x56] = { init_condition_time }, 2183 [0x56] = { init_condition_time },
2146 [0x57] = { init_ltime }, 2184 [0x57] = { init_ltime },
2147 [0x58] = { init_zm_reg_sequence }, 2185 [0x58] = { init_zm_reg_sequence },
2186 [0x59] = { init_pll_indirect },
2187 [0x5a] = { init_zm_reg_indirect },
2148 [0x5b] = { init_sub_direct }, 2188 [0x5b] = { init_sub_direct },
2149 [0x5c] = { init_jump }, 2189 [0x5c] = { init_jump },
2150 [0x5e] = { init_i2c_if }, 2190 [0x5e] = { init_i2c_if },
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
index 822d32a28d6e..065e9f5c8db9 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/clk/gt215.c
@@ -180,7 +180,8 @@ gt215_clk_info(struct nvkm_clk *clock, int clk, u32 khz,
180 struct gt215_clk_info *info) 180 struct gt215_clk_info *info)
181{ 181{
182 struct gt215_clk_priv *priv = (void *)clock; 182 struct gt215_clk_priv *priv = (void *)clock;
183 u32 oclk, sclk, sdiv, diff; 183 u32 oclk, sclk, sdiv;
184 s32 diff;
184 185
185 info->clk = 0; 186 info->clk = 0;
186 187
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
index c0fdb89e74ac..24dcdfb58a8d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/ibus/gk20a.c
@@ -38,6 +38,14 @@ gk20a_ibus_init_priv_ring(struct gk20a_ibus_priv *priv)
38 nv_wr32(priv, 0x12004c, 0x4); 38 nv_wr32(priv, 0x12004c, 0x4);
39 nv_wr32(priv, 0x122204, 0x2); 39 nv_wr32(priv, 0x122204, 0x2);
40 nv_rd32(priv, 0x122204); 40 nv_rd32(priv, 0x122204);
41
42 /*
43 * Bug: increase clock timeout to avoid operation failure at high
44 * gpcclk rate.
45 */
46 nv_wr32(priv, 0x122354, 0x800);
47 nv_wr32(priv, 0x128328, 0x800);
48 nv_wr32(priv, 0x124320, 0x800);
41} 49}
42 50
43static void 51static void
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
index 80614f1b2074..282143f49d72 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/instmem/nv04.c
@@ -50,7 +50,12 @@ nv04_instobj_dtor(struct nvkm_object *object)
50{ 50{
51 struct nv04_instmem_priv *priv = (void *)nvkm_instmem(object); 51 struct nv04_instmem_priv *priv = (void *)nvkm_instmem(object);
52 struct nv04_instobj_priv *node = (void *)object; 52 struct nv04_instobj_priv *node = (void *)object;
53 struct nvkm_subdev *subdev = (void *)priv;
54
55 mutex_lock(&subdev->mutex);
53 nvkm_mm_free(&priv->heap, &node->mem); 56 nvkm_mm_free(&priv->heap, &node->mem);
57 mutex_unlock(&subdev->mutex);
58
54 nvkm_instobj_destroy(&node->base); 59 nvkm_instobj_destroy(&node->base);
55} 60}
56 61
@@ -62,6 +67,7 @@ nv04_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
62 struct nv04_instmem_priv *priv = (void *)nvkm_instmem(parent); 67 struct nv04_instmem_priv *priv = (void *)nvkm_instmem(parent);
63 struct nv04_instobj_priv *node; 68 struct nv04_instobj_priv *node;
64 struct nvkm_instobj_args *args = data; 69 struct nvkm_instobj_args *args = data;
70 struct nvkm_subdev *subdev = (void *)priv;
65 int ret; 71 int ret;
66 72
67 if (!args->align) 73 if (!args->align)
@@ -72,8 +78,10 @@ nv04_instobj_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
72 if (ret) 78 if (ret)
73 return ret; 79 return ret;
74 80
81 mutex_lock(&subdev->mutex);
75 ret = nvkm_mm_head(&priv->heap, 0, 1, args->size, args->size, 82 ret = nvkm_mm_head(&priv->heap, 0, 1, args->size, args->size,
76 args->align, &node->mem); 83 args->align, &node->mem);
84 mutex_unlock(&subdev->mutex);
77 if (ret) 85 if (ret)
78 return ret; 86 return ret;
79 87
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index dd39f434b4a7..c3872598b85a 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -2299,8 +2299,7 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
2299 encoder_mode = atombios_get_encoder_mode(encoder); 2299 encoder_mode = atombios_get_encoder_mode(encoder);
2300 if (connector && (radeon_audio != 0) && 2300 if (connector && (radeon_audio != 0) &&
2301 ((encoder_mode == ATOM_ENCODER_MODE_HDMI) || 2301 ((encoder_mode == ATOM_ENCODER_MODE_HDMI) ||
2302 (ENCODER_MODE_IS_DP(encoder_mode) && 2302 ENCODER_MODE_IS_DP(encoder_mode)))
2303 drm_detect_monitor_audio(radeon_connector_edid(connector)))))
2304 radeon_audio_mode_set(encoder, adjusted_mode); 2303 radeon_audio_mode_set(encoder, adjusted_mode);
2305} 2304}
2306 2305
diff --git a/drivers/gpu/drm/radeon/dce6_afmt.c b/drivers/gpu/drm/radeon/dce6_afmt.c
index 68fd9fc677e3..44480c1b9738 100644
--- a/drivers/gpu/drm/radeon/dce6_afmt.c
+++ b/drivers/gpu/drm/radeon/dce6_afmt.c
@@ -93,30 +93,26 @@ void dce6_afmt_select_pin(struct drm_encoder *encoder)
93 struct radeon_device *rdev = encoder->dev->dev_private; 93 struct radeon_device *rdev = encoder->dev->dev_private;
94 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 94 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
95 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 95 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
96 u32 offset;
97 96
98 if (!dig || !dig->afmt || !dig->afmt->pin) 97 if (!dig || !dig->afmt || !dig->pin)
99 return; 98 return;
100 99
101 offset = dig->afmt->offset; 100 WREG32(AFMT_AUDIO_SRC_CONTROL + dig->afmt->offset,
102 101 AFMT_AUDIO_SRC_SELECT(dig->pin->id));
103 WREG32(AFMT_AUDIO_SRC_CONTROL + offset,
104 AFMT_AUDIO_SRC_SELECT(dig->afmt->pin->id));
105} 102}
106 103
107void dce6_afmt_write_latency_fields(struct drm_encoder *encoder, 104void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
108 struct drm_connector *connector, struct drm_display_mode *mode) 105 struct drm_connector *connector,
106 struct drm_display_mode *mode)
109{ 107{
110 struct radeon_device *rdev = encoder->dev->dev_private; 108 struct radeon_device *rdev = encoder->dev->dev_private;
111 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 109 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
112 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 110 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
113 u32 tmp = 0, offset; 111 u32 tmp = 0;
114 112
115 if (!dig || !dig->afmt || !dig->afmt->pin) 113 if (!dig || !dig->afmt || !dig->pin)
116 return; 114 return;
117 115
118 offset = dig->afmt->pin->offset;
119
120 if (mode->flags & DRM_MODE_FLAG_INTERLACE) { 116 if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
121 if (connector->latency_present[1]) 117 if (connector->latency_present[1])
122 tmp = VIDEO_LIPSYNC(connector->video_latency[1]) | 118 tmp = VIDEO_LIPSYNC(connector->video_latency[1]) |
@@ -130,24 +126,24 @@ void dce6_afmt_write_latency_fields(struct drm_encoder *encoder,
130 else 126 else
131 tmp = VIDEO_LIPSYNC(0) | AUDIO_LIPSYNC(0); 127 tmp = VIDEO_LIPSYNC(0) | AUDIO_LIPSYNC(0);
132 } 128 }
133 WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp); 129 WREG32_ENDPOINT(dig->pin->offset,
130 AZ_F0_CODEC_PIN_CONTROL_RESPONSE_LIPSYNC, tmp);
134} 131}
135 132
136void dce6_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder, 133void dce6_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder,
137 u8 *sadb, int sad_count) 134 u8 *sadb, int sad_count)
138{ 135{
139 struct radeon_device *rdev = encoder->dev->dev_private; 136 struct radeon_device *rdev = encoder->dev->dev_private;
140 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 137 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
141 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 138 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
142 u32 offset, tmp; 139 u32 tmp;
143 140
144 if (!dig || !dig->afmt || !dig->afmt->pin) 141 if (!dig || !dig->afmt || !dig->pin)
145 return; 142 return;
146 143
147 offset = dig->afmt->pin->offset;
148
149 /* program the speaker allocation */ 144 /* program the speaker allocation */
150 tmp = RREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER); 145 tmp = RREG32_ENDPOINT(dig->pin->offset,
146 AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
151 tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK); 147 tmp &= ~(DP_CONNECTION | SPEAKER_ALLOCATION_MASK);
152 /* set HDMI mode */ 148 /* set HDMI mode */
153 tmp |= HDMI_CONNECTION; 149 tmp |= HDMI_CONNECTION;
@@ -155,24 +151,24 @@ void dce6_afmt_hdmi_write_speaker_allocation(struct drm_encoder *encoder,
155 tmp |= SPEAKER_ALLOCATION(sadb[0]); 151 tmp |= SPEAKER_ALLOCATION(sadb[0]);
156 else 152 else
157 tmp |= SPEAKER_ALLOCATION(5); /* stereo */ 153 tmp |= SPEAKER_ALLOCATION(5); /* stereo */
158 WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp); 154 WREG32_ENDPOINT(dig->pin->offset,
155 AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
159} 156}
160 157
161void dce6_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder, 158void dce6_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder,
162 u8 *sadb, int sad_count) 159 u8 *sadb, int sad_count)
163{ 160{
164 struct radeon_device *rdev = encoder->dev->dev_private; 161 struct radeon_device *rdev = encoder->dev->dev_private;
165 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 162 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
166 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 163 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
167 u32 offset, tmp; 164 u32 tmp;
168 165
169 if (!dig || !dig->afmt || !dig->afmt->pin) 166 if (!dig || !dig->afmt || !dig->pin)
170 return; 167 return;
171 168
172 offset = dig->afmt->pin->offset;
173
174 /* program the speaker allocation */ 169 /* program the speaker allocation */
175 tmp = RREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER); 170 tmp = RREG32_ENDPOINT(dig->pin->offset,
171 AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER);
176 tmp &= ~(HDMI_CONNECTION | SPEAKER_ALLOCATION_MASK); 172 tmp &= ~(HDMI_CONNECTION | SPEAKER_ALLOCATION_MASK);
177 /* set DP mode */ 173 /* set DP mode */
178 tmp |= DP_CONNECTION; 174 tmp |= DP_CONNECTION;
@@ -180,13 +176,13 @@ void dce6_afmt_dp_write_speaker_allocation(struct drm_encoder *encoder,
180 tmp |= SPEAKER_ALLOCATION(sadb[0]); 176 tmp |= SPEAKER_ALLOCATION(sadb[0]);
181 else 177 else
182 tmp |= SPEAKER_ALLOCATION(5); /* stereo */ 178 tmp |= SPEAKER_ALLOCATION(5); /* stereo */
183 WREG32_ENDPOINT(offset, AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp); 179 WREG32_ENDPOINT(dig->pin->offset,
180 AZ_F0_CODEC_PIN_CONTROL_CHANNEL_SPEAKER, tmp);
184} 181}
185 182
186void dce6_afmt_write_sad_regs(struct drm_encoder *encoder, 183void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
187 struct cea_sad *sads, int sad_count) 184 struct cea_sad *sads, int sad_count)
188{ 185{
189 u32 offset;
190 int i; 186 int i;
191 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 187 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
192 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 188 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
@@ -206,11 +202,9 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
206 { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO }, 202 { AZ_F0_CODEC_PIN_CONTROL_AUDIO_DESCRIPTOR13, HDMI_AUDIO_CODING_TYPE_WMA_PRO },
207 }; 203 };
208 204
209 if (!dig || !dig->afmt || !dig->afmt->pin) 205 if (!dig || !dig->afmt || !dig->pin)
210 return; 206 return;
211 207
212 offset = dig->afmt->pin->offset;
213
214 for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) { 208 for (i = 0; i < ARRAY_SIZE(eld_reg_to_type); i++) {
215 u32 value = 0; 209 u32 value = 0;
216 u8 stereo_freqs = 0; 210 u8 stereo_freqs = 0;
@@ -237,7 +231,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder,
237 231
238 value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs); 232 value |= SUPPORTED_FREQUENCIES_STEREO(stereo_freqs);
239 233
240 WREG32_ENDPOINT(offset, eld_reg_to_type[i][0], value); 234 WREG32_ENDPOINT(dig->pin->offset, eld_reg_to_type[i][0], value);
241 } 235 }
242} 236}
243 237
@@ -253,7 +247,7 @@ void dce6_audio_enable(struct radeon_device *rdev,
253} 247}
254 248
255void dce6_hdmi_audio_set_dto(struct radeon_device *rdev, 249void dce6_hdmi_audio_set_dto(struct radeon_device *rdev,
256 struct radeon_crtc *crtc, unsigned int clock) 250 struct radeon_crtc *crtc, unsigned int clock)
257{ 251{
258 /* Two dtos; generally use dto0 for HDMI */ 252 /* Two dtos; generally use dto0 for HDMI */
259 u32 value = 0; 253 u32 value = 0;
@@ -272,7 +266,7 @@ void dce6_hdmi_audio_set_dto(struct radeon_device *rdev,
272} 266}
273 267
274void dce6_dp_audio_set_dto(struct radeon_device *rdev, 268void dce6_dp_audio_set_dto(struct radeon_device *rdev,
275 struct radeon_crtc *crtc, unsigned int clock) 269 struct radeon_crtc *crtc, unsigned int clock)
276{ 270{
277 /* Two dtos; generally use dto1 for DP */ 271 /* Two dtos; generally use dto1 for DP */
278 u32 value = 0; 272 u32 value = 0;
diff --git a/drivers/gpu/drm/radeon/radeon_audio.c b/drivers/gpu/drm/radeon/radeon_audio.c
index fa719c53449b..fbc8d88d6e5d 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.c
+++ b/drivers/gpu/drm/radeon/radeon_audio.c
@@ -245,6 +245,28 @@ static struct radeon_audio_funcs dce6_dp_funcs = {
245static void radeon_audio_enable(struct radeon_device *rdev, 245static void radeon_audio_enable(struct radeon_device *rdev,
246 struct r600_audio_pin *pin, u8 enable_mask) 246 struct r600_audio_pin *pin, u8 enable_mask)
247{ 247{
248 struct drm_encoder *encoder;
249 struct radeon_encoder *radeon_encoder;
250 struct radeon_encoder_atom_dig *dig;
251 int pin_count = 0;
252
253 if (!pin)
254 return;
255
256 if (rdev->mode_info.mode_config_initialized) {
257 list_for_each_entry(encoder, &rdev->ddev->mode_config.encoder_list, head) {
258 if (radeon_encoder_is_digital(encoder)) {
259 radeon_encoder = to_radeon_encoder(encoder);
260 dig = radeon_encoder->enc_priv;
261 if (dig->pin == pin)
262 pin_count++;
263 }
264 }
265
266 if ((pin_count > 1) && (enable_mask == 0))
267 return;
268 }
269
248 if (rdev->audio.funcs->enable) 270 if (rdev->audio.funcs->enable)
249 rdev->audio.funcs->enable(rdev, pin, enable_mask); 271 rdev->audio.funcs->enable(rdev, pin, enable_mask);
250} 272}
@@ -336,24 +358,13 @@ void radeon_audio_endpoint_wreg(struct radeon_device *rdev, u32 offset,
336 358
337static void radeon_audio_write_sad_regs(struct drm_encoder *encoder) 359static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
338{ 360{
339 struct radeon_encoder *radeon_encoder; 361 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
340 struct drm_connector *connector; 362 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
341 struct radeon_connector *radeon_connector = NULL;
342 struct cea_sad *sads; 363 struct cea_sad *sads;
343 int sad_count; 364 int sad_count;
344 365
345 list_for_each_entry(connector, 366 if (!connector)
346 &encoder->dev->mode_config.connector_list, head) {
347 if (connector->encoder == encoder) {
348 radeon_connector = to_radeon_connector(connector);
349 break;
350 }
351 }
352
353 if (!radeon_connector) {
354 DRM_ERROR("Couldn't find encoder's connector\n");
355 return; 367 return;
356 }
357 368
358 sad_count = drm_edid_to_sad(radeon_connector_edid(connector), &sads); 369 sad_count = drm_edid_to_sad(radeon_connector_edid(connector), &sads);
359 if (sad_count <= 0) { 370 if (sad_count <= 0) {
@@ -362,8 +373,6 @@ static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
362 } 373 }
363 BUG_ON(!sads); 374 BUG_ON(!sads);
364 375
365 radeon_encoder = to_radeon_encoder(encoder);
366
367 if (radeon_encoder->audio && radeon_encoder->audio->write_sad_regs) 376 if (radeon_encoder->audio && radeon_encoder->audio->write_sad_regs)
368 radeon_encoder->audio->write_sad_regs(encoder, sads, sad_count); 377 radeon_encoder->audio->write_sad_regs(encoder, sads, sad_count);
369 378
@@ -372,27 +381,16 @@ static void radeon_audio_write_sad_regs(struct drm_encoder *encoder)
372 381
373static void radeon_audio_write_speaker_allocation(struct drm_encoder *encoder) 382static void radeon_audio_write_speaker_allocation(struct drm_encoder *encoder)
374{ 383{
384 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
375 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 385 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
376 struct drm_connector *connector;
377 struct radeon_connector *radeon_connector = NULL;
378 u8 *sadb = NULL; 386 u8 *sadb = NULL;
379 int sad_count; 387 int sad_count;
380 388
381 list_for_each_entry(connector, 389 if (!connector)
382 &encoder->dev->mode_config.connector_list, head) {
383 if (connector->encoder == encoder) {
384 radeon_connector = to_radeon_connector(connector);
385 break;
386 }
387 }
388
389 if (!radeon_connector) {
390 DRM_ERROR("Couldn't find encoder's connector\n");
391 return; 390 return;
392 }
393 391
394 sad_count = drm_edid_to_speaker_allocation( 392 sad_count = drm_edid_to_speaker_allocation(radeon_connector_edid(connector),
395 radeon_connector_edid(connector), &sadb); 393 &sadb);
396 if (sad_count < 0) { 394 if (sad_count < 0) {
397 DRM_DEBUG("Couldn't read Speaker Allocation Data Block: %d\n", 395 DRM_DEBUG("Couldn't read Speaker Allocation Data Block: %d\n",
398 sad_count); 396 sad_count);
@@ -406,26 +404,13 @@ static void radeon_audio_write_speaker_allocation(struct drm_encoder *encoder)
406} 404}
407 405
408static void radeon_audio_write_latency_fields(struct drm_encoder *encoder, 406static void radeon_audio_write_latency_fields(struct drm_encoder *encoder,
409 struct drm_display_mode *mode) 407 struct drm_display_mode *mode)
410{ 408{
411 struct radeon_encoder *radeon_encoder; 409 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
412 struct drm_connector *connector; 410 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
413 struct radeon_connector *radeon_connector = 0;
414
415 list_for_each_entry(connector,
416 &encoder->dev->mode_config.connector_list, head) {
417 if (connector->encoder == encoder) {
418 radeon_connector = to_radeon_connector(connector);
419 break;
420 }
421 }
422 411
423 if (!radeon_connector) { 412 if (!connector)
424 DRM_ERROR("Couldn't find encoder's connector\n");
425 return; 413 return;
426 }
427
428 radeon_encoder = to_radeon_encoder(encoder);
429 414
430 if (radeon_encoder->audio && radeon_encoder->audio->write_latency_fields) 415 if (radeon_encoder->audio && radeon_encoder->audio->write_latency_fields)
431 radeon_encoder->audio->write_latency_fields(encoder, connector, mode); 416 radeon_encoder->audio->write_latency_fields(encoder, connector, mode);
@@ -451,29 +436,23 @@ static void radeon_audio_select_pin(struct drm_encoder *encoder)
451} 436}
452 437
453void radeon_audio_detect(struct drm_connector *connector, 438void radeon_audio_detect(struct drm_connector *connector,
439 struct drm_encoder *encoder,
454 enum drm_connector_status status) 440 enum drm_connector_status status)
455{ 441{
456 struct radeon_device *rdev; 442 struct drm_device *dev = connector->dev;
457 struct radeon_encoder *radeon_encoder; 443 struct radeon_device *rdev = dev->dev_private;
444 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
458 struct radeon_encoder_atom_dig *dig; 445 struct radeon_encoder_atom_dig *dig;
459 446
460 if (!connector || !connector->encoder) 447 if (!radeon_audio_chipset_supported(rdev))
461 return; 448 return;
462 449
463 rdev = connector->encoder->dev->dev_private; 450 if (!radeon_encoder_is_digital(encoder))
464
465 if (!radeon_audio_chipset_supported(rdev))
466 return; 451 return;
467 452
468 radeon_encoder = to_radeon_encoder(connector->encoder);
469 dig = radeon_encoder->enc_priv; 453 dig = radeon_encoder->enc_priv;
470 454
471 if (status == connector_status_connected) { 455 if (status == connector_status_connected) {
472 if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) {
473 radeon_encoder->audio = NULL;
474 return;
475 }
476
477 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) { 456 if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
478 struct radeon_connector *radeon_connector = to_radeon_connector(connector); 457 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
479 458
@@ -486,11 +465,17 @@ void radeon_audio_detect(struct drm_connector *connector,
486 radeon_encoder->audio = rdev->audio.hdmi_funcs; 465 radeon_encoder->audio = rdev->audio.hdmi_funcs;
487 } 466 }
488 467
489 dig->afmt->pin = radeon_audio_get_pin(connector->encoder); 468 if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
490 radeon_audio_enable(rdev, dig->afmt->pin, 0xf); 469 if (!dig->pin)
470 dig->pin = radeon_audio_get_pin(encoder);
471 radeon_audio_enable(rdev, dig->pin, 0xf);
472 } else {
473 radeon_audio_enable(rdev, dig->pin, 0);
474 dig->pin = NULL;
475 }
491 } else { 476 } else {
492 radeon_audio_enable(rdev, dig->afmt->pin, 0); 477 radeon_audio_enable(rdev, dig->pin, 0);
493 dig->afmt->pin = NULL; 478 dig->pin = NULL;
494 } 479 }
495} 480}
496 481
@@ -518,29 +503,18 @@ static void radeon_audio_set_dto(struct drm_encoder *encoder, unsigned int clock
518} 503}
519 504
520static int radeon_audio_set_avi_packet(struct drm_encoder *encoder, 505static int radeon_audio_set_avi_packet(struct drm_encoder *encoder,
521 struct drm_display_mode *mode) 506 struct drm_display_mode *mode)
522{ 507{
523 struct radeon_device *rdev = encoder->dev->dev_private; 508 struct radeon_device *rdev = encoder->dev->dev_private;
524 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 509 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
525 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 510 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
526 struct drm_connector *connector; 511 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
527 struct radeon_connector *radeon_connector = NULL;
528 u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE]; 512 u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE];
529 struct hdmi_avi_infoframe frame; 513 struct hdmi_avi_infoframe frame;
530 int err; 514 int err;
531 515
532 list_for_each_entry(connector, 516 if (!connector)
533 &encoder->dev->mode_config.connector_list, head) { 517 return -EINVAL;
534 if (connector->encoder == encoder) {
535 radeon_connector = to_radeon_connector(connector);
536 break;
537 }
538 }
539
540 if (!radeon_connector) {
541 DRM_ERROR("Couldn't find encoder's connector\n");
542 return -ENOENT;
543 }
544 518
545 err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode); 519 err = drm_hdmi_avi_infoframe_from_display_mode(&frame, mode);
546 if (err < 0) { 520 if (err < 0) {
@@ -563,8 +537,8 @@ static int radeon_audio_set_avi_packet(struct drm_encoder *encoder,
563 return err; 537 return err;
564 } 538 }
565 539
566 if (dig && dig->afmt && 540 if (dig && dig->afmt && radeon_encoder->audio &&
567 radeon_encoder->audio && radeon_encoder->audio->set_avi_packet) 541 radeon_encoder->audio->set_avi_packet)
568 radeon_encoder->audio->set_avi_packet(rdev, dig->afmt->offset, 542 radeon_encoder->audio->set_avi_packet(rdev, dig->afmt->offset,
569 buffer, sizeof(buffer)); 543 buffer, sizeof(buffer));
570 544
@@ -722,30 +696,41 @@ static void radeon_audio_hdmi_mode_set(struct drm_encoder *encoder,
722{ 696{
723 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 697 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
724 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 698 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
699 struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
725 700
726 if (!dig || !dig->afmt) 701 if (!dig || !dig->afmt)
727 return; 702 return;
728 703
729 radeon_audio_set_mute(encoder, true); 704 if (!connector)
705 return;
730 706
731 radeon_audio_write_speaker_allocation(encoder); 707 if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
732 radeon_audio_write_sad_regs(encoder); 708 radeon_audio_set_mute(encoder, true);
733 radeon_audio_write_latency_fields(encoder, mode);
734 radeon_audio_set_dto(encoder, mode->clock);
735 radeon_audio_set_vbi_packet(encoder);
736 radeon_hdmi_set_color_depth(encoder);
737 radeon_audio_update_acr(encoder, mode->clock);
738 radeon_audio_set_audio_packet(encoder);
739 radeon_audio_select_pin(encoder);
740 709
741 if (radeon_audio_set_avi_packet(encoder, mode) < 0) 710 radeon_audio_write_speaker_allocation(encoder);
742 return; 711 radeon_audio_write_sad_regs(encoder);
712 radeon_audio_write_latency_fields(encoder, mode);
713 radeon_audio_set_dto(encoder, mode->clock);
714 radeon_audio_set_vbi_packet(encoder);
715 radeon_hdmi_set_color_depth(encoder);
716 radeon_audio_update_acr(encoder, mode->clock);
717 radeon_audio_set_audio_packet(encoder);
718 radeon_audio_select_pin(encoder);
719
720 if (radeon_audio_set_avi_packet(encoder, mode) < 0)
721 return;
743 722
744 radeon_audio_set_mute(encoder, false); 723 radeon_audio_set_mute(encoder, false);
724 } else {
725 radeon_hdmi_set_color_depth(encoder);
726
727 if (radeon_audio_set_avi_packet(encoder, mode) < 0)
728 return;
729 }
745} 730}
746 731
747static void radeon_audio_dp_mode_set(struct drm_encoder *encoder, 732static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
748 struct drm_display_mode *mode) 733 struct drm_display_mode *mode)
749{ 734{
750 struct drm_device *dev = encoder->dev; 735 struct drm_device *dev = encoder->dev;
751 struct radeon_device *rdev = dev->dev_private; 736 struct radeon_device *rdev = dev->dev_private;
@@ -759,22 +744,27 @@ static void radeon_audio_dp_mode_set(struct drm_encoder *encoder,
759 if (!dig || !dig->afmt) 744 if (!dig || !dig->afmt)
760 return; 745 return;
761 746
762 radeon_audio_write_speaker_allocation(encoder); 747 if (!connector)
763 radeon_audio_write_sad_regs(encoder);
764 radeon_audio_write_latency_fields(encoder, mode);
765 if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev))
766 radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10);
767 else
768 radeon_audio_set_dto(encoder, dig_connector->dp_clock);
769 radeon_audio_set_audio_packet(encoder);
770 radeon_audio_select_pin(encoder);
771
772 if (radeon_audio_set_avi_packet(encoder, mode) < 0)
773 return; 748 return;
749
750 if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
751 radeon_audio_write_speaker_allocation(encoder);
752 radeon_audio_write_sad_regs(encoder);
753 radeon_audio_write_latency_fields(encoder, mode);
754 if (rdev->clock.dp_extclk || ASIC_IS_DCE5(rdev))
755 radeon_audio_set_dto(encoder, rdev->clock.default_dispclk * 10);
756 else
757 radeon_audio_set_dto(encoder, dig_connector->dp_clock);
758 radeon_audio_set_audio_packet(encoder);
759 radeon_audio_select_pin(encoder);
760
761 if (radeon_audio_set_avi_packet(encoder, mode) < 0)
762 return;
763 }
774} 764}
775 765
776void radeon_audio_mode_set(struct drm_encoder *encoder, 766void radeon_audio_mode_set(struct drm_encoder *encoder,
777 struct drm_display_mode *mode) 767 struct drm_display_mode *mode)
778{ 768{
779 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 769 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
780 770
diff --git a/drivers/gpu/drm/radeon/radeon_audio.h b/drivers/gpu/drm/radeon/radeon_audio.h
index 8438304f7139..059cc3012062 100644
--- a/drivers/gpu/drm/radeon/radeon_audio.h
+++ b/drivers/gpu/drm/radeon/radeon_audio.h
@@ -68,7 +68,8 @@ struct radeon_audio_funcs
68 68
69int radeon_audio_init(struct radeon_device *rdev); 69int radeon_audio_init(struct radeon_device *rdev);
70void radeon_audio_detect(struct drm_connector *connector, 70void radeon_audio_detect(struct drm_connector *connector,
71 enum drm_connector_status status); 71 struct drm_encoder *encoder,
72 enum drm_connector_status status);
72u32 radeon_audio_endpoint_rreg(struct radeon_device *rdev, 73u32 radeon_audio_endpoint_rreg(struct radeon_device *rdev,
73 u32 offset, u32 reg); 74 u32 offset, u32 reg);
74void radeon_audio_endpoint_wreg(struct radeon_device *rdev, 75void radeon_audio_endpoint_wreg(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c
index 3e5f6b71f3ad..c097d3a82bda 100644
--- a/drivers/gpu/drm/radeon/radeon_combios.c
+++ b/drivers/gpu/drm/radeon/radeon_combios.c
@@ -1255,10 +1255,15 @@ struct radeon_encoder_lvds *radeon_combios_get_lvds_info(struct radeon_encoder
1255 1255
1256 if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) && 1256 if ((RBIOS16(tmp) == lvds->native_mode.hdisplay) &&
1257 (RBIOS16(tmp + 2) == lvds->native_mode.vdisplay)) { 1257 (RBIOS16(tmp + 2) == lvds->native_mode.vdisplay)) {
1258 u32 hss = (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8;
1259
1260 if (hss > lvds->native_mode.hdisplay)
1261 hss = (10 - 1) * 8;
1262
1258 lvds->native_mode.htotal = lvds->native_mode.hdisplay + 1263 lvds->native_mode.htotal = lvds->native_mode.hdisplay +
1259 (RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8; 1264 (RBIOS16(tmp + 17) - RBIOS16(tmp + 19)) * 8;
1260 lvds->native_mode.hsync_start = lvds->native_mode.hdisplay + 1265 lvds->native_mode.hsync_start = lvds->native_mode.hdisplay +
1261 (RBIOS16(tmp + 21) - RBIOS16(tmp + 19) - 1) * 8; 1266 hss;
1262 lvds->native_mode.hsync_end = lvds->native_mode.hsync_start + 1267 lvds->native_mode.hsync_end = lvds->native_mode.hsync_start +
1263 (RBIOS8(tmp + 23) * 8); 1268 (RBIOS8(tmp + 23) * 8);
1264 1269
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index cebb65e07e1d..94b21ae70ef7 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -1379,8 +1379,16 @@ out:
1379 /* updated in get modes as well since we need to know if it's analog or digital */ 1379 /* updated in get modes as well since we need to know if it's analog or digital */
1380 radeon_connector_update_scratch_regs(connector, ret); 1380 radeon_connector_update_scratch_regs(connector, ret);
1381 1381
1382 if (radeon_audio != 0) 1382 if ((radeon_audio != 0) && radeon_connector->use_digital) {
1383 radeon_audio_detect(connector, ret); 1383 const struct drm_connector_helper_funcs *connector_funcs =
1384 connector->helper_private;
1385
1386 encoder = connector_funcs->best_encoder(connector);
1387 if (encoder && (encoder->encoder_type == DRM_MODE_ENCODER_TMDS)) {
1388 radeon_connector_get_edid(connector);
1389 radeon_audio_detect(connector, encoder, ret);
1390 }
1391 }
1384 1392
1385exit: 1393exit:
1386 pm_runtime_mark_last_busy(connector->dev->dev); 1394 pm_runtime_mark_last_busy(connector->dev->dev);
@@ -1717,8 +1725,10 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
1717 1725
1718 radeon_connector_update_scratch_regs(connector, ret); 1726 radeon_connector_update_scratch_regs(connector, ret);
1719 1727
1720 if (radeon_audio != 0) 1728 if ((radeon_audio != 0) && encoder) {
1721 radeon_audio_detect(connector, ret); 1729 radeon_connector_get_edid(connector);
1730 radeon_audio_detect(connector, encoder, ret);
1731 }
1722 1732
1723out: 1733out:
1724 pm_runtime_mark_last_busy(connector->dev->dev); 1734 pm_runtime_mark_last_busy(connector->dev->dev);
diff --git a/drivers/gpu/drm/radeon/radeon_mode.h b/drivers/gpu/drm/radeon/radeon_mode.h
index 07909d817381..aecc3e3dec0c 100644
--- a/drivers/gpu/drm/radeon/radeon_mode.h
+++ b/drivers/gpu/drm/radeon/radeon_mode.h
@@ -237,7 +237,6 @@ struct radeon_afmt {
237 int offset; 237 int offset;
238 bool last_buffer_filled_status; 238 bool last_buffer_filled_status;
239 int id; 239 int id;
240 struct r600_audio_pin *pin;
241}; 240};
242 241
243struct radeon_mode_info { 242struct radeon_mode_info {
@@ -439,6 +438,7 @@ struct radeon_encoder_atom_dig {
439 uint8_t backlight_level; 438 uint8_t backlight_level;
440 int panel_mode; 439 int panel_mode;
441 struct radeon_afmt *afmt; 440 struct radeon_afmt *afmt;
441 struct r600_audio_pin *pin;
442 int active_mst_links; 442 int active_mst_links;
443}; 443};
444 444
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index 882cccdad272..ac6fe40b99f7 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -490,7 +490,8 @@ pgprot_t ttm_io_prot(uint32_t caching_flags, pgprot_t tmp)
490 else if (boot_cpu_data.x86 > 3) 490 else if (boot_cpu_data.x86 > 3)
491 tmp = pgprot_noncached(tmp); 491 tmp = pgprot_noncached(tmp);
492#endif 492#endif
493#if defined(__ia64__) || defined(__arm__) || defined(__powerpc__) 493#if defined(__ia64__) || defined(__arm__) || defined(__aarch64__) || \
494 defined(__powerpc__)
494 if (caching_flags & TTM_PL_FLAG_WC) 495 if (caching_flags & TTM_PL_FLAG_WC)
495 tmp = pgprot_writecombine(tmp); 496 tmp = pgprot_writecombine(tmp);
496 else 497 else
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
index 654c8daeb5ab..97ad3bcb99a7 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c
@@ -2492,7 +2492,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
2492 ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes, 2492 ret = ttm_eu_reserve_buffers(&ticket, &sw_context->validate_nodes,
2493 true, NULL); 2493 true, NULL);
2494 if (unlikely(ret != 0)) 2494 if (unlikely(ret != 0))
2495 goto out_err; 2495 goto out_err_nores;
2496 2496
2497 ret = vmw_validate_buffers(dev_priv, sw_context); 2497 ret = vmw_validate_buffers(dev_priv, sw_context);
2498 if (unlikely(ret != 0)) 2498 if (unlikely(ret != 0))
@@ -2536,6 +2536,7 @@ int vmw_execbuf_process(struct drm_file *file_priv,
2536 vmw_resource_relocations_free(&sw_context->res_relocations); 2536 vmw_resource_relocations_free(&sw_context->res_relocations);
2537 2537
2538 vmw_fifo_commit(dev_priv, command_size); 2538 vmw_fifo_commit(dev_priv, command_size);
2539 mutex_unlock(&dev_priv->binding_mutex);
2539 2540
2540 vmw_query_bo_switch_commit(dev_priv, sw_context); 2541 vmw_query_bo_switch_commit(dev_priv, sw_context);
2541 ret = vmw_execbuf_fence_commands(file_priv, dev_priv, 2542 ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
@@ -2551,7 +2552,6 @@ int vmw_execbuf_process(struct drm_file *file_priv,
2551 DRM_ERROR("Fence submission error. Syncing.\n"); 2552 DRM_ERROR("Fence submission error. Syncing.\n");
2552 2553
2553 vmw_resource_list_unreserve(&sw_context->resource_list, false); 2554 vmw_resource_list_unreserve(&sw_context->resource_list, false);
2554 mutex_unlock(&dev_priv->binding_mutex);
2555 2555
2556 ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes, 2556 ttm_eu_fence_buffer_objects(&ticket, &sw_context->validate_nodes,
2557 (void *) fence); 2557 (void *) fence);
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index f822fd2a1ada..884d82f9190e 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -546,6 +546,12 @@ static const struct hid_device_id apple_devices[] = {
546 .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD }, 546 .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
547 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS), 547 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS),
548 .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS }, 548 .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
549 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI),
550 .driver_data = APPLE_HAS_FN },
551 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO),
552 .driver_data = APPLE_HAS_FN | APPLE_ISO_KEYBOARD },
553 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS),
554 .driver_data = APPLE_HAS_FN | APPLE_RDESC_JIS },
549 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI), 555 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI),
550 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN }, 556 .driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
551 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO), 557 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO),
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 157c62775053..e6fce23b121a 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1782,6 +1782,9 @@ static const struct hid_device_id hid_have_special_driver[] = {
1782 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) }, 1782 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) },
1783 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) }, 1783 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) },
1784 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) }, 1784 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) },
1785 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI) },
1786 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO) },
1787 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS) },
1785 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) }, 1788 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ANSI) },
1786 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) }, 1789 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_ISO) },
1787 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) }, 1790 { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2009_JIS) },
@@ -2463,6 +2466,9 @@ static const struct hid_device_id hid_mouse_ignore_list[] = {
2463 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) }, 2466 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI) },
2464 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) }, 2467 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_ISO) },
2465 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) }, 2468 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING8_JIS) },
2469 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI) },
2470 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_ISO) },
2471 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING9_JIS) },
2466 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) }, 2472 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
2467 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) }, 2473 { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
2468 { } 2474 { }
diff --git a/drivers/hid/hid-cp2112.c b/drivers/hid/hid-cp2112.c
index 3318de690e00..a2dbbbe0d8d7 100644
--- a/drivers/hid/hid-cp2112.c
+++ b/drivers/hid/hid-cp2112.c
@@ -356,6 +356,8 @@ static int cp2112_read(struct cp2112_device *dev, u8 *data, size_t size)
356 struct cp2112_force_read_report report; 356 struct cp2112_force_read_report report;
357 int ret; 357 int ret;
358 358
359 if (size > sizeof(dev->read_data))
360 size = sizeof(dev->read_data);
359 report.report = CP2112_DATA_READ_FORCE_SEND; 361 report.report = CP2112_DATA_READ_FORCE_SEND;
360 report.length = cpu_to_be16(size); 362 report.length = cpu_to_be16(size);
361 363
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index b04b0820d816..b3b225b75d0a 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -142,6 +142,9 @@
142#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290 142#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290
143#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291 143#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291
144#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292 144#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292
145#define USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI 0x0272
146#define USB_DEVICE_ID_APPLE_WELLSPRING9_ISO 0x0273
147#define USB_DEVICE_ID_APPLE_WELLSPRING9_JIS 0x0274
145#define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a 148#define USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY 0x030a
146#define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b 149#define USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY 0x030b
147#define USB_DEVICE_ID_APPLE_IRCONTROL 0x8240 150#define USB_DEVICE_ID_APPLE_IRCONTROL 0x8240
diff --git a/drivers/hid/hid-input.c b/drivers/hid/hid-input.c
index 3511bbaba505..e3c63640df73 100644
--- a/drivers/hid/hid-input.c
+++ b/drivers/hid/hid-input.c
@@ -462,12 +462,15 @@ out:
462 462
463static void hidinput_cleanup_battery(struct hid_device *dev) 463static void hidinput_cleanup_battery(struct hid_device *dev)
464{ 464{
465 const struct power_supply_desc *psy_desc;
466
465 if (!dev->battery) 467 if (!dev->battery)
466 return; 468 return;
467 469
470 psy_desc = dev->battery->desc;
468 power_supply_unregister(dev->battery); 471 power_supply_unregister(dev->battery);
469 kfree(dev->battery->desc->name); 472 kfree(psy_desc->name);
470 kfree(dev->battery->desc); 473 kfree(psy_desc);
471 dev->battery = NULL; 474 dev->battery = NULL;
472} 475}
473#else /* !CONFIG_HID_BATTERY_STRENGTH */ 476#else /* !CONFIG_HID_BATTERY_STRENGTH */
diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
index 6a9b05b328a9..7c811252c1ce 100644
--- a/drivers/hid/hid-multitouch.c
+++ b/drivers/hid/hid-multitouch.c
@@ -778,9 +778,16 @@ static int mt_input_mapping(struct hid_device *hdev, struct hid_input *hi,
778 /* 778 /*
779 * some egalax touchscreens have "application == HID_DG_TOUCHSCREEN" 779 * some egalax touchscreens have "application == HID_DG_TOUCHSCREEN"
780 * for the stylus. 780 * for the stylus.
781 * The check for mt_report_id ensures we don't process
782 * HID_DG_CONTACTCOUNT from the pen report as it is outside the physical
783 * collection, but within the report ID.
781 */ 784 */
782 if (field->physical == HID_DG_STYLUS) 785 if (field->physical == HID_DG_STYLUS)
783 return 0; 786 return 0;
787 else if ((field->physical == 0) &&
788 (field->report->id != td->mt_report_id) &&
789 (td->mt_report_id != -1))
790 return 0;
784 791
785 if (field->application == HID_DG_TOUCHSCREEN || 792 if (field->application == HID_DG_TOUCHSCREEN ||
786 field->application == HID_DG_TOUCHPAD) 793 field->application == HID_DG_TOUCHPAD)
diff --git a/drivers/hid/hid-uclogic.c b/drivers/hid/hid-uclogic.c
index 94167310e15a..b905d501e752 100644
--- a/drivers/hid/hid-uclogic.c
+++ b/drivers/hid/hid-uclogic.c
@@ -858,7 +858,7 @@ static int uclogic_tablet_enable(struct hid_device *hdev)
858 for (p = drvdata->rdesc; 858 for (p = drvdata->rdesc;
859 p <= drvdata->rdesc + drvdata->rsize - 4;) { 859 p <= drvdata->rdesc + drvdata->rsize - 4;) {
860 if (p[0] == 0xFE && p[1] == 0xED && p[2] == 0x1D && 860 if (p[0] == 0xFE && p[1] == 0xED && p[2] == 0x1D &&
861 p[3] < sizeof(params)) { 861 p[3] < ARRAY_SIZE(params)) {
862 v = params[p[3]]; 862 v = params[p[3]];
863 put_unaligned(cpu_to_le32(v), (s32 *)p); 863 put_unaligned(cpu_to_le32(v), (s32 *)p);
864 p += 4; 864 p += 4;
diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c
index 53e7de7cb9e2..20f9a653444c 100644
--- a/drivers/hid/usbhid/hid-quirks.c
+++ b/drivers/hid/usbhid/hid-quirks.c
@@ -87,6 +87,9 @@ static const struct hid_blacklist {
87 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A, HID_QUIRK_ALWAYS_POLL }, 87 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C05A, HID_QUIRK_ALWAYS_POLL },
88 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A, HID_QUIRK_ALWAYS_POLL }, 88 { USB_VENDOR_ID_LOGITECH, USB_DEVICE_ID_LOGITECH_MOUSE_C06A, HID_QUIRK_ALWAYS_POLL },
89 { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET }, 89 { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET },
90 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_SURFACE_PRO_2, HID_QUIRK_NO_INIT_REPORTS },
91 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_2, HID_QUIRK_NO_INIT_REPORTS },
92 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TOUCH_COVER_2, HID_QUIRK_NO_INIT_REPORTS },
90 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS }, 93 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3, HID_QUIRK_NO_INIT_REPORTS },
91 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP, HID_QUIRK_NO_INIT_REPORTS }, 94 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_TYPE_COVER_3_JP, HID_QUIRK_NO_INIT_REPORTS },
92 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER, HID_QUIRK_NO_INIT_REPORTS }, 95 { USB_VENDOR_ID_MICROSOFT, USB_DEVICE_ID_MS_POWER_COVER, HID_QUIRK_NO_INIT_REPORTS },
diff --git a/drivers/hid/wacom_sys.c b/drivers/hid/wacom_sys.c
index 4c0ffca97bef..01b937e63cf3 100644
--- a/drivers/hid/wacom_sys.c
+++ b/drivers/hid/wacom_sys.c
@@ -1271,17 +1271,52 @@ fail_leds:
1271 pad_input_dev = NULL; 1271 pad_input_dev = NULL;
1272 wacom_wac->pad_registered = false; 1272 wacom_wac->pad_registered = false;
1273fail_register_pad_input: 1273fail_register_pad_input:
1274 input_unregister_device(touch_input_dev); 1274 if (touch_input_dev)
1275 input_unregister_device(touch_input_dev);
1275 wacom_wac->touch_input = NULL; 1276 wacom_wac->touch_input = NULL;
1276 wacom_wac->touch_registered = false; 1277 wacom_wac->touch_registered = false;
1277fail_register_touch_input: 1278fail_register_touch_input:
1278 input_unregister_device(pen_input_dev); 1279 if (pen_input_dev)
1280 input_unregister_device(pen_input_dev);
1279 wacom_wac->pen_input = NULL; 1281 wacom_wac->pen_input = NULL;
1280 wacom_wac->pen_registered = false; 1282 wacom_wac->pen_registered = false;
1281fail_register_pen_input: 1283fail_register_pen_input:
1282 return error; 1284 return error;
1283} 1285}
1284 1286
1287/*
1288 * Not all devices report physical dimensions from HID.
1289 * Compute the default from hardcoded logical dimension
1290 * and resolution before driver overwrites them.
1291 */
1292static void wacom_set_default_phy(struct wacom_features *features)
1293{
1294 if (features->x_resolution) {
1295 features->x_phy = (features->x_max * 100) /
1296 features->x_resolution;
1297 features->y_phy = (features->y_max * 100) /
1298 features->y_resolution;
1299 }
1300}
1301
1302static void wacom_calculate_res(struct wacom_features *features)
1303{
1304 /* set unit to "100th of a mm" for devices not reported by HID */
1305 if (!features->unit) {
1306 features->unit = 0x11;
1307 features->unitExpo = -3;
1308 }
1309
1310 features->x_resolution = wacom_calc_hid_res(features->x_max,
1311 features->x_phy,
1312 features->unit,
1313 features->unitExpo);
1314 features->y_resolution = wacom_calc_hid_res(features->y_max,
1315 features->y_phy,
1316 features->unit,
1317 features->unitExpo);
1318}
1319
1285static void wacom_wireless_work(struct work_struct *work) 1320static void wacom_wireless_work(struct work_struct *work)
1286{ 1321{
1287 struct wacom *wacom = container_of(work, struct wacom, work); 1322 struct wacom *wacom = container_of(work, struct wacom, work);
@@ -1339,6 +1374,8 @@ static void wacom_wireless_work(struct work_struct *work)
1339 if (wacom_wac1->features.type != INTUOSHT && 1374 if (wacom_wac1->features.type != INTUOSHT &&
1340 wacom_wac1->features.type != BAMBOO_PT) 1375 wacom_wac1->features.type != BAMBOO_PT)
1341 wacom_wac1->features.device_type |= WACOM_DEVICETYPE_PAD; 1376 wacom_wac1->features.device_type |= WACOM_DEVICETYPE_PAD;
1377 wacom_set_default_phy(&wacom_wac1->features);
1378 wacom_calculate_res(&wacom_wac1->features);
1342 snprintf(wacom_wac1->pen_name, WACOM_NAME_MAX, "%s (WL) Pen", 1379 snprintf(wacom_wac1->pen_name, WACOM_NAME_MAX, "%s (WL) Pen",
1343 wacom_wac1->features.name); 1380 wacom_wac1->features.name);
1344 snprintf(wacom_wac1->pad_name, WACOM_NAME_MAX, "%s (WL) Pad", 1381 snprintf(wacom_wac1->pad_name, WACOM_NAME_MAX, "%s (WL) Pad",
@@ -1357,7 +1394,9 @@ static void wacom_wireless_work(struct work_struct *work)
1357 wacom_wac2->features = 1394 wacom_wac2->features =
1358 *((struct wacom_features *)id->driver_data); 1395 *((struct wacom_features *)id->driver_data);
1359 wacom_wac2->features.pktlen = WACOM_PKGLEN_BBTOUCH3; 1396 wacom_wac2->features.pktlen = WACOM_PKGLEN_BBTOUCH3;
1397 wacom_set_default_phy(&wacom_wac2->features);
1360 wacom_wac2->features.x_max = wacom_wac2->features.y_max = 4096; 1398 wacom_wac2->features.x_max = wacom_wac2->features.y_max = 4096;
1399 wacom_calculate_res(&wacom_wac2->features);
1361 snprintf(wacom_wac2->touch_name, WACOM_NAME_MAX, 1400 snprintf(wacom_wac2->touch_name, WACOM_NAME_MAX,
1362 "%s (WL) Finger",wacom_wac2->features.name); 1401 "%s (WL) Finger",wacom_wac2->features.name);
1363 snprintf(wacom_wac2->pad_name, WACOM_NAME_MAX, 1402 snprintf(wacom_wac2->pad_name, WACOM_NAME_MAX,
@@ -1405,39 +1444,6 @@ void wacom_battery_work(struct work_struct *work)
1405 } 1444 }
1406} 1445}
1407 1446
1408/*
1409 * Not all devices report physical dimensions from HID.
1410 * Compute the default from hardcoded logical dimension
1411 * and resolution before driver overwrites them.
1412 */
1413static void wacom_set_default_phy(struct wacom_features *features)
1414{
1415 if (features->x_resolution) {
1416 features->x_phy = (features->x_max * 100) /
1417 features->x_resolution;
1418 features->y_phy = (features->y_max * 100) /
1419 features->y_resolution;
1420 }
1421}
1422
1423static void wacom_calculate_res(struct wacom_features *features)
1424{
1425 /* set unit to "100th of a mm" for devices not reported by HID */
1426 if (!features->unit) {
1427 features->unit = 0x11;
1428 features->unitExpo = -3;
1429 }
1430
1431 features->x_resolution = wacom_calc_hid_res(features->x_max,
1432 features->x_phy,
1433 features->unit,
1434 features->unitExpo);
1435 features->y_resolution = wacom_calc_hid_res(features->y_max,
1436 features->y_phy,
1437 features->unit,
1438 features->unitExpo);
1439}
1440
1441static size_t wacom_compute_pktlen(struct hid_device *hdev) 1447static size_t wacom_compute_pktlen(struct hid_device *hdev)
1442{ 1448{
1443 struct hid_report_enum *report_enum; 1449 struct hid_report_enum *report_enum;
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 232da89f4e88..0d244239e55d 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -2213,6 +2213,9 @@ void wacom_setup_device_quirks(struct wacom *wacom)
2213 features->x_max = 4096; 2213 features->x_max = 4096;
2214 features->y_max = 4096; 2214 features->y_max = 4096;
2215 } 2215 }
2216 else if (features->pktlen == WACOM_PKGLEN_BBTOUCH) {
2217 features->device_type |= WACOM_DEVICETYPE_PAD;
2218 }
2216 } 2219 }
2217 2220
2218 /* 2221 /*
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index 37c16afe007a..c8487894b312 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -929,6 +929,21 @@ static struct dmi_system_id i8k_dmi_table[] __initdata = {
929 929
930MODULE_DEVICE_TABLE(dmi, i8k_dmi_table); 930MODULE_DEVICE_TABLE(dmi, i8k_dmi_table);
931 931
932static struct dmi_system_id i8k_blacklist_dmi_table[] __initdata = {
933 {
934 /*
935 * CPU fan speed going up and down on Dell Studio XPS 8100
936 * for unknown reasons.
937 */
938 .ident = "Dell Studio XPS 8100",
939 .matches = {
940 DMI_EXACT_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
941 DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Studio XPS 8100"),
942 },
943 },
944 { }
945};
946
932/* 947/*
933 * Probe for the presence of a supported laptop. 948 * Probe for the presence of a supported laptop.
934 */ 949 */
@@ -940,7 +955,8 @@ static int __init i8k_probe(void)
940 /* 955 /*
941 * Get DMI information 956 * Get DMI information
942 */ 957 */
943 if (!dmi_check_system(i8k_dmi_table)) { 958 if (!dmi_check_system(i8k_dmi_table) ||
959 dmi_check_system(i8k_blacklist_dmi_table)) {
944 if (!ignore_dmi && !force) 960 if (!ignore_dmi && !force)
945 return -ENODEV; 961 return -ENODEV;
946 962
diff --git a/drivers/hwmon/g762.c b/drivers/hwmon/g762.c
index 9b55e673b67c..85d106fe3ce8 100644
--- a/drivers/hwmon/g762.c
+++ b/drivers/hwmon/g762.c
@@ -582,6 +582,7 @@ static const struct of_device_id g762_dt_match[] = {
582 { .compatible = "gmt,g763" }, 582 { .compatible = "gmt,g763" },
583 { }, 583 { },
584}; 584};
585MODULE_DEVICE_TABLE(of, g762_dt_match);
585 586
586/* 587/*
587 * Grab clock (a required property), enable it, get (fixed) clock frequency 588 * Grab clock (a required property), enable it, get (fixed) clock frequency
diff --git a/drivers/hwmon/nct7802.c b/drivers/hwmon/nct7802.c
index 28fcb2e246d5..fbfc02bb2cfa 100644
--- a/drivers/hwmon/nct7802.c
+++ b/drivers/hwmon/nct7802.c
@@ -195,7 +195,7 @@ abort:
195} 195}
196 196
197static int nct7802_write_voltage(struct nct7802_data *data, int nr, int index, 197static int nct7802_write_voltage(struct nct7802_data *data, int nr, int index,
198 unsigned int voltage) 198 unsigned long voltage)
199{ 199{
200 int shift = 8 - REG_VOLTAGE_LIMIT_MSB_SHIFT[index - 1][nr]; 200 int shift = 8 - REG_VOLTAGE_LIMIT_MSB_SHIFT[index - 1][nr];
201 int err; 201 int err;
diff --git a/drivers/hwmon/nct7904.c b/drivers/hwmon/nct7904.c
index b77b82f24480..08ff89d222e5 100644
--- a/drivers/hwmon/nct7904.c
+++ b/drivers/hwmon/nct7904.c
@@ -412,8 +412,9 @@ static ssize_t show_pwm(struct device *dev,
412 return sprintf(buf, "%d\n", val); 412 return sprintf(buf, "%d\n", val);
413} 413}
414 414
415static ssize_t store_mode(struct device *dev, struct device_attribute *devattr, 415static ssize_t store_enable(struct device *dev,
416 const char *buf, size_t count) 416 struct device_attribute *devattr,
417 const char *buf, size_t count)
417{ 418{
418 int index = to_sensor_dev_attr(devattr)->index; 419 int index = to_sensor_dev_attr(devattr)->index;
419 struct nct7904_data *data = dev_get_drvdata(dev); 420 struct nct7904_data *data = dev_get_drvdata(dev);
@@ -422,18 +423,18 @@ static ssize_t store_mode(struct device *dev, struct device_attribute *devattr,
422 423
423 if (kstrtoul(buf, 10, &val) < 0) 424 if (kstrtoul(buf, 10, &val) < 0)
424 return -EINVAL; 425 return -EINVAL;
425 if (val > 1 || (val && !data->fan_mode[index])) 426 if (val < 1 || val > 2 || (val == 2 && !data->fan_mode[index]))
426 return -EINVAL; 427 return -EINVAL;
427 428
428 ret = nct7904_write_reg(data, BANK_3, FANCTL1_FMR_REG + index, 429 ret = nct7904_write_reg(data, BANK_3, FANCTL1_FMR_REG + index,
429 val ? data->fan_mode[index] : 0); 430 val == 2 ? data->fan_mode[index] : 0);
430 431
431 return ret ? ret : count; 432 return ret ? ret : count;
432} 433}
433 434
434/* Return 0 for manual mode or 1 for SmartFan mode */ 435/* Return 1 for manual mode or 2 for SmartFan mode */
435static ssize_t show_mode(struct device *dev, 436static ssize_t show_enable(struct device *dev,
436 struct device_attribute *devattr, char *buf) 437 struct device_attribute *devattr, char *buf)
437{ 438{
438 int index = to_sensor_dev_attr(devattr)->index; 439 int index = to_sensor_dev_attr(devattr)->index;
439 struct nct7904_data *data = dev_get_drvdata(dev); 440 struct nct7904_data *data = dev_get_drvdata(dev);
@@ -443,36 +444,36 @@ static ssize_t show_mode(struct device *dev,
443 if (val < 0) 444 if (val < 0)
444 return val; 445 return val;
445 446
446 return sprintf(buf, "%d\n", val ? 1 : 0); 447 return sprintf(buf, "%d\n", val ? 2 : 1);
447} 448}
448 449
449/* 2 attributes per channel: pwm and mode */ 450/* 2 attributes per channel: pwm and mode */
450static SENSOR_DEVICE_ATTR(fan1_pwm, S_IRUGO | S_IWUSR, 451static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR,
451 show_pwm, store_pwm, 0); 452 show_pwm, store_pwm, 0);
452static SENSOR_DEVICE_ATTR(fan1_mode, S_IRUGO | S_IWUSR, 453static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR,
453 show_mode, store_mode, 0); 454 show_enable, store_enable, 0);
454static SENSOR_DEVICE_ATTR(fan2_pwm, S_IRUGO | S_IWUSR, 455static SENSOR_DEVICE_ATTR(pwm2, S_IRUGO | S_IWUSR,
455 show_pwm, store_pwm, 1); 456 show_pwm, store_pwm, 1);
456static SENSOR_DEVICE_ATTR(fan2_mode, S_IRUGO | S_IWUSR, 457static SENSOR_DEVICE_ATTR(pwm2_enable, S_IRUGO | S_IWUSR,
457 show_mode, store_mode, 1); 458 show_enable, store_enable, 1);
458static SENSOR_DEVICE_ATTR(fan3_pwm, S_IRUGO | S_IWUSR, 459static SENSOR_DEVICE_ATTR(pwm3, S_IRUGO | S_IWUSR,
459 show_pwm, store_pwm, 2); 460 show_pwm, store_pwm, 2);
460static SENSOR_DEVICE_ATTR(fan3_mode, S_IRUGO | S_IWUSR, 461static SENSOR_DEVICE_ATTR(pwm3_enable, S_IRUGO | S_IWUSR,
461 show_mode, store_mode, 2); 462 show_enable, store_enable, 2);
462static SENSOR_DEVICE_ATTR(fan4_pwm, S_IRUGO | S_IWUSR, 463static SENSOR_DEVICE_ATTR(pwm4, S_IRUGO | S_IWUSR,
463 show_pwm, store_pwm, 3); 464 show_pwm, store_pwm, 3);
464static SENSOR_DEVICE_ATTR(fan4_mode, S_IRUGO | S_IWUSR, 465static SENSOR_DEVICE_ATTR(pwm4_enable, S_IRUGO | S_IWUSR,
465 show_mode, store_mode, 3); 466 show_enable, store_enable, 3);
466 467
467static struct attribute *nct7904_fanctl_attrs[] = { 468static struct attribute *nct7904_fanctl_attrs[] = {
468 &sensor_dev_attr_fan1_pwm.dev_attr.attr, 469 &sensor_dev_attr_pwm1.dev_attr.attr,
469 &sensor_dev_attr_fan1_mode.dev_attr.attr, 470 &sensor_dev_attr_pwm1_enable.dev_attr.attr,
470 &sensor_dev_attr_fan2_pwm.dev_attr.attr, 471 &sensor_dev_attr_pwm2.dev_attr.attr,
471 &sensor_dev_attr_fan2_mode.dev_attr.attr, 472 &sensor_dev_attr_pwm2_enable.dev_attr.attr,
472 &sensor_dev_attr_fan3_pwm.dev_attr.attr, 473 &sensor_dev_attr_pwm3.dev_attr.attr,
473 &sensor_dev_attr_fan3_mode.dev_attr.attr, 474 &sensor_dev_attr_pwm3_enable.dev_attr.attr,
474 &sensor_dev_attr_fan4_pwm.dev_attr.attr, 475 &sensor_dev_attr_pwm4.dev_attr.attr,
475 &sensor_dev_attr_fan4_mode.dev_attr.attr, 476 &sensor_dev_attr_pwm4_enable.dev_attr.attr,
476 NULL 477 NULL
477}; 478};
478 479
@@ -574,6 +575,7 @@ static const struct i2c_device_id nct7904_id[] = {
574 {"nct7904", 0}, 575 {"nct7904", 0},
575 {} 576 {}
576}; 577};
578MODULE_DEVICE_TABLE(i2c, nct7904_id);
577 579
578static struct i2c_driver nct7904_driver = { 580static struct i2c_driver nct7904_driver = {
579 .class = I2C_CLASS_HWMON, 581 .class = I2C_CLASS_HWMON,
diff --git a/drivers/i2c/busses/i2c-bfin-twi.c b/drivers/i2c/busses/i2c-bfin-twi.c
index af162b4c7a6d..025686d41640 100644
--- a/drivers/i2c/busses/i2c-bfin-twi.c
+++ b/drivers/i2c/busses/i2c-bfin-twi.c
@@ -692,7 +692,7 @@ static int i2c_bfin_twi_probe(struct platform_device *pdev)
692 692
693 platform_set_drvdata(pdev, iface); 693 platform_set_drvdata(pdev, iface);
694 694
695 dev_info(&pdev->dev, "Blackfin BF5xx on-chip I2C TWI Contoller, " 695 dev_info(&pdev->dev, "Blackfin BF5xx on-chip I2C TWI Controller, "
696 "regs_base@%p\n", iface->regs_base); 696 "regs_base@%p\n", iface->regs_base);
697 697
698 return 0; 698 return 0;
@@ -735,6 +735,6 @@ subsys_initcall(i2c_bfin_twi_init);
735module_exit(i2c_bfin_twi_exit); 735module_exit(i2c_bfin_twi_exit);
736 736
737MODULE_AUTHOR("Bryan Wu, Sonic Zhang"); 737MODULE_AUTHOR("Bryan Wu, Sonic Zhang");
738MODULE_DESCRIPTION("Blackfin BF5xx on-chip I2C TWI Contoller Driver"); 738MODULE_DESCRIPTION("Blackfin BF5xx on-chip I2C TWI Controller Driver");
739MODULE_LICENSE("GPL"); 739MODULE_LICENSE("GPL");
740MODULE_ALIAS("platform:i2c-bfin-twi"); 740MODULE_ALIAS("platform:i2c-bfin-twi");
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index d1c22e3fdd14..fc9bf7f30e35 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1247,7 +1247,14 @@ static void omap_i2c_prepare_recovery(struct i2c_adapter *adap)
1247 u32 reg; 1247 u32 reg;
1248 1248
1249 reg = omap_i2c_read_reg(dev, OMAP_I2C_SYSTEST_REG); 1249 reg = omap_i2c_read_reg(dev, OMAP_I2C_SYSTEST_REG);
1250 /* enable test mode */
1250 reg |= OMAP_I2C_SYSTEST_ST_EN; 1251 reg |= OMAP_I2C_SYSTEST_ST_EN;
1252 /* select SDA/SCL IO mode */
1253 reg |= 3 << OMAP_I2C_SYSTEST_TMODE_SHIFT;
1254 /* set SCL to high-impedance state (reset value is 0) */
1255 reg |= OMAP_I2C_SYSTEST_SCL_O;
1256 /* set SDA to high-impedance state (reset value is 0) */
1257 reg |= OMAP_I2C_SYSTEST_SDA_O;
1251 omap_i2c_write_reg(dev, OMAP_I2C_SYSTEST_REG, reg); 1258 omap_i2c_write_reg(dev, OMAP_I2C_SYSTEST_REG, reg);
1252} 1259}
1253 1260
@@ -1257,7 +1264,11 @@ static void omap_i2c_unprepare_recovery(struct i2c_adapter *adap)
1257 u32 reg; 1264 u32 reg;
1258 1265
1259 reg = omap_i2c_read_reg(dev, OMAP_I2C_SYSTEST_REG); 1266 reg = omap_i2c_read_reg(dev, OMAP_I2C_SYSTEST_REG);
1267 /* restore reset values */
1260 reg &= ~OMAP_I2C_SYSTEST_ST_EN; 1268 reg &= ~OMAP_I2C_SYSTEST_ST_EN;
1269 reg &= ~OMAP_I2C_SYSTEST_TMODE_MASK;
1270 reg &= ~OMAP_I2C_SYSTEST_SCL_O;
1271 reg &= ~OMAP_I2C_SYSTEST_SDA_O;
1261 omap_i2c_write_reg(dev, OMAP_I2C_SYSTEST_REG, reg); 1272 omap_i2c_write_reg(dev, OMAP_I2C_SYSTEST_REG, reg);
1262} 1273}
1263 1274
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index e6d4935161e4..c83e4d13cfc5 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -567,6 +567,9 @@ static int i2c_generic_recovery(struct i2c_adapter *adap)
567 if (bri->prepare_recovery) 567 if (bri->prepare_recovery)
568 bri->prepare_recovery(adap); 568 bri->prepare_recovery(adap);
569 569
570 bri->set_scl(adap, val);
571 ndelay(RECOVERY_NDELAY);
572
570 /* 573 /*
571 * By this time SCL is high, as we need to give 9 falling-rising edges 574 * By this time SCL is high, as we need to give 9 falling-rising edges
572 */ 575 */
@@ -597,7 +600,6 @@ static int i2c_generic_recovery(struct i2c_adapter *adap)
597 600
598int i2c_generic_scl_recovery(struct i2c_adapter *adap) 601int i2c_generic_scl_recovery(struct i2c_adapter *adap)
599{ 602{
600 adap->bus_recovery_info->set_scl(adap, 1);
601 return i2c_generic_recovery(adap); 603 return i2c_generic_recovery(adap);
602} 604}
603EXPORT_SYMBOL_GPL(i2c_generic_scl_recovery); 605EXPORT_SYMBOL_GPL(i2c_generic_scl_recovery);
@@ -1338,13 +1340,17 @@ static int of_dev_node_match(struct device *dev, void *data)
1338struct i2c_client *of_find_i2c_device_by_node(struct device_node *node) 1340struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
1339{ 1341{
1340 struct device *dev; 1342 struct device *dev;
1343 struct i2c_client *client;
1341 1344
1342 dev = bus_find_device(&i2c_bus_type, NULL, node, 1345 dev = bus_find_device(&i2c_bus_type, NULL, node, of_dev_node_match);
1343 of_dev_node_match);
1344 if (!dev) 1346 if (!dev)
1345 return NULL; 1347 return NULL;
1346 1348
1347 return i2c_verify_client(dev); 1349 client = i2c_verify_client(dev);
1350 if (!client)
1351 put_device(dev);
1352
1353 return client;
1348} 1354}
1349EXPORT_SYMBOL(of_find_i2c_device_by_node); 1355EXPORT_SYMBOL(of_find_i2c_device_by_node);
1350 1356
@@ -1352,13 +1358,17 @@ EXPORT_SYMBOL(of_find_i2c_device_by_node);
1352struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node) 1358struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node)
1353{ 1359{
1354 struct device *dev; 1360 struct device *dev;
1361 struct i2c_adapter *adapter;
1355 1362
1356 dev = bus_find_device(&i2c_bus_type, NULL, node, 1363 dev = bus_find_device(&i2c_bus_type, NULL, node, of_dev_node_match);
1357 of_dev_node_match);
1358 if (!dev) 1364 if (!dev)
1359 return NULL; 1365 return NULL;
1360 1366
1361 return i2c_verify_adapter(dev); 1367 adapter = i2c_verify_adapter(dev);
1368 if (!adapter)
1369 put_device(dev);
1370
1371 return adapter;
1362} 1372}
1363EXPORT_SYMBOL(of_find_i2c_adapter_by_node); 1373EXPORT_SYMBOL(of_find_i2c_adapter_by_node);
1364#else 1374#else
diff --git a/drivers/i2c/i2c-slave-eeprom.c b/drivers/i2c/i2c-slave-eeprom.c
index 822374654609..1da449614779 100644
--- a/drivers/i2c/i2c-slave-eeprom.c
+++ b/drivers/i2c/i2c-slave-eeprom.c
@@ -80,9 +80,6 @@ static ssize_t i2c_slave_eeprom_bin_read(struct file *filp, struct kobject *kobj
80 struct eeprom_data *eeprom; 80 struct eeprom_data *eeprom;
81 unsigned long flags; 81 unsigned long flags;
82 82
83 if (off + count > attr->size)
84 return -EFBIG;
85
86 eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj)); 83 eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj));
87 84
88 spin_lock_irqsave(&eeprom->buffer_lock, flags); 85 spin_lock_irqsave(&eeprom->buffer_lock, flags);
@@ -98,9 +95,6 @@ static ssize_t i2c_slave_eeprom_bin_write(struct file *filp, struct kobject *kob
98 struct eeprom_data *eeprom; 95 struct eeprom_data *eeprom;
99 unsigned long flags; 96 unsigned long flags;
100 97
101 if (off + count > attr->size)
102 return -EFBIG;
103
104 eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj)); 98 eeprom = dev_get_drvdata(container_of(kobj, struct device, kobj));
105 99
106 spin_lock_irqsave(&eeprom->buffer_lock, flags); 100 spin_lock_irqsave(&eeprom->buffer_lock, flags);
diff --git a/drivers/iio/accel/mma8452.c b/drivers/iio/accel/mma8452.c
index e8e2077c7244..13ea1ea23328 100644
--- a/drivers/iio/accel/mma8452.c
+++ b/drivers/iio/accel/mma8452.c
@@ -557,21 +557,21 @@ static void mma8452_transient_interrupt(struct iio_dev *indio_dev)
557 if (src & MMA8452_TRANSIENT_SRC_XTRANSE) 557 if (src & MMA8452_TRANSIENT_SRC_XTRANSE)
558 iio_push_event(indio_dev, 558 iio_push_event(indio_dev,
559 IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_X, 559 IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_X,
560 IIO_EV_TYPE_THRESH, 560 IIO_EV_TYPE_MAG,
561 IIO_EV_DIR_RISING), 561 IIO_EV_DIR_RISING),
562 ts); 562 ts);
563 563
564 if (src & MMA8452_TRANSIENT_SRC_YTRANSE) 564 if (src & MMA8452_TRANSIENT_SRC_YTRANSE)
565 iio_push_event(indio_dev, 565 iio_push_event(indio_dev,
566 IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_Y, 566 IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_Y,
567 IIO_EV_TYPE_THRESH, 567 IIO_EV_TYPE_MAG,
568 IIO_EV_DIR_RISING), 568 IIO_EV_DIR_RISING),
569 ts); 569 ts);
570 570
571 if (src & MMA8452_TRANSIENT_SRC_ZTRANSE) 571 if (src & MMA8452_TRANSIENT_SRC_ZTRANSE)
572 iio_push_event(indio_dev, 572 iio_push_event(indio_dev,
573 IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_Z, 573 IIO_MOD_EVENT_CODE(IIO_ACCEL, 0, IIO_MOD_Z,
574 IIO_EV_TYPE_THRESH, 574 IIO_EV_TYPE_MAG,
575 IIO_EV_DIR_RISING), 575 IIO_EV_DIR_RISING),
576 ts); 576 ts);
577} 577}
@@ -644,7 +644,7 @@ static int mma8452_reg_access_dbg(struct iio_dev *indio_dev,
644 644
645static const struct iio_event_spec mma8452_transient_event[] = { 645static const struct iio_event_spec mma8452_transient_event[] = {
646 { 646 {
647 .type = IIO_EV_TYPE_THRESH, 647 .type = IIO_EV_TYPE_MAG,
648 .dir = IIO_EV_DIR_RISING, 648 .dir = IIO_EV_DIR_RISING,
649 .mask_separate = BIT(IIO_EV_INFO_ENABLE), 649 .mask_separate = BIT(IIO_EV_INFO_ENABLE),
650 .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) | 650 .mask_shared_by_type = BIT(IIO_EV_INFO_VALUE) |
diff --git a/drivers/iio/adc/mcp320x.c b/drivers/iio/adc/mcp320x.c
index 8d9c9b9215dd..d819823f7257 100644
--- a/drivers/iio/adc/mcp320x.c
+++ b/drivers/iio/adc/mcp320x.c
@@ -299,6 +299,8 @@ static int mcp320x_probe(struct spi_device *spi)
299 indio_dev->channels = chip_info->channels; 299 indio_dev->channels = chip_info->channels;
300 indio_dev->num_channels = chip_info->num_channels; 300 indio_dev->num_channels = chip_info->num_channels;
301 301
302 adc->chip_info = chip_info;
303
302 adc->transfer[0].tx_buf = &adc->tx_buf; 304 adc->transfer[0].tx_buf = &adc->tx_buf;
303 adc->transfer[0].len = sizeof(adc->tx_buf); 305 adc->transfer[0].len = sizeof(adc->tx_buf);
304 adc->transfer[1].rx_buf = adc->rx_buf; 306 adc->transfer[1].rx_buf = adc->rx_buf;
diff --git a/drivers/iio/adc/vf610_adc.c b/drivers/iio/adc/vf610_adc.c
index 480f335a0f9f..819632bf1fda 100644
--- a/drivers/iio/adc/vf610_adc.c
+++ b/drivers/iio/adc/vf610_adc.c
@@ -635,7 +635,7 @@ static int vf610_adc_reg_access(struct iio_dev *indio_dev,
635 struct vf610_adc *info = iio_priv(indio_dev); 635 struct vf610_adc *info = iio_priv(indio_dev);
636 636
637 if ((readval == NULL) || 637 if ((readval == NULL) ||
638 (!(reg % 4) || (reg > VF610_REG_ADC_PCTL))) 638 ((reg % 4) || (reg > VF610_REG_ADC_PCTL)))
639 return -EINVAL; 639 return -EINVAL;
640 640
641 *readval = readl(info->regs + reg); 641 *readval = readl(info->regs + reg);
diff --git a/drivers/iio/light/stk3310.c b/drivers/iio/light/stk3310.c
index c1a218236be5..11a027adc204 100644
--- a/drivers/iio/light/stk3310.c
+++ b/drivers/iio/light/stk3310.c
@@ -200,7 +200,7 @@ static int stk3310_read_event(struct iio_dev *indio_dev,
200 int *val, int *val2) 200 int *val, int *val2)
201{ 201{
202 u8 reg; 202 u8 reg;
203 u16 buf; 203 __be16 buf;
204 int ret; 204 int ret;
205 struct stk3310_data *data = iio_priv(indio_dev); 205 struct stk3310_data *data = iio_priv(indio_dev);
206 206
@@ -222,7 +222,7 @@ static int stk3310_read_event(struct iio_dev *indio_dev,
222 dev_err(&data->client->dev, "register read failed\n"); 222 dev_err(&data->client->dev, "register read failed\n");
223 return ret; 223 return ret;
224 } 224 }
225 *val = swab16(buf); 225 *val = be16_to_cpu(buf);
226 226
227 return IIO_VAL_INT; 227 return IIO_VAL_INT;
228} 228}
@@ -235,7 +235,7 @@ static int stk3310_write_event(struct iio_dev *indio_dev,
235 int val, int val2) 235 int val, int val2)
236{ 236{
237 u8 reg; 237 u8 reg;
238 u16 buf; 238 __be16 buf;
239 int ret; 239 int ret;
240 unsigned int index; 240 unsigned int index;
241 struct stk3310_data *data = iio_priv(indio_dev); 241 struct stk3310_data *data = iio_priv(indio_dev);
@@ -252,7 +252,7 @@ static int stk3310_write_event(struct iio_dev *indio_dev,
252 else 252 else
253 return -EINVAL; 253 return -EINVAL;
254 254
255 buf = swab16(val); 255 buf = cpu_to_be16(val);
256 ret = regmap_bulk_write(data->regmap, reg, &buf, 2); 256 ret = regmap_bulk_write(data->regmap, reg, &buf, 2);
257 if (ret < 0) 257 if (ret < 0)
258 dev_err(&client->dev, "failed to set PS threshold!\n"); 258 dev_err(&client->dev, "failed to set PS threshold!\n");
@@ -301,7 +301,7 @@ static int stk3310_read_raw(struct iio_dev *indio_dev,
301 int *val, int *val2, long mask) 301 int *val, int *val2, long mask)
302{ 302{
303 u8 reg; 303 u8 reg;
304 u16 buf; 304 __be16 buf;
305 int ret; 305 int ret;
306 unsigned int index; 306 unsigned int index;
307 struct stk3310_data *data = iio_priv(indio_dev); 307 struct stk3310_data *data = iio_priv(indio_dev);
@@ -322,7 +322,7 @@ static int stk3310_read_raw(struct iio_dev *indio_dev,
322 mutex_unlock(&data->lock); 322 mutex_unlock(&data->lock);
323 return ret; 323 return ret;
324 } 324 }
325 *val = swab16(buf); 325 *val = be16_to_cpu(buf);
326 mutex_unlock(&data->lock); 326 mutex_unlock(&data->lock);
327 return IIO_VAL_INT; 327 return IIO_VAL_INT;
328 case IIO_CHAN_INFO_INT_TIME: 328 case IIO_CHAN_INFO_INT_TIME:
@@ -608,13 +608,7 @@ static int stk3310_probe(struct i2c_client *client,
608 if (ret < 0) 608 if (ret < 0)
609 return ret; 609 return ret;
610 610
611 ret = iio_device_register(indio_dev); 611 if (client->irq < 0)
612 if (ret < 0) {
613 dev_err(&client->dev, "device_register failed\n");
614 stk3310_set_state(data, STK3310_STATE_STANDBY);
615 }
616
617 if (client->irq <= 0)
618 client->irq = stk3310_gpio_probe(client); 612 client->irq = stk3310_gpio_probe(client);
619 613
620 if (client->irq >= 0) { 614 if (client->irq >= 0) {
@@ -629,6 +623,12 @@ static int stk3310_probe(struct i2c_client *client,
629 client->irq); 623 client->irq);
630 } 624 }
631 625
626 ret = iio_device_register(indio_dev);
627 if (ret < 0) {
628 dev_err(&client->dev, "device_register failed\n");
629 stk3310_set_state(data, STK3310_STATE_STANDBY);
630 }
631
632 return ret; 632 return ret;
633} 633}
634 634
diff --git a/drivers/iio/magnetometer/Kconfig b/drivers/iio/magnetometer/Kconfig
index dcadfc4f0661..efb9350b0d76 100644
--- a/drivers/iio/magnetometer/Kconfig
+++ b/drivers/iio/magnetometer/Kconfig
@@ -90,6 +90,7 @@ config IIO_ST_MAGN_SPI_3AXIS
90config BMC150_MAGN 90config BMC150_MAGN
91 tristate "Bosch BMC150 Magnetometer Driver" 91 tristate "Bosch BMC150 Magnetometer Driver"
92 depends on I2C 92 depends on I2C
93 select REGMAP_I2C
93 select IIO_BUFFER 94 select IIO_BUFFER
94 select IIO_TRIGGERED_BUFFER 95 select IIO_TRIGGERED_BUFFER
95 help 96 help
diff --git a/drivers/iio/magnetometer/bmc150_magn.c b/drivers/iio/magnetometer/bmc150_magn.c
index d4c178869991..1347a1f2e46f 100644
--- a/drivers/iio/magnetometer/bmc150_magn.c
+++ b/drivers/iio/magnetometer/bmc150_magn.c
@@ -706,11 +706,11 @@ static int bmc150_magn_init(struct bmc150_magn_data *data)
706 goto err_poweroff; 706 goto err_poweroff;
707 } 707 }
708 if (chip_id != BMC150_MAGN_CHIP_ID_VAL) { 708 if (chip_id != BMC150_MAGN_CHIP_ID_VAL) {
709 dev_err(&data->client->dev, "Invalid chip id 0x%x\n", ret); 709 dev_err(&data->client->dev, "Invalid chip id 0x%x\n", chip_id);
710 ret = -ENODEV; 710 ret = -ENODEV;
711 goto err_poweroff; 711 goto err_poweroff;
712 } 712 }
713 dev_dbg(&data->client->dev, "Chip id %x\n", ret); 713 dev_dbg(&data->client->dev, "Chip id %x\n", chip_id);
714 714
715 preset = bmc150_magn_presets_table[BMC150_MAGN_DEFAULT_PRESET]; 715 preset = bmc150_magn_presets_table[BMC150_MAGN_DEFAULT_PRESET];
716 ret = bmc150_magn_set_odr(data, preset.odr); 716 ret = bmc150_magn_set_odr(data, preset.odr);
diff --git a/drivers/iio/magnetometer/mmc35240.c b/drivers/iio/magnetometer/mmc35240.c
index d927397a6ef7..706ebfd6297f 100644
--- a/drivers/iio/magnetometer/mmc35240.c
+++ b/drivers/iio/magnetometer/mmc35240.c
@@ -202,8 +202,8 @@ static int mmc35240_hw_set(struct mmc35240_data *data, bool set)
202 coil_bit = MMC35240_CTRL0_RESET_BIT; 202 coil_bit = MMC35240_CTRL0_RESET_BIT;
203 203
204 return regmap_update_bits(data->regmap, MMC35240_REG_CTRL0, 204 return regmap_update_bits(data->regmap, MMC35240_REG_CTRL0,
205 MMC35240_CTRL0_REFILL_BIT, 205 coil_bit, coil_bit);
206 coil_bit); 206
207} 207}
208 208
209static int mmc35240_init(struct mmc35240_data *data) 209static int mmc35240_init(struct mmc35240_data *data)
@@ -222,14 +222,15 @@ static int mmc35240_init(struct mmc35240_data *data)
222 222
223 /* 223 /*
224 * make sure we restore sensor characteristics, by doing 224 * make sure we restore sensor characteristics, by doing
225 * a RESET/SET sequence 225 * a SET/RESET sequence, the axis polarity being naturally
226 * aligned after RESET
226 */ 227 */
227 ret = mmc35240_hw_set(data, false); 228 ret = mmc35240_hw_set(data, true);
228 if (ret < 0) 229 if (ret < 0)
229 return ret; 230 return ret;
230 usleep_range(MMC53240_WAIT_SET_RESET, MMC53240_WAIT_SET_RESET + 1); 231 usleep_range(MMC53240_WAIT_SET_RESET, MMC53240_WAIT_SET_RESET + 1);
231 232
232 ret = mmc35240_hw_set(data, true); 233 ret = mmc35240_hw_set(data, false);
233 if (ret < 0) 234 if (ret < 0)
234 return ret; 235 return ret;
235 236
@@ -503,6 +504,7 @@ static int mmc35240_probe(struct i2c_client *client,
503 } 504 }
504 505
505 data = iio_priv(indio_dev); 506 data = iio_priv(indio_dev);
507 i2c_set_clientdata(client, indio_dev);
506 data->client = client; 508 data->client = client;
507 data->regmap = regmap; 509 data->regmap = regmap;
508 data->res = MMC35240_16_BITS_SLOW; 510 data->res = MMC35240_16_BITS_SLOW;
diff --git a/drivers/iio/temperature/mlx90614.c b/drivers/iio/temperature/mlx90614.c
index cb2e8ad8bfdc..7a2b639eaa96 100644
--- a/drivers/iio/temperature/mlx90614.c
+++ b/drivers/iio/temperature/mlx90614.c
@@ -204,7 +204,7 @@ static int mlx90614_read_raw(struct iio_dev *indio_dev,
204 *val = ret; 204 *val = ret;
205 return IIO_VAL_INT; 205 return IIO_VAL_INT;
206 case IIO_CHAN_INFO_OFFSET: 206 case IIO_CHAN_INFO_OFFSET:
207 *val = 13657; 207 *val = -13657;
208 *val2 = 500000; 208 *val2 = 500000;
209 return IIO_VAL_INT_PLUS_MICRO; 209 return IIO_VAL_INT_PLUS_MICRO;
210 case IIO_CHAN_INFO_SCALE: 210 case IIO_CHAN_INFO_SCALE:
diff --git a/drivers/infiniband/hw/cxgb3/iwch_provider.c b/drivers/infiniband/hw/cxgb3/iwch_provider.c
index b1b73232f217..bbbe0184e592 100644
--- a/drivers/infiniband/hw/cxgb3/iwch_provider.c
+++ b/drivers/infiniband/hw/cxgb3/iwch_provider.c
@@ -736,6 +736,10 @@ static struct ib_mr *iwch_get_dma_mr(struct ib_pd *pd, int acc)
736 /* 736 /*
737 * T3 only supports 32 bits of size. 737 * T3 only supports 32 bits of size.
738 */ 738 */
739 if (sizeof(phys_addr_t) > 4) {
740 pr_warn_once(MOD "Cannot support dma_mrs on this platform.\n");
741 return ERR_PTR(-ENOTSUPP);
742 }
739 bl.size = 0xffffffff; 743 bl.size = 0xffffffff;
740 bl.addr = 0; 744 bl.addr = 0;
741 kva = 0; 745 kva = 0;
diff --git a/drivers/infiniband/hw/ipath/ipath_driver.c b/drivers/infiniband/hw/ipath/ipath_driver.c
index 2d7e503d13cb..871dbe56216a 100644
--- a/drivers/infiniband/hw/ipath/ipath_driver.c
+++ b/drivers/infiniband/hw/ipath/ipath_driver.c
@@ -31,6 +31,8 @@
31 * SOFTWARE. 31 * SOFTWARE.
32 */ 32 */
33 33
34#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
35
34#include <linux/sched.h> 36#include <linux/sched.h>
35#include <linux/spinlock.h> 37#include <linux/spinlock.h>
36#include <linux/idr.h> 38#include <linux/idr.h>
@@ -399,8 +401,8 @@ static int ipath_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
399 u32 bar0 = 0, bar1 = 0; 401 u32 bar0 = 0, bar1 = 0;
400 402
401#ifdef CONFIG_X86_64 403#ifdef CONFIG_X86_64
402 if (WARN(pat_enabled(), 404 if (pat_enabled()) {
403 "ipath needs PAT disabled, boot with nopat kernel parameter\n")) { 405 pr_warn("ipath needs PAT disabled, boot with nopat kernel parameter\n");
404 ret = -ENODEV; 406 ret = -ENODEV;
405 goto bail; 407 goto bail;
406 } 408 }
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
index b396344fae16..6a36338593cd 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
@@ -1,21 +1,36 @@
1/******************************************************************* 1/* This file is part of the Emulex RoCE Device Driver for
2 * This file is part of the Emulex RoCE Device Driver for * 2 * RoCE (RDMA over Converged Ethernet) adapters.
3 * RoCE (RDMA over Converged Ethernet) adapters. * 3 * Copyright (C) 2012-2015 Emulex. All rights reserved.
4 * Copyright (C) 2008-2012 Emulex. All rights reserved. * 4 * EMULEX and SLI are trademarks of Emulex.
5 * EMULEX and SLI are trademarks of Emulex. * 5 * www.emulex.com
6 * www.emulex.com * 6 *
7 * * 7 * This software is available to you under a choice of one of two licenses.
8 * This program is free software; you can redistribute it and/or * 8 * You may choose to be licensed under the terms of the GNU General Public
9 * modify it under the terms of version 2 of the GNU General * 9 * License (GPL) Version 2, available from the file COPYING in the main
10 * Public License as published by the Free Software Foundation. * 10 * directory of this source tree, or the BSD license below:
11 * This program is distributed in the hope that it will be useful. * 11 *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 12 * Redistribution and use in source and binary forms, with or without
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 13 * modification, are permitted provided that the following conditions
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 14 * are met:
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 15 *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for * 16 * - Redistributions of source code must retain the above copyright notice,
17 * more details, a copy of which can be found in the file COPYING * 17 * this list of conditions and the following disclaimer.
18 * included with this package. * 18 *
19 * - Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the distribution.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19 * 34 *
20 * Contact Information: 35 * Contact Information:
21 * linux-drivers@emulex.com 36 * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
23 * Emulex 38 * Emulex
24 * 3333 Susan Street 39 * 3333 Susan Street
25 * Costa Mesa, CA 92626 40 * Costa Mesa, CA 92626
26 *******************************************************************/ 41 */
27 42
28#ifndef __OCRDMA_H__ 43#ifndef __OCRDMA_H__
29#define __OCRDMA_H__ 44#define __OCRDMA_H__
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
index 1554cca5712a..430b1350fe96 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
@@ -1,21 +1,36 @@
1/******************************************************************* 1/* This file is part of the Emulex RoCE Device Driver for
2 * This file is part of the Emulex RoCE Device Driver for * 2 * RoCE (RDMA over Converged Ethernet) adapters.
3 * RoCE (RDMA over Converged Ethernet) adapters. * 3 * Copyright (C) 2012-2015 Emulex. All rights reserved.
4 * Copyright (C) 2008-2012 Emulex. All rights reserved. * 4 * EMULEX and SLI are trademarks of Emulex.
5 * EMULEX and SLI are trademarks of Emulex. * 5 * www.emulex.com
6 * www.emulex.com * 6 *
7 * * 7 * This software is available to you under a choice of one of two licenses.
8 * This program is free software; you can redistribute it and/or * 8 * You may choose to be licensed under the terms of the GNU General Public
9 * modify it under the terms of version 2 of the GNU General * 9 * License (GPL) Version 2, available from the file COPYING in the main
10 * Public License as published by the Free Software Foundation. * 10 * directory of this source tree, or the BSD license below:
11 * This program is distributed in the hope that it will be useful. * 11 *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 12 * Redistribution and use in source and binary forms, with or without
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 13 * modification, are permitted provided that the following conditions
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 14 * are met:
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 15 *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for * 16 * - Redistributions of source code must retain the above copyright notice,
17 * more details, a copy of which can be found in the file COPYING * 17 * this list of conditions and the following disclaimer.
18 * included with this package. * 18 *
19 * - Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the distribution.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19 * 34 *
20 * Contact Information: 35 * Contact Information:
21 * linux-drivers@emulex.com 36 * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
23 * Emulex 38 * Emulex
24 * 3333 Susan Street 39 * 3333 Susan Street
25 * Costa Mesa, CA 92626 40 * Costa Mesa, CA 92626
26 *******************************************************************/ 41 */
27 42
28#ifndef __OCRDMA_ABI_H__ 43#ifndef __OCRDMA_ABI_H__
29#define __OCRDMA_ABI_H__ 44#define __OCRDMA_ABI_H__
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
index 29b27675dd70..44766fee1f4e 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.c
@@ -1,21 +1,36 @@
1/******************************************************************* 1/* This file is part of the Emulex RoCE Device Driver for
2 * This file is part of the Emulex RoCE Device Driver for * 2 * RoCE (RDMA over Converged Ethernet) adapters.
3 * RoCE (RDMA over Converged Ethernet) adapters. * 3 * Copyright (C) 2012-2015 Emulex. All rights reserved.
4 * Copyright (C) 2008-2012 Emulex. All rights reserved. * 4 * EMULEX and SLI are trademarks of Emulex.
5 * EMULEX and SLI are trademarks of Emulex. * 5 * www.emulex.com
6 * www.emulex.com * 6 *
7 * * 7 * This software is available to you under a choice of one of two licenses.
8 * This program is free software; you can redistribute it and/or * 8 * You may choose to be licensed under the terms of the GNU General Public
9 * modify it under the terms of version 2 of the GNU General * 9 * License (GPL) Version 2, available from the file COPYING in the main
10 * Public License as published by the Free Software Foundation. * 10 * directory of this source tree, or the BSD license below:
11 * This program is distributed in the hope that it will be useful. * 11 *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 12 * Redistribution and use in source and binary forms, with or without
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 13 * modification, are permitted provided that the following conditions
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 14 * are met:
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 15 *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for * 16 * - Redistributions of source code must retain the above copyright notice,
17 * more details, a copy of which can be found in the file COPYING * 17 * this list of conditions and the following disclaimer.
18 * included with this package. * 18 *
19 * - Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the distribution.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19 * 34 *
20 * Contact Information: 35 * Contact Information:
21 * linux-drivers@emulex.com 36 * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
23 * Emulex 38 * Emulex
24 * 3333 Susan Street 39 * 3333 Susan Street
25 * Costa Mesa, CA 92626 40 * Costa Mesa, CA 92626
26 *******************************************************************/ 41 */
27 42
28#include <net/neighbour.h> 43#include <net/neighbour.h>
29#include <net/netevent.h> 44#include <net/netevent.h>
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
index cf366fe03cb8..04a30ae67473 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_ah.h
@@ -1,21 +1,36 @@
1/******************************************************************* 1/* This file is part of the Emulex RoCE Device Driver for
2 * This file is part of the Emulex RoCE Device Driver for * 2 * RoCE (RDMA over Converged Ethernet) adapters.
3 * RoCE (RDMA over Converged Ethernet) adapters. * 3 * Copyright (C) 2012-2015 Emulex. All rights reserved.
4 * Copyright (C) 2008-2012 Emulex. All rights reserved. * 4 * EMULEX and SLI are trademarks of Emulex.
5 * EMULEX and SLI are trademarks of Emulex. * 5 * www.emulex.com
6 * www.emulex.com * 6 *
7 * * 7 * This software is available to you under a choice of one of two licenses.
8 * This program is free software; you can redistribute it and/or * 8 * You may choose to be licensed under the terms of the GNU General Public
9 * modify it under the terms of version 2 of the GNU General * 9 * License (GPL) Version 2, available from the file COPYING in the main
10 * Public License as published by the Free Software Foundation. * 10 * directory of this source tree, or the BSD license below:
11 * This program is distributed in the hope that it will be useful. * 11 *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 12 * Redistribution and use in source and binary forms, with or without
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 13 * modification, are permitted provided that the following conditions
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 14 * are met:
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 15 *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for * 16 * - Redistributions of source code must retain the above copyright notice,
17 * more details, a copy of which can be found in the file COPYING * 17 * this list of conditions and the following disclaimer.
18 * included with this package. * 18 *
19 * - Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the distribution.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19 * 34 *
20 * Contact Information: 35 * Contact Information:
21 * linux-drivers@emulex.com 36 * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
23 * Emulex 38 * Emulex
24 * 3333 Susan Street 39 * 3333 Susan Street
25 * Costa Mesa, CA 92626 40 * Costa Mesa, CA 92626
26 *******************************************************************/ 41 */
27 42
28#ifndef __OCRDMA_AH_H__ 43#ifndef __OCRDMA_AH_H__
29#define __OCRDMA_AH_H__ 44#define __OCRDMA_AH_H__
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 47615ff33bc6..aab391a15db4 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -1,21 +1,36 @@
1/******************************************************************* 1/* This file is part of the Emulex RoCE Device Driver for
2 * This file is part of the Emulex RoCE Device Driver for * 2 * RoCE (RDMA over Converged Ethernet) adapters.
3 * RoCE (RDMA over Converged Ethernet) CNA Adapters. * 3 * Copyright (C) 2012-2015 Emulex. All rights reserved.
4 * Copyright (C) 2008-2012 Emulex. All rights reserved. * 4 * EMULEX and SLI are trademarks of Emulex.
5 * EMULEX and SLI are trademarks of Emulex. * 5 * www.emulex.com
6 * www.emulex.com * 6 *
7 * * 7 * This software is available to you under a choice of one of two licenses.
8 * This program is free software; you can redistribute it and/or * 8 * You may choose to be licensed under the terms of the GNU General Public
9 * modify it under the terms of version 2 of the GNU General * 9 * License (GPL) Version 2, available from the file COPYING in the main
10 * Public License as published by the Free Software Foundation. * 10 * directory of this source tree, or the BSD license below:
11 * This program is distributed in the hope that it will be useful. * 11 *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 12 * Redistribution and use in source and binary forms, with or without
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 13 * modification, are permitted provided that the following conditions
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 14 * are met:
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 15 *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for * 16 * - Redistributions of source code must retain the above copyright notice,
17 * more details, a copy of which can be found in the file COPYING * 17 * this list of conditions and the following disclaimer.
18 * included with this package. * 18 *
19 * - Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the distribution.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19 * 34 *
20 * Contact Information: 35 * Contact Information:
21 * linux-drivers@emulex.com 36 * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
23 * Emulex 38 * Emulex
24 * 3333 Susan Street 39 * 3333 Susan Street
25 * Costa Mesa, CA 92626 40 * Costa Mesa, CA 92626
26 *******************************************************************/ 41 */
27 42
28#include <linux/sched.h> 43#include <linux/sched.h>
29#include <linux/interrupt.h> 44#include <linux/interrupt.h>
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
index e905972fceb7..7ed885c1851e 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.h
@@ -1,21 +1,36 @@
1/******************************************************************* 1/* This file is part of the Emulex RoCE Device Driver for
2 * This file is part of the Emulex RoCE Device Driver for * 2 * RoCE (RDMA over Converged Ethernet) adapters.
3 * RoCE (RDMA over Converged Ethernet) CNA Adapters. * 3 * Copyright (C) 2012-2015 Emulex. All rights reserved.
4 * Copyright (C) 2008-2012 Emulex. All rights reserved. * 4 * EMULEX and SLI are trademarks of Emulex.
5 * EMULEX and SLI are trademarks of Emulex. * 5 * www.emulex.com
6 * www.emulex.com * 6 *
7 * * 7 * This software is available to you under a choice of one of two licenses.
8 * This program is free software; you can redistribute it and/or * 8 * You may choose to be licensed under the terms of the GNU General Public
9 * modify it under the terms of version 2 of the GNU General * 9 * License (GPL) Version 2, available from the file COPYING in the main
10 * Public License as published by the Free Software Foundation. * 10 * directory of this source tree, or the BSD license below:
11 * This program is distributed in the hope that it will be useful. * 11 *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 12 * Redistribution and use in source and binary forms, with or without
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 13 * modification, are permitted provided that the following conditions
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 14 * are met:
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 15 *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for * 16 * - Redistributions of source code must retain the above copyright notice,
17 * more details, a copy of which can be found in the file COPYING * 17 * this list of conditions and the following disclaimer.
18 * included with this package. * 18 *
19 * - Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the distribution.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19 * 34 *
20 * Contact Information: 35 * Contact Information:
21 * linux-drivers@emulex.com 36 * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
23 * Emulex 38 * Emulex
24 * 3333 Susan Street 39 * 3333 Susan Street
25 * Costa Mesa, CA 92626 40 * Costa Mesa, CA 92626
26 *******************************************************************/ 41 */
27 42
28#ifndef __OCRDMA_HW_H__ 43#ifndef __OCRDMA_HW_H__
29#define __OCRDMA_HW_H__ 44#define __OCRDMA_HW_H__
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index d98a707a5eb9..b119a3413a15 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -1,21 +1,36 @@
1/******************************************************************* 1/* This file is part of the Emulex RoCE Device Driver for
2 * This file is part of the Emulex RoCE Device Driver for * 2 * RoCE (RDMA over Converged Ethernet) adapters.
3 * RoCE (RDMA over Converged Ethernet) adapters. * 3 * Copyright (C) 2012-2015 Emulex. All rights reserved.
4 * Copyright (C) 2008-2012 Emulex. All rights reserved. * 4 * EMULEX and SLI are trademarks of Emulex.
5 * EMULEX and SLI are trademarks of Emulex. * 5 * www.emulex.com
6 * www.emulex.com * 6 *
7 * * 7 * This software is available to you under a choice of one of two licenses.
8 * This program is free software; you can redistribute it and/or * 8 * You may choose to be licensed under the terms of the GNU General Public
9 * modify it under the terms of version 2 of the GNU General * 9 * License (GPL) Version 2, available from the file COPYING in the main
10 * Public License as published by the Free Software Foundation. * 10 * directory of this source tree, or the BSD license below:
11 * This program is distributed in the hope that it will be useful. * 11 *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 12 * Redistribution and use in source and binary forms, with or without
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 13 * modification, are permitted provided that the following conditions
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 14 * are met:
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 15 *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for * 16 * - Redistributions of source code must retain the above copyright notice,
17 * more details, a copy of which can be found in the file COPYING * 17 * this list of conditions and the following disclaimer.
18 * included with this package. * 18 *
19 * - Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the distribution.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19 * 34 *
20 * Contact Information: 35 * Contact Information:
21 * linux-drivers@emulex.com 36 * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
23 * Emulex 38 * Emulex
24 * 3333 Susan Street 39 * 3333 Susan Street
25 * Costa Mesa, CA 92626 40 * Costa Mesa, CA 92626
26 *******************************************************************/ 41 */
27 42
28#include <linux/module.h> 43#include <linux/module.h>
29#include <linux/idr.h> 44#include <linux/idr.h>
@@ -46,7 +61,7 @@
46MODULE_VERSION(OCRDMA_ROCE_DRV_VERSION); 61MODULE_VERSION(OCRDMA_ROCE_DRV_VERSION);
47MODULE_DESCRIPTION(OCRDMA_ROCE_DRV_DESC " " OCRDMA_ROCE_DRV_VERSION); 62MODULE_DESCRIPTION(OCRDMA_ROCE_DRV_DESC " " OCRDMA_ROCE_DRV_VERSION);
48MODULE_AUTHOR("Emulex Corporation"); 63MODULE_AUTHOR("Emulex Corporation");
49MODULE_LICENSE("GPL"); 64MODULE_LICENSE("Dual BSD/GPL");
50 65
51static LIST_HEAD(ocrdma_dev_list); 66static LIST_HEAD(ocrdma_dev_list);
52static DEFINE_SPINLOCK(ocrdma_devlist_lock); 67static DEFINE_SPINLOCK(ocrdma_devlist_lock);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
index 02ad0aee99af..80006b24aa11 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
@@ -1,21 +1,36 @@
1/******************************************************************* 1/* This file is part of the Emulex RoCE Device Driver for
2 * This file is part of the Emulex RoCE Device Driver for * 2 * RoCE (RDMA over Converged Ethernet) adapters.
3 * RoCE (RDMA over Converged Ethernet) adapters. * 3 * Copyright (C) 2012-2015 Emulex. All rights reserved.
4 * Copyright (C) 2008-2012 Emulex. All rights reserved. * 4 * EMULEX and SLI are trademarks of Emulex.
5 * EMULEX and SLI are trademarks of Emulex. * 5 * www.emulex.com
6 * www.emulex.com * 6 *
7 * * 7 * This software is available to you under a choice of one of two licenses.
8 * This program is free software; you can redistribute it and/or * 8 * You may choose to be licensed under the terms of the GNU General Public
9 * modify it under the terms of version 2 of the GNU General * 9 * License (GPL) Version 2, available from the file COPYING in the main
10 * Public License as published by the Free Software Foundation. * 10 * directory of this source tree, or the BSD license below:
11 * This program is distributed in the hope that it will be useful. * 11 *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 12 * Redistribution and use in source and binary forms, with or without
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 13 * modification, are permitted provided that the following conditions
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 14 * are met:
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 15 *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for * 16 * - Redistributions of source code must retain the above copyright notice,
17 * more details, a copy of which can be found in the file COPYING * 17 * this list of conditions and the following disclaimer.
18 * included with this package. * 18 *
19 * - Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the distribution.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19 * 34 *
20 * Contact Information: 35 * Contact Information:
21 * linux-drivers@emulex.com 36 * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
23 * Emulex 38 * Emulex
24 * 3333 Susan Street 39 * 3333 Susan Street
25 * Costa Mesa, CA 92626 40 * Costa Mesa, CA 92626
26 *******************************************************************/ 41 */
27 42
28#ifndef __OCRDMA_SLI_H__ 43#ifndef __OCRDMA_SLI_H__
29#define __OCRDMA_SLI_H__ 44#define __OCRDMA_SLI_H__
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
index 48d7ef51aa0c..69334e214571 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
@@ -1,21 +1,36 @@
1/******************************************************************* 1/* This file is part of the Emulex RoCE Device Driver for
2 * This file is part of the Emulex RoCE Device Driver for * 2 * RoCE (RDMA over Converged Ethernet) adapters.
3 * RoCE (RDMA over Converged Ethernet) adapters. * 3 * Copyright (C) 2012-2015 Emulex. All rights reserved.
4 * Copyright (C) 2008-2014 Emulex. All rights reserved. * 4 * EMULEX and SLI are trademarks of Emulex.
5 * EMULEX and SLI are trademarks of Emulex. * 5 * www.emulex.com
6 * www.emulex.com * 6 *
7 * * 7 * This software is available to you under a choice of one of two licenses.
8 * This program is free software; you can redistribute it and/or * 8 * You may choose to be licensed under the terms of the GNU General Public
9 * modify it under the terms of version 2 of the GNU General * 9 * License (GPL) Version 2, available from the file COPYING in the main
10 * Public License as published by the Free Software Foundation. * 10 * directory of this source tree, or the BSD license below:
11 * This program is distributed in the hope that it will be useful. * 11 *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 12 * Redistribution and use in source and binary forms, with or without
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 13 * modification, are permitted provided that the following conditions
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 14 * are met:
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 15 *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for * 16 * - Redistributions of source code must retain the above copyright notice,
17 * more details, a copy of which can be found in the file COPYING * 17 * this list of conditions and the following disclaimer.
18 * included with this package. * 18 *
19 * - Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the distribution.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19 * 34 *
20 * Contact Information: 35 * Contact Information:
21 * linux-drivers@emulex.com 36 * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
23 * Emulex 38 * Emulex
24 * 3333 Susan Street 39 * 3333 Susan Street
25 * Costa Mesa, CA 92626 40 * Costa Mesa, CA 92626
26 *******************************************************************/ 41 */
27 42
28#include <rdma/ib_addr.h> 43#include <rdma/ib_addr.h>
29#include <rdma/ib_pma.h> 44#include <rdma/ib_pma.h>
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
index 091edd68a8a3..c9e58d04c7b8 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
@@ -1,21 +1,36 @@
1/******************************************************************* 1/* This file is part of the Emulex RoCE Device Driver for
2 * This file is part of the Emulex RoCE Device Driver for * 2 * RoCE (RDMA over Converged Ethernet) adapters.
3 * RoCE (RDMA over Converged Ethernet) adapters. * 3 * Copyright (C) 2012-2015 Emulex. All rights reserved.
4 * Copyright (C) 2008-2014 Emulex. All rights reserved. * 4 * EMULEX and SLI are trademarks of Emulex.
5 * EMULEX and SLI are trademarks of Emulex. * 5 * www.emulex.com
6 * www.emulex.com * 6 *
7 * * 7 * This software is available to you under a choice of one of two licenses.
8 * This program is free software; you can redistribute it and/or * 8 * You may choose to be licensed under the terms of the GNU General Public
9 * modify it under the terms of version 2 of the GNU General * 9 * License (GPL) Version 2, available from the file COPYING in the main
10 * Public License as published by the Free Software Foundation. * 10 * directory of this source tree, or the BSD license below:
11 * This program is distributed in the hope that it will be useful. * 11 *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 12 * Redistribution and use in source and binary forms, with or without
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 13 * modification, are permitted provided that the following conditions
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 14 * are met:
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 15 *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for * 16 * - Redistributions of source code must retain the above copyright notice,
17 * more details, a copy of which can be found in the file COPYING * 17 * this list of conditions and the following disclaimer.
18 * included with this package. * 18 *
19 * - Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the distribution.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19 * 34 *
20 * Contact Information: 35 * Contact Information:
21 * linux-drivers@emulex.com 36 * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
23 * Emulex 38 * Emulex
24 * 3333 Susan Street 39 * 3333 Susan Street
25 * Costa Mesa, CA 92626 40 * Costa Mesa, CA 92626
26 *******************************************************************/ 41 */
27 42
28#ifndef __OCRDMA_STATS_H__ 43#ifndef __OCRDMA_STATS_H__
29#define __OCRDMA_STATS_H__ 44#define __OCRDMA_STATS_H__
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index 5bb61eb58f2c..bc84cd462ecf 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -1,21 +1,36 @@
1/******************************************************************* 1/* This file is part of the Emulex RoCE Device Driver for
2 * This file is part of the Emulex RoCE Device Driver for * 2 * RoCE (RDMA over Converged Ethernet) adapters.
3 * RoCE (RDMA over Converged Ethernet) adapters. * 3 * Copyright (C) 2012-2015 Emulex. All rights reserved.
4 * Copyright (C) 2008-2012 Emulex. All rights reserved. * 4 * EMULEX and SLI are trademarks of Emulex.
5 * EMULEX and SLI are trademarks of Emulex. * 5 * www.emulex.com
6 * www.emulex.com * 6 *
7 * * 7 * This software is available to you under a choice of one of two licenses.
8 * This program is free software; you can redistribute it and/or * 8 * You may choose to be licensed under the terms of the GNU General Public
9 * modify it under the terms of version 2 of the GNU General * 9 * License (GPL) Version 2, available from the file COPYING in the main
10 * Public License as published by the Free Software Foundation. * 10 * directory of this source tree, or the BSD license below:
11 * This program is distributed in the hope that it will be useful. * 11 *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 12 * Redistribution and use in source and binary forms, with or without
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 13 * modification, are permitted provided that the following conditions
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 14 * are met:
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 15 *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for * 16 * - Redistributions of source code must retain the above copyright notice,
17 * more details, a copy of which can be found in the file COPYING * 17 * this list of conditions and the following disclaimer.
18 * included with this package. * 18 *
19 * - Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the distribution.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19 * 34 *
20 * Contact Information: 35 * Contact Information:
21 * linux-drivers@emulex.com 36 * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
23 * Emulex 38 * Emulex
24 * 3333 Susan Street 39 * 3333 Susan Street
25 * Costa Mesa, CA 92626 40 * Costa Mesa, CA 92626
26 *******************************************************************/ 41 */
27 42
28#include <linux/dma-mapping.h> 43#include <linux/dma-mapping.h>
29#include <rdma/ib_verbs.h> 44#include <rdma/ib_verbs.h>
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
index b15c608efa7b..eaccb2d3cb9f 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
@@ -1,21 +1,36 @@
1/******************************************************************* 1/* This file is part of the Emulex RoCE Device Driver for
2 * This file is part of the Emulex RoCE Device Driver for * 2 * RoCE (RDMA over Converged Ethernet) adapters.
3 * RoCE (RDMA over Converged Ethernet) adapters. * 3 * Copyright (C) 2012-2015 Emulex. All rights reserved.
4 * Copyright (C) 2008-2012 Emulex. All rights reserved. * 4 * EMULEX and SLI are trademarks of Emulex.
5 * EMULEX and SLI are trademarks of Emulex. * 5 * www.emulex.com
6 * www.emulex.com * 6 *
7 * * 7 * This software is available to you under a choice of one of two licenses.
8 * This program is free software; you can redistribute it and/or * 8 * You may choose to be licensed under the terms of the GNU General Public
9 * modify it under the terms of version 2 of the GNU General * 9 * License (GPL) Version 2, available from the file COPYING in the main
10 * Public License as published by the Free Software Foundation. * 10 * directory of this source tree, or the BSD license below:
11 * This program is distributed in the hope that it will be useful. * 11 *
12 * ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND * 12 * Redistribution and use in source and binary forms, with or without
13 * WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, * 13 * modification, are permitted provided that the following conditions
14 * FITNESS FOR A PARTICULAR PURPOSE, OR NON-INFRINGEMENT, ARE * 14 * are met:
15 * DISCLAIMED, EXCEPT TO THE EXTENT THAT SUCH DISCLAIMERS ARE HELD * 15 *
16 * TO BE LEGALLY INVALID. See the GNU General Public License for * 16 * - Redistributions of source code must retain the above copyright notice,
17 * more details, a copy of which can be found in the file COPYING * 17 * this list of conditions and the following disclaimer.
18 * included with this package. * 18 *
19 * - Redistributions in binary form must reproduce the above copyright
20 * notice, this list of conditions and the following disclaimer in
21 * the documentation and/or other materials provided with the distribution.
22 *
23 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,THE
25 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
29 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
30 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
31 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
32 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
33 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
19 * 34 *
20 * Contact Information: 35 * Contact Information:
21 * linux-drivers@emulex.com 36 * linux-drivers@emulex.com
@@ -23,7 +38,7 @@
23 * Emulex 38 * Emulex
24 * 3333 Susan Street 39 * 3333 Susan Street
25 * Costa Mesa, CA 92626 40 * Costa Mesa, CA 92626
26 *******************************************************************/ 41 */
27 42
28#ifndef __OCRDMA_VERBS_H__ 43#ifndef __OCRDMA_VERBS_H__
29#define __OCRDMA_VERBS_H__ 44#define __OCRDMA_VERBS_H__
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
index 9e6ee82a8fd7..851c8219d501 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_verbs.c
@@ -177,7 +177,8 @@ int ipoib_transport_dev_init(struct net_device *dev, struct ib_device *ca)
177 else 177 else
178 size += ipoib_recvq_size * ipoib_max_conn_qp; 178 size += ipoib_recvq_size * ipoib_max_conn_qp;
179 } else 179 } else
180 goto out_free_wq; 180 if (ret != -ENOSYS)
181 goto out_free_wq;
181 182
182 cq_attr.cqe = size; 183 cq_attr.cqe = size;
183 priv->recv_cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL, 184 priv->recv_cq = ib_create_cq(priv->ca, ipoib_ib_completion, NULL,
diff --git a/drivers/infiniband/ulp/isert/ib_isert.c b/drivers/infiniband/ulp/isert/ib_isert.c
index 771700963127..d851e1828d6f 100644
--- a/drivers/infiniband/ulp/isert/ib_isert.c
+++ b/drivers/infiniband/ulp/isert/ib_isert.c
@@ -775,6 +775,17 @@ isert_connect_request(struct rdma_cm_id *cma_id, struct rdma_cm_event *event)
775 ret = isert_rdma_post_recvl(isert_conn); 775 ret = isert_rdma_post_recvl(isert_conn);
776 if (ret) 776 if (ret)
777 goto out_conn_dev; 777 goto out_conn_dev;
778 /*
779 * Obtain the second reference now before isert_rdma_accept() to
780 * ensure that any initiator generated REJECT CM event that occurs
781 * asynchronously won't drop the last reference until the error path
782 * in iscsi_target_login_sess_out() does it's ->iscsit_free_conn() ->
783 * isert_free_conn() -> isert_put_conn() -> kref_put().
784 */
785 if (!kref_get_unless_zero(&isert_conn->kref)) {
786 isert_warn("conn %p connect_release is running\n", isert_conn);
787 goto out_conn_dev;
788 }
778 789
779 ret = isert_rdma_accept(isert_conn); 790 ret = isert_rdma_accept(isert_conn);
780 if (ret) 791 if (ret)
@@ -836,11 +847,6 @@ isert_connected_handler(struct rdma_cm_id *cma_id)
836 847
837 isert_info("conn %p\n", isert_conn); 848 isert_info("conn %p\n", isert_conn);
838 849
839 if (!kref_get_unless_zero(&isert_conn->kref)) {
840 isert_warn("conn %p connect_release is running\n", isert_conn);
841 return;
842 }
843
844 mutex_lock(&isert_conn->mutex); 850 mutex_lock(&isert_conn->mutex);
845 if (isert_conn->state != ISER_CONN_FULL_FEATURE) 851 if (isert_conn->state != ISER_CONN_FULL_FEATURE)
846 isert_conn->state = ISER_CONN_UP; 852 isert_conn->state = ISER_CONN_UP;
diff --git a/drivers/input/input-leds.c b/drivers/input/input-leds.c
index 074a65ed17bb..766bf2660116 100644
--- a/drivers/input/input-leds.c
+++ b/drivers/input/input-leds.c
@@ -71,6 +71,18 @@ static void input_leds_event(struct input_handle *handle, unsigned int type,
71{ 71{
72} 72}
73 73
74static int input_leds_get_count(struct input_dev *dev)
75{
76 unsigned int led_code;
77 int count = 0;
78
79 for_each_set_bit(led_code, dev->ledbit, LED_CNT)
80 if (input_led_info[led_code].name)
81 count++;
82
83 return count;
84}
85
74static int input_leds_connect(struct input_handler *handler, 86static int input_leds_connect(struct input_handler *handler,
75 struct input_dev *dev, 87 struct input_dev *dev,
76 const struct input_device_id *id) 88 const struct input_device_id *id)
@@ -81,7 +93,7 @@ static int input_leds_connect(struct input_handler *handler,
81 int led_no; 93 int led_no;
82 int error; 94 int error;
83 95
84 num_leds = bitmap_weight(dev->ledbit, LED_CNT); 96 num_leds = input_leds_get_count(dev);
85 if (!num_leds) 97 if (!num_leds)
86 return -ENXIO; 98 return -ENXIO;
87 99
@@ -112,7 +124,7 @@ static int input_leds_connect(struct input_handler *handler,
112 led->handle = &leds->handle; 124 led->handle = &leds->handle;
113 led->code = led_code; 125 led->code = led_code;
114 126
115 if (WARN_ON(!input_led_info[led_code].name)) 127 if (!input_led_info[led_code].name)
116 continue; 128 continue;
117 129
118 led->cdev.name = kasprintf(GFP_KERNEL, "%s::%s", 130 led->cdev.name = kasprintf(GFP_KERNEL, "%s::%s",
diff --git a/drivers/input/joystick/turbografx.c b/drivers/input/joystick/turbografx.c
index 27b6a3ce18ca..891797ad76bc 100644
--- a/drivers/input/joystick/turbografx.c
+++ b/drivers/input/joystick/turbografx.c
@@ -196,7 +196,7 @@ static struct tgfx __init *tgfx_probe(int parport, int *n_buttons, int n_devs)
196 if (n_buttons[i] < 1) 196 if (n_buttons[i] < 1)
197 continue; 197 continue;
198 198
199 if (n_buttons[i] > 6) { 199 if (n_buttons[i] > ARRAY_SIZE(tgfx_buttons)) {
200 printk(KERN_ERR "turbografx.c: Invalid number of buttons %d\n", n_buttons[i]); 200 printk(KERN_ERR "turbografx.c: Invalid number of buttons %d\n", n_buttons[i]);
201 err = -EINVAL; 201 err = -EINVAL;
202 goto err_unreg_devs; 202 goto err_unreg_devs;
diff --git a/drivers/input/misc/axp20x-pek.c b/drivers/input/misc/axp20x-pek.c
index 10e140af5aac..1ac898db303a 100644
--- a/drivers/input/misc/axp20x-pek.c
+++ b/drivers/input/misc/axp20x-pek.c
@@ -292,3 +292,4 @@ module_platform_driver(axp20x_pek_driver);
292MODULE_DESCRIPTION("axp20x Power Button"); 292MODULE_DESCRIPTION("axp20x Power Button");
293MODULE_AUTHOR("Carlo Caione <carlo@caione.org>"); 293MODULE_AUTHOR("Carlo Caione <carlo@caione.org>");
294MODULE_LICENSE("GPL"); 294MODULE_LICENSE("GPL");
295MODULE_ALIAS("platform:axp20x-pek");
diff --git a/drivers/input/misc/twl4030-vibra.c b/drivers/input/misc/twl4030-vibra.c
index fc17b9592f54..10c4e3d462f1 100644
--- a/drivers/input/misc/twl4030-vibra.c
+++ b/drivers/input/misc/twl4030-vibra.c
@@ -183,7 +183,8 @@ static bool twl4030_vibra_check_coexist(struct twl4030_vibra_data *pdata,
183 if (pdata && pdata->coexist) 183 if (pdata && pdata->coexist)
184 return true; 184 return true;
185 185
186 if (of_find_node_by_name(node, "codec")) { 186 node = of_find_node_by_name(node, "codec");
187 if (node) {
187 of_node_put(node); 188 of_node_put(node);
188 return true; 189 return true;
189 } 190 }
diff --git a/drivers/input/mouse/alps.c b/drivers/input/mouse/alps.c
index 113d6f1516a5..4d246861d692 100644
--- a/drivers/input/mouse/alps.c
+++ b/drivers/input/mouse/alps.c
@@ -20,6 +20,7 @@
20#include <linux/input/mt.h> 20#include <linux/input/mt.h>
21#include <linux/serio.h> 21#include <linux/serio.h>
22#include <linux/libps2.h> 22#include <linux/libps2.h>
23#include <linux/dmi.h>
23 24
24#include "psmouse.h" 25#include "psmouse.h"
25#include "alps.h" 26#include "alps.h"
@@ -99,6 +100,7 @@ static const struct alps_nibble_commands alps_v6_nibble_commands[] = {
99#define ALPS_FOUR_BUTTONS 0x40 /* 4 direction button present */ 100#define ALPS_FOUR_BUTTONS 0x40 /* 4 direction button present */
100#define ALPS_PS2_INTERLEAVED 0x80 /* 3-byte PS/2 packet interleaved with 101#define ALPS_PS2_INTERLEAVED 0x80 /* 3-byte PS/2 packet interleaved with
101 6-byte ALPS packet */ 102 6-byte ALPS packet */
103#define ALPS_DELL 0x100 /* device is a Dell laptop */
102#define ALPS_BUTTONPAD 0x200 /* device is a clickpad */ 104#define ALPS_BUTTONPAD 0x200 /* device is a clickpad */
103 105
104static const struct alps_model_info alps_model_data[] = { 106static const struct alps_model_info alps_model_data[] = {
@@ -251,9 +253,9 @@ static void alps_process_packet_v1_v2(struct psmouse *psmouse)
251 return; 253 return;
252 } 254 }
253 255
254 /* Non interleaved V2 dualpoint has separate stick button bits */ 256 /* Dell non interleaved V2 dualpoint has separate stick button bits */
255 if (priv->proto_version == ALPS_PROTO_V2 && 257 if (priv->proto_version == ALPS_PROTO_V2 &&
256 priv->flags == (ALPS_PASS | ALPS_DUALPOINT)) { 258 priv->flags == (ALPS_DELL | ALPS_PASS | ALPS_DUALPOINT)) {
257 left |= packet[0] & 1; 259 left |= packet[0] & 1;
258 right |= packet[0] & 2; 260 right |= packet[0] & 2;
259 middle |= packet[0] & 4; 261 middle |= packet[0] & 4;
@@ -2550,6 +2552,8 @@ static int alps_set_protocol(struct psmouse *psmouse,
2550 priv->byte0 = protocol->byte0; 2552 priv->byte0 = protocol->byte0;
2551 priv->mask0 = protocol->mask0; 2553 priv->mask0 = protocol->mask0;
2552 priv->flags = protocol->flags; 2554 priv->flags = protocol->flags;
2555 if (dmi_name_in_vendors("Dell"))
2556 priv->flags |= ALPS_DELL;
2553 2557
2554 priv->x_max = 2000; 2558 priv->x_max = 2000;
2555 priv->y_max = 1400; 2559 priv->y_max = 1400;
diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c
index b10709f04615..30e3442518f8 100644
--- a/drivers/input/mouse/bcm5974.c
+++ b/drivers/input/mouse/bcm5974.c
@@ -2,6 +2,7 @@
2 * Apple USB BCM5974 (Macbook Air and Penryn Macbook Pro) multitouch driver 2 * Apple USB BCM5974 (Macbook Air and Penryn Macbook Pro) multitouch driver
3 * 3 *
4 * Copyright (C) 2008 Henrik Rydberg (rydberg@euromail.se) 4 * Copyright (C) 2008 Henrik Rydberg (rydberg@euromail.se)
5 * Copyright (C) 2015 John Horan (knasher@gmail.com)
5 * 6 *
6 * The USB initialization and package decoding was made by 7 * The USB initialization and package decoding was made by
7 * Scott Shawcroft as part of the touchd user-space driver project: 8 * Scott Shawcroft as part of the touchd user-space driver project:
@@ -91,6 +92,10 @@
91#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290 92#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290
92#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291 93#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291
93#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292 94#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292
95/* MacbookPro12,1 (2015) */
96#define USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI 0x0272
97#define USB_DEVICE_ID_APPLE_WELLSPRING9_ISO 0x0273
98#define USB_DEVICE_ID_APPLE_WELLSPRING9_JIS 0x0274
94 99
95#define BCM5974_DEVICE(prod) { \ 100#define BCM5974_DEVICE(prod) { \
96 .match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \ 101 .match_flags = (USB_DEVICE_ID_MATCH_DEVICE | \
@@ -152,6 +157,10 @@ static const struct usb_device_id bcm5974_table[] = {
152 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI), 157 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI),
153 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING8_ISO), 158 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING8_ISO),
154 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING8_JIS), 159 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING8_JIS),
160 /* MacbookPro12,1 */
161 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI),
162 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING9_ISO),
163 BCM5974_DEVICE(USB_DEVICE_ID_APPLE_WELLSPRING9_JIS),
155 /* Terminating entry */ 164 /* Terminating entry */
156 {} 165 {}
157}; 166};
@@ -180,21 +189,47 @@ struct bt_data {
180enum tp_type { 189enum tp_type {
181 TYPE1, /* plain trackpad */ 190 TYPE1, /* plain trackpad */
182 TYPE2, /* button integrated in trackpad */ 191 TYPE2, /* button integrated in trackpad */
183 TYPE3 /* additional header fields since June 2013 */ 192 TYPE3, /* additional header fields since June 2013 */
193 TYPE4 /* additional header field for pressure data */
184}; 194};
185 195
186/* trackpad finger data offsets, le16-aligned */ 196/* trackpad finger data offsets, le16-aligned */
187#define FINGER_TYPE1 (13 * sizeof(__le16)) 197#define HEADER_TYPE1 (13 * sizeof(__le16))
188#define FINGER_TYPE2 (15 * sizeof(__le16)) 198#define HEADER_TYPE2 (15 * sizeof(__le16))
189#define FINGER_TYPE3 (19 * sizeof(__le16)) 199#define HEADER_TYPE3 (19 * sizeof(__le16))
200#define HEADER_TYPE4 (23 * sizeof(__le16))
190 201
191/* trackpad button data offsets */ 202/* trackpad button data offsets */
203#define BUTTON_TYPE1 0
192#define BUTTON_TYPE2 15 204#define BUTTON_TYPE2 15
193#define BUTTON_TYPE3 23 205#define BUTTON_TYPE3 23
206#define BUTTON_TYPE4 31
194 207
195/* list of device capability bits */ 208/* list of device capability bits */
196#define HAS_INTEGRATED_BUTTON 1 209#define HAS_INTEGRATED_BUTTON 1
197 210
211/* trackpad finger data block size */
212#define FSIZE_TYPE1 (14 * sizeof(__le16))
213#define FSIZE_TYPE2 (14 * sizeof(__le16))
214#define FSIZE_TYPE3 (14 * sizeof(__le16))
215#define FSIZE_TYPE4 (15 * sizeof(__le16))
216
217/* offset from header to finger struct */
218#define DELTA_TYPE1 (0 * sizeof(__le16))
219#define DELTA_TYPE2 (0 * sizeof(__le16))
220#define DELTA_TYPE3 (0 * sizeof(__le16))
221#define DELTA_TYPE4 (1 * sizeof(__le16))
222
223/* usb control message mode switch data */
224#define USBMSG_TYPE1 8, 0x300, 0, 0, 0x1, 0x8
225#define USBMSG_TYPE2 8, 0x300, 0, 0, 0x1, 0x8
226#define USBMSG_TYPE3 8, 0x300, 0, 0, 0x1, 0x8
227#define USBMSG_TYPE4 2, 0x302, 2, 1, 0x1, 0x0
228
229/* Wellspring initialization constants */
230#define BCM5974_WELLSPRING_MODE_READ_REQUEST_ID 1
231#define BCM5974_WELLSPRING_MODE_WRITE_REQUEST_ID 9
232
198/* trackpad finger structure, le16-aligned */ 233/* trackpad finger structure, le16-aligned */
199struct tp_finger { 234struct tp_finger {
200 __le16 origin; /* zero when switching track finger */ 235 __le16 origin; /* zero when switching track finger */
@@ -207,14 +242,13 @@ struct tp_finger {
207 __le16 orientation; /* 16384 when point, else 15 bit angle */ 242 __le16 orientation; /* 16384 when point, else 15 bit angle */
208 __le16 touch_major; /* touch area, major axis */ 243 __le16 touch_major; /* touch area, major axis */
209 __le16 touch_minor; /* touch area, minor axis */ 244 __le16 touch_minor; /* touch area, minor axis */
210 __le16 unused[3]; /* zeros */ 245 __le16 unused[2]; /* zeros */
246 __le16 pressure; /* pressure on forcetouch touchpad */
211 __le16 multi; /* one finger: varies, more fingers: constant */ 247 __le16 multi; /* one finger: varies, more fingers: constant */
212} __attribute__((packed,aligned(2))); 248} __attribute__((packed,aligned(2)));
213 249
214/* trackpad finger data size, empirically at least ten fingers */ 250/* trackpad finger data size, empirically at least ten fingers */
215#define MAX_FINGERS 16 251#define MAX_FINGERS 16
216#define SIZEOF_FINGER sizeof(struct tp_finger)
217#define SIZEOF_ALL_FINGERS (MAX_FINGERS * SIZEOF_FINGER)
218#define MAX_FINGER_ORIENTATION 16384 252#define MAX_FINGER_ORIENTATION 16384
219 253
220/* device-specific parameters */ 254/* device-specific parameters */
@@ -232,8 +266,17 @@ struct bcm5974_config {
232 int bt_datalen; /* data length of the button interface */ 266 int bt_datalen; /* data length of the button interface */
233 int tp_ep; /* the endpoint of the trackpad interface */ 267 int tp_ep; /* the endpoint of the trackpad interface */
234 enum tp_type tp_type; /* type of trackpad interface */ 268 enum tp_type tp_type; /* type of trackpad interface */
235 int tp_offset; /* offset to trackpad finger data */ 269 int tp_header; /* bytes in header block */
236 int tp_datalen; /* data length of the trackpad interface */ 270 int tp_datalen; /* data length of the trackpad interface */
271 int tp_button; /* offset to button data */
272 int tp_fsize; /* bytes in single finger block */
273 int tp_delta; /* offset from header to finger struct */
274 int um_size; /* usb control message length */
275 int um_req_val; /* usb control message value */
276 int um_req_idx; /* usb control message index */
277 int um_switch_idx; /* usb control message mode switch index */
278 int um_switch_on; /* usb control message mode switch on */
279 int um_switch_off; /* usb control message mode switch off */
237 struct bcm5974_param p; /* finger pressure limits */ 280 struct bcm5974_param p; /* finger pressure limits */
238 struct bcm5974_param w; /* finger width limits */ 281 struct bcm5974_param w; /* finger width limits */
239 struct bcm5974_param x; /* horizontal limits */ 282 struct bcm5974_param x; /* horizontal limits */
@@ -259,6 +302,24 @@ struct bcm5974 {
259 int slots[MAX_FINGERS]; /* slot assignments */ 302 int slots[MAX_FINGERS]; /* slot assignments */
260}; 303};
261 304
305/* trackpad finger block data, le16-aligned */
306static const struct tp_finger *get_tp_finger(const struct bcm5974 *dev, int i)
307{
308 const struct bcm5974_config *c = &dev->cfg;
309 u8 *f_base = dev->tp_data + c->tp_header + c->tp_delta;
310
311 return (const struct tp_finger *)(f_base + i * c->tp_fsize);
312}
313
314#define DATAFORMAT(type) \
315 type, \
316 HEADER_##type, \
317 HEADER_##type + (MAX_FINGERS) * (FSIZE_##type), \
318 BUTTON_##type, \
319 FSIZE_##type, \
320 DELTA_##type, \
321 USBMSG_##type
322
262/* logical signal quality */ 323/* logical signal quality */
263#define SN_PRESSURE 45 /* pressure signal-to-noise ratio */ 324#define SN_PRESSURE 45 /* pressure signal-to-noise ratio */
264#define SN_WIDTH 25 /* width signal-to-noise ratio */ 325#define SN_WIDTH 25 /* width signal-to-noise ratio */
@@ -273,7 +334,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
273 USB_DEVICE_ID_APPLE_WELLSPRING_JIS, 334 USB_DEVICE_ID_APPLE_WELLSPRING_JIS,
274 0, 335 0,
275 0x84, sizeof(struct bt_data), 336 0x84, sizeof(struct bt_data),
276 0x81, TYPE1, FINGER_TYPE1, FINGER_TYPE1 + SIZEOF_ALL_FINGERS, 337 0x81, DATAFORMAT(TYPE1),
277 { SN_PRESSURE, 0, 256 }, 338 { SN_PRESSURE, 0, 256 },
278 { SN_WIDTH, 0, 2048 }, 339 { SN_WIDTH, 0, 2048 },
279 { SN_COORD, -4824, 5342 }, 340 { SN_COORD, -4824, 5342 },
@@ -286,7 +347,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
286 USB_DEVICE_ID_APPLE_WELLSPRING2_JIS, 347 USB_DEVICE_ID_APPLE_WELLSPRING2_JIS,
287 0, 348 0,
288 0x84, sizeof(struct bt_data), 349 0x84, sizeof(struct bt_data),
289 0x81, TYPE1, FINGER_TYPE1, FINGER_TYPE1 + SIZEOF_ALL_FINGERS, 350 0x81, DATAFORMAT(TYPE1),
290 { SN_PRESSURE, 0, 256 }, 351 { SN_PRESSURE, 0, 256 },
291 { SN_WIDTH, 0, 2048 }, 352 { SN_WIDTH, 0, 2048 },
292 { SN_COORD, -4824, 4824 }, 353 { SN_COORD, -4824, 4824 },
@@ -299,7 +360,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
299 USB_DEVICE_ID_APPLE_WELLSPRING3_JIS, 360 USB_DEVICE_ID_APPLE_WELLSPRING3_JIS,
300 HAS_INTEGRATED_BUTTON, 361 HAS_INTEGRATED_BUTTON,
301 0x84, sizeof(struct bt_data), 362 0x84, sizeof(struct bt_data),
302 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, 363 0x81, DATAFORMAT(TYPE2),
303 { SN_PRESSURE, 0, 300 }, 364 { SN_PRESSURE, 0, 300 },
304 { SN_WIDTH, 0, 2048 }, 365 { SN_WIDTH, 0, 2048 },
305 { SN_COORD, -4460, 5166 }, 366 { SN_COORD, -4460, 5166 },
@@ -312,7 +373,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
312 USB_DEVICE_ID_APPLE_WELLSPRING4_JIS, 373 USB_DEVICE_ID_APPLE_WELLSPRING4_JIS,
313 HAS_INTEGRATED_BUTTON, 374 HAS_INTEGRATED_BUTTON,
314 0x84, sizeof(struct bt_data), 375 0x84, sizeof(struct bt_data),
315 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, 376 0x81, DATAFORMAT(TYPE2),
316 { SN_PRESSURE, 0, 300 }, 377 { SN_PRESSURE, 0, 300 },
317 { SN_WIDTH, 0, 2048 }, 378 { SN_WIDTH, 0, 2048 },
318 { SN_COORD, -4620, 5140 }, 379 { SN_COORD, -4620, 5140 },
@@ -325,7 +386,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
325 USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS, 386 USB_DEVICE_ID_APPLE_WELLSPRING4A_JIS,
326 HAS_INTEGRATED_BUTTON, 387 HAS_INTEGRATED_BUTTON,
327 0x84, sizeof(struct bt_data), 388 0x84, sizeof(struct bt_data),
328 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, 389 0x81, DATAFORMAT(TYPE2),
329 { SN_PRESSURE, 0, 300 }, 390 { SN_PRESSURE, 0, 300 },
330 { SN_WIDTH, 0, 2048 }, 391 { SN_WIDTH, 0, 2048 },
331 { SN_COORD, -4616, 5112 }, 392 { SN_COORD, -4616, 5112 },
@@ -338,7 +399,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
338 USB_DEVICE_ID_APPLE_WELLSPRING5_JIS, 399 USB_DEVICE_ID_APPLE_WELLSPRING5_JIS,
339 HAS_INTEGRATED_BUTTON, 400 HAS_INTEGRATED_BUTTON,
340 0x84, sizeof(struct bt_data), 401 0x84, sizeof(struct bt_data),
341 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, 402 0x81, DATAFORMAT(TYPE2),
342 { SN_PRESSURE, 0, 300 }, 403 { SN_PRESSURE, 0, 300 },
343 { SN_WIDTH, 0, 2048 }, 404 { SN_WIDTH, 0, 2048 },
344 { SN_COORD, -4415, 5050 }, 405 { SN_COORD, -4415, 5050 },
@@ -351,7 +412,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
351 USB_DEVICE_ID_APPLE_WELLSPRING6_JIS, 412 USB_DEVICE_ID_APPLE_WELLSPRING6_JIS,
352 HAS_INTEGRATED_BUTTON, 413 HAS_INTEGRATED_BUTTON,
353 0x84, sizeof(struct bt_data), 414 0x84, sizeof(struct bt_data),
354 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, 415 0x81, DATAFORMAT(TYPE2),
355 { SN_PRESSURE, 0, 300 }, 416 { SN_PRESSURE, 0, 300 },
356 { SN_WIDTH, 0, 2048 }, 417 { SN_WIDTH, 0, 2048 },
357 { SN_COORD, -4620, 5140 }, 418 { SN_COORD, -4620, 5140 },
@@ -364,7 +425,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
364 USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS, 425 USB_DEVICE_ID_APPLE_WELLSPRING5A_JIS,
365 HAS_INTEGRATED_BUTTON, 426 HAS_INTEGRATED_BUTTON,
366 0x84, sizeof(struct bt_data), 427 0x84, sizeof(struct bt_data),
367 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, 428 0x81, DATAFORMAT(TYPE2),
368 { SN_PRESSURE, 0, 300 }, 429 { SN_PRESSURE, 0, 300 },
369 { SN_WIDTH, 0, 2048 }, 430 { SN_WIDTH, 0, 2048 },
370 { SN_COORD, -4750, 5280 }, 431 { SN_COORD, -4750, 5280 },
@@ -377,7 +438,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
377 USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS, 438 USB_DEVICE_ID_APPLE_WELLSPRING6A_JIS,
378 HAS_INTEGRATED_BUTTON, 439 HAS_INTEGRATED_BUTTON,
379 0x84, sizeof(struct bt_data), 440 0x84, sizeof(struct bt_data),
380 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, 441 0x81, DATAFORMAT(TYPE2),
381 { SN_PRESSURE, 0, 300 }, 442 { SN_PRESSURE, 0, 300 },
382 { SN_WIDTH, 0, 2048 }, 443 { SN_WIDTH, 0, 2048 },
383 { SN_COORD, -4620, 5140 }, 444 { SN_COORD, -4620, 5140 },
@@ -390,7 +451,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
390 USB_DEVICE_ID_APPLE_WELLSPRING7_JIS, 451 USB_DEVICE_ID_APPLE_WELLSPRING7_JIS,
391 HAS_INTEGRATED_BUTTON, 452 HAS_INTEGRATED_BUTTON,
392 0x84, sizeof(struct bt_data), 453 0x84, sizeof(struct bt_data),
393 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, 454 0x81, DATAFORMAT(TYPE2),
394 { SN_PRESSURE, 0, 300 }, 455 { SN_PRESSURE, 0, 300 },
395 { SN_WIDTH, 0, 2048 }, 456 { SN_WIDTH, 0, 2048 },
396 { SN_COORD, -4750, 5280 }, 457 { SN_COORD, -4750, 5280 },
@@ -403,7 +464,7 @@ static const struct bcm5974_config bcm5974_config_table[] = {
403 USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS, 464 USB_DEVICE_ID_APPLE_WELLSPRING7A_JIS,
404 HAS_INTEGRATED_BUTTON, 465 HAS_INTEGRATED_BUTTON,
405 0x84, sizeof(struct bt_data), 466 0x84, sizeof(struct bt_data),
406 0x81, TYPE2, FINGER_TYPE2, FINGER_TYPE2 + SIZEOF_ALL_FINGERS, 467 0x81, DATAFORMAT(TYPE2),
407 { SN_PRESSURE, 0, 300 }, 468 { SN_PRESSURE, 0, 300 },
408 { SN_WIDTH, 0, 2048 }, 469 { SN_WIDTH, 0, 2048 },
409 { SN_COORD, -4750, 5280 }, 470 { SN_COORD, -4750, 5280 },
@@ -416,13 +477,26 @@ static const struct bcm5974_config bcm5974_config_table[] = {
416 USB_DEVICE_ID_APPLE_WELLSPRING8_JIS, 477 USB_DEVICE_ID_APPLE_WELLSPRING8_JIS,
417 HAS_INTEGRATED_BUTTON, 478 HAS_INTEGRATED_BUTTON,
418 0, sizeof(struct bt_data), 479 0, sizeof(struct bt_data),
419 0x83, TYPE3, FINGER_TYPE3, FINGER_TYPE3 + SIZEOF_ALL_FINGERS, 480 0x83, DATAFORMAT(TYPE3),
420 { SN_PRESSURE, 0, 300 }, 481 { SN_PRESSURE, 0, 300 },
421 { SN_WIDTH, 0, 2048 }, 482 { SN_WIDTH, 0, 2048 },
422 { SN_COORD, -4620, 5140 }, 483 { SN_COORD, -4620, 5140 },
423 { SN_COORD, -150, 6600 }, 484 { SN_COORD, -150, 6600 },
424 { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION } 485 { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION }
425 }, 486 },
487 {
488 USB_DEVICE_ID_APPLE_WELLSPRING9_ANSI,
489 USB_DEVICE_ID_APPLE_WELLSPRING9_ISO,
490 USB_DEVICE_ID_APPLE_WELLSPRING9_JIS,
491 HAS_INTEGRATED_BUTTON,
492 0, sizeof(struct bt_data),
493 0x83, DATAFORMAT(TYPE4),
494 { SN_PRESSURE, 0, 300 },
495 { SN_WIDTH, 0, 2048 },
496 { SN_COORD, -4828, 5345 },
497 { SN_COORD, -203, 6803 },
498 { SN_ORIENT, -MAX_FINGER_ORIENTATION, MAX_FINGER_ORIENTATION }
499 },
426 {} 500 {}
427}; 501};
428 502
@@ -549,19 +623,18 @@ static int report_tp_state(struct bcm5974 *dev, int size)
549 struct input_dev *input = dev->input; 623 struct input_dev *input = dev->input;
550 int raw_n, i, n = 0; 624 int raw_n, i, n = 0;
551 625
552 if (size < c->tp_offset || (size - c->tp_offset) % SIZEOF_FINGER != 0) 626 if (size < c->tp_header || (size - c->tp_header) % c->tp_fsize != 0)
553 return -EIO; 627 return -EIO;
554 628
555 /* finger data, le16-aligned */ 629 raw_n = (size - c->tp_header) / c->tp_fsize;
556 f = (const struct tp_finger *)(dev->tp_data + c->tp_offset);
557 raw_n = (size - c->tp_offset) / SIZEOF_FINGER;
558 630
559 for (i = 0; i < raw_n; i++) { 631 for (i = 0; i < raw_n; i++) {
560 if (raw2int(f[i].touch_major) == 0) 632 f = get_tp_finger(dev, i);
633 if (raw2int(f->touch_major) == 0)
561 continue; 634 continue;
562 dev->pos[n].x = raw2int(f[i].abs_x); 635 dev->pos[n].x = raw2int(f->abs_x);
563 dev->pos[n].y = c->y.min + c->y.max - raw2int(f[i].abs_y); 636 dev->pos[n].y = c->y.min + c->y.max - raw2int(f->abs_y);
564 dev->index[n++] = &f[i]; 637 dev->index[n++] = f;
565 } 638 }
566 639
567 input_mt_assign_slots(input, dev->slots, dev->pos, n, 0); 640 input_mt_assign_slots(input, dev->slots, dev->pos, n, 0);
@@ -572,32 +645,22 @@ static int report_tp_state(struct bcm5974 *dev, int size)
572 645
573 input_mt_sync_frame(input); 646 input_mt_sync_frame(input);
574 647
575 report_synaptics_data(input, c, f, raw_n); 648 report_synaptics_data(input, c, get_tp_finger(dev, 0), raw_n);
576 649
577 /* type 2 reports button events via ibt only */ 650 /* later types report button events via integrated button only */
578 if (c->tp_type == TYPE2) { 651 if (c->caps & HAS_INTEGRATED_BUTTON) {
579 int ibt = raw2int(dev->tp_data[BUTTON_TYPE2]); 652 int ibt = raw2int(dev->tp_data[c->tp_button]);
580 input_report_key(input, BTN_LEFT, ibt); 653 input_report_key(input, BTN_LEFT, ibt);
581 } 654 }
582 655
583 if (c->tp_type == TYPE3)
584 input_report_key(input, BTN_LEFT, dev->tp_data[BUTTON_TYPE3]);
585
586 input_sync(input); 656 input_sync(input);
587 657
588 return 0; 658 return 0;
589} 659}
590 660
591/* Wellspring initialization constants */
592#define BCM5974_WELLSPRING_MODE_READ_REQUEST_ID 1
593#define BCM5974_WELLSPRING_MODE_WRITE_REQUEST_ID 9
594#define BCM5974_WELLSPRING_MODE_REQUEST_VALUE 0x300
595#define BCM5974_WELLSPRING_MODE_REQUEST_INDEX 0
596#define BCM5974_WELLSPRING_MODE_VENDOR_VALUE 0x01
597#define BCM5974_WELLSPRING_MODE_NORMAL_VALUE 0x08
598
599static int bcm5974_wellspring_mode(struct bcm5974 *dev, bool on) 661static int bcm5974_wellspring_mode(struct bcm5974 *dev, bool on)
600{ 662{
663 const struct bcm5974_config *c = &dev->cfg;
601 int retval = 0, size; 664 int retval = 0, size;
602 char *data; 665 char *data;
603 666
@@ -605,7 +668,7 @@ static int bcm5974_wellspring_mode(struct bcm5974 *dev, bool on)
605 if (dev->cfg.tp_type == TYPE3) 668 if (dev->cfg.tp_type == TYPE3)
606 return 0; 669 return 0;
607 670
608 data = kmalloc(8, GFP_KERNEL); 671 data = kmalloc(c->um_size, GFP_KERNEL);
609 if (!data) { 672 if (!data) {
610 dev_err(&dev->intf->dev, "out of memory\n"); 673 dev_err(&dev->intf->dev, "out of memory\n");
611 retval = -ENOMEM; 674 retval = -ENOMEM;
@@ -616,28 +679,24 @@ static int bcm5974_wellspring_mode(struct bcm5974 *dev, bool on)
616 size = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), 679 size = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0),
617 BCM5974_WELLSPRING_MODE_READ_REQUEST_ID, 680 BCM5974_WELLSPRING_MODE_READ_REQUEST_ID,
618 USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE, 681 USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
619 BCM5974_WELLSPRING_MODE_REQUEST_VALUE, 682 c->um_req_val, c->um_req_idx, data, c->um_size, 5000);
620 BCM5974_WELLSPRING_MODE_REQUEST_INDEX, data, 8, 5000);
621 683
622 if (size != 8) { 684 if (size != c->um_size) {
623 dev_err(&dev->intf->dev, "could not read from device\n"); 685 dev_err(&dev->intf->dev, "could not read from device\n");
624 retval = -EIO; 686 retval = -EIO;
625 goto out; 687 goto out;
626 } 688 }
627 689
628 /* apply the mode switch */ 690 /* apply the mode switch */
629 data[0] = on ? 691 data[c->um_switch_idx] = on ? c->um_switch_on : c->um_switch_off;
630 BCM5974_WELLSPRING_MODE_VENDOR_VALUE :
631 BCM5974_WELLSPRING_MODE_NORMAL_VALUE;
632 692
633 /* write configuration */ 693 /* write configuration */
634 size = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), 694 size = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0),
635 BCM5974_WELLSPRING_MODE_WRITE_REQUEST_ID, 695 BCM5974_WELLSPRING_MODE_WRITE_REQUEST_ID,
636 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE, 696 USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
637 BCM5974_WELLSPRING_MODE_REQUEST_VALUE, 697 c->um_req_val, c->um_req_idx, data, c->um_size, 5000);
638 BCM5974_WELLSPRING_MODE_REQUEST_INDEX, data, 8, 5000);
639 698
640 if (size != 8) { 699 if (size != c->um_size) {
641 dev_err(&dev->intf->dev, "could not write to device\n"); 700 dev_err(&dev->intf->dev, "could not write to device\n");
642 retval = -EIO; 701 retval = -EIO;
643 goto out; 702 goto out;
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index ce3d40004458..2955f1d0ca6c 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -783,19 +783,26 @@ static int elantech_packet_check_v4(struct psmouse *psmouse)
783 struct elantech_data *etd = psmouse->private; 783 struct elantech_data *etd = psmouse->private;
784 unsigned char *packet = psmouse->packet; 784 unsigned char *packet = psmouse->packet;
785 unsigned char packet_type = packet[3] & 0x03; 785 unsigned char packet_type = packet[3] & 0x03;
786 unsigned int ic_version;
786 bool sanity_check; 787 bool sanity_check;
787 788
788 if (etd->tp_dev && (packet[3] & 0x0f) == 0x06) 789 if (etd->tp_dev && (packet[3] & 0x0f) == 0x06)
789 return PACKET_TRACKPOINT; 790 return PACKET_TRACKPOINT;
790 791
792 /* This represents the version of IC body. */
793 ic_version = (etd->fw_version & 0x0f0000) >> 16;
794
791 /* 795 /*
792 * Sanity check based on the constant bits of a packet. 796 * Sanity check based on the constant bits of a packet.
793 * The constant bits change depending on the value of 797 * The constant bits change depending on the value of
794 * the hardware flag 'crc_enabled' but are the same for 798 * the hardware flag 'crc_enabled' and the version of
795 * every packet, regardless of the type. 799 * the IC body, but are the same for every packet,
800 * regardless of the type.
796 */ 801 */
797 if (etd->crc_enabled) 802 if (etd->crc_enabled)
798 sanity_check = ((packet[3] & 0x08) == 0x00); 803 sanity_check = ((packet[3] & 0x08) == 0x00);
804 else if (ic_version == 7 && etd->samples[1] == 0x2A)
805 sanity_check = ((packet[3] & 0x1c) == 0x10);
799 else 806 else
800 sanity_check = ((packet[0] & 0x0c) == 0x04 && 807 sanity_check = ((packet[0] & 0x0c) == 0x04 &&
801 (packet[3] & 0x1c) == 0x10); 808 (packet[3] & 0x1c) == 0x10);
@@ -1116,6 +1123,7 @@ static int elantech_get_resolution_v4(struct psmouse *psmouse,
1116 * Avatar AVIU-145A2 0x361f00 ? clickpad 1123 * Avatar AVIU-145A2 0x361f00 ? clickpad
1117 * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons 1124 * Fujitsu LIFEBOOK E544 0x470f00 d0, 12, 09 2 hw buttons
1118 * Fujitsu LIFEBOOK E554 0x570f01 40, 14, 0c 2 hw buttons 1125 * Fujitsu LIFEBOOK E554 0x570f01 40, 14, 0c 2 hw buttons
1126 * Fujitsu T725 0x470f01 05, 12, 09 2 hw buttons
1119 * Fujitsu H730 0x570f00 c0, 14, 0c 3 hw buttons (**) 1127 * Fujitsu H730 0x570f00 c0, 14, 0c 3 hw buttons (**)
1120 * Gigabyte U2442 0x450f01 58, 17, 0c 2 hw buttons 1128 * Gigabyte U2442 0x450f01 58, 17, 0c 2 hw buttons
1121 * Lenovo L430 0x350f02 b9, 15, 0c 2 hw buttons (*) 1129 * Lenovo L430 0x350f02 b9, 15, 0c 2 hw buttons (*)
@@ -1167,7 +1175,7 @@ static int elantech_set_input_params(struct psmouse *psmouse)
1167 struct input_dev *dev = psmouse->dev; 1175 struct input_dev *dev = psmouse->dev;
1168 struct elantech_data *etd = psmouse->private; 1176 struct elantech_data *etd = psmouse->private;
1169 unsigned int x_min = 0, y_min = 0, x_max = 0, y_max = 0, width = 0; 1177 unsigned int x_min = 0, y_min = 0, x_max = 0, y_max = 0, width = 0;
1170 unsigned int x_res = 0, y_res = 0; 1178 unsigned int x_res = 31, y_res = 31;
1171 1179
1172 if (elantech_set_range(psmouse, &x_min, &y_min, &x_max, &y_max, &width)) 1180 if (elantech_set_range(psmouse, &x_min, &y_min, &x_max, &y_max, &width))
1173 return -1; 1181 return -1;
@@ -1232,8 +1240,6 @@ static int elantech_set_input_params(struct psmouse *psmouse)
1232 /* For X to recognize me as touchpad. */ 1240 /* For X to recognize me as touchpad. */
1233 input_set_abs_params(dev, ABS_X, x_min, x_max, 0, 0); 1241 input_set_abs_params(dev, ABS_X, x_min, x_max, 0, 0);
1234 input_set_abs_params(dev, ABS_Y, y_min, y_max, 0, 0); 1242 input_set_abs_params(dev, ABS_Y, y_min, y_max, 0, 0);
1235 input_abs_set_res(dev, ABS_X, x_res);
1236 input_abs_set_res(dev, ABS_Y, y_res);
1237 /* 1243 /*
1238 * range of pressure and width is the same as v2, 1244 * range of pressure and width is the same as v2,
1239 * report ABS_PRESSURE, ABS_TOOL_WIDTH for compatibility. 1245 * report ABS_PRESSURE, ABS_TOOL_WIDTH for compatibility.
@@ -1246,8 +1252,6 @@ static int elantech_set_input_params(struct psmouse *psmouse)
1246 input_mt_init_slots(dev, ETP_MAX_FINGERS, 0); 1252 input_mt_init_slots(dev, ETP_MAX_FINGERS, 0);
1247 input_set_abs_params(dev, ABS_MT_POSITION_X, x_min, x_max, 0, 0); 1253 input_set_abs_params(dev, ABS_MT_POSITION_X, x_min, x_max, 0, 0);
1248 input_set_abs_params(dev, ABS_MT_POSITION_Y, y_min, y_max, 0, 0); 1254 input_set_abs_params(dev, ABS_MT_POSITION_Y, y_min, y_max, 0, 0);
1249 input_abs_set_res(dev, ABS_MT_POSITION_X, x_res);
1250 input_abs_set_res(dev, ABS_MT_POSITION_Y, y_res);
1251 input_set_abs_params(dev, ABS_MT_PRESSURE, ETP_PMIN_V2, 1255 input_set_abs_params(dev, ABS_MT_PRESSURE, ETP_PMIN_V2,
1252 ETP_PMAX_V2, 0, 0); 1256 ETP_PMAX_V2, 0, 0);
1253 /* 1257 /*
@@ -1259,6 +1263,13 @@ static int elantech_set_input_params(struct psmouse *psmouse)
1259 break; 1263 break;
1260 } 1264 }
1261 1265
1266 input_abs_set_res(dev, ABS_X, x_res);
1267 input_abs_set_res(dev, ABS_Y, y_res);
1268 if (etd->hw_version > 1) {
1269 input_abs_set_res(dev, ABS_MT_POSITION_X, x_res);
1270 input_abs_set_res(dev, ABS_MT_POSITION_Y, y_res);
1271 }
1272
1262 etd->y_max = y_max; 1273 etd->y_max = y_max;
1263 etd->width = width; 1274 etd->width = width;
1264 1275
@@ -1648,6 +1659,16 @@ int elantech_init(struct psmouse *psmouse)
1648 etd->capabilities[0], etd->capabilities[1], 1659 etd->capabilities[0], etd->capabilities[1],
1649 etd->capabilities[2]); 1660 etd->capabilities[2]);
1650 1661
1662 if (etd->hw_version != 1) {
1663 if (etd->send_cmd(psmouse, ETP_SAMPLE_QUERY, etd->samples)) {
1664 psmouse_err(psmouse, "failed to query sample data\n");
1665 goto init_fail;
1666 }
1667 psmouse_info(psmouse,
1668 "Elan sample query result %02x, %02x, %02x\n",
1669 etd->samples[0], etd->samples[1], etd->samples[2]);
1670 }
1671
1651 if (elantech_set_absolute_mode(psmouse)) { 1672 if (elantech_set_absolute_mode(psmouse)) {
1652 psmouse_err(psmouse, 1673 psmouse_err(psmouse,
1653 "failed to put touchpad into absolute mode.\n"); 1674 "failed to put touchpad into absolute mode.\n");
diff --git a/drivers/input/mouse/elantech.h b/drivers/input/mouse/elantech.h
index f965d1569cc3..e1cbf409d9c8 100644
--- a/drivers/input/mouse/elantech.h
+++ b/drivers/input/mouse/elantech.h
@@ -129,6 +129,7 @@ struct elantech_data {
129 unsigned char reg_26; 129 unsigned char reg_26;
130 unsigned char debug; 130 unsigned char debug;
131 unsigned char capabilities[3]; 131 unsigned char capabilities[3];
132 unsigned char samples[3];
132 bool paritycheck; 133 bool paritycheck;
133 bool jumpy_cursor; 134 bool jumpy_cursor;
134 bool reports_pressure; 135 bool reports_pressure;
diff --git a/drivers/input/mouse/synaptics.c b/drivers/input/mouse/synaptics.c
index 3a32caf06bf1..6025eb430c0a 100644
--- a/drivers/input/mouse/synaptics.c
+++ b/drivers/input/mouse/synaptics.c
@@ -1484,12 +1484,12 @@ static int __synaptics_init(struct psmouse *psmouse, bool absolute_mode)
1484 priv->pkt_type = SYN_MODEL_NEWABS(priv->model_id) ? SYN_NEWABS : SYN_OLDABS; 1484 priv->pkt_type = SYN_MODEL_NEWABS(priv->model_id) ? SYN_NEWABS : SYN_OLDABS;
1485 1485
1486 psmouse_info(psmouse, 1486 psmouse_info(psmouse,
1487 "Touchpad model: %ld, fw: %ld.%ld, id: %#lx, caps: %#lx/%#lx/%#lx, board id: %lu, fw id: %lu\n", 1487 "Touchpad model: %ld, fw: %ld.%ld, id: %#lx, caps: %#lx/%#lx/%#lx/%#lx, board id: %lu, fw id: %lu\n",
1488 SYN_ID_MODEL(priv->identity), 1488 SYN_ID_MODEL(priv->identity),
1489 SYN_ID_MAJOR(priv->identity), SYN_ID_MINOR(priv->identity), 1489 SYN_ID_MAJOR(priv->identity), SYN_ID_MINOR(priv->identity),
1490 priv->model_id, 1490 priv->model_id,
1491 priv->capabilities, priv->ext_cap, priv->ext_cap_0c, 1491 priv->capabilities, priv->ext_cap, priv->ext_cap_0c,
1492 priv->board_id, priv->firmware_id); 1492 priv->ext_cap_10, priv->board_id, priv->firmware_id);
1493 1493
1494 set_input_params(psmouse, priv); 1494 set_input_params(psmouse, priv);
1495 1495
diff --git a/drivers/input/touchscreen/goodix.c b/drivers/input/touchscreen/goodix.c
index b4d12e29abff..e36162b28c2a 100644
--- a/drivers/input/touchscreen/goodix.c
+++ b/drivers/input/touchscreen/goodix.c
@@ -15,6 +15,7 @@
15 */ 15 */
16 16
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/dmi.h>
18#include <linux/i2c.h> 19#include <linux/i2c.h>
19#include <linux/input.h> 20#include <linux/input.h>
20#include <linux/input/mt.h> 21#include <linux/input/mt.h>
@@ -34,6 +35,7 @@ struct goodix_ts_data {
34 int abs_y_max; 35 int abs_y_max;
35 unsigned int max_touch_num; 36 unsigned int max_touch_num;
36 unsigned int int_trigger_type; 37 unsigned int int_trigger_type;
38 bool rotated_screen;
37}; 39};
38 40
39#define GOODIX_MAX_HEIGHT 4096 41#define GOODIX_MAX_HEIGHT 4096
@@ -60,6 +62,30 @@ static const unsigned long goodix_irq_flags[] = {
60 IRQ_TYPE_LEVEL_HIGH, 62 IRQ_TYPE_LEVEL_HIGH,
61}; 63};
62 64
65/*
66 * Those tablets have their coordinates origin at the bottom right
67 * of the tablet, as if rotated 180 degrees
68 */
69static const struct dmi_system_id rotated_screen[] = {
70#if defined(CONFIG_DMI) && defined(CONFIG_X86)
71 {
72 .ident = "WinBook TW100",
73 .matches = {
74 DMI_MATCH(DMI_SYS_VENDOR, "WinBook"),
75 DMI_MATCH(DMI_PRODUCT_NAME, "TW100")
76 }
77 },
78 {
79 .ident = "WinBook TW700",
80 .matches = {
81 DMI_MATCH(DMI_SYS_VENDOR, "WinBook"),
82 DMI_MATCH(DMI_PRODUCT_NAME, "TW700")
83 },
84 },
85#endif
86 {}
87};
88
63/** 89/**
64 * goodix_i2c_read - read data from a register of the i2c slave device. 90 * goodix_i2c_read - read data from a register of the i2c slave device.
65 * 91 *
@@ -129,6 +155,11 @@ static void goodix_ts_report_touch(struct goodix_ts_data *ts, u8 *coor_data)
129 int input_y = get_unaligned_le16(&coor_data[3]); 155 int input_y = get_unaligned_le16(&coor_data[3]);
130 int input_w = get_unaligned_le16(&coor_data[5]); 156 int input_w = get_unaligned_le16(&coor_data[5]);
131 157
158 if (ts->rotated_screen) {
159 input_x = ts->abs_x_max - input_x;
160 input_y = ts->abs_y_max - input_y;
161 }
162
132 input_mt_slot(ts->input_dev, id); 163 input_mt_slot(ts->input_dev, id);
133 input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER, true); 164 input_mt_report_slot_state(ts->input_dev, MT_TOOL_FINGER, true);
134 input_report_abs(ts->input_dev, ABS_MT_POSITION_X, input_x); 165 input_report_abs(ts->input_dev, ABS_MT_POSITION_X, input_x);
@@ -223,6 +254,11 @@ static void goodix_read_config(struct goodix_ts_data *ts)
223 ts->abs_y_max = GOODIX_MAX_HEIGHT; 254 ts->abs_y_max = GOODIX_MAX_HEIGHT;
224 ts->max_touch_num = GOODIX_MAX_CONTACTS; 255 ts->max_touch_num = GOODIX_MAX_CONTACTS;
225 } 256 }
257
258 ts->rotated_screen = dmi_check_system(rotated_screen);
259 if (ts->rotated_screen)
260 dev_dbg(&ts->client->dev,
261 "Applying '180 degrees rotated screen' quirk\n");
226} 262}
227 263
228/** 264/**
diff --git a/drivers/input/touchscreen/usbtouchscreen.c b/drivers/input/touchscreen/usbtouchscreen.c
index f2c6c352c55a..2c41107240de 100644
--- a/drivers/input/touchscreen/usbtouchscreen.c
+++ b/drivers/input/touchscreen/usbtouchscreen.c
@@ -627,6 +627,9 @@ static int dmc_tsc10_init(struct usbtouch_usb *usbtouch)
627 goto err_out; 627 goto err_out;
628 } 628 }
629 629
630 /* TSC-25 data sheet specifies a delay after the RESET command */
631 msleep(150);
632
630 /* set coordinate output rate */ 633 /* set coordinate output rate */
631 buf[0] = buf[1] = 0xFF; 634 buf[0] = buf[1] = 0xFF;
632 ret = usb_control_msg(dev, usb_rcvctrlpipe (dev, 0), 635 ret = usb_control_msg(dev, usb_rcvctrlpipe (dev, 0),
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index a57e9b749895..658ee39e6569 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -76,8 +76,6 @@ LIST_HEAD(hpet_map);
76 * Domain for untranslated devices - only allocated 76 * Domain for untranslated devices - only allocated
77 * if iommu=pt passed on kernel cmd line. 77 * if iommu=pt passed on kernel cmd line.
78 */ 78 */
79static struct protection_domain *pt_domain;
80
81static const struct iommu_ops amd_iommu_ops; 79static const struct iommu_ops amd_iommu_ops;
82 80
83static ATOMIC_NOTIFIER_HEAD(ppr_notifier); 81static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
@@ -96,7 +94,7 @@ struct iommu_dev_data {
96 struct protection_domain *domain; /* Domain the device is bound to */ 94 struct protection_domain *domain; /* Domain the device is bound to */
97 u16 devid; /* PCI Device ID */ 95 u16 devid; /* PCI Device ID */
98 bool iommu_v2; /* Device can make use of IOMMUv2 */ 96 bool iommu_v2; /* Device can make use of IOMMUv2 */
99 bool passthrough; /* Default for device is pt_domain */ 97 bool passthrough; /* Device is identity mapped */
100 struct { 98 struct {
101 bool enabled; 99 bool enabled;
102 int qdep; 100 int qdep;
@@ -116,7 +114,6 @@ struct iommu_cmd {
116struct kmem_cache *amd_iommu_irq_cache; 114struct kmem_cache *amd_iommu_irq_cache;
117 115
118static void update_domain(struct protection_domain *domain); 116static void update_domain(struct protection_domain *domain);
119static int alloc_passthrough_domain(void);
120static int protection_domain_init(struct protection_domain *domain); 117static int protection_domain_init(struct protection_domain *domain);
121 118
122/**************************************************************************** 119/****************************************************************************
@@ -2167,15 +2164,17 @@ static int attach_device(struct device *dev,
2167 dev_data = get_dev_data(dev); 2164 dev_data = get_dev_data(dev);
2168 2165
2169 if (domain->flags & PD_IOMMUV2_MASK) { 2166 if (domain->flags & PD_IOMMUV2_MASK) {
2170 if (!dev_data->iommu_v2 || !dev_data->passthrough) 2167 if (!dev_data->passthrough)
2171 return -EINVAL; 2168 return -EINVAL;
2172 2169
2173 if (pdev_iommuv2_enable(pdev) != 0) 2170 if (dev_data->iommu_v2) {
2174 return -EINVAL; 2171 if (pdev_iommuv2_enable(pdev) != 0)
2172 return -EINVAL;
2175 2173
2176 dev_data->ats.enabled = true; 2174 dev_data->ats.enabled = true;
2177 dev_data->ats.qdep = pci_ats_queue_depth(pdev); 2175 dev_data->ats.qdep = pci_ats_queue_depth(pdev);
2178 dev_data->pri_tlp = pci_pri_tlp_required(pdev); 2176 dev_data->pri_tlp = pci_pri_tlp_required(pdev);
2177 }
2179 } else if (amd_iommu_iotlb_sup && 2178 } else if (amd_iommu_iotlb_sup &&
2180 pci_enable_ats(pdev, PAGE_SHIFT) == 0) { 2179 pci_enable_ats(pdev, PAGE_SHIFT) == 0) {
2181 dev_data->ats.enabled = true; 2180 dev_data->ats.enabled = true;
@@ -2221,15 +2220,6 @@ static void __detach_device(struct iommu_dev_data *dev_data)
2221 do_detach(head); 2220 do_detach(head);
2222 2221
2223 spin_unlock_irqrestore(&domain->lock, flags); 2222 spin_unlock_irqrestore(&domain->lock, flags);
2224
2225 /*
2226 * If we run in passthrough mode the device must be assigned to the
2227 * passthrough domain if it is detached from any other domain.
2228 * Make sure we can deassign from the pt_domain itself.
2229 */
2230 if (dev_data->passthrough &&
2231 (dev_data->domain == NULL && domain != pt_domain))
2232 __attach_device(dev_data, pt_domain);
2233} 2223}
2234 2224
2235/* 2225/*
@@ -2249,7 +2239,7 @@ static void detach_device(struct device *dev)
2249 __detach_device(dev_data); 2239 __detach_device(dev_data);
2250 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags); 2240 write_unlock_irqrestore(&amd_iommu_devtable_lock, flags);
2251 2241
2252 if (domain->flags & PD_IOMMUV2_MASK) 2242 if (domain->flags & PD_IOMMUV2_MASK && dev_data->iommu_v2)
2253 pdev_iommuv2_disable(to_pci_dev(dev)); 2243 pdev_iommuv2_disable(to_pci_dev(dev));
2254 else if (dev_data->ats.enabled) 2244 else if (dev_data->ats.enabled)
2255 pci_disable_ats(to_pci_dev(dev)); 2245 pci_disable_ats(to_pci_dev(dev));
@@ -2287,17 +2277,15 @@ static int amd_iommu_add_device(struct device *dev)
2287 2277
2288 BUG_ON(!dev_data); 2278 BUG_ON(!dev_data);
2289 2279
2290 if (dev_data->iommu_v2) 2280 if (iommu_pass_through || dev_data->iommu_v2)
2291 iommu_request_dm_for_dev(dev); 2281 iommu_request_dm_for_dev(dev);
2292 2282
2293 /* Domains are initialized for this device - have a look what we ended up with */ 2283 /* Domains are initialized for this device - have a look what we ended up with */
2294 domain = iommu_get_domain_for_dev(dev); 2284 domain = iommu_get_domain_for_dev(dev);
2295 if (domain->type == IOMMU_DOMAIN_IDENTITY) { 2285 if (domain->type == IOMMU_DOMAIN_IDENTITY)
2296 dev_data->passthrough = true; 2286 dev_data->passthrough = true;
2297 dev->archdata.dma_ops = &nommu_dma_ops; 2287 else
2298 } else {
2299 dev->archdata.dma_ops = &amd_iommu_dma_ops; 2288 dev->archdata.dma_ops = &amd_iommu_dma_ops;
2300 }
2301 2289
2302out: 2290out:
2303 iommu_completion_wait(iommu); 2291 iommu_completion_wait(iommu);
@@ -2862,8 +2850,17 @@ int __init amd_iommu_init_api(void)
2862 2850
2863int __init amd_iommu_init_dma_ops(void) 2851int __init amd_iommu_init_dma_ops(void)
2864{ 2852{
2853 swiotlb = iommu_pass_through ? 1 : 0;
2865 iommu_detected = 1; 2854 iommu_detected = 1;
2866 swiotlb = 0; 2855
2856 /*
2857 * In case we don't initialize SWIOTLB (actually the common case
2858 * when AMD IOMMU is enabled), make sure there are global
2859 * dma_ops set as a fall-back for devices not handled by this
2860 * driver (for example non-PCI devices).
2861 */
2862 if (!swiotlb)
2863 dma_ops = &nommu_dma_ops;
2867 2864
2868 amd_iommu_stats_init(); 2865 amd_iommu_stats_init();
2869 2866
@@ -2947,21 +2944,6 @@ out_err:
2947 return NULL; 2944 return NULL;
2948} 2945}
2949 2946
2950static int alloc_passthrough_domain(void)
2951{
2952 if (pt_domain != NULL)
2953 return 0;
2954
2955 /* allocate passthrough domain */
2956 pt_domain = protection_domain_alloc();
2957 if (!pt_domain)
2958 return -ENOMEM;
2959
2960 pt_domain->mode = PAGE_MODE_NONE;
2961
2962 return 0;
2963}
2964
2965static struct iommu_domain *amd_iommu_domain_alloc(unsigned type) 2947static struct iommu_domain *amd_iommu_domain_alloc(unsigned type)
2966{ 2948{
2967 struct protection_domain *pdomain; 2949 struct protection_domain *pdomain;
@@ -3222,33 +3204,6 @@ static const struct iommu_ops amd_iommu_ops = {
3222 * 3204 *
3223 *****************************************************************************/ 3205 *****************************************************************************/
3224 3206
3225int __init amd_iommu_init_passthrough(void)
3226{
3227 struct iommu_dev_data *dev_data;
3228 struct pci_dev *dev = NULL;
3229 int ret;
3230
3231 ret = alloc_passthrough_domain();
3232 if (ret)
3233 return ret;
3234
3235 for_each_pci_dev(dev) {
3236 if (!check_device(&dev->dev))
3237 continue;
3238
3239 dev_data = get_dev_data(&dev->dev);
3240 dev_data->passthrough = true;
3241
3242 attach_device(&dev->dev, pt_domain);
3243 }
3244
3245 amd_iommu_stats_init();
3246
3247 pr_info("AMD-Vi: Initialized for Passthrough Mode\n");
3248
3249 return 0;
3250}
3251
3252/* IOMMUv2 specific functions */ 3207/* IOMMUv2 specific functions */
3253int amd_iommu_register_ppr_notifier(struct notifier_block *nb) 3208int amd_iommu_register_ppr_notifier(struct notifier_block *nb)
3254{ 3209{
@@ -3363,7 +3318,12 @@ static int __flush_pasid(struct protection_domain *domain, int pasid,
3363 struct amd_iommu *iommu; 3318 struct amd_iommu *iommu;
3364 int qdep; 3319 int qdep;
3365 3320
3366 BUG_ON(!dev_data->ats.enabled); 3321 /*
3322 There might be non-IOMMUv2 capable devices in an IOMMUv2
3323 * domain.
3324 */
3325 if (!dev_data->ats.enabled)
3326 continue;
3367 3327
3368 qdep = dev_data->ats.qdep; 3328 qdep = dev_data->ats.qdep;
3369 iommu = amd_iommu_rlookup_table[dev_data->devid]; 3329 iommu = amd_iommu_rlookup_table[dev_data->devid];
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index dbda9ae68c5d..a24495eb4e26 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -2026,14 +2026,6 @@ static bool detect_ivrs(void)
2026 return true; 2026 return true;
2027} 2027}
2028 2028
2029static int amd_iommu_init_dma(void)
2030{
2031 if (iommu_pass_through)
2032 return amd_iommu_init_passthrough();
2033 else
2034 return amd_iommu_init_dma_ops();
2035}
2036
2037/**************************************************************************** 2029/****************************************************************************
2038 * 2030 *
2039 * AMD IOMMU Initialization State Machine 2031 * AMD IOMMU Initialization State Machine
@@ -2073,7 +2065,7 @@ static int __init state_next(void)
2073 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN; 2065 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_INTERRUPTS_EN;
2074 break; 2066 break;
2075 case IOMMU_INTERRUPTS_EN: 2067 case IOMMU_INTERRUPTS_EN:
2076 ret = amd_iommu_init_dma(); 2068 ret = amd_iommu_init_dma_ops();
2077 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS; 2069 init_state = ret ? IOMMU_INIT_ERROR : IOMMU_DMA_OPS;
2078 break; 2070 break;
2079 case IOMMU_DMA_OPS: 2071 case IOMMU_DMA_OPS:
diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
index 3465faf1809e..f7b875bb70d4 100644
--- a/drivers/iommu/amd_iommu_v2.c
+++ b/drivers/iommu/amd_iommu_v2.c
@@ -132,11 +132,19 @@ static struct device_state *get_device_state(u16 devid)
132 132
133static void free_device_state(struct device_state *dev_state) 133static void free_device_state(struct device_state *dev_state)
134{ 134{
135 struct iommu_group *group;
136
135 /* 137 /*
136 * First detach device from domain - No more PRI requests will arrive 138 * First detach device from domain - No more PRI requests will arrive
137 * from that device after it is unbound from the IOMMUv2 domain. 139 * from that device after it is unbound from the IOMMUv2 domain.
138 */ 140 */
139 iommu_detach_device(dev_state->domain, &dev_state->pdev->dev); 141 group = iommu_group_get(&dev_state->pdev->dev);
142 if (WARN_ON(!group))
143 return;
144
145 iommu_detach_group(dev_state->domain, group);
146
147 iommu_group_put(group);
140 148
141 /* Everything is down now, free the IOMMUv2 domain */ 149 /* Everything is down now, free the IOMMUv2 domain */
142 iommu_domain_free(dev_state->domain); 150 iommu_domain_free(dev_state->domain);
@@ -731,6 +739,7 @@ EXPORT_SYMBOL(amd_iommu_unbind_pasid);
731int amd_iommu_init_device(struct pci_dev *pdev, int pasids) 739int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
732{ 740{
733 struct device_state *dev_state; 741 struct device_state *dev_state;
742 struct iommu_group *group;
734 unsigned long flags; 743 unsigned long flags;
735 int ret, tmp; 744 int ret, tmp;
736 u16 devid; 745 u16 devid;
@@ -776,10 +785,16 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
776 if (ret) 785 if (ret)
777 goto out_free_domain; 786 goto out_free_domain;
778 787
779 ret = iommu_attach_device(dev_state->domain, &pdev->dev); 788 group = iommu_group_get(&pdev->dev);
780 if (ret != 0) 789 if (!group)
781 goto out_free_domain; 790 goto out_free_domain;
782 791
792 ret = iommu_attach_group(dev_state->domain, group);
793 if (ret != 0)
794 goto out_drop_group;
795
796 iommu_group_put(group);
797
783 spin_lock_irqsave(&state_lock, flags); 798 spin_lock_irqsave(&state_lock, flags);
784 799
785 if (__get_device_state(devid) != NULL) { 800 if (__get_device_state(devid) != NULL) {
@@ -794,6 +809,9 @@ int amd_iommu_init_device(struct pci_dev *pdev, int pasids)
794 809
795 return 0; 810 return 0;
796 811
812out_drop_group:
813 iommu_group_put(group);
814
797out_free_domain: 815out_free_domain:
798 iommu_domain_free(dev_state->domain); 816 iommu_domain_free(dev_state->domain);
799 817
diff --git a/drivers/iommu/arm-smmu-v3.c b/drivers/iommu/arm-smmu-v3.c
index 8e9ec81ce4bb..da902baaa794 100644
--- a/drivers/iommu/arm-smmu-v3.c
+++ b/drivers/iommu/arm-smmu-v3.c
@@ -199,9 +199,10 @@
199 * Stream table. 199 * Stream table.
200 * 200 *
201 * Linear: Enough to cover 1 << IDR1.SIDSIZE entries 201 * Linear: Enough to cover 1 << IDR1.SIDSIZE entries
202 * 2lvl: 8k L1 entries, 256 lazy entries per table (each table covers a PCI bus) 202 * 2lvl: 128k L1 entries,
203 * 256 lazy entries per table (each table covers a PCI bus)
203 */ 204 */
204#define STRTAB_L1_SZ_SHIFT 16 205#define STRTAB_L1_SZ_SHIFT 20
205#define STRTAB_SPLIT 8 206#define STRTAB_SPLIT 8
206 207
207#define STRTAB_L1_DESC_DWORDS 1 208#define STRTAB_L1_DESC_DWORDS 1
@@ -269,10 +270,10 @@
269#define ARM64_TCR_TG0_SHIFT 14 270#define ARM64_TCR_TG0_SHIFT 14
270#define ARM64_TCR_TG0_MASK 0x3UL 271#define ARM64_TCR_TG0_MASK 0x3UL
271#define CTXDESC_CD_0_TCR_IRGN0_SHIFT 8 272#define CTXDESC_CD_0_TCR_IRGN0_SHIFT 8
272#define ARM64_TCR_IRGN0_SHIFT 24 273#define ARM64_TCR_IRGN0_SHIFT 8
273#define ARM64_TCR_IRGN0_MASK 0x3UL 274#define ARM64_TCR_IRGN0_MASK 0x3UL
274#define CTXDESC_CD_0_TCR_ORGN0_SHIFT 10 275#define CTXDESC_CD_0_TCR_ORGN0_SHIFT 10
275#define ARM64_TCR_ORGN0_SHIFT 26 276#define ARM64_TCR_ORGN0_SHIFT 10
276#define ARM64_TCR_ORGN0_MASK 0x3UL 277#define ARM64_TCR_ORGN0_MASK 0x3UL
277#define CTXDESC_CD_0_TCR_SH0_SHIFT 12 278#define CTXDESC_CD_0_TCR_SH0_SHIFT 12
278#define ARM64_TCR_SH0_SHIFT 12 279#define ARM64_TCR_SH0_SHIFT 12
@@ -542,6 +543,9 @@ struct arm_smmu_device {
542#define ARM_SMMU_FEAT_HYP (1 << 12) 543#define ARM_SMMU_FEAT_HYP (1 << 12)
543 u32 features; 544 u32 features;
544 545
546#define ARM_SMMU_OPT_SKIP_PREFETCH (1 << 0)
547 u32 options;
548
545 struct arm_smmu_cmdq cmdq; 549 struct arm_smmu_cmdq cmdq;
546 struct arm_smmu_evtq evtq; 550 struct arm_smmu_evtq evtq;
547 struct arm_smmu_priq priq; 551 struct arm_smmu_priq priq;
@@ -602,11 +606,35 @@ struct arm_smmu_domain {
602static DEFINE_SPINLOCK(arm_smmu_devices_lock); 606static DEFINE_SPINLOCK(arm_smmu_devices_lock);
603static LIST_HEAD(arm_smmu_devices); 607static LIST_HEAD(arm_smmu_devices);
604 608
609struct arm_smmu_option_prop {
610 u32 opt;
611 const char *prop;
612};
613
614static struct arm_smmu_option_prop arm_smmu_options[] = {
615 { ARM_SMMU_OPT_SKIP_PREFETCH, "hisilicon,broken-prefetch-cmd" },
616 { 0, NULL},
617};
618
605static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom) 619static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
606{ 620{
607 return container_of(dom, struct arm_smmu_domain, domain); 621 return container_of(dom, struct arm_smmu_domain, domain);
608} 622}
609 623
624static void parse_driver_options(struct arm_smmu_device *smmu)
625{
626 int i = 0;
627
628 do {
629 if (of_property_read_bool(smmu->dev->of_node,
630 arm_smmu_options[i].prop)) {
631 smmu->options |= arm_smmu_options[i].opt;
632 dev_notice(smmu->dev, "option %s\n",
633 arm_smmu_options[i].prop);
634 }
635 } while (arm_smmu_options[++i].opt);
636}
637
610/* Low-level queue manipulation functions */ 638/* Low-level queue manipulation functions */
611static bool queue_full(struct arm_smmu_queue *q) 639static bool queue_full(struct arm_smmu_queue *q)
612{ 640{
@@ -1036,7 +1064,8 @@ static void arm_smmu_write_strtab_ent(struct arm_smmu_device *smmu, u32 sid,
1036 arm_smmu_sync_ste_for_sid(smmu, sid); 1064 arm_smmu_sync_ste_for_sid(smmu, sid);
1037 1065
1038 /* It's likely that we'll want to use the new STE soon */ 1066 /* It's likely that we'll want to use the new STE soon */
1039 arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd); 1067 if (!(smmu->options & ARM_SMMU_OPT_SKIP_PREFETCH))
1068 arm_smmu_cmdq_issue_cmd(smmu, &prefetch_cmd);
1040} 1069}
1041 1070
1042static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent) 1071static void arm_smmu_init_bypass_stes(u64 *strtab, unsigned int nent)
@@ -1064,7 +1093,7 @@ static int arm_smmu_init_l2_strtab(struct arm_smmu_device *smmu, u32 sid)
1064 return 0; 1093 return 0;
1065 1094
1066 size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3); 1095 size = 1 << (STRTAB_SPLIT + ilog2(STRTAB_STE_DWORDS) + 3);
1067 strtab = &cfg->strtab[sid >> STRTAB_SPLIT << STRTAB_L1_DESC_DWORDS]; 1096 strtab = &cfg->strtab[(sid >> STRTAB_SPLIT) * STRTAB_L1_DESC_DWORDS];
1068 1097
1069 desc->span = STRTAB_SPLIT + 1; 1098 desc->span = STRTAB_SPLIT + 1;
1070 desc->l2ptr = dma_zalloc_coherent(smmu->dev, size, &desc->l2ptr_dma, 1099 desc->l2ptr = dma_zalloc_coherent(smmu->dev, size, &desc->l2ptr_dma,
@@ -2020,21 +2049,23 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
2020{ 2049{
2021 void *strtab; 2050 void *strtab;
2022 u64 reg; 2051 u64 reg;
2023 u32 size; 2052 u32 size, l1size;
2024 int ret; 2053 int ret;
2025 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg; 2054 struct arm_smmu_strtab_cfg *cfg = &smmu->strtab_cfg;
2026 2055
2027 /* Calculate the L1 size, capped to the SIDSIZE */ 2056 /* Calculate the L1 size, capped to the SIDSIZE */
2028 size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3); 2057 size = STRTAB_L1_SZ_SHIFT - (ilog2(STRTAB_L1_DESC_DWORDS) + 3);
2029 size = min(size, smmu->sid_bits - STRTAB_SPLIT); 2058 size = min(size, smmu->sid_bits - STRTAB_SPLIT);
2030 if (size + STRTAB_SPLIT < smmu->sid_bits) 2059 cfg->num_l1_ents = 1 << size;
2060
2061 size += STRTAB_SPLIT;
2062 if (size < smmu->sid_bits)
2031 dev_warn(smmu->dev, 2063 dev_warn(smmu->dev,
2032 "2-level strtab only covers %u/%u bits of SID\n", 2064 "2-level strtab only covers %u/%u bits of SID\n",
2033 size + STRTAB_SPLIT, smmu->sid_bits); 2065 size, smmu->sid_bits);
2034 2066
2035 cfg->num_l1_ents = 1 << size; 2067 l1size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3);
2036 size = cfg->num_l1_ents * (STRTAB_L1_DESC_DWORDS << 3); 2068 strtab = dma_zalloc_coherent(smmu->dev, l1size, &cfg->strtab_dma,
2037 strtab = dma_zalloc_coherent(smmu->dev, size, &cfg->strtab_dma,
2038 GFP_KERNEL); 2069 GFP_KERNEL);
2039 if (!strtab) { 2070 if (!strtab) {
2040 dev_err(smmu->dev, 2071 dev_err(smmu->dev,
@@ -2055,8 +2086,7 @@ static int arm_smmu_init_strtab_2lvl(struct arm_smmu_device *smmu)
2055 ret = arm_smmu_init_l1_strtab(smmu); 2086 ret = arm_smmu_init_l1_strtab(smmu);
2056 if (ret) 2087 if (ret)
2057 dma_free_coherent(smmu->dev, 2088 dma_free_coherent(smmu->dev,
2058 cfg->num_l1_ents * 2089 l1size,
2059 (STRTAB_L1_DESC_DWORDS << 3),
2060 strtab, 2090 strtab,
2061 cfg->strtab_dma); 2091 cfg->strtab_dma);
2062 return ret; 2092 return ret;
@@ -2573,6 +2603,8 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev)
2573 if (irq > 0) 2603 if (irq > 0)
2574 smmu->gerr_irq = irq; 2604 smmu->gerr_irq = irq;
2575 2605
2606 parse_driver_options(smmu);
2607
2576 /* Probe the h/w */ 2608 /* Probe the h/w */
2577 ret = arm_smmu_device_probe(smmu); 2609 ret = arm_smmu_device_probe(smmu);
2578 if (ret) 2610 if (ret)
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index a98a7b27aca1..0649b94f5958 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1830,8 +1830,9 @@ static int domain_init(struct dmar_domain *domain, int guest_width)
1830 1830
1831static void domain_exit(struct dmar_domain *domain) 1831static void domain_exit(struct dmar_domain *domain)
1832{ 1832{
1833 struct dmar_drhd_unit *drhd;
1834 struct intel_iommu *iommu;
1833 struct page *freelist = NULL; 1835 struct page *freelist = NULL;
1834 int i;
1835 1836
1836 /* Domain 0 is reserved, so dont process it */ 1837 /* Domain 0 is reserved, so dont process it */
1837 if (!domain) 1838 if (!domain)
@@ -1851,8 +1852,10 @@ static void domain_exit(struct dmar_domain *domain)
1851 1852
1852 /* clear attached or cached domains */ 1853 /* clear attached or cached domains */
1853 rcu_read_lock(); 1854 rcu_read_lock();
1854 for_each_set_bit(i, domain->iommu_bmp, g_num_of_iommus) 1855 for_each_active_iommu(iommu, drhd)
1855 iommu_detach_domain(domain, g_iommus[i]); 1856 if (domain_type_is_vm(domain) ||
1857 test_bit(iommu->seq_id, domain->iommu_bmp))
1858 iommu_detach_domain(domain, iommu);
1856 rcu_read_unlock(); 1859 rcu_read_unlock();
1857 1860
1858 dma_free_pagelist(freelist); 1861 dma_free_pagelist(freelist);
diff --git a/drivers/irqchip/irq-mips-gic.c b/drivers/irqchip/irq-mips-gic.c
index b7d54d428b5e..ff4be0515a0d 100644
--- a/drivers/irqchip/irq-mips-gic.c
+++ b/drivers/irqchip/irq-mips-gic.c
@@ -538,7 +538,7 @@ static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
538 538
539static irqreturn_t ipi_call_interrupt(int irq, void *dev_id) 539static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
540{ 540{
541 smp_call_function_interrupt(); 541 generic_smp_call_function_interrupt();
542 542
543 return IRQ_HANDLED; 543 return IRQ_HANDLED;
544} 544}
diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
index 8c91fd5eb6fd..375be509e95f 100644
--- a/drivers/isdn/gigaset/ser-gigaset.c
+++ b/drivers/isdn/gigaset/ser-gigaset.c
@@ -524,9 +524,18 @@ gigaset_tty_open(struct tty_struct *tty)
524 cs->hw.ser->tty = tty; 524 cs->hw.ser->tty = tty;
525 atomic_set(&cs->hw.ser->refcnt, 1); 525 atomic_set(&cs->hw.ser->refcnt, 1);
526 init_completion(&cs->hw.ser->dead_cmp); 526 init_completion(&cs->hw.ser->dead_cmp);
527
528 tty->disc_data = cs; 527 tty->disc_data = cs;
529 528
529 /* Set the amount of data we're willing to receive per call
530 * from the hardware driver to half of the input buffer size
531 * to leave some reserve.
532 * Note: We don't do flow control towards the hardware driver.
533 * If more data is received than will fit into the input buffer,
534 * it will be dropped and an error will be logged. This should
535 * never happen as the device is slow and the buffer size ample.
536 */
537 tty->receive_room = RBUFSIZE/2;
538
530 /* OK.. Initialization of the datastructures and the HW is done.. Now 539 /* OK.. Initialization of the datastructures and the HW is done.. Now
531 * startup system and notify the LL that we are ready to run 540 * startup system and notify the LL that we are ready to run
532 */ 541 */
@@ -598,28 +607,6 @@ static int gigaset_tty_hangup(struct tty_struct *tty)
598} 607}
599 608
600/* 609/*
601 * Read on the tty.
602 * Unused, received data goes only to the Gigaset driver.
603 */
604static ssize_t
605gigaset_tty_read(struct tty_struct *tty, struct file *file,
606 unsigned char __user *buf, size_t count)
607{
608 return -EAGAIN;
609}
610
611/*
612 * Write on the tty.
613 * Unused, transmit data comes only from the Gigaset driver.
614 */
615static ssize_t
616gigaset_tty_write(struct tty_struct *tty, struct file *file,
617 const unsigned char *buf, size_t count)
618{
619 return -EAGAIN;
620}
621
622/*
623 * Ioctl on the tty. 610 * Ioctl on the tty.
624 * Called in process context only. 611 * Called in process context only.
625 * May be re-entered by multiple ioctl calling threads. 612 * May be re-entered by multiple ioctl calling threads.
@@ -752,8 +739,6 @@ static struct tty_ldisc_ops gigaset_ldisc = {
752 .open = gigaset_tty_open, 739 .open = gigaset_tty_open,
753 .close = gigaset_tty_close, 740 .close = gigaset_tty_close,
754 .hangup = gigaset_tty_hangup, 741 .hangup = gigaset_tty_hangup,
755 .read = gigaset_tty_read,
756 .write = gigaset_tty_write,
757 .ioctl = gigaset_tty_ioctl, 742 .ioctl = gigaset_tty_ioctl,
758 .receive_buf = gigaset_tty_receive, 743 .receive_buf = gigaset_tty_receive,
759 .write_wakeup = gigaset_tty_wakeup, 744 .write_wakeup = gigaset_tty_wakeup,
diff --git a/drivers/macintosh/ans-lcd.c b/drivers/macintosh/ans-lcd.c
index 1a57e88a38f7..cd35079c8c98 100644
--- a/drivers/macintosh/ans-lcd.c
+++ b/drivers/macintosh/ans-lcd.c
@@ -7,7 +7,7 @@
7#include <linux/kernel.h> 7#include <linux/kernel.h>
8#include <linux/miscdevice.h> 8#include <linux/miscdevice.h>
9#include <linux/fcntl.h> 9#include <linux/fcntl.h>
10#include <linux/init.h> 10#include <linux/module.h>
11#include <linux/delay.h> 11#include <linux/delay.h>
12#include <linux/fs.h> 12#include <linux/fs.h>
13 13
diff --git a/drivers/md/Kconfig b/drivers/md/Kconfig
index b59727309072..bfec3bdfe598 100644
--- a/drivers/md/Kconfig
+++ b/drivers/md/Kconfig
@@ -259,7 +259,7 @@ config DM_CRYPT
259 the ciphers you're going to use in the cryptoapi configuration. 259 the ciphers you're going to use in the cryptoapi configuration.
260 260
261 For further information on dm-crypt and userspace tools see: 261 For further information on dm-crypt and userspace tools see:
262 <http://code.google.com/p/cryptsetup/wiki/DMCrypt> 262 <https://gitlab.com/cryptsetup/cryptsetup/wikis/DMCrypt>
263 263
264 To compile this code as a module, choose M here: the module will 264 To compile this code as a module, choose M here: the module will
265 be called dm-crypt. 265 be called dm-crypt.
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index ed2346ddf4c9..e51de52eeb94 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -494,7 +494,7 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
494 bitmap_super_t *sb; 494 bitmap_super_t *sb;
495 unsigned long chunksize, daemon_sleep, write_behind; 495 unsigned long chunksize, daemon_sleep, write_behind;
496 496
497 bitmap->storage.sb_page = alloc_page(GFP_KERNEL); 497 bitmap->storage.sb_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
498 if (bitmap->storage.sb_page == NULL) 498 if (bitmap->storage.sb_page == NULL)
499 return -ENOMEM; 499 return -ENOMEM;
500 bitmap->storage.sb_page->index = 0; 500 bitmap->storage.sb_page->index = 0;
@@ -541,6 +541,7 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
541 sb->state = cpu_to_le32(bitmap->flags); 541 sb->state = cpu_to_le32(bitmap->flags);
542 bitmap->events_cleared = bitmap->mddev->events; 542 bitmap->events_cleared = bitmap->mddev->events;
543 sb->events_cleared = cpu_to_le64(bitmap->mddev->events); 543 sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
544 bitmap->mddev->bitmap_info.nodes = 0;
544 545
545 kunmap_atomic(sb); 546 kunmap_atomic(sb);
546 547
@@ -558,6 +559,7 @@ static int bitmap_read_sb(struct bitmap *bitmap)
558 unsigned long sectors_reserved = 0; 559 unsigned long sectors_reserved = 0;
559 int err = -EINVAL; 560 int err = -EINVAL;
560 struct page *sb_page; 561 struct page *sb_page;
562 loff_t offset = bitmap->mddev->bitmap_info.offset;
561 563
562 if (!bitmap->storage.file && !bitmap->mddev->bitmap_info.offset) { 564 if (!bitmap->storage.file && !bitmap->mddev->bitmap_info.offset) {
563 chunksize = 128 * 1024 * 1024; 565 chunksize = 128 * 1024 * 1024;
@@ -584,9 +586,9 @@ re_read:
584 bm_blocks = ((bm_blocks+7) >> 3) + sizeof(bitmap_super_t); 586 bm_blocks = ((bm_blocks+7) >> 3) + sizeof(bitmap_super_t);
585 /* to 4k blocks */ 587 /* to 4k blocks */
586 bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks, 4096); 588 bm_blocks = DIV_ROUND_UP_SECTOR_T(bm_blocks, 4096);
587 bitmap->mddev->bitmap_info.offset += bitmap->cluster_slot * (bm_blocks << 3); 589 offset = bitmap->mddev->bitmap_info.offset + (bitmap->cluster_slot * (bm_blocks << 3));
588 pr_info("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__, 590 pr_info("%s:%d bm slot: %d offset: %llu\n", __func__, __LINE__,
589 bitmap->cluster_slot, (unsigned long long)bitmap->mddev->bitmap_info.offset); 591 bitmap->cluster_slot, offset);
590 } 592 }
591 593
592 if (bitmap->storage.file) { 594 if (bitmap->storage.file) {
@@ -597,7 +599,7 @@ re_read:
597 bitmap, bytes, sb_page); 599 bitmap, bytes, sb_page);
598 } else { 600 } else {
599 err = read_sb_page(bitmap->mddev, 601 err = read_sb_page(bitmap->mddev,
600 bitmap->mddev->bitmap_info.offset, 602 offset,
601 sb_page, 603 sb_page,
602 0, sizeof(bitmap_super_t)); 604 0, sizeof(bitmap_super_t));
603 } 605 }
@@ -611,8 +613,16 @@ re_read:
611 daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ; 613 daemon_sleep = le32_to_cpu(sb->daemon_sleep) * HZ;
612 write_behind = le32_to_cpu(sb->write_behind); 614 write_behind = le32_to_cpu(sb->write_behind);
613 sectors_reserved = le32_to_cpu(sb->sectors_reserved); 615 sectors_reserved = le32_to_cpu(sb->sectors_reserved);
614 nodes = le32_to_cpu(sb->nodes); 616 /* XXX: This is a hack to ensure that we don't use clustering
615 strlcpy(bitmap->mddev->bitmap_info.cluster_name, sb->cluster_name, 64); 617 * in case:
618 * - dm-raid is in use and
619 * - the nodes written in bitmap_sb is erroneous.
620 */
621 if (!bitmap->mddev->sync_super) {
622 nodes = le32_to_cpu(sb->nodes);
623 strlcpy(bitmap->mddev->bitmap_info.cluster_name,
624 sb->cluster_name, 64);
625 }
616 626
617 /* verify that the bitmap-specific fields are valid */ 627 /* verify that the bitmap-specific fields are valid */
618 if (sb->magic != cpu_to_le32(BITMAP_MAGIC)) 628 if (sb->magic != cpu_to_le32(BITMAP_MAGIC))
@@ -671,7 +681,7 @@ out:
671 kunmap_atomic(sb); 681 kunmap_atomic(sb);
672 /* Assiging chunksize is required for "re_read" */ 682 /* Assiging chunksize is required for "re_read" */
673 bitmap->mddev->bitmap_info.chunksize = chunksize; 683 bitmap->mddev->bitmap_info.chunksize = chunksize;
674 if (nodes && (bitmap->cluster_slot < 0)) { 684 if (err == 0 && nodes && (bitmap->cluster_slot < 0)) {
675 err = md_setup_cluster(bitmap->mddev, nodes); 685 err = md_setup_cluster(bitmap->mddev, nodes);
676 if (err) { 686 if (err) {
677 pr_err("%s: Could not setup cluster service (%d)\n", 687 pr_err("%s: Could not setup cluster service (%d)\n",
@@ -1866,10 +1876,6 @@ int bitmap_copy_from_slot(struct mddev *mddev, int slot,
1866 if (IS_ERR(bitmap)) 1876 if (IS_ERR(bitmap))
1867 return PTR_ERR(bitmap); 1877 return PTR_ERR(bitmap);
1868 1878
1869 rv = bitmap_read_sb(bitmap);
1870 if (rv)
1871 goto err;
1872
1873 rv = bitmap_init_from_disk(bitmap, 0); 1879 rv = bitmap_init_from_disk(bitmap, 0);
1874 if (rv) 1880 if (rv)
1875 goto err; 1881 goto err;
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
index 32814371b8d3..aa1b41ca40f7 100644
--- a/drivers/md/dm-cache-policy-mq.c
+++ b/drivers/md/dm-cache-policy-mq.c
@@ -1471,5 +1471,3 @@ module_exit(mq_exit);
1471MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); 1471MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
1472MODULE_LICENSE("GPL"); 1472MODULE_LICENSE("GPL");
1473MODULE_DESCRIPTION("mq cache policy"); 1473MODULE_DESCRIPTION("mq cache policy");
1474
1475MODULE_ALIAS("dm-cache-default");
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index b6f22651dd35..200366c62231 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -1686,7 +1686,7 @@ static struct dm_cache_policy *smq_create(dm_cblock_t cache_size,
1686 1686
1687 if (from_cblock(cache_size)) { 1687 if (from_cblock(cache_size)) {
1688 mq->cache_hit_bits = alloc_bitset(from_cblock(cache_size)); 1688 mq->cache_hit_bits = alloc_bitset(from_cblock(cache_size));
1689 if (!mq->cache_hit_bits && mq->cache_hit_bits) { 1689 if (!mq->cache_hit_bits) {
1690 DMERR("couldn't allocate cache hit bitset"); 1690 DMERR("couldn't allocate cache hit bitset");
1691 goto bad_cache_hit_bits; 1691 goto bad_cache_hit_bits;
1692 } 1692 }
@@ -1789,3 +1789,5 @@ module_exit(smq_exit);
1789MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); 1789MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
1790MODULE_LICENSE("GPL"); 1790MODULE_LICENSE("GPL");
1791MODULE_DESCRIPTION("smq cache policy"); 1791MODULE_DESCRIPTION("smq cache policy");
1792
1793MODULE_ALIAS("dm-cache-default");
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index b680da5d7b93..1fe93cfea7d3 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -424,6 +424,7 @@ static void free_migration(struct dm_cache_migration *mg)
424 wake_up(&cache->migration_wait); 424 wake_up(&cache->migration_wait);
425 425
426 mempool_free(mg, cache->migration_pool); 426 mempool_free(mg, cache->migration_pool);
427 wake_worker(cache);
427} 428}
428 429
429static int prealloc_data_structs(struct cache *cache, struct prealloc *p) 430static int prealloc_data_structs(struct cache *cache, struct prealloc *p)
@@ -1966,6 +1967,7 @@ static void process_deferred_bios(struct cache *cache)
1966 * this bio might require one, we pause until there are some 1967 * this bio might require one, we pause until there are some
1967 * prepared mappings to process. 1968 * prepared mappings to process.
1968 */ 1969 */
1970 prealloc_used = true;
1969 if (prealloc_data_structs(cache, &structs)) { 1971 if (prealloc_data_structs(cache, &structs)) {
1970 spin_lock_irqsave(&cache->lock, flags); 1972 spin_lock_irqsave(&cache->lock, flags);
1971 bio_list_merge(&cache->deferred_bios, &bios); 1973 bio_list_merge(&cache->deferred_bios, &bios);
@@ -1981,7 +1983,6 @@ static void process_deferred_bios(struct cache *cache)
1981 process_discard_bio(cache, &structs, bio); 1983 process_discard_bio(cache, &structs, bio);
1982 else 1984 else
1983 process_bio(cache, &structs, bio); 1985 process_bio(cache, &structs, bio);
1984 prealloc_used = true;
1985 } 1986 }
1986 1987
1987 if (prealloc_used) 1988 if (prealloc_used)
@@ -2010,6 +2011,7 @@ static void process_deferred_cells(struct cache *cache)
2010 * this bio might require one, we pause until there are some 2011 * this bio might require one, we pause until there are some
2011 * prepared mappings to process. 2012 * prepared mappings to process.
2012 */ 2013 */
2014 prealloc_used = true;
2013 if (prealloc_data_structs(cache, &structs)) { 2015 if (prealloc_data_structs(cache, &structs)) {
2014 spin_lock_irqsave(&cache->lock, flags); 2016 spin_lock_irqsave(&cache->lock, flags);
2015 list_splice(&cells, &cache->deferred_cells); 2017 list_splice(&cells, &cache->deferred_cells);
@@ -2018,7 +2020,6 @@ static void process_deferred_cells(struct cache *cache)
2018 } 2020 }
2019 2021
2020 process_cell(cache, &structs, cell); 2022 process_cell(cache, &structs, cell);
2021 prealloc_used = true;
2022 } 2023 }
2023 2024
2024 if (prealloc_used) 2025 if (prealloc_used)
@@ -2080,6 +2081,7 @@ static void writeback_some_dirty_blocks(struct cache *cache)
2080 if (policy_writeback_work(cache->policy, &oblock, &cblock, busy)) 2081 if (policy_writeback_work(cache->policy, &oblock, &cblock, busy))
2081 break; /* no work to do */ 2082 break; /* no work to do */
2082 2083
2084 prealloc_used = true;
2083 if (prealloc_data_structs(cache, &structs) || 2085 if (prealloc_data_structs(cache, &structs) ||
2084 get_cell(cache, oblock, &structs, &old_ocell)) { 2086 get_cell(cache, oblock, &structs, &old_ocell)) {
2085 policy_set_dirty(cache->policy, oblock); 2087 policy_set_dirty(cache->policy, oblock);
@@ -2087,7 +2089,6 @@ static void writeback_some_dirty_blocks(struct cache *cache)
2087 } 2089 }
2088 2090
2089 writeback(cache, &structs, oblock, cblock, old_ocell); 2091 writeback(cache, &structs, oblock, cblock, old_ocell);
2090 prealloc_used = true;
2091 } 2092 }
2092 2093
2093 if (prealloc_used) 2094 if (prealloc_used)
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 48dfe3c4d6aa..6ba47cfb1443 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -1293,8 +1293,8 @@ static int __release_metadata_snap(struct dm_pool_metadata *pmd)
1293 return r; 1293 return r;
1294 1294
1295 disk_super = dm_block_data(copy); 1295 disk_super = dm_block_data(copy);
1296 dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->data_mapping_root)); 1296 dm_btree_del(&pmd->info, le64_to_cpu(disk_super->data_mapping_root));
1297 dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->device_details_root)); 1297 dm_btree_del(&pmd->details_info, le64_to_cpu(disk_super->device_details_root));
1298 dm_sm_dec_block(pmd->metadata_sm, held_root); 1298 dm_sm_dec_block(pmd->metadata_sm, held_root);
1299 1299
1300 return dm_tm_unlock(pmd->tm, copy); 1300 return dm_tm_unlock(pmd->tm, copy);
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 1c50c580215c..d2bbe8cc1e97 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -666,16 +666,21 @@ static void requeue_io(struct thin_c *tc)
666 requeue_deferred_cells(tc); 666 requeue_deferred_cells(tc);
667} 667}
668 668
669static void error_retry_list(struct pool *pool) 669static void error_retry_list_with_code(struct pool *pool, int error)
670{ 670{
671 struct thin_c *tc; 671 struct thin_c *tc;
672 672
673 rcu_read_lock(); 673 rcu_read_lock();
674 list_for_each_entry_rcu(tc, &pool->active_thins, list) 674 list_for_each_entry_rcu(tc, &pool->active_thins, list)
675 error_thin_bio_list(tc, &tc->retry_on_resume_list, -EIO); 675 error_thin_bio_list(tc, &tc->retry_on_resume_list, error);
676 rcu_read_unlock(); 676 rcu_read_unlock();
677} 677}
678 678
679static void error_retry_list(struct pool *pool)
680{
681 return error_retry_list_with_code(pool, -EIO);
682}
683
679/* 684/*
680 * This section of code contains the logic for processing a thin device's IO. 685 * This section of code contains the logic for processing a thin device's IO.
681 * Much of the code depends on pool object resources (lists, workqueues, etc) 686 * Much of the code depends on pool object resources (lists, workqueues, etc)
@@ -2297,7 +2302,7 @@ static void do_no_space_timeout(struct work_struct *ws)
2297 if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) { 2302 if (get_pool_mode(pool) == PM_OUT_OF_DATA_SPACE && !pool->pf.error_if_no_space) {
2298 pool->pf.error_if_no_space = true; 2303 pool->pf.error_if_no_space = true;
2299 notify_of_pool_mode_change_to_oods(pool); 2304 notify_of_pool_mode_change_to_oods(pool);
2300 error_retry_list(pool); 2305 error_retry_list_with_code(pool, -ENOSPC);
2301 } 2306 }
2302} 2307}
2303 2308
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index ab37ae114e94..0d7ab20c58df 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1729,7 +1729,8 @@ static int dm_merge_bvec(struct request_queue *q,
1729 struct mapped_device *md = q->queuedata; 1729 struct mapped_device *md = q->queuedata;
1730 struct dm_table *map = dm_get_live_table_fast(md); 1730 struct dm_table *map = dm_get_live_table_fast(md);
1731 struct dm_target *ti; 1731 struct dm_target *ti;
1732 sector_t max_sectors, max_size = 0; 1732 sector_t max_sectors;
1733 int max_size = 0;
1733 1734
1734 if (unlikely(!map)) 1735 if (unlikely(!map))
1735 goto out; 1736 goto out;
@@ -1742,18 +1743,10 @@ static int dm_merge_bvec(struct request_queue *q,
1742 * Find maximum amount of I/O that won't need splitting 1743 * Find maximum amount of I/O that won't need splitting
1743 */ 1744 */
1744 max_sectors = min(max_io_len(bvm->bi_sector, ti), 1745 max_sectors = min(max_io_len(bvm->bi_sector, ti),
1745 (sector_t) queue_max_sectors(q)); 1746 (sector_t) BIO_MAX_SECTORS);
1746 max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size; 1747 max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
1747 1748 if (max_size < 0)
1748 /* 1749 max_size = 0;
1749 * FIXME: this stop-gap fix _must_ be cleaned up (by passing a sector_t
1750 * to the targets' merge function since it holds sectors not bytes).
1751 * Just doing this as an interim fix for stable@ because the more
1752 * comprehensive cleanup of switching to sector_t will impact every
1753 * DM target that implements a ->merge hook.
1754 */
1755 if (max_size > INT_MAX)
1756 max_size = INT_MAX;
1757 1750
1758 /* 1751 /*
1759 * merge_bvec_fn() returns number of bytes 1752 * merge_bvec_fn() returns number of bytes
@@ -1761,13 +1754,13 @@ static int dm_merge_bvec(struct request_queue *q,
1761 * max is precomputed maximal io size 1754 * max is precomputed maximal io size
1762 */ 1755 */
1763 if (max_size && ti->type->merge) 1756 if (max_size && ti->type->merge)
1764 max_size = ti->type->merge(ti, bvm, biovec, (int) max_size); 1757 max_size = ti->type->merge(ti, bvm, biovec, max_size);
1765 /* 1758 /*
1766 * If the target doesn't support merge method and some of the devices 1759 * If the target doesn't support merge method and some of the devices
1767 * provided their merge_bvec method (we know this by looking for the 1760 * provided their merge_bvec method (we know this by looking at
1768 * max_hw_sectors that dm_set_device_limits may set), then we can't 1761 * queue_max_hw_sectors), then we can't allow bios with multiple vector
1769 * allow bios with multiple vector entries. So always set max_size 1762 * entries. So always set max_size to 0, and the code below allows
1770 * to 0, and the code below allows just one page. 1763 * just one page.
1771 */ 1764 */
1772 else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9) 1765 else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
1773 max_size = 0; 1766 max_size = 0;
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index fcfc4b9b2672..0072190515e0 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -44,6 +44,7 @@ struct resync_info {
44 44
45/* md_cluster_info flags */ 45/* md_cluster_info flags */
46#define MD_CLUSTER_WAITING_FOR_NEWDISK 1 46#define MD_CLUSTER_WAITING_FOR_NEWDISK 1
47#define MD_CLUSTER_SUSPEND_READ_BALANCING 2
47 48
48 49
49struct md_cluster_info { 50struct md_cluster_info {
@@ -275,6 +276,9 @@ clear_bit:
275 276
276static void recover_prep(void *arg) 277static void recover_prep(void *arg)
277{ 278{
279 struct mddev *mddev = arg;
280 struct md_cluster_info *cinfo = mddev->cluster_info;
281 set_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state);
278} 282}
279 283
280static void recover_slot(void *arg, struct dlm_slot *slot) 284static void recover_slot(void *arg, struct dlm_slot *slot)
@@ -307,6 +311,7 @@ static void recover_done(void *arg, struct dlm_slot *slots,
307 311
308 cinfo->slot_number = our_slot; 312 cinfo->slot_number = our_slot;
309 complete(&cinfo->completion); 313 complete(&cinfo->completion);
314 clear_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state);
310} 315}
311 316
312static const struct dlm_lockspace_ops md_ls_ops = { 317static const struct dlm_lockspace_ops md_ls_ops = {
@@ -816,12 +821,17 @@ static void resync_finish(struct mddev *mddev)
816 resync_send(mddev, RESYNCING, 0, 0); 821 resync_send(mddev, RESYNCING, 0, 0);
817} 822}
818 823
819static int area_resyncing(struct mddev *mddev, sector_t lo, sector_t hi) 824static int area_resyncing(struct mddev *mddev, int direction,
825 sector_t lo, sector_t hi)
820{ 826{
821 struct md_cluster_info *cinfo = mddev->cluster_info; 827 struct md_cluster_info *cinfo = mddev->cluster_info;
822 int ret = 0; 828 int ret = 0;
823 struct suspend_info *s; 829 struct suspend_info *s;
824 830
831 if ((direction == READ) &&
832 test_bit(MD_CLUSTER_SUSPEND_READ_BALANCING, &cinfo->state))
833 return 1;
834
825 spin_lock_irq(&cinfo->suspend_lock); 835 spin_lock_irq(&cinfo->suspend_lock);
826 if (list_empty(&cinfo->suspend_list)) 836 if (list_empty(&cinfo->suspend_list))
827 goto out; 837 goto out;
diff --git a/drivers/md/md-cluster.h b/drivers/md/md-cluster.h
index 6817ee00e053..00defe2badbc 100644
--- a/drivers/md/md-cluster.h
+++ b/drivers/md/md-cluster.h
@@ -18,7 +18,7 @@ struct md_cluster_operations {
18 int (*metadata_update_start)(struct mddev *mddev); 18 int (*metadata_update_start)(struct mddev *mddev);
19 int (*metadata_update_finish)(struct mddev *mddev); 19 int (*metadata_update_finish)(struct mddev *mddev);
20 int (*metadata_update_cancel)(struct mddev *mddev); 20 int (*metadata_update_cancel)(struct mddev *mddev);
21 int (*area_resyncing)(struct mddev *mddev, sector_t lo, sector_t hi); 21 int (*area_resyncing)(struct mddev *mddev, int direction, sector_t lo, sector_t hi);
22 int (*add_new_disk_start)(struct mddev *mddev, struct md_rdev *rdev); 22 int (*add_new_disk_start)(struct mddev *mddev, struct md_rdev *rdev);
23 int (*add_new_disk_finish)(struct mddev *mddev); 23 int (*add_new_disk_finish)(struct mddev *mddev);
24 int (*new_disk_ack)(struct mddev *mddev, bool ack); 24 int (*new_disk_ack)(struct mddev *mddev, bool ack);
diff --git a/drivers/md/md.c b/drivers/md/md.c
index d429c30cd514..e25f00f0138a 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -5382,6 +5382,8 @@ static void __md_stop(struct mddev *mddev)
5382{ 5382{
5383 struct md_personality *pers = mddev->pers; 5383 struct md_personality *pers = mddev->pers;
5384 mddev_detach(mddev); 5384 mddev_detach(mddev);
5385 /* Ensure ->event_work is done */
5386 flush_workqueue(md_misc_wq);
5385 spin_lock(&mddev->lock); 5387 spin_lock(&mddev->lock);
5386 mddev->ready = 0; 5388 mddev->ready = 0;
5387 mddev->pers = NULL; 5389 mddev->pers = NULL;
@@ -5757,7 +5759,7 @@ static int get_bitmap_file(struct mddev *mddev, void __user * arg)
5757 char *ptr; 5759 char *ptr;
5758 int err; 5760 int err;
5759 5761
5760 file = kmalloc(sizeof(*file), GFP_NOIO); 5762 file = kzalloc(sizeof(*file), GFP_NOIO);
5761 if (!file) 5763 if (!file)
5762 return -ENOMEM; 5764 return -ENOMEM;
5763 5765
@@ -7437,7 +7439,7 @@ int md_setup_cluster(struct mddev *mddev, int nodes)
7437 err = request_module("md-cluster"); 7439 err = request_module("md-cluster");
7438 if (err) { 7440 if (err) {
7439 pr_err("md-cluster module not found.\n"); 7441 pr_err("md-cluster module not found.\n");
7440 return err; 7442 return -ENOENT;
7441 } 7443 }
7442 7444
7443 spin_lock(&pers_lock); 7445 spin_lock(&pers_lock);
diff --git a/drivers/md/persistent-data/dm-btree-internal.h b/drivers/md/persistent-data/dm-btree-internal.h
index bf2b80d5c470..8731b6ea026b 100644
--- a/drivers/md/persistent-data/dm-btree-internal.h
+++ b/drivers/md/persistent-data/dm-btree-internal.h
@@ -138,4 +138,10 @@ int lower_bound(struct btree_node *n, uint64_t key);
138 138
139extern struct dm_block_validator btree_node_validator; 139extern struct dm_block_validator btree_node_validator;
140 140
141/*
142 * Value type for upper levels of multi-level btrees.
143 */
144extern void init_le64_type(struct dm_transaction_manager *tm,
145 struct dm_btree_value_type *vt);
146
141#endif /* DM_BTREE_INTERNAL_H */ 147#endif /* DM_BTREE_INTERNAL_H */
diff --git a/drivers/md/persistent-data/dm-btree-remove.c b/drivers/md/persistent-data/dm-btree-remove.c
index 9836c0ae897c..4222f774cf36 100644
--- a/drivers/md/persistent-data/dm-btree-remove.c
+++ b/drivers/md/persistent-data/dm-btree-remove.c
@@ -544,14 +544,6 @@ static int remove_raw(struct shadow_spine *s, struct dm_btree_info *info,
544 return r; 544 return r;
545} 545}
546 546
547static struct dm_btree_value_type le64_type = {
548 .context = NULL,
549 .size = sizeof(__le64),
550 .inc = NULL,
551 .dec = NULL,
552 .equal = NULL
553};
554
555int dm_btree_remove(struct dm_btree_info *info, dm_block_t root, 547int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
556 uint64_t *keys, dm_block_t *new_root) 548 uint64_t *keys, dm_block_t *new_root)
557{ 549{
@@ -559,12 +551,14 @@ int dm_btree_remove(struct dm_btree_info *info, dm_block_t root,
559 int index = 0, r = 0; 551 int index = 0, r = 0;
560 struct shadow_spine spine; 552 struct shadow_spine spine;
561 struct btree_node *n; 553 struct btree_node *n;
554 struct dm_btree_value_type le64_vt;
562 555
556 init_le64_type(info->tm, &le64_vt);
563 init_shadow_spine(&spine, info); 557 init_shadow_spine(&spine, info);
564 for (level = 0; level < info->levels; level++) { 558 for (level = 0; level < info->levels; level++) {
565 r = remove_raw(&spine, info, 559 r = remove_raw(&spine, info,
566 (level == last_level ? 560 (level == last_level ?
567 &info->value_type : &le64_type), 561 &info->value_type : &le64_vt),
568 root, keys[level], (unsigned *)&index); 562 root, keys[level], (unsigned *)&index);
569 if (r < 0) 563 if (r < 0)
570 break; 564 break;
@@ -654,11 +648,13 @@ static int remove_one(struct dm_btree_info *info, dm_block_t root,
654 int index = 0, r = 0; 648 int index = 0, r = 0;
655 struct shadow_spine spine; 649 struct shadow_spine spine;
656 struct btree_node *n; 650 struct btree_node *n;
651 struct dm_btree_value_type le64_vt;
657 uint64_t k; 652 uint64_t k;
658 653
654 init_le64_type(info->tm, &le64_vt);
659 init_shadow_spine(&spine, info); 655 init_shadow_spine(&spine, info);
660 for (level = 0; level < last_level; level++) { 656 for (level = 0; level < last_level; level++) {
661 r = remove_raw(&spine, info, &le64_type, 657 r = remove_raw(&spine, info, &le64_vt,
662 root, keys[level], (unsigned *) &index); 658 root, keys[level], (unsigned *) &index);
663 if (r < 0) 659 if (r < 0)
664 goto out; 660 goto out;
@@ -689,6 +685,7 @@ static int remove_one(struct dm_btree_info *info, dm_block_t root,
689 value_ptr(n, index)); 685 value_ptr(n, index));
690 686
691 delete_at(n, index); 687 delete_at(n, index);
688 keys[last_level] = k + 1ull;
692 689
693 } else 690 } else
694 r = -ENODATA; 691 r = -ENODATA;
diff --git a/drivers/md/persistent-data/dm-btree-spine.c b/drivers/md/persistent-data/dm-btree-spine.c
index 1b5e13ec7f96..0dee514ba4c5 100644
--- a/drivers/md/persistent-data/dm-btree-spine.c
+++ b/drivers/md/persistent-data/dm-btree-spine.c
@@ -249,3 +249,40 @@ int shadow_root(struct shadow_spine *s)
249{ 249{
250 return s->root; 250 return s->root;
251} 251}
252
253static void le64_inc(void *context, const void *value_le)
254{
255 struct dm_transaction_manager *tm = context;
256 __le64 v_le;
257
258 memcpy(&v_le, value_le, sizeof(v_le));
259 dm_tm_inc(tm, le64_to_cpu(v_le));
260}
261
262static void le64_dec(void *context, const void *value_le)
263{
264 struct dm_transaction_manager *tm = context;
265 __le64 v_le;
266
267 memcpy(&v_le, value_le, sizeof(v_le));
268 dm_tm_dec(tm, le64_to_cpu(v_le));
269}
270
271static int le64_equal(void *context, const void *value1_le, const void *value2_le)
272{
273 __le64 v1_le, v2_le;
274
275 memcpy(&v1_le, value1_le, sizeof(v1_le));
276 memcpy(&v2_le, value2_le, sizeof(v2_le));
277 return v1_le == v2_le;
278}
279
280void init_le64_type(struct dm_transaction_manager *tm,
281 struct dm_btree_value_type *vt)
282{
283 vt->context = tm;
284 vt->size = sizeof(__le64);
285 vt->inc = le64_inc;
286 vt->dec = le64_dec;
287 vt->equal = le64_equal;
288}
diff --git a/drivers/md/persistent-data/dm-btree.c b/drivers/md/persistent-data/dm-btree.c
index fdd3793e22f9..c7726cebc495 100644
--- a/drivers/md/persistent-data/dm-btree.c
+++ b/drivers/md/persistent-data/dm-btree.c
@@ -667,12 +667,7 @@ static int insert(struct dm_btree_info *info, dm_block_t root,
667 struct btree_node *n; 667 struct btree_node *n;
668 struct dm_btree_value_type le64_type; 668 struct dm_btree_value_type le64_type;
669 669
670 le64_type.context = NULL; 670 init_le64_type(info->tm, &le64_type);
671 le64_type.size = sizeof(__le64);
672 le64_type.inc = NULL;
673 le64_type.dec = NULL;
674 le64_type.equal = NULL;
675
676 init_shadow_spine(&spine, info); 671 init_shadow_spine(&spine, info);
677 672
678 for (level = 0; level < (info->levels - 1); level++) { 673 for (level = 0; level < (info->levels - 1); level++) {
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index f80f1af61ce7..967a4ed73929 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -336,7 +336,7 @@ static void raid1_end_read_request(struct bio *bio, int error)
336 spin_lock_irqsave(&conf->device_lock, flags); 336 spin_lock_irqsave(&conf->device_lock, flags);
337 if (r1_bio->mddev->degraded == conf->raid_disks || 337 if (r1_bio->mddev->degraded == conf->raid_disks ||
338 (r1_bio->mddev->degraded == conf->raid_disks-1 && 338 (r1_bio->mddev->degraded == conf->raid_disks-1 &&
339 !test_bit(Faulty, &conf->mirrors[mirror].rdev->flags))) 339 test_bit(In_sync, &conf->mirrors[mirror].rdev->flags)))
340 uptodate = 1; 340 uptodate = 1;
341 spin_unlock_irqrestore(&conf->device_lock, flags); 341 spin_unlock_irqrestore(&conf->device_lock, flags);
342 } 342 }
@@ -541,7 +541,7 @@ static int read_balance(struct r1conf *conf, struct r1bio *r1_bio, int *max_sect
541 541
542 if ((conf->mddev->recovery_cp < this_sector + sectors) || 542 if ((conf->mddev->recovery_cp < this_sector + sectors) ||
543 (mddev_is_clustered(conf->mddev) && 543 (mddev_is_clustered(conf->mddev) &&
544 md_cluster_ops->area_resyncing(conf->mddev, this_sector, 544 md_cluster_ops->area_resyncing(conf->mddev, READ, this_sector,
545 this_sector + sectors))) 545 this_sector + sectors)))
546 choose_first = 1; 546 choose_first = 1;
547 else 547 else
@@ -1111,7 +1111,8 @@ static void make_request(struct mddev *mddev, struct bio * bio)
1111 ((bio_end_sector(bio) > mddev->suspend_lo && 1111 ((bio_end_sector(bio) > mddev->suspend_lo &&
1112 bio->bi_iter.bi_sector < mddev->suspend_hi) || 1112 bio->bi_iter.bi_sector < mddev->suspend_hi) ||
1113 (mddev_is_clustered(mddev) && 1113 (mddev_is_clustered(mddev) &&
1114 md_cluster_ops->area_resyncing(mddev, bio->bi_iter.bi_sector, bio_end_sector(bio))))) { 1114 md_cluster_ops->area_resyncing(mddev, WRITE,
1115 bio->bi_iter.bi_sector, bio_end_sector(bio))))) {
1115 /* As the suspend_* range is controlled by 1116 /* As the suspend_* range is controlled by
1116 * userspace, we want an interruptible 1117 * userspace, we want an interruptible
1117 * wait. 1118 * wait.
@@ -1124,7 +1125,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
1124 if (bio_end_sector(bio) <= mddev->suspend_lo || 1125 if (bio_end_sector(bio) <= mddev->suspend_lo ||
1125 bio->bi_iter.bi_sector >= mddev->suspend_hi || 1126 bio->bi_iter.bi_sector >= mddev->suspend_hi ||
1126 (mddev_is_clustered(mddev) && 1127 (mddev_is_clustered(mddev) &&
1127 !md_cluster_ops->area_resyncing(mddev, 1128 !md_cluster_ops->area_resyncing(mddev, WRITE,
1128 bio->bi_iter.bi_sector, bio_end_sector(bio)))) 1129 bio->bi_iter.bi_sector, bio_end_sector(bio))))
1129 break; 1130 break;
1130 schedule(); 1131 schedule();
@@ -1475,6 +1476,7 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
1475{ 1476{
1476 char b[BDEVNAME_SIZE]; 1477 char b[BDEVNAME_SIZE];
1477 struct r1conf *conf = mddev->private; 1478 struct r1conf *conf = mddev->private;
1479 unsigned long flags;
1478 1480
1479 /* 1481 /*
1480 * If it is not operational, then we have already marked it as dead 1482 * If it is not operational, then we have already marked it as dead
@@ -1494,14 +1496,13 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
1494 return; 1496 return;
1495 } 1497 }
1496 set_bit(Blocked, &rdev->flags); 1498 set_bit(Blocked, &rdev->flags);
1499 spin_lock_irqsave(&conf->device_lock, flags);
1497 if (test_and_clear_bit(In_sync, &rdev->flags)) { 1500 if (test_and_clear_bit(In_sync, &rdev->flags)) {
1498 unsigned long flags;
1499 spin_lock_irqsave(&conf->device_lock, flags);
1500 mddev->degraded++; 1501 mddev->degraded++;
1501 set_bit(Faulty, &rdev->flags); 1502 set_bit(Faulty, &rdev->flags);
1502 spin_unlock_irqrestore(&conf->device_lock, flags);
1503 } else 1503 } else
1504 set_bit(Faulty, &rdev->flags); 1504 set_bit(Faulty, &rdev->flags);
1505 spin_unlock_irqrestore(&conf->device_lock, flags);
1505 /* 1506 /*
1506 * if recovery is running, make sure it aborts. 1507 * if recovery is running, make sure it aborts.
1507 */ 1508 */
@@ -1567,7 +1568,10 @@ static int raid1_spare_active(struct mddev *mddev)
1567 * Find all failed disks within the RAID1 configuration 1568 * Find all failed disks within the RAID1 configuration
1568 * and mark them readable. 1569 * and mark them readable.
1569 * Called under mddev lock, so rcu protection not needed. 1570 * Called under mddev lock, so rcu protection not needed.
1571 * device_lock used to avoid races with raid1_end_read_request
1572 * which expects 'In_sync' flags and ->degraded to be consistent.
1570 */ 1573 */
1574 spin_lock_irqsave(&conf->device_lock, flags);
1571 for (i = 0; i < conf->raid_disks; i++) { 1575 for (i = 0; i < conf->raid_disks; i++) {
1572 struct md_rdev *rdev = conf->mirrors[i].rdev; 1576 struct md_rdev *rdev = conf->mirrors[i].rdev;
1573 struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev; 1577 struct md_rdev *repl = conf->mirrors[conf->raid_disks + i].rdev;
@@ -1598,7 +1602,6 @@ static int raid1_spare_active(struct mddev *mddev)
1598 sysfs_notify_dirent_safe(rdev->sysfs_state); 1602 sysfs_notify_dirent_safe(rdev->sysfs_state);
1599 } 1603 }
1600 } 1604 }
1601 spin_lock_irqsave(&conf->device_lock, flags);
1602 mddev->degraded -= count; 1605 mddev->degraded -= count;
1603 spin_unlock_irqrestore(&conf->device_lock, flags); 1606 spin_unlock_irqrestore(&conf->device_lock, flags);
1604 1607
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 940f2f365461..38c58e19cfce 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -3556,6 +3556,7 @@ static struct r10conf *setup_conf(struct mddev *mddev)
3556 /* far_copies must be 1 */ 3556 /* far_copies must be 1 */
3557 conf->prev.stride = conf->dev_sectors; 3557 conf->prev.stride = conf->dev_sectors;
3558 } 3558 }
3559 conf->reshape_safe = conf->reshape_progress;
3559 spin_lock_init(&conf->device_lock); 3560 spin_lock_init(&conf->device_lock);
3560 INIT_LIST_HEAD(&conf->retry_list); 3561 INIT_LIST_HEAD(&conf->retry_list);
3561 3562
@@ -3760,7 +3761,6 @@ static int run(struct mddev *mddev)
3760 } 3761 }
3761 conf->offset_diff = min_offset_diff; 3762 conf->offset_diff = min_offset_diff;
3762 3763
3763 conf->reshape_safe = conf->reshape_progress;
3764 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery); 3764 clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
3765 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery); 3765 clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
3766 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery); 3766 set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
@@ -4103,6 +4103,7 @@ static int raid10_start_reshape(struct mddev *mddev)
4103 conf->reshape_progress = size; 4103 conf->reshape_progress = size;
4104 } else 4104 } else
4105 conf->reshape_progress = 0; 4105 conf->reshape_progress = 0;
4106 conf->reshape_safe = conf->reshape_progress;
4106 spin_unlock_irq(&conf->device_lock); 4107 spin_unlock_irq(&conf->device_lock);
4107 4108
4108 if (mddev->delta_disks && mddev->bitmap) { 4109 if (mddev->delta_disks && mddev->bitmap) {
@@ -4170,6 +4171,7 @@ abort:
4170 rdev->new_data_offset = rdev->data_offset; 4171 rdev->new_data_offset = rdev->data_offset;
4171 smp_wmb(); 4172 smp_wmb();
4172 conf->reshape_progress = MaxSector; 4173 conf->reshape_progress = MaxSector;
4174 conf->reshape_safe = MaxSector;
4173 mddev->reshape_position = MaxSector; 4175 mddev->reshape_position = MaxSector;
4174 spin_unlock_irq(&conf->device_lock); 4176 spin_unlock_irq(&conf->device_lock);
4175 return ret; 4177 return ret;
@@ -4524,6 +4526,7 @@ static void end_reshape(struct r10conf *conf)
4524 md_finish_reshape(conf->mddev); 4526 md_finish_reshape(conf->mddev);
4525 smp_wmb(); 4527 smp_wmb();
4526 conf->reshape_progress = MaxSector; 4528 conf->reshape_progress = MaxSector;
4529 conf->reshape_safe = MaxSector;
4527 spin_unlock_irq(&conf->device_lock); 4530 spin_unlock_irq(&conf->device_lock);
4528 4531
4529 /* read-ahead size must cover two whole stripes, which is 4532 /* read-ahead size must cover two whole stripes, which is
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 59e44e99eef3..f757023fc458 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2162,6 +2162,9 @@ static int resize_stripes(struct r5conf *conf, int newsize)
2162 if (!sc) 2162 if (!sc)
2163 return -ENOMEM; 2163 return -ENOMEM;
2164 2164
2165 /* Need to ensure auto-resizing doesn't interfere */
2166 mutex_lock(&conf->cache_size_mutex);
2167
2165 for (i = conf->max_nr_stripes; i; i--) { 2168 for (i = conf->max_nr_stripes; i; i--) {
2166 nsh = alloc_stripe(sc, GFP_KERNEL); 2169 nsh = alloc_stripe(sc, GFP_KERNEL);
2167 if (!nsh) 2170 if (!nsh)
@@ -2178,6 +2181,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
2178 kmem_cache_free(sc, nsh); 2181 kmem_cache_free(sc, nsh);
2179 } 2182 }
2180 kmem_cache_destroy(sc); 2183 kmem_cache_destroy(sc);
2184 mutex_unlock(&conf->cache_size_mutex);
2181 return -ENOMEM; 2185 return -ENOMEM;
2182 } 2186 }
2183 /* Step 2 - Must use GFP_NOIO now. 2187 /* Step 2 - Must use GFP_NOIO now.
@@ -2224,6 +2228,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
2224 } else 2228 } else
2225 err = -ENOMEM; 2229 err = -ENOMEM;
2226 2230
2231 mutex_unlock(&conf->cache_size_mutex);
2227 /* Step 4, return new stripes to service */ 2232 /* Step 4, return new stripes to service */
2228 while(!list_empty(&newstripes)) { 2233 while(!list_empty(&newstripes)) {
2229 nsh = list_entry(newstripes.next, struct stripe_head, lru); 2234 nsh = list_entry(newstripes.next, struct stripe_head, lru);
@@ -2251,7 +2256,7 @@ static int resize_stripes(struct r5conf *conf, int newsize)
2251static int drop_one_stripe(struct r5conf *conf) 2256static int drop_one_stripe(struct r5conf *conf)
2252{ 2257{
2253 struct stripe_head *sh; 2258 struct stripe_head *sh;
2254 int hash = (conf->max_nr_stripes - 1) % NR_STRIPE_HASH_LOCKS; 2259 int hash = (conf->max_nr_stripes - 1) & STRIPE_HASH_LOCKS_MASK;
2255 2260
2256 spin_lock_irq(conf->hash_locks + hash); 2261 spin_lock_irq(conf->hash_locks + hash);
2257 sh = get_free_stripe(conf, hash); 2262 sh = get_free_stripe(conf, hash);
@@ -4061,8 +4066,10 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
4061 &first_bad, &bad_sectors)) 4066 &first_bad, &bad_sectors))
4062 set_bit(R5_ReadRepl, &dev->flags); 4067 set_bit(R5_ReadRepl, &dev->flags);
4063 else { 4068 else {
4064 if (rdev) 4069 if (rdev && !test_bit(Faulty, &rdev->flags))
4065 set_bit(R5_NeedReplace, &dev->flags); 4070 set_bit(R5_NeedReplace, &dev->flags);
4071 else
4072 clear_bit(R5_NeedReplace, &dev->flags);
4066 rdev = rcu_dereference(conf->disks[i].rdev); 4073 rdev = rcu_dereference(conf->disks[i].rdev);
4067 clear_bit(R5_ReadRepl, &dev->flags); 4074 clear_bit(R5_ReadRepl, &dev->flags);
4068 } 4075 }
@@ -5857,12 +5864,14 @@ static void raid5d(struct md_thread *thread)
5857 pr_debug("%d stripes handled\n", handled); 5864 pr_debug("%d stripes handled\n", handled);
5858 5865
5859 spin_unlock_irq(&conf->device_lock); 5866 spin_unlock_irq(&conf->device_lock);
5860 if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state)) { 5867 if (test_and_clear_bit(R5_ALLOC_MORE, &conf->cache_state) &&
5868 mutex_trylock(&conf->cache_size_mutex)) {
5861 grow_one_stripe(conf, __GFP_NOWARN); 5869 grow_one_stripe(conf, __GFP_NOWARN);
5862 /* Set flag even if allocation failed. This helps 5870 /* Set flag even if allocation failed. This helps
5863 * slow down allocation requests when mem is short 5871 * slow down allocation requests when mem is short
5864 */ 5872 */
5865 set_bit(R5_DID_ALLOC, &conf->cache_state); 5873 set_bit(R5_DID_ALLOC, &conf->cache_state);
5874 mutex_unlock(&conf->cache_size_mutex);
5866 } 5875 }
5867 5876
5868 async_tx_issue_pending_all(); 5877 async_tx_issue_pending_all();
@@ -5894,18 +5903,22 @@ raid5_set_cache_size(struct mddev *mddev, int size)
5894 return -EINVAL; 5903 return -EINVAL;
5895 5904
5896 conf->min_nr_stripes = size; 5905 conf->min_nr_stripes = size;
5906 mutex_lock(&conf->cache_size_mutex);
5897 while (size < conf->max_nr_stripes && 5907 while (size < conf->max_nr_stripes &&
5898 drop_one_stripe(conf)) 5908 drop_one_stripe(conf))
5899 ; 5909 ;
5910 mutex_unlock(&conf->cache_size_mutex);
5900 5911
5901 5912
5902 err = md_allow_write(mddev); 5913 err = md_allow_write(mddev);
5903 if (err) 5914 if (err)
5904 return err; 5915 return err;
5905 5916
5917 mutex_lock(&conf->cache_size_mutex);
5906 while (size > conf->max_nr_stripes) 5918 while (size > conf->max_nr_stripes)
5907 if (!grow_one_stripe(conf, GFP_KERNEL)) 5919 if (!grow_one_stripe(conf, GFP_KERNEL))
5908 break; 5920 break;
5921 mutex_unlock(&conf->cache_size_mutex);
5909 5922
5910 return 0; 5923 return 0;
5911} 5924}
@@ -6371,11 +6384,19 @@ static unsigned long raid5_cache_scan(struct shrinker *shrink,
6371 struct shrink_control *sc) 6384 struct shrink_control *sc)
6372{ 6385{
6373 struct r5conf *conf = container_of(shrink, struct r5conf, shrinker); 6386 struct r5conf *conf = container_of(shrink, struct r5conf, shrinker);
6374 int ret = 0; 6387 unsigned long ret = SHRINK_STOP;
6375 while (ret < sc->nr_to_scan) { 6388
6376 if (drop_one_stripe(conf) == 0) 6389 if (mutex_trylock(&conf->cache_size_mutex)) {
6377 return SHRINK_STOP; 6390 ret= 0;
6378 ret++; 6391 while (ret < sc->nr_to_scan &&
6392 conf->max_nr_stripes > conf->min_nr_stripes) {
6393 if (drop_one_stripe(conf) == 0) {
6394 ret = SHRINK_STOP;
6395 break;
6396 }
6397 ret++;
6398 }
6399 mutex_unlock(&conf->cache_size_mutex);
6379 } 6400 }
6380 return ret; 6401 return ret;
6381} 6402}
@@ -6444,6 +6465,7 @@ static struct r5conf *setup_conf(struct mddev *mddev)
6444 goto abort; 6465 goto abort;
6445 spin_lock_init(&conf->device_lock); 6466 spin_lock_init(&conf->device_lock);
6446 seqcount_init(&conf->gen_lock); 6467 seqcount_init(&conf->gen_lock);
6468 mutex_init(&conf->cache_size_mutex);
6447 init_waitqueue_head(&conf->wait_for_quiescent); 6469 init_waitqueue_head(&conf->wait_for_quiescent);
6448 for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) { 6470 for (i = 0; i < NR_STRIPE_HASH_LOCKS; i++) {
6449 init_waitqueue_head(&conf->wait_for_stripe[i]); 6471 init_waitqueue_head(&conf->wait_for_stripe[i]);
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 02c3bf8fbfe7..d05144278690 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -482,7 +482,8 @@ struct r5conf {
482 */ 482 */
483 int active_name; 483 int active_name;
484 char cache_name[2][32]; 484 char cache_name[2][32];
485 struct kmem_cache *slab_cache; /* for allocating stripes */ 485 struct kmem_cache *slab_cache; /* for allocating stripes */
486 struct mutex cache_size_mutex; /* Protect changes to cache size */
486 487
487 int seq_flush, seq_write; 488 int seq_flush, seq_write;
488 int quiesce; 489 int quiesce;
diff --git a/drivers/media/pci/ivtv/ivtvfb.c b/drivers/media/pci/ivtv/ivtvfb.c
index 4cb365d4ffdc..8b95eefb610b 100644
--- a/drivers/media/pci/ivtv/ivtvfb.c
+++ b/drivers/media/pci/ivtv/ivtvfb.c
@@ -38,6 +38,8 @@
38 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA 38 Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
39 */ 39 */
40 40
41#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
42
41#include <linux/module.h> 43#include <linux/module.h>
42#include <linux/kernel.h> 44#include <linux/kernel.h>
43#include <linux/fb.h> 45#include <linux/fb.h>
@@ -1171,6 +1173,13 @@ static int ivtvfb_init_card(struct ivtv *itv)
1171{ 1173{
1172 int rc; 1174 int rc;
1173 1175
1176#ifdef CONFIG_X86_64
1177 if (pat_enabled()) {
1178 pr_warn("ivtvfb needs PAT disabled, boot with nopat kernel parameter\n");
1179 return -ENODEV;
1180 }
1181#endif
1182
1174 if (itv->osd_info) { 1183 if (itv->osd_info) {
1175 IVTVFB_ERR("Card %d already initialised\n", ivtvfb_card_id); 1184 IVTVFB_ERR("Card %d already initialised\n", ivtvfb_card_id);
1176 return -EBUSY; 1185 return -EBUSY;
@@ -1265,12 +1274,6 @@ static int __init ivtvfb_init(void)
1265 int registered = 0; 1274 int registered = 0;
1266 int err; 1275 int err;
1267 1276
1268#ifdef CONFIG_X86_64
1269 if (WARN(pat_enabled(),
1270 "ivtvfb needs PAT disabled, boot with nopat kernel parameter\n")) {
1271 return -ENODEV;
1272 }
1273#endif
1274 1277
1275 if (ivtvfb_card_id < -1 || ivtvfb_card_id >= IVTV_MAX_CARDS) { 1278 if (ivtvfb_card_id < -1 || ivtvfb_card_id >= IVTV_MAX_CARDS) {
1276 printk(KERN_ERR "ivtvfb: ivtvfb_card_id parameter is out of range (valid range: -1 - %d)\n", 1279 printk(KERN_ERR "ivtvfb: ivtvfb_card_id parameter is out of range (valid range: -1 - %d)\n",
diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c
index 3a27a84ad3ec..9426276dbe14 100644
--- a/drivers/memory/omap-gpmc.c
+++ b/drivers/memory/omap-gpmc.c
@@ -2245,6 +2245,9 @@ void omap3_gpmc_save_context(void)
2245{ 2245{
2246 int i; 2246 int i;
2247 2247
2248 if (!gpmc_base)
2249 return;
2250
2248 gpmc_context.sysconfig = gpmc_read_reg(GPMC_SYSCONFIG); 2251 gpmc_context.sysconfig = gpmc_read_reg(GPMC_SYSCONFIG);
2249 gpmc_context.irqenable = gpmc_read_reg(GPMC_IRQENABLE); 2252 gpmc_context.irqenable = gpmc_read_reg(GPMC_IRQENABLE);
2250 gpmc_context.timeout_ctrl = gpmc_read_reg(GPMC_TIMEOUT_CONTROL); 2253 gpmc_context.timeout_ctrl = gpmc_read_reg(GPMC_TIMEOUT_CONTROL);
@@ -2277,6 +2280,9 @@ void omap3_gpmc_restore_context(void)
2277{ 2280{
2278 int i; 2281 int i;
2279 2282
2283 if (!gpmc_base)
2284 return;
2285
2280 gpmc_write_reg(GPMC_SYSCONFIG, gpmc_context.sysconfig); 2286 gpmc_write_reg(GPMC_SYSCONFIG, gpmc_context.sysconfig);
2281 gpmc_write_reg(GPMC_IRQENABLE, gpmc_context.irqenable); 2287 gpmc_write_reg(GPMC_IRQENABLE, gpmc_context.irqenable);
2282 gpmc_write_reg(GPMC_TIMEOUT_CONTROL, gpmc_context.timeout_ctrl); 2288 gpmc_write_reg(GPMC_TIMEOUT_CONTROL, gpmc_context.timeout_ctrl);
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index 379a420245ea..0f0cad8dcaed 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -115,7 +115,7 @@ config MFD_CROS_EC_I2C
115 115
116config MFD_CROS_EC_SPI 116config MFD_CROS_EC_SPI
117 tristate "ChromeOS Embedded Controller (SPI)" 117 tristate "ChromeOS Embedded Controller (SPI)"
118 depends on MFD_CROS_EC && CROS_EC_PROTO && SPI && OF 118 depends on MFD_CROS_EC && CROS_EC_PROTO && SPI
119 119
120 ---help--- 120 ---help---
121 If you say Y here, you get support for talking to the ChromeOS EC 121 If you say Y here, you get support for talking to the ChromeOS EC
diff --git a/drivers/mfd/arizona-core.c b/drivers/mfd/arizona-core.c
index bebf58a06a6b..a72ddb295078 100644
--- a/drivers/mfd/arizona-core.c
+++ b/drivers/mfd/arizona-core.c
@@ -651,7 +651,7 @@ static int arizona_runtime_suspend(struct device *dev)
651 651
652 arizona->has_fully_powered_off = true; 652 arizona->has_fully_powered_off = true;
653 653
654 disable_irq(arizona->irq); 654 disable_irq_nosync(arizona->irq);
655 arizona_enable_reset(arizona); 655 arizona_enable_reset(arizona);
656 regulator_bulk_disable(arizona->num_core_supplies, 656 regulator_bulk_disable(arizona->num_core_supplies,
657 arizona->core_supplies); 657 arizona->core_supplies);
@@ -1141,10 +1141,6 @@ int arizona_dev_init(struct arizona *arizona)
1141 arizona->pdata.gpio_defaults[i]); 1141 arizona->pdata.gpio_defaults[i]);
1142 } 1142 }
1143 1143
1144 pm_runtime_set_autosuspend_delay(arizona->dev, 100);
1145 pm_runtime_use_autosuspend(arizona->dev);
1146 pm_runtime_enable(arizona->dev);
1147
1148 /* Chip default */ 1144 /* Chip default */
1149 if (!arizona->pdata.clk32k_src) 1145 if (!arizona->pdata.clk32k_src)
1150 arizona->pdata.clk32k_src = ARIZONA_32KZ_MCLK2; 1146 arizona->pdata.clk32k_src = ARIZONA_32KZ_MCLK2;
@@ -1245,11 +1241,17 @@ int arizona_dev_init(struct arizona *arizona)
1245 arizona->pdata.spk_fmt[i]); 1241 arizona->pdata.spk_fmt[i]);
1246 } 1242 }
1247 1243
1244 pm_runtime_set_active(arizona->dev);
1245 pm_runtime_enable(arizona->dev);
1246
1248 /* Set up for interrupts */ 1247 /* Set up for interrupts */
1249 ret = arizona_irq_init(arizona); 1248 ret = arizona_irq_init(arizona);
1250 if (ret != 0) 1249 if (ret != 0)
1251 goto err_reset; 1250 goto err_reset;
1252 1251
1252 pm_runtime_set_autosuspend_delay(arizona->dev, 100);
1253 pm_runtime_use_autosuspend(arizona->dev);
1254
1253 arizona_request_irq(arizona, ARIZONA_IRQ_CLKGEN_ERR, "CLKGEN error", 1255 arizona_request_irq(arizona, ARIZONA_IRQ_CLKGEN_ERR, "CLKGEN error",
1254 arizona_clkgen_err, arizona); 1256 arizona_clkgen_err, arizona);
1255 arizona_request_irq(arizona, ARIZONA_IRQ_OVERCLOCKED, "Overclocked", 1257 arizona_request_irq(arizona, ARIZONA_IRQ_OVERCLOCKED, "Overclocked",
@@ -1278,10 +1280,6 @@ int arizona_dev_init(struct arizona *arizona)
1278 goto err_irq; 1280 goto err_irq;
1279 } 1281 }
1280 1282
1281#ifdef CONFIG_PM
1282 regulator_disable(arizona->dcvdd);
1283#endif
1284
1285 return 0; 1283 return 0;
1286 1284
1287err_irq: 1285err_irq:
diff --git a/drivers/misc/eeprom/at24.c b/drivers/misc/eeprom/at24.c
index 2d3db81be099..6ded3dc36644 100644
--- a/drivers/misc/eeprom/at24.c
+++ b/drivers/misc/eeprom/at24.c
@@ -438,9 +438,6 @@ static ssize_t at24_bin_write(struct file *filp, struct kobject *kobj,
438{ 438{
439 struct at24_data *at24; 439 struct at24_data *at24;
440 440
441 if (unlikely(off >= attr->size))
442 return -EFBIG;
443
444 at24 = dev_get_drvdata(container_of(kobj, struct device, kobj)); 441 at24 = dev_get_drvdata(container_of(kobj, struct device, kobj));
445 return at24_write(at24, buf, off, count); 442 return at24_write(at24, buf, off, count);
446} 443}
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 8eb0a9500a90..e9513d651cd3 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -682,7 +682,7 @@ int mei_register(struct mei_device *dev, struct device *parent)
682 /* Fill in the data structures */ 682 /* Fill in the data structures */
683 devno = MKDEV(MAJOR(mei_devt), dev->minor); 683 devno = MKDEV(MAJOR(mei_devt), dev->minor);
684 cdev_init(&dev->cdev, &mei_fops); 684 cdev_init(&dev->cdev, &mei_fops);
685 dev->cdev.owner = mei_fops.owner; 685 dev->cdev.owner = parent->driver->owner;
686 686
687 /* Add the device */ 687 /* Add the device */
688 ret = cdev_add(&dev->cdev, devno, 1); 688 ret = cdev_add(&dev->cdev, devno, 1);
diff --git a/drivers/misc/mic/scif/scif_nodeqp.c b/drivers/misc/mic/scif/scif_nodeqp.c
index 41e3bdb10061..6dfdae3452d6 100644
--- a/drivers/misc/mic/scif/scif_nodeqp.c
+++ b/drivers/misc/mic/scif/scif_nodeqp.c
@@ -357,7 +357,7 @@ static void scif_p2p_freesg(struct scatterlist *sg)
357} 357}
358 358
359static struct scatterlist * 359static struct scatterlist *
360scif_p2p_setsg(void __iomem *va, int page_size, int page_cnt) 360scif_p2p_setsg(phys_addr_t pa, int page_size, int page_cnt)
361{ 361{
362 struct scatterlist *sg; 362 struct scatterlist *sg;
363 struct page *page; 363 struct page *page;
@@ -368,16 +368,11 @@ scif_p2p_setsg(void __iomem *va, int page_size, int page_cnt)
368 return NULL; 368 return NULL;
369 sg_init_table(sg, page_cnt); 369 sg_init_table(sg, page_cnt);
370 for (i = 0; i < page_cnt; i++) { 370 for (i = 0; i < page_cnt; i++) {
371 page = vmalloc_to_page((void __force *)va); 371 page = pfn_to_page(pa >> PAGE_SHIFT);
372 if (!page)
373 goto p2p_sg_err;
374 sg_set_page(&sg[i], page, page_size, 0); 372 sg_set_page(&sg[i], page, page_size, 0);
375 va += page_size; 373 pa += page_size;
376 } 374 }
377 return sg; 375 return sg;
378p2p_sg_err:
379 kfree(sg);
380 return NULL;
381} 376}
382 377
383/* Init p2p mappings required to access peerdev from scifdev */ 378/* Init p2p mappings required to access peerdev from scifdev */
@@ -395,14 +390,14 @@ scif_init_p2p_info(struct scif_dev *scifdev, struct scif_dev *peerdev)
395 p2p = kzalloc(sizeof(*p2p), GFP_KERNEL); 390 p2p = kzalloc(sizeof(*p2p), GFP_KERNEL);
396 if (!p2p) 391 if (!p2p)
397 return NULL; 392 return NULL;
398 p2p->ppi_sg[SCIF_PPI_MMIO] = scif_p2p_setsg(psdev->mmio->va, 393 p2p->ppi_sg[SCIF_PPI_MMIO] = scif_p2p_setsg(psdev->mmio->pa,
399 PAGE_SIZE, num_mmio_pages); 394 PAGE_SIZE, num_mmio_pages);
400 if (!p2p->ppi_sg[SCIF_PPI_MMIO]) 395 if (!p2p->ppi_sg[SCIF_PPI_MMIO])
401 goto free_p2p; 396 goto free_p2p;
402 p2p->sg_nentries[SCIF_PPI_MMIO] = num_mmio_pages; 397 p2p->sg_nentries[SCIF_PPI_MMIO] = num_mmio_pages;
403 sg_page_shift = get_order(min(psdev->aper->len, (u64)(1 << 30))); 398 sg_page_shift = get_order(min(psdev->aper->len, (u64)(1 << 30)));
404 num_aper_chunks = num_aper_pages >> (sg_page_shift - PAGE_SHIFT); 399 num_aper_chunks = num_aper_pages >> (sg_page_shift - PAGE_SHIFT);
405 p2p->ppi_sg[SCIF_PPI_APER] = scif_p2p_setsg(psdev->aper->va, 400 p2p->ppi_sg[SCIF_PPI_APER] = scif_p2p_setsg(psdev->aper->pa,
406 1 << sg_page_shift, 401 1 << sg_page_shift,
407 num_aper_chunks); 402 num_aper_chunks);
408 p2p->sg_nentries[SCIF_PPI_APER] = num_aper_chunks; 403 p2p->sg_nentries[SCIF_PPI_APER] = num_aper_chunks;
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index c9c3d20b784b..a1b820fcb2a6 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -208,6 +208,8 @@ static ssize_t power_ro_lock_show(struct device *dev,
208 208
209 ret = snprintf(buf, PAGE_SIZE, "%d\n", locked); 209 ret = snprintf(buf, PAGE_SIZE, "%d\n", locked);
210 210
211 mmc_blk_put(md);
212
211 return ret; 213 return ret;
212} 214}
213 215
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index fd9a58e216a5..6a0f9c79be26 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -779,6 +779,7 @@ config MMC_TOSHIBA_PCI
779 779
780config MMC_MTK 780config MMC_MTK
781 tristate "MediaTek SD/MMC Card Interface support" 781 tristate "MediaTek SD/MMC Card Interface support"
782 depends on HAS_DMA
782 help 783 help
783 This selects the MediaTek(R) Secure digital and Multimedia card Interface. 784 This selects the MediaTek(R) Secure digital and Multimedia card Interface.
784 If you have a machine with a integrated SD/MMC card reader, say Y or M here. 785 If you have a machine with a integrated SD/MMC card reader, say Y or M here.
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index b2b411da297b..4d1203236890 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -1062,9 +1062,14 @@ static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
1062 1062
1063 if (status & (CTO_EN | CCRC_EN)) 1063 if (status & (CTO_EN | CCRC_EN))
1064 end_cmd = 1; 1064 end_cmd = 1;
1065 if (host->data || host->response_busy) {
1066 end_trans = !end_cmd;
1067 host->response_busy = 0;
1068 }
1065 if (status & (CTO_EN | DTO_EN)) 1069 if (status & (CTO_EN | DTO_EN))
1066 hsmmc_command_incomplete(host, -ETIMEDOUT, end_cmd); 1070 hsmmc_command_incomplete(host, -ETIMEDOUT, end_cmd);
1067 else if (status & (CCRC_EN | DCRC_EN)) 1071 else if (status & (CCRC_EN | DCRC_EN | DEB_EN | CEB_EN |
1072 BADA_EN))
1068 hsmmc_command_incomplete(host, -EILSEQ, end_cmd); 1073 hsmmc_command_incomplete(host, -EILSEQ, end_cmd);
1069 1074
1070 if (status & ACE_EN) { 1075 if (status & ACE_EN) {
@@ -1081,10 +1086,6 @@ static void omap_hsmmc_do_irq(struct omap_hsmmc_host *host, int status)
1081 } 1086 }
1082 dev_dbg(mmc_dev(host->mmc), "AC12 err: 0x%x\n", ac12); 1087 dev_dbg(mmc_dev(host->mmc), "AC12 err: 0x%x\n", ac12);
1083 } 1088 }
1084 if (host->data || host->response_busy) {
1085 end_trans = !end_cmd;
1086 host->response_busy = 0;
1087 }
1088 } 1089 }
1089 1090
1090 OMAP_HSMMC_WRITE(host->base, STAT, status); 1091 OMAP_HSMMC_WRITE(host->base, STAT, status);
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index faf0cb910c96..c6b9f6492e1a 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -581,13 +581,8 @@ static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg)
581static unsigned int esdhc_pltfm_get_max_clock(struct sdhci_host *host) 581static unsigned int esdhc_pltfm_get_max_clock(struct sdhci_host *host)
582{ 582{
583 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 583 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
584 struct pltfm_imx_data *imx_data = pltfm_host->priv;
585 struct esdhc_platform_data *boarddata = &imx_data->boarddata;
586 584
587 if (boarddata->f_max && (boarddata->f_max < pltfm_host->clock)) 585 return pltfm_host->clock;
588 return boarddata->f_max;
589 else
590 return pltfm_host->clock;
591} 586}
592 587
593static unsigned int esdhc_pltfm_get_min_clock(struct sdhci_host *host) 588static unsigned int esdhc_pltfm_get_min_clock(struct sdhci_host *host)
@@ -878,34 +873,19 @@ static const struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = {
878static int 873static int
879sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, 874sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
880 struct sdhci_host *host, 875 struct sdhci_host *host,
881 struct esdhc_platform_data *boarddata) 876 struct pltfm_imx_data *imx_data)
882{ 877{
883 struct device_node *np = pdev->dev.of_node; 878 struct device_node *np = pdev->dev.of_node;
884 879 struct esdhc_platform_data *boarddata = &imx_data->boarddata;
885 if (!np) 880 int ret;
886 return -ENODEV;
887
888 if (of_get_property(np, "non-removable", NULL))
889 boarddata->cd_type = ESDHC_CD_PERMANENT;
890
891 if (of_get_property(np, "fsl,cd-controller", NULL))
892 boarddata->cd_type = ESDHC_CD_CONTROLLER;
893 881
894 if (of_get_property(np, "fsl,wp-controller", NULL)) 882 if (of_get_property(np, "fsl,wp-controller", NULL))
895 boarddata->wp_type = ESDHC_WP_CONTROLLER; 883 boarddata->wp_type = ESDHC_WP_CONTROLLER;
896 884
897 boarddata->cd_gpio = of_get_named_gpio(np, "cd-gpios", 0);
898 if (gpio_is_valid(boarddata->cd_gpio))
899 boarddata->cd_type = ESDHC_CD_GPIO;
900
901 boarddata->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0); 885 boarddata->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0);
902 if (gpio_is_valid(boarddata->wp_gpio)) 886 if (gpio_is_valid(boarddata->wp_gpio))
903 boarddata->wp_type = ESDHC_WP_GPIO; 887 boarddata->wp_type = ESDHC_WP_GPIO;
904 888
905 of_property_read_u32(np, "bus-width", &boarddata->max_bus_width);
906
907 of_property_read_u32(np, "max-frequency", &boarddata->f_max);
908
909 if (of_find_property(np, "no-1-8-v", NULL)) 889 if (of_find_property(np, "no-1-8-v", NULL))
910 boarddata->support_vsel = false; 890 boarddata->support_vsel = false;
911 else 891 else
@@ -916,29 +896,119 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
916 896
917 mmc_of_parse_voltage(np, &host->ocr_mask); 897 mmc_of_parse_voltage(np, &host->ocr_mask);
918 898
899 /* sdr50 and sdr104 needs work on 1.8v signal voltage */
900 if ((boarddata->support_vsel) && esdhc_is_usdhc(imx_data) &&
901 !IS_ERR(imx_data->pins_default)) {
902 imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl,
903 ESDHC_PINCTRL_STATE_100MHZ);
904 imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl,
905 ESDHC_PINCTRL_STATE_200MHZ);
906 if (IS_ERR(imx_data->pins_100mhz) ||
907 IS_ERR(imx_data->pins_200mhz)) {
908 dev_warn(mmc_dev(host->mmc),
909 "could not get ultra high speed state, work on normal mode\n");
910 /*
911 * fall back to not support uhs by specify no 1.8v quirk
912 */
913 host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
914 }
915 } else {
916 host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
917 }
918
919 /* call to generic mmc_of_parse to support additional capabilities */ 919 /* call to generic mmc_of_parse to support additional capabilities */
920 return mmc_of_parse(host->mmc); 920 ret = mmc_of_parse(host->mmc);
921 if (ret)
922 return ret;
923
924 if (!IS_ERR_VALUE(mmc_gpio_get_cd(host->mmc)))
925 host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
926
927 return 0;
921} 928}
922#else 929#else
923static inline int 930static inline int
924sdhci_esdhc_imx_probe_dt(struct platform_device *pdev, 931sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
925 struct sdhci_host *host, 932 struct sdhci_host *host,
926 struct esdhc_platform_data *boarddata) 933 struct pltfm_imx_data *imx_data)
927{ 934{
928 return -ENODEV; 935 return -ENODEV;
929} 936}
930#endif 937#endif
931 938
939static int sdhci_esdhc_imx_probe_nondt(struct platform_device *pdev,
940 struct sdhci_host *host,
941 struct pltfm_imx_data *imx_data)
942{
943 struct esdhc_platform_data *boarddata = &imx_data->boarddata;
944 int err;
945
946 if (!host->mmc->parent->platform_data) {
947 dev_err(mmc_dev(host->mmc), "no board data!\n");
948 return -EINVAL;
949 }
950
951 imx_data->boarddata = *((struct esdhc_platform_data *)
952 host->mmc->parent->platform_data);
953 /* write_protect */
954 if (boarddata->wp_type == ESDHC_WP_GPIO) {
955 err = mmc_gpio_request_ro(host->mmc, boarddata->wp_gpio);
956 if (err) {
957 dev_err(mmc_dev(host->mmc),
958 "failed to request write-protect gpio!\n");
959 return err;
960 }
961 host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
962 }
963
964 /* card_detect */
965 switch (boarddata->cd_type) {
966 case ESDHC_CD_GPIO:
967 err = mmc_gpio_request_cd(host->mmc, boarddata->cd_gpio, 0);
968 if (err) {
969 dev_err(mmc_dev(host->mmc),
970 "failed to request card-detect gpio!\n");
971 return err;
972 }
973 /* fall through */
974
975 case ESDHC_CD_CONTROLLER:
976 /* we have a working card_detect back */
977 host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
978 break;
979
980 case ESDHC_CD_PERMANENT:
981 host->mmc->caps |= MMC_CAP_NONREMOVABLE;
982 break;
983
984 case ESDHC_CD_NONE:
985 break;
986 }
987
988 switch (boarddata->max_bus_width) {
989 case 8:
990 host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA;
991 break;
992 case 4:
993 host->mmc->caps |= MMC_CAP_4_BIT_DATA;
994 break;
995 case 1:
996 default:
997 host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA;
998 break;
999 }
1000
1001 return 0;
1002}
1003
932static int sdhci_esdhc_imx_probe(struct platform_device *pdev) 1004static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
933{ 1005{
934 const struct of_device_id *of_id = 1006 const struct of_device_id *of_id =
935 of_match_device(imx_esdhc_dt_ids, &pdev->dev); 1007 of_match_device(imx_esdhc_dt_ids, &pdev->dev);
936 struct sdhci_pltfm_host *pltfm_host; 1008 struct sdhci_pltfm_host *pltfm_host;
937 struct sdhci_host *host; 1009 struct sdhci_host *host;
938 struct esdhc_platform_data *boarddata;
939 int err; 1010 int err;
940 struct pltfm_imx_data *imx_data; 1011 struct pltfm_imx_data *imx_data;
941 bool dt = true;
942 1012
943 host = sdhci_pltfm_init(pdev, &sdhci_esdhc_imx_pdata, 0); 1013 host = sdhci_pltfm_init(pdev, &sdhci_esdhc_imx_pdata, 0);
944 if (IS_ERR(host)) 1014 if (IS_ERR(host))
@@ -1030,84 +1100,12 @@ static int sdhci_esdhc_imx_probe(struct platform_device *pdev)
1030 if (imx_data->socdata->flags & ESDHC_FLAG_ERR004536) 1100 if (imx_data->socdata->flags & ESDHC_FLAG_ERR004536)
1031 host->quirks |= SDHCI_QUIRK_BROKEN_ADMA; 1101 host->quirks |= SDHCI_QUIRK_BROKEN_ADMA;
1032 1102
1033 boarddata = &imx_data->boarddata; 1103 if (of_id)
1034 if (sdhci_esdhc_imx_probe_dt(pdev, host, boarddata) < 0) { 1104 err = sdhci_esdhc_imx_probe_dt(pdev, host, imx_data);
1035 if (!host->mmc->parent->platform_data) { 1105 else
1036 dev_err(mmc_dev(host->mmc), "no board data!\n"); 1106 err = sdhci_esdhc_imx_probe_nondt(pdev, host, imx_data);
1037 err = -EINVAL; 1107 if (err)
1038 goto disable_clk; 1108 goto disable_clk;
1039 }
1040 imx_data->boarddata = *((struct esdhc_platform_data *)
1041 host->mmc->parent->platform_data);
1042 dt = false;
1043 }
1044 /* write_protect */
1045 if (boarddata->wp_type == ESDHC_WP_GPIO && !dt) {
1046 err = mmc_gpio_request_ro(host->mmc, boarddata->wp_gpio);
1047 if (err) {
1048 dev_err(mmc_dev(host->mmc),
1049 "failed to request write-protect gpio!\n");
1050 goto disable_clk;
1051 }
1052 host->mmc->caps2 |= MMC_CAP2_RO_ACTIVE_HIGH;
1053 }
1054
1055 /* card_detect */
1056 switch (boarddata->cd_type) {
1057 case ESDHC_CD_GPIO:
1058 if (dt)
1059 break;
1060 err = mmc_gpio_request_cd(host->mmc, boarddata->cd_gpio, 0);
1061 if (err) {
1062 dev_err(mmc_dev(host->mmc),
1063 "failed to request card-detect gpio!\n");
1064 goto disable_clk;
1065 }
1066 /* fall through */
1067
1068 case ESDHC_CD_CONTROLLER:
1069 /* we have a working card_detect back */
1070 host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION;
1071 break;
1072
1073 case ESDHC_CD_PERMANENT:
1074 host->mmc->caps |= MMC_CAP_NONREMOVABLE;
1075 break;
1076
1077 case ESDHC_CD_NONE:
1078 break;
1079 }
1080
1081 switch (boarddata->max_bus_width) {
1082 case 8:
1083 host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_4_BIT_DATA;
1084 break;
1085 case 4:
1086 host->mmc->caps |= MMC_CAP_4_BIT_DATA;
1087 break;
1088 case 1:
1089 default:
1090 host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA;
1091 break;
1092 }
1093
1094 /* sdr50 and sdr104 needs work on 1.8v signal voltage */
1095 if ((boarddata->support_vsel) && esdhc_is_usdhc(imx_data) &&
1096 !IS_ERR(imx_data->pins_default)) {
1097 imx_data->pins_100mhz = pinctrl_lookup_state(imx_data->pinctrl,
1098 ESDHC_PINCTRL_STATE_100MHZ);
1099 imx_data->pins_200mhz = pinctrl_lookup_state(imx_data->pinctrl,
1100 ESDHC_PINCTRL_STATE_200MHZ);
1101 if (IS_ERR(imx_data->pins_100mhz) ||
1102 IS_ERR(imx_data->pins_200mhz)) {
1103 dev_warn(mmc_dev(host->mmc),
1104 "could not get ultra high speed state, work on normal mode\n");
1105 /* fall back to not support uhs by specify no 1.8v quirk */
1106 host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
1107 }
1108 } else {
1109 host->quirks2 |= SDHCI_QUIRK2_NO_1_8_V;
1110 }
1111 1109
1112 err = sdhci_add_host(host); 1110 err = sdhci_add_host(host);
1113 if (err) 1111 if (err)
diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h
index 3497cfaf683c..a870c42731d7 100644
--- a/drivers/mmc/host/sdhci-esdhc.h
+++ b/drivers/mmc/host/sdhci-esdhc.h
@@ -45,6 +45,6 @@
45#define ESDHC_DMA_SYSCTL 0x40c 45#define ESDHC_DMA_SYSCTL 0x40c
46#define ESDHC_DMA_SNOOP 0x00000040 46#define ESDHC_DMA_SNOOP 0x00000040
47 47
48#define ESDHC_HOST_CONTROL_RES 0x05 48#define ESDHC_HOST_CONTROL_RES 0x01
49 49
50#endif /* _DRIVERS_MMC_SDHCI_ESDHC_H */ 50#endif /* _DRIVERS_MMC_SDHCI_ESDHC_H */
diff --git a/drivers/mmc/host/sdhci-pxav3.c b/drivers/mmc/host/sdhci-pxav3.c
index 9cd5fc62f130..946d37f94a31 100644
--- a/drivers/mmc/host/sdhci-pxav3.c
+++ b/drivers/mmc/host/sdhci-pxav3.c
@@ -411,6 +411,7 @@ static int sdhci_pxav3_probe(struct platform_device *pdev)
411 goto err_of_parse; 411 goto err_of_parse;
412 sdhci_get_of_property(pdev); 412 sdhci_get_of_property(pdev);
413 pdata = pxav3_get_mmc_pdata(dev); 413 pdata = pxav3_get_mmc_pdata(dev);
414 pdev->dev.platform_data = pdata;
414 } else if (pdata) { 415 } else if (pdata) {
415 /* on-chip device */ 416 /* on-chip device */
416 if (pdata->flags & PXA_FLAG_CARD_PERMANENT) 417 if (pdata->flags & PXA_FLAG_CARD_PERMANENT)
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index bc1445238fb3..1dbe93232030 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -2866,6 +2866,7 @@ int sdhci_add_host(struct sdhci_host *host)
2866 u32 max_current_caps; 2866 u32 max_current_caps;
2867 unsigned int ocr_avail; 2867 unsigned int ocr_avail;
2868 unsigned int override_timeout_clk; 2868 unsigned int override_timeout_clk;
2869 u32 max_clk;
2869 int ret; 2870 int ret;
2870 2871
2871 WARN_ON(host == NULL); 2872 WARN_ON(host == NULL);
@@ -2978,8 +2979,11 @@ int sdhci_add_host(struct sdhci_host *host)
2978 GFP_KERNEL); 2979 GFP_KERNEL);
2979 host->align_buffer = kmalloc(host->align_buffer_sz, GFP_KERNEL); 2980 host->align_buffer = kmalloc(host->align_buffer_sz, GFP_KERNEL);
2980 if (!host->adma_table || !host->align_buffer) { 2981 if (!host->adma_table || !host->align_buffer) {
2981 dma_free_coherent(mmc_dev(mmc), host->adma_table_sz, 2982 if (host->adma_table)
2982 host->adma_table, host->adma_addr); 2983 dma_free_coherent(mmc_dev(mmc),
2984 host->adma_table_sz,
2985 host->adma_table,
2986 host->adma_addr);
2983 kfree(host->align_buffer); 2987 kfree(host->align_buffer);
2984 pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n", 2988 pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n",
2985 mmc_hostname(mmc)); 2989 mmc_hostname(mmc));
@@ -3047,18 +3051,22 @@ int sdhci_add_host(struct sdhci_host *host)
3047 * Set host parameters. 3051 * Set host parameters.
3048 */ 3052 */
3049 mmc->ops = &sdhci_ops; 3053 mmc->ops = &sdhci_ops;
3050 mmc->f_max = host->max_clk; 3054 max_clk = host->max_clk;
3055
3051 if (host->ops->get_min_clock) 3056 if (host->ops->get_min_clock)
3052 mmc->f_min = host->ops->get_min_clock(host); 3057 mmc->f_min = host->ops->get_min_clock(host);
3053 else if (host->version >= SDHCI_SPEC_300) { 3058 else if (host->version >= SDHCI_SPEC_300) {
3054 if (host->clk_mul) { 3059 if (host->clk_mul) {
3055 mmc->f_min = (host->max_clk * host->clk_mul) / 1024; 3060 mmc->f_min = (host->max_clk * host->clk_mul) / 1024;
3056 mmc->f_max = host->max_clk * host->clk_mul; 3061 max_clk = host->max_clk * host->clk_mul;
3057 } else 3062 } else
3058 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300; 3063 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_300;
3059 } else 3064 } else
3060 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200; 3065 mmc->f_min = host->max_clk / SDHCI_MAX_DIV_SPEC_200;
3061 3066
3067 if (!mmc->f_max || (mmc->f_max && (mmc->f_max > max_clk)))
3068 mmc->f_max = max_clk;
3069
3062 if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) { 3070 if (!(host->quirks & SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK)) {
3063 host->timeout_clk = (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >> 3071 host->timeout_clk = (caps[0] & SDHCI_TIMEOUT_CLK_MASK) >>
3064 SDHCI_TIMEOUT_CLK_SHIFT; 3072 SDHCI_TIMEOUT_CLK_SHIFT;
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 317a49480475..a98dd4f1b0e3 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -625,6 +625,23 @@ static void bond_set_dev_addr(struct net_device *bond_dev,
625 call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev); 625 call_netdevice_notifiers(NETDEV_CHANGEADDR, bond_dev);
626} 626}
627 627
628static struct slave *bond_get_old_active(struct bonding *bond,
629 struct slave *new_active)
630{
631 struct slave *slave;
632 struct list_head *iter;
633
634 bond_for_each_slave(bond, slave, iter) {
635 if (slave == new_active)
636 continue;
637
638 if (ether_addr_equal(bond->dev->dev_addr, slave->dev->dev_addr))
639 return slave;
640 }
641
642 return NULL;
643}
644
628/* bond_do_fail_over_mac 645/* bond_do_fail_over_mac
629 * 646 *
630 * Perform special MAC address swapping for fail_over_mac settings 647 * Perform special MAC address swapping for fail_over_mac settings
@@ -652,6 +669,9 @@ static void bond_do_fail_over_mac(struct bonding *bond,
652 if (!new_active) 669 if (!new_active)
653 return; 670 return;
654 671
672 if (!old_active)
673 old_active = bond_get_old_active(bond, new_active);
674
655 if (old_active) { 675 if (old_active) {
656 ether_addr_copy(tmp_mac, new_active->dev->dev_addr); 676 ether_addr_copy(tmp_mac, new_active->dev->dev_addr);
657 ether_addr_copy(saddr.sa_data, 677 ether_addr_copy(saddr.sa_data,
@@ -766,6 +786,7 @@ static bool bond_should_notify_peers(struct bonding *bond)
766 slave ? slave->dev->name : "NULL"); 786 slave ? slave->dev->name : "NULL");
767 787
768 if (!slave || !bond->send_peer_notif || 788 if (!slave || !bond->send_peer_notif ||
789 !netif_carrier_ok(bond->dev) ||
769 test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state)) 790 test_bit(__LINK_STATE_LINKWATCH_PENDING, &slave->dev->state))
770 return false; 791 return false;
771 792
@@ -1725,9 +1746,16 @@ err_free:
1725 1746
1726err_undo_flags: 1747err_undo_flags:
1727 /* Enslave of first slave has failed and we need to fix master's mac */ 1748 /* Enslave of first slave has failed and we need to fix master's mac */
1728 if (!bond_has_slaves(bond) && 1749 if (!bond_has_slaves(bond)) {
1729 ether_addr_equal_64bits(bond_dev->dev_addr, slave_dev->dev_addr)) 1750 if (ether_addr_equal_64bits(bond_dev->dev_addr,
1730 eth_hw_addr_random(bond_dev); 1751 slave_dev->dev_addr))
1752 eth_hw_addr_random(bond_dev);
1753 if (bond_dev->type != ARPHRD_ETHER) {
1754 ether_setup(bond_dev);
1755 bond_dev->flags |= IFF_MASTER;
1756 bond_dev->priv_flags &= ~IFF_TX_SKB_SHARING;
1757 }
1758 }
1731 1759
1732 return res; 1760 return res;
1733} 1761}
@@ -1916,6 +1944,7 @@ static int bond_release_and_destroy(struct net_device *bond_dev,
1916 bond_dev->priv_flags |= IFF_DISABLE_NETPOLL; 1944 bond_dev->priv_flags |= IFF_DISABLE_NETPOLL;
1917 netdev_info(bond_dev, "Destroying bond %s\n", 1945 netdev_info(bond_dev, "Destroying bond %s\n",
1918 bond_dev->name); 1946 bond_dev->name);
1947 bond_remove_proc_entry(bond);
1919 unregister_netdevice(bond_dev); 1948 unregister_netdevice(bond_dev);
1920 } 1949 }
1921 return ret; 1950 return ret;
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index f4e40aa4d2a2..945c0955a967 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -577,10 +577,10 @@ static void at91_rx_overflow_err(struct net_device *dev)
577 577
578 cf->can_id |= CAN_ERR_CRTL; 578 cf->can_id |= CAN_ERR_CRTL;
579 cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; 579 cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
580 netif_receive_skb(skb);
581 580
582 stats->rx_packets++; 581 stats->rx_packets++;
583 stats->rx_bytes += cf->can_dlc; 582 stats->rx_bytes += cf->can_dlc;
583 netif_receive_skb(skb);
584} 584}
585 585
586/** 586/**
@@ -642,10 +642,10 @@ static void at91_read_msg(struct net_device *dev, unsigned int mb)
642 } 642 }
643 643
644 at91_read_mb(dev, mb, cf); 644 at91_read_mb(dev, mb, cf);
645 netif_receive_skb(skb);
646 645
647 stats->rx_packets++; 646 stats->rx_packets++;
648 stats->rx_bytes += cf->can_dlc; 647 stats->rx_bytes += cf->can_dlc;
648 netif_receive_skb(skb);
649 649
650 can_led_event(dev, CAN_LED_EVENT_RX); 650 can_led_event(dev, CAN_LED_EVENT_RX);
651} 651}
@@ -802,10 +802,10 @@ static int at91_poll_err(struct net_device *dev, int quota, u32 reg_sr)
802 return 0; 802 return 0;
803 803
804 at91_poll_err_frame(dev, cf, reg_sr); 804 at91_poll_err_frame(dev, cf, reg_sr);
805 netif_receive_skb(skb);
806 805
807 dev->stats.rx_packets++; 806 dev->stats.rx_packets++;
808 dev->stats.rx_bytes += cf->can_dlc; 807 dev->stats.rx_bytes += cf->can_dlc;
808 netif_receive_skb(skb);
809 809
810 return 1; 810 return 1;
811} 811}
@@ -1067,10 +1067,10 @@ static void at91_irq_err(struct net_device *dev)
1067 return; 1067 return;
1068 1068
1069 at91_irq_err_state(dev, cf, new_state); 1069 at91_irq_err_state(dev, cf, new_state);
1070 netif_rx(skb);
1071 1070
1072 dev->stats.rx_packets++; 1071 dev->stats.rx_packets++;
1073 dev->stats.rx_bytes += cf->can_dlc; 1072 dev->stats.rx_bytes += cf->can_dlc;
1073 netif_rx(skb);
1074 1074
1075 priv->can.state = new_state; 1075 priv->can.state = new_state;
1076} 1076}
diff --git a/drivers/net/can/bfin_can.c b/drivers/net/can/bfin_can.c
index 27ad312e7abf..57dadd52b428 100644
--- a/drivers/net/can/bfin_can.c
+++ b/drivers/net/can/bfin_can.c
@@ -424,10 +424,9 @@ static void bfin_can_rx(struct net_device *dev, u16 isrc)
424 cf->data[6 - i] = (6 - i) < cf->can_dlc ? (val >> 8) : 0; 424 cf->data[6 - i] = (6 - i) < cf->can_dlc ? (val >> 8) : 0;
425 } 425 }
426 426
427 netif_rx(skb);
428
429 stats->rx_packets++; 427 stats->rx_packets++;
430 stats->rx_bytes += cf->can_dlc; 428 stats->rx_bytes += cf->can_dlc;
429 netif_rx(skb);
431} 430}
432 431
433static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status) 432static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status)
@@ -508,10 +507,9 @@ static int bfin_can_err(struct net_device *dev, u16 isrc, u16 status)
508 507
509 priv->can.state = state; 508 priv->can.state = state;
510 509
511 netif_rx(skb);
512
513 stats->rx_packets++; 510 stats->rx_packets++;
514 stats->rx_bytes += cf->can_dlc; 511 stats->rx_bytes += cf->can_dlc;
512 netif_rx(skb);
515 513
516 return 0; 514 return 0;
517} 515}
diff --git a/drivers/net/can/cc770/cc770.c b/drivers/net/can/cc770/cc770.c
index c11d44984036..70a8cbb29e75 100644
--- a/drivers/net/can/cc770/cc770.c
+++ b/drivers/net/can/cc770/cc770.c
@@ -504,10 +504,10 @@ static void cc770_rx(struct net_device *dev, unsigned int mo, u8 ctrl1)
504 for (i = 0; i < cf->can_dlc; i++) 504 for (i = 0; i < cf->can_dlc; i++)
505 cf->data[i] = cc770_read_reg(priv, msgobj[mo].data[i]); 505 cf->data[i] = cc770_read_reg(priv, msgobj[mo].data[i]);
506 } 506 }
507 netif_rx(skb);
508 507
509 stats->rx_packets++; 508 stats->rx_packets++;
510 stats->rx_bytes += cf->can_dlc; 509 stats->rx_bytes += cf->can_dlc;
510 netif_rx(skb);
511} 511}
512 512
513static int cc770_err(struct net_device *dev, u8 status) 513static int cc770_err(struct net_device *dev, u8 status)
@@ -584,10 +584,10 @@ static int cc770_err(struct net_device *dev, u8 status)
584 } 584 }
585 } 585 }
586 586
587 netif_rx(skb);
588 587
589 stats->rx_packets++; 588 stats->rx_packets++;
590 stats->rx_bytes += cf->can_dlc; 589 stats->rx_bytes += cf->can_dlc;
590 netif_rx(skb);
591 591
592 return 0; 592 return 0;
593} 593}
diff --git a/drivers/net/can/flexcan.c b/drivers/net/can/flexcan.c
index 6201c5a1a884..b1e8d729851c 100644
--- a/drivers/net/can/flexcan.c
+++ b/drivers/net/can/flexcan.c
@@ -577,10 +577,10 @@ static int flexcan_poll_bus_err(struct net_device *dev, u32 reg_esr)
577 return 0; 577 return 0;
578 578
579 do_bus_err(dev, cf, reg_esr); 579 do_bus_err(dev, cf, reg_esr);
580 netif_receive_skb(skb);
581 580
582 dev->stats.rx_packets++; 581 dev->stats.rx_packets++;
583 dev->stats.rx_bytes += cf->can_dlc; 582 dev->stats.rx_bytes += cf->can_dlc;
583 netif_receive_skb(skb);
584 584
585 return 1; 585 return 1;
586} 586}
@@ -622,10 +622,9 @@ static int flexcan_poll_state(struct net_device *dev, u32 reg_esr)
622 if (unlikely(new_state == CAN_STATE_BUS_OFF)) 622 if (unlikely(new_state == CAN_STATE_BUS_OFF))
623 can_bus_off(dev); 623 can_bus_off(dev);
624 624
625 netif_receive_skb(skb);
626
627 dev->stats.rx_packets++; 625 dev->stats.rx_packets++;
628 dev->stats.rx_bytes += cf->can_dlc; 626 dev->stats.rx_bytes += cf->can_dlc;
627 netif_receive_skb(skb);
629 628
630 return 1; 629 return 1;
631} 630}
@@ -670,10 +669,10 @@ static int flexcan_read_frame(struct net_device *dev)
670 } 669 }
671 670
672 flexcan_read_fifo(dev, cf); 671 flexcan_read_fifo(dev, cf);
673 netif_receive_skb(skb);
674 672
675 stats->rx_packets++; 673 stats->rx_packets++;
676 stats->rx_bytes += cf->can_dlc; 674 stats->rx_bytes += cf->can_dlc;
675 netif_receive_skb(skb);
677 676
678 can_led_event(dev, CAN_LED_EVENT_RX); 677 can_led_event(dev, CAN_LED_EVENT_RX);
679 678
diff --git a/drivers/net/can/grcan.c b/drivers/net/can/grcan.c
index e3d7e22a4fa0..db9538d4b358 100644
--- a/drivers/net/can/grcan.c
+++ b/drivers/net/can/grcan.c
@@ -1216,11 +1216,12 @@ static int grcan_receive(struct net_device *dev, int budget)
1216 cf->data[i] = (u8)(slot[j] >> shift); 1216 cf->data[i] = (u8)(slot[j] >> shift);
1217 } 1217 }
1218 } 1218 }
1219 netif_receive_skb(skb);
1220 1219
1221 /* Update statistics and read pointer */ 1220 /* Update statistics and read pointer */
1222 stats->rx_packets++; 1221 stats->rx_packets++;
1223 stats->rx_bytes += cf->can_dlc; 1222 stats->rx_bytes += cf->can_dlc;
1223 netif_receive_skb(skb);
1224
1224 rd = grcan_ring_add(rd, GRCAN_MSG_SIZE, dma->rx.size); 1225 rd = grcan_ring_add(rd, GRCAN_MSG_SIZE, dma->rx.size);
1225 } 1226 }
1226 1227
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 32bd7f451aa4..7b92e911a616 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -377,10 +377,9 @@ static void sja1000_rx(struct net_device *dev)
377 /* release receive buffer */ 377 /* release receive buffer */
378 sja1000_write_cmdreg(priv, CMD_RRB); 378 sja1000_write_cmdreg(priv, CMD_RRB);
379 379
380 netif_rx(skb);
381
382 stats->rx_packets++; 380 stats->rx_packets++;
383 stats->rx_bytes += cf->can_dlc; 381 stats->rx_bytes += cf->can_dlc;
382 netif_rx(skb);
384 383
385 can_led_event(dev, CAN_LED_EVENT_RX); 384 can_led_event(dev, CAN_LED_EVENT_RX);
386} 385}
@@ -484,10 +483,9 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
484 can_bus_off(dev); 483 can_bus_off(dev);
485 } 484 }
486 485
487 netif_rx(skb);
488
489 stats->rx_packets++; 486 stats->rx_packets++;
490 stats->rx_bytes += cf->can_dlc; 487 stats->rx_bytes += cf->can_dlc;
488 netif_rx(skb);
491 489
492 return 0; 490 return 0;
493} 491}
diff --git a/drivers/net/can/slcan.c b/drivers/net/can/slcan.c
index a23a7af8eb9a..9a3f15cb7ef4 100644
--- a/drivers/net/can/slcan.c
+++ b/drivers/net/can/slcan.c
@@ -218,10 +218,10 @@ static void slc_bump(struct slcan *sl)
218 218
219 memcpy(skb_put(skb, sizeof(struct can_frame)), 219 memcpy(skb_put(skb, sizeof(struct can_frame)),
220 &cf, sizeof(struct can_frame)); 220 &cf, sizeof(struct can_frame));
221 netif_rx_ni(skb);
222 221
223 sl->dev->stats.rx_packets++; 222 sl->dev->stats.rx_packets++;
224 sl->dev->stats.rx_bytes += cf.can_dlc; 223 sl->dev->stats.rx_bytes += cf.can_dlc;
224 netif_rx_ni(skb);
225} 225}
226 226
227/* parse tty input stream */ 227/* parse tty input stream */
diff --git a/drivers/net/can/spi/mcp251x.c b/drivers/net/can/spi/mcp251x.c
index c1a95a34d62e..b7e83c212023 100644
--- a/drivers/net/can/spi/mcp251x.c
+++ b/drivers/net/can/spi/mcp251x.c
@@ -1086,8 +1086,8 @@ static int mcp251x_can_probe(struct spi_device *spi)
1086 if (ret) 1086 if (ret)
1087 goto out_clk; 1087 goto out_clk;
1088 1088
1089 priv->power = devm_regulator_get(&spi->dev, "vdd"); 1089 priv->power = devm_regulator_get_optional(&spi->dev, "vdd");
1090 priv->transceiver = devm_regulator_get(&spi->dev, "xceiver"); 1090 priv->transceiver = devm_regulator_get_optional(&spi->dev, "xceiver");
1091 if ((PTR_ERR(priv->power) == -EPROBE_DEFER) || 1091 if ((PTR_ERR(priv->power) == -EPROBE_DEFER) ||
1092 (PTR_ERR(priv->transceiver) == -EPROBE_DEFER)) { 1092 (PTR_ERR(priv->transceiver) == -EPROBE_DEFER)) {
1093 ret = -EPROBE_DEFER; 1093 ret = -EPROBE_DEFER;
@@ -1222,17 +1222,16 @@ static int __maybe_unused mcp251x_can_resume(struct device *dev)
1222 struct spi_device *spi = to_spi_device(dev); 1222 struct spi_device *spi = to_spi_device(dev);
1223 struct mcp251x_priv *priv = spi_get_drvdata(spi); 1223 struct mcp251x_priv *priv = spi_get_drvdata(spi);
1224 1224
1225 if (priv->after_suspend & AFTER_SUSPEND_POWER) { 1225 if (priv->after_suspend & AFTER_SUSPEND_POWER)
1226 mcp251x_power_enable(priv->power, 1); 1226 mcp251x_power_enable(priv->power, 1);
1227
1228 if (priv->after_suspend & AFTER_SUSPEND_UP) {
1229 mcp251x_power_enable(priv->transceiver, 1);
1227 queue_work(priv->wq, &priv->restart_work); 1230 queue_work(priv->wq, &priv->restart_work);
1228 } else { 1231 } else {
1229 if (priv->after_suspend & AFTER_SUSPEND_UP) { 1232 priv->after_suspend = 0;
1230 mcp251x_power_enable(priv->transceiver, 1);
1231 queue_work(priv->wq, &priv->restart_work);
1232 } else {
1233 priv->after_suspend = 0;
1234 }
1235 } 1233 }
1234
1236 priv->force_quit = 0; 1235 priv->force_quit = 0;
1237 enable_irq(spi->irq); 1236 enable_irq(spi->irq);
1238 return 0; 1237 return 0;
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
index e95a9e1a889f..cf345cbfe819 100644
--- a/drivers/net/can/ti_hecc.c
+++ b/drivers/net/can/ti_hecc.c
@@ -747,9 +747,9 @@ static int ti_hecc_error(struct net_device *ndev, int int_status,
747 } 747 }
748 } 748 }
749 749
750 netif_rx(skb);
751 stats->rx_packets++; 750 stats->rx_packets++;
752 stats->rx_bytes += cf->can_dlc; 751 stats->rx_bytes += cf->can_dlc;
752 netif_rx(skb);
753 753
754 return 0; 754 return 0;
755} 755}
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index 866bac0ae7e9..2d390384ef3b 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -324,10 +324,9 @@ static void ems_usb_rx_can_msg(struct ems_usb *dev, struct ems_cpc_msg *msg)
324 cf->data[i] = msg->msg.can_msg.msg[i]; 324 cf->data[i] = msg->msg.can_msg.msg[i];
325 } 325 }
326 326
327 netif_rx(skb);
328
329 stats->rx_packets++; 327 stats->rx_packets++;
330 stats->rx_bytes += cf->can_dlc; 328 stats->rx_bytes += cf->can_dlc;
329 netif_rx(skb);
331} 330}
332 331
333static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg) 332static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
@@ -400,10 +399,9 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
400 stats->rx_errors++; 399 stats->rx_errors++;
401 } 400 }
402 401
403 netif_rx(skb);
404
405 stats->rx_packets++; 402 stats->rx_packets++;
406 stats->rx_bytes += cf->can_dlc; 403 stats->rx_bytes += cf->can_dlc;
404 netif_rx(skb);
407} 405}
408 406
409/* 407/*
diff --git a/drivers/net/can/usb/esd_usb2.c b/drivers/net/can/usb/esd_usb2.c
index 411c1af92c62..0e5a4493ba4f 100644
--- a/drivers/net/can/usb/esd_usb2.c
+++ b/drivers/net/can/usb/esd_usb2.c
@@ -301,13 +301,12 @@ static void esd_usb2_rx_event(struct esd_usb2_net_priv *priv,
301 cf->data[7] = rxerr; 301 cf->data[7] = rxerr;
302 } 302 }
303 303
304 netif_rx(skb);
305
306 priv->bec.txerr = txerr; 304 priv->bec.txerr = txerr;
307 priv->bec.rxerr = rxerr; 305 priv->bec.rxerr = rxerr;
308 306
309 stats->rx_packets++; 307 stats->rx_packets++;
310 stats->rx_bytes += cf->can_dlc; 308 stats->rx_bytes += cf->can_dlc;
309 netif_rx(skb);
311 } 310 }
312} 311}
313 312
@@ -347,10 +346,9 @@ static void esd_usb2_rx_can_msg(struct esd_usb2_net_priv *priv,
347 cf->data[i] = msg->msg.rx.data[i]; 346 cf->data[i] = msg->msg.rx.data[i];
348 } 347 }
349 348
350 netif_rx(skb);
351
352 stats->rx_packets++; 349 stats->rx_packets++;
353 stats->rx_bytes += cf->can_dlc; 350 stats->rx_bytes += cf->can_dlc;
351 netif_rx(skb);
354 } 352 }
355 353
356 return; 354 return;
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb.c b/drivers/net/can/usb/peak_usb/pcan_usb.c
index 72427f21edff..6b94007ae052 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb.c
@@ -526,9 +526,9 @@ static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n,
526 hwts->hwtstamp = timeval_to_ktime(tv); 526 hwts->hwtstamp = timeval_to_ktime(tv);
527 } 527 }
528 528
529 netif_rx(skb);
530 mc->netdev->stats.rx_packets++; 529 mc->netdev->stats.rx_packets++;
531 mc->netdev->stats.rx_bytes += cf->can_dlc; 530 mc->netdev->stats.rx_bytes += cf->can_dlc;
531 netif_rx(skb);
532 532
533 return 0; 533 return 0;
534} 534}
@@ -659,12 +659,11 @@ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len)
659 hwts = skb_hwtstamps(skb); 659 hwts = skb_hwtstamps(skb);
660 hwts->hwtstamp = timeval_to_ktime(tv); 660 hwts->hwtstamp = timeval_to_ktime(tv);
661 661
662 /* push the skb */
663 netif_rx(skb);
664
665 /* update statistics */ 662 /* update statistics */
666 mc->netdev->stats.rx_packets++; 663 mc->netdev->stats.rx_packets++;
667 mc->netdev->stats.rx_bytes += cf->can_dlc; 664 mc->netdev->stats.rx_bytes += cf->can_dlc;
665 /* push the skb */
666 netif_rx(skb);
668 667
669 return 0; 668 return 0;
670 669
diff --git a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
index dec51717635e..7d61b3279798 100644
--- a/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
+++ b/drivers/net/can/usb/peak_usb/pcan_usb_pro.c
@@ -553,9 +553,9 @@ static int pcan_usb_pro_handle_canmsg(struct pcan_usb_pro_interface *usb_if,
553 hwts = skb_hwtstamps(skb); 553 hwts = skb_hwtstamps(skb);
554 hwts->hwtstamp = timeval_to_ktime(tv); 554 hwts->hwtstamp = timeval_to_ktime(tv);
555 555
556 netif_rx(skb);
557 netdev->stats.rx_packets++; 556 netdev->stats.rx_packets++;
558 netdev->stats.rx_bytes += can_frame->can_dlc; 557 netdev->stats.rx_bytes += can_frame->can_dlc;
558 netif_rx(skb);
559 559
560 return 0; 560 return 0;
561} 561}
@@ -670,9 +670,9 @@ static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if,
670 peak_usb_get_ts_tv(&usb_if->time_ref, le32_to_cpu(er->ts32), &tv); 670 peak_usb_get_ts_tv(&usb_if->time_ref, le32_to_cpu(er->ts32), &tv);
671 hwts = skb_hwtstamps(skb); 671 hwts = skb_hwtstamps(skb);
672 hwts->hwtstamp = timeval_to_ktime(tv); 672 hwts->hwtstamp = timeval_to_ktime(tv);
673 netif_rx(skb);
674 netdev->stats.rx_packets++; 673 netdev->stats.rx_packets++;
675 netdev->stats.rx_bytes += can_frame->can_dlc; 674 netdev->stats.rx_bytes += can_frame->can_dlc;
675 netif_rx(skb);
676 676
677 return 0; 677 return 0;
678} 678}
diff --git a/drivers/net/can/usb/usb_8dev.c b/drivers/net/can/usb/usb_8dev.c
index dd52c7a4c80d..de95b1ccba3e 100644
--- a/drivers/net/can/usb/usb_8dev.c
+++ b/drivers/net/can/usb/usb_8dev.c
@@ -461,10 +461,9 @@ static void usb_8dev_rx_err_msg(struct usb_8dev_priv *priv,
461 priv->bec.txerr = txerr; 461 priv->bec.txerr = txerr;
462 priv->bec.rxerr = rxerr; 462 priv->bec.rxerr = rxerr;
463 463
464 netif_rx(skb);
465
466 stats->rx_packets++; 464 stats->rx_packets++;
467 stats->rx_bytes += cf->can_dlc; 465 stats->rx_bytes += cf->can_dlc;
466 netif_rx(skb);
468} 467}
469 468
470/* Read data and status frames */ 469/* Read data and status frames */
@@ -494,10 +493,9 @@ static void usb_8dev_rx_can_msg(struct usb_8dev_priv *priv,
494 else 493 else
495 memcpy(cf->data, msg->data, cf->can_dlc); 494 memcpy(cf->data, msg->data, cf->can_dlc);
496 495
497 netif_rx(skb);
498
499 stats->rx_packets++; 496 stats->rx_packets++;
500 stats->rx_bytes += cf->can_dlc; 497 stats->rx_bytes += cf->can_dlc;
498 netif_rx(skb);
501 499
502 can_led_event(priv->netdev, CAN_LED_EVENT_RX); 500 can_led_event(priv->netdev, CAN_LED_EVENT_RX);
503 } else { 501 } else {
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 972982f8bea7..079897b3a955 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -696,9 +696,20 @@ static int bcm_sf2_sw_setup(struct dsa_switch *ds)
696 } 696 }
697 697
698 /* Include the pseudo-PHY address and the broadcast PHY address to 698 /* Include the pseudo-PHY address and the broadcast PHY address to
699 * divert reads towards our workaround 699 * divert reads towards our workaround. This is only required for
700 * 7445D0, since 7445E0 disconnects the internal switch pseudo-PHY such
701 * that we can use the regular SWITCH_MDIO master controller instead.
702 *
703 * By default, DSA initializes ds->phys_mii_mask to ds->phys_port_mask
704 * to have a 1:1 mapping between Port address and PHY address in order
705 * to utilize the slave_mii_bus instance to read from Port PHYs. This is
706 * not what we want here, so we initialize phys_mii_mask 0 to always
707 * utilize the "master" MDIO bus backed by the "mdio-unimac" driver.
700 */ 708 */
701 ds->phys_mii_mask |= ((1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0)); 709 if (of_machine_is_compatible("brcm,bcm7445d0"))
710 ds->phys_mii_mask |= ((1 << BRCM_PSEUDO_PHY_ADDR) | (1 << 0));
711 else
712 ds->phys_mii_mask = 0;
702 713
703 rev = reg_readl(priv, REG_SWITCH_REVISION); 714 rev = reg_readl(priv, REG_SWITCH_REVISION);
704 priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) & 715 priv->hw_params.top_rev = (rev >> SWITCH_TOP_REV_SHIFT) &
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index fd8547c2b79d..561342466076 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -1163,7 +1163,7 @@ int mv88e6xxx_leave_bridge(struct dsa_switch *ds, int port, u32 br_port_mask)
1163 1163
1164 newfid = __ffs(ps->fid_mask); 1164 newfid = __ffs(ps->fid_mask);
1165 ps->fid[port] = newfid; 1165 ps->fid[port] = newfid;
1166 ps->fid_mask &= (1 << newfid); 1166 ps->fid_mask &= ~(1 << newfid);
1167 ps->bridge_mask[fid] &= ~(1 << port); 1167 ps->bridge_mask[fid] &= ~(1 << port);
1168 ps->bridge_mask[newfid] = 1 << port; 1168 ps->bridge_mask[newfid] = 1 << port;
1169 1169
diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c
index 2d1ce3c5d0dd..753887d02b46 100644
--- a/drivers/net/ethernet/3com/3c59x.c
+++ b/drivers/net/ethernet/3com/3c59x.c
@@ -1763,16 +1763,9 @@ vortex_open(struct net_device *dev)
1763 vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE)); 1763 vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
1764 } 1764 }
1765 if (i != RX_RING_SIZE) { 1765 if (i != RX_RING_SIZE) {
1766 int j;
1767 pr_emerg("%s: no memory for rx ring\n", dev->name); 1766 pr_emerg("%s: no memory for rx ring\n", dev->name);
1768 for (j = 0; j < i; j++) {
1769 if (vp->rx_skbuff[j]) {
1770 dev_kfree_skb(vp->rx_skbuff[j]);
1771 vp->rx_skbuff[j] = NULL;
1772 }
1773 }
1774 retval = -ENOMEM; 1767 retval = -ENOMEM;
1775 goto err_free_irq; 1768 goto err_free_skb;
1776 } 1769 }
1777 /* Wrap the ring. */ 1770 /* Wrap the ring. */
1778 vp->rx_ring[i-1].next = cpu_to_le32(vp->rx_ring_dma); 1771 vp->rx_ring[i-1].next = cpu_to_le32(vp->rx_ring_dma);
@@ -1782,7 +1775,13 @@ vortex_open(struct net_device *dev)
1782 if (!retval) 1775 if (!retval)
1783 goto out; 1776 goto out;
1784 1777
1785err_free_irq: 1778err_free_skb:
1779 for (i = 0; i < RX_RING_SIZE; i++) {
1780 if (vp->rx_skbuff[i]) {
1781 dev_kfree_skb(vp->rx_skbuff[i]);
1782 vp->rx_skbuff[i] = NULL;
1783 }
1784 }
1786 free_irq(dev->irq, dev); 1785 free_irq(dev->irq, dev);
1787err: 1786err:
1788 if (vortex_debug > 1) 1787 if (vortex_debug > 1)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index a90d7364334f..f7fbdc9d1325 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -262,9 +262,9 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata,
262 if (likely(skb)) { 262 if (likely(skb)) {
263 (*pkts_compl)++; 263 (*pkts_compl)++;
264 (*bytes_compl) += skb->len; 264 (*bytes_compl) += skb->len;
265 dev_kfree_skb_any(skb);
265 } 266 }
266 267
267 dev_kfree_skb_any(skb);
268 tx_buf->first_bd = 0; 268 tx_buf->first_bd = 0;
269 tx_buf->skb = NULL; 269 tx_buf->skb = NULL;
270 270
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
index 76b9052a961c..5907c821d131 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
@@ -1718,6 +1718,22 @@ static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
1718 offset += sizeof(u32); 1718 offset += sizeof(u32);
1719 data_buf += sizeof(u32); 1719 data_buf += sizeof(u32);
1720 written_so_far += sizeof(u32); 1720 written_so_far += sizeof(u32);
1721
1722 /* At end of each 4Kb page, release nvram lock to allow MFW
1723 * chance to take it for its own use.
1724 */
1725 if ((cmd_flags & MCPR_NVM_COMMAND_LAST) &&
1726 (written_so_far < buf_size)) {
1727 DP(BNX2X_MSG_ETHTOOL | BNX2X_MSG_NVM,
1728 "Releasing NVM lock after offset 0x%x\n",
1729 (u32)(offset - sizeof(u32)));
1730 bnx2x_release_nvram_lock(bp);
1731 usleep_range(1000, 2000);
1732 rc = bnx2x_acquire_nvram_lock(bp);
1733 if (rc)
1734 return rc;
1735 }
1736
1721 cmd_flags = 0; 1737 cmd_flags = 0;
1722 } 1738 }
1723 1739
diff --git a/drivers/net/ethernet/brocade/bna/bnad.c b/drivers/net/ethernet/brocade/bna/bnad.c
index 0612b19f6313..506047c38607 100644
--- a/drivers/net/ethernet/brocade/bna/bnad.c
+++ b/drivers/net/ethernet/brocade/bna/bnad.c
@@ -676,6 +676,7 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
676 if (!next_cmpl->valid) 676 if (!next_cmpl->valid)
677 break; 677 break;
678 } 678 }
679 packets++;
679 680
680 /* TODO: BNA_CQ_EF_LOCAL ? */ 681 /* TODO: BNA_CQ_EF_LOCAL ? */
681 if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR | 682 if (unlikely(flags & (BNA_CQ_EF_MAC_ERROR |
@@ -692,7 +693,6 @@ bnad_cq_process(struct bnad *bnad, struct bna_ccb *ccb, int budget)
692 else 693 else
693 bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len); 694 bnad_cq_setup_skb_frags(rcb, skb, sop_ci, nvecs, len);
694 695
695 packets++;
696 rcb->rxq->rx_packets++; 696 rcb->rxq->rx_packets++;
697 rcb->rxq->rx_bytes += totlen; 697 rcb->rxq->rx_bytes += totlen;
698 ccb->bytes_per_intr += totlen; 698 ccb->bytes_per_intr += totlen;
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index caeb39561567..bf9eb2ecf960 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -104,6 +104,57 @@ static void *macb_rx_buffer(struct macb *bp, unsigned int index)
104 return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index); 104 return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index);
105} 105}
106 106
107/* I/O accessors */
108static u32 hw_readl_native(struct macb *bp, int offset)
109{
110 return __raw_readl(bp->regs + offset);
111}
112
113static void hw_writel_native(struct macb *bp, int offset, u32 value)
114{
115 __raw_writel(value, bp->regs + offset);
116}
117
118static u32 hw_readl(struct macb *bp, int offset)
119{
120 return readl_relaxed(bp->regs + offset);
121}
122
123static void hw_writel(struct macb *bp, int offset, u32 value)
124{
125 writel_relaxed(value, bp->regs + offset);
126}
127
128/*
129 * Find the CPU endianness by using the loopback bit of NCR register. When the
130 * CPU is in big endian we need to program swaped mode for management
131 * descriptor access.
132 */
133static bool hw_is_native_io(void __iomem *addr)
134{
135 u32 value = MACB_BIT(LLB);
136
137 __raw_writel(value, addr + MACB_NCR);
138 value = __raw_readl(addr + MACB_NCR);
139
140 /* Write 0 back to disable everything */
141 __raw_writel(0, addr + MACB_NCR);
142
143 return value == MACB_BIT(LLB);
144}
145
146static bool hw_is_gem(void __iomem *addr, bool native_io)
147{
148 u32 id;
149
150 if (native_io)
151 id = __raw_readl(addr + MACB_MID);
152 else
153 id = readl_relaxed(addr + MACB_MID);
154
155 return MACB_BFEXT(IDNUM, id) >= 0x2;
156}
157
107static void macb_set_hwaddr(struct macb *bp) 158static void macb_set_hwaddr(struct macb *bp)
108{ 159{
109 u32 bottom; 160 u32 bottom;
@@ -160,7 +211,7 @@ static void macb_get_hwaddr(struct macb *bp)
160 } 211 }
161 } 212 }
162 213
163 netdev_info(bp->dev, "invalid hw address, using random\n"); 214 dev_info(&bp->pdev->dev, "invalid hw address, using random\n");
164 eth_hw_addr_random(bp->dev); 215 eth_hw_addr_random(bp->dev);
165} 216}
166 217
@@ -252,7 +303,6 @@ static void macb_handle_link_change(struct net_device *dev)
252 struct macb *bp = netdev_priv(dev); 303 struct macb *bp = netdev_priv(dev);
253 struct phy_device *phydev = bp->phy_dev; 304 struct phy_device *phydev = bp->phy_dev;
254 unsigned long flags; 305 unsigned long flags;
255
256 int status_change = 0; 306 int status_change = 0;
257 307
258 spin_lock_irqsave(&bp->lock, flags); 308 spin_lock_irqsave(&bp->lock, flags);
@@ -449,14 +499,14 @@ err_out:
449 499
450static void macb_update_stats(struct macb *bp) 500static void macb_update_stats(struct macb *bp)
451{ 501{
452 u32 __iomem *reg = bp->regs + MACB_PFR;
453 u32 *p = &bp->hw_stats.macb.rx_pause_frames; 502 u32 *p = &bp->hw_stats.macb.rx_pause_frames;
454 u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1; 503 u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
504 int offset = MACB_PFR;
455 505
456 WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4); 506 WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
457 507
458 for(; p < end; p++, reg++) 508 for(; p < end; p++, offset += 4)
459 *p += readl_relaxed(reg); 509 *p += bp->macb_reg_readl(bp, offset);
460} 510}
461 511
462static int macb_halt_tx(struct macb *bp) 512static int macb_halt_tx(struct macb *bp)
@@ -1107,12 +1157,6 @@ static void macb_poll_controller(struct net_device *dev)
1107} 1157}
1108#endif 1158#endif
1109 1159
1110static inline unsigned int macb_count_tx_descriptors(struct macb *bp,
1111 unsigned int len)
1112{
1113 return (len + bp->max_tx_length - 1) / bp->max_tx_length;
1114}
1115
1116static unsigned int macb_tx_map(struct macb *bp, 1160static unsigned int macb_tx_map(struct macb *bp,
1117 struct macb_queue *queue, 1161 struct macb_queue *queue,
1118 struct sk_buff *skb) 1162 struct sk_buff *skb)
@@ -1263,11 +1307,11 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
1263 * socket buffer: skb fragments of jumbo frames may need to be 1307 * socket buffer: skb fragments of jumbo frames may need to be
1264 * splitted into many buffer descriptors. 1308 * splitted into many buffer descriptors.
1265 */ 1309 */
1266 count = macb_count_tx_descriptors(bp, skb_headlen(skb)); 1310 count = DIV_ROUND_UP(skb_headlen(skb), bp->max_tx_length);
1267 nr_frags = skb_shinfo(skb)->nr_frags; 1311 nr_frags = skb_shinfo(skb)->nr_frags;
1268 for (f = 0; f < nr_frags; f++) { 1312 for (f = 0; f < nr_frags; f++) {
1269 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]); 1313 frag_size = skb_frag_size(&skb_shinfo(skb)->frags[f]);
1270 count += macb_count_tx_descriptors(bp, frag_size); 1314 count += DIV_ROUND_UP(frag_size, bp->max_tx_length);
1271 } 1315 }
1272 1316
1273 spin_lock_irqsave(&bp->lock, flags); 1317 spin_lock_irqsave(&bp->lock, flags);
@@ -1603,7 +1647,6 @@ static u32 macb_dbw(struct macb *bp)
1603static void macb_configure_dma(struct macb *bp) 1647static void macb_configure_dma(struct macb *bp)
1604{ 1648{
1605 u32 dmacfg; 1649 u32 dmacfg;
1606 u32 tmp, ncr;
1607 1650
1608 if (macb_is_gem(bp)) { 1651 if (macb_is_gem(bp)) {
1609 dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L); 1652 dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
@@ -1613,22 +1656,11 @@ static void macb_configure_dma(struct macb *bp)
1613 dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L); 1656 dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
1614 dmacfg &= ~GEM_BIT(ENDIA_PKT); 1657 dmacfg &= ~GEM_BIT(ENDIA_PKT);
1615 1658
1616 /* Find the CPU endianness by using the loopback bit of net_ctrl 1659 if (bp->native_io)
1617 * register. save it first. When the CPU is in big endian we
1618 * need to program swaped mode for management descriptor access.
1619 */
1620 ncr = macb_readl(bp, NCR);
1621 __raw_writel(MACB_BIT(LLB), bp->regs + MACB_NCR);
1622 tmp = __raw_readl(bp->regs + MACB_NCR);
1623
1624 if (tmp == MACB_BIT(LLB))
1625 dmacfg &= ~GEM_BIT(ENDIA_DESC); 1660 dmacfg &= ~GEM_BIT(ENDIA_DESC);
1626 else 1661 else
1627 dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */ 1662 dmacfg |= GEM_BIT(ENDIA_DESC); /* CPU in big endian */
1628 1663
1629 /* Restore net_ctrl */
1630 macb_writel(bp, NCR, ncr);
1631
1632 if (bp->dev->features & NETIF_F_HW_CSUM) 1664 if (bp->dev->features & NETIF_F_HW_CSUM)
1633 dmacfg |= GEM_BIT(TXCOEN); 1665 dmacfg |= GEM_BIT(TXCOEN);
1634 else 1666 else
@@ -1897,19 +1929,19 @@ static int macb_change_mtu(struct net_device *dev, int new_mtu)
1897 1929
1898static void gem_update_stats(struct macb *bp) 1930static void gem_update_stats(struct macb *bp)
1899{ 1931{
1900 int i; 1932 unsigned int i;
1901 u32 *p = &bp->hw_stats.gem.tx_octets_31_0; 1933 u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
1902 1934
1903 for (i = 0; i < GEM_STATS_LEN; ++i, ++p) { 1935 for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
1904 u32 offset = gem_statistics[i].offset; 1936 u32 offset = gem_statistics[i].offset;
1905 u64 val = readl_relaxed(bp->regs + offset); 1937 u64 val = bp->macb_reg_readl(bp, offset);
1906 1938
1907 bp->ethtool_stats[i] += val; 1939 bp->ethtool_stats[i] += val;
1908 *p += val; 1940 *p += val;
1909 1941
1910 if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) { 1942 if (offset == GEM_OCTTXL || offset == GEM_OCTRXL) {
1911 /* Add GEM_OCTTXH, GEM_OCTRXH */ 1943 /* Add GEM_OCTTXH, GEM_OCTRXH */
1912 val = readl_relaxed(bp->regs + offset + 4); 1944 val = bp->macb_reg_readl(bp, offset + 4);
1913 bp->ethtool_stats[i] += ((u64)val) << 32; 1945 bp->ethtool_stats[i] += ((u64)val) << 32;
1914 *(++p) += val; 1946 *(++p) += val;
1915 } 1947 }
@@ -1976,7 +2008,7 @@ static int gem_get_sset_count(struct net_device *dev, int sset)
1976 2008
1977static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p) 2009static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
1978{ 2010{
1979 int i; 2011 unsigned int i;
1980 2012
1981 switch (sset) { 2013 switch (sset) {
1982 case ETH_SS_STATS: 2014 case ETH_SS_STATS:
@@ -2190,7 +2222,7 @@ static void macb_configure_caps(struct macb *bp, const struct macb_config *dt_co
2190 if (dt_conf) 2222 if (dt_conf)
2191 bp->caps = dt_conf->caps; 2223 bp->caps = dt_conf->caps;
2192 2224
2193 if (macb_is_gem_hw(bp->regs)) { 2225 if (hw_is_gem(bp->regs, bp->native_io)) {
2194 bp->caps |= MACB_CAPS_MACB_IS_GEM; 2226 bp->caps |= MACB_CAPS_MACB_IS_GEM;
2195 2227
2196 dcfg = gem_readl(bp, DCFG1); 2228 dcfg = gem_readl(bp, DCFG1);
@@ -2201,10 +2233,11 @@ static void macb_configure_caps(struct macb *bp, const struct macb_config *dt_co
2201 bp->caps |= MACB_CAPS_FIFO_MODE; 2233 bp->caps |= MACB_CAPS_FIFO_MODE;
2202 } 2234 }
2203 2235
2204 netdev_dbg(bp->dev, "Cadence caps 0x%08x\n", bp->caps); 2236 dev_dbg(&bp->pdev->dev, "Cadence caps 0x%08x\n", bp->caps);
2205} 2237}
2206 2238
2207static void macb_probe_queues(void __iomem *mem, 2239static void macb_probe_queues(void __iomem *mem,
2240 bool native_io,
2208 unsigned int *queue_mask, 2241 unsigned int *queue_mask,
2209 unsigned int *num_queues) 2242 unsigned int *num_queues)
2210{ 2243{
@@ -2219,7 +2252,7 @@ static void macb_probe_queues(void __iomem *mem,
2219 * we are early in the probe process and don't have the 2252 * we are early in the probe process and don't have the
2220 * MACB_CAPS_MACB_IS_GEM flag positioned 2253 * MACB_CAPS_MACB_IS_GEM flag positioned
2221 */ 2254 */
2222 if (!macb_is_gem_hw(mem)) 2255 if (!hw_is_gem(mem, native_io))
2223 return; 2256 return;
2224 2257
2225 /* bit 0 is never set but queue 0 always exists */ 2258 /* bit 0 is never set but queue 0 always exists */
@@ -2786,6 +2819,7 @@ static int macb_probe(struct platform_device *pdev)
2786 struct clk *pclk, *hclk, *tx_clk; 2819 struct clk *pclk, *hclk, *tx_clk;
2787 unsigned int queue_mask, num_queues; 2820 unsigned int queue_mask, num_queues;
2788 struct macb_platform_data *pdata; 2821 struct macb_platform_data *pdata;
2822 bool native_io;
2789 struct phy_device *phydev; 2823 struct phy_device *phydev;
2790 struct net_device *dev; 2824 struct net_device *dev;
2791 struct resource *regs; 2825 struct resource *regs;
@@ -2794,6 +2828,11 @@ static int macb_probe(struct platform_device *pdev)
2794 struct macb *bp; 2828 struct macb *bp;
2795 int err; 2829 int err;
2796 2830
2831 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2832 mem = devm_ioremap_resource(&pdev->dev, regs);
2833 if (IS_ERR(mem))
2834 return PTR_ERR(mem);
2835
2797 if (np) { 2836 if (np) {
2798 const struct of_device_id *match; 2837 const struct of_device_id *match;
2799 2838
@@ -2809,14 +2848,9 @@ static int macb_probe(struct platform_device *pdev)
2809 if (err) 2848 if (err)
2810 return err; 2849 return err;
2811 2850
2812 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2851 native_io = hw_is_native_io(mem);
2813 mem = devm_ioremap_resource(&pdev->dev, regs);
2814 if (IS_ERR(mem)) {
2815 err = PTR_ERR(mem);
2816 goto err_disable_clocks;
2817 }
2818 2852
2819 macb_probe_queues(mem, &queue_mask, &num_queues); 2853 macb_probe_queues(mem, native_io, &queue_mask, &num_queues);
2820 dev = alloc_etherdev_mq(sizeof(*bp), num_queues); 2854 dev = alloc_etherdev_mq(sizeof(*bp), num_queues);
2821 if (!dev) { 2855 if (!dev) {
2822 err = -ENOMEM; 2856 err = -ENOMEM;
@@ -2831,6 +2865,14 @@ static int macb_probe(struct platform_device *pdev)
2831 bp->pdev = pdev; 2865 bp->pdev = pdev;
2832 bp->dev = dev; 2866 bp->dev = dev;
2833 bp->regs = mem; 2867 bp->regs = mem;
2868 bp->native_io = native_io;
2869 if (native_io) {
2870 bp->macb_reg_readl = hw_readl_native;
2871 bp->macb_reg_writel = hw_writel_native;
2872 } else {
2873 bp->macb_reg_readl = hw_readl;
2874 bp->macb_reg_writel = hw_writel;
2875 }
2834 bp->num_queues = num_queues; 2876 bp->num_queues = num_queues;
2835 bp->queue_mask = queue_mask; 2877 bp->queue_mask = queue_mask;
2836 if (macb_config) 2878 if (macb_config)
@@ -2838,9 +2880,8 @@ static int macb_probe(struct platform_device *pdev)
2838 bp->pclk = pclk; 2880 bp->pclk = pclk;
2839 bp->hclk = hclk; 2881 bp->hclk = hclk;
2840 bp->tx_clk = tx_clk; 2882 bp->tx_clk = tx_clk;
2841 if (macb_config->jumbo_max_len) { 2883 if (macb_config)
2842 bp->jumbo_max_len = macb_config->jumbo_max_len; 2884 bp->jumbo_max_len = macb_config->jumbo_max_len;
2843 }
2844 2885
2845 spin_lock_init(&bp->lock); 2886 spin_lock_init(&bp->lock);
2846 2887
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index d74655993d4b..1895b6b2addd 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -429,18 +429,12 @@
429 | GEM_BF(name, value)) 429 | GEM_BF(name, value))
430 430
431/* Register access macros */ 431/* Register access macros */
432#define macb_readl(port,reg) \ 432#define macb_readl(port, reg) (port)->macb_reg_readl((port), MACB_##reg)
433 readl_relaxed((port)->regs + MACB_##reg) 433#define macb_writel(port, reg, value) (port)->macb_reg_writel((port), MACB_##reg, (value))
434#define macb_writel(port,reg,value) \ 434#define gem_readl(port, reg) (port)->macb_reg_readl((port), GEM_##reg)
435 writel_relaxed((value), (port)->regs + MACB_##reg) 435#define gem_writel(port, reg, value) (port)->macb_reg_writel((port), GEM_##reg, (value))
436#define gem_readl(port, reg) \ 436#define queue_readl(queue, reg) (queue)->bp->macb_reg_readl((queue)->bp, (queue)->reg)
437 readl_relaxed((port)->regs + GEM_##reg) 437#define queue_writel(queue, reg, value) (queue)->bp->macb_reg_writel((queue)->bp, (queue)->reg, (value))
438#define gem_writel(port, reg, value) \
439 writel_relaxed((value), (port)->regs + GEM_##reg)
440#define queue_readl(queue, reg) \
441 readl_relaxed((queue)->bp->regs + (queue)->reg)
442#define queue_writel(queue, reg, value) \
443 writel_relaxed((value), (queue)->bp->regs + (queue)->reg)
444 438
445/* Conditional GEM/MACB macros. These perform the operation to the correct 439/* Conditional GEM/MACB macros. These perform the operation to the correct
446 * register dependent on whether the device is a GEM or a MACB. For registers 440 * register dependent on whether the device is a GEM or a MACB. For registers
@@ -785,6 +779,11 @@ struct macb_queue {
785 779
786struct macb { 780struct macb {
787 void __iomem *regs; 781 void __iomem *regs;
782 bool native_io;
783
784 /* hardware IO accessors */
785 u32 (*macb_reg_readl)(struct macb *bp, int offset);
786 void (*macb_reg_writel)(struct macb *bp, int offset, u32 value);
788 787
789 unsigned int rx_tail; 788 unsigned int rx_tail;
790 unsigned int rx_prepared_head; 789 unsigned int rx_prepared_head;
@@ -817,9 +816,9 @@ struct macb {
817 816
818 struct mii_bus *mii_bus; 817 struct mii_bus *mii_bus;
819 struct phy_device *phy_dev; 818 struct phy_device *phy_dev;
820 unsigned int link; 819 int link;
821 unsigned int speed; 820 int speed;
822 unsigned int duplex; 821 int duplex;
823 822
824 u32 caps; 823 u32 caps;
825 unsigned int dma_burst_length; 824 unsigned int dma_burst_length;
@@ -843,9 +842,4 @@ static inline bool macb_is_gem(struct macb *bp)
843 return !!(bp->caps & MACB_CAPS_MACB_IS_GEM); 842 return !!(bp->caps & MACB_CAPS_MACB_IS_GEM);
844} 843}
845 844
846static inline bool macb_is_gem_hw(void __iomem *addr)
847{
848 return !!(MACB_BFEXT(IDNUM, readl_relaxed(addr + MACB_MID)) >= 0x2);
849}
850
851#endif /* _MACB_H */ 845#endif /* _MACB_H */
diff --git a/drivers/net/ethernet/cavium/Kconfig b/drivers/net/ethernet/cavium/Kconfig
index c4d6bbe9458d..02e23e6f1424 100644
--- a/drivers/net/ethernet/cavium/Kconfig
+++ b/drivers/net/ethernet/cavium/Kconfig
@@ -16,7 +16,6 @@ if NET_VENDOR_CAVIUM
16config THUNDER_NIC_PF 16config THUNDER_NIC_PF
17 tristate "Thunder Physical function driver" 17 tristate "Thunder Physical function driver"
18 depends on 64BIT 18 depends on 64BIT
19 default ARCH_THUNDER
20 select THUNDER_NIC_BGX 19 select THUNDER_NIC_BGX
21 ---help--- 20 ---help---
22 This driver supports Thunder's NIC physical function. 21 This driver supports Thunder's NIC physical function.
@@ -29,14 +28,12 @@ config THUNDER_NIC_PF
29config THUNDER_NIC_VF 28config THUNDER_NIC_VF
30 tristate "Thunder Virtual function driver" 29 tristate "Thunder Virtual function driver"
31 depends on 64BIT 30 depends on 64BIT
32 default ARCH_THUNDER
33 ---help--- 31 ---help---
34 This driver supports Thunder's NIC virtual function 32 This driver supports Thunder's NIC virtual function
35 33
36config THUNDER_NIC_BGX 34config THUNDER_NIC_BGX
37 tristate "Thunder MAC interface driver (BGX)" 35 tristate "Thunder MAC interface driver (BGX)"
38 depends on 64BIT 36 depends on 64BIT
39 default ARCH_THUNDER
40 ---help--- 37 ---help---
41 This driver supports programming and controlling of MAC 38 This driver supports programming and controlling of MAC
42 interface from NIC physical function driver. 39 interface from NIC physical function driver.
diff --git a/drivers/net/ethernet/cavium/thunder/nic.h b/drivers/net/ethernet/cavium/thunder/nic.h
index dda8a02b7322..8aee250904ec 100644
--- a/drivers/net/ethernet/cavium/thunder/nic.h
+++ b/drivers/net/ethernet/cavium/thunder/nic.h
@@ -125,6 +125,15 @@
125 */ 125 */
126#define NICPF_CLK_PER_INT_TICK 2 126#define NICPF_CLK_PER_INT_TICK 2
127 127
128/* Time to wait before we decide that a SQ is stuck.
129 *
130 * Since both pkt rx and tx notifications are done with same CQ,
131 * when packets are being received at very high rate (eg: L2 forwarding)
132 * then freeing transmitted skbs will be delayed and watchdog
133 * will kick in, resetting interface. Hence keeping this value high.
134 */
135#define NICVF_TX_TIMEOUT (50 * HZ)
136
128struct nicvf_cq_poll { 137struct nicvf_cq_poll {
129 u8 cq_idx; /* Completion queue index */ 138 u8 cq_idx; /* Completion queue index */
130 struct napi_struct napi; 139 struct napi_struct napi;
@@ -216,8 +225,9 @@ struct nicvf_drv_stats {
216 /* Tx */ 225 /* Tx */
217 u64 tx_frames_ok; 226 u64 tx_frames_ok;
218 u64 tx_drops; 227 u64 tx_drops;
219 u64 tx_busy;
220 u64 tx_tso; 228 u64 tx_tso;
229 u64 txq_stop;
230 u64 txq_wake;
221}; 231};
222 232
223struct nicvf { 233struct nicvf {
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
index 16bd2d772db9..a4228e664567 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_ethtool.c
@@ -66,9 +66,10 @@ static const struct nicvf_stat nicvf_drv_stats[] = {
66 NICVF_DRV_STAT(rx_frames_jumbo), 66 NICVF_DRV_STAT(rx_frames_jumbo),
67 NICVF_DRV_STAT(rx_drops), 67 NICVF_DRV_STAT(rx_drops),
68 NICVF_DRV_STAT(tx_frames_ok), 68 NICVF_DRV_STAT(tx_frames_ok),
69 NICVF_DRV_STAT(tx_busy),
70 NICVF_DRV_STAT(tx_tso), 69 NICVF_DRV_STAT(tx_tso),
71 NICVF_DRV_STAT(tx_drops), 70 NICVF_DRV_STAT(tx_drops),
71 NICVF_DRV_STAT(txq_stop),
72 NICVF_DRV_STAT(txq_wake),
72}; 73};
73 74
74static const struct nicvf_stat nicvf_queue_stats[] = { 75static const struct nicvf_stat nicvf_queue_stats[] = {
@@ -126,6 +127,7 @@ static void nicvf_set_msglevel(struct net_device *netdev, u32 lvl)
126 127
127static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data) 128static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
128{ 129{
130 struct nicvf *nic = netdev_priv(netdev);
129 int stats, qidx; 131 int stats, qidx;
130 132
131 if (sset != ETH_SS_STATS) 133 if (sset != ETH_SS_STATS)
@@ -141,7 +143,7 @@ static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
141 data += ETH_GSTRING_LEN; 143 data += ETH_GSTRING_LEN;
142 } 144 }
143 145
144 for (qidx = 0; qidx < MAX_RCV_QUEUES_PER_QS; qidx++) { 146 for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
145 for (stats = 0; stats < nicvf_n_queue_stats; stats++) { 147 for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
146 sprintf(data, "rxq%d: %s", qidx, 148 sprintf(data, "rxq%d: %s", qidx,
147 nicvf_queue_stats[stats].name); 149 nicvf_queue_stats[stats].name);
@@ -149,7 +151,7 @@ static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
149 } 151 }
150 } 152 }
151 153
152 for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) { 154 for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
153 for (stats = 0; stats < nicvf_n_queue_stats; stats++) { 155 for (stats = 0; stats < nicvf_n_queue_stats; stats++) {
154 sprintf(data, "txq%d: %s", qidx, 156 sprintf(data, "txq%d: %s", qidx,
155 nicvf_queue_stats[stats].name); 157 nicvf_queue_stats[stats].name);
@@ -170,12 +172,14 @@ static void nicvf_get_strings(struct net_device *netdev, u32 sset, u8 *data)
170 172
171static int nicvf_get_sset_count(struct net_device *netdev, int sset) 173static int nicvf_get_sset_count(struct net_device *netdev, int sset)
172{ 174{
175 struct nicvf *nic = netdev_priv(netdev);
176
173 if (sset != ETH_SS_STATS) 177 if (sset != ETH_SS_STATS)
174 return -EINVAL; 178 return -EINVAL;
175 179
176 return nicvf_n_hw_stats + nicvf_n_drv_stats + 180 return nicvf_n_hw_stats + nicvf_n_drv_stats +
177 (nicvf_n_queue_stats * 181 (nicvf_n_queue_stats *
178 (MAX_RCV_QUEUES_PER_QS + MAX_SND_QUEUES_PER_QS)) + 182 (nic->qs->rq_cnt + nic->qs->sq_cnt)) +
179 BGX_RX_STATS_COUNT + BGX_TX_STATS_COUNT; 183 BGX_RX_STATS_COUNT + BGX_TX_STATS_COUNT;
180} 184}
181 185
@@ -197,13 +201,13 @@ static void nicvf_get_ethtool_stats(struct net_device *netdev,
197 *(data++) = ((u64 *)&nic->drv_stats) 201 *(data++) = ((u64 *)&nic->drv_stats)
198 [nicvf_drv_stats[stat].index]; 202 [nicvf_drv_stats[stat].index];
199 203
200 for (qidx = 0; qidx < MAX_RCV_QUEUES_PER_QS; qidx++) { 204 for (qidx = 0; qidx < nic->qs->rq_cnt; qidx++) {
201 for (stat = 0; stat < nicvf_n_queue_stats; stat++) 205 for (stat = 0; stat < nicvf_n_queue_stats; stat++)
202 *(data++) = ((u64 *)&nic->qs->rq[qidx].stats) 206 *(data++) = ((u64 *)&nic->qs->rq[qidx].stats)
203 [nicvf_queue_stats[stat].index]; 207 [nicvf_queue_stats[stat].index];
204 } 208 }
205 209
206 for (qidx = 0; qidx < MAX_SND_QUEUES_PER_QS; qidx++) { 210 for (qidx = 0; qidx < nic->qs->sq_cnt; qidx++) {
207 for (stat = 0; stat < nicvf_n_queue_stats; stat++) 211 for (stat = 0; stat < nicvf_n_queue_stats; stat++)
208 *(data++) = ((u64 *)&nic->qs->sq[qidx].stats) 212 *(data++) = ((u64 *)&nic->qs->sq[qidx].stats)
209 [nicvf_queue_stats[stat].index]; 213 [nicvf_queue_stats[stat].index];
@@ -543,6 +547,7 @@ static int nicvf_set_channels(struct net_device *dev,
543{ 547{
544 struct nicvf *nic = netdev_priv(dev); 548 struct nicvf *nic = netdev_priv(dev);
545 int err = 0; 549 int err = 0;
550 bool if_up = netif_running(dev);
546 551
547 if (!channel->rx_count || !channel->tx_count) 552 if (!channel->rx_count || !channel->tx_count)
548 return -EINVAL; 553 return -EINVAL;
@@ -551,6 +556,9 @@ static int nicvf_set_channels(struct net_device *dev,
551 if (channel->tx_count > MAX_SND_QUEUES_PER_QS) 556 if (channel->tx_count > MAX_SND_QUEUES_PER_QS)
552 return -EINVAL; 557 return -EINVAL;
553 558
559 if (if_up)
560 nicvf_stop(dev);
561
554 nic->qs->rq_cnt = channel->rx_count; 562 nic->qs->rq_cnt = channel->rx_count;
555 nic->qs->sq_cnt = channel->tx_count; 563 nic->qs->sq_cnt = channel->tx_count;
556 nic->qs->cq_cnt = max(nic->qs->rq_cnt, nic->qs->sq_cnt); 564 nic->qs->cq_cnt = max(nic->qs->rq_cnt, nic->qs->sq_cnt);
@@ -559,11 +567,9 @@ static int nicvf_set_channels(struct net_device *dev,
559 if (err) 567 if (err)
560 return err; 568 return err;
561 569
562 if (!netif_running(dev)) 570 if (if_up)
563 return err; 571 nicvf_open(dev);
564 572
565 nicvf_stop(dev);
566 nicvf_open(dev);
567 netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n", 573 netdev_info(dev, "Setting num Tx rings to %d, Rx rings to %d success\n",
568 nic->qs->sq_cnt, nic->qs->rq_cnt); 574 nic->qs->sq_cnt, nic->qs->rq_cnt);
569 575
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index 8b119a035b7e..3b90afb8c293 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -234,7 +234,7 @@ static void nicvf_handle_mbx_intr(struct nicvf *nic)
234 nic->duplex == DUPLEX_FULL ? 234 nic->duplex == DUPLEX_FULL ?
235 "Full duplex" : "Half duplex"); 235 "Full duplex" : "Half duplex");
236 netif_carrier_on(nic->netdev); 236 netif_carrier_on(nic->netdev);
237 netif_tx_wake_all_queues(nic->netdev); 237 netif_tx_start_all_queues(nic->netdev);
238 } else { 238 } else {
239 netdev_info(nic->netdev, "%s: Link is Down\n", 239 netdev_info(nic->netdev, "%s: Link is Down\n",
240 nic->netdev->name); 240 nic->netdev->name);
@@ -425,6 +425,7 @@ static void nicvf_snd_pkt_handler(struct net_device *netdev,
425 if (skb) { 425 if (skb) {
426 prefetch(skb); 426 prefetch(skb);
427 dev_consume_skb_any(skb); 427 dev_consume_skb_any(skb);
428 sq->skbuff[cqe_tx->sqe_ptr] = (u64)NULL;
428 } 429 }
429} 430}
430 431
@@ -476,12 +477,13 @@ static void nicvf_rcv_pkt_handler(struct net_device *netdev,
476static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx, 477static int nicvf_cq_intr_handler(struct net_device *netdev, u8 cq_idx,
477 struct napi_struct *napi, int budget) 478 struct napi_struct *napi, int budget)
478{ 479{
479 int processed_cqe, work_done = 0; 480 int processed_cqe, work_done = 0, tx_done = 0;
480 int cqe_count, cqe_head; 481 int cqe_count, cqe_head;
481 struct nicvf *nic = netdev_priv(netdev); 482 struct nicvf *nic = netdev_priv(netdev);
482 struct queue_set *qs = nic->qs; 483 struct queue_set *qs = nic->qs;
483 struct cmp_queue *cq = &qs->cq[cq_idx]; 484 struct cmp_queue *cq = &qs->cq[cq_idx];
484 struct cqe_rx_t *cq_desc; 485 struct cqe_rx_t *cq_desc;
486 struct netdev_queue *txq;
485 487
486 spin_lock_bh(&cq->lock); 488 spin_lock_bh(&cq->lock);
487loop: 489loop:
@@ -496,8 +498,8 @@ loop:
496 cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9; 498 cqe_head = nicvf_queue_reg_read(nic, NIC_QSET_CQ_0_7_HEAD, cq_idx) >> 9;
497 cqe_head &= 0xFFFF; 499 cqe_head &= 0xFFFF;
498 500
499 netdev_dbg(nic->netdev, "%s cqe_count %d cqe_head %d\n", 501 netdev_dbg(nic->netdev, "%s CQ%d cqe_count %d cqe_head %d\n",
500 __func__, cqe_count, cqe_head); 502 __func__, cq_idx, cqe_count, cqe_head);
501 while (processed_cqe < cqe_count) { 503 while (processed_cqe < cqe_count) {
502 /* Get the CQ descriptor */ 504 /* Get the CQ descriptor */
503 cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head); 505 cq_desc = (struct cqe_rx_t *)GET_CQ_DESC(cq, cqe_head);
@@ -511,8 +513,8 @@ loop:
511 break; 513 break;
512 } 514 }
513 515
514 netdev_dbg(nic->netdev, "cq_desc->cqe_type %d\n", 516 netdev_dbg(nic->netdev, "CQ%d cq_desc->cqe_type %d\n",
515 cq_desc->cqe_type); 517 cq_idx, cq_desc->cqe_type);
516 switch (cq_desc->cqe_type) { 518 switch (cq_desc->cqe_type) {
517 case CQE_TYPE_RX: 519 case CQE_TYPE_RX:
518 nicvf_rcv_pkt_handler(netdev, napi, cq, 520 nicvf_rcv_pkt_handler(netdev, napi, cq,
@@ -522,6 +524,7 @@ loop:
522 case CQE_TYPE_SEND: 524 case CQE_TYPE_SEND:
523 nicvf_snd_pkt_handler(netdev, cq, 525 nicvf_snd_pkt_handler(netdev, cq,
524 (void *)cq_desc, CQE_TYPE_SEND); 526 (void *)cq_desc, CQE_TYPE_SEND);
527 tx_done++;
525 break; 528 break;
526 case CQE_TYPE_INVALID: 529 case CQE_TYPE_INVALID:
527 case CQE_TYPE_RX_SPLIT: 530 case CQE_TYPE_RX_SPLIT:
@@ -532,8 +535,9 @@ loop:
532 } 535 }
533 processed_cqe++; 536 processed_cqe++;
534 } 537 }
535 netdev_dbg(nic->netdev, "%s processed_cqe %d work_done %d budget %d\n", 538 netdev_dbg(nic->netdev,
536 __func__, processed_cqe, work_done, budget); 539 "%s CQ%d processed_cqe %d work_done %d budget %d\n",
540 __func__, cq_idx, processed_cqe, work_done, budget);
537 541
538 /* Ring doorbell to inform H/W to reuse processed CQEs */ 542 /* Ring doorbell to inform H/W to reuse processed CQEs */
539 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR, 543 nicvf_queue_reg_write(nic, NIC_QSET_CQ_0_7_DOOR,
@@ -543,6 +547,19 @@ loop:
543 goto loop; 547 goto loop;
544 548
545done: 549done:
550 /* Wakeup TXQ if its stopped earlier due to SQ full */
551 if (tx_done) {
552 txq = netdev_get_tx_queue(netdev, cq_idx);
553 if (netif_tx_queue_stopped(txq)) {
554 netif_tx_start_queue(txq);
555 nic->drv_stats.txq_wake++;
556 if (netif_msg_tx_err(nic))
557 netdev_warn(netdev,
558 "%s: Transmit queue wakeup SQ%d\n",
559 netdev->name, cq_idx);
560 }
561 }
562
546 spin_unlock_bh(&cq->lock); 563 spin_unlock_bh(&cq->lock);
547 return work_done; 564 return work_done;
548} 565}
@@ -554,15 +571,10 @@ static int nicvf_poll(struct napi_struct *napi, int budget)
554 struct net_device *netdev = napi->dev; 571 struct net_device *netdev = napi->dev;
555 struct nicvf *nic = netdev_priv(netdev); 572 struct nicvf *nic = netdev_priv(netdev);
556 struct nicvf_cq_poll *cq; 573 struct nicvf_cq_poll *cq;
557 struct netdev_queue *txq;
558 574
559 cq = container_of(napi, struct nicvf_cq_poll, napi); 575 cq = container_of(napi, struct nicvf_cq_poll, napi);
560 work_done = nicvf_cq_intr_handler(netdev, cq->cq_idx, napi, budget); 576 work_done = nicvf_cq_intr_handler(netdev, cq->cq_idx, napi, budget);
561 577
562 txq = netdev_get_tx_queue(netdev, cq->cq_idx);
563 if (netif_tx_queue_stopped(txq))
564 netif_tx_wake_queue(txq);
565
566 if (work_done < budget) { 578 if (work_done < budget) {
567 /* Slow packet rate, exit polling */ 579 /* Slow packet rate, exit polling */
568 napi_complete(napi); 580 napi_complete(napi);
@@ -833,9 +845,9 @@ static netdev_tx_t nicvf_xmit(struct sk_buff *skb, struct net_device *netdev)
833 return NETDEV_TX_OK; 845 return NETDEV_TX_OK;
834 } 846 }
835 847
836 if (!nicvf_sq_append_skb(nic, skb) && !netif_tx_queue_stopped(txq)) { 848 if (!netif_tx_queue_stopped(txq) && !nicvf_sq_append_skb(nic, skb)) {
837 netif_tx_stop_queue(txq); 849 netif_tx_stop_queue(txq);
838 nic->drv_stats.tx_busy++; 850 nic->drv_stats.txq_stop++;
839 if (netif_msg_tx_err(nic)) 851 if (netif_msg_tx_err(nic))
840 netdev_warn(netdev, 852 netdev_warn(netdev,
841 "%s: Transmit ring full, stopping SQ%d\n", 853 "%s: Transmit ring full, stopping SQ%d\n",
@@ -859,7 +871,6 @@ int nicvf_stop(struct net_device *netdev)
859 nicvf_send_msg_to_pf(nic, &mbx); 871 nicvf_send_msg_to_pf(nic, &mbx);
860 872
861 netif_carrier_off(netdev); 873 netif_carrier_off(netdev);
862 netif_tx_disable(netdev);
863 874
864 /* Disable RBDR & QS error interrupts */ 875 /* Disable RBDR & QS error interrupts */
865 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) { 876 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) {
@@ -894,6 +905,8 @@ int nicvf_stop(struct net_device *netdev)
894 kfree(cq_poll); 905 kfree(cq_poll);
895 } 906 }
896 907
908 netif_tx_disable(netdev);
909
897 /* Free resources */ 910 /* Free resources */
898 nicvf_config_data_transfer(nic, false); 911 nicvf_config_data_transfer(nic, false);
899 912
@@ -988,6 +1001,9 @@ int nicvf_open(struct net_device *netdev)
988 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++) 1001 for (qidx = 0; qidx < qs->rbdr_cnt; qidx++)
989 nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx); 1002 nicvf_enable_intr(nic, NICVF_INTR_RBDR, qidx);
990 1003
1004 nic->drv_stats.txq_stop = 0;
1005 nic->drv_stats.txq_wake = 0;
1006
991 netif_carrier_on(netdev); 1007 netif_carrier_on(netdev);
992 netif_tx_start_all_queues(netdev); 1008 netif_tx_start_all_queues(netdev);
993 1009
@@ -1278,6 +1294,7 @@ static int nicvf_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1278 netdev->hw_features = netdev->features; 1294 netdev->hw_features = netdev->features;
1279 1295
1280 netdev->netdev_ops = &nicvf_netdev_ops; 1296 netdev->netdev_ops = &nicvf_netdev_ops;
1297 netdev->watchdog_timeo = NICVF_TX_TIMEOUT;
1281 1298
1282 INIT_WORK(&nic->reset_task, nicvf_reset_task); 1299 INIT_WORK(&nic->reset_task, nicvf_reset_task);
1283 1300
@@ -1318,11 +1335,17 @@ static void nicvf_remove(struct pci_dev *pdev)
1318 pci_disable_device(pdev); 1335 pci_disable_device(pdev);
1319} 1336}
1320 1337
1338static void nicvf_shutdown(struct pci_dev *pdev)
1339{
1340 nicvf_remove(pdev);
1341}
1342
1321static struct pci_driver nicvf_driver = { 1343static struct pci_driver nicvf_driver = {
1322 .name = DRV_NAME, 1344 .name = DRV_NAME,
1323 .id_table = nicvf_id_table, 1345 .id_table = nicvf_id_table,
1324 .probe = nicvf_probe, 1346 .probe = nicvf_probe,
1325 .remove = nicvf_remove, 1347 .remove = nicvf_remove,
1348 .shutdown = nicvf_shutdown,
1326}; 1349};
1327 1350
1328static int __init nicvf_init_module(void) 1351static int __init nicvf_init_module(void)
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index d69d228d11a0..ca4240aa6d15 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -103,9 +103,11 @@ static inline int nicvf_alloc_rcv_buffer(struct nicvf *nic, gfp_t gfp,
103 103
104 /* Allocate a new page */ 104 /* Allocate a new page */
105 if (!nic->rb_page) { 105 if (!nic->rb_page) {
106 nic->rb_page = alloc_pages(gfp | __GFP_COMP, order); 106 nic->rb_page = alloc_pages(gfp | __GFP_COMP | __GFP_NOWARN,
107 order);
107 if (!nic->rb_page) { 108 if (!nic->rb_page) {
108 netdev_err(nic->netdev, "Failed to allocate new rcv buffer\n"); 109 netdev_err(nic->netdev,
110 "Failed to allocate new rcv buffer\n");
109 return -ENOMEM; 111 return -ENOMEM;
110 } 112 }
111 nic->rb_page_offset = 0; 113 nic->rb_page_offset = 0;
@@ -382,7 +384,8 @@ static void nicvf_free_snd_queue(struct nicvf *nic, struct snd_queue *sq)
382 return; 384 return;
383 385
384 if (sq->tso_hdrs) 386 if (sq->tso_hdrs)
385 dma_free_coherent(&nic->pdev->dev, sq->dmem.q_len, 387 dma_free_coherent(&nic->pdev->dev,
388 sq->dmem.q_len * TSO_HEADER_SIZE,
386 sq->tso_hdrs, sq->tso_hdrs_phys); 389 sq->tso_hdrs, sq->tso_hdrs_phys);
387 390
388 kfree(sq->skbuff); 391 kfree(sq->skbuff);
@@ -863,10 +866,11 @@ void nicvf_sq_free_used_descs(struct net_device *netdev, struct snd_queue *sq,
863 continue; 866 continue;
864 } 867 }
865 skb = (struct sk_buff *)sq->skbuff[sq->head]; 868 skb = (struct sk_buff *)sq->skbuff[sq->head];
869 if (skb)
870 dev_kfree_skb_any(skb);
866 atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets); 871 atomic64_add(1, (atomic64_t *)&netdev->stats.tx_packets);
867 atomic64_add(hdr->tot_len, 872 atomic64_add(hdr->tot_len,
868 (atomic64_t *)&netdev->stats.tx_bytes); 873 (atomic64_t *)&netdev->stats.tx_bytes);
869 dev_kfree_skb_any(skb);
870 nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1); 874 nicvf_put_sq_desc(sq, hdr->subdesc_cnt + 1);
871 } 875 }
872} 876}
@@ -992,7 +996,7 @@ static inline void nicvf_sq_add_gather_subdesc(struct snd_queue *sq, int qentry,
992 996
993 memset(gather, 0, SND_QUEUE_DESC_SIZE); 997 memset(gather, 0, SND_QUEUE_DESC_SIZE);
994 gather->subdesc_type = SQ_DESC_TYPE_GATHER; 998 gather->subdesc_type = SQ_DESC_TYPE_GATHER;
995 gather->ld_type = NIC_SEND_LD_TYPE_E_LDWB; 999 gather->ld_type = NIC_SEND_LD_TYPE_E_LDD;
996 gather->size = size; 1000 gather->size = size;
997 gather->addr = data; 1001 gather->addr = data;
998} 1002}
@@ -1048,7 +1052,7 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
1048 } 1052 }
1049 nicvf_sq_add_hdr_subdesc(sq, hdr_qentry, 1053 nicvf_sq_add_hdr_subdesc(sq, hdr_qentry,
1050 seg_subdescs - 1, skb, seg_len); 1054 seg_subdescs - 1, skb, seg_len);
1051 sq->skbuff[hdr_qentry] = 0; 1055 sq->skbuff[hdr_qentry] = (u64)NULL;
1052 qentry = nicvf_get_nxt_sqentry(sq, qentry); 1056 qentry = nicvf_get_nxt_sqentry(sq, qentry);
1053 1057
1054 desc_cnt += seg_subdescs; 1058 desc_cnt += seg_subdescs;
@@ -1062,6 +1066,7 @@ static int nicvf_sq_append_tso(struct nicvf *nic, struct snd_queue *sq,
1062 /* Inform HW to xmit all TSO segments */ 1066 /* Inform HW to xmit all TSO segments */
1063 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR, 1067 nicvf_queue_reg_write(nic, NIC_QSET_SQ_0_7_DOOR,
1064 skb_get_queue_mapping(skb), desc_cnt); 1068 skb_get_queue_mapping(skb), desc_cnt);
1069 nic->drv_stats.tx_tso++;
1065 return 1; 1070 return 1;
1066} 1071}
1067 1072
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index 8341bdf755d1..f0937b7bfe9f 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -62,7 +62,7 @@
62#define SND_QUEUE_CNT 8 62#define SND_QUEUE_CNT 8
63#define CMP_QUEUE_CNT 8 /* Max of RCV and SND qcount */ 63#define CMP_QUEUE_CNT 8 /* Max of RCV and SND qcount */
64 64
65#define SND_QSIZE SND_QUEUE_SIZE4 65#define SND_QSIZE SND_QUEUE_SIZE2
66#define SND_QUEUE_LEN (1ULL << (SND_QSIZE + 10)) 66#define SND_QUEUE_LEN (1ULL << (SND_QSIZE + 10))
67#define MAX_SND_QUEUE_LEN (1ULL << (SND_QUEUE_SIZE6 + 10)) 67#define MAX_SND_QUEUE_LEN (1ULL << (SND_QUEUE_SIZE6 + 10))
68#define SND_QUEUE_THRESH 2ULL 68#define SND_QUEUE_THRESH 2ULL
@@ -70,7 +70,10 @@
70/* Since timestamp not enabled, otherwise 2 */ 70/* Since timestamp not enabled, otherwise 2 */
71#define MAX_CQE_PER_PKT_XMIT 1 71#define MAX_CQE_PER_PKT_XMIT 1
72 72
73#define CMP_QSIZE CMP_QUEUE_SIZE4 73/* Keep CQ and SQ sizes same, if timestamping
74 * is enabled this equation will change.
75 */
76#define CMP_QSIZE CMP_QUEUE_SIZE2
74#define CMP_QUEUE_LEN (1ULL << (CMP_QSIZE + 10)) 77#define CMP_QUEUE_LEN (1ULL << (CMP_QSIZE + 10))
75#define CMP_QUEUE_CQE_THRESH 0 78#define CMP_QUEUE_CQE_THRESH 0
76#define CMP_QUEUE_TIMER_THRESH 220 /* 10usec */ 79#define CMP_QUEUE_TIMER_THRESH 220 /* 10usec */
@@ -87,7 +90,12 @@
87 90
88#define MAX_CQES_FOR_TX ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \ 91#define MAX_CQES_FOR_TX ((SND_QUEUE_LEN / MIN_SQ_DESC_PER_PKT_XMIT) * \
89 MAX_CQE_PER_PKT_XMIT) 92 MAX_CQE_PER_PKT_XMIT)
90#define RQ_CQ_DROP ((CMP_QUEUE_LEN - MAX_CQES_FOR_TX) / 256) 93/* Calculate number of CQEs to reserve for all SQEs.
94 * Its 1/256th level of CQ size.
95 * '+ 1' to account for pipelining
96 */
97#define RQ_CQ_DROP ((256 / (CMP_QUEUE_LEN / \
98 (CMP_QUEUE_LEN - MAX_CQES_FOR_TX))) + 1)
91 99
92/* Descriptor size in bytes */ 100/* Descriptor size in bytes */
93#define SND_QUEUE_DESC_SIZE 16 101#define SND_QUEUE_DESC_SIZE 16
diff --git a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
index 633ec05dfe05..b961a89dc626 100644
--- a/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
+++ b/drivers/net/ethernet/cavium/thunder/thunder_bgx.c
@@ -673,7 +673,10 @@ static void bgx_lmac_disable(struct bgx *bgx, u8 lmacid)
673 bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg); 673 bgx_reg_write(bgx, lmacid, BGX_CMRX_CFG, cmrx_cfg);
674 bgx_flush_dmac_addrs(bgx, lmacid); 674 bgx_flush_dmac_addrs(bgx, lmacid);
675 675
676 if (lmac->phydev) 676 if ((bgx->lmac_type != BGX_MODE_XFI) &&
677 (bgx->lmac_type != BGX_MODE_XLAUI) &&
678 (bgx->lmac_type != BGX_MODE_40G_KR) &&
679 (bgx->lmac_type != BGX_MODE_10G_KR) && lmac->phydev)
677 phy_disconnect(lmac->phydev); 680 phy_disconnect(lmac->phydev);
678 681
679 lmac->phydev = NULL; 682 lmac->phydev = NULL;
diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
index a11485fbb33f..c3c7db41819d 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
+++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_debugfs.c
@@ -2332,10 +2332,11 @@ int t4_setup_debugfs(struct adapter *adap)
2332 EXT_MEM1_SIZE_G(size)); 2332 EXT_MEM1_SIZE_G(size));
2333 } 2333 }
2334 } else { 2334 } else {
2335 if (i & EXT_MEM_ENABLE_F) 2335 if (i & EXT_MEM_ENABLE_F) {
2336 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A); 2336 size = t4_read_reg(adap, MA_EXT_MEMORY_BAR_A);
2337 add_debugfs_mem(adap, "mc", MEM_MC, 2337 add_debugfs_mem(adap, "mc", MEM_MC,
2338 EXT_MEM_SIZE_G(size)); 2338 EXT_MEM_SIZE_G(size));
2339 }
2339 } 2340 }
2340 2341
2341 de = debugfs_create_file_size("flash", S_IRUSR, adap->debugfs_root, adap, 2342 de = debugfs_create_file_size("flash", S_IRUSR, adap->debugfs_root, adap,
diff --git a/drivers/net/ethernet/emulex/benet/be_cmds.h b/drivers/net/ethernet/emulex/benet/be_cmds.h
index 2716e6f30d9a..00e3a6b6b822 100644
--- a/drivers/net/ethernet/emulex/benet/be_cmds.h
+++ b/drivers/net/ethernet/emulex/benet/be_cmds.h
@@ -620,6 +620,11 @@ enum be_if_flags {
620 BE_IF_FLAGS_VLAN_PROMISCUOUS |\ 620 BE_IF_FLAGS_VLAN_PROMISCUOUS |\
621 BE_IF_FLAGS_MCAST_PROMISCUOUS) 621 BE_IF_FLAGS_MCAST_PROMISCUOUS)
622 622
623#define BE_IF_EN_FLAGS (BE_IF_FLAGS_BROADCAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |\
624 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_UNTAGGED)
625
626#define BE_IF_ALL_FILT_FLAGS (BE_IF_EN_FLAGS | BE_IF_FLAGS_ALL_PROMISCUOUS)
627
623/* An RX interface is an object with one or more MAC addresses and 628/* An RX interface is an object with one or more MAC addresses and
624 * filtering capabilities. */ 629 * filtering capabilities. */
625struct be_cmd_req_if_create { 630struct be_cmd_req_if_create {
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 6f642426308c..c28e3bfdccd7 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -273,6 +273,10 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
273 if (ether_addr_equal(addr->sa_data, netdev->dev_addr)) 273 if (ether_addr_equal(addr->sa_data, netdev->dev_addr))
274 return 0; 274 return 0;
275 275
276 /* if device is not running, copy MAC to netdev->dev_addr */
277 if (!netif_running(netdev))
278 goto done;
279
276 /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT 280 /* The PMAC_ADD cmd may fail if the VF doesn't have FILTMGMT
277 * privilege or if PF did not provision the new MAC address. 281 * privilege or if PF did not provision the new MAC address.
278 * On BE3, this cmd will always fail if the VF doesn't have the 282 * On BE3, this cmd will always fail if the VF doesn't have the
@@ -307,9 +311,9 @@ static int be_mac_addr_set(struct net_device *netdev, void *p)
307 status = -EPERM; 311 status = -EPERM;
308 goto err; 312 goto err;
309 } 313 }
310 314done:
311 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 315 ether_addr_copy(netdev->dev_addr, addr->sa_data);
312 dev_info(dev, "MAC address changed to %pM\n", mac); 316 dev_info(dev, "MAC address changed to %pM\n", addr->sa_data);
313 return 0; 317 return 0;
314err: 318err:
315 dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data); 319 dev_warn(dev, "MAC address change to %pM failed\n", addr->sa_data);
@@ -2447,10 +2451,24 @@ static void be_eq_clean(struct be_eq_obj *eqo)
2447 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0); 2451 be_eq_notify(eqo->adapter, eqo->q.id, false, true, num, 0);
2448} 2452}
2449 2453
2450static void be_rx_cq_clean(struct be_rx_obj *rxo) 2454/* Free posted rx buffers that were not used */
2455static void be_rxq_clean(struct be_rx_obj *rxo)
2451{ 2456{
2452 struct be_rx_page_info *page_info;
2453 struct be_queue_info *rxq = &rxo->q; 2457 struct be_queue_info *rxq = &rxo->q;
2458 struct be_rx_page_info *page_info;
2459
2460 while (atomic_read(&rxq->used) > 0) {
2461 page_info = get_rx_page_info(rxo);
2462 put_page(page_info->page);
2463 memset(page_info, 0, sizeof(*page_info));
2464 }
2465 BUG_ON(atomic_read(&rxq->used));
2466 rxq->tail = 0;
2467 rxq->head = 0;
2468}
2469
2470static void be_rx_cq_clean(struct be_rx_obj *rxo)
2471{
2454 struct be_queue_info *rx_cq = &rxo->cq; 2472 struct be_queue_info *rx_cq = &rxo->cq;
2455 struct be_rx_compl_info *rxcp; 2473 struct be_rx_compl_info *rxcp;
2456 struct be_adapter *adapter = rxo->adapter; 2474 struct be_adapter *adapter = rxo->adapter;
@@ -2487,16 +2505,6 @@ static void be_rx_cq_clean(struct be_rx_obj *rxo)
2487 2505
2488 /* After cleanup, leave the CQ in unarmed state */ 2506 /* After cleanup, leave the CQ in unarmed state */
2489 be_cq_notify(adapter, rx_cq->id, false, 0); 2507 be_cq_notify(adapter, rx_cq->id, false, 0);
2490
2491 /* Then free posted rx buffers that were not used */
2492 while (atomic_read(&rxq->used) > 0) {
2493 page_info = get_rx_page_info(rxo);
2494 put_page(page_info->page);
2495 memset(page_info, 0, sizeof(*page_info));
2496 }
2497 BUG_ON(atomic_read(&rxq->used));
2498 rxq->tail = 0;
2499 rxq->head = 0;
2500} 2508}
2501 2509
2502static void be_tx_compl_clean(struct be_adapter *adapter) 2510static void be_tx_compl_clean(struct be_adapter *adapter)
@@ -2576,8 +2584,8 @@ static void be_evt_queues_destroy(struct be_adapter *adapter)
2576 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ); 2584 be_cmd_q_destroy(adapter, &eqo->q, QTYPE_EQ);
2577 napi_hash_del(&eqo->napi); 2585 napi_hash_del(&eqo->napi);
2578 netif_napi_del(&eqo->napi); 2586 netif_napi_del(&eqo->napi);
2587 free_cpumask_var(eqo->affinity_mask);
2579 } 2588 }
2580 free_cpumask_var(eqo->affinity_mask);
2581 be_queue_free(adapter, &eqo->q); 2589 be_queue_free(adapter, &eqo->q);
2582 } 2590 }
2583} 2591}
@@ -2594,13 +2602,7 @@ static int be_evt_queues_create(struct be_adapter *adapter)
2594 2602
2595 for_all_evt_queues(adapter, eqo, i) { 2603 for_all_evt_queues(adapter, eqo, i) {
2596 int numa_node = dev_to_node(&adapter->pdev->dev); 2604 int numa_node = dev_to_node(&adapter->pdev->dev);
2597 if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL)) 2605
2598 return -ENOMEM;
2599 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
2600 eqo->affinity_mask);
2601 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2602 BE_NAPI_WEIGHT);
2603 napi_hash_add(&eqo->napi);
2604 aic = &adapter->aic_obj[i]; 2606 aic = &adapter->aic_obj[i];
2605 eqo->adapter = adapter; 2607 eqo->adapter = adapter;
2606 eqo->idx = i; 2608 eqo->idx = i;
@@ -2616,6 +2618,14 @@ static int be_evt_queues_create(struct be_adapter *adapter)
2616 rc = be_cmd_eq_create(adapter, eqo); 2618 rc = be_cmd_eq_create(adapter, eqo);
2617 if (rc) 2619 if (rc)
2618 return rc; 2620 return rc;
2621
2622 if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
2623 return -ENOMEM;
2624 cpumask_set_cpu(cpumask_local_spread(i, numa_node),
2625 eqo->affinity_mask);
2626 netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
2627 BE_NAPI_WEIGHT);
2628 napi_hash_add(&eqo->napi);
2619 } 2629 }
2620 return 0; 2630 return 0;
2621} 2631}
@@ -3354,13 +3364,54 @@ static void be_rx_qs_destroy(struct be_adapter *adapter)
3354 for_all_rx_queues(adapter, rxo, i) { 3364 for_all_rx_queues(adapter, rxo, i) {
3355 q = &rxo->q; 3365 q = &rxo->q;
3356 if (q->created) { 3366 if (q->created) {
3367 /* If RXQs are destroyed while in an "out of buffer"
3368 * state, there is a possibility of an HW stall on
3369 * Lancer. So, post 64 buffers to each queue to relieve
3370 * the "out of buffer" condition.
3371 * Make sure there's space in the RXQ before posting.
3372 */
3373 if (lancer_chip(adapter)) {
3374 be_rx_cq_clean(rxo);
3375 if (atomic_read(&q->used) == 0)
3376 be_post_rx_frags(rxo, GFP_KERNEL,
3377 MAX_RX_POST);
3378 }
3379
3357 be_cmd_rxq_destroy(adapter, q); 3380 be_cmd_rxq_destroy(adapter, q);
3358 be_rx_cq_clean(rxo); 3381 be_rx_cq_clean(rxo);
3382 be_rxq_clean(rxo);
3359 } 3383 }
3360 be_queue_free(adapter, q); 3384 be_queue_free(adapter, q);
3361 } 3385 }
3362} 3386}
3363 3387
3388static void be_disable_if_filters(struct be_adapter *adapter)
3389{
3390 be_cmd_pmac_del(adapter, adapter->if_handle,
3391 adapter->pmac_id[0], 0);
3392
3393 be_clear_uc_list(adapter);
3394
3395 /* The IFACE flags are enabled in the open path and cleared
3396 * in the close path. When a VF gets detached from the host and
3397 * assigned to a VM the following happens:
3398 * - VF's IFACE flags get cleared in the detach path
3399 * - IFACE create is issued by the VF in the attach path
3400 * Due to a bug in the BE3/Skyhawk-R FW
3401 * (Lancer FW doesn't have the bug), the IFACE capability flags
3402 * specified along with the IFACE create cmd issued by a VF are not
3403 * honoured by FW. As a consequence, if a *new* driver
3404 * (that enables/disables IFACE flags in open/close)
3405 * is loaded in the host and an *old* driver is * used by a VM/VF,
3406 * the IFACE gets created *without* the needed flags.
3407 * To avoid this, disable RX-filter flags only for Lancer.
3408 */
3409 if (lancer_chip(adapter)) {
3410 be_cmd_rx_filter(adapter, BE_IF_ALL_FILT_FLAGS, OFF);
3411 adapter->if_flags &= ~BE_IF_ALL_FILT_FLAGS;
3412 }
3413}
3414
3364static int be_close(struct net_device *netdev) 3415static int be_close(struct net_device *netdev)
3365{ 3416{
3366 struct be_adapter *adapter = netdev_priv(netdev); 3417 struct be_adapter *adapter = netdev_priv(netdev);
@@ -3373,6 +3424,8 @@ static int be_close(struct net_device *netdev)
3373 if (!(adapter->flags & BE_FLAGS_SETUP_DONE)) 3424 if (!(adapter->flags & BE_FLAGS_SETUP_DONE))
3374 return 0; 3425 return 0;
3375 3426
3427 be_disable_if_filters(adapter);
3428
3376 be_roce_dev_close(adapter); 3429 be_roce_dev_close(adapter);
3377 3430
3378 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) { 3431 if (adapter->flags & BE_FLAGS_NAPI_ENABLED) {
@@ -3392,7 +3445,6 @@ static int be_close(struct net_device *netdev)
3392 be_tx_compl_clean(adapter); 3445 be_tx_compl_clean(adapter);
3393 3446
3394 be_rx_qs_destroy(adapter); 3447 be_rx_qs_destroy(adapter);
3395 be_clear_uc_list(adapter);
3396 3448
3397 for_all_evt_queues(adapter, eqo, i) { 3449 for_all_evt_queues(adapter, eqo, i) {
3398 if (msix_enabled(adapter)) 3450 if (msix_enabled(adapter))
@@ -3477,6 +3529,31 @@ static int be_rx_qs_create(struct be_adapter *adapter)
3477 return 0; 3529 return 0;
3478} 3530}
3479 3531
3532static int be_enable_if_filters(struct be_adapter *adapter)
3533{
3534 int status;
3535
3536 status = be_cmd_rx_filter(adapter, BE_IF_EN_FLAGS, ON);
3537 if (status)
3538 return status;
3539
3540 /* For BE3 VFs, the PF programs the initial MAC address */
3541 if (!(BEx_chip(adapter) && be_virtfn(adapter))) {
3542 status = be_cmd_pmac_add(adapter, adapter->netdev->dev_addr,
3543 adapter->if_handle,
3544 &adapter->pmac_id[0], 0);
3545 if (status)
3546 return status;
3547 }
3548
3549 if (adapter->vlans_added)
3550 be_vid_config(adapter);
3551
3552 be_set_rx_mode(adapter->netdev);
3553
3554 return 0;
3555}
3556
3480static int be_open(struct net_device *netdev) 3557static int be_open(struct net_device *netdev)
3481{ 3558{
3482 struct be_adapter *adapter = netdev_priv(netdev); 3559 struct be_adapter *adapter = netdev_priv(netdev);
@@ -3490,6 +3567,10 @@ static int be_open(struct net_device *netdev)
3490 if (status) 3567 if (status)
3491 goto err; 3568 goto err;
3492 3569
3570 status = be_enable_if_filters(adapter);
3571 if (status)
3572 goto err;
3573
3493 status = be_irq_register(adapter); 3574 status = be_irq_register(adapter);
3494 if (status) 3575 if (status)
3495 goto err; 3576 goto err;
@@ -3686,16 +3767,6 @@ static void be_cancel_err_detection(struct be_adapter *adapter)
3686 } 3767 }
3687} 3768}
3688 3769
3689static void be_mac_clear(struct be_adapter *adapter)
3690{
3691 if (adapter->pmac_id) {
3692 be_cmd_pmac_del(adapter, adapter->if_handle,
3693 adapter->pmac_id[0], 0);
3694 kfree(adapter->pmac_id);
3695 adapter->pmac_id = NULL;
3696 }
3697}
3698
3699#ifdef CONFIG_BE2NET_VXLAN 3770#ifdef CONFIG_BE2NET_VXLAN
3700static void be_disable_vxlan_offloads(struct be_adapter *adapter) 3771static void be_disable_vxlan_offloads(struct be_adapter *adapter)
3701{ 3772{
@@ -3770,8 +3841,8 @@ static int be_clear(struct be_adapter *adapter)
3770#ifdef CONFIG_BE2NET_VXLAN 3841#ifdef CONFIG_BE2NET_VXLAN
3771 be_disable_vxlan_offloads(adapter); 3842 be_disable_vxlan_offloads(adapter);
3772#endif 3843#endif
3773 /* delete the primary mac along with the uc-mac list */ 3844 kfree(adapter->pmac_id);
3774 be_mac_clear(adapter); 3845 adapter->pmac_id = NULL;
3775 3846
3776 be_cmd_if_destroy(adapter, adapter->if_handle, 0); 3847 be_cmd_if_destroy(adapter, adapter->if_handle, 0);
3777 3848
@@ -3782,25 +3853,11 @@ static int be_clear(struct be_adapter *adapter)
3782 return 0; 3853 return 0;
3783} 3854}
3784 3855
3785static int be_if_create(struct be_adapter *adapter, u32 *if_handle,
3786 u32 cap_flags, u32 vf)
3787{
3788 u32 en_flags;
3789
3790 en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
3791 BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS |
3792 BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
3793
3794 en_flags &= cap_flags;
3795
3796 return be_cmd_if_create(adapter, cap_flags, en_flags, if_handle, vf);
3797}
3798
3799static int be_vfs_if_create(struct be_adapter *adapter) 3856static int be_vfs_if_create(struct be_adapter *adapter)
3800{ 3857{
3801 struct be_resources res = {0}; 3858 struct be_resources res = {0};
3859 u32 cap_flags, en_flags, vf;
3802 struct be_vf_cfg *vf_cfg; 3860 struct be_vf_cfg *vf_cfg;
3803 u32 cap_flags, vf;
3804 int status; 3861 int status;
3805 3862
3806 /* If a FW profile exists, then cap_flags are updated */ 3863 /* If a FW profile exists, then cap_flags are updated */
@@ -3821,8 +3878,12 @@ static int be_vfs_if_create(struct be_adapter *adapter)
3821 } 3878 }
3822 } 3879 }
3823 3880
3824 status = be_if_create(adapter, &vf_cfg->if_handle, 3881 en_flags = cap_flags & (BE_IF_FLAGS_UNTAGGED |
3825 cap_flags, vf + 1); 3882 BE_IF_FLAGS_BROADCAST |
3883 BE_IF_FLAGS_MULTICAST |
3884 BE_IF_FLAGS_PASS_L3L4_ERRORS);
3885 status = be_cmd_if_create(adapter, cap_flags, en_flags,
3886 &vf_cfg->if_handle, vf + 1);
3826 if (status) 3887 if (status)
3827 return status; 3888 return status;
3828 } 3889 }
@@ -4194,15 +4255,8 @@ static int be_mac_setup(struct be_adapter *adapter)
4194 4255
4195 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN); 4256 memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
4196 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN); 4257 memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
4197 } else {
4198 /* Maybe the HW was reset; dev_addr must be re-programmed */
4199 memcpy(mac, adapter->netdev->dev_addr, ETH_ALEN);
4200 } 4258 }
4201 4259
4202 /* For BE3-R VFs, the PF programs the initial MAC address */
4203 if (!(BEx_chip(adapter) && be_virtfn(adapter)))
4204 be_cmd_pmac_add(adapter, mac, adapter->if_handle,
4205 &adapter->pmac_id[0], 0);
4206 return 0; 4260 return 0;
4207} 4261}
4208 4262
@@ -4342,6 +4396,7 @@ static int be_func_init(struct be_adapter *adapter)
4342static int be_setup(struct be_adapter *adapter) 4396static int be_setup(struct be_adapter *adapter)
4343{ 4397{
4344 struct device *dev = &adapter->pdev->dev; 4398 struct device *dev = &adapter->pdev->dev;
4399 u32 en_flags;
4345 int status; 4400 int status;
4346 4401
4347 status = be_func_init(adapter); 4402 status = be_func_init(adapter);
@@ -4364,8 +4419,11 @@ static int be_setup(struct be_adapter *adapter)
4364 if (status) 4419 if (status)
4365 goto err; 4420 goto err;
4366 4421
4367 status = be_if_create(adapter, &adapter->if_handle, 4422 /* will enable all the needed filter flags in be_open() */
4368 be_if_cap_flags(adapter), 0); 4423 en_flags = BE_IF_FLAGS_RSS | BE_IF_FLAGS_DEFQ_RSS;
4424 en_flags = en_flags & be_if_cap_flags(adapter);
4425 status = be_cmd_if_create(adapter, be_if_cap_flags(adapter), en_flags,
4426 &adapter->if_handle, 0);
4369 if (status) 4427 if (status)
4370 goto err; 4428 goto err;
4371 4429
@@ -4391,11 +4449,6 @@ static int be_setup(struct be_adapter *adapter)
4391 dev_err(dev, "Please upgrade firmware to version >= 4.0\n"); 4449 dev_err(dev, "Please upgrade firmware to version >= 4.0\n");
4392 } 4450 }
4393 4451
4394 if (adapter->vlans_added)
4395 be_vid_config(adapter);
4396
4397 be_set_rx_mode(adapter->netdev);
4398
4399 status = be_cmd_set_flow_control(adapter, adapter->tx_fc, 4452 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
4400 adapter->rx_fc); 4453 adapter->rx_fc);
4401 if (status) 4454 if (status)
diff --git a/drivers/net/ethernet/freescale/fec.h b/drivers/net/ethernet/freescale/fec.h
index 1eee73cccdf5..99d33e2d35e6 100644
--- a/drivers/net/ethernet/freescale/fec.h
+++ b/drivers/net/ethernet/freescale/fec.h
@@ -562,6 +562,7 @@ struct fec_enet_private {
562}; 562};
563 563
564void fec_ptp_init(struct platform_device *pdev); 564void fec_ptp_init(struct platform_device *pdev);
565void fec_ptp_stop(struct platform_device *pdev);
565void fec_ptp_start_cyclecounter(struct net_device *ndev); 566void fec_ptp_start_cyclecounter(struct net_device *ndev);
566int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr); 567int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr);
567int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr); 568int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr);
diff --git a/drivers/net/ethernet/freescale/fec_main.c b/drivers/net/ethernet/freescale/fec_main.c
index 42e20e5385ac..271bb5862346 100644
--- a/drivers/net/ethernet/freescale/fec_main.c
+++ b/drivers/net/ethernet/freescale/fec_main.c
@@ -3142,8 +3142,8 @@ static int fec_enet_init(struct net_device *ndev)
3142 fep->bufdesc_size; 3142 fep->bufdesc_size;
3143 3143
3144 /* Allocate memory for buffer descriptors. */ 3144 /* Allocate memory for buffer descriptors. */
3145 cbd_base = dma_alloc_coherent(NULL, bd_size, &bd_dma, 3145 cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma,
3146 GFP_KERNEL); 3146 GFP_KERNEL);
3147 if (!cbd_base) { 3147 if (!cbd_base) {
3148 return -ENOMEM; 3148 return -ENOMEM;
3149 } 3149 }
@@ -3431,6 +3431,12 @@ fec_probe(struct platform_device *pdev)
3431 fep->reg_phy = NULL; 3431 fep->reg_phy = NULL;
3432 } 3432 }
3433 3433
3434 pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT);
3435 pm_runtime_use_autosuspend(&pdev->dev);
3436 pm_runtime_get_noresume(&pdev->dev);
3437 pm_runtime_set_active(&pdev->dev);
3438 pm_runtime_enable(&pdev->dev);
3439
3434 fec_reset_phy(pdev); 3440 fec_reset_phy(pdev);
3435 3441
3436 if (fep->bufdesc_ex) 3442 if (fep->bufdesc_ex)
@@ -3465,8 +3471,6 @@ fec_probe(struct platform_device *pdev)
3465 netif_carrier_off(ndev); 3471 netif_carrier_off(ndev);
3466 fec_enet_clk_enable(ndev, false); 3472 fec_enet_clk_enable(ndev, false);
3467 pinctrl_pm_select_sleep_state(&pdev->dev); 3473 pinctrl_pm_select_sleep_state(&pdev->dev);
3468 pm_runtime_set_active(&pdev->dev);
3469 pm_runtime_enable(&pdev->dev);
3470 3474
3471 ret = register_netdev(ndev); 3475 ret = register_netdev(ndev);
3472 if (ret) 3476 if (ret)
@@ -3481,8 +3485,6 @@ fec_probe(struct platform_device *pdev)
3481 fep->rx_copybreak = COPYBREAK_DEFAULT; 3485 fep->rx_copybreak = COPYBREAK_DEFAULT;
3482 INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work); 3486 INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);
3483 3487
3484 pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT);
3485 pm_runtime_use_autosuspend(&pdev->dev);
3486 pm_runtime_mark_last_busy(&pdev->dev); 3488 pm_runtime_mark_last_busy(&pdev->dev);
3487 pm_runtime_put_autosuspend(&pdev->dev); 3489 pm_runtime_put_autosuspend(&pdev->dev);
3488 3490
@@ -3493,6 +3495,7 @@ failed_register:
3493failed_mii_init: 3495failed_mii_init:
3494failed_irq: 3496failed_irq:
3495failed_init: 3497failed_init:
3498 fec_ptp_stop(pdev);
3496 if (fep->reg_phy) 3499 if (fep->reg_phy)
3497 regulator_disable(fep->reg_phy); 3500 regulator_disable(fep->reg_phy);
3498failed_regulator: 3501failed_regulator:
@@ -3514,14 +3517,12 @@ fec_drv_remove(struct platform_device *pdev)
3514 struct net_device *ndev = platform_get_drvdata(pdev); 3517 struct net_device *ndev = platform_get_drvdata(pdev);
3515 struct fec_enet_private *fep = netdev_priv(ndev); 3518 struct fec_enet_private *fep = netdev_priv(ndev);
3516 3519
3517 cancel_delayed_work_sync(&fep->time_keep);
3518 cancel_work_sync(&fep->tx_timeout_work); 3520 cancel_work_sync(&fep->tx_timeout_work);
3521 fec_ptp_stop(pdev);
3519 unregister_netdev(ndev); 3522 unregister_netdev(ndev);
3520 fec_enet_mii_remove(fep); 3523 fec_enet_mii_remove(fep);
3521 if (fep->reg_phy) 3524 if (fep->reg_phy)
3522 regulator_disable(fep->reg_phy); 3525 regulator_disable(fep->reg_phy);
3523 if (fep->ptp_clock)
3524 ptp_clock_unregister(fep->ptp_clock);
3525 of_node_put(fep->phy_node); 3526 of_node_put(fep->phy_node);
3526 free_netdev(ndev); 3527 free_netdev(ndev);
3527 3528
diff --git a/drivers/net/ethernet/freescale/fec_ptp.c b/drivers/net/ethernet/freescale/fec_ptp.c
index a15663ad7f5e..f457a23d0bfb 100644
--- a/drivers/net/ethernet/freescale/fec_ptp.c
+++ b/drivers/net/ethernet/freescale/fec_ptp.c
@@ -604,6 +604,16 @@ void fec_ptp_init(struct platform_device *pdev)
604 schedule_delayed_work(&fep->time_keep, HZ); 604 schedule_delayed_work(&fep->time_keep, HZ);
605} 605}
606 606
607void fec_ptp_stop(struct platform_device *pdev)
608{
609 struct net_device *ndev = platform_get_drvdata(pdev);
610 struct fec_enet_private *fep = netdev_priv(ndev);
611
612 cancel_delayed_work_sync(&fep->time_keep);
613 if (fep->ptp_clock)
614 ptp_clock_unregister(fep->ptp_clock);
615}
616
607/** 617/**
608 * fec_ptp_check_pps_event 618 * fec_ptp_check_pps_event
609 * @fep: the fec_enet_private structure handle 619 * @fep: the fec_enet_private structure handle
diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
index 56316db6c5a6..cf8e54652df9 100644
--- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c
@@ -586,7 +586,8 @@ static int fs_enet_start_xmit(struct sk_buff *skb, struct net_device *dev)
586 frag = skb_shinfo(skb)->frags; 586 frag = skb_shinfo(skb)->frags;
587 while (nr_frags) { 587 while (nr_frags) {
588 CBDC_SC(bdp, 588 CBDC_SC(bdp,
589 BD_ENET_TX_STATS | BD_ENET_TX_LAST | BD_ENET_TX_TC); 589 BD_ENET_TX_STATS | BD_ENET_TX_INTR | BD_ENET_TX_LAST |
590 BD_ENET_TX_TC);
590 CBDS_SC(bdp, BD_ENET_TX_READY); 591 CBDS_SC(bdp, BD_ENET_TX_READY);
591 592
592 if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0) 593 if ((CBDR_SC(bdp) & BD_ENET_TX_WRAP) == 0)
diff --git a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
index b34214e2df5f..016743e355de 100644
--- a/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
+++ b/drivers/net/ethernet/freescale/fs_enet/mac-fec.c
@@ -110,7 +110,7 @@ static int do_pd_setup(struct fs_enet_private *fep)
110} 110}
111 111
112#define FEC_NAPI_RX_EVENT_MSK (FEC_ENET_RXF | FEC_ENET_RXB) 112#define FEC_NAPI_RX_EVENT_MSK (FEC_ENET_RXF | FEC_ENET_RXB)
113#define FEC_NAPI_TX_EVENT_MSK (FEC_ENET_TXF | FEC_ENET_TXB) 113#define FEC_NAPI_TX_EVENT_MSK (FEC_ENET_TXF)
114#define FEC_RX_EVENT (FEC_ENET_RXF) 114#define FEC_RX_EVENT (FEC_ENET_RXF)
115#define FEC_TX_EVENT (FEC_ENET_TXF) 115#define FEC_TX_EVENT (FEC_ENET_TXF)
116#define FEC_ERR_EVENT_MSK (FEC_ENET_HBERR | FEC_ENET_BABR | \ 116#define FEC_ERR_EVENT_MSK (FEC_ENET_HBERR | FEC_ENET_BABR | \
diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c
index ff875028fdff..2b7610f341b0 100644
--- a/drivers/net/ethernet/freescale/gianfar.c
+++ b/drivers/net/ethernet/freescale/gianfar.c
@@ -565,22 +565,6 @@ static void gfar_ints_enable(struct gfar_private *priv)
565 } 565 }
566} 566}
567 567
568static void lock_tx_qs(struct gfar_private *priv)
569{
570 int i;
571
572 for (i = 0; i < priv->num_tx_queues; i++)
573 spin_lock(&priv->tx_queue[i]->txlock);
574}
575
576static void unlock_tx_qs(struct gfar_private *priv)
577{
578 int i;
579
580 for (i = 0; i < priv->num_tx_queues; i++)
581 spin_unlock(&priv->tx_queue[i]->txlock);
582}
583
584static int gfar_alloc_tx_queues(struct gfar_private *priv) 568static int gfar_alloc_tx_queues(struct gfar_private *priv)
585{ 569{
586 int i; 570 int i;
@@ -1376,7 +1360,6 @@ static int gfar_probe(struct platform_device *ofdev)
1376 priv->dev = &ofdev->dev; 1360 priv->dev = &ofdev->dev;
1377 SET_NETDEV_DEV(dev, &ofdev->dev); 1361 SET_NETDEV_DEV(dev, &ofdev->dev);
1378 1362
1379 spin_lock_init(&priv->bflock);
1380 INIT_WORK(&priv->reset_task, gfar_reset_task); 1363 INIT_WORK(&priv->reset_task, gfar_reset_task);
1381 1364
1382 platform_set_drvdata(ofdev, priv); 1365 platform_set_drvdata(ofdev, priv);
@@ -1470,9 +1453,8 @@ static int gfar_probe(struct platform_device *ofdev)
1470 goto register_fail; 1453 goto register_fail;
1471 } 1454 }
1472 1455
1473 device_init_wakeup(&dev->dev, 1456 device_set_wakeup_capable(&dev->dev, priv->device_flags &
1474 priv->device_flags & 1457 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1475 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1476 1458
1477 /* fill out IRQ number and name fields */ 1459 /* fill out IRQ number and name fields */
1478 for (i = 0; i < priv->num_grps; i++) { 1460 for (i = 0; i < priv->num_grps; i++) {
@@ -1540,48 +1522,37 @@ static int gfar_suspend(struct device *dev)
1540 struct gfar_private *priv = dev_get_drvdata(dev); 1522 struct gfar_private *priv = dev_get_drvdata(dev);
1541 struct net_device *ndev = priv->ndev; 1523 struct net_device *ndev = priv->ndev;
1542 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1524 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1543 unsigned long flags;
1544 u32 tempval; 1525 u32 tempval;
1545
1546 int magic_packet = priv->wol_en && 1526 int magic_packet = priv->wol_en &&
1547 (priv->device_flags & 1527 (priv->device_flags &
1548 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); 1528 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1549 1529
1530 if (!netif_running(ndev))
1531 return 0;
1532
1533 disable_napi(priv);
1534 netif_tx_lock(ndev);
1550 netif_device_detach(ndev); 1535 netif_device_detach(ndev);
1536 netif_tx_unlock(ndev);
1551 1537
1552 if (netif_running(ndev)) { 1538 gfar_halt(priv);
1553 1539
1554 local_irq_save(flags); 1540 if (magic_packet) {
1555 lock_tx_qs(priv); 1541 /* Enable interrupt on Magic Packet */
1542 gfar_write(&regs->imask, IMASK_MAG);
1556 1543
1557 gfar_halt_nodisable(priv); 1544 /* Enable Magic Packet mode */
1545 tempval = gfar_read(&regs->maccfg2);
1546 tempval |= MACCFG2_MPEN;
1547 gfar_write(&regs->maccfg2, tempval);
1558 1548
1559 /* Disable Tx, and Rx if wake-on-LAN is disabled. */ 1549 /* re-enable the Rx block */
1560 tempval = gfar_read(&regs->maccfg1); 1550 tempval = gfar_read(&regs->maccfg1);
1561 1551 tempval |= MACCFG1_RX_EN;
1562 tempval &= ~MACCFG1_TX_EN;
1563
1564 if (!magic_packet)
1565 tempval &= ~MACCFG1_RX_EN;
1566
1567 gfar_write(&regs->maccfg1, tempval); 1552 gfar_write(&regs->maccfg1, tempval);
1568 1553
1569 unlock_tx_qs(priv); 1554 } else {
1570 local_irq_restore(flags); 1555 phy_stop(priv->phydev);
1571
1572 disable_napi(priv);
1573
1574 if (magic_packet) {
1575 /* Enable interrupt on Magic Packet */
1576 gfar_write(&regs->imask, IMASK_MAG);
1577
1578 /* Enable Magic Packet mode */
1579 tempval = gfar_read(&regs->maccfg2);
1580 tempval |= MACCFG2_MPEN;
1581 gfar_write(&regs->maccfg2, tempval);
1582 } else {
1583 phy_stop(priv->phydev);
1584 }
1585 } 1556 }
1586 1557
1587 return 0; 1558 return 0;
@@ -1592,37 +1563,26 @@ static int gfar_resume(struct device *dev)
1592 struct gfar_private *priv = dev_get_drvdata(dev); 1563 struct gfar_private *priv = dev_get_drvdata(dev);
1593 struct net_device *ndev = priv->ndev; 1564 struct net_device *ndev = priv->ndev;
1594 struct gfar __iomem *regs = priv->gfargrp[0].regs; 1565 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1595 unsigned long flags;
1596 u32 tempval; 1566 u32 tempval;
1597 int magic_packet = priv->wol_en && 1567 int magic_packet = priv->wol_en &&
1598 (priv->device_flags & 1568 (priv->device_flags &
1599 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); 1569 FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
1600 1570
1601 if (!netif_running(ndev)) { 1571 if (!netif_running(ndev))
1602 netif_device_attach(ndev);
1603 return 0; 1572 return 0;
1604 }
1605 1573
1606 if (!magic_packet && priv->phydev) 1574 if (magic_packet) {
1575 /* Disable Magic Packet mode */
1576 tempval = gfar_read(&regs->maccfg2);
1577 tempval &= ~MACCFG2_MPEN;
1578 gfar_write(&regs->maccfg2, tempval);
1579 } else {
1607 phy_start(priv->phydev); 1580 phy_start(priv->phydev);
1608 1581 }
1609 /* Disable Magic Packet mode, in case something
1610 * else woke us up.
1611 */
1612 local_irq_save(flags);
1613 lock_tx_qs(priv);
1614
1615 tempval = gfar_read(&regs->maccfg2);
1616 tempval &= ~MACCFG2_MPEN;
1617 gfar_write(&regs->maccfg2, tempval);
1618 1582
1619 gfar_start(priv); 1583 gfar_start(priv);
1620 1584
1621 unlock_tx_qs(priv);
1622 local_irq_restore(flags);
1623
1624 netif_device_attach(ndev); 1585 netif_device_attach(ndev);
1625
1626 enable_napi(priv); 1586 enable_napi(priv);
1627 1587
1628 return 0; 1588 return 0;
@@ -2045,7 +2005,8 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
2045 /* Install our interrupt handlers for Error, 2005 /* Install our interrupt handlers for Error,
2046 * Transmit, and Receive 2006 * Transmit, and Receive
2047 */ 2007 */
2048 err = request_irq(gfar_irq(grp, ER)->irq, gfar_error, 0, 2008 err = request_irq(gfar_irq(grp, ER)->irq, gfar_error,
2009 IRQF_NO_SUSPEND,
2049 gfar_irq(grp, ER)->name, grp); 2010 gfar_irq(grp, ER)->name, grp);
2050 if (err < 0) { 2011 if (err < 0) {
2051 netif_err(priv, intr, dev, "Can't get IRQ %d\n", 2012 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
@@ -2068,7 +2029,8 @@ static int register_grp_irqs(struct gfar_priv_grp *grp)
2068 goto rx_irq_fail; 2029 goto rx_irq_fail;
2069 } 2030 }
2070 } else { 2031 } else {
2071 err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt, 0, 2032 err = request_irq(gfar_irq(grp, TX)->irq, gfar_interrupt,
2033 IRQF_NO_SUSPEND,
2072 gfar_irq(grp, TX)->name, grp); 2034 gfar_irq(grp, TX)->name, grp);
2073 if (err < 0) { 2035 if (err < 0) {
2074 netif_err(priv, intr, dev, "Can't get IRQ %d\n", 2036 netif_err(priv, intr, dev, "Can't get IRQ %d\n",
@@ -2169,8 +2131,6 @@ static int gfar_enet_open(struct net_device *dev)
2169 if (err) 2131 if (err)
2170 return err; 2132 return err;
2171 2133
2172 device_set_wakeup_enable(&dev->dev, priv->wol_en);
2173
2174 return err; 2134 return err;
2175} 2135}
2176 2136
diff --git a/drivers/net/ethernet/freescale/gianfar.h b/drivers/net/ethernet/freescale/gianfar.h
index daa1d37de642..5545e4103368 100644
--- a/drivers/net/ethernet/freescale/gianfar.h
+++ b/drivers/net/ethernet/freescale/gianfar.h
@@ -1145,9 +1145,6 @@ struct gfar_private {
1145 int oldduplex; 1145 int oldduplex;
1146 int oldlink; 1146 int oldlink;
1147 1147
1148 /* Bitfield update lock */
1149 spinlock_t bflock;
1150
1151 uint32_t msg_enable; 1148 uint32_t msg_enable;
1152 1149
1153 struct work_struct reset_task; 1150 struct work_struct reset_task;
diff --git a/drivers/net/ethernet/freescale/gianfar_ethtool.c b/drivers/net/ethernet/freescale/gianfar_ethtool.c
index fda12fb32ec7..5b90fcf96265 100644
--- a/drivers/net/ethernet/freescale/gianfar_ethtool.c
+++ b/drivers/net/ethernet/freescale/gianfar_ethtool.c
@@ -653,7 +653,6 @@ static void gfar_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
653static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol) 653static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
654{ 654{
655 struct gfar_private *priv = netdev_priv(dev); 655 struct gfar_private *priv = netdev_priv(dev);
656 unsigned long flags;
657 656
658 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) && 657 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
659 wol->wolopts != 0) 658 wol->wolopts != 0)
@@ -664,9 +663,7 @@ static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
664 663
665 device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC); 664 device_set_wakeup_enable(&dev->dev, wol->wolopts & WAKE_MAGIC);
666 665
667 spin_lock_irqsave(&priv->bflock, flags); 666 priv->wol_en = !!device_may_wakeup(&dev->dev);
668 priv->wol_en = !!device_may_wakeup(&dev->dev);
669 spin_unlock_irqrestore(&priv->bflock, flags);
670 667
671 return 0; 668 return 0;
672} 669}
@@ -903,27 +900,6 @@ static int gfar_check_filer_hardware(struct gfar_private *priv)
903 return 0; 900 return 0;
904} 901}
905 902
906static int gfar_comp_asc(const void *a, const void *b)
907{
908 return memcmp(a, b, 4);
909}
910
911static int gfar_comp_desc(const void *a, const void *b)
912{
913 return -memcmp(a, b, 4);
914}
915
916static void gfar_swap(void *a, void *b, int size)
917{
918 u32 *_a = a;
919 u32 *_b = b;
920
921 swap(_a[0], _b[0]);
922 swap(_a[1], _b[1]);
923 swap(_a[2], _b[2]);
924 swap(_a[3], _b[3]);
925}
926
927/* Write a mask to filer cache */ 903/* Write a mask to filer cache */
928static void gfar_set_mask(u32 mask, struct filer_table *tab) 904static void gfar_set_mask(u32 mask, struct filer_table *tab)
929{ 905{
@@ -1273,310 +1249,6 @@ static int gfar_convert_to_filer(struct ethtool_rx_flow_spec *rule,
1273 return 0; 1249 return 0;
1274} 1250}
1275 1251
1276/* Copy size filer entries */
1277static void gfar_copy_filer_entries(struct gfar_filer_entry dst[0],
1278 struct gfar_filer_entry src[0], s32 size)
1279{
1280 while (size > 0) {
1281 size--;
1282 dst[size].ctrl = src[size].ctrl;
1283 dst[size].prop = src[size].prop;
1284 }
1285}
1286
1287/* Delete the contents of the filer-table between start and end
1288 * and collapse them
1289 */
1290static int gfar_trim_filer_entries(u32 begin, u32 end, struct filer_table *tab)
1291{
1292 int length;
1293
1294 if (end > MAX_FILER_CACHE_IDX || end < begin)
1295 return -EINVAL;
1296
1297 end++;
1298 length = end - begin;
1299
1300 /* Copy */
1301 while (end < tab->index) {
1302 tab->fe[begin].ctrl = tab->fe[end].ctrl;
1303 tab->fe[begin++].prop = tab->fe[end++].prop;
1304
1305 }
1306 /* Fill up with don't cares */
1307 while (begin < tab->index) {
1308 tab->fe[begin].ctrl = 0x60;
1309 tab->fe[begin].prop = 0xFFFFFFFF;
1310 begin++;
1311 }
1312
1313 tab->index -= length;
1314 return 0;
1315}
1316
1317/* Make space on the wanted location */
1318static int gfar_expand_filer_entries(u32 begin, u32 length,
1319 struct filer_table *tab)
1320{
1321 if (length == 0 || length + tab->index > MAX_FILER_CACHE_IDX ||
1322 begin > MAX_FILER_CACHE_IDX)
1323 return -EINVAL;
1324
1325 gfar_copy_filer_entries(&(tab->fe[begin + length]), &(tab->fe[begin]),
1326 tab->index - length + 1);
1327
1328 tab->index += length;
1329 return 0;
1330}
1331
1332static int gfar_get_next_cluster_start(int start, struct filer_table *tab)
1333{
1334 for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
1335 start++) {
1336 if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
1337 (RQFCR_AND | RQFCR_CLE))
1338 return start;
1339 }
1340 return -1;
1341}
1342
1343static int gfar_get_next_cluster_end(int start, struct filer_table *tab)
1344{
1345 for (; (start < tab->index) && (start < MAX_FILER_CACHE_IDX - 1);
1346 start++) {
1347 if ((tab->fe[start].ctrl & (RQFCR_AND | RQFCR_CLE)) ==
1348 (RQFCR_CLE))
1349 return start;
1350 }
1351 return -1;
1352}
1353
1354/* Uses hardwares clustering option to reduce
1355 * the number of filer table entries
1356 */
1357static void gfar_cluster_filer(struct filer_table *tab)
1358{
1359 s32 i = -1, j, iend, jend;
1360
1361 while ((i = gfar_get_next_cluster_start(++i, tab)) != -1) {
1362 j = i;
1363 while ((j = gfar_get_next_cluster_start(++j, tab)) != -1) {
1364 /* The cluster entries self and the previous one
1365 * (a mask) must be identical!
1366 */
1367 if (tab->fe[i].ctrl != tab->fe[j].ctrl)
1368 break;
1369 if (tab->fe[i].prop != tab->fe[j].prop)
1370 break;
1371 if (tab->fe[i - 1].ctrl != tab->fe[j - 1].ctrl)
1372 break;
1373 if (tab->fe[i - 1].prop != tab->fe[j - 1].prop)
1374 break;
1375 iend = gfar_get_next_cluster_end(i, tab);
1376 jend = gfar_get_next_cluster_end(j, tab);
1377 if (jend == -1 || iend == -1)
1378 break;
1379
1380 /* First we make some free space, where our cluster
1381 * element should be. Then we copy it there and finally
1382 * delete in from its old location.
1383 */
1384 if (gfar_expand_filer_entries(iend, (jend - j), tab) ==
1385 -EINVAL)
1386 break;
1387
1388 gfar_copy_filer_entries(&(tab->fe[iend + 1]),
1389 &(tab->fe[jend + 1]), jend - j);
1390
1391 if (gfar_trim_filer_entries(jend - 1,
1392 jend + (jend - j),
1393 tab) == -EINVAL)
1394 return;
1395
1396 /* Mask out cluster bit */
1397 tab->fe[iend].ctrl &= ~(RQFCR_CLE);
1398 }
1399 }
1400}
1401
1402/* Swaps the masked bits of a1<>a2 and b1<>b2 */
1403static void gfar_swap_bits(struct gfar_filer_entry *a1,
1404 struct gfar_filer_entry *a2,
1405 struct gfar_filer_entry *b1,
1406 struct gfar_filer_entry *b2, u32 mask)
1407{
1408 u32 temp[4];
1409 temp[0] = a1->ctrl & mask;
1410 temp[1] = a2->ctrl & mask;
1411 temp[2] = b1->ctrl & mask;
1412 temp[3] = b2->ctrl & mask;
1413
1414 a1->ctrl &= ~mask;
1415 a2->ctrl &= ~mask;
1416 b1->ctrl &= ~mask;
1417 b2->ctrl &= ~mask;
1418
1419 a1->ctrl |= temp[1];
1420 a2->ctrl |= temp[0];
1421 b1->ctrl |= temp[3];
1422 b2->ctrl |= temp[2];
1423}
1424
1425/* Generate a list consisting of masks values with their start and
1426 * end of validity and block as indicator for parts belonging
1427 * together (glued by ANDs) in mask_table
1428 */
1429static u32 gfar_generate_mask_table(struct gfar_mask_entry *mask_table,
1430 struct filer_table *tab)
1431{
1432 u32 i, and_index = 0, block_index = 1;
1433
1434 for (i = 0; i < tab->index; i++) {
1435
1436 /* LSByte of control = 0 sets a mask */
1437 if (!(tab->fe[i].ctrl & 0xF)) {
1438 mask_table[and_index].mask = tab->fe[i].prop;
1439 mask_table[and_index].start = i;
1440 mask_table[and_index].block = block_index;
1441 if (and_index >= 1)
1442 mask_table[and_index - 1].end = i - 1;
1443 and_index++;
1444 }
1445 /* cluster starts and ends will be separated because they should
1446 * hold their position
1447 */
1448 if (tab->fe[i].ctrl & RQFCR_CLE)
1449 block_index++;
1450 /* A not set AND indicates the end of a depended block */
1451 if (!(tab->fe[i].ctrl & RQFCR_AND))
1452 block_index++;
1453 }
1454
1455 mask_table[and_index - 1].end = i - 1;
1456
1457 return and_index;
1458}
1459
1460/* Sorts the entries of mask_table by the values of the masks.
1461 * Important: The 0xFF80 flags of the first and last entry of a
1462 * block must hold their position (which queue, CLusterEnable, ReJEct,
1463 * AND)
1464 */
1465static void gfar_sort_mask_table(struct gfar_mask_entry *mask_table,
1466 struct filer_table *temp_table, u32 and_index)
1467{
1468 /* Pointer to compare function (_asc or _desc) */
1469 int (*gfar_comp)(const void *, const void *);
1470
1471 u32 i, size = 0, start = 0, prev = 1;
1472 u32 old_first, old_last, new_first, new_last;
1473
1474 gfar_comp = &gfar_comp_desc;
1475
1476 for (i = 0; i < and_index; i++) {
1477 if (prev != mask_table[i].block) {
1478 old_first = mask_table[start].start + 1;
1479 old_last = mask_table[i - 1].end;
1480 sort(mask_table + start, size,
1481 sizeof(struct gfar_mask_entry),
1482 gfar_comp, &gfar_swap);
1483
1484 /* Toggle order for every block. This makes the
1485 * thing more efficient!
1486 */
1487 if (gfar_comp == gfar_comp_desc)
1488 gfar_comp = &gfar_comp_asc;
1489 else
1490 gfar_comp = &gfar_comp_desc;
1491
1492 new_first = mask_table[start].start + 1;
1493 new_last = mask_table[i - 1].end;
1494
1495 gfar_swap_bits(&temp_table->fe[new_first],
1496 &temp_table->fe[old_first],
1497 &temp_table->fe[new_last],
1498 &temp_table->fe[old_last],
1499 RQFCR_QUEUE | RQFCR_CLE |
1500 RQFCR_RJE | RQFCR_AND);
1501
1502 start = i;
1503 size = 0;
1504 }
1505 size++;
1506 prev = mask_table[i].block;
1507 }
1508}
1509
1510/* Reduces the number of masks needed in the filer table to save entries
1511 * This is done by sorting the masks of a depended block. A depended block is
1512 * identified by gluing ANDs or CLE. The sorting order toggles after every
1513 * block. Of course entries in scope of a mask must change their location with
1514 * it.
1515 */
1516static int gfar_optimize_filer_masks(struct filer_table *tab)
1517{
1518 struct filer_table *temp_table;
1519 struct gfar_mask_entry *mask_table;
1520
1521 u32 and_index = 0, previous_mask = 0, i = 0, j = 0, size = 0;
1522 s32 ret = 0;
1523
1524 /* We need a copy of the filer table because
1525 * we want to change its order
1526 */
1527 temp_table = kmemdup(tab, sizeof(*temp_table), GFP_KERNEL);
1528 if (temp_table == NULL)
1529 return -ENOMEM;
1530
1531 mask_table = kcalloc(MAX_FILER_CACHE_IDX / 2 + 1,
1532 sizeof(struct gfar_mask_entry), GFP_KERNEL);
1533
1534 if (mask_table == NULL) {
1535 ret = -ENOMEM;
1536 goto end;
1537 }
1538
1539 and_index = gfar_generate_mask_table(mask_table, tab);
1540
1541 gfar_sort_mask_table(mask_table, temp_table, and_index);
1542
1543 /* Now we can copy the data from our duplicated filer table to
1544 * the real one in the order the mask table says
1545 */
1546 for (i = 0; i < and_index; i++) {
1547 size = mask_table[i].end - mask_table[i].start + 1;
1548 gfar_copy_filer_entries(&(tab->fe[j]),
1549 &(temp_table->fe[mask_table[i].start]), size);
1550 j += size;
1551 }
1552
1553 /* And finally we just have to check for duplicated masks and drop the
1554 * second ones
1555 */
1556 for (i = 0; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
1557 if (tab->fe[i].ctrl == 0x80) {
1558 previous_mask = i++;
1559 break;
1560 }
1561 }
1562 for (; i < tab->index && i < MAX_FILER_CACHE_IDX; i++) {
1563 if (tab->fe[i].ctrl == 0x80) {
1564 if (tab->fe[i].prop == tab->fe[previous_mask].prop) {
1565 /* Two identical ones found!
1566 * So drop the second one!
1567 */
1568 gfar_trim_filer_entries(i, i, tab);
1569 } else
1570 /* Not identical! */
1571 previous_mask = i;
1572 }
1573 }
1574
1575 kfree(mask_table);
1576end: kfree(temp_table);
1577 return ret;
1578}
1579
1580/* Write the bit-pattern from software's buffer to hardware registers */ 1252/* Write the bit-pattern from software's buffer to hardware registers */
1581static int gfar_write_filer_table(struct gfar_private *priv, 1253static int gfar_write_filer_table(struct gfar_private *priv,
1582 struct filer_table *tab) 1254 struct filer_table *tab)
@@ -1586,11 +1258,10 @@ static int gfar_write_filer_table(struct gfar_private *priv,
1586 return -EBUSY; 1258 return -EBUSY;
1587 1259
1588 /* Fill regular entries */ 1260 /* Fill regular entries */
1589 for (; i < MAX_FILER_IDX - 1 && (tab->fe[i].ctrl | tab->fe[i].prop); 1261 for (; i < MAX_FILER_IDX && (tab->fe[i].ctrl | tab->fe[i].prop); i++)
1590 i++)
1591 gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop); 1262 gfar_write_filer(priv, i, tab->fe[i].ctrl, tab->fe[i].prop);
1592 /* Fill the rest with fall-troughs */ 1263 /* Fill the rest with fall-troughs */
1593 for (; i < MAX_FILER_IDX - 1; i++) 1264 for (; i < MAX_FILER_IDX; i++)
1594 gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF); 1265 gfar_write_filer(priv, i, 0x60, 0xFFFFFFFF);
1595 /* Last entry must be default accept 1266 /* Last entry must be default accept
1596 * because that's what people expect 1267 * because that's what people expect
@@ -1624,7 +1295,6 @@ static int gfar_process_filer_changes(struct gfar_private *priv)
1624{ 1295{
1625 struct ethtool_flow_spec_container *j; 1296 struct ethtool_flow_spec_container *j;
1626 struct filer_table *tab; 1297 struct filer_table *tab;
1627 s32 i = 0;
1628 s32 ret = 0; 1298 s32 ret = 0;
1629 1299
1630 /* So index is set to zero, too! */ 1300 /* So index is set to zero, too! */
@@ -1649,17 +1319,6 @@ static int gfar_process_filer_changes(struct gfar_private *priv)
1649 } 1319 }
1650 } 1320 }
1651 1321
1652 i = tab->index;
1653
1654 /* Optimizations to save entries */
1655 gfar_cluster_filer(tab);
1656 gfar_optimize_filer_masks(tab);
1657
1658 pr_debug("\tSummary:\n"
1659 "\tData on hardware: %d\n"
1660 "\tCompression rate: %d%%\n",
1661 tab->index, 100 - (100 * tab->index) / i);
1662
1663 /* Write everything to hardware */ 1322 /* Write everything to hardware */
1664 ret = gfar_write_filer_table(priv, tab); 1323 ret = gfar_write_filer_table(priv, tab);
1665 if (ret == -EBUSY) { 1324 if (ret == -EBUSY) {
@@ -1725,13 +1384,14 @@ static int gfar_add_cls(struct gfar_private *priv,
1725 } 1384 }
1726 1385
1727process: 1386process:
1387 priv->rx_list.count++;
1728 ret = gfar_process_filer_changes(priv); 1388 ret = gfar_process_filer_changes(priv);
1729 if (ret) 1389 if (ret)
1730 goto clean_list; 1390 goto clean_list;
1731 priv->rx_list.count++;
1732 return ret; 1391 return ret;
1733 1392
1734clean_list: 1393clean_list:
1394 priv->rx_list.count--;
1735 list_del(&temp->list); 1395 list_del(&temp->list);
1736clean_mem: 1396clean_mem:
1737 kfree(temp); 1397 kfree(temp);
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 370e20ed224c..62e48bc0cb23 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -1462,7 +1462,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
1462 struct mvneta_rx_queue *rxq) 1462 struct mvneta_rx_queue *rxq)
1463{ 1463{
1464 struct net_device *dev = pp->dev; 1464 struct net_device *dev = pp->dev;
1465 int rx_done, rx_filled; 1465 int rx_done;
1466 u32 rcvd_pkts = 0; 1466 u32 rcvd_pkts = 0;
1467 u32 rcvd_bytes = 0; 1467 u32 rcvd_bytes = 0;
1468 1468
@@ -1473,7 +1473,6 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
1473 rx_todo = rx_done; 1473 rx_todo = rx_done;
1474 1474
1475 rx_done = 0; 1475 rx_done = 0;
1476 rx_filled = 0;
1477 1476
1478 /* Fairness NAPI loop */ 1477 /* Fairness NAPI loop */
1479 while (rx_done < rx_todo) { 1478 while (rx_done < rx_todo) {
@@ -1484,7 +1483,6 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
1484 int rx_bytes, err; 1483 int rx_bytes, err;
1485 1484
1486 rx_done++; 1485 rx_done++;
1487 rx_filled++;
1488 rx_status = rx_desc->status; 1486 rx_status = rx_desc->status;
1489 rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE); 1487 rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE);
1490 data = (unsigned char *)rx_desc->buf_cookie; 1488 data = (unsigned char *)rx_desc->buf_cookie;
@@ -1524,6 +1522,14 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
1524 continue; 1522 continue;
1525 } 1523 }
1526 1524
1525 /* Refill processing */
1526 err = mvneta_rx_refill(pp, rx_desc);
1527 if (err) {
1528 netdev_err(dev, "Linux processing - Can't refill\n");
1529 rxq->missed++;
1530 goto err_drop_frame;
1531 }
1532
1527 skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size); 1533 skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size);
1528 if (!skb) 1534 if (!skb)
1529 goto err_drop_frame; 1535 goto err_drop_frame;
@@ -1543,14 +1549,6 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
1543 mvneta_rx_csum(pp, rx_status, skb); 1549 mvneta_rx_csum(pp, rx_status, skb);
1544 1550
1545 napi_gro_receive(&pp->napi, skb); 1551 napi_gro_receive(&pp->napi, skb);
1546
1547 /* Refill processing */
1548 err = mvneta_rx_refill(pp, rx_desc);
1549 if (err) {
1550 netdev_err(dev, "Linux processing - Can't refill\n");
1551 rxq->missed++;
1552 rx_filled--;
1553 }
1554 } 1552 }
1555 1553
1556 if (rcvd_pkts) { 1554 if (rcvd_pkts) {
@@ -1563,7 +1561,7 @@ static int mvneta_rx(struct mvneta_port *pp, int rx_todo,
1563 } 1561 }
1564 1562
1565 /* Update rxq management counters */ 1563 /* Update rxq management counters */
1566 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_filled); 1564 mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done);
1567 1565
1568 return rx_done; 1566 return rx_done;
1569} 1567}
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 3e8b1bfb1f2e..d9884fd15b45 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -27,6 +27,8 @@
27#include <linux/of_address.h> 27#include <linux/of_address.h>
28#include <linux/phy.h> 28#include <linux/phy.h>
29#include <linux/clk.h> 29#include <linux/clk.h>
30#include <linux/hrtimer.h>
31#include <linux/ktime.h>
30#include <uapi/linux/ppp_defs.h> 32#include <uapi/linux/ppp_defs.h>
31#include <net/ip.h> 33#include <net/ip.h>
32#include <net/ipv6.h> 34#include <net/ipv6.h>
@@ -299,6 +301,7 @@
299 301
300/* Coalescing */ 302/* Coalescing */
301#define MVPP2_TXDONE_COAL_PKTS_THRESH 15 303#define MVPP2_TXDONE_COAL_PKTS_THRESH 15
304#define MVPP2_TXDONE_HRTIMER_PERIOD_NS 1000000UL
302#define MVPP2_RX_COAL_PKTS 32 305#define MVPP2_RX_COAL_PKTS 32
303#define MVPP2_RX_COAL_USEC 100 306#define MVPP2_RX_COAL_USEC 100
304 307
@@ -660,6 +663,14 @@ struct mvpp2_pcpu_stats {
660 u64 tx_bytes; 663 u64 tx_bytes;
661}; 664};
662 665
666/* Per-CPU port control */
667struct mvpp2_port_pcpu {
668 struct hrtimer tx_done_timer;
669 bool timer_scheduled;
670 /* Tasklet for egress finalization */
671 struct tasklet_struct tx_done_tasklet;
672};
673
663struct mvpp2_port { 674struct mvpp2_port {
664 u8 id; 675 u8 id;
665 676
@@ -679,6 +690,9 @@ struct mvpp2_port {
679 u32 pending_cause_rx; 690 u32 pending_cause_rx;
680 struct napi_struct napi; 691 struct napi_struct napi;
681 692
693 /* Per-CPU port control */
694 struct mvpp2_port_pcpu __percpu *pcpu;
695
682 /* Flags */ 696 /* Flags */
683 unsigned long flags; 697 unsigned long flags;
684 698
@@ -776,6 +790,9 @@ struct mvpp2_txq_pcpu {
776 /* Array of transmitted skb */ 790 /* Array of transmitted skb */
777 struct sk_buff **tx_skb; 791 struct sk_buff **tx_skb;
778 792
793 /* Array of transmitted buffers' physical addresses */
794 dma_addr_t *tx_buffs;
795
779 /* Index of last TX DMA descriptor that was inserted */ 796 /* Index of last TX DMA descriptor that was inserted */
780 int txq_put_index; 797 int txq_put_index;
781 798
@@ -913,8 +930,6 @@ struct mvpp2_bm_pool {
913 /* Occupied buffers indicator */ 930 /* Occupied buffers indicator */
914 atomic_t in_use; 931 atomic_t in_use;
915 int in_use_thresh; 932 int in_use_thresh;
916
917 spinlock_t lock;
918}; 933};
919 934
920struct mvpp2_buff_hdr { 935struct mvpp2_buff_hdr {
@@ -963,9 +978,13 @@ static void mvpp2_txq_inc_get(struct mvpp2_txq_pcpu *txq_pcpu)
963} 978}
964 979
965static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu, 980static void mvpp2_txq_inc_put(struct mvpp2_txq_pcpu *txq_pcpu,
966 struct sk_buff *skb) 981 struct sk_buff *skb,
982 struct mvpp2_tx_desc *tx_desc)
967{ 983{
968 txq_pcpu->tx_skb[txq_pcpu->txq_put_index] = skb; 984 txq_pcpu->tx_skb[txq_pcpu->txq_put_index] = skb;
985 if (skb)
986 txq_pcpu->tx_buffs[txq_pcpu->txq_put_index] =
987 tx_desc->buf_phys_addr;
969 txq_pcpu->txq_put_index++; 988 txq_pcpu->txq_put_index++;
970 if (txq_pcpu->txq_put_index == txq_pcpu->size) 989 if (txq_pcpu->txq_put_index == txq_pcpu->size)
971 txq_pcpu->txq_put_index = 0; 990 txq_pcpu->txq_put_index = 0;
@@ -3376,7 +3395,6 @@ static int mvpp2_bm_pool_create(struct platform_device *pdev,
3376 bm_pool->pkt_size = 0; 3395 bm_pool->pkt_size = 0;
3377 bm_pool->buf_num = 0; 3396 bm_pool->buf_num = 0;
3378 atomic_set(&bm_pool->in_use, 0); 3397 atomic_set(&bm_pool->in_use, 0);
3379 spin_lock_init(&bm_pool->lock);
3380 3398
3381 return 0; 3399 return 0;
3382} 3400}
@@ -3647,7 +3665,6 @@ static struct mvpp2_bm_pool *
3647mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type, 3665mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
3648 int pkt_size) 3666 int pkt_size)
3649{ 3667{
3650 unsigned long flags = 0;
3651 struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool]; 3668 struct mvpp2_bm_pool *new_pool = &port->priv->bm_pools[pool];
3652 int num; 3669 int num;
3653 3670
@@ -3656,8 +3673,6 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
3656 return NULL; 3673 return NULL;
3657 } 3674 }
3658 3675
3659 spin_lock_irqsave(&new_pool->lock, flags);
3660
3661 if (new_pool->type == MVPP2_BM_FREE) 3676 if (new_pool->type == MVPP2_BM_FREE)
3662 new_pool->type = type; 3677 new_pool->type = type;
3663 3678
@@ -3686,8 +3701,6 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
3686 if (num != pkts_num) { 3701 if (num != pkts_num) {
3687 WARN(1, "pool %d: %d of %d allocated\n", 3702 WARN(1, "pool %d: %d of %d allocated\n",
3688 new_pool->id, num, pkts_num); 3703 new_pool->id, num, pkts_num);
3689 /* We need to undo the bufs_add() allocations */
3690 spin_unlock_irqrestore(&new_pool->lock, flags);
3691 return NULL; 3704 return NULL;
3692 } 3705 }
3693 } 3706 }
@@ -3695,15 +3708,12 @@ mvpp2_bm_pool_use(struct mvpp2_port *port, int pool, enum mvpp2_bm_type type,
3695 mvpp2_bm_pool_bufsize_set(port->priv, new_pool, 3708 mvpp2_bm_pool_bufsize_set(port->priv, new_pool,
3696 MVPP2_RX_BUF_SIZE(new_pool->pkt_size)); 3709 MVPP2_RX_BUF_SIZE(new_pool->pkt_size));
3697 3710
3698 spin_unlock_irqrestore(&new_pool->lock, flags);
3699
3700 return new_pool; 3711 return new_pool;
3701} 3712}
3702 3713
3703/* Initialize pools for swf */ 3714/* Initialize pools for swf */
3704static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port) 3715static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
3705{ 3716{
3706 unsigned long flags = 0;
3707 int rxq; 3717 int rxq;
3708 3718
3709 if (!port->pool_long) { 3719 if (!port->pool_long) {
@@ -3714,9 +3724,7 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
3714 if (!port->pool_long) 3724 if (!port->pool_long)
3715 return -ENOMEM; 3725 return -ENOMEM;
3716 3726
3717 spin_lock_irqsave(&port->pool_long->lock, flags);
3718 port->pool_long->port_map |= (1 << port->id); 3727 port->pool_long->port_map |= (1 << port->id);
3719 spin_unlock_irqrestore(&port->pool_long->lock, flags);
3720 3728
3721 for (rxq = 0; rxq < rxq_number; rxq++) 3729 for (rxq = 0; rxq < rxq_number; rxq++)
3722 mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id); 3730 mvpp2_rxq_long_pool_set(port, rxq, port->pool_long->id);
@@ -3730,9 +3738,7 @@ static int mvpp2_swf_bm_pool_init(struct mvpp2_port *port)
3730 if (!port->pool_short) 3738 if (!port->pool_short)
3731 return -ENOMEM; 3739 return -ENOMEM;
3732 3740
3733 spin_lock_irqsave(&port->pool_short->lock, flags);
3734 port->pool_short->port_map |= (1 << port->id); 3741 port->pool_short->port_map |= (1 << port->id);
3735 spin_unlock_irqrestore(&port->pool_short->lock, flags);
3736 3742
3737 for (rxq = 0; rxq < rxq_number; rxq++) 3743 for (rxq = 0; rxq < rxq_number; rxq++)
3738 mvpp2_rxq_short_pool_set(port, rxq, 3744 mvpp2_rxq_short_pool_set(port, rxq,
@@ -3806,7 +3812,6 @@ static void mvpp2_interrupts_unmask(void *arg)
3806 3812
3807 mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id), 3813 mvpp2_write(port->priv, MVPP2_ISR_RX_TX_MASK_REG(port->id),
3808 (MVPP2_CAUSE_MISC_SUM_MASK | 3814 (MVPP2_CAUSE_MISC_SUM_MASK |
3809 MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK |
3810 MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK)); 3815 MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK));
3811} 3816}
3812 3817
@@ -4382,23 +4387,6 @@ static void mvpp2_rx_time_coal_set(struct mvpp2_port *port,
4382 rxq->time_coal = usec; 4387 rxq->time_coal = usec;
4383} 4388}
4384 4389
4385/* Set threshold for TX_DONE pkts coalescing */
4386static void mvpp2_tx_done_pkts_coal_set(void *arg)
4387{
4388 struct mvpp2_port *port = arg;
4389 int queue;
4390 u32 val;
4391
4392 for (queue = 0; queue < txq_number; queue++) {
4393 struct mvpp2_tx_queue *txq = port->txqs[queue];
4394
4395 val = (txq->done_pkts_coal << MVPP2_TRANSMITTED_THRESH_OFFSET) &
4396 MVPP2_TRANSMITTED_THRESH_MASK;
4397 mvpp2_write(port->priv, MVPP2_TXQ_NUM_REG, txq->id);
4398 mvpp2_write(port->priv, MVPP2_TXQ_THRESH_REG, val);
4399 }
4400}
4401
4402/* Free Tx queue skbuffs */ 4390/* Free Tx queue skbuffs */
4403static void mvpp2_txq_bufs_free(struct mvpp2_port *port, 4391static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
4404 struct mvpp2_tx_queue *txq, 4392 struct mvpp2_tx_queue *txq,
@@ -4407,8 +4395,8 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
4407 int i; 4395 int i;
4408 4396
4409 for (i = 0; i < num; i++) { 4397 for (i = 0; i < num; i++) {
4410 struct mvpp2_tx_desc *tx_desc = txq->descs + 4398 dma_addr_t buf_phys_addr =
4411 txq_pcpu->txq_get_index; 4399 txq_pcpu->tx_buffs[txq_pcpu->txq_get_index];
4412 struct sk_buff *skb = txq_pcpu->tx_skb[txq_pcpu->txq_get_index]; 4400 struct sk_buff *skb = txq_pcpu->tx_skb[txq_pcpu->txq_get_index];
4413 4401
4414 mvpp2_txq_inc_get(txq_pcpu); 4402 mvpp2_txq_inc_get(txq_pcpu);
@@ -4416,8 +4404,8 @@ static void mvpp2_txq_bufs_free(struct mvpp2_port *port,
4416 if (!skb) 4404 if (!skb)
4417 continue; 4405 continue;
4418 4406
4419 dma_unmap_single(port->dev->dev.parent, tx_desc->buf_phys_addr, 4407 dma_unmap_single(port->dev->dev.parent, buf_phys_addr,
4420 tx_desc->data_size, DMA_TO_DEVICE); 4408 skb_headlen(skb), DMA_TO_DEVICE);
4421 dev_kfree_skb_any(skb); 4409 dev_kfree_skb_any(skb);
4422 } 4410 }
4423} 4411}
@@ -4433,7 +4421,7 @@ static inline struct mvpp2_rx_queue *mvpp2_get_rx_queue(struct mvpp2_port *port,
4433static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port, 4421static inline struct mvpp2_tx_queue *mvpp2_get_tx_queue(struct mvpp2_port *port,
4434 u32 cause) 4422 u32 cause)
4435{ 4423{
4436 int queue = fls(cause >> 16) - 1; 4424 int queue = fls(cause) - 1;
4437 4425
4438 return port->txqs[queue]; 4426 return port->txqs[queue];
4439} 4427}
@@ -4460,6 +4448,29 @@ static void mvpp2_txq_done(struct mvpp2_port *port, struct mvpp2_tx_queue *txq,
4460 netif_tx_wake_queue(nq); 4448 netif_tx_wake_queue(nq);
4461} 4449}
4462 4450
4451static unsigned int mvpp2_tx_done(struct mvpp2_port *port, u32 cause)
4452{
4453 struct mvpp2_tx_queue *txq;
4454 struct mvpp2_txq_pcpu *txq_pcpu;
4455 unsigned int tx_todo = 0;
4456
4457 while (cause) {
4458 txq = mvpp2_get_tx_queue(port, cause);
4459 if (!txq)
4460 break;
4461
4462 txq_pcpu = this_cpu_ptr(txq->pcpu);
4463
4464 if (txq_pcpu->count) {
4465 mvpp2_txq_done(port, txq, txq_pcpu);
4466 tx_todo += txq_pcpu->count;
4467 }
4468
4469 cause &= ~(1 << txq->log_id);
4470 }
4471 return tx_todo;
4472}
4473
4463/* Rx/Tx queue initialization/cleanup methods */ 4474/* Rx/Tx queue initialization/cleanup methods */
4464 4475
4465/* Allocate and initialize descriptors for aggr TXQ */ 4476/* Allocate and initialize descriptors for aggr TXQ */
@@ -4649,12 +4660,13 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
4649 txq_pcpu->tx_skb = kmalloc(txq_pcpu->size * 4660 txq_pcpu->tx_skb = kmalloc(txq_pcpu->size *
4650 sizeof(*txq_pcpu->tx_skb), 4661 sizeof(*txq_pcpu->tx_skb),
4651 GFP_KERNEL); 4662 GFP_KERNEL);
4652 if (!txq_pcpu->tx_skb) { 4663 if (!txq_pcpu->tx_skb)
4653 dma_free_coherent(port->dev->dev.parent, 4664 goto error;
4654 txq->size * MVPP2_DESC_ALIGNED_SIZE, 4665
4655 txq->descs, txq->descs_phys); 4666 txq_pcpu->tx_buffs = kmalloc(txq_pcpu->size *
4656 return -ENOMEM; 4667 sizeof(dma_addr_t), GFP_KERNEL);
4657 } 4668 if (!txq_pcpu->tx_buffs)
4669 goto error;
4658 4670
4659 txq_pcpu->count = 0; 4671 txq_pcpu->count = 0;
4660 txq_pcpu->reserved_num = 0; 4672 txq_pcpu->reserved_num = 0;
@@ -4663,6 +4675,19 @@ static int mvpp2_txq_init(struct mvpp2_port *port,
4663 } 4675 }
4664 4676
4665 return 0; 4677 return 0;
4678
4679error:
4680 for_each_present_cpu(cpu) {
4681 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
4682 kfree(txq_pcpu->tx_skb);
4683 kfree(txq_pcpu->tx_buffs);
4684 }
4685
4686 dma_free_coherent(port->dev->dev.parent,
4687 txq->size * MVPP2_DESC_ALIGNED_SIZE,
4688 txq->descs, txq->descs_phys);
4689
4690 return -ENOMEM;
4666} 4691}
4667 4692
4668/* Free allocated TXQ resources */ 4693/* Free allocated TXQ resources */
@@ -4675,6 +4700,7 @@ static void mvpp2_txq_deinit(struct mvpp2_port *port,
4675 for_each_present_cpu(cpu) { 4700 for_each_present_cpu(cpu) {
4676 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu); 4701 txq_pcpu = per_cpu_ptr(txq->pcpu, cpu);
4677 kfree(txq_pcpu->tx_skb); 4702 kfree(txq_pcpu->tx_skb);
4703 kfree(txq_pcpu->tx_buffs);
4678 } 4704 }
4679 4705
4680 if (txq->descs) 4706 if (txq->descs)
@@ -4805,7 +4831,6 @@ static int mvpp2_setup_txqs(struct mvpp2_port *port)
4805 goto err_cleanup; 4831 goto err_cleanup;
4806 } 4832 }
4807 4833
4808 on_each_cpu(mvpp2_tx_done_pkts_coal_set, port, 1);
4809 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1); 4834 on_each_cpu(mvpp2_txq_sent_counter_clear, port, 1);
4810 return 0; 4835 return 0;
4811 4836
@@ -4887,6 +4912,49 @@ static void mvpp2_link_event(struct net_device *dev)
4887 } 4912 }
4888} 4913}
4889 4914
4915static void mvpp2_timer_set(struct mvpp2_port_pcpu *port_pcpu)
4916{
4917 ktime_t interval;
4918
4919 if (!port_pcpu->timer_scheduled) {
4920 port_pcpu->timer_scheduled = true;
4921 interval = ktime_set(0, MVPP2_TXDONE_HRTIMER_PERIOD_NS);
4922 hrtimer_start(&port_pcpu->tx_done_timer, interval,
4923 HRTIMER_MODE_REL_PINNED);
4924 }
4925}
4926
4927static void mvpp2_tx_proc_cb(unsigned long data)
4928{
4929 struct net_device *dev = (struct net_device *)data;
4930 struct mvpp2_port *port = netdev_priv(dev);
4931 struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
4932 unsigned int tx_todo, cause;
4933
4934 if (!netif_running(dev))
4935 return;
4936 port_pcpu->timer_scheduled = false;
4937
4938 /* Process all the Tx queues */
4939 cause = (1 << txq_number) - 1;
4940 tx_todo = mvpp2_tx_done(port, cause);
4941
4942 /* Set the timer in case not all the packets were processed */
4943 if (tx_todo)
4944 mvpp2_timer_set(port_pcpu);
4945}
4946
4947static enum hrtimer_restart mvpp2_hr_timer_cb(struct hrtimer *timer)
4948{
4949 struct mvpp2_port_pcpu *port_pcpu = container_of(timer,
4950 struct mvpp2_port_pcpu,
4951 tx_done_timer);
4952
4953 tasklet_schedule(&port_pcpu->tx_done_tasklet);
4954
4955 return HRTIMER_NORESTART;
4956}
4957
4890/* Main RX/TX processing routines */ 4958/* Main RX/TX processing routines */
4891 4959
4892/* Display more error info */ 4960/* Display more error info */
@@ -5144,11 +5212,11 @@ static int mvpp2_tx_frag_process(struct mvpp2_port *port, struct sk_buff *skb,
5144 if (i == (skb_shinfo(skb)->nr_frags - 1)) { 5212 if (i == (skb_shinfo(skb)->nr_frags - 1)) {
5145 /* Last descriptor */ 5213 /* Last descriptor */
5146 tx_desc->command = MVPP2_TXD_L_DESC; 5214 tx_desc->command = MVPP2_TXD_L_DESC;
5147 mvpp2_txq_inc_put(txq_pcpu, skb); 5215 mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
5148 } else { 5216 } else {
5149 /* Descriptor in the middle: Not First, Not Last */ 5217 /* Descriptor in the middle: Not First, Not Last */
5150 tx_desc->command = 0; 5218 tx_desc->command = 0;
5151 mvpp2_txq_inc_put(txq_pcpu, NULL); 5219 mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
5152 } 5220 }
5153 } 5221 }
5154 5222
@@ -5214,12 +5282,12 @@ static int mvpp2_tx(struct sk_buff *skb, struct net_device *dev)
5214 /* First and Last descriptor */ 5282 /* First and Last descriptor */
5215 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC; 5283 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_L_DESC;
5216 tx_desc->command = tx_cmd; 5284 tx_desc->command = tx_cmd;
5217 mvpp2_txq_inc_put(txq_pcpu, skb); 5285 mvpp2_txq_inc_put(txq_pcpu, skb, tx_desc);
5218 } else { 5286 } else {
5219 /* First but not Last */ 5287 /* First but not Last */
5220 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE; 5288 tx_cmd |= MVPP2_TXD_F_DESC | MVPP2_TXD_PADDING_DISABLE;
5221 tx_desc->command = tx_cmd; 5289 tx_desc->command = tx_cmd;
5222 mvpp2_txq_inc_put(txq_pcpu, NULL); 5290 mvpp2_txq_inc_put(txq_pcpu, NULL, tx_desc);
5223 5291
5224 /* Continue with other skb fragments */ 5292 /* Continue with other skb fragments */
5225 if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) { 5293 if (mvpp2_tx_frag_process(port, skb, aggr_txq, txq)) {
@@ -5255,6 +5323,17 @@ out:
5255 dev_kfree_skb_any(skb); 5323 dev_kfree_skb_any(skb);
5256 } 5324 }
5257 5325
5326 /* Finalize TX processing */
5327 if (txq_pcpu->count >= txq->done_pkts_coal)
5328 mvpp2_txq_done(port, txq, txq_pcpu);
5329
5330 /* Set the timer in case not all frags were processed */
5331 if (txq_pcpu->count <= frags && txq_pcpu->count > 0) {
5332 struct mvpp2_port_pcpu *port_pcpu = this_cpu_ptr(port->pcpu);
5333
5334 mvpp2_timer_set(port_pcpu);
5335 }
5336
5258 return NETDEV_TX_OK; 5337 return NETDEV_TX_OK;
5259} 5338}
5260 5339
@@ -5268,10 +5347,11 @@ static inline void mvpp2_cause_error(struct net_device *dev, int cause)
5268 netdev_err(dev, "tx fifo underrun error\n"); 5347 netdev_err(dev, "tx fifo underrun error\n");
5269} 5348}
5270 5349
5271static void mvpp2_txq_done_percpu(void *arg) 5350static int mvpp2_poll(struct napi_struct *napi, int budget)
5272{ 5351{
5273 struct mvpp2_port *port = arg; 5352 u32 cause_rx_tx, cause_rx, cause_misc;
5274 u32 cause_rx_tx, cause_tx, cause_misc; 5353 int rx_done = 0;
5354 struct mvpp2_port *port = netdev_priv(napi->dev);
5275 5355
5276 /* Rx/Tx cause register 5356 /* Rx/Tx cause register
5277 * 5357 *
@@ -5285,7 +5365,7 @@ static void mvpp2_txq_done_percpu(void *arg)
5285 */ 5365 */
5286 cause_rx_tx = mvpp2_read(port->priv, 5366 cause_rx_tx = mvpp2_read(port->priv,
5287 MVPP2_ISR_RX_TX_CAUSE_REG(port->id)); 5367 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
5288 cause_tx = cause_rx_tx & MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK; 5368 cause_rx_tx &= ~MVPP2_CAUSE_TXQ_OCCUP_DESC_ALL_MASK;
5289 cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK; 5369 cause_misc = cause_rx_tx & MVPP2_CAUSE_MISC_SUM_MASK;
5290 5370
5291 if (cause_misc) { 5371 if (cause_misc) {
@@ -5297,26 +5377,6 @@ static void mvpp2_txq_done_percpu(void *arg)
5297 cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK); 5377 cause_rx_tx & ~MVPP2_CAUSE_MISC_SUM_MASK);
5298 } 5378 }
5299 5379
5300 /* Release TX descriptors */
5301 if (cause_tx) {
5302 struct mvpp2_tx_queue *txq = mvpp2_get_tx_queue(port, cause_tx);
5303 struct mvpp2_txq_pcpu *txq_pcpu = this_cpu_ptr(txq->pcpu);
5304
5305 if (txq_pcpu->count)
5306 mvpp2_txq_done(port, txq, txq_pcpu);
5307 }
5308}
5309
5310static int mvpp2_poll(struct napi_struct *napi, int budget)
5311{
5312 u32 cause_rx_tx, cause_rx;
5313 int rx_done = 0;
5314 struct mvpp2_port *port = netdev_priv(napi->dev);
5315
5316 on_each_cpu(mvpp2_txq_done_percpu, port, 1);
5317
5318 cause_rx_tx = mvpp2_read(port->priv,
5319 MVPP2_ISR_RX_TX_CAUSE_REG(port->id));
5320 cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK; 5380 cause_rx = cause_rx_tx & MVPP2_CAUSE_RXQ_OCCUP_DESC_ALL_MASK;
5321 5381
5322 /* Process RX packets */ 5382 /* Process RX packets */
@@ -5561,6 +5621,8 @@ err_cleanup_rxqs:
5561static int mvpp2_stop(struct net_device *dev) 5621static int mvpp2_stop(struct net_device *dev)
5562{ 5622{
5563 struct mvpp2_port *port = netdev_priv(dev); 5623 struct mvpp2_port *port = netdev_priv(dev);
5624 struct mvpp2_port_pcpu *port_pcpu;
5625 int cpu;
5564 5626
5565 mvpp2_stop_dev(port); 5627 mvpp2_stop_dev(port);
5566 mvpp2_phy_disconnect(port); 5628 mvpp2_phy_disconnect(port);
@@ -5569,6 +5631,13 @@ static int mvpp2_stop(struct net_device *dev)
5569 on_each_cpu(mvpp2_interrupts_mask, port, 1); 5631 on_each_cpu(mvpp2_interrupts_mask, port, 1);
5570 5632
5571 free_irq(port->irq, port); 5633 free_irq(port->irq, port);
5634 for_each_present_cpu(cpu) {
5635 port_pcpu = per_cpu_ptr(port->pcpu, cpu);
5636
5637 hrtimer_cancel(&port_pcpu->tx_done_timer);
5638 port_pcpu->timer_scheduled = false;
5639 tasklet_kill(&port_pcpu->tx_done_tasklet);
5640 }
5572 mvpp2_cleanup_rxqs(port); 5641 mvpp2_cleanup_rxqs(port);
5573 mvpp2_cleanup_txqs(port); 5642 mvpp2_cleanup_txqs(port);
5574 5643
@@ -5784,7 +5853,6 @@ static int mvpp2_ethtool_set_coalesce(struct net_device *dev,
5784 txq->done_pkts_coal = c->tx_max_coalesced_frames; 5853 txq->done_pkts_coal = c->tx_max_coalesced_frames;
5785 } 5854 }
5786 5855
5787 on_each_cpu(mvpp2_tx_done_pkts_coal_set, port, 1);
5788 return 0; 5856 return 0;
5789} 5857}
5790 5858
@@ -6035,6 +6103,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
6035{ 6103{
6036 struct device_node *phy_node; 6104 struct device_node *phy_node;
6037 struct mvpp2_port *port; 6105 struct mvpp2_port *port;
6106 struct mvpp2_port_pcpu *port_pcpu;
6038 struct net_device *dev; 6107 struct net_device *dev;
6039 struct resource *res; 6108 struct resource *res;
6040 const char *dt_mac_addr; 6109 const char *dt_mac_addr;
@@ -6044,7 +6113,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
6044 int features; 6113 int features;
6045 int phy_mode; 6114 int phy_mode;
6046 int priv_common_regs_num = 2; 6115 int priv_common_regs_num = 2;
6047 int err, i; 6116 int err, i, cpu;
6048 6117
6049 dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number, 6118 dev = alloc_etherdev_mqs(sizeof(struct mvpp2_port), txq_number,
6050 rxq_number); 6119 rxq_number);
@@ -6135,6 +6204,24 @@ static int mvpp2_port_probe(struct platform_device *pdev,
6135 } 6204 }
6136 mvpp2_port_power_up(port); 6205 mvpp2_port_power_up(port);
6137 6206
6207 port->pcpu = alloc_percpu(struct mvpp2_port_pcpu);
6208 if (!port->pcpu) {
6209 err = -ENOMEM;
6210 goto err_free_txq_pcpu;
6211 }
6212
6213 for_each_present_cpu(cpu) {
6214 port_pcpu = per_cpu_ptr(port->pcpu, cpu);
6215
6216 hrtimer_init(&port_pcpu->tx_done_timer, CLOCK_MONOTONIC,
6217 HRTIMER_MODE_REL_PINNED);
6218 port_pcpu->tx_done_timer.function = mvpp2_hr_timer_cb;
6219 port_pcpu->timer_scheduled = false;
6220
6221 tasklet_init(&port_pcpu->tx_done_tasklet, mvpp2_tx_proc_cb,
6222 (unsigned long)dev);
6223 }
6224
6138 netif_napi_add(dev, &port->napi, mvpp2_poll, NAPI_POLL_WEIGHT); 6225 netif_napi_add(dev, &port->napi, mvpp2_poll, NAPI_POLL_WEIGHT);
6139 features = NETIF_F_SG | NETIF_F_IP_CSUM; 6226 features = NETIF_F_SG | NETIF_F_IP_CSUM;
6140 dev->features = features | NETIF_F_RXCSUM; 6227 dev->features = features | NETIF_F_RXCSUM;
@@ -6144,7 +6231,7 @@ static int mvpp2_port_probe(struct platform_device *pdev,
6144 err = register_netdev(dev); 6231 err = register_netdev(dev);
6145 if (err < 0) { 6232 if (err < 0) {
6146 dev_err(&pdev->dev, "failed to register netdev\n"); 6233 dev_err(&pdev->dev, "failed to register netdev\n");
6147 goto err_free_txq_pcpu; 6234 goto err_free_port_pcpu;
6148 } 6235 }
6149 netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr); 6236 netdev_info(dev, "Using %s mac address %pM\n", mac_from, dev->dev_addr);
6150 6237
@@ -6153,6 +6240,8 @@ static int mvpp2_port_probe(struct platform_device *pdev,
6153 priv->port_list[id] = port; 6240 priv->port_list[id] = port;
6154 return 0; 6241 return 0;
6155 6242
6243err_free_port_pcpu:
6244 free_percpu(port->pcpu);
6156err_free_txq_pcpu: 6245err_free_txq_pcpu:
6157 for (i = 0; i < txq_number; i++) 6246 for (i = 0; i < txq_number; i++)
6158 free_percpu(port->txqs[i]->pcpu); 6247 free_percpu(port->txqs[i]->pcpu);
@@ -6171,6 +6260,7 @@ static void mvpp2_port_remove(struct mvpp2_port *port)
6171 int i; 6260 int i;
6172 6261
6173 unregister_netdev(port->dev); 6262 unregister_netdev(port->dev);
6263 free_percpu(port->pcpu);
6174 free_percpu(port->stats); 6264 free_percpu(port->stats);
6175 for (i = 0; i < txq_number; i++) 6265 for (i = 0; i < txq_number; i++)
6176 free_percpu(port->txqs[i]->pcpu); 6266 free_percpu(port->txqs[i]->pcpu);
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 82040137d7d9..0a3202047569 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -686,6 +686,7 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
686{ 686{
687 struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd; 687 struct mlx4_cmd *cmd = &mlx4_priv(dev)->cmd;
688 struct mlx4_cmd_context *context; 688 struct mlx4_cmd_context *context;
689 long ret_wait;
689 int err = 0; 690 int err = 0;
690 691
691 down(&cmd->event_sem); 692 down(&cmd->event_sem);
@@ -711,8 +712,20 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
711 if (err) 712 if (err)
712 goto out_reset; 713 goto out_reset;
713 714
714 if (!wait_for_completion_timeout(&context->done, 715 if (op == MLX4_CMD_SENSE_PORT) {
715 msecs_to_jiffies(timeout))) { 716 ret_wait =
717 wait_for_completion_interruptible_timeout(&context->done,
718 msecs_to_jiffies(timeout));
719 if (ret_wait < 0) {
720 context->fw_status = 0;
721 context->out_param = 0;
722 context->result = 0;
723 }
724 } else {
725 ret_wait = (long)wait_for_completion_timeout(&context->done,
726 msecs_to_jiffies(timeout));
727 }
728 if (!ret_wait) {
716 mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n", 729 mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
717 op); 730 op);
718 if (op == MLX4_CMD_NOP) { 731 if (op == MLX4_CMD_NOP) {
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_rx.c b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
index 7a4f20bb7fcb..9c145dddd717 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_rx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_rx.c
@@ -246,7 +246,6 @@ static int mlx4_en_prepare_rx_desc(struct mlx4_en_priv *priv,
246 246
247static inline bool mlx4_en_is_ring_empty(struct mlx4_en_rx_ring *ring) 247static inline bool mlx4_en_is_ring_empty(struct mlx4_en_rx_ring *ring)
248{ 248{
249 BUG_ON((u32)(ring->prod - ring->cons) > ring->actual_size);
250 return ring->prod == ring->cons; 249 return ring->prod == ring->cons;
251} 250}
252 251
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index aae13adfb492..8e81e53c370e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -601,7 +601,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
601 continue; 601 continue;
602 mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN to slave: %d, port:%d\n", 602 mlx4_dbg(dev, "%s: Sending MLX4_PORT_CHANGE_SUBTYPE_DOWN to slave: %d, port:%d\n",
603 __func__, i, port); 603 __func__, i, port);
604 s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state; 604 s_info = &priv->mfunc.master.vf_oper[i].vport[port].state;
605 if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) { 605 if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
606 eqe->event.port_change.port = 606 eqe->event.port_change.port =
607 cpu_to_be32( 607 cpu_to_be32(
@@ -640,7 +640,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
640 continue; 640 continue;
641 if (i == mlx4_master_func_num(dev)) 641 if (i == mlx4_master_func_num(dev))
642 continue; 642 continue;
643 s_info = &priv->mfunc.master.vf_oper[slave].vport[port].state; 643 s_info = &priv->mfunc.master.vf_oper[i].vport[port].state;
644 if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) { 644 if (IFLA_VF_LINK_STATE_AUTO == s_info->link_state) {
645 eqe->event.port_change.port = 645 eqe->event.port_change.port =
646 cpu_to_be32( 646 cpu_to_be32(
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 12fbfcb44d8a..29c2a017a450 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -2273,6 +2273,11 @@ static int mlx4_allocate_default_counters(struct mlx4_dev *dev)
2273 } else if (err == -ENOENT) { 2273 } else if (err == -ENOENT) {
2274 err = 0; 2274 err = 0;
2275 continue; 2275 continue;
2276 } else if (mlx4_is_slave(dev) && err == -EINVAL) {
2277 priv->def_counter[port] = MLX4_SINK_COUNTER_INDEX(dev);
2278 mlx4_warn(dev, "can't allocate counter from old PF driver, using index %d\n",
2279 MLX4_SINK_COUNTER_INDEX(dev));
2280 err = 0;
2276 } else { 2281 } else {
2277 mlx4_err(dev, "%s: failed to allocate default counter port %d err %d\n", 2282 mlx4_err(dev, "%s: failed to allocate default counter port %d err %d\n",
2278 __func__, port + 1, err); 2283 __func__, port + 1, err);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/main.c b/drivers/net/ethernet/mellanox/mlx5/core/main.c
index afad529838de..06e3e1e54c35 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/main.c
@@ -391,6 +391,8 @@ static int handle_hca_cap(struct mlx5_core_dev *dev)
391 /* disable cmdif checksum */ 391 /* disable cmdif checksum */
392 MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0); 392 MLX5_SET(cmd_hca_cap, set_hca_cap, cmdif_checksum, 0);
393 393
394 MLX5_SET(cmd_hca_cap, set_hca_cap, log_uar_page_sz, PAGE_SHIFT - 12);
395
394 err = set_caps(dev, set_ctx, set_sz); 396 err = set_caps(dev, set_ctx, set_sz);
395 397
396query_ex: 398query_ex:
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
index 33669c29b341..753ea8bad953 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
@@ -1415,7 +1415,7 @@ static int qlcnic_83xx_copy_fw_file(struct qlcnic_adapter *adapter)
1415 if (fw->size & 0xF) { 1415 if (fw->size & 0xF) {
1416 addr = dest + size; 1416 addr = dest + size;
1417 for (i = 0; i < (fw->size & 0xF); i++) 1417 for (i = 0; i < (fw->size & 0xF); i++)
1418 data[i] = temp[size + i]; 1418 data[i] = ((u8 *)temp)[size + i];
1419 for (; i < 16; i++) 1419 for (; i < 16; i++)
1420 data[i] = 0; 1420 data[i] = 0;
1421 ret = qlcnic_ms_mem_write128(adapter, addr, 1421 ret = qlcnic_ms_mem_write128(adapter, addr,
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 3df51faf18ae..f790f61ea78a 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -4875,10 +4875,12 @@ static void rtl_init_rxcfg(struct rtl8169_private *tp)
4875 case RTL_GIGA_MAC_VER_46: 4875 case RTL_GIGA_MAC_VER_46:
4876 case RTL_GIGA_MAC_VER_47: 4876 case RTL_GIGA_MAC_VER_47:
4877 case RTL_GIGA_MAC_VER_48: 4877 case RTL_GIGA_MAC_VER_48:
4878 RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF);
4879 break;
4878 case RTL_GIGA_MAC_VER_49: 4880 case RTL_GIGA_MAC_VER_49:
4879 case RTL_GIGA_MAC_VER_50: 4881 case RTL_GIGA_MAC_VER_50:
4880 case RTL_GIGA_MAC_VER_51: 4882 case RTL_GIGA_MAC_VER_51:
4881 RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST | RX_EARLY_OFF); 4883 RTL_W32(RxConfig, RX128_INT_EN | RX_MULTI_EN | RX_DMA_BURST | RX_EARLY_OFF);
4882 break; 4884 break;
4883 default: 4885 default:
4884 RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST); 4886 RTL_W32(RxConfig, RX128_INT_EN | RX_DMA_BURST);
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index fd9745714d90..78849dd4ef8e 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -228,9 +228,7 @@ static void ravb_ring_format(struct net_device *ndev, int q)
228 struct ravb_desc *desc = NULL; 228 struct ravb_desc *desc = NULL;
229 int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q]; 229 int rx_ring_size = sizeof(*rx_desc) * priv->num_rx_ring[q];
230 int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q]; 230 int tx_ring_size = sizeof(*tx_desc) * priv->num_tx_ring[q];
231 struct sk_buff *skb;
232 dma_addr_t dma_addr; 231 dma_addr_t dma_addr;
233 void *buffer;
234 int i; 232 int i;
235 233
236 priv->cur_rx[q] = 0; 234 priv->cur_rx[q] = 0;
@@ -241,41 +239,28 @@ static void ravb_ring_format(struct net_device *ndev, int q)
241 memset(priv->rx_ring[q], 0, rx_ring_size); 239 memset(priv->rx_ring[q], 0, rx_ring_size);
242 /* Build RX ring buffer */ 240 /* Build RX ring buffer */
243 for (i = 0; i < priv->num_rx_ring[q]; i++) { 241 for (i = 0; i < priv->num_rx_ring[q]; i++) {
244 priv->rx_skb[q][i] = NULL;
245 skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1);
246 if (!skb)
247 break;
248 ravb_set_buffer_align(skb);
249 /* RX descriptor */ 242 /* RX descriptor */
250 rx_desc = &priv->rx_ring[q][i]; 243 rx_desc = &priv->rx_ring[q][i];
251 /* The size of the buffer should be on 16-byte boundary. */ 244 /* The size of the buffer should be on 16-byte boundary. */
252 rx_desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16)); 245 rx_desc->ds_cc = cpu_to_le16(ALIGN(PKT_BUF_SZ, 16));
253 dma_addr = dma_map_single(&ndev->dev, skb->data, 246 dma_addr = dma_map_single(&ndev->dev, priv->rx_skb[q][i]->data,
254 ALIGN(PKT_BUF_SZ, 16), 247 ALIGN(PKT_BUF_SZ, 16),
255 DMA_FROM_DEVICE); 248 DMA_FROM_DEVICE);
256 if (dma_mapping_error(&ndev->dev, dma_addr)) { 249 /* We just set the data size to 0 for a failed mapping which
257 dev_kfree_skb(skb); 250 * should prevent DMA from happening...
258 break; 251 */
259 } 252 if (dma_mapping_error(&ndev->dev, dma_addr))
260 priv->rx_skb[q][i] = skb; 253 rx_desc->ds_cc = cpu_to_le16(0);
261 rx_desc->dptr = cpu_to_le32(dma_addr); 254 rx_desc->dptr = cpu_to_le32(dma_addr);
262 rx_desc->die_dt = DT_FEMPTY; 255 rx_desc->die_dt = DT_FEMPTY;
263 } 256 }
264 rx_desc = &priv->rx_ring[q][i]; 257 rx_desc = &priv->rx_ring[q][i];
265 rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]); 258 rx_desc->dptr = cpu_to_le32((u32)priv->rx_desc_dma[q]);
266 rx_desc->die_dt = DT_LINKFIX; /* type */ 259 rx_desc->die_dt = DT_LINKFIX; /* type */
267 priv->dirty_rx[q] = (u32)(i - priv->num_rx_ring[q]);
268 260
269 memset(priv->tx_ring[q], 0, tx_ring_size); 261 memset(priv->tx_ring[q], 0, tx_ring_size);
270 /* Build TX ring buffer */ 262 /* Build TX ring buffer */
271 for (i = 0; i < priv->num_tx_ring[q]; i++) { 263 for (i = 0; i < priv->num_tx_ring[q]; i++) {
272 priv->tx_skb[q][i] = NULL;
273 priv->tx_buffers[q][i] = NULL;
274 buffer = kmalloc(PKT_BUF_SZ + RAVB_ALIGN - 1, GFP_KERNEL);
275 if (!buffer)
276 break;
277 /* Aligned TX buffer */
278 priv->tx_buffers[q][i] = buffer;
279 tx_desc = &priv->tx_ring[q][i]; 264 tx_desc = &priv->tx_ring[q][i];
280 tx_desc->die_dt = DT_EEMPTY; 265 tx_desc->die_dt = DT_EEMPTY;
281 } 266 }
@@ -298,7 +283,10 @@ static void ravb_ring_format(struct net_device *ndev, int q)
298static int ravb_ring_init(struct net_device *ndev, int q) 283static int ravb_ring_init(struct net_device *ndev, int q)
299{ 284{
300 struct ravb_private *priv = netdev_priv(ndev); 285 struct ravb_private *priv = netdev_priv(ndev);
286 struct sk_buff *skb;
301 int ring_size; 287 int ring_size;
288 void *buffer;
289 int i;
302 290
303 /* Allocate RX and TX skb rings */ 291 /* Allocate RX and TX skb rings */
304 priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q], 292 priv->rx_skb[q] = kcalloc(priv->num_rx_ring[q],
@@ -308,12 +296,28 @@ static int ravb_ring_init(struct net_device *ndev, int q)
308 if (!priv->rx_skb[q] || !priv->tx_skb[q]) 296 if (!priv->rx_skb[q] || !priv->tx_skb[q])
309 goto error; 297 goto error;
310 298
299 for (i = 0; i < priv->num_rx_ring[q]; i++) {
300 skb = netdev_alloc_skb(ndev, PKT_BUF_SZ + RAVB_ALIGN - 1);
301 if (!skb)
302 goto error;
303 ravb_set_buffer_align(skb);
304 priv->rx_skb[q][i] = skb;
305 }
306
311 /* Allocate rings for the aligned buffers */ 307 /* Allocate rings for the aligned buffers */
312 priv->tx_buffers[q] = kcalloc(priv->num_tx_ring[q], 308 priv->tx_buffers[q] = kcalloc(priv->num_tx_ring[q],
313 sizeof(*priv->tx_buffers[q]), GFP_KERNEL); 309 sizeof(*priv->tx_buffers[q]), GFP_KERNEL);
314 if (!priv->tx_buffers[q]) 310 if (!priv->tx_buffers[q])
315 goto error; 311 goto error;
316 312
313 for (i = 0; i < priv->num_tx_ring[q]; i++) {
314 buffer = kmalloc(PKT_BUF_SZ + RAVB_ALIGN - 1, GFP_KERNEL);
315 if (!buffer)
316 goto error;
317 /* Aligned TX buffer */
318 priv->tx_buffers[q][i] = buffer;
319 }
320
317 /* Allocate all RX descriptors. */ 321 /* Allocate all RX descriptors. */
318 ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1); 322 ring_size = sizeof(struct ravb_ex_rx_desc) * (priv->num_rx_ring[q] + 1);
319 priv->rx_ring[q] = dma_alloc_coherent(NULL, ring_size, 323 priv->rx_ring[q] = dma_alloc_coherent(NULL, ring_size,
@@ -524,6 +528,10 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
524 if (--boguscnt < 0) 528 if (--boguscnt < 0)
525 break; 529 break;
526 530
531 /* We use 0-byte descriptors to mark the DMA mapping errors */
532 if (!pkt_len)
533 continue;
534
527 if (desc_status & MSC_MC) 535 if (desc_status & MSC_MC)
528 stats->multicast++; 536 stats->multicast++;
529 537
@@ -543,10 +551,9 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
543 551
544 skb = priv->rx_skb[q][entry]; 552 skb = priv->rx_skb[q][entry];
545 priv->rx_skb[q][entry] = NULL; 553 priv->rx_skb[q][entry] = NULL;
546 dma_sync_single_for_cpu(&ndev->dev, 554 dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
547 le32_to_cpu(desc->dptr), 555 ALIGN(PKT_BUF_SZ, 16),
548 ALIGN(PKT_BUF_SZ, 16), 556 DMA_FROM_DEVICE);
549 DMA_FROM_DEVICE);
550 get_ts &= (q == RAVB_NC) ? 557 get_ts &= (q == RAVB_NC) ?
551 RAVB_RXTSTAMP_TYPE_V2_L2_EVENT : 558 RAVB_RXTSTAMP_TYPE_V2_L2_EVENT :
552 ~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT; 559 ~RAVB_RXTSTAMP_TYPE_V2_L2_EVENT;
@@ -584,17 +591,15 @@ static bool ravb_rx(struct net_device *ndev, int *quota, int q)
584 if (!skb) 591 if (!skb)
585 break; /* Better luck next round. */ 592 break; /* Better luck next round. */
586 ravb_set_buffer_align(skb); 593 ravb_set_buffer_align(skb);
587 dma_unmap_single(&ndev->dev, le32_to_cpu(desc->dptr),
588 ALIGN(PKT_BUF_SZ, 16),
589 DMA_FROM_DEVICE);
590 dma_addr = dma_map_single(&ndev->dev, skb->data, 594 dma_addr = dma_map_single(&ndev->dev, skb->data,
591 le16_to_cpu(desc->ds_cc), 595 le16_to_cpu(desc->ds_cc),
592 DMA_FROM_DEVICE); 596 DMA_FROM_DEVICE);
593 skb_checksum_none_assert(skb); 597 skb_checksum_none_assert(skb);
594 if (dma_mapping_error(&ndev->dev, dma_addr)) { 598 /* We just set the data size to 0 for a failed mapping
595 dev_kfree_skb_any(skb); 599 * which should prevent DMA from happening...
596 break; 600 */
597 } 601 if (dma_mapping_error(&ndev->dev, dma_addr))
602 desc->ds_cc = cpu_to_le16(0);
598 desc->dptr = cpu_to_le32(dma_addr); 603 desc->dptr = cpu_to_le32(dma_addr);
599 priv->rx_skb[q][entry] = skb; 604 priv->rx_skb[q][entry] = skb;
600 } 605 }
@@ -1279,7 +1284,6 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1279 u32 dma_addr; 1284 u32 dma_addr;
1280 void *buffer; 1285 void *buffer;
1281 u32 entry; 1286 u32 entry;
1282 u32 tccr;
1283 1287
1284 spin_lock_irqsave(&priv->lock, flags); 1288 spin_lock_irqsave(&priv->lock, flags);
1285 if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q]) { 1289 if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q]) {
@@ -1328,9 +1332,7 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
1328 dma_wmb(); 1332 dma_wmb();
1329 desc->die_dt = DT_FSINGLE; 1333 desc->die_dt = DT_FSINGLE;
1330 1334
1331 tccr = ravb_read(ndev, TCCR); 1335 ravb_write(ndev, ravb_read(ndev, TCCR) | (TCCR_TSRQ0 << q), TCCR);
1332 if (!(tccr & (TCCR_TSRQ0 << q)))
1333 ravb_write(ndev, tccr | (TCCR_TSRQ0 << q), TCCR);
1334 1336
1335 priv->cur_tx[q]++; 1337 priv->cur_tx[q]++;
1336 if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q] && 1338 if (priv->cur_tx[q] - priv->dirty_tx[q] >= priv->num_tx_ring[q] &&
diff --git a/drivers/net/ethernet/rocker/rocker.c b/drivers/net/ethernet/rocker/rocker.c
index 2d8578cade03..2e7f9a2834be 100644
--- a/drivers/net/ethernet/rocker/rocker.c
+++ b/drivers/net/ethernet/rocker/rocker.c
@@ -4821,6 +4821,7 @@ static void rocker_remove_ports(const struct rocker *rocker)
4821 rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE, 4821 rocker_port_ig_tbl(rocker_port, SWITCHDEV_TRANS_NONE,
4822 ROCKER_OP_FLAG_REMOVE); 4822 ROCKER_OP_FLAG_REMOVE);
4823 unregister_netdev(rocker_port->dev); 4823 unregister_netdev(rocker_port->dev);
4824 free_netdev(rocker_port->dev);
4824 } 4825 }
4825 kfree(rocker->ports); 4826 kfree(rocker->ports);
4826} 4827}
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
index 7e3129e7f143..f0e4bb4e3ec5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -42,7 +42,7 @@
42#define NSS_COMMON_CLK_DIV_MASK 0x7f 42#define NSS_COMMON_CLK_DIV_MASK 0x7f
43 43
44#define NSS_COMMON_CLK_SRC_CTRL 0x14 44#define NSS_COMMON_CLK_SRC_CTRL 0x14
45#define NSS_COMMON_CLK_SRC_CTRL_OFFSET(x) (1 << x) 45#define NSS_COMMON_CLK_SRC_CTRL_OFFSET(x) (x)
46/* Mode is coded on 1 bit but is different depending on the MAC ID: 46/* Mode is coded on 1 bit but is different depending on the MAC ID:
47 * MAC0: QSGMII=0 RGMII=1 47 * MAC0: QSGMII=0 RGMII=1
48 * MAC1: QSGMII=0 SGMII=0 RGMII=1 48 * MAC1: QSGMII=0 SGMII=0 RGMII=1
@@ -291,7 +291,7 @@ static void *ipq806x_gmac_setup(struct platform_device *pdev)
291 291
292 /* Configure the clock src according to the mode */ 292 /* Configure the clock src according to the mode */
293 regmap_read(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, &val); 293 regmap_read(gmac->nss_common, NSS_COMMON_CLK_SRC_CTRL, &val);
294 val &= ~NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id); 294 val &= ~(1 << NSS_COMMON_CLK_SRC_CTRL_OFFSET(gmac->id));
295 switch (gmac->phy_mode) { 295 switch (gmac->phy_mode) {
296 case PHY_INTERFACE_MODE_RGMII: 296 case PHY_INTERFACE_MODE_RGMII:
297 val |= NSS_COMMON_CLK_SRC_CTRL_RGMII(gmac->id) << 297 val |= NSS_COMMON_CLK_SRC_CTRL_RGMII(gmac->id) <<
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 50f7a7a26821..864b476f7fd5 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2843,7 +2843,7 @@ int stmmac_dvr_probe(struct device *device,
2843 if (res->mac) 2843 if (res->mac)
2844 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN); 2844 memcpy(priv->dev->dev_addr, res->mac, ETH_ALEN);
2845 2845
2846 dev_set_drvdata(device, priv); 2846 dev_set_drvdata(device, priv->dev);
2847 2847
2848 /* Verify driver arguments */ 2848 /* Verify driver arguments */
2849 stmmac_verify_args(); 2849 stmmac_verify_args();
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index f3918c7e7eeb..bcdc8955c719 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -413,3 +413,7 @@ static int stmmac_pltfr_resume(struct device *dev)
413SIMPLE_DEV_PM_OPS(stmmac_pltfr_pm_ops, stmmac_pltfr_suspend, 413SIMPLE_DEV_PM_OPS(stmmac_pltfr_pm_ops, stmmac_pltfr_suspend,
414 stmmac_pltfr_resume); 414 stmmac_pltfr_resume);
415EXPORT_SYMBOL_GPL(stmmac_pltfr_pm_ops); 415EXPORT_SYMBOL_GPL(stmmac_pltfr_pm_ops);
416
417MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet platform support");
418MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
419MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 0c5842aeb807..ab6051a43134 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -6658,10 +6658,8 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
6658 struct sk_buff *skb_new; 6658 struct sk_buff *skb_new;
6659 6659
6660 skb_new = skb_realloc_headroom(skb, len); 6660 skb_new = skb_realloc_headroom(skb, len);
6661 if (!skb_new) { 6661 if (!skb_new)
6662 rp->tx_errors++;
6663 goto out_drop; 6662 goto out_drop;
6664 }
6665 kfree_skb(skb); 6663 kfree_skb(skb);
6666 skb = skb_new; 6664 skb = skb_new;
6667 } else 6665 } else
diff --git a/drivers/net/ethernet/ti/cpsw.c b/drivers/net/ethernet/ti/cpsw.c
index f335bf119ab5..d155bf2573cd 100644
--- a/drivers/net/ethernet/ti/cpsw.c
+++ b/drivers/net/ethernet/ti/cpsw.c
@@ -793,9 +793,7 @@ static irqreturn_t cpsw_rx_interrupt(int irq, void *dev_id)
793static int cpsw_poll(struct napi_struct *napi, int budget) 793static int cpsw_poll(struct napi_struct *napi, int budget)
794{ 794{
795 struct cpsw_priv *priv = napi_to_priv(napi); 795 struct cpsw_priv *priv = napi_to_priv(napi);
796 int num_tx, num_rx; 796 int num_rx;
797
798 num_tx = cpdma_chan_process(priv->txch, 128);
799 797
800 num_rx = cpdma_chan_process(priv->rxch, budget); 798 num_rx = cpdma_chan_process(priv->rxch, budget);
801 if (num_rx < budget) { 799 if (num_rx < budget) {
@@ -810,9 +808,8 @@ static int cpsw_poll(struct napi_struct *napi, int budget)
810 } 808 }
811 } 809 }
812 810
813 if (num_rx || num_tx) 811 if (num_rx)
814 cpsw_dbg(priv, intr, "poll %d rx, %d tx pkts\n", 812 cpsw_dbg(priv, intr, "poll %d rx pkts\n", num_rx);
815 num_rx, num_tx);
816 813
817 return num_rx; 814 return num_rx;
818} 815}
diff --git a/drivers/net/ethernet/ti/netcp.h b/drivers/net/ethernet/ti/netcp.h
index bbacf5cccec2..bb1bb72121c0 100644
--- a/drivers/net/ethernet/ti/netcp.h
+++ b/drivers/net/ethernet/ti/netcp.h
@@ -85,7 +85,6 @@ struct netcp_intf {
85 struct list_head rxhook_list_head; 85 struct list_head rxhook_list_head;
86 unsigned int rx_queue_id; 86 unsigned int rx_queue_id;
87 void *rx_fdq[KNAV_DMA_FDQ_PER_CHAN]; 87 void *rx_fdq[KNAV_DMA_FDQ_PER_CHAN];
88 u32 rx_buffer_sizes[KNAV_DMA_FDQ_PER_CHAN];
89 struct napi_struct rx_napi; 88 struct napi_struct rx_napi;
90 struct napi_struct tx_napi; 89 struct napi_struct tx_napi;
91 90
@@ -223,6 +222,7 @@ void *netcp_device_find_module(struct netcp_device *netcp_device,
223 222
224/* SGMII functions */ 223/* SGMII functions */
225int netcp_sgmii_reset(void __iomem *sgmii_ofs, int port); 224int netcp_sgmii_reset(void __iomem *sgmii_ofs, int port);
225bool netcp_sgmii_rtreset(void __iomem *sgmii_ofs, int port, bool set);
226int netcp_sgmii_get_port_link(void __iomem *sgmii_ofs, int port); 226int netcp_sgmii_get_port_link(void __iomem *sgmii_ofs, int port);
227int netcp_sgmii_config(void __iomem *sgmii_ofs, int port, u32 interface); 227int netcp_sgmii_config(void __iomem *sgmii_ofs, int port, u32 interface);
228 228
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index 5ec4ed3f6c8d..4755838c6137 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -34,6 +34,7 @@
34#define NETCP_SOP_OFFSET (NET_IP_ALIGN + NET_SKB_PAD) 34#define NETCP_SOP_OFFSET (NET_IP_ALIGN + NET_SKB_PAD)
35#define NETCP_NAPI_WEIGHT 64 35#define NETCP_NAPI_WEIGHT 64
36#define NETCP_TX_TIMEOUT (5 * HZ) 36#define NETCP_TX_TIMEOUT (5 * HZ)
37#define NETCP_PACKET_SIZE (ETH_FRAME_LEN + ETH_FCS_LEN)
37#define NETCP_MIN_PACKET_SIZE ETH_ZLEN 38#define NETCP_MIN_PACKET_SIZE ETH_ZLEN
38#define NETCP_MAX_MCAST_ADDR 16 39#define NETCP_MAX_MCAST_ADDR 16
39 40
@@ -804,30 +805,28 @@ static void netcp_allocate_rx_buf(struct netcp_intf *netcp, int fdq)
804 if (likely(fdq == 0)) { 805 if (likely(fdq == 0)) {
805 unsigned int primary_buf_len; 806 unsigned int primary_buf_len;
806 /* Allocate a primary receive queue entry */ 807 /* Allocate a primary receive queue entry */
807 buf_len = netcp->rx_buffer_sizes[0] + NETCP_SOP_OFFSET; 808 buf_len = NETCP_PACKET_SIZE + NETCP_SOP_OFFSET;
808 primary_buf_len = SKB_DATA_ALIGN(buf_len) + 809 primary_buf_len = SKB_DATA_ALIGN(buf_len) +
809 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); 810 SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
810 811
811 if (primary_buf_len <= PAGE_SIZE) { 812 bufptr = netdev_alloc_frag(primary_buf_len);
812 bufptr = netdev_alloc_frag(primary_buf_len); 813 pad[1] = primary_buf_len;
813 pad[1] = primary_buf_len;
814 } else {
815 bufptr = kmalloc(primary_buf_len, GFP_ATOMIC |
816 GFP_DMA32 | __GFP_COLD);
817 pad[1] = 0;
818 }
819 814
820 if (unlikely(!bufptr)) { 815 if (unlikely(!bufptr)) {
821 dev_warn_ratelimited(netcp->ndev_dev, "Primary RX buffer alloc failed\n"); 816 dev_warn_ratelimited(netcp->ndev_dev,
817 "Primary RX buffer alloc failed\n");
822 goto fail; 818 goto fail;
823 } 819 }
824 dma = dma_map_single(netcp->dev, bufptr, buf_len, 820 dma = dma_map_single(netcp->dev, bufptr, buf_len,
825 DMA_TO_DEVICE); 821 DMA_TO_DEVICE);
822 if (unlikely(dma_mapping_error(netcp->dev, dma)))
823 goto fail;
824
826 pad[0] = (u32)bufptr; 825 pad[0] = (u32)bufptr;
827 826
828 } else { 827 } else {
829 /* Allocate a secondary receive queue entry */ 828 /* Allocate a secondary receive queue entry */
830 page = alloc_page(GFP_ATOMIC | GFP_DMA32 | __GFP_COLD); 829 page = alloc_page(GFP_ATOMIC | GFP_DMA | __GFP_COLD);
831 if (unlikely(!page)) { 830 if (unlikely(!page)) {
832 dev_warn_ratelimited(netcp->ndev_dev, "Secondary page alloc failed\n"); 831 dev_warn_ratelimited(netcp->ndev_dev, "Secondary page alloc failed\n");
833 goto fail; 832 goto fail;
@@ -1010,7 +1009,7 @@ netcp_tx_map_skb(struct sk_buff *skb, struct netcp_intf *netcp)
1010 1009
1011 /* Map the linear buffer */ 1010 /* Map the linear buffer */
1012 dma_addr = dma_map_single(dev, skb->data, pkt_len, DMA_TO_DEVICE); 1011 dma_addr = dma_map_single(dev, skb->data, pkt_len, DMA_TO_DEVICE);
1013 if (unlikely(!dma_addr)) { 1012 if (unlikely(dma_mapping_error(dev, dma_addr))) {
1014 dev_err(netcp->ndev_dev, "Failed to map skb buffer\n"); 1013 dev_err(netcp->ndev_dev, "Failed to map skb buffer\n");
1015 return NULL; 1014 return NULL;
1016 } 1015 }
@@ -1546,8 +1545,8 @@ static int netcp_setup_navigator_resources(struct net_device *ndev)
1546 knav_queue_disable_notify(netcp->rx_queue); 1545 knav_queue_disable_notify(netcp->rx_queue);
1547 1546
1548 /* open Rx FDQs */ 1547 /* open Rx FDQs */
1549 for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && 1548 for (i = 0; i < KNAV_DMA_FDQ_PER_CHAN && netcp->rx_queue_depths[i];
1550 netcp->rx_queue_depths[i] && netcp->rx_buffer_sizes[i]; ++i) { 1549 ++i) {
1551 snprintf(name, sizeof(name), "rx-fdq-%s-%d", ndev->name, i); 1550 snprintf(name, sizeof(name), "rx-fdq-%s-%d", ndev->name, i);
1552 netcp->rx_fdq[i] = knav_queue_open(name, KNAV_QUEUE_GP, 0); 1551 netcp->rx_fdq[i] = knav_queue_open(name, KNAV_QUEUE_GP, 0);
1553 if (IS_ERR_OR_NULL(netcp->rx_fdq[i])) { 1552 if (IS_ERR_OR_NULL(netcp->rx_fdq[i])) {
@@ -1617,11 +1616,11 @@ static int netcp_ndo_open(struct net_device *ndev)
1617 } 1616 }
1618 mutex_unlock(&netcp_modules_lock); 1617 mutex_unlock(&netcp_modules_lock);
1619 1618
1620 netcp_rxpool_refill(netcp);
1621 napi_enable(&netcp->rx_napi); 1619 napi_enable(&netcp->rx_napi);
1622 napi_enable(&netcp->tx_napi); 1620 napi_enable(&netcp->tx_napi);
1623 knav_queue_enable_notify(netcp->tx_compl_q); 1621 knav_queue_enable_notify(netcp->tx_compl_q);
1624 knav_queue_enable_notify(netcp->rx_queue); 1622 knav_queue_enable_notify(netcp->rx_queue);
1623 netcp_rxpool_refill(netcp);
1625 netif_tx_wake_all_queues(ndev); 1624 netif_tx_wake_all_queues(ndev);
1626 dev_dbg(netcp->ndev_dev, "netcp device %s opened\n", ndev->name); 1625 dev_dbg(netcp->ndev_dev, "netcp device %s opened\n", ndev->name);
1627 return 0; 1626 return 0;
@@ -1941,14 +1940,6 @@ static int netcp_create_interface(struct netcp_device *netcp_device,
1941 netcp->rx_queue_depths[0] = 128; 1940 netcp->rx_queue_depths[0] = 128;
1942 } 1941 }
1943 1942
1944 ret = of_property_read_u32_array(node_interface, "rx-buffer-size",
1945 netcp->rx_buffer_sizes,
1946 KNAV_DMA_FDQ_PER_CHAN);
1947 if (ret) {
1948 dev_err(dev, "missing \"rx-buffer-size\" parameter\n");
1949 netcp->rx_buffer_sizes[0] = 1536;
1950 }
1951
1952 ret = of_property_read_u32_array(node_interface, "rx-pool", temp, 2); 1943 ret = of_property_read_u32_array(node_interface, "rx-pool", temp, 2);
1953 if (ret < 0) { 1944 if (ret < 0) {
1954 dev_err(dev, "missing \"rx-pool\" parameter\n"); 1945 dev_err(dev, "missing \"rx-pool\" parameter\n");
@@ -2112,6 +2103,7 @@ probe_quit:
2112static int netcp_remove(struct platform_device *pdev) 2103static int netcp_remove(struct platform_device *pdev)
2113{ 2104{
2114 struct netcp_device *netcp_device = platform_get_drvdata(pdev); 2105 struct netcp_device *netcp_device = platform_get_drvdata(pdev);
2106 struct netcp_intf *netcp_intf, *netcp_tmp;
2115 struct netcp_inst_modpriv *inst_modpriv, *tmp; 2107 struct netcp_inst_modpriv *inst_modpriv, *tmp;
2116 struct netcp_module *module; 2108 struct netcp_module *module;
2117 2109
@@ -2123,10 +2115,17 @@ static int netcp_remove(struct platform_device *pdev)
2123 list_del(&inst_modpriv->inst_list); 2115 list_del(&inst_modpriv->inst_list);
2124 kfree(inst_modpriv); 2116 kfree(inst_modpriv);
2125 } 2117 }
2126 WARN(!list_empty(&netcp_device->interface_head), "%s interface list not empty!\n",
2127 pdev->name);
2128 2118
2129 devm_kfree(&pdev->dev, netcp_device); 2119 /* now that all modules are removed, clean up the interfaces */
2120 list_for_each_entry_safe(netcp_intf, netcp_tmp,
2121 &netcp_device->interface_head,
2122 interface_list) {
2123 netcp_delete_interface(netcp_device, netcp_intf->ndev);
2124 }
2125
2126 WARN(!list_empty(&netcp_device->interface_head),
2127 "%s interface list not empty!\n", pdev->name);
2128
2130 pm_runtime_put_sync(&pdev->dev); 2129 pm_runtime_put_sync(&pdev->dev);
2131 pm_runtime_disable(&pdev->dev); 2130 pm_runtime_disable(&pdev->dev);
2132 platform_set_drvdata(pdev, NULL); 2131 platform_set_drvdata(pdev, NULL);
diff --git a/drivers/net/ethernet/ti/netcp_ethss.c b/drivers/net/ethernet/ti/netcp_ethss.c
index 9b7e0a34c98b..1974a8ae764a 100644
--- a/drivers/net/ethernet/ti/netcp_ethss.c
+++ b/drivers/net/ethernet/ti/netcp_ethss.c
@@ -1901,11 +1901,28 @@ static void gbe_port_config(struct gbe_priv *gbe_dev, struct gbe_slave *slave,
1901 writel(slave->mac_control, GBE_REG_ADDR(slave, emac_regs, mac_control)); 1901 writel(slave->mac_control, GBE_REG_ADDR(slave, emac_regs, mac_control));
1902} 1902}
1903 1903
1904static void gbe_sgmii_rtreset(struct gbe_priv *priv,
1905 struct gbe_slave *slave, bool set)
1906{
1907 void __iomem *sgmii_port_regs;
1908
1909 if (SLAVE_LINK_IS_XGMII(slave))
1910 return;
1911
1912 if ((priv->ss_version == GBE_SS_VERSION_14) && (slave->slave_num >= 2))
1913 sgmii_port_regs = priv->sgmii_port34_regs;
1914 else
1915 sgmii_port_regs = priv->sgmii_port_regs;
1916
1917 netcp_sgmii_rtreset(sgmii_port_regs, slave->slave_num, set);
1918}
1919
1904static void gbe_slave_stop(struct gbe_intf *intf) 1920static void gbe_slave_stop(struct gbe_intf *intf)
1905{ 1921{
1906 struct gbe_priv *gbe_dev = intf->gbe_dev; 1922 struct gbe_priv *gbe_dev = intf->gbe_dev;
1907 struct gbe_slave *slave = intf->slave; 1923 struct gbe_slave *slave = intf->slave;
1908 1924
1925 gbe_sgmii_rtreset(gbe_dev, slave, true);
1909 gbe_port_reset(slave); 1926 gbe_port_reset(slave);
1910 /* Disable forwarding */ 1927 /* Disable forwarding */
1911 cpsw_ale_control_set(gbe_dev->ale, slave->port_num, 1928 cpsw_ale_control_set(gbe_dev->ale, slave->port_num,
@@ -1947,6 +1964,7 @@ static int gbe_slave_open(struct gbe_intf *gbe_intf)
1947 1964
1948 gbe_sgmii_config(priv, slave); 1965 gbe_sgmii_config(priv, slave);
1949 gbe_port_reset(slave); 1966 gbe_port_reset(slave);
1967 gbe_sgmii_rtreset(priv, slave, false);
1950 gbe_port_config(priv, slave, priv->rx_packet_max); 1968 gbe_port_config(priv, slave, priv->rx_packet_max);
1951 gbe_set_slave_mac(slave, gbe_intf); 1969 gbe_set_slave_mac(slave, gbe_intf);
1952 /* enable forwarding */ 1970 /* enable forwarding */
@@ -2490,10 +2508,9 @@ static void free_secondary_ports(struct gbe_priv *gbe_dev)
2490{ 2508{
2491 struct gbe_slave *slave; 2509 struct gbe_slave *slave;
2492 2510
2493 for (;;) { 2511 while (!list_empty(&gbe_dev->secondary_slaves)) {
2494 slave = first_sec_slave(gbe_dev); 2512 slave = first_sec_slave(gbe_dev);
2495 if (!slave) 2513
2496 break;
2497 if (slave->phy) 2514 if (slave->phy)
2498 phy_disconnect(slave->phy); 2515 phy_disconnect(slave->phy);
2499 list_del(&slave->slave_list); 2516 list_del(&slave->slave_list);
@@ -2839,14 +2856,13 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
2839 &gbe_dev->dma_chan_name); 2856 &gbe_dev->dma_chan_name);
2840 if (ret < 0) { 2857 if (ret < 0) {
2841 dev_err(dev, "missing \"tx-channel\" parameter\n"); 2858 dev_err(dev, "missing \"tx-channel\" parameter\n");
2842 ret = -ENODEV; 2859 return -EINVAL;
2843 goto quit;
2844 } 2860 }
2845 2861
2846 if (!strcmp(node->name, "gbe")) { 2862 if (!strcmp(node->name, "gbe")) {
2847 ret = get_gbe_resource_version(gbe_dev, node); 2863 ret = get_gbe_resource_version(gbe_dev, node);
2848 if (ret) 2864 if (ret)
2849 goto quit; 2865 return ret;
2850 2866
2851 dev_dbg(dev, "ss_version: 0x%08x\n", gbe_dev->ss_version); 2867 dev_dbg(dev, "ss_version: 0x%08x\n", gbe_dev->ss_version);
2852 2868
@@ -2857,22 +2873,20 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
2857 else 2873 else
2858 ret = -ENODEV; 2874 ret = -ENODEV;
2859 2875
2860 if (ret)
2861 goto quit;
2862 } else if (!strcmp(node->name, "xgbe")) { 2876 } else if (!strcmp(node->name, "xgbe")) {
2863 ret = set_xgbe_ethss10_priv(gbe_dev, node); 2877 ret = set_xgbe_ethss10_priv(gbe_dev, node);
2864 if (ret) 2878 if (ret)
2865 goto quit; 2879 return ret;
2866 ret = netcp_xgbe_serdes_init(gbe_dev->xgbe_serdes_regs, 2880 ret = netcp_xgbe_serdes_init(gbe_dev->xgbe_serdes_regs,
2867 gbe_dev->ss_regs); 2881 gbe_dev->ss_regs);
2868 if (ret)
2869 goto quit;
2870 } else { 2882 } else {
2871 dev_err(dev, "unknown GBE node(%s)\n", node->name); 2883 dev_err(dev, "unknown GBE node(%s)\n", node->name);
2872 ret = -ENODEV; 2884 ret = -ENODEV;
2873 goto quit;
2874 } 2885 }
2875 2886
2887 if (ret)
2888 return ret;
2889
2876 interfaces = of_get_child_by_name(node, "interfaces"); 2890 interfaces = of_get_child_by_name(node, "interfaces");
2877 if (!interfaces) 2891 if (!interfaces)
2878 dev_err(dev, "could not find interfaces\n"); 2892 dev_err(dev, "could not find interfaces\n");
@@ -2880,11 +2894,11 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
2880 ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device, 2894 ret = netcp_txpipe_init(&gbe_dev->tx_pipe, netcp_device,
2881 gbe_dev->dma_chan_name, gbe_dev->tx_queue_id); 2895 gbe_dev->dma_chan_name, gbe_dev->tx_queue_id);
2882 if (ret) 2896 if (ret)
2883 goto quit; 2897 return ret;
2884 2898
2885 ret = netcp_txpipe_open(&gbe_dev->tx_pipe); 2899 ret = netcp_txpipe_open(&gbe_dev->tx_pipe);
2886 if (ret) 2900 if (ret)
2887 goto quit; 2901 return ret;
2888 2902
2889 /* Create network interfaces */ 2903 /* Create network interfaces */
2890 INIT_LIST_HEAD(&gbe_dev->gbe_intf_head); 2904 INIT_LIST_HEAD(&gbe_dev->gbe_intf_head);
@@ -2899,6 +2913,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
2899 if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves) 2913 if (gbe_dev->num_slaves >= gbe_dev->max_num_slaves)
2900 break; 2914 break;
2901 } 2915 }
2916 of_node_put(interfaces);
2902 2917
2903 if (!gbe_dev->num_slaves) 2918 if (!gbe_dev->num_slaves)
2904 dev_warn(dev, "No network interface configured\n"); 2919 dev_warn(dev, "No network interface configured\n");
@@ -2911,9 +2926,10 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
2911 of_node_put(secondary_ports); 2926 of_node_put(secondary_ports);
2912 2927
2913 if (!gbe_dev->num_slaves) { 2928 if (!gbe_dev->num_slaves) {
2914 dev_err(dev, "No network interface or secondary ports configured\n"); 2929 dev_err(dev,
2930 "No network interface or secondary ports configured\n");
2915 ret = -ENODEV; 2931 ret = -ENODEV;
2916 goto quit; 2932 goto free_sec_ports;
2917 } 2933 }
2918 2934
2919 memset(&ale_params, 0, sizeof(ale_params)); 2935 memset(&ale_params, 0, sizeof(ale_params));
@@ -2927,7 +2943,7 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
2927 if (!gbe_dev->ale) { 2943 if (!gbe_dev->ale) {
2928 dev_err(gbe_dev->dev, "error initializing ale engine\n"); 2944 dev_err(gbe_dev->dev, "error initializing ale engine\n");
2929 ret = -ENODEV; 2945 ret = -ENODEV;
2930 goto quit; 2946 goto free_sec_ports;
2931 } else { 2947 } else {
2932 dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n"); 2948 dev_dbg(gbe_dev->dev, "Created a gbe ale engine\n");
2933 } 2949 }
@@ -2943,14 +2959,8 @@ static int gbe_probe(struct netcp_device *netcp_device, struct device *dev,
2943 *inst_priv = gbe_dev; 2959 *inst_priv = gbe_dev;
2944 return 0; 2960 return 0;
2945 2961
2946quit: 2962free_sec_ports:
2947 if (gbe_dev->hw_stats) 2963 free_secondary_ports(gbe_dev);
2948 devm_kfree(dev, gbe_dev->hw_stats);
2949 cpsw_ale_destroy(gbe_dev->ale);
2950 if (gbe_dev->ss_regs)
2951 devm_iounmap(dev, gbe_dev->ss_regs);
2952 of_node_put(interfaces);
2953 devm_kfree(dev, gbe_dev);
2954 return ret; 2964 return ret;
2955} 2965}
2956 2966
@@ -3023,12 +3033,9 @@ static int gbe_remove(struct netcp_device *netcp_device, void *inst_priv)
3023 free_secondary_ports(gbe_dev); 3033 free_secondary_ports(gbe_dev);
3024 3034
3025 if (!list_empty(&gbe_dev->gbe_intf_head)) 3035 if (!list_empty(&gbe_dev->gbe_intf_head))
3026 dev_alert(gbe_dev->dev, "unreleased ethss interfaces present\n"); 3036 dev_alert(gbe_dev->dev,
3037 "unreleased ethss interfaces present\n");
3027 3038
3028 devm_kfree(gbe_dev->dev, gbe_dev->hw_stats);
3029 devm_iounmap(gbe_dev->dev, gbe_dev->ss_regs);
3030 memset(gbe_dev, 0x00, sizeof(*gbe_dev));
3031 devm_kfree(gbe_dev->dev, gbe_dev);
3032 return 0; 3039 return 0;
3033} 3040}
3034 3041
diff --git a/drivers/net/ethernet/ti/netcp_sgmii.c b/drivers/net/ethernet/ti/netcp_sgmii.c
index dbeb14266e2f..5d8419f658d0 100644
--- a/drivers/net/ethernet/ti/netcp_sgmii.c
+++ b/drivers/net/ethernet/ti/netcp_sgmii.c
@@ -18,6 +18,9 @@
18 18
19#include "netcp.h" 19#include "netcp.h"
20 20
21#define SGMII_SRESET_RESET BIT(0)
22#define SGMII_SRESET_RTRESET BIT(1)
23
21#define SGMII_REG_STATUS_LOCK BIT(4) 24#define SGMII_REG_STATUS_LOCK BIT(4)
22#define SGMII_REG_STATUS_LINK BIT(0) 25#define SGMII_REG_STATUS_LINK BIT(0)
23#define SGMII_REG_STATUS_AUTONEG BIT(2) 26#define SGMII_REG_STATUS_AUTONEG BIT(2)
@@ -51,12 +54,35 @@ static void sgmii_write_reg_bit(void __iomem *base, int reg, u32 val)
51int netcp_sgmii_reset(void __iomem *sgmii_ofs, int port) 54int netcp_sgmii_reset(void __iomem *sgmii_ofs, int port)
52{ 55{
53 /* Soft reset */ 56 /* Soft reset */
54 sgmii_write_reg_bit(sgmii_ofs, SGMII_SRESET_REG(port), 0x1); 57 sgmii_write_reg_bit(sgmii_ofs, SGMII_SRESET_REG(port),
55 while (sgmii_read_reg(sgmii_ofs, SGMII_SRESET_REG(port)) != 0x0) 58 SGMII_SRESET_RESET);
59
60 while ((sgmii_read_reg(sgmii_ofs, SGMII_SRESET_REG(port)) &
61 SGMII_SRESET_RESET) != 0x0)
56 ; 62 ;
63
57 return 0; 64 return 0;
58} 65}
59 66
67/* port is 0 based */
68bool netcp_sgmii_rtreset(void __iomem *sgmii_ofs, int port, bool set)
69{
70 u32 reg;
71 bool oldval;
72
73 /* Initiate a soft reset */
74 reg = sgmii_read_reg(sgmii_ofs, SGMII_SRESET_REG(port));
75 oldval = (reg & SGMII_SRESET_RTRESET) != 0x0;
76 if (set)
77 reg |= SGMII_SRESET_RTRESET;
78 else
79 reg &= ~SGMII_SRESET_RTRESET;
80 sgmii_write_reg(sgmii_ofs, SGMII_SRESET_REG(port), reg);
81 wmb();
82
83 return oldval;
84}
85
60int netcp_sgmii_get_port_link(void __iomem *sgmii_ofs, int port) 86int netcp_sgmii_get_port_link(void __iomem *sgmii_ofs, int port)
61{ 87{
62 u32 status = 0, link = 0; 88 u32 status = 0, link = 0;
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index 2ffbf13471d0..216bfd350169 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -728,11 +728,12 @@ static int mkiss_open(struct tty_struct *tty)
728 dev->type = ARPHRD_AX25; 728 dev->type = ARPHRD_AX25;
729 729
730 /* Perform the low-level AX25 initialization. */ 730 /* Perform the low-level AX25 initialization. */
731 if ((err = ax_open(ax->dev))) { 731 err = ax_open(ax->dev);
732 if (err)
732 goto out_free_netdev; 733 goto out_free_netdev;
733 }
734 734
735 if (register_netdev(dev)) 735 err = register_netdev(dev);
736 if (err)
736 goto out_free_buffers; 737 goto out_free_buffers;
737 738
738 /* after register_netdev() - because else printk smashes the kernel */ 739 /* after register_netdev() - because else printk smashes the kernel */
diff --git a/drivers/net/ipvlan/ipvlan.h b/drivers/net/ipvlan/ipvlan.h
index 953a97492fab..9542b7bac61a 100644
--- a/drivers/net/ipvlan/ipvlan.h
+++ b/drivers/net/ipvlan/ipvlan.h
@@ -67,8 +67,6 @@ struct ipvl_dev {
67 struct ipvl_port *port; 67 struct ipvl_port *port;
68 struct net_device *phy_dev; 68 struct net_device *phy_dev;
69 struct list_head addrs; 69 struct list_head addrs;
70 int ipv4cnt;
71 int ipv6cnt;
72 struct ipvl_pcpu_stats __percpu *pcpu_stats; 70 struct ipvl_pcpu_stats __percpu *pcpu_stats;
73 DECLARE_BITMAP(mac_filters, IPVLAN_MAC_FILTER_SIZE); 71 DECLARE_BITMAP(mac_filters, IPVLAN_MAC_FILTER_SIZE);
74 netdev_features_t sfeatures; 72 netdev_features_t sfeatures;
@@ -106,6 +104,11 @@ static inline struct ipvl_port *ipvlan_port_get_rcu(const struct net_device *d)
106 return rcu_dereference(d->rx_handler_data); 104 return rcu_dereference(d->rx_handler_data);
107} 105}
108 106
107static inline struct ipvl_port *ipvlan_port_get_rcu_bh(const struct net_device *d)
108{
109 return rcu_dereference_bh(d->rx_handler_data);
110}
111
109static inline struct ipvl_port *ipvlan_port_get_rtnl(const struct net_device *d) 112static inline struct ipvl_port *ipvlan_port_get_rtnl(const struct net_device *d)
110{ 113{
111 return rtnl_dereference(d->rx_handler_data); 114 return rtnl_dereference(d->rx_handler_data);
@@ -124,5 +127,5 @@ struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
124bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6); 127bool ipvlan_addr_busy(struct ipvl_port *port, void *iaddr, bool is_v6);
125struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port, 128struct ipvl_addr *ipvlan_ht_addr_lookup(const struct ipvl_port *port,
126 const void *iaddr, bool is_v6); 129 const void *iaddr, bool is_v6);
127void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync); 130void ipvlan_ht_addr_del(struct ipvl_addr *addr);
128#endif /* __IPVLAN_H */ 131#endif /* __IPVLAN_H */
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index 8afbedad620d..207f62e8de9a 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -85,11 +85,9 @@ void ipvlan_ht_addr_add(struct ipvl_dev *ipvlan, struct ipvl_addr *addr)
85 hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]); 85 hlist_add_head_rcu(&addr->hlnode, &port->hlhead[hash]);
86} 86}
87 87
88void ipvlan_ht_addr_del(struct ipvl_addr *addr, bool sync) 88void ipvlan_ht_addr_del(struct ipvl_addr *addr)
89{ 89{
90 hlist_del_init_rcu(&addr->hlnode); 90 hlist_del_init_rcu(&addr->hlnode);
91 if (sync)
92 synchronize_rcu();
93} 91}
94 92
95struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan, 93struct ipvl_addr *ipvlan_find_addr(const struct ipvl_dev *ipvlan,
@@ -531,7 +529,7 @@ static int ipvlan_xmit_mode_l2(struct sk_buff *skb, struct net_device *dev)
531int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) 529int ipvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev)
532{ 530{
533 struct ipvl_dev *ipvlan = netdev_priv(dev); 531 struct ipvl_dev *ipvlan = netdev_priv(dev);
534 struct ipvl_port *port = ipvlan_port_get_rcu(ipvlan->phy_dev); 532 struct ipvl_port *port = ipvlan_port_get_rcu_bh(ipvlan->phy_dev);
535 533
536 if (!port) 534 if (!port)
537 goto out; 535 goto out;
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 1acc283160d9..20b58bdecf75 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -153,10 +153,9 @@ static int ipvlan_open(struct net_device *dev)
153 else 153 else
154 dev->flags &= ~IFF_NOARP; 154 dev->flags &= ~IFF_NOARP;
155 155
156 if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) { 156 list_for_each_entry(addr, &ipvlan->addrs, anode)
157 list_for_each_entry(addr, &ipvlan->addrs, anode) 157 ipvlan_ht_addr_add(ipvlan, addr);
158 ipvlan_ht_addr_add(ipvlan, addr); 158
159 }
160 return dev_uc_add(phy_dev, phy_dev->dev_addr); 159 return dev_uc_add(phy_dev, phy_dev->dev_addr);
161} 160}
162 161
@@ -171,10 +170,9 @@ static int ipvlan_stop(struct net_device *dev)
171 170
172 dev_uc_del(phy_dev, phy_dev->dev_addr); 171 dev_uc_del(phy_dev, phy_dev->dev_addr);
173 172
174 if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) { 173 list_for_each_entry(addr, &ipvlan->addrs, anode)
175 list_for_each_entry(addr, &ipvlan->addrs, anode) 174 ipvlan_ht_addr_del(addr);
176 ipvlan_ht_addr_del(addr, !dev->dismantle); 175
177 }
178 return 0; 176 return 0;
179} 177}
180 178
@@ -471,8 +469,6 @@ static int ipvlan_link_new(struct net *src_net, struct net_device *dev,
471 ipvlan->port = port; 469 ipvlan->port = port;
472 ipvlan->sfeatures = IPVLAN_FEATURES; 470 ipvlan->sfeatures = IPVLAN_FEATURES;
473 INIT_LIST_HEAD(&ipvlan->addrs); 471 INIT_LIST_HEAD(&ipvlan->addrs);
474 ipvlan->ipv4cnt = 0;
475 ipvlan->ipv6cnt = 0;
476 472
477 /* TODO Probably put random address here to be presented to the 473 /* TODO Probably put random address here to be presented to the
478 * world but keep using the physical-dev address for the outgoing 474 * world but keep using the physical-dev address for the outgoing
@@ -508,12 +504,12 @@ static void ipvlan_link_delete(struct net_device *dev, struct list_head *head)
508 struct ipvl_dev *ipvlan = netdev_priv(dev); 504 struct ipvl_dev *ipvlan = netdev_priv(dev);
509 struct ipvl_addr *addr, *next; 505 struct ipvl_addr *addr, *next;
510 506
511 if (ipvlan->ipv6cnt > 0 || ipvlan->ipv4cnt > 0) { 507 list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) {
512 list_for_each_entry_safe(addr, next, &ipvlan->addrs, anode) { 508 ipvlan_ht_addr_del(addr);
513 ipvlan_ht_addr_del(addr, !dev->dismantle); 509 list_del(&addr->anode);
514 list_del(&addr->anode); 510 kfree_rcu(addr, rcu);
515 }
516 } 511 }
512
517 list_del_rcu(&ipvlan->pnode); 513 list_del_rcu(&ipvlan->pnode);
518 unregister_netdevice_queue(dev, head); 514 unregister_netdevice_queue(dev, head);
519 netdev_upper_dev_unlink(ipvlan->phy_dev, dev); 515 netdev_upper_dev_unlink(ipvlan->phy_dev, dev);
@@ -627,7 +623,7 @@ static int ipvlan_add_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
627 memcpy(&addr->ip6addr, ip6_addr, sizeof(struct in6_addr)); 623 memcpy(&addr->ip6addr, ip6_addr, sizeof(struct in6_addr));
628 addr->atype = IPVL_IPV6; 624 addr->atype = IPVL_IPV6;
629 list_add_tail(&addr->anode, &ipvlan->addrs); 625 list_add_tail(&addr->anode, &ipvlan->addrs);
630 ipvlan->ipv6cnt++; 626
631 /* If the interface is not up, the address will be added to the hash 627 /* If the interface is not up, the address will be added to the hash
632 * list by ipvlan_open. 628 * list by ipvlan_open.
633 */ 629 */
@@ -645,10 +641,8 @@ static void ipvlan_del_addr6(struct ipvl_dev *ipvlan, struct in6_addr *ip6_addr)
645 if (!addr) 641 if (!addr)
646 return; 642 return;
647 643
648 ipvlan_ht_addr_del(addr, true); 644 ipvlan_ht_addr_del(addr);
649 list_del(&addr->anode); 645 list_del(&addr->anode);
650 ipvlan->ipv6cnt--;
651 WARN_ON(ipvlan->ipv6cnt < 0);
652 kfree_rcu(addr, rcu); 646 kfree_rcu(addr, rcu);
653 647
654 return; 648 return;
@@ -661,6 +655,10 @@ static int ipvlan_addr6_event(struct notifier_block *unused,
661 struct net_device *dev = (struct net_device *)if6->idev->dev; 655 struct net_device *dev = (struct net_device *)if6->idev->dev;
662 struct ipvl_dev *ipvlan = netdev_priv(dev); 656 struct ipvl_dev *ipvlan = netdev_priv(dev);
663 657
658 /* FIXME IPv6 autoconf calls us from bh without RTNL */
659 if (in_softirq())
660 return NOTIFY_DONE;
661
664 if (!netif_is_ipvlan(dev)) 662 if (!netif_is_ipvlan(dev))
665 return NOTIFY_DONE; 663 return NOTIFY_DONE;
666 664
@@ -699,7 +697,7 @@ static int ipvlan_add_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
699 memcpy(&addr->ip4addr, ip4_addr, sizeof(struct in_addr)); 697 memcpy(&addr->ip4addr, ip4_addr, sizeof(struct in_addr));
700 addr->atype = IPVL_IPV4; 698 addr->atype = IPVL_IPV4;
701 list_add_tail(&addr->anode, &ipvlan->addrs); 699 list_add_tail(&addr->anode, &ipvlan->addrs);
702 ipvlan->ipv4cnt++; 700
703 /* If the interface is not up, the address will be added to the hash 701 /* If the interface is not up, the address will be added to the hash
704 * list by ipvlan_open. 702 * list by ipvlan_open.
705 */ 703 */
@@ -717,10 +715,8 @@ static void ipvlan_del_addr4(struct ipvl_dev *ipvlan, struct in_addr *ip4_addr)
717 if (!addr) 715 if (!addr)
718 return; 716 return;
719 717
720 ipvlan_ht_addr_del(addr, true); 718 ipvlan_ht_addr_del(addr);
721 list_del(&addr->anode); 719 list_del(&addr->anode);
722 ipvlan->ipv4cnt--;
723 WARN_ON(ipvlan->ipv4cnt < 0);
724 kfree_rcu(addr, rcu); 720 kfree_rcu(addr, rcu);
725 721
726 return; 722 return;
diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
index 3b933bb5a8d5..edd77342773a 100644
--- a/drivers/net/macvtap.c
+++ b/drivers/net/macvtap.c
@@ -719,6 +719,7 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
719 struct virtio_net_hdr vnet_hdr = { 0 }; 719 struct virtio_net_hdr vnet_hdr = { 0 };
720 int vnet_hdr_len = 0; 720 int vnet_hdr_len = 0;
721 int copylen = 0; 721 int copylen = 0;
722 int depth;
722 bool zerocopy = false; 723 bool zerocopy = false;
723 size_t linear; 724 size_t linear;
724 ssize_t n; 725 ssize_t n;
@@ -804,6 +805,12 @@ static ssize_t macvtap_get_user(struct macvtap_queue *q, struct msghdr *m,
804 805
805 skb_probe_transport_header(skb, ETH_HLEN); 806 skb_probe_transport_header(skb, ETH_HLEN);
806 807
808 /* Move network header to the right position for VLAN tagged packets */
809 if ((skb->protocol == htons(ETH_P_8021Q) ||
810 skb->protocol == htons(ETH_P_8021AD)) &&
811 __vlan_get_protocol(skb, skb->protocol, &depth) != 0)
812 skb_set_network_header(skb, depth);
813
807 rcu_read_lock(); 814 rcu_read_lock();
808 vlan = rcu_dereference(q->vlan); 815 vlan = rcu_dereference(q->vlan);
809 /* copy skb_ubuf_info for callback when skb has no error */ 816 /* copy skb_ubuf_info for callback when skb has no error */
diff --git a/drivers/net/ntb_netdev.c b/drivers/net/ntb_netdev.c
index 3cc316cb7e6b..d8757bf9ad75 100644
--- a/drivers/net/ntb_netdev.c
+++ b/drivers/net/ntb_netdev.c
@@ -102,6 +102,12 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
102 102
103 netdev_dbg(ndev, "%s: %d byte payload received\n", __func__, len); 103 netdev_dbg(ndev, "%s: %d byte payload received\n", __func__, len);
104 104
105 if (len < 0) {
106 ndev->stats.rx_errors++;
107 ndev->stats.rx_length_errors++;
108 goto enqueue_again;
109 }
110
105 skb_put(skb, len); 111 skb_put(skb, len);
106 skb->protocol = eth_type_trans(skb, ndev); 112 skb->protocol = eth_type_trans(skb, ndev);
107 skb->ip_summed = CHECKSUM_NONE; 113 skb->ip_summed = CHECKSUM_NONE;
@@ -121,6 +127,7 @@ static void ntb_netdev_rx_handler(struct ntb_transport_qp *qp, void *qp_data,
121 return; 127 return;
122 } 128 }
123 129
130enqueue_again:
124 rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN); 131 rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN);
125 if (rc) { 132 if (rc) {
126 dev_kfree_skb(skb); 133 dev_kfree_skb(skb);
@@ -184,7 +191,7 @@ static int ntb_netdev_open(struct net_device *ndev)
184 191
185 rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data, 192 rc = ntb_transport_rx_enqueue(dev->qp, skb, skb->data,
186 ndev->mtu + ETH_HLEN); 193 ndev->mtu + ETH_HLEN);
187 if (rc == -EINVAL) { 194 if (rc) {
188 dev_kfree_skb(skb); 195 dev_kfree_skb(skb);
189 goto err; 196 goto err;
190 } 197 }
diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c
index c7a12e2e07b7..8a3bf5469892 100644
--- a/drivers/net/phy/dp83867.c
+++ b/drivers/net/phy/dp83867.c
@@ -164,7 +164,7 @@ static int dp83867_config_init(struct phy_device *phydev)
164 return ret; 164 return ret;
165 } 165 }
166 166
167 if ((phydev->interface >= PHY_INTERFACE_MODE_RGMII_ID) || 167 if ((phydev->interface >= PHY_INTERFACE_MODE_RGMII_ID) &&
168 (phydev->interface <= PHY_INTERFACE_MODE_RGMII_RXID)) { 168 (phydev->interface <= PHY_INTERFACE_MODE_RGMII_RXID)) {
169 val = phy_read_mmd_indirect(phydev, DP83867_RGMIICTL, 169 val = phy_read_mmd_indirect(phydev, DP83867_RGMIICTL,
170 DP83867_DEVADDR, phydev->addr); 170 DP83867_DEVADDR, phydev->addr);
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 095ef3fe369a..46a14cbb0215 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -421,6 +421,8 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv)
421{ 421{
422 struct phy_device *phydev = to_phy_device(dev); 422 struct phy_device *phydev = to_phy_device(dev);
423 struct phy_driver *phydrv = to_phy_driver(drv); 423 struct phy_driver *phydrv = to_phy_driver(drv);
424 const int num_ids = ARRAY_SIZE(phydev->c45_ids.device_ids);
425 int i;
424 426
425 if (of_driver_match_device(dev, drv)) 427 if (of_driver_match_device(dev, drv))
426 return 1; 428 return 1;
@@ -428,8 +430,21 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv)
428 if (phydrv->match_phy_device) 430 if (phydrv->match_phy_device)
429 return phydrv->match_phy_device(phydev); 431 return phydrv->match_phy_device(phydev);
430 432
431 return (phydrv->phy_id & phydrv->phy_id_mask) == 433 if (phydev->is_c45) {
432 (phydev->phy_id & phydrv->phy_id_mask); 434 for (i = 1; i < num_ids; i++) {
435 if (!(phydev->c45_ids.devices_in_package & (1 << i)))
436 continue;
437
438 if ((phydrv->phy_id & phydrv->phy_id_mask) ==
439 (phydev->c45_ids.device_ids[i] &
440 phydrv->phy_id_mask))
441 return 1;
442 }
443 return 0;
444 } else {
445 return (phydrv->phy_id & phydrv->phy_id_mask) ==
446 (phydev->phy_id & phydrv->phy_id_mask);
447 }
433} 448}
434 449
435#ifdef CONFIG_PM 450#ifdef CONFIG_PM
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index f603f362504b..9d43460ce3c7 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -757,6 +757,7 @@ static const struct usb_device_id products[] = {
757 {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */ 757 {QMI_FIXED_INTF(0x1199, 0x901c, 8)}, /* Sierra Wireless EM7700 */
758 {QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */ 758 {QMI_FIXED_INTF(0x1199, 0x901f, 8)}, /* Sierra Wireless EM7355 */
759 {QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra Wireless MC7305/MC7355 */ 759 {QMI_FIXED_INTF(0x1199, 0x9041, 8)}, /* Sierra Wireless MC7305/MC7355 */
760 {QMI_FIXED_INTF(0x1199, 0x9041, 10)}, /* Sierra Wireless MC7305/MC7355 */
760 {QMI_FIXED_INTF(0x1199, 0x9051, 8)}, /* Netgear AirCard 340U */ 761 {QMI_FIXED_INTF(0x1199, 0x9051, 8)}, /* Netgear AirCard 340U */
761 {QMI_FIXED_INTF(0x1199, 0x9053, 8)}, /* Sierra Wireless Modem */ 762 {QMI_FIXED_INTF(0x1199, 0x9053, 8)}, /* Sierra Wireless Modem */
762 {QMI_FIXED_INTF(0x1199, 0x9054, 8)}, /* Sierra Wireless Modem */ 763 {QMI_FIXED_INTF(0x1199, 0x9054, 8)}, /* Sierra Wireless Modem */
diff --git a/drivers/net/usb/r8152.c b/drivers/net/usb/r8152.c
index 7f6419ebb5e1..ad8cbc6c9ee7 100644
--- a/drivers/net/usb/r8152.c
+++ b/drivers/net/usb/r8152.c
@@ -27,7 +27,7 @@
27#include <linux/usb/cdc.h> 27#include <linux/usb/cdc.h>
28 28
29/* Version Information */ 29/* Version Information */
30#define DRIVER_VERSION "v1.08.0 (2015/01/13)" 30#define DRIVER_VERSION "v1.08.1 (2015/07/28)"
31#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>" 31#define DRIVER_AUTHOR "Realtek linux nic maintainers <nic_swsd@realtek.com>"
32#define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters" 32#define DRIVER_DESC "Realtek RTL8152/RTL8153 Based USB Ethernet Adapters"
33#define MODULENAME "r8152" 33#define MODULENAME "r8152"
@@ -1902,11 +1902,10 @@ static void rtl_drop_queued_tx(struct r8152 *tp)
1902static void rtl8152_tx_timeout(struct net_device *netdev) 1902static void rtl8152_tx_timeout(struct net_device *netdev)
1903{ 1903{
1904 struct r8152 *tp = netdev_priv(netdev); 1904 struct r8152 *tp = netdev_priv(netdev);
1905 int i;
1906 1905
1907 netif_warn(tp, tx_err, netdev, "Tx timeout\n"); 1906 netif_warn(tp, tx_err, netdev, "Tx timeout\n");
1908 for (i = 0; i < RTL8152_MAX_TX; i++) 1907
1909 usb_unlink_urb(tp->tx_info[i].urb); 1908 usb_queue_reset_device(tp->intf);
1910} 1909}
1911 1910
1912static void rtl8152_set_rx_mode(struct net_device *netdev) 1911static void rtl8152_set_rx_mode(struct net_device *netdev)
@@ -2075,7 +2074,6 @@ static int rtl_start_rx(struct r8152 *tp)
2075{ 2074{
2076 int i, ret = 0; 2075 int i, ret = 0;
2077 2076
2078 napi_disable(&tp->napi);
2079 INIT_LIST_HEAD(&tp->rx_done); 2077 INIT_LIST_HEAD(&tp->rx_done);
2080 for (i = 0; i < RTL8152_MAX_RX; i++) { 2078 for (i = 0; i < RTL8152_MAX_RX; i++) {
2081 INIT_LIST_HEAD(&tp->rx_info[i].list); 2079 INIT_LIST_HEAD(&tp->rx_info[i].list);
@@ -2083,7 +2081,6 @@ static int rtl_start_rx(struct r8152 *tp)
2083 if (ret) 2081 if (ret)
2084 break; 2082 break;
2085 } 2083 }
2086 napi_enable(&tp->napi);
2087 2084
2088 if (ret && ++i < RTL8152_MAX_RX) { 2085 if (ret && ++i < RTL8152_MAX_RX) {
2089 struct list_head rx_queue; 2086 struct list_head rx_queue;
@@ -2166,6 +2163,7 @@ static int rtl8153_enable(struct r8152 *tp)
2166 if (test_bit(RTL8152_UNPLUG, &tp->flags)) 2163 if (test_bit(RTL8152_UNPLUG, &tp->flags))
2167 return -ENODEV; 2164 return -ENODEV;
2168 2165
2166 usb_disable_lpm(tp->udev);
2169 set_tx_qlen(tp); 2167 set_tx_qlen(tp);
2170 rtl_set_eee_plus(tp); 2168 rtl_set_eee_plus(tp);
2171 r8153_set_rx_early_timeout(tp); 2169 r8153_set_rx_early_timeout(tp);
@@ -2337,11 +2335,61 @@ static void __rtl_set_wol(struct r8152 *tp, u32 wolopts)
2337 device_set_wakeup_enable(&tp->udev->dev, false); 2335 device_set_wakeup_enable(&tp->udev->dev, false);
2338} 2336}
2339 2337
2338static void r8153_u1u2en(struct r8152 *tp, bool enable)
2339{
2340 u8 u1u2[8];
2341
2342 if (enable)
2343 memset(u1u2, 0xff, sizeof(u1u2));
2344 else
2345 memset(u1u2, 0x00, sizeof(u1u2));
2346
2347 usb_ocp_write(tp, USB_TOLERANCE, BYTE_EN_SIX_BYTES, sizeof(u1u2), u1u2);
2348}
2349
2350static void r8153_u2p3en(struct r8152 *tp, bool enable)
2351{
2352 u32 ocp_data;
2353
2354 ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL);
2355 if (enable && tp->version != RTL_VER_03 && tp->version != RTL_VER_04)
2356 ocp_data |= U2P3_ENABLE;
2357 else
2358 ocp_data &= ~U2P3_ENABLE;
2359 ocp_write_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL, ocp_data);
2360}
2361
2362static void r8153_power_cut_en(struct r8152 *tp, bool enable)
2363{
2364 u32 ocp_data;
2365
2366 ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_POWER_CUT);
2367 if (enable)
2368 ocp_data |= PWR_EN | PHASE2_EN;
2369 else
2370 ocp_data &= ~(PWR_EN | PHASE2_EN);
2371 ocp_write_word(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data);
2372
2373 ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
2374 ocp_data &= ~PCUT_STATUS;
2375 ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data);
2376}
2377
2378static bool rtl_can_wakeup(struct r8152 *tp)
2379{
2380 struct usb_device *udev = tp->udev;
2381
2382 return (udev->actconfig->desc.bmAttributes & USB_CONFIG_ATT_WAKEUP);
2383}
2384
2340static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable) 2385static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable)
2341{ 2386{
2342 if (enable) { 2387 if (enable) {
2343 u32 ocp_data; 2388 u32 ocp_data;
2344 2389
2390 r8153_u1u2en(tp, false);
2391 r8153_u2p3en(tp, false);
2392
2345 __rtl_set_wol(tp, WAKE_ANY); 2393 __rtl_set_wol(tp, WAKE_ANY);
2346 2394
2347 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG); 2395 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_CONFIG);
@@ -2353,6 +2401,8 @@ static void rtl_runtime_suspend_enable(struct r8152 *tp, bool enable)
2353 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML); 2401 ocp_write_byte(tp, MCU_TYPE_PLA, PLA_CRWECR, CRWECR_NORAML);
2354 } else { 2402 } else {
2355 __rtl_set_wol(tp, tp->saved_wolopts); 2403 __rtl_set_wol(tp, tp->saved_wolopts);
2404 r8153_u2p3en(tp, true);
2405 r8153_u1u2en(tp, true);
2356 } 2406 }
2357} 2407}
2358 2408
@@ -2599,46 +2649,6 @@ static void r8153_hw_phy_cfg(struct r8152 *tp)
2599 set_bit(PHY_RESET, &tp->flags); 2649 set_bit(PHY_RESET, &tp->flags);
2600} 2650}
2601 2651
2602static void r8153_u1u2en(struct r8152 *tp, bool enable)
2603{
2604 u8 u1u2[8];
2605
2606 if (enable)
2607 memset(u1u2, 0xff, sizeof(u1u2));
2608 else
2609 memset(u1u2, 0x00, sizeof(u1u2));
2610
2611 usb_ocp_write(tp, USB_TOLERANCE, BYTE_EN_SIX_BYTES, sizeof(u1u2), u1u2);
2612}
2613
2614static void r8153_u2p3en(struct r8152 *tp, bool enable)
2615{
2616 u32 ocp_data;
2617
2618 ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL);
2619 if (enable)
2620 ocp_data |= U2P3_ENABLE;
2621 else
2622 ocp_data &= ~U2P3_ENABLE;
2623 ocp_write_word(tp, MCU_TYPE_USB, USB_U2P3_CTRL, ocp_data);
2624}
2625
2626static void r8153_power_cut_en(struct r8152 *tp, bool enable)
2627{
2628 u32 ocp_data;
2629
2630 ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_POWER_CUT);
2631 if (enable)
2632 ocp_data |= PWR_EN | PHASE2_EN;
2633 else
2634 ocp_data &= ~(PWR_EN | PHASE2_EN);
2635 ocp_write_word(tp, MCU_TYPE_USB, USB_POWER_CUT, ocp_data);
2636
2637 ocp_data = ocp_read_word(tp, MCU_TYPE_USB, USB_MISC_0);
2638 ocp_data &= ~PCUT_STATUS;
2639 ocp_write_word(tp, MCU_TYPE_USB, USB_MISC_0, ocp_data);
2640}
2641
2642static void r8153_first_init(struct r8152 *tp) 2652static void r8153_first_init(struct r8152 *tp)
2643{ 2653{
2644 u32 ocp_data; 2654 u32 ocp_data;
@@ -2781,6 +2791,7 @@ static void rtl8153_disable(struct r8152 *tp)
2781 r8153_disable_aldps(tp); 2791 r8153_disable_aldps(tp);
2782 rtl_disable(tp); 2792 rtl_disable(tp);
2783 r8153_enable_aldps(tp); 2793 r8153_enable_aldps(tp);
2794 usb_enable_lpm(tp->udev);
2784} 2795}
2785 2796
2786static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex) 2797static int rtl8152_set_speed(struct r8152 *tp, u8 autoneg, u16 speed, u8 duplex)
@@ -2901,9 +2912,13 @@ static void rtl8153_up(struct r8152 *tp)
2901 if (test_bit(RTL8152_UNPLUG, &tp->flags)) 2912 if (test_bit(RTL8152_UNPLUG, &tp->flags))
2902 return; 2913 return;
2903 2914
2915 r8153_u1u2en(tp, false);
2904 r8153_disable_aldps(tp); 2916 r8153_disable_aldps(tp);
2905 r8153_first_init(tp); 2917 r8153_first_init(tp);
2906 r8153_enable_aldps(tp); 2918 r8153_enable_aldps(tp);
2919 r8153_u2p3en(tp, true);
2920 r8153_u1u2en(tp, true);
2921 usb_enable_lpm(tp->udev);
2907} 2922}
2908 2923
2909static void rtl8153_down(struct r8152 *tp) 2924static void rtl8153_down(struct r8152 *tp)
@@ -2914,6 +2929,7 @@ static void rtl8153_down(struct r8152 *tp)
2914 } 2929 }
2915 2930
2916 r8153_u1u2en(tp, false); 2931 r8153_u1u2en(tp, false);
2932 r8153_u2p3en(tp, false);
2917 r8153_power_cut_en(tp, false); 2933 r8153_power_cut_en(tp, false);
2918 r8153_disable_aldps(tp); 2934 r8153_disable_aldps(tp);
2919 r8153_enter_oob(tp); 2935 r8153_enter_oob(tp);
@@ -2932,8 +2948,10 @@ static void set_carrier(struct r8152 *tp)
2932 if (!netif_carrier_ok(netdev)) { 2948 if (!netif_carrier_ok(netdev)) {
2933 tp->rtl_ops.enable(tp); 2949 tp->rtl_ops.enable(tp);
2934 set_bit(RTL8152_SET_RX_MODE, &tp->flags); 2950 set_bit(RTL8152_SET_RX_MODE, &tp->flags);
2951 napi_disable(&tp->napi);
2935 netif_carrier_on(netdev); 2952 netif_carrier_on(netdev);
2936 rtl_start_rx(tp); 2953 rtl_start_rx(tp);
2954 napi_enable(&tp->napi);
2937 } 2955 }
2938 } else { 2956 } else {
2939 if (netif_carrier_ok(netdev)) { 2957 if (netif_carrier_ok(netdev)) {
@@ -3252,6 +3270,7 @@ static void r8153_init(struct r8152 *tp)
3252 msleep(20); 3270 msleep(20);
3253 } 3271 }
3254 3272
3273 usb_disable_lpm(tp->udev);
3255 r8153_u2p3en(tp, false); 3274 r8153_u2p3en(tp, false);
3256 3275
3257 if (tp->version == RTL_VER_04) { 3276 if (tp->version == RTL_VER_04) {
@@ -3319,6 +3338,59 @@ static void r8153_init(struct r8152 *tp)
3319 r8153_enable_aldps(tp); 3338 r8153_enable_aldps(tp);
3320 r8152b_enable_fc(tp); 3339 r8152b_enable_fc(tp);
3321 rtl_tally_reset(tp); 3340 rtl_tally_reset(tp);
3341 r8153_u2p3en(tp, true);
3342}
3343
3344static int rtl8152_pre_reset(struct usb_interface *intf)
3345{
3346 struct r8152 *tp = usb_get_intfdata(intf);
3347 struct net_device *netdev;
3348
3349 if (!tp)
3350 return 0;
3351
3352 netdev = tp->netdev;
3353 if (!netif_running(netdev))
3354 return 0;
3355
3356 napi_disable(&tp->napi);
3357 clear_bit(WORK_ENABLE, &tp->flags);
3358 usb_kill_urb(tp->intr_urb);
3359 cancel_delayed_work_sync(&tp->schedule);
3360 if (netif_carrier_ok(netdev)) {
3361 netif_stop_queue(netdev);
3362 mutex_lock(&tp->control);
3363 tp->rtl_ops.disable(tp);
3364 mutex_unlock(&tp->control);
3365 }
3366
3367 return 0;
3368}
3369
3370static int rtl8152_post_reset(struct usb_interface *intf)
3371{
3372 struct r8152 *tp = usb_get_intfdata(intf);
3373 struct net_device *netdev;
3374
3375 if (!tp)
3376 return 0;
3377
3378 netdev = tp->netdev;
3379 if (!netif_running(netdev))
3380 return 0;
3381
3382 set_bit(WORK_ENABLE, &tp->flags);
3383 if (netif_carrier_ok(netdev)) {
3384 mutex_lock(&tp->control);
3385 tp->rtl_ops.enable(tp);
3386 rtl8152_set_rx_mode(netdev);
3387 mutex_unlock(&tp->control);
3388 netif_wake_queue(netdev);
3389 }
3390
3391 napi_enable(&tp->napi);
3392
3393 return 0;
3322} 3394}
3323 3395
3324static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message) 3396static int rtl8152_suspend(struct usb_interface *intf, pm_message_t message)
@@ -3374,9 +3446,11 @@ static int rtl8152_resume(struct usb_interface *intf)
3374 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) { 3446 if (test_bit(SELECTIVE_SUSPEND, &tp->flags)) {
3375 rtl_runtime_suspend_enable(tp, false); 3447 rtl_runtime_suspend_enable(tp, false);
3376 clear_bit(SELECTIVE_SUSPEND, &tp->flags); 3448 clear_bit(SELECTIVE_SUSPEND, &tp->flags);
3449 napi_disable(&tp->napi);
3377 set_bit(WORK_ENABLE, &tp->flags); 3450 set_bit(WORK_ENABLE, &tp->flags);
3378 if (netif_carrier_ok(tp->netdev)) 3451 if (netif_carrier_ok(tp->netdev))
3379 rtl_start_rx(tp); 3452 rtl_start_rx(tp);
3453 napi_enable(&tp->napi);
3380 } else { 3454 } else {
3381 tp->rtl_ops.up(tp); 3455 tp->rtl_ops.up(tp);
3382 rtl8152_set_speed(tp, AUTONEG_ENABLE, 3456 rtl8152_set_speed(tp, AUTONEG_ENABLE,
@@ -3403,12 +3477,15 @@ static void rtl8152_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3403 if (usb_autopm_get_interface(tp->intf) < 0) 3477 if (usb_autopm_get_interface(tp->intf) < 0)
3404 return; 3478 return;
3405 3479
3406 mutex_lock(&tp->control); 3480 if (!rtl_can_wakeup(tp)) {
3407 3481 wol->supported = 0;
3408 wol->supported = WAKE_ANY; 3482 wol->wolopts = 0;
3409 wol->wolopts = __rtl_get_wol(tp); 3483 } else {
3410 3484 mutex_lock(&tp->control);
3411 mutex_unlock(&tp->control); 3485 wol->supported = WAKE_ANY;
3486 wol->wolopts = __rtl_get_wol(tp);
3487 mutex_unlock(&tp->control);
3488 }
3412 3489
3413 usb_autopm_put_interface(tp->intf); 3490 usb_autopm_put_interface(tp->intf);
3414} 3491}
@@ -3418,6 +3495,9 @@ static int rtl8152_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
3418 struct r8152 *tp = netdev_priv(dev); 3495 struct r8152 *tp = netdev_priv(dev);
3419 int ret; 3496 int ret;
3420 3497
3498 if (!rtl_can_wakeup(tp))
3499 return -EOPNOTSUPP;
3500
3421 ret = usb_autopm_get_interface(tp->intf); 3501 ret = usb_autopm_get_interface(tp->intf);
3422 if (ret < 0) 3502 if (ret < 0)
3423 goto out_set_wol; 3503 goto out_set_wol;
@@ -4059,6 +4139,9 @@ static int rtl8152_probe(struct usb_interface *intf,
4059 goto out1; 4139 goto out1;
4060 } 4140 }
4061 4141
4142 if (!rtl_can_wakeup(tp))
4143 __rtl_set_wol(tp, 0);
4144
4062 tp->saved_wolopts = __rtl_get_wol(tp); 4145 tp->saved_wolopts = __rtl_get_wol(tp);
4063 if (tp->saved_wolopts) 4146 if (tp->saved_wolopts)
4064 device_set_wakeup_enable(&udev->dev, true); 4147 device_set_wakeup_enable(&udev->dev, true);
@@ -4132,6 +4215,8 @@ static struct usb_driver rtl8152_driver = {
4132 .suspend = rtl8152_suspend, 4215 .suspend = rtl8152_suspend,
4133 .resume = rtl8152_resume, 4216 .resume = rtl8152_resume,
4134 .reset_resume = rtl8152_resume, 4217 .reset_resume = rtl8152_resume,
4218 .pre_reset = rtl8152_pre_reset,
4219 .post_reset = rtl8152_post_reset,
4135 .supports_autosuspend = 1, 4220 .supports_autosuspend = 1,
4136 .disable_hub_initiated_lpm = 1, 4221 .disable_hub_initiated_lpm = 1,
4137}; 4222};
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 63c7810e1545..237f8e5e493d 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -1756,9 +1756,9 @@ static int virtnet_probe(struct virtio_device *vdev)
1756 /* Do we support "hardware" checksums? */ 1756 /* Do we support "hardware" checksums? */
1757 if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) { 1757 if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
1758 /* This opens up the world of extra features. */ 1758 /* This opens up the world of extra features. */
1759 dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; 1759 dev->hw_features |= NETIF_F_HW_CSUM | NETIF_F_SG;
1760 if (csum) 1760 if (csum)
1761 dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST; 1761 dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG;
1762 1762
1763 if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) { 1763 if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
1764 dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO 1764 dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
@@ -1828,7 +1828,8 @@ static int virtnet_probe(struct virtio_device *vdev)
1828 else 1828 else
1829 vi->hdr_len = sizeof(struct virtio_net_hdr); 1829 vi->hdr_len = sizeof(struct virtio_net_hdr);
1830 1830
1831 if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT)) 1831 if (virtio_has_feature(vdev, VIRTIO_F_ANY_LAYOUT) ||
1832 virtio_has_feature(vdev, VIRTIO_F_VERSION_1))
1832 vi->any_header_sg = true; 1833 vi->any_header_sg = true;
1833 1834
1834 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ)) 1835 if (virtio_has_feature(vdev, VIRTIO_NET_F_CTRL_VQ))
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 7193b7304fdd..848ea6a399f2 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -589,7 +589,8 @@ static int cosa_probe(int base, int irq, int dma)
589 chan->netdev->base_addr = chan->cosa->datareg; 589 chan->netdev->base_addr = chan->cosa->datareg;
590 chan->netdev->irq = chan->cosa->irq; 590 chan->netdev->irq = chan->cosa->irq;
591 chan->netdev->dma = chan->cosa->dma; 591 chan->netdev->dma = chan->cosa->dma;
592 if (register_hdlc_device(chan->netdev)) { 592 err = register_hdlc_device(chan->netdev);
593 if (err) {
593 netdev_warn(chan->netdev, 594 netdev_warn(chan->netdev,
594 "register_hdlc_device() failed\n"); 595 "register_hdlc_device() failed\n");
595 free_netdev(chan->netdev); 596 free_netdev(chan->netdev);
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index 5e15e8e10ed3..a31a6804dc34 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -279,6 +279,7 @@ static void ath9k_hw_read_revisions(struct ath_hw *ah)
279 return; 279 return;
280 case AR9300_DEVID_QCA956X: 280 case AR9300_DEVID_QCA956X:
281 ah->hw_version.macVersion = AR_SREV_VERSION_9561; 281 ah->hw_version.macVersion = AR_SREV_VERSION_9561;
282 return;
282 } 283 }
283 284
284 val = REG_READ(ah, AR_SREV) & AR_SREV_ID; 285 val = REG_READ(ah, AR_SREV) & AR_SREV_ID;
diff --git a/drivers/net/wireless/b43/tables_nphy.c b/drivers/net/wireless/b43/tables_nphy.c
index 25d1cbd34306..b2f0d245bcf3 100644
--- a/drivers/net/wireless/b43/tables_nphy.c
+++ b/drivers/net/wireless/b43/tables_nphy.c
@@ -3728,7 +3728,7 @@ const u32 *b43_nphy_get_tx_gain_table(struct b43_wldev *dev)
3728 switch (phy->rev) { 3728 switch (phy->rev) {
3729 case 6: 3729 case 6:
3730 case 5: 3730 case 5:
3731 if (sprom->fem.ghz5.extpa_gain == 3) 3731 if (sprom->fem.ghz2.extpa_gain == 3)
3732 return b43_ntab_tx_gain_epa_rev3_hi_pwr_2g; 3732 return b43_ntab_tx_gain_epa_rev3_hi_pwr_2g;
3733 /* fall through */ 3733 /* fall through */
3734 case 4: 3734 case 4:
diff --git a/drivers/net/wireless/iwlwifi/iwl-fh.h b/drivers/net/wireless/iwlwifi/iwl-fh.h
index d56064861a9c..d45dc021cda2 100644
--- a/drivers/net/wireless/iwlwifi/iwl-fh.h
+++ b/drivers/net/wireless/iwlwifi/iwl-fh.h
@@ -438,6 +438,12 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(unsigned int chnl)
438#define RX_QUEUE_MASK 255 438#define RX_QUEUE_MASK 255
439#define RX_QUEUE_SIZE_LOG 8 439#define RX_QUEUE_SIZE_LOG 8
440 440
441/*
442 * RX related structures and functions
443 */
444#define RX_FREE_BUFFERS 64
445#define RX_LOW_WATERMARK 8
446
441/** 447/**
442 * struct iwl_rb_status - reserve buffer status 448 * struct iwl_rb_status - reserve buffer status
443 * host memory mapped FH registers 449 * host memory mapped FH registers
diff --git a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
index 80fefe7d7b8c..3b8e85e51002 100644
--- a/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
+++ b/drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
@@ -540,13 +540,11 @@ static void iwl_set_hw_address_family_8000(struct device *dev,
540 hw_addr = (const u8 *)(mac_override + 540 hw_addr = (const u8 *)(mac_override +
541 MAC_ADDRESS_OVERRIDE_FAMILY_8000); 541 MAC_ADDRESS_OVERRIDE_FAMILY_8000);
542 542
543 /* The byte order is little endian 16 bit, meaning 214365 */ 543 /*
544 data->hw_addr[0] = hw_addr[1]; 544 * Store the MAC address from MAO section.
545 data->hw_addr[1] = hw_addr[0]; 545 * No byte swapping is required in MAO section
546 data->hw_addr[2] = hw_addr[3]; 546 */
547 data->hw_addr[3] = hw_addr[2]; 547 memcpy(data->hw_addr, hw_addr, ETH_ALEN);
548 data->hw_addr[4] = hw_addr[5];
549 data->hw_addr[5] = hw_addr[4];
550 548
551 /* 549 /*
552 * Force the use of the OTP MAC address in case of reserved MAC 550 * Force the use of the OTP MAC address in case of reserved MAC
diff --git a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
index 5e4cbdb44c60..737774a01c74 100644
--- a/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
+++ b/drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
@@ -660,7 +660,8 @@ struct iwl_scan_config {
660 * iwl_umac_scan_flags 660 * iwl_umac_scan_flags
661 *@IWL_UMAC_SCAN_FLAG_PREEMPTIVE: scan process triggered by this scan request 661 *@IWL_UMAC_SCAN_FLAG_PREEMPTIVE: scan process triggered by this scan request
662 * can be preempted by other scan requests with higher priority. 662 * can be preempted by other scan requests with higher priority.
663 * The low priority scan is aborted. 663 * The low priority scan will be resumed when the higher proirity scan is
664 * completed.
664 *@IWL_UMAC_SCAN_FLAG_START_NOTIF: notification will be sent to the driver 665 *@IWL_UMAC_SCAN_FLAG_START_NOTIF: notification will be sent to the driver
665 * when scan starts. 666 * when scan starts.
666 */ 667 */
diff --git a/drivers/net/wireless/iwlwifi/mvm/scan.c b/drivers/net/wireless/iwlwifi/mvm/scan.c
index 5de144968723..5514ad6d4e54 100644
--- a/drivers/net/wireless/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/iwlwifi/mvm/scan.c
@@ -1023,7 +1023,7 @@ static void iwl_mvm_scan_umac_dwell(struct iwl_mvm *mvm,
1023 cmd->scan_priority = 1023 cmd->scan_priority =
1024 iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6); 1024 iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
1025 1025
1026 if (iwl_mvm_scan_total_iterations(params) == 0) 1026 if (iwl_mvm_scan_total_iterations(params) == 1)
1027 cmd->ooc_priority = 1027 cmd->ooc_priority =
1028 iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6); 1028 iwl_mvm_scan_priority(mvm, IWL_SCAN_PRIORITY_EXT_6);
1029 else 1029 else
@@ -1109,6 +1109,9 @@ static int iwl_mvm_scan_umac(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
1109 cmd->uid = cpu_to_le32(uid); 1109 cmd->uid = cpu_to_le32(uid);
1110 cmd->general_flags = cpu_to_le32(iwl_mvm_scan_umac_flags(mvm, params)); 1110 cmd->general_flags = cpu_to_le32(iwl_mvm_scan_umac_flags(mvm, params));
1111 1111
1112 if (type == IWL_MVM_SCAN_SCHED)
1113 cmd->flags = cpu_to_le32(IWL_UMAC_SCAN_FLAG_PREEMPTIVE);
1114
1112 if (iwl_mvm_scan_use_ebs(mvm, vif, n_iterations)) 1115 if (iwl_mvm_scan_use_ebs(mvm, vif, n_iterations))
1113 cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS | 1116 cmd->channel_flags = IWL_SCAN_CHANNEL_FLAG_EBS |
1114 IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE | 1117 IWL_SCAN_CHANNEL_FLAG_EBS_ACCURATE |
diff --git a/drivers/net/wireless/iwlwifi/mvm/sta.c b/drivers/net/wireless/iwlwifi/mvm/sta.c
index d68dc697a4a0..26f076e82149 100644
--- a/drivers/net/wireless/iwlwifi/mvm/sta.c
+++ b/drivers/net/wireless/iwlwifi/mvm/sta.c
@@ -1401,6 +1401,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
1401 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); 1401 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
1402 u8 sta_id; 1402 u8 sta_id;
1403 int ret; 1403 int ret;
1404 static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};
1404 1405
1405 lockdep_assert_held(&mvm->mutex); 1406 lockdep_assert_held(&mvm->mutex);
1406 1407
@@ -1467,7 +1468,7 @@ int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
1467end: 1468end:
1468 IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n", 1469 IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
1469 keyconf->cipher, keyconf->keylen, keyconf->keyidx, 1470 keyconf->cipher, keyconf->keylen, keyconf->keyidx,
1470 sta->addr, ret); 1471 sta ? sta->addr : zero_addr, ret);
1471 return ret; 1472 return ret;
1472} 1473}
1473 1474
diff --git a/drivers/net/wireless/iwlwifi/mvm/time-event.c b/drivers/net/wireless/iwlwifi/mvm/time-event.c
index d24b6a83e68c..e472729e5f14 100644
--- a/drivers/net/wireless/iwlwifi/mvm/time-event.c
+++ b/drivers/net/wireless/iwlwifi/mvm/time-event.c
@@ -86,7 +86,7 @@ void iwl_mvm_te_clear_data(struct iwl_mvm *mvm,
86{ 86{
87 lockdep_assert_held(&mvm->time_event_lock); 87 lockdep_assert_held(&mvm->time_event_lock);
88 88
89 if (te_data->id == TE_MAX) 89 if (!te_data->vif)
90 return; 90 return;
91 91
92 list_del(&te_data->list); 92 list_del(&te_data->list);
diff --git a/drivers/net/wireless/iwlwifi/mvm/tx.c b/drivers/net/wireless/iwlwifi/mvm/tx.c
index 7ba7a118ff5c..89116864d2a0 100644
--- a/drivers/net/wireless/iwlwifi/mvm/tx.c
+++ b/drivers/net/wireless/iwlwifi/mvm/tx.c
@@ -252,7 +252,7 @@ void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
252 252
253 if (info->band == IEEE80211_BAND_2GHZ && 253 if (info->band == IEEE80211_BAND_2GHZ &&
254 !iwl_mvm_bt_coex_is_shared_ant_avail(mvm)) 254 !iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
255 rate_flags = BIT(mvm->cfg->non_shared_ant) << RATE_MCS_ANT_POS; 255 rate_flags = mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;
256 else 256 else
257 rate_flags = 257 rate_flags =
258 BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS; 258 BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 2ed1e4d2774d..9f65c1cff1b1 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -368,12 +368,14 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
368/* 3165 Series */ 368/* 3165 Series */
369 {IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)}, 369 {IWL_PCI_DEVICE(0x3165, 0x4010, iwl3165_2ac_cfg)},
370 {IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)}, 370 {IWL_PCI_DEVICE(0x3165, 0x4012, iwl3165_2ac_cfg)},
371 {IWL_PCI_DEVICE(0x3166, 0x4212, iwl3165_2ac_cfg)},
371 {IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)}, 372 {IWL_PCI_DEVICE(0x3165, 0x4410, iwl3165_2ac_cfg)},
372 {IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)}, 373 {IWL_PCI_DEVICE(0x3165, 0x4510, iwl3165_2ac_cfg)},
373 {IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)}, 374 {IWL_PCI_DEVICE(0x3165, 0x4110, iwl3165_2ac_cfg)},
374 {IWL_PCI_DEVICE(0x3166, 0x4310, iwl3165_2ac_cfg)}, 375 {IWL_PCI_DEVICE(0x3166, 0x4310, iwl3165_2ac_cfg)},
375 {IWL_PCI_DEVICE(0x3166, 0x4210, iwl3165_2ac_cfg)}, 376 {IWL_PCI_DEVICE(0x3166, 0x4210, iwl3165_2ac_cfg)},
376 {IWL_PCI_DEVICE(0x3165, 0x8010, iwl3165_2ac_cfg)}, 377 {IWL_PCI_DEVICE(0x3165, 0x8010, iwl3165_2ac_cfg)},
378 {IWL_PCI_DEVICE(0x3165, 0x8110, iwl3165_2ac_cfg)},
377 379
378/* 7265 Series */ 380/* 7265 Series */
379 {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)}, 381 {IWL_PCI_DEVICE(0x095A, 0x5010, iwl7265_2ac_cfg)},
@@ -426,9 +428,8 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
426 {IWL_PCI_DEVICE(0x24F4, 0x1130, iwl8260_2ac_cfg)}, 428 {IWL_PCI_DEVICE(0x24F4, 0x1130, iwl8260_2ac_cfg)},
427 {IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)}, 429 {IWL_PCI_DEVICE(0x24F4, 0x1030, iwl8260_2ac_cfg)},
428 {IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)}, 430 {IWL_PCI_DEVICE(0x24F3, 0xC010, iwl8260_2ac_cfg)},
431 {IWL_PCI_DEVICE(0x24F3, 0xC110, iwl8260_2ac_cfg)},
429 {IWL_PCI_DEVICE(0x24F3, 0xD010, iwl8260_2ac_cfg)}, 432 {IWL_PCI_DEVICE(0x24F3, 0xD010, iwl8260_2ac_cfg)},
430 {IWL_PCI_DEVICE(0x24F4, 0xC030, iwl8260_2ac_cfg)},
431 {IWL_PCI_DEVICE(0x24F4, 0xD030, iwl8260_2ac_cfg)},
432 {IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)}, 433 {IWL_PCI_DEVICE(0x24F3, 0xC050, iwl8260_2ac_cfg)},
433 {IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)}, 434 {IWL_PCI_DEVICE(0x24F3, 0xD050, iwl8260_2ac_cfg)},
434 {IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)}, 435 {IWL_PCI_DEVICE(0x24F3, 0x8010, iwl8260_2ac_cfg)},
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index 31f72a61cc3f..376b84e54ad7 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -44,15 +44,6 @@
44#include "iwl-io.h" 44#include "iwl-io.h"
45#include "iwl-op-mode.h" 45#include "iwl-op-mode.h"
46 46
47/*
48 * RX related structures and functions
49 */
50#define RX_NUM_QUEUES 1
51#define RX_POST_REQ_ALLOC 2
52#define RX_CLAIM_REQ_ALLOC 8
53#define RX_POOL_SIZE ((RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC) * RX_NUM_QUEUES)
54#define RX_LOW_WATERMARK 8
55
56struct iwl_host_cmd; 47struct iwl_host_cmd;
57 48
58/*This file includes the declaration that are internal to the 49/*This file includes the declaration that are internal to the
@@ -86,29 +77,29 @@ struct isr_statistics {
86 * struct iwl_rxq - Rx queue 77 * struct iwl_rxq - Rx queue
87 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd) 78 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd)
88 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd) 79 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
80 * @pool:
81 * @queue:
89 * @read: Shared index to newest available Rx buffer 82 * @read: Shared index to newest available Rx buffer
90 * @write: Shared index to oldest written Rx packet 83 * @write: Shared index to oldest written Rx packet
91 * @free_count: Number of pre-allocated buffers in rx_free 84 * @free_count: Number of pre-allocated buffers in rx_free
92 * @used_count: Number of RBDs handled to allocator to use for allocation
93 * @write_actual: 85 * @write_actual:
94 * @rx_free: list of RBDs with allocated RB ready for use 86 * @rx_free: list of free SKBs for use
95 * @rx_used: list of RBDs with no RB attached 87 * @rx_used: List of Rx buffers with no SKB
96 * @need_update: flag to indicate we need to update read/write index 88 * @need_update: flag to indicate we need to update read/write index
97 * @rb_stts: driver's pointer to receive buffer status 89 * @rb_stts: driver's pointer to receive buffer status
98 * @rb_stts_dma: bus address of receive buffer status 90 * @rb_stts_dma: bus address of receive buffer status
99 * @lock: 91 * @lock:
100 * @pool: initial pool of iwl_rx_mem_buffer for the queue
101 * @queue: actual rx queue
102 * 92 *
103 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers 93 * NOTE: rx_free and rx_used are used as a FIFO for iwl_rx_mem_buffers
104 */ 94 */
105struct iwl_rxq { 95struct iwl_rxq {
106 __le32 *bd; 96 __le32 *bd;
107 dma_addr_t bd_dma; 97 dma_addr_t bd_dma;
98 struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE + RX_FREE_BUFFERS];
99 struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
108 u32 read; 100 u32 read;
109 u32 write; 101 u32 write;
110 u32 free_count; 102 u32 free_count;
111 u32 used_count;
112 u32 write_actual; 103 u32 write_actual;
113 struct list_head rx_free; 104 struct list_head rx_free;
114 struct list_head rx_used; 105 struct list_head rx_used;
@@ -116,32 +107,6 @@ struct iwl_rxq {
116 struct iwl_rb_status *rb_stts; 107 struct iwl_rb_status *rb_stts;
117 dma_addr_t rb_stts_dma; 108 dma_addr_t rb_stts_dma;
118 spinlock_t lock; 109 spinlock_t lock;
119 struct iwl_rx_mem_buffer pool[RX_QUEUE_SIZE];
120 struct iwl_rx_mem_buffer *queue[RX_QUEUE_SIZE];
121};
122
123/**
124 * struct iwl_rb_allocator - Rx allocator
125 * @pool: initial pool of allocator
126 * @req_pending: number of requests the allocator had not processed yet
127 * @req_ready: number of requests honored and ready for claiming
128 * @rbd_allocated: RBDs with pages allocated and ready to be handled to
129 * the queue. This is a list of &struct iwl_rx_mem_buffer
130 * @rbd_empty: RBDs with no page attached for allocator use. This is a list
131 * of &struct iwl_rx_mem_buffer
132 * @lock: protects the rbd_allocated and rbd_empty lists
133 * @alloc_wq: work queue for background calls
134 * @rx_alloc: work struct for background calls
135 */
136struct iwl_rb_allocator {
137 struct iwl_rx_mem_buffer pool[RX_POOL_SIZE];
138 atomic_t req_pending;
139 atomic_t req_ready;
140 struct list_head rbd_allocated;
141 struct list_head rbd_empty;
142 spinlock_t lock;
143 struct workqueue_struct *alloc_wq;
144 struct work_struct rx_alloc;
145}; 110};
146 111
147struct iwl_dma_ptr { 112struct iwl_dma_ptr {
@@ -285,7 +250,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
285/** 250/**
286 * struct iwl_trans_pcie - PCIe transport specific data 251 * struct iwl_trans_pcie - PCIe transport specific data
287 * @rxq: all the RX queue data 252 * @rxq: all the RX queue data
288 * @rba: allocator for RX replenishing 253 * @rx_replenish: work that will be called when buffers need to be allocated
289 * @drv - pointer to iwl_drv 254 * @drv - pointer to iwl_drv
290 * @trans: pointer to the generic transport area 255 * @trans: pointer to the generic transport area
291 * @scd_base_addr: scheduler sram base address in SRAM 256 * @scd_base_addr: scheduler sram base address in SRAM
@@ -308,7 +273,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
308 */ 273 */
309struct iwl_trans_pcie { 274struct iwl_trans_pcie {
310 struct iwl_rxq rxq; 275 struct iwl_rxq rxq;
311 struct iwl_rb_allocator rba; 276 struct work_struct rx_replenish;
312 struct iwl_trans *trans; 277 struct iwl_trans *trans;
313 struct iwl_drv *drv; 278 struct iwl_drv *drv;
314 279
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index a3fbaa0ef5e0..adad8d0fae7f 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -1,7 +1,7 @@
1/****************************************************************************** 1/******************************************************************************
2 * 2 *
3 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved. 3 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
4 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 4 * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
5 * 5 *
6 * Portions of this file are derived from the ipw3945 project, as well 6 * Portions of this file are derived from the ipw3945 project, as well
7 * as portions of the ieee80211 subsystem header files. 7 * as portions of the ieee80211 subsystem header files.
@@ -74,29 +74,16 @@
74 * resets the Rx queue buffers with new memory. 74 * resets the Rx queue buffers with new memory.
75 * 75 *
76 * The management in the driver is as follows: 76 * The management in the driver is as follows:
77 * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free. 77 * + A list of pre-allocated SKBs is stored in iwl->rxq->rx_free. When
78 * When the interrupt handler is called, the request is processed. 78 * iwl->rxq->free_count drops to or below RX_LOW_WATERMARK, work is scheduled
79 * The page is either stolen - transferred to the upper layer 79 * to replenish the iwl->rxq->rx_free.
80 * or reused - added immediately to the iwl->rxq->rx_free list. 80 * + In iwl_pcie_rx_replenish (scheduled) if 'processed' != 'read' then the
81 * + When the page is stolen - the driver updates the matching queue's used 81 * iwl->rxq is replenished and the READ INDEX is updated (updating the
82 * count, detaches the RBD and transfers it to the queue used list. 82 * 'processed' and 'read' driver indexes as well)
83 * When there are two used RBDs - they are transferred to the allocator empty
84 * list. Work is then scheduled for the allocator to start allocating
85 * eight buffers.
86 * When there are another 6 used RBDs - they are transferred to the allocator
87 * empty list and the driver tries to claim the pre-allocated buffers and
88 * add them to iwl->rxq->rx_free. If it fails - it continues to claim them
89 * until ready.
90 * When there are 8+ buffers in the free list - either from allocation or from
91 * 8 reused unstolen pages - restock is called to update the FW and indexes.
92 * + In order to make sure the allocator always has RBDs to use for allocation
93 * the allocator has initial pool in the size of num_queues*(8-2) - the
94 * maximum missing RBDs per allocation request (request posted with 2
95 * empty RBDs, there is no guarantee when the other 6 RBDs are supplied).
96 * The queues supplies the recycle of the rest of the RBDs.
97 * + A received packet is processed and handed to the kernel network stack, 83 * + A received packet is processed and handed to the kernel network stack,
98 * detached from the iwl->rxq. The driver 'processed' index is updated. 84 * detached from the iwl->rxq. The driver 'processed' index is updated.
99 * + If there are no allocated buffers in iwl->rxq->rx_free, 85 * + The Host/Firmware iwl->rxq is replenished at irq thread time from the
86 * rx_free list. If there are no allocated buffers in iwl->rxq->rx_free,
100 * the READ INDEX is not incremented and iwl->status(RX_STALLED) is set. 87 * the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
101 * If there were enough free buffers and RX_STALLED is set it is cleared. 88 * If there were enough free buffers and RX_STALLED is set it is cleared.
102 * 89 *
@@ -105,32 +92,18 @@
105 * 92 *
106 * iwl_rxq_alloc() Allocates rx_free 93 * iwl_rxq_alloc() Allocates rx_free
107 * iwl_pcie_rx_replenish() Replenishes rx_free list from rx_used, and calls 94 * iwl_pcie_rx_replenish() Replenishes rx_free list from rx_used, and calls
108 * iwl_pcie_rxq_restock. 95 * iwl_pcie_rxq_restock
109 * Used only during initialization.
110 * iwl_pcie_rxq_restock() Moves available buffers from rx_free into Rx 96 * iwl_pcie_rxq_restock() Moves available buffers from rx_free into Rx
111 * queue, updates firmware pointers, and updates 97 * queue, updates firmware pointers, and updates
112 * the WRITE index. 98 * the WRITE index. If insufficient rx_free buffers
113 * iwl_pcie_rx_allocator() Background work for allocating pages. 99 * are available, schedules iwl_pcie_rx_replenish
114 * 100 *
115 * -- enable interrupts -- 101 * -- enable interrupts --
116 * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the 102 * ISR - iwl_rx() Detach iwl_rx_mem_buffers from pool up to the
117 * READ INDEX, detaching the SKB from the pool. 103 * READ INDEX, detaching the SKB from the pool.
118 * Moves the packet buffer from queue to rx_used. 104 * Moves the packet buffer from queue to rx_used.
119 * Posts and claims requests to the allocator.
120 * Calls iwl_pcie_rxq_restock to refill any empty 105 * Calls iwl_pcie_rxq_restock to refill any empty
121 * slots. 106 * slots.
122 *
123 * RBD life-cycle:
124 *
125 * Init:
126 * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
127 *
128 * Regular Receive interrupt:
129 * Page Stolen:
130 * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
131 * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
132 * Page not Stolen:
133 * rxq.queue -> rxq.rx_free -> rxq.queue
134 * ... 107 * ...
135 * 108 *
136 */ 109 */
@@ -267,6 +240,10 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
267 rxq->free_count--; 240 rxq->free_count--;
268 } 241 }
269 spin_unlock(&rxq->lock); 242 spin_unlock(&rxq->lock);
243 /* If the pre-allocated buffer pool is dropping low, schedule to
244 * refill it */
245 if (rxq->free_count <= RX_LOW_WATERMARK)
246 schedule_work(&trans_pcie->rx_replenish);
270 247
271 /* If we've added more space for the firmware to place data, tell it. 248 /* If we've added more space for the firmware to place data, tell it.
272 * Increment device's write pointer in multiples of 8. */ 249 * Increment device's write pointer in multiples of 8. */
@@ -278,44 +255,6 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
278} 255}
279 256
280/* 257/*
281 * iwl_pcie_rx_alloc_page - allocates and returns a page.
282 *
283 */
284static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans)
285{
286 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
287 struct iwl_rxq *rxq = &trans_pcie->rxq;
288 struct page *page;
289 gfp_t gfp_mask = GFP_KERNEL;
290
291 if (rxq->free_count > RX_LOW_WATERMARK)
292 gfp_mask |= __GFP_NOWARN;
293
294 if (trans_pcie->rx_page_order > 0)
295 gfp_mask |= __GFP_COMP;
296
297 /* Alloc a new receive buffer */
298 page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
299 if (!page) {
300 if (net_ratelimit())
301 IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
302 trans_pcie->rx_page_order);
303 /* Issue an error if the hardware has consumed more than half
304 * of its free buffer list and we don't have enough
305 * pre-allocated buffers.
306 */
307 if (rxq->free_count <= RX_LOW_WATERMARK &&
308 iwl_rxq_space(rxq) > (RX_QUEUE_SIZE / 2) &&
309 net_ratelimit())
310 IWL_CRIT(trans,
311 "Failed to alloc_pages with GFP_KERNEL. Only %u free buffers remaining.\n",
312 rxq->free_count);
313 return NULL;
314 }
315 return page;
316}
317
318/*
319 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD 258 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
320 * 259 *
321 * A used RBD is an Rx buffer that has been given to the stack. To use it again 260 * A used RBD is an Rx buffer that has been given to the stack. To use it again
@@ -324,12 +263,13 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans)
324 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly 263 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
325 * allocated buffers. 264 * allocated buffers.
326 */ 265 */
327static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans) 266static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
328{ 267{
329 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 268 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
330 struct iwl_rxq *rxq = &trans_pcie->rxq; 269 struct iwl_rxq *rxq = &trans_pcie->rxq;
331 struct iwl_rx_mem_buffer *rxb; 270 struct iwl_rx_mem_buffer *rxb;
332 struct page *page; 271 struct page *page;
272 gfp_t gfp_mask = priority;
333 273
334 while (1) { 274 while (1) {
335 spin_lock(&rxq->lock); 275 spin_lock(&rxq->lock);
@@ -339,10 +279,32 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans)
339 } 279 }
340 spin_unlock(&rxq->lock); 280 spin_unlock(&rxq->lock);
341 281
282 if (rxq->free_count > RX_LOW_WATERMARK)
283 gfp_mask |= __GFP_NOWARN;
284
285 if (trans_pcie->rx_page_order > 0)
286 gfp_mask |= __GFP_COMP;
287
342 /* Alloc a new receive buffer */ 288 /* Alloc a new receive buffer */
343 page = iwl_pcie_rx_alloc_page(trans); 289 page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
344 if (!page) 290 if (!page) {
291 if (net_ratelimit())
292 IWL_DEBUG_INFO(trans, "alloc_pages failed, "
293 "order: %d\n",
294 trans_pcie->rx_page_order);
295
296 if ((rxq->free_count <= RX_LOW_WATERMARK) &&
297 net_ratelimit())
298 IWL_CRIT(trans, "Failed to alloc_pages with %s."
299 "Only %u free buffers remaining.\n",
300 priority == GFP_ATOMIC ?
301 "GFP_ATOMIC" : "GFP_KERNEL",
302 rxq->free_count);
303 /* We don't reschedule replenish work here -- we will
304 * call the restock method and if it still needs
305 * more buffers it will schedule replenish */
345 return; 306 return;
307 }
346 308
347 spin_lock(&rxq->lock); 309 spin_lock(&rxq->lock);
348 310
@@ -393,7 +355,7 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
393 355
394 lockdep_assert_held(&rxq->lock); 356 lockdep_assert_held(&rxq->lock);
395 357
396 for (i = 0; i < RX_QUEUE_SIZE; i++) { 358 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
397 if (!rxq->pool[i].page) 359 if (!rxq->pool[i].page)
398 continue; 360 continue;
399 dma_unmap_page(trans->dev, rxq->pool[i].page_dma, 361 dma_unmap_page(trans->dev, rxq->pool[i].page_dma,
@@ -410,144 +372,32 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
410 * When moving to rx_free an page is allocated for the slot. 372 * When moving to rx_free an page is allocated for the slot.
411 * 373 *
412 * Also restock the Rx queue via iwl_pcie_rxq_restock. 374 * Also restock the Rx queue via iwl_pcie_rxq_restock.
413 * This is called only during initialization 375 * This is called as a scheduled work item (except for during initialization)
414 */ 376 */
415static void iwl_pcie_rx_replenish(struct iwl_trans *trans) 377static void iwl_pcie_rx_replenish(struct iwl_trans *trans, gfp_t gfp)
416{ 378{
417 iwl_pcie_rxq_alloc_rbs(trans); 379 iwl_pcie_rxq_alloc_rbs(trans, gfp);
418 380
419 iwl_pcie_rxq_restock(trans); 381 iwl_pcie_rxq_restock(trans);
420} 382}
421 383
422/* 384static void iwl_pcie_rx_replenish_work(struct work_struct *data)
423 * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
424 *
425 * Allocates for each received request 8 pages
426 * Called as a scheduled work item.
427 */
428static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
429{
430 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
431 struct iwl_rb_allocator *rba = &trans_pcie->rba;
432
433 while (atomic_read(&rba->req_pending)) {
434 int i;
435 struct list_head local_empty;
436 struct list_head local_allocated;
437
438 INIT_LIST_HEAD(&local_allocated);
439 spin_lock(&rba->lock);
440 /* swap out the entire rba->rbd_empty to a local list */
441 list_replace_init(&rba->rbd_empty, &local_empty);
442 spin_unlock(&rba->lock);
443
444 for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
445 struct iwl_rx_mem_buffer *rxb;
446 struct page *page;
447
448 /* List should never be empty - each reused RBD is
449 * returned to the list, and initial pool covers any
450 * possible gap between the time the page is allocated
451 * to the time the RBD is added.
452 */
453 BUG_ON(list_empty(&local_empty));
454 /* Get the first rxb from the rbd list */
455 rxb = list_first_entry(&local_empty,
456 struct iwl_rx_mem_buffer, list);
457 BUG_ON(rxb->page);
458
459 /* Alloc a new receive buffer */
460 page = iwl_pcie_rx_alloc_page(trans);
461 if (!page)
462 continue;
463 rxb->page = page;
464
465 /* Get physical address of the RB */
466 rxb->page_dma = dma_map_page(trans->dev, page, 0,
467 PAGE_SIZE << trans_pcie->rx_page_order,
468 DMA_FROM_DEVICE);
469 if (dma_mapping_error(trans->dev, rxb->page_dma)) {
470 rxb->page = NULL;
471 __free_pages(page, trans_pcie->rx_page_order);
472 continue;
473 }
474 /* dma address must be no more than 36 bits */
475 BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
476 /* and also 256 byte aligned! */
477 BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
478
479 /* move the allocated entry to the out list */
480 list_move(&rxb->list, &local_allocated);
481 i++;
482 }
483
484 spin_lock(&rba->lock);
485 /* add the allocated rbds to the allocator allocated list */
486 list_splice_tail(&local_allocated, &rba->rbd_allocated);
487 /* add the unused rbds back to the allocator empty list */
488 list_splice_tail(&local_empty, &rba->rbd_empty);
489 spin_unlock(&rba->lock);
490
491 atomic_dec(&rba->req_pending);
492 atomic_inc(&rba->req_ready);
493 }
494}
495
496/*
497 * iwl_pcie_rx_allocator_get - Returns the pre-allocated pages
498 *
499 * Called by queue when the queue posted allocation request and
500 * has freed 8 RBDs in order to restock itself.
501 */
502static int iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
503 struct iwl_rx_mem_buffer
504 *out[RX_CLAIM_REQ_ALLOC])
505{
506 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
507 struct iwl_rb_allocator *rba = &trans_pcie->rba;
508 int i;
509
510 if (atomic_dec_return(&rba->req_ready) < 0) {
511 atomic_inc(&rba->req_ready);
512 IWL_DEBUG_RX(trans,
513 "Allocation request not ready, pending requests = %d\n",
514 atomic_read(&rba->req_pending));
515 return -ENOMEM;
516 }
517
518 spin_lock(&rba->lock);
519 for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
520 /* Get next free Rx buffer, remove it from free list */
521 out[i] = list_first_entry(&rba->rbd_allocated,
522 struct iwl_rx_mem_buffer, list);
523 list_del(&out[i]->list);
524 }
525 spin_unlock(&rba->lock);
526
527 return 0;
528}
529
530static void iwl_pcie_rx_allocator_work(struct work_struct *data)
531{ 385{
532 struct iwl_rb_allocator *rba_p =
533 container_of(data, struct iwl_rb_allocator, rx_alloc);
534 struct iwl_trans_pcie *trans_pcie = 386 struct iwl_trans_pcie *trans_pcie =
535 container_of(rba_p, struct iwl_trans_pcie, rba); 387 container_of(data, struct iwl_trans_pcie, rx_replenish);
536 388
537 iwl_pcie_rx_allocator(trans_pcie->trans); 389 iwl_pcie_rx_replenish(trans_pcie->trans, GFP_KERNEL);
538} 390}
539 391
540static int iwl_pcie_rx_alloc(struct iwl_trans *trans) 392static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
541{ 393{
542 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 394 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
543 struct iwl_rxq *rxq = &trans_pcie->rxq; 395 struct iwl_rxq *rxq = &trans_pcie->rxq;
544 struct iwl_rb_allocator *rba = &trans_pcie->rba;
545 struct device *dev = trans->dev; 396 struct device *dev = trans->dev;
546 397
547 memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq)); 398 memset(&trans_pcie->rxq, 0, sizeof(trans_pcie->rxq));
548 399
549 spin_lock_init(&rxq->lock); 400 spin_lock_init(&rxq->lock);
550 spin_lock_init(&rba->lock);
551 401
552 if (WARN_ON(rxq->bd || rxq->rb_stts)) 402 if (WARN_ON(rxq->bd || rxq->rb_stts))
553 return -EINVAL; 403 return -EINVAL;
@@ -637,49 +487,15 @@ static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
637 INIT_LIST_HEAD(&rxq->rx_free); 487 INIT_LIST_HEAD(&rxq->rx_free);
638 INIT_LIST_HEAD(&rxq->rx_used); 488 INIT_LIST_HEAD(&rxq->rx_used);
639 rxq->free_count = 0; 489 rxq->free_count = 0;
640 rxq->used_count = 0;
641 490
642 for (i = 0; i < RX_QUEUE_SIZE; i++) 491 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++)
643 list_add(&rxq->pool[i].list, &rxq->rx_used); 492 list_add(&rxq->pool[i].list, &rxq->rx_used);
644} 493}
645 494
646static void iwl_pcie_rx_init_rba(struct iwl_rb_allocator *rba)
647{
648 int i;
649
650 lockdep_assert_held(&rba->lock);
651
652 INIT_LIST_HEAD(&rba->rbd_allocated);
653 INIT_LIST_HEAD(&rba->rbd_empty);
654
655 for (i = 0; i < RX_POOL_SIZE; i++)
656 list_add(&rba->pool[i].list, &rba->rbd_empty);
657}
658
659static void iwl_pcie_rx_free_rba(struct iwl_trans *trans)
660{
661 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
662 struct iwl_rb_allocator *rba = &trans_pcie->rba;
663 int i;
664
665 lockdep_assert_held(&rba->lock);
666
667 for (i = 0; i < RX_POOL_SIZE; i++) {
668 if (!rba->pool[i].page)
669 continue;
670 dma_unmap_page(trans->dev, rba->pool[i].page_dma,
671 PAGE_SIZE << trans_pcie->rx_page_order,
672 DMA_FROM_DEVICE);
673 __free_pages(rba->pool[i].page, trans_pcie->rx_page_order);
674 rba->pool[i].page = NULL;
675 }
676}
677
678int iwl_pcie_rx_init(struct iwl_trans *trans) 495int iwl_pcie_rx_init(struct iwl_trans *trans)
679{ 496{
680 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 497 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
681 struct iwl_rxq *rxq = &trans_pcie->rxq; 498 struct iwl_rxq *rxq = &trans_pcie->rxq;
682 struct iwl_rb_allocator *rba = &trans_pcie->rba;
683 int i, err; 499 int i, err;
684 500
685 if (!rxq->bd) { 501 if (!rxq->bd) {
@@ -687,21 +503,11 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
687 if (err) 503 if (err)
688 return err; 504 return err;
689 } 505 }
690 if (!rba->alloc_wq)
691 rba->alloc_wq = alloc_workqueue("rb_allocator",
692 WQ_HIGHPRI | WQ_UNBOUND, 1);
693 INIT_WORK(&rba->rx_alloc, iwl_pcie_rx_allocator_work);
694
695 spin_lock(&rba->lock);
696 atomic_set(&rba->req_pending, 0);
697 atomic_set(&rba->req_ready, 0);
698 /* free all first - we might be reconfigured for a different size */
699 iwl_pcie_rx_free_rba(trans);
700 iwl_pcie_rx_init_rba(rba);
701 spin_unlock(&rba->lock);
702 506
703 spin_lock(&rxq->lock); 507 spin_lock(&rxq->lock);
704 508
509 INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work);
510
705 /* free all first - we might be reconfigured for a different size */ 511 /* free all first - we might be reconfigured for a different size */
706 iwl_pcie_rxq_free_rbs(trans); 512 iwl_pcie_rxq_free_rbs(trans);
707 iwl_pcie_rx_init_rxb_lists(rxq); 513 iwl_pcie_rx_init_rxb_lists(rxq);
@@ -716,7 +522,7 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
716 memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts)); 522 memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
717 spin_unlock(&rxq->lock); 523 spin_unlock(&rxq->lock);
718 524
719 iwl_pcie_rx_replenish(trans); 525 iwl_pcie_rx_replenish(trans, GFP_KERNEL);
720 526
721 iwl_pcie_rx_hw_init(trans, rxq); 527 iwl_pcie_rx_hw_init(trans, rxq);
722 528
@@ -731,7 +537,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
731{ 537{
732 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 538 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
733 struct iwl_rxq *rxq = &trans_pcie->rxq; 539 struct iwl_rxq *rxq = &trans_pcie->rxq;
734 struct iwl_rb_allocator *rba = &trans_pcie->rba;
735 540
736 /*if rxq->bd is NULL, it means that nothing has been allocated, 541 /*if rxq->bd is NULL, it means that nothing has been allocated,
737 * exit now */ 542 * exit now */
@@ -740,15 +545,7 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
740 return; 545 return;
741 } 546 }
742 547
743 cancel_work_sync(&rba->rx_alloc); 548 cancel_work_sync(&trans_pcie->rx_replenish);
744 if (rba->alloc_wq) {
745 destroy_workqueue(rba->alloc_wq);
746 rba->alloc_wq = NULL;
747 }
748
749 spin_lock(&rba->lock);
750 iwl_pcie_rx_free_rba(trans);
751 spin_unlock(&rba->lock);
752 549
753 spin_lock(&rxq->lock); 550 spin_lock(&rxq->lock);
754 iwl_pcie_rxq_free_rbs(trans); 551 iwl_pcie_rxq_free_rbs(trans);
@@ -769,43 +566,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
769 rxq->rb_stts = NULL; 566 rxq->rb_stts = NULL;
770} 567}
771 568
772/*
773 * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
774 *
775 * Called when a RBD can be reused. The RBD is transferred to the allocator.
776 * When there are 2 empty RBDs - a request for allocation is posted
777 */
778static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
779 struct iwl_rx_mem_buffer *rxb,
780 struct iwl_rxq *rxq)
781{
782 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
783 struct iwl_rb_allocator *rba = &trans_pcie->rba;
784
785 /* Count the used RBDs */
786 rxq->used_count++;
787
788 /* Move the RBD to the used list, will be moved to allocator in batches
789 * before claiming or posting a request*/
790 list_add_tail(&rxb->list, &rxq->rx_used);
791
792 /* If we have RX_POST_REQ_ALLOC new released rx buffers -
793 * issue a request for allocator. Modulo RX_CLAIM_REQ_ALLOC is
794 * used for the case we failed to claim RX_CLAIM_REQ_ALLOC,
795 * after but we still need to post another request.
796 */
797 if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
798 /* Move the 2 RBDs to the allocator ownership.
799 Allocator has another 6 from pool for the request completion*/
800 spin_lock(&rba->lock);
801 list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
802 spin_unlock(&rba->lock);
803
804 atomic_inc(&rba->req_pending);
805 queue_work(rba->alloc_wq, &rba->rx_alloc);
806 }
807}
808
809static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, 569static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
810 struct iwl_rx_mem_buffer *rxb) 570 struct iwl_rx_mem_buffer *rxb)
811{ 571{
@@ -928,13 +688,13 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
928 */ 688 */
929 __free_pages(rxb->page, trans_pcie->rx_page_order); 689 __free_pages(rxb->page, trans_pcie->rx_page_order);
930 rxb->page = NULL; 690 rxb->page = NULL;
931 iwl_pcie_rx_reuse_rbd(trans, rxb, rxq); 691 list_add_tail(&rxb->list, &rxq->rx_used);
932 } else { 692 } else {
933 list_add_tail(&rxb->list, &rxq->rx_free); 693 list_add_tail(&rxb->list, &rxq->rx_free);
934 rxq->free_count++; 694 rxq->free_count++;
935 } 695 }
936 } else 696 } else
937 iwl_pcie_rx_reuse_rbd(trans, rxb, rxq); 697 list_add_tail(&rxb->list, &rxq->rx_used);
938} 698}
939 699
940/* 700/*
@@ -944,7 +704,10 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans)
944{ 704{
945 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 705 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
946 struct iwl_rxq *rxq = &trans_pcie->rxq; 706 struct iwl_rxq *rxq = &trans_pcie->rxq;
947 u32 r, i, j; 707 u32 r, i;
708 u8 fill_rx = 0;
709 u32 count = 8;
710 int total_empty;
948 711
949restart: 712restart:
950 spin_lock(&rxq->lock); 713 spin_lock(&rxq->lock);
@@ -957,6 +720,14 @@ restart:
957 if (i == r) 720 if (i == r)
958 IWL_DEBUG_RX(trans, "HW = SW = %d\n", r); 721 IWL_DEBUG_RX(trans, "HW = SW = %d\n", r);
959 722
723 /* calculate total frames need to be restock after handling RX */
724 total_empty = r - rxq->write_actual;
725 if (total_empty < 0)
726 total_empty += RX_QUEUE_SIZE;
727
728 if (total_empty > (RX_QUEUE_SIZE / 2))
729 fill_rx = 1;
730
960 while (i != r) { 731 while (i != r) {
961 struct iwl_rx_mem_buffer *rxb; 732 struct iwl_rx_mem_buffer *rxb;
962 733
@@ -968,48 +739,29 @@ restart:
968 iwl_pcie_rx_handle_rb(trans, rxb); 739 iwl_pcie_rx_handle_rb(trans, rxb);
969 740
970 i = (i + 1) & RX_QUEUE_MASK; 741 i = (i + 1) & RX_QUEUE_MASK;
971 742 /* If there are a lot of unused frames,
972 /* If we have RX_CLAIM_REQ_ALLOC released rx buffers - 743 * restock the Rx queue so ucode wont assert. */
973 * try to claim the pre-allocated buffers from the allocator */ 744 if (fill_rx) {
974 if (rxq->used_count >= RX_CLAIM_REQ_ALLOC) { 745 count++;
975 struct iwl_rb_allocator *rba = &trans_pcie->rba; 746 if (count >= 8) {
976 struct iwl_rx_mem_buffer *out[RX_CLAIM_REQ_ALLOC]; 747 rxq->read = i;
977 748 spin_unlock(&rxq->lock);
978 /* Add the remaining 6 empty RBDs for allocator use */ 749 iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
979 spin_lock(&rba->lock); 750 count = 0;
980 list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty); 751 goto restart;
981 spin_unlock(&rba->lock);
982
983 /* If not ready - continue, will try to reclaim later.
984 * No need to reschedule work - allocator exits only on
985 * success */
986 if (!iwl_pcie_rx_allocator_get(trans, out)) {
987 /* If success - then RX_CLAIM_REQ_ALLOC
988 * buffers were retrieved and should be added
989 * to free list */
990 rxq->used_count -= RX_CLAIM_REQ_ALLOC;
991 for (j = 0; j < RX_CLAIM_REQ_ALLOC; j++) {
992 list_add_tail(&out[j]->list,
993 &rxq->rx_free);
994 rxq->free_count++;
995 }
996 } 752 }
997 } 753 }
998 /* handle restock for two cases:
999 * - we just pulled buffers from the allocator
1000 * - we have 8+ unstolen pages accumulated */
1001 if (rxq->free_count >= RX_CLAIM_REQ_ALLOC) {
1002 rxq->read = i;
1003 spin_unlock(&rxq->lock);
1004 iwl_pcie_rxq_restock(trans);
1005 goto restart;
1006 }
1007 } 754 }
1008 755
1009 /* Backtrack one entry */ 756 /* Backtrack one entry */
1010 rxq->read = i; 757 rxq->read = i;
1011 spin_unlock(&rxq->lock); 758 spin_unlock(&rxq->lock);
1012 759
760 if (fill_rx)
761 iwl_pcie_rx_replenish(trans, GFP_ATOMIC);
762 else
763 iwl_pcie_rxq_restock(trans);
764
1013 if (trans_pcie->napi.poll) 765 if (trans_pcie->napi.poll)
1014 napi_gro_flush(&trans_pcie->napi, false); 766 napi_gro_flush(&trans_pcie->napi, false);
1015} 767}
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index 43ae658af6ec..9e144e71da0b 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -182,7 +182,7 @@ static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
182 182
183static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux) 183static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
184{ 184{
185 if (!trans->cfg->apmg_not_supported) 185 if (trans->cfg->apmg_not_supported)
186 return; 186 return;
187 187
188 if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold)) 188 if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
@@ -478,10 +478,16 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
478 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) 478 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_7000)
479 iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG, 479 iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
480 APMG_PCIDEV_STT_VAL_WAKE_ME); 480 APMG_PCIDEV_STT_VAL_WAKE_ME);
481 else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) 481 else if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
482 iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
483 CSR_RESET_LINK_PWR_MGMT_DISABLED);
482 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, 484 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
483 CSR_HW_IF_CONFIG_REG_PREPARE | 485 CSR_HW_IF_CONFIG_REG_PREPARE |
484 CSR_HW_IF_CONFIG_REG_ENABLE_PME); 486 CSR_HW_IF_CONFIG_REG_ENABLE_PME);
487 mdelay(1);
488 iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
489 CSR_RESET_LINK_PWR_MGMT_DISABLED);
490 }
485 mdelay(5); 491 mdelay(5);
486 } 492 }
487 493
@@ -575,6 +581,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
575 if (ret >= 0) 581 if (ret >= 0)
576 return 0; 582 return 0;
577 583
584 iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
585 CSR_RESET_LINK_PWR_MGMT_DISABLED);
586 msleep(1);
587
578 for (iter = 0; iter < 10; iter++) { 588 for (iter = 0; iter < 10; iter++) {
579 /* If HW is not ready, prepare the conditions to check again */ 589 /* If HW is not ready, prepare the conditions to check again */
580 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG, 590 iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
@@ -582,8 +592,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
582 592
583 do { 593 do {
584 ret = iwl_pcie_set_hw_ready(trans); 594 ret = iwl_pcie_set_hw_ready(trans);
585 if (ret >= 0) 595 if (ret >= 0) {
586 return 0; 596 ret = 0;
597 goto out;
598 }
587 599
588 usleep_range(200, 1000); 600 usleep_range(200, 1000);
589 t += 200; 601 t += 200;
@@ -593,6 +605,10 @@ static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
593 605
594 IWL_ERR(trans, "Couldn't prepare the card\n"); 606 IWL_ERR(trans, "Couldn't prepare the card\n");
595 607
608out:
609 iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
610 CSR_RESET_LINK_PWR_MGMT_DISABLED);
611
596 return ret; 612 return ret;
597} 613}
598 614
@@ -2459,7 +2475,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
2459 struct iwl_trans_pcie *trans_pcie; 2475 struct iwl_trans_pcie *trans_pcie;
2460 struct iwl_trans *trans; 2476 struct iwl_trans *trans;
2461 u16 pci_cmd; 2477 u16 pci_cmd;
2462 int err; 2478 int ret;
2463 2479
2464 trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), 2480 trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie),
2465 &pdev->dev, cfg, &trans_ops_pcie, 0); 2481 &pdev->dev, cfg, &trans_ops_pcie, 0);
@@ -2474,8 +2490,8 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
2474 spin_lock_init(&trans_pcie->ref_lock); 2490 spin_lock_init(&trans_pcie->ref_lock);
2475 init_waitqueue_head(&trans_pcie->ucode_write_waitq); 2491 init_waitqueue_head(&trans_pcie->ucode_write_waitq);
2476 2492
2477 err = pci_enable_device(pdev); 2493 ret = pci_enable_device(pdev);
2478 if (err) 2494 if (ret)
2479 goto out_no_pci; 2495 goto out_no_pci;
2480 2496
2481 if (!cfg->base_params->pcie_l1_allowed) { 2497 if (!cfg->base_params->pcie_l1_allowed) {
@@ -2491,23 +2507,23 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
2491 2507
2492 pci_set_master(pdev); 2508 pci_set_master(pdev);
2493 2509
2494 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36)); 2510 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
2495 if (!err) 2511 if (!ret)
2496 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36)); 2512 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
2497 if (err) { 2513 if (ret) {
2498 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); 2514 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2499 if (!err) 2515 if (!ret)
2500 err = pci_set_consistent_dma_mask(pdev, 2516 ret = pci_set_consistent_dma_mask(pdev,
2501 DMA_BIT_MASK(32)); 2517 DMA_BIT_MASK(32));
2502 /* both attempts failed: */ 2518 /* both attempts failed: */
2503 if (err) { 2519 if (ret) {
2504 dev_err(&pdev->dev, "No suitable DMA available\n"); 2520 dev_err(&pdev->dev, "No suitable DMA available\n");
2505 goto out_pci_disable_device; 2521 goto out_pci_disable_device;
2506 } 2522 }
2507 } 2523 }
2508 2524
2509 err = pci_request_regions(pdev, DRV_NAME); 2525 ret = pci_request_regions(pdev, DRV_NAME);
2510 if (err) { 2526 if (ret) {
2511 dev_err(&pdev->dev, "pci_request_regions failed\n"); 2527 dev_err(&pdev->dev, "pci_request_regions failed\n");
2512 goto out_pci_disable_device; 2528 goto out_pci_disable_device;
2513 } 2529 }
@@ -2515,7 +2531,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
2515 trans_pcie->hw_base = pci_ioremap_bar(pdev, 0); 2531 trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
2516 if (!trans_pcie->hw_base) { 2532 if (!trans_pcie->hw_base) {
2517 dev_err(&pdev->dev, "pci_ioremap_bar failed\n"); 2533 dev_err(&pdev->dev, "pci_ioremap_bar failed\n");
2518 err = -ENODEV; 2534 ret = -ENODEV;
2519 goto out_pci_release_regions; 2535 goto out_pci_release_regions;
2520 } 2536 }
2521 2537
@@ -2527,9 +2543,9 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
2527 trans_pcie->pci_dev = pdev; 2543 trans_pcie->pci_dev = pdev;
2528 iwl_disable_interrupts(trans); 2544 iwl_disable_interrupts(trans);
2529 2545
2530 err = pci_enable_msi(pdev); 2546 ret = pci_enable_msi(pdev);
2531 if (err) { 2547 if (ret) {
2532 dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err); 2548 dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", ret);
2533 /* enable rfkill interrupt: hw bug w/a */ 2549 /* enable rfkill interrupt: hw bug w/a */
2534 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); 2550 pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
2535 if (pci_cmd & PCI_COMMAND_INTX_DISABLE) { 2551 if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
@@ -2547,11 +2563,16 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
2547 */ 2563 */
2548 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) { 2564 if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
2549 unsigned long flags; 2565 unsigned long flags;
2550 int ret;
2551 2566
2552 trans->hw_rev = (trans->hw_rev & 0xfff0) | 2567 trans->hw_rev = (trans->hw_rev & 0xfff0) |
2553 (CSR_HW_REV_STEP(trans->hw_rev << 2) << 2); 2568 (CSR_HW_REV_STEP(trans->hw_rev << 2) << 2);
2554 2569
2570 ret = iwl_pcie_prepare_card_hw(trans);
2571 if (ret) {
2572 IWL_WARN(trans, "Exit HW not ready\n");
2573 goto out_pci_disable_msi;
2574 }
2575
2555 /* 2576 /*
2556 * in-order to recognize C step driver should read chip version 2577 * in-order to recognize C step driver should read chip version
2557 * id located at the AUX bus MISC address space. 2578 * id located at the AUX bus MISC address space.
@@ -2591,13 +2612,14 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
2591 /* Initialize the wait queue for commands */ 2612 /* Initialize the wait queue for commands */
2592 init_waitqueue_head(&trans_pcie->wait_command_queue); 2613 init_waitqueue_head(&trans_pcie->wait_command_queue);
2593 2614
2594 if (iwl_pcie_alloc_ict(trans)) 2615 ret = iwl_pcie_alloc_ict(trans);
2616 if (ret)
2595 goto out_pci_disable_msi; 2617 goto out_pci_disable_msi;
2596 2618
2597 err = request_threaded_irq(pdev->irq, iwl_pcie_isr, 2619 ret = request_threaded_irq(pdev->irq, iwl_pcie_isr,
2598 iwl_pcie_irq_handler, 2620 iwl_pcie_irq_handler,
2599 IRQF_SHARED, DRV_NAME, trans); 2621 IRQF_SHARED, DRV_NAME, trans);
2600 if (err) { 2622 if (ret) {
2601 IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq); 2623 IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
2602 goto out_free_ict; 2624 goto out_free_ict;
2603 } 2625 }
@@ -2617,5 +2639,5 @@ out_pci_disable_device:
2617 pci_disable_device(pdev); 2639 pci_disable_device(pdev);
2618out_no_pci: 2640out_no_pci:
2619 iwl_trans_free(trans); 2641 iwl_trans_free(trans);
2620 return ERR_PTR(err); 2642 return ERR_PTR(ret);
2621} 2643}
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 2b86c2135de3..607acb53c847 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -1875,8 +1875,19 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
1875 1875
1876 /* start timer if queue currently empty */ 1876 /* start timer if queue currently empty */
1877 if (q->read_ptr == q->write_ptr) { 1877 if (q->read_ptr == q->write_ptr) {
1878 if (txq->wd_timeout) 1878 if (txq->wd_timeout) {
1879 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); 1879 /*
1880 * If the TXQ is active, then set the timer, if not,
1881 * set the timer in remainder so that the timer will
1882 * be armed with the right value when the station will
1883 * wake up.
1884 */
1885 if (!txq->frozen)
1886 mod_timer(&txq->stuck_timer,
1887 jiffies + txq->wd_timeout);
1888 else
1889 txq->frozen_expiry_remainder = txq->wd_timeout;
1890 }
1880 IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", q->id); 1891 IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", q->id);
1881 iwl_trans_pcie_ref(trans); 1892 iwl_trans_pcie_ref(trans);
1882 } 1893 }
diff --git a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
index b6cc9ff47fc2..1c6788aecc62 100644
--- a/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
+++ b/drivers/net/wireless/rsi/rsi_91x_sdio_ops.c
@@ -172,6 +172,7 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
172 (struct rsi_91x_sdiodev *)adapter->rsi_dev; 172 (struct rsi_91x_sdiodev *)adapter->rsi_dev;
173 u32 len; 173 u32 len;
174 u32 num_blocks; 174 u32 num_blocks;
175 const u8 *fw;
175 const struct firmware *fw_entry = NULL; 176 const struct firmware *fw_entry = NULL;
176 u32 block_size = dev->tx_blk_size; 177 u32 block_size = dev->tx_blk_size;
177 int status = 0; 178 int status = 0;
@@ -200,6 +201,10 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
200 return status; 201 return status;
201 } 202 }
202 203
204 /* Copy firmware into DMA-accessible memory */
205 fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
206 if (!fw)
207 return -ENOMEM;
203 len = fw_entry->size; 208 len = fw_entry->size;
204 209
205 if (len % 4) 210 if (len % 4)
@@ -210,7 +215,8 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
210 rsi_dbg(INIT_ZONE, "%s: Instruction size:%d\n", __func__, len); 215 rsi_dbg(INIT_ZONE, "%s: Instruction size:%d\n", __func__, len);
211 rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks); 216 rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks);
212 217
213 status = rsi_copy_to_card(common, fw_entry->data, len, num_blocks); 218 status = rsi_copy_to_card(common, fw, len, num_blocks);
219 kfree(fw);
214 release_firmware(fw_entry); 220 release_firmware(fw_entry);
215 return status; 221 return status;
216} 222}
diff --git a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
index 1106ce76707e..30c2cf7fa93b 100644
--- a/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
+++ b/drivers/net/wireless/rsi/rsi_91x_usb_ops.c
@@ -146,7 +146,10 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
146 return status; 146 return status;
147 } 147 }
148 148
149 /* Copy firmware into DMA-accessible memory */
149 fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL); 150 fw = kmemdup(fw_entry->data, fw_entry->size, GFP_KERNEL);
151 if (!fw)
152 return -ENOMEM;
150 len = fw_entry->size; 153 len = fw_entry->size;
151 154
152 if (len % 4) 155 if (len % 4)
@@ -158,6 +161,7 @@ static int rsi_load_ta_instructions(struct rsi_common *common)
158 rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks); 161 rsi_dbg(INIT_ZONE, "%s: num blocks: %d\n", __func__, num_blocks);
159 162
160 status = rsi_copy_to_card(common, fw, len, num_blocks); 163 status = rsi_copy_to_card(common, fw, len, num_blocks);
164 kfree(fw);
161 release_firmware(fw_entry); 165 release_firmware(fw_entry);
162 return status; 166 return status;
163} 167}
diff --git a/drivers/net/wireless/rtlwifi/core.c b/drivers/net/wireless/rtlwifi/core.c
index 3b3a88b53b11..585d0883c7e5 100644
--- a/drivers/net/wireless/rtlwifi/core.c
+++ b/drivers/net/wireless/rtlwifi/core.c
@@ -1015,9 +1015,12 @@ static void send_beacon_frame(struct ieee80211_hw *hw,
1015{ 1015{
1016 struct rtl_priv *rtlpriv = rtl_priv(hw); 1016 struct rtl_priv *rtlpriv = rtl_priv(hw);
1017 struct sk_buff *skb = ieee80211_beacon_get(hw, vif); 1017 struct sk_buff *skb = ieee80211_beacon_get(hw, vif);
1018 struct rtl_tcb_desc tcb_desc;
1018 1019
1019 if (skb) 1020 if (skb) {
1020 rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, NULL); 1021 memset(&tcb_desc, 0, sizeof(struct rtl_tcb_desc));
1022 rtlpriv->intf_ops->adapter_tx(hw, NULL, skb, &tcb_desc);
1023 }
1021} 1024}
1022 1025
1023static void rtl_op_bss_info_changed(struct ieee80211_hw *hw, 1026static void rtl_op_bss_info_changed(struct ieee80211_hw *hw,
diff --git a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
index 1017f02d7bf7..7bf88d9dcdc3 100644
--- a/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
+++ b/drivers/net/wireless/rtlwifi/rtl8723be/sw.c
@@ -385,6 +385,7 @@ module_param_named(debug, rtl8723be_mod_params.debug, int, 0444);
385module_param_named(ips, rtl8723be_mod_params.inactiveps, bool, 0444); 385module_param_named(ips, rtl8723be_mod_params.inactiveps, bool, 0444);
386module_param_named(swlps, rtl8723be_mod_params.swctrl_lps, bool, 0444); 386module_param_named(swlps, rtl8723be_mod_params.swctrl_lps, bool, 0444);
387module_param_named(fwlps, rtl8723be_mod_params.fwctrl_lps, bool, 0444); 387module_param_named(fwlps, rtl8723be_mod_params.fwctrl_lps, bool, 0444);
388module_param_named(msi, rtl8723be_mod_params.msi_support, bool, 0444);
388module_param_named(disable_watchdog, rtl8723be_mod_params.disable_watchdog, 389module_param_named(disable_watchdog, rtl8723be_mod_params.disable_watchdog,
389 bool, 0444); 390 bool, 0444);
390MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n"); 391MODULE_PARM_DESC(swenc, "Set to 1 for software crypto (default 0)\n");
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index 1a83e190fc15..28577a31549d 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -61,6 +61,12 @@ void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
61void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue) 61void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue)
62{ 62{
63 atomic_dec(&queue->inflight_packets); 63 atomic_dec(&queue->inflight_packets);
64
65 /* Wake the dealloc thread _after_ decrementing inflight_packets so
66 * that if kthread_stop() has already been called, the dealloc thread
67 * does not wait forever with nothing to wake it.
68 */
69 wake_up(&queue->dealloc_wq);
64} 70}
65 71
66int xenvif_schedulable(struct xenvif *vif) 72int xenvif_schedulable(struct xenvif *vif)
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 880d0d63e872..3f44b522b831 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -810,23 +810,17 @@ static inline struct sk_buff *xenvif_alloc_skb(unsigned int size)
810static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue, 810static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *queue,
811 struct sk_buff *skb, 811 struct sk_buff *skb,
812 struct xen_netif_tx_request *txp, 812 struct xen_netif_tx_request *txp,
813 struct gnttab_map_grant_ref *gop) 813 struct gnttab_map_grant_ref *gop,
814 unsigned int frag_overflow,
815 struct sk_buff *nskb)
814{ 816{
815 struct skb_shared_info *shinfo = skb_shinfo(skb); 817 struct skb_shared_info *shinfo = skb_shinfo(skb);
816 skb_frag_t *frags = shinfo->frags; 818 skb_frag_t *frags = shinfo->frags;
817 u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx; 819 u16 pending_idx = XENVIF_TX_CB(skb)->pending_idx;
818 int start; 820 int start;
819 pending_ring_idx_t index; 821 pending_ring_idx_t index;
820 unsigned int nr_slots, frag_overflow = 0; 822 unsigned int nr_slots;
821 823
822 /* At this point shinfo->nr_frags is in fact the number of
823 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
824 */
825 if (shinfo->nr_frags > MAX_SKB_FRAGS) {
826 frag_overflow = shinfo->nr_frags - MAX_SKB_FRAGS;
827 BUG_ON(frag_overflow > MAX_SKB_FRAGS);
828 shinfo->nr_frags = MAX_SKB_FRAGS;
829 }
830 nr_slots = shinfo->nr_frags; 824 nr_slots = shinfo->nr_frags;
831 825
832 /* Skip first skb fragment if it is on same page as header fragment. */ 826 /* Skip first skb fragment if it is on same page as header fragment. */
@@ -841,13 +835,6 @@ static struct gnttab_map_grant_ref *xenvif_get_requests(struct xenvif_queue *que
841 } 835 }
842 836
843 if (frag_overflow) { 837 if (frag_overflow) {
844 struct sk_buff *nskb = xenvif_alloc_skb(0);
845 if (unlikely(nskb == NULL)) {
846 if (net_ratelimit())
847 netdev_err(queue->vif->dev,
848 "Can't allocate the frag_list skb.\n");
849 return NULL;
850 }
851 838
852 shinfo = skb_shinfo(nskb); 839 shinfo = skb_shinfo(nskb);
853 frags = shinfo->frags; 840 frags = shinfo->frags;
@@ -1175,9 +1162,10 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
1175 unsigned *copy_ops, 1162 unsigned *copy_ops,
1176 unsigned *map_ops) 1163 unsigned *map_ops)
1177{ 1164{
1178 struct gnttab_map_grant_ref *gop = queue->tx_map_ops, *request_gop; 1165 struct gnttab_map_grant_ref *gop = queue->tx_map_ops;
1179 struct sk_buff *skb; 1166 struct sk_buff *skb, *nskb;
1180 int ret; 1167 int ret;
1168 unsigned int frag_overflow;
1181 1169
1182 while (skb_queue_len(&queue->tx_queue) < budget) { 1170 while (skb_queue_len(&queue->tx_queue) < budget) {
1183 struct xen_netif_tx_request txreq; 1171 struct xen_netif_tx_request txreq;
@@ -1265,6 +1253,29 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
1265 break; 1253 break;
1266 } 1254 }
1267 1255
1256 skb_shinfo(skb)->nr_frags = ret;
1257 if (data_len < txreq.size)
1258 skb_shinfo(skb)->nr_frags++;
1259 /* At this point shinfo->nr_frags is in fact the number of
1260 * slots, which can be as large as XEN_NETBK_LEGACY_SLOTS_MAX.
1261 */
1262 frag_overflow = 0;
1263 nskb = NULL;
1264 if (skb_shinfo(skb)->nr_frags > MAX_SKB_FRAGS) {
1265 frag_overflow = skb_shinfo(skb)->nr_frags - MAX_SKB_FRAGS;
1266 BUG_ON(frag_overflow > MAX_SKB_FRAGS);
1267 skb_shinfo(skb)->nr_frags = MAX_SKB_FRAGS;
1268 nskb = xenvif_alloc_skb(0);
1269 if (unlikely(nskb == NULL)) {
1270 kfree_skb(skb);
1271 xenvif_tx_err(queue, &txreq, idx);
1272 if (net_ratelimit())
1273 netdev_err(queue->vif->dev,
1274 "Can't allocate the frag_list skb.\n");
1275 break;
1276 }
1277 }
1278
1268 if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) { 1279 if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1269 struct xen_netif_extra_info *gso; 1280 struct xen_netif_extra_info *gso;
1270 gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1]; 1281 gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
@@ -1272,6 +1283,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
1272 if (xenvif_set_skb_gso(queue->vif, skb, gso)) { 1283 if (xenvif_set_skb_gso(queue->vif, skb, gso)) {
1273 /* Failure in xenvif_set_skb_gso is fatal. */ 1284 /* Failure in xenvif_set_skb_gso is fatal. */
1274 kfree_skb(skb); 1285 kfree_skb(skb);
1286 kfree_skb(nskb);
1275 break; 1287 break;
1276 } 1288 }
1277 } 1289 }
@@ -1294,9 +1306,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
1294 1306
1295 (*copy_ops)++; 1307 (*copy_ops)++;
1296 1308
1297 skb_shinfo(skb)->nr_frags = ret;
1298 if (data_len < txreq.size) { 1309 if (data_len < txreq.size) {
1299 skb_shinfo(skb)->nr_frags++;
1300 frag_set_pending_idx(&skb_shinfo(skb)->frags[0], 1310 frag_set_pending_idx(&skb_shinfo(skb)->frags[0],
1301 pending_idx); 1311 pending_idx);
1302 xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop); 1312 xenvif_tx_create_map_op(queue, pending_idx, &txreq, gop);
@@ -1310,13 +1320,8 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
1310 1320
1311 queue->pending_cons++; 1321 queue->pending_cons++;
1312 1322
1313 request_gop = xenvif_get_requests(queue, skb, txfrags, gop); 1323 gop = xenvif_get_requests(queue, skb, txfrags, gop,
1314 if (request_gop == NULL) { 1324 frag_overflow, nskb);
1315 kfree_skb(skb);
1316 xenvif_tx_err(queue, &txreq, idx);
1317 break;
1318 }
1319 gop = request_gop;
1320 1325
1321 __skb_queue_tail(&queue->tx_queue, skb); 1326 __skb_queue_tail(&queue->tx_queue, skb);
1322 1327
@@ -1536,7 +1541,6 @@ void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success)
1536 smp_wmb(); 1541 smp_wmb();
1537 queue->dealloc_prod++; 1542 queue->dealloc_prod++;
1538 } while (ubuf); 1543 } while (ubuf);
1539 wake_up(&queue->dealloc_wq);
1540 spin_unlock_irqrestore(&queue->callback_lock, flags); 1544 spin_unlock_irqrestore(&queue->callback_lock, flags);
1541 1545
1542 if (likely(zerocopy_success)) 1546 if (likely(zerocopy_success))
@@ -1566,13 +1570,13 @@ static inline void xenvif_tx_dealloc_action(struct xenvif_queue *queue)
1566 smp_rmb(); 1570 smp_rmb();
1567 1571
1568 while (dc != dp) { 1572 while (dc != dp) {
1569 BUG_ON(gop - queue->tx_unmap_ops > MAX_PENDING_REQS); 1573 BUG_ON(gop - queue->tx_unmap_ops >= MAX_PENDING_REQS);
1570 pending_idx = 1574 pending_idx =
1571 queue->dealloc_ring[pending_index(dc++)]; 1575 queue->dealloc_ring[pending_index(dc++)];
1572 1576
1573 pending_idx_release[gop-queue->tx_unmap_ops] = 1577 pending_idx_release[gop - queue->tx_unmap_ops] =
1574 pending_idx; 1578 pending_idx;
1575 queue->pages_to_unmap[gop-queue->tx_unmap_ops] = 1579 queue->pages_to_unmap[gop - queue->tx_unmap_ops] =
1576 queue->mmap_pages[pending_idx]; 1580 queue->mmap_pages[pending_idx];
1577 gnttab_set_unmap_op(gop, 1581 gnttab_set_unmap_op(gop,
1578 idx_to_kaddr(queue, pending_idx), 1582 idx_to_kaddr(queue, pending_idx),
diff --git a/drivers/ntb/ntb.c b/drivers/ntb/ntb.c
index 23435f2a5486..2e2530743831 100644
--- a/drivers/ntb/ntb.c
+++ b/drivers/ntb/ntb.c
@@ -114,7 +114,7 @@ int ntb_register_device(struct ntb_dev *ntb)
114 ntb->dev.bus = &ntb_bus; 114 ntb->dev.bus = &ntb_bus;
115 ntb->dev.parent = &ntb->pdev->dev; 115 ntb->dev.parent = &ntb->pdev->dev;
116 ntb->dev.release = ntb_dev_release; 116 ntb->dev.release = ntb_dev_release;
117 dev_set_name(&ntb->dev, pci_name(ntb->pdev)); 117 dev_set_name(&ntb->dev, "%s", pci_name(ntb->pdev));
118 118
119 ntb->ctx = NULL; 119 ntb->ctx = NULL;
120 ntb->ctx_ops = NULL; 120 ntb->ctx_ops = NULL;
diff --git a/drivers/ntb/ntb_transport.c b/drivers/ntb/ntb_transport.c
index efe3ad4122f2..1c6386d5f79c 100644
--- a/drivers/ntb/ntb_transport.c
+++ b/drivers/ntb/ntb_transport.c
@@ -142,10 +142,11 @@ struct ntb_transport_qp {
142 142
143 void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data, 143 void (*rx_handler)(struct ntb_transport_qp *qp, void *qp_data,
144 void *data, int len); 144 void *data, int len);
145 struct list_head rx_post_q;
145 struct list_head rx_pend_q; 146 struct list_head rx_pend_q;
146 struct list_head rx_free_q; 147 struct list_head rx_free_q;
147 spinlock_t ntb_rx_pend_q_lock; 148 /* ntb_rx_q_lock: synchronize access to rx_XXXX_q */
148 spinlock_t ntb_rx_free_q_lock; 149 spinlock_t ntb_rx_q_lock;
149 void *rx_buff; 150 void *rx_buff;
150 unsigned int rx_index; 151 unsigned int rx_index;
151 unsigned int rx_max_entry; 152 unsigned int rx_max_entry;
@@ -211,6 +212,8 @@ struct ntb_transport_ctx {
211 bool link_is_up; 212 bool link_is_up;
212 struct delayed_work link_work; 213 struct delayed_work link_work;
213 struct work_struct link_cleanup; 214 struct work_struct link_cleanup;
215
216 struct dentry *debugfs_node_dir;
214}; 217};
215 218
216enum { 219enum {
@@ -436,13 +439,17 @@ static ssize_t debugfs_read(struct file *filp, char __user *ubuf, size_t count,
436 char *buf; 439 char *buf;
437 ssize_t ret, out_offset, out_count; 440 ssize_t ret, out_offset, out_count;
438 441
442 qp = filp->private_data;
443
444 if (!qp || !qp->link_is_up)
445 return 0;
446
439 out_count = 1000; 447 out_count = 1000;
440 448
441 buf = kmalloc(out_count, GFP_KERNEL); 449 buf = kmalloc(out_count, GFP_KERNEL);
442 if (!buf) 450 if (!buf)
443 return -ENOMEM; 451 return -ENOMEM;
444 452
445 qp = filp->private_data;
446 out_offset = 0; 453 out_offset = 0;
447 out_offset += snprintf(buf + out_offset, out_count - out_offset, 454 out_offset += snprintf(buf + out_offset, out_count - out_offset,
448 "NTB QP stats\n"); 455 "NTB QP stats\n");
@@ -534,6 +541,27 @@ out:
534 return entry; 541 return entry;
535} 542}
536 543
544static struct ntb_queue_entry *ntb_list_mv(spinlock_t *lock,
545 struct list_head *list,
546 struct list_head *to_list)
547{
548 struct ntb_queue_entry *entry;
549 unsigned long flags;
550
551 spin_lock_irqsave(lock, flags);
552
553 if (list_empty(list)) {
554 entry = NULL;
555 } else {
556 entry = list_first_entry(list, struct ntb_queue_entry, entry);
557 list_move_tail(&entry->entry, to_list);
558 }
559
560 spin_unlock_irqrestore(lock, flags);
561
562 return entry;
563}
564
537static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt, 565static int ntb_transport_setup_qp_mw(struct ntb_transport_ctx *nt,
538 unsigned int qp_num) 566 unsigned int qp_num)
539{ 567{
@@ -601,13 +629,16 @@ static void ntb_free_mw(struct ntb_transport_ctx *nt, int num_mw)
601} 629}
602 630
603static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw, 631static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
604 unsigned int size) 632 resource_size_t size)
605{ 633{
606 struct ntb_transport_mw *mw = &nt->mw_vec[num_mw]; 634 struct ntb_transport_mw *mw = &nt->mw_vec[num_mw];
607 struct pci_dev *pdev = nt->ndev->pdev; 635 struct pci_dev *pdev = nt->ndev->pdev;
608 unsigned int xlat_size, buff_size; 636 size_t xlat_size, buff_size;
609 int rc; 637 int rc;
610 638
639 if (!size)
640 return -EINVAL;
641
611 xlat_size = round_up(size, mw->xlat_align_size); 642 xlat_size = round_up(size, mw->xlat_align_size);
612 buff_size = round_up(size, mw->xlat_align); 643 buff_size = round_up(size, mw->xlat_align);
613 644
@@ -627,7 +658,7 @@ static int ntb_set_mw(struct ntb_transport_ctx *nt, int num_mw,
627 if (!mw->virt_addr) { 658 if (!mw->virt_addr) {
628 mw->xlat_size = 0; 659 mw->xlat_size = 0;
629 mw->buff_size = 0; 660 mw->buff_size = 0;
630 dev_err(&pdev->dev, "Unable to alloc MW buff of size %d\n", 661 dev_err(&pdev->dev, "Unable to alloc MW buff of size %zu\n",
631 buff_size); 662 buff_size);
632 return -ENOMEM; 663 return -ENOMEM;
633 } 664 }
@@ -867,6 +898,8 @@ static void ntb_qp_link_work(struct work_struct *work)
867 898
868 if (qp->event_handler) 899 if (qp->event_handler)
869 qp->event_handler(qp->cb_data, qp->link_is_up); 900 qp->event_handler(qp->cb_data, qp->link_is_up);
901
902 tasklet_schedule(&qp->rxc_db_work);
870 } else if (nt->link_is_up) 903 } else if (nt->link_is_up)
871 schedule_delayed_work(&qp->link_work, 904 schedule_delayed_work(&qp->link_work,
872 msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT)); 905 msecs_to_jiffies(NTB_LINK_DOWN_TIMEOUT));
@@ -923,12 +956,12 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
923 qp->tx_max_frame = min(transport_mtu, tx_size / 2); 956 qp->tx_max_frame = min(transport_mtu, tx_size / 2);
924 qp->tx_max_entry = tx_size / qp->tx_max_frame; 957 qp->tx_max_entry = tx_size / qp->tx_max_frame;
925 958
926 if (nt_debugfs_dir) { 959 if (nt->debugfs_node_dir) {
927 char debugfs_name[4]; 960 char debugfs_name[4];
928 961
929 snprintf(debugfs_name, 4, "qp%d", qp_num); 962 snprintf(debugfs_name, 4, "qp%d", qp_num);
930 qp->debugfs_dir = debugfs_create_dir(debugfs_name, 963 qp->debugfs_dir = debugfs_create_dir(debugfs_name,
931 nt_debugfs_dir); 964 nt->debugfs_node_dir);
932 965
933 qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR, 966 qp->debugfs_stats = debugfs_create_file("stats", S_IRUSR,
934 qp->debugfs_dir, qp, 967 qp->debugfs_dir, qp,
@@ -941,10 +974,10 @@ static int ntb_transport_init_queue(struct ntb_transport_ctx *nt,
941 INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work); 974 INIT_DELAYED_WORK(&qp->link_work, ntb_qp_link_work);
942 INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work); 975 INIT_WORK(&qp->link_cleanup, ntb_qp_link_cleanup_work);
943 976
944 spin_lock_init(&qp->ntb_rx_pend_q_lock); 977 spin_lock_init(&qp->ntb_rx_q_lock);
945 spin_lock_init(&qp->ntb_rx_free_q_lock);
946 spin_lock_init(&qp->ntb_tx_free_q_lock); 978 spin_lock_init(&qp->ntb_tx_free_q_lock);
947 979
980 INIT_LIST_HEAD(&qp->rx_post_q);
948 INIT_LIST_HEAD(&qp->rx_pend_q); 981 INIT_LIST_HEAD(&qp->rx_pend_q);
949 INIT_LIST_HEAD(&qp->rx_free_q); 982 INIT_LIST_HEAD(&qp->rx_free_q);
950 INIT_LIST_HEAD(&qp->tx_free_q); 983 INIT_LIST_HEAD(&qp->tx_free_q);
@@ -1031,6 +1064,12 @@ static int ntb_transport_probe(struct ntb_client *self, struct ntb_dev *ndev)
1031 goto err2; 1064 goto err2;
1032 } 1065 }
1033 1066
1067 if (nt_debugfs_dir) {
1068 nt->debugfs_node_dir =
1069 debugfs_create_dir(pci_name(ndev->pdev),
1070 nt_debugfs_dir);
1071 }
1072
1034 for (i = 0; i < qp_count; i++) { 1073 for (i = 0; i < qp_count; i++) {
1035 rc = ntb_transport_init_queue(nt, i); 1074 rc = ntb_transport_init_queue(nt, i);
1036 if (rc) 1075 if (rc)
@@ -1107,22 +1146,47 @@ static void ntb_transport_free(struct ntb_client *self, struct ntb_dev *ndev)
1107 kfree(nt); 1146 kfree(nt);
1108} 1147}
1109 1148
1110static void ntb_rx_copy_callback(void *data) 1149static void ntb_complete_rxc(struct ntb_transport_qp *qp)
1111{ 1150{
1112 struct ntb_queue_entry *entry = data; 1151 struct ntb_queue_entry *entry;
1113 struct ntb_transport_qp *qp = entry->qp; 1152 void *cb_data;
1114 void *cb_data = entry->cb_data; 1153 unsigned int len;
1115 unsigned int len = entry->len; 1154 unsigned long irqflags;
1116 struct ntb_payload_header *hdr = entry->rx_hdr; 1155
1156 spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);
1157
1158 while (!list_empty(&qp->rx_post_q)) {
1159 entry = list_first_entry(&qp->rx_post_q,
1160 struct ntb_queue_entry, entry);
1161 if (!(entry->flags & DESC_DONE_FLAG))
1162 break;
1163
1164 entry->rx_hdr->flags = 0;
1165 iowrite32(entry->index, &qp->rx_info->entry);
1117 1166
1118 hdr->flags = 0; 1167 cb_data = entry->cb_data;
1168 len = entry->len;
1119 1169
1120 iowrite32(entry->index, &qp->rx_info->entry); 1170 list_move_tail(&entry->entry, &qp->rx_free_q);
1121 1171
1122 ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q); 1172 spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
1123 1173
1124 if (qp->rx_handler && qp->client_ready) 1174 if (qp->rx_handler && qp->client_ready)
1125 qp->rx_handler(qp, qp->cb_data, cb_data, len); 1175 qp->rx_handler(qp, qp->cb_data, cb_data, len);
1176
1177 spin_lock_irqsave(&qp->ntb_rx_q_lock, irqflags);
1178 }
1179
1180 spin_unlock_irqrestore(&qp->ntb_rx_q_lock, irqflags);
1181}
1182
1183static void ntb_rx_copy_callback(void *data)
1184{
1185 struct ntb_queue_entry *entry = data;
1186
1187 entry->flags |= DESC_DONE_FLAG;
1188
1189 ntb_complete_rxc(entry->qp);
1126} 1190}
1127 1191
1128static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset) 1192static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
@@ -1138,19 +1202,18 @@ static void ntb_memcpy_rx(struct ntb_queue_entry *entry, void *offset)
1138 ntb_rx_copy_callback(entry); 1202 ntb_rx_copy_callback(entry);
1139} 1203}
1140 1204
1141static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset, 1205static void ntb_async_rx(struct ntb_queue_entry *entry, void *offset)
1142 size_t len)
1143{ 1206{
1144 struct dma_async_tx_descriptor *txd; 1207 struct dma_async_tx_descriptor *txd;
1145 struct ntb_transport_qp *qp = entry->qp; 1208 struct ntb_transport_qp *qp = entry->qp;
1146 struct dma_chan *chan = qp->dma_chan; 1209 struct dma_chan *chan = qp->dma_chan;
1147 struct dma_device *device; 1210 struct dma_device *device;
1148 size_t pay_off, buff_off; 1211 size_t pay_off, buff_off, len;
1149 struct dmaengine_unmap_data *unmap; 1212 struct dmaengine_unmap_data *unmap;
1150 dma_cookie_t cookie; 1213 dma_cookie_t cookie;
1151 void *buf = entry->buf; 1214 void *buf = entry->buf;
1152 1215
1153 entry->len = len; 1216 len = entry->len;
1154 1217
1155 if (!chan) 1218 if (!chan)
1156 goto err; 1219 goto err;
@@ -1226,7 +1289,6 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp)
1226 struct ntb_payload_header *hdr; 1289 struct ntb_payload_header *hdr;
1227 struct ntb_queue_entry *entry; 1290 struct ntb_queue_entry *entry;
1228 void *offset; 1291 void *offset;
1229 int rc;
1230 1292
1231 offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index; 1293 offset = qp->rx_buff + qp->rx_max_frame * qp->rx_index;
1232 hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header); 1294 hdr = offset + qp->rx_max_frame - sizeof(struct ntb_payload_header);
@@ -1255,65 +1317,43 @@ static int ntb_process_rxc(struct ntb_transport_qp *qp)
1255 return -EIO; 1317 return -EIO;
1256 } 1318 }
1257 1319
1258 entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q); 1320 entry = ntb_list_mv(&qp->ntb_rx_q_lock, &qp->rx_pend_q, &qp->rx_post_q);
1259 if (!entry) { 1321 if (!entry) {
1260 dev_dbg(&qp->ndev->pdev->dev, "no receive buffer\n"); 1322 dev_dbg(&qp->ndev->pdev->dev, "no receive buffer\n");
1261 qp->rx_err_no_buf++; 1323 qp->rx_err_no_buf++;
1262 1324 return -EAGAIN;
1263 rc = -ENOMEM;
1264 goto err;
1265 } 1325 }
1266 1326
1327 entry->rx_hdr = hdr;
1328 entry->index = qp->rx_index;
1329
1267 if (hdr->len > entry->len) { 1330 if (hdr->len > entry->len) {
1268 dev_dbg(&qp->ndev->pdev->dev, 1331 dev_dbg(&qp->ndev->pdev->dev,
1269 "receive buffer overflow! Wanted %d got %d\n", 1332 "receive buffer overflow! Wanted %d got %d\n",
1270 hdr->len, entry->len); 1333 hdr->len, entry->len);
1271 qp->rx_err_oflow++; 1334 qp->rx_err_oflow++;
1272 1335
1273 rc = -EIO; 1336 entry->len = -EIO;
1274 goto err; 1337 entry->flags |= DESC_DONE_FLAG;
1275 }
1276 1338
1277 dev_dbg(&qp->ndev->pdev->dev, 1339 ntb_complete_rxc(qp);
1278 "RX OK index %u ver %u size %d into buf size %d\n", 1340 } else {
1279 qp->rx_index, hdr->ver, hdr->len, entry->len); 1341 dev_dbg(&qp->ndev->pdev->dev,
1342 "RX OK index %u ver %u size %d into buf size %d\n",
1343 qp->rx_index, hdr->ver, hdr->len, entry->len);
1280 1344
1281 qp->rx_bytes += hdr->len; 1345 qp->rx_bytes += hdr->len;
1282 qp->rx_pkts++; 1346 qp->rx_pkts++;
1283 1347
1284 entry->index = qp->rx_index; 1348 entry->len = hdr->len;
1285 entry->rx_hdr = hdr;
1286 1349
1287 ntb_async_rx(entry, offset, hdr->len); 1350 ntb_async_rx(entry, offset);
1351 }
1288 1352
1289 qp->rx_index++; 1353 qp->rx_index++;
1290 qp->rx_index %= qp->rx_max_entry; 1354 qp->rx_index %= qp->rx_max_entry;
1291 1355
1292 return 0; 1356 return 0;
1293
1294err:
1295 /* FIXME: if this syncrhonous update of the rx_index gets ahead of
1296 * asyncrhonous ntb_rx_copy_callback of previous entry, there are three
1297 * scenarios:
1298 *
1299 * 1) The peer might miss this update, but observe the update
1300 * from the memcpy completion callback. In this case, the buffer will
1301 * not be freed on the peer to be reused for a different packet. The
1302 * successful rx of a later packet would clear the condition, but the
1303 * condition could persist if several rx fail in a row.
1304 *
1305 * 2) The peer may observe this update before the asyncrhonous copy of
1306 * prior packets is completed. The peer may overwrite the buffers of
1307 * the prior packets before they are copied.
1308 *
1309 * 3) Both: the peer may observe the update, and then observe the index
1310 * decrement by the asynchronous completion callback. Who knows what
1311 * badness that will cause.
1312 */
1313 hdr->flags = 0;
1314 iowrite32(qp->rx_index, &qp->rx_info->entry);
1315
1316 return rc;
1317} 1357}
1318 1358
1319static void ntb_transport_rxc_db(unsigned long data) 1359static void ntb_transport_rxc_db(unsigned long data)
@@ -1333,7 +1373,7 @@ static void ntb_transport_rxc_db(unsigned long data)
1333 break; 1373 break;
1334 } 1374 }
1335 1375
1336 if (qp->dma_chan) 1376 if (i && qp->dma_chan)
1337 dma_async_issue_pending(qp->dma_chan); 1377 dma_async_issue_pending(qp->dma_chan);
1338 1378
1339 if (i == qp->rx_max_entry) { 1379 if (i == qp->rx_max_entry) {
@@ -1609,7 +1649,7 @@ ntb_transport_create_queue(void *data, struct device *client_dev,
1609 goto err1; 1649 goto err1;
1610 1650
1611 entry->qp = qp; 1651 entry->qp = qp;
1612 ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, 1652 ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry,
1613 &qp->rx_free_q); 1653 &qp->rx_free_q);
1614 } 1654 }
1615 1655
@@ -1634,7 +1674,7 @@ err2:
1634 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q))) 1674 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
1635 kfree(entry); 1675 kfree(entry);
1636err1: 1676err1:
1637 while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q))) 1677 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
1638 kfree(entry); 1678 kfree(entry);
1639 if (qp->dma_chan) 1679 if (qp->dma_chan)
1640 dma_release_channel(qp->dma_chan); 1680 dma_release_channel(qp->dma_chan);
@@ -1652,7 +1692,6 @@ EXPORT_SYMBOL_GPL(ntb_transport_create_queue);
1652 */ 1692 */
1653void ntb_transport_free_queue(struct ntb_transport_qp *qp) 1693void ntb_transport_free_queue(struct ntb_transport_qp *qp)
1654{ 1694{
1655 struct ntb_transport_ctx *nt = qp->transport;
1656 struct pci_dev *pdev; 1695 struct pci_dev *pdev;
1657 struct ntb_queue_entry *entry; 1696 struct ntb_queue_entry *entry;
1658 u64 qp_bit; 1697 u64 qp_bit;
@@ -1689,18 +1728,23 @@ void ntb_transport_free_queue(struct ntb_transport_qp *qp)
1689 qp->tx_handler = NULL; 1728 qp->tx_handler = NULL;
1690 qp->event_handler = NULL; 1729 qp->event_handler = NULL;
1691 1730
1692 while ((entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q))) 1731 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q)))
1693 kfree(entry); 1732 kfree(entry);
1694 1733
1695 while ((entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q))) { 1734 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q))) {
1696 dev_warn(&pdev->dev, "Freeing item from a non-empty queue\n"); 1735 dev_warn(&pdev->dev, "Freeing item from non-empty rx_pend_q\n");
1736 kfree(entry);
1737 }
1738
1739 while ((entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_post_q))) {
1740 dev_warn(&pdev->dev, "Freeing item from non-empty rx_post_q\n");
1697 kfree(entry); 1741 kfree(entry);
1698 } 1742 }
1699 1743
1700 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q))) 1744 while ((entry = ntb_list_rm(&qp->ntb_tx_free_q_lock, &qp->tx_free_q)))
1701 kfree(entry); 1745 kfree(entry);
1702 1746
1703 nt->qp_bitmap_free |= qp_bit; 1747 qp->transport->qp_bitmap_free |= qp_bit;
1704 1748
1705 dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num); 1749 dev_info(&pdev->dev, "NTB Transport QP %d freed\n", qp->qp_num);
1706} 1750}
@@ -1724,14 +1768,14 @@ void *ntb_transport_rx_remove(struct ntb_transport_qp *qp, unsigned int *len)
1724 if (!qp || qp->client_ready) 1768 if (!qp || qp->client_ready)
1725 return NULL; 1769 return NULL;
1726 1770
1727 entry = ntb_list_rm(&qp->ntb_rx_pend_q_lock, &qp->rx_pend_q); 1771 entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_pend_q);
1728 if (!entry) 1772 if (!entry)
1729 return NULL; 1773 return NULL;
1730 1774
1731 buf = entry->cb_data; 1775 buf = entry->cb_data;
1732 *len = entry->len; 1776 *len = entry->len;
1733 1777
1734 ntb_list_add(&qp->ntb_rx_free_q_lock, &entry->entry, &qp->rx_free_q); 1778 ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_free_q);
1735 1779
1736 return buf; 1780 return buf;
1737} 1781}
@@ -1757,15 +1801,18 @@ int ntb_transport_rx_enqueue(struct ntb_transport_qp *qp, void *cb, void *data,
1757 if (!qp) 1801 if (!qp)
1758 return -EINVAL; 1802 return -EINVAL;
1759 1803
1760 entry = ntb_list_rm(&qp->ntb_rx_free_q_lock, &qp->rx_free_q); 1804 entry = ntb_list_rm(&qp->ntb_rx_q_lock, &qp->rx_free_q);
1761 if (!entry) 1805 if (!entry)
1762 return -ENOMEM; 1806 return -ENOMEM;
1763 1807
1764 entry->cb_data = cb; 1808 entry->cb_data = cb;
1765 entry->buf = data; 1809 entry->buf = data;
1766 entry->len = len; 1810 entry->len = len;
1811 entry->flags = 0;
1812
1813 ntb_list_add(&qp->ntb_rx_q_lock, &entry->entry, &qp->rx_pend_q);
1767 1814
1768 ntb_list_add(&qp->ntb_rx_pend_q_lock, &entry->entry, &qp->rx_pend_q); 1815 tasklet_schedule(&qp->rxc_db_work);
1769 1816
1770 return 0; 1817 return 0;
1771} 1818}
diff --git a/drivers/nvdimm/region_devs.c b/drivers/nvdimm/region_devs.c
index a5233422f9dc..7384455792bf 100644
--- a/drivers/nvdimm/region_devs.c
+++ b/drivers/nvdimm/region_devs.c
@@ -458,10 +458,15 @@ static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
458 nvdimm_bus_unlock(dev); 458 nvdimm_bus_unlock(dev);
459 } 459 }
460 if (is_nd_btt(dev) && probe) { 460 if (is_nd_btt(dev) && probe) {
461 struct nd_btt *nd_btt = to_nd_btt(dev);
462
461 nd_region = to_nd_region(dev->parent); 463 nd_region = to_nd_region(dev->parent);
462 nvdimm_bus_lock(dev); 464 nvdimm_bus_lock(dev);
463 if (nd_region->btt_seed == dev) 465 if (nd_region->btt_seed == dev)
464 nd_region_create_btt_seed(nd_region); 466 nd_region_create_btt_seed(nd_region);
467 if (nd_region->ns_seed == &nd_btt->ndns->dev &&
468 is_nd_blk(dev->parent))
469 nd_region_create_blk_seed(nd_region);
465 nvdimm_bus_unlock(dev); 470 nvdimm_bus_unlock(dev);
466 } 471 }
467} 472}
diff --git a/drivers/of/Kconfig b/drivers/of/Kconfig
index 8df1b1777745..59bb8556e43a 100644
--- a/drivers/of/Kconfig
+++ b/drivers/of/Kconfig
@@ -47,7 +47,7 @@ config OF_DYNAMIC
47 47
48config OF_ADDRESS 48config OF_ADDRESS
49 def_bool y 49 def_bool y
50 depends on !SPARC 50 depends on !SPARC && HAS_IOMEM
51 select OF_ADDRESS_PCI if PCI 51 select OF_ADDRESS_PCI if PCI
52 52
53config OF_ADDRESS_PCI 53config OF_ADDRESS_PCI
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 18016341d5a9..9f71770b6226 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -979,7 +979,6 @@ static struct platform_driver unittest_driver = {
979 .remove = unittest_remove, 979 .remove = unittest_remove,
980 .driver = { 980 .driver = {
981 .name = "unittest", 981 .name = "unittest",
982 .owner = THIS_MODULE,
983 .of_match_table = of_match_ptr(unittest_match), 982 .of_match_table = of_match_ptr(unittest_match),
984 }, 983 },
985}; 984};
@@ -1666,7 +1665,6 @@ static const struct i2c_device_id unittest_i2c_dev_id[] = {
1666static struct i2c_driver unittest_i2c_dev_driver = { 1665static struct i2c_driver unittest_i2c_dev_driver = {
1667 .driver = { 1666 .driver = {
1668 .name = "unittest-i2c-dev", 1667 .name = "unittest-i2c-dev",
1669 .owner = THIS_MODULE,
1670 }, 1668 },
1671 .probe = unittest_i2c_dev_probe, 1669 .probe = unittest_i2c_dev_probe,
1672 .remove = unittest_i2c_dev_remove, 1670 .remove = unittest_i2c_dev_remove,
@@ -1761,7 +1759,6 @@ static const struct i2c_device_id unittest_i2c_mux_id[] = {
1761static struct i2c_driver unittest_i2c_mux_driver = { 1759static struct i2c_driver unittest_i2c_mux_driver = {
1762 .driver = { 1760 .driver = {
1763 .name = "unittest-i2c-mux", 1761 .name = "unittest-i2c-mux",
1764 .owner = THIS_MODULE,
1765 }, 1762 },
1766 .probe = unittest_i2c_mux_probe, 1763 .probe = unittest_i2c_mux_probe,
1767 .remove = unittest_i2c_mux_remove, 1764 .remove = unittest_i2c_mux_remove,
diff --git a/drivers/parport/share.c b/drivers/parport/share.c
index 8067f54ce050..5ce5ef211bdb 100644
--- a/drivers/parport/share.c
+++ b/drivers/parport/share.c
@@ -891,8 +891,10 @@ parport_register_dev_model(struct parport *port, const char *name,
891 par_dev->dev.release = free_pardevice; 891 par_dev->dev.release = free_pardevice;
892 par_dev->devmodel = true; 892 par_dev->devmodel = true;
893 ret = device_register(&par_dev->dev); 893 ret = device_register(&par_dev->dev);
894 if (ret) 894 if (ret) {
895 goto err_put_dev; 895 put_device(&par_dev->dev);
896 goto err_put_port;
897 }
896 898
897 /* Chain this onto the list */ 899 /* Chain this onto the list */
898 par_dev->prev = NULL; 900 par_dev->prev = NULL;
@@ -907,7 +909,8 @@ parport_register_dev_model(struct parport *port, const char *name,
907 spin_unlock(&port->physport->pardevice_lock); 909 spin_unlock(&port->physport->pardevice_lock);
908 pr_debug("%s: cannot grant exclusive access for device %s\n", 910 pr_debug("%s: cannot grant exclusive access for device %s\n",
909 port->name, name); 911 port->name, name);
910 goto err_put_dev; 912 device_unregister(&par_dev->dev);
913 goto err_put_port;
911 } 914 }
912 port->flags |= PARPORT_FLAG_EXCL; 915 port->flags |= PARPORT_FLAG_EXCL;
913 } 916 }
@@ -938,8 +941,6 @@ parport_register_dev_model(struct parport *port, const char *name,
938 941
939 return par_dev; 942 return par_dev;
940 943
941err_put_dev:
942 put_device(&par_dev->dev);
943err_free_devname: 944err_free_devname:
944 kfree(devname); 945 kfree(devname);
945err_free_par_dev: 946err_free_par_dev:
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index c0e6ede3e27d..6b8dd162f644 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -56,6 +56,7 @@ config PHY_EXYNOS_MIPI_VIDEO
56 56
57config PHY_PXA_28NM_HSIC 57config PHY_PXA_28NM_HSIC
58 tristate "Marvell USB HSIC 28nm PHY Driver" 58 tristate "Marvell USB HSIC 28nm PHY Driver"
59 depends on HAS_IOMEM
59 select GENERIC_PHY 60 select GENERIC_PHY
60 help 61 help
61 Enable this to support Marvell USB HSIC PHY driver for Marvell 62 Enable this to support Marvell USB HSIC PHY driver for Marvell
@@ -66,6 +67,7 @@ config PHY_PXA_28NM_HSIC
66 67
67config PHY_PXA_28NM_USB2 68config PHY_PXA_28NM_USB2
68 tristate "Marvell USB 2.0 28nm PHY Driver" 69 tristate "Marvell USB 2.0 28nm PHY Driver"
70 depends on HAS_IOMEM
69 select GENERIC_PHY 71 select GENERIC_PHY
70 help 72 help
71 Enable this to support Marvell USB 2.0 PHY driver for Marvell 73 Enable this to support Marvell USB 2.0 PHY driver for Marvell
diff --git a/drivers/phy/phy-berlin-usb.c b/drivers/phy/phy-berlin-usb.c
index c6fc95b53083..335e06d66ed9 100644
--- a/drivers/phy/phy-berlin-usb.c
+++ b/drivers/phy/phy-berlin-usb.c
@@ -105,9 +105,9 @@
105 105
106static const u32 phy_berlin_pll_dividers[] = { 106static const u32 phy_berlin_pll_dividers[] = {
107 /* Berlin 2 */ 107 /* Berlin 2 */
108 CLK_REF_DIV(0xc) | FEEDBACK_CLK_DIV(0x54),
109 /* Berlin 2CD */
110 CLK_REF_DIV(0x6) | FEEDBACK_CLK_DIV(0x55), 108 CLK_REF_DIV(0x6) | FEEDBACK_CLK_DIV(0x55),
109 /* Berlin 2CD/Q */
110 CLK_REF_DIV(0xc) | FEEDBACK_CLK_DIV(0x54),
111}; 111};
112 112
113struct phy_berlin_usb_priv { 113struct phy_berlin_usb_priv {
diff --git a/drivers/phy/phy-sun4i-usb.c b/drivers/phy/phy-sun4i-usb.c
index e17c539e4f6f..2dad7e820ff0 100644
--- a/drivers/phy/phy-sun4i-usb.c
+++ b/drivers/phy/phy-sun4i-usb.c
@@ -212,6 +212,7 @@ void sun4i_usb_phy_set_squelch_detect(struct phy *_phy, bool enabled)
212 212
213 sun4i_usb_phy_write(phy, PHY_SQUELCH_DETECT, enabled ? 0 : 2, 2); 213 sun4i_usb_phy_write(phy, PHY_SQUELCH_DETECT, enabled ? 0 : 2, 2);
214} 214}
215EXPORT_SYMBOL_GPL(sun4i_usb_phy_set_squelch_detect);
215 216
216static struct phy_ops sun4i_usb_phy_ops = { 217static struct phy_ops sun4i_usb_phy_ops = {
217 .init = sun4i_usb_phy_init, 218 .init = sun4i_usb_phy_init,
diff --git a/drivers/phy/phy-ti-pipe3.c b/drivers/phy/phy-ti-pipe3.c
index 53f295c1bab1..08020dc2c7c8 100644
--- a/drivers/phy/phy-ti-pipe3.c
+++ b/drivers/phy/phy-ti-pipe3.c
@@ -28,7 +28,8 @@
28#include <linux/delay.h> 28#include <linux/delay.h>
29#include <linux/phy/omap_control_phy.h> 29#include <linux/phy/omap_control_phy.h>
30#include <linux/of_platform.h> 30#include <linux/of_platform.h>
31#include <linux/spinlock.h> 31#include <linux/mfd/syscon.h>
32#include <linux/regmap.h>
32 33
33#define PLL_STATUS 0x00000004 34#define PLL_STATUS 0x00000004
34#define PLL_GO 0x00000008 35#define PLL_GO 0x00000008
@@ -53,6 +54,8 @@
53#define PLL_LOCK 0x2 54#define PLL_LOCK 0x2
54#define PLL_IDLE 0x1 55#define PLL_IDLE 0x1
55 56
57#define SATA_PLL_SOFT_RESET BIT(18)
58
56/* 59/*
57 * This is an Empirical value that works, need to confirm the actual 60 * This is an Empirical value that works, need to confirm the actual
58 * value required for the PIPE3PHY_PLL_CONFIGURATION2.PLL_IDLE status 61 * value required for the PIPE3PHY_PLL_CONFIGURATION2.PLL_IDLE status
@@ -83,10 +86,9 @@ struct ti_pipe3 {
83 struct clk *refclk; 86 struct clk *refclk;
84 struct clk *div_clk; 87 struct clk *div_clk;
85 struct pipe3_dpll_map *dpll_map; 88 struct pipe3_dpll_map *dpll_map;
86 bool enabled; 89 struct regmap *dpll_reset_syscon; /* ctrl. reg. acces */
87 spinlock_t lock; /* serialize clock enable/disable */ 90 unsigned int dpll_reset_reg; /* reg. index within syscon */
88 /* the below flag is needed specifically for SATA */ 91 bool sata_refclk_enabled;
89 bool refclk_enabled;
90}; 92};
91 93
92static struct pipe3_dpll_map dpll_map_usb[] = { 94static struct pipe3_dpll_map dpll_map_usb[] = {
@@ -137,6 +139,9 @@ static struct pipe3_dpll_params *ti_pipe3_get_dpll_params(struct ti_pipe3 *phy)
137 return NULL; 139 return NULL;
138} 140}
139 141
142static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy);
143static void ti_pipe3_disable_clocks(struct ti_pipe3 *phy);
144
140static int ti_pipe3_power_off(struct phy *x) 145static int ti_pipe3_power_off(struct phy *x)
141{ 146{
142 struct ti_pipe3 *phy = phy_get_drvdata(x); 147 struct ti_pipe3 *phy = phy_get_drvdata(x);
@@ -217,6 +222,7 @@ static int ti_pipe3_init(struct phy *x)
217 u32 val; 222 u32 val;
218 int ret = 0; 223 int ret = 0;
219 224
225 ti_pipe3_enable_clocks(phy);
220 /* 226 /*
221 * Set pcie_pcs register to 0x96 for proper functioning of phy 227 * Set pcie_pcs register to 0x96 for proper functioning of phy
222 * as recommended in AM572x TRM SPRUHZ6, section 18.5.2.2, table 228 * as recommended in AM572x TRM SPRUHZ6, section 18.5.2.2, table
@@ -250,33 +256,46 @@ static int ti_pipe3_exit(struct phy *x)
250 u32 val; 256 u32 val;
251 unsigned long timeout; 257 unsigned long timeout;
252 258
253 /* SATA DPLL can't be powered down due to Errata i783 and PCIe 259 /* If dpll_reset_syscon is not present we wont power down SATA DPLL
254 * does not have internal DPLL 260 * due to Errata i783
255 */ 261 */
256 if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata") || 262 if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata") &&
257 of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-pcie")) 263 !phy->dpll_reset_syscon)
258 return 0; 264 return 0;
259 265
260 /* Put DPLL in IDLE mode */ 266 /* PCIe doesn't have internal DPLL */
261 val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_CONFIGURATION2); 267 if (!of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-pcie")) {
262 val |= PLL_IDLE; 268 /* Put DPLL in IDLE mode */
263 ti_pipe3_writel(phy->pll_ctrl_base, PLL_CONFIGURATION2, val); 269 val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_CONFIGURATION2);
270 val |= PLL_IDLE;
271 ti_pipe3_writel(phy->pll_ctrl_base, PLL_CONFIGURATION2, val);
264 272
265 /* wait for LDO and Oscillator to power down */ 273 /* wait for LDO and Oscillator to power down */
266 timeout = jiffies + msecs_to_jiffies(PLL_IDLE_TIME); 274 timeout = jiffies + msecs_to_jiffies(PLL_IDLE_TIME);
267 do { 275 do {
268 cpu_relax(); 276 cpu_relax();
269 val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS); 277 val = ti_pipe3_readl(phy->pll_ctrl_base, PLL_STATUS);
270 if ((val & PLL_TICOPWDN) && (val & PLL_LDOPWDN)) 278 if ((val & PLL_TICOPWDN) && (val & PLL_LDOPWDN))
271 break; 279 break;
272 } while (!time_after(jiffies, timeout)); 280 } while (!time_after(jiffies, timeout));
281
282 if (!(val & PLL_TICOPWDN) || !(val & PLL_LDOPWDN)) {
283 dev_err(phy->dev, "Failed to power down: PLL_STATUS 0x%x\n",
284 val);
285 return -EBUSY;
286 }
287 }
273 288
274 if (!(val & PLL_TICOPWDN) || !(val & PLL_LDOPWDN)) { 289 /* i783: SATA needs control bit toggle after PLL unlock */
275 dev_err(phy->dev, "Failed to power down: PLL_STATUS 0x%x\n", 290 if (of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata")) {
276 val); 291 regmap_update_bits(phy->dpll_reset_syscon, phy->dpll_reset_reg,
277 return -EBUSY; 292 SATA_PLL_SOFT_RESET, SATA_PLL_SOFT_RESET);
293 regmap_update_bits(phy->dpll_reset_syscon, phy->dpll_reset_reg,
294 SATA_PLL_SOFT_RESET, 0);
278 } 295 }
279 296
297 ti_pipe3_disable_clocks(phy);
298
280 return 0; 299 return 0;
281} 300}
282static struct phy_ops ops = { 301static struct phy_ops ops = {
@@ -306,7 +325,6 @@ static int ti_pipe3_probe(struct platform_device *pdev)
306 return -ENOMEM; 325 return -ENOMEM;
307 326
308 phy->dev = &pdev->dev; 327 phy->dev = &pdev->dev;
309 spin_lock_init(&phy->lock);
310 328
311 if (!of_device_is_compatible(node, "ti,phy-pipe3-pcie")) { 329 if (!of_device_is_compatible(node, "ti,phy-pipe3-pcie")) {
312 match = of_match_device(ti_pipe3_id_table, &pdev->dev); 330 match = of_match_device(ti_pipe3_id_table, &pdev->dev);
@@ -350,6 +368,21 @@ static int ti_pipe3_probe(struct platform_device *pdev)
350 } 368 }
351 } else { 369 } else {
352 phy->wkupclk = ERR_PTR(-ENODEV); 370 phy->wkupclk = ERR_PTR(-ENODEV);
371 phy->dpll_reset_syscon = syscon_regmap_lookup_by_phandle(node,
372 "syscon-pllreset");
373 if (IS_ERR(phy->dpll_reset_syscon)) {
374 dev_info(&pdev->dev,
375 "can't get syscon-pllreset, sata dpll won't idle\n");
376 phy->dpll_reset_syscon = NULL;
377 } else {
378 if (of_property_read_u32_index(node,
379 "syscon-pllreset", 1,
380 &phy->dpll_reset_reg)) {
381 dev_err(&pdev->dev,
382 "couldn't get pllreset reg. offset\n");
383 return -EINVAL;
384 }
385 }
353 } 386 }
354 387
355 if (of_device_is_compatible(node, "ti,phy-pipe3-pcie")) { 388 if (of_device_is_compatible(node, "ti,phy-pipe3-pcie")) {
@@ -403,6 +436,16 @@ static int ti_pipe3_probe(struct platform_device *pdev)
403 platform_set_drvdata(pdev, phy); 436 platform_set_drvdata(pdev, phy);
404 pm_runtime_enable(phy->dev); 437 pm_runtime_enable(phy->dev);
405 438
439 /*
440 * Prevent auto-disable of refclk for SATA PHY due to Errata i783
441 */
442 if (of_device_is_compatible(node, "ti,phy-pipe3-sata")) {
443 if (!IS_ERR(phy->refclk)) {
444 clk_prepare_enable(phy->refclk);
445 phy->sata_refclk_enabled = true;
446 }
447 }
448
406 generic_phy = devm_phy_create(phy->dev, NULL, &ops); 449 generic_phy = devm_phy_create(phy->dev, NULL, &ops);
407 if (IS_ERR(generic_phy)) 450 if (IS_ERR(generic_phy))
408 return PTR_ERR(generic_phy); 451 return PTR_ERR(generic_phy);
@@ -413,63 +456,33 @@ static int ti_pipe3_probe(struct platform_device *pdev)
413 if (IS_ERR(phy_provider)) 456 if (IS_ERR(phy_provider))
414 return PTR_ERR(phy_provider); 457 return PTR_ERR(phy_provider);
415 458
416 pm_runtime_get(&pdev->dev);
417
418 return 0; 459 return 0;
419} 460}
420 461
421static int ti_pipe3_remove(struct platform_device *pdev) 462static int ti_pipe3_remove(struct platform_device *pdev)
422{ 463{
423 if (!pm_runtime_suspended(&pdev->dev))
424 pm_runtime_put(&pdev->dev);
425 pm_runtime_disable(&pdev->dev); 464 pm_runtime_disable(&pdev->dev);
426 465
427 return 0; 466 return 0;
428} 467}
429 468
430#ifdef CONFIG_PM 469static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy)
431static int ti_pipe3_enable_refclk(struct ti_pipe3 *phy)
432{ 470{
433 if (!IS_ERR(phy->refclk) && !phy->refclk_enabled) { 471 int ret = 0;
434 int ret;
435 472
473 if (!IS_ERR(phy->refclk)) {
436 ret = clk_prepare_enable(phy->refclk); 474 ret = clk_prepare_enable(phy->refclk);
437 if (ret) { 475 if (ret) {
438 dev_err(phy->dev, "Failed to enable refclk %d\n", ret); 476 dev_err(phy->dev, "Failed to enable refclk %d\n", ret);
439 return ret; 477 return ret;
440 } 478 }
441 phy->refclk_enabled = true;
442 } 479 }
443 480
444 return 0;
445}
446
447static void ti_pipe3_disable_refclk(struct ti_pipe3 *phy)
448{
449 if (!IS_ERR(phy->refclk))
450 clk_disable_unprepare(phy->refclk);
451
452 phy->refclk_enabled = false;
453}
454
455static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy)
456{
457 int ret = 0;
458 unsigned long flags;
459
460 spin_lock_irqsave(&phy->lock, flags);
461 if (phy->enabled)
462 goto err1;
463
464 ret = ti_pipe3_enable_refclk(phy);
465 if (ret)
466 goto err1;
467
468 if (!IS_ERR(phy->wkupclk)) { 481 if (!IS_ERR(phy->wkupclk)) {
469 ret = clk_prepare_enable(phy->wkupclk); 482 ret = clk_prepare_enable(phy->wkupclk);
470 if (ret) { 483 if (ret) {
471 dev_err(phy->dev, "Failed to enable wkupclk %d\n", ret); 484 dev_err(phy->dev, "Failed to enable wkupclk %d\n", ret);
472 goto err2; 485 goto disable_refclk;
473 } 486 }
474 } 487 }
475 488
@@ -477,96 +490,43 @@ static int ti_pipe3_enable_clocks(struct ti_pipe3 *phy)
477 ret = clk_prepare_enable(phy->div_clk); 490 ret = clk_prepare_enable(phy->div_clk);
478 if (ret) { 491 if (ret) {
479 dev_err(phy->dev, "Failed to enable div_clk %d\n", ret); 492 dev_err(phy->dev, "Failed to enable div_clk %d\n", ret);
480 goto err3; 493 goto disable_wkupclk;
481 } 494 }
482 } 495 }
483 496
484 phy->enabled = true;
485 spin_unlock_irqrestore(&phy->lock, flags);
486 return 0; 497 return 0;
487 498
488err3: 499disable_wkupclk:
489 if (!IS_ERR(phy->wkupclk)) 500 if (!IS_ERR(phy->wkupclk))
490 clk_disable_unprepare(phy->wkupclk); 501 clk_disable_unprepare(phy->wkupclk);
491 502
492err2: 503disable_refclk:
493 if (!IS_ERR(phy->refclk)) 504 if (!IS_ERR(phy->refclk))
494 clk_disable_unprepare(phy->refclk); 505 clk_disable_unprepare(phy->refclk);
495 506
496 ti_pipe3_disable_refclk(phy);
497err1:
498 spin_unlock_irqrestore(&phy->lock, flags);
499 return ret; 507 return ret;
500} 508}
501 509
502static void ti_pipe3_disable_clocks(struct ti_pipe3 *phy) 510static void ti_pipe3_disable_clocks(struct ti_pipe3 *phy)
503{ 511{
504 unsigned long flags;
505
506 spin_lock_irqsave(&phy->lock, flags);
507 if (!phy->enabled) {
508 spin_unlock_irqrestore(&phy->lock, flags);
509 return;
510 }
511
512 if (!IS_ERR(phy->wkupclk)) 512 if (!IS_ERR(phy->wkupclk))
513 clk_disable_unprepare(phy->wkupclk); 513 clk_disable_unprepare(phy->wkupclk);
514 /* Don't disable refclk for SATA PHY due to Errata i783 */ 514 if (!IS_ERR(phy->refclk)) {
515 if (!of_device_is_compatible(phy->dev->of_node, "ti,phy-pipe3-sata")) 515 clk_disable_unprepare(phy->refclk);
516 ti_pipe3_disable_refclk(phy); 516 /*
517 * SATA refclk needs an additional disable as we left it
518 * on in probe to avoid Errata i783
519 */
520 if (phy->sata_refclk_enabled) {
521 clk_disable_unprepare(phy->refclk);
522 phy->sata_refclk_enabled = false;
523 }
524 }
525
517 if (!IS_ERR(phy->div_clk)) 526 if (!IS_ERR(phy->div_clk))
518 clk_disable_unprepare(phy->div_clk); 527 clk_disable_unprepare(phy->div_clk);
519 phy->enabled = false;
520 spin_unlock_irqrestore(&phy->lock, flags);
521} 528}
522 529
523static int ti_pipe3_runtime_suspend(struct device *dev)
524{
525 struct ti_pipe3 *phy = dev_get_drvdata(dev);
526
527 ti_pipe3_disable_clocks(phy);
528 return 0;
529}
530
531static int ti_pipe3_runtime_resume(struct device *dev)
532{
533 struct ti_pipe3 *phy = dev_get_drvdata(dev);
534 int ret = 0;
535
536 ret = ti_pipe3_enable_clocks(phy);
537 return ret;
538}
539
540static int ti_pipe3_suspend(struct device *dev)
541{
542 struct ti_pipe3 *phy = dev_get_drvdata(dev);
543
544 ti_pipe3_disable_clocks(phy);
545 return 0;
546}
547
548static int ti_pipe3_resume(struct device *dev)
549{
550 struct ti_pipe3 *phy = dev_get_drvdata(dev);
551 int ret;
552
553 ret = ti_pipe3_enable_clocks(phy);
554 if (ret)
555 return ret;
556
557 pm_runtime_disable(dev);
558 pm_runtime_set_active(dev);
559 pm_runtime_enable(dev);
560 return 0;
561}
562#endif
563
564static const struct dev_pm_ops ti_pipe3_pm_ops = {
565 SET_RUNTIME_PM_OPS(ti_pipe3_runtime_suspend,
566 ti_pipe3_runtime_resume, NULL)
567 SET_SYSTEM_SLEEP_PM_OPS(ti_pipe3_suspend, ti_pipe3_resume)
568};
569
570static const struct of_device_id ti_pipe3_id_table[] = { 530static const struct of_device_id ti_pipe3_id_table[] = {
571 { 531 {
572 .compatible = "ti,phy-usb3", 532 .compatible = "ti,phy-usb3",
@@ -592,7 +552,6 @@ static struct platform_driver ti_pipe3_driver = {
592 .remove = ti_pipe3_remove, 552 .remove = ti_pipe3_remove,
593 .driver = { 553 .driver = {
594 .name = "ti-pipe3", 554 .name = "ti-pipe3",
595 .pm = &ti_pipe3_pm_ops,
596 .of_match_table = ti_pipe3_id_table, 555 .of_match_table = ti_pipe3_id_table,
597 }, 556 },
598}; 557};
diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index efcf2a2b3975..6177315ab74e 100644
--- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -473,6 +473,8 @@ static void bcm2835_gpio_irq_disable(struct irq_data *data)
473 473
474 spin_lock_irqsave(&pc->irq_lock[bank], flags); 474 spin_lock_irqsave(&pc->irq_lock[bank], flags);
475 bcm2835_gpio_irq_config(pc, gpio, false); 475 bcm2835_gpio_irq_config(pc, gpio, false);
476 /* Clear events that were latched prior to clearing event sources */
477 bcm2835_gpio_set_bit(pc, GPEDS0, gpio);
476 clear_bit(offset, &pc->enabled_irq_map[bank]); 478 clear_bit(offset, &pc->enabled_irq_map[bank]);
477 spin_unlock_irqrestore(&pc->irq_lock[bank], flags); 479 spin_unlock_irqrestore(&pc->irq_lock[bank], flags);
478} 480}
diff --git a/drivers/pinctrl/freescale/pinctrl-imx1-core.c b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
index 5fd4437cee15..88a7fac11bd4 100644
--- a/drivers/pinctrl/freescale/pinctrl-imx1-core.c
+++ b/drivers/pinctrl/freescale/pinctrl-imx1-core.c
@@ -403,14 +403,13 @@ static int imx1_pinconf_set(struct pinctrl_dev *pctldev,
403 unsigned num_configs) 403 unsigned num_configs)
404{ 404{
405 struct imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev); 405 struct imx1_pinctrl *ipctl = pinctrl_dev_get_drvdata(pctldev);
406 const struct imx1_pinctrl_soc_info *info = ipctl->info;
407 int i; 406 int i;
408 407
409 for (i = 0; i != num_configs; ++i) { 408 for (i = 0; i != num_configs; ++i) {
410 imx1_write_bit(ipctl, pin_id, configs[i] & 0x01, MX1_PUEN); 409 imx1_write_bit(ipctl, pin_id, configs[i] & 0x01, MX1_PUEN);
411 410
412 dev_dbg(ipctl->dev, "pinconf set pullup pin %s\n", 411 dev_dbg(ipctl->dev, "pinconf set pullup pin %s\n",
413 info->pins[pin_id].name); 412 pin_desc_get(pctldev, pin_id)->name);
414 } 413 }
415 414
416 return 0; 415 return 0;
diff --git a/drivers/pinctrl/nomadik/pinctrl-abx500.c b/drivers/pinctrl/nomadik/pinctrl-abx500.c
index 557d0f2a3031..97681fac082e 100644
--- a/drivers/pinctrl/nomadik/pinctrl-abx500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-abx500.c
@@ -787,7 +787,6 @@ static const struct pinmux_ops abx500_pinmux_ops = {
787 .set_mux = abx500_pmx_set, 787 .set_mux = abx500_pmx_set,
788 .gpio_request_enable = abx500_gpio_request_enable, 788 .gpio_request_enable = abx500_gpio_request_enable,
789 .gpio_disable_free = abx500_gpio_disable_free, 789 .gpio_disable_free = abx500_gpio_disable_free,
790 .strict = true,
791}; 790};
792 791
793static int abx500_get_groups_cnt(struct pinctrl_dev *pctldev) 792static int abx500_get_groups_cnt(struct pinctrl_dev *pctldev)
diff --git a/drivers/pinctrl/pinctrl-lpc18xx.c b/drivers/pinctrl/pinctrl-lpc18xx.c
index ef0b697639a7..347c763a6a78 100644
--- a/drivers/pinctrl/pinctrl-lpc18xx.c
+++ b/drivers/pinctrl/pinctrl-lpc18xx.c
@@ -823,7 +823,7 @@ static int lpc18xx_pconf_set_i2c0(struct pinctrl_dev *pctldev,
823 break; 823 break;
824 824
825 case PIN_CONFIG_INPUT_SCHMITT_ENABLE: 825 case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
826 if (param) 826 if (param_val)
827 *reg &= ~(LPC18XX_SCU_I2C0_ZIF << shift); 827 *reg &= ~(LPC18XX_SCU_I2C0_ZIF << shift);
828 else 828 else
829 *reg |= (LPC18XX_SCU_I2C0_ZIF << shift); 829 *reg |= (LPC18XX_SCU_I2C0_ZIF << shift);
@@ -876,7 +876,7 @@ static int lpc18xx_pconf_set_pin(struct pinctrl_dev *pctldev,
876 break; 876 break;
877 877
878 case PIN_CONFIG_INPUT_SCHMITT_ENABLE: 878 case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
879 if (param) 879 if (param_val)
880 *reg &= ~LPC18XX_SCU_PIN_ZIF; 880 *reg &= ~LPC18XX_SCU_PIN_ZIF;
881 else 881 else
882 *reg |= LPC18XX_SCU_PIN_ZIF; 882 *reg |= LPC18XX_SCU_PIN_ZIF;
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index b2de09d3b1a0..0b8d480171a3 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -1760,7 +1760,8 @@ static int pcs_irq_init_chained_handler(struct pcs_device *pcs,
1760 int res; 1760 int res;
1761 1761
1762 res = request_irq(pcs_soc->irq, pcs_irq_handler, 1762 res = request_irq(pcs_soc->irq, pcs_irq_handler,
1763 IRQF_SHARED | IRQF_NO_SUSPEND, 1763 IRQF_SHARED | IRQF_NO_SUSPEND |
1764 IRQF_NO_THREAD,
1764 name, pcs_soc); 1765 name, pcs_soc);
1765 if (res) { 1766 if (res) {
1766 pcs_soc->irq = -1; 1767 pcs_soc->irq = -1;
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index 3dd5a3b2ac62..c760bf43d116 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -33,11 +33,6 @@
33#include "../core.h" 33#include "../core.h"
34#include "pinctrl-samsung.h" 34#include "pinctrl-samsung.h"
35 35
36#define GROUP_SUFFIX "-grp"
37#define GSUFFIX_LEN sizeof(GROUP_SUFFIX)
38#define FUNCTION_SUFFIX "-mux"
39#define FSUFFIX_LEN sizeof(FUNCTION_SUFFIX)
40
41/* list of all possible config options supported */ 36/* list of all possible config options supported */
42static struct pin_config { 37static struct pin_config {
43 const char *property; 38 const char *property;
diff --git a/drivers/pinctrl/sh-pfc/sh_pfc.h b/drivers/pinctrl/sh-pfc/sh_pfc.h
index c7508d5f6886..0874cfee6889 100644
--- a/drivers/pinctrl/sh-pfc/sh_pfc.h
+++ b/drivers/pinctrl/sh-pfc/sh_pfc.h
@@ -224,7 +224,7 @@ struct sh_pfc_soc_info {
224 224
225/* PINMUX_GPIO_GP_ALL - Expand to a list of sh_pfc_pin entries */ 225/* PINMUX_GPIO_GP_ALL - Expand to a list of sh_pfc_pin entries */
226#define _GP_GPIO(bank, _pin, _name, sfx) \ 226#define _GP_GPIO(bank, _pin, _name, sfx) \
227 [(bank * 32) + _pin] = { \ 227 { \
228 .pin = (bank * 32) + _pin, \ 228 .pin = (bank * 32) + _pin, \
229 .name = __stringify(_name), \ 229 .name = __stringify(_name), \
230 .enum_id = _name##_DATA, \ 230 .enum_id = _name##_DATA, \
diff --git a/drivers/platform/chrome/Kconfig b/drivers/platform/chrome/Kconfig
index cb1329919527..3271cd1abe7c 100644
--- a/drivers/platform/chrome/Kconfig
+++ b/drivers/platform/chrome/Kconfig
@@ -4,7 +4,6 @@
4 4
5menuconfig CHROME_PLATFORMS 5menuconfig CHROME_PLATFORMS
6 bool "Platform support for Chrome hardware" 6 bool "Platform support for Chrome hardware"
7 depends on X86 || ARM
8 ---help--- 7 ---help---
9 Say Y here to get to see options for platform support for 8 Say Y here to get to see options for platform support for
10 various Chromebooks and Chromeboxes. This option alone does 9 various Chromebooks and Chromeboxes. This option alone does
diff --git a/drivers/regulator/88pm800.c b/drivers/regulator/88pm800.c
index 832932bdc977..7fd4f511d78f 100644
--- a/drivers/regulator/88pm800.c
+++ b/drivers/regulator/88pm800.c
@@ -130,7 +130,7 @@ struct pm800_regulators {
130 .owner = THIS_MODULE, \ 130 .owner = THIS_MODULE, \
131 .n_voltages = ARRAY_SIZE(ldo_volt_table), \ 131 .n_voltages = ARRAY_SIZE(ldo_volt_table), \
132 .vsel_reg = PM800_##vreg##_VOUT, \ 132 .vsel_reg = PM800_##vreg##_VOUT, \
133 .vsel_mask = 0x1f, \ 133 .vsel_mask = 0xf, \
134 .enable_reg = PM800_##ereg, \ 134 .enable_reg = PM800_##ereg, \
135 .enable_mask = 1 << (ebit), \ 135 .enable_mask = 1 << (ebit), \
136 .volt_table = ldo_volt_table, \ 136 .volt_table = ldo_volt_table, \
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index c9f72019bd68..78387a6cbae5 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -109,6 +109,7 @@ static int _regulator_do_set_voltage(struct regulator_dev *rdev,
109static struct regulator *create_regulator(struct regulator_dev *rdev, 109static struct regulator *create_regulator(struct regulator_dev *rdev,
110 struct device *dev, 110 struct device *dev,
111 const char *supply_name); 111 const char *supply_name);
112static void _regulator_put(struct regulator *regulator);
112 113
113static const char *rdev_get_name(struct regulator_dev *rdev) 114static const char *rdev_get_name(struct regulator_dev *rdev)
114{ 115{
@@ -1105,6 +1106,9 @@ static int set_supply(struct regulator_dev *rdev,
1105 1106
1106 rdev_info(rdev, "supplied by %s\n", rdev_get_name(supply_rdev)); 1107 rdev_info(rdev, "supplied by %s\n", rdev_get_name(supply_rdev));
1107 1108
1109 if (!try_module_get(supply_rdev->owner))
1110 return -ENODEV;
1111
1108 rdev->supply = create_regulator(supply_rdev, &rdev->dev, "SUPPLY"); 1112 rdev->supply = create_regulator(supply_rdev, &rdev->dev, "SUPPLY");
1109 if (rdev->supply == NULL) { 1113 if (rdev->supply == NULL) {
1110 err = -ENOMEM; 1114 err = -ENOMEM;
@@ -1381,9 +1385,13 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
1381 } 1385 }
1382 1386
1383 if (!r) { 1387 if (!r) {
1384 dev_err(dev, "Failed to resolve %s-supply for %s\n", 1388 if (have_full_constraints()) {
1385 rdev->supply_name, rdev->desc->name); 1389 r = dummy_regulator_rdev;
1386 return -EPROBE_DEFER; 1390 } else {
1391 dev_err(dev, "Failed to resolve %s-supply for %s\n",
1392 rdev->supply_name, rdev->desc->name);
1393 return -EPROBE_DEFER;
1394 }
1387 } 1395 }
1388 1396
1389 /* Recursively resolve the supply of the supply */ 1397 /* Recursively resolve the supply of the supply */
@@ -1398,8 +1406,11 @@ static int regulator_resolve_supply(struct regulator_dev *rdev)
1398 /* Cascade always-on state to supply */ 1406 /* Cascade always-on state to supply */
1399 if (_regulator_is_enabled(rdev)) { 1407 if (_regulator_is_enabled(rdev)) {
1400 ret = regulator_enable(rdev->supply); 1408 ret = regulator_enable(rdev->supply);
1401 if (ret < 0) 1409 if (ret < 0) {
1410 if (rdev->supply)
1411 _regulator_put(rdev->supply);
1402 return ret; 1412 return ret;
1413 }
1403 } 1414 }
1404 1415
1405 return 0; 1416 return 0;
diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
index 6f2bdad8b4d8..e94ddcf97722 100644
--- a/drivers/regulator/max8973-regulator.c
+++ b/drivers/regulator/max8973-regulator.c
@@ -450,7 +450,7 @@ static struct max8973_regulator_platform_data *max8973_parse_dt(
450 pdata->control_flags |= MAX8973_CONTROL_FREQ_SHIFT_9PER_ENABLE; 450 pdata->control_flags |= MAX8973_CONTROL_FREQ_SHIFT_9PER_ENABLE;
451 451
452 if (of_property_read_bool(np, "maxim,enable-bias-control")) 452 if (of_property_read_bool(np, "maxim,enable-bias-control"))
453 pdata->control_flags |= MAX8973_BIAS_ENABLE; 453 pdata->control_flags |= MAX8973_CONTROL_BIAS_ENABLE;
454 454
455 return pdata; 455 return pdata;
456} 456}
diff --git a/drivers/regulator/s2mps11.c b/drivers/regulator/s2mps11.c
index 326ffb553371..72fc3c32db49 100644
--- a/drivers/regulator/s2mps11.c
+++ b/drivers/regulator/s2mps11.c
@@ -34,6 +34,8 @@
34#include <linux/mfd/samsung/s2mps14.h> 34#include <linux/mfd/samsung/s2mps14.h>
35#include <linux/mfd/samsung/s2mpu02.h> 35#include <linux/mfd/samsung/s2mpu02.h>
36 36
37/* The highest number of possible regulators for supported devices. */
38#define S2MPS_REGULATOR_MAX S2MPS13_REGULATOR_MAX
37struct s2mps11_info { 39struct s2mps11_info {
38 unsigned int rdev_num; 40 unsigned int rdev_num;
39 int ramp_delay2; 41 int ramp_delay2;
@@ -49,7 +51,7 @@ struct s2mps11_info {
49 * One bit for each S2MPS13/S2MPS14/S2MPU02 regulator whether 51 * One bit for each S2MPS13/S2MPS14/S2MPU02 regulator whether
50 * the suspend mode was enabled. 52 * the suspend mode was enabled.
51 */ 53 */
52 unsigned long long s2mps14_suspend_state:50; 54 DECLARE_BITMAP(suspend_state, S2MPS_REGULATOR_MAX);
53 55
54 /* Array of size rdev_num with GPIO-s for external sleep control */ 56 /* Array of size rdev_num with GPIO-s for external sleep control */
55 int *ext_control_gpio; 57 int *ext_control_gpio;
@@ -500,7 +502,7 @@ static int s2mps14_regulator_enable(struct regulator_dev *rdev)
500 switch (s2mps11->dev_type) { 502 switch (s2mps11->dev_type) {
501 case S2MPS13X: 503 case S2MPS13X:
502 case S2MPS14X: 504 case S2MPS14X:
503 if (s2mps11->s2mps14_suspend_state & (1 << rdev_get_id(rdev))) 505 if (test_bit(rdev_get_id(rdev), s2mps11->suspend_state))
504 val = S2MPS14_ENABLE_SUSPEND; 506 val = S2MPS14_ENABLE_SUSPEND;
505 else if (gpio_is_valid(s2mps11->ext_control_gpio[rdev_get_id(rdev)])) 507 else if (gpio_is_valid(s2mps11->ext_control_gpio[rdev_get_id(rdev)]))
506 val = S2MPS14_ENABLE_EXT_CONTROL; 508 val = S2MPS14_ENABLE_EXT_CONTROL;
@@ -508,7 +510,7 @@ static int s2mps14_regulator_enable(struct regulator_dev *rdev)
508 val = rdev->desc->enable_mask; 510 val = rdev->desc->enable_mask;
509 break; 511 break;
510 case S2MPU02: 512 case S2MPU02:
511 if (s2mps11->s2mps14_suspend_state & (1 << rdev_get_id(rdev))) 513 if (test_bit(rdev_get_id(rdev), s2mps11->suspend_state))
512 val = S2MPU02_ENABLE_SUSPEND; 514 val = S2MPU02_ENABLE_SUSPEND;
513 else 515 else
514 val = rdev->desc->enable_mask; 516 val = rdev->desc->enable_mask;
@@ -562,7 +564,7 @@ static int s2mps14_regulator_set_suspend_disable(struct regulator_dev *rdev)
562 if (ret < 0) 564 if (ret < 0)
563 return ret; 565 return ret;
564 566
565 s2mps11->s2mps14_suspend_state |= (1 << rdev_get_id(rdev)); 567 set_bit(rdev_get_id(rdev), s2mps11->suspend_state);
566 /* 568 /*
567 * Don't enable suspend mode if regulator is already disabled because 569 * Don't enable suspend mode if regulator is already disabled because
568 * this would effectively for a short time turn on the regulator after 570 * this would effectively for a short time turn on the regulator after
@@ -960,18 +962,22 @@ static int s2mps11_pmic_probe(struct platform_device *pdev)
960 case S2MPS11X: 962 case S2MPS11X:
961 s2mps11->rdev_num = ARRAY_SIZE(s2mps11_regulators); 963 s2mps11->rdev_num = ARRAY_SIZE(s2mps11_regulators);
962 regulators = s2mps11_regulators; 964 regulators = s2mps11_regulators;
965 BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num);
963 break; 966 break;
964 case S2MPS13X: 967 case S2MPS13X:
965 s2mps11->rdev_num = ARRAY_SIZE(s2mps13_regulators); 968 s2mps11->rdev_num = ARRAY_SIZE(s2mps13_regulators);
966 regulators = s2mps13_regulators; 969 regulators = s2mps13_regulators;
970 BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num);
967 break; 971 break;
968 case S2MPS14X: 972 case S2MPS14X:
969 s2mps11->rdev_num = ARRAY_SIZE(s2mps14_regulators); 973 s2mps11->rdev_num = ARRAY_SIZE(s2mps14_regulators);
970 regulators = s2mps14_regulators; 974 regulators = s2mps14_regulators;
975 BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num);
971 break; 976 break;
972 case S2MPU02: 977 case S2MPU02:
973 s2mps11->rdev_num = ARRAY_SIZE(s2mpu02_regulators); 978 s2mps11->rdev_num = ARRAY_SIZE(s2mpu02_regulators);
974 regulators = s2mpu02_regulators; 979 regulators = s2mpu02_regulators;
980 BUILD_BUG_ON(S2MPS_REGULATOR_MAX < s2mps11->rdev_num);
975 break; 981 break;
976 default: 982 default:
977 dev_err(&pdev->dev, "Invalid device type: %u\n", 983 dev_err(&pdev->dev, "Invalid device type: %u\n",
diff --git a/drivers/s390/Makefile b/drivers/s390/Makefile
index 95bccfd3f169..e5225ad9c5b1 100644
--- a/drivers/s390/Makefile
+++ b/drivers/s390/Makefile
@@ -2,7 +2,7 @@
2# Makefile for the S/390 specific device drivers 2# Makefile for the S/390 specific device drivers
3# 3#
4 4
5obj-y += cio/ block/ char/ crypto/ net/ scsi/ kvm/ 5obj-y += cio/ block/ char/ crypto/ net/ scsi/ virtio/
6 6
7drivers-y += drivers/s390/built-in.o 7drivers-y += drivers/s390/built-in.o
8 8
diff --git a/drivers/s390/kvm/Makefile b/drivers/s390/virtio/Makefile
index 241891a57caf..241891a57caf 100644
--- a/drivers/s390/kvm/Makefile
+++ b/drivers/s390/virtio/Makefile
diff --git a/drivers/s390/kvm/kvm_virtio.c b/drivers/s390/virtio/kvm_virtio.c
index 53fb975c404b..53fb975c404b 100644
--- a/drivers/s390/kvm/kvm_virtio.c
+++ b/drivers/s390/virtio/kvm_virtio.c
diff --git a/drivers/s390/kvm/virtio_ccw.c b/drivers/s390/virtio/virtio_ccw.c
index f8d8fdb26b72..f8d8fdb26b72 100644
--- a/drivers/s390/kvm/virtio_ccw.c
+++ b/drivers/s390/virtio/virtio_ccw.c
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index 882744852aac..a9aa38903efe 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -599,9 +599,10 @@ static void ipr_trc_hook(struct ipr_cmnd *ipr_cmd,
599{ 599{
600 struct ipr_trace_entry *trace_entry; 600 struct ipr_trace_entry *trace_entry;
601 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 601 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
602 unsigned int trace_index;
602 603
603 trace_entry = &ioa_cfg->trace[atomic_add_return 604 trace_index = atomic_add_return(1, &ioa_cfg->trace_index) & IPR_TRACE_INDEX_MASK;
604 (1, &ioa_cfg->trace_index)%IPR_NUM_TRACE_ENTRIES]; 605 trace_entry = &ioa_cfg->trace[trace_index];
605 trace_entry->time = jiffies; 606 trace_entry->time = jiffies;
606 trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0]; 607 trace_entry->op_code = ipr_cmd->ioarcb.cmd_pkt.cdb[0];
607 trace_entry->type = type; 608 trace_entry->type = type;
@@ -1051,10 +1052,15 @@ static void ipr_send_blocking_cmd(struct ipr_cmnd *ipr_cmd,
1051 1052
1052static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg) 1053static int ipr_get_hrrq_index(struct ipr_ioa_cfg *ioa_cfg)
1053{ 1054{
1055 unsigned int hrrq;
1056
1054 if (ioa_cfg->hrrq_num == 1) 1057 if (ioa_cfg->hrrq_num == 1)
1055 return 0; 1058 hrrq = 0;
1056 else 1059 else {
1057 return (atomic_add_return(1, &ioa_cfg->hrrq_index) % (ioa_cfg->hrrq_num - 1)) + 1; 1060 hrrq = atomic_add_return(1, &ioa_cfg->hrrq_index);
1061 hrrq = (hrrq % (ioa_cfg->hrrq_num - 1)) + 1;
1062 }
1063 return hrrq;
1058} 1064}
1059 1065
1060/** 1066/**
@@ -6263,21 +6269,23 @@ static void ipr_scsi_done(struct ipr_cmnd *ipr_cmd)
6263 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg; 6269 struct ipr_ioa_cfg *ioa_cfg = ipr_cmd->ioa_cfg;
6264 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd; 6270 struct scsi_cmnd *scsi_cmd = ipr_cmd->scsi_cmd;
6265 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc); 6271 u32 ioasc = be32_to_cpu(ipr_cmd->s.ioasa.hdr.ioasc);
6266 unsigned long hrrq_flags; 6272 unsigned long lock_flags;
6267 6273
6268 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len)); 6274 scsi_set_resid(scsi_cmd, be32_to_cpu(ipr_cmd->s.ioasa.hdr.residual_data_len));
6269 6275
6270 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) { 6276 if (likely(IPR_IOASC_SENSE_KEY(ioasc) == 0)) {
6271 scsi_dma_unmap(scsi_cmd); 6277 scsi_dma_unmap(scsi_cmd);
6272 6278
6273 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags); 6279 spin_lock_irqsave(ipr_cmd->hrrq->lock, lock_flags);
6274 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q); 6280 list_add_tail(&ipr_cmd->queue, &ipr_cmd->hrrq->hrrq_free_q);
6275 scsi_cmd->scsi_done(scsi_cmd); 6281 scsi_cmd->scsi_done(scsi_cmd);
6276 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags); 6282 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, lock_flags);
6277 } else { 6283 } else {
6278 spin_lock_irqsave(ipr_cmd->hrrq->lock, hrrq_flags); 6284 spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
6285 spin_lock(&ipr_cmd->hrrq->_lock);
6279 ipr_erp_start(ioa_cfg, ipr_cmd); 6286 ipr_erp_start(ioa_cfg, ipr_cmd);
6280 spin_unlock_irqrestore(ipr_cmd->hrrq->lock, hrrq_flags); 6287 spin_unlock(&ipr_cmd->hrrq->_lock);
6288 spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
6281 } 6289 }
6282} 6290}
6283 6291
diff --git a/drivers/scsi/ipr.h b/drivers/scsi/ipr.h
index 73790a1d0969..6b97ee45c7b4 100644
--- a/drivers/scsi/ipr.h
+++ b/drivers/scsi/ipr.h
@@ -1486,6 +1486,7 @@ struct ipr_ioa_cfg {
1486 1486
1487#define IPR_NUM_TRACE_INDEX_BITS 8 1487#define IPR_NUM_TRACE_INDEX_BITS 8
1488#define IPR_NUM_TRACE_ENTRIES (1 << IPR_NUM_TRACE_INDEX_BITS) 1488#define IPR_NUM_TRACE_ENTRIES (1 << IPR_NUM_TRACE_INDEX_BITS)
1489#define IPR_TRACE_INDEX_MASK (IPR_NUM_TRACE_ENTRIES - 1)
1489#define IPR_TRACE_SIZE (sizeof(struct ipr_trace_entry) * IPR_NUM_TRACE_ENTRIES) 1490#define IPR_TRACE_SIZE (sizeof(struct ipr_trace_entry) * IPR_NUM_TRACE_ENTRIES)
1490 char trace_start[8]; 1491 char trace_start[8];
1491#define IPR_TRACE_START_LABEL "trace" 1492#define IPR_TRACE_START_LABEL "trace"
diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
index 1b3a09473452..30f9ef0c0d4f 100644
--- a/drivers/scsi/libfc/fc_exch.c
+++ b/drivers/scsi/libfc/fc_exch.c
@@ -733,8 +733,6 @@ static bool fc_invoke_resp(struct fc_exch *ep, struct fc_seq *sp,
733 if (resp) { 733 if (resp) {
734 resp(sp, fp, arg); 734 resp(sp, fp, arg);
735 res = true; 735 res = true;
736 } else if (!IS_ERR(fp)) {
737 fc_frame_free(fp);
738 } 736 }
739 737
740 spin_lock_bh(&ep->ex_lock); 738 spin_lock_bh(&ep->ex_lock);
@@ -1596,7 +1594,8 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
1596 * If new exch resp handler is valid then call that 1594 * If new exch resp handler is valid then call that
1597 * first. 1595 * first.
1598 */ 1596 */
1599 fc_invoke_resp(ep, sp, fp); 1597 if (!fc_invoke_resp(ep, sp, fp))
1598 fc_frame_free(fp);
1600 1599
1601 fc_exch_release(ep); 1600 fc_exch_release(ep);
1602 return; 1601 return;
@@ -1695,7 +1694,8 @@ static void fc_exch_abts_resp(struct fc_exch *ep, struct fc_frame *fp)
1695 fc_exch_hold(ep); 1694 fc_exch_hold(ep);
1696 if (!rc) 1695 if (!rc)
1697 fc_exch_delete(ep); 1696 fc_exch_delete(ep);
1698 fc_invoke_resp(ep, sp, fp); 1697 if (!fc_invoke_resp(ep, sp, fp))
1698 fc_frame_free(fp);
1699 if (has_rec) 1699 if (has_rec)
1700 fc_exch_timer_set(ep, ep->r_a_tov); 1700 fc_exch_timer_set(ep, ep->r_a_tov);
1701 fc_exch_release(ep); 1701 fc_exch_release(ep);
diff --git a/drivers/scsi/libfc/fc_fcp.c b/drivers/scsi/libfc/fc_fcp.c
index c6795941b45d..2d5909c4685c 100644
--- a/drivers/scsi/libfc/fc_fcp.c
+++ b/drivers/scsi/libfc/fc_fcp.c
@@ -1039,11 +1039,26 @@ restart:
1039 fc_fcp_pkt_hold(fsp); 1039 fc_fcp_pkt_hold(fsp);
1040 spin_unlock_irqrestore(&si->scsi_queue_lock, flags); 1040 spin_unlock_irqrestore(&si->scsi_queue_lock, flags);
1041 1041
1042 if (!fc_fcp_lock_pkt(fsp)) { 1042 spin_lock_bh(&fsp->scsi_pkt_lock);
1043 if (!(fsp->state & FC_SRB_COMPL)) {
1044 fsp->state |= FC_SRB_COMPL;
1045 /*
1046 * TODO: dropping scsi_pkt_lock and then reacquiring
1047 * again around fc_fcp_cleanup_cmd() is required,
1048 * since fc_fcp_cleanup_cmd() calls into
1049 * fc_seq_set_resp() and that func preempts cpu using
1050 * schedule. May be schedule and related code should be
1051 * removed instead of unlocking here to avoid scheduling
1052 * while atomic bug.
1053 */
1054 spin_unlock_bh(&fsp->scsi_pkt_lock);
1055
1043 fc_fcp_cleanup_cmd(fsp, error); 1056 fc_fcp_cleanup_cmd(fsp, error);
1057
1058 spin_lock_bh(&fsp->scsi_pkt_lock);
1044 fc_io_compl(fsp); 1059 fc_io_compl(fsp);
1045 fc_fcp_unlock_pkt(fsp);
1046 } 1060 }
1061 spin_unlock_bh(&fsp->scsi_pkt_lock);
1047 1062
1048 fc_fcp_pkt_release(fsp); 1063 fc_fcp_pkt_release(fsp);
1049 spin_lock_irqsave(&si->scsi_queue_lock, flags); 1064 spin_lock_irqsave(&si->scsi_queue_lock, flags);
diff --git a/drivers/scsi/libiscsi.c b/drivers/scsi/libiscsi.c
index 8053f24f0349..98d9bb6ff725 100644
--- a/drivers/scsi/libiscsi.c
+++ b/drivers/scsi/libiscsi.c
@@ -2941,10 +2941,10 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
2941{ 2941{
2942 struct iscsi_conn *conn = cls_conn->dd_data; 2942 struct iscsi_conn *conn = cls_conn->dd_data;
2943 struct iscsi_session *session = conn->session; 2943 struct iscsi_session *session = conn->session;
2944 unsigned long flags;
2945 2944
2946 del_timer_sync(&conn->transport_timer); 2945 del_timer_sync(&conn->transport_timer);
2947 2946
2947 mutex_lock(&session->eh_mutex);
2948 spin_lock_bh(&session->frwd_lock); 2948 spin_lock_bh(&session->frwd_lock);
2949 conn->c_stage = ISCSI_CONN_CLEANUP_WAIT; 2949 conn->c_stage = ISCSI_CONN_CLEANUP_WAIT;
2950 if (session->leadconn == conn) { 2950 if (session->leadconn == conn) {
@@ -2956,28 +2956,6 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
2956 } 2956 }
2957 spin_unlock_bh(&session->frwd_lock); 2957 spin_unlock_bh(&session->frwd_lock);
2958 2958
2959 /*
2960 * Block until all in-progress commands for this connection
2961 * time out or fail.
2962 */
2963 for (;;) {
2964 spin_lock_irqsave(session->host->host_lock, flags);
2965 if (!atomic_read(&session->host->host_busy)) { /* OK for ERL == 0 */
2966 spin_unlock_irqrestore(session->host->host_lock, flags);
2967 break;
2968 }
2969 spin_unlock_irqrestore(session->host->host_lock, flags);
2970 msleep_interruptible(500);
2971 iscsi_conn_printk(KERN_INFO, conn, "iscsi conn_destroy(): "
2972 "host_busy %d host_failed %d\n",
2973 atomic_read(&session->host->host_busy),
2974 session->host->host_failed);
2975 /*
2976 * force eh_abort() to unblock
2977 */
2978 wake_up(&conn->ehwait);
2979 }
2980
2981 /* flush queued up work because we free the connection below */ 2959 /* flush queued up work because we free the connection below */
2982 iscsi_suspend_tx(conn); 2960 iscsi_suspend_tx(conn);
2983 2961
@@ -2994,6 +2972,7 @@ void iscsi_conn_teardown(struct iscsi_cls_conn *cls_conn)
2994 if (session->leadconn == conn) 2972 if (session->leadconn == conn)
2995 session->leadconn = NULL; 2973 session->leadconn = NULL;
2996 spin_unlock_bh(&session->frwd_lock); 2974 spin_unlock_bh(&session->frwd_lock);
2975 mutex_unlock(&session->eh_mutex);
2997 2976
2998 iscsi_destroy_conn(cls_conn); 2977 iscsi_destroy_conn(cls_conn);
2999} 2978}
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 82b92c414a9c..437254e1c4de 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -738,7 +738,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
738 ql_log(ql_log_info, vha, 0x706f, 738 ql_log(ql_log_info, vha, 0x706f,
739 "Issuing MPI reset.\n"); 739 "Issuing MPI reset.\n");
740 740
741 if (IS_QLA83XX(ha)) { 741 if (IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
742 uint32_t idc_control; 742 uint32_t idc_control;
743 743
744 qla83xx_idc_lock(vha, 0); 744 qla83xx_idc_lock(vha, 0);
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 0e6ee3ca30e6..8b011aef12bd 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -67,10 +67,10 @@
67 * | | | 0xd031-0xd0ff | 67 * | | | 0xd031-0xd0ff |
68 * | | | 0xd101-0xd1fe | 68 * | | | 0xd101-0xd1fe |
69 * | | | 0xd214-0xd2fe | 69 * | | | 0xd214-0xd2fe |
70 * | Target Mode | 0xe079 | | 70 * | Target Mode | 0xe080 | |
71 * | Target Mode Management | 0xf072 | 0xf002 | 71 * | Target Mode Management | 0xf096 | 0xf002 |
72 * | | | 0xf046-0xf049 | 72 * | | | 0xf046-0xf049 |
73 * | Target Mode Task Management | 0x1000b | | 73 * | Target Mode Task Management | 0x1000d | |
74 * ---------------------------------------------------------------------- 74 * ----------------------------------------------------------------------
75 */ 75 */
76 76
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index e86201d3b8c6..9ad819edcd67 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -274,6 +274,7 @@
274#define RESPONSE_ENTRY_CNT_FX00 256 /* Number of response entries.*/ 274#define RESPONSE_ENTRY_CNT_FX00 256 /* Number of response entries.*/
275 275
276struct req_que; 276struct req_que;
277struct qla_tgt_sess;
277 278
278/* 279/*
279 * (sd.h is not exported, hence local inclusion) 280 * (sd.h is not exported, hence local inclusion)
@@ -2026,6 +2027,7 @@ typedef struct fc_port {
2026 uint16_t port_id; 2027 uint16_t port_id;
2027 2028
2028 unsigned long retry_delay_timestamp; 2029 unsigned long retry_delay_timestamp;
2030 struct qla_tgt_sess *tgt_session;
2029} fc_port_t; 2031} fc_port_t;
2030 2032
2031#include "qla_mr.h" 2033#include "qla_mr.h"
@@ -3154,13 +3156,13 @@ struct qla_hw_data {
3154/* Bit 21 of fw_attributes decides the MCTP capabilities */ 3156/* Bit 21 of fw_attributes decides the MCTP capabilities */
3155#define IS_MCTP_CAPABLE(ha) (IS_QLA2031(ha) && \ 3157#define IS_MCTP_CAPABLE(ha) (IS_QLA2031(ha) && \
3156 ((ha)->fw_attributes_ext[0] & BIT_0)) 3158 ((ha)->fw_attributes_ext[0] & BIT_0))
3157#define IS_PI_UNINIT_CAPABLE(ha) (IS_QLA83XX(ha)) 3159#define IS_PI_UNINIT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
3158#define IS_PI_IPGUARD_CAPABLE(ha) (IS_QLA83XX(ha)) 3160#define IS_PI_IPGUARD_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
3159#define IS_PI_DIFB_DIX0_CAPABLE(ha) (0) 3161#define IS_PI_DIFB_DIX0_CAPABLE(ha) (0)
3160#define IS_PI_SPLIT_DET_CAPABLE_HBA(ha) (IS_QLA83XX(ha)) 3162#define IS_PI_SPLIT_DET_CAPABLE_HBA(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
3161#define IS_PI_SPLIT_DET_CAPABLE(ha) (IS_PI_SPLIT_DET_CAPABLE_HBA(ha) && \ 3163#define IS_PI_SPLIT_DET_CAPABLE(ha) (IS_PI_SPLIT_DET_CAPABLE_HBA(ha) && \
3162 (((ha)->fw_attributes_h << 16 | (ha)->fw_attributes) & BIT_22)) 3164 (((ha)->fw_attributes_h << 16 | (ha)->fw_attributes) & BIT_22))
3163#define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha)) 3165#define IS_ATIO_MSIX_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
3164#define IS_TGT_MODE_CAPABLE(ha) (ha->tgt.atio_q_length) 3166#define IS_TGT_MODE_CAPABLE(ha) (ha->tgt.atio_q_length)
3165#define IS_SHADOW_REG_CAPABLE(ha) (IS_QLA27XX(ha)) 3167#define IS_SHADOW_REG_CAPABLE(ha) (IS_QLA27XX(ha))
3166#define IS_DPORT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha)) 3168#define IS_DPORT_CAPABLE(ha) (IS_QLA83XX(ha) || IS_QLA27XX(ha))
@@ -3579,6 +3581,16 @@ typedef struct scsi_qla_host {
3579 uint16_t fcoe_fcf_idx; 3581 uint16_t fcoe_fcf_idx;
3580 uint8_t fcoe_vn_port_mac[6]; 3582 uint8_t fcoe_vn_port_mac[6];
3581 3583
3584 /* list of commands waiting on workqueue */
3585 struct list_head qla_cmd_list;
3586 struct list_head qla_sess_op_cmd_list;
3587 spinlock_t cmd_list_lock;
3588
3589 /* Counter to detect races between ELS and RSCN events */
3590 atomic_t generation_tick;
3591 /* Time when global fcport update has been scheduled */
3592 int total_fcport_update_gen;
3593
3582 uint32_t vp_abort_cnt; 3594 uint32_t vp_abort_cnt;
3583 3595
3584 struct fc_vport *fc_vport; /* holds fc_vport * for each vport */ 3596 struct fc_vport *fc_vport; /* holds fc_vport * for each vport */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 664013115c9d..11f2f3279eab 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -115,6 +115,8 @@ qla2x00_async_iocb_timeout(void *data)
115 QLA_LOGIO_LOGIN_RETRIED : 0; 115 QLA_LOGIO_LOGIN_RETRIED : 0;
116 qla2x00_post_async_login_done_work(fcport->vha, fcport, 116 qla2x00_post_async_login_done_work(fcport->vha, fcport,
117 lio->u.logio.data); 117 lio->u.logio.data);
118 } else if (sp->type == SRB_LOGOUT_CMD) {
119 qlt_logo_completion_handler(fcport, QLA_FUNCTION_TIMEOUT);
118 } 120 }
119} 121}
120 122
@@ -497,7 +499,10 @@ void
497qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport, 499qla2x00_async_logout_done(struct scsi_qla_host *vha, fc_port_t *fcport,
498 uint16_t *data) 500 uint16_t *data)
499{ 501{
500 qla2x00_mark_device_lost(vha, fcport, 1, 0); 502 /* Don't re-login in target mode */
503 if (!fcport->tgt_session)
504 qla2x00_mark_device_lost(vha, fcport, 1, 0);
505 qlt_logo_completion_handler(fcport, data[0]);
501 return; 506 return;
502} 507}
503 508
@@ -1538,7 +1543,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
1538 mem_size = (ha->fw_memory_size - 0x11000 + 1) * 1543 mem_size = (ha->fw_memory_size - 0x11000 + 1) *
1539 sizeof(uint16_t); 1544 sizeof(uint16_t);
1540 } else if (IS_FWI2_CAPABLE(ha)) { 1545 } else if (IS_FWI2_CAPABLE(ha)) {
1541 if (IS_QLA83XX(ha)) 1546 if (IS_QLA83XX(ha) || IS_QLA27XX(ha))
1542 fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem); 1547 fixed_size = offsetof(struct qla83xx_fw_dump, ext_mem);
1543 else if (IS_QLA81XX(ha)) 1548 else if (IS_QLA81XX(ha))
1544 fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem); 1549 fixed_size = offsetof(struct qla81xx_fw_dump, ext_mem);
@@ -1550,7 +1555,7 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
1550 mem_size = (ha->fw_memory_size - 0x100000 + 1) * 1555 mem_size = (ha->fw_memory_size - 0x100000 + 1) *
1551 sizeof(uint32_t); 1556 sizeof(uint32_t);
1552 if (ha->mqenable) { 1557 if (ha->mqenable) {
1553 if (!IS_QLA83XX(ha)) 1558 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
1554 mq_size = sizeof(struct qla2xxx_mq_chain); 1559 mq_size = sizeof(struct qla2xxx_mq_chain);
1555 /* 1560 /*
1556 * Allocate maximum buffer size for all queues. 1561 * Allocate maximum buffer size for all queues.
@@ -2922,21 +2927,14 @@ qla2x00_rport_del(void *data)
2922{ 2927{
2923 fc_port_t *fcport = data; 2928 fc_port_t *fcport = data;
2924 struct fc_rport *rport; 2929 struct fc_rport *rport;
2925 scsi_qla_host_t *vha = fcport->vha;
2926 unsigned long flags; 2930 unsigned long flags;
2927 2931
2928 spin_lock_irqsave(fcport->vha->host->host_lock, flags); 2932 spin_lock_irqsave(fcport->vha->host->host_lock, flags);
2929 rport = fcport->drport ? fcport->drport: fcport->rport; 2933 rport = fcport->drport ? fcport->drport: fcport->rport;
2930 fcport->drport = NULL; 2934 fcport->drport = NULL;
2931 spin_unlock_irqrestore(fcport->vha->host->host_lock, flags); 2935 spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
2932 if (rport) { 2936 if (rport)
2933 fc_remote_port_delete(rport); 2937 fc_remote_port_delete(rport);
2934 /*
2935 * Release the target mode FC NEXUS in qla_target.c code
2936 * if target mod is enabled.
2937 */
2938 qlt_fc_port_deleted(vha, fcport);
2939 }
2940} 2938}
2941 2939
2942/** 2940/**
@@ -3303,6 +3301,7 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
3303 * Create target mode FC NEXUS in qla_target.c if target mode is 3301 * Create target mode FC NEXUS in qla_target.c if target mode is
3304 * enabled.. 3302 * enabled..
3305 */ 3303 */
3304
3306 qlt_fc_port_added(vha, fcport); 3305 qlt_fc_port_added(vha, fcport);
3307 3306
3308 spin_lock_irqsave(fcport->vha->host->host_lock, flags); 3307 spin_lock_irqsave(fcport->vha->host->host_lock, flags);
@@ -3341,8 +3340,7 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
3341 3340
3342 if (IS_QLAFX00(vha->hw)) { 3341 if (IS_QLAFX00(vha->hw)) {
3343 qla2x00_set_fcport_state(fcport, FCS_ONLINE); 3342 qla2x00_set_fcport_state(fcport, FCS_ONLINE);
3344 qla2x00_reg_remote_port(vha, fcport); 3343 goto reg_port;
3345 return;
3346 } 3344 }
3347 fcport->login_retry = 0; 3345 fcport->login_retry = 0;
3348 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT); 3346 fcport->flags &= ~(FCF_LOGIN_NEEDED | FCF_ASYNC_SENT);
@@ -3350,7 +3348,16 @@ qla2x00_update_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
3350 qla2x00_set_fcport_state(fcport, FCS_ONLINE); 3348 qla2x00_set_fcport_state(fcport, FCS_ONLINE);
3351 qla2x00_iidma_fcport(vha, fcport); 3349 qla2x00_iidma_fcport(vha, fcport);
3352 qla24xx_update_fcport_fcp_prio(vha, fcport); 3350 qla24xx_update_fcport_fcp_prio(vha, fcport);
3353 qla2x00_reg_remote_port(vha, fcport); 3351
3352reg_port:
3353 if (qla_ini_mode_enabled(vha))
3354 qla2x00_reg_remote_port(vha, fcport);
3355 else {
3356 /*
3357 * Create target mode FC NEXUS in qla_target.c
3358 */
3359 qlt_fc_port_added(vha, fcport);
3360 }
3354} 3361}
3355 3362
3356/* 3363/*
@@ -3375,6 +3382,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
3375 LIST_HEAD(new_fcports); 3382 LIST_HEAD(new_fcports);
3376 struct qla_hw_data *ha = vha->hw; 3383 struct qla_hw_data *ha = vha->hw;
3377 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 3384 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
3385 int discovery_gen;
3378 3386
3379 /* If FL port exists, then SNS is present */ 3387 /* If FL port exists, then SNS is present */
3380 if (IS_FWI2_CAPABLE(ha)) 3388 if (IS_FWI2_CAPABLE(ha))
@@ -3445,6 +3453,14 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
3445 fcport->scan_state = QLA_FCPORT_SCAN; 3453 fcport->scan_state = QLA_FCPORT_SCAN;
3446 } 3454 }
3447 3455
3456 /* Mark the time right before querying FW for connected ports.
3457 * This process is long, asynchronous and by the time it's done,
3458 * collected information might not be accurate anymore. E.g.
3459 * disconnected port might have re-connected and a brand new
3460 * session has been created. In this case session's generation
3461 * will be newer than discovery_gen. */
3462 qlt_do_generation_tick(vha, &discovery_gen);
3463
3448 rval = qla2x00_find_all_fabric_devs(vha, &new_fcports); 3464 rval = qla2x00_find_all_fabric_devs(vha, &new_fcports);
3449 if (rval != QLA_SUCCESS) 3465 if (rval != QLA_SUCCESS)
3450 break; 3466 break;
@@ -3460,20 +3476,44 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
3460 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) 3476 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
3461 continue; 3477 continue;
3462 3478
3463 if (fcport->scan_state == QLA_FCPORT_SCAN && 3479 if (fcport->scan_state == QLA_FCPORT_SCAN) {
3464 atomic_read(&fcport->state) == FCS_ONLINE) { 3480 if (qla_ini_mode_enabled(base_vha) &&
3465 qla2x00_mark_device_lost(vha, fcport, 3481 atomic_read(&fcport->state) == FCS_ONLINE) {
3466 ql2xplogiabsentdevice, 0); 3482 qla2x00_mark_device_lost(vha, fcport,
3467 if (fcport->loop_id != FC_NO_LOOP_ID && 3483 ql2xplogiabsentdevice, 0);
3468 (fcport->flags & FCF_FCP2_DEVICE) == 0 && 3484 if (fcport->loop_id != FC_NO_LOOP_ID &&
3469 fcport->port_type != FCT_INITIATOR && 3485 (fcport->flags & FCF_FCP2_DEVICE) == 0 &&
3470 fcport->port_type != FCT_BROADCAST) { 3486 fcport->port_type != FCT_INITIATOR &&
3471 ha->isp_ops->fabric_logout(vha, 3487 fcport->port_type != FCT_BROADCAST) {
3472 fcport->loop_id, 3488 ha->isp_ops->fabric_logout(vha,
3473 fcport->d_id.b.domain, 3489 fcport->loop_id,
3474 fcport->d_id.b.area, 3490 fcport->d_id.b.domain,
3475 fcport->d_id.b.al_pa); 3491 fcport->d_id.b.area,
3476 qla2x00_clear_loop_id(fcport); 3492 fcport->d_id.b.al_pa);
3493 qla2x00_clear_loop_id(fcport);
3494 }
3495 } else if (!qla_ini_mode_enabled(base_vha)) {
3496 /*
3497 * In target mode, explicitly kill
3498 * sessions and log out of devices
3499 * that are gone, so that we don't
3500 * end up with an initiator using the
3501 * wrong ACL (if the fabric recycles
3502 * an FC address and we have a stale
3503 * session around) and so that we don't
3504 * report initiators that are no longer
3505 * on the fabric.
3506 */
3507 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf077,
3508 "port gone, logging out/killing session: "
3509 "%8phC state 0x%x flags 0x%x fc4_type 0x%x "
3510 "scan_state %d\n",
3511 fcport->port_name,
3512 atomic_read(&fcport->state),
3513 fcport->flags, fcport->fc4_type,
3514 fcport->scan_state);
3515 qlt_fc_port_deleted(vha, fcport,
3516 discovery_gen);
3477 } 3517 }
3478 } 3518 }
3479 } 3519 }
@@ -3494,6 +3534,28 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
3494 (fcport->flags & FCF_LOGIN_NEEDED) == 0) 3534 (fcport->flags & FCF_LOGIN_NEEDED) == 0)
3495 continue; 3535 continue;
3496 3536
3537 /*
3538 * If we're not an initiator, skip looking for devices
3539 * and logging in. There's no reason for us to do it,
3540 * and it seems to actively cause problems in target
3541 * mode if we race with the initiator logging into us
3542 * (we might get the "port ID used" status back from
3543 * our login command and log out the initiator, which
3544 * seems to cause havoc).
3545 */
3546 if (!qla_ini_mode_enabled(base_vha)) {
3547 if (fcport->scan_state == QLA_FCPORT_FOUND) {
3548 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf078,
3549 "port %8phC state 0x%x flags 0x%x fc4_type 0x%x "
3550 "scan_state %d (initiator mode disabled; skipping "
3551 "login)\n", fcport->port_name,
3552 atomic_read(&fcport->state),
3553 fcport->flags, fcport->fc4_type,
3554 fcport->scan_state);
3555 }
3556 continue;
3557 }
3558
3497 if (fcport->loop_id == FC_NO_LOOP_ID) { 3559 if (fcport->loop_id == FC_NO_LOOP_ID) {
3498 fcport->loop_id = next_loopid; 3560 fcport->loop_id = next_loopid;
3499 rval = qla2x00_find_new_loop_id( 3561 rval = qla2x00_find_new_loop_id(
@@ -3520,16 +3582,38 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
3520 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) 3582 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
3521 break; 3583 break;
3522 3584
3523 /* Find a new loop ID to use. */ 3585 /*
3524 fcport->loop_id = next_loopid; 3586 * If we're not an initiator, skip looking for devices
3525 rval = qla2x00_find_new_loop_id(base_vha, fcport); 3587 * and logging in. There's no reason for us to do it,
3526 if (rval != QLA_SUCCESS) { 3588 * and it seems to actively cause problems in target
3527 /* Ran out of IDs to use */ 3589 * mode if we race with the initiator logging into us
3528 break; 3590 * (we might get the "port ID used" status back from
3529 } 3591 * our login command and log out the initiator, which
3592 * seems to cause havoc).
3593 */
3594 if (qla_ini_mode_enabled(base_vha)) {
3595 /* Find a new loop ID to use. */
3596 fcport->loop_id = next_loopid;
3597 rval = qla2x00_find_new_loop_id(base_vha,
3598 fcport);
3599 if (rval != QLA_SUCCESS) {
3600 /* Ran out of IDs to use */
3601 break;
3602 }
3530 3603
3531 /* Login and update database */ 3604 /* Login and update database */
3532 qla2x00_fabric_dev_login(vha, fcport, &next_loopid); 3605 qla2x00_fabric_dev_login(vha, fcport,
3606 &next_loopid);
3607 } else {
3608 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf079,
3609 "new port %8phC state 0x%x flags 0x%x fc4_type "
3610 "0x%x scan_state %d (initiator mode disabled; "
3611 "skipping login)\n",
3612 fcport->port_name,
3613 atomic_read(&fcport->state),
3614 fcport->flags, fcport->fc4_type,
3615 fcport->scan_state);
3616 }
3533 3617
3534 list_move_tail(&fcport->list, &vha->vp_fcports); 3618 list_move_tail(&fcport->list, &vha->vp_fcports);
3535 } 3619 }
@@ -3725,11 +3809,12 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3725 fcport->fp_speed = new_fcport->fp_speed; 3809 fcport->fp_speed = new_fcport->fp_speed;
3726 3810
3727 /* 3811 /*
3728 * If address the same and state FCS_ONLINE, nothing 3812 * If address the same and state FCS_ONLINE
3729 * changed. 3813 * (or in target mode), nothing changed.
3730 */ 3814 */
3731 if (fcport->d_id.b24 == new_fcport->d_id.b24 && 3815 if (fcport->d_id.b24 == new_fcport->d_id.b24 &&
3732 atomic_read(&fcport->state) == FCS_ONLINE) { 3816 (atomic_read(&fcport->state) == FCS_ONLINE ||
3817 !qla_ini_mode_enabled(base_vha))) {
3733 break; 3818 break;
3734 } 3819 }
3735 3820
@@ -3749,6 +3834,22 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3749 * Log it out if still logged in and mark it for 3834 * Log it out if still logged in and mark it for
3750 * relogin later. 3835 * relogin later.
3751 */ 3836 */
3837 if (!qla_ini_mode_enabled(base_vha)) {
3838 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf080,
3839 "port changed FC ID, %8phC"
3840 " old %x:%x:%x (loop_id 0x%04x)-> new %x:%x:%x\n",
3841 fcport->port_name,
3842 fcport->d_id.b.domain,
3843 fcport->d_id.b.area,
3844 fcport->d_id.b.al_pa,
3845 fcport->loop_id,
3846 new_fcport->d_id.b.domain,
3847 new_fcport->d_id.b.area,
3848 new_fcport->d_id.b.al_pa);
3849 fcport->d_id.b24 = new_fcport->d_id.b24;
3850 break;
3851 }
3852
3752 fcport->d_id.b24 = new_fcport->d_id.b24; 3853 fcport->d_id.b24 = new_fcport->d_id.b24;
3753 fcport->flags |= FCF_LOGIN_NEEDED; 3854 fcport->flags |= FCF_LOGIN_NEEDED;
3754 if (fcport->loop_id != FC_NO_LOOP_ID && 3855 if (fcport->loop_id != FC_NO_LOOP_ID &&
@@ -3768,6 +3869,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3768 if (found) 3869 if (found)
3769 continue; 3870 continue;
3770 /* If device was not in our fcports list, then add it. */ 3871 /* If device was not in our fcports list, then add it. */
3872 new_fcport->scan_state = QLA_FCPORT_FOUND;
3771 list_add_tail(&new_fcport->list, new_fcports); 3873 list_add_tail(&new_fcport->list, new_fcports);
3772 3874
3773 /* Allocate a new replacement fcport. */ 3875 /* Allocate a new replacement fcport. */
@@ -4188,6 +4290,14 @@ qla2x00_update_fcports(scsi_qla_host_t *base_vha)
4188 atomic_read(&fcport->state) != FCS_UNCONFIGURED) { 4290 atomic_read(&fcport->state) != FCS_UNCONFIGURED) {
4189 spin_unlock_irqrestore(&ha->vport_slock, flags); 4291 spin_unlock_irqrestore(&ha->vport_slock, flags);
4190 qla2x00_rport_del(fcport); 4292 qla2x00_rport_del(fcport);
4293
4294 /*
4295 * Release the target mode FC NEXUS in
4296 * qla_target.c, if target mod is enabled.
4297 */
4298 qlt_fc_port_deleted(vha, fcport,
4299 base_vha->total_fcport_update_gen);
4300
4191 spin_lock_irqsave(&ha->vport_slock, flags); 4301 spin_lock_irqsave(&ha->vport_slock, flags);
4192 } 4302 }
4193 } 4303 }
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index 36fbd4c7af8f..6f02b26a35cf 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -1943,6 +1943,9 @@ qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1943 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; 1943 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1944 logio->control_flags = 1944 logio->control_flags =
1945 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO); 1945 cpu_to_le16(LCF_COMMAND_LOGO|LCF_IMPL_LOGO);
1946 if (!sp->fcport->tgt_session ||
1947 !sp->fcport->tgt_session->keep_nport_handle)
1948 logio->control_flags |= cpu_to_le16(LCF_FREE_NPORT);
1946 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); 1949 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1947 logio->port_id[0] = sp->fcport->d_id.b.al_pa; 1950 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1948 logio->port_id[1] = sp->fcport->d_id.b.area; 1951 logio->port_id[1] = sp->fcport->d_id.b.area;
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index 02b1c1c5355b..b2f713ad9034 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -2415,7 +2415,8 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
2415 *orig_iocb_cnt = mcp->mb[10]; 2415 *orig_iocb_cnt = mcp->mb[10];
2416 if (vha->hw->flags.npiv_supported && max_npiv_vports) 2416 if (vha->hw->flags.npiv_supported && max_npiv_vports)
2417 *max_npiv_vports = mcp->mb[11]; 2417 *max_npiv_vports = mcp->mb[11];
2418 if ((IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw)) && max_fcfs) 2418 if ((IS_QLA81XX(vha->hw) || IS_QLA83XX(vha->hw) ||
2419 IS_QLA27XX(vha->hw)) && max_fcfs)
2419 *max_fcfs = mcp->mb[12]; 2420 *max_fcfs = mcp->mb[12];
2420 } 2421 }
2421 2422
@@ -3898,7 +3899,7 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
3898 spin_lock_irqsave(&ha->hardware_lock, flags); 3899 spin_lock_irqsave(&ha->hardware_lock, flags);
3899 if (!(rsp->options & BIT_0)) { 3900 if (!(rsp->options & BIT_0)) {
3900 WRT_REG_DWORD(rsp->rsp_q_out, 0); 3901 WRT_REG_DWORD(rsp->rsp_q_out, 0);
3901 if (!IS_QLA83XX(ha)) 3902 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
3902 WRT_REG_DWORD(rsp->rsp_q_in, 0); 3903 WRT_REG_DWORD(rsp->rsp_q_in, 0);
3903 } 3904 }
3904 3905
@@ -5345,7 +5346,7 @@ qla83xx_restart_nic_firmware(scsi_qla_host_t *vha)
5345 mbx_cmd_t *mcp = &mc; 5346 mbx_cmd_t *mcp = &mc;
5346 struct qla_hw_data *ha = vha->hw; 5347 struct qla_hw_data *ha = vha->hw;
5347 5348
5348 if (!IS_QLA83XX(ha)) 5349 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
5349 return QLA_FUNCTION_FAILED; 5350 return QLA_FUNCTION_FAILED;
5350 5351
5351 ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__); 5352 ql_dbg(ql_dbg_mbx, vha, 0x1143, "Entered %s.\n", __func__);
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index a28815b8276f..8a5cac8448c7 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -2504,6 +2504,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2504 ha->mbx_count = MAILBOX_REGISTER_COUNT; 2504 ha->mbx_count = MAILBOX_REGISTER_COUNT;
2505 req_length = REQUEST_ENTRY_CNT_24XX; 2505 req_length = REQUEST_ENTRY_CNT_24XX;
2506 rsp_length = RESPONSE_ENTRY_CNT_2300; 2506 rsp_length = RESPONSE_ENTRY_CNT_2300;
2507 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
2507 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 2508 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
2508 ha->init_cb_size = sizeof(struct mid_init_cb_81xx); 2509 ha->init_cb_size = sizeof(struct mid_init_cb_81xx);
2509 ha->gid_list_info_size = 8; 2510 ha->gid_list_info_size = 8;
@@ -3229,11 +3230,15 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
3229 spin_lock_irqsave(vha->host->host_lock, flags); 3230 spin_lock_irqsave(vha->host->host_lock, flags);
3230 fcport->drport = rport; 3231 fcport->drport = rport;
3231 spin_unlock_irqrestore(vha->host->host_lock, flags); 3232 spin_unlock_irqrestore(vha->host->host_lock, flags);
3233 qlt_do_generation_tick(vha, &base_vha->total_fcport_update_gen);
3232 set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags); 3234 set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
3233 qla2xxx_wake_dpc(base_vha); 3235 qla2xxx_wake_dpc(base_vha);
3234 } else { 3236 } else {
3235 fc_remote_port_delete(rport); 3237 int now;
3236 qlt_fc_port_deleted(vha, fcport); 3238 if (rport)
3239 fc_remote_port_delete(rport);
3240 qlt_do_generation_tick(vha, &now);
3241 qlt_fc_port_deleted(vha, fcport, now);
3237 } 3242 }
3238} 3243}
3239 3244
@@ -3763,8 +3768,11 @@ struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
3763 INIT_LIST_HEAD(&vha->vp_fcports); 3768 INIT_LIST_HEAD(&vha->vp_fcports);
3764 INIT_LIST_HEAD(&vha->work_list); 3769 INIT_LIST_HEAD(&vha->work_list);
3765 INIT_LIST_HEAD(&vha->list); 3770 INIT_LIST_HEAD(&vha->list);
3771 INIT_LIST_HEAD(&vha->qla_cmd_list);
3772 INIT_LIST_HEAD(&vha->qla_sess_op_cmd_list);
3766 3773
3767 spin_lock_init(&vha->work_lock); 3774 spin_lock_init(&vha->work_lock);
3775 spin_lock_init(&vha->cmd_list_lock);
3768 3776
3769 sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no); 3777 sprintf(vha->host_str, "%s_%ld", QLA2XXX_DRIVER_NAME, vha->host_no);
3770 ql_dbg(ql_dbg_init, vha, 0x0041, 3778 ql_dbg(ql_dbg_init, vha, 0x0041,
diff --git a/drivers/scsi/qla2xxx/qla_sup.c b/drivers/scsi/qla2xxx/qla_sup.c
index 028e8c8a7de9..2feb5f38edcd 100644
--- a/drivers/scsi/qla2xxx/qla_sup.c
+++ b/drivers/scsi/qla2xxx/qla_sup.c
@@ -1697,7 +1697,7 @@ qla83xx_select_led_port(struct qla_hw_data *ha)
1697{ 1697{
1698 uint32_t led_select_value = 0; 1698 uint32_t led_select_value = 0;
1699 1699
1700 if (!IS_QLA83XX(ha)) 1700 if (!IS_QLA83XX(ha) && !IS_QLA27XX(ha))
1701 goto out; 1701 goto out;
1702 1702
1703 if (ha->port_no == 0) 1703 if (ha->port_no == 0)
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index b749026aa592..58651ecbd88c 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -113,6 +113,11 @@ static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha,
113static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha, 113static void qlt_alloc_qfull_cmd(struct scsi_qla_host *vha,
114 struct atio_from_isp *atio, uint16_t status, int qfull); 114 struct atio_from_isp *atio, uint16_t status, int qfull);
115static void qlt_disable_vha(struct scsi_qla_host *vha); 115static void qlt_disable_vha(struct scsi_qla_host *vha);
116static void qlt_clear_tgt_db(struct qla_tgt *tgt);
117static void qlt_send_notify_ack(struct scsi_qla_host *vha,
118 struct imm_ntfy_from_isp *ntfy,
119 uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
120 uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan);
116/* 121/*
117 * Global Variables 122 * Global Variables
118 */ 123 */
@@ -122,6 +127,16 @@ static struct workqueue_struct *qla_tgt_wq;
122static DEFINE_MUTEX(qla_tgt_mutex); 127static DEFINE_MUTEX(qla_tgt_mutex);
123static LIST_HEAD(qla_tgt_glist); 128static LIST_HEAD(qla_tgt_glist);
124 129
130/* This API intentionally takes dest as a parameter, rather than returning
131 * int value to avoid caller forgetting to issue wmb() after the store */
132void qlt_do_generation_tick(struct scsi_qla_host *vha, int *dest)
133{
134 scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);
135 *dest = atomic_inc_return(&base_vha->generation_tick);
136 /* memory barrier */
137 wmb();
138}
139
125/* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */ 140/* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
126static struct qla_tgt_sess *qlt_find_sess_by_port_name( 141static struct qla_tgt_sess *qlt_find_sess_by_port_name(
127 struct qla_tgt *tgt, 142 struct qla_tgt *tgt,
@@ -381,14 +396,73 @@ static void qlt_free_session_done(struct work_struct *work)
381 struct qla_tgt *tgt = sess->tgt; 396 struct qla_tgt *tgt = sess->tgt;
382 struct scsi_qla_host *vha = sess->vha; 397 struct scsi_qla_host *vha = sess->vha;
383 struct qla_hw_data *ha = vha->hw; 398 struct qla_hw_data *ha = vha->hw;
399 unsigned long flags;
400 bool logout_started = false;
401 fc_port_t fcport;
402
403 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf084,
404 "%s: se_sess %p / sess %p from port %8phC loop_id %#04x"
405 " s_id %02x:%02x:%02x logout %d keep %d plogi %d\n",
406 __func__, sess->se_sess, sess, sess->port_name, sess->loop_id,
407 sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa,
408 sess->logout_on_delete, sess->keep_nport_handle,
409 sess->plogi_ack_needed);
384 410
385 BUG_ON(!tgt); 411 BUG_ON(!tgt);
412
413 if (sess->logout_on_delete) {
414 int rc;
415
416 memset(&fcport, 0, sizeof(fcport));
417 fcport.loop_id = sess->loop_id;
418 fcport.d_id = sess->s_id;
419 memcpy(fcport.port_name, sess->port_name, WWN_SIZE);
420 fcport.vha = vha;
421 fcport.tgt_session = sess;
422
423 rc = qla2x00_post_async_logout_work(vha, &fcport, NULL);
424 if (rc != QLA_SUCCESS)
425 ql_log(ql_log_warn, vha, 0xf085,
426 "Schedule logo failed sess %p rc %d\n",
427 sess, rc);
428 else
429 logout_started = true;
430 }
431
386 /* 432 /*
387 * Release the target session for FC Nexus from fabric module code. 433 * Release the target session for FC Nexus from fabric module code.
388 */ 434 */
389 if (sess->se_sess != NULL) 435 if (sess->se_sess != NULL)
390 ha->tgt.tgt_ops->free_session(sess); 436 ha->tgt.tgt_ops->free_session(sess);
391 437
438 if (logout_started) {
439 bool traced = false;
440
441 while (!ACCESS_ONCE(sess->logout_completed)) {
442 if (!traced) {
443 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf086,
444 "%s: waiting for sess %p logout\n",
445 __func__, sess);
446 traced = true;
447 }
448 msleep(100);
449 }
450
451 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf087,
452 "%s: sess %p logout completed\n",
453 __func__, sess);
454 }
455
456 spin_lock_irqsave(&ha->hardware_lock, flags);
457
458 if (sess->plogi_ack_needed)
459 qlt_send_notify_ack(vha, &sess->tm_iocb,
460 0, 0, 0, 0, 0, 0);
461
462 list_del(&sess->sess_list_entry);
463
464 spin_unlock_irqrestore(&ha->hardware_lock, flags);
465
392 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001, 466 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
393 "Unregistration of sess %p finished\n", sess); 467 "Unregistration of sess %p finished\n", sess);
394 468
@@ -409,9 +483,9 @@ void qlt_unreg_sess(struct qla_tgt_sess *sess)
409 483
410 vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess); 484 vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
411 485
412 list_del(&sess->sess_list_entry); 486 if (!list_empty(&sess->del_list_entry))
413 if (sess->deleted) 487 list_del_init(&sess->del_list_entry);
414 list_del(&sess->del_list_entry); 488 sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
415 489
416 INIT_WORK(&sess->free_work, qlt_free_session_done); 490 INIT_WORK(&sess->free_work, qlt_free_session_done);
417 schedule_work(&sess->free_work); 491 schedule_work(&sess->free_work);
@@ -431,10 +505,10 @@ static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
431 505
432 loop_id = le16_to_cpu(n->u.isp24.nport_handle); 506 loop_id = le16_to_cpu(n->u.isp24.nport_handle);
433 if (loop_id == 0xFFFF) { 507 if (loop_id == 0xFFFF) {
434#if 0 /* FIXME: Re-enable Global event handling.. */
435 /* Global event */ 508 /* Global event */
436 atomic_inc(&ha->tgt.qla_tgt->tgt_global_resets_count); 509 atomic_inc(&vha->vha_tgt.qla_tgt->tgt_global_resets_count);
437 qlt_clear_tgt_db(ha->tgt.qla_tgt); 510 qlt_clear_tgt_db(vha->vha_tgt.qla_tgt);
511#if 0 /* FIXME: do we need to choose a session here? */
438 if (!list_empty(&ha->tgt.qla_tgt->sess_list)) { 512 if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
439 sess = list_entry(ha->tgt.qla_tgt->sess_list.next, 513 sess = list_entry(ha->tgt.qla_tgt->sess_list.next,
440 typeof(*sess), sess_list_entry); 514 typeof(*sess), sess_list_entry);
@@ -489,27 +563,38 @@ static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
489 struct qla_tgt *tgt = sess->tgt; 563 struct qla_tgt *tgt = sess->tgt;
490 uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5; 564 uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5;
491 565
492 if (sess->deleted) 566 if (sess->deleted) {
493 return; 567 /* Upgrade to unconditional deletion in case it was temporary */
568 if (immediate && sess->deleted == QLA_SESS_DELETION_PENDING)
569 list_del(&sess->del_list_entry);
570 else
571 return;
572 }
494 573
495 ql_dbg(ql_dbg_tgt, sess->vha, 0xe001, 574 ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
496 "Scheduling sess %p for deletion\n", sess); 575 "Scheduling sess %p for deletion\n", sess);
497 list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
498 sess->deleted = 1;
499 576
500 if (immediate) 577 if (immediate) {
501 dev_loss_tmo = 0; 578 dev_loss_tmo = 0;
579 sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
580 list_add(&sess->del_list_entry, &tgt->del_sess_list);
581 } else {
582 sess->deleted = QLA_SESS_DELETION_PENDING;
583 list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
584 }
502 585
503 sess->expires = jiffies + dev_loss_tmo * HZ; 586 sess->expires = jiffies + dev_loss_tmo * HZ;
504 587
505 ql_dbg(ql_dbg_tgt, sess->vha, 0xe048, 588 ql_dbg(ql_dbg_tgt, sess->vha, 0xe048,
506 "qla_target(%d): session for port %8phC (loop ID %d) scheduled for " 589 "qla_target(%d): session for port %8phC (loop ID %d s_id %02x:%02x:%02x)"
507 "deletion in %u secs (expires: %lu) immed: %d\n", 590 " scheduled for deletion in %u secs (expires: %lu) immed: %d, logout: %d, gen: %#x\n",
508 sess->vha->vp_idx, sess->port_name, sess->loop_id, dev_loss_tmo, 591 sess->vha->vp_idx, sess->port_name, sess->loop_id,
509 sess->expires, immediate); 592 sess->s_id.b.domain, sess->s_id.b.area, sess->s_id.b.al_pa,
593 dev_loss_tmo, sess->expires, immediate, sess->logout_on_delete,
594 sess->generation);
510 595
511 if (immediate) 596 if (immediate)
512 schedule_delayed_work(&tgt->sess_del_work, 0); 597 mod_delayed_work(system_wq, &tgt->sess_del_work, 0);
513 else 598 else
514 schedule_delayed_work(&tgt->sess_del_work, 599 schedule_delayed_work(&tgt->sess_del_work,
515 sess->expires - jiffies); 600 sess->expires - jiffies);
@@ -578,9 +663,9 @@ out_free_id_list:
578/* ha->hardware_lock supposed to be held on entry */ 663/* ha->hardware_lock supposed to be held on entry */
579static void qlt_undelete_sess(struct qla_tgt_sess *sess) 664static void qlt_undelete_sess(struct qla_tgt_sess *sess)
580{ 665{
581 BUG_ON(!sess->deleted); 666 BUG_ON(sess->deleted != QLA_SESS_DELETION_PENDING);
582 667
583 list_del(&sess->del_list_entry); 668 list_del_init(&sess->del_list_entry);
584 sess->deleted = 0; 669 sess->deleted = 0;
585} 670}
586 671
@@ -599,7 +684,9 @@ static void qlt_del_sess_work_fn(struct delayed_work *work)
599 del_list_entry); 684 del_list_entry);
600 elapsed = jiffies; 685 elapsed = jiffies;
601 if (time_after_eq(elapsed, sess->expires)) { 686 if (time_after_eq(elapsed, sess->expires)) {
602 qlt_undelete_sess(sess); 687 /* No turning back */
688 list_del_init(&sess->del_list_entry);
689 sess->deleted = QLA_SESS_DELETION_IN_PROGRESS;
603 690
604 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004, 691 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
605 "Timeout: sess %p about to be deleted\n", 692 "Timeout: sess %p about to be deleted\n",
@@ -643,6 +730,13 @@ static struct qla_tgt_sess *qlt_create_sess(
643 fcport->d_id.b.al_pa, fcport->d_id.b.area, 730 fcport->d_id.b.al_pa, fcport->d_id.b.area,
644 fcport->loop_id); 731 fcport->loop_id);
645 732
733 /* Cannot undelete at this point */
734 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
735 spin_unlock_irqrestore(&ha->hardware_lock,
736 flags);
737 return NULL;
738 }
739
646 if (sess->deleted) 740 if (sess->deleted)
647 qlt_undelete_sess(sess); 741 qlt_undelete_sess(sess);
648 742
@@ -652,6 +746,9 @@ static struct qla_tgt_sess *qlt_create_sess(
652 746
653 if (sess->local && !local) 747 if (sess->local && !local)
654 sess->local = 0; 748 sess->local = 0;
749
750 qlt_do_generation_tick(vha, &sess->generation);
751
655 spin_unlock_irqrestore(&ha->hardware_lock, flags); 752 spin_unlock_irqrestore(&ha->hardware_lock, flags);
656 753
657 return sess; 754 return sess;
@@ -673,6 +770,14 @@ static struct qla_tgt_sess *qlt_create_sess(
673 sess->s_id = fcport->d_id; 770 sess->s_id = fcport->d_id;
674 sess->loop_id = fcport->loop_id; 771 sess->loop_id = fcport->loop_id;
675 sess->local = local; 772 sess->local = local;
773 INIT_LIST_HEAD(&sess->del_list_entry);
774
775 /* Under normal circumstances we want to logout from firmware when
776 * session eventually ends and release corresponding nport handle.
777 * In the exception cases (e.g. when new PLOGI is waiting) corresponding
778 * code will adjust these flags as necessary. */
779 sess->logout_on_delete = 1;
780 sess->keep_nport_handle = 0;
676 781
677 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006, 782 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
678 "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n", 783 "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
@@ -705,6 +810,7 @@ static struct qla_tgt_sess *qlt_create_sess(
705 spin_lock_irqsave(&ha->hardware_lock, flags); 810 spin_lock_irqsave(&ha->hardware_lock, flags);
706 list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list); 811 list_add_tail(&sess->sess_list_entry, &vha->vha_tgt.qla_tgt->sess_list);
707 vha->vha_tgt.qla_tgt->sess_count++; 812 vha->vha_tgt.qla_tgt->sess_count++;
813 qlt_do_generation_tick(vha, &sess->generation);
708 spin_unlock_irqrestore(&ha->hardware_lock, flags); 814 spin_unlock_irqrestore(&ha->hardware_lock, flags);
709 815
710 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b, 816 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
@@ -718,7 +824,7 @@ static struct qla_tgt_sess *qlt_create_sess(
718} 824}
719 825
720/* 826/*
721 * Called from drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port() 827 * Called from qla2x00_reg_remote_port()
722 */ 828 */
723void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport) 829void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
724{ 830{
@@ -750,6 +856,10 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
750 mutex_unlock(&vha->vha_tgt.tgt_mutex); 856 mutex_unlock(&vha->vha_tgt.tgt_mutex);
751 857
752 spin_lock_irqsave(&ha->hardware_lock, flags); 858 spin_lock_irqsave(&ha->hardware_lock, flags);
859 } else if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
860 /* Point of no return */
861 spin_unlock_irqrestore(&ha->hardware_lock, flags);
862 return;
753 } else { 863 } else {
754 kref_get(&sess->se_sess->sess_kref); 864 kref_get(&sess->se_sess->sess_kref);
755 865
@@ -780,27 +890,36 @@ void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
780 spin_unlock_irqrestore(&ha->hardware_lock, flags); 890 spin_unlock_irqrestore(&ha->hardware_lock, flags);
781} 891}
782 892
783void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport) 893/*
894 * max_gen - specifies maximum session generation
895 * at which this deletion requestion is still valid
896 */
897void
898qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport, int max_gen)
784{ 899{
785 struct qla_hw_data *ha = vha->hw;
786 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt; 900 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
787 struct qla_tgt_sess *sess; 901 struct qla_tgt_sess *sess;
788 unsigned long flags;
789 902
790 if (!vha->hw->tgt.tgt_ops) 903 if (!vha->hw->tgt.tgt_ops)
791 return; 904 return;
792 905
793 if (!tgt || (fcport->port_type != FCT_INITIATOR)) 906 if (!tgt)
794 return; 907 return;
795 908
796 spin_lock_irqsave(&ha->hardware_lock, flags);
797 if (tgt->tgt_stop) { 909 if (tgt->tgt_stop) {
798 spin_unlock_irqrestore(&ha->hardware_lock, flags);
799 return; 910 return;
800 } 911 }
801 sess = qlt_find_sess_by_port_name(tgt, fcport->port_name); 912 sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
802 if (!sess) { 913 if (!sess) {
803 spin_unlock_irqrestore(&ha->hardware_lock, flags); 914 return;
915 }
916
917 if (max_gen - sess->generation < 0) {
918 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf092,
919 "Ignoring stale deletion request for se_sess %p / sess %p"
920 " for port %8phC, req_gen %d, sess_gen %d\n",
921 sess->se_sess, sess, sess->port_name, max_gen,
922 sess->generation);
804 return; 923 return;
805 } 924 }
806 925
@@ -808,7 +927,6 @@ void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
808 927
809 sess->local = 1; 928 sess->local = 1;
810 qlt_schedule_sess_for_deletion(sess, false); 929 qlt_schedule_sess_for_deletion(sess, false);
811 spin_unlock_irqrestore(&ha->hardware_lock, flags);
812} 930}
813 931
814static inline int test_tgt_sess_count(struct qla_tgt *tgt) 932static inline int test_tgt_sess_count(struct qla_tgt *tgt)
@@ -1175,6 +1293,70 @@ static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
1175 FCP_TMF_CMPL, true); 1293 FCP_TMF_CMPL, true);
1176} 1294}
1177 1295
1296static int abort_cmd_for_tag(struct scsi_qla_host *vha, uint32_t tag)
1297{
1298 struct qla_tgt_sess_op *op;
1299 struct qla_tgt_cmd *cmd;
1300
1301 spin_lock(&vha->cmd_list_lock);
1302
1303 list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
1304 if (tag == op->atio.u.isp24.exchange_addr) {
1305 op->aborted = true;
1306 spin_unlock(&vha->cmd_list_lock);
1307 return 1;
1308 }
1309 }
1310
1311 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
1312 if (tag == cmd->atio.u.isp24.exchange_addr) {
1313 cmd->state = QLA_TGT_STATE_ABORTED;
1314 spin_unlock(&vha->cmd_list_lock);
1315 return 1;
1316 }
1317 }
1318
1319 spin_unlock(&vha->cmd_list_lock);
1320 return 0;
1321}
1322
1323/* drop cmds for the given lun
1324 * XXX only looks for cmds on the port through which lun reset was recieved
1325 * XXX does not go through the list of other port (which may have cmds
1326 * for the same lun)
1327 */
1328static void abort_cmds_for_lun(struct scsi_qla_host *vha,
1329 uint32_t lun, uint8_t *s_id)
1330{
1331 struct qla_tgt_sess_op *op;
1332 struct qla_tgt_cmd *cmd;
1333 uint32_t key;
1334
1335 key = sid_to_key(s_id);
1336 spin_lock(&vha->cmd_list_lock);
1337 list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
1338 uint32_t op_key;
1339 uint32_t op_lun;
1340
1341 op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
1342 op_lun = scsilun_to_int(
1343 (struct scsi_lun *)&op->atio.u.isp24.fcp_cmnd.lun);
1344 if (op_key == key && op_lun == lun)
1345 op->aborted = true;
1346 }
1347 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
1348 uint32_t cmd_key;
1349 uint32_t cmd_lun;
1350
1351 cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
1352 cmd_lun = scsilun_to_int(
1353 (struct scsi_lun *)&cmd->atio.u.isp24.fcp_cmnd.lun);
1354 if (cmd_key == key && cmd_lun == lun)
1355 cmd->state = QLA_TGT_STATE_ABORTED;
1356 }
1357 spin_unlock(&vha->cmd_list_lock);
1358}
1359
1178/* ha->hardware_lock supposed to be held on entry */ 1360/* ha->hardware_lock supposed to be held on entry */
1179static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha, 1361static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
1180 struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess) 1362 struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
@@ -1199,8 +1381,19 @@ static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
1199 } 1381 }
1200 spin_unlock(&se_sess->sess_cmd_lock); 1382 spin_unlock(&se_sess->sess_cmd_lock);
1201 1383
1202 if (!found_lun) 1384 /* cmd not in LIO lists, look in qla list */
1203 return -ENOENT; 1385 if (!found_lun) {
1386 if (abort_cmd_for_tag(vha, abts->exchange_addr_to_abort)) {
1387 /* send TASK_ABORT response immediately */
1388 qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_CMPL, false);
1389 return 0;
1390 } else {
1391 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf081,
1392 "unable to find cmd in driver or LIO for tag 0x%x\n",
1393 abts->exchange_addr_to_abort);
1394 return -ENOENT;
1395 }
1396 }
1204 1397
1205 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f, 1398 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
1206 "qla_target(%d): task abort (tag=%d)\n", 1399 "qla_target(%d): task abort (tag=%d)\n",
@@ -1284,6 +1477,11 @@ static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
1284 return; 1477 return;
1285 } 1478 }
1286 1479
1480 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
1481 qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
1482 return;
1483 }
1484
1287 rc = __qlt_24xx_handle_abts(vha, abts, sess); 1485 rc = __qlt_24xx_handle_abts(vha, abts, sess);
1288 if (rc != 0) { 1486 if (rc != 0) {
1289 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054, 1487 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
@@ -1726,20 +1924,6 @@ static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
1726 struct qla_hw_data *ha = vha->hw; 1924 struct qla_hw_data *ha = vha->hw;
1727 struct se_cmd *se_cmd = &cmd->se_cmd; 1925 struct se_cmd *se_cmd = &cmd->se_cmd;
1728 1926
1729 if (unlikely(cmd->aborted)) {
1730 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
1731 "qla_target(%d): terminating exchange for aborted cmd=%p (se_cmd=%p, tag=%lld)",
1732 vha->vp_idx, cmd, se_cmd, se_cmd->tag);
1733
1734 cmd->state = QLA_TGT_STATE_ABORTED;
1735 cmd->cmd_flags |= BIT_6;
1736
1737 qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);
1738
1739 /* !! At this point cmd could be already freed !! */
1740 return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED;
1741 }
1742
1743 prm->cmd = cmd; 1927 prm->cmd = cmd;
1744 prm->tgt = tgt; 1928 prm->tgt = tgt;
1745 prm->rq_result = scsi_status; 1929 prm->rq_result = scsi_status;
@@ -2301,6 +2485,19 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
2301 unsigned long flags = 0; 2485 unsigned long flags = 0;
2302 int res; 2486 int res;
2303 2487
2488 spin_lock_irqsave(&ha->hardware_lock, flags);
2489 if (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
2490 cmd->state = QLA_TGT_STATE_PROCESSED;
2491 if (cmd->sess->logout_completed)
2492 /* no need to terminate. FW already freed exchange. */
2493 qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
2494 else
2495 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
2496 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2497 return 0;
2498 }
2499 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2500
2304 memset(&prm, 0, sizeof(prm)); 2501 memset(&prm, 0, sizeof(prm));
2305 qlt_check_srr_debug(cmd, &xmit_type); 2502 qlt_check_srr_debug(cmd, &xmit_type);
2306 2503
@@ -2313,9 +2510,6 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
2313 res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status, 2510 res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
2314 &full_req_cnt); 2511 &full_req_cnt);
2315 if (unlikely(res != 0)) { 2512 if (unlikely(res != 0)) {
2316 if (res == QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED)
2317 return 0;
2318
2319 return res; 2513 return res;
2320 } 2514 }
2321 2515
@@ -2345,9 +2539,10 @@ int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
2345 res = qlt_build_ctio_crc2_pkt(&prm, vha); 2539 res = qlt_build_ctio_crc2_pkt(&prm, vha);
2346 else 2540 else
2347 res = qlt_24xx_build_ctio_pkt(&prm, vha); 2541 res = qlt_24xx_build_ctio_pkt(&prm, vha);
2348 if (unlikely(res != 0)) 2542 if (unlikely(res != 0)) {
2543 vha->req->cnt += full_req_cnt;
2349 goto out_unmap_unlock; 2544 goto out_unmap_unlock;
2350 2545 }
2351 2546
2352 pkt = (struct ctio7_to_24xx *)prm.pkt; 2547 pkt = (struct ctio7_to_24xx *)prm.pkt;
2353 2548
@@ -2461,7 +2656,8 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
2461 2656
2462 spin_lock_irqsave(&ha->hardware_lock, flags); 2657 spin_lock_irqsave(&ha->hardware_lock, flags);
2463 2658
2464 if (qla2x00_reset_active(vha) || cmd->reset_count != ha->chip_reset) { 2659 if (qla2x00_reset_active(vha) || (cmd->reset_count != ha->chip_reset) ||
2660 (cmd->sess && cmd->sess->deleted == QLA_SESS_DELETION_IN_PROGRESS)) {
2465 /* 2661 /*
2466 * Either a chip reset is active or this request was from 2662 * Either a chip reset is active or this request was from
2467 * previous life, just abort the processing. 2663 * previous life, just abort the processing.
@@ -2485,8 +2681,11 @@ int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
2485 else 2681 else
2486 res = qlt_24xx_build_ctio_pkt(&prm, vha); 2682 res = qlt_24xx_build_ctio_pkt(&prm, vha);
2487 2683
2488 if (unlikely(res != 0)) 2684 if (unlikely(res != 0)) {
2685 vha->req->cnt += prm.req_cnt;
2489 goto out_unlock_free_unmap; 2686 goto out_unlock_free_unmap;
2687 }
2688
2490 pkt = (struct ctio7_to_24xx *)prm.pkt; 2689 pkt = (struct ctio7_to_24xx *)prm.pkt;
2491 pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT | 2690 pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
2492 CTIO7_FLAGS_STATUS_MODE_0); 2691 CTIO7_FLAGS_STATUS_MODE_0);
@@ -2651,6 +2850,89 @@ out:
2651 2850
2652/* If hardware_lock held on entry, might drop it, then reaquire */ 2851/* If hardware_lock held on entry, might drop it, then reaquire */
2653/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */ 2852/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
2853static int __qlt_send_term_imm_notif(struct scsi_qla_host *vha,
2854 struct imm_ntfy_from_isp *ntfy)
2855{
2856 struct nack_to_isp *nack;
2857 struct qla_hw_data *ha = vha->hw;
2858 request_t *pkt;
2859 int ret = 0;
2860
2861 ql_dbg(ql_dbg_tgt_tmr, vha, 0xe01c,
2862 "Sending TERM ELS CTIO (ha=%p)\n", ha);
2863
2864 pkt = (request_t *)qla2x00_alloc_iocbs_ready(vha, NULL);
2865 if (pkt == NULL) {
2866 ql_dbg(ql_dbg_tgt, vha, 0xe080,
2867 "qla_target(%d): %s failed: unable to allocate "
2868 "request packet\n", vha->vp_idx, __func__);
2869 return -ENOMEM;
2870 }
2871
2872 pkt->entry_type = NOTIFY_ACK_TYPE;
2873 pkt->entry_count = 1;
2874 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
2875
2876 nack = (struct nack_to_isp *)pkt;
2877 nack->ox_id = ntfy->ox_id;
2878
2879 nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
2880 if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
2881 nack->u.isp24.flags = ntfy->u.isp24.flags &
2882 __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
2883 }
2884
2885 /* terminate */
2886 nack->u.isp24.flags |=
2887 __constant_cpu_to_le16(NOTIFY_ACK_FLAGS_TERMINATE);
2888
2889 nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
2890 nack->u.isp24.status = ntfy->u.isp24.status;
2891 nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
2892 nack->u.isp24.fw_handle = ntfy->u.isp24.fw_handle;
2893 nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
2894 nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
2895 nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
2896 nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
2897
2898 qla2x00_start_iocbs(vha, vha->req);
2899 return ret;
2900}
2901
2902static void qlt_send_term_imm_notif(struct scsi_qla_host *vha,
2903 struct imm_ntfy_from_isp *imm, int ha_locked)
2904{
2905 unsigned long flags = 0;
2906 int rc;
2907
2908 if (qlt_issue_marker(vha, ha_locked) < 0)
2909 return;
2910
2911 if (ha_locked) {
2912 rc = __qlt_send_term_imm_notif(vha, imm);
2913
2914#if 0 /* Todo */
2915 if (rc == -ENOMEM)
2916 qlt_alloc_qfull_cmd(vha, imm, 0, 0);
2917#endif
2918 goto done;
2919 }
2920
2921 spin_lock_irqsave(&vha->hw->hardware_lock, flags);
2922 rc = __qlt_send_term_imm_notif(vha, imm);
2923
2924#if 0 /* Todo */
2925 if (rc == -ENOMEM)
2926 qlt_alloc_qfull_cmd(vha, imm, 0, 0);
2927#endif
2928
2929done:
2930 if (!ha_locked)
2931 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
2932}
2933
2934/* If hardware_lock held on entry, might drop it, then reaquire */
2935/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
2654static int __qlt_send_term_exchange(struct scsi_qla_host *vha, 2936static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
2655 struct qla_tgt_cmd *cmd, 2937 struct qla_tgt_cmd *cmd,
2656 struct atio_from_isp *atio) 2938 struct atio_from_isp *atio)
@@ -2715,7 +2997,7 @@ static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
2715static void qlt_send_term_exchange(struct scsi_qla_host *vha, 2997static void qlt_send_term_exchange(struct scsi_qla_host *vha,
2716 struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked) 2998 struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked)
2717{ 2999{
2718 unsigned long flags; 3000 unsigned long flags = 0;
2719 int rc; 3001 int rc;
2720 3002
2721 if (qlt_issue_marker(vha, ha_locked) < 0) 3003 if (qlt_issue_marker(vha, ha_locked) < 0)
@@ -2731,17 +3013,18 @@ static void qlt_send_term_exchange(struct scsi_qla_host *vha,
2731 rc = __qlt_send_term_exchange(vha, cmd, atio); 3013 rc = __qlt_send_term_exchange(vha, cmd, atio);
2732 if (rc == -ENOMEM) 3014 if (rc == -ENOMEM)
2733 qlt_alloc_qfull_cmd(vha, atio, 0, 0); 3015 qlt_alloc_qfull_cmd(vha, atio, 0, 0);
2734 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
2735 3016
2736done: 3017done:
2737 if (cmd && ((cmd->state != QLA_TGT_STATE_ABORTED) || 3018 if (cmd && ((cmd->state != QLA_TGT_STATE_ABORTED) ||
2738 !cmd->cmd_sent_to_fw)) { 3019 !cmd->cmd_sent_to_fw)) {
2739 if (!ha_locked && !in_interrupt()) 3020 if (cmd->sg_mapped)
2740 msleep(250); /* just in case */ 3021 qlt_unmap_sg(vha, cmd);
2741
2742 qlt_unmap_sg(vha, cmd);
2743 vha->hw->tgt.tgt_ops->free_cmd(cmd); 3022 vha->hw->tgt.tgt_ops->free_cmd(cmd);
2744 } 3023 }
3024
3025 if (!ha_locked)
3026 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
3027
2745 return; 3028 return;
2746} 3029}
2747 3030
@@ -2792,6 +3075,24 @@ static void qlt_chk_exch_leak_thresh_hold(struct scsi_qla_host *vha)
2792 3075
2793} 3076}
2794 3077
3078void qlt_abort_cmd(struct qla_tgt_cmd *cmd)
3079{
3080 struct qla_tgt *tgt = cmd->tgt;
3081 struct scsi_qla_host *vha = tgt->vha;
3082 struct se_cmd *se_cmd = &cmd->se_cmd;
3083
3084 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
3085 "qla_target(%d): terminating exchange for aborted cmd=%p "
3086 "(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd,
3087 se_cmd->tag);
3088
3089 cmd->state = QLA_TGT_STATE_ABORTED;
3090 cmd->cmd_flags |= BIT_6;
3091
3092 qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);
3093}
3094EXPORT_SYMBOL(qlt_abort_cmd);
3095
2795void qlt_free_cmd(struct qla_tgt_cmd *cmd) 3096void qlt_free_cmd(struct qla_tgt_cmd *cmd)
2796{ 3097{
2797 struct qla_tgt_sess *sess = cmd->sess; 3098 struct qla_tgt_sess *sess = cmd->sess;
@@ -3015,7 +3316,7 @@ qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha, struct qla_tgt_cmd *cmd)
3015 dump_stack(); 3316 dump_stack();
3016 } 3317 }
3017 3318
3018 cmd->cmd_flags |= BIT_12; 3319 cmd->cmd_flags |= BIT_17;
3019 ha->tgt.tgt_ops->free_cmd(cmd); 3320 ha->tgt.tgt_ops->free_cmd(cmd);
3020} 3321}
3021 3322
@@ -3177,7 +3478,7 @@ static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
3177skip_term: 3478skip_term:
3178 3479
3179 if (cmd->state == QLA_TGT_STATE_PROCESSED) { 3480 if (cmd->state == QLA_TGT_STATE_PROCESSED) {
3180 ; 3481 cmd->cmd_flags |= BIT_12;
3181 } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) { 3482 } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
3182 int rx_status = 0; 3483 int rx_status = 0;
3183 3484
@@ -3191,9 +3492,11 @@ skip_term:
3191 ha->tgt.tgt_ops->handle_data(cmd); 3492 ha->tgt.tgt_ops->handle_data(cmd);
3192 return; 3493 return;
3193 } else if (cmd->state == QLA_TGT_STATE_ABORTED) { 3494 } else if (cmd->state == QLA_TGT_STATE_ABORTED) {
3495 cmd->cmd_flags |= BIT_18;
3194 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e, 3496 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
3195 "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag); 3497 "Aborted command %p (tag %lld) finished\n", cmd, se_cmd->tag);
3196 } else { 3498 } else {
3499 cmd->cmd_flags |= BIT_19;
3197 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c, 3500 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
3198 "qla_target(%d): A command in state (%d) should " 3501 "qla_target(%d): A command in state (%d) should "
3199 "not return a CTIO complete\n", vha->vp_idx, cmd->state); 3502 "not return a CTIO complete\n", vha->vp_idx, cmd->state);
@@ -3205,7 +3508,6 @@ skip_term:
3205 dump_stack(); 3508 dump_stack();
3206 } 3509 }
3207 3510
3208
3209 ha->tgt.tgt_ops->free_cmd(cmd); 3511 ha->tgt.tgt_ops->free_cmd(cmd);
3210} 3512}
3211 3513
@@ -3263,6 +3565,13 @@ static void __qlt_do_work(struct qla_tgt_cmd *cmd)
3263 if (tgt->tgt_stop) 3565 if (tgt->tgt_stop)
3264 goto out_term; 3566 goto out_term;
3265 3567
3568 if (cmd->state == QLA_TGT_STATE_ABORTED) {
3569 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf082,
3570 "cmd with tag %u is aborted\n",
3571 cmd->atio.u.isp24.exchange_addr);
3572 goto out_term;
3573 }
3574
3266 cdb = &atio->u.isp24.fcp_cmnd.cdb[0]; 3575 cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
3267 cmd->se_cmd.tag = atio->u.isp24.exchange_addr; 3576 cmd->se_cmd.tag = atio->u.isp24.exchange_addr;
3268 cmd->unpacked_lun = scsilun_to_int( 3577 cmd->unpacked_lun = scsilun_to_int(
@@ -3316,6 +3625,12 @@ out_term:
3316static void qlt_do_work(struct work_struct *work) 3625static void qlt_do_work(struct work_struct *work)
3317{ 3626{
3318 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work); 3627 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
3628 scsi_qla_host_t *vha = cmd->vha;
3629 unsigned long flags;
3630
3631 spin_lock_irqsave(&vha->cmd_list_lock, flags);
3632 list_del(&cmd->cmd_list);
3633 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
3319 3634
3320 __qlt_do_work(cmd); 3635 __qlt_do_work(cmd);
3321} 3636}
@@ -3345,6 +3660,11 @@ static struct qla_tgt_cmd *qlt_get_tag(scsi_qla_host_t *vha,
3345 cmd->loop_id = sess->loop_id; 3660 cmd->loop_id = sess->loop_id;
3346 cmd->conf_compl_supported = sess->conf_compl_supported; 3661 cmd->conf_compl_supported = sess->conf_compl_supported;
3347 3662
3663 cmd->cmd_flags = 0;
3664 cmd->jiffies_at_alloc = get_jiffies_64();
3665
3666 cmd->reset_count = vha->hw->chip_reset;
3667
3348 return cmd; 3668 return cmd;
3349} 3669}
3350 3670
@@ -3362,14 +3682,25 @@ static void qlt_create_sess_from_atio(struct work_struct *work)
3362 unsigned long flags; 3682 unsigned long flags;
3363 uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id; 3683 uint8_t *s_id = op->atio.u.isp24.fcp_hdr.s_id;
3364 3684
3685 spin_lock_irqsave(&vha->cmd_list_lock, flags);
3686 list_del(&op->cmd_list);
3687 spin_unlock_irqrestore(&vha->cmd_list_lock, flags);
3688
3689 if (op->aborted) {
3690 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf083,
3691 "sess_op with tag %u is aborted\n",
3692 op->atio.u.isp24.exchange_addr);
3693 goto out_term;
3694 }
3695
3365 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022, 3696 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
3366 "qla_target(%d): Unable to find wwn login" 3697 "qla_target(%d): Unable to find wwn login"
3367 " (s_id %x:%x:%x), trying to create it manually\n", 3698 " (s_id %x:%x:%x), trying to create it manually\n",
3368 vha->vp_idx, s_id[0], s_id[1], s_id[2]); 3699 vha->vp_idx, s_id[0], s_id[1], s_id[2]);
3369 3700
3370 if (op->atio.u.raw.entry_count > 1) { 3701 if (op->atio.u.raw.entry_count > 1) {
3371 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023, 3702 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
3372 "Dropping multy entry atio %p\n", &op->atio); 3703 "Dropping multy entry atio %p\n", &op->atio);
3373 goto out_term; 3704 goto out_term;
3374 } 3705 }
3375 3706
@@ -3434,10 +3765,25 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
3434 3765
3435 memcpy(&op->atio, atio, sizeof(*atio)); 3766 memcpy(&op->atio, atio, sizeof(*atio));
3436 op->vha = vha; 3767 op->vha = vha;
3768
3769 spin_lock(&vha->cmd_list_lock);
3770 list_add_tail(&op->cmd_list, &vha->qla_sess_op_cmd_list);
3771 spin_unlock(&vha->cmd_list_lock);
3772
3437 INIT_WORK(&op->work, qlt_create_sess_from_atio); 3773 INIT_WORK(&op->work, qlt_create_sess_from_atio);
3438 queue_work(qla_tgt_wq, &op->work); 3774 queue_work(qla_tgt_wq, &op->work);
3439 return 0; 3775 return 0;
3440 } 3776 }
3777
3778 /* Another WWN used to have our s_id. Our PLOGI scheduled its
3779 * session deletion, but it's still in sess_del_work wq */
3780 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
3781 ql_dbg(ql_dbg_io, vha, 0x3061,
3782 "New command while old session %p is being deleted\n",
3783 sess);
3784 return -EFAULT;
3785 }
3786
3441 /* 3787 /*
3442 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock. 3788 * Do kref_get() before returning + dropping qla_hw_data->hardware_lock.
3443 */ 3789 */
@@ -3451,13 +3797,13 @@ static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
3451 return -ENOMEM; 3797 return -ENOMEM;
3452 } 3798 }
3453 3799
3454 cmd->cmd_flags = 0;
3455 cmd->jiffies_at_alloc = get_jiffies_64();
3456
3457 cmd->reset_count = vha->hw->chip_reset;
3458
3459 cmd->cmd_in_wq = 1; 3800 cmd->cmd_in_wq = 1;
3460 cmd->cmd_flags |= BIT_0; 3801 cmd->cmd_flags |= BIT_0;
3802
3803 spin_lock(&vha->cmd_list_lock);
3804 list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
3805 spin_unlock(&vha->cmd_list_lock);
3806
3461 INIT_WORK(&cmd->work, qlt_do_work); 3807 INIT_WORK(&cmd->work, qlt_do_work);
3462 queue_work(qla_tgt_wq, &cmd->work); 3808 queue_work(qla_tgt_wq, &cmd->work);
3463 return 0; 3809 return 0;
@@ -3471,6 +3817,7 @@ static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
3471 struct scsi_qla_host *vha = sess->vha; 3817 struct scsi_qla_host *vha = sess->vha;
3472 struct qla_hw_data *ha = vha->hw; 3818 struct qla_hw_data *ha = vha->hw;
3473 struct qla_tgt_mgmt_cmd *mcmd; 3819 struct qla_tgt_mgmt_cmd *mcmd;
3820 struct atio_from_isp *a = (struct atio_from_isp *)iocb;
3474 int res; 3821 int res;
3475 uint8_t tmr_func; 3822 uint8_t tmr_func;
3476 3823
@@ -3511,6 +3858,7 @@ static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
3511 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002, 3858 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002,
3512 "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx); 3859 "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx);
3513 tmr_func = TMR_LUN_RESET; 3860 tmr_func = TMR_LUN_RESET;
3861 abort_cmds_for_lun(vha, lun, a->u.isp24.fcp_hdr.s_id);
3514 break; 3862 break;
3515 3863
3516 case QLA_TGT_CLEAR_TS: 3864 case QLA_TGT_CLEAR_TS:
@@ -3599,6 +3947,9 @@ static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
3599 sizeof(struct atio_from_isp)); 3947 sizeof(struct atio_from_isp));
3600 } 3948 }
3601 3949
3950 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS)
3951 return -EFAULT;
3952
3602 return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0); 3953 return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
3603} 3954}
3604 3955
@@ -3664,22 +4015,280 @@ static int qlt_abort_task(struct scsi_qla_host *vha,
3664 return __qlt_abort_task(vha, iocb, sess); 4015 return __qlt_abort_task(vha, iocb, sess);
3665} 4016}
3666 4017
4018void qlt_logo_completion_handler(fc_port_t *fcport, int rc)
4019{
4020 if (fcport->tgt_session) {
4021 if (rc != MBS_COMMAND_COMPLETE) {
4022 ql_dbg(ql_dbg_tgt_mgt, fcport->vha, 0xf093,
4023 "%s: se_sess %p / sess %p from"
4024 " port %8phC loop_id %#04x s_id %02x:%02x:%02x"
4025 " LOGO failed: %#x\n",
4026 __func__,
4027 fcport->tgt_session->se_sess,
4028 fcport->tgt_session,
4029 fcport->port_name, fcport->loop_id,
4030 fcport->d_id.b.domain, fcport->d_id.b.area,
4031 fcport->d_id.b.al_pa, rc);
4032 }
4033
4034 fcport->tgt_session->logout_completed = 1;
4035 }
4036}
4037
4038static void qlt_swap_imm_ntfy_iocb(struct imm_ntfy_from_isp *a,
4039 struct imm_ntfy_from_isp *b)
4040{
4041 struct imm_ntfy_from_isp tmp;
4042 memcpy(&tmp, a, sizeof(struct imm_ntfy_from_isp));
4043 memcpy(a, b, sizeof(struct imm_ntfy_from_isp));
4044 memcpy(b, &tmp, sizeof(struct imm_ntfy_from_isp));
4045}
4046
4047/*
4048* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list)
4049*
4050* Schedules sessions with matching port_id/loop_id but different wwn for
4051* deletion. Returns existing session with matching wwn if present.
4052* Null otherwise.
4053*/
4054static struct qla_tgt_sess *
4055qlt_find_sess_invalidate_other(struct qla_tgt *tgt, uint64_t wwn,
4056 port_id_t port_id, uint16_t loop_id)
4057{
4058 struct qla_tgt_sess *sess = NULL, *other_sess;
4059 uint64_t other_wwn;
4060
4061 list_for_each_entry(other_sess, &tgt->sess_list, sess_list_entry) {
4062
4063 other_wwn = wwn_to_u64(other_sess->port_name);
4064
4065 if (wwn == other_wwn) {
4066 WARN_ON(sess);
4067 sess = other_sess;
4068 continue;
4069 }
4070
4071 /* find other sess with nport_id collision */
4072 if (port_id.b24 == other_sess->s_id.b24) {
4073 if (loop_id != other_sess->loop_id) {
4074 ql_dbg(ql_dbg_tgt_tmr, tgt->vha, 0x1000c,
4075 "Invalidating sess %p loop_id %d wwn %llx.\n",
4076 other_sess, other_sess->loop_id, other_wwn);
4077
4078 /*
4079 * logout_on_delete is set by default, but another
4080 * session that has the same s_id/loop_id combo
4081 * might have cleared it when requested this session
4082 * deletion, so don't touch it
4083 */
4084 qlt_schedule_sess_for_deletion(other_sess, true);
4085 } else {
4086 /*
4087 * Another wwn used to have our s_id/loop_id
4088 * combo - kill the session, but don't log out
4089 */
4090 sess->logout_on_delete = 0;
4091 qlt_schedule_sess_for_deletion(other_sess,
4092 true);
4093 }
4094 continue;
4095 }
4096
4097 /* find other sess with nport handle collision */
4098 if (loop_id == other_sess->loop_id) {
4099 ql_dbg(ql_dbg_tgt_tmr, tgt->vha, 0x1000d,
4100 "Invalidating sess %p loop_id %d wwn %llx.\n",
4101 other_sess, other_sess->loop_id, other_wwn);
4102
4103 /* Same loop_id but different s_id
4104 * Ok to kill and logout */
4105 qlt_schedule_sess_for_deletion(other_sess, true);
4106 }
4107 }
4108
4109 return sess;
4110}
4111
4112/* Abort any commands for this s_id waiting on qla_tgt_wq workqueue */
4113static int abort_cmds_for_s_id(struct scsi_qla_host *vha, port_id_t *s_id)
4114{
4115 struct qla_tgt_sess_op *op;
4116 struct qla_tgt_cmd *cmd;
4117 uint32_t key;
4118 int count = 0;
4119
4120 key = (((u32)s_id->b.domain << 16) |
4121 ((u32)s_id->b.area << 8) |
4122 ((u32)s_id->b.al_pa));
4123
4124 spin_lock(&vha->cmd_list_lock);
4125 list_for_each_entry(op, &vha->qla_sess_op_cmd_list, cmd_list) {
4126 uint32_t op_key = sid_to_key(op->atio.u.isp24.fcp_hdr.s_id);
4127 if (op_key == key) {
4128 op->aborted = true;
4129 count++;
4130 }
4131 }
4132 list_for_each_entry(cmd, &vha->qla_cmd_list, cmd_list) {
4133 uint32_t cmd_key = sid_to_key(cmd->atio.u.isp24.fcp_hdr.s_id);
4134 if (cmd_key == key) {
4135 cmd->state = QLA_TGT_STATE_ABORTED;
4136 count++;
4137 }
4138 }
4139 spin_unlock(&vha->cmd_list_lock);
4140
4141 return count;
4142}
4143
3667/* 4144/*
3668 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire 4145 * ha->hardware_lock supposed to be held on entry. Might drop it, then reaquire
3669 */ 4146 */
3670static int qlt_24xx_handle_els(struct scsi_qla_host *vha, 4147static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
3671 struct imm_ntfy_from_isp *iocb) 4148 struct imm_ntfy_from_isp *iocb)
3672{ 4149{
4150 struct qla_tgt *tgt = vha->vha_tgt.qla_tgt;
4151 struct qla_hw_data *ha = vha->hw;
4152 struct qla_tgt_sess *sess = NULL;
4153 uint64_t wwn;
4154 port_id_t port_id;
4155 uint16_t loop_id;
4156 uint16_t wd3_lo;
3673 int res = 0; 4157 int res = 0;
3674 4158
4159 wwn = wwn_to_u64(iocb->u.isp24.port_name);
4160
4161 port_id.b.domain = iocb->u.isp24.port_id[2];
4162 port_id.b.area = iocb->u.isp24.port_id[1];
4163 port_id.b.al_pa = iocb->u.isp24.port_id[0];
4164 port_id.b.rsvd_1 = 0;
4165
4166 loop_id = le16_to_cpu(iocb->u.isp24.nport_handle);
4167
3675 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026, 4168 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026,
3676 "qla_target(%d): Port ID: 0x%3phC ELS opcode: 0x%02x\n", 4169 "qla_target(%d): Port ID: 0x%3phC ELS opcode: 0x%02x\n",
3677 vha->vp_idx, iocb->u.isp24.port_id, iocb->u.isp24.status_subcode); 4170 vha->vp_idx, iocb->u.isp24.port_id, iocb->u.isp24.status_subcode);
3678 4171
4172 /* res = 1 means ack at the end of thread
4173 * res = 0 means ack async/later.
4174 */
3679 switch (iocb->u.isp24.status_subcode) { 4175 switch (iocb->u.isp24.status_subcode) {
3680 case ELS_PLOGI: 4176 case ELS_PLOGI:
3681 case ELS_FLOGI: 4177
4178 /* Mark all stale commands in qla_tgt_wq for deletion */
4179 abort_cmds_for_s_id(vha, &port_id);
4180
4181 if (wwn)
4182 sess = qlt_find_sess_invalidate_other(tgt, wwn,
4183 port_id, loop_id);
4184
4185 if (!sess || IS_SW_RESV_ADDR(sess->s_id)) {
4186 res = 1;
4187 break;
4188 }
4189
4190 if (sess->plogi_ack_needed) {
4191 /*
4192 * Initiator sent another PLOGI before last PLOGI could
4193 * finish. Swap plogi iocbs and terminate old one
4194 * without acking, new one will get acked when session
4195 * deletion completes.
4196 */
4197 ql_log(ql_log_warn, sess->vha, 0xf094,
4198 "sess %p received double plogi.\n", sess);
4199
4200 qlt_swap_imm_ntfy_iocb(iocb, &sess->tm_iocb);
4201
4202 qlt_send_term_imm_notif(vha, iocb, 1);
4203
4204 res = 0;
4205 break;
4206 }
4207
4208 res = 0;
4209
4210 /*
4211 * Save immediate Notif IOCB for Ack when sess is done
4212 * and being deleted.
4213 */
4214 memcpy(&sess->tm_iocb, iocb, sizeof(sess->tm_iocb));
4215 sess->plogi_ack_needed = 1;
4216
4217 /*
4218 * Under normal circumstances we want to release nport handle
4219 * during LOGO process to avoid nport handle leaks inside FW.
4220 * The exception is when LOGO is done while another PLOGI with
4221 * the same nport handle is waiting as might be the case here.
4222 * Note: there is always a possibily of a race where session
4223 * deletion has already started for other reasons (e.g. ACL
4224 * removal) and now PLOGI arrives:
4225 * 1. if PLOGI arrived in FW after nport handle has been freed,
4226 * FW must have assigned this PLOGI a new/same handle and we
4227 * can proceed ACK'ing it as usual when session deletion
4228 * completes.
4229 * 2. if PLOGI arrived in FW before LOGO with LCF_FREE_NPORT
4230 * bit reached it, the handle has now been released. We'll
4231 * get an error when we ACK this PLOGI. Nothing will be sent
4232 * back to initiator. Initiator should eventually retry
4233 * PLOGI and situation will correct itself.
4234 */
4235 sess->keep_nport_handle = ((sess->loop_id == loop_id) &&
4236 (sess->s_id.b24 == port_id.b24));
4237 qlt_schedule_sess_for_deletion(sess, true);
4238 break;
4239
3682 case ELS_PRLI: 4240 case ELS_PRLI:
4241 wd3_lo = le16_to_cpu(iocb->u.isp24.u.prli.wd3_lo);
4242
4243 if (wwn)
4244 sess = qlt_find_sess_invalidate_other(tgt, wwn, port_id,
4245 loop_id);
4246
4247 if (sess != NULL) {
4248 if (sess->deleted) {
4249 /*
4250 * Impatient initiator sent PRLI before last
4251 * PLOGI could finish. Will force him to re-try,
4252 * while last one finishes.
4253 */
4254 ql_log(ql_log_warn, sess->vha, 0xf095,
4255 "sess %p PRLI received, before plogi ack.\n",
4256 sess);
4257 qlt_send_term_imm_notif(vha, iocb, 1);
4258 res = 0;
4259 break;
4260 }
4261
4262 /*
4263 * This shouldn't happen under normal circumstances,
4264 * since we have deleted the old session during PLOGI
4265 */
4266 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf096,
4267 "PRLI (loop_id %#04x) for existing sess %p (loop_id %#04x)\n",
4268 sess->loop_id, sess, iocb->u.isp24.nport_handle);
4269
4270 sess->local = 0;
4271 sess->loop_id = loop_id;
4272 sess->s_id = port_id;
4273
4274 if (wd3_lo & BIT_7)
4275 sess->conf_compl_supported = 1;
4276
4277 }
4278 res = 1; /* send notify ack */
4279
4280 /* Make session global (not used in fabric mode) */
4281 if (ha->current_topology != ISP_CFG_F) {
4282 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
4283 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
4284 qla2xxx_wake_dpc(vha);
4285 } else {
4286 /* todo: else - create sess here. */
4287 res = 1; /* send notify ack */
4288 }
4289
4290 break;
4291
3683 case ELS_LOGO: 4292 case ELS_LOGO:
3684 case ELS_PRLO: 4293 case ELS_PRLO:
3685 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS); 4294 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
@@ -3697,6 +4306,7 @@ static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
3697 break; 4306 break;
3698 } 4307 }
3699 4308
4309 case ELS_FLOGI: /* should never happen */
3700 default: 4310 default:
3701 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061, 4311 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
3702 "qla_target(%d): Unsupported ELS command %x " 4312 "qla_target(%d): Unsupported ELS command %x "
@@ -5012,6 +5622,11 @@ static void qlt_abort_work(struct qla_tgt *tgt,
5012 if (!sess) 5622 if (!sess)
5013 goto out_term; 5623 goto out_term;
5014 } else { 5624 } else {
5625 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
5626 sess = NULL;
5627 goto out_term;
5628 }
5629
5015 kref_get(&sess->se_sess->sess_kref); 5630 kref_get(&sess->se_sess->sess_kref);
5016 } 5631 }
5017 5632
@@ -5066,6 +5681,11 @@ static void qlt_tmr_work(struct qla_tgt *tgt,
5066 if (!sess) 5681 if (!sess)
5067 goto out_term; 5682 goto out_term;
5068 } else { 5683 } else {
5684 if (sess->deleted == QLA_SESS_DELETION_IN_PROGRESS) {
5685 sess = NULL;
5686 goto out_term;
5687 }
5688
5069 kref_get(&sess->se_sess->sess_kref); 5689 kref_get(&sess->se_sess->sess_kref);
5070 } 5690 }
5071 5691
@@ -5552,6 +6172,7 @@ qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
5552 6172
5553 /* Adjust ring index */ 6173 /* Adjust ring index */
5554 WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index); 6174 WRT_REG_DWORD(ISP_ATIO_Q_OUT(vha), ha->tgt.atio_ring_index);
6175 RD_REG_DWORD_RELAXED(ISP_ATIO_Q_OUT(vha));
5555} 6176}
5556 6177
5557void 6178void
@@ -5793,7 +6414,7 @@ qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
5793 if (!QLA_TGT_MODE_ENABLED()) 6414 if (!QLA_TGT_MODE_ENABLED())
5794 return; 6415 return;
5795 6416
5796 if (ha->mqenable || IS_QLA83XX(ha)) { 6417 if (ha->mqenable || IS_QLA83XX(ha) || IS_QLA27XX(ha)) {
5797 ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in; 6418 ISP_ATIO_Q_IN(base_vha) = &ha->mqiobase->isp25mq.atio_q_in;
5798 ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out; 6419 ISP_ATIO_Q_OUT(base_vha) = &ha->mqiobase->isp25mq.atio_q_out;
5799 } else { 6420 } else {
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 985d76dd706b..bca584ae45b7 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -167,7 +167,24 @@ struct imm_ntfy_from_isp {
167 uint32_t srr_rel_offs; 167 uint32_t srr_rel_offs;
168 uint16_t srr_ui; 168 uint16_t srr_ui;
169 uint16_t srr_ox_id; 169 uint16_t srr_ox_id;
170 uint8_t reserved_4[19]; 170 union {
171 struct {
172 uint8_t node_name[8];
173 } plogi; /* PLOGI/ADISC/PDISC */
174 struct {
175 /* PRLI word 3 bit 0-15 */
176 uint16_t wd3_lo;
177 uint8_t resv0[6];
178 } prli;
179 struct {
180 uint8_t port_id[3];
181 uint8_t resv1;
182 uint16_t nport_handle;
183 uint16_t resv2;
184 } req_els;
185 } u;
186 uint8_t port_name[8];
187 uint8_t resv3[3];
171 uint8_t vp_index; 188 uint8_t vp_index;
172 uint32_t reserved_5; 189 uint32_t reserved_5;
173 uint8_t port_id[3]; 190 uint8_t port_id[3];
@@ -234,6 +251,7 @@ struct nack_to_isp {
234 uint8_t reserved[2]; 251 uint8_t reserved[2];
235 uint16_t ox_id; 252 uint16_t ox_id;
236} __packed; 253} __packed;
254#define NOTIFY_ACK_FLAGS_TERMINATE BIT_3
237#define NOTIFY_ACK_SRR_FLAGS_ACCEPT 0 255#define NOTIFY_ACK_SRR_FLAGS_ACCEPT 0
238#define NOTIFY_ACK_SRR_FLAGS_REJECT 1 256#define NOTIFY_ACK_SRR_FLAGS_REJECT 1
239 257
@@ -790,13 +808,6 @@ int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
790#define FC_TM_REJECT 4 808#define FC_TM_REJECT 4
791#define FC_TM_FAILED 5 809#define FC_TM_FAILED 5
792 810
793/*
794 * Error code of qlt_pre_xmit_response() meaning that cmd's exchange was
795 * terminated, so no more actions is needed and success should be returned
796 * to target.
797 */
798#define QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED 0x1717
799
800#if (BITS_PER_LONG > 32) || defined(CONFIG_HIGHMEM64G) 811#if (BITS_PER_LONG > 32) || defined(CONFIG_HIGHMEM64G)
801#define pci_dma_lo32(a) (a & 0xffffffff) 812#define pci_dma_lo32(a) (a & 0xffffffff)
802#define pci_dma_hi32(a) ((((a) >> 16)>>16) & 0xffffffff) 813#define pci_dma_hi32(a) ((((a) >> 16)>>16) & 0xffffffff)
@@ -874,6 +885,15 @@ struct qla_tgt_sess_op {
874 struct scsi_qla_host *vha; 885 struct scsi_qla_host *vha;
875 struct atio_from_isp atio; 886 struct atio_from_isp atio;
876 struct work_struct work; 887 struct work_struct work;
888 struct list_head cmd_list;
889 bool aborted;
890};
891
892enum qla_sess_deletion {
893 QLA_SESS_DELETION_NONE = 0,
894 QLA_SESS_DELETION_PENDING = 1, /* hopefully we can get rid of
895 * this one */
896 QLA_SESS_DELETION_IN_PROGRESS = 2,
877}; 897};
878 898
879/* 899/*
@@ -884,8 +904,15 @@ struct qla_tgt_sess {
884 port_id_t s_id; 904 port_id_t s_id;
885 905
886 unsigned int conf_compl_supported:1; 906 unsigned int conf_compl_supported:1;
887 unsigned int deleted:1; 907 unsigned int deleted:2;
888 unsigned int local:1; 908 unsigned int local:1;
909 unsigned int logout_on_delete:1;
910 unsigned int plogi_ack_needed:1;
911 unsigned int keep_nport_handle:1;
912
913 unsigned char logout_completed;
914
915 int generation;
889 916
890 struct se_session *se_sess; 917 struct se_session *se_sess;
891 struct scsi_qla_host *vha; 918 struct scsi_qla_host *vha;
@@ -897,6 +924,10 @@ struct qla_tgt_sess {
897 924
898 uint8_t port_name[WWN_SIZE]; 925 uint8_t port_name[WWN_SIZE];
899 struct work_struct free_work; 926 struct work_struct free_work;
927
928 union {
929 struct imm_ntfy_from_isp tm_iocb;
930 };
900}; 931};
901 932
902struct qla_tgt_cmd { 933struct qla_tgt_cmd {
@@ -912,7 +943,6 @@ struct qla_tgt_cmd {
912 unsigned int conf_compl_supported:1; 943 unsigned int conf_compl_supported:1;
913 unsigned int sg_mapped:1; 944 unsigned int sg_mapped:1;
914 unsigned int free_sg:1; 945 unsigned int free_sg:1;
915 unsigned int aborted:1; /* Needed in case of SRR */
916 unsigned int write_data_transferred:1; 946 unsigned int write_data_transferred:1;
917 unsigned int ctx_dsd_alloced:1; 947 unsigned int ctx_dsd_alloced:1;
918 unsigned int q_full:1; 948 unsigned int q_full:1;
@@ -961,6 +991,9 @@ struct qla_tgt_cmd {
961 * BIT_14 - Back end data received/sent. 991 * BIT_14 - Back end data received/sent.
962 * BIT_15 - SRR prepare ctio 992 * BIT_15 - SRR prepare ctio
963 * BIT_16 - complete free 993 * BIT_16 - complete free
994 * BIT_17 - flush - qlt_abort_cmd_on_host_reset
995 * BIT_18 - completion w/abort status
996 * BIT_19 - completion w/unknown status
964 */ 997 */
965 uint32_t cmd_flags; 998 uint32_t cmd_flags;
966}; 999};
@@ -1026,6 +1059,10 @@ struct qla_tgt_srr_ctio {
1026 struct qla_tgt_cmd *cmd; 1059 struct qla_tgt_cmd *cmd;
1027}; 1060};
1028 1061
1062/* Check for Switch reserved address */
1063#define IS_SW_RESV_ADDR(_s_id) \
1064 ((_s_id.b.domain == 0xff) && (_s_id.b.area == 0xfc))
1065
1029#define QLA_TGT_XMIT_DATA 1 1066#define QLA_TGT_XMIT_DATA 1
1030#define QLA_TGT_XMIT_STATUS 2 1067#define QLA_TGT_XMIT_STATUS 2
1031#define QLA_TGT_XMIT_ALL (QLA_TGT_XMIT_STATUS|QLA_TGT_XMIT_DATA) 1068#define QLA_TGT_XMIT_ALL (QLA_TGT_XMIT_STATUS|QLA_TGT_XMIT_DATA)
@@ -1043,7 +1080,7 @@ extern int qlt_lport_register(void *, u64, u64, u64,
1043extern void qlt_lport_deregister(struct scsi_qla_host *); 1080extern void qlt_lport_deregister(struct scsi_qla_host *);
1044extern void qlt_unreg_sess(struct qla_tgt_sess *); 1081extern void qlt_unreg_sess(struct qla_tgt_sess *);
1045extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *); 1082extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *);
1046extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *); 1083extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *, int);
1047extern int __init qlt_init(void); 1084extern int __init qlt_init(void);
1048extern void qlt_exit(void); 1085extern void qlt_exit(void);
1049extern void qlt_update_vp_map(struct scsi_qla_host *, int); 1086extern void qlt_update_vp_map(struct scsi_qla_host *, int);
@@ -1073,12 +1110,23 @@ static inline void qla_reverse_ini_mode(struct scsi_qla_host *ha)
1073 ha->host->active_mode |= MODE_INITIATOR; 1110 ha->host->active_mode |= MODE_INITIATOR;
1074} 1111}
1075 1112
1113static inline uint32_t sid_to_key(const uint8_t *s_id)
1114{
1115 uint32_t key;
1116
1117 key = (((unsigned long)s_id[0] << 16) |
1118 ((unsigned long)s_id[1] << 8) |
1119 (unsigned long)s_id[2]);
1120 return key;
1121}
1122
1076/* 1123/*
1077 * Exported symbols from qla_target.c LLD logic used by qla2xxx code.. 1124 * Exported symbols from qla_target.c LLD logic used by qla2xxx code..
1078 */ 1125 */
1079extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *); 1126extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *);
1080extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *); 1127extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *);
1081extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t); 1128extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t);
1129extern void qlt_abort_cmd(struct qla_tgt_cmd *);
1082extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *); 1130extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *);
1083extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *); 1131extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *);
1084extern void qlt_free_cmd(struct qla_tgt_cmd *cmd); 1132extern void qlt_free_cmd(struct qla_tgt_cmd *cmd);
@@ -1109,5 +1157,7 @@ extern void qlt_stop_phase2(struct qla_tgt *);
1109extern irqreturn_t qla83xx_msix_atio_q(int, void *); 1157extern irqreturn_t qla83xx_msix_atio_q(int, void *);
1110extern void qlt_83xx_iospace_config(struct qla_hw_data *); 1158extern void qlt_83xx_iospace_config(struct qla_hw_data *);
1111extern int qlt_free_qfull_cmds(struct scsi_qla_host *); 1159extern int qlt_free_qfull_cmds(struct scsi_qla_host *);
1160extern void qlt_logo_completion_handler(fc_port_t *, int);
1161extern void qlt_do_generation_tick(struct scsi_qla_host *, int *);
1112 1162
1113#endif /* __QLA_TARGET_H */ 1163#endif /* __QLA_TARGET_H */
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index d9a8c6084346..9224a06646e6 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -374,7 +374,7 @@ static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
374{ 374{
375 struct qla_tgt_cmd *cmd = container_of(se_cmd, 375 struct qla_tgt_cmd *cmd = container_of(se_cmd,
376 struct qla_tgt_cmd, se_cmd); 376 struct qla_tgt_cmd, se_cmd);
377 377 cmd->cmd_flags |= BIT_3;
378 cmd->bufflen = se_cmd->data_length; 378 cmd->bufflen = se_cmd->data_length;
379 cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); 379 cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
380 380
@@ -405,7 +405,7 @@ static int tcm_qla2xxx_write_pending_status(struct se_cmd *se_cmd)
405 se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) { 405 se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) {
406 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); 406 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
407 wait_for_completion_timeout(&se_cmd->t_transport_stop_comp, 407 wait_for_completion_timeout(&se_cmd->t_transport_stop_comp,
408 3000); 408 3 * HZ);
409 return 0; 409 return 0;
410 } 410 }
411 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags); 411 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
@@ -541,12 +541,10 @@ static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
541 cmd->cmd_flags |= BIT_4; 541 cmd->cmd_flags |= BIT_4;
542 cmd->bufflen = se_cmd->data_length; 542 cmd->bufflen = se_cmd->data_length;
543 cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); 543 cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
544 cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
545 544
546 cmd->sg_cnt = se_cmd->t_data_nents; 545 cmd->sg_cnt = se_cmd->t_data_nents;
547 cmd->sg = se_cmd->t_data_sg; 546 cmd->sg = se_cmd->t_data_sg;
548 cmd->offset = 0; 547 cmd->offset = 0;
549 cmd->cmd_flags |= BIT_3;
550 548
551 cmd->prot_sg_cnt = se_cmd->t_prot_nents; 549 cmd->prot_sg_cnt = se_cmd->t_prot_nents;
552 cmd->prot_sg = se_cmd->t_prot_sg; 550 cmd->prot_sg = se_cmd->t_prot_sg;
@@ -571,7 +569,6 @@ static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
571 cmd->sg_cnt = 0; 569 cmd->sg_cnt = 0;
572 cmd->offset = 0; 570 cmd->offset = 0;
573 cmd->dma_data_direction = target_reverse_dma_direction(se_cmd); 571 cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
574 cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
575 if (cmd->cmd_flags & BIT_5) { 572 if (cmd->cmd_flags & BIT_5) {
576 pr_crit("Bit_5 already set for cmd = %p.\n", cmd); 573 pr_crit("Bit_5 already set for cmd = %p.\n", cmd);
577 dump_stack(); 574 dump_stack();
@@ -636,14 +633,7 @@ static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)
636{ 633{
637 struct qla_tgt_cmd *cmd = container_of(se_cmd, 634 struct qla_tgt_cmd *cmd = container_of(se_cmd,
638 struct qla_tgt_cmd, se_cmd); 635 struct qla_tgt_cmd, se_cmd);
639 struct scsi_qla_host *vha = cmd->vha; 636 qlt_abort_cmd(cmd);
640 struct qla_hw_data *ha = vha->hw;
641
642 if (!cmd->sg_mapped)
643 return;
644
645 pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
646 cmd->sg_mapped = 0;
647} 637}
648 638
649static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *, 639static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
@@ -1149,9 +1139,7 @@ static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id(
1149 return NULL; 1139 return NULL;
1150 } 1140 }
1151 1141
1152 key = (((unsigned long)s_id[0] << 16) | 1142 key = sid_to_key(s_id);
1153 ((unsigned long)s_id[1] << 8) |
1154 (unsigned long)s_id[2]);
1155 pr_debug("find_sess_by_s_id: 0x%06x\n", key); 1143 pr_debug("find_sess_by_s_id: 0x%06x\n", key);
1156 1144
1157 se_nacl = btree_lookup32(&lport->lport_fcport_map, key); 1145 se_nacl = btree_lookup32(&lport->lport_fcport_map, key);
@@ -1186,9 +1174,7 @@ static void tcm_qla2xxx_set_sess_by_s_id(
1186 void *slot; 1174 void *slot;
1187 int rc; 1175 int rc;
1188 1176
1189 key = (((unsigned long)s_id[0] << 16) | 1177 key = sid_to_key(s_id);
1190 ((unsigned long)s_id[1] << 8) |
1191 (unsigned long)s_id[2]);
1192 pr_debug("set_sess_by_s_id: %06x\n", key); 1178 pr_debug("set_sess_by_s_id: %06x\n", key);
1193 1179
1194 slot = btree_lookup32(&lport->lport_fcport_map, key); 1180 slot = btree_lookup32(&lport->lport_fcport_map, key);
@@ -1544,6 +1530,10 @@ static void tcm_qla2xxx_update_sess(struct qla_tgt_sess *sess, port_id_t s_id,
1544 } 1530 }
1545 1531
1546 sess->conf_compl_supported = conf_compl_supported; 1532 sess->conf_compl_supported = conf_compl_supported;
1533
1534 /* Reset logout parameters to default */
1535 sess->logout_on_delete = 1;
1536 sess->keep_nport_handle = 0;
1547} 1537}
1548 1538
1549/* 1539/*
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 106884a5444e..cfadccef045c 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -944,7 +944,7 @@ void scsi_eh_prep_cmnd(struct scsi_cmnd *scmd, struct scsi_eh_save *ses,
944 scmd->sdb.length); 944 scmd->sdb.length);
945 scmd->sdb.table.sgl = &ses->sense_sgl; 945 scmd->sdb.table.sgl = &ses->sense_sgl;
946 scmd->sc_data_direction = DMA_FROM_DEVICE; 946 scmd->sc_data_direction = DMA_FROM_DEVICE;
947 scmd->sdb.table.nents = 1; 947 scmd->sdb.table.nents = scmd->sdb.table.orig_nents = 1;
948 scmd->cmnd[0] = REQUEST_SENSE; 948 scmd->cmnd[0] = REQUEST_SENSE;
949 scmd->cmnd[4] = scmd->sdb.length; 949 scmd->cmnd[4] = scmd->sdb.length;
950 scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]); 950 scmd->cmd_len = COMMAND_SIZE(scmd->cmnd[0]);
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index b1a263137a23..448ebdaa3d69 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -583,7 +583,7 @@ static struct scatterlist *scsi_sg_alloc(unsigned int nents, gfp_t gfp_mask)
583 583
584static void scsi_free_sgtable(struct scsi_data_buffer *sdb, bool mq) 584static void scsi_free_sgtable(struct scsi_data_buffer *sdb, bool mq)
585{ 585{
586 if (mq && sdb->table.nents <= SCSI_MAX_SG_SEGMENTS) 586 if (mq && sdb->table.orig_nents <= SCSI_MAX_SG_SEGMENTS)
587 return; 587 return;
588 __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, mq, scsi_sg_free); 588 __sg_free_table(&sdb->table, SCSI_MAX_SG_SEGMENTS, mq, scsi_sg_free);
589} 589}
@@ -597,8 +597,8 @@ static int scsi_alloc_sgtable(struct scsi_data_buffer *sdb, int nents, bool mq)
597 597
598 if (mq) { 598 if (mq) {
599 if (nents <= SCSI_MAX_SG_SEGMENTS) { 599 if (nents <= SCSI_MAX_SG_SEGMENTS) {
600 sdb->table.nents = nents; 600 sdb->table.nents = sdb->table.orig_nents = nents;
601 sg_init_table(sdb->table.sgl, sdb->table.nents); 601 sg_init_table(sdb->table.sgl, nents);
602 return 0; 602 return 0;
603 } 603 }
604 first_chunk = sdb->table.sgl; 604 first_chunk = sdb->table.sgl;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 3b2fcb4fada0..a20da8c25b4f 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -2770,9 +2770,9 @@ static int sd_revalidate_disk(struct gendisk *disk)
2770 max_xfer = sdkp->max_xfer_blocks; 2770 max_xfer = sdkp->max_xfer_blocks;
2771 max_xfer <<= ilog2(sdp->sector_size) - 9; 2771 max_xfer <<= ilog2(sdp->sector_size) - 9;
2772 2772
2773 max_xfer = min_not_zero(queue_max_hw_sectors(sdkp->disk->queue), 2773 sdkp->disk->queue->limits.max_sectors =
2774 max_xfer); 2774 min_not_zero(queue_max_hw_sectors(sdkp->disk->queue), max_xfer);
2775 blk_queue_max_hw_sectors(sdkp->disk->queue, max_xfer); 2775
2776 set_capacity(disk, sdkp->capacity); 2776 set_capacity(disk, sdkp->capacity);
2777 sd_config_write_same(sdkp); 2777 sd_config_write_same(sdkp);
2778 kfree(buffer); 2778 kfree(buffer);
diff --git a/drivers/scsi/virtio_scsi.c b/drivers/scsi/virtio_scsi.c
index 285f77544c36..7dbbb29d24c6 100644
--- a/drivers/scsi/virtio_scsi.c
+++ b/drivers/scsi/virtio_scsi.c
@@ -949,7 +949,7 @@ static int virtscsi_probe(struct virtio_device *vdev)
949{ 949{
950 struct Scsi_Host *shost; 950 struct Scsi_Host *shost;
951 struct virtio_scsi *vscsi; 951 struct virtio_scsi *vscsi;
952 int err, host_prot; 952 int err;
953 u32 sg_elems, num_targets; 953 u32 sg_elems, num_targets;
954 u32 cmd_per_lun; 954 u32 cmd_per_lun;
955 u32 num_queues; 955 u32 num_queues;
@@ -1009,6 +1009,8 @@ static int virtscsi_probe(struct virtio_device *vdev)
1009 1009
1010#ifdef CONFIG_BLK_DEV_INTEGRITY 1010#ifdef CONFIG_BLK_DEV_INTEGRITY
1011 if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) { 1011 if (virtio_has_feature(vdev, VIRTIO_SCSI_F_T10_PI)) {
1012 int host_prot;
1013
1012 host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION | 1014 host_prot = SHOST_DIF_TYPE1_PROTECTION | SHOST_DIF_TYPE2_PROTECTION |
1013 SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION | 1015 SHOST_DIF_TYPE3_PROTECTION | SHOST_DIX_TYPE1_PROTECTION |
1014 SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION; 1016 SHOST_DIX_TYPE2_PROTECTION | SHOST_DIX_TYPE3_PROTECTION;
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 0cae1694014d..b0f30fb68914 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -612,7 +612,7 @@ config SPI_XTENSA_XTFPGA
612 612
613config SPI_ZYNQMP_GQSPI 613config SPI_ZYNQMP_GQSPI
614 tristate "Xilinx ZynqMP GQSPI controller" 614 tristate "Xilinx ZynqMP GQSPI controller"
615 depends on SPI_MASTER 615 depends on SPI_MASTER && HAS_DMA
616 help 616 help
617 Enables Xilinx GQSPI controller driver for Zynq UltraScale+ MPSoC. 617 Enables Xilinx GQSPI controller driver for Zynq UltraScale+ MPSoC.
618 618
diff --git a/drivers/spi/spi-img-spfi.c b/drivers/spi/spi-img-spfi.c
index 788e2b176a4f..acce90ac7371 100644
--- a/drivers/spi/spi-img-spfi.c
+++ b/drivers/spi/spi-img-spfi.c
@@ -40,6 +40,7 @@
40#define SPFI_CONTROL_SOFT_RESET BIT(11) 40#define SPFI_CONTROL_SOFT_RESET BIT(11)
41#define SPFI_CONTROL_SEND_DMA BIT(10) 41#define SPFI_CONTROL_SEND_DMA BIT(10)
42#define SPFI_CONTROL_GET_DMA BIT(9) 42#define SPFI_CONTROL_GET_DMA BIT(9)
43#define SPFI_CONTROL_SE BIT(8)
43#define SPFI_CONTROL_TMODE_SHIFT 5 44#define SPFI_CONTROL_TMODE_SHIFT 5
44#define SPFI_CONTROL_TMODE_MASK 0x7 45#define SPFI_CONTROL_TMODE_MASK 0x7
45#define SPFI_CONTROL_TMODE_SINGLE 0 46#define SPFI_CONTROL_TMODE_SINGLE 0
@@ -491,6 +492,7 @@ static void img_spfi_config(struct spi_master *master, struct spi_device *spi,
491 else if (xfer->tx_nbits == SPI_NBITS_QUAD && 492 else if (xfer->tx_nbits == SPI_NBITS_QUAD &&
492 xfer->rx_nbits == SPI_NBITS_QUAD) 493 xfer->rx_nbits == SPI_NBITS_QUAD)
493 val |= SPFI_CONTROL_TMODE_QUAD << SPFI_CONTROL_TMODE_SHIFT; 494 val |= SPFI_CONTROL_TMODE_QUAD << SPFI_CONTROL_TMODE_SHIFT;
495 val |= SPFI_CONTROL_SE;
494 spfi_writel(spfi, val, SPFI_CONTROL); 496 spfi_writel(spfi, val, SPFI_CONTROL);
495} 497}
496 498
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index eb7d3a6fb14c..f9deb84e4e55 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -201,8 +201,9 @@ static bool spi_imx_can_dma(struct spi_master *master, struct spi_device *spi,
201{ 201{
202 struct spi_imx_data *spi_imx = spi_master_get_devdata(master); 202 struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
203 203
204 if (spi_imx->dma_is_inited && (transfer->len > spi_imx->rx_wml) 204 if (spi_imx->dma_is_inited
205 && (transfer->len > spi_imx->tx_wml)) 205 && transfer->len > spi_imx->rx_wml * sizeof(u32)
206 && transfer->len > spi_imx->tx_wml * sizeof(u32))
206 return true; 207 return true;
207 return false; 208 return false;
208} 209}
diff --git a/drivers/spi/spi-zynqmp-gqspi.c b/drivers/spi/spi-zynqmp-gqspi.c
index 87b20a511a6b..f23f36ebaf3d 100644
--- a/drivers/spi/spi-zynqmp-gqspi.c
+++ b/drivers/spi/spi-zynqmp-gqspi.c
@@ -214,6 +214,7 @@ static void zynqmp_gqspi_selectslave(struct zynqmp_qspi *instanceptr,
214 case GQSPI_SELECT_FLASH_CS_BOTH: 214 case GQSPI_SELECT_FLASH_CS_BOTH:
215 instanceptr->genfifocs = GQSPI_GENFIFO_CS_LOWER | 215 instanceptr->genfifocs = GQSPI_GENFIFO_CS_LOWER |
216 GQSPI_GENFIFO_CS_UPPER; 216 GQSPI_GENFIFO_CS_UPPER;
217 break;
217 case GQSPI_SELECT_FLASH_CS_UPPER: 218 case GQSPI_SELECT_FLASH_CS_UPPER:
218 instanceptr->genfifocs = GQSPI_GENFIFO_CS_UPPER; 219 instanceptr->genfifocs = GQSPI_GENFIFO_CS_UPPER;
219 break; 220 break;
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index dd616ff0ffc5..c7de64171c45 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -693,6 +693,7 @@ static struct class *spidev_class;
693#ifdef CONFIG_OF 693#ifdef CONFIG_OF
694static const struct of_device_id spidev_dt_ids[] = { 694static const struct of_device_id spidev_dt_ids[] = {
695 { .compatible = "rohm,dh2228fv" }, 695 { .compatible = "rohm,dh2228fv" },
696 { .compatible = "lineartechnology,ltc2488" },
696 {}, 697 {},
697}; 698};
698MODULE_DEVICE_TABLE(of, spidev_dt_ids); 699MODULE_DEVICE_TABLE(of, spidev_dt_ids);
diff --git a/drivers/staging/comedi/drivers/das1800.c b/drivers/staging/comedi/drivers/das1800.c
index bfa42620a3f6..940781183fac 100644
--- a/drivers/staging/comedi/drivers/das1800.c
+++ b/drivers/staging/comedi/drivers/das1800.c
@@ -1266,6 +1266,7 @@ static const struct das1800_board *das1800_probe(struct comedi_device *dev)
1266 if (index == das1801hc || index == das1802hc) 1266 if (index == das1801hc || index == das1802hc)
1267 return board; 1267 return board;
1268 index = das1801hc; 1268 index = das1801hc;
1269 break;
1269 default: 1270 default:
1270 dev_err(dev->class_dev, 1271 dev_err(dev->class_dev,
1271 "Board model: probe returned 0x%x (unknown, please report)\n", 1272 "Board model: probe returned 0x%x (unknown, please report)\n",
diff --git a/drivers/staging/lustre/lustre/obdclass/debug.c b/drivers/staging/lustre/lustre/obdclass/debug.c
index 9c934e6d2ea1..c61add46b426 100644
--- a/drivers/staging/lustre/lustre/obdclass/debug.c
+++ b/drivers/staging/lustre/lustre/obdclass/debug.c
@@ -40,7 +40,7 @@
40 40
41#define DEBUG_SUBSYSTEM D_OTHER 41#define DEBUG_SUBSYSTEM D_OTHER
42 42
43#include <linux/unaligned/access_ok.h> 43#include <asm/unaligned.h>
44 44
45#include "../include/obd_support.h" 45#include "../include/obd_support.h"
46#include "../include/lustre_debug.h" 46#include "../include/lustre_debug.h"
diff --git a/drivers/staging/vt6655/device_main.c b/drivers/staging/vt6655/device_main.c
index b0c8e235b982..69bdc8f29b59 100644
--- a/drivers/staging/vt6655/device_main.c
+++ b/drivers/staging/vt6655/device_main.c
@@ -1483,8 +1483,9 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
1483 } 1483 }
1484 } 1484 }
1485 1485
1486 if (changed & BSS_CHANGED_ASSOC && priv->op_mode != NL80211_IFTYPE_AP) { 1486 if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_BEACON_INFO) &&
1487 if (conf->assoc) { 1487 priv->op_mode != NL80211_IFTYPE_AP) {
1488 if (conf->assoc && conf->beacon_rate) {
1488 CARDbUpdateTSF(priv, conf->beacon_rate->hw_value, 1489 CARDbUpdateTSF(priv, conf->beacon_rate->hw_value,
1489 conf->sync_tsf); 1490 conf->sync_tsf);
1490 1491
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 4e68b62193ed..cd77a064c772 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -3998,7 +3998,13 @@ get_immediate:
3998 } 3998 }
3999 3999
4000transport_err: 4000transport_err:
4001 iscsit_take_action_for_connection_exit(conn); 4001 /*
4002 * Avoid the normal connection failure code-path if this connection
4003 * is still within LOGIN mode, and iscsi_np process context is
4004 * responsible for cleaning up the early connection failure.
4005 */
4006 if (conn->conn_state != TARG_CONN_STATE_IN_LOGIN)
4007 iscsit_take_action_for_connection_exit(conn);
4002out: 4008out:
4003 return 0; 4009 return 0;
4004} 4010}
@@ -4082,7 +4088,7 @@ reject:
4082 4088
4083int iscsi_target_rx_thread(void *arg) 4089int iscsi_target_rx_thread(void *arg)
4084{ 4090{
4085 int ret; 4091 int ret, rc;
4086 u8 buffer[ISCSI_HDR_LEN], opcode; 4092 u8 buffer[ISCSI_HDR_LEN], opcode;
4087 u32 checksum = 0, digest = 0; 4093 u32 checksum = 0, digest = 0;
4088 struct iscsi_conn *conn = arg; 4094 struct iscsi_conn *conn = arg;
@@ -4092,10 +4098,16 @@ int iscsi_target_rx_thread(void *arg)
4092 * connection recovery / failure event can be triggered externally. 4098 * connection recovery / failure event can be triggered externally.
4093 */ 4099 */
4094 allow_signal(SIGINT); 4100 allow_signal(SIGINT);
4101 /*
4102 * Wait for iscsi_post_login_handler() to complete before allowing
4103 * incoming iscsi/tcp socket I/O, and/or failing the connection.
4104 */
4105 rc = wait_for_completion_interruptible(&conn->rx_login_comp);
4106 if (rc < 0)
4107 return 0;
4095 4108
4096 if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) { 4109 if (conn->conn_transport->transport_type == ISCSI_INFINIBAND) {
4097 struct completion comp; 4110 struct completion comp;
4098 int rc;
4099 4111
4100 init_completion(&comp); 4112 init_completion(&comp);
4101 rc = wait_for_completion_interruptible(&comp); 4113 rc = wait_for_completion_interruptible(&comp);
@@ -4532,7 +4544,18 @@ static void iscsit_logout_post_handler_closesession(
4532 struct iscsi_conn *conn) 4544 struct iscsi_conn *conn)
4533{ 4545{
4534 struct iscsi_session *sess = conn->sess; 4546 struct iscsi_session *sess = conn->sess;
4535 int sleep = cmpxchg(&conn->tx_thread_active, true, false); 4547 int sleep = 1;
4548 /*
4549 * Traditional iscsi/tcp will invoke this logic from TX thread
4550 * context during session logout, so clear tx_thread_active and
4551 * sleep if iscsit_close_connection() has not already occured.
4552 *
4553 * Since iser-target invokes this logic from it's own workqueue,
4554 * always sleep waiting for RX/TX thread shutdown to complete
4555 * within iscsit_close_connection().
4556 */
4557 if (conn->conn_transport->transport_type == ISCSI_TCP)
4558 sleep = cmpxchg(&conn->tx_thread_active, true, false);
4536 4559
4537 atomic_set(&conn->conn_logout_remove, 0); 4560 atomic_set(&conn->conn_logout_remove, 0);
4538 complete(&conn->conn_logout_comp); 4561 complete(&conn->conn_logout_comp);
@@ -4546,7 +4569,10 @@ static void iscsit_logout_post_handler_closesession(
4546static void iscsit_logout_post_handler_samecid( 4569static void iscsit_logout_post_handler_samecid(
4547 struct iscsi_conn *conn) 4570 struct iscsi_conn *conn)
4548{ 4571{
4549 int sleep = cmpxchg(&conn->tx_thread_active, true, false); 4572 int sleep = 1;
4573
4574 if (conn->conn_transport->transport_type == ISCSI_TCP)
4575 sleep = cmpxchg(&conn->tx_thread_active, true, false);
4550 4576
4551 atomic_set(&conn->conn_logout_remove, 0); 4577 atomic_set(&conn->conn_logout_remove, 0);
4552 complete(&conn->conn_logout_comp); 4578 complete(&conn->conn_logout_comp);
@@ -4765,6 +4791,7 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
4765 struct iscsi_session *sess; 4791 struct iscsi_session *sess;
4766 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; 4792 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
4767 struct se_session *se_sess, *se_sess_tmp; 4793 struct se_session *se_sess, *se_sess_tmp;
4794 LIST_HEAD(free_list);
4768 int session_count = 0; 4795 int session_count = 0;
4769 4796
4770 spin_lock_bh(&se_tpg->session_lock); 4797 spin_lock_bh(&se_tpg->session_lock);
@@ -4786,14 +4813,17 @@ int iscsit_release_sessions_for_tpg(struct iscsi_portal_group *tpg, int force)
4786 } 4813 }
4787 atomic_set(&sess->session_reinstatement, 1); 4814 atomic_set(&sess->session_reinstatement, 1);
4788 spin_unlock(&sess->conn_lock); 4815 spin_unlock(&sess->conn_lock);
4789 spin_unlock_bh(&se_tpg->session_lock);
4790 4816
4791 iscsit_free_session(sess); 4817 list_move_tail(&se_sess->sess_list, &free_list);
4792 spin_lock_bh(&se_tpg->session_lock); 4818 }
4819 spin_unlock_bh(&se_tpg->session_lock);
4820
4821 list_for_each_entry_safe(se_sess, se_sess_tmp, &free_list, sess_list) {
4822 sess = (struct iscsi_session *)se_sess->fabric_sess_ptr;
4793 4823
4824 iscsit_free_session(sess);
4794 session_count++; 4825 session_count++;
4795 } 4826 }
4796 spin_unlock_bh(&se_tpg->session_lock);
4797 4827
4798 pr_debug("Released %d iSCSI Session(s) from Target Portal" 4828 pr_debug("Released %d iSCSI Session(s) from Target Portal"
4799 " Group: %hu\n", session_count, tpg->tpgt); 4829 " Group: %hu\n", session_count, tpg->tpgt);
diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
index 3d0fe4ff5590..7e8f65e5448f 100644
--- a/drivers/target/iscsi/iscsi_target_login.c
+++ b/drivers/target/iscsi/iscsi_target_login.c
@@ -82,6 +82,7 @@ static struct iscsi_login *iscsi_login_init_conn(struct iscsi_conn *conn)
82 init_completion(&conn->conn_logout_comp); 82 init_completion(&conn->conn_logout_comp);
83 init_completion(&conn->rx_half_close_comp); 83 init_completion(&conn->rx_half_close_comp);
84 init_completion(&conn->tx_half_close_comp); 84 init_completion(&conn->tx_half_close_comp);
85 init_completion(&conn->rx_login_comp);
85 spin_lock_init(&conn->cmd_lock); 86 spin_lock_init(&conn->cmd_lock);
86 spin_lock_init(&conn->conn_usage_lock); 87 spin_lock_init(&conn->conn_usage_lock);
87 spin_lock_init(&conn->immed_queue_lock); 88 spin_lock_init(&conn->immed_queue_lock);
@@ -644,7 +645,7 @@ static void iscsi_post_login_start_timers(struct iscsi_conn *conn)
644 iscsit_start_nopin_timer(conn); 645 iscsit_start_nopin_timer(conn);
645} 646}
646 647
647static int iscsit_start_kthreads(struct iscsi_conn *conn) 648int iscsit_start_kthreads(struct iscsi_conn *conn)
648{ 649{
649 int ret = 0; 650 int ret = 0;
650 651
@@ -679,6 +680,7 @@ static int iscsit_start_kthreads(struct iscsi_conn *conn)
679 680
680 return 0; 681 return 0;
681out_tx: 682out_tx:
683 send_sig(SIGINT, conn->tx_thread, 1);
682 kthread_stop(conn->tx_thread); 684 kthread_stop(conn->tx_thread);
683 conn->tx_thread_active = false; 685 conn->tx_thread_active = false;
684out_bitmap: 686out_bitmap:
@@ -689,7 +691,7 @@ out_bitmap:
689 return ret; 691 return ret;
690} 692}
691 693
692int iscsi_post_login_handler( 694void iscsi_post_login_handler(
693 struct iscsi_np *np, 695 struct iscsi_np *np,
694 struct iscsi_conn *conn, 696 struct iscsi_conn *conn,
695 u8 zero_tsih) 697 u8 zero_tsih)
@@ -699,7 +701,6 @@ int iscsi_post_login_handler(
699 struct se_session *se_sess = sess->se_sess; 701 struct se_session *se_sess = sess->se_sess;
700 struct iscsi_portal_group *tpg = sess->tpg; 702 struct iscsi_portal_group *tpg = sess->tpg;
701 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg; 703 struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;
702 int rc;
703 704
704 iscsit_inc_conn_usage_count(conn); 705 iscsit_inc_conn_usage_count(conn);
705 706
@@ -739,10 +740,6 @@ int iscsi_post_login_handler(
739 sess->sess_ops->InitiatorName); 740 sess->sess_ops->InitiatorName);
740 spin_unlock_bh(&sess->conn_lock); 741 spin_unlock_bh(&sess->conn_lock);
741 742
742 rc = iscsit_start_kthreads(conn);
743 if (rc)
744 return rc;
745
746 iscsi_post_login_start_timers(conn); 743 iscsi_post_login_start_timers(conn);
747 /* 744 /*
748 * Determine CPU mask to ensure connection's RX and TX kthreads 745 * Determine CPU mask to ensure connection's RX and TX kthreads
@@ -751,15 +748,20 @@ int iscsi_post_login_handler(
751 iscsit_thread_get_cpumask(conn); 748 iscsit_thread_get_cpumask(conn);
752 conn->conn_rx_reset_cpumask = 1; 749 conn->conn_rx_reset_cpumask = 1;
753 conn->conn_tx_reset_cpumask = 1; 750 conn->conn_tx_reset_cpumask = 1;
754 751 /*
752 * Wakeup the sleeping iscsi_target_rx_thread() now that
753 * iscsi_conn is in TARG_CONN_STATE_LOGGED_IN state.
754 */
755 complete(&conn->rx_login_comp);
755 iscsit_dec_conn_usage_count(conn); 756 iscsit_dec_conn_usage_count(conn);
757
756 if (stop_timer) { 758 if (stop_timer) {
757 spin_lock_bh(&se_tpg->session_lock); 759 spin_lock_bh(&se_tpg->session_lock);
758 iscsit_stop_time2retain_timer(sess); 760 iscsit_stop_time2retain_timer(sess);
759 spin_unlock_bh(&se_tpg->session_lock); 761 spin_unlock_bh(&se_tpg->session_lock);
760 } 762 }
761 iscsit_dec_session_usage_count(sess); 763 iscsit_dec_session_usage_count(sess);
762 return 0; 764 return;
763 } 765 }
764 766
765 iscsi_set_session_parameters(sess->sess_ops, conn->param_list, 1); 767 iscsi_set_session_parameters(sess->sess_ops, conn->param_list, 1);
@@ -800,10 +802,6 @@ int iscsi_post_login_handler(
800 " iSCSI Target Portal Group: %hu\n", tpg->nsessions, tpg->tpgt); 802 " iSCSI Target Portal Group: %hu\n", tpg->nsessions, tpg->tpgt);
801 spin_unlock_bh(&se_tpg->session_lock); 803 spin_unlock_bh(&se_tpg->session_lock);
802 804
803 rc = iscsit_start_kthreads(conn);
804 if (rc)
805 return rc;
806
807 iscsi_post_login_start_timers(conn); 805 iscsi_post_login_start_timers(conn);
808 /* 806 /*
809 * Determine CPU mask to ensure connection's RX and TX kthreads 807 * Determine CPU mask to ensure connection's RX and TX kthreads
@@ -812,10 +810,12 @@ int iscsi_post_login_handler(
812 iscsit_thread_get_cpumask(conn); 810 iscsit_thread_get_cpumask(conn);
813 conn->conn_rx_reset_cpumask = 1; 811 conn->conn_rx_reset_cpumask = 1;
814 conn->conn_tx_reset_cpumask = 1; 812 conn->conn_tx_reset_cpumask = 1;
815 813 /*
814 * Wakeup the sleeping iscsi_target_rx_thread() now that
815 * iscsi_conn is in TARG_CONN_STATE_LOGGED_IN state.
816 */
817 complete(&conn->rx_login_comp);
816 iscsit_dec_conn_usage_count(conn); 818 iscsit_dec_conn_usage_count(conn);
817
818 return 0;
819} 819}
820 820
821static void iscsi_handle_login_thread_timeout(unsigned long data) 821static void iscsi_handle_login_thread_timeout(unsigned long data)
@@ -1380,23 +1380,12 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
1380 if (ret < 0) 1380 if (ret < 0)
1381 goto new_sess_out; 1381 goto new_sess_out;
1382 1382
1383 if (!conn->sess) {
1384 pr_err("struct iscsi_conn session pointer is NULL!\n");
1385 goto new_sess_out;
1386 }
1387
1388 iscsi_stop_login_thread_timer(np); 1383 iscsi_stop_login_thread_timer(np);
1389 1384
1390 if (signal_pending(current))
1391 goto new_sess_out;
1392
1393 if (ret == 1) { 1385 if (ret == 1) {
1394 tpg_np = conn->tpg_np; 1386 tpg_np = conn->tpg_np;
1395 1387
1396 ret = iscsi_post_login_handler(np, conn, zero_tsih); 1388 iscsi_post_login_handler(np, conn, zero_tsih);
1397 if (ret < 0)
1398 goto new_sess_out;
1399
1400 iscsit_deaccess_np(np, tpg, tpg_np); 1389 iscsit_deaccess_np(np, tpg, tpg_np);
1401 } 1390 }
1402 1391
diff --git a/drivers/target/iscsi/iscsi_target_login.h b/drivers/target/iscsi/iscsi_target_login.h
index 1c7358081533..57aa0d0fd820 100644
--- a/drivers/target/iscsi/iscsi_target_login.h
+++ b/drivers/target/iscsi/iscsi_target_login.h
@@ -12,7 +12,8 @@ extern int iscsit_accept_np(struct iscsi_np *, struct iscsi_conn *);
12extern int iscsit_get_login_rx(struct iscsi_conn *, struct iscsi_login *); 12extern int iscsit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
13extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32); 13extern int iscsit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32);
14extern void iscsit_free_conn(struct iscsi_np *, struct iscsi_conn *); 14extern void iscsit_free_conn(struct iscsi_np *, struct iscsi_conn *);
15extern int iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8); 15extern int iscsit_start_kthreads(struct iscsi_conn *);
16extern void iscsi_post_login_handler(struct iscsi_np *, struct iscsi_conn *, u8);
16extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *, 17extern void iscsi_target_login_sess_out(struct iscsi_conn *, struct iscsi_np *,
17 bool, bool); 18 bool, bool);
18extern int iscsi_target_login_thread(void *); 19extern int iscsi_target_login_thread(void *);
diff --git a/drivers/target/iscsi/iscsi_target_nego.c b/drivers/target/iscsi/iscsi_target_nego.c
index 8c02fa34716f..f9cde9141836 100644
--- a/drivers/target/iscsi/iscsi_target_nego.c
+++ b/drivers/target/iscsi/iscsi_target_nego.c
@@ -17,6 +17,7 @@
17 ******************************************************************************/ 17 ******************************************************************************/
18 18
19#include <linux/ctype.h> 19#include <linux/ctype.h>
20#include <linux/kthread.h>
20#include <scsi/iscsi_proto.h> 21#include <scsi/iscsi_proto.h>
21#include <target/target_core_base.h> 22#include <target/target_core_base.h>
22#include <target/target_core_fabric.h> 23#include <target/target_core_fabric.h>
@@ -361,10 +362,24 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log
361 ntohl(login_rsp->statsn), login->rsp_length); 362 ntohl(login_rsp->statsn), login->rsp_length);
362 363
363 padding = ((-login->rsp_length) & 3); 364 padding = ((-login->rsp_length) & 3);
365 /*
366 * Before sending the last login response containing the transition
367 * bit for full-feature-phase, go ahead and start up TX/RX threads
368 * now to avoid potential resource allocation failures after the
369 * final login response has been sent.
370 */
371 if (login->login_complete) {
372 int rc = iscsit_start_kthreads(conn);
373 if (rc) {
374 iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
375 ISCSI_LOGIN_STATUS_NO_RESOURCES);
376 return -1;
377 }
378 }
364 379
365 if (conn->conn_transport->iscsit_put_login_tx(conn, login, 380 if (conn->conn_transport->iscsit_put_login_tx(conn, login,
366 login->rsp_length + padding) < 0) 381 login->rsp_length + padding) < 0)
367 return -1; 382 goto err;
368 383
369 login->rsp_length = 0; 384 login->rsp_length = 0;
370 mutex_lock(&sess->cmdsn_mutex); 385 mutex_lock(&sess->cmdsn_mutex);
@@ -373,6 +388,23 @@ static int iscsi_target_do_tx_login_io(struct iscsi_conn *conn, struct iscsi_log
373 mutex_unlock(&sess->cmdsn_mutex); 388 mutex_unlock(&sess->cmdsn_mutex);
374 389
375 return 0; 390 return 0;
391
392err:
393 if (login->login_complete) {
394 if (conn->rx_thread && conn->rx_thread_active) {
395 send_sig(SIGINT, conn->rx_thread, 1);
396 kthread_stop(conn->rx_thread);
397 }
398 if (conn->tx_thread && conn->tx_thread_active) {
399 send_sig(SIGINT, conn->tx_thread, 1);
400 kthread_stop(conn->tx_thread);
401 }
402 spin_lock(&iscsit_global->ts_bitmap_lock);
403 bitmap_release_region(iscsit_global->ts_bitmap, conn->bitmap_id,
404 get_order(1));
405 spin_unlock(&iscsit_global->ts_bitmap_lock);
406 }
407 return -1;
376} 408}
377 409
378static void iscsi_target_sk_data_ready(struct sock *sk) 410static void iscsi_target_sk_data_ready(struct sock *sk)
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 0b0de3647478..c2e9fea90b4a 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -747,7 +747,7 @@ static ssize_t store_pi_prot_type(struct se_dev_attrib *da,
747 if (!dev->transport->init_prot || !dev->transport->free_prot) { 747 if (!dev->transport->init_prot || !dev->transport->free_prot) {
748 /* 0 is only allowed value for non-supporting backends */ 748 /* 0 is only allowed value for non-supporting backends */
749 if (flag == 0) 749 if (flag == 0)
750 return 0; 750 return count;
751 751
752 pr_err("DIF protection not supported by backend: %s\n", 752 pr_err("DIF protection not supported by backend: %s\n",
753 dev->transport->name); 753 dev->transport->name);
@@ -1590,9 +1590,9 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
1590 u8 type = 0; 1590 u8 type = 0;
1591 1591
1592 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) 1592 if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
1593 return 0; 1593 return count;
1594 if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS) 1594 if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
1595 return 0; 1595 return count;
1596 1596
1597 if (dev->export_count) { 1597 if (dev->export_count) {
1598 pr_debug("Unable to process APTPL metadata while" 1598 pr_debug("Unable to process APTPL metadata while"
@@ -1658,22 +1658,32 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
1658 * PR APTPL Metadata for Reservation 1658 * PR APTPL Metadata for Reservation
1659 */ 1659 */
1660 case Opt_res_holder: 1660 case Opt_res_holder:
1661 match_int(args, &arg); 1661 ret = match_int(args, &arg);
1662 if (ret)
1663 goto out;
1662 res_holder = arg; 1664 res_holder = arg;
1663 break; 1665 break;
1664 case Opt_res_type: 1666 case Opt_res_type:
1665 match_int(args, &arg); 1667 ret = match_int(args, &arg);
1668 if (ret)
1669 goto out;
1666 type = (u8)arg; 1670 type = (u8)arg;
1667 break; 1671 break;
1668 case Opt_res_scope: 1672 case Opt_res_scope:
1669 match_int(args, &arg); 1673 ret = match_int(args, &arg);
1674 if (ret)
1675 goto out;
1670 break; 1676 break;
1671 case Opt_res_all_tg_pt: 1677 case Opt_res_all_tg_pt:
1672 match_int(args, &arg); 1678 ret = match_int(args, &arg);
1679 if (ret)
1680 goto out;
1673 all_tg_pt = (int)arg; 1681 all_tg_pt = (int)arg;
1674 break; 1682 break;
1675 case Opt_mapped_lun: 1683 case Opt_mapped_lun:
1676 match_int(args, &arg); 1684 ret = match_int(args, &arg);
1685 if (ret)
1686 goto out;
1677 mapped_lun = (u64)arg; 1687 mapped_lun = (u64)arg;
1678 break; 1688 break;
1679 /* 1689 /*
@@ -1701,14 +1711,20 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
1701 } 1711 }
1702 break; 1712 break;
1703 case Opt_tpgt: 1713 case Opt_tpgt:
1704 match_int(args, &arg); 1714 ret = match_int(args, &arg);
1715 if (ret)
1716 goto out;
1705 tpgt = (u16)arg; 1717 tpgt = (u16)arg;
1706 break; 1718 break;
1707 case Opt_port_rtpi: 1719 case Opt_port_rtpi:
1708 match_int(args, &arg); 1720 ret = match_int(args, &arg);
1721 if (ret)
1722 goto out;
1709 break; 1723 break;
1710 case Opt_target_lun: 1724 case Opt_target_lun:
1711 match_int(args, &arg); 1725 ret = match_int(args, &arg);
1726 if (ret)
1727 goto out;
1712 target_lun = (u64)arg; 1728 target_lun = (u64)arg;
1713 break; 1729 break;
1714 default: 1730 default:
@@ -1985,7 +2001,7 @@ static ssize_t target_core_store_alua_lu_gp(
1985 2001
1986 lu_gp_mem = dev->dev_alua_lu_gp_mem; 2002 lu_gp_mem = dev->dev_alua_lu_gp_mem;
1987 if (!lu_gp_mem) 2003 if (!lu_gp_mem)
1988 return 0; 2004 return count;
1989 2005
1990 if (count > LU_GROUP_NAME_BUF) { 2006 if (count > LU_GROUP_NAME_BUF) {
1991 pr_err("ALUA LU Group Alias too large!\n"); 2007 pr_err("ALUA LU Group Alias too large!\n");
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 0fdbe43b7dad..5ab7100de17e 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -1474,7 +1474,7 @@ core_scsi3_decode_spec_i_port(
1474 LIST_HEAD(tid_dest_list); 1474 LIST_HEAD(tid_dest_list);
1475 struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp; 1475 struct pr_transport_id_holder *tidh_new, *tidh, *tidh_tmp;
1476 unsigned char *buf, *ptr, proto_ident; 1476 unsigned char *buf, *ptr, proto_ident;
1477 const unsigned char *i_str; 1477 const unsigned char *i_str = NULL;
1478 char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN]; 1478 char *iport_ptr = NULL, i_buf[PR_REG_ISID_ID_LEN];
1479 sense_reason_t ret; 1479 sense_reason_t ret;
1480 u32 tpdl, tid_len = 0; 1480 u32 tpdl, tid_len = 0;
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 4703f403f31c..384cf8894411 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -333,6 +333,7 @@ static int rd_configure_device(struct se_device *dev)
333 dev->dev_attrib.hw_block_size = RD_BLOCKSIZE; 333 dev->dev_attrib.hw_block_size = RD_BLOCKSIZE;
334 dev->dev_attrib.hw_max_sectors = UINT_MAX; 334 dev->dev_attrib.hw_max_sectors = UINT_MAX;
335 dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH; 335 dev->dev_attrib.hw_queue_depth = RD_MAX_DEVICE_QUEUE_DEPTH;
336 dev->dev_attrib.is_nonrot = 1;
336 337
337 rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++; 338 rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
338 339
diff --git a/drivers/target/target_core_spc.c b/drivers/target/target_core_spc.c
index b0744433315a..b5ba1ec3c354 100644
--- a/drivers/target/target_core_spc.c
+++ b/drivers/target/target_core_spc.c
@@ -454,10 +454,17 @@ spc_emulate_evpd_86(struct se_cmd *cmd, unsigned char *buf)
454 cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE1_PROT) 454 cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE1_PROT)
455 buf[4] = 0x5; 455 buf[4] = 0x5;
456 else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT || 456 else if (dev->dev_attrib.pi_prot_type == TARGET_DIF_TYPE3_PROT ||
457 cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE3_PROT) 457 cmd->se_sess->sess_prot_type == TARGET_DIF_TYPE3_PROT)
458 buf[4] = 0x4; 458 buf[4] = 0x4;
459 } 459 }
460 460
461 /* logical unit supports type 1 and type 3 protection */
462 if ((dev->transport->get_device_type(dev) == TYPE_DISK) &&
463 (sess->sup_prot_ops & (TARGET_PROT_DIN_PASS | TARGET_PROT_DOUT_PASS)) &&
464 (dev->dev_attrib.pi_prot_type || cmd->se_sess->sess_prot_type)) {
465 buf[4] |= (0x3 << 3);
466 }
467
461 /* Set HEADSUP, ORDSUP, SIMPSUP */ 468 /* Set HEADSUP, ORDSUP, SIMPSUP */
462 buf[5] = 0x07; 469 buf[5] = 0x07;
463 470
diff --git a/drivers/thermal/hisi_thermal.c b/drivers/thermal/hisi_thermal.c
index d5dd357ba57c..b49f97c734d0 100644
--- a/drivers/thermal/hisi_thermal.c
+++ b/drivers/thermal/hisi_thermal.c
@@ -405,7 +405,6 @@ static SIMPLE_DEV_PM_OPS(hisi_thermal_pm_ops,
405static struct platform_driver hisi_thermal_driver = { 405static struct platform_driver hisi_thermal_driver = {
406 .driver = { 406 .driver = {
407 .name = "hisi_thermal", 407 .name = "hisi_thermal",
408 .owner = THIS_MODULE,
409 .pm = &hisi_thermal_pm_ops, 408 .pm = &hisi_thermal_pm_ops,
410 .of_match_table = of_hisi_thermal_match, 409 .of_match_table = of_hisi_thermal_match,
411 }, 410 },
diff --git a/drivers/thermal/power_allocator.c b/drivers/thermal/power_allocator.c
index 4672250b329f..63a448f9d93b 100644
--- a/drivers/thermal/power_allocator.c
+++ b/drivers/thermal/power_allocator.c
@@ -229,7 +229,8 @@ static int allocate_power(struct thermal_zone_device *tz,
229 struct thermal_instance *instance; 229 struct thermal_instance *instance;
230 struct power_allocator_params *params = tz->governor_data; 230 struct power_allocator_params *params = tz->governor_data;
231 u32 *req_power, *max_power, *granted_power, *extra_actor_power; 231 u32 *req_power, *max_power, *granted_power, *extra_actor_power;
232 u32 total_req_power, max_allocatable_power; 232 u32 *weighted_req_power;
233 u32 total_req_power, max_allocatable_power, total_weighted_req_power;
233 u32 total_granted_power, power_range; 234 u32 total_granted_power, power_range;
234 int i, num_actors, total_weight, ret = 0; 235 int i, num_actors, total_weight, ret = 0;
235 int trip_max_desired_temperature = params->trip_max_desired_temperature; 236 int trip_max_desired_temperature = params->trip_max_desired_temperature;
@@ -247,16 +248,17 @@ static int allocate_power(struct thermal_zone_device *tz,
247 } 248 }
248 249
249 /* 250 /*
250 * We need to allocate three arrays of the same size: 251 * We need to allocate five arrays of the same size:
251 * req_power, max_power and granted_power. They are going to 252 * req_power, max_power, granted_power, extra_actor_power and
252 * be needed until this function returns. Allocate them all 253 * weighted_req_power. They are going to be needed until this
253 * in one go to simplify the allocation and deallocation 254 * function returns. Allocate them all in one go to simplify
254 * logic. 255 * the allocation and deallocation logic.
255 */ 256 */
256 BUILD_BUG_ON(sizeof(*req_power) != sizeof(*max_power)); 257 BUILD_BUG_ON(sizeof(*req_power) != sizeof(*max_power));
257 BUILD_BUG_ON(sizeof(*req_power) != sizeof(*granted_power)); 258 BUILD_BUG_ON(sizeof(*req_power) != sizeof(*granted_power));
258 BUILD_BUG_ON(sizeof(*req_power) != sizeof(*extra_actor_power)); 259 BUILD_BUG_ON(sizeof(*req_power) != sizeof(*extra_actor_power));
259 req_power = devm_kcalloc(&tz->device, num_actors * 4, 260 BUILD_BUG_ON(sizeof(*req_power) != sizeof(*weighted_req_power));
261 req_power = devm_kcalloc(&tz->device, num_actors * 5,
260 sizeof(*req_power), GFP_KERNEL); 262 sizeof(*req_power), GFP_KERNEL);
261 if (!req_power) { 263 if (!req_power) {
262 ret = -ENOMEM; 264 ret = -ENOMEM;
@@ -266,8 +268,10 @@ static int allocate_power(struct thermal_zone_device *tz,
266 max_power = &req_power[num_actors]; 268 max_power = &req_power[num_actors];
267 granted_power = &req_power[2 * num_actors]; 269 granted_power = &req_power[2 * num_actors];
268 extra_actor_power = &req_power[3 * num_actors]; 270 extra_actor_power = &req_power[3 * num_actors];
271 weighted_req_power = &req_power[4 * num_actors];
269 272
270 i = 0; 273 i = 0;
274 total_weighted_req_power = 0;
271 total_req_power = 0; 275 total_req_power = 0;
272 max_allocatable_power = 0; 276 max_allocatable_power = 0;
273 277
@@ -289,13 +293,14 @@ static int allocate_power(struct thermal_zone_device *tz,
289 else 293 else
290 weight = instance->weight; 294 weight = instance->weight;
291 295
292 req_power[i] = frac_to_int(weight * req_power[i]); 296 weighted_req_power[i] = frac_to_int(weight * req_power[i]);
293 297
294 if (power_actor_get_max_power(cdev, tz, &max_power[i])) 298 if (power_actor_get_max_power(cdev, tz, &max_power[i]))
295 continue; 299 continue;
296 300
297 total_req_power += req_power[i]; 301 total_req_power += req_power[i];
298 max_allocatable_power += max_power[i]; 302 max_allocatable_power += max_power[i];
303 total_weighted_req_power += weighted_req_power[i];
299 304
300 i++; 305 i++;
301 } 306 }
@@ -303,8 +308,9 @@ static int allocate_power(struct thermal_zone_device *tz,
303 power_range = pid_controller(tz, current_temp, control_temp, 308 power_range = pid_controller(tz, current_temp, control_temp,
304 max_allocatable_power); 309 max_allocatable_power);
305 310
306 divvy_up_power(req_power, max_power, num_actors, total_req_power, 311 divvy_up_power(weighted_req_power, max_power, num_actors,
307 power_range, granted_power, extra_actor_power); 312 total_weighted_req_power, power_range, granted_power,
313 extra_actor_power);
308 314
309 total_granted_power = 0; 315 total_granted_power = 0;
310 i = 0; 316 i = 0;
diff --git a/drivers/thermal/samsung/Kconfig b/drivers/thermal/samsung/Kconfig
index c8e35c1a43dc..e0da3865e060 100644
--- a/drivers/thermal/samsung/Kconfig
+++ b/drivers/thermal/samsung/Kconfig
@@ -1,6 +1,6 @@
1config EXYNOS_THERMAL 1config EXYNOS_THERMAL
2 tristate "Exynos thermal management unit driver" 2 tristate "Exynos thermal management unit driver"
3 depends on OF 3 depends on THERMAL_OF
4 help 4 help
5 If you say yes here you get support for the TMU (Thermal Management 5 If you say yes here you get support for the TMU (Thermal Management
6 Unit) driver for SAMSUNG EXYNOS series of SoCs. This driver initialises 6 Unit) driver for SAMSUNG EXYNOS series of SoCs. This driver initialises
diff --git a/drivers/thermal/samsung/exynos_tmu.c b/drivers/thermal/samsung/exynos_tmu.c
index 531f4b179871..c96ff10b869e 100644
--- a/drivers/thermal/samsung/exynos_tmu.c
+++ b/drivers/thermal/samsung/exynos_tmu.c
@@ -1296,7 +1296,6 @@ static struct thermal_zone_of_device_ops exynos_sensor_ops = {
1296 1296
1297static int exynos_tmu_probe(struct platform_device *pdev) 1297static int exynos_tmu_probe(struct platform_device *pdev)
1298{ 1298{
1299 struct exynos_tmu_platform_data *pdata;
1300 struct exynos_tmu_data *data; 1299 struct exynos_tmu_data *data;
1301 int ret; 1300 int ret;
1302 1301
@@ -1318,8 +1317,6 @@ static int exynos_tmu_probe(struct platform_device *pdev)
1318 if (ret) 1317 if (ret)
1319 goto err_sensor; 1318 goto err_sensor;
1320 1319
1321 pdata = data->pdata;
1322
1323 INIT_WORK(&data->irq_work, exynos_tmu_work); 1320 INIT_WORK(&data->irq_work, exynos_tmu_work);
1324 1321
1325 data->clk = devm_clk_get(&pdev->dev, "tmu_apbif"); 1322 data->clk = devm_clk_get(&pdev->dev, "tmu_apbif");
@@ -1392,6 +1389,8 @@ err_clk_sec:
1392 if (!IS_ERR(data->clk_sec)) 1389 if (!IS_ERR(data->clk_sec))
1393 clk_unprepare(data->clk_sec); 1390 clk_unprepare(data->clk_sec);
1394err_sensor: 1391err_sensor:
1392 if (!IS_ERR_OR_NULL(data->regulator))
1393 regulator_disable(data->regulator);
1395 thermal_zone_of_sensor_unregister(&pdev->dev, data->tzd); 1394 thermal_zone_of_sensor_unregister(&pdev->dev, data->tzd);
1396 1395
1397 return ret; 1396 return ret;
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 04659bfb888b..4ca211be4c0f 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -1333,6 +1333,7 @@ int thermal_zone_unbind_cooling_device(struct thermal_zone_device *tz,
1333 return -ENODEV; 1333 return -ENODEV;
1334 1334
1335unbind: 1335unbind:
1336 device_remove_file(&tz->device, &pos->weight_attr);
1336 device_remove_file(&tz->device, &pos->attr); 1337 device_remove_file(&tz->device, &pos->attr);
1337 sysfs_remove_link(&tz->device.kobj, pos->name); 1338 sysfs_remove_link(&tz->device.kobj, pos->name);
1338 release_idr(&tz->idr, &tz->lock, pos->id); 1339 release_idr(&tz->idr, &tz->lock, pos->id);
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index c9c27f69e101..ee8bfacf2071 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -1108,19 +1108,29 @@ static void eraser(unsigned char c, struct tty_struct *tty)
1108 * Locking: ctrl_lock 1108 * Locking: ctrl_lock
1109 */ 1109 */
1110 1110
1111static void isig(int sig, struct tty_struct *tty) 1111static void __isig(int sig, struct tty_struct *tty)
1112{ 1112{
1113 struct n_tty_data *ldata = tty->disc_data;
1114 struct pid *tty_pgrp = tty_get_pgrp(tty); 1113 struct pid *tty_pgrp = tty_get_pgrp(tty);
1115 if (tty_pgrp) { 1114 if (tty_pgrp) {
1116 kill_pgrp(tty_pgrp, sig, 1); 1115 kill_pgrp(tty_pgrp, sig, 1);
1117 put_pid(tty_pgrp); 1116 put_pid(tty_pgrp);
1118 } 1117 }
1118}
1119 1119
1120 if (!L_NOFLSH(tty)) { 1120static void isig(int sig, struct tty_struct *tty)
1121{
1122 struct n_tty_data *ldata = tty->disc_data;
1123
1124 if (L_NOFLSH(tty)) {
1125 /* signal only */
1126 __isig(sig, tty);
1127
1128 } else { /* signal and flush */
1121 up_read(&tty->termios_rwsem); 1129 up_read(&tty->termios_rwsem);
1122 down_write(&tty->termios_rwsem); 1130 down_write(&tty->termios_rwsem);
1123 1131
1132 __isig(sig, tty);
1133
1124 /* clear echo buffer */ 1134 /* clear echo buffer */
1125 mutex_lock(&ldata->output_lock); 1135 mutex_lock(&ldata->output_lock);
1126 ldata->echo_head = ldata->echo_tail = 0; 1136 ldata->echo_head = ldata->echo_tail = 0;
diff --git a/drivers/tty/serial/Kconfig b/drivers/tty/serial/Kconfig
index 76e65b714471..15b4079a335e 100644
--- a/drivers/tty/serial/Kconfig
+++ b/drivers/tty/serial/Kconfig
@@ -1185,7 +1185,7 @@ config SERIAL_SC16IS7XX_CORE
1185config SERIAL_SC16IS7XX 1185config SERIAL_SC16IS7XX
1186 tristate "SC16IS7xx serial support" 1186 tristate "SC16IS7xx serial support"
1187 select SERIAL_CORE 1187 select SERIAL_CORE
1188 depends on I2C || SPI_MASTER 1188 depends on (SPI_MASTER && !I2C) || I2C
1189 help 1189 help
1190 This selects support for SC16IS7xx serial ports. 1190 This selects support for SC16IS7xx serial ports.
1191 Supported ICs are SC16IS740, SC16IS741, SC16IS750, SC16IS752, 1191 Supported ICs are SC16IS740, SC16IS741, SC16IS750, SC16IS752,
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 50cf5b10ceed..fd27e986b1dd 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -2310,8 +2310,8 @@ static int pl011_setup_port(struct device *dev, struct uart_amba_port *uap,
2310 void __iomem *base; 2310 void __iomem *base;
2311 2311
2312 base = devm_ioremap_resource(dev, mmiobase); 2312 base = devm_ioremap_resource(dev, mmiobase);
2313 if (!base) 2313 if (IS_ERR(base))
2314 return -ENOMEM; 2314 return PTR_ERR(base);
2315 2315
2316 index = pl011_probe_dt_alias(index, dev); 2316 index = pl011_probe_dt_alias(index, dev);
2317 2317
diff --git a/drivers/tty/serial/etraxfs-uart.c b/drivers/tty/serial/etraxfs-uart.c
index a57301a6fe42..679709f51fd4 100644
--- a/drivers/tty/serial/etraxfs-uart.c
+++ b/drivers/tty/serial/etraxfs-uart.c
@@ -950,7 +950,7 @@ static int etraxfs_uart_remove(struct platform_device *pdev)
950 950
951 port = platform_get_drvdata(pdev); 951 port = platform_get_drvdata(pdev);
952 uart_remove_one_port(&etraxfs_uart_driver, port); 952 uart_remove_one_port(&etraxfs_uart_driver, port);
953 etraxfs_uart_ports[pdev->id] = NULL; 953 etraxfs_uart_ports[port->line] = NULL;
954 954
955 return 0; 955 return 0;
956} 956}
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index 2c90dc31bfaa..54fdc7866ea1 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -1121,11 +1121,6 @@ static int imx_startup(struct uart_port *port)
1121 1121
1122 writel(temp & ~UCR4_DREN, sport->port.membase + UCR4); 1122 writel(temp & ~UCR4_DREN, sport->port.membase + UCR4);
1123 1123
1124 /* Can we enable the DMA support? */
1125 if (is_imx6q_uart(sport) && !uart_console(port) &&
1126 !sport->dma_is_inited)
1127 imx_uart_dma_init(sport);
1128
1129 spin_lock_irqsave(&sport->port.lock, flags); 1124 spin_lock_irqsave(&sport->port.lock, flags);
1130 /* Reset fifo's and state machines */ 1125 /* Reset fifo's and state machines */
1131 i = 100; 1126 i = 100;
@@ -1143,9 +1138,6 @@ static int imx_startup(struct uart_port *port)
1143 writel(USR1_RTSD, sport->port.membase + USR1); 1138 writel(USR1_RTSD, sport->port.membase + USR1);
1144 writel(USR2_ORE, sport->port.membase + USR2); 1139 writel(USR2_ORE, sport->port.membase + USR2);
1145 1140
1146 if (sport->dma_is_inited && !sport->dma_is_enabled)
1147 imx_enable_dma(sport);
1148
1149 temp = readl(sport->port.membase + UCR1); 1141 temp = readl(sport->port.membase + UCR1);
1150 temp |= UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN; 1142 temp |= UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN;
1151 1143
@@ -1316,6 +1308,11 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
1316 } else { 1308 } else {
1317 ucr2 |= UCR2_CTSC; 1309 ucr2 |= UCR2_CTSC;
1318 } 1310 }
1311
1312 /* Can we enable the DMA support? */
1313 if (is_imx6q_uart(sport) && !uart_console(port)
1314 && !sport->dma_is_inited)
1315 imx_uart_dma_init(sport);
1319 } else { 1316 } else {
1320 termios->c_cflag &= ~CRTSCTS; 1317 termios->c_cflag &= ~CRTSCTS;
1321 } 1318 }
@@ -1432,6 +1429,8 @@ imx_set_termios(struct uart_port *port, struct ktermios *termios,
1432 if (UART_ENABLE_MS(&sport->port, termios->c_cflag)) 1429 if (UART_ENABLE_MS(&sport->port, termios->c_cflag))
1433 imx_enable_ms(&sport->port); 1430 imx_enable_ms(&sport->port);
1434 1431
1432 if (sport->dma_is_inited && !sport->dma_is_enabled)
1433 imx_enable_dma(sport);
1435 spin_unlock_irqrestore(&sport->port.lock, flags); 1434 spin_unlock_irqrestore(&sport->port.lock, flags);
1436} 1435}
1437 1436
diff --git a/drivers/tty/serial/sc16is7xx.c b/drivers/tty/serial/sc16is7xx.c
index 9e6576004a42..5ccc698cbbfa 100644
--- a/drivers/tty/serial/sc16is7xx.c
+++ b/drivers/tty/serial/sc16is7xx.c
@@ -354,6 +354,26 @@ static void sc16is7xx_port_write(struct uart_port *port, u8 reg, u8 val)
354 (reg << SC16IS7XX_REG_SHIFT) | port->line, val); 354 (reg << SC16IS7XX_REG_SHIFT) | port->line, val);
355} 355}
356 356
357static void sc16is7xx_fifo_read(struct uart_port *port, unsigned int rxlen)
358{
359 struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
360 u8 addr = (SC16IS7XX_RHR_REG << SC16IS7XX_REG_SHIFT) | port->line;
361
362 regcache_cache_bypass(s->regmap, true);
363 regmap_raw_read(s->regmap, addr, s->buf, rxlen);
364 regcache_cache_bypass(s->regmap, false);
365}
366
367static void sc16is7xx_fifo_write(struct uart_port *port, u8 to_send)
368{
369 struct sc16is7xx_port *s = dev_get_drvdata(port->dev);
370 u8 addr = (SC16IS7XX_THR_REG << SC16IS7XX_REG_SHIFT) | port->line;
371
372 regcache_cache_bypass(s->regmap, true);
373 regmap_raw_write(s->regmap, addr, s->buf, to_send);
374 regcache_cache_bypass(s->regmap, false);
375}
376
357static void sc16is7xx_port_update(struct uart_port *port, u8 reg, 377static void sc16is7xx_port_update(struct uart_port *port, u8 reg,
358 u8 mask, u8 val) 378 u8 mask, u8 val)
359{ 379{
@@ -508,10 +528,7 @@ static void sc16is7xx_handle_rx(struct uart_port *port, unsigned int rxlen,
508 s->buf[0] = sc16is7xx_port_read(port, SC16IS7XX_RHR_REG); 528 s->buf[0] = sc16is7xx_port_read(port, SC16IS7XX_RHR_REG);
509 bytes_read = 1; 529 bytes_read = 1;
510 } else { 530 } else {
511 regcache_cache_bypass(s->regmap, true); 531 sc16is7xx_fifo_read(port, rxlen);
512 regmap_raw_read(s->regmap, SC16IS7XX_RHR_REG,
513 s->buf, rxlen);
514 regcache_cache_bypass(s->regmap, false);
515 bytes_read = rxlen; 532 bytes_read = rxlen;
516 } 533 }
517 534
@@ -591,9 +608,8 @@ static void sc16is7xx_handle_tx(struct uart_port *port)
591 s->buf[i] = xmit->buf[xmit->tail]; 608 s->buf[i] = xmit->buf[xmit->tail];
592 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); 609 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
593 } 610 }
594 regcache_cache_bypass(s->regmap, true); 611
595 regmap_raw_write(s->regmap, SC16IS7XX_THR_REG, s->buf, to_send); 612 sc16is7xx_fifo_write(port, to_send);
596 regcache_cache_bypass(s->regmap, false);
597 } 613 }
598 614
599 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) 615 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
index 7ae1592f7ec9..f36852067f20 100644
--- a/drivers/tty/serial/serial_core.c
+++ b/drivers/tty/serial/serial_core.c
@@ -1418,7 +1418,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
1418 mutex_lock(&port->mutex); 1418 mutex_lock(&port->mutex);
1419 uart_shutdown(tty, state); 1419 uart_shutdown(tty, state);
1420 tty_port_tty_set(port, NULL); 1420 tty_port_tty_set(port, NULL);
1421 tty->closing = 0; 1421
1422 spin_lock_irqsave(&port->lock, flags); 1422 spin_lock_irqsave(&port->lock, flags);
1423 1423
1424 if (port->blocked_open) { 1424 if (port->blocked_open) {
@@ -1444,6 +1444,7 @@ static void uart_close(struct tty_struct *tty, struct file *filp)
1444 mutex_unlock(&port->mutex); 1444 mutex_unlock(&port->mutex);
1445 1445
1446 tty_ldisc_flush(tty); 1446 tty_ldisc_flush(tty);
1447 tty->closing = 0;
1447} 1448}
1448 1449
1449static void uart_wait_until_sent(struct tty_struct *tty, int timeout) 1450static void uart_wait_until_sent(struct tty_struct *tty, int timeout)
diff --git a/drivers/tty/vt/selection.c b/drivers/tty/vt/selection.c
index ea27804d87af..381a2b13682c 100644
--- a/drivers/tty/vt/selection.c
+++ b/drivers/tty/vt/selection.c
@@ -356,6 +356,7 @@ int paste_selection(struct tty_struct *tty)
356 schedule(); 356 schedule();
357 continue; 357 continue;
358 } 358 }
359 __set_current_state(TASK_RUNNING);
359 count = sel_buffer_lth - pasted; 360 count = sel_buffer_lth - pasted;
360 count = tty_ldisc_receive_buf(ld, sel_buffer + pasted, NULL, 361 count = tty_ldisc_receive_buf(ld, sel_buffer + pasted, NULL,
361 count); 362 count);
diff --git a/drivers/tty/vt/vt.c b/drivers/tty/vt/vt.c
index 8fe52989b380..4462d167900c 100644
--- a/drivers/tty/vt/vt.c
+++ b/drivers/tty/vt/vt.c
@@ -742,6 +742,8 @@ static void visual_init(struct vc_data *vc, int num, int init)
742 __module_get(vc->vc_sw->owner); 742 __module_get(vc->vc_sw->owner);
743 vc->vc_num = num; 743 vc->vc_num = num;
744 vc->vc_display_fg = &master_display_fg; 744 vc->vc_display_fg = &master_display_fg;
745 if (vc->vc_uni_pagedir_loc)
746 con_free_unimap(vc);
745 vc->vc_uni_pagedir_loc = &vc->vc_uni_pagedir; 747 vc->vc_uni_pagedir_loc = &vc->vc_uni_pagedir;
746 vc->vc_uni_pagedir = NULL; 748 vc->vc_uni_pagedir = NULL;
747 vc->vc_hi_font_mask = 0; 749 vc->vc_hi_font_mask = 0;
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index 74fea4fa41b1..3ad48e1c0c57 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -1024,7 +1024,18 @@ static struct platform_driver ci_hdrc_driver = {
1024 }, 1024 },
1025}; 1025};
1026 1026
1027module_platform_driver(ci_hdrc_driver); 1027static int __init ci_hdrc_platform_register(void)
1028{
1029 ci_hdrc_host_driver_init();
1030 return platform_driver_register(&ci_hdrc_driver);
1031}
1032module_init(ci_hdrc_platform_register);
1033
1034static void __exit ci_hdrc_platform_unregister(void)
1035{
1036 platform_driver_unregister(&ci_hdrc_driver);
1037}
1038module_exit(ci_hdrc_platform_unregister);
1028 1039
1029MODULE_ALIAS("platform:ci_hdrc"); 1040MODULE_ALIAS("platform:ci_hdrc");
1030MODULE_LICENSE("GPL v2"); 1041MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index 6cf87b8b13a8..7161439def19 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -249,9 +249,12 @@ int ci_hdrc_host_init(struct ci_hdrc *ci)
249 rdrv->name = "host"; 249 rdrv->name = "host";
250 ci->roles[CI_ROLE_HOST] = rdrv; 250 ci->roles[CI_ROLE_HOST] = rdrv;
251 251
252 return 0;
253}
254
255void ci_hdrc_host_driver_init(void)
256{
252 ehci_init_driver(&ci_ehci_hc_driver, &ehci_ci_overrides); 257 ehci_init_driver(&ci_ehci_hc_driver, &ehci_ci_overrides);
253 orig_bus_suspend = ci_ehci_hc_driver.bus_suspend; 258 orig_bus_suspend = ci_ehci_hc_driver.bus_suspend;
254 ci_ehci_hc_driver.bus_suspend = ci_ehci_bus_suspend; 259 ci_ehci_hc_driver.bus_suspend = ci_ehci_bus_suspend;
255
256 return 0;
257} 260}
diff --git a/drivers/usb/chipidea/host.h b/drivers/usb/chipidea/host.h
index 5707bf379bfb..0f12f131bdd3 100644
--- a/drivers/usb/chipidea/host.h
+++ b/drivers/usb/chipidea/host.h
@@ -5,6 +5,7 @@
5 5
6int ci_hdrc_host_init(struct ci_hdrc *ci); 6int ci_hdrc_host_init(struct ci_hdrc *ci);
7void ci_hdrc_host_destroy(struct ci_hdrc *ci); 7void ci_hdrc_host_destroy(struct ci_hdrc *ci);
8void ci_hdrc_host_driver_init(void);
8 9
9#else 10#else
10 11
@@ -18,6 +19,11 @@ static inline void ci_hdrc_host_destroy(struct ci_hdrc *ci)
18 19
19} 20}
20 21
22static void ci_hdrc_host_driver_init(void)
23{
24
25}
26
21#endif 27#endif
22 28
23#endif /* __DRIVERS_USB_CHIPIDEA_HOST_H */ 29#endif /* __DRIVERS_USB_CHIPIDEA_HOST_H */
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index 519a77ba214c..b30e7423549b 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1944,6 +1944,7 @@ static void __exit acm_exit(void)
1944 usb_deregister(&acm_driver); 1944 usb_deregister(&acm_driver);
1945 tty_unregister_driver(acm_tty_driver); 1945 tty_unregister_driver(acm_tty_driver);
1946 put_tty_driver(acm_tty_driver); 1946 put_tty_driver(acm_tty_driver);
1947 idr_destroy(&acm_minors);
1947} 1948}
1948 1949
1949module_init(acm_init); 1950module_init(acm_init);
diff --git a/drivers/usb/common/ulpi.c b/drivers/usb/common/ulpi.c
index 0e6f968e93fe..01c0c0477a9e 100644
--- a/drivers/usb/common/ulpi.c
+++ b/drivers/usb/common/ulpi.c
@@ -242,7 +242,7 @@ static int __init ulpi_init(void)
242{ 242{
243 return bus_register(&ulpi_bus); 243 return bus_register(&ulpi_bus);
244} 244}
245module_init(ulpi_init); 245subsys_initcall(ulpi_init);
246 246
247static void __exit ulpi_exit(void) 247static void __exit ulpi_exit(void)
248{ 248{
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index be5b2074f906..cbcd0920fb51 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1022,9 +1022,12 @@ static int register_root_hub(struct usb_hcd *hcd)
1022 dev_name(&usb_dev->dev), retval); 1022 dev_name(&usb_dev->dev), retval);
1023 return (retval < 0) ? retval : -EMSGSIZE; 1023 return (retval < 0) ? retval : -EMSGSIZE;
1024 } 1024 }
1025 if (usb_dev->speed == USB_SPEED_SUPER) { 1025
1026 if (le16_to_cpu(usb_dev->descriptor.bcdUSB) >= 0x0201) {
1026 retval = usb_get_bos_descriptor(usb_dev); 1027 retval = usb_get_bos_descriptor(usb_dev);
1027 if (retval < 0) { 1028 if (!retval) {
1029 usb_dev->lpm_capable = usb_device_supports_lpm(usb_dev);
1030 } else if (usb_dev->speed == USB_SPEED_SUPER) {
1028 mutex_unlock(&usb_bus_list_lock); 1031 mutex_unlock(&usb_bus_list_lock);
1029 dev_dbg(parent_dev, "can't read %s bos descriptor %d\n", 1032 dev_dbg(parent_dev, "can't read %s bos descriptor %d\n",
1030 dev_name(&usb_dev->dev), retval); 1033 dev_name(&usb_dev->dev), retval);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 43cb2f2e3b43..73dfa194160b 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -122,7 +122,7 @@ struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev)
122 return usb_get_intfdata(hdev->actconfig->interface[0]); 122 return usb_get_intfdata(hdev->actconfig->interface[0]);
123} 123}
124 124
125static int usb_device_supports_lpm(struct usb_device *udev) 125int usb_device_supports_lpm(struct usb_device *udev)
126{ 126{
127 /* USB 2.1 (and greater) devices indicate LPM support through 127 /* USB 2.1 (and greater) devices indicate LPM support through
128 * their USB 2.0 Extended Capabilities BOS descriptor. 128 * their USB 2.0 Extended Capabilities BOS descriptor.
diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h
index 7eb1e26798e5..457255a3306a 100644
--- a/drivers/usb/core/usb.h
+++ b/drivers/usb/core/usb.h
@@ -65,6 +65,7 @@ extern int usb_hub_init(void);
65extern void usb_hub_cleanup(void); 65extern void usb_hub_cleanup(void);
66extern int usb_major_init(void); 66extern int usb_major_init(void);
67extern void usb_major_cleanup(void); 67extern void usb_major_cleanup(void);
68extern int usb_device_supports_lpm(struct usb_device *udev);
68 69
69#ifdef CONFIG_PM 70#ifdef CONFIG_PM
70 71
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 2ef3c8d6a9db..69e769c35cf5 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -727,6 +727,10 @@ static int dwc3_ep0_std_request(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl)
727 dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ISOCH_DELAY"); 727 dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ISOCH_DELAY");
728 ret = dwc3_ep0_set_isoch_delay(dwc, ctrl); 728 ret = dwc3_ep0_set_isoch_delay(dwc, ctrl);
729 break; 729 break;
730 case USB_REQ_SET_INTERFACE:
731 dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_INTERFACE");
732 dwc->start_config_issued = false;
733 /* Fall through */
730 default: 734 default:
731 dwc3_trace(trace_dwc3_ep0, "Forwarding to gadget driver"); 735 dwc3_trace(trace_dwc3_ep0, "Forwarding to gadget driver");
732 ret = dwc3_ep0_delegate_req(dwc, ctrl); 736 ret = dwc3_ep0_delegate_req(dwc, ctrl);
diff --git a/drivers/usb/gadget/function/f_hid.c b/drivers/usb/gadget/function/f_hid.c
index f7f35a36c09a..6df9715a4bcd 100644
--- a/drivers/usb/gadget/function/f_hid.c
+++ b/drivers/usb/gadget/function/f_hid.c
@@ -699,6 +699,10 @@ static inline int hidg_get_minor(void)
699 int ret; 699 int ret;
700 700
701 ret = ida_simple_get(&hidg_ida, 0, 0, GFP_KERNEL); 701 ret = ida_simple_get(&hidg_ida, 0, 0, GFP_KERNEL);
702 if (ret >= HIDG_MINORS) {
703 ida_simple_remove(&hidg_ida, ret);
704 ret = -ENODEV;
705 }
702 706
703 return ret; 707 return ret;
704} 708}
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
index 44173df27273..357f63f47b42 100644
--- a/drivers/usb/gadget/function/f_printer.c
+++ b/drivers/usb/gadget/function/f_printer.c
@@ -1248,7 +1248,15 @@ static struct config_item_type printer_func_type = {
1248 1248
1249static inline int gprinter_get_minor(void) 1249static inline int gprinter_get_minor(void)
1250{ 1250{
1251 return ida_simple_get(&printer_ida, 0, 0, GFP_KERNEL); 1251 int ret;
1252
1253 ret = ida_simple_get(&printer_ida, 0, 0, GFP_KERNEL);
1254 if (ret >= PRINTER_MINORS) {
1255 ida_simple_remove(&printer_ida, ret);
1256 ret = -ENODEV;
1257 }
1258
1259 return ret;
1252} 1260}
1253 1261
1254static inline void gprinter_put_minor(int minor) 1262static inline void gprinter_put_minor(int minor)
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index 6d3eb8b00a48..531861547253 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -1162,14 +1162,14 @@ afunc_set_alt(struct usb_function *fn, unsigned intf, unsigned alt)
1162 factor = 1000; 1162 factor = 1000;
1163 } else { 1163 } else {
1164 ep_desc = &hs_epin_desc; 1164 ep_desc = &hs_epin_desc;
1165 factor = 125; 1165 factor = 8000;
1166 } 1166 }
1167 1167
1168 /* pre-compute some values for iso_complete() */ 1168 /* pre-compute some values for iso_complete() */
1169 uac2->p_framesize = opts->p_ssize * 1169 uac2->p_framesize = opts->p_ssize *
1170 num_channels(opts->p_chmask); 1170 num_channels(opts->p_chmask);
1171 rate = opts->p_srate * uac2->p_framesize; 1171 rate = opts->p_srate * uac2->p_framesize;
1172 uac2->p_interval = (1 << (ep_desc->bInterval - 1)) * factor; 1172 uac2->p_interval = factor / (1 << (ep_desc->bInterval - 1));
1173 uac2->p_pktsize = min_t(unsigned int, rate / uac2->p_interval, 1173 uac2->p_pktsize = min_t(unsigned int, rate / uac2->p_interval,
1174 prm->max_psize); 1174 prm->max_psize);
1175 1175
diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c
index b04980cf6dc4..1efa61265d8d 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_ep.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c
@@ -779,7 +779,7 @@ static int ep_dequeue(struct bdc_ep *ep, struct bdc_req *req)
779 /* The current hw dequeue pointer */ 779 /* The current hw dequeue pointer */
780 tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS0(0)); 780 tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS0(0));
781 deq_ptr_64 = tmp_32; 781 deq_ptr_64 = tmp_32;
782 tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS0(1)); 782 tmp_32 = bdc_readl(bdc->regs, BDC_EPSTS1(0));
783 deq_ptr_64 |= ((u64)tmp_32 << 32); 783 deq_ptr_64 |= ((u64)tmp_32 << 32);
784 784
785 /* we have the dma addr of next bd that will be fetched by hardware */ 785 /* we have the dma addr of next bd that will be fetched by hardware */
diff --git a/drivers/usb/gadget/udc/mv_udc_core.c b/drivers/usb/gadget/udc/mv_udc_core.c
index d32160d6463f..5da37c957b53 100644
--- a/drivers/usb/gadget/udc/mv_udc_core.c
+++ b/drivers/usb/gadget/udc/mv_udc_core.c
@@ -2167,7 +2167,7 @@ static int mv_udc_probe(struct platform_device *pdev)
2167 return -ENODEV; 2167 return -ENODEV;
2168 } 2168 }
2169 2169
2170 udc->phy_regs = ioremap(r->start, resource_size(r)); 2170 udc->phy_regs = devm_ioremap(&pdev->dev, r->start, resource_size(r));
2171 if (udc->phy_regs == NULL) { 2171 if (udc->phy_regs == NULL) {
2172 dev_err(&pdev->dev, "failed to map phy I/O memory\n"); 2172 dev_err(&pdev->dev, "failed to map phy I/O memory\n");
2173 return -EBUSY; 2173 return -EBUSY;
diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c
index d69c35558f68..89ed5e71a199 100644
--- a/drivers/usb/gadget/udc/udc-core.c
+++ b/drivers/usb/gadget/udc/udc-core.c
@@ -60,13 +60,15 @@ static DEFINE_MUTEX(udc_lock);
60int usb_gadget_map_request(struct usb_gadget *gadget, 60int usb_gadget_map_request(struct usb_gadget *gadget,
61 struct usb_request *req, int is_in) 61 struct usb_request *req, int is_in)
62{ 62{
63 struct device *dev = gadget->dev.parent;
64
63 if (req->length == 0) 65 if (req->length == 0)
64 return 0; 66 return 0;
65 67
66 if (req->num_sgs) { 68 if (req->num_sgs) {
67 int mapped; 69 int mapped;
68 70
69 mapped = dma_map_sg(&gadget->dev, req->sg, req->num_sgs, 71 mapped = dma_map_sg(dev, req->sg, req->num_sgs,
70 is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 72 is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
71 if (mapped == 0) { 73 if (mapped == 0) {
72 dev_err(&gadget->dev, "failed to map SGs\n"); 74 dev_err(&gadget->dev, "failed to map SGs\n");
@@ -75,11 +77,11 @@ int usb_gadget_map_request(struct usb_gadget *gadget,
75 77
76 req->num_mapped_sgs = mapped; 78 req->num_mapped_sgs = mapped;
77 } else { 79 } else {
78 req->dma = dma_map_single(&gadget->dev, req->buf, req->length, 80 req->dma = dma_map_single(dev, req->buf, req->length,
79 is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 81 is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
80 82
81 if (dma_mapping_error(&gadget->dev, req->dma)) { 83 if (dma_mapping_error(dev, req->dma)) {
82 dev_err(&gadget->dev, "failed to map buffer\n"); 84 dev_err(dev, "failed to map buffer\n");
83 return -EFAULT; 85 return -EFAULT;
84 } 86 }
85 } 87 }
@@ -95,12 +97,12 @@ void usb_gadget_unmap_request(struct usb_gadget *gadget,
95 return; 97 return;
96 98
97 if (req->num_mapped_sgs) { 99 if (req->num_mapped_sgs) {
98 dma_unmap_sg(&gadget->dev, req->sg, req->num_mapped_sgs, 100 dma_unmap_sg(gadget->dev.parent, req->sg, req->num_mapped_sgs,
99 is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 101 is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
100 102
101 req->num_mapped_sgs = 0; 103 req->num_mapped_sgs = 0;
102 } else { 104 } else {
103 dma_unmap_single(&gadget->dev, req->dma, req->length, 105 dma_unmap_single(gadget->dev.parent, req->dma, req->length,
104 is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 106 is_in ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
105 } 107 }
106} 108}
@@ -321,6 +323,7 @@ err4:
321 323
322err3: 324err3:
323 put_device(&udc->dev); 325 put_device(&udc->dev);
326 device_del(&gadget->dev);
324 327
325err2: 328err2:
326 put_device(&gadget->dev); 329 put_device(&gadget->dev);
diff --git a/drivers/usb/host/ohci-q.c b/drivers/usb/host/ohci-q.c
index f7d561ed3c23..d029bbe9eb36 100644
--- a/drivers/usb/host/ohci-q.c
+++ b/drivers/usb/host/ohci-q.c
@@ -981,10 +981,6 @@ rescan_all:
981 int completed, modified; 981 int completed, modified;
982 __hc32 *prev; 982 __hc32 *prev;
983 983
984 /* Is this ED already invisible to the hardware? */
985 if (ed->state == ED_IDLE)
986 goto ed_idle;
987
988 /* only take off EDs that the HC isn't using, accounting for 984 /* only take off EDs that the HC isn't using, accounting for
989 * frame counter wraps and EDs with partially retired TDs 985 * frame counter wraps and EDs with partially retired TDs
990 */ 986 */
@@ -1012,12 +1008,10 @@ skip_ed:
1012 } 1008 }
1013 1009
1014 /* ED's now officially unlinked, hc doesn't see */ 1010 /* ED's now officially unlinked, hc doesn't see */
1015 ed->state = ED_IDLE;
1016 ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H); 1011 ed->hwHeadP &= ~cpu_to_hc32(ohci, ED_H);
1017 ed->hwNextED = 0; 1012 ed->hwNextED = 0;
1018 wmb(); 1013 wmb();
1019 ed->hwINFO &= ~cpu_to_hc32(ohci, ED_SKIP | ED_DEQUEUE); 1014 ed->hwINFO &= ~cpu_to_hc32(ohci, ED_SKIP | ED_DEQUEUE);
1020ed_idle:
1021 1015
1022 /* reentrancy: if we drop the schedule lock, someone might 1016 /* reentrancy: if we drop the schedule lock, someone might
1023 * have modified this list. normally it's just prepending 1017 * have modified this list. normally it's just prepending
@@ -1088,6 +1082,7 @@ rescan_this:
1088 if (list_empty(&ed->td_list)) { 1082 if (list_empty(&ed->td_list)) {
1089 *last = ed->ed_next; 1083 *last = ed->ed_next;
1090 ed->ed_next = NULL; 1084 ed->ed_next = NULL;
1085 ed->state = ED_IDLE;
1091 list_del(&ed->in_use_list); 1086 list_del(&ed->in_use_list);
1092 } else if (ohci->rh_state == OHCI_RH_RUNNING) { 1087 } else if (ohci->rh_state == OHCI_RH_RUNNING) {
1093 *last = ed->ed_next; 1088 *last = ed->ed_next;
diff --git a/drivers/usb/host/ohci-tmio.c b/drivers/usb/host/ohci-tmio.c
index e9a6eec39142..cfcfadfc94fc 100644
--- a/drivers/usb/host/ohci-tmio.c
+++ b/drivers/usb/host/ohci-tmio.c
@@ -58,7 +58,7 @@
58#define CCR_PM_CKRNEN 0x0002 58#define CCR_PM_CKRNEN 0x0002
59#define CCR_PM_USBPW1 0x0004 59#define CCR_PM_USBPW1 0x0004
60#define CCR_PM_USBPW2 0x0008 60#define CCR_PM_USBPW2 0x0008
61#define CCR_PM_USBPW3 0x0008 61#define CCR_PM_USBPW3 0x0010
62#define CCR_PM_PMEE 0x0100 62#define CCR_PM_PMEE 0x0100
63#define CCR_PM_PMES 0x8000 63#define CCR_PM_PMES 0x8000
64 64
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index e75c565feb53..78241b5550df 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -484,10 +484,13 @@ static void xhci_hub_report_usb3_link_state(struct xhci_hcd *xhci,
484 u32 pls = status_reg & PORT_PLS_MASK; 484 u32 pls = status_reg & PORT_PLS_MASK;
485 485
486 /* resume state is a xHCI internal state. 486 /* resume state is a xHCI internal state.
487 * Do not report it to usb core. 487 * Do not report it to usb core, instead, pretend to be U3,
488 * thus usb core knows it's not ready for transfer
488 */ 489 */
489 if (pls == XDEV_RESUME) 490 if (pls == XDEV_RESUME) {
491 *status |= USB_SS_PORT_LS_U3;
490 return; 492 return;
493 }
491 494
492 /* When the CAS bit is set then warm reset 495 /* When the CAS bit is set then warm reset
493 * should be performed on port 496 * should be performed on port
@@ -588,7 +591,14 @@ static u32 xhci_get_port_status(struct usb_hcd *hcd,
588 status |= USB_PORT_STAT_C_RESET << 16; 591 status |= USB_PORT_STAT_C_RESET << 16;
589 /* USB3.0 only */ 592 /* USB3.0 only */
590 if (hcd->speed == HCD_USB3) { 593 if (hcd->speed == HCD_USB3) {
591 if ((raw_port_status & PORT_PLC)) 594 /* Port link change with port in resume state should not be
595 * reported to usbcore, as this is an internal state to be
596 * handled by xhci driver. Reporting PLC to usbcore may
597 * cause usbcore clearing PLC first and port change event
598 * irq won't be generated.
599 */
600 if ((raw_port_status & PORT_PLC) &&
601 (raw_port_status & PORT_PLS_MASK) != XDEV_RESUME)
592 status |= USB_PORT_STAT_C_LINK_STATE << 16; 602 status |= USB_PORT_STAT_C_LINK_STATE << 16;
593 if ((raw_port_status & PORT_WRC)) 603 if ((raw_port_status & PORT_WRC))
594 status |= USB_PORT_STAT_C_BH_RESET << 16; 604 status |= USB_PORT_STAT_C_BH_RESET << 16;
@@ -1120,10 +1130,10 @@ int xhci_bus_suspend(struct usb_hcd *hcd)
1120 spin_lock_irqsave(&xhci->lock, flags); 1130 spin_lock_irqsave(&xhci->lock, flags);
1121 1131
1122 if (hcd->self.root_hub->do_remote_wakeup) { 1132 if (hcd->self.root_hub->do_remote_wakeup) {
1123 if (bus_state->resuming_ports) { 1133 if (bus_state->resuming_ports || /* USB2 */
1134 bus_state->port_remote_wakeup) { /* USB3 */
1124 spin_unlock_irqrestore(&xhci->lock, flags); 1135 spin_unlock_irqrestore(&xhci->lock, flags);
1125 xhci_dbg(xhci, "suspend failed because " 1136 xhci_dbg(xhci, "suspend failed because a port is resuming\n");
1126 "a port is resuming\n");
1127 return -EBUSY; 1137 return -EBUSY;
1128 } 1138 }
1129 } 1139 }
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index f8336408ef07..9a8c936cd42c 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1427,10 +1427,10 @@ int xhci_endpoint_init(struct xhci_hcd *xhci,
1427 /* Attempt to use the ring cache */ 1427 /* Attempt to use the ring cache */
1428 if (virt_dev->num_rings_cached == 0) 1428 if (virt_dev->num_rings_cached == 0)
1429 return -ENOMEM; 1429 return -ENOMEM;
1430 virt_dev->num_rings_cached--;
1430 virt_dev->eps[ep_index].new_ring = 1431 virt_dev->eps[ep_index].new_ring =
1431 virt_dev->ring_cache[virt_dev->num_rings_cached]; 1432 virt_dev->ring_cache[virt_dev->num_rings_cached];
1432 virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL; 1433 virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL;
1433 virt_dev->num_rings_cached--;
1434 xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring, 1434 xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring,
1435 1, type); 1435 1, type);
1436 } 1436 }
@@ -1792,7 +1792,8 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
1792 int size; 1792 int size;
1793 int i, j, num_ports; 1793 int i, j, num_ports;
1794 1794
1795 del_timer_sync(&xhci->cmd_timer); 1795 if (timer_pending(&xhci->cmd_timer))
1796 del_timer_sync(&xhci->cmd_timer);
1796 1797
1797 /* Free the Event Ring Segment Table and the actual Event Ring */ 1798 /* Free the Event Ring Segment Table and the actual Event Ring */
1798 size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries); 1799 size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 4a4cb1d91ac8..5590eac2b22d 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -23,10 +23,15 @@
23#include <linux/pci.h> 23#include <linux/pci.h>
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/acpi.h>
26 27
27#include "xhci.h" 28#include "xhci.h"
28#include "xhci-trace.h" 29#include "xhci-trace.h"
29 30
31#define PORT2_SSIC_CONFIG_REG2 0x883c
32#define PROG_DONE (1 << 30)
33#define SSIC_PORT_UNUSED (1 << 31)
34
30/* Device for a quirk */ 35/* Device for a quirk */
31#define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73 36#define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73
32#define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000 37#define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000
@@ -176,20 +181,63 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci)
176} 181}
177 182
178/* 183/*
184 * In some Intel xHCI controllers, in order to get D3 working,
185 * through a vendor specific SSIC CONFIG register at offset 0x883c,
186 * SSIC PORT need to be marked as "unused" before putting xHCI
187 * into D3. After D3 exit, the SSIC port need to be marked as "used".
188 * Without this change, xHCI might not enter D3 state.
179 * Make sure PME works on some Intel xHCI controllers by writing 1 to clear 189 * Make sure PME works on some Intel xHCI controllers by writing 1 to clear
180 * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4 190 * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4
181 */ 191 */
182static void xhci_pme_quirk(struct xhci_hcd *xhci) 192static void xhci_pme_quirk(struct usb_hcd *hcd, bool suspend)
183{ 193{
194 struct xhci_hcd *xhci = hcd_to_xhci(hcd);
195 struct pci_dev *pdev = to_pci_dev(hcd->self.controller);
184 u32 val; 196 u32 val;
185 void __iomem *reg; 197 void __iomem *reg;
186 198
199 if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
200 pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) {
201
202 reg = (void __iomem *) xhci->cap_regs + PORT2_SSIC_CONFIG_REG2;
203
204 /* Notify SSIC that SSIC profile programming is not done */
205 val = readl(reg) & ~PROG_DONE;
206 writel(val, reg);
207
208 /* Mark SSIC port as unused(suspend) or used(resume) */
209 val = readl(reg);
210 if (suspend)
211 val |= SSIC_PORT_UNUSED;
212 else
213 val &= ~SSIC_PORT_UNUSED;
214 writel(val, reg);
215
216 /* Notify SSIC that SSIC profile programming is done */
217 val = readl(reg) | PROG_DONE;
218 writel(val, reg);
219 readl(reg);
220 }
221
187 reg = (void __iomem *) xhci->cap_regs + 0x80a4; 222 reg = (void __iomem *) xhci->cap_regs + 0x80a4;
188 val = readl(reg); 223 val = readl(reg);
189 writel(val | BIT(28), reg); 224 writel(val | BIT(28), reg);
190 readl(reg); 225 readl(reg);
191} 226}
192 227
228#ifdef CONFIG_ACPI
229static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev)
230{
231 static const u8 intel_dsm_uuid[] = {
232 0xb7, 0x0c, 0x34, 0xac, 0x01, 0xe9, 0xbf, 0x45,
233 0xb7, 0xe6, 0x2b, 0x34, 0xec, 0x93, 0x1e, 0x23,
234 };
235 acpi_evaluate_dsm(ACPI_HANDLE(&dev->dev), intel_dsm_uuid, 3, 1, NULL);
236}
237#else
238 static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev) { }
239#endif /* CONFIG_ACPI */
240
193/* called during probe() after chip reset completes */ 241/* called during probe() after chip reset completes */
194static int xhci_pci_setup(struct usb_hcd *hcd) 242static int xhci_pci_setup(struct usb_hcd *hcd)
195{ 243{
@@ -263,6 +311,9 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
263 HCC_MAX_PSA(xhci->hcc_params) >= 4) 311 HCC_MAX_PSA(xhci->hcc_params) >= 4)
264 xhci->shared_hcd->can_do_streams = 1; 312 xhci->shared_hcd->can_do_streams = 1;
265 313
314 if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
315 xhci_pme_acpi_rtd3_enable(dev);
316
266 /* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */ 317 /* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */
267 pm_runtime_put_noidle(&dev->dev); 318 pm_runtime_put_noidle(&dev->dev);
268 319
@@ -307,7 +358,7 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup)
307 pdev->no_d3cold = true; 358 pdev->no_d3cold = true;
308 359
309 if (xhci->quirks & XHCI_PME_STUCK_QUIRK) 360 if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
310 xhci_pme_quirk(xhci); 361 xhci_pme_quirk(hcd, true);
311 362
312 return xhci_suspend(xhci, do_wakeup); 363 return xhci_suspend(xhci, do_wakeup);
313} 364}
@@ -340,7 +391,7 @@ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated)
340 usb_enable_intel_xhci_ports(pdev); 391 usb_enable_intel_xhci_ports(pdev);
341 392
342 if (xhci->quirks & XHCI_PME_STUCK_QUIRK) 393 if (xhci->quirks & XHCI_PME_STUCK_QUIRK)
343 xhci_pme_quirk(xhci); 394 xhci_pme_quirk(hcd, false);
344 395
345 retval = xhci_resume(xhci, hibernated); 396 retval = xhci_resume(xhci, hibernated);
346 return retval; 397 return retval;
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 94416ff70810..32f4d564494a 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -82,7 +82,7 @@ dma_addr_t xhci_trb_virt_to_dma(struct xhci_segment *seg,
82 return 0; 82 return 0;
83 /* offset in TRBs */ 83 /* offset in TRBs */
84 segment_offset = trb - seg->trbs; 84 segment_offset = trb - seg->trbs;
85 if (segment_offset > TRBS_PER_SEGMENT) 85 if (segment_offset >= TRBS_PER_SEGMENT)
86 return 0; 86 return 0;
87 return seg->dma + (segment_offset * sizeof(*trb)); 87 return seg->dma + (segment_offset * sizeof(*trb));
88} 88}
@@ -1546,6 +1546,9 @@ static void handle_port_status(struct xhci_hcd *xhci,
1546 usb_hcd_resume_root_hub(hcd); 1546 usb_hcd_resume_root_hub(hcd);
1547 } 1547 }
1548 1548
1549 if (hcd->speed == HCD_USB3 && (temp & PORT_PLS_MASK) == XDEV_INACTIVE)
1550 bus_state->port_remote_wakeup &= ~(1 << faked_port_index);
1551
1549 if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) { 1552 if ((temp & PORT_PLC) && (temp & PORT_PLS_MASK) == XDEV_RESUME) {
1550 xhci_dbg(xhci, "port resume event for port %d\n", port_id); 1553 xhci_dbg(xhci, "port resume event for port %d\n", port_id);
1551 1554
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 7da0d6043d33..526ebc0c7e72 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -3453,6 +3453,9 @@ int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
3453 return -EINVAL; 3453 return -EINVAL;
3454 } 3454 }
3455 3455
3456 if (virt_dev->tt_info)
3457 old_active_eps = virt_dev->tt_info->active_eps;
3458
3456 if (virt_dev->udev != udev) { 3459 if (virt_dev->udev != udev) {
3457 /* If the virt_dev and the udev does not match, this virt_dev 3460 /* If the virt_dev and the udev does not match, this virt_dev
3458 * may belong to another udev. 3461 * may belong to another udev.
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 31e46cc55807..ed2ebf647c38 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -285,6 +285,7 @@ struct xhci_op_regs {
285#define XDEV_U0 (0x0 << 5) 285#define XDEV_U0 (0x0 << 5)
286#define XDEV_U2 (0x2 << 5) 286#define XDEV_U2 (0x2 << 5)
287#define XDEV_U3 (0x3 << 5) 287#define XDEV_U3 (0x3 << 5)
288#define XDEV_INACTIVE (0x6 << 5)
288#define XDEV_RESUME (0xf << 5) 289#define XDEV_RESUME (0xf << 5)
289/* true: port has power (see HCC_PPC) */ 290/* true: port has power (see HCC_PPC) */
290#define PORT_POWER (1 << 9) 291#define PORT_POWER (1 << 9)
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 19b85ee98a72..876423b8892c 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1099,6 +1099,8 @@ static const struct usb_device_id option_ids[] = {
1099 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ 1099 { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
1100 { USB_DEVICE_INTERFACE_CLASS(SIERRA_VENDOR_ID, 0x68c0, 0xff), 1100 { USB_DEVICE_INTERFACE_CLASS(SIERRA_VENDOR_ID, 0x68c0, 0xff),
1101 .driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC73xx */ 1101 .driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC73xx */
1102 { USB_DEVICE_INTERFACE_CLASS(SIERRA_VENDOR_ID, 0x9041, 0xff),
1103 .driver_info = (kernel_ulong_t)&sierra_mc73xx_blacklist }, /* MC7305/MC7355 */
1102 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) }, 1104 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6001) },
1103 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) }, 1105 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_CMU_300) },
1104 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003), 1106 { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6003),
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 9c63897b3a56..d156545728c2 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -145,7 +145,6 @@ static const struct usb_device_id id_table[] = {
145 {DEVICE_SWI(0x1199, 0x901c)}, /* Sierra Wireless EM7700 */ 145 {DEVICE_SWI(0x1199, 0x901c)}, /* Sierra Wireless EM7700 */
146 {DEVICE_SWI(0x1199, 0x901f)}, /* Sierra Wireless EM7355 */ 146 {DEVICE_SWI(0x1199, 0x901f)}, /* Sierra Wireless EM7355 */
147 {DEVICE_SWI(0x1199, 0x9040)}, /* Sierra Wireless Modem */ 147 {DEVICE_SWI(0x1199, 0x9040)}, /* Sierra Wireless Modem */
148 {DEVICE_SWI(0x1199, 0x9041)}, /* Sierra Wireless MC7305/MC7355 */
149 {DEVICE_SWI(0x1199, 0x9051)}, /* Netgear AirCard 340U */ 148 {DEVICE_SWI(0x1199, 0x9051)}, /* Netgear AirCard 340U */
150 {DEVICE_SWI(0x1199, 0x9053)}, /* Sierra Wireless Modem */ 149 {DEVICE_SWI(0x1199, 0x9053)}, /* Sierra Wireless Modem */
151 {DEVICE_SWI(0x1199, 0x9054)}, /* Sierra Wireless Modem */ 150 {DEVICE_SWI(0x1199, 0x9054)}, /* Sierra Wireless Modem */
@@ -158,6 +157,7 @@ static const struct usb_device_id id_table[] = {
158 {DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */ 157 {DEVICE_SWI(0x413c, 0x81a4)}, /* Dell Wireless 5570e HSPA+ (42Mbps) Mobile Broadband Card */
159 {DEVICE_SWI(0x413c, 0x81a8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */ 158 {DEVICE_SWI(0x413c, 0x81a8)}, /* Dell Wireless 5808 Gobi(TM) 4G LTE Mobile Broadband Card */
160 {DEVICE_SWI(0x413c, 0x81a9)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */ 159 {DEVICE_SWI(0x413c, 0x81a9)}, /* Dell Wireless 5808e Gobi(TM) 4G LTE Mobile Broadband Card */
160 {DEVICE_SWI(0x413c, 0x81b1)}, /* Dell Wireless 5809e Gobi(TM) 4G LTE Mobile Broadband Card */
161 161
162 /* Huawei devices */ 162 /* Huawei devices */
163 {DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */ 163 {DEVICE_HWI(0x03f0, 0x581d)}, /* HP lt4112 LTE/HSPA+ Gobi 4G Modem (Huawei me906e) */
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index 46179a0828eb..07d1ecd564f7 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -289,6 +289,7 @@ static const struct usb_device_id id_table[] = {
289 { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68AA, 0xFF, 0xFF, 0xFF), 289 { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x68AA, 0xFF, 0xFF, 0xFF),
290 .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist 290 .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
291 }, 291 },
292 { USB_DEVICE(0x1199, 0x68AB) }, /* Sierra Wireless AR8550 */
292 /* AT&T Direct IP LTE modems */ 293 /* AT&T Direct IP LTE modems */
293 { USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68AA, 0xFF, 0xFF, 0xFF), 294 { USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68AA, 0xFF, 0xFF, 0xFF),
294 .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist 295 .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index caf188800c67..6b2479123de7 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -2065,6 +2065,18 @@ UNUSUAL_DEV( 0x1908, 0x3335, 0x0200, 0x0200,
2065 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 2065 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2066 US_FL_NO_READ_DISC_INFO ), 2066 US_FL_NO_READ_DISC_INFO ),
2067 2067
2068/* Reported by Oliver Neukum <oneukum@suse.com>
2069 * This device morphes spontaneously into another device if the access
2070 * pattern of Windows isn't followed. Thus writable media would be dirty
2071 * if the initial instance is used. So the device is limited to its
2072 * virtual CD.
2073 * And yes, the concept that BCD goes up to 9 is not heeded */
2074UNUSUAL_DEV( 0x19d2, 0x1225, 0x0000, 0xffff,
2075 "ZTE,Incorporated",
2076 "ZTE WCDMA Technologies MSM",
2077 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2078 US_FL_SINGLE_LUN ),
2079
2068/* Reported by Sven Geggus <sven-usbst@geggus.net> 2080/* Reported by Sven Geggus <sven-usbst@geggus.net>
2069 * This encrypted pen drive returns bogus data for the initial READ(10). 2081 * This encrypted pen drive returns bogus data for the initial READ(10).
2070 */ 2082 */
@@ -2074,6 +2086,17 @@ UNUSUAL_DEV( 0x1b1c, 0x1ab5, 0x0200, 0x0200,
2074 USB_SC_DEVICE, USB_PR_DEVICE, NULL, 2086 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2075 US_FL_INITIAL_READ10 ), 2087 US_FL_INITIAL_READ10 ),
2076 2088
2089/* Reported by Hans de Goede <hdegoede@redhat.com>
2090 * These are mini projectors using USB for both power and video data transport
2091 * The usb-storage interface is a virtual windows driver CD, which the gm12u320
2092 * driver automatically converts into framebuffer & kms dri device nodes.
2093 */
2094UNUSUAL_DEV( 0x1de1, 0xc102, 0x0000, 0xffff,
2095 "Grain-media Technology Corp.",
2096 "USB3.0 Device GM12U320",
2097 USB_SC_DEVICE, USB_PR_DEVICE, NULL,
2098 US_FL_IGNORE_DEVICE ),
2099
2077/* Patch by Richard Schütz <r.schtz@t-online.de> 2100/* Patch by Richard Schütz <r.schtz@t-online.de>
2078 * This external hard drive enclosure uses a JMicron chip which 2101 * This external hard drive enclosure uses a JMicron chip which
2079 * needs the US_FL_IGNORE_RESIDUE flag to work properly. */ 2102 * needs the US_FL_IGNORE_RESIDUE flag to work properly. */
diff --git a/drivers/vfio/vfio.c b/drivers/vfio/vfio.c
index 2fb29dfeffbd..563c510f285c 100644
--- a/drivers/vfio/vfio.c
+++ b/drivers/vfio/vfio.c
@@ -689,6 +689,23 @@ struct vfio_device *vfio_device_get_from_dev(struct device *dev)
689} 689}
690EXPORT_SYMBOL_GPL(vfio_device_get_from_dev); 690EXPORT_SYMBOL_GPL(vfio_device_get_from_dev);
691 691
692static struct vfio_device *vfio_device_get_from_name(struct vfio_group *group,
693 char *buf)
694{
695 struct vfio_device *device;
696
697 mutex_lock(&group->device_lock);
698 list_for_each_entry(device, &group->device_list, group_next) {
699 if (!strcmp(dev_name(device->dev), buf)) {
700 vfio_device_get(device);
701 break;
702 }
703 }
704 mutex_unlock(&group->device_lock);
705
706 return device;
707}
708
692/* 709/*
693 * Caller must hold a reference to the vfio_device 710 * Caller must hold a reference to the vfio_device
694 */ 711 */
@@ -1198,53 +1215,53 @@ static int vfio_group_get_device_fd(struct vfio_group *group, char *buf)
1198{ 1215{
1199 struct vfio_device *device; 1216 struct vfio_device *device;
1200 struct file *filep; 1217 struct file *filep;
1201 int ret = -ENODEV; 1218 int ret;
1202 1219
1203 if (0 == atomic_read(&group->container_users) || 1220 if (0 == atomic_read(&group->container_users) ||
1204 !group->container->iommu_driver || !vfio_group_viable(group)) 1221 !group->container->iommu_driver || !vfio_group_viable(group))
1205 return -EINVAL; 1222 return -EINVAL;
1206 1223
1207 mutex_lock(&group->device_lock); 1224 device = vfio_device_get_from_name(group, buf);
1208 list_for_each_entry(device, &group->device_list, group_next) { 1225 if (!device)
1209 if (strcmp(dev_name(device->dev), buf)) 1226 return -ENODEV;
1210 continue;
1211 1227
1212 ret = device->ops->open(device->device_data); 1228 ret = device->ops->open(device->device_data);
1213 if (ret) 1229 if (ret) {
1214 break; 1230 vfio_device_put(device);
1215 /* 1231 return ret;
1216 * We can't use anon_inode_getfd() because we need to modify 1232 }
1217 * the f_mode flags directly to allow more than just ioctls
1218 */
1219 ret = get_unused_fd_flags(O_CLOEXEC);
1220 if (ret < 0) {
1221 device->ops->release(device->device_data);
1222 break;
1223 }
1224 1233
1225 filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops, 1234 /*
1226 device, O_RDWR); 1235 * We can't use anon_inode_getfd() because we need to modify
1227 if (IS_ERR(filep)) { 1236 * the f_mode flags directly to allow more than just ioctls
1228 put_unused_fd(ret); 1237 */
1229 ret = PTR_ERR(filep); 1238 ret = get_unused_fd_flags(O_CLOEXEC);
1230 device->ops->release(device->device_data); 1239 if (ret < 0) {
1231 break; 1240 device->ops->release(device->device_data);
1232 } 1241 vfio_device_put(device);
1242 return ret;
1243 }
1233 1244
1234 /* 1245 filep = anon_inode_getfile("[vfio-device]", &vfio_device_fops,
1235 * TODO: add an anon_inode interface to do this. 1246 device, O_RDWR);
1236 * Appears to be missing by lack of need rather than 1247 if (IS_ERR(filep)) {
1237 * explicitly prevented. Now there's need. 1248 put_unused_fd(ret);
1238 */ 1249 ret = PTR_ERR(filep);
1239 filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE); 1250 device->ops->release(device->device_data);
1251 vfio_device_put(device);
1252 return ret;
1253 }
1254
1255 /*
1256 * TODO: add an anon_inode interface to do this.
1257 * Appears to be missing by lack of need rather than
1258 * explicitly prevented. Now there's need.
1259 */
1260 filep->f_mode |= (FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
1240 1261
1241 vfio_device_get(device); 1262 atomic_inc(&group->container_users);
1242 atomic_inc(&group->container_users);
1243 1263
1244 fd_install(ret, filep); 1264 fd_install(ret, filep);
1245 break;
1246 }
1247 mutex_unlock(&group->device_lock);
1248 1265
1249 return ret; 1266 return ret;
1250} 1267}
diff --git a/drivers/vhost/vhost.c b/drivers/vhost/vhost.c
index 9e8e004bb1c3..eec2f11809ff 100644
--- a/drivers/vhost/vhost.c
+++ b/drivers/vhost/vhost.c
@@ -22,14 +22,20 @@
22#include <linux/file.h> 22#include <linux/file.h>
23#include <linux/highmem.h> 23#include <linux/highmem.h>
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/vmalloc.h>
25#include <linux/kthread.h> 26#include <linux/kthread.h>
26#include <linux/cgroup.h> 27#include <linux/cgroup.h>
27#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/sort.h>
28 30
29#include "vhost.h" 31#include "vhost.h"
30 32
33static ushort max_mem_regions = 64;
34module_param(max_mem_regions, ushort, 0444);
35MODULE_PARM_DESC(max_mem_regions,
36 "Maximum number of memory regions in memory map. (default: 64)");
37
31enum { 38enum {
32 VHOST_MEMORY_MAX_NREGIONS = 64,
33 VHOST_MEMORY_F_LOG = 0x1, 39 VHOST_MEMORY_F_LOG = 0x1,
34}; 40};
35 41
@@ -543,7 +549,7 @@ void vhost_dev_cleanup(struct vhost_dev *dev, bool locked)
543 fput(dev->log_file); 549 fput(dev->log_file);
544 dev->log_file = NULL; 550 dev->log_file = NULL;
545 /* No one will access memory at this point */ 551 /* No one will access memory at this point */
546 kfree(dev->memory); 552 kvfree(dev->memory);
547 dev->memory = NULL; 553 dev->memory = NULL;
548 WARN_ON(!list_empty(&dev->work_list)); 554 WARN_ON(!list_empty(&dev->work_list));
549 if (dev->worker) { 555 if (dev->worker) {
@@ -663,6 +669,25 @@ int vhost_vq_access_ok(struct vhost_virtqueue *vq)
663} 669}
664EXPORT_SYMBOL_GPL(vhost_vq_access_ok); 670EXPORT_SYMBOL_GPL(vhost_vq_access_ok);
665 671
672static int vhost_memory_reg_sort_cmp(const void *p1, const void *p2)
673{
674 const struct vhost_memory_region *r1 = p1, *r2 = p2;
675 if (r1->guest_phys_addr < r2->guest_phys_addr)
676 return 1;
677 if (r1->guest_phys_addr > r2->guest_phys_addr)
678 return -1;
679 return 0;
680}
681
682static void *vhost_kvzalloc(unsigned long size)
683{
684 void *n = kzalloc(size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
685
686 if (!n)
687 n = vzalloc(size);
688 return n;
689}
690
666static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m) 691static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
667{ 692{
668 struct vhost_memory mem, *newmem, *oldmem; 693 struct vhost_memory mem, *newmem, *oldmem;
@@ -673,21 +698,23 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
673 return -EFAULT; 698 return -EFAULT;
674 if (mem.padding) 699 if (mem.padding)
675 return -EOPNOTSUPP; 700 return -EOPNOTSUPP;
676 if (mem.nregions > VHOST_MEMORY_MAX_NREGIONS) 701 if (mem.nregions > max_mem_regions)
677 return -E2BIG; 702 return -E2BIG;
678 newmem = kmalloc(size + mem.nregions * sizeof *m->regions, GFP_KERNEL); 703 newmem = vhost_kvzalloc(size + mem.nregions * sizeof(*m->regions));
679 if (!newmem) 704 if (!newmem)
680 return -ENOMEM; 705 return -ENOMEM;
681 706
682 memcpy(newmem, &mem, size); 707 memcpy(newmem, &mem, size);
683 if (copy_from_user(newmem->regions, m->regions, 708 if (copy_from_user(newmem->regions, m->regions,
684 mem.nregions * sizeof *m->regions)) { 709 mem.nregions * sizeof *m->regions)) {
685 kfree(newmem); 710 kvfree(newmem);
686 return -EFAULT; 711 return -EFAULT;
687 } 712 }
713 sort(newmem->regions, newmem->nregions, sizeof(*newmem->regions),
714 vhost_memory_reg_sort_cmp, NULL);
688 715
689 if (!memory_access_ok(d, newmem, 0)) { 716 if (!memory_access_ok(d, newmem, 0)) {
690 kfree(newmem); 717 kvfree(newmem);
691 return -EFAULT; 718 return -EFAULT;
692 } 719 }
693 oldmem = d->memory; 720 oldmem = d->memory;
@@ -699,7 +726,7 @@ static long vhost_set_memory(struct vhost_dev *d, struct vhost_memory __user *m)
699 d->vqs[i]->memory = newmem; 726 d->vqs[i]->memory = newmem;
700 mutex_unlock(&d->vqs[i]->mutex); 727 mutex_unlock(&d->vqs[i]->mutex);
701 } 728 }
702 kfree(oldmem); 729 kvfree(oldmem);
703 return 0; 730 return 0;
704} 731}
705 732
@@ -965,6 +992,7 @@ long vhost_dev_ioctl(struct vhost_dev *d, unsigned int ioctl, void __user *argp)
965 } 992 }
966 if (eventfp != d->log_file) { 993 if (eventfp != d->log_file) {
967 filep = d->log_file; 994 filep = d->log_file;
995 d->log_file = eventfp;
968 ctx = d->log_ctx; 996 ctx = d->log_ctx;
969 d->log_ctx = eventfp ? 997 d->log_ctx = eventfp ?
970 eventfd_ctx_fileget(eventfp) : NULL; 998 eventfd_ctx_fileget(eventfp) : NULL;
@@ -992,17 +1020,22 @@ EXPORT_SYMBOL_GPL(vhost_dev_ioctl);
992static const struct vhost_memory_region *find_region(struct vhost_memory *mem, 1020static const struct vhost_memory_region *find_region(struct vhost_memory *mem,
993 __u64 addr, __u32 len) 1021 __u64 addr, __u32 len)
994{ 1022{
995 struct vhost_memory_region *reg; 1023 const struct vhost_memory_region *reg;
996 int i; 1024 int start = 0, end = mem->nregions;
997 1025
998 /* linear search is not brilliant, but we really have on the order of 6 1026 while (start < end) {
999 * regions in practice */ 1027 int slot = start + (end - start) / 2;
1000 for (i = 0; i < mem->nregions; ++i) { 1028 reg = mem->regions + slot;
1001 reg = mem->regions + i; 1029 if (addr >= reg->guest_phys_addr)
1002 if (reg->guest_phys_addr <= addr && 1030 end = slot;
1003 reg->guest_phys_addr + reg->memory_size - 1 >= addr) 1031 else
1004 return reg; 1032 start = slot + 1;
1005 } 1033 }
1034
1035 reg = mem->regions + start;
1036 if (addr >= reg->guest_phys_addr &&
1037 reg->guest_phys_addr + reg->memory_size > addr)
1038 return reg;
1006 return NULL; 1039 return NULL;
1007} 1040}
1008 1041
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 658c34bb9076..1aaf89300621 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -1306,10 +1306,11 @@ static void fbcon_cursor(struct vc_data *vc, int mode)
1306 int y; 1306 int y;
1307 int c = scr_readw((u16 *) vc->vc_pos); 1307 int c = scr_readw((u16 *) vc->vc_pos);
1308 1308
1309 ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms);
1310
1309 if (fbcon_is_inactive(vc, info) || vc->vc_deccm != 1) 1311 if (fbcon_is_inactive(vc, info) || vc->vc_deccm != 1)
1310 return; 1312 return;
1311 1313
1312 ops->cur_blink_jiffies = msecs_to_jiffies(vc->vc_cur_blink_ms);
1313 if (vc->vc_cursor_type & 0x10) 1314 if (vc->vc_cursor_type & 0x10)
1314 fbcon_del_cursor_timer(info); 1315 fbcon_del_cursor_timer(info);
1315 else 1316 else
diff --git a/drivers/video/fbdev/Kconfig b/drivers/video/fbdev/Kconfig
index 2d98de535e0f..f888561568d9 100644
--- a/drivers/video/fbdev/Kconfig
+++ b/drivers/video/fbdev/Kconfig
@@ -298,7 +298,7 @@ config FB_ARMCLCD
298 298
299# Helper logic selected only by the ARM Versatile platform family. 299# Helper logic selected only by the ARM Versatile platform family.
300config PLAT_VERSATILE_CLCD 300config PLAT_VERSATILE_CLCD
301 def_bool ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS 301 def_bool ARCH_VERSATILE || ARCH_REALVIEW || ARCH_VEXPRESS || ARCH_INTEGRATOR
302 depends on ARM 302 depends on ARM
303 depends on FB_ARMCLCD && FB=y 303 depends on FB_ARMCLCD && FB=y
304 304
diff --git a/drivers/video/fbdev/omap2/dss/dss-of.c b/drivers/video/fbdev/omap2/dss/dss-of.c
index 928ee639c0c1..bf407b6ba15c 100644
--- a/drivers/video/fbdev/omap2/dss/dss-of.c
+++ b/drivers/video/fbdev/omap2/dss/dss-of.c
@@ -60,6 +60,8 @@ omapdss_of_get_next_port(const struct device_node *parent,
60 } 60 }
61 prev = port; 61 prev = port;
62 } while (of_node_cmp(port->name, "port") != 0); 62 } while (of_node_cmp(port->name, "port") != 0);
63
64 of_node_put(ports);
63 } 65 }
64 66
65 return port; 67 return port;
@@ -94,7 +96,7 @@ struct device_node *dss_of_port_get_parent_device(struct device_node *port)
94 if (!port) 96 if (!port)
95 return NULL; 97 return NULL;
96 98
97 np = of_get_next_parent(port); 99 np = of_get_parent(port);
98 100
99 for (i = 0; i < 2 && np; ++i) { 101 for (i = 0; i < 2 && np; ++i) {
100 struct property *prop; 102 struct property *prop;
diff --git a/drivers/video/fbdev/pxa3xx-gcu.c b/drivers/video/fbdev/pxa3xx-gcu.c
index 86bd457d039d..50bce45e7f3d 100644
--- a/drivers/video/fbdev/pxa3xx-gcu.c
+++ b/drivers/video/fbdev/pxa3xx-gcu.c
@@ -653,7 +653,7 @@ static int pxa3xx_gcu_probe(struct platform_device *pdev)
653 goto err_free_dma; 653 goto err_free_dma;
654 } 654 }
655 655
656 ret = clk_enable(priv->clk); 656 ret = clk_prepare_enable(priv->clk);
657 if (ret < 0) { 657 if (ret < 0) {
658 dev_err(dev, "failed to enable clock\n"); 658 dev_err(dev, "failed to enable clock\n");
659 goto err_misc_deregister; 659 goto err_misc_deregister;
@@ -685,7 +685,7 @@ err_misc_deregister:
685 misc_deregister(&priv->misc_dev); 685 misc_deregister(&priv->misc_dev);
686 686
687err_disable_clk: 687err_disable_clk:
688 clk_disable(priv->clk); 688 clk_disable_unprepare(priv->clk);
689 689
690 return ret; 690 return ret;
691} 691}
diff --git a/drivers/video/of_videomode.c b/drivers/video/of_videomode.c
index 111c2d1911d3..b5102aa6090d 100644
--- a/drivers/video/of_videomode.c
+++ b/drivers/video/of_videomode.c
@@ -44,11 +44,9 @@ int of_get_videomode(struct device_node *np, struct videomode *vm,
44 index = disp->native_mode; 44 index = disp->native_mode;
45 45
46 ret = videomode_from_timings(disp, vm, index); 46 ret = videomode_from_timings(disp, vm, index);
47 if (ret)
48 return ret;
49 47
50 display_timings_release(disp); 48 display_timings_release(disp);
51 49
52 return 0; 50 return ret;
53} 51}
54EXPORT_SYMBOL_GPL(of_get_videomode); 52EXPORT_SYMBOL_GPL(of_get_videomode);
diff --git a/drivers/virtio/virtio_input.c b/drivers/virtio/virtio_input.c
index 60e2a1677563..c96944b59856 100644
--- a/drivers/virtio/virtio_input.c
+++ b/drivers/virtio/virtio_input.c
@@ -313,6 +313,7 @@ err_init_vq:
313static void virtinput_remove(struct virtio_device *vdev) 313static void virtinput_remove(struct virtio_device *vdev)
314{ 314{
315 struct virtio_input *vi = vdev->priv; 315 struct virtio_input *vi = vdev->priv;
316 void *buf;
316 unsigned long flags; 317 unsigned long flags;
317 318
318 spin_lock_irqsave(&vi->lock, flags); 319 spin_lock_irqsave(&vi->lock, flags);
@@ -320,6 +321,9 @@ static void virtinput_remove(struct virtio_device *vdev)
320 spin_unlock_irqrestore(&vi->lock, flags); 321 spin_unlock_irqrestore(&vi->lock, flags);
321 322
322 input_unregister_device(vi->idev); 323 input_unregister_device(vi->idev);
324 vdev->config->reset(vdev);
325 while ((buf = virtqueue_detach_unused_buf(vi->sts)) != NULL)
326 kfree(buf);
323 vdev->config->del_vqs(vdev); 327 vdev->config->del_vqs(vdev);
324 kfree(vi); 328 kfree(vi);
325} 329}
diff --git a/drivers/xen/balloon.c b/drivers/xen/balloon.c
index fd933695f232..bf4a23c7c591 100644
--- a/drivers/xen/balloon.c
+++ b/drivers/xen/balloon.c
@@ -472,7 +472,7 @@ static enum bp_state decrease_reservation(unsigned long nr_pages, gfp_t gfp)
472} 472}
473 473
474/* 474/*
475 * We avoid multiple worker processes conflicting via the balloon mutex. 475 * As this is a work item it is guaranteed to run as a single instance only.
476 * We may of course race updates of the target counts (which are protected 476 * We may of course race updates of the target counts (which are protected
477 * by the balloon lock), or with changes to the Xen hard limit, but we will 477 * by the balloon lock), or with changes to the Xen hard limit, but we will
478 * recover from these in time. 478 * recover from these in time.
@@ -482,9 +482,10 @@ static void balloon_process(struct work_struct *work)
482 enum bp_state state = BP_DONE; 482 enum bp_state state = BP_DONE;
483 long credit; 483 long credit;
484 484
485 mutex_lock(&balloon_mutex);
486 485
487 do { 486 do {
487 mutex_lock(&balloon_mutex);
488
488 credit = current_credit(); 489 credit = current_credit();
489 490
490 if (credit > 0) { 491 if (credit > 0) {
@@ -499,17 +500,15 @@ static void balloon_process(struct work_struct *work)
499 500
500 state = update_schedule(state); 501 state = update_schedule(state);
501 502
502#ifndef CONFIG_PREEMPT 503 mutex_unlock(&balloon_mutex);
503 if (need_resched()) 504
504 schedule(); 505 cond_resched();
505#endif 506
506 } while (credit && state == BP_DONE); 507 } while (credit && state == BP_DONE);
507 508
508 /* Schedule more work if there is some still to be done. */ 509 /* Schedule more work if there is some still to be done. */
509 if (state == BP_EAGAIN) 510 if (state == BP_EAGAIN)
510 schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ); 511 schedule_delayed_work(&balloon_worker, balloon_stats.schedule_delay * HZ);
511
512 mutex_unlock(&balloon_mutex);
513} 512}
514 513
515/* Resets the Xen limit, sets new target, and kicks off processing. */ 514/* Resets the Xen limit, sets new target, and kicks off processing. */
diff --git a/drivers/xen/gntdev.c b/drivers/xen/gntdev.c
index 67b9163db718..0dbb222daaf1 100644
--- a/drivers/xen/gntdev.c
+++ b/drivers/xen/gntdev.c
@@ -568,12 +568,14 @@ static int gntdev_release(struct inode *inode, struct file *flip)
568 568
569 pr_debug("priv %p\n", priv); 569 pr_debug("priv %p\n", priv);
570 570
571 mutex_lock(&priv->lock);
571 while (!list_empty(&priv->maps)) { 572 while (!list_empty(&priv->maps)) {
572 map = list_entry(priv->maps.next, struct grant_map, next); 573 map = list_entry(priv->maps.next, struct grant_map, next);
573 list_del(&map->next); 574 list_del(&map->next);
574 gntdev_put_map(NULL /* already removed */, map); 575 gntdev_put_map(NULL /* already removed */, map);
575 } 576 }
576 WARN_ON(!list_empty(&priv->freeable_maps)); 577 WARN_ON(!list_empty(&priv->freeable_maps));
578 mutex_unlock(&priv->lock);
577 579
578 if (use_ptemod) 580 if (use_ptemod)
579 mmu_notifier_unregister(&priv->mn, priv->mm); 581 mmu_notifier_unregister(&priv->mn, priv->mm);
diff --git a/drivers/xen/xenbus/xenbus_client.c b/drivers/xen/xenbus/xenbus_client.c
index 9ad327238ba9..e30353575d5d 100644
--- a/drivers/xen/xenbus/xenbus_client.c
+++ b/drivers/xen/xenbus/xenbus_client.c
@@ -814,8 +814,10 @@ static int xenbus_unmap_ring_vfree_hvm(struct xenbus_device *dev, void *vaddr)
814 814
815 rv = xenbus_unmap_ring(dev, node->handles, node->nr_handles, 815 rv = xenbus_unmap_ring(dev, node->handles, node->nr_handles,
816 addrs); 816 addrs);
817 if (!rv) 817 if (!rv) {
818 vunmap(vaddr); 818 vunmap(vaddr);
819 free_xenballooned_pages(node->nr_handles, node->hvm.pages);
820 }
819 else 821 else
820 WARN(1, "Leaking %p, size %u page(s)\n", vaddr, 822 WARN(1, "Leaking %p, size %u page(s)\n", vaddr,
821 node->nr_handles); 823 node->nr_handles);
diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c
index 862fbc206755..564a7de17d99 100644
--- a/fs/btrfs/dev-replace.c
+++ b/fs/btrfs/dev-replace.c
@@ -378,7 +378,7 @@ int btrfs_dev_replace_start(struct btrfs_root *root,
378 378
379 ret = btrfs_kobj_add_device(tgt_device->fs_devices, tgt_device); 379 ret = btrfs_kobj_add_device(tgt_device->fs_devices, tgt_device);
380 if (ret) 380 if (ret)
381 btrfs_error(root->fs_info, ret, "kobj add dev failed"); 381 btrfs_err(root->fs_info, "kobj add dev failed %d\n", ret);
382 382
383 printk_in_rcu(KERN_INFO 383 printk_in_rcu(KERN_INFO
384 "BTRFS: dev_replace from %s (devid %llu) to %s started\n", 384 "BTRFS: dev_replace from %s (devid %llu) to %s started\n",
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index a9aadb2ad525..f556c3732c2c 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2842,6 +2842,7 @@ int open_ctree(struct super_block *sb,
2842 !extent_buffer_uptodate(chunk_root->node)) { 2842 !extent_buffer_uptodate(chunk_root->node)) {
2843 printk(KERN_ERR "BTRFS: failed to read chunk root on %s\n", 2843 printk(KERN_ERR "BTRFS: failed to read chunk root on %s\n",
2844 sb->s_id); 2844 sb->s_id);
2845 chunk_root->node = NULL;
2845 goto fail_tree_roots; 2846 goto fail_tree_roots;
2846 } 2847 }
2847 btrfs_set_root_node(&chunk_root->root_item, chunk_root->node); 2848 btrfs_set_root_node(&chunk_root->root_item, chunk_root->node);
@@ -2879,7 +2880,7 @@ retry_root_backup:
2879 !extent_buffer_uptodate(tree_root->node)) { 2880 !extent_buffer_uptodate(tree_root->node)) {
2880 printk(KERN_WARNING "BTRFS: failed to read tree root on %s\n", 2881 printk(KERN_WARNING "BTRFS: failed to read tree root on %s\n",
2881 sb->s_id); 2882 sb->s_id);
2882 2883 tree_root->node = NULL;
2883 goto recovery_tree_root; 2884 goto recovery_tree_root;
2884 } 2885 }
2885 2886
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 171312d51799..07204bf601ed 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -4227,6 +4227,24 @@ out:
4227 space_info->chunk_alloc = 0; 4227 space_info->chunk_alloc = 0;
4228 spin_unlock(&space_info->lock); 4228 spin_unlock(&space_info->lock);
4229 mutex_unlock(&fs_info->chunk_mutex); 4229 mutex_unlock(&fs_info->chunk_mutex);
4230 /*
4231 * When we allocate a new chunk we reserve space in the chunk block
4232 * reserve to make sure we can COW nodes/leafs in the chunk tree or
4233 * add new nodes/leafs to it if we end up needing to do it when
4234 * inserting the chunk item and updating device items as part of the
4235 * second phase of chunk allocation, performed by
4236 * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a
4237 * large number of new block groups to create in our transaction
4238 * handle's new_bgs list to avoid exhausting the chunk block reserve
4239 * in extreme cases - like having a single transaction create many new
4240 * block groups when starting to write out the free space caches of all
4241 * the block groups that were made dirty during the lifetime of the
4242 * transaction.
4243 */
4244 if (trans->chunk_bytes_reserved >= (2 * 1024 * 1024ull)) {
4245 btrfs_create_pending_block_groups(trans, trans->root);
4246 btrfs_trans_release_chunk_metadata(trans);
4247 }
4230 return ret; 4248 return ret;
4231} 4249}
4232 4250
diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c
index e9ace099162c..8a8202956576 100644
--- a/fs/btrfs/qgroup.c
+++ b/fs/btrfs/qgroup.c
@@ -1651,6 +1651,11 @@ static int qgroup_update_counters(struct btrfs_fs_info *fs_info,
1651 /* Exclusive -> exclusive, nothing changed */ 1651 /* Exclusive -> exclusive, nothing changed */
1652 } 1652 }
1653 } 1653 }
1654
1655 /* For exclusive extent, free its reserved bytes too */
1656 if (nr_old_roots == 0 && nr_new_roots == 1 &&
1657 cur_new_count == nr_new_roots)
1658 qg->reserved -= num_bytes;
1654 if (dirty) 1659 if (dirty)
1655 qgroup_dirty(fs_info, qg); 1660 qgroup_dirty(fs_info, qg);
1656 } 1661 }
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 51e0f0d0053e..f5021fcb154e 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -2152,7 +2152,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
2152 2152
2153 kmem_cache_free(btrfs_trans_handle_cachep, trans); 2153 kmem_cache_free(btrfs_trans_handle_cachep, trans);
2154 2154
2155 if (current != root->fs_info->transaction_kthread) 2155 if (current != root->fs_info->transaction_kthread &&
2156 current != root->fs_info->cleaner_kthread)
2156 btrfs_run_delayed_iputs(root); 2157 btrfs_run_delayed_iputs(root);
2157 2158
2158 return ret; 2159 return ret;
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index dc10c9dd36c1..ddd5e9471290 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1506,7 +1506,6 @@ static int __mark_caps_flushing(struct inode *inode,
1506 1506
1507 swap(cf, ci->i_prealloc_cap_flush); 1507 swap(cf, ci->i_prealloc_cap_flush);
1508 cf->caps = flushing; 1508 cf->caps = flushing;
1509 cf->kick = false;
1510 1509
1511 spin_lock(&mdsc->cap_dirty_lock); 1510 spin_lock(&mdsc->cap_dirty_lock);
1512 list_del_init(&ci->i_dirty_item); 1511 list_del_init(&ci->i_dirty_item);
@@ -2123,8 +2122,7 @@ static void kick_flushing_capsnaps(struct ceph_mds_client *mdsc,
2123 2122
2124static int __kick_flushing_caps(struct ceph_mds_client *mdsc, 2123static int __kick_flushing_caps(struct ceph_mds_client *mdsc,
2125 struct ceph_mds_session *session, 2124 struct ceph_mds_session *session,
2126 struct ceph_inode_info *ci, 2125 struct ceph_inode_info *ci)
2127 bool kick_all)
2128{ 2126{
2129 struct inode *inode = &ci->vfs_inode; 2127 struct inode *inode = &ci->vfs_inode;
2130 struct ceph_cap *cap; 2128 struct ceph_cap *cap;
@@ -2150,9 +2148,7 @@ static int __kick_flushing_caps(struct ceph_mds_client *mdsc,
2150 2148
2151 for (n = rb_first(&ci->i_cap_flush_tree); n; n = rb_next(n)) { 2149 for (n = rb_first(&ci->i_cap_flush_tree); n; n = rb_next(n)) {
2152 cf = rb_entry(n, struct ceph_cap_flush, i_node); 2150 cf = rb_entry(n, struct ceph_cap_flush, i_node);
2153 if (cf->tid < first_tid) 2151 if (cf->tid >= first_tid)
2154 continue;
2155 if (kick_all || cf->kick)
2156 break; 2152 break;
2157 } 2153 }
2158 if (!n) { 2154 if (!n) {
@@ -2161,7 +2157,6 @@ static int __kick_flushing_caps(struct ceph_mds_client *mdsc,
2161 } 2157 }
2162 2158
2163 cf = rb_entry(n, struct ceph_cap_flush, i_node); 2159 cf = rb_entry(n, struct ceph_cap_flush, i_node);
2164 cf->kick = false;
2165 2160
2166 first_tid = cf->tid + 1; 2161 first_tid = cf->tid + 1;
2167 2162
@@ -2181,8 +2176,6 @@ void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc,
2181{ 2176{
2182 struct ceph_inode_info *ci; 2177 struct ceph_inode_info *ci;
2183 struct ceph_cap *cap; 2178 struct ceph_cap *cap;
2184 struct ceph_cap_flush *cf;
2185 struct rb_node *n;
2186 2179
2187 dout("early_kick_flushing_caps mds%d\n", session->s_mds); 2180 dout("early_kick_flushing_caps mds%d\n", session->s_mds);
2188 list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) { 2181 list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
@@ -2205,16 +2198,11 @@ void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc,
2205 if ((cap->issued & ci->i_flushing_caps) != 2198 if ((cap->issued & ci->i_flushing_caps) !=
2206 ci->i_flushing_caps) { 2199 ci->i_flushing_caps) {
2207 spin_unlock(&ci->i_ceph_lock); 2200 spin_unlock(&ci->i_ceph_lock);
2208 if (!__kick_flushing_caps(mdsc, session, ci, true)) 2201 if (!__kick_flushing_caps(mdsc, session, ci))
2209 continue; 2202 continue;
2210 spin_lock(&ci->i_ceph_lock); 2203 spin_lock(&ci->i_ceph_lock);
2211 } 2204 }
2212 2205
2213 for (n = rb_first(&ci->i_cap_flush_tree); n; n = rb_next(n)) {
2214 cf = rb_entry(n, struct ceph_cap_flush, i_node);
2215 cf->kick = true;
2216 }
2217
2218 spin_unlock(&ci->i_ceph_lock); 2206 spin_unlock(&ci->i_ceph_lock);
2219 } 2207 }
2220} 2208}
@@ -2228,7 +2216,7 @@ void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
2228 2216
2229 dout("kick_flushing_caps mds%d\n", session->s_mds); 2217 dout("kick_flushing_caps mds%d\n", session->s_mds);
2230 list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) { 2218 list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
2231 int delayed = __kick_flushing_caps(mdsc, session, ci, false); 2219 int delayed = __kick_flushing_caps(mdsc, session, ci);
2232 if (delayed) { 2220 if (delayed) {
2233 spin_lock(&ci->i_ceph_lock); 2221 spin_lock(&ci->i_ceph_lock);
2234 __cap_delay_requeue(mdsc, ci); 2222 __cap_delay_requeue(mdsc, ci);
@@ -2261,7 +2249,7 @@ static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc,
2261 2249
2262 spin_unlock(&ci->i_ceph_lock); 2250 spin_unlock(&ci->i_ceph_lock);
2263 2251
2264 delayed = __kick_flushing_caps(mdsc, session, ci, true); 2252 delayed = __kick_flushing_caps(mdsc, session, ci);
2265 if (delayed) { 2253 if (delayed) {
2266 spin_lock(&ci->i_ceph_lock); 2254 spin_lock(&ci->i_ceph_lock);
2267 __cap_delay_requeue(mdsc, ci); 2255 __cap_delay_requeue(mdsc, ci);
diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
index 4347039ecc18..6706bde9ad1b 100644
--- a/fs/ceph/locks.c
+++ b/fs/ceph/locks.c
@@ -287,7 +287,7 @@ int ceph_encode_locks_to_buffer(struct inode *inode,
287 return 0; 287 return 0;
288 288
289 spin_lock(&ctx->flc_lock); 289 spin_lock(&ctx->flc_lock);
290 list_for_each_entry(lock, &ctx->flc_flock, fl_list) { 290 list_for_each_entry(lock, &ctx->flc_posix, fl_list) {
291 ++seen_fcntl; 291 ++seen_fcntl;
292 if (seen_fcntl > num_fcntl_locks) { 292 if (seen_fcntl > num_fcntl_locks) {
293 err = -ENOSPC; 293 err = -ENOSPC;
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 860cc016e70d..2f2460d23a06 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -189,7 +189,6 @@ static inline void ceph_put_cap_snap(struct ceph_cap_snap *capsnap)
189struct ceph_cap_flush { 189struct ceph_cap_flush {
190 u64 tid; 190 u64 tid;
191 int caps; 191 int caps;
192 bool kick;
193 struct rb_node g_node; // global 192 struct rb_node g_node; // global
194 union { 193 union {
195 struct rb_node i_node; // inode 194 struct rb_node i_node; // inode
diff --git a/fs/dax.c b/fs/dax.c
index c3e21ccfc358..a7f77e1fa18c 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -319,6 +319,12 @@ static int dax_insert_mapping(struct inode *inode, struct buffer_head *bh,
319 * @vma: The virtual memory area where the fault occurred 319 * @vma: The virtual memory area where the fault occurred
320 * @vmf: The description of the fault 320 * @vmf: The description of the fault
321 * @get_block: The filesystem method used to translate file offsets to blocks 321 * @get_block: The filesystem method used to translate file offsets to blocks
322 * @complete_unwritten: The filesystem method used to convert unwritten blocks
323 * to written so the data written to them is exposed. This is required for
324 * required by write faults for filesystems that will return unwritten
325 * extent mappings from @get_block, but it is optional for reads as
326 * dax_insert_mapping() will always zero unwritten blocks. If the fs does
327 * not support unwritten extents, the it should pass NULL.
322 * 328 *
323 * When a page fault occurs, filesystems may call this helper in their 329 * When a page fault occurs, filesystems may call this helper in their
324 * fault handler for DAX files. __dax_fault() assumes the caller has done all 330 * fault handler for DAX files. __dax_fault() assumes the caller has done all
@@ -437,8 +443,12 @@ int __dax_fault(struct vm_area_struct *vma, struct vm_fault *vmf,
437 * as for normal BH based IO completions. 443 * as for normal BH based IO completions.
438 */ 444 */
439 error = dax_insert_mapping(inode, &bh, vma, vmf); 445 error = dax_insert_mapping(inode, &bh, vma, vmf);
440 if (buffer_unwritten(&bh)) 446 if (buffer_unwritten(&bh)) {
441 complete_unwritten(&bh, !error); 447 if (complete_unwritten)
448 complete_unwritten(&bh, !error);
449 else
450 WARN_ON_ONCE(!(vmf->flags & FAULT_FLAG_WRITE));
451 }
442 452
443 out: 453 out:
444 if (error == -ENOMEM) 454 if (error == -ENOMEM)
diff --git a/fs/dcache.c b/fs/dcache.c
index 5c8ea15e73a5..9b5fe503f6cb 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -3442,22 +3442,15 @@ void __init vfs_caches_init_early(void)
3442 inode_init_early(); 3442 inode_init_early();
3443} 3443}
3444 3444
3445void __init vfs_caches_init(unsigned long mempages) 3445void __init vfs_caches_init(void)
3446{ 3446{
3447 unsigned long reserve;
3448
3449 /* Base hash sizes on available memory, with a reserve equal to
3450 150% of current kernel size */
3451
3452 reserve = min((mempages - nr_free_pages()) * 3/2, mempages - 1);
3453 mempages -= reserve;
3454
3455 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0, 3447 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
3456 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL); 3448 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
3457 3449
3458 dcache_init(); 3450 dcache_init();
3459 inode_init(); 3451 inode_init();
3460 files_init(mempages); 3452 files_init();
3453 files_maxfiles_init();
3461 mnt_init(); 3454 mnt_init();
3462 bdev_cache_init(); 3455 bdev_cache_init();
3463 chrdev_init(); 3456 chrdev_init();
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index 9bedfa8dd3a5..f71e19a9dd3c 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -2072,8 +2072,6 @@ static int f2fs_set_data_page_dirty(struct page *page)
2072 return 1; 2072 return 1;
2073 } 2073 }
2074 2074
2075 mark_inode_dirty(inode);
2076
2077 if (!PageDirty(page)) { 2075 if (!PageDirty(page)) {
2078 __set_page_dirty_nobuffers(page); 2076 __set_page_dirty_nobuffers(page);
2079 update_dirty_page(inode, page); 2077 update_dirty_page(inode, page);
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index ada2a3dd701a..b0f38c3b37f4 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -1331,12 +1331,13 @@ static int f2fs_ioc_commit_atomic_write(struct file *filp)
1331 if (ret) 1331 if (ret)
1332 return ret; 1332 return ret;
1333 1333
1334 if (f2fs_is_atomic_file(inode)) 1334 if (f2fs_is_atomic_file(inode)) {
1335 clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
1335 commit_inmem_pages(inode, false); 1336 commit_inmem_pages(inode, false);
1337 }
1336 1338
1337 ret = f2fs_sync_file(filp, 0, LONG_MAX, 0); 1339 ret = f2fs_sync_file(filp, 0, LONG_MAX, 0);
1338 mnt_drop_write_file(filp); 1340 mnt_drop_write_file(filp);
1339 clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
1340 return ret; 1341 return ret;
1341} 1342}
1342 1343
@@ -1387,8 +1388,8 @@ static int f2fs_ioc_abort_volatile_write(struct file *filp)
1387 f2fs_balance_fs(F2FS_I_SB(inode)); 1388 f2fs_balance_fs(F2FS_I_SB(inode));
1388 1389
1389 if (f2fs_is_atomic_file(inode)) { 1390 if (f2fs_is_atomic_file(inode)) {
1390 commit_inmem_pages(inode, false);
1391 clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE); 1391 clear_inode_flag(F2FS_I(inode), FI_ATOMIC_FILE);
1392 commit_inmem_pages(inode, false);
1392 } 1393 }
1393 1394
1394 if (f2fs_is_volatile_file(inode)) 1395 if (f2fs_is_volatile_file(inode))
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index e1e73617d13b..22fb5ef37966 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -556,27 +556,39 @@ static void move_encrypted_block(struct inode *inode, block_t bidx)
556 if (!fio.encrypted_page) 556 if (!fio.encrypted_page)
557 goto put_out; 557 goto put_out;
558 558
559 f2fs_submit_page_bio(&fio); 559 err = f2fs_submit_page_bio(&fio);
560 if (err)
561 goto put_page_out;
562
563 /* write page */
564 lock_page(fio.encrypted_page);
565
566 if (unlikely(!PageUptodate(fio.encrypted_page)))
567 goto put_page_out;
568 if (unlikely(fio.encrypted_page->mapping != META_MAPPING(fio.sbi)))
569 goto put_page_out;
570
571 set_page_dirty(fio.encrypted_page);
572 f2fs_wait_on_page_writeback(fio.encrypted_page, META);
573 if (clear_page_dirty_for_io(fio.encrypted_page))
574 dec_page_count(fio.sbi, F2FS_DIRTY_META);
575
576 set_page_writeback(fio.encrypted_page);
560 577
561 /* allocate block address */ 578 /* allocate block address */
562 f2fs_wait_on_page_writeback(dn.node_page, NODE); 579 f2fs_wait_on_page_writeback(dn.node_page, NODE);
563
564 allocate_data_block(fio.sbi, NULL, fio.blk_addr, 580 allocate_data_block(fio.sbi, NULL, fio.blk_addr,
565 &fio.blk_addr, &sum, CURSEG_COLD_DATA); 581 &fio.blk_addr, &sum, CURSEG_COLD_DATA);
566 dn.data_blkaddr = fio.blk_addr;
567
568 /* write page */
569 lock_page(fio.encrypted_page);
570 set_page_writeback(fio.encrypted_page);
571 fio.rw = WRITE_SYNC; 582 fio.rw = WRITE_SYNC;
572 f2fs_submit_page_mbio(&fio); 583 f2fs_submit_page_mbio(&fio);
573 584
585 dn.data_blkaddr = fio.blk_addr;
574 set_data_blkaddr(&dn); 586 set_data_blkaddr(&dn);
575 f2fs_update_extent_cache(&dn); 587 f2fs_update_extent_cache(&dn);
576 set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE); 588 set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
577 if (page->index == 0) 589 if (page->index == 0)
578 set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN); 590 set_inode_flag(F2FS_I(inode), FI_FIRST_BLOCK_WRITTEN);
579 591put_page_out:
580 f2fs_put_page(fio.encrypted_page, 1); 592 f2fs_put_page(fio.encrypted_page, 1);
581put_out: 593put_out:
582 f2fs_put_dnode(&dn); 594 f2fs_put_dnode(&dn);
@@ -605,8 +617,8 @@ static void move_data_page(struct inode *inode, block_t bidx, int gc_type)
605 .page = page, 617 .page = page,
606 .encrypted_page = NULL, 618 .encrypted_page = NULL,
607 }; 619 };
620 set_page_dirty(page);
608 f2fs_wait_on_page_writeback(page, DATA); 621 f2fs_wait_on_page_writeback(page, DATA);
609
610 if (clear_page_dirty_for_io(page)) 622 if (clear_page_dirty_for_io(page))
611 inode_dec_dirty_pages(inode); 623 inode_dec_dirty_pages(inode);
612 set_cold_data(page); 624 set_cold_data(page);
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 38e75fb1e488..a13ffcc32992 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -141,6 +141,8 @@ int f2fs_convert_inline_page(struct dnode_of_data *dn, struct page *page)
141 kunmap_atomic(dst_addr); 141 kunmap_atomic(dst_addr);
142 SetPageUptodate(page); 142 SetPageUptodate(page);
143no_update: 143no_update:
144 set_page_dirty(page);
145
144 /* clear dirty state */ 146 /* clear dirty state */
145 dirty = clear_page_dirty_for_io(page); 147 dirty = clear_page_dirty_for_io(page);
146 148
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 1eb343768781..61b97f9cb9f6 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -257,6 +257,7 @@ void commit_inmem_pages(struct inode *inode, bool abort)
257 if (!abort) { 257 if (!abort) {
258 lock_page(cur->page); 258 lock_page(cur->page);
259 if (cur->page->mapping == inode->i_mapping) { 259 if (cur->page->mapping == inode->i_mapping) {
260 set_page_dirty(cur->page);
260 f2fs_wait_on_page_writeback(cur->page, DATA); 261 f2fs_wait_on_page_writeback(cur->page, DATA);
261 if (clear_page_dirty_for_io(cur->page)) 262 if (clear_page_dirty_for_io(cur->page))
262 inode_dec_dirty_pages(inode); 263 inode_dec_dirty_pages(inode);
diff --git a/fs/file_table.c b/fs/file_table.c
index 7f9d407c7595..ad17e05ebf95 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -25,6 +25,7 @@
25#include <linux/hardirq.h> 25#include <linux/hardirq.h>
26#include <linux/task_work.h> 26#include <linux/task_work.h>
27#include <linux/ima.h> 27#include <linux/ima.h>
28#include <linux/swap.h>
28 29
29#include <linux/atomic.h> 30#include <linux/atomic.h>
30 31
@@ -308,19 +309,24 @@ void put_filp(struct file *file)
308 } 309 }
309} 310}
310 311
311void __init files_init(unsigned long mempages) 312void __init files_init(void)
312{ 313{
313 unsigned long n;
314
315 filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0, 314 filp_cachep = kmem_cache_create("filp", sizeof(struct file), 0,
316 SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); 315 SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
316 percpu_counter_init(&nr_files, 0, GFP_KERNEL);
317}
317 318
318 /* 319/*
319 * One file with associated inode and dcache is very roughly 1K. 320 * One file with associated inode and dcache is very roughly 1K. Per default
320 * Per default don't use more than 10% of our memory for files. 321 * do not use more than 10% of our memory for files.
321 */ 322 */
323void __init files_maxfiles_init(void)
324{
325 unsigned long n;
326 unsigned long memreserve = (totalram_pages - nr_free_pages()) * 3/2;
327
328 memreserve = min(memreserve, totalram_pages - 1);
329 n = ((totalram_pages - memreserve) * (PAGE_SIZE / 1024)) / 10;
322 330
323 n = (mempages * (PAGE_SIZE / 1024)) / 10;
324 files_stat.max_files = max_t(unsigned long, n, NR_FILE); 331 files_stat.max_files = max_t(unsigned long, n, NR_FILE);
325 percpu_counter_init(&nr_files, 0, GFP_KERNEL);
326} 332}
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index f0520bcf2094..518c6294bf6c 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -702,6 +702,7 @@ void wbc_account_io(struct writeback_control *wbc, struct page *page,
702 else 702 else
703 wbc->wb_tcand_bytes -= min(bytes, wbc->wb_tcand_bytes); 703 wbc->wb_tcand_bytes -= min(bytes, wbc->wb_tcand_bytes);
704} 704}
705EXPORT_SYMBOL_GPL(wbc_account_io);
705 706
706/** 707/**
707 * inode_congested - test whether an inode is congested 708 * inode_congested - test whether an inode is congested
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 80cc1b35d460..ebb5e37455a0 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -2246,7 +2246,15 @@ static long fuse_dev_ioctl(struct file *file, unsigned int cmd,
2246 2246
2247 err = -EINVAL; 2247 err = -EINVAL;
2248 if (old) { 2248 if (old) {
2249 struct fuse_dev *fud = fuse_get_dev(old); 2249 struct fuse_dev *fud = NULL;
2250
2251 /*
2252 * Check against file->f_op because CUSE
2253 * uses the same ioctl handler.
2254 */
2255 if (old->f_op == file->f_op &&
2256 old->f_cred->user_ns == file->f_cred->user_ns)
2257 fud = fuse_get_dev(old);
2250 2258
2251 if (fud) { 2259 if (fud) {
2252 mutex_lock(&fuse_mutex); 2260 mutex_lock(&fuse_mutex);
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 0cf74df68617..973c24ce59ad 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -1010,6 +1010,8 @@ struct file *hugetlb_file_setup(const char *name, size_t size,
1010 inode = hugetlbfs_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0); 1010 inode = hugetlbfs_get_inode(sb, NULL, S_IFREG | S_IRWXUGO, 0);
1011 if (!inode) 1011 if (!inode)
1012 goto out_dentry; 1012 goto out_dentry;
1013 if (creat_flags == HUGETLB_SHMFS_INODE)
1014 inode->i_flags |= S_PRIVATE;
1013 1015
1014 file = ERR_PTR(-ENOMEM); 1016 file = ERR_PTR(-ENOMEM);
1015 if (hugetlb_reserve_pages(inode, 0, 1017 if (hugetlb_reserve_pages(inode, 0,
diff --git a/fs/namei.c b/fs/namei.c
index ae4e4c18b2ac..1c2105ed20c5 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -879,7 +879,7 @@ static inline int may_follow_link(struct nameidata *nd)
879 return 0; 879 return 0;
880 880
881 /* Allowed if parent directory not sticky and world-writable. */ 881 /* Allowed if parent directory not sticky and world-writable. */
882 parent = nd->path.dentry->d_inode; 882 parent = nd->inode;
883 if ((parent->i_mode & (S_ISVTX|S_IWOTH)) != (S_ISVTX|S_IWOTH)) 883 if ((parent->i_mode & (S_ISVTX|S_IWOTH)) != (S_ISVTX|S_IWOTH))
884 return 0; 884 return 0;
885 885
@@ -1954,8 +1954,13 @@ OK:
1954 continue; 1954 continue;
1955 } 1955 }
1956 } 1956 }
1957 if (unlikely(!d_can_lookup(nd->path.dentry))) 1957 if (unlikely(!d_can_lookup(nd->path.dentry))) {
1958 if (nd->flags & LOOKUP_RCU) {
1959 if (unlazy_walk(nd, NULL, 0))
1960 return -ECHILD;
1961 }
1958 return -ENOTDIR; 1962 return -ENOTDIR;
1963 }
1959 } 1964 }
1960} 1965}
1961 1966
diff --git a/fs/namespace.c b/fs/namespace.c
index c7cb8a526c05..2b8aa15fd6df 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -1361,6 +1361,36 @@ enum umount_tree_flags {
1361 UMOUNT_PROPAGATE = 2, 1361 UMOUNT_PROPAGATE = 2,
1362 UMOUNT_CONNECTED = 4, 1362 UMOUNT_CONNECTED = 4,
1363}; 1363};
1364
1365static bool disconnect_mount(struct mount *mnt, enum umount_tree_flags how)
1366{
1367 /* Leaving mounts connected is only valid for lazy umounts */
1368 if (how & UMOUNT_SYNC)
1369 return true;
1370
1371 /* A mount without a parent has nothing to be connected to */
1372 if (!mnt_has_parent(mnt))
1373 return true;
1374
1375 /* Because the reference counting rules change when mounts are
1376 * unmounted and connected, umounted mounts may not be
1377 * connected to mounted mounts.
1378 */
1379 if (!(mnt->mnt_parent->mnt.mnt_flags & MNT_UMOUNT))
1380 return true;
1381
1382 /* Has it been requested that the mount remain connected? */
1383 if (how & UMOUNT_CONNECTED)
1384 return false;
1385
1386 /* Is the mount locked such that it needs to remain connected? */
1387 if (IS_MNT_LOCKED(mnt))
1388 return false;
1389
1390 /* By default disconnect the mount */
1391 return true;
1392}
1393
1364/* 1394/*
1365 * mount_lock must be held 1395 * mount_lock must be held
1366 * namespace_sem must be held for write 1396 * namespace_sem must be held for write
@@ -1398,10 +1428,7 @@ static void umount_tree(struct mount *mnt, enum umount_tree_flags how)
1398 if (how & UMOUNT_SYNC) 1428 if (how & UMOUNT_SYNC)
1399 p->mnt.mnt_flags |= MNT_SYNC_UMOUNT; 1429 p->mnt.mnt_flags |= MNT_SYNC_UMOUNT;
1400 1430
1401 disconnect = !(((how & UMOUNT_CONNECTED) && 1431 disconnect = disconnect_mount(p, how);
1402 mnt_has_parent(p) &&
1403 (p->mnt_parent->mnt.mnt_flags & MNT_UMOUNT)) ||
1404 IS_MNT_LOCKED_AND_LAZY(p));
1405 1432
1406 pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt, 1433 pin_insert_group(&p->mnt_umount, &p->mnt_parent->mnt,
1407 disconnect ? &unmounted : NULL); 1434 disconnect ? &unmounted : NULL);
@@ -1538,11 +1565,8 @@ void __detach_mounts(struct dentry *dentry)
1538 while (!hlist_empty(&mp->m_list)) { 1565 while (!hlist_empty(&mp->m_list)) {
1539 mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list); 1566 mnt = hlist_entry(mp->m_list.first, struct mount, mnt_mp_list);
1540 if (mnt->mnt.mnt_flags & MNT_UMOUNT) { 1567 if (mnt->mnt.mnt_flags & MNT_UMOUNT) {
1541 struct mount *p, *tmp; 1568 hlist_add_head(&mnt->mnt_umount.s_list, &unmounted);
1542 list_for_each_entry_safe(p, tmp, &mnt->mnt_mounts, mnt_child) { 1569 umount_mnt(mnt);
1543 hlist_add_head(&p->mnt_umount.s_list, &unmounted);
1544 umount_mnt(p);
1545 }
1546 } 1570 }
1547 else umount_tree(mnt, UMOUNT_CONNECTED); 1571 else umount_tree(mnt, UMOUNT_CONNECTED);
1548 } 1572 }
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index ecebb406cc1a..4a90c9bb3135 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -775,7 +775,7 @@ static int nfs_init_server(struct nfs_server *server,
775 server->options = data->options; 775 server->options = data->options;
776 server->caps |= NFS_CAP_HARDLINKS|NFS_CAP_SYMLINKS|NFS_CAP_FILEID| 776 server->caps |= NFS_CAP_HARDLINKS|NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
777 NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|NFS_CAP_OWNER_GROUP| 777 NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|NFS_CAP_OWNER_GROUP|
778 NFS_CAP_ATIME|NFS_CAP_CTIME|NFS_CAP_MTIME|NFS_CAP_CHANGE_ATTR; 778 NFS_CAP_ATIME|NFS_CAP_CTIME|NFS_CAP_MTIME;
779 779
780 if (data->rsize) 780 if (data->rsize)
781 server->rsize = nfs_block_size(data->rsize, NULL); 781 server->rsize = nfs_block_size(data->rsize, NULL);
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index c12951b9551e..b3289d701eea 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -1852,7 +1852,7 @@ ff_layout_mirror_prepare_stats(struct nfs42_layoutstat_args *args,
1852 struct nfs42_layoutstat_devinfo *devinfo; 1852 struct nfs42_layoutstat_devinfo *devinfo;
1853 int i; 1853 int i;
1854 1854
1855 for (i = 0; i <= FF_LAYOUT_MIRROR_COUNT(pls); i++) { 1855 for (i = 0; i < FF_LAYOUT_MIRROR_COUNT(pls); i++) {
1856 if (*dev_count >= dev_limit) 1856 if (*dev_count >= dev_limit)
1857 break; 1857 break;
1858 mirror = FF_LAYOUT_COMP(pls, i); 1858 mirror = FF_LAYOUT_COMP(pls, i);
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index b77b328a06d7..0adc7d245b3d 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -442,8 +442,9 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr, st
442 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR); 442 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR);
443 if (fattr->valid & NFS_ATTR_FATTR_CHANGE) 443 if (fattr->valid & NFS_ATTR_FATTR_CHANGE)
444 inode->i_version = fattr->change_attr; 444 inode->i_version = fattr->change_attr;
445 else if (nfs_server_capable(inode, NFS_CAP_CHANGE_ATTR)) 445 else
446 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR); 446 nfs_set_cache_invalid(inode, NFS_INO_INVALID_ATTR
447 | NFS_INO_REVAL_PAGECACHE);
447 if (fattr->valid & NFS_ATTR_FATTR_SIZE) 448 if (fattr->valid & NFS_ATTR_FATTR_SIZE)
448 inode->i_size = nfs_size_to_loff_t(fattr->size); 449 inode->i_size = nfs_size_to_loff_t(fattr->size);
449 else 450 else
@@ -1244,9 +1245,11 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
1244 if (fattr->valid & NFS_ATTR_FATTR_SIZE) { 1245 if (fattr->valid & NFS_ATTR_FATTR_SIZE) {
1245 cur_size = i_size_read(inode); 1246 cur_size = i_size_read(inode);
1246 new_isize = nfs_size_to_loff_t(fattr->size); 1247 new_isize = nfs_size_to_loff_t(fattr->size);
1247 if (cur_size != new_isize && nfsi->nrequests == 0) 1248 if (cur_size != new_isize)
1248 invalid |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE; 1249 invalid |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE;
1249 } 1250 }
1251 if (nfsi->nrequests != 0)
1252 invalid &= ~NFS_INO_REVAL_PAGECACHE;
1250 1253
1251 /* Have any file permissions changed? */ 1254 /* Have any file permissions changed? */
1252 if ((fattr->valid & NFS_ATTR_FATTR_MODE) && (inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO)) 1255 if ((fattr->valid & NFS_ATTR_FATTR_MODE) && (inode->i_mode & S_IALLUGO) != (fattr->mode & S_IALLUGO))
@@ -1684,13 +1687,12 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
1684 invalid |= NFS_INO_INVALID_ATTR 1687 invalid |= NFS_INO_INVALID_ATTR
1685 | NFS_INO_INVALID_DATA 1688 | NFS_INO_INVALID_DATA
1686 | NFS_INO_INVALID_ACCESS 1689 | NFS_INO_INVALID_ACCESS
1687 | NFS_INO_INVALID_ACL 1690 | NFS_INO_INVALID_ACL;
1688 | NFS_INO_REVAL_PAGECACHE;
1689 if (S_ISDIR(inode->i_mode)) 1691 if (S_ISDIR(inode->i_mode))
1690 nfs_force_lookup_revalidate(inode); 1692 nfs_force_lookup_revalidate(inode);
1691 inode->i_version = fattr->change_attr; 1693 inode->i_version = fattr->change_attr;
1692 } 1694 }
1693 } else if (server->caps & NFS_CAP_CHANGE_ATTR) 1695 } else
1694 nfsi->cache_validity |= save_cache_validity; 1696 nfsi->cache_validity |= save_cache_validity;
1695 1697
1696 if (fattr->valid & NFS_ATTR_FATTR_MTIME) { 1698 if (fattr->valid & NFS_ATTR_FATTR_MTIME) {
@@ -1717,7 +1719,6 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
1717 if ((nfsi->nrequests == 0) || new_isize > cur_isize) { 1719 if ((nfsi->nrequests == 0) || new_isize > cur_isize) {
1718 i_size_write(inode, new_isize); 1720 i_size_write(inode, new_isize);
1719 invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA; 1721 invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
1720 invalid &= ~NFS_INO_REVAL_PAGECACHE;
1721 } 1722 }
1722 dprintk("NFS: isize change on server for file %s/%ld " 1723 dprintk("NFS: isize change on server for file %s/%ld "
1723 "(%Ld to %Ld)\n", 1724 "(%Ld to %Ld)\n",
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 7e3c4604bea8..9b372b845f6a 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -296,6 +296,22 @@ extern struct rpc_procinfo nfs4_procedures[];
296 296
297#ifdef CONFIG_NFS_V4_SECURITY_LABEL 297#ifdef CONFIG_NFS_V4_SECURITY_LABEL
298extern struct nfs4_label *nfs4_label_alloc(struct nfs_server *server, gfp_t flags); 298extern struct nfs4_label *nfs4_label_alloc(struct nfs_server *server, gfp_t flags);
299static inline struct nfs4_label *
300nfs4_label_copy(struct nfs4_label *dst, struct nfs4_label *src)
301{
302 if (!dst || !src)
303 return NULL;
304
305 if (src->len > NFS4_MAXLABELLEN)
306 return NULL;
307
308 dst->lfs = src->lfs;
309 dst->pi = src->pi;
310 dst->len = src->len;
311 memcpy(dst->label, src->label, src->len);
312
313 return dst;
314}
299static inline void nfs4_label_free(struct nfs4_label *label) 315static inline void nfs4_label_free(struct nfs4_label *label)
300{ 316{
301 if (label) { 317 if (label) {
@@ -316,6 +332,11 @@ static inline void nfs4_label_free(void *label) {}
316static inline void nfs_zap_label_cache_locked(struct nfs_inode *nfsi) 332static inline void nfs_zap_label_cache_locked(struct nfs_inode *nfsi)
317{ 333{
318} 334}
335static inline struct nfs4_label *
336nfs4_label_copy(struct nfs4_label *dst, struct nfs4_label *src)
337{
338 return NULL;
339}
319#endif /* CONFIG_NFS_V4_SECURITY_LABEL */ 340#endif /* CONFIG_NFS_V4_SECURITY_LABEL */
320 341
321/* proc.c */ 342/* proc.c */
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index f486b80f927a..d731bbf974aa 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -135,7 +135,7 @@ int nfs42_proc_deallocate(struct file *filep, loff_t offset, loff_t len)
135 return err; 135 return err;
136} 136}
137 137
138loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence) 138static loff_t _nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
139{ 139{
140 struct inode *inode = file_inode(filep); 140 struct inode *inode = file_inode(filep);
141 struct nfs42_seek_args args = { 141 struct nfs42_seek_args args = {
@@ -171,6 +171,23 @@ loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
171 return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes); 171 return vfs_setpos(filep, res.sr_offset, inode->i_sb->s_maxbytes);
172} 172}
173 173
174loff_t nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
175{
176 struct nfs_server *server = NFS_SERVER(file_inode(filep));
177 struct nfs4_exception exception = { };
178 int err;
179
180 do {
181 err = _nfs42_proc_llseek(filep, offset, whence);
182 if (err == -ENOTSUPP)
183 return -EOPNOTSUPP;
184 err = nfs4_handle_exception(server, err, &exception);
185 } while (exception.retry);
186
187 return err;
188}
189
190
174static void 191static void
175nfs42_layoutstat_prepare(struct rpc_task *task, void *calldata) 192nfs42_layoutstat_prepare(struct rpc_task *task, void *calldata)
176{ 193{
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 8bee93469617..3acb1eb72930 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -467,7 +467,10 @@ static void do_renew_lease(struct nfs_client *clp, unsigned long timestamp)
467 467
468static void renew_lease(const struct nfs_server *server, unsigned long timestamp) 468static void renew_lease(const struct nfs_server *server, unsigned long timestamp)
469{ 469{
470 do_renew_lease(server->nfs_client, timestamp); 470 struct nfs_client *clp = server->nfs_client;
471
472 if (!nfs4_has_session(clp))
473 do_renew_lease(clp, timestamp);
471} 474}
472 475
473struct nfs4_call_sync_data { 476struct nfs4_call_sync_data {
@@ -616,8 +619,7 @@ int nfs41_sequence_done(struct rpc_task *task, struct nfs4_sequence_res *res)
616 clp = session->clp; 619 clp = session->clp;
617 do_renew_lease(clp, res->sr_timestamp); 620 do_renew_lease(clp, res->sr_timestamp);
618 /* Check sequence flags */ 621 /* Check sequence flags */
619 if (res->sr_status_flags != 0) 622 nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
620 nfs4_schedule_lease_recovery(clp);
621 nfs41_update_target_slotid(slot->table, slot, res); 623 nfs41_update_target_slotid(slot->table, slot, res);
622 break; 624 break;
623 case 1: 625 case 1:
@@ -910,6 +912,7 @@ struct nfs4_opendata {
910 struct nfs_open_confirmres c_res; 912 struct nfs_open_confirmres c_res;
911 struct nfs4_string owner_name; 913 struct nfs4_string owner_name;
912 struct nfs4_string group_name; 914 struct nfs4_string group_name;
915 struct nfs4_label *a_label;
913 struct nfs_fattr f_attr; 916 struct nfs_fattr f_attr;
914 struct nfs4_label *f_label; 917 struct nfs4_label *f_label;
915 struct dentry *dir; 918 struct dentry *dir;
@@ -1013,6 +1016,10 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
1013 if (IS_ERR(p->f_label)) 1016 if (IS_ERR(p->f_label))
1014 goto err_free_p; 1017 goto err_free_p;
1015 1018
1019 p->a_label = nfs4_label_alloc(server, gfp_mask);
1020 if (IS_ERR(p->a_label))
1021 goto err_free_f;
1022
1016 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid; 1023 alloc_seqid = server->nfs_client->cl_mvops->alloc_seqid;
1017 p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask); 1024 p->o_arg.seqid = alloc_seqid(&sp->so_seqid, gfp_mask);
1018 if (IS_ERR(p->o_arg.seqid)) 1025 if (IS_ERR(p->o_arg.seqid))
@@ -1041,7 +1048,7 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
1041 p->o_arg.server = server; 1048 p->o_arg.server = server;
1042 p->o_arg.bitmask = nfs4_bitmask(server, label); 1049 p->o_arg.bitmask = nfs4_bitmask(server, label);
1043 p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0]; 1050 p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
1044 p->o_arg.label = label; 1051 p->o_arg.label = nfs4_label_copy(p->a_label, label);
1045 p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim); 1052 p->o_arg.claim = nfs4_map_atomic_open_claim(server, claim);
1046 switch (p->o_arg.claim) { 1053 switch (p->o_arg.claim) {
1047 case NFS4_OPEN_CLAIM_NULL: 1054 case NFS4_OPEN_CLAIM_NULL:
@@ -1074,6 +1081,8 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
1074 return p; 1081 return p;
1075 1082
1076err_free_label: 1083err_free_label:
1084 nfs4_label_free(p->a_label);
1085err_free_f:
1077 nfs4_label_free(p->f_label); 1086 nfs4_label_free(p->f_label);
1078err_free_p: 1087err_free_p:
1079 kfree(p); 1088 kfree(p);
@@ -1093,6 +1102,7 @@ static void nfs4_opendata_free(struct kref *kref)
1093 nfs4_put_open_state(p->state); 1102 nfs4_put_open_state(p->state);
1094 nfs4_put_state_owner(p->owner); 1103 nfs4_put_state_owner(p->owner);
1095 1104
1105 nfs4_label_free(p->a_label);
1096 nfs4_label_free(p->f_label); 1106 nfs4_label_free(p->f_label);
1097 1107
1098 dput(p->dir); 1108 dput(p->dir);
@@ -1198,12 +1208,15 @@ static bool nfs_need_update_open_stateid(struct nfs4_state *state,
1198 1208
1199static void nfs_resync_open_stateid_locked(struct nfs4_state *state) 1209static void nfs_resync_open_stateid_locked(struct nfs4_state *state)
1200{ 1210{
1211 if (!(state->n_wronly || state->n_rdonly || state->n_rdwr))
1212 return;
1201 if (state->n_wronly) 1213 if (state->n_wronly)
1202 set_bit(NFS_O_WRONLY_STATE, &state->flags); 1214 set_bit(NFS_O_WRONLY_STATE, &state->flags);
1203 if (state->n_rdonly) 1215 if (state->n_rdonly)
1204 set_bit(NFS_O_RDONLY_STATE, &state->flags); 1216 set_bit(NFS_O_RDONLY_STATE, &state->flags);
1205 if (state->n_rdwr) 1217 if (state->n_rdwr)
1206 set_bit(NFS_O_RDWR_STATE, &state->flags); 1218 set_bit(NFS_O_RDWR_STATE, &state->flags);
1219 set_bit(NFS_OPEN_STATE, &state->flags);
1207} 1220}
1208 1221
1209static void nfs_clear_open_stateid_locked(struct nfs4_state *state, 1222static void nfs_clear_open_stateid_locked(struct nfs4_state *state,
@@ -7571,13 +7584,8 @@ static int nfs4_proc_sequence(struct nfs_client *clp, struct rpc_cred *cred)
7571 goto out; 7584 goto out;
7572 } 7585 }
7573 ret = rpc_wait_for_completion_task(task); 7586 ret = rpc_wait_for_completion_task(task);
7574 if (!ret) { 7587 if (!ret)
7575 struct nfs4_sequence_res *res = task->tk_msg.rpc_resp;
7576
7577 if (task->tk_status == 0)
7578 nfs41_handle_sequence_flag_errors(clp, res->sr_status_flags);
7579 ret = task->tk_status; 7588 ret = task->tk_status;
7580 }
7581 rpc_put_task(task); 7589 rpc_put_task(task);
7582out: 7590out:
7583 dprintk("<-- %s status=%d\n", __func__, ret); 7591 dprintk("<-- %s status=%d\n", __func__, ret);
@@ -7965,16 +7973,17 @@ static void nfs4_layoutreturn_release(void *calldata)
7965{ 7973{
7966 struct nfs4_layoutreturn *lrp = calldata; 7974 struct nfs4_layoutreturn *lrp = calldata;
7967 struct pnfs_layout_hdr *lo = lrp->args.layout; 7975 struct pnfs_layout_hdr *lo = lrp->args.layout;
7976 LIST_HEAD(freeme);
7968 7977
7969 dprintk("--> %s\n", __func__); 7978 dprintk("--> %s\n", __func__);
7970 spin_lock(&lo->plh_inode->i_lock); 7979 spin_lock(&lo->plh_inode->i_lock);
7971 if (lrp->res.lrs_present) 7980 if (lrp->res.lrs_present)
7972 pnfs_set_layout_stateid(lo, &lrp->res.stateid, true); 7981 pnfs_set_layout_stateid(lo, &lrp->res.stateid, true);
7982 pnfs_mark_matching_lsegs_invalid(lo, &freeme, &lrp->args.range);
7973 pnfs_clear_layoutreturn_waitbit(lo); 7983 pnfs_clear_layoutreturn_waitbit(lo);
7974 clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, &lo->plh_flags);
7975 rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq);
7976 lo->plh_block_lgets--; 7984 lo->plh_block_lgets--;
7977 spin_unlock(&lo->plh_inode->i_lock); 7985 spin_unlock(&lo->plh_inode->i_lock);
7986 pnfs_free_lseg_list(&freeme);
7978 pnfs_put_layout_hdr(lrp->args.layout); 7987 pnfs_put_layout_hdr(lrp->args.layout);
7979 nfs_iput_and_deactive(lrp->inode); 7988 nfs_iput_and_deactive(lrp->inode);
7980 kfree(calldata); 7989 kfree(calldata);
@@ -8588,7 +8597,6 @@ static const struct nfs4_minor_version_ops nfs_v4_0_minor_ops = {
8588 .minor_version = 0, 8597 .minor_version = 0,
8589 .init_caps = NFS_CAP_READDIRPLUS 8598 .init_caps = NFS_CAP_READDIRPLUS
8590 | NFS_CAP_ATOMIC_OPEN 8599 | NFS_CAP_ATOMIC_OPEN
8591 | NFS_CAP_CHANGE_ATTR
8592 | NFS_CAP_POSIX_LOCK, 8600 | NFS_CAP_POSIX_LOCK,
8593 .init_client = nfs40_init_client, 8601 .init_client = nfs40_init_client,
8594 .shutdown_client = nfs40_shutdown_client, 8602 .shutdown_client = nfs40_shutdown_client,
@@ -8614,7 +8622,6 @@ static const struct nfs4_minor_version_ops nfs_v4_1_minor_ops = {
8614 .minor_version = 1, 8622 .minor_version = 1,
8615 .init_caps = NFS_CAP_READDIRPLUS 8623 .init_caps = NFS_CAP_READDIRPLUS
8616 | NFS_CAP_ATOMIC_OPEN 8624 | NFS_CAP_ATOMIC_OPEN
8617 | NFS_CAP_CHANGE_ATTR
8618 | NFS_CAP_POSIX_LOCK 8625 | NFS_CAP_POSIX_LOCK
8619 | NFS_CAP_STATEID_NFSV41 8626 | NFS_CAP_STATEID_NFSV41
8620 | NFS_CAP_ATOMIC_OPEN_V1, 8627 | NFS_CAP_ATOMIC_OPEN_V1,
@@ -8637,7 +8644,6 @@ static const struct nfs4_minor_version_ops nfs_v4_2_minor_ops = {
8637 .minor_version = 2, 8644 .minor_version = 2,
8638 .init_caps = NFS_CAP_READDIRPLUS 8645 .init_caps = NFS_CAP_READDIRPLUS
8639 | NFS_CAP_ATOMIC_OPEN 8646 | NFS_CAP_ATOMIC_OPEN
8640 | NFS_CAP_CHANGE_ATTR
8641 | NFS_CAP_POSIX_LOCK 8647 | NFS_CAP_POSIX_LOCK
8642 | NFS_CAP_STATEID_NFSV41 8648 | NFS_CAP_STATEID_NFSV41
8643 | NFS_CAP_ATOMIC_OPEN_V1 8649 | NFS_CAP_ATOMIC_OPEN_V1
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 605840dc89cf..f2e2ad894461 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -2191,25 +2191,35 @@ static void nfs41_handle_server_reboot(struct nfs_client *clp)
2191 } 2191 }
2192} 2192}
2193 2193
2194static void nfs41_handle_state_revoked(struct nfs_client *clp) 2194static void nfs41_handle_all_state_revoked(struct nfs_client *clp)
2195{ 2195{
2196 nfs4_reset_all_state(clp); 2196 nfs4_reset_all_state(clp);
2197 dprintk("%s: state revoked on server %s\n", __func__, clp->cl_hostname); 2197 dprintk("%s: state revoked on server %s\n", __func__, clp->cl_hostname);
2198} 2198}
2199 2199
2200static void nfs41_handle_some_state_revoked(struct nfs_client *clp)
2201{
2202 nfs4_state_mark_reclaim_helper(clp, nfs4_state_mark_reclaim_nograce);
2203 nfs4_schedule_state_manager(clp);
2204
2205 dprintk("%s: state revoked on server %s\n", __func__, clp->cl_hostname);
2206}
2207
2200static void nfs41_handle_recallable_state_revoked(struct nfs_client *clp) 2208static void nfs41_handle_recallable_state_revoked(struct nfs_client *clp)
2201{ 2209{
2202 /* This will need to handle layouts too */ 2210 /* FIXME: For now, we destroy all layouts. */
2203 nfs_expire_all_delegations(clp); 2211 pnfs_destroy_all_layouts(clp);
2212 /* FIXME: For now, we test all delegations+open state+locks. */
2213 nfs41_handle_some_state_revoked(clp);
2204 dprintk("%s: Recallable state revoked on server %s!\n", __func__, 2214 dprintk("%s: Recallable state revoked on server %s!\n", __func__,
2205 clp->cl_hostname); 2215 clp->cl_hostname);
2206} 2216}
2207 2217
2208static void nfs41_handle_backchannel_fault(struct nfs_client *clp) 2218static void nfs41_handle_backchannel_fault(struct nfs_client *clp)
2209{ 2219{
2210 nfs_expire_all_delegations(clp); 2220 set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
2211 if (test_and_set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) == 0) 2221 nfs4_schedule_state_manager(clp);
2212 nfs4_schedule_state_manager(clp); 2222
2213 dprintk("%s: server %s declared a backchannel fault\n", __func__, 2223 dprintk("%s: server %s declared a backchannel fault\n", __func__,
2214 clp->cl_hostname); 2224 clp->cl_hostname);
2215} 2225}
@@ -2231,10 +2241,11 @@ void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags)
2231 2241
2232 if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED) 2242 if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
2233 nfs41_handle_server_reboot(clp); 2243 nfs41_handle_server_reboot(clp);
2234 if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED | 2244 if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED))
2235 SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED | 2245 nfs41_handle_all_state_revoked(clp);
2246 if (flags & (SEQ4_STATUS_EXPIRED_SOME_STATE_REVOKED |
2236 SEQ4_STATUS_ADMIN_STATE_REVOKED)) 2247 SEQ4_STATUS_ADMIN_STATE_REVOKED))
2237 nfs41_handle_state_revoked(clp); 2248 nfs41_handle_some_state_revoked(clp);
2238 if (flags & SEQ4_STATUS_LEASE_MOVED) 2249 if (flags & SEQ4_STATUS_LEASE_MOVED)
2239 nfs4_schedule_lease_moved_recovery(clp); 2250 nfs4_schedule_lease_moved_recovery(clp);
2240 if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED) 2251 if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index 1da68d3b1eda..4984bbe55ff1 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -1100,8 +1100,6 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
1100 mirror->pg_base = 0; 1100 mirror->pg_base = 0;
1101 mirror->pg_recoalesce = 0; 1101 mirror->pg_recoalesce = 0;
1102 1102
1103 desc->pg_moreio = 0;
1104
1105 while (!list_empty(&head)) { 1103 while (!list_empty(&head)) {
1106 struct nfs_page *req; 1104 struct nfs_page *req;
1107 1105
@@ -1109,8 +1107,11 @@ static int nfs_do_recoalesce(struct nfs_pageio_descriptor *desc)
1109 nfs_list_remove_request(req); 1107 nfs_list_remove_request(req);
1110 if (__nfs_pageio_add_request(desc, req)) 1108 if (__nfs_pageio_add_request(desc, req))
1111 continue; 1109 continue;
1112 if (desc->pg_error < 0) 1110 if (desc->pg_error < 0) {
1111 list_splice_tail(&head, &mirror->pg_list);
1112 mirror->pg_recoalesce = 1;
1113 return 0; 1113 return 0;
1114 }
1114 break; 1115 break;
1115 } 1116 }
1116 } while (mirror->pg_recoalesce); 1117 } while (mirror->pg_recoalesce);
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 0ba9a02c9566..70bf706b1090 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -352,7 +352,7 @@ pnfs_layout_need_return(struct pnfs_layout_hdr *lo,
352{ 352{
353 struct pnfs_layout_segment *s; 353 struct pnfs_layout_segment *s;
354 354
355 if (!test_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags)) 355 if (!test_and_clear_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags))
356 return false; 356 return false;
357 357
358 list_for_each_entry(s, &lo->plh_segs, pls_list) 358 list_for_each_entry(s, &lo->plh_segs, pls_list)
@@ -362,6 +362,18 @@ pnfs_layout_need_return(struct pnfs_layout_hdr *lo,
362 return true; 362 return true;
363} 363}
364 364
365static bool
366pnfs_prepare_layoutreturn(struct pnfs_layout_hdr *lo)
367{
368 if (test_and_set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
369 return false;
370 lo->plh_return_iomode = 0;
371 lo->plh_block_lgets++;
372 pnfs_get_layout_hdr(lo);
373 clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, &lo->plh_flags);
374 return true;
375}
376
365static void pnfs_layoutreturn_before_put_lseg(struct pnfs_layout_segment *lseg, 377static void pnfs_layoutreturn_before_put_lseg(struct pnfs_layout_segment *lseg,
366 struct pnfs_layout_hdr *lo, struct inode *inode) 378 struct pnfs_layout_hdr *lo, struct inode *inode)
367{ 379{
@@ -372,17 +384,16 @@ static void pnfs_layoutreturn_before_put_lseg(struct pnfs_layout_segment *lseg,
372 if (pnfs_layout_need_return(lo, lseg)) { 384 if (pnfs_layout_need_return(lo, lseg)) {
373 nfs4_stateid stateid; 385 nfs4_stateid stateid;
374 enum pnfs_iomode iomode; 386 enum pnfs_iomode iomode;
387 bool send;
375 388
376 stateid = lo->plh_stateid; 389 stateid = lo->plh_stateid;
377 iomode = lo->plh_return_iomode; 390 iomode = lo->plh_return_iomode;
378 /* decreased in pnfs_send_layoutreturn() */ 391 send = pnfs_prepare_layoutreturn(lo);
379 lo->plh_block_lgets++;
380 lo->plh_return_iomode = 0;
381 spin_unlock(&inode->i_lock); 392 spin_unlock(&inode->i_lock);
382 pnfs_get_layout_hdr(lo); 393 if (send) {
383 394 /* Send an async layoutreturn so we dont deadlock */
384 /* Send an async layoutreturn so we dont deadlock */ 395 pnfs_send_layoutreturn(lo, stateid, iomode, false);
385 pnfs_send_layoutreturn(lo, stateid, iomode, false); 396 }
386 } else 397 } else
387 spin_unlock(&inode->i_lock); 398 spin_unlock(&inode->i_lock);
388} 399}
@@ -411,6 +422,10 @@ pnfs_put_lseg(struct pnfs_layout_segment *lseg)
411 pnfs_layoutreturn_before_put_lseg(lseg, lo, inode); 422 pnfs_layoutreturn_before_put_lseg(lseg, lo, inode);
412 423
413 if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) { 424 if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
425 if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
426 spin_unlock(&inode->i_lock);
427 return;
428 }
414 pnfs_get_layout_hdr(lo); 429 pnfs_get_layout_hdr(lo);
415 pnfs_layout_remove_lseg(lo, lseg); 430 pnfs_layout_remove_lseg(lo, lseg);
416 spin_unlock(&inode->i_lock); 431 spin_unlock(&inode->i_lock);
@@ -451,6 +466,8 @@ pnfs_put_lseg_locked(struct pnfs_layout_segment *lseg)
451 test_bit(NFS_LSEG_VALID, &lseg->pls_flags)); 466 test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
452 if (atomic_dec_and_test(&lseg->pls_refcount)) { 467 if (atomic_dec_and_test(&lseg->pls_refcount)) {
453 struct pnfs_layout_hdr *lo = lseg->pls_layout; 468 struct pnfs_layout_hdr *lo = lseg->pls_layout;
469 if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags))
470 return;
454 pnfs_get_layout_hdr(lo); 471 pnfs_get_layout_hdr(lo);
455 pnfs_layout_remove_lseg(lo, lseg); 472 pnfs_layout_remove_lseg(lo, lseg);
456 pnfs_free_lseg_async(lseg); 473 pnfs_free_lseg_async(lseg);
@@ -924,6 +941,7 @@ void pnfs_clear_layoutreturn_waitbit(struct pnfs_layout_hdr *lo)
924 clear_bit_unlock(NFS_LAYOUT_RETURN, &lo->plh_flags); 941 clear_bit_unlock(NFS_LAYOUT_RETURN, &lo->plh_flags);
925 smp_mb__after_atomic(); 942 smp_mb__after_atomic();
926 wake_up_bit(&lo->plh_flags, NFS_LAYOUT_RETURN); 943 wake_up_bit(&lo->plh_flags, NFS_LAYOUT_RETURN);
944 rpc_wake_up(&NFS_SERVER(lo->plh_inode)->roc_rpcwaitq);
927} 945}
928 946
929static int 947static int
@@ -978,6 +996,7 @@ _pnfs_return_layout(struct inode *ino)
978 LIST_HEAD(tmp_list); 996 LIST_HEAD(tmp_list);
979 nfs4_stateid stateid; 997 nfs4_stateid stateid;
980 int status = 0, empty; 998 int status = 0, empty;
999 bool send;
981 1000
982 dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino); 1001 dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino);
983 1002
@@ -1007,17 +1026,18 @@ _pnfs_return_layout(struct inode *ino)
1007 /* Don't send a LAYOUTRETURN if list was initially empty */ 1026 /* Don't send a LAYOUTRETURN if list was initially empty */
1008 if (empty) { 1027 if (empty) {
1009 spin_unlock(&ino->i_lock); 1028 spin_unlock(&ino->i_lock);
1010 pnfs_put_layout_hdr(lo);
1011 dprintk("NFS: %s no layout segments to return\n", __func__); 1029 dprintk("NFS: %s no layout segments to return\n", __func__);
1012 goto out; 1030 goto out_put_layout_hdr;
1013 } 1031 }
1014 1032
1015 set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags); 1033 set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
1016 lo->plh_block_lgets++; 1034 send = pnfs_prepare_layoutreturn(lo);
1017 spin_unlock(&ino->i_lock); 1035 spin_unlock(&ino->i_lock);
1018 pnfs_free_lseg_list(&tmp_list); 1036 pnfs_free_lseg_list(&tmp_list);
1019 1037 if (send)
1020 status = pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, true); 1038 status = pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, true);
1039out_put_layout_hdr:
1040 pnfs_put_layout_hdr(lo);
1021out: 1041out:
1022 dprintk("<-- %s status: %d\n", __func__, status); 1042 dprintk("<-- %s status: %d\n", __func__, status);
1023 return status; 1043 return status;
@@ -1097,13 +1117,9 @@ bool pnfs_roc(struct inode *ino)
1097out_noroc: 1117out_noroc:
1098 if (lo) { 1118 if (lo) {
1099 stateid = lo->plh_stateid; 1119 stateid = lo->plh_stateid;
1100 layoutreturn = 1120 if (test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
1101 test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, 1121 &lo->plh_flags))
1102 &lo->plh_flags); 1122 layoutreturn = pnfs_prepare_layoutreturn(lo);
1103 if (layoutreturn) {
1104 lo->plh_block_lgets++;
1105 pnfs_get_layout_hdr(lo);
1106 }
1107 } 1123 }
1108 spin_unlock(&ino->i_lock); 1124 spin_unlock(&ino->i_lock);
1109 if (layoutreturn) { 1125 if (layoutreturn) {
@@ -1146,15 +1162,18 @@ bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task)
1146 struct pnfs_layout_segment *lseg; 1162 struct pnfs_layout_segment *lseg;
1147 nfs4_stateid stateid; 1163 nfs4_stateid stateid;
1148 u32 current_seqid; 1164 u32 current_seqid;
1149 bool found = false, layoutreturn = false; 1165 bool layoutreturn = false;
1150 1166
1151 spin_lock(&ino->i_lock); 1167 spin_lock(&ino->i_lock);
1152 list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list) 1168 list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list) {
1153 if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) { 1169 if (!test_bit(NFS_LSEG_ROC, &lseg->pls_flags))
1154 rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL); 1170 continue;
1155 found = true; 1171 if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags))
1156 goto out; 1172 continue;
1157 } 1173 rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
1174 spin_unlock(&ino->i_lock);
1175 return true;
1176 }
1158 lo = nfsi->layout; 1177 lo = nfsi->layout;
1159 current_seqid = be32_to_cpu(lo->plh_stateid.seqid); 1178 current_seqid = be32_to_cpu(lo->plh_stateid.seqid);
1160 1179
@@ -1162,23 +1181,19 @@ bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task)
1162 * a barrier, we choose the worst-case barrier. 1181 * a barrier, we choose the worst-case barrier.
1163 */ 1182 */
1164 *barrier = current_seqid + atomic_read(&lo->plh_outstanding); 1183 *barrier = current_seqid + atomic_read(&lo->plh_outstanding);
1165out: 1184 stateid = lo->plh_stateid;
1166 if (!found) { 1185 if (test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE,
1167 stateid = lo->plh_stateid; 1186 &lo->plh_flags))
1168 layoutreturn = 1187 layoutreturn = pnfs_prepare_layoutreturn(lo);
1169 test_and_clear_bit(NFS_LAYOUT_RETURN_BEFORE_CLOSE, 1188 if (test_bit(NFS_LAYOUT_RETURN, &lo->plh_flags))
1170 &lo->plh_flags); 1189 rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
1171 if (layoutreturn) { 1190
1172 lo->plh_block_lgets++;
1173 pnfs_get_layout_hdr(lo);
1174 }
1175 }
1176 spin_unlock(&ino->i_lock); 1191 spin_unlock(&ino->i_lock);
1177 if (layoutreturn) { 1192 if (layoutreturn) {
1178 rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
1179 pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, false); 1193 pnfs_send_layoutreturn(lo, stateid, IOMODE_ANY, false);
1194 return true;
1180 } 1195 }
1181 return found; 1196 return false;
1182} 1197}
1183 1198
1184/* 1199/*
@@ -1695,7 +1710,6 @@ void pnfs_error_mark_layout_for_return(struct inode *inode,
1695 spin_lock(&inode->i_lock); 1710 spin_lock(&inode->i_lock);
1696 /* set failure bit so that pnfs path will be retried later */ 1711 /* set failure bit so that pnfs path will be retried later */
1697 pnfs_layout_set_fail_bit(lo, iomode); 1712 pnfs_layout_set_fail_bit(lo, iomode);
1698 set_bit(NFS_LAYOUT_RETURN, &lo->plh_flags);
1699 if (lo->plh_return_iomode == 0) 1713 if (lo->plh_return_iomode == 0)
1700 lo->plh_return_iomode = range.iomode; 1714 lo->plh_return_iomode = range.iomode;
1701 else if (lo->plh_return_iomode != range.iomode) 1715 else if (lo->plh_return_iomode != range.iomode)
@@ -2207,13 +2221,12 @@ pnfs_layoutcommit_inode(struct inode *inode, bool sync)
2207 if (ld->prepare_layoutcommit) { 2221 if (ld->prepare_layoutcommit) {
2208 status = ld->prepare_layoutcommit(&data->args); 2222 status = ld->prepare_layoutcommit(&data->args);
2209 if (status) { 2223 if (status) {
2224 put_rpccred(data->cred);
2210 spin_lock(&inode->i_lock); 2225 spin_lock(&inode->i_lock);
2211 set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags); 2226 set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags);
2212 if (end_pos > nfsi->layout->plh_lwb) 2227 if (end_pos > nfsi->layout->plh_lwb)
2213 nfsi->layout->plh_lwb = end_pos; 2228 nfsi->layout->plh_lwb = end_pos;
2214 spin_unlock(&inode->i_lock); 2229 goto out_unlock;
2215 put_rpccred(data->cred);
2216 goto clear_layoutcommitting;
2217 } 2230 }
2218 } 2231 }
2219 2232
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 65869ca9c851..75a35a1afa79 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1379,24 +1379,27 @@ static void nfs_writeback_check_extend(struct nfs_pgio_header *hdr,
1379{ 1379{
1380 struct nfs_pgio_args *argp = &hdr->args; 1380 struct nfs_pgio_args *argp = &hdr->args;
1381 struct nfs_pgio_res *resp = &hdr->res; 1381 struct nfs_pgio_res *resp = &hdr->res;
1382 u64 size = argp->offset + resp->count;
1382 1383
1383 if (!(fattr->valid & NFS_ATTR_FATTR_SIZE)) 1384 if (!(fattr->valid & NFS_ATTR_FATTR_SIZE))
1385 fattr->size = size;
1386 if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode)) {
1387 fattr->valid &= ~NFS_ATTR_FATTR_SIZE;
1384 return; 1388 return;
1385 if (argp->offset + resp->count != fattr->size) 1389 }
1386 return; 1390 if (size != fattr->size)
1387 if (nfs_size_to_loff_t(fattr->size) < i_size_read(hdr->inode))
1388 return; 1391 return;
1389 /* Set attribute barrier */ 1392 /* Set attribute barrier */
1390 nfs_fattr_set_barrier(fattr); 1393 nfs_fattr_set_barrier(fattr);
1394 /* ...and update size */
1395 fattr->valid |= NFS_ATTR_FATTR_SIZE;
1391} 1396}
1392 1397
1393void nfs_writeback_update_inode(struct nfs_pgio_header *hdr) 1398void nfs_writeback_update_inode(struct nfs_pgio_header *hdr)
1394{ 1399{
1395 struct nfs_fattr *fattr = hdr->res.fattr; 1400 struct nfs_fattr *fattr = &hdr->fattr;
1396 struct inode *inode = hdr->inode; 1401 struct inode *inode = hdr->inode;
1397 1402
1398 if (fattr == NULL)
1399 return;
1400 spin_lock(&inode->i_lock); 1403 spin_lock(&inode->i_lock);
1401 nfs_writeback_check_extend(hdr, fattr); 1404 nfs_writeback_check_extend(hdr, fattr);
1402 nfs_post_op_update_inode_force_wcc_locked(inode, fattr); 1405 nfs_post_op_update_inode_force_wcc_locked(inode, fattr);
diff --git a/fs/nfsd/nfs4layouts.c b/fs/nfsd/nfs4layouts.c
index 6904213a4363..ebf90e487c75 100644
--- a/fs/nfsd/nfs4layouts.c
+++ b/fs/nfsd/nfs4layouts.c
@@ -212,6 +212,7 @@ nfsd4_alloc_layout_stateid(struct nfsd4_compound_state *cstate,
212 BUG_ON(!ls->ls_file); 212 BUG_ON(!ls->ls_file);
213 213
214 if (nfsd4_layout_setlease(ls)) { 214 if (nfsd4_layout_setlease(ls)) {
215 fput(ls->ls_file);
215 put_nfs4_file(fp); 216 put_nfs4_file(fp);
216 kmem_cache_free(nfs4_layout_stateid_cache, ls); 217 kmem_cache_free(nfs4_layout_stateid_cache, ls);
217 return NULL; 218 return NULL;
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 61dfb33f0559..95202719a1fd 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -4396,9 +4396,9 @@ laundromat_main(struct work_struct *laundry)
4396 queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ); 4396 queue_delayed_work(laundry_wq, &nn->laundromat_work, t*HZ);
4397} 4397}
4398 4398
4399static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_ol_stateid *stp) 4399static inline __be32 nfs4_check_fh(struct svc_fh *fhp, struct nfs4_stid *stp)
4400{ 4400{
4401 if (!fh_match(&fhp->fh_handle, &stp->st_stid.sc_file->fi_fhandle)) 4401 if (!fh_match(&fhp->fh_handle, &stp->sc_file->fi_fhandle))
4402 return nfserr_bad_stateid; 4402 return nfserr_bad_stateid;
4403 return nfs_ok; 4403 return nfs_ok;
4404} 4404}
@@ -4601,9 +4601,6 @@ nfs4_check_olstateid(struct svc_fh *fhp, struct nfs4_ol_stateid *ols, int flags)
4601{ 4601{
4602 __be32 status; 4602 __be32 status;
4603 4603
4604 status = nfs4_check_fh(fhp, ols);
4605 if (status)
4606 return status;
4607 status = nfsd4_check_openowner_confirmed(ols); 4604 status = nfsd4_check_openowner_confirmed(ols);
4608 if (status) 4605 if (status)
4609 return status; 4606 return status;
@@ -4690,6 +4687,9 @@ nfs4_preprocess_stateid_op(struct svc_rqst *rqstp,
4690 status = nfserr_bad_stateid; 4687 status = nfserr_bad_stateid;
4691 break; 4688 break;
4692 } 4689 }
4690 if (status)
4691 goto out;
4692 status = nfs4_check_fh(fhp, s);
4693 4693
4694done: 4694done:
4695 if (!status && filpp) 4695 if (!status && filpp)
@@ -4798,7 +4798,7 @@ static __be32 nfs4_seqid_op_checks(struct nfsd4_compound_state *cstate, stateid_
4798 status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate)); 4798 status = check_stateid_generation(stateid, &stp->st_stid.sc_stateid, nfsd4_has_session(cstate));
4799 if (status) 4799 if (status)
4800 return status; 4800 return status;
4801 return nfs4_check_fh(current_fh, stp); 4801 return nfs4_check_fh(current_fh, &stp->st_stid);
4802} 4802}
4803 4803
4804/* 4804/*
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 54633858733a..75e0563c09d1 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -2143,6 +2143,7 @@ nfsd4_encode_aclname(struct xdr_stream *xdr, struct svc_rqst *rqstp,
2143#define WORD0_ABSENT_FS_ATTRS (FATTR4_WORD0_FS_LOCATIONS | FATTR4_WORD0_FSID | \ 2143#define WORD0_ABSENT_FS_ATTRS (FATTR4_WORD0_FS_LOCATIONS | FATTR4_WORD0_FSID | \
2144 FATTR4_WORD0_RDATTR_ERROR) 2144 FATTR4_WORD0_RDATTR_ERROR)
2145#define WORD1_ABSENT_FS_ATTRS FATTR4_WORD1_MOUNTED_ON_FILEID 2145#define WORD1_ABSENT_FS_ATTRS FATTR4_WORD1_MOUNTED_ON_FILEID
2146#define WORD2_ABSENT_FS_ATTRS 0
2146 2147
2147#ifdef CONFIG_NFSD_V4_SECURITY_LABEL 2148#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
2148static inline __be32 2149static inline __be32
@@ -2171,7 +2172,7 @@ nfsd4_encode_security_label(struct xdr_stream *xdr, struct svc_rqst *rqstp,
2171{ return 0; } 2172{ return 0; }
2172#endif 2173#endif
2173 2174
2174static __be32 fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *rdattr_err) 2175static __be32 fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *bmval2, u32 *rdattr_err)
2175{ 2176{
2176 /* As per referral draft: */ 2177 /* As per referral draft: */
2177 if (*bmval0 & ~WORD0_ABSENT_FS_ATTRS || 2178 if (*bmval0 & ~WORD0_ABSENT_FS_ATTRS ||
@@ -2184,6 +2185,7 @@ static __be32 fattr_handle_absent_fs(u32 *bmval0, u32 *bmval1, u32 *rdattr_err)
2184 } 2185 }
2185 *bmval0 &= WORD0_ABSENT_FS_ATTRS; 2186 *bmval0 &= WORD0_ABSENT_FS_ATTRS;
2186 *bmval1 &= WORD1_ABSENT_FS_ATTRS; 2187 *bmval1 &= WORD1_ABSENT_FS_ATTRS;
2188 *bmval2 &= WORD2_ABSENT_FS_ATTRS;
2187 return 0; 2189 return 0;
2188} 2190}
2189 2191
@@ -2246,8 +2248,7 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
2246 BUG_ON(bmval2 & ~nfsd_suppattrs2(minorversion)); 2248 BUG_ON(bmval2 & ~nfsd_suppattrs2(minorversion));
2247 2249
2248 if (exp->ex_fslocs.migrated) { 2250 if (exp->ex_fslocs.migrated) {
2249 BUG_ON(bmval[2]); 2251 status = fattr_handle_absent_fs(&bmval0, &bmval1, &bmval2, &rdattr_err);
2250 status = fattr_handle_absent_fs(&bmval0, &bmval1, &rdattr_err);
2251 if (status) 2252 if (status)
2252 goto out; 2253 goto out;
2253 } 2254 }
@@ -2286,8 +2287,8 @@ nfsd4_encode_fattr(struct xdr_stream *xdr, struct svc_fh *fhp,
2286 } 2287 }
2287 2288
2288#ifdef CONFIG_NFSD_V4_SECURITY_LABEL 2289#ifdef CONFIG_NFSD_V4_SECURITY_LABEL
2289 if ((bmval[2] & FATTR4_WORD2_SECURITY_LABEL) || 2290 if ((bmval2 & FATTR4_WORD2_SECURITY_LABEL) ||
2290 bmval[0] & FATTR4_WORD0_SUPPORTED_ATTRS) { 2291 bmval0 & FATTR4_WORD0_SUPPORTED_ATTRS) {
2291 err = security_inode_getsecctx(d_inode(dentry), 2292 err = security_inode_getsecctx(d_inode(dentry),
2292 &context, &contextlen); 2293 &context, &contextlen);
2293 contextsupport = (err == 0); 2294 contextsupport = (err == 0);
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index 3e594ce41010..39ddcaf0918f 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -152,15 +152,31 @@ void fsnotify_destroy_mark_locked(struct fsnotify_mark *mark,
152 BUG(); 152 BUG();
153 153
154 list_del_init(&mark->g_list); 154 list_del_init(&mark->g_list);
155
155 spin_unlock(&mark->lock); 156 spin_unlock(&mark->lock);
156 157
157 if (inode && (mark->flags & FSNOTIFY_MARK_FLAG_OBJECT_PINNED)) 158 if (inode && (mark->flags & FSNOTIFY_MARK_FLAG_OBJECT_PINNED))
158 iput(inode); 159 iput(inode);
160 /* release lock temporarily */
161 mutex_unlock(&group->mark_mutex);
159 162
160 spin_lock(&destroy_lock); 163 spin_lock(&destroy_lock);
161 list_add(&mark->g_list, &destroy_list); 164 list_add(&mark->g_list, &destroy_list);
162 spin_unlock(&destroy_lock); 165 spin_unlock(&destroy_lock);
163 wake_up(&destroy_waitq); 166 wake_up(&destroy_waitq);
167 /*
168 * We don't necessarily have a ref on mark from caller so the above destroy
169 * may have actually freed it, unless this group provides a 'freeing_mark'
170 * function which must be holding a reference.
171 */
172
173 /*
174 * Some groups like to know that marks are being freed. This is a
175 * callback to the group function to let it know that this mark
176 * is being freed.
177 */
178 if (group->ops->freeing_mark)
179 group->ops->freeing_mark(mark, group);
164 180
165 /* 181 /*
166 * __fsnotify_update_child_dentry_flags(inode); 182 * __fsnotify_update_child_dentry_flags(inode);
@@ -175,6 +191,8 @@ void fsnotify_destroy_mark_locked(struct fsnotify_mark *mark,
175 */ 191 */
176 192
177 atomic_dec(&group->num_marks); 193 atomic_dec(&group->num_marks);
194
195 mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
178} 196}
179 197
180void fsnotify_destroy_mark(struct fsnotify_mark *mark, 198void fsnotify_destroy_mark(struct fsnotify_mark *mark,
@@ -187,10 +205,7 @@ void fsnotify_destroy_mark(struct fsnotify_mark *mark,
187 205
188/* 206/*
189 * Destroy all marks in the given list. The marks must be already detached from 207 * Destroy all marks in the given list. The marks must be already detached from
190 * the original inode / vfsmount. Note that we can race with 208 * the original inode / vfsmount.
191 * fsnotify_clear_marks_by_group_flags(). However we hold a reference to each
192 * mark so they won't get freed from under us and nobody else touches our
193 * free_list list_head.
194 */ 209 */
195void fsnotify_destroy_marks(struct list_head *to_free) 210void fsnotify_destroy_marks(struct list_head *to_free)
196{ 211{
@@ -391,22 +406,42 @@ struct fsnotify_mark *fsnotify_find_mark(struct hlist_head *head,
391} 406}
392 407
393/* 408/*
394 * Clear any marks in a group in which mark->flags & flags is true. 409 * clear any marks in a group in which mark->flags & flags is true
395 */ 410 */
396void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group, 411void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group,
397 unsigned int flags) 412 unsigned int flags)
398{ 413{
399 struct fsnotify_mark *lmark, *mark; 414 struct fsnotify_mark *lmark, *mark;
415 LIST_HEAD(to_free);
400 416
417 /*
418 * We have to be really careful here. Anytime we drop mark_mutex, e.g.
419 * fsnotify_clear_marks_by_inode() can come and free marks. Even in our
420 * to_free list so we have to use mark_mutex even when accessing that
421 * list. And freeing mark requires us to drop mark_mutex. So we can
422 * reliably free only the first mark in the list. That's why we first
423 * move marks to free to to_free list in one go and then free marks in
424 * to_free list one by one.
425 */
401 mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING); 426 mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
402 list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) { 427 list_for_each_entry_safe(mark, lmark, &group->marks_list, g_list) {
403 if (mark->flags & flags) { 428 if (mark->flags & flags)
404 fsnotify_get_mark(mark); 429 list_move(&mark->g_list, &to_free);
405 fsnotify_destroy_mark_locked(mark, group);
406 fsnotify_put_mark(mark);
407 }
408 } 430 }
409 mutex_unlock(&group->mark_mutex); 431 mutex_unlock(&group->mark_mutex);
432
433 while (1) {
434 mutex_lock_nested(&group->mark_mutex, SINGLE_DEPTH_NESTING);
435 if (list_empty(&to_free)) {
436 mutex_unlock(&group->mark_mutex);
437 break;
438 }
439 mark = list_first_entry(&to_free, struct fsnotify_mark, g_list);
440 fsnotify_get_mark(mark);
441 fsnotify_destroy_mark_locked(mark, group);
442 mutex_unlock(&group->mark_mutex);
443 fsnotify_put_mark(mark);
444 }
410} 445}
411 446
412/* 447/*
@@ -445,7 +480,6 @@ static int fsnotify_mark_destroy(void *ignored)
445{ 480{
446 struct fsnotify_mark *mark, *next; 481 struct fsnotify_mark *mark, *next;
447 struct list_head private_destroy_list; 482 struct list_head private_destroy_list;
448 struct fsnotify_group *group;
449 483
450 for (;;) { 484 for (;;) {
451 spin_lock(&destroy_lock); 485 spin_lock(&destroy_lock);
@@ -457,14 +491,6 @@ static int fsnotify_mark_destroy(void *ignored)
457 491
458 list_for_each_entry_safe(mark, next, &private_destroy_list, g_list) { 492 list_for_each_entry_safe(mark, next, &private_destroy_list, g_list) {
459 list_del_init(&mark->g_list); 493 list_del_init(&mark->g_list);
460 group = mark->group;
461 /*
462 * Some groups like to know that marks are being freed.
463 * This is a callback to the group function to let it
464 * know that this mark is being freed.
465 */
466 if (group && group->ops->freeing_mark)
467 group->ops->freeing_mark(mark, group);
468 fsnotify_put_mark(mark); 494 fsnotify_put_mark(mark);
469 } 495 }
470 496
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 1a35c6139656..0f5fd9db8194 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -685,7 +685,7 @@ static int ocfs2_direct_IO_zero_extend(struct ocfs2_super *osb,
685 685
686 if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) { 686 if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) {
687 u64 s = i_size_read(inode); 687 u64 s = i_size_read(inode);
688 sector_t sector = (p_cpos << (osb->s_clustersize_bits - 9)) + 688 sector_t sector = ((u64)p_cpos << (osb->s_clustersize_bits - 9)) +
689 (do_div(s, osb->s_clustersize) >> 9); 689 (do_div(s, osb->s_clustersize) >> 9);
690 690
691 ret = blkdev_issue_zeroout(osb->sb->s_bdev, sector, 691 ret = blkdev_issue_zeroout(osb->sb->s_bdev, sector,
@@ -910,7 +910,7 @@ static ssize_t ocfs2_direct_IO_write(struct kiocb *iocb,
910 BUG_ON(!p_cpos || (ext_flags & OCFS2_EXT_UNWRITTEN)); 910 BUG_ON(!p_cpos || (ext_flags & OCFS2_EXT_UNWRITTEN));
911 911
912 ret = blkdev_issue_zeroout(osb->sb->s_bdev, 912 ret = blkdev_issue_zeroout(osb->sb->s_bdev,
913 p_cpos << (osb->s_clustersize_bits - 9), 913 (u64)p_cpos << (osb->s_clustersize_bits - 9),
914 zero_len_head >> 9, GFP_NOFS, false); 914 zero_len_head >> 9, GFP_NOFS, false);
915 if (ret < 0) 915 if (ret < 0)
916 mlog_errno(ret); 916 mlog_errno(ret);
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 8b23aa2f52dd..23157e40dd74 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -4025,9 +4025,13 @@ static void ocfs2_downconvert_thread_do_work(struct ocfs2_super *osb)
4025 osb->dc_work_sequence = osb->dc_wake_sequence; 4025 osb->dc_work_sequence = osb->dc_wake_sequence;
4026 4026
4027 processed = osb->blocked_lock_count; 4027 processed = osb->blocked_lock_count;
4028 while (processed) { 4028 /*
4029 BUG_ON(list_empty(&osb->blocked_lock_list)); 4029 * blocked lock processing in this loop might call iput which can
4030 4030 * remove items off osb->blocked_lock_list. Downconvert up to
4031 * 'processed' number of locks, but stop short if we had some
4032 * removed in ocfs2_mark_lockres_freeing when downconverting.
4033 */
4034 while (processed && !list_empty(&osb->blocked_lock_list)) {
4031 lockres = list_entry(osb->blocked_lock_list.next, 4035 lockres = list_entry(osb->blocked_lock_list.next,
4032 struct ocfs2_lock_res, l_blocked_list); 4036 struct ocfs2_lock_res, l_blocked_list);
4033 list_del_init(&lockres->l_blocked_list); 4037 list_del_init(&lockres->l_blocked_list);
diff --git a/fs/pnode.h b/fs/pnode.h
index 7114ce6e6b9e..0fcdbe7ca648 100644
--- a/fs/pnode.h
+++ b/fs/pnode.h
@@ -20,8 +20,6 @@
20#define SET_MNT_MARK(m) ((m)->mnt.mnt_flags |= MNT_MARKED) 20#define SET_MNT_MARK(m) ((m)->mnt.mnt_flags |= MNT_MARKED)
21#define CLEAR_MNT_MARK(m) ((m)->mnt.mnt_flags &= ~MNT_MARKED) 21#define CLEAR_MNT_MARK(m) ((m)->mnt.mnt_flags &= ~MNT_MARKED)
22#define IS_MNT_LOCKED(m) ((m)->mnt.mnt_flags & MNT_LOCKED) 22#define IS_MNT_LOCKED(m) ((m)->mnt.mnt_flags & MNT_LOCKED)
23#define IS_MNT_LOCKED_AND_LAZY(m) \
24 (((m)->mnt.mnt_flags & (MNT_LOCKED|MNT_SYNC_UMOUNT)) == MNT_LOCKED)
25 23
26#define CL_EXPIRE 0x01 24#define CL_EXPIRE 0x01
27#define CL_SLAVE 0x02 25#define CL_SLAVE 0x02
diff --git a/fs/signalfd.c b/fs/signalfd.c
index 7e412ad74836..270221fcef42 100644
--- a/fs/signalfd.c
+++ b/fs/signalfd.c
@@ -121,8 +121,9 @@ static int signalfd_copyinfo(struct signalfd_siginfo __user *uinfo,
121 * Other callers might not initialize the si_lsb field, 121 * Other callers might not initialize the si_lsb field,
122 * so check explicitly for the right codes here. 122 * so check explicitly for the right codes here.
123 */ 123 */
124 if (kinfo->si_code == BUS_MCEERR_AR || 124 if (kinfo->si_signo == SIGBUS &&
125 kinfo->si_code == BUS_MCEERR_AO) 125 (kinfo->si_code == BUS_MCEERR_AR ||
126 kinfo->si_code == BUS_MCEERR_AO))
126 err |= __put_user((short) kinfo->si_addr_lsb, 127 err |= __put_user((short) kinfo->si_addr_lsb,
127 &uinfo->ssi_addr_lsb); 128 &uinfo->ssi_addr_lsb);
128#endif 129#endif
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 6afac3d561ac..8d0b3ade0ff0 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -1652,17 +1652,9 @@ static int udf_update_inode(struct inode *inode, int do_sync)
1652 iinfo->i_ext.i_data, inode->i_sb->s_blocksize - 1652 iinfo->i_ext.i_data, inode->i_sb->s_blocksize -
1653 sizeof(struct unallocSpaceEntry)); 1653 sizeof(struct unallocSpaceEntry));
1654 use->descTag.tagIdent = cpu_to_le16(TAG_IDENT_USE); 1654 use->descTag.tagIdent = cpu_to_le16(TAG_IDENT_USE);
1655 use->descTag.tagLocation = 1655 crclen = sizeof(struct unallocSpaceEntry);
1656 cpu_to_le32(iinfo->i_location.logicalBlockNum);
1657 crclen = sizeof(struct unallocSpaceEntry) +
1658 iinfo->i_lenAlloc - sizeof(struct tag);
1659 use->descTag.descCRCLength = cpu_to_le16(crclen);
1660 use->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)use +
1661 sizeof(struct tag),
1662 crclen));
1663 use->descTag.tagChecksum = udf_tag_checksum(&use->descTag);
1664 1656
1665 goto out; 1657 goto finish;
1666 } 1658 }
1667 1659
1668 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET)) 1660 if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_UID_FORGET))
@@ -1782,6 +1774,8 @@ static int udf_update_inode(struct inode *inode, int do_sync)
1782 efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE); 1774 efe->descTag.tagIdent = cpu_to_le16(TAG_IDENT_EFE);
1783 crclen = sizeof(struct extendedFileEntry); 1775 crclen = sizeof(struct extendedFileEntry);
1784 } 1776 }
1777
1778finish:
1785 if (iinfo->i_strat4096) { 1779 if (iinfo->i_strat4096) {
1786 fe->icbTag.strategyType = cpu_to_le16(4096); 1780 fe->icbTag.strategyType = cpu_to_le16(4096);
1787 fe->icbTag.strategyParameter = cpu_to_le16(1); 1781 fe->icbTag.strategyParameter = cpu_to_le16(1);
@@ -1791,7 +1785,9 @@ static int udf_update_inode(struct inode *inode, int do_sync)
1791 fe->icbTag.numEntries = cpu_to_le16(1); 1785 fe->icbTag.numEntries = cpu_to_le16(1);
1792 } 1786 }
1793 1787
1794 if (S_ISDIR(inode->i_mode)) 1788 if (iinfo->i_use)
1789 fe->icbTag.fileType = ICBTAG_FILE_TYPE_USE;
1790 else if (S_ISDIR(inode->i_mode))
1795 fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY; 1791 fe->icbTag.fileType = ICBTAG_FILE_TYPE_DIRECTORY;
1796 else if (S_ISREG(inode->i_mode)) 1792 else if (S_ISREG(inode->i_mode))
1797 fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR; 1793 fe->icbTag.fileType = ICBTAG_FILE_TYPE_REGULAR;
@@ -1828,7 +1824,6 @@ static int udf_update_inode(struct inode *inode, int do_sync)
1828 crclen)); 1824 crclen));
1829 fe->descTag.tagChecksum = udf_tag_checksum(&fe->descTag); 1825 fe->descTag.tagChecksum = udf_tag_checksum(&fe->descTag);
1830 1826
1831out:
1832 set_buffer_uptodate(bh); 1827 set_buffer_uptodate(bh);
1833 unlock_buffer(bh); 1828 unlock_buffer(bh);
1834 1829
diff --git a/fs/xfs/libxfs/xfs_attr_remote.c b/fs/xfs/libxfs/xfs_attr_remote.c
index 20de88d1bf86..dd714037c322 100644
--- a/fs/xfs/libxfs/xfs_attr_remote.c
+++ b/fs/xfs/libxfs/xfs_attr_remote.c
@@ -159,11 +159,10 @@ xfs_attr3_rmt_write_verify(
159 struct xfs_buf *bp) 159 struct xfs_buf *bp)
160{ 160{
161 struct xfs_mount *mp = bp->b_target->bt_mount; 161 struct xfs_mount *mp = bp->b_target->bt_mount;
162 struct xfs_buf_log_item *bip = bp->b_fspriv; 162 int blksize = mp->m_attr_geo->blksize;
163 char *ptr; 163 char *ptr;
164 int len; 164 int len;
165 xfs_daddr_t bno; 165 xfs_daddr_t bno;
166 int blksize = mp->m_attr_geo->blksize;
167 166
168 /* no verification of non-crc buffers */ 167 /* no verification of non-crc buffers */
169 if (!xfs_sb_version_hascrc(&mp->m_sb)) 168 if (!xfs_sb_version_hascrc(&mp->m_sb))
@@ -175,16 +174,22 @@ xfs_attr3_rmt_write_verify(
175 ASSERT(len >= blksize); 174 ASSERT(len >= blksize);
176 175
177 while (len > 0) { 176 while (len > 0) {
177 struct xfs_attr3_rmt_hdr *rmt = (struct xfs_attr3_rmt_hdr *)ptr;
178
178 if (!xfs_attr3_rmt_verify(mp, ptr, blksize, bno)) { 179 if (!xfs_attr3_rmt_verify(mp, ptr, blksize, bno)) {
179 xfs_buf_ioerror(bp, -EFSCORRUPTED); 180 xfs_buf_ioerror(bp, -EFSCORRUPTED);
180 xfs_verifier_error(bp); 181 xfs_verifier_error(bp);
181 return; 182 return;
182 } 183 }
183 if (bip) {
184 struct xfs_attr3_rmt_hdr *rmt;
185 184
186 rmt = (struct xfs_attr3_rmt_hdr *)ptr; 185 /*
187 rmt->rm_lsn = cpu_to_be64(bip->bli_item.li_lsn); 186 * Ensure we aren't writing bogus LSNs to disk. See
187 * xfs_attr3_rmt_hdr_set() for the explanation.
188 */
189 if (rmt->rm_lsn != cpu_to_be64(NULLCOMMITLSN)) {
190 xfs_buf_ioerror(bp, -EFSCORRUPTED);
191 xfs_verifier_error(bp);
192 return;
188 } 193 }
189 xfs_update_cksum(ptr, blksize, XFS_ATTR3_RMT_CRC_OFF); 194 xfs_update_cksum(ptr, blksize, XFS_ATTR3_RMT_CRC_OFF);
190 195
@@ -221,6 +226,18 @@ xfs_attr3_rmt_hdr_set(
221 rmt->rm_owner = cpu_to_be64(ino); 226 rmt->rm_owner = cpu_to_be64(ino);
222 rmt->rm_blkno = cpu_to_be64(bno); 227 rmt->rm_blkno = cpu_to_be64(bno);
223 228
229 /*
230 * Remote attribute blocks are written synchronously, so we don't
231 * have an LSN that we can stamp in them that makes any sense to log
232 * recovery. To ensure that log recovery handles overwrites of these
233 * blocks sanely (i.e. once they've been freed and reallocated as some
234 * other type of metadata) we need to ensure that the LSN has a value
235 * that tells log recovery to ignore the LSN and overwrite the buffer
236 * with whatever is in it's log. To do this, we use the magic
237 * NULLCOMMITLSN to indicate that the LSN is invalid.
238 */
239 rmt->rm_lsn = cpu_to_be64(NULLCOMMITLSN);
240
224 return sizeof(struct xfs_attr3_rmt_hdr); 241 return sizeof(struct xfs_attr3_rmt_hdr);
225} 242}
226 243
@@ -434,14 +451,21 @@ xfs_attr_rmtval_set(
434 451
435 /* 452 /*
436 * Allocate a single extent, up to the size of the value. 453 * Allocate a single extent, up to the size of the value.
454 *
455 * Note that we have to consider this a data allocation as we
456 * write the remote attribute without logging the contents.
457 * Hence we must ensure that we aren't using blocks that are on
458 * the busy list so that we don't overwrite blocks which have
459 * recently been freed but their transactions are not yet
460 * committed to disk. If we overwrite the contents of a busy
461 * extent and then crash then the block may not contain the
462 * correct metadata after log recovery occurs.
437 */ 463 */
438 xfs_bmap_init(args->flist, args->firstblock); 464 xfs_bmap_init(args->flist, args->firstblock);
439 nmap = 1; 465 nmap = 1;
440 error = xfs_bmapi_write(args->trans, dp, (xfs_fileoff_t)lblkno, 466 error = xfs_bmapi_write(args->trans, dp, (xfs_fileoff_t)lblkno,
441 blkcnt, 467 blkcnt, XFS_BMAPI_ATTRFORK, args->firstblock,
442 XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA, 468 args->total, &map, &nmap, args->flist);
443 args->firstblock, args->total, &map, &nmap,
444 args->flist);
445 if (!error) { 469 if (!error) {
446 error = xfs_bmap_finish(&args->trans, args->flist, 470 error = xfs_bmap_finish(&args->trans, args->flist,
447 &committed); 471 &committed);
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index f0e8249722d4..db4acc1c3e73 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -1514,18 +1514,27 @@ xfs_filemap_fault(
1514 struct vm_area_struct *vma, 1514 struct vm_area_struct *vma,
1515 struct vm_fault *vmf) 1515 struct vm_fault *vmf)
1516{ 1516{
1517 struct xfs_inode *ip = XFS_I(file_inode(vma->vm_file)); 1517 struct inode *inode = file_inode(vma->vm_file);
1518 int ret; 1518 int ret;
1519 1519
1520 trace_xfs_filemap_fault(ip); 1520 trace_xfs_filemap_fault(XFS_I(inode));
1521 1521
1522 /* DAX can shortcut the normal fault path on write faults! */ 1522 /* DAX can shortcut the normal fault path on write faults! */
1523 if ((vmf->flags & FAULT_FLAG_WRITE) && IS_DAX(VFS_I(ip))) 1523 if ((vmf->flags & FAULT_FLAG_WRITE) && IS_DAX(inode))
1524 return xfs_filemap_page_mkwrite(vma, vmf); 1524 return xfs_filemap_page_mkwrite(vma, vmf);
1525 1525
1526 xfs_ilock(ip, XFS_MMAPLOCK_SHARED); 1526 xfs_ilock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1527 ret = filemap_fault(vma, vmf); 1527 if (IS_DAX(inode)) {
1528 xfs_iunlock(ip, XFS_MMAPLOCK_SHARED); 1528 /*
1529 * we do not want to trigger unwritten extent conversion on read
1530 * faults - that is unnecessary overhead and would also require
1531 * changes to xfs_get_blocks_direct() to map unwritten extent
1532 * ioend for conversion on read-only mappings.
1533 */
1534 ret = __dax_fault(vma, vmf, xfs_get_blocks_direct, NULL);
1535 } else
1536 ret = filemap_fault(vma, vmf);
1537 xfs_iunlock(XFS_I(inode), XFS_MMAPLOCK_SHARED);
1529 1538
1530 return ret; 1539 return ret;
1531} 1540}
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 01dd228ca05e..480ebba8464f 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -1886,9 +1886,14 @@ xlog_recover_get_buf_lsn(
1886 uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid; 1886 uuid = &((struct xfs_dir3_blk_hdr *)blk)->uuid;
1887 break; 1887 break;
1888 case XFS_ATTR3_RMT_MAGIC: 1888 case XFS_ATTR3_RMT_MAGIC:
1889 lsn = be64_to_cpu(((struct xfs_attr3_rmt_hdr *)blk)->rm_lsn); 1889 /*
1890 uuid = &((struct xfs_attr3_rmt_hdr *)blk)->rm_uuid; 1890 * Remote attr blocks are written synchronously, rather than
1891 break; 1891 * being logged. That means they do not contain a valid LSN
1892 * (i.e. transactionally ordered) in them, and hence any time we
1893 * see a buffer to replay over the top of a remote attribute
1894 * block we should simply do so.
1895 */
1896 goto recover_immediately;
1892 case XFS_SB_MAGIC: 1897 case XFS_SB_MAGIC:
1893 lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn); 1898 lsn = be64_to_cpu(((struct xfs_dsb *)blk)->sb_lsn);
1894 uuid = &((struct xfs_dsb *)blk)->sb_uuid; 1899 uuid = &((struct xfs_dsb *)blk)->sb_uuid;
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 1a9791ea1cf0..020afa343dff 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -691,7 +691,7 @@ struct drm_vblank_crtc {
691 struct timer_list disable_timer; /* delayed disable timer */ 691 struct timer_list disable_timer; /* delayed disable timer */
692 692
693 /* vblank counter, protected by dev->vblank_time_lock for writes */ 693 /* vblank counter, protected by dev->vblank_time_lock for writes */
694 unsigned long count; 694 u32 count;
695 /* vblank timestamps, protected by dev->vblank_time_lock for writes */ 695 /* vblank timestamps, protected by dev->vblank_time_lock for writes */
696 struct timeval time[DRM_VBLANKTIME_RBSIZE]; 696 struct timeval time[DRM_VBLANKTIME_RBSIZE];
697 697
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index 800e0d1cf32c..2a747a91fded 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -170,6 +170,7 @@ struct drm_encoder_helper_funcs {
170 * @get_modes: get mode list for this connector 170 * @get_modes: get mode list for this connector
171 * @mode_valid: is this mode valid on the given connector? (optional) 171 * @mode_valid: is this mode valid on the given connector? (optional)
172 * @best_encoder: return the preferred encoder for this connector 172 * @best_encoder: return the preferred encoder for this connector
173 * @atomic_best_encoder: atomic version of @best_encoder
173 * 174 *
174 * The helper operations are called by the mid-layer CRTC helper. 175 * The helper operations are called by the mid-layer CRTC helper.
175 */ 176 */
@@ -178,6 +179,8 @@ struct drm_connector_helper_funcs {
178 enum drm_mode_status (*mode_valid)(struct drm_connector *connector, 179 enum drm_mode_status (*mode_valid)(struct drm_connector *connector,
179 struct drm_display_mode *mode); 180 struct drm_display_mode *mode);
180 struct drm_encoder *(*best_encoder)(struct drm_connector *connector); 181 struct drm_encoder *(*best_encoder)(struct drm_connector *connector);
182 struct drm_encoder *(*atomic_best_encoder)(struct drm_connector *connector,
183 struct drm_connector_state *connector_state);
181}; 184};
182 185
183extern void drm_helper_disable_unused_functions(struct drm_device *dev); 186extern void drm_helper_disable_unused_functions(struct drm_device *dev);
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 45c39a37f924..8bc073d297db 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -172,6 +172,7 @@
172 {0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ 172 {0x1002, 0x6610, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
173 {0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ 173 {0x1002, 0x6611, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
174 {0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \ 174 {0x1002, 0x6613, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_NEW_MEMMAP}, \
175 {0x1002, 0x6617, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
175 {0x1002, 0x6620, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 176 {0x1002, 0x6620, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
176 {0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 177 {0x1002, 0x6621, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
177 {0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 178 {0x1002, 0x6623, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_OLAND|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
diff --git a/include/linux/ata.h b/include/linux/ata.h
index fed36418dd1c..6c78956aa470 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -45,6 +45,7 @@ enum {
45 ATA_SECT_SIZE = 512, 45 ATA_SECT_SIZE = 512,
46 ATA_MAX_SECTORS_128 = 128, 46 ATA_MAX_SECTORS_128 = 128,
47 ATA_MAX_SECTORS = 256, 47 ATA_MAX_SECTORS = 256,
48 ATA_MAX_SECTORS_1024 = 1024,
48 ATA_MAX_SECTORS_LBA48 = 65535,/* TODO: 65536? */ 49 ATA_MAX_SECTORS_LBA48 = 65535,/* TODO: 65536? */
49 ATA_MAX_SECTORS_TAPE = 65535, 50 ATA_MAX_SECTORS_TAPE = 65535,
50 51
diff --git a/include/linux/cper.h b/include/linux/cper.h
index 76abba4b238e..dcacb1a72e26 100644
--- a/include/linux/cper.h
+++ b/include/linux/cper.h
@@ -340,7 +340,27 @@ struct cper_ia_proc_ctx {
340 __u64 mm_reg_addr; 340 __u64 mm_reg_addr;
341}; 341};
342 342
343/* Memory Error Section */ 343/* Old Memory Error Section UEFI 2.1, 2.2 */
344struct cper_sec_mem_err_old {
345 __u64 validation_bits;
346 __u64 error_status;
347 __u64 physical_addr;
348 __u64 physical_addr_mask;
349 __u16 node;
350 __u16 card;
351 __u16 module;
352 __u16 bank;
353 __u16 device;
354 __u16 row;
355 __u16 column;
356 __u16 bit_pos;
357 __u64 requestor_id;
358 __u64 responder_id;
359 __u64 target_id;
360 __u8 error_type;
361};
362
363/* Memory Error Section UEFI >= 2.3 */
344struct cper_sec_mem_err { 364struct cper_sec_mem_err {
345 __u64 validation_bits; 365 __u64 validation_bits;
346 __u64 error_status; 366 __u64 error_status;
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 29ad97c34fd5..bde1e567b3a9 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -62,6 +62,7 @@ struct cpufreq_policy {
62 /* CPUs sharing clock, require sw coordination */ 62 /* CPUs sharing clock, require sw coordination */
63 cpumask_var_t cpus; /* Online CPUs only */ 63 cpumask_var_t cpus; /* Online CPUs only */
64 cpumask_var_t related_cpus; /* Online + Offline CPUs */ 64 cpumask_var_t related_cpus; /* Online + Offline CPUs */
65 cpumask_var_t real_cpus; /* Related and present */
65 66
66 unsigned int shared_type; /* ACPI: ANY or ALL affected CPUs 67 unsigned int shared_type; /* ACPI: ANY or ALL affected CPUs
67 should set cpufreq */ 68 should set cpufreq */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index cc008c338f5a..84b783f277f7 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -55,7 +55,8 @@ struct vm_fault;
55 55
56extern void __init inode_init(void); 56extern void __init inode_init(void);
57extern void __init inode_init_early(void); 57extern void __init inode_init_early(void);
58extern void __init files_init(unsigned long); 58extern void __init files_init(void);
59extern void __init files_maxfiles_init(void);
59 60
60extern struct files_stat_struct files_stat; 61extern struct files_stat_struct files_stat;
61extern unsigned long get_max_files(void); 62extern unsigned long get_max_files(void);
@@ -2245,7 +2246,7 @@ extern int ioctl_preallocate(struct file *filp, void __user *argp);
2245 2246
2246/* fs/dcache.c */ 2247/* fs/dcache.c */
2247extern void __init vfs_caches_init_early(void); 2248extern void __init vfs_caches_init_early(void);
2248extern void __init vfs_caches_init(unsigned long); 2249extern void __init vfs_caches_init(void);
2249 2250
2250extern struct kmem_cache *names_cachep; 2251extern struct kmem_cache *names_cachep;
2251 2252
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 1da602982cf9..6cd8c0ee4b6f 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -116,6 +116,7 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
116 * SAVE_REGS. If another ops with this flag set is already registered 116 * SAVE_REGS. If another ops with this flag set is already registered
117 * for any of the functions that this ops will be registered for, then 117 * for any of the functions that this ops will be registered for, then
118 * this ops will fail to register or set_filter_ip. 118 * this ops will fail to register or set_filter_ip.
119 * PID - Is affected by set_ftrace_pid (allows filtering on those pids)
119 */ 120 */
120enum { 121enum {
121 FTRACE_OPS_FL_ENABLED = 1 << 0, 122 FTRACE_OPS_FL_ENABLED = 1 << 0,
@@ -132,6 +133,7 @@ enum {
132 FTRACE_OPS_FL_MODIFYING = 1 << 11, 133 FTRACE_OPS_FL_MODIFYING = 1 << 11,
133 FTRACE_OPS_FL_ALLOC_TRAMP = 1 << 12, 134 FTRACE_OPS_FL_ALLOC_TRAMP = 1 << 12,
134 FTRACE_OPS_FL_IPMODIFY = 1 << 13, 135 FTRACE_OPS_FL_IPMODIFY = 1 << 13,
136 FTRACE_OPS_FL_PID = 1 << 14,
135}; 137};
136 138
137#ifdef CONFIG_DYNAMIC_FTRACE 139#ifdef CONFIG_DYNAMIC_FTRACE
@@ -159,6 +161,7 @@ struct ftrace_ops {
159 struct ftrace_ops *next; 161 struct ftrace_ops *next;
160 unsigned long flags; 162 unsigned long flags;
161 void *private; 163 void *private;
164 ftrace_func_t saved_func;
162 int __percpu *disabled; 165 int __percpu *disabled;
163#ifdef CONFIG_DYNAMIC_FTRACE 166#ifdef CONFIG_DYNAMIC_FTRACE
164 int nr_trampolines; 167 int nr_trampolines;
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 36ce37bcc963..c9cfbcdb8d14 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -431,6 +431,8 @@ enum {
431 ATA_HORKAGE_WD_BROKEN_LPM = (1 << 21), /* some WDs have broken LPM */ 431 ATA_HORKAGE_WD_BROKEN_LPM = (1 << 21), /* some WDs have broken LPM */
432 ATA_HORKAGE_ZERO_AFTER_TRIM = (1 << 22),/* guarantees zero after trim */ 432 ATA_HORKAGE_ZERO_AFTER_TRIM = (1 << 22),/* guarantees zero after trim */
433 ATA_HORKAGE_NO_NCQ_LOG = (1 << 23), /* don't use NCQ for log read */ 433 ATA_HORKAGE_NO_NCQ_LOG = (1 << 23), /* don't use NCQ for log read */
434 ATA_HORKAGE_NOTRIM = (1 << 24), /* don't use TRIM */
435 ATA_HORKAGE_MAX_SEC_1024 = (1 << 25), /* Limit max sects to 1024 */
434 436
435 /* DMA mask for user DMA control: User visible values; DO NOT 437 /* DMA mask for user DMA control: User visible values; DO NOT
436 renumber */ 438 renumber */
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index f25e2bdd188c..272f42952f34 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -178,17 +178,17 @@ typedef enum {
178/* Chip may not exist, so silence any errors in scan */ 178/* Chip may not exist, so silence any errors in scan */
179#define NAND_SCAN_SILENT_NODEV 0x00040000 179#define NAND_SCAN_SILENT_NODEV 0x00040000
180/* 180/*
181 * This option could be defined by controller drivers to protect against
182 * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers
183 */
184#define NAND_USE_BOUNCE_BUFFER 0x00080000
185/*
186 * Autodetect nand buswidth with readid/onfi. 181 * Autodetect nand buswidth with readid/onfi.
187 * This suppose the driver will configure the hardware in 8 bits mode 182 * This suppose the driver will configure the hardware in 8 bits mode
188 * when calling nand_scan_ident, and update its configuration 183 * when calling nand_scan_ident, and update its configuration
189 * before calling nand_scan_tail. 184 * before calling nand_scan_tail.
190 */ 185 */
191#define NAND_BUSWIDTH_AUTO 0x00080000 186#define NAND_BUSWIDTH_AUTO 0x00080000
187/*
188 * This option could be defined by controller drivers to protect against
189 * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers
190 */
191#define NAND_USE_BOUNCE_BUFFER 0x00100000
192 192
193/* Options set by nand scan */ 193/* Options set by nand scan */
194/* Nand scan has allocated controller struct */ 194/* Nand scan has allocated controller struct */
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index f91b5ade30c9..874b77228fb9 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -292,9 +292,12 @@ static inline void nfs_mark_for_revalidate(struct inode *inode)
292 struct nfs_inode *nfsi = NFS_I(inode); 292 struct nfs_inode *nfsi = NFS_I(inode);
293 293
294 spin_lock(&inode->i_lock); 294 spin_lock(&inode->i_lock);
295 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS; 295 nfsi->cache_validity |= NFS_INO_INVALID_ATTR |
296 NFS_INO_REVAL_PAGECACHE |
297 NFS_INO_INVALID_ACCESS |
298 NFS_INO_INVALID_ACL;
296 if (S_ISDIR(inode->i_mode)) 299 if (S_ISDIR(inode->i_mode))
297 nfsi->cache_validity |= NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA; 300 nfsi->cache_validity |= NFS_INO_INVALID_DATA;
298 spin_unlock(&inode->i_lock); 301 spin_unlock(&inode->i_lock);
299} 302}
300 303
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index a2ea1491d3df..20bc8e51b161 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -220,7 +220,7 @@ struct nfs_server {
220#define NFS_CAP_SYMLINKS (1U << 2) 220#define NFS_CAP_SYMLINKS (1U << 2)
221#define NFS_CAP_ACLS (1U << 3) 221#define NFS_CAP_ACLS (1U << 3)
222#define NFS_CAP_ATOMIC_OPEN (1U << 4) 222#define NFS_CAP_ATOMIC_OPEN (1U << 4)
223#define NFS_CAP_CHANGE_ATTR (1U << 5) 223/* #define NFS_CAP_CHANGE_ATTR (1U << 5) */
224#define NFS_CAP_FILEID (1U << 6) 224#define NFS_CAP_FILEID (1U << 6)
225#define NFS_CAP_MODE (1U << 7) 225#define NFS_CAP_MODE (1U << 7)
226#define NFS_CAP_NLINK (1U << 8) 226#define NFS_CAP_NLINK (1U << 8)
diff --git a/include/linux/of_device.h b/include/linux/of_device.h
index 4c508549833a..cc7dd687a89d 100644
--- a/include/linux/of_device.h
+++ b/include/linux/of_device.h
@@ -59,7 +59,7 @@ void of_dma_configure(struct device *dev, struct device_node *np);
59#else /* CONFIG_OF */ 59#else /* CONFIG_OF */
60 60
61static inline int of_driver_match_device(struct device *dev, 61static inline int of_driver_match_device(struct device *dev,
62 struct device_driver *drv) 62 const struct device_driver *drv)
63{ 63{
64 return 0; 64 return 0;
65} 65}
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index f34e040b34e9..41c93844fb1d 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -631,15 +631,19 @@ static inline void ClearPageSlabPfmemalloc(struct page *page)
631 1 << PG_private | 1 << PG_private_2 | \ 631 1 << PG_private | 1 << PG_private_2 | \
632 1 << PG_writeback | 1 << PG_reserved | \ 632 1 << PG_writeback | 1 << PG_reserved | \
633 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \ 633 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \
634 1 << PG_unevictable | __PG_MLOCKED | __PG_HWPOISON | \ 634 1 << PG_unevictable | __PG_MLOCKED | \
635 __PG_COMPOUND_LOCK) 635 __PG_COMPOUND_LOCK)
636 636
637/* 637/*
638 * Flags checked when a page is prepped for return by the page allocator. 638 * Flags checked when a page is prepped for return by the page allocator.
639 * Pages being prepped should not have any flags set. It they are set, 639 * Pages being prepped should not have these flags set. It they are set,
640 * there has been a kernel bug or struct page corruption. 640 * there has been a kernel bug or struct page corruption.
641 *
642 * __PG_HWPOISON is exceptional because it needs to be kept beyond page's
643 * alloc-free cycle to prevent from reusing the page.
641 */ 644 */
642#define PAGE_FLAGS_CHECK_AT_PREP ((1 << NR_PAGEFLAGS) - 1) 645#define PAGE_FLAGS_CHECK_AT_PREP \
646 (((1 << NR_PAGEFLAGS) - 1) & ~__PG_HWPOISON)
643 647
644#define PAGE_FLAGS_PRIVATE \ 648#define PAGE_FLAGS_PRIVATE \
645 (1 << PG_private | 1 << PG_private_2) 649 (1 << PG_private | 1 << PG_private_2)
diff --git a/include/linux/platform_data/macb.h b/include/linux/platform_data/macb.h
index 044a124bfbbc..21b15f6fee25 100644
--- a/include/linux/platform_data/macb.h
+++ b/include/linux/platform_data/macb.h
@@ -8,11 +8,19 @@
8#ifndef __MACB_PDATA_H__ 8#ifndef __MACB_PDATA_H__
9#define __MACB_PDATA_H__ 9#define __MACB_PDATA_H__
10 10
11/**
12 * struct macb_platform_data - platform data for MACB Ethernet
13 * @phy_mask: phy mask passed when register the MDIO bus
14 * within the driver
15 * @phy_irq_pin: PHY IRQ
16 * @is_rmii: using RMII interface?
17 * @rev_eth_addr: reverse Ethernet address byte order
18 */
11struct macb_platform_data { 19struct macb_platform_data {
12 u32 phy_mask; 20 u32 phy_mask;
13 int phy_irq_pin; /* PHY IRQ */ 21 int phy_irq_pin;
14 u8 is_rmii; /* using RMII interface? */ 22 u8 is_rmii;
15 u8 rev_eth_addr; /* reverse Ethernet address byte order */ 23 u8 rev_eth_addr;
16}; 24};
17 25
18#endif /* __MACB_PDATA_H__ */ 26#endif /* __MACB_PDATA_H__ */
diff --git a/include/linux/platform_data/mmc-esdhc-imx.h b/include/linux/platform_data/mmc-esdhc-imx.h
index 75f70f6ac137..e1571efa3f2b 100644
--- a/include/linux/platform_data/mmc-esdhc-imx.h
+++ b/include/linux/platform_data/mmc-esdhc-imx.h
@@ -43,7 +43,6 @@ struct esdhc_platform_data {
43 enum wp_types wp_type; 43 enum wp_types wp_type;
44 enum cd_types cd_type; 44 enum cd_types cd_type;
45 int max_bus_width; 45 int max_bus_width;
46 unsigned int f_max;
47 bool support_vsel; 46 bool support_vsel;
48 unsigned int delay_line; 47 unsigned int delay_line;
49}; 48};
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index d6cdd6e87d53..22b6d9ca1654 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -2884,11 +2884,11 @@ static inline bool skb_defer_rx_timestamp(struct sk_buff *skb)
2884 * 2884 *
2885 * PHY drivers may accept clones of transmitted packets for 2885 * PHY drivers may accept clones of transmitted packets for
2886 * timestamping via their phy_driver.txtstamp method. These drivers 2886 * timestamping via their phy_driver.txtstamp method. These drivers
2887 * must call this function to return the skb back to the stack, with 2887 * must call this function to return the skb back to the stack with a
2888 * or without a timestamp. 2888 * timestamp.
2889 * 2889 *
2890 * @skb: clone of the the original outgoing packet 2890 * @skb: clone of the the original outgoing packet
2891 * @hwtstamps: hardware time stamps, may be NULL if not available 2891 * @hwtstamps: hardware time stamps
2892 * 2892 *
2893 */ 2893 */
2894void skb_complete_tx_timestamp(struct sk_buff *skb, 2894void skb_complete_tx_timestamp(struct sk_buff *skb,
diff --git a/include/net/act_api.h b/include/net/act_api.h
index 3ee4c92afd1b..931738bc5bba 100644
--- a/include/net/act_api.h
+++ b/include/net/act_api.h
@@ -99,7 +99,6 @@ struct tc_action_ops {
99 99
100int tcf_hash_search(struct tc_action *a, u32 index); 100int tcf_hash_search(struct tc_action *a, u32 index);
101void tcf_hash_destroy(struct tc_action *a); 101void tcf_hash_destroy(struct tc_action *a);
102int tcf_hash_release(struct tc_action *a, int bind);
103u32 tcf_hash_new_index(struct tcf_hashinfo *hinfo); 102u32 tcf_hash_new_index(struct tcf_hashinfo *hinfo);
104int tcf_hash_check(u32 index, struct tc_action *a, int bind); 103int tcf_hash_check(u32 index, struct tc_action *a, int bind);
105int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a, 104int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
@@ -107,6 +106,13 @@ int tcf_hash_create(u32 index, struct nlattr *est, struct tc_action *a,
107void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est); 106void tcf_hash_cleanup(struct tc_action *a, struct nlattr *est);
108void tcf_hash_insert(struct tc_action *a); 107void tcf_hash_insert(struct tc_action *a);
109 108
109int __tcf_hash_release(struct tc_action *a, bool bind, bool strict);
110
111static inline int tcf_hash_release(struct tc_action *a, bool bind)
112{
113 return __tcf_hash_release(a, bind, false);
114}
115
110int tcf_register_action(struct tc_action_ops *a, unsigned int mask); 116int tcf_register_action(struct tc_action_ops *a, unsigned int mask);
111int tcf_unregister_action(struct tc_action_ops *a); 117int tcf_unregister_action(struct tc_action_ops *a);
112int tcf_action_destroy(struct list_head *actions, int bind); 118int tcf_action_destroy(struct list_head *actions, int bind);
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index a741678f24a2..883fe1e7c5a1 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -4868,6 +4868,23 @@ bool cfg80211_reg_can_beacon(struct wiphy *wiphy,
4868 struct cfg80211_chan_def *chandef, 4868 struct cfg80211_chan_def *chandef,
4869 enum nl80211_iftype iftype); 4869 enum nl80211_iftype iftype);
4870 4870
4871/**
4872 * cfg80211_reg_can_beacon_relax - check if beaconing is allowed with relaxation
4873 * @wiphy: the wiphy
4874 * @chandef: the channel definition
4875 * @iftype: interface type
4876 *
4877 * Return: %true if there is no secondary channel or the secondary channel(s)
4878 * can be used for beaconing (i.e. is not a radar channel etc.). This version
4879 * also checks if IR-relaxation conditions apply, to allow beaconing under
4880 * more permissive conditions.
4881 *
4882 * Requires the RTNL to be held.
4883 */
4884bool cfg80211_reg_can_beacon_relax(struct wiphy *wiphy,
4885 struct cfg80211_chan_def *chandef,
4886 enum nl80211_iftype iftype);
4887
4871/* 4888/*
4872 * cfg80211_ch_switch_notify - update wdev channel and notify userspace 4889 * cfg80211_ch_switch_notify - update wdev channel and notify userspace
4873 * @dev: the device which switched channels 4890 * @dev: the device which switched channels
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index e1300b3dd597..53eead2da743 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -21,13 +21,11 @@ struct netns_frags {
21 * @INET_FRAG_FIRST_IN: first fragment has arrived 21 * @INET_FRAG_FIRST_IN: first fragment has arrived
22 * @INET_FRAG_LAST_IN: final fragment has arrived 22 * @INET_FRAG_LAST_IN: final fragment has arrived
23 * @INET_FRAG_COMPLETE: frag queue has been processed and is due for destruction 23 * @INET_FRAG_COMPLETE: frag queue has been processed and is due for destruction
24 * @INET_FRAG_EVICTED: frag queue is being evicted
25 */ 24 */
26enum { 25enum {
27 INET_FRAG_FIRST_IN = BIT(0), 26 INET_FRAG_FIRST_IN = BIT(0),
28 INET_FRAG_LAST_IN = BIT(1), 27 INET_FRAG_LAST_IN = BIT(1),
29 INET_FRAG_COMPLETE = BIT(2), 28 INET_FRAG_COMPLETE = BIT(2),
30 INET_FRAG_EVICTED = BIT(3)
31}; 29};
32 30
33/** 31/**
@@ -45,6 +43,7 @@ enum {
45 * @flags: fragment queue flags 43 * @flags: fragment queue flags
46 * @max_size: maximum received fragment size 44 * @max_size: maximum received fragment size
47 * @net: namespace that this frag belongs to 45 * @net: namespace that this frag belongs to
46 * @list_evictor: list of queues to forcefully evict (e.g. due to low memory)
48 */ 47 */
49struct inet_frag_queue { 48struct inet_frag_queue {
50 spinlock_t lock; 49 spinlock_t lock;
@@ -59,6 +58,7 @@ struct inet_frag_queue {
59 __u8 flags; 58 __u8 flags;
60 u16 max_size; 59 u16 max_size;
61 struct netns_frags *net; 60 struct netns_frags *net;
61 struct hlist_node list_evictor;
62}; 62};
63 63
64#define INETFRAGS_HASHSZ 1024 64#define INETFRAGS_HASHSZ 1024
@@ -125,6 +125,11 @@ static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f
125 inet_frag_destroy(q, f); 125 inet_frag_destroy(q, f);
126} 126}
127 127
128static inline bool inet_frag_evicting(struct inet_frag_queue *q)
129{
130 return !hlist_unhashed(&q->list_evictor);
131}
132
128/* Memory Tracking Functions. */ 133/* Memory Tracking Functions. */
129 134
130/* The default percpu_counter batch size is not big enough to scale to 135/* The default percpu_counter batch size is not big enough to scale to
@@ -139,14 +144,14 @@ static inline int frag_mem_limit(struct netns_frags *nf)
139 return percpu_counter_read(&nf->mem); 144 return percpu_counter_read(&nf->mem);
140} 145}
141 146
142static inline void sub_frag_mem_limit(struct inet_frag_queue *q, int i) 147static inline void sub_frag_mem_limit(struct netns_frags *nf, int i)
143{ 148{
144 __percpu_counter_add(&q->net->mem, -i, frag_percpu_counter_batch); 149 __percpu_counter_add(&nf->mem, -i, frag_percpu_counter_batch);
145} 150}
146 151
147static inline void add_frag_mem_limit(struct inet_frag_queue *q, int i) 152static inline void add_frag_mem_limit(struct netns_frags *nf, int i)
148{ 153{
149 __percpu_counter_add(&q->net->mem, i, frag_percpu_counter_batch); 154 __percpu_counter_add(&nf->mem, i, frag_percpu_counter_batch);
150} 155}
151 156
152static inline void init_frag_mem_limit(struct netns_frags *nf) 157static inline void init_frag_mem_limit(struct netns_frags *nf)
diff --git a/include/net/ip.h b/include/net/ip.h
index 0750a186ea63..d5fe9f2ab699 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -161,6 +161,7 @@ static inline __u8 get_rtconn_flags(struct ipcm_cookie* ipc, struct sock* sk)
161} 161}
162 162
163/* datagram.c */ 163/* datagram.c */
164int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
164int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len); 165int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
165 166
166void ip4_datagram_release_cb(struct sock *sk); 167void ip4_datagram_release_cb(struct sock *sk);
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 49c142bdf01e..5fa643b4e891 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -183,7 +183,6 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
183struct fib_table { 183struct fib_table {
184 struct hlist_node tb_hlist; 184 struct hlist_node tb_hlist;
185 u32 tb_id; 185 u32 tb_id;
186 int tb_default;
187 int tb_num_default; 186 int tb_num_default;
188 struct rcu_head rcu; 187 struct rcu_head rcu;
189 unsigned long *tb_data; 188 unsigned long *tb_data;
@@ -290,7 +289,7 @@ __be32 fib_compute_spec_dst(struct sk_buff *skb);
290int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, 289int fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst,
291 u8 tos, int oif, struct net_device *dev, 290 u8 tos, int oif, struct net_device *dev,
292 struct in_device *idev, u32 *itag); 291 struct in_device *idev, u32 *itag);
293void fib_select_default(struct fib_result *res); 292void fib_select_default(const struct flowi4 *flp, struct fib_result *res);
294#ifdef CONFIG_IP_ROUTE_CLASSID 293#ifdef CONFIG_IP_ROUTE_CLASSID
295static inline int fib_num_tclassid_users(struct net *net) 294static inline int fib_num_tclassid_users(struct net *net)
296{ 295{
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 095433b8a8b0..37cd3911d5c5 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -291,7 +291,7 @@ extern unsigned int nf_conntrack_max;
291extern unsigned int nf_conntrack_hash_rnd; 291extern unsigned int nf_conntrack_hash_rnd;
292void init_nf_conntrack_hash_rnd(void); 292void init_nf_conntrack_hash_rnd(void);
293 293
294void nf_conntrack_tmpl_insert(struct net *net, struct nf_conn *tmpl); 294struct nf_conn *nf_ct_tmpl_alloc(struct net *net, u16 zone, gfp_t flags);
295 295
296#define NF_CT_STAT_INC(net, count) __this_cpu_inc((net)->ct.stat->count) 296#define NF_CT_STAT_INC(net, count) __this_cpu_inc((net)->ct.stat->count)
297#define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count) 297#define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count)
diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
index 29d6a94db54d..723b61c82b3f 100644
--- a/include/net/netns/conntrack.h
+++ b/include/net/netns/conntrack.h
@@ -68,7 +68,6 @@ struct ct_pcpu {
68 spinlock_t lock; 68 spinlock_t lock;
69 struct hlist_nulls_head unconfirmed; 69 struct hlist_nulls_head unconfirmed;
70 struct hlist_nulls_head dying; 70 struct hlist_nulls_head dying;
71 struct hlist_nulls_head tmpl;
72}; 71};
73 72
74struct netns_ct { 73struct netns_ct {
diff --git a/include/net/sock.h b/include/net/sock.h
index 05a8c1aea251..f21f0708ec59 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -902,7 +902,7 @@ void sk_stream_kill_queues(struct sock *sk);
902void sk_set_memalloc(struct sock *sk); 902void sk_set_memalloc(struct sock *sk);
903void sk_clear_memalloc(struct sock *sk); 903void sk_clear_memalloc(struct sock *sk);
904 904
905int sk_wait_data(struct sock *sk, long *timeo); 905int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb);
906 906
907struct request_sock_ops; 907struct request_sock_ops;
908struct timewait_sock_ops; 908struct timewait_sock_ops;
diff --git a/include/target/iscsi/iscsi_target_core.h b/include/target/iscsi/iscsi_target_core.h
index 34117b8b72e4..0aedbb2c10e0 100644
--- a/include/target/iscsi/iscsi_target_core.h
+++ b/include/target/iscsi/iscsi_target_core.h
@@ -595,6 +595,7 @@ struct iscsi_conn {
595 int bitmap_id; 595 int bitmap_id;
596 int rx_thread_active; 596 int rx_thread_active;
597 struct task_struct *rx_thread; 597 struct task_struct *rx_thread;
598 struct completion rx_login_comp;
598 int tx_thread_active; 599 int tx_thread_active;
599 struct task_struct *tx_thread; 600 struct task_struct *tx_thread;
600 /* list_head for session connection list */ 601 /* list_head for session connection list */
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index b6fce900a833..fbdd11851725 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -32,7 +32,7 @@
32#ifndef __AMDGPU_DRM_H__ 32#ifndef __AMDGPU_DRM_H__
33#define __AMDGPU_DRM_H__ 33#define __AMDGPU_DRM_H__
34 34
35#include <drm/drm.h> 35#include "drm.h"
36 36
37#define DRM_AMDGPU_GEM_CREATE 0x00 37#define DRM_AMDGPU_GEM_CREATE 0x00
38#define DRM_AMDGPU_GEM_MMAP 0x01 38#define DRM_AMDGPU_GEM_MMAP 0x01
@@ -614,6 +614,8 @@ struct drm_amdgpu_info_device {
614 uint32_t vram_type; 614 uint32_t vram_type;
615 /** video memory bit width*/ 615 /** video memory bit width*/
616 uint32_t vram_bit_width; 616 uint32_t vram_bit_width;
617 /* vce harvesting instance */
618 uint32_t vce_harvest_config;
617}; 619};
618 620
619struct drm_amdgpu_info_hw_ip { 621struct drm_amdgpu_info_hw_ip {
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 192027b4f031..dbd16a2d37db 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -1081,6 +1081,14 @@ struct drm_i915_reg_read {
1081 __u64 offset; 1081 __u64 offset;
1082 __u64 val; /* Return value */ 1082 __u64 val; /* Return value */
1083}; 1083};
1084/* Known registers:
1085 *
1086 * Render engine timestamp - 0x2358 + 64bit - gen7+
1087 * - Note this register returns an invalid value if using the default
1088 * single instruction 8byte read, in order to workaround that use
1089 * offset (0x2538 | 1) instead.
1090 *
1091 */
1084 1092
1085struct drm_i915_reset_stats { 1093struct drm_i915_reset_stats {
1086 __u32 ctx_id; 1094 __u32 ctx_id;
diff --git a/include/uapi/drm/radeon_drm.h b/include/uapi/drm/radeon_drm.h
index 1ef76661e1a1..01aa2a8e3f8d 100644
--- a/include/uapi/drm/radeon_drm.h
+++ b/include/uapi/drm/radeon_drm.h
@@ -33,7 +33,7 @@
33#ifndef __RADEON_DRM_H__ 33#ifndef __RADEON_DRM_H__
34#define __RADEON_DRM_H__ 34#define __RADEON_DRM_H__
35 35
36#include <drm/drm.h> 36#include "drm.h"
37 37
38/* WARNING: If you change any of these defines, make sure to change the 38/* WARNING: If you change any of these defines, make sure to change the
39 * defines in the X server file (radeon_sarea.h) 39 * defines in the X server file (radeon_sarea.h)
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index efe3443572ba..413417f3707b 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -319,6 +319,7 @@
319#define PCI_MSIX_PBA 8 /* Pending Bit Array offset */ 319#define PCI_MSIX_PBA 8 /* Pending Bit Array offset */
320#define PCI_MSIX_PBA_BIR 0x00000007 /* BAR index */ 320#define PCI_MSIX_PBA_BIR 0x00000007 /* BAR index */
321#define PCI_MSIX_PBA_OFFSET 0xfffffff8 /* Offset into specified BAR */ 321#define PCI_MSIX_PBA_OFFSET 0xfffffff8 /* Offset into specified BAR */
322#define PCI_MSIX_FLAGS_BIRMASK PCI_MSIX_PBA_BIR /* deprecated */
322#define PCI_CAP_MSIX_SIZEOF 12 /* size of MSIX registers */ 323#define PCI_CAP_MSIX_SIZEOF 12 /* size of MSIX registers */
323 324
324/* MSI-X Table entry format */ 325/* MSI-X Table entry format */
diff --git a/include/uapi/linux/virtio_net.h b/include/uapi/linux/virtio_net.h
index 7bbee79ca293..ec32293a00db 100644
--- a/include/uapi/linux/virtio_net.h
+++ b/include/uapi/linux/virtio_net.h
@@ -34,6 +34,7 @@
34/* The feature bitmap for virtio net */ 34/* The feature bitmap for virtio net */
35#define VIRTIO_NET_F_CSUM 0 /* Host handles pkts w/ partial csum */ 35#define VIRTIO_NET_F_CSUM 0 /* Host handles pkts w/ partial csum */
36#define VIRTIO_NET_F_GUEST_CSUM 1 /* Guest handles pkts w/ partial csum */ 36#define VIRTIO_NET_F_GUEST_CSUM 1 /* Guest handles pkts w/ partial csum */
37#define VIRTIO_NET_F_CTRL_GUEST_OFFLOADS 2 /* Dynamic offload configuration. */
37#define VIRTIO_NET_F_MAC 5 /* Host has given MAC address. */ 38#define VIRTIO_NET_F_MAC 5 /* Host has given MAC address. */
38#define VIRTIO_NET_F_GUEST_TSO4 7 /* Guest can handle TSOv4 in. */ 39#define VIRTIO_NET_F_GUEST_TSO4 7 /* Guest can handle TSOv4 in. */
39#define VIRTIO_NET_F_GUEST_TSO6 8 /* Guest can handle TSOv6 in. */ 40#define VIRTIO_NET_F_GUEST_TSO6 8 /* Guest can handle TSOv6 in. */
@@ -226,4 +227,19 @@ struct virtio_net_ctrl_mq {
226 #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN 1 227 #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MIN 1
227 #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX 0x8000 228 #define VIRTIO_NET_CTRL_MQ_VQ_PAIRS_MAX 0x8000
228 229
230/*
231 * Control network offloads
232 *
233 * Reconfigures the network offloads that Guest can handle.
234 *
235 * Available with the VIRTIO_NET_F_CTRL_GUEST_OFFLOADS feature bit.
236 *
237 * Command data format matches the feature bit mask exactly.
238 *
239 * See VIRTIO_NET_F_GUEST_* for the list of offloads
240 * that can be enabled/disabled.
241 */
242#define VIRTIO_NET_CTRL_GUEST_OFFLOADS 5
243#define VIRTIO_NET_CTRL_GUEST_OFFLOADS_SET 0
244
229#endif /* _LINUX_VIRTIO_NET_H */ 245#endif /* _LINUX_VIRTIO_NET_H */
diff --git a/include/uapi/linux/virtio_pci.h b/include/uapi/linux/virtio_pci.h
index 75301468359f..90007a1abcab 100644
--- a/include/uapi/linux/virtio_pci.h
+++ b/include/uapi/linux/virtio_pci.h
@@ -157,6 +157,12 @@ struct virtio_pci_common_cfg {
157 __le32 queue_used_hi; /* read-write */ 157 __le32 queue_used_hi; /* read-write */
158}; 158};
159 159
160/* Fields in VIRTIO_PCI_CAP_PCI_CFG: */
161struct virtio_pci_cfg_cap {
162 struct virtio_pci_cap cap;
163 __u8 pci_cfg_data[4]; /* Data for BAR access. */
164};
165
160/* Macro versions of offsets for the Old Timers! */ 166/* Macro versions of offsets for the Old Timers! */
161#define VIRTIO_PCI_CAP_VNDR 0 167#define VIRTIO_PCI_CAP_VNDR 0
162#define VIRTIO_PCI_CAP_NEXT 1 168#define VIRTIO_PCI_CAP_NEXT 1
diff --git a/include/uapi/linux/virtio_ring.h b/include/uapi/linux/virtio_ring.h
index 915980ac68df..c07295969b7e 100644
--- a/include/uapi/linux/virtio_ring.h
+++ b/include/uapi/linux/virtio_ring.h
@@ -31,6 +31,9 @@
31 * SUCH DAMAGE. 31 * SUCH DAMAGE.
32 * 32 *
33 * Copyright Rusty Russell IBM Corporation 2007. */ 33 * Copyright Rusty Russell IBM Corporation 2007. */
34#ifndef __KERNEL__
35#include <stdint.h>
36#endif
34#include <linux/types.h> 37#include <linux/types.h>
35#include <linux/virtio_types.h> 38#include <linux/virtio_types.h>
36 39
@@ -143,7 +146,7 @@ static inline void vring_init(struct vring *vr, unsigned int num, void *p,
143 vr->num = num; 146 vr->num = num;
144 vr->desc = p; 147 vr->desc = p;
145 vr->avail = p + num*sizeof(struct vring_desc); 148 vr->avail = p + num*sizeof(struct vring_desc);
146 vr->used = (void *)(((unsigned long)&vr->avail->ring[num] + sizeof(__virtio16) 149 vr->used = (void *)(((uintptr_t)&vr->avail->ring[num] + sizeof(__virtio16)
147 + align-1) & ~(align - 1)); 150 + align-1) & ~(align - 1));
148} 151}
149 152
diff --git a/include/uapi/sound/asoc.h b/include/uapi/sound/asoc.h
index 12215205ab8d..51b8066a223b 100644
--- a/include/uapi/sound/asoc.h
+++ b/include/uapi/sound/asoc.h
@@ -77,7 +77,7 @@
77#define SND_SOC_TPLG_NUM_TEXTS 16 77#define SND_SOC_TPLG_NUM_TEXTS 16
78 78
79/* ABI version */ 79/* ABI version */
80#define SND_SOC_TPLG_ABI_VERSION 0x2 80#define SND_SOC_TPLG_ABI_VERSION 0x3
81 81
82/* Max size of TLV data */ 82/* Max size of TLV data */
83#define SND_SOC_TPLG_TLV_SIZE 32 83#define SND_SOC_TPLG_TLV_SIZE 32
@@ -97,7 +97,8 @@
97#define SND_SOC_TPLG_TYPE_PCM 7 97#define SND_SOC_TPLG_TYPE_PCM 7
98#define SND_SOC_TPLG_TYPE_MANIFEST 8 98#define SND_SOC_TPLG_TYPE_MANIFEST 8
99#define SND_SOC_TPLG_TYPE_CODEC_LINK 9 99#define SND_SOC_TPLG_TYPE_CODEC_LINK 9
100#define SND_SOC_TPLG_TYPE_MAX SND_SOC_TPLG_TYPE_CODEC_LINK 100#define SND_SOC_TPLG_TYPE_PDATA 10
101#define SND_SOC_TPLG_TYPE_MAX SND_SOC_TPLG_TYPE_PDATA
101 102
102/* vendor block IDs - please add new vendor types to end */ 103/* vendor block IDs - please add new vendor types to end */
103#define SND_SOC_TPLG_TYPE_VENDOR_FW 1000 104#define SND_SOC_TPLG_TYPE_VENDOR_FW 1000
@@ -110,7 +111,7 @@
110 111
111/* 112/*
112 * Block Header. 113 * Block Header.
113 * This header preceeds all object and object arrays below. 114 * This header precedes all object and object arrays below.
114 */ 115 */
115struct snd_soc_tplg_hdr { 116struct snd_soc_tplg_hdr {
116 __le32 magic; /* magic number */ 117 __le32 magic; /* magic number */
@@ -137,11 +138,19 @@ struct snd_soc_tplg_private {
137/* 138/*
138 * Kcontrol TLV data. 139 * Kcontrol TLV data.
139 */ 140 */
141struct snd_soc_tplg_tlv_dbscale {
142 __le32 min;
143 __le32 step;
144 __le32 mute;
145} __attribute__((packed));
146
140struct snd_soc_tplg_ctl_tlv { 147struct snd_soc_tplg_ctl_tlv {
141 __le32 size; /* in bytes aligned to 4 */ 148 __le32 size; /* in bytes of this structure */
142 __le32 numid; /* control element numeric identification */ 149 __le32 type; /* SNDRV_CTL_TLVT_*, type of TLV */
143 __le32 count; /* number of elem in data array */ 150 union {
144 __le32 data[SND_SOC_TPLG_TLV_SIZE]; 151 __le32 data[SND_SOC_TPLG_TLV_SIZE];
152 struct snd_soc_tplg_tlv_dbscale scale;
153 };
145} __attribute__((packed)); 154} __attribute__((packed));
146 155
147/* 156/*
@@ -155,9 +164,11 @@ struct snd_soc_tplg_channel {
155} __attribute__((packed)); 164} __attribute__((packed));
156 165
157/* 166/*
158 * Kcontrol Operations IDs 167 * Genericl Operations IDs, for binding Kcontrol or Bytes ext ops
168 * Kcontrol ops need get/put/info.
169 * Bytes ext ops need get/put.
159 */ 170 */
160struct snd_soc_tplg_kcontrol_ops_id { 171struct snd_soc_tplg_io_ops {
161 __le32 get; 172 __le32 get;
162 __le32 put; 173 __le32 put;
163 __le32 info; 174 __le32 info;
@@ -171,8 +182,8 @@ struct snd_soc_tplg_ctl_hdr {
171 __le32 type; 182 __le32 type;
172 char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN]; 183 char name[SNDRV_CTL_ELEM_ID_NAME_MAXLEN];
173 __le32 access; 184 __le32 access;
174 struct snd_soc_tplg_kcontrol_ops_id ops; 185 struct snd_soc_tplg_io_ops ops;
175 __le32 tlv_size; /* non zero means control has TLV data */ 186 struct snd_soc_tplg_ctl_tlv tlv;
176} __attribute__((packed)); 187} __attribute__((packed));
177 188
178/* 189/*
@@ -222,7 +233,7 @@ struct snd_soc_tplg_stream_config {
222/* 233/*
223 * Manifest. List totals for each payload type. Not used in parsing, but will 234 * Manifest. List totals for each payload type. Not used in parsing, but will
224 * be passed to the component driver before any other objects in order for any 235 * be passed to the component driver before any other objects in order for any
225 * global componnent resource allocations. 236 * global component resource allocations.
226 * 237 *
227 * File block representation for manifest :- 238 * File block representation for manifest :-
228 * +-----------------------------------+----+ 239 * +-----------------------------------+----+
@@ -238,6 +249,7 @@ struct snd_soc_tplg_manifest {
238 __le32 graph_elems; /* number of graph elements */ 249 __le32 graph_elems; /* number of graph elements */
239 __le32 dai_elems; /* number of DAI elements */ 250 __le32 dai_elems; /* number of DAI elements */
240 __le32 dai_link_elems; /* number of DAI link elements */ 251 __le32 dai_link_elems; /* number of DAI link elements */
252 struct snd_soc_tplg_private priv;
241} __attribute__((packed)); 253} __attribute__((packed));
242 254
243/* 255/*
@@ -259,7 +271,6 @@ struct snd_soc_tplg_mixer_control {
259 __le32 invert; 271 __le32 invert;
260 __le32 num_channels; 272 __le32 num_channels;
261 struct snd_soc_tplg_channel channel[SND_SOC_TPLG_MAX_CHAN]; 273 struct snd_soc_tplg_channel channel[SND_SOC_TPLG_MAX_CHAN];
262 struct snd_soc_tplg_ctl_tlv tlv;
263 struct snd_soc_tplg_private priv; 274 struct snd_soc_tplg_private priv;
264} __attribute__((packed)); 275} __attribute__((packed));
265 276
@@ -303,6 +314,7 @@ struct snd_soc_tplg_bytes_control {
303 __le32 mask; 314 __le32 mask;
304 __le32 base; 315 __le32 base;
305 __le32 num_regs; 316 __le32 num_regs;
317 struct snd_soc_tplg_io_ops ext_ops;
306 struct snd_soc_tplg_private priv; 318 struct snd_soc_tplg_private priv;
307} __attribute__((packed)); 319} __attribute__((packed));
308 320
@@ -347,6 +359,7 @@ struct snd_soc_tplg_dapm_widget {
347 __le32 reg; /* negative reg = no direct dapm */ 359 __le32 reg; /* negative reg = no direct dapm */
348 __le32 shift; /* bits to shift */ 360 __le32 shift; /* bits to shift */
349 __le32 mask; /* non-shifted mask */ 361 __le32 mask; /* non-shifted mask */
362 __le32 subseq; /* sort within widget type */
350 __u32 invert; /* invert the power bit */ 363 __u32 invert; /* invert the power bit */
351 __u32 ignore_suspend; /* kept enabled over suspend */ 364 __u32 ignore_suspend; /* kept enabled over suspend */
352 __u16 event_flags; 365 __u16 event_flags;
diff --git a/init/main.c b/init/main.c
index c5d5626289ce..56506553d4d8 100644
--- a/init/main.c
+++ b/init/main.c
@@ -656,7 +656,7 @@ asmlinkage __visible void __init start_kernel(void)
656 key_init(); 656 key_init();
657 security_init(); 657 security_init();
658 dbg_late_init(); 658 dbg_late_init();
659 vfs_caches_init(totalram_pages); 659 vfs_caches_init();
660 signals_init(); 660 signals_init();
661 /* rootfs populating might need page-writeback */ 661 /* rootfs populating might need page-writeback */
662 page_writeback_init(); 662 page_writeback_init();
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index a24ba9fe5bb8..161a1807e6ef 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -142,7 +142,6 @@ static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
142 if (!leaf) 142 if (!leaf)
143 return -ENOMEM; 143 return -ENOMEM;
144 INIT_LIST_HEAD(&leaf->msg_list); 144 INIT_LIST_HEAD(&leaf->msg_list);
145 info->qsize += sizeof(*leaf);
146 } 145 }
147 leaf->priority = msg->m_type; 146 leaf->priority = msg->m_type;
148 rb_link_node(&leaf->rb_node, parent, p); 147 rb_link_node(&leaf->rb_node, parent, p);
@@ -187,7 +186,6 @@ try_again:
187 "lazy leaf delete!\n"); 186 "lazy leaf delete!\n");
188 rb_erase(&leaf->rb_node, &info->msg_tree); 187 rb_erase(&leaf->rb_node, &info->msg_tree);
189 if (info->node_cache) { 188 if (info->node_cache) {
190 info->qsize -= sizeof(*leaf);
191 kfree(leaf); 189 kfree(leaf);
192 } else { 190 } else {
193 info->node_cache = leaf; 191 info->node_cache = leaf;
@@ -200,7 +198,6 @@ try_again:
200 if (list_empty(&leaf->msg_list)) { 198 if (list_empty(&leaf->msg_list)) {
201 rb_erase(&leaf->rb_node, &info->msg_tree); 199 rb_erase(&leaf->rb_node, &info->msg_tree);
202 if (info->node_cache) { 200 if (info->node_cache) {
203 info->qsize -= sizeof(*leaf);
204 kfree(leaf); 201 kfree(leaf);
205 } else { 202 } else {
206 info->node_cache = leaf; 203 info->node_cache = leaf;
@@ -1034,7 +1031,6 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
1034 /* Save our speculative allocation into the cache */ 1031 /* Save our speculative allocation into the cache */
1035 INIT_LIST_HEAD(&new_leaf->msg_list); 1032 INIT_LIST_HEAD(&new_leaf->msg_list);
1036 info->node_cache = new_leaf; 1033 info->node_cache = new_leaf;
1037 info->qsize += sizeof(*new_leaf);
1038 new_leaf = NULL; 1034 new_leaf = NULL;
1039 } else { 1035 } else {
1040 kfree(new_leaf); 1036 kfree(new_leaf);
@@ -1142,7 +1138,6 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
1142 /* Save our speculative allocation into the cache */ 1138 /* Save our speculative allocation into the cache */
1143 INIT_LIST_HEAD(&new_leaf->msg_list); 1139 INIT_LIST_HEAD(&new_leaf->msg_list);
1144 info->node_cache = new_leaf; 1140 info->node_cache = new_leaf;
1145 info->qsize += sizeof(*new_leaf);
1146 } else { 1141 } else {
1147 kfree(new_leaf); 1142 kfree(new_leaf);
1148 } 1143 }
diff --git a/ipc/sem.c b/ipc/sem.c
index bc3d530cb23e..b471e5a3863d 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -253,6 +253,16 @@ static void sem_rcu_free(struct rcu_head *head)
253} 253}
254 254
255/* 255/*
256 * spin_unlock_wait() and !spin_is_locked() are not memory barriers, they
257 * are only control barriers.
258 * The code must pair with spin_unlock(&sem->lock) or
259 * spin_unlock(&sem_perm.lock), thus just the control barrier is insufficient.
260 *
261 * smp_rmb() is sufficient, as writes cannot pass the control barrier.
262 */
263#define ipc_smp_acquire__after_spin_is_unlocked() smp_rmb()
264
265/*
256 * Wait until all currently ongoing simple ops have completed. 266 * Wait until all currently ongoing simple ops have completed.
257 * Caller must own sem_perm.lock. 267 * Caller must own sem_perm.lock.
258 * New simple ops cannot start, because simple ops first check 268 * New simple ops cannot start, because simple ops first check
@@ -275,6 +285,7 @@ static void sem_wait_array(struct sem_array *sma)
275 sem = sma->sem_base + i; 285 sem = sma->sem_base + i;
276 spin_unlock_wait(&sem->lock); 286 spin_unlock_wait(&sem->lock);
277 } 287 }
288 ipc_smp_acquire__after_spin_is_unlocked();
278} 289}
279 290
280/* 291/*
@@ -327,13 +338,12 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
327 /* Then check that the global lock is free */ 338 /* Then check that the global lock is free */
328 if (!spin_is_locked(&sma->sem_perm.lock)) { 339 if (!spin_is_locked(&sma->sem_perm.lock)) {
329 /* 340 /*
330 * The ipc object lock check must be visible on all 341 * We need a memory barrier with acquire semantics,
331 * cores before rechecking the complex count. Otherwise 342 * otherwise we can race with another thread that does:
332 * we can race with another thread that does:
333 * complex_count++; 343 * complex_count++;
334 * spin_unlock(sem_perm.lock); 344 * spin_unlock(sem_perm.lock);
335 */ 345 */
336 smp_rmb(); 346 ipc_smp_acquire__after_spin_is_unlocked();
337 347
338 /* 348 /*
339 * Now repeat the test of complex_count: 349 * Now repeat the test of complex_count:
@@ -2074,17 +2084,28 @@ void exit_sem(struct task_struct *tsk)
2074 rcu_read_lock(); 2084 rcu_read_lock();
2075 un = list_entry_rcu(ulp->list_proc.next, 2085 un = list_entry_rcu(ulp->list_proc.next,
2076 struct sem_undo, list_proc); 2086 struct sem_undo, list_proc);
2077 if (&un->list_proc == &ulp->list_proc) 2087 if (&un->list_proc == &ulp->list_proc) {
2078 semid = -1; 2088 /*
2079 else 2089 * We must wait for freeary() before freeing this ulp,
2080 semid = un->semid; 2090 * in case we raced with last sem_undo. There is a small
2091 * possibility where we exit while freeary() didn't
2092 * finish unlocking sem_undo_list.
2093 */
2094 spin_unlock_wait(&ulp->lock);
2095 rcu_read_unlock();
2096 break;
2097 }
2098 spin_lock(&ulp->lock);
2099 semid = un->semid;
2100 spin_unlock(&ulp->lock);
2081 2101
2102 /* exit_sem raced with IPC_RMID, nothing to do */
2082 if (semid == -1) { 2103 if (semid == -1) {
2083 rcu_read_unlock(); 2104 rcu_read_unlock();
2084 break; 2105 continue;
2085 } 2106 }
2086 2107
2087 sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid); 2108 sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, semid);
2088 /* exit_sem raced with IPC_RMID, nothing to do */ 2109 /* exit_sem raced with IPC_RMID, nothing to do */
2089 if (IS_ERR(sma)) { 2110 if (IS_ERR(sma)) {
2090 rcu_read_unlock(); 2111 rcu_read_unlock();
@@ -2112,9 +2133,11 @@ void exit_sem(struct task_struct *tsk)
2112 ipc_assert_locked_object(&sma->sem_perm); 2133 ipc_assert_locked_object(&sma->sem_perm);
2113 list_del(&un->list_id); 2134 list_del(&un->list_id);
2114 2135
2115 spin_lock(&ulp->lock); 2136 /* we are the last process using this ulp, acquiring ulp->lock
2137 * isn't required. Besides that, we are also protected against
2138 * IPC_RMID as we hold sma->sem_perm lock now
2139 */
2116 list_del_rcu(&un->list_proc); 2140 list_del_rcu(&un->list_proc);
2117 spin_unlock(&ulp->lock);
2118 2141
2119 /* perform adjustments registered in un */ 2142 /* perform adjustments registered in un */
2120 for (i = 0; i < sma->sem_nsems; i++) { 2143 for (i = 0; i < sma->sem_nsems; i++) {
diff --git a/ipc/shm.c b/ipc/shm.c
index 06e5cf2fe019..4aef24d91b63 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -545,7 +545,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
545 if ((shmflg & SHM_NORESERVE) && 545 if ((shmflg & SHM_NORESERVE) &&
546 sysctl_overcommit_memory != OVERCOMMIT_NEVER) 546 sysctl_overcommit_memory != OVERCOMMIT_NEVER)
547 acctflag = VM_NORESERVE; 547 acctflag = VM_NORESERVE;
548 file = shmem_file_setup(name, size, acctflag); 548 file = shmem_kernel_file_setup(name, size, acctflag);
549 } 549 }
550 error = PTR_ERR(file); 550 error = PTR_ERR(file);
551 if (IS_ERR(file)) 551 if (IS_ERR(file))
diff --git a/kernel/events/core.c b/kernel/events/core.c
index d3dae3419b99..e6feb5114134 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1868,8 +1868,6 @@ event_sched_in(struct perf_event *event,
1868 1868
1869 perf_pmu_disable(event->pmu); 1869 perf_pmu_disable(event->pmu);
1870 1870
1871 event->tstamp_running += tstamp - event->tstamp_stopped;
1872
1873 perf_set_shadow_time(event, ctx, tstamp); 1871 perf_set_shadow_time(event, ctx, tstamp);
1874 1872
1875 perf_log_itrace_start(event); 1873 perf_log_itrace_start(event);
@@ -1881,6 +1879,8 @@ event_sched_in(struct perf_event *event,
1881 goto out; 1879 goto out;
1882 } 1880 }
1883 1881
1882 event->tstamp_running += tstamp - event->tstamp_stopped;
1883
1884 if (!is_software_event(event)) 1884 if (!is_software_event(event))
1885 cpuctx->active_oncpu++; 1885 cpuctx->active_oncpu++;
1886 if (!ctx->nr_active++) 1886 if (!ctx->nr_active++)
@@ -3958,28 +3958,21 @@ static void perf_event_for_each(struct perf_event *event,
3958 perf_event_for_each_child(sibling, func); 3958 perf_event_for_each_child(sibling, func);
3959} 3959}
3960 3960
3961static int perf_event_period(struct perf_event *event, u64 __user *arg) 3961struct period_event {
3962{ 3962 struct perf_event *event;
3963 struct perf_event_context *ctx = event->ctx;
3964 int ret = 0, active;
3965 u64 value; 3963 u64 value;
3964};
3966 3965
3967 if (!is_sampling_event(event)) 3966static int __perf_event_period(void *info)
3968 return -EINVAL; 3967{
3969 3968 struct period_event *pe = info;
3970 if (copy_from_user(&value, arg, sizeof(value))) 3969 struct perf_event *event = pe->event;
3971 return -EFAULT; 3970 struct perf_event_context *ctx = event->ctx;
3972 3971 u64 value = pe->value;
3973 if (!value) 3972 bool active;
3974 return -EINVAL;
3975 3973
3976 raw_spin_lock_irq(&ctx->lock); 3974 raw_spin_lock(&ctx->lock);
3977 if (event->attr.freq) { 3975 if (event->attr.freq) {
3978 if (value > sysctl_perf_event_sample_rate) {
3979 ret = -EINVAL;
3980 goto unlock;
3981 }
3982
3983 event->attr.sample_freq = value; 3976 event->attr.sample_freq = value;
3984 } else { 3977 } else {
3985 event->attr.sample_period = value; 3978 event->attr.sample_period = value;
@@ -3998,11 +3991,53 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
3998 event->pmu->start(event, PERF_EF_RELOAD); 3991 event->pmu->start(event, PERF_EF_RELOAD);
3999 perf_pmu_enable(ctx->pmu); 3992 perf_pmu_enable(ctx->pmu);
4000 } 3993 }
3994 raw_spin_unlock(&ctx->lock);
4001 3995
4002unlock: 3996 return 0;
3997}
3998
3999static int perf_event_period(struct perf_event *event, u64 __user *arg)
4000{
4001 struct period_event pe = { .event = event, };
4002 struct perf_event_context *ctx = event->ctx;
4003 struct task_struct *task;
4004 u64 value;
4005
4006 if (!is_sampling_event(event))
4007 return -EINVAL;
4008
4009 if (copy_from_user(&value, arg, sizeof(value)))
4010 return -EFAULT;
4011
4012 if (!value)
4013 return -EINVAL;
4014
4015 if (event->attr.freq && value > sysctl_perf_event_sample_rate)
4016 return -EINVAL;
4017
4018 task = ctx->task;
4019 pe.value = value;
4020
4021 if (!task) {
4022 cpu_function_call(event->cpu, __perf_event_period, &pe);
4023 return 0;
4024 }
4025
4026retry:
4027 if (!task_function_call(task, __perf_event_period, &pe))
4028 return 0;
4029
4030 raw_spin_lock_irq(&ctx->lock);
4031 if (ctx->is_active) {
4032 raw_spin_unlock_irq(&ctx->lock);
4033 task = ctx->task;
4034 goto retry;
4035 }
4036
4037 __perf_event_period(&pe);
4003 raw_spin_unlock_irq(&ctx->lock); 4038 raw_spin_unlock_irq(&ctx->lock);
4004 4039
4005 return ret; 4040 return 0;
4006} 4041}
4007 4042
4008static const struct file_operations perf_fops; 4043static const struct file_operations perf_fops;
@@ -4740,12 +4775,20 @@ static const struct file_operations perf_fops = {
4740 * to user-space before waking everybody up. 4775 * to user-space before waking everybody up.
4741 */ 4776 */
4742 4777
4778static inline struct fasync_struct **perf_event_fasync(struct perf_event *event)
4779{
4780 /* only the parent has fasync state */
4781 if (event->parent)
4782 event = event->parent;
4783 return &event->fasync;
4784}
4785
4743void perf_event_wakeup(struct perf_event *event) 4786void perf_event_wakeup(struct perf_event *event)
4744{ 4787{
4745 ring_buffer_wakeup(event); 4788 ring_buffer_wakeup(event);
4746 4789
4747 if (event->pending_kill) { 4790 if (event->pending_kill) {
4748 kill_fasync(&event->fasync, SIGIO, event->pending_kill); 4791 kill_fasync(perf_event_fasync(event), SIGIO, event->pending_kill);
4749 event->pending_kill = 0; 4792 event->pending_kill = 0;
4750 } 4793 }
4751} 4794}
@@ -6124,7 +6167,7 @@ static int __perf_event_overflow(struct perf_event *event,
6124 else 6167 else
6125 perf_event_output(event, data, regs); 6168 perf_event_output(event, data, regs);
6126 6169
6127 if (event->fasync && event->pending_kill) { 6170 if (*perf_event_fasync(event) && event->pending_kill) {
6128 event->pending_wakeup = 1; 6171 event->pending_wakeup = 1;
6129 irq_work_queue(&event->pending); 6172 irq_work_queue(&event->pending);
6130 } 6173 }
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index b2be01b1aa9d..c8aa3f75bc4d 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -559,11 +559,13 @@ static void __rb_free_aux(struct ring_buffer *rb)
559 rb->aux_priv = NULL; 559 rb->aux_priv = NULL;
560 } 560 }
561 561
562 for (pg = 0; pg < rb->aux_nr_pages; pg++) 562 if (rb->aux_nr_pages) {
563 rb_free_aux_page(rb, pg); 563 for (pg = 0; pg < rb->aux_nr_pages; pg++)
564 rb_free_aux_page(rb, pg);
564 565
565 kfree(rb->aux_pages); 566 kfree(rb->aux_pages);
566 rb->aux_nr_pages = 0; 567 rb->aux_nr_pages = 0;
568 }
567} 569}
568 570
569void rb_free_aux(struct ring_buffer *rb) 571void rb_free_aux(struct ring_buffer *rb)
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 10e489c448fe..fdea0bee7b5a 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -97,6 +97,7 @@ bool kthread_should_park(void)
97{ 97{
98 return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags); 98 return test_bit(KTHREAD_SHOULD_PARK, &to_kthread(current)->flags);
99} 99}
100EXPORT_SYMBOL_GPL(kthread_should_park);
100 101
101/** 102/**
102 * kthread_freezable_should_stop - should this freezable kthread return now? 103 * kthread_freezable_should_stop - should this freezable kthread return now?
@@ -171,6 +172,7 @@ void kthread_parkme(void)
171{ 172{
172 __kthread_parkme(to_kthread(current)); 173 __kthread_parkme(to_kthread(current));
173} 174}
175EXPORT_SYMBOL_GPL(kthread_parkme);
174 176
175static int kthread(void *_create) 177static int kthread(void *_create)
176{ 178{
@@ -411,6 +413,7 @@ void kthread_unpark(struct task_struct *k)
411 if (kthread) 413 if (kthread)
412 __kthread_unpark(k, kthread); 414 __kthread_unpark(k, kthread);
413} 415}
416EXPORT_SYMBOL_GPL(kthread_unpark);
414 417
415/** 418/**
416 * kthread_park - park a thread created by kthread_create(). 419 * kthread_park - park a thread created by kthread_create().
@@ -441,6 +444,7 @@ int kthread_park(struct task_struct *k)
441 } 444 }
442 return ret; 445 return ret;
443} 446}
447EXPORT_SYMBOL_GPL(kthread_park);
444 448
445/** 449/**
446 * kthread_stop - stop a thread created by kthread_create(). 450 * kthread_stop - stop a thread created by kthread_create().
diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
index 04ab18151cc8..df19ae4debd0 100644
--- a/kernel/locking/qspinlock_paravirt.h
+++ b/kernel/locking/qspinlock_paravirt.h
@@ -4,6 +4,7 @@
4 4
5#include <linux/hash.h> 5#include <linux/hash.h>
6#include <linux/bootmem.h> 6#include <linux/bootmem.h>
7#include <linux/debug_locks.h>
7 8
8/* 9/*
9 * Implement paravirt qspinlocks; the general idea is to halt the vcpus instead 10 * Implement paravirt qspinlocks; the general idea is to halt the vcpus instead
@@ -286,15 +287,23 @@ __visible void __pv_queued_spin_unlock(struct qspinlock *lock)
286{ 287{
287 struct __qspinlock *l = (void *)lock; 288 struct __qspinlock *l = (void *)lock;
288 struct pv_node *node; 289 struct pv_node *node;
290 u8 lockval = cmpxchg(&l->locked, _Q_LOCKED_VAL, 0);
289 291
290 /* 292 /*
291 * We must not unlock if SLOW, because in that case we must first 293 * We must not unlock if SLOW, because in that case we must first
292 * unhash. Otherwise it would be possible to have multiple @lock 294 * unhash. Otherwise it would be possible to have multiple @lock
293 * entries, which would be BAD. 295 * entries, which would be BAD.
294 */ 296 */
295 if (likely(cmpxchg(&l->locked, _Q_LOCKED_VAL, 0) == _Q_LOCKED_VAL)) 297 if (likely(lockval == _Q_LOCKED_VAL))
296 return; 298 return;
297 299
300 if (unlikely(lockval != _Q_SLOW_VAL)) {
301 if (debug_locks_silent)
302 return;
303 WARN(1, "pvqspinlock: lock %p has corrupted value 0x%x!\n", lock, atomic_read(&lock->val));
304 return;
305 }
306
298 /* 307 /*
299 * Since the above failed to release, this must be the SLOW path. 308 * Since the above failed to release, this must be the SLOW path.
300 * Therefore start by looking up the blocked node and unhashing it. 309 * Therefore start by looking up the blocked node and unhashing it.
diff --git a/kernel/module.c b/kernel/module.c
index 4d2b82e610e2..b86b7bf1be38 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -602,13 +602,16 @@ const struct kernel_symbol *find_symbol(const char *name,
602} 602}
603EXPORT_SYMBOL_GPL(find_symbol); 603EXPORT_SYMBOL_GPL(find_symbol);
604 604
605/* Search for module by name: must hold module_mutex. */ 605/*
606 * Search for module by name: must hold module_mutex (or preempt disabled
607 * for read-only access).
608 */
606static struct module *find_module_all(const char *name, size_t len, 609static struct module *find_module_all(const char *name, size_t len,
607 bool even_unformed) 610 bool even_unformed)
608{ 611{
609 struct module *mod; 612 struct module *mod;
610 613
611 module_assert_mutex(); 614 module_assert_mutex_or_preempt();
612 615
613 list_for_each_entry(mod, &modules, list) { 616 list_for_each_entry(mod, &modules, list) {
614 if (!even_unformed && mod->state == MODULE_STATE_UNFORMED) 617 if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
@@ -621,6 +624,7 @@ static struct module *find_module_all(const char *name, size_t len,
621 624
622struct module *find_module(const char *name) 625struct module *find_module(const char *name)
623{ 626{
627 module_assert_mutex();
624 return find_module_all(name, strlen(name), false); 628 return find_module_all(name, strlen(name), false);
625} 629}
626EXPORT_SYMBOL_GPL(find_module); 630EXPORT_SYMBOL_GPL(find_module);
diff --git a/kernel/resource.c b/kernel/resource.c
index 90552aab5f2d..fed052a1bc9f 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -504,13 +504,13 @@ int region_is_ram(resource_size_t start, unsigned long size)
504{ 504{
505 struct resource *p; 505 struct resource *p;
506 resource_size_t end = start + size - 1; 506 resource_size_t end = start + size - 1;
507 int flags = IORESOURCE_MEM | IORESOURCE_BUSY; 507 unsigned long flags = IORESOURCE_MEM | IORESOURCE_BUSY;
508 const char *name = "System RAM"; 508 const char *name = "System RAM";
509 int ret = -1; 509 int ret = -1;
510 510
511 read_lock(&resource_lock); 511 read_lock(&resource_lock);
512 for (p = iomem_resource.child; p ; p = p->sibling) { 512 for (p = iomem_resource.child; p ; p = p->sibling) {
513 if (end < p->start) 513 if (p->end < start)
514 continue; 514 continue;
515 515
516 if (p->start <= start && end <= p->end) { 516 if (p->start <= start && end <= p->end) {
@@ -521,7 +521,7 @@ int region_is_ram(resource_size_t start, unsigned long size)
521 ret = 1; 521 ret = 1;
522 break; 522 break;
523 } 523 }
524 if (p->end < start) 524 if (end < p->start)
525 break; /* not found */ 525 break; /* not found */
526 } 526 }
527 read_unlock(&resource_lock); 527 read_unlock(&resource_lock);
diff --git a/kernel/signal.c b/kernel/signal.c
index 836df8dac6cc..0f6bbbe77b46 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -2748,12 +2748,15 @@ int copy_siginfo_to_user(siginfo_t __user *to, const siginfo_t *from)
2748 * Other callers might not initialize the si_lsb field, 2748 * Other callers might not initialize the si_lsb field,
2749 * so check explicitly for the right codes here. 2749 * so check explicitly for the right codes here.
2750 */ 2750 */
2751 if (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO) 2751 if (from->si_signo == SIGBUS &&
2752 (from->si_code == BUS_MCEERR_AR || from->si_code == BUS_MCEERR_AO))
2752 err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb); 2753 err |= __put_user(from->si_addr_lsb, &to->si_addr_lsb);
2753#endif 2754#endif
2754#ifdef SEGV_BNDERR 2755#ifdef SEGV_BNDERR
2755 err |= __put_user(from->si_lower, &to->si_lower); 2756 if (from->si_signo == SIGSEGV && from->si_code == SEGV_BNDERR) {
2756 err |= __put_user(from->si_upper, &to->si_upper); 2757 err |= __put_user(from->si_lower, &to->si_lower);
2758 err |= __put_user(from->si_upper, &to->si_upper);
2759 }
2757#endif 2760#endif
2758 break; 2761 break;
2759 case __SI_CHLD: 2762 case __SI_CHLD:
@@ -3017,7 +3020,7 @@ COMPAT_SYSCALL_DEFINE3(rt_sigqueueinfo,
3017 int, sig, 3020 int, sig,
3018 struct compat_siginfo __user *, uinfo) 3021 struct compat_siginfo __user *, uinfo)
3019{ 3022{
3020 siginfo_t info; 3023 siginfo_t info = {};
3021 int ret = copy_siginfo_from_user32(&info, uinfo); 3024 int ret = copy_siginfo_from_user32(&info, uinfo);
3022 if (unlikely(ret)) 3025 if (unlikely(ret))
3023 return ret; 3026 return ret;
@@ -3061,7 +3064,7 @@ COMPAT_SYSCALL_DEFINE4(rt_tgsigqueueinfo,
3061 int, sig, 3064 int, sig,
3062 struct compat_siginfo __user *, uinfo) 3065 struct compat_siginfo __user *, uinfo)
3063{ 3066{
3064 siginfo_t info; 3067 siginfo_t info = {};
3065 3068
3066 if (copy_siginfo_from_user32(&info, uinfo)) 3069 if (copy_siginfo_from_user32(&info, uinfo))
3067 return -EFAULT; 3070 return -EFAULT;
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index 02bece4a99ea..eb11011b5292 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -98,6 +98,13 @@ struct ftrace_pid {
98 struct pid *pid; 98 struct pid *pid;
99}; 99};
100 100
101static bool ftrace_pids_enabled(void)
102{
103 return !list_empty(&ftrace_pids);
104}
105
106static void ftrace_update_trampoline(struct ftrace_ops *ops);
107
101/* 108/*
102 * ftrace_disabled is set when an anomaly is discovered. 109 * ftrace_disabled is set when an anomaly is discovered.
103 * ftrace_disabled is much stronger than ftrace_enabled. 110 * ftrace_disabled is much stronger than ftrace_enabled.
@@ -109,7 +116,6 @@ static DEFINE_MUTEX(ftrace_lock);
109static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end; 116static struct ftrace_ops *ftrace_control_list __read_mostly = &ftrace_list_end;
110static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end; 117static struct ftrace_ops *ftrace_ops_list __read_mostly = &ftrace_list_end;
111ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub; 118ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
112ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
113static struct ftrace_ops global_ops; 119static struct ftrace_ops global_ops;
114static struct ftrace_ops control_ops; 120static struct ftrace_ops control_ops;
115 121
@@ -183,14 +189,7 @@ static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip,
183 if (!test_tsk_trace_trace(current)) 189 if (!test_tsk_trace_trace(current))
184 return; 190 return;
185 191
186 ftrace_pid_function(ip, parent_ip, op, regs); 192 op->saved_func(ip, parent_ip, op, regs);
187}
188
189static void set_ftrace_pid_function(ftrace_func_t func)
190{
191 /* do not set ftrace_pid_function to itself! */
192 if (func != ftrace_pid_func)
193 ftrace_pid_function = func;
194} 193}
195 194
196/** 195/**
@@ -202,7 +201,6 @@ static void set_ftrace_pid_function(ftrace_func_t func)
202void clear_ftrace_function(void) 201void clear_ftrace_function(void)
203{ 202{
204 ftrace_trace_function = ftrace_stub; 203 ftrace_trace_function = ftrace_stub;
205 ftrace_pid_function = ftrace_stub;
206} 204}
207 205
208static void control_ops_disable_all(struct ftrace_ops *ops) 206static void control_ops_disable_all(struct ftrace_ops *ops)
@@ -436,6 +434,12 @@ static int __register_ftrace_function(struct ftrace_ops *ops)
436 } else 434 } else
437 add_ftrace_ops(&ftrace_ops_list, ops); 435 add_ftrace_ops(&ftrace_ops_list, ops);
438 436
437 /* Always save the function, and reset at unregistering */
438 ops->saved_func = ops->func;
439
440 if (ops->flags & FTRACE_OPS_FL_PID && ftrace_pids_enabled())
441 ops->func = ftrace_pid_func;
442
439 ftrace_update_trampoline(ops); 443 ftrace_update_trampoline(ops);
440 444
441 if (ftrace_enabled) 445 if (ftrace_enabled)
@@ -463,15 +467,28 @@ static int __unregister_ftrace_function(struct ftrace_ops *ops)
463 if (ftrace_enabled) 467 if (ftrace_enabled)
464 update_ftrace_function(); 468 update_ftrace_function();
465 469
470 ops->func = ops->saved_func;
471
466 return 0; 472 return 0;
467} 473}
468 474
469static void ftrace_update_pid_func(void) 475static void ftrace_update_pid_func(void)
470{ 476{
477 bool enabled = ftrace_pids_enabled();
478 struct ftrace_ops *op;
479
471 /* Only do something if we are tracing something */ 480 /* Only do something if we are tracing something */
472 if (ftrace_trace_function == ftrace_stub) 481 if (ftrace_trace_function == ftrace_stub)
473 return; 482 return;
474 483
484 do_for_each_ftrace_op(op, ftrace_ops_list) {
485 if (op->flags & FTRACE_OPS_FL_PID) {
486 op->func = enabled ? ftrace_pid_func :
487 op->saved_func;
488 ftrace_update_trampoline(op);
489 }
490 } while_for_each_ftrace_op(op);
491
475 update_ftrace_function(); 492 update_ftrace_function();
476} 493}
477 494
@@ -1133,7 +1150,8 @@ static struct ftrace_ops global_ops = {
1133 .local_hash.filter_hash = EMPTY_HASH, 1150 .local_hash.filter_hash = EMPTY_HASH,
1134 INIT_OPS_HASH(global_ops) 1151 INIT_OPS_HASH(global_ops)
1135 .flags = FTRACE_OPS_FL_RECURSION_SAFE | 1152 .flags = FTRACE_OPS_FL_RECURSION_SAFE |
1136 FTRACE_OPS_FL_INITIALIZED, 1153 FTRACE_OPS_FL_INITIALIZED |
1154 FTRACE_OPS_FL_PID,
1137}; 1155};
1138 1156
1139/* 1157/*
@@ -5023,7 +5041,9 @@ static void ftrace_update_trampoline(struct ftrace_ops *ops)
5023 5041
5024static struct ftrace_ops global_ops = { 5042static struct ftrace_ops global_ops = {
5025 .func = ftrace_stub, 5043 .func = ftrace_stub,
5026 .flags = FTRACE_OPS_FL_RECURSION_SAFE | FTRACE_OPS_FL_INITIALIZED, 5044 .flags = FTRACE_OPS_FL_RECURSION_SAFE |
5045 FTRACE_OPS_FL_INITIALIZED |
5046 FTRACE_OPS_FL_PID,
5027}; 5047};
5028 5048
5029static int __init ftrace_nodyn_init(void) 5049static int __init ftrace_nodyn_init(void)
@@ -5080,11 +5100,6 @@ void ftrace_init_array_ops(struct trace_array *tr, ftrace_func_t func)
5080 if (WARN_ON(tr->ops->func != ftrace_stub)) 5100 if (WARN_ON(tr->ops->func != ftrace_stub))
5081 printk("ftrace ops had %pS for function\n", 5101 printk("ftrace ops had %pS for function\n",
5082 tr->ops->func); 5102 tr->ops->func);
5083 /* Only the top level instance does pid tracing */
5084 if (!list_empty(&ftrace_pids)) {
5085 set_ftrace_pid_function(func);
5086 func = ftrace_pid_func;
5087 }
5088 } 5103 }
5089 tr->ops->func = func; 5104 tr->ops->func = func;
5090 tr->ops->private = tr; 5105 tr->ops->private = tr;
@@ -5371,7 +5386,7 @@ static void *fpid_start(struct seq_file *m, loff_t *pos)
5371{ 5386{
5372 mutex_lock(&ftrace_lock); 5387 mutex_lock(&ftrace_lock);
5373 5388
5374 if (list_empty(&ftrace_pids) && (!*pos)) 5389 if (!ftrace_pids_enabled() && (!*pos))
5375 return (void *) 1; 5390 return (void *) 1;
5376 5391
5377 return seq_list_start(&ftrace_pids, *pos); 5392 return seq_list_start(&ftrace_pids, *pos);
@@ -5610,6 +5625,7 @@ static struct ftrace_ops graph_ops = {
5610 .func = ftrace_stub, 5625 .func = ftrace_stub,
5611 .flags = FTRACE_OPS_FL_RECURSION_SAFE | 5626 .flags = FTRACE_OPS_FL_RECURSION_SAFE |
5612 FTRACE_OPS_FL_INITIALIZED | 5627 FTRACE_OPS_FL_INITIALIZED |
5628 FTRACE_OPS_FL_PID |
5613 FTRACE_OPS_FL_STUB, 5629 FTRACE_OPS_FL_STUB,
5614#ifdef FTRACE_GRAPH_TRAMP_ADDR 5630#ifdef FTRACE_GRAPH_TRAMP_ADDR
5615 .trampoline = FTRACE_GRAPH_TRAMP_ADDR, 5631 .trampoline = FTRACE_GRAPH_TRAMP_ADDR,
diff --git a/lib/iommu-common.c b/lib/iommu-common.c
index df30632f0bef..ff19f66d3f7f 100644
--- a/lib/iommu-common.c
+++ b/lib/iommu-common.c
@@ -119,7 +119,7 @@ unsigned long iommu_tbl_range_alloc(struct device *dev,
119 unsigned long align_mask = 0; 119 unsigned long align_mask = 0;
120 120
121 if (align_order > 0) 121 if (align_order > 0)
122 align_mask = 0xffffffffffffffffl >> (64 - align_order); 122 align_mask = ~0ul >> (BITS_PER_LONG - align_order);
123 123
124 /* Sanity check */ 124 /* Sanity check */
125 if (unlikely(npages == 0)) { 125 if (unlikely(npages == 0)) {
diff --git a/mm/cma.h b/mm/cma.h
index 1132d733556d..17c75a4246c8 100644
--- a/mm/cma.h
+++ b/mm/cma.h
@@ -16,7 +16,7 @@ struct cma {
16extern struct cma cma_areas[MAX_CMA_AREAS]; 16extern struct cma cma_areas[MAX_CMA_AREAS];
17extern unsigned cma_area_count; 17extern unsigned cma_area_count;
18 18
19static unsigned long cma_bitmap_maxno(struct cma *cma) 19static inline unsigned long cma_bitmap_maxno(struct cma *cma)
20{ 20{
21 return cma->count >> cma->order_per_bit; 21 return cma->count >> cma->order_per_bit;
22} 22}
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index c107094f79ba..097c7a4bfbd9 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1676,12 +1676,7 @@ static void __split_huge_page_refcount(struct page *page,
1676 /* after clearing PageTail the gup refcount can be released */ 1676 /* after clearing PageTail the gup refcount can be released */
1677 smp_mb__after_atomic(); 1677 smp_mb__after_atomic();
1678 1678
1679 /* 1679 page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP;
1680 * retain hwpoison flag of the poisoned tail page:
1681 * fix for the unsuitable process killed on Guest Machine(KVM)
1682 * by the memory-failure.
1683 */
1684 page_tail->flags &= ~PAGE_FLAGS_CHECK_AT_PREP | __PG_HWPOISON;
1685 page_tail->flags |= (page->flags & 1680 page_tail->flags |= (page->flags &
1686 ((1L << PG_referenced) | 1681 ((1L << PG_referenced) |
1687 (1L << PG_swapbacked) | 1682 (1L << PG_swapbacked) |
diff --git a/mm/kasan/kasan.c b/mm/kasan/kasan.c
index 6c513a63ea84..7b28e9cdf1c7 100644
--- a/mm/kasan/kasan.c
+++ b/mm/kasan/kasan.c
@@ -2,7 +2,7 @@
2 * This file contains shadow memory manipulation code. 2 * This file contains shadow memory manipulation code.
3 * 3 *
4 * Copyright (c) 2014 Samsung Electronics Co., Ltd. 4 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
5 * Author: Andrey Ryabinin <a.ryabinin@samsung.com> 5 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
6 * 6 *
7 * Some of code borrowed from https://github.com/xairy/linux by 7 * Some of code borrowed from https://github.com/xairy/linux by
8 * Andrey Konovalov <adech.fo@gmail.com> 8 * Andrey Konovalov <adech.fo@gmail.com>
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 680ceedf810a..e07c94fbd0ac 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -2,7 +2,7 @@
2 * This file contains error reporting code. 2 * This file contains error reporting code.
3 * 3 *
4 * Copyright (c) 2014 Samsung Electronics Co., Ltd. 4 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
5 * Author: Andrey Ryabinin <a.ryabinin@samsung.com> 5 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
6 * 6 *
7 * Some of code borrowed from https://github.com/xairy/linux by 7 * Some of code borrowed from https://github.com/xairy/linux by
8 * Andrey Konovalov <adech.fo@gmail.com> 8 * Andrey Konovalov <adech.fo@gmail.com>
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index c53543d89282..1f4446a90cef 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -909,6 +909,18 @@ int get_hwpoison_page(struct page *page)
909 * directly for tail pages. 909 * directly for tail pages.
910 */ 910 */
911 if (PageTransHuge(head)) { 911 if (PageTransHuge(head)) {
912 /*
913 * Non anonymous thp exists only in allocation/free time. We
914 * can't handle such a case correctly, so let's give it up.
915 * This should be better than triggering BUG_ON when kernel
916 * tries to touch the "partially handled" page.
917 */
918 if (!PageAnon(head)) {
919 pr_err("MCE: %#lx: non anonymous thp\n",
920 page_to_pfn(page));
921 return 0;
922 }
923
912 if (get_page_unless_zero(head)) { 924 if (get_page_unless_zero(head)) {
913 if (PageTail(page)) 925 if (PageTail(page))
914 get_page(page); 926 get_page(page);
@@ -1134,17 +1146,11 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
1134 } 1146 }
1135 1147
1136 if (!PageHuge(p) && PageTransHuge(hpage)) { 1148 if (!PageHuge(p) && PageTransHuge(hpage)) {
1137 if (!PageAnon(hpage)) { 1149 if (!PageAnon(hpage) || unlikely(split_huge_page(hpage))) {
1138 pr_err("MCE: %#lx: non anonymous thp\n", pfn); 1150 if (!PageAnon(hpage))
1139 if (TestClearPageHWPoison(p)) 1151 pr_err("MCE: %#lx: non anonymous thp\n", pfn);
1140 atomic_long_sub(nr_pages, &num_poisoned_pages); 1152 else
1141 put_page(p); 1153 pr_err("MCE: %#lx: thp split failed\n", pfn);
1142 if (p != hpage)
1143 put_page(hpage);
1144 return -EBUSY;
1145 }
1146 if (unlikely(split_huge_page(hpage))) {
1147 pr_err("MCE: %#lx: thp split failed\n", pfn);
1148 if (TestClearPageHWPoison(p)) 1154 if (TestClearPageHWPoison(p))
1149 atomic_long_sub(nr_pages, &num_poisoned_pages); 1155 atomic_long_sub(nr_pages, &num_poisoned_pages);
1150 put_page(p); 1156 put_page(p);
@@ -1209,9 +1215,9 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
1209 if (!PageHWPoison(p)) { 1215 if (!PageHWPoison(p)) {
1210 printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn); 1216 printk(KERN_ERR "MCE %#lx: just unpoisoned\n", pfn);
1211 atomic_long_sub(nr_pages, &num_poisoned_pages); 1217 atomic_long_sub(nr_pages, &num_poisoned_pages);
1218 unlock_page(hpage);
1212 put_page(hpage); 1219 put_page(hpage);
1213 res = 0; 1220 return 0;
1214 goto out;
1215 } 1221 }
1216 if (hwpoison_filter(p)) { 1222 if (hwpoison_filter(p)) {
1217 if (TestClearPageHWPoison(p)) 1223 if (TestClearPageHWPoison(p))
@@ -1535,6 +1541,8 @@ static int get_any_page(struct page *page, unsigned long pfn, int flags)
1535 */ 1541 */
1536 ret = __get_any_page(page, pfn, 0); 1542 ret = __get_any_page(page, pfn, 0);
1537 if (!PageLRU(page)) { 1543 if (!PageLRU(page)) {
1544 /* Drop page reference which is from __get_any_page() */
1545 put_page(page);
1538 pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n", 1546 pr_info("soft_offline: %#lx: unknown non LRU page type %lx\n",
1539 pfn, page->flags); 1547 pfn, page->flags);
1540 return -EIO; 1548 return -EIO;
@@ -1564,13 +1572,12 @@ static int soft_offline_huge_page(struct page *page, int flags)
1564 unlock_page(hpage); 1572 unlock_page(hpage);
1565 1573
1566 ret = isolate_huge_page(hpage, &pagelist); 1574 ret = isolate_huge_page(hpage, &pagelist);
1567 if (ret) { 1575 /*
1568 /* 1576 * get_any_page() and isolate_huge_page() takes a refcount each,
1569 * get_any_page() and isolate_huge_page() takes a refcount each, 1577 * so need to drop one here.
1570 * so need to drop one here. 1578 */
1571 */ 1579 put_page(hpage);
1572 put_page(hpage); 1580 if (!ret) {
1573 } else {
1574 pr_info("soft offline: %#lx hugepage failed to isolate\n", pfn); 1581 pr_info("soft offline: %#lx hugepage failed to isolate\n", pfn);
1575 return -EBUSY; 1582 return -EBUSY;
1576 } 1583 }
@@ -1656,6 +1663,8 @@ static int __soft_offline_page(struct page *page, int flags)
1656 inc_zone_page_state(page, NR_ISOLATED_ANON + 1663 inc_zone_page_state(page, NR_ISOLATED_ANON +
1657 page_is_file_cache(page)); 1664 page_is_file_cache(page));
1658 list_add(&page->lru, &pagelist); 1665 list_add(&page->lru, &pagelist);
1666 if (!TestSetPageHWPoison(page))
1667 atomic_long_inc(&num_poisoned_pages);
1659 ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL, 1668 ret = migrate_pages(&pagelist, new_page, NULL, MPOL_MF_MOVE_ALL,
1660 MIGRATE_SYNC, MR_MEMORY_FAILURE); 1669 MIGRATE_SYNC, MR_MEMORY_FAILURE);
1661 if (ret) { 1670 if (ret) {
@@ -1670,9 +1679,8 @@ static int __soft_offline_page(struct page *page, int flags)
1670 pfn, ret, page->flags); 1679 pfn, ret, page->flags);
1671 if (ret > 0) 1680 if (ret > 0)
1672 ret = -EIO; 1681 ret = -EIO;
1673 } else { 1682 if (TestClearPageHWPoison(page))
1674 SetPageHWPoison(page); 1683 atomic_long_dec(&num_poisoned_pages);
1675 atomic_long_inc(&num_poisoned_pages);
1676 } 1684 }
1677 } else { 1685 } else {
1678 pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n", 1686 pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 26fbba7d888f..6da82bcb0a8b 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -446,7 +446,7 @@ static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
446 int nr_pages = PAGES_PER_SECTION; 446 int nr_pages = PAGES_PER_SECTION;
447 int nid = pgdat->node_id; 447 int nid = pgdat->node_id;
448 int zone_type; 448 int zone_type;
449 unsigned long flags; 449 unsigned long flags, pfn;
450 int ret; 450 int ret;
451 451
452 zone_type = zone - pgdat->node_zones; 452 zone_type = zone - pgdat->node_zones;
@@ -461,6 +461,14 @@ static int __meminit __add_zone(struct zone *zone, unsigned long phys_start_pfn)
461 pgdat_resize_unlock(zone->zone_pgdat, &flags); 461 pgdat_resize_unlock(zone->zone_pgdat, &flags);
462 memmap_init_zone(nr_pages, nid, zone_type, 462 memmap_init_zone(nr_pages, nid, zone_type,
463 phys_start_pfn, MEMMAP_HOTPLUG); 463 phys_start_pfn, MEMMAP_HOTPLUG);
464
465 /* online_page_range is called later and expects pages reserved */
466 for (pfn = phys_start_pfn; pfn < phys_start_pfn + nr_pages; pfn++) {
467 if (!pfn_valid(pfn))
468 continue;
469
470 SetPageReserved(pfn_to_page(pfn));
471 }
464 return 0; 472 return 0;
465} 473}
466 474
@@ -1269,6 +1277,7 @@ int __ref add_memory(int nid, u64 start, u64 size)
1269 1277
1270 /* create new memmap entry */ 1278 /* create new memmap entry */
1271 firmware_map_add_hotplug(start, start + size, "System RAM"); 1279 firmware_map_add_hotplug(start, start + size, "System RAM");
1280 memblock_add_node(start, size, nid);
1272 1281
1273 goto out; 1282 goto out;
1274 1283
@@ -2005,6 +2014,8 @@ void __ref remove_memory(int nid, u64 start, u64 size)
2005 2014
2006 /* remove memmap entry */ 2015 /* remove memmap entry */
2007 firmware_map_remove(start, start + size, "System RAM"); 2016 firmware_map_remove(start, start + size, "System RAM");
2017 memblock_free(start, size);
2018 memblock_remove(start, size);
2008 2019
2009 arch_remove_memory(start, size); 2020 arch_remove_memory(start, size);
2010 2021
diff --git a/mm/migrate.c b/mm/migrate.c
index ee401e4e5ef1..eb4267107d1f 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -880,7 +880,8 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
880 /* Establish migration ptes or remove ptes */ 880 /* Establish migration ptes or remove ptes */
881 if (page_mapped(page)) { 881 if (page_mapped(page)) {
882 try_to_unmap(page, 882 try_to_unmap(page,
883 TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS); 883 TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS|
884 TTU_IGNORE_HWPOISON);
884 page_was_mapped = 1; 885 page_was_mapped = 1;
885 } 886 }
886 887
@@ -950,7 +951,10 @@ out:
950 list_del(&page->lru); 951 list_del(&page->lru);
951 dec_zone_page_state(page, NR_ISOLATED_ANON + 952 dec_zone_page_state(page, NR_ISOLATED_ANON +
952 page_is_file_cache(page)); 953 page_is_file_cache(page));
953 if (reason != MR_MEMORY_FAILURE) 954 /* Soft-offlined page shouldn't go through lru cache list */
955 if (reason == MR_MEMORY_FAILURE)
956 put_page(page);
957 else
954 putback_lru_page(page); 958 putback_lru_page(page);
955 } 959 }
956 960
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 22cddd3e5de8..5cccc127ef81 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -2063,10 +2063,10 @@ static struct notifier_block ratelimit_nb = {
2063 */ 2063 */
2064void __init page_writeback_init(void) 2064void __init page_writeback_init(void)
2065{ 2065{
2066 BUG_ON(wb_domain_init(&global_wb_domain, GFP_KERNEL));
2067
2066 writeback_set_ratelimit(); 2068 writeback_set_ratelimit();
2067 register_cpu_notifier(&ratelimit_nb); 2069 register_cpu_notifier(&ratelimit_nb);
2068
2069 BUG_ON(wb_domain_init(&global_wb_domain, GFP_KERNEL));
2070} 2070}
2071 2071
2072/** 2072/**
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ef19f22b2b7d..df959b7d6085 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -18,7 +18,6 @@
18#include <linux/mm.h> 18#include <linux/mm.h>
19#include <linux/swap.h> 19#include <linux/swap.h>
20#include <linux/interrupt.h> 20#include <linux/interrupt.h>
21#include <linux/rwsem.h>
22#include <linux/pagemap.h> 21#include <linux/pagemap.h>
23#include <linux/jiffies.h> 22#include <linux/jiffies.h>
24#include <linux/bootmem.h> 23#include <linux/bootmem.h>
@@ -981,21 +980,21 @@ static void __init __free_pages_boot_core(struct page *page,
981 980
982#if defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) || \ 981#if defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID) || \
983 defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) 982 defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP)
984/* Only safe to use early in boot when initialisation is single-threaded */ 983
985static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata; 984static struct mminit_pfnnid_cache early_pfnnid_cache __meminitdata;
986 985
987int __meminit early_pfn_to_nid(unsigned long pfn) 986int __meminit early_pfn_to_nid(unsigned long pfn)
988{ 987{
988 static DEFINE_SPINLOCK(early_pfn_lock);
989 int nid; 989 int nid;
990 990
991 /* The system will behave unpredictably otherwise */ 991 spin_lock(&early_pfn_lock);
992 BUG_ON(system_state != SYSTEM_BOOTING);
993
994 nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache); 992 nid = __early_pfn_to_nid(pfn, &early_pfnnid_cache);
995 if (nid >= 0) 993 if (nid < 0)
996 return nid; 994 nid = 0;
997 /* just returns 0 */ 995 spin_unlock(&early_pfn_lock);
998 return 0; 996
997 return nid;
999} 998}
1000#endif 999#endif
1001 1000
@@ -1060,7 +1059,15 @@ static void __init deferred_free_range(struct page *page,
1060 __free_pages_boot_core(page, pfn, 0); 1059 __free_pages_boot_core(page, pfn, 0);
1061} 1060}
1062 1061
1063static __initdata DECLARE_RWSEM(pgdat_init_rwsem); 1062/* Completion tracking for deferred_init_memmap() threads */
1063static atomic_t pgdat_init_n_undone __initdata;
1064static __initdata DECLARE_COMPLETION(pgdat_init_all_done_comp);
1065
1066static inline void __init pgdat_init_report_one_done(void)
1067{
1068 if (atomic_dec_and_test(&pgdat_init_n_undone))
1069 complete(&pgdat_init_all_done_comp);
1070}
1064 1071
1065/* Initialise remaining memory on a node */ 1072/* Initialise remaining memory on a node */
1066static int __init deferred_init_memmap(void *data) 1073static int __init deferred_init_memmap(void *data)
@@ -1077,7 +1084,7 @@ static int __init deferred_init_memmap(void *data)
1077 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); 1084 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
1078 1085
1079 if (first_init_pfn == ULONG_MAX) { 1086 if (first_init_pfn == ULONG_MAX) {
1080 up_read(&pgdat_init_rwsem); 1087 pgdat_init_report_one_done();
1081 return 0; 1088 return 0;
1082 } 1089 }
1083 1090
@@ -1177,7 +1184,8 @@ free_range:
1177 1184
1178 pr_info("node %d initialised, %lu pages in %ums\n", nid, nr_pages, 1185 pr_info("node %d initialised, %lu pages in %ums\n", nid, nr_pages,
1179 jiffies_to_msecs(jiffies - start)); 1186 jiffies_to_msecs(jiffies - start));
1180 up_read(&pgdat_init_rwsem); 1187
1188 pgdat_init_report_one_done();
1181 return 0; 1189 return 0;
1182} 1190}
1183 1191
@@ -1185,14 +1193,17 @@ void __init page_alloc_init_late(void)
1185{ 1193{
1186 int nid; 1194 int nid;
1187 1195
1196 /* There will be num_node_state(N_MEMORY) threads */
1197 atomic_set(&pgdat_init_n_undone, num_node_state(N_MEMORY));
1188 for_each_node_state(nid, N_MEMORY) { 1198 for_each_node_state(nid, N_MEMORY) {
1189 down_read(&pgdat_init_rwsem);
1190 kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid); 1199 kthread_run(deferred_init_memmap, NODE_DATA(nid), "pgdatinit%d", nid);
1191 } 1200 }
1192 1201
1193 /* Block until all are initialised */ 1202 /* Block until all are initialised */
1194 down_write(&pgdat_init_rwsem); 1203 wait_for_completion(&pgdat_init_all_done_comp);
1195 up_write(&pgdat_init_rwsem); 1204
1205 /* Reinit limits that are based on free pages after the kernel is up */
1206 files_maxfiles_init();
1196} 1207}
1197#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */ 1208#endif /* CONFIG_DEFERRED_STRUCT_PAGE_INIT */
1198 1209
@@ -1285,6 +1296,10 @@ static inline int check_new_page(struct page *page)
1285 bad_reason = "non-NULL mapping"; 1296 bad_reason = "non-NULL mapping";
1286 if (unlikely(atomic_read(&page->_count) != 0)) 1297 if (unlikely(atomic_read(&page->_count) != 0))
1287 bad_reason = "nonzero _count"; 1298 bad_reason = "nonzero _count";
1299 if (unlikely(page->flags & __PG_HWPOISON)) {
1300 bad_reason = "HWPoisoned (hardware-corrupted)";
1301 bad_flags = __PG_HWPOISON;
1302 }
1288 if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) { 1303 if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_PREP)) {
1289 bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set"; 1304 bad_reason = "PAGE_FLAGS_CHECK_AT_PREP flag set";
1290 bad_flags = PAGE_FLAGS_CHECK_AT_PREP; 1305 bad_flags = PAGE_FLAGS_CHECK_AT_PREP;
@@ -5045,6 +5060,10 @@ static unsigned long __meminit zone_spanned_pages_in_node(int nid,
5045{ 5060{
5046 unsigned long zone_start_pfn, zone_end_pfn; 5061 unsigned long zone_start_pfn, zone_end_pfn;
5047 5062
5063 /* When hotadd a new node, the node should be empty */
5064 if (!node_start_pfn && !node_end_pfn)
5065 return 0;
5066
5048 /* Get the start and end of the zone */ 5067 /* Get the start and end of the zone */
5049 zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type]; 5068 zone_start_pfn = arch_zone_lowest_possible_pfn[zone_type];
5050 zone_end_pfn = arch_zone_highest_possible_pfn[zone_type]; 5069 zone_end_pfn = arch_zone_highest_possible_pfn[zone_type];
@@ -5108,6 +5127,10 @@ static unsigned long __meminit zone_absent_pages_in_node(int nid,
5108 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type]; 5127 unsigned long zone_high = arch_zone_highest_possible_pfn[zone_type];
5109 unsigned long zone_start_pfn, zone_end_pfn; 5128 unsigned long zone_start_pfn, zone_end_pfn;
5110 5129
5130 /* When hotadd a new node, the node should be empty */
5131 if (!node_start_pfn && !node_end_pfn)
5132 return 0;
5133
5111 zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high); 5134 zone_start_pfn = clamp(node_start_pfn, zone_low, zone_high);
5112 zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high); 5135 zone_end_pfn = clamp(node_end_pfn, zone_low, zone_high);
5113 5136
diff --git a/mm/shmem.c b/mm/shmem.c
index 4caf8ed24d65..dbe0c1e8349c 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -3363,8 +3363,8 @@ put_path:
3363 * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be 3363 * shmem_kernel_file_setup - get an unlinked file living in tmpfs which must be
3364 * kernel internal. There will be NO LSM permission checks against the 3364 * kernel internal. There will be NO LSM permission checks against the
3365 * underlying inode. So users of this interface must do LSM checks at a 3365 * underlying inode. So users of this interface must do LSM checks at a
3366 * higher layer. The one user is the big_key implementation. LSM checks 3366 * higher layer. The users are the big_key and shm implementations. LSM
3367 * are provided at the key level rather than the inode level. 3367 * checks are provided at the key or shm level rather than the inode.
3368 * @name: name for dentry (to be seen in /proc/<pid>/maps 3368 * @name: name for dentry (to be seen in /proc/<pid>/maps
3369 * @size: size to be set for the file 3369 * @size: size to be set for the file
3370 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size 3370 * @flags: VM_NORESERVE suppresses pre-accounting of the entire object size
diff --git a/mm/slab_common.c b/mm/slab_common.c
index 3e5f8f29c286..86831105a09f 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -37,8 +37,7 @@ struct kmem_cache *kmem_cache;
37 SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \ 37 SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
38 SLAB_FAILSLAB) 38 SLAB_FAILSLAB)
39 39
40#define SLAB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \ 40#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | SLAB_NOTRACK)
41 SLAB_CACHE_DMA | SLAB_NOTRACK)
42 41
43/* 42/*
44 * Merge control. If this is set then no merging of slab caches will occur. 43 * Merge control. If this is set then no merging of slab caches will occur.
diff --git a/mm/vmscan.c b/mm/vmscan.c
index e61445dce04e..8286938c70de 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -973,22 +973,18 @@ static unsigned long shrink_page_list(struct list_head *page_list,
973 * caller can stall after page list has been processed. 973 * caller can stall after page list has been processed.
974 * 974 *
975 * 2) Global or new memcg reclaim encounters a page that is 975 * 2) Global or new memcg reclaim encounters a page that is
976 * not marked for immediate reclaim or the caller does not 976 * not marked for immediate reclaim, or the caller does not
977 * have __GFP_IO. In this case mark the page for immediate 977 * have __GFP_FS (or __GFP_IO if it's simply going to swap,
978 * not to fs). In this case mark the page for immediate
978 * reclaim and continue scanning. 979 * reclaim and continue scanning.
979 * 980 *
980 * __GFP_IO is checked because a loop driver thread might 981 * Require may_enter_fs because we would wait on fs, which
982 * may not have submitted IO yet. And the loop driver might
981 * enter reclaim, and deadlock if it waits on a page for 983 * enter reclaim, and deadlock if it waits on a page for
982 * which it is needed to do the write (loop masks off 984 * which it is needed to do the write (loop masks off
983 * __GFP_IO|__GFP_FS for this reason); but more thought 985 * __GFP_IO|__GFP_FS for this reason); but more thought
984 * would probably show more reasons. 986 * would probably show more reasons.
985 * 987 *
986 * Don't require __GFP_FS, since we're not going into the
987 * FS, just waiting on its writeback completion. Worryingly,
988 * ext4 gfs2 and xfs allocate pages with
989 * grab_cache_page_write_begin(,,AOP_FLAG_NOFS), so testing
990 * may_enter_fs here is liable to OOM on them.
991 *
992 * 3) Legacy memcg encounters a page that is not already marked 988 * 3) Legacy memcg encounters a page that is not already marked
993 * PageReclaim. memcg does not have any dirty pages 989 * PageReclaim. memcg does not have any dirty pages
994 * throttling so we could easily OOM just because too many 990 * throttling so we could easily OOM just because too many
@@ -1005,7 +1001,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
1005 1001
1006 /* Case 2 above */ 1002 /* Case 2 above */
1007 } else if (sane_reclaim(sc) || 1003 } else if (sane_reclaim(sc) ||
1008 !PageReclaim(page) || !(sc->gfp_mask & __GFP_IO)) { 1004 !PageReclaim(page) || !may_enter_fs) {
1009 /* 1005 /*
1010 * This is slightly racy - end_page_writeback() 1006 * This is slightly racy - end_page_writeback()
1011 * might have just cleared PageReclaim, then 1007 * might have just cleared PageReclaim, then
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 9dd49ca67dbc..6e70ddb158b4 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -704,6 +704,7 @@ static void p9_virtio_remove(struct virtio_device *vdev)
704 704
705 mutex_unlock(&virtio_9p_lock); 705 mutex_unlock(&virtio_9p_lock);
706 706
707 vdev->config->reset(vdev);
707 vdev->config->del_vqs(vdev); 708 vdev->config->del_vqs(vdev);
708 709
709 sysfs_remove_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr); 710 sysfs_remove_file(&(vdev->dev.kobj), &dev_attr_mount_tag.attr);
diff --git a/net/ax25/ax25_subr.c b/net/ax25/ax25_subr.c
index 1997538a5d23..3b78e8473a01 100644
--- a/net/ax25/ax25_subr.c
+++ b/net/ax25/ax25_subr.c
@@ -264,6 +264,7 @@ void ax25_disconnect(ax25_cb *ax25, int reason)
264{ 264{
265 ax25_clear_queues(ax25); 265 ax25_clear_queues(ax25);
266 266
267 ax25_stop_heartbeat(ax25);
267 ax25_stop_t1timer(ax25); 268 ax25_stop_t1timer(ax25);
268 ax25_stop_t2timer(ax25); 269 ax25_stop_t2timer(ax25);
269 ax25_stop_t3timer(ax25); 270 ax25_stop_t3timer(ax25);
diff --git a/net/batman-adv/distributed-arp-table.c b/net/batman-adv/distributed-arp-table.c
index fb54e6aed096..6d0b471eede8 100644
--- a/net/batman-adv/distributed-arp-table.c
+++ b/net/batman-adv/distributed-arp-table.c
@@ -1138,6 +1138,9 @@ void batadv_dat_snoop_outgoing_arp_reply(struct batadv_priv *bat_priv,
1138 * @bat_priv: the bat priv with all the soft interface information 1138 * @bat_priv: the bat priv with all the soft interface information
1139 * @skb: packet to check 1139 * @skb: packet to check
1140 * @hdr_size: size of the encapsulation header 1140 * @hdr_size: size of the encapsulation header
1141 *
1142 * Returns true if the packet was snooped and consumed by DAT. False if the
1143 * packet has to be delivered to the interface
1141 */ 1144 */
1142bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv, 1145bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
1143 struct sk_buff *skb, int hdr_size) 1146 struct sk_buff *skb, int hdr_size)
@@ -1145,7 +1148,7 @@ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
1145 uint16_t type; 1148 uint16_t type;
1146 __be32 ip_src, ip_dst; 1149 __be32 ip_src, ip_dst;
1147 uint8_t *hw_src, *hw_dst; 1150 uint8_t *hw_src, *hw_dst;
1148 bool ret = false; 1151 bool dropped = false;
1149 unsigned short vid; 1152 unsigned short vid;
1150 1153
1151 if (!atomic_read(&bat_priv->distributed_arp_table)) 1154 if (!atomic_read(&bat_priv->distributed_arp_table))
@@ -1174,12 +1177,17 @@ bool batadv_dat_snoop_incoming_arp_reply(struct batadv_priv *bat_priv,
1174 /* if this REPLY is directed to a client of mine, let's deliver the 1177 /* if this REPLY is directed to a client of mine, let's deliver the
1175 * packet to the interface 1178 * packet to the interface
1176 */ 1179 */
1177 ret = !batadv_is_my_client(bat_priv, hw_dst, vid); 1180 dropped = !batadv_is_my_client(bat_priv, hw_dst, vid);
1181
1182 /* if this REPLY is sent on behalf of a client of mine, let's drop the
1183 * packet because the client will reply by itself
1184 */
1185 dropped |= batadv_is_my_client(bat_priv, hw_src, vid);
1178out: 1186out:
1179 if (ret) 1187 if (dropped)
1180 kfree_skb(skb); 1188 kfree_skb(skb);
1181 /* if ret == false -> packet has to be delivered to the interface */ 1189 /* if dropped == false -> deliver to the interface */
1182 return ret; 1190 return dropped;
1183} 1191}
1184 1192
1185/** 1193/**
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index bb0158620628..cffa92dd9877 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -439,6 +439,8 @@ static void batadv_gw_node_add(struct batadv_priv *bat_priv,
439 439
440 INIT_HLIST_NODE(&gw_node->list); 440 INIT_HLIST_NODE(&gw_node->list);
441 gw_node->orig_node = orig_node; 441 gw_node->orig_node = orig_node;
442 gw_node->bandwidth_down = ntohl(gateway->bandwidth_down);
443 gw_node->bandwidth_up = ntohl(gateway->bandwidth_up);
442 atomic_set(&gw_node->refcount, 1); 444 atomic_set(&gw_node->refcount, 1);
443 445
444 spin_lock_bh(&bat_priv->gw.list_lock); 446 spin_lock_bh(&bat_priv->gw.list_lock);
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index c002961da75d..a2fc843c2243 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -479,6 +479,9 @@ out:
479 */ 479 */
480void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *vlan) 480void batadv_softif_vlan_free_ref(struct batadv_softif_vlan *vlan)
481{ 481{
482 if (!vlan)
483 return;
484
482 if (atomic_dec_and_test(&vlan->refcount)) { 485 if (atomic_dec_and_test(&vlan->refcount)) {
483 spin_lock_bh(&vlan->bat_priv->softif_vlan_list_lock); 486 spin_lock_bh(&vlan->bat_priv->softif_vlan_list_lock);
484 hlist_del_rcu(&vlan->list); 487 hlist_del_rcu(&vlan->list);
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index b4824951010b..5e953297d3b2 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -594,6 +594,9 @@ bool batadv_tt_local_add(struct net_device *soft_iface, const uint8_t *addr,
594 594
595 /* increase the refcounter of the related vlan */ 595 /* increase the refcounter of the related vlan */
596 vlan = batadv_softif_vlan_get(bat_priv, vid); 596 vlan = batadv_softif_vlan_get(bat_priv, vid);
597 if (WARN(!vlan, "adding TT local entry %pM to non-existent VLAN %d",
598 addr, BATADV_PRINT_VID(vid)))
599 goto out;
597 600
598 batadv_dbg(BATADV_DBG_TT, bat_priv, 601 batadv_dbg(BATADV_DBG_TT, bat_priv,
599 "Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n", 602 "Creating new local tt entry: %pM (vid: %d, ttvn: %d)\n",
@@ -1034,6 +1037,7 @@ uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
1034 struct batadv_tt_local_entry *tt_local_entry; 1037 struct batadv_tt_local_entry *tt_local_entry;
1035 uint16_t flags, curr_flags = BATADV_NO_FLAGS; 1038 uint16_t flags, curr_flags = BATADV_NO_FLAGS;
1036 struct batadv_softif_vlan *vlan; 1039 struct batadv_softif_vlan *vlan;
1040 void *tt_entry_exists;
1037 1041
1038 tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid); 1042 tt_local_entry = batadv_tt_local_hash_find(bat_priv, addr, vid);
1039 if (!tt_local_entry) 1043 if (!tt_local_entry)
@@ -1061,11 +1065,22 @@ uint16_t batadv_tt_local_remove(struct batadv_priv *bat_priv,
1061 * immediately purge it 1065 * immediately purge it
1062 */ 1066 */
1063 batadv_tt_local_event(bat_priv, tt_local_entry, BATADV_TT_CLIENT_DEL); 1067 batadv_tt_local_event(bat_priv, tt_local_entry, BATADV_TT_CLIENT_DEL);
1064 hlist_del_rcu(&tt_local_entry->common.hash_entry); 1068
1069 tt_entry_exists = batadv_hash_remove(bat_priv->tt.local_hash,
1070 batadv_compare_tt,
1071 batadv_choose_tt,
1072 &tt_local_entry->common);
1073 if (!tt_entry_exists)
1074 goto out;
1075
1076 /* extra call to free the local tt entry */
1065 batadv_tt_local_entry_free_ref(tt_local_entry); 1077 batadv_tt_local_entry_free_ref(tt_local_entry);
1066 1078
1067 /* decrease the reference held for this vlan */ 1079 /* decrease the reference held for this vlan */
1068 vlan = batadv_softif_vlan_get(bat_priv, vid); 1080 vlan = batadv_softif_vlan_get(bat_priv, vid);
1081 if (!vlan)
1082 goto out;
1083
1069 batadv_softif_vlan_free_ref(vlan); 1084 batadv_softif_vlan_free_ref(vlan);
1070 batadv_softif_vlan_free_ref(vlan); 1085 batadv_softif_vlan_free_ref(vlan);
1071 1086
@@ -1166,8 +1181,10 @@ static void batadv_tt_local_table_free(struct batadv_priv *bat_priv)
1166 /* decrease the reference held for this vlan */ 1181 /* decrease the reference held for this vlan */
1167 vlan = batadv_softif_vlan_get(bat_priv, 1182 vlan = batadv_softif_vlan_get(bat_priv,
1168 tt_common_entry->vid); 1183 tt_common_entry->vid);
1169 batadv_softif_vlan_free_ref(vlan); 1184 if (vlan) {
1170 batadv_softif_vlan_free_ref(vlan); 1185 batadv_softif_vlan_free_ref(vlan);
1186 batadv_softif_vlan_free_ref(vlan);
1187 }
1171 1188
1172 batadv_tt_local_entry_free_ref(tt_local); 1189 batadv_tt_local_entry_free_ref(tt_local);
1173 } 1190 }
@@ -3207,8 +3224,10 @@ static void batadv_tt_local_purge_pending_clients(struct batadv_priv *bat_priv)
3207 3224
3208 /* decrease the reference held for this vlan */ 3225 /* decrease the reference held for this vlan */
3209 vlan = batadv_softif_vlan_get(bat_priv, tt_common->vid); 3226 vlan = batadv_softif_vlan_get(bat_priv, tt_common->vid);
3210 batadv_softif_vlan_free_ref(vlan); 3227 if (vlan) {
3211 batadv_softif_vlan_free_ref(vlan); 3228 batadv_softif_vlan_free_ref(vlan);
3229 batadv_softif_vlan_free_ref(vlan);
3230 }
3212 3231
3213 batadv_tt_local_entry_free_ref(tt_local); 3232 batadv_tt_local_entry_free_ref(tt_local);
3214 } 3233 }
diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c
index 7998fb279165..92720f3fe573 100644
--- a/net/bluetooth/mgmt.c
+++ b/net/bluetooth/mgmt.c
@@ -7820,7 +7820,7 @@ void mgmt_new_ltk(struct hci_dev *hdev, struct smp_ltk *key, bool persistent)
7820 /* Make sure we copy only the significant bytes based on the 7820 /* Make sure we copy only the significant bytes based on the
7821 * encryption key size, and set the rest of the value to zeroes. 7821 * encryption key size, and set the rest of the value to zeroes.
7822 */ 7822 */
7823 memcpy(ev.key.val, key->val, sizeof(key->enc_size)); 7823 memcpy(ev.key.val, key->val, key->enc_size);
7824 memset(ev.key.val + key->enc_size, 0, 7824 memset(ev.key.val + key->enc_size, 0,
7825 sizeof(ev.key.val) - key->enc_size); 7825 sizeof(ev.key.val) - key->enc_size);
7826 7826
diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c
index 3d0f7d2a0616..ad82324f710f 100644
--- a/net/bluetooth/smp.c
+++ b/net/bluetooth/smp.c
@@ -2312,6 +2312,10 @@ int smp_conn_security(struct hci_conn *hcon, __u8 sec_level)
2312 return 1; 2312 return 1;
2313 2313
2314 chan = conn->smp; 2314 chan = conn->smp;
2315 if (!chan) {
2316 BT_ERR("SMP security requested but not available");
2317 return 1;
2318 }
2315 2319
2316 if (!hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED)) 2320 if (!hci_dev_test_flag(hcon->hdev, HCI_LE_ENABLED))
2317 return 1; 2321 return 1;
diff --git a/net/bridge/br_forward.c b/net/bridge/br_forward.c
index 0ff6e1bbca91..fa7bfced888e 100644
--- a/net/bridge/br_forward.c
+++ b/net/bridge/br_forward.c
@@ -37,15 +37,30 @@ static inline int should_deliver(const struct net_bridge_port *p,
37 37
38int br_dev_queue_push_xmit(struct sock *sk, struct sk_buff *skb) 38int br_dev_queue_push_xmit(struct sock *sk, struct sk_buff *skb)
39{ 39{
40 if (!is_skb_forwardable(skb->dev, skb)) { 40 if (!is_skb_forwardable(skb->dev, skb))
41 kfree_skb(skb); 41 goto drop;
42 } else { 42
43 skb_push(skb, ETH_HLEN); 43 skb_push(skb, ETH_HLEN);
44 br_drop_fake_rtable(skb); 44 br_drop_fake_rtable(skb);
45 skb_sender_cpu_clear(skb); 45 skb_sender_cpu_clear(skb);
46 dev_queue_xmit(skb); 46
47 if (skb->ip_summed == CHECKSUM_PARTIAL &&
48 (skb->protocol == htons(ETH_P_8021Q) ||
49 skb->protocol == htons(ETH_P_8021AD))) {
50 int depth;
51
52 if (!__vlan_get_protocol(skb, skb->protocol, &depth))
53 goto drop;
54
55 skb_set_network_header(skb, depth);
47 } 56 }
48 57
58 dev_queue_xmit(skb);
59
60 return 0;
61
62drop:
63 kfree_skb(skb);
49 return 0; 64 return 0;
50} 65}
51EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit); 66EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit);
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index c11cf2611db0..c94321955db7 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -351,7 +351,6 @@ static int br_mdb_add_group(struct net_bridge *br, struct net_bridge_port *port,
351 if (state == MDB_TEMPORARY) 351 if (state == MDB_TEMPORARY)
352 mod_timer(&p->timer, now + br->multicast_membership_interval); 352 mod_timer(&p->timer, now + br->multicast_membership_interval);
353 353
354 br_mdb_notify(br->dev, port, group, RTM_NEWMDB);
355 return 0; 354 return 0;
356} 355}
357 356
@@ -446,6 +445,7 @@ static int __br_mdb_del(struct net_bridge *br, struct br_mdb_entry *entry)
446 if (p->port->state == BR_STATE_DISABLED) 445 if (p->port->state == BR_STATE_DISABLED)
447 goto unlock; 446 goto unlock;
448 447
448 entry->state = p->state;
449 rcu_assign_pointer(*pp, p->next); 449 rcu_assign_pointer(*pp, p->next);
450 hlist_del_init(&p->mglist); 450 hlist_del_init(&p->mglist);
451 del_timer(&p->timer); 451 del_timer(&p->timer);
diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c
index 742a6c27d7a2..0b39dcc65b94 100644
--- a/net/bridge/br_multicast.c
+++ b/net/bridge/br_multicast.c
@@ -39,6 +39,16 @@ static void br_multicast_start_querier(struct net_bridge *br,
39 struct bridge_mcast_own_query *query); 39 struct bridge_mcast_own_query *query);
40static void br_multicast_add_router(struct net_bridge *br, 40static void br_multicast_add_router(struct net_bridge *br,
41 struct net_bridge_port *port); 41 struct net_bridge_port *port);
42static void br_ip4_multicast_leave_group(struct net_bridge *br,
43 struct net_bridge_port *port,
44 __be32 group,
45 __u16 vid);
46#if IS_ENABLED(CONFIG_IPV6)
47static void br_ip6_multicast_leave_group(struct net_bridge *br,
48 struct net_bridge_port *port,
49 const struct in6_addr *group,
50 __u16 vid);
51#endif
42unsigned int br_mdb_rehash_seq; 52unsigned int br_mdb_rehash_seq;
43 53
44static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b) 54static inline int br_ip_equal(const struct br_ip *a, const struct br_ip *b)
@@ -1010,9 +1020,15 @@ static int br_ip4_multicast_igmp3_report(struct net_bridge *br,
1010 continue; 1020 continue;
1011 } 1021 }
1012 1022
1013 err = br_ip4_multicast_add_group(br, port, group, vid); 1023 if ((type == IGMPV3_CHANGE_TO_INCLUDE ||
1014 if (err) 1024 type == IGMPV3_MODE_IS_INCLUDE) &&
1015 break; 1025 ntohs(grec->grec_nsrcs) == 0) {
1026 br_ip4_multicast_leave_group(br, port, group, vid);
1027 } else {
1028 err = br_ip4_multicast_add_group(br, port, group, vid);
1029 if (err)
1030 break;
1031 }
1016 } 1032 }
1017 1033
1018 return err; 1034 return err;
@@ -1071,10 +1087,17 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
1071 continue; 1087 continue;
1072 } 1088 }
1073 1089
1074 err = br_ip6_multicast_add_group(br, port, &grec->grec_mca, 1090 if ((grec->grec_type == MLD2_CHANGE_TO_INCLUDE ||
1075 vid); 1091 grec->grec_type == MLD2_MODE_IS_INCLUDE) &&
1076 if (err) 1092 ntohs(*nsrcs) == 0) {
1077 break; 1093 br_ip6_multicast_leave_group(br, port, &grec->grec_mca,
1094 vid);
1095 } else {
1096 err = br_ip6_multicast_add_group(br, port,
1097 &grec->grec_mca, vid);
1098 if (!err)
1099 break;
1100 }
1078 } 1101 }
1079 1102
1080 return err; 1103 return err;
@@ -1393,8 +1416,7 @@ br_multicast_leave_group(struct net_bridge *br,
1393 1416
1394 spin_lock(&br->multicast_lock); 1417 spin_lock(&br->multicast_lock);
1395 if (!netif_running(br->dev) || 1418 if (!netif_running(br->dev) ||
1396 (port && port->state == BR_STATE_DISABLED) || 1419 (port && port->state == BR_STATE_DISABLED))
1397 timer_pending(&other_query->timer))
1398 goto out; 1420 goto out;
1399 1421
1400 mdb = mlock_dereference(br->mdb, br); 1422 mdb = mlock_dereference(br->mdb, br);
@@ -1402,6 +1424,31 @@ br_multicast_leave_group(struct net_bridge *br,
1402 if (!mp) 1424 if (!mp)
1403 goto out; 1425 goto out;
1404 1426
1427 if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
1428 struct net_bridge_port_group __rcu **pp;
1429
1430 for (pp = &mp->ports;
1431 (p = mlock_dereference(*pp, br)) != NULL;
1432 pp = &p->next) {
1433 if (p->port != port)
1434 continue;
1435
1436 rcu_assign_pointer(*pp, p->next);
1437 hlist_del_init(&p->mglist);
1438 del_timer(&p->timer);
1439 call_rcu_bh(&p->rcu, br_multicast_free_pg);
1440 br_mdb_notify(br->dev, port, group, RTM_DELMDB);
1441
1442 if (!mp->ports && !mp->mglist &&
1443 netif_running(br->dev))
1444 mod_timer(&mp->timer, jiffies);
1445 }
1446 goto out;
1447 }
1448
1449 if (timer_pending(&other_query->timer))
1450 goto out;
1451
1405 if (br->multicast_querier) { 1452 if (br->multicast_querier) {
1406 __br_multicast_send_query(br, port, &mp->addr); 1453 __br_multicast_send_query(br, port, &mp->addr);
1407 1454
@@ -1427,28 +1474,6 @@ br_multicast_leave_group(struct net_bridge *br,
1427 } 1474 }
1428 } 1475 }
1429 1476
1430 if (port && (port->flags & BR_MULTICAST_FAST_LEAVE)) {
1431 struct net_bridge_port_group __rcu **pp;
1432
1433 for (pp = &mp->ports;
1434 (p = mlock_dereference(*pp, br)) != NULL;
1435 pp = &p->next) {
1436 if (p->port != port)
1437 continue;
1438
1439 rcu_assign_pointer(*pp, p->next);
1440 hlist_del_init(&p->mglist);
1441 del_timer(&p->timer);
1442 call_rcu_bh(&p->rcu, br_multicast_free_pg);
1443 br_mdb_notify(br->dev, port, group, RTM_DELMDB);
1444
1445 if (!mp->ports && !mp->mglist &&
1446 netif_running(br->dev))
1447 mod_timer(&mp->timer, jiffies);
1448 }
1449 goto out;
1450 }
1451
1452 now = jiffies; 1477 now = jiffies;
1453 time = now + br->multicast_last_member_count * 1478 time = now + br->multicast_last_member_count *
1454 br->multicast_last_member_interval; 1479 br->multicast_last_member_interval;
diff --git a/net/bridge/br_netlink.c b/net/bridge/br_netlink.c
index 364bdc98bd9b..4d74a0639c4c 100644
--- a/net/bridge/br_netlink.c
+++ b/net/bridge/br_netlink.c
@@ -112,6 +112,8 @@ static inline size_t br_port_info_size(void)
112 + nla_total_size(1) /* IFLA_BRPORT_FAST_LEAVE */ 112 + nla_total_size(1) /* IFLA_BRPORT_FAST_LEAVE */
113 + nla_total_size(1) /* IFLA_BRPORT_LEARNING */ 113 + nla_total_size(1) /* IFLA_BRPORT_LEARNING */
114 + nla_total_size(1) /* IFLA_BRPORT_UNICAST_FLOOD */ 114 + nla_total_size(1) /* IFLA_BRPORT_UNICAST_FLOOD */
115 + nla_total_size(1) /* IFLA_BRPORT_PROXYARP */
116 + nla_total_size(1) /* IFLA_BRPORT_PROXYARP_WIFI */
115 + 0; 117 + 0;
116} 118}
117 119
@@ -506,6 +508,8 @@ static const struct nla_policy br_port_policy[IFLA_BRPORT_MAX + 1] = {
506 [IFLA_BRPORT_FAST_LEAVE]= { .type = NLA_U8 }, 508 [IFLA_BRPORT_FAST_LEAVE]= { .type = NLA_U8 },
507 [IFLA_BRPORT_LEARNING] = { .type = NLA_U8 }, 509 [IFLA_BRPORT_LEARNING] = { .type = NLA_U8 },
508 [IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 }, 510 [IFLA_BRPORT_UNICAST_FLOOD] = { .type = NLA_U8 },
511 [IFLA_BRPORT_PROXYARP] = { .type = NLA_U8 },
512 [IFLA_BRPORT_PROXYARP_WIFI] = { .type = NLA_U8 },
509}; 513};
510 514
511/* Change the state of the port and notify spanning tree */ 515/* Change the state of the port and notify spanning tree */
@@ -693,9 +697,17 @@ static int br_port_slave_changelink(struct net_device *brdev,
693 struct nlattr *tb[], 697 struct nlattr *tb[],
694 struct nlattr *data[]) 698 struct nlattr *data[])
695{ 699{
700 struct net_bridge *br = netdev_priv(brdev);
701 int ret;
702
696 if (!data) 703 if (!data)
697 return 0; 704 return 0;
698 return br_setport(br_port_get_rtnl(dev), data); 705
706 spin_lock_bh(&br->lock);
707 ret = br_setport(br_port_get_rtnl(dev), data);
708 spin_unlock_bh(&br->lock);
709
710 return ret;
699} 711}
700 712
701static int br_port_fill_slave_info(struct sk_buff *skb, 713static int br_port_fill_slave_info(struct sk_buff *skb,
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index b4b6dab9c285..ed74ffaa851f 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -209,8 +209,9 @@ void br_transmit_config(struct net_bridge_port *p)
209 br_send_config_bpdu(p, &bpdu); 209 br_send_config_bpdu(p, &bpdu);
210 p->topology_change_ack = 0; 210 p->topology_change_ack = 0;
211 p->config_pending = 0; 211 p->config_pending = 0;
212 mod_timer(&p->hold_timer, 212 if (p->br->stp_enabled == BR_KERNEL_STP)
213 round_jiffies(jiffies + BR_HOLD_TIME)); 213 mod_timer(&p->hold_timer,
214 round_jiffies(jiffies + BR_HOLD_TIME));
214 } 215 }
215} 216}
216 217
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index a2730e7196cd..4ca449a16132 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -48,7 +48,8 @@ void br_stp_enable_bridge(struct net_bridge *br)
48 struct net_bridge_port *p; 48 struct net_bridge_port *p;
49 49
50 spin_lock_bh(&br->lock); 50 spin_lock_bh(&br->lock);
51 mod_timer(&br->hello_timer, jiffies + br->hello_time); 51 if (br->stp_enabled == BR_KERNEL_STP)
52 mod_timer(&br->hello_timer, jiffies + br->hello_time);
52 mod_timer(&br->gc_timer, jiffies + HZ/10); 53 mod_timer(&br->gc_timer, jiffies + HZ/10);
53 54
54 br_config_bpdu_generation(br); 55 br_config_bpdu_generation(br);
@@ -127,6 +128,7 @@ static void br_stp_start(struct net_bridge *br)
127 int r; 128 int r;
128 char *argv[] = { BR_STP_PROG, br->dev->name, "start", NULL }; 129 char *argv[] = { BR_STP_PROG, br->dev->name, "start", NULL };
129 char *envp[] = { NULL }; 130 char *envp[] = { NULL };
131 struct net_bridge_port *p;
130 132
131 r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC); 133 r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
132 134
@@ -140,6 +142,10 @@ static void br_stp_start(struct net_bridge *br)
140 if (r == 0) { 142 if (r == 0) {
141 br->stp_enabled = BR_USER_STP; 143 br->stp_enabled = BR_USER_STP;
142 br_debug(br, "userspace STP started\n"); 144 br_debug(br, "userspace STP started\n");
145 /* Stop hello and hold timers */
146 del_timer(&br->hello_timer);
147 list_for_each_entry(p, &br->port_list, list)
148 del_timer(&p->hold_timer);
143 } else { 149 } else {
144 br->stp_enabled = BR_KERNEL_STP; 150 br->stp_enabled = BR_KERNEL_STP;
145 br_debug(br, "using kernel STP\n"); 151 br_debug(br, "using kernel STP\n");
@@ -156,12 +162,17 @@ static void br_stp_stop(struct net_bridge *br)
156 int r; 162 int r;
157 char *argv[] = { BR_STP_PROG, br->dev->name, "stop", NULL }; 163 char *argv[] = { BR_STP_PROG, br->dev->name, "stop", NULL };
158 char *envp[] = { NULL }; 164 char *envp[] = { NULL };
165 struct net_bridge_port *p;
159 166
160 if (br->stp_enabled == BR_USER_STP) { 167 if (br->stp_enabled == BR_USER_STP) {
161 r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC); 168 r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC);
162 br_info(br, "userspace STP stopped, return code %d\n", r); 169 br_info(br, "userspace STP stopped, return code %d\n", r);
163 170
164 /* To start timers on any ports left in blocking */ 171 /* To start timers on any ports left in blocking */
172 mod_timer(&br->hello_timer, jiffies + br->hello_time);
173 list_for_each_entry(p, &br->port_list, list)
174 mod_timer(&p->hold_timer,
175 round_jiffies(jiffies + BR_HOLD_TIME));
165 spin_lock_bh(&br->lock); 176 spin_lock_bh(&br->lock);
166 br_port_state_selection(br); 177 br_port_state_selection(br);
167 spin_unlock_bh(&br->lock); 178 spin_unlock_bh(&br->lock);
diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c
index 7caf7fae2d5b..5f0f5af0ec35 100644
--- a/net/bridge/br_stp_timer.c
+++ b/net/bridge/br_stp_timer.c
@@ -40,7 +40,9 @@ static void br_hello_timer_expired(unsigned long arg)
40 if (br->dev->flags & IFF_UP) { 40 if (br->dev->flags & IFF_UP) {
41 br_config_bpdu_generation(br); 41 br_config_bpdu_generation(br);
42 42
43 mod_timer(&br->hello_timer, round_jiffies(jiffies + br->hello_time)); 43 if (br->stp_enabled != BR_USER_STP)
44 mod_timer(&br->hello_timer,
45 round_jiffies(jiffies + br->hello_time));
44 } 46 }
45 spin_unlock(&br->lock); 47 spin_unlock(&br->lock);
46} 48}
diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c
index 3cc71b9f5517..cc858919108e 100644
--- a/net/caif/caif_socket.c
+++ b/net/caif/caif_socket.c
@@ -121,12 +121,13 @@ static void caif_flow_ctrl(struct sock *sk, int mode)
121 * Copied from sock.c:sock_queue_rcv_skb(), but changed so packets are 121 * Copied from sock.c:sock_queue_rcv_skb(), but changed so packets are
122 * not dropped, but CAIF is sending flow off instead. 122 * not dropped, but CAIF is sending flow off instead.
123 */ 123 */
124static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb) 124static void caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
125{ 125{
126 int err; 126 int err;
127 unsigned long flags; 127 unsigned long flags;
128 struct sk_buff_head *list = &sk->sk_receive_queue; 128 struct sk_buff_head *list = &sk->sk_receive_queue;
129 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); 129 struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
130 bool queued = false;
130 131
131 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >= 132 if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
132 (unsigned int)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) { 133 (unsigned int)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) {
@@ -139,7 +140,8 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
139 140
140 err = sk_filter(sk, skb); 141 err = sk_filter(sk, skb);
141 if (err) 142 if (err)
142 return err; 143 goto out;
144
143 if (!sk_rmem_schedule(sk, skb, skb->truesize) && rx_flow_is_on(cf_sk)) { 145 if (!sk_rmem_schedule(sk, skb, skb->truesize) && rx_flow_is_on(cf_sk)) {
144 set_rx_flow_off(cf_sk); 146 set_rx_flow_off(cf_sk);
145 net_dbg_ratelimited("sending flow OFF due to rmem_schedule\n"); 147 net_dbg_ratelimited("sending flow OFF due to rmem_schedule\n");
@@ -147,21 +149,16 @@ static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
147 } 149 }
148 skb->dev = NULL; 150 skb->dev = NULL;
149 skb_set_owner_r(skb, sk); 151 skb_set_owner_r(skb, sk);
150 /* Cache the SKB length before we tack it onto the receive
151 * queue. Once it is added it no longer belongs to us and
152 * may be freed by other threads of control pulling packets
153 * from the queue.
154 */
155 spin_lock_irqsave(&list->lock, flags); 152 spin_lock_irqsave(&list->lock, flags);
156 if (!sock_flag(sk, SOCK_DEAD)) 153 queued = !sock_flag(sk, SOCK_DEAD);
154 if (queued)
157 __skb_queue_tail(list, skb); 155 __skb_queue_tail(list, skb);
158 spin_unlock_irqrestore(&list->lock, flags); 156 spin_unlock_irqrestore(&list->lock, flags);
159 157out:
160 if (!sock_flag(sk, SOCK_DEAD)) 158 if (queued)
161 sk->sk_data_ready(sk); 159 sk->sk_data_ready(sk);
162 else 160 else
163 kfree_skb(skb); 161 kfree_skb(skb);
164 return 0;
165} 162}
166 163
167/* Packet Receive Callback function called from CAIF Stack */ 164/* Packet Receive Callback function called from CAIF Stack */
diff --git a/net/core/datagram.c b/net/core/datagram.c
index b80fb91bb3f7..617088aee21d 100644
--- a/net/core/datagram.c
+++ b/net/core/datagram.c
@@ -131,6 +131,35 @@ out_noerr:
131 goto out; 131 goto out;
132} 132}
133 133
134static struct sk_buff *skb_set_peeked(struct sk_buff *skb)
135{
136 struct sk_buff *nskb;
137
138 if (skb->peeked)
139 return skb;
140
141 /* We have to unshare an skb before modifying it. */
142 if (!skb_shared(skb))
143 goto done;
144
145 nskb = skb_clone(skb, GFP_ATOMIC);
146 if (!nskb)
147 return ERR_PTR(-ENOMEM);
148
149 skb->prev->next = nskb;
150 skb->next->prev = nskb;
151 nskb->prev = skb->prev;
152 nskb->next = skb->next;
153
154 consume_skb(skb);
155 skb = nskb;
156
157done:
158 skb->peeked = 1;
159
160 return skb;
161}
162
134/** 163/**
135 * __skb_recv_datagram - Receive a datagram skbuff 164 * __skb_recv_datagram - Receive a datagram skbuff
136 * @sk: socket 165 * @sk: socket
@@ -165,7 +194,9 @@ out_noerr:
165struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags, 194struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
166 int *peeked, int *off, int *err) 195 int *peeked, int *off, int *err)
167{ 196{
197 struct sk_buff_head *queue = &sk->sk_receive_queue;
168 struct sk_buff *skb, *last; 198 struct sk_buff *skb, *last;
199 unsigned long cpu_flags;
169 long timeo; 200 long timeo;
170 /* 201 /*
171 * Caller is allowed not to check sk->sk_err before skb_recv_datagram() 202 * Caller is allowed not to check sk->sk_err before skb_recv_datagram()
@@ -184,8 +215,6 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
184 * Look at current nfs client by the way... 215 * Look at current nfs client by the way...
185 * However, this function was correct in any case. 8) 216 * However, this function was correct in any case. 8)
186 */ 217 */
187 unsigned long cpu_flags;
188 struct sk_buff_head *queue = &sk->sk_receive_queue;
189 int _off = *off; 218 int _off = *off;
190 219
191 last = (struct sk_buff *)queue; 220 last = (struct sk_buff *)queue;
@@ -199,7 +228,12 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
199 _off -= skb->len; 228 _off -= skb->len;
200 continue; 229 continue;
201 } 230 }
202 skb->peeked = 1; 231
232 skb = skb_set_peeked(skb);
233 error = PTR_ERR(skb);
234 if (IS_ERR(skb))
235 goto unlock_err;
236
203 atomic_inc(&skb->users); 237 atomic_inc(&skb->users);
204 } else 238 } else
205 __skb_unlink(skb, queue); 239 __skb_unlink(skb, queue);
@@ -223,6 +257,8 @@ struct sk_buff *__skb_recv_datagram(struct sock *sk, unsigned int flags,
223 257
224 return NULL; 258 return NULL;
225 259
260unlock_err:
261 spin_unlock_irqrestore(&queue->lock, cpu_flags);
226no_packet: 262no_packet:
227 *err = error; 263 *err = error;
228 return NULL; 264 return NULL;
@@ -622,7 +658,8 @@ __sum16 __skb_checksum_complete_head(struct sk_buff *skb, int len)
622 !skb->csum_complete_sw) 658 !skb->csum_complete_sw)
623 netdev_rx_csum_fault(skb->dev); 659 netdev_rx_csum_fault(skb->dev);
624 } 660 }
625 skb->csum_valid = !sum; 661 if (!skb_shared(skb))
662 skb->csum_valid = !sum;
626 return sum; 663 return sum;
627} 664}
628EXPORT_SYMBOL(__skb_checksum_complete_head); 665EXPORT_SYMBOL(__skb_checksum_complete_head);
@@ -642,11 +679,13 @@ __sum16 __skb_checksum_complete(struct sk_buff *skb)
642 netdev_rx_csum_fault(skb->dev); 679 netdev_rx_csum_fault(skb->dev);
643 } 680 }
644 681
645 /* Save full packet checksum */ 682 if (!skb_shared(skb)) {
646 skb->csum = csum; 683 /* Save full packet checksum */
647 skb->ip_summed = CHECKSUM_COMPLETE; 684 skb->csum = csum;
648 skb->csum_complete_sw = 1; 685 skb->ip_summed = CHECKSUM_COMPLETE;
649 skb->csum_valid = !sum; 686 skb->csum_complete_sw = 1;
687 skb->csum_valid = !sum;
688 }
650 689
651 return sum; 690 return sum;
652} 691}
diff --git a/net/core/dst.c b/net/core/dst.c
index e956ce6d1378..002144bea935 100644
--- a/net/core/dst.c
+++ b/net/core/dst.c
@@ -284,7 +284,9 @@ void dst_release(struct dst_entry *dst)
284 int newrefcnt; 284 int newrefcnt;
285 285
286 newrefcnt = atomic_dec_return(&dst->__refcnt); 286 newrefcnt = atomic_dec_return(&dst->__refcnt);
287 WARN_ON(newrefcnt < 0); 287 if (unlikely(newrefcnt < 0))
288 net_warn_ratelimited("%s: dst:%p refcnt:%d\n",
289 __func__, dst, newrefcnt);
288 if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt) 290 if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt)
289 call_rcu(&dst->rcu_head, dst_destroy_rcu); 291 call_rcu(&dst->rcu_head, dst_destroy_rcu);
290 } 292 }
diff --git a/net/core/netclassid_cgroup.c b/net/core/netclassid_cgroup.c
index 1f2a126f4ffa..6441f47b1a8f 100644
--- a/net/core/netclassid_cgroup.c
+++ b/net/core/netclassid_cgroup.c
@@ -23,7 +23,8 @@ static inline struct cgroup_cls_state *css_cls_state(struct cgroup_subsys_state
23 23
24struct cgroup_cls_state *task_cls_state(struct task_struct *p) 24struct cgroup_cls_state *task_cls_state(struct task_struct *p)
25{ 25{
26 return css_cls_state(task_css(p, net_cls_cgrp_id)); 26 return css_cls_state(task_css_check(p, net_cls_cgrp_id,
27 rcu_read_lock_bh_held()));
27} 28}
28EXPORT_SYMBOL_GPL(task_cls_state); 29EXPORT_SYMBOL_GPL(task_cls_state);
29 30
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index 1ebdf1c0d118..1cbd209192ea 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -3514,8 +3514,6 @@ static int pktgen_thread_worker(void *arg)
3514 3514
3515 set_freezable(); 3515 set_freezable();
3516 3516
3517 __set_current_state(TASK_RUNNING);
3518
3519 while (!kthread_should_stop()) { 3517 while (!kthread_should_stop()) {
3520 pkt_dev = next_to_run(t); 3518 pkt_dev = next_to_run(t);
3521 3519
@@ -3560,7 +3558,6 @@ static int pktgen_thread_worker(void *arg)
3560 3558
3561 try_to_freeze(); 3559 try_to_freeze();
3562 } 3560 }
3563 set_current_state(TASK_INTERRUPTIBLE);
3564 3561
3565 pr_debug("%s stopping all device\n", t->tsk->comm); 3562 pr_debug("%s stopping all device\n", t->tsk->comm);
3566 pktgen_stop(t); 3563 pktgen_stop(t);
diff --git a/net/core/request_sock.c b/net/core/request_sock.c
index 87b22c0bc08c..b42f0e26f89e 100644
--- a/net/core/request_sock.c
+++ b/net/core/request_sock.c
@@ -103,10 +103,16 @@ void reqsk_queue_destroy(struct request_sock_queue *queue)
103 spin_lock_bh(&queue->syn_wait_lock); 103 spin_lock_bh(&queue->syn_wait_lock);
104 while ((req = lopt->syn_table[i]) != NULL) { 104 while ((req = lopt->syn_table[i]) != NULL) {
105 lopt->syn_table[i] = req->dl_next; 105 lopt->syn_table[i] = req->dl_next;
106 /* Because of following del_timer_sync(),
107 * we must release the spinlock here
108 * or risk a dead lock.
109 */
110 spin_unlock_bh(&queue->syn_wait_lock);
106 atomic_inc(&lopt->qlen_dec); 111 atomic_inc(&lopt->qlen_dec);
107 if (del_timer(&req->rsk_timer)) 112 if (del_timer_sync(&req->rsk_timer))
108 reqsk_put(req); 113 reqsk_put(req);
109 reqsk_put(req); 114 reqsk_put(req);
115 spin_lock_bh(&queue->syn_wait_lock);
110 } 116 }
111 spin_unlock_bh(&queue->syn_wait_lock); 117 spin_unlock_bh(&queue->syn_wait_lock);
112 } 118 }
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 9e433d58d265..dc004b1e1f85 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1804,10 +1804,13 @@ static int do_setlink(const struct sk_buff *skb,
1804 goto errout; 1804 goto errout;
1805 1805
1806 nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) { 1806 nla_for_each_nested(attr, tb[IFLA_VF_PORTS], rem) {
1807 if (nla_type(attr) != IFLA_VF_PORT) 1807 if (nla_type(attr) != IFLA_VF_PORT ||
1808 continue; 1808 nla_len(attr) < NLA_HDRLEN) {
1809 err = nla_parse_nested(port, IFLA_PORT_MAX, 1809 err = -EINVAL;
1810 attr, ifla_port_policy); 1810 goto errout;
1811 }
1812 err = nla_parse_nested(port, IFLA_PORT_MAX, attr,
1813 ifla_port_policy);
1811 if (err < 0) 1814 if (err < 0)
1812 goto errout; 1815 goto errout;
1813 if (!port[IFLA_PORT_VF]) { 1816 if (!port[IFLA_PORT_VF]) {
diff --git a/net/core/sock.c b/net/core/sock.c
index 08f16db46070..193901d09757 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1497,7 +1497,8 @@ struct sock *sk_clone_lock(const struct sock *sk, const gfp_t priority)
1497 sock_copy(newsk, sk); 1497 sock_copy(newsk, sk);
1498 1498
1499 /* SANITY */ 1499 /* SANITY */
1500 get_net(sock_net(newsk)); 1500 if (likely(newsk->sk_net_refcnt))
1501 get_net(sock_net(newsk));
1501 sk_node_init(&newsk->sk_node); 1502 sk_node_init(&newsk->sk_node);
1502 sock_lock_init(newsk); 1503 sock_lock_init(newsk);
1503 bh_lock_sock(newsk); 1504 bh_lock_sock(newsk);
@@ -1967,20 +1968,21 @@ static void __release_sock(struct sock *sk)
1967 * sk_wait_data - wait for data to arrive at sk_receive_queue 1968 * sk_wait_data - wait for data to arrive at sk_receive_queue
1968 * @sk: sock to wait on 1969 * @sk: sock to wait on
1969 * @timeo: for how long 1970 * @timeo: for how long
1971 * @skb: last skb seen on sk_receive_queue
1970 * 1972 *
1971 * Now socket state including sk->sk_err is changed only under lock, 1973 * Now socket state including sk->sk_err is changed only under lock,
1972 * hence we may omit checks after joining wait queue. 1974 * hence we may omit checks after joining wait queue.
1973 * We check receive queue before schedule() only as optimization; 1975 * We check receive queue before schedule() only as optimization;
1974 * it is very likely that release_sock() added new data. 1976 * it is very likely that release_sock() added new data.
1975 */ 1977 */
1976int sk_wait_data(struct sock *sk, long *timeo) 1978int sk_wait_data(struct sock *sk, long *timeo, const struct sk_buff *skb)
1977{ 1979{
1978 int rc; 1980 int rc;
1979 DEFINE_WAIT(wait); 1981 DEFINE_WAIT(wait);
1980 1982
1981 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE); 1983 prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
1982 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 1984 set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1983 rc = sk_wait_event(sk, timeo, !skb_queue_empty(&sk->sk_receive_queue)); 1985 rc = sk_wait_event(sk, timeo, skb_peek_tail(&sk->sk_receive_queue) != skb);
1984 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags); 1986 clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
1985 finish_wait(sk_sleep(sk), &wait); 1987 finish_wait(sk_sleep(sk), &wait);
1986 return rc; 1988 return rc;
diff --git a/net/dccp/proto.c b/net/dccp/proto.c
index 52a94016526d..b5cf13a28009 100644
--- a/net/dccp/proto.c
+++ b/net/dccp/proto.c
@@ -886,7 +886,7 @@ verify_sock_status:
886 break; 886 break;
887 } 887 }
888 888
889 sk_wait_data(sk, &timeo); 889 sk_wait_data(sk, &timeo, NULL);
890 continue; 890 continue;
891 found_ok_skb: 891 found_ok_skb:
892 if (len > skb->len) 892 if (len > skb->len)
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 0917123790ea..35c47ddd04f0 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -756,7 +756,8 @@ static int dsa_slave_phy_connect(struct dsa_slave_priv *p,
756 return -ENODEV; 756 return -ENODEV;
757 757
758 /* Use already configured phy mode */ 758 /* Use already configured phy mode */
759 p->phy_interface = p->phy->interface; 759 if (p->phy_interface == PHY_INTERFACE_MODE_NA)
760 p->phy_interface = p->phy->interface;
760 phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link, 761 phy_connect_direct(slave_dev, p->phy, dsa_slave_adjust_link,
761 p->phy_interface); 762 p->phy_interface);
762 763
diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c
index f46e4d1306f2..214d44aef35b 100644
--- a/net/ieee802154/6lowpan/reassembly.c
+++ b/net/ieee802154/6lowpan/reassembly.c
@@ -207,7 +207,7 @@ found:
207 } else { 207 } else {
208 fq->q.meat += skb->len; 208 fq->q.meat += skb->len;
209 } 209 }
210 add_frag_mem_limit(&fq->q, skb->truesize); 210 add_frag_mem_limit(fq->q.net, skb->truesize);
211 211
212 if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && 212 if (fq->q.flags == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
213 fq->q.meat == fq->q.len) { 213 fq->q.meat == fq->q.len) {
@@ -287,7 +287,7 @@ static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
287 clone->data_len = clone->len; 287 clone->data_len = clone->len;
288 head->data_len -= clone->len; 288 head->data_len -= clone->len;
289 head->len -= clone->len; 289 head->len -= clone->len;
290 add_frag_mem_limit(&fq->q, clone->truesize); 290 add_frag_mem_limit(fq->q.net, clone->truesize);
291 } 291 }
292 292
293 WARN_ON(head == NULL); 293 WARN_ON(head == NULL);
@@ -310,7 +310,7 @@ static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *prev,
310 } 310 }
311 fp = next; 311 fp = next;
312 } 312 }
313 sub_frag_mem_limit(&fq->q, sum_truesize); 313 sub_frag_mem_limit(fq->q.net, sum_truesize);
314 314
315 head->next = NULL; 315 head->next = NULL;
316 head->dev = dev; 316 head->dev = dev;
diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c
index 933a92820d26..6c8b1fbafce8 100644
--- a/net/ipv4/arp.c
+++ b/net/ipv4/arp.c
@@ -1017,14 +1017,16 @@ static int arp_req_get(struct arpreq *r, struct net_device *dev)
1017 1017
1018 neigh = neigh_lookup(&arp_tbl, &ip, dev); 1018 neigh = neigh_lookup(&arp_tbl, &ip, dev);
1019 if (neigh) { 1019 if (neigh) {
1020 read_lock_bh(&neigh->lock); 1020 if (!(neigh->nud_state & NUD_NOARP)) {
1021 memcpy(r->arp_ha.sa_data, neigh->ha, dev->addr_len); 1021 read_lock_bh(&neigh->lock);
1022 r->arp_flags = arp_state_to_flags(neigh); 1022 memcpy(r->arp_ha.sa_data, neigh->ha, dev->addr_len);
1023 read_unlock_bh(&neigh->lock); 1023 r->arp_flags = arp_state_to_flags(neigh);
1024 r->arp_ha.sa_family = dev->type; 1024 read_unlock_bh(&neigh->lock);
1025 strlcpy(r->arp_dev, dev->name, sizeof(r->arp_dev)); 1025 r->arp_ha.sa_family = dev->type;
1026 strlcpy(r->arp_dev, dev->name, sizeof(r->arp_dev));
1027 err = 0;
1028 }
1026 neigh_release(neigh); 1029 neigh_release(neigh);
1027 err = 0;
1028 } 1030 }
1029 return err; 1031 return err;
1030} 1032}
diff --git a/net/ipv4/datagram.c b/net/ipv4/datagram.c
index 90c0e8386116..574fad9cca05 100644
--- a/net/ipv4/datagram.c
+++ b/net/ipv4/datagram.c
@@ -20,7 +20,7 @@
20#include <net/route.h> 20#include <net/route.h>
21#include <net/tcp_states.h> 21#include <net/tcp_states.h>
22 22
23int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) 23int __ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
24{ 24{
25 struct inet_sock *inet = inet_sk(sk); 25 struct inet_sock *inet = inet_sk(sk);
26 struct sockaddr_in *usin = (struct sockaddr_in *) uaddr; 26 struct sockaddr_in *usin = (struct sockaddr_in *) uaddr;
@@ -39,8 +39,6 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
39 39
40 sk_dst_reset(sk); 40 sk_dst_reset(sk);
41 41
42 lock_sock(sk);
43
44 oif = sk->sk_bound_dev_if; 42 oif = sk->sk_bound_dev_if;
45 saddr = inet->inet_saddr; 43 saddr = inet->inet_saddr;
46 if (ipv4_is_multicast(usin->sin_addr.s_addr)) { 44 if (ipv4_is_multicast(usin->sin_addr.s_addr)) {
@@ -82,9 +80,19 @@ int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
82 sk_dst_set(sk, &rt->dst); 80 sk_dst_set(sk, &rt->dst);
83 err = 0; 81 err = 0;
84out: 82out:
85 release_sock(sk);
86 return err; 83 return err;
87} 84}
85EXPORT_SYMBOL(__ip4_datagram_connect);
86
87int ip4_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
88{
89 int res;
90
91 lock_sock(sk);
92 res = __ip4_datagram_connect(sk, uaddr, addr_len);
93 release_sock(sk);
94 return res;
95}
88EXPORT_SYMBOL(ip4_datagram_connect); 96EXPORT_SYMBOL(ip4_datagram_connect);
89 97
90/* Because UDP xmit path can manipulate sk_dst_cache without holding 98/* Because UDP xmit path can manipulate sk_dst_cache without holding
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index e813196c91c7..2d9cb1748f81 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -882,7 +882,6 @@ static int inet_rtm_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh)
882 queue_delayed_work(system_power_efficient_wq, 882 queue_delayed_work(system_power_efficient_wq,
883 &check_lifetime_work, 0); 883 &check_lifetime_work, 0);
884 rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid); 884 rtmsg_ifa(RTM_NEWADDR, ifa, nlh, NETLINK_CB(skb).portid);
885 blocking_notifier_call_chain(&inetaddr_chain, NETDEV_UP, ifa);
886 } 885 }
887 return 0; 886 return 0;
888} 887}
diff --git a/net/ipv4/fib_lookup.h b/net/ipv4/fib_lookup.h
index c6211ed60b03..9c02920725db 100644
--- a/net/ipv4/fib_lookup.h
+++ b/net/ipv4/fib_lookup.h
@@ -13,6 +13,7 @@ struct fib_alias {
13 u8 fa_state; 13 u8 fa_state;
14 u8 fa_slen; 14 u8 fa_slen;
15 u32 tb_id; 15 u32 tb_id;
16 s16 fa_default;
16 struct rcu_head rcu; 17 struct rcu_head rcu;
17}; 18};
18 19
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index c7358ea4ae93..3a06586b170c 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -1202,23 +1202,40 @@ int fib_sync_down_dev(struct net_device *dev, unsigned long event)
1202} 1202}
1203 1203
1204/* Must be invoked inside of an RCU protected region. */ 1204/* Must be invoked inside of an RCU protected region. */
1205void fib_select_default(struct fib_result *res) 1205void fib_select_default(const struct flowi4 *flp, struct fib_result *res)
1206{ 1206{
1207 struct fib_info *fi = NULL, *last_resort = NULL; 1207 struct fib_info *fi = NULL, *last_resort = NULL;
1208 struct hlist_head *fa_head = res->fa_head; 1208 struct hlist_head *fa_head = res->fa_head;
1209 struct fib_table *tb = res->table; 1209 struct fib_table *tb = res->table;
1210 u8 slen = 32 - res->prefixlen;
1210 int order = -1, last_idx = -1; 1211 int order = -1, last_idx = -1;
1211 struct fib_alias *fa; 1212 struct fib_alias *fa, *fa1 = NULL;
1213 u32 last_prio = res->fi->fib_priority;
1214 u8 last_tos = 0;
1212 1215
1213 hlist_for_each_entry_rcu(fa, fa_head, fa_list) { 1216 hlist_for_each_entry_rcu(fa, fa_head, fa_list) {
1214 struct fib_info *next_fi = fa->fa_info; 1217 struct fib_info *next_fi = fa->fa_info;
1215 1218
1219 if (fa->fa_slen != slen)
1220 continue;
1221 if (fa->fa_tos && fa->fa_tos != flp->flowi4_tos)
1222 continue;
1223 if (fa->tb_id != tb->tb_id)
1224 continue;
1225 if (next_fi->fib_priority > last_prio &&
1226 fa->fa_tos == last_tos) {
1227 if (last_tos)
1228 continue;
1229 break;
1230 }
1231 if (next_fi->fib_flags & RTNH_F_DEAD)
1232 continue;
1233 last_tos = fa->fa_tos;
1234 last_prio = next_fi->fib_priority;
1235
1216 if (next_fi->fib_scope != res->scope || 1236 if (next_fi->fib_scope != res->scope ||
1217 fa->fa_type != RTN_UNICAST) 1237 fa->fa_type != RTN_UNICAST)
1218 continue; 1238 continue;
1219
1220 if (next_fi->fib_priority > res->fi->fib_priority)
1221 break;
1222 if (!next_fi->fib_nh[0].nh_gw || 1239 if (!next_fi->fib_nh[0].nh_gw ||
1223 next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK) 1240 next_fi->fib_nh[0].nh_scope != RT_SCOPE_LINK)
1224 continue; 1241 continue;
@@ -1228,10 +1245,11 @@ void fib_select_default(struct fib_result *res)
1228 if (!fi) { 1245 if (!fi) {
1229 if (next_fi != res->fi) 1246 if (next_fi != res->fi)
1230 break; 1247 break;
1248 fa1 = fa;
1231 } else if (!fib_detect_death(fi, order, &last_resort, 1249 } else if (!fib_detect_death(fi, order, &last_resort,
1232 &last_idx, tb->tb_default)) { 1250 &last_idx, fa1->fa_default)) {
1233 fib_result_assign(res, fi); 1251 fib_result_assign(res, fi);
1234 tb->tb_default = order; 1252 fa1->fa_default = order;
1235 goto out; 1253 goto out;
1236 } 1254 }
1237 fi = next_fi; 1255 fi = next_fi;
@@ -1239,20 +1257,21 @@ void fib_select_default(struct fib_result *res)
1239 } 1257 }
1240 1258
1241 if (order <= 0 || !fi) { 1259 if (order <= 0 || !fi) {
1242 tb->tb_default = -1; 1260 if (fa1)
1261 fa1->fa_default = -1;
1243 goto out; 1262 goto out;
1244 } 1263 }
1245 1264
1246 if (!fib_detect_death(fi, order, &last_resort, &last_idx, 1265 if (!fib_detect_death(fi, order, &last_resort, &last_idx,
1247 tb->tb_default)) { 1266 fa1->fa_default)) {
1248 fib_result_assign(res, fi); 1267 fib_result_assign(res, fi);
1249 tb->tb_default = order; 1268 fa1->fa_default = order;
1250 goto out; 1269 goto out;
1251 } 1270 }
1252 1271
1253 if (last_idx >= 0) 1272 if (last_idx >= 0)
1254 fib_result_assign(res, last_resort); 1273 fib_result_assign(res, last_resort);
1255 tb->tb_default = last_idx; 1274 fa1->fa_default = last_idx;
1256out: 1275out:
1257 return; 1276 return;
1258} 1277}
diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c
index 15d32612e3c6..37c4bb89a708 100644
--- a/net/ipv4/fib_trie.c
+++ b/net/ipv4/fib_trie.c
@@ -1171,6 +1171,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
1171 new_fa->fa_state = state & ~FA_S_ACCESSED; 1171 new_fa->fa_state = state & ~FA_S_ACCESSED;
1172 new_fa->fa_slen = fa->fa_slen; 1172 new_fa->fa_slen = fa->fa_slen;
1173 new_fa->tb_id = tb->tb_id; 1173 new_fa->tb_id = tb->tb_id;
1174 new_fa->fa_default = -1;
1174 1175
1175 err = switchdev_fib_ipv4_add(key, plen, fi, 1176 err = switchdev_fib_ipv4_add(key, plen, fi,
1176 new_fa->fa_tos, 1177 new_fa->fa_tos,
@@ -1222,6 +1223,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
1222 new_fa->fa_state = 0; 1223 new_fa->fa_state = 0;
1223 new_fa->fa_slen = slen; 1224 new_fa->fa_slen = slen;
1224 new_fa->tb_id = tb->tb_id; 1225 new_fa->tb_id = tb->tb_id;
1226 new_fa->fa_default = -1;
1225 1227
1226 /* (Optionally) offload fib entry to switch hardware. */ 1228 /* (Optionally) offload fib entry to switch hardware. */
1227 err = switchdev_fib_ipv4_add(key, plen, fi, tos, cfg->fc_type, 1229 err = switchdev_fib_ipv4_add(key, plen, fi, tos, cfg->fc_type,
@@ -1791,8 +1793,6 @@ void fib_table_flush_external(struct fib_table *tb)
1791 if (hlist_empty(&n->leaf)) { 1793 if (hlist_empty(&n->leaf)) {
1792 put_child_root(pn, n->key, NULL); 1794 put_child_root(pn, n->key, NULL);
1793 node_free(n); 1795 node_free(n);
1794 } else {
1795 leaf_pull_suffix(pn, n);
1796 } 1796 }
1797 } 1797 }
1798} 1798}
@@ -1862,8 +1862,6 @@ int fib_table_flush(struct fib_table *tb)
1862 if (hlist_empty(&n->leaf)) { 1862 if (hlist_empty(&n->leaf)) {
1863 put_child_root(pn, n->key, NULL); 1863 put_child_root(pn, n->key, NULL);
1864 node_free(n); 1864 node_free(n);
1865 } else {
1866 leaf_pull_suffix(pn, n);
1867 } 1865 }
1868 } 1866 }
1869 1867
@@ -1990,7 +1988,6 @@ struct fib_table *fib_trie_table(u32 id, struct fib_table *alias)
1990 return NULL; 1988 return NULL;
1991 1989
1992 tb->tb_id = id; 1990 tb->tb_id = id;
1993 tb->tb_default = -1;
1994 tb->tb_num_default = 0; 1991 tb->tb_num_default = 0;
1995 tb->tb_data = (alias ? alias->__data : tb->__data); 1992 tb->tb_data = (alias ? alias->__data : tb->__data);
1996 1993
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 60021d0d9326..05e3145f7dc3 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -593,7 +593,7 @@ static bool reqsk_queue_unlink(struct request_sock_queue *queue,
593 } 593 }
594 594
595 spin_unlock(&queue->syn_wait_lock); 595 spin_unlock(&queue->syn_wait_lock);
596 if (del_timer(&req->rsk_timer)) 596 if (del_timer_sync(&req->rsk_timer))
597 reqsk_put(req); 597 reqsk_put(req);
598 return found; 598 return found;
599} 599}
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index 5e346a082e5f..d0a7c0319e3d 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -131,34 +131,22 @@ inet_evict_bucket(struct inet_frags *f, struct inet_frag_bucket *hb)
131 unsigned int evicted = 0; 131 unsigned int evicted = 0;
132 HLIST_HEAD(expired); 132 HLIST_HEAD(expired);
133 133
134evict_again:
135 spin_lock(&hb->chain_lock); 134 spin_lock(&hb->chain_lock);
136 135
137 hlist_for_each_entry_safe(fq, n, &hb->chain, list) { 136 hlist_for_each_entry_safe(fq, n, &hb->chain, list) {
138 if (!inet_fragq_should_evict(fq)) 137 if (!inet_fragq_should_evict(fq))
139 continue; 138 continue;
140 139
141 if (!del_timer(&fq->timer)) { 140 if (!del_timer(&fq->timer))
142 /* q expiring right now thus increment its refcount so 141 continue;
143 * it won't be freed under us and wait until the timer
144 * has finished executing then destroy it
145 */
146 atomic_inc(&fq->refcnt);
147 spin_unlock(&hb->chain_lock);
148 del_timer_sync(&fq->timer);
149 inet_frag_put(fq, f);
150 goto evict_again;
151 }
152 142
153 fq->flags |= INET_FRAG_EVICTED; 143 hlist_add_head(&fq->list_evictor, &expired);
154 hlist_del(&fq->list);
155 hlist_add_head(&fq->list, &expired);
156 ++evicted; 144 ++evicted;
157 } 145 }
158 146
159 spin_unlock(&hb->chain_lock); 147 spin_unlock(&hb->chain_lock);
160 148
161 hlist_for_each_entry_safe(fq, n, &expired, list) 149 hlist_for_each_entry_safe(fq, n, &expired, list_evictor)
162 f->frag_expire((unsigned long) fq); 150 f->frag_expire((unsigned long) fq);
163 151
164 return evicted; 152 return evicted;
@@ -240,18 +228,20 @@ void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
240 int i; 228 int i;
241 229
242 nf->low_thresh = 0; 230 nf->low_thresh = 0;
243 local_bh_disable();
244 231
245evict_again: 232evict_again:
233 local_bh_disable();
246 seq = read_seqbegin(&f->rnd_seqlock); 234 seq = read_seqbegin(&f->rnd_seqlock);
247 235
248 for (i = 0; i < INETFRAGS_HASHSZ ; i++) 236 for (i = 0; i < INETFRAGS_HASHSZ ; i++)
249 inet_evict_bucket(f, &f->hash[i]); 237 inet_evict_bucket(f, &f->hash[i]);
250 238
251 if (read_seqretry(&f->rnd_seqlock, seq))
252 goto evict_again;
253
254 local_bh_enable(); 239 local_bh_enable();
240 cond_resched();
241
242 if (read_seqretry(&f->rnd_seqlock, seq) ||
243 percpu_counter_sum(&nf->mem))
244 goto evict_again;
255 245
256 percpu_counter_destroy(&nf->mem); 246 percpu_counter_destroy(&nf->mem);
257} 247}
@@ -284,8 +274,8 @@ static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
284 struct inet_frag_bucket *hb; 274 struct inet_frag_bucket *hb;
285 275
286 hb = get_frag_bucket_locked(fq, f); 276 hb = get_frag_bucket_locked(fq, f);
287 if (!(fq->flags & INET_FRAG_EVICTED)) 277 hlist_del(&fq->list);
288 hlist_del(&fq->list); 278 fq->flags |= INET_FRAG_COMPLETE;
289 spin_unlock(&hb->chain_lock); 279 spin_unlock(&hb->chain_lock);
290} 280}
291 281
@@ -297,7 +287,6 @@ void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
297 if (!(fq->flags & INET_FRAG_COMPLETE)) { 287 if (!(fq->flags & INET_FRAG_COMPLETE)) {
298 fq_unlink(fq, f); 288 fq_unlink(fq, f);
299 atomic_dec(&fq->refcnt); 289 atomic_dec(&fq->refcnt);
300 fq->flags |= INET_FRAG_COMPLETE;
301 } 290 }
302} 291}
303EXPORT_SYMBOL(inet_frag_kill); 292EXPORT_SYMBOL(inet_frag_kill);
@@ -330,11 +319,12 @@ void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f)
330 fp = xp; 319 fp = xp;
331 } 320 }
332 sum = sum_truesize + f->qsize; 321 sum = sum_truesize + f->qsize;
333 sub_frag_mem_limit(q, sum);
334 322
335 if (f->destructor) 323 if (f->destructor)
336 f->destructor(q); 324 f->destructor(q);
337 kmem_cache_free(f->frags_cachep, q); 325 kmem_cache_free(f->frags_cachep, q);
326
327 sub_frag_mem_limit(nf, sum);
338} 328}
339EXPORT_SYMBOL(inet_frag_destroy); 329EXPORT_SYMBOL(inet_frag_destroy);
340 330
@@ -390,7 +380,7 @@ static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
390 380
391 q->net = nf; 381 q->net = nf;
392 f->constructor(q, arg); 382 f->constructor(q, arg);
393 add_frag_mem_limit(q, f->qsize); 383 add_frag_mem_limit(nf, f->qsize);
394 384
395 setup_timer(&q->timer, f->frag_expire, (unsigned long)q); 385 setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
396 spin_lock_init(&q->lock); 386 spin_lock_init(&q->lock);
diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
index 5f9b063bbe8a..0cb9165421d4 100644
--- a/net/ipv4/inet_hashtables.c
+++ b/net/ipv4/inet_hashtables.c
@@ -624,22 +624,21 @@ EXPORT_SYMBOL_GPL(inet_hashinfo_init);
624 624
625int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo) 625int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
626{ 626{
627 unsigned int locksz = sizeof(spinlock_t);
627 unsigned int i, nblocks = 1; 628 unsigned int i, nblocks = 1;
628 629
629 if (sizeof(spinlock_t) != 0) { 630 if (locksz != 0) {
630 /* allocate 2 cache lines or at least one spinlock per cpu */ 631 /* allocate 2 cache lines or at least one spinlock per cpu */
631 nblocks = max_t(unsigned int, 632 nblocks = max(2U * L1_CACHE_BYTES / locksz, 1U);
632 2 * L1_CACHE_BYTES / sizeof(spinlock_t),
633 1);
634 nblocks = roundup_pow_of_two(nblocks * num_possible_cpus()); 633 nblocks = roundup_pow_of_two(nblocks * num_possible_cpus());
635 634
636 /* no more locks than number of hash buckets */ 635 /* no more locks than number of hash buckets */
637 nblocks = min(nblocks, hashinfo->ehash_mask + 1); 636 nblocks = min(nblocks, hashinfo->ehash_mask + 1);
638 637
639 hashinfo->ehash_locks = kmalloc_array(nblocks, sizeof(spinlock_t), 638 hashinfo->ehash_locks = kmalloc_array(nblocks, locksz,
640 GFP_KERNEL | __GFP_NOWARN); 639 GFP_KERNEL | __GFP_NOWARN);
641 if (!hashinfo->ehash_locks) 640 if (!hashinfo->ehash_locks)
642 hashinfo->ehash_locks = vmalloc(nblocks * sizeof(spinlock_t)); 641 hashinfo->ehash_locks = vmalloc(nblocks * locksz);
643 642
644 if (!hashinfo->ehash_locks) 643 if (!hashinfo->ehash_locks)
645 return -ENOMEM; 644 return -ENOMEM;
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index a50dc6d408d1..921138f6c97c 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -202,7 +202,7 @@ static void ip_expire(unsigned long arg)
202 ipq_kill(qp); 202 ipq_kill(qp);
203 IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS); 203 IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
204 204
205 if (!(qp->q.flags & INET_FRAG_EVICTED)) { 205 if (!inet_frag_evicting(&qp->q)) {
206 struct sk_buff *head = qp->q.fragments; 206 struct sk_buff *head = qp->q.fragments;
207 const struct iphdr *iph; 207 const struct iphdr *iph;
208 int err; 208 int err;
@@ -309,7 +309,7 @@ static int ip_frag_reinit(struct ipq *qp)
309 kfree_skb(fp); 309 kfree_skb(fp);
310 fp = xp; 310 fp = xp;
311 } while (fp); 311 } while (fp);
312 sub_frag_mem_limit(&qp->q, sum_truesize); 312 sub_frag_mem_limit(qp->q.net, sum_truesize);
313 313
314 qp->q.flags = 0; 314 qp->q.flags = 0;
315 qp->q.len = 0; 315 qp->q.len = 0;
@@ -351,7 +351,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
351 ihl = ip_hdrlen(skb); 351 ihl = ip_hdrlen(skb);
352 352
353 /* Determine the position of this fragment. */ 353 /* Determine the position of this fragment. */
354 end = offset + skb->len - ihl; 354 end = offset + skb->len - skb_network_offset(skb) - ihl;
355 err = -EINVAL; 355 err = -EINVAL;
356 356
357 /* Is this the final fragment? */ 357 /* Is this the final fragment? */
@@ -381,7 +381,7 @@ static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
381 goto err; 381 goto err;
382 382
383 err = -ENOMEM; 383 err = -ENOMEM;
384 if (!pskb_pull(skb, ihl)) 384 if (!pskb_pull(skb, skb_network_offset(skb) + ihl))
385 goto err; 385 goto err;
386 386
387 err = pskb_trim_rcsum(skb, end - offset); 387 err = pskb_trim_rcsum(skb, end - offset);
@@ -455,7 +455,7 @@ found:
455 qp->q.fragments = next; 455 qp->q.fragments = next;
456 456
457 qp->q.meat -= free_it->len; 457 qp->q.meat -= free_it->len;
458 sub_frag_mem_limit(&qp->q, free_it->truesize); 458 sub_frag_mem_limit(qp->q.net, free_it->truesize);
459 kfree_skb(free_it); 459 kfree_skb(free_it);
460 } 460 }
461 } 461 }
@@ -479,7 +479,7 @@ found:
479 qp->q.stamp = skb->tstamp; 479 qp->q.stamp = skb->tstamp;
480 qp->q.meat += skb->len; 480 qp->q.meat += skb->len;
481 qp->ecn |= ecn; 481 qp->ecn |= ecn;
482 add_frag_mem_limit(&qp->q, skb->truesize); 482 add_frag_mem_limit(qp->q.net, skb->truesize);
483 if (offset == 0) 483 if (offset == 0)
484 qp->q.flags |= INET_FRAG_FIRST_IN; 484 qp->q.flags |= INET_FRAG_FIRST_IN;
485 485
@@ -587,7 +587,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
587 head->len -= clone->len; 587 head->len -= clone->len;
588 clone->csum = 0; 588 clone->csum = 0;
589 clone->ip_summed = head->ip_summed; 589 clone->ip_summed = head->ip_summed;
590 add_frag_mem_limit(&qp->q, clone->truesize); 590 add_frag_mem_limit(qp->q.net, clone->truesize);
591 } 591 }
592 592
593 skb_push(head, head->data - skb_network_header(head)); 593 skb_push(head, head->data - skb_network_header(head));
@@ -615,7 +615,7 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
615 } 615 }
616 fp = next; 616 fp = next;
617 } 617 }
618 sub_frag_mem_limit(&qp->q, sum_truesize); 618 sub_frag_mem_limit(qp->q.net, sum_truesize);
619 619
620 head->next = NULL; 620 head->next = NULL;
621 head->dev = dev; 621 head->dev = dev;
@@ -641,6 +641,8 @@ static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
641 iph->frag_off = 0; 641 iph->frag_off = 0;
642 } 642 }
643 643
644 ip_send_check(iph);
645
644 IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS); 646 IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
645 qp->q.fragments = NULL; 647 qp->q.fragments = NULL;
646 qp->q.fragments_tail = NULL; 648 qp->q.fragments_tail = NULL;
diff --git a/net/ipv4/netfilter/ipt_SYNPROXY.c b/net/ipv4/netfilter/ipt_SYNPROXY.c
index fe8cc183411e..95ea633e8356 100644
--- a/net/ipv4/netfilter/ipt_SYNPROXY.c
+++ b/net/ipv4/netfilter/ipt_SYNPROXY.c
@@ -226,7 +226,8 @@ synproxy_send_client_ack(const struct synproxy_net *snet,
226 226
227 synproxy_build_options(nth, opts); 227 synproxy_build_options(nth, opts);
228 228
229 synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size); 229 synproxy_send_tcp(skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
230 niph, nth, tcp_hdr_size);
230} 231}
231 232
232static bool 233static bool
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index d0362a2de3d3..e681b852ced1 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -2176,7 +2176,7 @@ struct rtable *__ip_route_output_key(struct net *net, struct flowi4 *fl4)
2176 if (!res.prefixlen && 2176 if (!res.prefixlen &&
2177 res.table->tb_num_default > 1 && 2177 res.table->tb_num_default > 1 &&
2178 res.type == RTN_UNICAST && !fl4->flowi4_oif) 2178 res.type == RTN_UNICAST && !fl4->flowi4_oif)
2179 fib_select_default(&res); 2179 fib_select_default(fl4, &res);
2180 2180
2181 if (!fl4->saddr) 2181 if (!fl4->saddr)
2182 fl4->saddr = FIB_RES_PREFSRC(net, res); 2182 fl4->saddr = FIB_RES_PREFSRC(net, res);
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 7f4056785acc..45534a5ab430 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -780,7 +780,7 @@ ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
780 ret = -EAGAIN; 780 ret = -EAGAIN;
781 break; 781 break;
782 } 782 }
783 sk_wait_data(sk, &timeo); 783 sk_wait_data(sk, &timeo, NULL);
784 if (signal_pending(current)) { 784 if (signal_pending(current)) {
785 ret = sock_intr_errno(timeo); 785 ret = sock_intr_errno(timeo);
786 break; 786 break;
@@ -1575,7 +1575,7 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
1575 int target; /* Read at least this many bytes */ 1575 int target; /* Read at least this many bytes */
1576 long timeo; 1576 long timeo;
1577 struct task_struct *user_recv = NULL; 1577 struct task_struct *user_recv = NULL;
1578 struct sk_buff *skb; 1578 struct sk_buff *skb, *last;
1579 u32 urg_hole = 0; 1579 u32 urg_hole = 0;
1580 1580
1581 if (unlikely(flags & MSG_ERRQUEUE)) 1581 if (unlikely(flags & MSG_ERRQUEUE))
@@ -1635,7 +1635,9 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
1635 1635
1636 /* Next get a buffer. */ 1636 /* Next get a buffer. */
1637 1637
1638 last = skb_peek_tail(&sk->sk_receive_queue);
1638 skb_queue_walk(&sk->sk_receive_queue, skb) { 1639 skb_queue_walk(&sk->sk_receive_queue, skb) {
1640 last = skb;
1639 /* Now that we have two receive queues this 1641 /* Now that we have two receive queues this
1640 * shouldn't happen. 1642 * shouldn't happen.
1641 */ 1643 */
@@ -1754,8 +1756,9 @@ int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
1754 /* Do not sleep, just process backlog. */ 1756 /* Do not sleep, just process backlog. */
1755 release_sock(sk); 1757 release_sock(sk);
1756 lock_sock(sk); 1758 lock_sock(sk);
1757 } else 1759 } else {
1758 sk_wait_data(sk, &timeo); 1760 sk_wait_data(sk, &timeo, last);
1761 }
1759 1762
1760 if (user_recv) { 1763 if (user_recv) {
1761 int chunk; 1764 int chunk;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 684f095d196e..728f5b3d3c64 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1917,14 +1917,13 @@ void tcp_enter_loss(struct sock *sk)
1917 const struct inet_connection_sock *icsk = inet_csk(sk); 1917 const struct inet_connection_sock *icsk = inet_csk(sk);
1918 struct tcp_sock *tp = tcp_sk(sk); 1918 struct tcp_sock *tp = tcp_sk(sk);
1919 struct sk_buff *skb; 1919 struct sk_buff *skb;
1920 bool new_recovery = false; 1920 bool new_recovery = icsk->icsk_ca_state < TCP_CA_Recovery;
1921 bool is_reneg; /* is receiver reneging on SACKs? */ 1921 bool is_reneg; /* is receiver reneging on SACKs? */
1922 1922
1923 /* Reduce ssthresh if it has not yet been made inside this window. */ 1923 /* Reduce ssthresh if it has not yet been made inside this window. */
1924 if (icsk->icsk_ca_state <= TCP_CA_Disorder || 1924 if (icsk->icsk_ca_state <= TCP_CA_Disorder ||
1925 !after(tp->high_seq, tp->snd_una) || 1925 !after(tp->high_seq, tp->snd_una) ||
1926 (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) { 1926 (icsk->icsk_ca_state == TCP_CA_Loss && !icsk->icsk_retransmits)) {
1927 new_recovery = true;
1928 tp->prior_ssthresh = tcp_current_ssthresh(sk); 1927 tp->prior_ssthresh = tcp_current_ssthresh(sk);
1929 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk); 1928 tp->snd_ssthresh = icsk->icsk_ca_ops->ssthresh(sk);
1930 tcp_ca_event(sk, CA_EVENT_LOSS); 1929 tcp_ca_event(sk, CA_EVENT_LOSS);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index d7d4c2b79cf2..0ea2e1c5d395 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1348,7 +1348,7 @@ static struct sock *tcp_v4_hnd_req(struct sock *sk, struct sk_buff *skb)
1348 req = inet_csk_search_req(sk, th->source, iph->saddr, iph->daddr); 1348 req = inet_csk_search_req(sk, th->source, iph->saddr, iph->daddr);
1349 if (req) { 1349 if (req) {
1350 nsk = tcp_check_req(sk, skb, req, false); 1350 nsk = tcp_check_req(sk, skb, req, false);
1351 if (!nsk) 1351 if (!nsk || nsk == sk)
1352 reqsk_put(req); 1352 reqsk_put(req);
1353 return nsk; 1353 return nsk;
1354 } 1354 }
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 83aa604f9273..1b8c5ba7d5f7 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1995,12 +1995,19 @@ void udp_v4_early_demux(struct sk_buff *skb)
1995 1995
1996 skb->sk = sk; 1996 skb->sk = sk;
1997 skb->destructor = sock_efree; 1997 skb->destructor = sock_efree;
1998 dst = sk->sk_rx_dst; 1998 dst = READ_ONCE(sk->sk_rx_dst);
1999 1999
2000 if (dst) 2000 if (dst)
2001 dst = dst_check(dst, 0); 2001 dst = dst_check(dst, 0);
2002 if (dst) 2002 if (dst) {
2003 skb_dst_set_noref(skb, dst); 2003 /* DST_NOCACHE can not be used without taking a reference */
2004 if (dst->flags & DST_NOCACHE) {
2005 if (likely(atomic_inc_not_zero(&dst->__refcnt)))
2006 skb_dst_set(skb, dst);
2007 } else {
2008 skb_dst_set_noref(skb, dst);
2009 }
2010 }
2004} 2011}
2005 2012
2006int udp_rcv(struct sk_buff *skb) 2013int udp_rcv(struct sk_buff *skb)
diff --git a/net/ipv6/datagram.c b/net/ipv6/datagram.c
index 62d908e64eeb..b10a88986a98 100644
--- a/net/ipv6/datagram.c
+++ b/net/ipv6/datagram.c
@@ -40,7 +40,7 @@ static bool ipv6_mapped_addr_any(const struct in6_addr *a)
40 return ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0); 40 return ipv6_addr_v4mapped(a) && (a->s6_addr32[3] == 0);
41} 41}
42 42
43int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len) 43static int __ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
44{ 44{
45 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr; 45 struct sockaddr_in6 *usin = (struct sockaddr_in6 *) uaddr;
46 struct inet_sock *inet = inet_sk(sk); 46 struct inet_sock *inet = inet_sk(sk);
@@ -56,7 +56,7 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
56 if (usin->sin6_family == AF_INET) { 56 if (usin->sin6_family == AF_INET) {
57 if (__ipv6_only_sock(sk)) 57 if (__ipv6_only_sock(sk))
58 return -EAFNOSUPPORT; 58 return -EAFNOSUPPORT;
59 err = ip4_datagram_connect(sk, uaddr, addr_len); 59 err = __ip4_datagram_connect(sk, uaddr, addr_len);
60 goto ipv4_connected; 60 goto ipv4_connected;
61 } 61 }
62 62
@@ -98,9 +98,9 @@ int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
98 sin.sin_addr.s_addr = daddr->s6_addr32[3]; 98 sin.sin_addr.s_addr = daddr->s6_addr32[3];
99 sin.sin_port = usin->sin6_port; 99 sin.sin_port = usin->sin6_port;
100 100
101 err = ip4_datagram_connect(sk, 101 err = __ip4_datagram_connect(sk,
102 (struct sockaddr *) &sin, 102 (struct sockaddr *) &sin,
103 sizeof(sin)); 103 sizeof(sin));
104 104
105ipv4_connected: 105ipv4_connected:
106 if (err) 106 if (err)
@@ -204,6 +204,16 @@ out:
204 fl6_sock_release(flowlabel); 204 fl6_sock_release(flowlabel);
205 return err; 205 return err;
206} 206}
207
208int ip6_datagram_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
209{
210 int res;
211
212 lock_sock(sk);
213 res = __ip6_datagram_connect(sk, uaddr, addr_len);
214 release_sock(sk);
215 return res;
216}
207EXPORT_SYMBOL_GPL(ip6_datagram_connect); 217EXPORT_SYMBOL_GPL(ip6_datagram_connect);
208 218
209int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *uaddr, 219int ip6_datagram_connect_v6_only(struct sock *sk, struct sockaddr *uaddr,
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index e893cd18612f..08b62047c67f 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -292,8 +292,6 @@ static struct packet_offload ipv6_packet_offload __read_mostly = {
292static const struct net_offload sit_offload = { 292static const struct net_offload sit_offload = {
293 .callbacks = { 293 .callbacks = {
294 .gso_segment = ipv6_gso_segment, 294 .gso_segment = ipv6_gso_segment,
295 .gro_receive = ipv6_gro_receive,
296 .gro_complete = ipv6_gro_complete,
297 }, 295 },
298}; 296};
299 297
diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c
index 0a05b35a90fc..c53331cfed95 100644
--- a/net/ipv6/ndisc.c
+++ b/net/ipv6/ndisc.c
@@ -1650,6 +1650,7 @@ int ndisc_rcv(struct sk_buff *skb)
1650static int ndisc_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) 1650static int ndisc_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
1651{ 1651{
1652 struct net_device *dev = netdev_notifier_info_to_dev(ptr); 1652 struct net_device *dev = netdev_notifier_info_to_dev(ptr);
1653 struct netdev_notifier_change_info *change_info;
1653 struct net *net = dev_net(dev); 1654 struct net *net = dev_net(dev);
1654 struct inet6_dev *idev; 1655 struct inet6_dev *idev;
1655 1656
@@ -1664,6 +1665,11 @@ static int ndisc_netdev_event(struct notifier_block *this, unsigned long event,
1664 ndisc_send_unsol_na(dev); 1665 ndisc_send_unsol_na(dev);
1665 in6_dev_put(idev); 1666 in6_dev_put(idev);
1666 break; 1667 break;
1668 case NETDEV_CHANGE:
1669 change_info = ptr;
1670 if (change_info->flags_changed & IFF_NOARP)
1671 neigh_changeaddr(&nd_tbl, dev);
1672 break;
1667 case NETDEV_DOWN: 1673 case NETDEV_DOWN:
1668 neigh_ifdown(&nd_tbl, dev); 1674 neigh_ifdown(&nd_tbl, dev);
1669 fib6_run_gc(0, net, false); 1675 fib6_run_gc(0, net, false);
diff --git a/net/ipv6/netfilter/ip6t_SYNPROXY.c b/net/ipv6/netfilter/ip6t_SYNPROXY.c
index 6edb7b106de7..ebbb754c2111 100644
--- a/net/ipv6/netfilter/ip6t_SYNPROXY.c
+++ b/net/ipv6/netfilter/ip6t_SYNPROXY.c
@@ -37,12 +37,13 @@ synproxy_build_ip(struct sk_buff *skb, const struct in6_addr *saddr,
37} 37}
38 38
39static void 39static void
40synproxy_send_tcp(const struct sk_buff *skb, struct sk_buff *nskb, 40synproxy_send_tcp(const struct synproxy_net *snet,
41 const struct sk_buff *skb, struct sk_buff *nskb,
41 struct nf_conntrack *nfct, enum ip_conntrack_info ctinfo, 42 struct nf_conntrack *nfct, enum ip_conntrack_info ctinfo,
42 struct ipv6hdr *niph, struct tcphdr *nth, 43 struct ipv6hdr *niph, struct tcphdr *nth,
43 unsigned int tcp_hdr_size) 44 unsigned int tcp_hdr_size)
44{ 45{
45 struct net *net = nf_ct_net((struct nf_conn *)nfct); 46 struct net *net = nf_ct_net(snet->tmpl);
46 struct dst_entry *dst; 47 struct dst_entry *dst;
47 struct flowi6 fl6; 48 struct flowi6 fl6;
48 49
@@ -83,7 +84,8 @@ free_nskb:
83} 84}
84 85
85static void 86static void
86synproxy_send_client_synack(const struct sk_buff *skb, const struct tcphdr *th, 87synproxy_send_client_synack(const struct synproxy_net *snet,
88 const struct sk_buff *skb, const struct tcphdr *th,
87 const struct synproxy_options *opts) 89 const struct synproxy_options *opts)
88{ 90{
89 struct sk_buff *nskb; 91 struct sk_buff *nskb;
@@ -119,7 +121,7 @@ synproxy_send_client_synack(const struct sk_buff *skb, const struct tcphdr *th,
119 121
120 synproxy_build_options(nth, opts); 122 synproxy_build_options(nth, opts);
121 123
122 synproxy_send_tcp(skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY, 124 synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
123 niph, nth, tcp_hdr_size); 125 niph, nth, tcp_hdr_size);
124} 126}
125 127
@@ -163,7 +165,7 @@ synproxy_send_server_syn(const struct synproxy_net *snet,
163 165
164 synproxy_build_options(nth, opts); 166 synproxy_build_options(nth, opts);
165 167
166 synproxy_send_tcp(skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW, 168 synproxy_send_tcp(snet, skb, nskb, &snet->tmpl->ct_general, IP_CT_NEW,
167 niph, nth, tcp_hdr_size); 169 niph, nth, tcp_hdr_size);
168} 170}
169 171
@@ -203,7 +205,7 @@ synproxy_send_server_ack(const struct synproxy_net *snet,
203 205
204 synproxy_build_options(nth, opts); 206 synproxy_build_options(nth, opts);
205 207
206 synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size); 208 synproxy_send_tcp(snet, skb, nskb, NULL, 0, niph, nth, tcp_hdr_size);
207} 209}
208 210
209static void 211static void
@@ -241,7 +243,8 @@ synproxy_send_client_ack(const struct synproxy_net *snet,
241 243
242 synproxy_build_options(nth, opts); 244 synproxy_build_options(nth, opts);
243 245
244 synproxy_send_tcp(skb, nskb, NULL, 0, niph, nth, tcp_hdr_size); 246 synproxy_send_tcp(snet, skb, nskb, skb->nfct, IP_CT_ESTABLISHED_REPLY,
247 niph, nth, tcp_hdr_size);
245} 248}
246 249
247static bool 250static bool
@@ -301,7 +304,7 @@ synproxy_tg6(struct sk_buff *skb, const struct xt_action_param *par)
301 XT_SYNPROXY_OPT_SACK_PERM | 304 XT_SYNPROXY_OPT_SACK_PERM |
302 XT_SYNPROXY_OPT_ECN); 305 XT_SYNPROXY_OPT_ECN);
303 306
304 synproxy_send_client_synack(skb, th, &opts); 307 synproxy_send_client_synack(snet, skb, th, &opts);
305 return NF_DROP; 308 return NF_DROP;
306 309
307 } else if (th->ack && !(th->fin || th->rst || th->syn)) { 310 } else if (th->ack && !(th->fin || th->rst || th->syn)) {
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index 6f187c8d8a1b..6d02498172c1 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -348,7 +348,7 @@ found:
348 fq->ecn |= ecn; 348 fq->ecn |= ecn;
349 if (payload_len > fq->q.max_size) 349 if (payload_len > fq->q.max_size)
350 fq->q.max_size = payload_len; 350 fq->q.max_size = payload_len;
351 add_frag_mem_limit(&fq->q, skb->truesize); 351 add_frag_mem_limit(fq->q.net, skb->truesize);
352 352
353 /* The first fragment. 353 /* The first fragment.
354 * nhoffset is obtained from the first fragment, of course. 354 * nhoffset is obtained from the first fragment, of course.
@@ -430,7 +430,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct net_device *dev)
430 clone->ip_summed = head->ip_summed; 430 clone->ip_summed = head->ip_summed;
431 431
432 NFCT_FRAG6_CB(clone)->orig = NULL; 432 NFCT_FRAG6_CB(clone)->orig = NULL;
433 add_frag_mem_limit(&fq->q, clone->truesize); 433 add_frag_mem_limit(fq->q.net, clone->truesize);
434 } 434 }
435 435
436 /* We have to remove fragment header from datagram and to relocate 436 /* We have to remove fragment header from datagram and to relocate
@@ -454,7 +454,7 @@ nf_ct_frag6_reasm(struct frag_queue *fq, struct net_device *dev)
454 head->csum = csum_add(head->csum, fp->csum); 454 head->csum = csum_add(head->csum, fp->csum);
455 head->truesize += fp->truesize; 455 head->truesize += fp->truesize;
456 } 456 }
457 sub_frag_mem_limit(&fq->q, head->truesize); 457 sub_frag_mem_limit(fq->q.net, head->truesize);
458 458
459 head->ignore_df = 1; 459 head->ignore_df = 1;
460 head->next = NULL; 460 head->next = NULL;
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index 8ffa2c8cce77..f1159bb76e0a 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -144,7 +144,7 @@ void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq,
144 144
145 IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS); 145 IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
146 146
147 if (fq->q.flags & INET_FRAG_EVICTED) 147 if (inet_frag_evicting(&fq->q))
148 goto out_rcu_unlock; 148 goto out_rcu_unlock;
149 149
150 IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT); 150 IP6_INC_STATS_BH(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
@@ -330,7 +330,7 @@ found:
330 fq->q.stamp = skb->tstamp; 330 fq->q.stamp = skb->tstamp;
331 fq->q.meat += skb->len; 331 fq->q.meat += skb->len;
332 fq->ecn |= ecn; 332 fq->ecn |= ecn;
333 add_frag_mem_limit(&fq->q, skb->truesize); 333 add_frag_mem_limit(fq->q.net, skb->truesize);
334 334
335 /* The first fragment. 335 /* The first fragment.
336 * nhoffset is obtained from the first fragment, of course. 336 * nhoffset is obtained from the first fragment, of course.
@@ -443,7 +443,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
443 head->len -= clone->len; 443 head->len -= clone->len;
444 clone->csum = 0; 444 clone->csum = 0;
445 clone->ip_summed = head->ip_summed; 445 clone->ip_summed = head->ip_summed;
446 add_frag_mem_limit(&fq->q, clone->truesize); 446 add_frag_mem_limit(fq->q.net, clone->truesize);
447 } 447 }
448 448
449 /* We have to remove fragment header from datagram and to relocate 449 /* We have to remove fragment header from datagram and to relocate
@@ -481,7 +481,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
481 } 481 }
482 fp = next; 482 fp = next;
483 } 483 }
484 sub_frag_mem_limit(&fq->q, sum_truesize); 484 sub_frag_mem_limit(fq->q.net, sum_truesize);
485 485
486 head->next = NULL; 486 head->next = NULL;
487 head->dev = dev; 487 head->dev = dev;
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 6090969937f8..9de4d2bcd916 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1831,6 +1831,7 @@ int ip6_route_add(struct fib6_config *cfg)
1831 int gwa_type; 1831 int gwa_type;
1832 1832
1833 gw_addr = &cfg->fc_gateway; 1833 gw_addr = &cfg->fc_gateway;
1834 gwa_type = ipv6_addr_type(gw_addr);
1834 1835
1835 /* if gw_addr is local we will fail to detect this in case 1836 /* if gw_addr is local we will fail to detect this in case
1836 * address is still TENTATIVE (DAD in progress). rt6_lookup() 1837 * address is still TENTATIVE (DAD in progress). rt6_lookup()
@@ -1838,11 +1839,12 @@ int ip6_route_add(struct fib6_config *cfg)
1838 * prefix route was assigned to, which might be non-loopback. 1839 * prefix route was assigned to, which might be non-loopback.
1839 */ 1840 */
1840 err = -EINVAL; 1841 err = -EINVAL;
1841 if (ipv6_chk_addr_and_flags(net, gw_addr, NULL, 0, 0)) 1842 if (ipv6_chk_addr_and_flags(net, gw_addr,
1843 gwa_type & IPV6_ADDR_LINKLOCAL ?
1844 dev : NULL, 0, 0))
1842 goto out; 1845 goto out;
1843 1846
1844 rt->rt6i_gateway = *gw_addr; 1847 rt->rt6i_gateway = *gw_addr;
1845 gwa_type = ipv6_addr_type(gw_addr);
1846 1848
1847 if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) { 1849 if (gwa_type != (IPV6_ADDR_LINKLOCAL|IPV6_ADDR_UNICAST)) {
1848 struct rt6_info *grt; 1850 struct rt6_info *grt;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 6748c4277aff..7a6cea5e4274 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -943,7 +943,7 @@ static struct sock *tcp_v6_hnd_req(struct sock *sk, struct sk_buff *skb)
943 &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb)); 943 &ipv6_hdr(skb)->daddr, tcp_v6_iif(skb));
944 if (req) { 944 if (req) {
945 nsk = tcp_check_req(sk, skb, req, false); 945 nsk = tcp_check_req(sk, skb, req, false);
946 if (!nsk) 946 if (!nsk || nsk == sk)
947 reqsk_put(req); 947 reqsk_put(req);
948 return nsk; 948 return nsk;
949 } 949 }
diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c
index 8fd9febaa5ba..8dab4e569571 100644
--- a/net/llc/af_llc.c
+++ b/net/llc/af_llc.c
@@ -613,7 +613,7 @@ static int llc_wait_data(struct sock *sk, long timeo)
613 if (signal_pending(current)) 613 if (signal_pending(current))
614 break; 614 break;
615 rc = 0; 615 rc = 0;
616 if (sk_wait_data(sk, &timeo)) 616 if (sk_wait_data(sk, &timeo, NULL))
617 break; 617 break;
618 } 618 }
619 return rc; 619 return rc;
@@ -802,7 +802,7 @@ static int llc_ui_recvmsg(struct socket *sock, struct msghdr *msg, size_t len,
802 release_sock(sk); 802 release_sock(sk);
803 lock_sock(sk); 803 lock_sock(sk);
804 } else 804 } else
805 sk_wait_data(sk, &timeo); 805 sk_wait_data(sk, &timeo, NULL);
806 806
807 if ((flags & MSG_PEEK) && peek_seq != llc->copied_seq) { 807 if ((flags & MSG_PEEK) && peek_seq != llc->copied_seq) {
808 net_dbg_ratelimited("LLC(%s:%d): Application bug, race in MSG_PEEK\n", 808 net_dbg_ratelimited("LLC(%s:%d): Application bug, race in MSG_PEEK\n",
diff --git a/net/mac80211/debugfs_netdev.c b/net/mac80211/debugfs_netdev.c
index 29236e832e44..c09c0131bfa2 100644
--- a/net/mac80211/debugfs_netdev.c
+++ b/net/mac80211/debugfs_netdev.c
@@ -723,6 +723,7 @@ void ieee80211_debugfs_remove_netdev(struct ieee80211_sub_if_data *sdata)
723 723
724 debugfs_remove_recursive(sdata->vif.debugfs_dir); 724 debugfs_remove_recursive(sdata->vif.debugfs_dir);
725 sdata->vif.debugfs_dir = NULL; 725 sdata->vif.debugfs_dir = NULL;
726 sdata->debugfs.subdir_stations = NULL;
726} 727}
727 728
728void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata) 729void ieee80211_debugfs_rename_netdev(struct ieee80211_sub_if_data *sdata)
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index ed1edac14372..553ac6dd4867 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -1863,10 +1863,6 @@ void ieee80211_sdata_stop(struct ieee80211_sub_if_data *sdata)
1863 ieee80211_teardown_sdata(sdata); 1863 ieee80211_teardown_sdata(sdata);
1864} 1864}
1865 1865
1866/*
1867 * Remove all interfaces, may only be called at hardware unregistration
1868 * time because it doesn't do RCU-safe list removals.
1869 */
1870void ieee80211_remove_interfaces(struct ieee80211_local *local) 1866void ieee80211_remove_interfaces(struct ieee80211_local *local)
1871{ 1867{
1872 struct ieee80211_sub_if_data *sdata, *tmp; 1868 struct ieee80211_sub_if_data *sdata, *tmp;
@@ -1875,14 +1871,21 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local)
1875 1871
1876 ASSERT_RTNL(); 1872 ASSERT_RTNL();
1877 1873
1878 /* 1874 /* Before destroying the interfaces, make sure they're all stopped so
1879 * Close all AP_VLAN interfaces first, as otherwise they 1875 * that the hardware is stopped. Otherwise, the driver might still be
1880 * might be closed while the AP interface they belong to 1876 * iterating the interfaces during the shutdown, e.g. from a worker
1881 * is closed, causing unregister_netdevice_many() to crash. 1877 * or from RX processing or similar, and if it does so (using atomic
1878 * iteration) while we're manipulating the list, the iteration will
1879 * crash.
1880 *
1881 * After this, the hardware should be stopped and the driver should
1882 * have stopped all of its activities, so that we can do RCU-unaware
1883 * manipulations of the interface list below.
1882 */ 1884 */
1883 list_for_each_entry(sdata, &local->interfaces, list) 1885 cfg80211_shutdown_all_interfaces(local->hw.wiphy);
1884 if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) 1886
1885 dev_close(sdata->dev); 1887 WARN(local->open_count, "%s: open count remains %d\n",
1888 wiphy_name(local->hw.wiphy), local->open_count);
1886 1889
1887 mutex_lock(&local->iflist_mtx); 1890 mutex_lock(&local->iflist_mtx);
1888 list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) { 1891 list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) {
diff --git a/net/mac80211/mesh_plink.c b/net/mac80211/mesh_plink.c
index 5438d13e2f00..3b59099413fb 100644
--- a/net/mac80211/mesh_plink.c
+++ b/net/mac80211/mesh_plink.c
@@ -306,7 +306,7 @@ static int mesh_plink_frame_tx(struct ieee80211_sub_if_data *sdata,
306 if (action == WLAN_SP_MESH_PEERING_CONFIRM) { 306 if (action == WLAN_SP_MESH_PEERING_CONFIRM) {
307 /* AID */ 307 /* AID */
308 pos = skb_put(skb, 2); 308 pos = skb_put(skb, 2);
309 put_unaligned_le16(plid, pos + 2); 309 put_unaligned_le16(plid, pos);
310 } 310 }
311 if (ieee80211_add_srates_ie(sdata, skb, true, band) || 311 if (ieee80211_add_srates_ie(sdata, skb, true, band) ||
312 ieee80211_add_ext_srates_ie(sdata, skb, true, band) || 312 ieee80211_add_ext_srates_ie(sdata, skb, true, band) ||
@@ -1122,6 +1122,9 @@ void mesh_rx_plink_frame(struct ieee80211_sub_if_data *sdata,
1122 WLAN_SP_MESH_PEERING_CONFIRM) { 1122 WLAN_SP_MESH_PEERING_CONFIRM) {
1123 baseaddr += 4; 1123 baseaddr += 4;
1124 baselen += 4; 1124 baselen += 4;
1125
1126 if (baselen > len)
1127 return;
1125 } 1128 }
1126 ieee802_11_parse_elems(baseaddr, len - baselen, true, &elems); 1129 ieee802_11_parse_elems(baseaddr, len - baselen, true, &elems);
1127 mesh_process_plink_frame(sdata, mgmt, &elems); 1130 mesh_process_plink_frame(sdata, mgmt, &elems);
diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
index 06b60980c62c..b676b9fa707b 100644
--- a/net/mac80211/pm.c
+++ b/net/mac80211/pm.c
@@ -76,6 +76,22 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
76 if (sdata->vif.type != NL80211_IFTYPE_STATION) 76 if (sdata->vif.type != NL80211_IFTYPE_STATION)
77 continue; 77 continue;
78 ieee80211_mgd_quiesce(sdata); 78 ieee80211_mgd_quiesce(sdata);
79 /* If suspended during TX in progress, and wowlan
80 * is enabled (connection will be active) there
81 * can be a race where the driver is put out
82 * of power-save due to TX and during suspend
83 * dynamic_ps_timer is cancelled and TX packet
84 * is flushed, leaving the driver in ACTIVE even
85 * after resuming until dynamic_ps_timer puts
86 * driver back in DOZE.
87 */
88 if (sdata->u.mgd.associated &&
89 sdata->u.mgd.powersave &&
90 !(local->hw.conf.flags & IEEE80211_CONF_PS)) {
91 local->hw.conf.flags |= IEEE80211_CONF_PS;
92 ieee80211_hw_config(local,
93 IEEE80211_CONF_CHANGE_PS);
94 }
79 } 95 }
80 96
81 err = drv_suspend(local, wowlan); 97 err = drv_suspend(local, wowlan);
diff --git a/net/mac80211/tdls.c b/net/mac80211/tdls.c
index ad31b2dab4f5..8db6e2994bbc 100644
--- a/net/mac80211/tdls.c
+++ b/net/mac80211/tdls.c
@@ -60,6 +60,7 @@ ieee80211_tdls_add_subband(struct ieee80211_sub_if_data *sdata,
60 struct ieee80211_channel *ch; 60 struct ieee80211_channel *ch;
61 struct cfg80211_chan_def chandef; 61 struct cfg80211_chan_def chandef;
62 int i, subband_start; 62 int i, subband_start;
63 struct wiphy *wiphy = sdata->local->hw.wiphy;
63 64
64 for (i = start; i <= end; i += spacing) { 65 for (i = start; i <= end; i += spacing) {
65 if (!ch_cnt) 66 if (!ch_cnt)
@@ -70,9 +71,8 @@ ieee80211_tdls_add_subband(struct ieee80211_sub_if_data *sdata,
70 /* we will be active on the channel */ 71 /* we will be active on the channel */
71 cfg80211_chandef_create(&chandef, ch, 72 cfg80211_chandef_create(&chandef, ch,
72 NL80211_CHAN_NO_HT); 73 NL80211_CHAN_NO_HT);
73 if (cfg80211_reg_can_beacon(sdata->local->hw.wiphy, 74 if (cfg80211_reg_can_beacon_relax(wiphy, &chandef,
74 &chandef, 75 sdata->wdev.iftype)) {
75 sdata->wdev.iftype)) {
76 ch_cnt++; 76 ch_cnt++;
77 /* 77 /*
78 * check if the next channel is also part of 78 * check if the next channel is also part of
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 8410bb3bf5e8..b8233505bf9f 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -1117,7 +1117,9 @@ static bool ieee80211_tx_prep_agg(struct ieee80211_tx_data *tx,
1117 queued = true; 1117 queued = true;
1118 info->control.vif = &tx->sdata->vif; 1118 info->control.vif = &tx->sdata->vif;
1119 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING; 1119 info->flags |= IEEE80211_TX_INTFL_NEED_TXPROCESSING;
1120 info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS; 1120 info->flags &= ~IEEE80211_TX_TEMPORARY_FLAGS |
1121 IEEE80211_TX_CTL_NO_PS_BUFFER |
1122 IEEE80211_TX_STATUS_EOSP;
1121 __skb_queue_tail(&tid_tx->pending, skb); 1123 __skb_queue_tail(&tid_tx->pending, skb);
1122 if (skb_queue_len(&tid_tx->pending) > STA_MAX_TX_BUFFER) 1124 if (skb_queue_len(&tid_tx->pending) > STA_MAX_TX_BUFFER)
1123 purge_skb = __skb_dequeue(&tid_tx->pending); 1125 purge_skb = __skb_dequeue(&tid_tx->pending);
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 5d2b806a862e..38fbc194b9cb 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -319,7 +319,13 @@ ip_vs_sched_persist(struct ip_vs_service *svc,
319 * return *ignored=0 i.e. ICMP and NF_DROP 319 * return *ignored=0 i.e. ICMP and NF_DROP
320 */ 320 */
321 sched = rcu_dereference(svc->scheduler); 321 sched = rcu_dereference(svc->scheduler);
322 dest = sched->schedule(svc, skb, iph); 322 if (sched) {
323 /* read svc->sched_data after svc->scheduler */
324 smp_rmb();
325 dest = sched->schedule(svc, skb, iph);
326 } else {
327 dest = NULL;
328 }
323 if (!dest) { 329 if (!dest) {
324 IP_VS_DBG(1, "p-schedule: no dest found.\n"); 330 IP_VS_DBG(1, "p-schedule: no dest found.\n");
325 kfree(param.pe_data); 331 kfree(param.pe_data);
@@ -467,7 +473,13 @@ ip_vs_schedule(struct ip_vs_service *svc, struct sk_buff *skb,
467 } 473 }
468 474
469 sched = rcu_dereference(svc->scheduler); 475 sched = rcu_dereference(svc->scheduler);
470 dest = sched->schedule(svc, skb, iph); 476 if (sched) {
477 /* read svc->sched_data after svc->scheduler */
478 smp_rmb();
479 dest = sched->schedule(svc, skb, iph);
480 } else {
481 dest = NULL;
482 }
471 if (dest == NULL) { 483 if (dest == NULL) {
472 IP_VS_DBG(1, "Schedule: no dest found.\n"); 484 IP_VS_DBG(1, "Schedule: no dest found.\n");
473 return NULL; 485 return NULL;
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 285eae3a1454..24c554201a76 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -842,15 +842,16 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
842 __ip_vs_dst_cache_reset(dest); 842 __ip_vs_dst_cache_reset(dest);
843 spin_unlock_bh(&dest->dst_lock); 843 spin_unlock_bh(&dest->dst_lock);
844 844
845 sched = rcu_dereference_protected(svc->scheduler, 1);
846 if (add) { 845 if (add) {
847 ip_vs_start_estimator(svc->net, &dest->stats); 846 ip_vs_start_estimator(svc->net, &dest->stats);
848 list_add_rcu(&dest->n_list, &svc->destinations); 847 list_add_rcu(&dest->n_list, &svc->destinations);
849 svc->num_dests++; 848 svc->num_dests++;
850 if (sched->add_dest) 849 sched = rcu_dereference_protected(svc->scheduler, 1);
850 if (sched && sched->add_dest)
851 sched->add_dest(svc, dest); 851 sched->add_dest(svc, dest);
852 } else { 852 } else {
853 if (sched->upd_dest) 853 sched = rcu_dereference_protected(svc->scheduler, 1);
854 if (sched && sched->upd_dest)
854 sched->upd_dest(svc, dest); 855 sched->upd_dest(svc, dest);
855 } 856 }
856} 857}
@@ -1084,7 +1085,7 @@ static void __ip_vs_unlink_dest(struct ip_vs_service *svc,
1084 struct ip_vs_scheduler *sched; 1085 struct ip_vs_scheduler *sched;
1085 1086
1086 sched = rcu_dereference_protected(svc->scheduler, 1); 1087 sched = rcu_dereference_protected(svc->scheduler, 1);
1087 if (sched->del_dest) 1088 if (sched && sched->del_dest)
1088 sched->del_dest(svc, dest); 1089 sched->del_dest(svc, dest);
1089 } 1090 }
1090} 1091}
@@ -1175,11 +1176,14 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
1175 ip_vs_use_count_inc(); 1176 ip_vs_use_count_inc();
1176 1177
1177 /* Lookup the scheduler by 'u->sched_name' */ 1178 /* Lookup the scheduler by 'u->sched_name' */
1178 sched = ip_vs_scheduler_get(u->sched_name); 1179 if (strcmp(u->sched_name, "none")) {
1179 if (sched == NULL) { 1180 sched = ip_vs_scheduler_get(u->sched_name);
1180 pr_info("Scheduler module ip_vs_%s not found\n", u->sched_name); 1181 if (!sched) {
1181 ret = -ENOENT; 1182 pr_info("Scheduler module ip_vs_%s not found\n",
1182 goto out_err; 1183 u->sched_name);
1184 ret = -ENOENT;
1185 goto out_err;
1186 }
1183 } 1187 }
1184 1188
1185 if (u->pe_name && *u->pe_name) { 1189 if (u->pe_name && *u->pe_name) {
@@ -1240,10 +1244,12 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
1240 spin_lock_init(&svc->stats.lock); 1244 spin_lock_init(&svc->stats.lock);
1241 1245
1242 /* Bind the scheduler */ 1246 /* Bind the scheduler */
1243 ret = ip_vs_bind_scheduler(svc, sched); 1247 if (sched) {
1244 if (ret) 1248 ret = ip_vs_bind_scheduler(svc, sched);
1245 goto out_err; 1249 if (ret)
1246 sched = NULL; 1250 goto out_err;
1251 sched = NULL;
1252 }
1247 1253
1248 /* Bind the ct retriever */ 1254 /* Bind the ct retriever */
1249 RCU_INIT_POINTER(svc->pe, pe); 1255 RCU_INIT_POINTER(svc->pe, pe);
@@ -1291,17 +1297,20 @@ ip_vs_add_service(struct net *net, struct ip_vs_service_user_kern *u,
1291static int 1297static int
1292ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u) 1298ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u)
1293{ 1299{
1294 struct ip_vs_scheduler *sched, *old_sched; 1300 struct ip_vs_scheduler *sched = NULL, *old_sched;
1295 struct ip_vs_pe *pe = NULL, *old_pe = NULL; 1301 struct ip_vs_pe *pe = NULL, *old_pe = NULL;
1296 int ret = 0; 1302 int ret = 0;
1297 1303
1298 /* 1304 /*
1299 * Lookup the scheduler, by 'u->sched_name' 1305 * Lookup the scheduler, by 'u->sched_name'
1300 */ 1306 */
1301 sched = ip_vs_scheduler_get(u->sched_name); 1307 if (strcmp(u->sched_name, "none")) {
1302 if (sched == NULL) { 1308 sched = ip_vs_scheduler_get(u->sched_name);
1303 pr_info("Scheduler module ip_vs_%s not found\n", u->sched_name); 1309 if (!sched) {
1304 return -ENOENT; 1310 pr_info("Scheduler module ip_vs_%s not found\n",
1311 u->sched_name);
1312 return -ENOENT;
1313 }
1305 } 1314 }
1306 old_sched = sched; 1315 old_sched = sched;
1307 1316
@@ -1329,14 +1338,20 @@ ip_vs_edit_service(struct ip_vs_service *svc, struct ip_vs_service_user_kern *u)
1329 1338
1330 old_sched = rcu_dereference_protected(svc->scheduler, 1); 1339 old_sched = rcu_dereference_protected(svc->scheduler, 1);
1331 if (sched != old_sched) { 1340 if (sched != old_sched) {
1341 if (old_sched) {
1342 ip_vs_unbind_scheduler(svc, old_sched);
1343 RCU_INIT_POINTER(svc->scheduler, NULL);
1344 /* Wait all svc->sched_data users */
1345 synchronize_rcu();
1346 }
1332 /* Bind the new scheduler */ 1347 /* Bind the new scheduler */
1333 ret = ip_vs_bind_scheduler(svc, sched); 1348 if (sched) {
1334 if (ret) { 1349 ret = ip_vs_bind_scheduler(svc, sched);
1335 old_sched = sched; 1350 if (ret) {
1336 goto out; 1351 ip_vs_scheduler_put(sched);
1352 goto out;
1353 }
1337 } 1354 }
1338 /* Unbind the old scheduler on success */
1339 ip_vs_unbind_scheduler(svc, old_sched);
1340 } 1355 }
1341 1356
1342 /* 1357 /*
@@ -1982,6 +1997,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
1982 const struct ip_vs_iter *iter = seq->private; 1997 const struct ip_vs_iter *iter = seq->private;
1983 const struct ip_vs_dest *dest; 1998 const struct ip_vs_dest *dest;
1984 struct ip_vs_scheduler *sched = rcu_dereference(svc->scheduler); 1999 struct ip_vs_scheduler *sched = rcu_dereference(svc->scheduler);
2000 char *sched_name = sched ? sched->name : "none";
1985 2001
1986 if (iter->table == ip_vs_svc_table) { 2002 if (iter->table == ip_vs_svc_table) {
1987#ifdef CONFIG_IP_VS_IPV6 2003#ifdef CONFIG_IP_VS_IPV6
@@ -1990,18 +2006,18 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
1990 ip_vs_proto_name(svc->protocol), 2006 ip_vs_proto_name(svc->protocol),
1991 &svc->addr.in6, 2007 &svc->addr.in6,
1992 ntohs(svc->port), 2008 ntohs(svc->port),
1993 sched->name); 2009 sched_name);
1994 else 2010 else
1995#endif 2011#endif
1996 seq_printf(seq, "%s %08X:%04X %s %s ", 2012 seq_printf(seq, "%s %08X:%04X %s %s ",
1997 ip_vs_proto_name(svc->protocol), 2013 ip_vs_proto_name(svc->protocol),
1998 ntohl(svc->addr.ip), 2014 ntohl(svc->addr.ip),
1999 ntohs(svc->port), 2015 ntohs(svc->port),
2000 sched->name, 2016 sched_name,
2001 (svc->flags & IP_VS_SVC_F_ONEPACKET)?"ops ":""); 2017 (svc->flags & IP_VS_SVC_F_ONEPACKET)?"ops ":"");
2002 } else { 2018 } else {
2003 seq_printf(seq, "FWM %08X %s %s", 2019 seq_printf(seq, "FWM %08X %s %s",
2004 svc->fwmark, sched->name, 2020 svc->fwmark, sched_name,
2005 (svc->flags & IP_VS_SVC_F_ONEPACKET)?"ops ":""); 2021 (svc->flags & IP_VS_SVC_F_ONEPACKET)?"ops ":"");
2006 } 2022 }
2007 2023
@@ -2427,13 +2443,15 @@ ip_vs_copy_service(struct ip_vs_service_entry *dst, struct ip_vs_service *src)
2427{ 2443{
2428 struct ip_vs_scheduler *sched; 2444 struct ip_vs_scheduler *sched;
2429 struct ip_vs_kstats kstats; 2445 struct ip_vs_kstats kstats;
2446 char *sched_name;
2430 2447
2431 sched = rcu_dereference_protected(src->scheduler, 1); 2448 sched = rcu_dereference_protected(src->scheduler, 1);
2449 sched_name = sched ? sched->name : "none";
2432 dst->protocol = src->protocol; 2450 dst->protocol = src->protocol;
2433 dst->addr = src->addr.ip; 2451 dst->addr = src->addr.ip;
2434 dst->port = src->port; 2452 dst->port = src->port;
2435 dst->fwmark = src->fwmark; 2453 dst->fwmark = src->fwmark;
2436 strlcpy(dst->sched_name, sched->name, sizeof(dst->sched_name)); 2454 strlcpy(dst->sched_name, sched_name, sizeof(dst->sched_name));
2437 dst->flags = src->flags; 2455 dst->flags = src->flags;
2438 dst->timeout = src->timeout / HZ; 2456 dst->timeout = src->timeout / HZ;
2439 dst->netmask = src->netmask; 2457 dst->netmask = src->netmask;
@@ -2892,6 +2910,7 @@ static int ip_vs_genl_fill_service(struct sk_buff *skb,
2892 struct ip_vs_flags flags = { .flags = svc->flags, 2910 struct ip_vs_flags flags = { .flags = svc->flags,
2893 .mask = ~0 }; 2911 .mask = ~0 };
2894 struct ip_vs_kstats kstats; 2912 struct ip_vs_kstats kstats;
2913 char *sched_name;
2895 2914
2896 nl_service = nla_nest_start(skb, IPVS_CMD_ATTR_SERVICE); 2915 nl_service = nla_nest_start(skb, IPVS_CMD_ATTR_SERVICE);
2897 if (!nl_service) 2916 if (!nl_service)
@@ -2910,8 +2929,9 @@ static int ip_vs_genl_fill_service(struct sk_buff *skb,
2910 } 2929 }
2911 2930
2912 sched = rcu_dereference_protected(svc->scheduler, 1); 2931 sched = rcu_dereference_protected(svc->scheduler, 1);
2932 sched_name = sched ? sched->name : "none";
2913 pe = rcu_dereference_protected(svc->pe, 1); 2933 pe = rcu_dereference_protected(svc->pe, 1);
2914 if (nla_put_string(skb, IPVS_SVC_ATTR_SCHED_NAME, sched->name) || 2934 if (nla_put_string(skb, IPVS_SVC_ATTR_SCHED_NAME, sched_name) ||
2915 (pe && nla_put_string(skb, IPVS_SVC_ATTR_PE_NAME, pe->name)) || 2935 (pe && nla_put_string(skb, IPVS_SVC_ATTR_PE_NAME, pe->name)) ||
2916 nla_put(skb, IPVS_SVC_ATTR_FLAGS, sizeof(flags), &flags) || 2936 nla_put(skb, IPVS_SVC_ATTR_FLAGS, sizeof(flags), &flags) ||
2917 nla_put_u32(skb, IPVS_SVC_ATTR_TIMEOUT, svc->timeout / HZ) || 2937 nla_put_u32(skb, IPVS_SVC_ATTR_TIMEOUT, svc->timeout / HZ) ||
diff --git a/net/netfilter/ipvs/ip_vs_sched.c b/net/netfilter/ipvs/ip_vs_sched.c
index 199760c71f39..7e8141647943 100644
--- a/net/netfilter/ipvs/ip_vs_sched.c
+++ b/net/netfilter/ipvs/ip_vs_sched.c
@@ -74,7 +74,7 @@ void ip_vs_unbind_scheduler(struct ip_vs_service *svc,
74 74
75 if (sched->done_service) 75 if (sched->done_service)
76 sched->done_service(svc); 76 sched->done_service(svc);
77 /* svc->scheduler can not be set to NULL */ 77 /* svc->scheduler can be set to NULL only by caller */
78} 78}
79 79
80 80
@@ -147,21 +147,21 @@ void ip_vs_scheduler_put(struct ip_vs_scheduler *scheduler)
147 147
148void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg) 148void ip_vs_scheduler_err(struct ip_vs_service *svc, const char *msg)
149{ 149{
150 struct ip_vs_scheduler *sched; 150 struct ip_vs_scheduler *sched = rcu_dereference(svc->scheduler);
151 char *sched_name = sched ? sched->name : "none";
151 152
152 sched = rcu_dereference(svc->scheduler);
153 if (svc->fwmark) { 153 if (svc->fwmark) {
154 IP_VS_ERR_RL("%s: FWM %u 0x%08X - %s\n", 154 IP_VS_ERR_RL("%s: FWM %u 0x%08X - %s\n",
155 sched->name, svc->fwmark, svc->fwmark, msg); 155 sched_name, svc->fwmark, svc->fwmark, msg);
156#ifdef CONFIG_IP_VS_IPV6 156#ifdef CONFIG_IP_VS_IPV6
157 } else if (svc->af == AF_INET6) { 157 } else if (svc->af == AF_INET6) {
158 IP_VS_ERR_RL("%s: %s [%pI6c]:%d - %s\n", 158 IP_VS_ERR_RL("%s: %s [%pI6c]:%d - %s\n",
159 sched->name, ip_vs_proto_name(svc->protocol), 159 sched_name, ip_vs_proto_name(svc->protocol),
160 &svc->addr.in6, ntohs(svc->port), msg); 160 &svc->addr.in6, ntohs(svc->port), msg);
161#endif 161#endif
162 } else { 162 } else {
163 IP_VS_ERR_RL("%s: %s %pI4:%d - %s\n", 163 IP_VS_ERR_RL("%s: %s %pI4:%d - %s\n",
164 sched->name, ip_vs_proto_name(svc->protocol), 164 sched_name, ip_vs_proto_name(svc->protocol),
165 &svc->addr.ip, ntohs(svc->port), msg); 165 &svc->addr.ip, ntohs(svc->port), msg);
166 } 166 }
167} 167}
diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
index b08ba9538d12..d99ad93eb855 100644
--- a/net/netfilter/ipvs/ip_vs_sync.c
+++ b/net/netfilter/ipvs/ip_vs_sync.c
@@ -612,7 +612,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
612 pkts = atomic_add_return(1, &cp->in_pkts); 612 pkts = atomic_add_return(1, &cp->in_pkts);
613 else 613 else
614 pkts = sysctl_sync_threshold(ipvs); 614 pkts = sysctl_sync_threshold(ipvs);
615 ip_vs_sync_conn(net, cp->control, pkts); 615 ip_vs_sync_conn(net, cp, pkts);
616 } 616 }
617} 617}
618 618
diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
index bf66a8657a5f..258a0b0e82a2 100644
--- a/net/netfilter/ipvs/ip_vs_xmit.c
+++ b/net/netfilter/ipvs/ip_vs_xmit.c
@@ -130,7 +130,6 @@ static struct rtable *do_output_route4(struct net *net, __be32 daddr,
130 130
131 memset(&fl4, 0, sizeof(fl4)); 131 memset(&fl4, 0, sizeof(fl4));
132 fl4.daddr = daddr; 132 fl4.daddr = daddr;
133 fl4.saddr = (rt_mode & IP_VS_RT_MODE_CONNECT) ? *saddr : 0;
134 fl4.flowi4_flags = (rt_mode & IP_VS_RT_MODE_KNOWN_NH) ? 133 fl4.flowi4_flags = (rt_mode & IP_VS_RT_MODE_KNOWN_NH) ?
135 FLOWI_FLAG_KNOWN_NH : 0; 134 FLOWI_FLAG_KNOWN_NH : 0;
136 135
@@ -505,6 +504,13 @@ err_put:
505 return -1; 504 return -1;
506 505
507err_unreach: 506err_unreach:
507 /* The ip6_link_failure function requires the dev field to be set
508 * in order to get the net (further for the sake of fwmark
509 * reflection).
510 */
511 if (!skb->dev)
512 skb->dev = skb_dst(skb)->dev;
513
508 dst_link_failure(skb); 514 dst_link_failure(skb);
509 return -1; 515 return -1;
510} 516}
@@ -523,10 +529,27 @@ static inline int ip_vs_tunnel_xmit_prepare(struct sk_buff *skb,
523 if (ret == NF_ACCEPT) { 529 if (ret == NF_ACCEPT) {
524 nf_reset(skb); 530 nf_reset(skb);
525 skb_forward_csum(skb); 531 skb_forward_csum(skb);
532 if (!skb->sk)
533 skb_sender_cpu_clear(skb);
526 } 534 }
527 return ret; 535 return ret;
528} 536}
529 537
538/* In the event of a remote destination, it's possible that we would have
539 * matches against an old socket (particularly a TIME-WAIT socket). This
540 * causes havoc down the line (ip_local_out et. al. expect regular sockets
541 * and invalid memory accesses will happen) so simply drop the association
542 * in this case.
543*/
544static inline void ip_vs_drop_early_demux_sk(struct sk_buff *skb)
545{
546 /* If dev is set, the packet came from the LOCAL_IN callback and
547 * not from a local TCP socket.
548 */
549 if (skb->dev)
550 skb_orphan(skb);
551}
552
530/* return NF_STOLEN (sent) or NF_ACCEPT if local=1 (not sent) */ 553/* return NF_STOLEN (sent) or NF_ACCEPT if local=1 (not sent) */
531static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb, 554static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb,
532 struct ip_vs_conn *cp, int local) 555 struct ip_vs_conn *cp, int local)
@@ -538,12 +561,23 @@ static inline int ip_vs_nat_send_or_cont(int pf, struct sk_buff *skb,
538 ip_vs_notrack(skb); 561 ip_vs_notrack(skb);
539 else 562 else
540 ip_vs_update_conntrack(skb, cp, 1); 563 ip_vs_update_conntrack(skb, cp, 1);
564
565 /* Remove the early_demux association unless it's bound for the
566 * exact same port and address on this host after translation.
567 */
568 if (!local || cp->vport != cp->dport ||
569 !ip_vs_addr_equal(cp->af, &cp->vaddr, &cp->daddr))
570 ip_vs_drop_early_demux_sk(skb);
571
541 if (!local) { 572 if (!local) {
542 skb_forward_csum(skb); 573 skb_forward_csum(skb);
574 if (!skb->sk)
575 skb_sender_cpu_clear(skb);
543 NF_HOOK(pf, NF_INET_LOCAL_OUT, NULL, skb, 576 NF_HOOK(pf, NF_INET_LOCAL_OUT, NULL, skb,
544 NULL, skb_dst(skb)->dev, dst_output_sk); 577 NULL, skb_dst(skb)->dev, dst_output_sk);
545 } else 578 } else
546 ret = NF_ACCEPT; 579 ret = NF_ACCEPT;
580
547 return ret; 581 return ret;
548} 582}
549 583
@@ -557,7 +591,10 @@ static inline int ip_vs_send_or_cont(int pf, struct sk_buff *skb,
557 if (likely(!(cp->flags & IP_VS_CONN_F_NFCT))) 591 if (likely(!(cp->flags & IP_VS_CONN_F_NFCT)))
558 ip_vs_notrack(skb); 592 ip_vs_notrack(skb);
559 if (!local) { 593 if (!local) {
594 ip_vs_drop_early_demux_sk(skb);
560 skb_forward_csum(skb); 595 skb_forward_csum(skb);
596 if (!skb->sk)
597 skb_sender_cpu_clear(skb);
561 NF_HOOK(pf, NF_INET_LOCAL_OUT, NULL, skb, 598 NF_HOOK(pf, NF_INET_LOCAL_OUT, NULL, skb,
562 NULL, skb_dst(skb)->dev, dst_output_sk); 599 NULL, skb_dst(skb)->dev, dst_output_sk);
563 } else 600 } else
@@ -845,6 +882,8 @@ ip_vs_prepare_tunneled_skb(struct sk_buff *skb, int skb_af,
845 struct ipv6hdr *old_ipv6h = NULL; 882 struct ipv6hdr *old_ipv6h = NULL;
846#endif 883#endif
847 884
885 ip_vs_drop_early_demux_sk(skb);
886
848 if (skb_headroom(skb) < max_headroom || skb_cloned(skb)) { 887 if (skb_headroom(skb) < max_headroom || skb_cloned(skb)) {
849 new_skb = skb_realloc_headroom(skb, max_headroom); 888 new_skb = skb_realloc_headroom(skb, max_headroom);
850 if (!new_skb) 889 if (!new_skb)
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 13fad8668f83..3c20d02aee73 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -287,6 +287,46 @@ static void nf_ct_del_from_dying_or_unconfirmed_list(struct nf_conn *ct)
287 spin_unlock(&pcpu->lock); 287 spin_unlock(&pcpu->lock);
288} 288}
289 289
290/* Released via destroy_conntrack() */
291struct nf_conn *nf_ct_tmpl_alloc(struct net *net, u16 zone, gfp_t flags)
292{
293 struct nf_conn *tmpl;
294
295 tmpl = kzalloc(sizeof(*tmpl), flags);
296 if (tmpl == NULL)
297 return NULL;
298
299 tmpl->status = IPS_TEMPLATE;
300 write_pnet(&tmpl->ct_net, net);
301
302#ifdef CONFIG_NF_CONNTRACK_ZONES
303 if (zone) {
304 struct nf_conntrack_zone *nf_ct_zone;
305
306 nf_ct_zone = nf_ct_ext_add(tmpl, NF_CT_EXT_ZONE, flags);
307 if (!nf_ct_zone)
308 goto out_free;
309 nf_ct_zone->id = zone;
310 }
311#endif
312 atomic_set(&tmpl->ct_general.use, 0);
313
314 return tmpl;
315#ifdef CONFIG_NF_CONNTRACK_ZONES
316out_free:
317 kfree(tmpl);
318 return NULL;
319#endif
320}
321EXPORT_SYMBOL_GPL(nf_ct_tmpl_alloc);
322
323static void nf_ct_tmpl_free(struct nf_conn *tmpl)
324{
325 nf_ct_ext_destroy(tmpl);
326 nf_ct_ext_free(tmpl);
327 kfree(tmpl);
328}
329
290static void 330static void
291destroy_conntrack(struct nf_conntrack *nfct) 331destroy_conntrack(struct nf_conntrack *nfct)
292{ 332{
@@ -298,6 +338,10 @@ destroy_conntrack(struct nf_conntrack *nfct)
298 NF_CT_ASSERT(atomic_read(&nfct->use) == 0); 338 NF_CT_ASSERT(atomic_read(&nfct->use) == 0);
299 NF_CT_ASSERT(!timer_pending(&ct->timeout)); 339 NF_CT_ASSERT(!timer_pending(&ct->timeout));
300 340
341 if (unlikely(nf_ct_is_template(ct))) {
342 nf_ct_tmpl_free(ct);
343 return;
344 }
301 rcu_read_lock(); 345 rcu_read_lock();
302 l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct)); 346 l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), nf_ct_protonum(ct));
303 if (l4proto && l4proto->destroy) 347 if (l4proto && l4proto->destroy)
@@ -540,28 +584,6 @@ out:
540} 584}
541EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert); 585EXPORT_SYMBOL_GPL(nf_conntrack_hash_check_insert);
542 586
543/* deletion from this larval template list happens via nf_ct_put() */
544void nf_conntrack_tmpl_insert(struct net *net, struct nf_conn *tmpl)
545{
546 struct ct_pcpu *pcpu;
547
548 __set_bit(IPS_TEMPLATE_BIT, &tmpl->status);
549 __set_bit(IPS_CONFIRMED_BIT, &tmpl->status);
550 nf_conntrack_get(&tmpl->ct_general);
551
552 /* add this conntrack to the (per cpu) tmpl list */
553 local_bh_disable();
554 tmpl->cpu = smp_processor_id();
555 pcpu = per_cpu_ptr(nf_ct_net(tmpl)->ct.pcpu_lists, tmpl->cpu);
556
557 spin_lock(&pcpu->lock);
558 /* Overload tuple linked list to put us in template list. */
559 hlist_nulls_add_head_rcu(&tmpl->tuplehash[IP_CT_DIR_ORIGINAL].hnnode,
560 &pcpu->tmpl);
561 spin_unlock_bh(&pcpu->lock);
562}
563EXPORT_SYMBOL_GPL(nf_conntrack_tmpl_insert);
564
565/* Confirm a connection given skb; places it in hash table */ 587/* Confirm a connection given skb; places it in hash table */
566int 588int
567__nf_conntrack_confirm(struct sk_buff *skb) 589__nf_conntrack_confirm(struct sk_buff *skb)
@@ -1522,10 +1544,8 @@ void *nf_ct_alloc_hashtable(unsigned int *sizep, int nulls)
1522 sz = nr_slots * sizeof(struct hlist_nulls_head); 1544 sz = nr_slots * sizeof(struct hlist_nulls_head);
1523 hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO, 1545 hash = (void *)__get_free_pages(GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
1524 get_order(sz)); 1546 get_order(sz));
1525 if (!hash) { 1547 if (!hash)
1526 printk(KERN_WARNING "nf_conntrack: falling back to vmalloc.\n");
1527 hash = vzalloc(sz); 1548 hash = vzalloc(sz);
1528 }
1529 1549
1530 if (hash && nulls) 1550 if (hash && nulls)
1531 for (i = 0; i < nr_slots; i++) 1551 for (i = 0; i < nr_slots; i++)
@@ -1751,7 +1771,6 @@ int nf_conntrack_init_net(struct net *net)
1751 spin_lock_init(&pcpu->lock); 1771 spin_lock_init(&pcpu->lock);
1752 INIT_HLIST_NULLS_HEAD(&pcpu->unconfirmed, UNCONFIRMED_NULLS_VAL); 1772 INIT_HLIST_NULLS_HEAD(&pcpu->unconfirmed, UNCONFIRMED_NULLS_VAL);
1753 INIT_HLIST_NULLS_HEAD(&pcpu->dying, DYING_NULLS_VAL); 1773 INIT_HLIST_NULLS_HEAD(&pcpu->dying, DYING_NULLS_VAL);
1754 INIT_HLIST_NULLS_HEAD(&pcpu->tmpl, TEMPLATE_NULLS_VAL);
1755 } 1774 }
1756 1775
1757 net->ct.stat = alloc_percpu(struct ip_conntrack_stat); 1776 net->ct.stat = alloc_percpu(struct ip_conntrack_stat);
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 7a17070c5dab..b45a4223cb05 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -219,7 +219,8 @@ static inline int expect_clash(const struct nf_conntrack_expect *a,
219 a->mask.src.u3.all[count] & b->mask.src.u3.all[count]; 219 a->mask.src.u3.all[count] & b->mask.src.u3.all[count];
220 } 220 }
221 221
222 return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask); 222 return nf_ct_tuple_mask_cmp(&a->tuple, &b->tuple, &intersect_mask) &&
223 nf_ct_zone(a->master) == nf_ct_zone(b->master);
223} 224}
224 225
225static inline int expect_matches(const struct nf_conntrack_expect *a, 226static inline int expect_matches(const struct nf_conntrack_expect *a,
diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c
index d1c23940a86a..6b8b0abbfab4 100644
--- a/net/netfilter/nf_conntrack_netlink.c
+++ b/net/netfilter/nf_conntrack_netlink.c
@@ -2995,11 +2995,6 @@ ctnetlink_create_expect(struct net *net, u16 zone,
2995 } 2995 }
2996 2996
2997 err = nf_ct_expect_related_report(exp, portid, report); 2997 err = nf_ct_expect_related_report(exp, portid, report);
2998 if (err < 0)
2999 goto err_exp;
3000
3001 return 0;
3002err_exp:
3003 nf_ct_expect_put(exp); 2998 nf_ct_expect_put(exp);
3004err_ct: 2999err_ct:
3005 nf_ct_put(ct); 3000 nf_ct_put(ct);
diff --git a/net/netfilter/nf_synproxy_core.c b/net/netfilter/nf_synproxy_core.c
index 789feeae6c44..d7f168527903 100644
--- a/net/netfilter/nf_synproxy_core.c
+++ b/net/netfilter/nf_synproxy_core.c
@@ -349,23 +349,20 @@ static void __net_exit synproxy_proc_exit(struct net *net)
349static int __net_init synproxy_net_init(struct net *net) 349static int __net_init synproxy_net_init(struct net *net)
350{ 350{
351 struct synproxy_net *snet = synproxy_pernet(net); 351 struct synproxy_net *snet = synproxy_pernet(net);
352 struct nf_conntrack_tuple t;
353 struct nf_conn *ct; 352 struct nf_conn *ct;
354 int err = -ENOMEM; 353 int err = -ENOMEM;
355 354
356 memset(&t, 0, sizeof(t)); 355 ct = nf_ct_tmpl_alloc(net, 0, GFP_KERNEL);
357 ct = nf_conntrack_alloc(net, 0, &t, &t, GFP_KERNEL); 356 if (!ct)
358 if (IS_ERR(ct)) {
359 err = PTR_ERR(ct);
360 goto err1; 357 goto err1;
361 }
362 358
363 if (!nfct_seqadj_ext_add(ct)) 359 if (!nfct_seqadj_ext_add(ct))
364 goto err2; 360 goto err2;
365 if (!nfct_synproxy_ext_add(ct)) 361 if (!nfct_synproxy_ext_add(ct))
366 goto err2; 362 goto err2;
367 363
368 nf_conntrack_tmpl_insert(net, ct); 364 __set_bit(IPS_CONFIRMED_BIT, &ct->status);
365 nf_conntrack_get(&ct->ct_general);
369 snet->tmpl = ct; 366 snet->tmpl = ct;
370 367
371 snet->stats = alloc_percpu(struct synproxy_stats); 368 snet->stats = alloc_percpu(struct synproxy_stats);
diff --git a/net/netfilter/xt_CT.c b/net/netfilter/xt_CT.c
index 75747aecdebe..43ddeee404e9 100644
--- a/net/netfilter/xt_CT.c
+++ b/net/netfilter/xt_CT.c
@@ -184,7 +184,6 @@ out:
184static int xt_ct_tg_check(const struct xt_tgchk_param *par, 184static int xt_ct_tg_check(const struct xt_tgchk_param *par,
185 struct xt_ct_target_info_v1 *info) 185 struct xt_ct_target_info_v1 *info)
186{ 186{
187 struct nf_conntrack_tuple t;
188 struct nf_conn *ct; 187 struct nf_conn *ct;
189 int ret = -EOPNOTSUPP; 188 int ret = -EOPNOTSUPP;
190 189
@@ -202,11 +201,11 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par,
202 if (ret < 0) 201 if (ret < 0)
203 goto err1; 202 goto err1;
204 203
205 memset(&t, 0, sizeof(t)); 204 ct = nf_ct_tmpl_alloc(par->net, info->zone, GFP_KERNEL);
206 ct = nf_conntrack_alloc(par->net, info->zone, &t, &t, GFP_KERNEL); 205 if (!ct) {
207 ret = PTR_ERR(ct); 206 ret = -ENOMEM;
208 if (IS_ERR(ct))
209 goto err2; 207 goto err2;
208 }
210 209
211 ret = 0; 210 ret = 0;
212 if ((info->ct_events || info->exp_events) && 211 if ((info->ct_events || info->exp_events) &&
@@ -227,8 +226,8 @@ static int xt_ct_tg_check(const struct xt_tgchk_param *par,
227 if (ret < 0) 226 if (ret < 0)
228 goto err3; 227 goto err3;
229 } 228 }
230 229 __set_bit(IPS_CONFIRMED_BIT, &ct->status);
231 nf_conntrack_tmpl_insert(par->net, ct); 230 nf_conntrack_get(&ct->ct_general);
232out: 231out:
233 info->ct = ct; 232 info->ct = ct;
234 return 0; 233 return 0;
diff --git a/net/netfilter/xt_IDLETIMER.c b/net/netfilter/xt_IDLETIMER.c
index f407ebc13481..29d2c31f406c 100644
--- a/net/netfilter/xt_IDLETIMER.c
+++ b/net/netfilter/xt_IDLETIMER.c
@@ -126,6 +126,7 @@ static int idletimer_tg_create(struct idletimer_tg_info *info)
126 goto out; 126 goto out;
127 } 127 }
128 128
129 sysfs_attr_init(&info->timer->attr.attr);
129 info->timer->attr.attr.name = kstrdup(info->label, GFP_KERNEL); 130 info->timer->attr.attr.name = kstrdup(info->label, GFP_KERNEL);
130 if (!info->timer->attr.attr.name) { 131 if (!info->timer->attr.attr.name) {
131 ret = -ENOMEM; 132 ret = -ENOMEM;
diff --git a/net/netlink/af_netlink.c b/net/netlink/af_netlink.c
index 9a0ae7172f92..67d210477863 100644
--- a/net/netlink/af_netlink.c
+++ b/net/netlink/af_netlink.c
@@ -357,25 +357,52 @@ err1:
357 return NULL; 357 return NULL;
358} 358}
359 359
360
361static void
362__netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, bool tx_ring, void **pg_vec,
363 unsigned int order)
364{
365 struct netlink_sock *nlk = nlk_sk(sk);
366 struct sk_buff_head *queue;
367 struct netlink_ring *ring;
368
369 queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
370 ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
371
372 spin_lock_bh(&queue->lock);
373
374 ring->frame_max = req->nm_frame_nr - 1;
375 ring->head = 0;
376 ring->frame_size = req->nm_frame_size;
377 ring->pg_vec_pages = req->nm_block_size / PAGE_SIZE;
378
379 swap(ring->pg_vec_len, req->nm_block_nr);
380 swap(ring->pg_vec_order, order);
381 swap(ring->pg_vec, pg_vec);
382
383 __skb_queue_purge(queue);
384 spin_unlock_bh(&queue->lock);
385
386 WARN_ON(atomic_read(&nlk->mapped));
387
388 if (pg_vec)
389 free_pg_vec(pg_vec, order, req->nm_block_nr);
390}
391
360static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req, 392static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
361 bool closing, bool tx_ring) 393 bool tx_ring)
362{ 394{
363 struct netlink_sock *nlk = nlk_sk(sk); 395 struct netlink_sock *nlk = nlk_sk(sk);
364 struct netlink_ring *ring; 396 struct netlink_ring *ring;
365 struct sk_buff_head *queue;
366 void **pg_vec = NULL; 397 void **pg_vec = NULL;
367 unsigned int order = 0; 398 unsigned int order = 0;
368 int err;
369 399
370 ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring; 400 ring = tx_ring ? &nlk->tx_ring : &nlk->rx_ring;
371 queue = tx_ring ? &sk->sk_write_queue : &sk->sk_receive_queue;
372 401
373 if (!closing) { 402 if (atomic_read(&nlk->mapped))
374 if (atomic_read(&nlk->mapped)) 403 return -EBUSY;
375 return -EBUSY; 404 if (atomic_read(&ring->pending))
376 if (atomic_read(&ring->pending)) 405 return -EBUSY;
377 return -EBUSY;
378 }
379 406
380 if (req->nm_block_nr) { 407 if (req->nm_block_nr) {
381 if (ring->pg_vec != NULL) 408 if (ring->pg_vec != NULL)
@@ -407,31 +434,19 @@ static int netlink_set_ring(struct sock *sk, struct nl_mmap_req *req,
407 return -EINVAL; 434 return -EINVAL;
408 } 435 }
409 436
410 err = -EBUSY;
411 mutex_lock(&nlk->pg_vec_lock); 437 mutex_lock(&nlk->pg_vec_lock);
412 if (closing || atomic_read(&nlk->mapped) == 0) { 438 if (atomic_read(&nlk->mapped) == 0) {
413 err = 0; 439 __netlink_set_ring(sk, req, tx_ring, pg_vec, order);
414 spin_lock_bh(&queue->lock); 440 mutex_unlock(&nlk->pg_vec_lock);
415 441 return 0;
416 ring->frame_max = req->nm_frame_nr - 1;
417 ring->head = 0;
418 ring->frame_size = req->nm_frame_size;
419 ring->pg_vec_pages = req->nm_block_size / PAGE_SIZE;
420
421 swap(ring->pg_vec_len, req->nm_block_nr);
422 swap(ring->pg_vec_order, order);
423 swap(ring->pg_vec, pg_vec);
424
425 __skb_queue_purge(queue);
426 spin_unlock_bh(&queue->lock);
427
428 WARN_ON(atomic_read(&nlk->mapped));
429 } 442 }
443
430 mutex_unlock(&nlk->pg_vec_lock); 444 mutex_unlock(&nlk->pg_vec_lock);
431 445
432 if (pg_vec) 446 if (pg_vec)
433 free_pg_vec(pg_vec, order, req->nm_block_nr); 447 free_pg_vec(pg_vec, order, req->nm_block_nr);
434 return err; 448
449 return -EBUSY;
435} 450}
436 451
437static void netlink_mm_open(struct vm_area_struct *vma) 452static void netlink_mm_open(struct vm_area_struct *vma)
@@ -900,10 +915,10 @@ static void netlink_sock_destruct(struct sock *sk)
900 915
901 memset(&req, 0, sizeof(req)); 916 memset(&req, 0, sizeof(req));
902 if (nlk->rx_ring.pg_vec) 917 if (nlk->rx_ring.pg_vec)
903 netlink_set_ring(sk, &req, true, false); 918 __netlink_set_ring(sk, &req, false, NULL, 0);
904 memset(&req, 0, sizeof(req)); 919 memset(&req, 0, sizeof(req));
905 if (nlk->tx_ring.pg_vec) 920 if (nlk->tx_ring.pg_vec)
906 netlink_set_ring(sk, &req, true, true); 921 __netlink_set_ring(sk, &req, true, NULL, 0);
907 } 922 }
908#endif /* CONFIG_NETLINK_MMAP */ 923#endif /* CONFIG_NETLINK_MMAP */
909 924
@@ -1081,6 +1096,11 @@ static int netlink_insert(struct sock *sk, u32 portid)
1081 1096
1082 err = __netlink_insert(table, sk); 1097 err = __netlink_insert(table, sk);
1083 if (err) { 1098 if (err) {
1099 /* In case the hashtable backend returns with -EBUSY
1100 * from here, it must not escape to the caller.
1101 */
1102 if (unlikely(err == -EBUSY))
1103 err = -EOVERFLOW;
1084 if (err == -EEXIST) 1104 if (err == -EEXIST)
1085 err = -EADDRINUSE; 1105 err = -EADDRINUSE;
1086 nlk_sk(sk)->portid = 0; 1106 nlk_sk(sk)->portid = 0;
@@ -2223,7 +2243,7 @@ static int netlink_setsockopt(struct socket *sock, int level, int optname,
2223 return -EINVAL; 2243 return -EINVAL;
2224 if (copy_from_user(&req, optval, sizeof(req))) 2244 if (copy_from_user(&req, optval, sizeof(req)))
2225 return -EFAULT; 2245 return -EFAULT;
2226 err = netlink_set_ring(sk, &req, false, 2246 err = netlink_set_ring(sk, &req,
2227 optname == NETLINK_TX_RING); 2247 optname == NETLINK_TX_RING);
2228 break; 2248 break;
2229 } 2249 }
diff --git a/net/openvswitch/actions.c b/net/openvswitch/actions.c
index 8a8c0b8b4f63..ee34f474ad14 100644
--- a/net/openvswitch/actions.c
+++ b/net/openvswitch/actions.c
@@ -273,28 +273,36 @@ static int set_eth_addr(struct sk_buff *skb, struct sw_flow_key *flow_key,
273 return 0; 273 return 0;
274} 274}
275 275
276static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh, 276static void update_ip_l4_checksum(struct sk_buff *skb, struct iphdr *nh,
277 __be32 *addr, __be32 new_addr) 277 __be32 addr, __be32 new_addr)
278{ 278{
279 int transport_len = skb->len - skb_transport_offset(skb); 279 int transport_len = skb->len - skb_transport_offset(skb);
280 280
281 if (nh->frag_off & htons(IP_OFFSET))
282 return;
283
281 if (nh->protocol == IPPROTO_TCP) { 284 if (nh->protocol == IPPROTO_TCP) {
282 if (likely(transport_len >= sizeof(struct tcphdr))) 285 if (likely(transport_len >= sizeof(struct tcphdr)))
283 inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb, 286 inet_proto_csum_replace4(&tcp_hdr(skb)->check, skb,
284 *addr, new_addr, 1); 287 addr, new_addr, 1);
285 } else if (nh->protocol == IPPROTO_UDP) { 288 } else if (nh->protocol == IPPROTO_UDP) {
286 if (likely(transport_len >= sizeof(struct udphdr))) { 289 if (likely(transport_len >= sizeof(struct udphdr))) {
287 struct udphdr *uh = udp_hdr(skb); 290 struct udphdr *uh = udp_hdr(skb);
288 291
289 if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) { 292 if (uh->check || skb->ip_summed == CHECKSUM_PARTIAL) {
290 inet_proto_csum_replace4(&uh->check, skb, 293 inet_proto_csum_replace4(&uh->check, skb,
291 *addr, new_addr, 1); 294 addr, new_addr, 1);
292 if (!uh->check) 295 if (!uh->check)
293 uh->check = CSUM_MANGLED_0; 296 uh->check = CSUM_MANGLED_0;
294 } 297 }
295 } 298 }
296 } 299 }
300}
297 301
302static void set_ip_addr(struct sk_buff *skb, struct iphdr *nh,
303 __be32 *addr, __be32 new_addr)
304{
305 update_ip_l4_checksum(skb, nh, *addr, new_addr);
298 csum_replace4(&nh->check, *addr, new_addr); 306 csum_replace4(&nh->check, *addr, new_addr);
299 skb_clear_hash(skb); 307 skb_clear_hash(skb);
300 *addr = new_addr; 308 *addr = new_addr;
diff --git a/net/openvswitch/flow_table.c b/net/openvswitch/flow_table.c
index 4613df8c8290..65523948fb95 100644
--- a/net/openvswitch/flow_table.c
+++ b/net/openvswitch/flow_table.c
@@ -752,7 +752,7 @@ int ovs_flow_init(void)
752 BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long)); 752 BUILD_BUG_ON(sizeof(struct sw_flow_key) % sizeof(long));
753 753
754 flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow) 754 flow_cache = kmem_cache_create("sw_flow", sizeof(struct sw_flow)
755 + (num_possible_nodes() 755 + (nr_node_ids
756 * sizeof(struct flow_stats *)), 756 * sizeof(struct flow_stats *)),
757 0, 0, NULL); 757 0, 0, NULL);
758 if (flow_cache == NULL) 758 if (flow_cache == NULL)
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index c9e8741226c6..ed458b315ef4 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2403,7 +2403,8 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
2403 } 2403 }
2404 tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto, 2404 tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
2405 addr, hlen); 2405 addr, hlen);
2406 if (tp_len > dev->mtu + dev->hard_header_len) { 2406 if (likely(tp_len >= 0) &&
2407 tp_len > dev->mtu + dev->hard_header_len) {
2407 struct ethhdr *ehdr; 2408 struct ethhdr *ehdr;
2408 /* Earlier code assumed this would be a VLAN pkt, 2409 /* Earlier code assumed this would be a VLAN pkt,
2409 * double-check this now that we have the actual 2410 * double-check this now that we have the actual
@@ -2784,7 +2785,7 @@ static int packet_release(struct socket *sock)
2784static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto) 2785static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto)
2785{ 2786{
2786 struct packet_sock *po = pkt_sk(sk); 2787 struct packet_sock *po = pkt_sk(sk);
2787 const struct net_device *dev_curr; 2788 struct net_device *dev_curr;
2788 __be16 proto_curr; 2789 __be16 proto_curr;
2789 bool need_rehook; 2790 bool need_rehook;
2790 2791
@@ -2808,15 +2809,13 @@ static int packet_do_bind(struct sock *sk, struct net_device *dev, __be16 proto)
2808 2809
2809 po->num = proto; 2810 po->num = proto;
2810 po->prot_hook.type = proto; 2811 po->prot_hook.type = proto;
2811
2812 if (po->prot_hook.dev)
2813 dev_put(po->prot_hook.dev);
2814
2815 po->prot_hook.dev = dev; 2812 po->prot_hook.dev = dev;
2816 2813
2817 po->ifindex = dev ? dev->ifindex : 0; 2814 po->ifindex = dev ? dev->ifindex : 0;
2818 packet_cached_dev_assign(po, dev); 2815 packet_cached_dev_assign(po, dev);
2819 } 2816 }
2817 if (dev_curr)
2818 dev_put(dev_curr);
2820 2819
2821 if (proto == 0 || !need_rehook) 2820 if (proto == 0 || !need_rehook)
2822 goto out_unlock; 2821 goto out_unlock;
diff --git a/net/rds/info.c b/net/rds/info.c
index 9a6b4f66187c..140a44a5f7b7 100644
--- a/net/rds/info.c
+++ b/net/rds/info.c
@@ -176,7 +176,7 @@ int rds_info_getsockopt(struct socket *sock, int optname, char __user *optval,
176 176
177 /* check for all kinds of wrapping and the like */ 177 /* check for all kinds of wrapping and the like */
178 start = (unsigned long)optval; 178 start = (unsigned long)optval;
179 if (len < 0 || len + PAGE_SIZE - 1 < len || start + len < start) { 179 if (len < 0 || len > INT_MAX - PAGE_SIZE + 1 || start + len < start) {
180 ret = -EINVAL; 180 ret = -EINVAL;
181 goto out; 181 goto out;
182 } 182 }
diff --git a/net/sched/act_api.c b/net/sched/act_api.c
index af427a3dbcba..43ec92680ae8 100644
--- a/net/sched/act_api.c
+++ b/net/sched/act_api.c
@@ -45,7 +45,7 @@ void tcf_hash_destroy(struct tc_action *a)
45} 45}
46EXPORT_SYMBOL(tcf_hash_destroy); 46EXPORT_SYMBOL(tcf_hash_destroy);
47 47
48int tcf_hash_release(struct tc_action *a, int bind) 48int __tcf_hash_release(struct tc_action *a, bool bind, bool strict)
49{ 49{
50 struct tcf_common *p = a->priv; 50 struct tcf_common *p = a->priv;
51 int ret = 0; 51 int ret = 0;
@@ -53,7 +53,7 @@ int tcf_hash_release(struct tc_action *a, int bind)
53 if (p) { 53 if (p) {
54 if (bind) 54 if (bind)
55 p->tcfc_bindcnt--; 55 p->tcfc_bindcnt--;
56 else if (p->tcfc_bindcnt > 0) 56 else if (strict && p->tcfc_bindcnt > 0)
57 return -EPERM; 57 return -EPERM;
58 58
59 p->tcfc_refcnt--; 59 p->tcfc_refcnt--;
@@ -64,9 +64,10 @@ int tcf_hash_release(struct tc_action *a, int bind)
64 ret = 1; 64 ret = 1;
65 } 65 }
66 } 66 }
67
67 return ret; 68 return ret;
68} 69}
69EXPORT_SYMBOL(tcf_hash_release); 70EXPORT_SYMBOL(__tcf_hash_release);
70 71
71static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb, 72static int tcf_dump_walker(struct sk_buff *skb, struct netlink_callback *cb,
72 struct tc_action *a) 73 struct tc_action *a)
@@ -136,7 +137,7 @@ static int tcf_del_walker(struct sk_buff *skb, struct tc_action *a)
136 head = &hinfo->htab[tcf_hash(i, hinfo->hmask)]; 137 head = &hinfo->htab[tcf_hash(i, hinfo->hmask)];
137 hlist_for_each_entry_safe(p, n, head, tcfc_head) { 138 hlist_for_each_entry_safe(p, n, head, tcfc_head) {
138 a->priv = p; 139 a->priv = p;
139 ret = tcf_hash_release(a, 0); 140 ret = __tcf_hash_release(a, false, true);
140 if (ret == ACT_P_DELETED) { 141 if (ret == ACT_P_DELETED) {
141 module_put(a->ops->owner); 142 module_put(a->ops->owner);
142 n_i++; 143 n_i++;
@@ -408,7 +409,7 @@ int tcf_action_destroy(struct list_head *actions, int bind)
408 int ret = 0; 409 int ret = 0;
409 410
410 list_for_each_entry_safe(a, tmp, actions, list) { 411 list_for_each_entry_safe(a, tmp, actions, list) {
411 ret = tcf_hash_release(a, bind); 412 ret = __tcf_hash_release(a, bind, true);
412 if (ret == ACT_P_DELETED) 413 if (ret == ACT_P_DELETED)
413 module_put(a->ops->owner); 414 module_put(a->ops->owner);
414 else if (ret < 0) 415 else if (ret < 0)
diff --git a/net/sched/act_bpf.c b/net/sched/act_bpf.c
index 1d56903fd4c7..d0edeb7a1950 100644
--- a/net/sched/act_bpf.c
+++ b/net/sched/act_bpf.c
@@ -27,9 +27,10 @@
27struct tcf_bpf_cfg { 27struct tcf_bpf_cfg {
28 struct bpf_prog *filter; 28 struct bpf_prog *filter;
29 struct sock_filter *bpf_ops; 29 struct sock_filter *bpf_ops;
30 char *bpf_name; 30 const char *bpf_name;
31 u32 bpf_fd; 31 u32 bpf_fd;
32 u16 bpf_num_ops; 32 u16 bpf_num_ops;
33 bool is_ebpf;
33}; 34};
34 35
35static int tcf_bpf(struct sk_buff *skb, const struct tc_action *act, 36static int tcf_bpf(struct sk_buff *skb, const struct tc_action *act,
@@ -207,6 +208,7 @@ static int tcf_bpf_init_from_ops(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
207 cfg->bpf_ops = bpf_ops; 208 cfg->bpf_ops = bpf_ops;
208 cfg->bpf_num_ops = bpf_num_ops; 209 cfg->bpf_num_ops = bpf_num_ops;
209 cfg->filter = fp; 210 cfg->filter = fp;
211 cfg->is_ebpf = false;
210 212
211 return 0; 213 return 0;
212} 214}
@@ -241,18 +243,40 @@ static int tcf_bpf_init_from_efd(struct nlattr **tb, struct tcf_bpf_cfg *cfg)
241 cfg->bpf_fd = bpf_fd; 243 cfg->bpf_fd = bpf_fd;
242 cfg->bpf_name = name; 244 cfg->bpf_name = name;
243 cfg->filter = fp; 245 cfg->filter = fp;
246 cfg->is_ebpf = true;
244 247
245 return 0; 248 return 0;
246} 249}
247 250
251static void tcf_bpf_cfg_cleanup(const struct tcf_bpf_cfg *cfg)
252{
253 if (cfg->is_ebpf)
254 bpf_prog_put(cfg->filter);
255 else
256 bpf_prog_destroy(cfg->filter);
257
258 kfree(cfg->bpf_ops);
259 kfree(cfg->bpf_name);
260}
261
262static void tcf_bpf_prog_fill_cfg(const struct tcf_bpf *prog,
263 struct tcf_bpf_cfg *cfg)
264{
265 cfg->is_ebpf = tcf_bpf_is_ebpf(prog);
266 cfg->filter = prog->filter;
267
268 cfg->bpf_ops = prog->bpf_ops;
269 cfg->bpf_name = prog->bpf_name;
270}
271
248static int tcf_bpf_init(struct net *net, struct nlattr *nla, 272static int tcf_bpf_init(struct net *net, struct nlattr *nla,
249 struct nlattr *est, struct tc_action *act, 273 struct nlattr *est, struct tc_action *act,
250 int replace, int bind) 274 int replace, int bind)
251{ 275{
252 struct nlattr *tb[TCA_ACT_BPF_MAX + 1]; 276 struct nlattr *tb[TCA_ACT_BPF_MAX + 1];
277 struct tcf_bpf_cfg cfg, old;
253 struct tc_act_bpf *parm; 278 struct tc_act_bpf *parm;
254 struct tcf_bpf *prog; 279 struct tcf_bpf *prog;
255 struct tcf_bpf_cfg cfg;
256 bool is_bpf, is_ebpf; 280 bool is_bpf, is_ebpf;
257 int ret; 281 int ret;
258 282
@@ -301,6 +325,9 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
301 prog = to_bpf(act); 325 prog = to_bpf(act);
302 spin_lock_bh(&prog->tcf_lock); 326 spin_lock_bh(&prog->tcf_lock);
303 327
328 if (ret != ACT_P_CREATED)
329 tcf_bpf_prog_fill_cfg(prog, &old);
330
304 prog->bpf_ops = cfg.bpf_ops; 331 prog->bpf_ops = cfg.bpf_ops;
305 prog->bpf_name = cfg.bpf_name; 332 prog->bpf_name = cfg.bpf_name;
306 333
@@ -316,29 +343,22 @@ static int tcf_bpf_init(struct net *net, struct nlattr *nla,
316 343
317 if (ret == ACT_P_CREATED) 344 if (ret == ACT_P_CREATED)
318 tcf_hash_insert(act); 345 tcf_hash_insert(act);
346 else
347 tcf_bpf_cfg_cleanup(&old);
319 348
320 return ret; 349 return ret;
321 350
322destroy_fp: 351destroy_fp:
323 if (is_ebpf) 352 tcf_bpf_cfg_cleanup(&cfg);
324 bpf_prog_put(cfg.filter);
325 else
326 bpf_prog_destroy(cfg.filter);
327
328 kfree(cfg.bpf_ops);
329 kfree(cfg.bpf_name);
330
331 return ret; 353 return ret;
332} 354}
333 355
334static void tcf_bpf_cleanup(struct tc_action *act, int bind) 356static void tcf_bpf_cleanup(struct tc_action *act, int bind)
335{ 357{
336 const struct tcf_bpf *prog = act->priv; 358 struct tcf_bpf_cfg tmp;
337 359
338 if (tcf_bpf_is_ebpf(prog)) 360 tcf_bpf_prog_fill_cfg(act->priv, &tmp);
339 bpf_prog_put(prog->filter); 361 tcf_bpf_cfg_cleanup(&tmp);
340 else
341 bpf_prog_destroy(prog->filter);
342} 362}
343 363
344static struct tc_action_ops act_bpf_ops __read_mostly = { 364static struct tc_action_ops act_bpf_ops __read_mostly = {
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index a42a3b257226..268545050ddb 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -98,6 +98,8 @@ static int tcf_mirred_init(struct net *net, struct nlattr *nla,
98 return ret; 98 return ret;
99 ret = ACT_P_CREATED; 99 ret = ACT_P_CREATED;
100 } else { 100 } else {
101 if (bind)
102 return 0;
101 if (!ovr) { 103 if (!ovr) {
102 tcf_hash_release(a, bind); 104 tcf_hash_release(a, bind);
103 return -EEXIST; 105 return -EEXIST;
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index 17e6d6669c7f..ff8b466a73f6 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -68,13 +68,12 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
68 } 68 }
69 ret = ACT_P_CREATED; 69 ret = ACT_P_CREATED;
70 } else { 70 } else {
71 p = to_pedit(a);
72 tcf_hash_release(a, bind);
73 if (bind) 71 if (bind)
74 return 0; 72 return 0;
73 tcf_hash_release(a, bind);
75 if (!ovr) 74 if (!ovr)
76 return -EEXIST; 75 return -EEXIST;
77 76 p = to_pedit(a);
78 if (p->tcfp_nkeys && p->tcfp_nkeys != parm->nkeys) { 77 if (p->tcfp_nkeys && p->tcfp_nkeys != parm->nkeys) {
79 keys = kmalloc(ksize, GFP_KERNEL); 78 keys = kmalloc(ksize, GFP_KERNEL);
80 if (keys == NULL) 79 if (keys == NULL)
diff --git a/net/sched/cls_bpf.c b/net/sched/cls_bpf.c
index c79ecfd36e0f..e5168f8b9640 100644
--- a/net/sched/cls_bpf.c
+++ b/net/sched/cls_bpf.c
@@ -378,7 +378,7 @@ static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
378 goto errout; 378 goto errout;
379 379
380 if (oldprog) { 380 if (oldprog) {
381 list_replace_rcu(&prog->link, &oldprog->link); 381 list_replace_rcu(&oldprog->link, &prog->link);
382 tcf_unbind_filter(tp, &oldprog->res); 382 tcf_unbind_filter(tp, &oldprog->res);
383 call_rcu(&oldprog->rcu, __cls_bpf_delete_prog); 383 call_rcu(&oldprog->rcu, __cls_bpf_delete_prog);
384 } else { 384 } else {
diff --git a/net/sched/cls_flow.c b/net/sched/cls_flow.c
index 76bc3a20ffdb..bb2a0f529c1f 100644
--- a/net/sched/cls_flow.c
+++ b/net/sched/cls_flow.c
@@ -425,6 +425,8 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
425 if (!fnew) 425 if (!fnew)
426 goto err2; 426 goto err2;
427 427
428 tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE);
429
428 fold = (struct flow_filter *)*arg; 430 fold = (struct flow_filter *)*arg;
429 if (fold) { 431 if (fold) {
430 err = -EINVAL; 432 err = -EINVAL;
@@ -486,7 +488,6 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
486 fnew->mask = ~0U; 488 fnew->mask = ~0U;
487 fnew->tp = tp; 489 fnew->tp = tp;
488 get_random_bytes(&fnew->hashrnd, 4); 490 get_random_bytes(&fnew->hashrnd, 4);
489 tcf_exts_init(&fnew->exts, TCA_FLOW_ACT, TCA_FLOW_POLICE);
490 } 491 }
491 492
492 fnew->perturb_timer.function = flow_perturbation; 493 fnew->perturb_timer.function = flow_perturbation;
@@ -526,7 +527,7 @@ static int flow_change(struct net *net, struct sk_buff *in_skb,
526 if (*arg == 0) 527 if (*arg == 0)
527 list_add_tail_rcu(&fnew->list, &head->filters); 528 list_add_tail_rcu(&fnew->list, &head->filters);
528 else 529 else
529 list_replace_rcu(&fnew->list, &fold->list); 530 list_replace_rcu(&fold->list, &fnew->list);
530 531
531 *arg = (unsigned long)fnew; 532 *arg = (unsigned long)fnew;
532 533
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 9d37ccd95062..2f3d03f99487 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -499,7 +499,7 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
499 *arg = (unsigned long) fnew; 499 *arg = (unsigned long) fnew;
500 500
501 if (fold) { 501 if (fold) {
502 list_replace_rcu(&fnew->list, &fold->list); 502 list_replace_rcu(&fold->list, &fnew->list);
503 tcf_unbind_filter(tp, &fold->res); 503 tcf_unbind_filter(tp, &fold->res);
504 call_rcu(&fold->rcu, fl_destroy_filter); 504 call_rcu(&fold->rcu, fl_destroy_filter);
505 } else { 505 } else {
diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c
index 93d5742dc7e0..6a783afe4960 100644
--- a/net/sched/sch_choke.c
+++ b/net/sched/sch_choke.c
@@ -385,6 +385,19 @@ static void choke_reset(struct Qdisc *sch)
385{ 385{
386 struct choke_sched_data *q = qdisc_priv(sch); 386 struct choke_sched_data *q = qdisc_priv(sch);
387 387
388 while (q->head != q->tail) {
389 struct sk_buff *skb = q->tab[q->head];
390
391 q->head = (q->head + 1) & q->tab_mask;
392 if (!skb)
393 continue;
394 qdisc_qstats_backlog_dec(sch, skb);
395 --sch->q.qlen;
396 qdisc_drop(skb, sch);
397 }
398
399 memset(q->tab, 0, (q->tab_mask + 1) * sizeof(struct sk_buff *));
400 q->head = q->tail = 0;
388 red_restart(&q->vars); 401 red_restart(&q->vars);
389} 402}
390 403
diff --git a/net/sched/sch_fq_codel.c b/net/sched/sch_fq_codel.c
index d75993f89fac..a9ba030435a2 100644
--- a/net/sched/sch_fq_codel.c
+++ b/net/sched/sch_fq_codel.c
@@ -155,14 +155,23 @@ static unsigned int fq_codel_drop(struct Qdisc *sch)
155 skb = dequeue_head(flow); 155 skb = dequeue_head(flow);
156 len = qdisc_pkt_len(skb); 156 len = qdisc_pkt_len(skb);
157 q->backlogs[idx] -= len; 157 q->backlogs[idx] -= len;
158 kfree_skb(skb);
159 sch->q.qlen--; 158 sch->q.qlen--;
160 qdisc_qstats_drop(sch); 159 qdisc_qstats_drop(sch);
161 qdisc_qstats_backlog_dec(sch, skb); 160 qdisc_qstats_backlog_dec(sch, skb);
161 kfree_skb(skb);
162 flow->dropped++; 162 flow->dropped++;
163 return idx; 163 return idx;
164} 164}
165 165
166static unsigned int fq_codel_qdisc_drop(struct Qdisc *sch)
167{
168 unsigned int prev_backlog;
169
170 prev_backlog = sch->qstats.backlog;
171 fq_codel_drop(sch);
172 return prev_backlog - sch->qstats.backlog;
173}
174
166static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch) 175static int fq_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
167{ 176{
168 struct fq_codel_sched_data *q = qdisc_priv(sch); 177 struct fq_codel_sched_data *q = qdisc_priv(sch);
@@ -279,10 +288,26 @@ begin:
279 288
280static void fq_codel_reset(struct Qdisc *sch) 289static void fq_codel_reset(struct Qdisc *sch)
281{ 290{
282 struct sk_buff *skb; 291 struct fq_codel_sched_data *q = qdisc_priv(sch);
292 int i;
283 293
284 while ((skb = fq_codel_dequeue(sch)) != NULL) 294 INIT_LIST_HEAD(&q->new_flows);
285 kfree_skb(skb); 295 INIT_LIST_HEAD(&q->old_flows);
296 for (i = 0; i < q->flows_cnt; i++) {
297 struct fq_codel_flow *flow = q->flows + i;
298
299 while (flow->head) {
300 struct sk_buff *skb = dequeue_head(flow);
301
302 qdisc_qstats_backlog_dec(sch, skb);
303 kfree_skb(skb);
304 }
305
306 INIT_LIST_HEAD(&flow->flowchain);
307 codel_vars_init(&flow->cvars);
308 }
309 memset(q->backlogs, 0, q->flows_cnt * sizeof(u32));
310 sch->q.qlen = 0;
286} 311}
287 312
288static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = { 313static const struct nla_policy fq_codel_policy[TCA_FQ_CODEL_MAX + 1] = {
@@ -604,7 +629,7 @@ static struct Qdisc_ops fq_codel_qdisc_ops __read_mostly = {
604 .enqueue = fq_codel_enqueue, 629 .enqueue = fq_codel_enqueue,
605 .dequeue = fq_codel_dequeue, 630 .dequeue = fq_codel_dequeue,
606 .peek = qdisc_peek_dequeued, 631 .peek = qdisc_peek_dequeued,
607 .drop = fq_codel_drop, 632 .drop = fq_codel_qdisc_drop,
608 .init = fq_codel_init, 633 .init = fq_codel_init,
609 .reset = fq_codel_reset, 634 .reset = fq_codel_reset,
610 .destroy = fq_codel_destroy, 635 .destroy = fq_codel_destroy,
diff --git a/net/sched/sch_plug.c b/net/sched/sch_plug.c
index 89f8fcf73f18..ade9445a55ab 100644
--- a/net/sched/sch_plug.c
+++ b/net/sched/sch_plug.c
@@ -216,6 +216,7 @@ static struct Qdisc_ops plug_qdisc_ops __read_mostly = {
216 .peek = qdisc_peek_head, 216 .peek = qdisc_peek_head,
217 .init = plug_init, 217 .init = plug_init,
218 .change = plug_change, 218 .change = plug_change,
219 .reset = qdisc_reset_queue,
219 .owner = THIS_MODULE, 220 .owner = THIS_MODULE,
220}; 221};
221 222
diff --git a/net/sched/sch_sfq.c b/net/sched/sch_sfq.c
index 7d1492663360..52f75a5473e1 100644
--- a/net/sched/sch_sfq.c
+++ b/net/sched/sch_sfq.c
@@ -306,10 +306,10 @@ drop:
306 len = qdisc_pkt_len(skb); 306 len = qdisc_pkt_len(skb);
307 slot->backlog -= len; 307 slot->backlog -= len;
308 sfq_dec(q, x); 308 sfq_dec(q, x);
309 kfree_skb(skb);
310 sch->q.qlen--; 309 sch->q.qlen--;
311 qdisc_qstats_drop(sch); 310 qdisc_qstats_drop(sch);
312 qdisc_qstats_backlog_dec(sch, skb); 311 qdisc_qstats_backlog_dec(sch, skb);
312 kfree_skb(skb);
313 return len; 313 return len;
314 } 314 }
315 315
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 1425ec2bbd5a..17bef01b9aa3 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -2200,12 +2200,6 @@ static int sctp_setsockopt_events(struct sock *sk, char __user *optval,
2200 if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen)) 2200 if (copy_from_user(&sctp_sk(sk)->subscribe, optval, optlen))
2201 return -EFAULT; 2201 return -EFAULT;
2202 2202
2203 if (sctp_sk(sk)->subscribe.sctp_data_io_event)
2204 pr_warn_ratelimited(DEPRECATED "%s (pid %d) "
2205 "Requested SCTP_SNDRCVINFO event.\n"
2206 "Use SCTP_RCVINFO through SCTP_RECVRCVINFO option instead.\n",
2207 current->comm, task_pid_nr(current));
2208
2209 /* At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT, 2203 /* At the time when a user app subscribes to SCTP_SENDER_DRY_EVENT,
2210 * if there is no data to be sent or retransmit, the stack will 2204 * if there is no data to be sent or retransmit, the stack will
2211 * immediately send up this notification. 2205 * immediately send up this notification.
diff --git a/net/sunrpc/backchannel_rqst.c b/net/sunrpc/backchannel_rqst.c
index 9825ff0f91d6..6255d141133b 100644
--- a/net/sunrpc/backchannel_rqst.c
+++ b/net/sunrpc/backchannel_rqst.c
@@ -240,8 +240,8 @@ static struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt, __be32 xid)
240 req = xprt_alloc_bc_req(xprt, GFP_ATOMIC); 240 req = xprt_alloc_bc_req(xprt, GFP_ATOMIC);
241 if (!req) 241 if (!req)
242 goto not_found; 242 goto not_found;
243 /* Note: this 'free' request adds it to xprt->bc_pa_list */ 243 list_add_tail(&req->rq_bc_pa_list, &xprt->bc_pa_list);
244 xprt_free_bc_request(req); 244 xprt->bc_alloc_count++;
245 } 245 }
246 req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst, 246 req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
247 rq_bc_pa_list); 247 rq_bc_pa_list);
@@ -336,7 +336,7 @@ void xprt_complete_bc_request(struct rpc_rqst *req, uint32_t copied)
336 336
337 spin_lock(&xprt->bc_pa_lock); 337 spin_lock(&xprt->bc_pa_lock);
338 list_del(&req->rq_bc_pa_list); 338 list_del(&req->rq_bc_pa_list);
339 xprt->bc_alloc_count--; 339 xprt_dec_alloc_count(xprt, 1);
340 spin_unlock(&xprt->bc_pa_lock); 340 spin_unlock(&xprt->bc_pa_lock);
341 341
342 req->rq_private_buf.len = copied; 342 req->rq_private_buf.len = copied;
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index cbc6af923dd1..23608eb0ded2 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1902,6 +1902,7 @@ call_transmit_status(struct rpc_task *task)
1902 1902
1903 switch (task->tk_status) { 1903 switch (task->tk_status) {
1904 case -EAGAIN: 1904 case -EAGAIN:
1905 case -ENOBUFS:
1905 break; 1906 break;
1906 default: 1907 default:
1907 dprint_status(task); 1908 dprint_status(task);
@@ -1928,7 +1929,6 @@ call_transmit_status(struct rpc_task *task)
1928 case -ECONNABORTED: 1929 case -ECONNABORTED:
1929 case -EADDRINUSE: 1930 case -EADDRINUSE:
1930 case -ENOTCONN: 1931 case -ENOTCONN:
1931 case -ENOBUFS:
1932 case -EPIPE: 1932 case -EPIPE:
1933 rpc_task_force_reencode(task); 1933 rpc_task_force_reencode(task);
1934 } 1934 }
@@ -2057,12 +2057,13 @@ call_status(struct rpc_task *task)
2057 case -ECONNABORTED: 2057 case -ECONNABORTED:
2058 rpc_force_rebind(clnt); 2058 rpc_force_rebind(clnt);
2059 case -EADDRINUSE: 2059 case -EADDRINUSE:
2060 case -ENOBUFS:
2061 rpc_delay(task, 3*HZ); 2060 rpc_delay(task, 3*HZ);
2062 case -EPIPE: 2061 case -EPIPE:
2063 case -ENOTCONN: 2062 case -ENOTCONN:
2064 task->tk_action = call_bind; 2063 task->tk_action = call_bind;
2065 break; 2064 break;
2065 case -ENOBUFS:
2066 rpc_delay(task, HZ>>2);
2066 case -EAGAIN: 2067 case -EAGAIN:
2067 task->tk_action = call_transmit; 2068 task->tk_action = call_transmit;
2068 break; 2069 break;
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index e193c2b5476b..0030376327b7 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -527,6 +527,10 @@ static int xs_local_send_request(struct rpc_task *task)
527 true, &sent); 527 true, &sent);
528 dprintk("RPC: %s(%u) = %d\n", 528 dprintk("RPC: %s(%u) = %d\n",
529 __func__, xdr->len - req->rq_bytes_sent, status); 529 __func__, xdr->len - req->rq_bytes_sent, status);
530
531 if (status == -EAGAIN && sock_writeable(transport->inet))
532 status = -ENOBUFS;
533
530 if (likely(sent > 0) || status == 0) { 534 if (likely(sent > 0) || status == 0) {
531 req->rq_bytes_sent += sent; 535 req->rq_bytes_sent += sent;
532 req->rq_xmit_bytes_sent += sent; 536 req->rq_xmit_bytes_sent += sent;
@@ -539,6 +543,7 @@ static int xs_local_send_request(struct rpc_task *task)
539 543
540 switch (status) { 544 switch (status) {
541 case -ENOBUFS: 545 case -ENOBUFS:
546 break;
542 case -EAGAIN: 547 case -EAGAIN:
543 status = xs_nospace(task); 548 status = xs_nospace(task);
544 break; 549 break;
@@ -589,6 +594,9 @@ static int xs_udp_send_request(struct rpc_task *task)
589 if (status == -EPERM) 594 if (status == -EPERM)
590 goto process_status; 595 goto process_status;
591 596
597 if (status == -EAGAIN && sock_writeable(transport->inet))
598 status = -ENOBUFS;
599
592 if (sent > 0 || status == 0) { 600 if (sent > 0 || status == 0) {
593 req->rq_xmit_bytes_sent += sent; 601 req->rq_xmit_bytes_sent += sent;
594 if (sent >= req->rq_slen) 602 if (sent >= req->rq_slen)
@@ -669,9 +677,6 @@ static int xs_tcp_send_request(struct rpc_task *task)
669 dprintk("RPC: xs_tcp_send_request(%u) = %d\n", 677 dprintk("RPC: xs_tcp_send_request(%u) = %d\n",
670 xdr->len - req->rq_bytes_sent, status); 678 xdr->len - req->rq_bytes_sent, status);
671 679
672 if (unlikely(sent == 0 && status < 0))
673 break;
674
675 /* If we've sent the entire packet, immediately 680 /* If we've sent the entire packet, immediately
676 * reset the count of bytes sent. */ 681 * reset the count of bytes sent. */
677 req->rq_bytes_sent += sent; 682 req->rq_bytes_sent += sent;
@@ -681,18 +686,21 @@ static int xs_tcp_send_request(struct rpc_task *task)
681 return 0; 686 return 0;
682 } 687 }
683 688
684 if (sent != 0) 689 if (status < 0)
685 continue; 690 break;
686 status = -EAGAIN; 691 if (sent == 0) {
687 break; 692 status = -EAGAIN;
693 break;
694 }
688 } 695 }
696 if (status == -EAGAIN && sk_stream_is_writeable(transport->inet))
697 status = -ENOBUFS;
689 698
690 switch (status) { 699 switch (status) {
691 case -ENOTSOCK: 700 case -ENOTSOCK:
692 status = -ENOTCONN; 701 status = -ENOTCONN;
693 /* Should we call xs_close() here? */ 702 /* Should we call xs_close() here? */
694 break; 703 break;
695 case -ENOBUFS:
696 case -EAGAIN: 704 case -EAGAIN:
697 status = xs_nospace(task); 705 status = xs_nospace(task);
698 break; 706 break;
@@ -703,6 +711,7 @@ static int xs_tcp_send_request(struct rpc_task *task)
703 case -ECONNREFUSED: 711 case -ECONNREFUSED:
704 case -ENOTCONN: 712 case -ENOTCONN:
705 case -EADDRINUSE: 713 case -EADDRINUSE:
714 case -ENOBUFS:
706 case -EPIPE: 715 case -EPIPE:
707 clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags); 716 clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags);
708 } 717 }
diff --git a/net/wireless/chan.c b/net/wireless/chan.c
index 915b328b9ac5..59cabc9bce69 100644
--- a/net/wireless/chan.c
+++ b/net/wireless/chan.c
@@ -797,23 +797,18 @@ static bool cfg80211_ir_permissive_chan(struct wiphy *wiphy,
797 return false; 797 return false;
798} 798}
799 799
800bool cfg80211_reg_can_beacon(struct wiphy *wiphy, 800static bool _cfg80211_reg_can_beacon(struct wiphy *wiphy,
801 struct cfg80211_chan_def *chandef, 801 struct cfg80211_chan_def *chandef,
802 enum nl80211_iftype iftype) 802 enum nl80211_iftype iftype,
803 bool check_no_ir)
803{ 804{
804 bool res; 805 bool res;
805 u32 prohibited_flags = IEEE80211_CHAN_DISABLED | 806 u32 prohibited_flags = IEEE80211_CHAN_DISABLED |
806 IEEE80211_CHAN_RADAR; 807 IEEE80211_CHAN_RADAR;
807 808
808 trace_cfg80211_reg_can_beacon(wiphy, chandef, iftype); 809 trace_cfg80211_reg_can_beacon(wiphy, chandef, iftype, check_no_ir);
809 810
810 /* 811 if (check_no_ir)
811 * Under certain conditions suggested by some regulatory bodies a
812 * GO/STA can IR on channels marked with IEEE80211_NO_IR. Set this flag
813 * only if such relaxations are not enabled and the conditions are not
814 * met.
815 */
816 if (!cfg80211_ir_permissive_chan(wiphy, iftype, chandef->chan))
817 prohibited_flags |= IEEE80211_CHAN_NO_IR; 812 prohibited_flags |= IEEE80211_CHAN_NO_IR;
818 813
819 if (cfg80211_chandef_dfs_required(wiphy, chandef, iftype) > 0 && 814 if (cfg80211_chandef_dfs_required(wiphy, chandef, iftype) > 0 &&
@@ -827,8 +822,36 @@ bool cfg80211_reg_can_beacon(struct wiphy *wiphy,
827 trace_cfg80211_return_bool(res); 822 trace_cfg80211_return_bool(res);
828 return res; 823 return res;
829} 824}
825
826bool cfg80211_reg_can_beacon(struct wiphy *wiphy,
827 struct cfg80211_chan_def *chandef,
828 enum nl80211_iftype iftype)
829{
830 return _cfg80211_reg_can_beacon(wiphy, chandef, iftype, true);
831}
830EXPORT_SYMBOL(cfg80211_reg_can_beacon); 832EXPORT_SYMBOL(cfg80211_reg_can_beacon);
831 833
834bool cfg80211_reg_can_beacon_relax(struct wiphy *wiphy,
835 struct cfg80211_chan_def *chandef,
836 enum nl80211_iftype iftype)
837{
838 bool check_no_ir;
839
840 ASSERT_RTNL();
841
842 /*
843 * Under certain conditions suggested by some regulatory bodies a
844 * GO/STA can IR on channels marked with IEEE80211_NO_IR. Set this flag
845 * only if such relaxations are not enabled and the conditions are not
846 * met.
847 */
848 check_no_ir = !cfg80211_ir_permissive_chan(wiphy, iftype,
849 chandef->chan);
850
851 return _cfg80211_reg_can_beacon(wiphy, chandef, iftype, check_no_ir);
852}
853EXPORT_SYMBOL(cfg80211_reg_can_beacon_relax);
854
832int cfg80211_set_monitor_channel(struct cfg80211_registered_device *rdev, 855int cfg80211_set_monitor_channel(struct cfg80211_registered_device *rdev,
833 struct cfg80211_chan_def *chandef) 856 struct cfg80211_chan_def *chandef)
834{ 857{
diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c
index c264effd00a6..76b41578a838 100644
--- a/net/wireless/nl80211.c
+++ b/net/wireless/nl80211.c
@@ -2003,7 +2003,8 @@ static int __nl80211_set_channel(struct cfg80211_registered_device *rdev,
2003 switch (iftype) { 2003 switch (iftype) {
2004 case NL80211_IFTYPE_AP: 2004 case NL80211_IFTYPE_AP:
2005 case NL80211_IFTYPE_P2P_GO: 2005 case NL80211_IFTYPE_P2P_GO:
2006 if (!cfg80211_reg_can_beacon(&rdev->wiphy, &chandef, iftype)) { 2006 if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &chandef,
2007 iftype)) {
2007 result = -EINVAL; 2008 result = -EINVAL;
2008 break; 2009 break;
2009 } 2010 }
@@ -3403,8 +3404,8 @@ static int nl80211_start_ap(struct sk_buff *skb, struct genl_info *info)
3403 } else if (!nl80211_get_ap_channel(rdev, &params)) 3404 } else if (!nl80211_get_ap_channel(rdev, &params))
3404 return -EINVAL; 3405 return -EINVAL;
3405 3406
3406 if (!cfg80211_reg_can_beacon(&rdev->wiphy, &params.chandef, 3407 if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &params.chandef,
3407 wdev->iftype)) 3408 wdev->iftype))
3408 return -EINVAL; 3409 return -EINVAL;
3409 3410
3410 if (info->attrs[NL80211_ATTR_ACL_POLICY]) { 3411 if (info->attrs[NL80211_ATTR_ACL_POLICY]) {
@@ -6492,8 +6493,8 @@ skip_beacons:
6492 if (err) 6493 if (err)
6493 return err; 6494 return err;
6494 6495
6495 if (!cfg80211_reg_can_beacon(&rdev->wiphy, &params.chandef, 6496 if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &params.chandef,
6496 wdev->iftype)) 6497 wdev->iftype))
6497 return -EINVAL; 6498 return -EINVAL;
6498 6499
6499 err = cfg80211_chandef_dfs_required(wdev->wiphy, 6500 err = cfg80211_chandef_dfs_required(wdev->wiphy,
@@ -10170,7 +10171,8 @@ static int nl80211_tdls_channel_switch(struct sk_buff *skb,
10170 return -EINVAL; 10171 return -EINVAL;
10171 10172
10172 /* we will be active on the TDLS link */ 10173 /* we will be active on the TDLS link */
10173 if (!cfg80211_reg_can_beacon(&rdev->wiphy, &chandef, wdev->iftype)) 10174 if (!cfg80211_reg_can_beacon_relax(&rdev->wiphy, &chandef,
10175 wdev->iftype))
10174 return -EINVAL; 10176 return -EINVAL;
10175 10177
10176 /* don't allow switching to DFS channels */ 10178 /* don't allow switching to DFS channels */
diff --git a/net/wireless/reg.c b/net/wireless/reg.c
index d359e0610198..aa2d75482017 100644
--- a/net/wireless/reg.c
+++ b/net/wireless/reg.c
@@ -544,15 +544,15 @@ static int call_crda(const char *alpha2)
544 reg_regdb_query(alpha2); 544 reg_regdb_query(alpha2);
545 545
546 if (reg_crda_timeouts > REG_MAX_CRDA_TIMEOUTS) { 546 if (reg_crda_timeouts > REG_MAX_CRDA_TIMEOUTS) {
547 pr_info("Exceeded CRDA call max attempts. Not calling CRDA\n"); 547 pr_debug("Exceeded CRDA call max attempts. Not calling CRDA\n");
548 return -EINVAL; 548 return -EINVAL;
549 } 549 }
550 550
551 if (!is_world_regdom((char *) alpha2)) 551 if (!is_world_regdom((char *) alpha2))
552 pr_info("Calling CRDA for country: %c%c\n", 552 pr_debug("Calling CRDA for country: %c%c\n",
553 alpha2[0], alpha2[1]); 553 alpha2[0], alpha2[1]);
554 else 554 else
555 pr_info("Calling CRDA to update world regulatory domain\n"); 555 pr_debug("Calling CRDA to update world regulatory domain\n");
556 556
557 return kobject_uevent_env(&reg_pdev->dev.kobj, KOBJ_CHANGE, env); 557 return kobject_uevent_env(&reg_pdev->dev.kobj, KOBJ_CHANGE, env);
558} 558}
@@ -1589,7 +1589,7 @@ static bool reg_wdev_chan_valid(struct wiphy *wiphy, struct wireless_dev *wdev)
1589 case NL80211_IFTYPE_AP: 1589 case NL80211_IFTYPE_AP:
1590 case NL80211_IFTYPE_P2P_GO: 1590 case NL80211_IFTYPE_P2P_GO:
1591 case NL80211_IFTYPE_ADHOC: 1591 case NL80211_IFTYPE_ADHOC:
1592 return cfg80211_reg_can_beacon(wiphy, &chandef, iftype); 1592 return cfg80211_reg_can_beacon_relax(wiphy, &chandef, iftype);
1593 case NL80211_IFTYPE_STATION: 1593 case NL80211_IFTYPE_STATION:
1594 case NL80211_IFTYPE_P2P_CLIENT: 1594 case NL80211_IFTYPE_P2P_CLIENT:
1595 return cfg80211_chandef_usable(wiphy, &chandef, 1595 return cfg80211_chandef_usable(wiphy, &chandef,
diff --git a/net/wireless/trace.h b/net/wireless/trace.h
index af3617c9879e..a808279a432a 100644
--- a/net/wireless/trace.h
+++ b/net/wireless/trace.h
@@ -2358,20 +2358,23 @@ TRACE_EVENT(cfg80211_cqm_rssi_notify,
2358 2358
2359TRACE_EVENT(cfg80211_reg_can_beacon, 2359TRACE_EVENT(cfg80211_reg_can_beacon,
2360 TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef, 2360 TP_PROTO(struct wiphy *wiphy, struct cfg80211_chan_def *chandef,
2361 enum nl80211_iftype iftype), 2361 enum nl80211_iftype iftype, bool check_no_ir),
2362 TP_ARGS(wiphy, chandef, iftype), 2362 TP_ARGS(wiphy, chandef, iftype, check_no_ir),
2363 TP_STRUCT__entry( 2363 TP_STRUCT__entry(
2364 WIPHY_ENTRY 2364 WIPHY_ENTRY
2365 CHAN_DEF_ENTRY 2365 CHAN_DEF_ENTRY
2366 __field(enum nl80211_iftype, iftype) 2366 __field(enum nl80211_iftype, iftype)
2367 __field(bool, check_no_ir)
2367 ), 2368 ),
2368 TP_fast_assign( 2369 TP_fast_assign(
2369 WIPHY_ASSIGN; 2370 WIPHY_ASSIGN;
2370 CHAN_DEF_ASSIGN(chandef); 2371 CHAN_DEF_ASSIGN(chandef);
2371 __entry->iftype = iftype; 2372 __entry->iftype = iftype;
2373 __entry->check_no_ir = check_no_ir;
2372 ), 2374 ),
2373 TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT ", iftype=%d", 2375 TP_printk(WIPHY_PR_FMT ", " CHAN_DEF_PR_FMT ", iftype=%d check_no_ir=%s",
2374 WIPHY_PR_ARG, CHAN_DEF_PR_ARG, __entry->iftype) 2376 WIPHY_PR_ARG, CHAN_DEF_PR_ARG, __entry->iftype,
2377 BOOL_TO_STR(__entry->check_no_ir))
2375); 2378);
2376 2379
2377TRACE_EVENT(cfg80211_chandef_dfs_required, 2380TRACE_EVENT(cfg80211_chandef_dfs_required,
diff --git a/samples/trace_events/trace-events-sample.h b/samples/trace_events/trace-events-sample.h
index 8965d1bb8811..125d6402f64f 100644
--- a/samples/trace_events/trace-events-sample.h
+++ b/samples/trace_events/trace-events-sample.h
@@ -168,7 +168,10 @@
168 * 168 *
169 * For __dynamic_array(int, foo, bar) use __get_dynamic_array(foo) 169 * For __dynamic_array(int, foo, bar) use __get_dynamic_array(foo)
170 * Use __get_dynamic_array_len(foo) to get the length of the array 170 * Use __get_dynamic_array_len(foo) to get the length of the array
171 * saved. 171 * saved. Note, __get_dynamic_array_len() returns the total allocated
172 * length of the dynamic array; __print_array() expects the second
173 * parameter to be the number of elements. To get that, the array length
174 * needs to be divided by the element size.
172 * 175 *
173 * For __string(foo, bar) use __get_str(foo) 176 * For __string(foo, bar) use __get_str(foo)
174 * 177 *
@@ -288,7 +291,7 @@ TRACE_EVENT(foo_bar,
288 * This prints out the array that is defined by __array in a nice format. 291 * This prints out the array that is defined by __array in a nice format.
289 */ 292 */
290 __print_array(__get_dynamic_array(list), 293 __print_array(__get_dynamic_array(list),
291 __get_dynamic_array_len(list), 294 __get_dynamic_array_len(list) / sizeof(int),
292 sizeof(int)), 295 sizeof(int)),
293 __get_str(str), __get_bitmask(cpus)) 296 __get_str(str), __get_bitmask(cpus))
294); 297);
diff --git a/scripts/kconfig/streamline_config.pl b/scripts/kconfig/streamline_config.pl
index 9cb8522d8d22..f3d3fb42b873 100755
--- a/scripts/kconfig/streamline_config.pl
+++ b/scripts/kconfig/streamline_config.pl
@@ -137,7 +137,7 @@ my $ksource = ($ARGV[0] ? $ARGV[0] : '.');
137my $kconfig = $ARGV[1]; 137my $kconfig = $ARGV[1];
138my $lsmod_file = $ENV{'LSMOD'}; 138my $lsmod_file = $ENV{'LSMOD'};
139 139
140my @makefiles = `find $ksource -name Makefile 2>/dev/null`; 140my @makefiles = `find $ksource -name Makefile -or -name Kbuild 2>/dev/null`;
141chomp @makefiles; 141chomp @makefiles;
142 142
143my %depends; 143my %depends;
diff --git a/security/keys/keyring.c b/security/keys/keyring.c
index e72548b5897e..d33437007ad2 100644
--- a/security/keys/keyring.c
+++ b/security/keys/keyring.c
@@ -1181,9 +1181,11 @@ void __key_link_end(struct key *keyring,
1181 if (index_key->type == &key_type_keyring) 1181 if (index_key->type == &key_type_keyring)
1182 up_write(&keyring_serialise_link_sem); 1182 up_write(&keyring_serialise_link_sem);
1183 1183
1184 if (edit && !edit->dead_leaf) { 1184 if (edit) {
1185 key_payload_reserve(keyring, 1185 if (!edit->dead_leaf) {
1186 keyring->datalen - KEYQUOTA_LINK_BYTES); 1186 key_payload_reserve(keyring,
1187 keyring->datalen - KEYQUOTA_LINK_BYTES);
1188 }
1187 assoc_array_cancel_edit(edit); 1189 assoc_array_cancel_edit(edit);
1188 } 1190 }
1189 up_write(&keyring->sem); 1191 up_write(&keyring->sem);
diff --git a/security/yama/yama_lsm.c b/security/yama/yama_lsm.c
index 9ed32502470e..5ebb89687936 100644
--- a/security/yama/yama_lsm.c
+++ b/security/yama/yama_lsm.c
@@ -406,6 +406,7 @@ static __init int yama_init(void)
406 */ 406 */
407 if (!security_module_enable("yama")) 407 if (!security_module_enable("yama"))
408 return 0; 408 return 0;
409 yama_add_hooks();
409#endif 410#endif
410 pr_info("Yama: becoming mindful.\n"); 411 pr_info("Yama: becoming mindful.\n");
411 412
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index d126c03361ae..75888dd38a7f 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -85,7 +85,7 @@ static DECLARE_RWSEM(snd_pcm_link_rwsem);
85void snd_pcm_stream_lock(struct snd_pcm_substream *substream) 85void snd_pcm_stream_lock(struct snd_pcm_substream *substream)
86{ 86{
87 if (substream->pcm->nonatomic) { 87 if (substream->pcm->nonatomic) {
88 down_read(&snd_pcm_link_rwsem); 88 down_read_nested(&snd_pcm_link_rwsem, SINGLE_DEPTH_NESTING);
89 mutex_lock(&substream->self_group.mutex); 89 mutex_lock(&substream->self_group.mutex);
90 } else { 90 } else {
91 read_lock(&snd_pcm_link_rwlock); 91 read_lock(&snd_pcm_link_rwlock);
diff --git a/sound/firewire/amdtp.c b/sound/firewire/amdtp.c
index 7bb988fa6b6d..2a153d260836 100644
--- a/sound/firewire/amdtp.c
+++ b/sound/firewire/amdtp.c
@@ -740,8 +740,9 @@ static int handle_in_packet(struct amdtp_stream *s,
740 s->data_block_counter != UINT_MAX) 740 s->data_block_counter != UINT_MAX)
741 data_block_counter = s->data_block_counter; 741 data_block_counter = s->data_block_counter;
742 742
743 if (((s->flags & CIP_SKIP_DBC_ZERO_CHECK) && data_block_counter == 0) || 743 if (((s->flags & CIP_SKIP_DBC_ZERO_CHECK) &&
744 (s->data_block_counter == UINT_MAX)) { 744 data_block_counter == s->tx_first_dbc) ||
745 s->data_block_counter == UINT_MAX) {
745 lost = false; 746 lost = false;
746 } else if (!(s->flags & CIP_DBC_IS_END_EVENT)) { 747 } else if (!(s->flags & CIP_DBC_IS_END_EVENT)) {
747 lost = data_block_counter != s->data_block_counter; 748 lost = data_block_counter != s->data_block_counter;
diff --git a/sound/firewire/amdtp.h b/sound/firewire/amdtp.h
index 26b909329e54..b2cf9e75693b 100644
--- a/sound/firewire/amdtp.h
+++ b/sound/firewire/amdtp.h
@@ -157,6 +157,8 @@ struct amdtp_stream {
157 157
158 /* quirk: fixed interval of dbc between previos/current packets. */ 158 /* quirk: fixed interval of dbc between previos/current packets. */
159 unsigned int tx_dbc_interval; 159 unsigned int tx_dbc_interval;
160 /* quirk: indicate the value of dbc field in a first packet. */
161 unsigned int tx_first_dbc;
160 162
161 bool callbacked; 163 bool callbacked;
162 wait_queue_head_t callback_wait; 164 wait_queue_head_t callback_wait;
diff --git a/sound/firewire/fireworks/fireworks.c b/sound/firewire/fireworks/fireworks.c
index 2682e7e3e5c9..c94a432f7cc6 100644
--- a/sound/firewire/fireworks/fireworks.c
+++ b/sound/firewire/fireworks/fireworks.c
@@ -248,8 +248,16 @@ efw_probe(struct fw_unit *unit,
248 err = get_hardware_info(efw); 248 err = get_hardware_info(efw);
249 if (err < 0) 249 if (err < 0)
250 goto error; 250 goto error;
251 /* AudioFire8 (since 2009) and AudioFirePre8 */
251 if (entry->model_id == MODEL_ECHO_AUDIOFIRE_9) 252 if (entry->model_id == MODEL_ECHO_AUDIOFIRE_9)
252 efw->is_af9 = true; 253 efw->is_af9 = true;
254 /* These models uses the same firmware. */
255 if (entry->model_id == MODEL_ECHO_AUDIOFIRE_2 ||
256 entry->model_id == MODEL_ECHO_AUDIOFIRE_4 ||
257 entry->model_id == MODEL_ECHO_AUDIOFIRE_9 ||
258 entry->model_id == MODEL_GIBSON_RIP ||
259 entry->model_id == MODEL_GIBSON_GOLDTOP)
260 efw->is_fireworks3 = true;
253 261
254 snd_efw_proc_init(efw); 262 snd_efw_proc_init(efw);
255 263
diff --git a/sound/firewire/fireworks/fireworks.h b/sound/firewire/fireworks/fireworks.h
index 4f0201a95222..084d414b228c 100644
--- a/sound/firewire/fireworks/fireworks.h
+++ b/sound/firewire/fireworks/fireworks.h
@@ -71,6 +71,7 @@ struct snd_efw {
71 71
72 /* for quirks */ 72 /* for quirks */
73 bool is_af9; 73 bool is_af9;
74 bool is_fireworks3;
74 u32 firmware_version; 75 u32 firmware_version;
75 76
76 unsigned int midi_in_ports; 77 unsigned int midi_in_ports;
diff --git a/sound/firewire/fireworks/fireworks_stream.c b/sound/firewire/fireworks/fireworks_stream.c
index c55db1bddc80..7e353f1f7bff 100644
--- a/sound/firewire/fireworks/fireworks_stream.c
+++ b/sound/firewire/fireworks/fireworks_stream.c
@@ -172,6 +172,15 @@ int snd_efw_stream_init_duplex(struct snd_efw *efw)
172 efw->tx_stream.flags |= CIP_DBC_IS_END_EVENT; 172 efw->tx_stream.flags |= CIP_DBC_IS_END_EVENT;
173 /* Fireworks reset dbc at bus reset. */ 173 /* Fireworks reset dbc at bus reset. */
174 efw->tx_stream.flags |= CIP_SKIP_DBC_ZERO_CHECK; 174 efw->tx_stream.flags |= CIP_SKIP_DBC_ZERO_CHECK;
175 /*
176 * But Recent firmwares starts packets with non-zero dbc.
177 * Driver version 5.7.6 installs firmware version 5.7.3.
178 */
179 if (efw->is_fireworks3 &&
180 (efw->firmware_version == 0x5070000 ||
181 efw->firmware_version == 0x5070300 ||
182 efw->firmware_version == 0x5080000))
183 efw->tx_stream.tx_first_dbc = 0x02;
175 /* AudioFire9 always reports wrong dbs. */ 184 /* AudioFire9 always reports wrong dbs. */
176 if (efw->is_af9) 185 if (efw->is_af9)
177 efw->tx_stream.flags |= CIP_WRONG_DBS; 186 efw->tx_stream.flags |= CIP_WRONG_DBS;
diff --git a/sound/hda/ext/hdac_ext_controller.c b/sound/hda/ext/hdac_ext_controller.c
index b2da19b60f4e..358f16195483 100644
--- a/sound/hda/ext/hdac_ext_controller.c
+++ b/sound/hda/ext/hdac_ext_controller.c
@@ -44,16 +44,10 @@ int snd_hdac_ext_bus_parse_capabilities(struct hdac_ext_bus *ebus)
44 44
45 offset = snd_hdac_chip_readl(bus, LLCH); 45 offset = snd_hdac_chip_readl(bus, LLCH);
46 46
47 if (offset < 0)
48 return -EIO;
49
50 /* Lets walk the linked capabilities list */ 47 /* Lets walk the linked capabilities list */
51 do { 48 do {
52 cur_cap = _snd_hdac_chip_read(l, bus, offset); 49 cur_cap = _snd_hdac_chip_read(l, bus, offset);
53 50
54 if (cur_cap < 0)
55 return -EIO;
56
57 dev_dbg(bus->dev, "Capability version: 0x%x\n", 51 dev_dbg(bus->dev, "Capability version: 0x%x\n",
58 ((cur_cap & AZX_CAP_HDR_VER_MASK) >> AZX_CAP_HDR_VER_OFF)); 52 ((cur_cap & AZX_CAP_HDR_VER_MASK) >> AZX_CAP_HDR_VER_OFF));
59 53
diff --git a/sound/hda/ext/hdac_ext_stream.c b/sound/hda/ext/hdac_ext_stream.c
index f8ffbdbb450d..3de47dd1a76d 100644
--- a/sound/hda/ext/hdac_ext_stream.c
+++ b/sound/hda/ext/hdac_ext_stream.c
@@ -299,7 +299,7 @@ hdac_ext_host_stream_assign(struct hdac_ext_bus *ebus,
299 if (stream->direction != substream->stream) 299 if (stream->direction != substream->stream)
300 continue; 300 continue;
301 301
302 if (stream->opened) { 302 if (!stream->opened) {
303 if (!hstream->decoupled) 303 if (!hstream->decoupled)
304 snd_hdac_ext_stream_decouple(ebus, hstream, true); 304 snd_hdac_ext_stream_decouple(ebus, hstream, true);
305 res = hstream; 305 res = hstream;
diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c
index 442500e06b7c..5676b849379d 100644
--- a/sound/hda/hdac_i915.c
+++ b/sound/hda/hdac_i915.c
@@ -56,8 +56,11 @@ int snd_hdac_display_power(struct hdac_bus *bus, bool enable)
56 enable ? "enable" : "disable"); 56 enable ? "enable" : "disable");
57 57
58 if (enable) { 58 if (enable) {
59 if (!bus->i915_power_refcount++) 59 if (!bus->i915_power_refcount++) {
60 acomp->ops->get_power(acomp->dev); 60 acomp->ops->get_power(acomp->dev);
61 snd_hdac_set_codec_wakeup(bus, true);
62 snd_hdac_set_codec_wakeup(bus, false);
63 }
61 } else { 64 } else {
62 WARN_ON(!bus->i915_power_refcount); 65 WARN_ON(!bus->i915_power_refcount);
63 if (!--bus->i915_power_refcount) 66 if (!--bus->i915_power_refcount)
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 745535d1840a..c38c68f57938 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -867,7 +867,7 @@ static int azx_suspend(struct device *dev)
867 867
868 chip = card->private_data; 868 chip = card->private_data;
869 hda = container_of(chip, struct hda_intel, chip); 869 hda = container_of(chip, struct hda_intel, chip);
870 if (chip->disabled || hda->init_failed) 870 if (chip->disabled || hda->init_failed || !chip->running)
871 return 0; 871 return 0;
872 872
873 bus = azx_bus(chip); 873 bus = azx_bus(chip);
@@ -902,7 +902,7 @@ static int azx_resume(struct device *dev)
902 902
903 chip = card->private_data; 903 chip = card->private_data;
904 hda = container_of(chip, struct hda_intel, chip); 904 hda = container_of(chip, struct hda_intel, chip);
905 if (chip->disabled || hda->init_failed) 905 if (chip->disabled || hda->init_failed || !chip->running)
906 return 0; 906 return 0;
907 907
908 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL 908 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL
@@ -979,14 +979,16 @@ static int azx_runtime_resume(struct device *dev)
979 if (!azx_has_pm_runtime(chip)) 979 if (!azx_has_pm_runtime(chip))
980 return 0; 980 return 0;
981 981
982 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL 982 if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
983 && hda->need_i915_power) { 983 bus = azx_bus(chip);
984 bus = azx_bus(chip); 984 if (hda->need_i915_power) {
985 snd_hdac_display_power(bus, true); 985 snd_hdac_display_power(bus, true);
986 haswell_set_bclk(hda); 986 haswell_set_bclk(hda);
987 /* toggle codec wakeup bit for STATESTS read */ 987 } else {
988 snd_hdac_set_codec_wakeup(bus, true); 988 /* toggle codec wakeup bit for STATESTS read */
989 snd_hdac_set_codec_wakeup(bus, false); 989 snd_hdac_set_codec_wakeup(bus, true);
990 snd_hdac_set_codec_wakeup(bus, false);
991 }
990 } 992 }
991 993
992 /* Read STATESTS before controller reset */ 994 /* Read STATESTS before controller reset */
@@ -1025,7 +1027,7 @@ static int azx_runtime_idle(struct device *dev)
1025 return 0; 1027 return 0;
1026 1028
1027 if (!power_save_controller || !azx_has_pm_runtime(chip) || 1029 if (!power_save_controller || !azx_has_pm_runtime(chip) ||
1028 azx_bus(chip)->codec_powered) 1030 azx_bus(chip)->codec_powered || !chip->running)
1029 return -EBUSY; 1031 return -EBUSY;
1030 1032
1031 return 0; 1033 return 0;
@@ -2182,6 +2184,8 @@ static const struct pci_device_id azx_ids[] = {
2182 /* ATI HDMI */ 2184 /* ATI HDMI */
2183 { PCI_DEVICE(0x1002, 0x1308), 2185 { PCI_DEVICE(0x1002, 0x1308),
2184 .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, 2186 .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
2187 { PCI_DEVICE(0x1002, 0x157a),
2188 .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
2185 { PCI_DEVICE(0x1002, 0x793b), 2189 { PCI_DEVICE(0x1002, 0x793b),
2186 .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI }, 2190 .driver_data = AZX_DRIVER_ATIHDMI | AZX_DCAPS_PRESET_ATI_HDMI },
2187 { PCI_DEVICE(0x1002, 0x7919), 2191 { PCI_DEVICE(0x1002, 0x7919),
@@ -2236,8 +2240,14 @@ static const struct pci_device_id azx_ids[] = {
2236 .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, 2240 .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
2237 { PCI_DEVICE(0x1002, 0xaab0), 2241 { PCI_DEVICE(0x1002, 0xaab0),
2238 .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, 2242 .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
2243 { PCI_DEVICE(0x1002, 0xaac0),
2244 .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
2239 { PCI_DEVICE(0x1002, 0xaac8), 2245 { PCI_DEVICE(0x1002, 0xaac8),
2240 .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS }, 2246 .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
2247 { PCI_DEVICE(0x1002, 0xaad8),
2248 .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
2249 { PCI_DEVICE(0x1002, 0xaae8),
2250 .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
2241 /* VIA VT8251/VT8237A */ 2251 /* VIA VT8251/VT8237A */
2242 { PCI_DEVICE(0x1106, 0x3288), 2252 { PCI_DEVICE(0x1106, 0x3288),
2243 .driver_data = AZX_DRIVER_VIA | AZX_DCAPS_POSFIX_VIA }, 2253 .driver_data = AZX_DRIVER_VIA | AZX_DCAPS_POSFIX_VIA },
diff --git a/sound/pci/hda/patch_cirrus.c b/sound/pci/hda/patch_cirrus.c
index 25ccf781fbe7..584a0343ab0c 100644
--- a/sound/pci/hda/patch_cirrus.c
+++ b/sound/pci/hda/patch_cirrus.c
@@ -999,9 +999,7 @@ static void cs4210_spdif_automute(struct hda_codec *codec,
999 999
1000 spec->spdif_present = spdif_present; 1000 spec->spdif_present = spdif_present;
1001 /* SPDIF TX on/off */ 1001 /* SPDIF TX on/off */
1002 if (spdif_present) 1002 snd_hda_set_pin_ctl(codec, spdif_pin, spdif_present ? PIN_OUT : 0);
1003 snd_hda_set_pin_ctl(codec, spdif_pin,
1004 spdif_present ? PIN_OUT : 0);
1005 1003
1006 cs_automute(codec); 1004 cs_automute(codec);
1007} 1005}
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 95158914cc6c..a97db5fc8a15 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -3512,6 +3512,7 @@ static const struct hda_codec_preset snd_hda_preset_hdmi[] = {
3512{ .id = 0x10de0070, .name = "GPU 70 HDMI/DP", .patch = patch_nvhdmi }, 3512{ .id = 0x10de0070, .name = "GPU 70 HDMI/DP", .patch = patch_nvhdmi },
3513{ .id = 0x10de0071, .name = "GPU 71 HDMI/DP", .patch = patch_nvhdmi }, 3513{ .id = 0x10de0071, .name = "GPU 71 HDMI/DP", .patch = patch_nvhdmi },
3514{ .id = 0x10de0072, .name = "GPU 72 HDMI/DP", .patch = patch_nvhdmi }, 3514{ .id = 0x10de0072, .name = "GPU 72 HDMI/DP", .patch = patch_nvhdmi },
3515{ .id = 0x10de007d, .name = "GPU 7d HDMI/DP", .patch = patch_nvhdmi },
3515{ .id = 0x10de8001, .name = "MCP73 HDMI", .patch = patch_nvhdmi_2ch }, 3516{ .id = 0x10de8001, .name = "MCP73 HDMI", .patch = patch_nvhdmi_2ch },
3516{ .id = 0x11069f80, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi }, 3517{ .id = 0x11069f80, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi },
3517{ .id = 0x11069f81, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi }, 3518{ .id = 0x11069f81, .name = "VX900 HDMI/DP", .patch = patch_via_hdmi },
@@ -3576,6 +3577,7 @@ MODULE_ALIAS("snd-hda-codec-id:10de0067");
3576MODULE_ALIAS("snd-hda-codec-id:10de0070"); 3577MODULE_ALIAS("snd-hda-codec-id:10de0070");
3577MODULE_ALIAS("snd-hda-codec-id:10de0071"); 3578MODULE_ALIAS("snd-hda-codec-id:10de0071");
3578MODULE_ALIAS("snd-hda-codec-id:10de0072"); 3579MODULE_ALIAS("snd-hda-codec-id:10de0072");
3580MODULE_ALIAS("snd-hda-codec-id:10de007d");
3579MODULE_ALIAS("snd-hda-codec-id:10de8001"); 3581MODULE_ALIAS("snd-hda-codec-id:10de8001");
3580MODULE_ALIAS("snd-hda-codec-id:11069f80"); 3582MODULE_ALIAS("snd-hda-codec-id:11069f80");
3581MODULE_ALIAS("snd-hda-codec-id:11069f81"); 3583MODULE_ALIAS("snd-hda-codec-id:11069f81");
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index d35cf506a7db..0b9847affbec 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -2222,7 +2222,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = {
2222 SND_PCI_QUIRK(0x106b, 0x4300, "iMac 9,1", ALC889_FIXUP_IMAC91_VREF), 2222 SND_PCI_QUIRK(0x106b, 0x4300, "iMac 9,1", ALC889_FIXUP_IMAC91_VREF),
2223 SND_PCI_QUIRK(0x106b, 0x4600, "MacbookPro 5,2", ALC889_FIXUP_IMAC91_VREF), 2223 SND_PCI_QUIRK(0x106b, 0x4600, "MacbookPro 5,2", ALC889_FIXUP_IMAC91_VREF),
2224 SND_PCI_QUIRK(0x106b, 0x4900, "iMac 9,1 Aluminum", ALC889_FIXUP_IMAC91_VREF), 2224 SND_PCI_QUIRK(0x106b, 0x4900, "iMac 9,1 Aluminum", ALC889_FIXUP_IMAC91_VREF),
2225 SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_IMAC91_VREF), 2225 SND_PCI_QUIRK(0x106b, 0x4a00, "Macbook 5,2", ALC889_FIXUP_MBA11_VREF),
2226 2226
2227 SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD), 2227 SND_PCI_QUIRK(0x1071, 0x8258, "Evesham Voyaeger", ALC882_FIXUP_EAPD),
2228 SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD), 2228 SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
@@ -5061,7 +5061,7 @@ static const struct hda_fixup alc269_fixups[] = {
5061 { 0x14, 0x90170110 }, 5061 { 0x14, 0x90170110 },
5062 { 0x17, 0x40000008 }, 5062 { 0x17, 0x40000008 },
5063 { 0x18, 0x411111f0 }, 5063 { 0x18, 0x411111f0 },
5064 { 0x19, 0x411111f0 }, 5064 { 0x19, 0x01a1913c },
5065 { 0x1a, 0x411111f0 }, 5065 { 0x1a, 0x411111f0 },
5066 { 0x1b, 0x411111f0 }, 5066 { 0x1b, 0x411111f0 },
5067 { 0x1d, 0x40f89b2d }, 5067 { 0x1d, 0x40f89b2d },
@@ -5185,9 +5185,11 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
5185 SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 5185 SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5186 SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 5186 SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5187 SND_PCI_QUIRK(0x1028, 0x0665, "Dell XPS 13", ALC288_FIXUP_DELL_XPS_13), 5187 SND_PCI_QUIRK(0x1028, 0x0665, "Dell XPS 13", ALC288_FIXUP_DELL_XPS_13),
5188 SND_PCI_QUIRK(0x1028, 0x069a, "Dell Vostro 5480", ALC290_FIXUP_SUBWOOFER_HSJACK),
5188 SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE), 5189 SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
5189 SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 5190 SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5190 SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 5191 SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5192 SND_PCI_QUIRK(0x1028, 0x06de, "Dell", ALC292_FIXUP_DISABLE_AAMIX),
5191 SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 5193 SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5192 SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE), 5194 SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
5193 SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2), 5195 SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -5398,8 +5400,6 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
5398 {0x19, 0x411111f0}, \ 5400 {0x19, 0x411111f0}, \
5399 {0x1a, 0x411111f0}, \ 5401 {0x1a, 0x411111f0}, \
5400 {0x1b, 0x411111f0}, \ 5402 {0x1b, 0x411111f0}, \
5401 {0x1d, 0x40700001}, \
5402 {0x1e, 0x411111f0}, \
5403 {0x21, 0x02211020} 5403 {0x21, 0x02211020}
5404 5404
5405#define ALC282_STANDARD_PINS \ 5405#define ALC282_STANDARD_PINS \
@@ -5430,8 +5430,7 @@ static const struct hda_model_fixup alc269_fixup_models[] = {
5430 {0x15, 0x0221401f}, \ 5430 {0x15, 0x0221401f}, \
5431 {0x1a, 0x411111f0}, \ 5431 {0x1a, 0x411111f0}, \
5432 {0x1b, 0x411111f0}, \ 5432 {0x1b, 0x411111f0}, \
5433 {0x1d, 0x40700001}, \ 5433 {0x1d, 0x40700001}
5434 {0x1e, 0x411111f0}
5435 5434
5436#define ALC298_STANDARD_PINS \ 5435#define ALC298_STANDARD_PINS \
5437 {0x18, 0x411111f0}, \ 5436 {0x18, 0x411111f0}, \
@@ -5463,6 +5462,39 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
5463 {0x1d, 0x40700001}, 5462 {0x1d, 0x40700001},
5464 {0x21, 0x02211030}), 5463 {0x21, 0x02211030}),
5465 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, 5464 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5465 {0x12, 0x40000000},
5466 {0x14, 0x90170130},
5467 {0x17, 0x411111f0},
5468 {0x18, 0x411111f0},
5469 {0x19, 0x411111f0},
5470 {0x1a, 0x411111f0},
5471 {0x1b, 0x01014020},
5472 {0x1d, 0x4054c029},
5473 {0x1e, 0x411111f0},
5474 {0x21, 0x0221103f}),
5475 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5476 {0x12, 0x40000000},
5477 {0x14, 0x90170150},
5478 {0x17, 0x411111f0},
5479 {0x18, 0x411111f0},
5480 {0x19, 0x411111f0},
5481 {0x1a, 0x411111f0},
5482 {0x1b, 0x02011020},
5483 {0x1d, 0x4054c029},
5484 {0x1e, 0x411111f0},
5485 {0x21, 0x0221105f}),
5486 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5487 {0x12, 0x40000000},
5488 {0x14, 0x90170110},
5489 {0x17, 0x411111f0},
5490 {0x18, 0x411111f0},
5491 {0x19, 0x411111f0},
5492 {0x1a, 0x411111f0},
5493 {0x1b, 0x01014020},
5494 {0x1d, 0x4054c029},
5495 {0x1e, 0x411111f0},
5496 {0x21, 0x0221101f}),
5497 SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5466 {0x12, 0x90a60160}, 5498 {0x12, 0x90a60160},
5467 {0x14, 0x90170120}, 5499 {0x14, 0x90170120},
5468 {0x17, 0x90170140}, 5500 {0x17, 0x90170140},
@@ -5524,10 +5556,19 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
5524 {0x21, 0x02211030}), 5556 {0x21, 0x02211030}),
5525 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, 5557 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5526 ALC256_STANDARD_PINS, 5558 ALC256_STANDARD_PINS,
5527 {0x13, 0x40000000}), 5559 {0x13, 0x40000000},
5560 {0x1d, 0x40700001},
5561 {0x1e, 0x411111f0}),
5562 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5563 ALC256_STANDARD_PINS,
5564 {0x13, 0x411111f0},
5565 {0x1d, 0x40700001},
5566 {0x1e, 0x411111f0}),
5528 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE, 5567 SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
5529 ALC256_STANDARD_PINS, 5568 ALC256_STANDARD_PINS,
5530 {0x13, 0x411111f0}), 5569 {0x13, 0x411111f0},
5570 {0x1d, 0x4077992d},
5571 {0x1e, 0x411111ff}),
5531 SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4, 5572 SND_HDA_PIN_QUIRK(0x10ec0280, 0x103c, "HP", ALC280_FIXUP_HP_GPIO4,
5532 {0x12, 0x90a60130}, 5573 {0x12, 0x90a60130},
5533 {0x13, 0x40000000}, 5574 {0x13, 0x40000000},
@@ -5690,35 +5731,48 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
5690 {0x13, 0x411111f0}, 5731 {0x13, 0x411111f0},
5691 {0x16, 0x01014020}, 5732 {0x16, 0x01014020},
5692 {0x18, 0x411111f0}, 5733 {0x18, 0x411111f0},
5693 {0x19, 0x01a19030}), 5734 {0x19, 0x01a19030},
5735 {0x1e, 0x411111f0}),
5694 SND_HDA_PIN_QUIRK(0x10ec0292, 0x1028, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE, 5736 SND_HDA_PIN_QUIRK(0x10ec0292, 0x1028, "Dell", ALC269_FIXUP_DELL2_MIC_NO_PRESENCE,
5695 ALC292_STANDARD_PINS, 5737 ALC292_STANDARD_PINS,
5696 {0x12, 0x90a60140}, 5738 {0x12, 0x90a60140},
5697 {0x13, 0x411111f0}, 5739 {0x13, 0x411111f0},
5698 {0x16, 0x01014020}, 5740 {0x16, 0x01014020},
5699 {0x18, 0x02a19031}, 5741 {0x18, 0x02a19031},
5700 {0x19, 0x01a1903e}), 5742 {0x19, 0x01a1903e},
5743 {0x1e, 0x411111f0}),
5701 SND_HDA_PIN_QUIRK(0x10ec0292, 0x1028, "Dell", ALC269_FIXUP_DELL3_MIC_NO_PRESENCE, 5744 SND_HDA_PIN_QUIRK(0x10ec0292, 0x1028, "Dell", ALC269_FIXUP_DELL3_MIC_NO_PRESENCE,
5702 ALC292_STANDARD_PINS, 5745 ALC292_STANDARD_PINS,
5703 {0x12, 0x90a60140}, 5746 {0x12, 0x90a60140},
5704 {0x13, 0x411111f0}, 5747 {0x13, 0x411111f0},
5705 {0x16, 0x411111f0}, 5748 {0x16, 0x411111f0},
5706 {0x18, 0x411111f0}, 5749 {0x18, 0x411111f0},
5707 {0x19, 0x411111f0}), 5750 {0x19, 0x411111f0},
5751 {0x1e, 0x411111f0}),
5708 SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE, 5752 SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
5709 ALC292_STANDARD_PINS, 5753 ALC292_STANDARD_PINS,
5710 {0x12, 0x40000000}, 5754 {0x12, 0x40000000},
5711 {0x13, 0x90a60140}, 5755 {0x13, 0x90a60140},
5712 {0x16, 0x21014020}, 5756 {0x16, 0x21014020},
5713 {0x18, 0x411111f0}, 5757 {0x18, 0x411111f0},
5714 {0x19, 0x21a19030}), 5758 {0x19, 0x21a19030},
5759 {0x1e, 0x411111f0}),
5715 SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE, 5760 SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
5716 ALC292_STANDARD_PINS, 5761 ALC292_STANDARD_PINS,
5717 {0x12, 0x40000000}, 5762 {0x12, 0x40000000},
5718 {0x13, 0x90a60140}, 5763 {0x13, 0x90a60140},
5719 {0x16, 0x411111f0}, 5764 {0x16, 0x411111f0},
5720 {0x18, 0x411111f0}, 5765 {0x18, 0x411111f0},
5721 {0x19, 0x411111f0}), 5766 {0x19, 0x411111f0},
5767 {0x1e, 0x411111f0}),
5768 SND_HDA_PIN_QUIRK(0x10ec0293, 0x1028, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE,
5769 ALC292_STANDARD_PINS,
5770 {0x12, 0x40000000},
5771 {0x13, 0x90a60140},
5772 {0x16, 0x21014020},
5773 {0x18, 0x411111f0},
5774 {0x19, 0x21a19030},
5775 {0x1e, 0x411111ff}),
5722 SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE, 5776 SND_HDA_PIN_QUIRK(0x10ec0298, 0x1028, "Dell", ALC298_FIXUP_DELL1_MIC_NO_PRESENCE,
5723 ALC298_STANDARD_PINS, 5777 ALC298_STANDARD_PINS,
5724 {0x12, 0x90a60130}, 5778 {0x12, 0x90a60130},
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index dcc7fe91244c..9d947aef2c8b 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -2920,7 +2920,8 @@ static const struct snd_pci_quirk stac92hd83xxx_fixup_tbl[] = {
2920 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x148a, 2920 SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x148a,
2921 "HP Mini", STAC_92HD83XXX_HP_LED), 2921 "HP Mini", STAC_92HD83XXX_HP_LED),
2922 SND_PCI_QUIRK_VENDOR(PCI_VENDOR_ID_HP, "HP", STAC_92HD83XXX_HP), 2922 SND_PCI_QUIRK_VENDOR(PCI_VENDOR_ID_HP, "HP", STAC_92HD83XXX_HP),
2923 SND_PCI_QUIRK(PCI_VENDOR_ID_TOSHIBA, 0xfa91, 2923 /* match both for 0xfa91 and 0xfa93 */
2924 SND_PCI_QUIRK_MASK(PCI_VENDOR_ID_TOSHIBA, 0xfffd, 0xfa91,
2924 "Toshiba Satellite S50D", STAC_92HD83XXX_GPIO10_EAPD), 2925 "Toshiba Satellite S50D", STAC_92HD83XXX_GPIO10_EAPD),
2925 {} /* terminator */ 2926 {} /* terminator */
2926}; 2927};
diff --git a/sound/pci/oxygen/oxygen_mixer.c b/sound/pci/oxygen/oxygen_mixer.c
index 6492bca8c70f..4ca12665ff73 100644
--- a/sound/pci/oxygen/oxygen_mixer.c
+++ b/sound/pci/oxygen/oxygen_mixer.c
@@ -88,7 +88,7 @@ static int dac_mute_put(struct snd_kcontrol *ctl,
88 int changed; 88 int changed;
89 89
90 mutex_lock(&chip->mutex); 90 mutex_lock(&chip->mutex);
91 changed = !value->value.integer.value[0] != chip->dac_mute; 91 changed = (!value->value.integer.value[0]) != chip->dac_mute;
92 if (changed) { 92 if (changed) {
93 chip->dac_mute = !value->value.integer.value[0]; 93 chip->dac_mute = !value->value.integer.value[0];
94 chip->model.update_dac_mute(chip); 94 chip->model.update_dac_mute(chip);
diff --git a/sound/soc/codecs/cs4265.c b/sound/soc/codecs/cs4265.c
index d7ec4756e45b..8e36198474d9 100644
--- a/sound/soc/codecs/cs4265.c
+++ b/sound/soc/codecs/cs4265.c
@@ -457,14 +457,14 @@ static int cs4265_pcm_hw_params(struct snd_pcm_substream *substream,
457 case SND_SOC_DAIFMT_RIGHT_J: 457 case SND_SOC_DAIFMT_RIGHT_J:
458 if (params_width(params) == 16) { 458 if (params_width(params) == 16) {
459 snd_soc_update_bits(codec, CS4265_DAC_CTL, 459 snd_soc_update_bits(codec, CS4265_DAC_CTL,
460 CS4265_DAC_CTL_DIF, (1 << 5)); 460 CS4265_DAC_CTL_DIF, (2 << 4));
461 snd_soc_update_bits(codec, CS4265_SPDIF_CTL2, 461 snd_soc_update_bits(codec, CS4265_SPDIF_CTL2,
462 CS4265_SPDIF_CTL2_DIF, (1 << 7)); 462 CS4265_SPDIF_CTL2_DIF, (2 << 6));
463 } else { 463 } else {
464 snd_soc_update_bits(codec, CS4265_DAC_CTL, 464 snd_soc_update_bits(codec, CS4265_DAC_CTL,
465 CS4265_DAC_CTL_DIF, (3 << 5)); 465 CS4265_DAC_CTL_DIF, (3 << 4));
466 snd_soc_update_bits(codec, CS4265_SPDIF_CTL2, 466 snd_soc_update_bits(codec, CS4265_SPDIF_CTL2,
467 CS4265_SPDIF_CTL2_DIF, (1 << 7)); 467 CS4265_SPDIF_CTL2_DIF, (3 << 6));
468 } 468 }
469 break; 469 break;
470 case SND_SOC_DAIFMT_LEFT_J: 470 case SND_SOC_DAIFMT_LEFT_J:
@@ -473,7 +473,7 @@ static int cs4265_pcm_hw_params(struct snd_pcm_substream *substream,
473 snd_soc_update_bits(codec, CS4265_ADC_CTL, 473 snd_soc_update_bits(codec, CS4265_ADC_CTL,
474 CS4265_ADC_DIF, 0); 474 CS4265_ADC_DIF, 0);
475 snd_soc_update_bits(codec, CS4265_SPDIF_CTL2, 475 snd_soc_update_bits(codec, CS4265_SPDIF_CTL2,
476 CS4265_SPDIF_CTL2_DIF, (1 << 6)); 476 CS4265_SPDIF_CTL2_DIF, 0);
477 477
478 break; 478 break;
479 default: 479 default:
diff --git a/sound/soc/codecs/pcm1681.c b/sound/soc/codecs/pcm1681.c
index 477e13d30971..e7ba557979cb 100644
--- a/sound/soc/codecs/pcm1681.c
+++ b/sound/soc/codecs/pcm1681.c
@@ -102,7 +102,7 @@ static int pcm1681_set_deemph(struct snd_soc_codec *codec)
102 102
103 if (val != -1) { 103 if (val != -1) {
104 regmap_update_bits(priv->regmap, PCM1681_DEEMPH_CONTROL, 104 regmap_update_bits(priv->regmap, PCM1681_DEEMPH_CONTROL,
105 PCM1681_DEEMPH_RATE_MASK, val); 105 PCM1681_DEEMPH_RATE_MASK, val << 3);
106 enable = 1; 106 enable = 1;
107 } else 107 } else
108 enable = 0; 108 enable = 0;
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index 9ce311e088fc..961bd7e5877e 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -2943,6 +2943,9 @@ static int rt5645_irq_detection(struct rt5645_priv *rt5645)
2943{ 2943{
2944 int val, btn_type, gpio_state = 0, report = 0; 2944 int val, btn_type, gpio_state = 0, report = 0;
2945 2945
2946 if (!rt5645->codec)
2947 return -EINVAL;
2948
2946 switch (rt5645->pdata.jd_mode) { 2949 switch (rt5645->pdata.jd_mode) {
2947 case 0: /* Not using rt5645 JD */ 2950 case 0: /* Not using rt5645 JD */
2948 if (rt5645->gpiod_hp_det) { 2951 if (rt5645->gpiod_hp_det) {
@@ -3338,6 +3341,8 @@ static int rt5645_i2c_probe(struct i2c_client *i2c,
3338 break; 3341 break;
3339 3342
3340 case RT5645_DMIC_DATA_GPIO5: 3343 case RT5645_DMIC_DATA_GPIO5:
3344 regmap_update_bits(rt5645->regmap, RT5645_GPIO_CTRL1,
3345 RT5645_I2S2_DAC_PIN_MASK, RT5645_I2S2_DAC_PIN_GPIO);
3341 regmap_update_bits(rt5645->regmap, RT5645_DMIC_CTRL1, 3346 regmap_update_bits(rt5645->regmap, RT5645_DMIC_CTRL1,
3342 RT5645_DMIC_1_DP_MASK, RT5645_DMIC_1_DP_GPIO5); 3347 RT5645_DMIC_1_DP_MASK, RT5645_DMIC_1_DP_GPIO5);
3343 regmap_update_bits(rt5645->regmap, RT5645_GPIO_CTRL1, 3348 regmap_update_bits(rt5645->regmap, RT5645_GPIO_CTRL1,
diff --git a/sound/soc/codecs/rt5645.h b/sound/soc/codecs/rt5645.h
index 0353a6a273ab..278bb9f464c4 100644
--- a/sound/soc/codecs/rt5645.h
+++ b/sound/soc/codecs/rt5645.h
@@ -1693,6 +1693,10 @@
1693#define RT5645_GP6_PIN_SFT 6 1693#define RT5645_GP6_PIN_SFT 6
1694#define RT5645_GP6_PIN_GPIO6 (0x0 << 6) 1694#define RT5645_GP6_PIN_GPIO6 (0x0 << 6)
1695#define RT5645_GP6_PIN_DMIC2_SDA (0x1 << 6) 1695#define RT5645_GP6_PIN_DMIC2_SDA (0x1 << 6)
1696#define RT5645_I2S2_DAC_PIN_MASK (0x1 << 4)
1697#define RT5645_I2S2_DAC_PIN_SFT 4
1698#define RT5645_I2S2_DAC_PIN_I2S (0x0 << 4)
1699#define RT5645_I2S2_DAC_PIN_GPIO (0x1 << 4)
1696#define RT5645_GP8_PIN_MASK (0x1 << 3) 1700#define RT5645_GP8_PIN_MASK (0x1 << 3)
1697#define RT5645_GP8_PIN_SFT 3 1701#define RT5645_GP8_PIN_SFT 3
1698#define RT5645_GP8_PIN_GPIO8 (0x0 << 3) 1702#define RT5645_GP8_PIN_GPIO8 (0x0 << 3)
diff --git a/sound/soc/codecs/sgtl5000.h b/sound/soc/codecs/sgtl5000.h
index bd7a344bf8c5..1c317de26176 100644
--- a/sound/soc/codecs/sgtl5000.h
+++ b/sound/soc/codecs/sgtl5000.h
@@ -275,7 +275,7 @@
275#define SGTL5000_BIAS_CTRL_MASK 0x000e 275#define SGTL5000_BIAS_CTRL_MASK 0x000e
276#define SGTL5000_BIAS_CTRL_SHIFT 1 276#define SGTL5000_BIAS_CTRL_SHIFT 1
277#define SGTL5000_BIAS_CTRL_WIDTH 3 277#define SGTL5000_BIAS_CTRL_WIDTH 3
278#define SGTL5000_SMALL_POP 0 278#define SGTL5000_SMALL_POP 1
279 279
280/* 280/*
281 * SGTL5000_CHIP_MIC_CTRL 281 * SGTL5000_CHIP_MIC_CTRL
diff --git a/sound/soc/codecs/ssm4567.c b/sound/soc/codecs/ssm4567.c
index 938d2cb6d78b..84a4f5ad8064 100644
--- a/sound/soc/codecs/ssm4567.c
+++ b/sound/soc/codecs/ssm4567.c
@@ -315,7 +315,13 @@ static int ssm4567_set_dai_fmt(struct snd_soc_dai *dai, unsigned int fmt)
315 if (invert_fclk) 315 if (invert_fclk)
316 ctrl1 |= SSM4567_SAI_CTRL_1_FSYNC; 316 ctrl1 |= SSM4567_SAI_CTRL_1_FSYNC;
317 317
318 return regmap_write(ssm4567->regmap, SSM4567_REG_SAI_CTRL_1, ctrl1); 318 return regmap_update_bits(ssm4567->regmap, SSM4567_REG_SAI_CTRL_1,
319 SSM4567_SAI_CTRL_1_BCLK |
320 SSM4567_SAI_CTRL_1_FSYNC |
321 SSM4567_SAI_CTRL_1_LJ |
322 SSM4567_SAI_CTRL_1_TDM |
323 SSM4567_SAI_CTRL_1_PDM,
324 ctrl1);
319} 325}
320 326
321static int ssm4567_set_power(struct ssm4567 *ssm4567, bool enable) 327static int ssm4567_set_power(struct ssm4567 *ssm4567, bool enable)
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index c7647e066cfd..c0b940e2019f 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -633,7 +633,7 @@ static int fsl_ssi_set_bclk(struct snd_pcm_substream *substream,
633 sub *= 100000; 633 sub *= 100000;
634 do_div(sub, freq); 634 do_div(sub, freq);
635 635
636 if (sub < savesub) { 636 if (sub < savesub && !(i == 0 && psr == 0 && div2 == 0)) {
637 baudrate = tmprate; 637 baudrate = tmprate;
638 savesub = sub; 638 savesub = sub;
639 pm = i; 639 pm = i;
diff --git a/sound/soc/intel/Makefile b/sound/soc/intel/Makefile
index 3853ec2ddbc7..6de5d5cd3280 100644
--- a/sound/soc/intel/Makefile
+++ b/sound/soc/intel/Makefile
@@ -7,4 +7,4 @@ obj-$(CONFIG_SND_SOC_INTEL_BAYTRAIL) += baytrail/
7obj-$(CONFIG_SND_SST_MFLD_PLATFORM) += atom/ 7obj-$(CONFIG_SND_SST_MFLD_PLATFORM) += atom/
8 8
9# Machine support 9# Machine support
10obj-$(CONFIG_SND_SOC_INTEL_SST) += boards/ 10obj-$(CONFIG_SND_SOC) += boards/
diff --git a/sound/soc/intel/atom/sst/sst_drv_interface.c b/sound/soc/intel/atom/sst/sst_drv_interface.c
index 620da1d1b9e3..0e0e4d9c021f 100644
--- a/sound/soc/intel/atom/sst/sst_drv_interface.c
+++ b/sound/soc/intel/atom/sst/sst_drv_interface.c
@@ -42,6 +42,11 @@
42#define MIN_FRAGMENT_SIZE (50 * 1024) 42#define MIN_FRAGMENT_SIZE (50 * 1024)
43#define MAX_FRAGMENT_SIZE (1024 * 1024) 43#define MAX_FRAGMENT_SIZE (1024 * 1024)
44#define SST_GET_BYTES_PER_SAMPLE(pcm_wd_sz) (((pcm_wd_sz + 15) >> 4) << 1) 44#define SST_GET_BYTES_PER_SAMPLE(pcm_wd_sz) (((pcm_wd_sz + 15) >> 4) << 1)
45#ifdef CONFIG_PM
46#define GET_USAGE_COUNT(dev) (atomic_read(&dev->power.usage_count))
47#else
48#define GET_USAGE_COUNT(dev) 1
49#endif
45 50
46int free_stream_context(struct intel_sst_drv *ctx, unsigned int str_id) 51int free_stream_context(struct intel_sst_drv *ctx, unsigned int str_id)
47{ 52{
@@ -141,15 +146,9 @@ static int sst_power_control(struct device *dev, bool state)
141 int ret = 0; 146 int ret = 0;
142 int usage_count = 0; 147 int usage_count = 0;
143 148
144#ifdef CONFIG_PM
145 usage_count = atomic_read(&dev->power.usage_count);
146#else
147 usage_count = 1;
148#endif
149
150 if (state == true) { 149 if (state == true) {
151 ret = pm_runtime_get_sync(dev); 150 ret = pm_runtime_get_sync(dev);
152 151 usage_count = GET_USAGE_COUNT(dev);
153 dev_dbg(ctx->dev, "Enable: pm usage count: %d\n", usage_count); 152 dev_dbg(ctx->dev, "Enable: pm usage count: %d\n", usage_count);
154 if (ret < 0) { 153 if (ret < 0) {
155 dev_err(ctx->dev, "Runtime get failed with err: %d\n", ret); 154 dev_err(ctx->dev, "Runtime get failed with err: %d\n", ret);
@@ -164,6 +163,7 @@ static int sst_power_control(struct device *dev, bool state)
164 } 163 }
165 } 164 }
166 } else { 165 } else {
166 usage_count = GET_USAGE_COUNT(dev);
167 dev_dbg(ctx->dev, "Disable: pm usage count: %d\n", usage_count); 167 dev_dbg(ctx->dev, "Disable: pm usage count: %d\n", usage_count);
168 return sst_pm_runtime_put(ctx); 168 return sst_pm_runtime_put(ctx);
169 } 169 }
diff --git a/sound/soc/intel/baytrail/sst-baytrail-ipc.c b/sound/soc/intel/baytrail/sst-baytrail-ipc.c
index 4c01bb43928d..5bbaa667bec1 100644
--- a/sound/soc/intel/baytrail/sst-baytrail-ipc.c
+++ b/sound/soc/intel/baytrail/sst-baytrail-ipc.c
@@ -701,6 +701,8 @@ int sst_byt_dsp_init(struct device *dev, struct sst_pdata *pdata)
701 if (byt == NULL) 701 if (byt == NULL)
702 return -ENOMEM; 702 return -ENOMEM;
703 703
704 byt->dev = dev;
705
704 ipc = &byt->ipc; 706 ipc = &byt->ipc;
705 ipc->dev = dev; 707 ipc->dev = dev;
706 ipc->ops.tx_msg = byt_tx_msg; 708 ipc->ops.tx_msg = byt_tx_msg;
diff --git a/sound/soc/intel/boards/cht_bsw_max98090_ti.c b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
index d604ee80eda4..70f832114a5a 100644
--- a/sound/soc/intel/boards/cht_bsw_max98090_ti.c
+++ b/sound/soc/intel/boards/cht_bsw_max98090_ti.c
@@ -69,12 +69,12 @@ static const struct snd_soc_dapm_route cht_audio_map[] = {
69 {"Headphone", NULL, "HPR"}, 69 {"Headphone", NULL, "HPR"},
70 {"Ext Spk", NULL, "SPKL"}, 70 {"Ext Spk", NULL, "SPKL"},
71 {"Ext Spk", NULL, "SPKR"}, 71 {"Ext Spk", NULL, "SPKR"},
72 {"AIF1 Playback", NULL, "ssp2 Tx"}, 72 {"HiFi Playback", NULL, "ssp2 Tx"},
73 {"ssp2 Tx", NULL, "codec_out0"}, 73 {"ssp2 Tx", NULL, "codec_out0"},
74 {"ssp2 Tx", NULL, "codec_out1"}, 74 {"ssp2 Tx", NULL, "codec_out1"},
75 {"codec_in0", NULL, "ssp2 Rx" }, 75 {"codec_in0", NULL, "ssp2 Rx" },
76 {"codec_in1", NULL, "ssp2 Rx" }, 76 {"codec_in1", NULL, "ssp2 Rx" },
77 {"ssp2 Rx", NULL, "AIF1 Capture"}, 77 {"ssp2 Rx", NULL, "HiFi Capture"},
78}; 78};
79 79
80static const struct snd_kcontrol_new cht_mc_controls[] = { 80static const struct snd_kcontrol_new cht_mc_controls[] = {
diff --git a/sound/soc/intel/haswell/sst-haswell-ipc.c b/sound/soc/intel/haswell/sst-haswell-ipc.c
index f95f271aab0c..f6efa9d4acad 100644
--- a/sound/soc/intel/haswell/sst-haswell-ipc.c
+++ b/sound/soc/intel/haswell/sst-haswell-ipc.c
@@ -2119,6 +2119,8 @@ int sst_hsw_dsp_init(struct device *dev, struct sst_pdata *pdata)
2119 if (hsw == NULL) 2119 if (hsw == NULL)
2120 return -ENOMEM; 2120 return -ENOMEM;
2121 2121
2122 hsw->dev = dev;
2123
2122 ipc = &hsw->ipc; 2124 ipc = &hsw->ipc;
2123 ipc->dev = dev; 2125 ipc->dev = dev;
2124 ipc->ops.tx_msg = hsw_tx_msg; 2126 ipc->ops.tx_msg = hsw_tx_msg;
diff --git a/sound/soc/mediatek/mt8173-max98090.c b/sound/soc/mediatek/mt8173-max98090.c
index 4d44b5803e55..2d2536af141f 100644
--- a/sound/soc/mediatek/mt8173-max98090.c
+++ b/sound/soc/mediatek/mt8173-max98090.c
@@ -103,7 +103,6 @@ static struct snd_soc_dai_link mt8173_max98090_dais[] = {
103 .name = "MAX98090 Playback", 103 .name = "MAX98090 Playback",
104 .stream_name = "MAX98090 Playback", 104 .stream_name = "MAX98090 Playback",
105 .cpu_dai_name = "DL1", 105 .cpu_dai_name = "DL1",
106 .platform_name = "11220000.mt8173-afe-pcm",
107 .codec_name = "snd-soc-dummy", 106 .codec_name = "snd-soc-dummy",
108 .codec_dai_name = "snd-soc-dummy-dai", 107 .codec_dai_name = "snd-soc-dummy-dai",
109 .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, 108 .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
@@ -114,7 +113,6 @@ static struct snd_soc_dai_link mt8173_max98090_dais[] = {
114 .name = "MAX98090 Capture", 113 .name = "MAX98090 Capture",
115 .stream_name = "MAX98090 Capture", 114 .stream_name = "MAX98090 Capture",
116 .cpu_dai_name = "VUL", 115 .cpu_dai_name = "VUL",
117 .platform_name = "11220000.mt8173-afe-pcm",
118 .codec_name = "snd-soc-dummy", 116 .codec_name = "snd-soc-dummy",
119 .codec_dai_name = "snd-soc-dummy-dai", 117 .codec_dai_name = "snd-soc-dummy-dai",
120 .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, 118 .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
@@ -125,7 +123,6 @@ static struct snd_soc_dai_link mt8173_max98090_dais[] = {
125 { 123 {
126 .name = "Codec", 124 .name = "Codec",
127 .cpu_dai_name = "I2S", 125 .cpu_dai_name = "I2S",
128 .platform_name = "11220000.mt8173-afe-pcm",
129 .no_pcm = 1, 126 .no_pcm = 1,
130 .codec_dai_name = "HiFi", 127 .codec_dai_name = "HiFi",
131 .init = mt8173_max98090_init, 128 .init = mt8173_max98090_init,
@@ -152,9 +149,21 @@ static struct snd_soc_card mt8173_max98090_card = {
152static int mt8173_max98090_dev_probe(struct platform_device *pdev) 149static int mt8173_max98090_dev_probe(struct platform_device *pdev)
153{ 150{
154 struct snd_soc_card *card = &mt8173_max98090_card; 151 struct snd_soc_card *card = &mt8173_max98090_card;
155 struct device_node *codec_node; 152 struct device_node *codec_node, *platform_node;
156 int ret, i; 153 int ret, i;
157 154
155 platform_node = of_parse_phandle(pdev->dev.of_node,
156 "mediatek,platform", 0);
157 if (!platform_node) {
158 dev_err(&pdev->dev, "Property 'platform' missing or invalid\n");
159 return -EINVAL;
160 }
161 for (i = 0; i < card->num_links; i++) {
162 if (mt8173_max98090_dais[i].platform_name)
163 continue;
164 mt8173_max98090_dais[i].platform_of_node = platform_node;
165 }
166
158 codec_node = of_parse_phandle(pdev->dev.of_node, 167 codec_node = of_parse_phandle(pdev->dev.of_node,
159 "mediatek,audio-codec", 0); 168 "mediatek,audio-codec", 0);
160 if (!codec_node) { 169 if (!codec_node) {
diff --git a/sound/soc/mediatek/mt8173-rt5650-rt5676.c b/sound/soc/mediatek/mt8173-rt5650-rt5676.c
index 094055323059..6f52eca05e26 100644
--- a/sound/soc/mediatek/mt8173-rt5650-rt5676.c
+++ b/sound/soc/mediatek/mt8173-rt5650-rt5676.c
@@ -138,7 +138,6 @@ static struct snd_soc_dai_link mt8173_rt5650_rt5676_dais[] = {
138 .name = "rt5650_rt5676 Playback", 138 .name = "rt5650_rt5676 Playback",
139 .stream_name = "rt5650_rt5676 Playback", 139 .stream_name = "rt5650_rt5676 Playback",
140 .cpu_dai_name = "DL1", 140 .cpu_dai_name = "DL1",
141 .platform_name = "11220000.mt8173-afe-pcm",
142 .codec_name = "snd-soc-dummy", 141 .codec_name = "snd-soc-dummy",
143 .codec_dai_name = "snd-soc-dummy-dai", 142 .codec_dai_name = "snd-soc-dummy-dai",
144 .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, 143 .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
@@ -149,7 +148,6 @@ static struct snd_soc_dai_link mt8173_rt5650_rt5676_dais[] = {
149 .name = "rt5650_rt5676 Capture", 148 .name = "rt5650_rt5676 Capture",
150 .stream_name = "rt5650_rt5676 Capture", 149 .stream_name = "rt5650_rt5676 Capture",
151 .cpu_dai_name = "VUL", 150 .cpu_dai_name = "VUL",
152 .platform_name = "11220000.mt8173-afe-pcm",
153 .codec_name = "snd-soc-dummy", 151 .codec_name = "snd-soc-dummy",
154 .codec_dai_name = "snd-soc-dummy-dai", 152 .codec_dai_name = "snd-soc-dummy-dai",
155 .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST}, 153 .trigger = {SND_SOC_DPCM_TRIGGER_POST, SND_SOC_DPCM_TRIGGER_POST},
@@ -161,7 +159,6 @@ static struct snd_soc_dai_link mt8173_rt5650_rt5676_dais[] = {
161 { 159 {
162 .name = "Codec", 160 .name = "Codec",
163 .cpu_dai_name = "I2S", 161 .cpu_dai_name = "I2S",
164 .platform_name = "11220000.mt8173-afe-pcm",
165 .no_pcm = 1, 162 .no_pcm = 1,
166 .codecs = mt8173_rt5650_rt5676_codecs, 163 .codecs = mt8173_rt5650_rt5676_codecs,
167 .num_codecs = 2, 164 .num_codecs = 2,
@@ -209,7 +206,21 @@ static struct snd_soc_card mt8173_rt5650_rt5676_card = {
209static int mt8173_rt5650_rt5676_dev_probe(struct platform_device *pdev) 206static int mt8173_rt5650_rt5676_dev_probe(struct platform_device *pdev)
210{ 207{
211 struct snd_soc_card *card = &mt8173_rt5650_rt5676_card; 208 struct snd_soc_card *card = &mt8173_rt5650_rt5676_card;
212 int ret; 209 struct device_node *platform_node;
210 int i, ret;
211
212 platform_node = of_parse_phandle(pdev->dev.of_node,
213 "mediatek,platform", 0);
214 if (!platform_node) {
215 dev_err(&pdev->dev, "Property 'platform' missing or invalid\n");
216 return -EINVAL;
217 }
218
219 for (i = 0; i < card->num_links; i++) {
220 if (mt8173_rt5650_rt5676_dais[i].platform_name)
221 continue;
222 mt8173_rt5650_rt5676_dais[i].platform_of_node = platform_node;
223 }
213 224
214 mt8173_rt5650_rt5676_codecs[0].of_node = 225 mt8173_rt5650_rt5676_codecs[0].of_node =
215 of_parse_phandle(pdev->dev.of_node, "mediatek,audio-codec", 0); 226 of_parse_phandle(pdev->dev.of_node, "mediatek,audio-codec", 0);
diff --git a/sound/soc/mediatek/mtk-afe-pcm.c b/sound/soc/mediatek/mtk-afe-pcm.c
index cc228db5fb76..9863da73dfe0 100644
--- a/sound/soc/mediatek/mtk-afe-pcm.c
+++ b/sound/soc/mediatek/mtk-afe-pcm.c
@@ -1199,6 +1199,8 @@ err_pm_disable:
1199static int mtk_afe_pcm_dev_remove(struct platform_device *pdev) 1199static int mtk_afe_pcm_dev_remove(struct platform_device *pdev)
1200{ 1200{
1201 pm_runtime_disable(&pdev->dev); 1201 pm_runtime_disable(&pdev->dev);
1202 if (!pm_runtime_status_suspended(&pdev->dev))
1203 mtk_afe_runtime_suspend(&pdev->dev);
1202 snd_soc_unregister_component(&pdev->dev); 1204 snd_soc_unregister_component(&pdev->dev);
1203 snd_soc_unregister_platform(&pdev->dev); 1205 snd_soc_unregister_platform(&pdev->dev);
1204 return 0; 1206 return 0;
diff --git a/sound/soc/soc-core.c b/sound/soc/soc-core.c
index 3a4a5c0e3f97..0e1e69c7abd5 100644
--- a/sound/soc/soc-core.c
+++ b/sound/soc/soc-core.c
@@ -1716,6 +1716,7 @@ card_probe_error:
1716 if (card->remove) 1716 if (card->remove)
1717 card->remove(card); 1717 card->remove(card);
1718 1718
1719 snd_soc_dapm_free(&card->dapm);
1719 soc_cleanup_card_debugfs(card); 1720 soc_cleanup_card_debugfs(card);
1720 snd_card_free(card->snd_card); 1721 snd_card_free(card->snd_card);
1721 1722
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index aa327c92480c..e0de8072c514 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -358,9 +358,10 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,
358 data->widget = 358 data->widget =
359 snd_soc_dapm_new_control_unlocked(widget->dapm, 359 snd_soc_dapm_new_control_unlocked(widget->dapm,
360 &template); 360 &template);
361 kfree(name);
361 if (!data->widget) { 362 if (!data->widget) {
362 ret = -ENOMEM; 363 ret = -ENOMEM;
363 goto err_name; 364 goto err_data;
364 } 365 }
365 } 366 }
366 break; 367 break;
@@ -389,11 +390,12 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,
389 390
390 data->value = template.on_val; 391 data->value = template.on_val;
391 392
392 data->widget = snd_soc_dapm_new_control(widget->dapm, 393 data->widget = snd_soc_dapm_new_control_unlocked(
393 &template); 394 widget->dapm, &template);
395 kfree(name);
394 if (!data->widget) { 396 if (!data->widget) {
395 ret = -ENOMEM; 397 ret = -ENOMEM;
396 goto err_name; 398 goto err_data;
397 } 399 }
398 400
399 snd_soc_dapm_add_path(widget->dapm, data->widget, 401 snd_soc_dapm_add_path(widget->dapm, data->widget,
@@ -408,8 +410,6 @@ static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,
408 410
409 return 0; 411 return 0;
410 412
411err_name:
412 kfree(name);
413err_data: 413err_data:
414 kfree(data); 414 kfree(data);
415 return ret; 415 return ret;
@@ -418,8 +418,6 @@ err_data:
418static void dapm_kcontrol_free(struct snd_kcontrol *kctl) 418static void dapm_kcontrol_free(struct snd_kcontrol *kctl)
419{ 419{
420 struct dapm_kcontrol_data *data = snd_kcontrol_chip(kctl); 420 struct dapm_kcontrol_data *data = snd_kcontrol_chip(kctl);
421 if (data->widget)
422 kfree(data->widget->name);
423 kfree(data->wlist); 421 kfree(data->wlist);
424 kfree(data); 422 kfree(data);
425} 423}
@@ -1952,6 +1950,7 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
1952 size_t count, loff_t *ppos) 1950 size_t count, loff_t *ppos)
1953{ 1951{
1954 struct snd_soc_dapm_widget *w = file->private_data; 1952 struct snd_soc_dapm_widget *w = file->private_data;
1953 struct snd_soc_card *card = w->dapm->card;
1955 char *buf; 1954 char *buf;
1956 int in, out; 1955 int in, out;
1957 ssize_t ret; 1956 ssize_t ret;
@@ -1961,6 +1960,8 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
1961 if (!buf) 1960 if (!buf)
1962 return -ENOMEM; 1961 return -ENOMEM;
1963 1962
1963 mutex_lock(&card->dapm_mutex);
1964
1964 /* Supply widgets are not handled by is_connected_{input,output}_ep() */ 1965 /* Supply widgets are not handled by is_connected_{input,output}_ep() */
1965 if (w->is_supply) { 1966 if (w->is_supply) {
1966 in = 0; 1967 in = 0;
@@ -2007,6 +2008,8 @@ static ssize_t dapm_widget_power_read_file(struct file *file,
2007 p->sink->name); 2008 p->sink->name);
2008 } 2009 }
2009 2010
2011 mutex_unlock(&card->dapm_mutex);
2012
2010 ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret); 2013 ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
2011 2014
2012 kfree(buf); 2015 kfree(buf);
@@ -2281,11 +2284,15 @@ static ssize_t dapm_widget_show(struct device *dev,
2281 struct snd_soc_pcm_runtime *rtd = dev_get_drvdata(dev); 2284 struct snd_soc_pcm_runtime *rtd = dev_get_drvdata(dev);
2282 int i, count = 0; 2285 int i, count = 0;
2283 2286
2287 mutex_lock(&rtd->card->dapm_mutex);
2288
2284 for (i = 0; i < rtd->num_codecs; i++) { 2289 for (i = 0; i < rtd->num_codecs; i++) {
2285 struct snd_soc_codec *codec = rtd->codec_dais[i]->codec; 2290 struct snd_soc_codec *codec = rtd->codec_dais[i]->codec;
2286 count += dapm_widget_show_codec(codec, buf + count); 2291 count += dapm_widget_show_codec(codec, buf + count);
2287 } 2292 }
2288 2293
2294 mutex_unlock(&rtd->card->dapm_mutex);
2295
2289 return count; 2296 return count;
2290} 2297}
2291 2298
@@ -3334,16 +3341,10 @@ snd_soc_dapm_new_control_unlocked(struct snd_soc_dapm_context *dapm,
3334 } 3341 }
3335 3342
3336 prefix = soc_dapm_prefix(dapm); 3343 prefix = soc_dapm_prefix(dapm);
3337 if (prefix) { 3344 if (prefix)
3338 w->name = kasprintf(GFP_KERNEL, "%s %s", prefix, widget->name); 3345 w->name = kasprintf(GFP_KERNEL, "%s %s", prefix, widget->name);
3339 if (widget->sname) 3346 else
3340 w->sname = kasprintf(GFP_KERNEL, "%s %s", prefix,
3341 widget->sname);
3342 } else {
3343 w->name = kasprintf(GFP_KERNEL, "%s", widget->name); 3347 w->name = kasprintf(GFP_KERNEL, "%s", widget->name);
3344 if (widget->sname)
3345 w->sname = kasprintf(GFP_KERNEL, "%s", widget->sname);
3346 }
3347 if (w->name == NULL) { 3348 if (w->name == NULL) {
3348 kfree(w); 3349 kfree(w);
3349 return NULL; 3350 return NULL;
@@ -3792,7 +3793,7 @@ int snd_soc_dapm_link_dai_widgets(struct snd_soc_card *card)
3792 break; 3793 break;
3793 } 3794 }
3794 3795
3795 if (!w->sname || !strstr(w->sname, dai_w->name)) 3796 if (!w->sname || !strstr(w->sname, dai_w->sname))
3796 continue; 3797 continue;
3797 3798
3798 if (dai_w->id == snd_soc_dapm_dai_in) { 3799 if (dai_w->id == snd_soc_dapm_dai_in) {
diff --git a/sound/soc/soc-topology.c b/sound/soc/soc-topology.c
index d0960683c409..31068b8f3db0 100644
--- a/sound/soc/soc-topology.c
+++ b/sound/soc/soc-topology.c
@@ -33,6 +33,7 @@
33#include <sound/soc.h> 33#include <sound/soc.h>
34#include <sound/soc-dapm.h> 34#include <sound/soc-dapm.h>
35#include <sound/soc-topology.h> 35#include <sound/soc-topology.h>
36#include <sound/tlv.h>
36 37
37/* 38/*
38 * We make several passes over the data (since it wont necessarily be ordered) 39 * We make several passes over the data (since it wont necessarily be ordered)
@@ -144,7 +145,7 @@ static const struct snd_soc_tplg_kcontrol_ops io_ops[] = {
144 {SND_SOC_TPLG_CTL_STROBE, snd_soc_get_strobe, 145 {SND_SOC_TPLG_CTL_STROBE, snd_soc_get_strobe,
145 snd_soc_put_strobe, NULL}, 146 snd_soc_put_strobe, NULL},
146 {SND_SOC_TPLG_DAPM_CTL_VOLSW, snd_soc_dapm_get_volsw, 147 {SND_SOC_TPLG_DAPM_CTL_VOLSW, snd_soc_dapm_get_volsw,
147 snd_soc_dapm_put_volsw, NULL}, 148 snd_soc_dapm_put_volsw, snd_soc_info_volsw},
148 {SND_SOC_TPLG_DAPM_CTL_ENUM_DOUBLE, snd_soc_dapm_get_enum_double, 149 {SND_SOC_TPLG_DAPM_CTL_ENUM_DOUBLE, snd_soc_dapm_get_enum_double,
149 snd_soc_dapm_put_enum_double, snd_soc_info_enum_double}, 150 snd_soc_dapm_put_enum_double, snd_soc_info_enum_double},
150 {SND_SOC_TPLG_DAPM_CTL_ENUM_VIRT, snd_soc_dapm_get_enum_double, 151 {SND_SOC_TPLG_DAPM_CTL_ENUM_VIRT, snd_soc_dapm_get_enum_double,
@@ -534,7 +535,7 @@ static int soc_tplg_kcontrol_bind_io(struct snd_soc_tplg_ctl_hdr *hdr,
534 k->put = bops[i].put; 535 k->put = bops[i].put;
535 if (k->get == NULL && bops[i].id == hdr->ops.get) 536 if (k->get == NULL && bops[i].id == hdr->ops.get)
536 k->get = bops[i].get; 537 k->get = bops[i].get;
537 if (k->info == NULL && ops[i].id == hdr->ops.info) 538 if (k->info == NULL && bops[i].id == hdr->ops.info)
538 k->info = bops[i].info; 539 k->info = bops[i].info;
539 } 540 }
540 541
@@ -579,29 +580,51 @@ static int soc_tplg_init_kcontrol(struct soc_tplg *tplg,
579 return 0; 580 return 0;
580} 581}
581 582
583
584static int soc_tplg_create_tlv_db_scale(struct soc_tplg *tplg,
585 struct snd_kcontrol_new *kc, struct snd_soc_tplg_tlv_dbscale *scale)
586{
587 unsigned int item_len = 2 * sizeof(unsigned int);
588 unsigned int *p;
589
590 p = kzalloc(item_len + 2 * sizeof(unsigned int), GFP_KERNEL);
591 if (!p)
592 return -ENOMEM;
593
594 p[0] = SNDRV_CTL_TLVT_DB_SCALE;
595 p[1] = item_len;
596 p[2] = scale->min;
597 p[3] = (scale->step & TLV_DB_SCALE_MASK)
598 | (scale->mute ? TLV_DB_SCALE_MUTE : 0);
599
600 kc->tlv.p = (void *)p;
601 return 0;
602}
603
582static int soc_tplg_create_tlv(struct soc_tplg *tplg, 604static int soc_tplg_create_tlv(struct soc_tplg *tplg,
583 struct snd_kcontrol_new *kc, u32 tlv_size) 605 struct snd_kcontrol_new *kc, struct snd_soc_tplg_ctl_hdr *tc)
584{ 606{
585 struct snd_soc_tplg_ctl_tlv *tplg_tlv; 607 struct snd_soc_tplg_ctl_tlv *tplg_tlv;
586 struct snd_ctl_tlv *tlv;
587 608
588 if (tlv_size == 0) 609 if (!(tc->access & SNDRV_CTL_ELEM_ACCESS_TLV_READWRITE))
589 return 0; 610 return 0;
590 611
591 tplg_tlv = (struct snd_soc_tplg_ctl_tlv *) tplg->pos; 612 if (tc->access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) {
592 tplg->pos += tlv_size; 613 kc->tlv.c = snd_soc_bytes_tlv_callback;
593 614 } else {
594 tlv = kzalloc(sizeof(*tlv) + tlv_size, GFP_KERNEL); 615 tplg_tlv = &tc->tlv;
595 if (tlv == NULL) 616 switch (tplg_tlv->type) {
596 return -ENOMEM; 617 case SNDRV_CTL_TLVT_DB_SCALE:
597 618 return soc_tplg_create_tlv_db_scale(tplg, kc,
598 dev_dbg(tplg->dev, " created TLV type %d size %d bytes\n", 619 &tplg_tlv->scale);
599 tplg_tlv->numid, tplg_tlv->size);
600 620
601 tlv->numid = tplg_tlv->numid; 621 /* TODO: add support for other TLV types */
602 tlv->length = tplg_tlv->size; 622 default:
603 memcpy(tlv->tlv, tplg_tlv + 1, tplg_tlv->size); 623 dev_dbg(tplg->dev, "Unsupported TLV type %d\n",
604 kc->tlv.p = (void *)tlv; 624 tplg_tlv->type);
625 return -EINVAL;
626 }
627 }
605 628
606 return 0; 629 return 0;
607} 630}
@@ -773,7 +796,7 @@ static int soc_tplg_dmixer_create(struct soc_tplg *tplg, unsigned int count,
773 } 796 }
774 797
775 /* create any TLV data */ 798 /* create any TLV data */
776 soc_tplg_create_tlv(tplg, &kc, mc->hdr.tlv_size); 799 soc_tplg_create_tlv(tplg, &kc, &mc->hdr);
777 800
778 /* register control here */ 801 /* register control here */
779 err = soc_tplg_add_kcontrol(tplg, &kc, 802 err = soc_tplg_add_kcontrol(tplg, &kc,
@@ -1351,6 +1374,7 @@ static int soc_tplg_dapm_widget_create(struct soc_tplg *tplg,
1351 template.reg = w->reg; 1374 template.reg = w->reg;
1352 template.shift = w->shift; 1375 template.shift = w->shift;
1353 template.mask = w->mask; 1376 template.mask = w->mask;
1377 template.subseq = w->subseq;
1354 template.on_val = w->invert ? 0 : 1; 1378 template.on_val = w->invert ? 0 : 1;
1355 template.off_val = w->invert ? 1 : 0; 1379 template.off_val = w->invert ? 1 : 0;
1356 template.ignore_suspend = w->ignore_suspend; 1380 template.ignore_suspend = w->ignore_suspend;
diff --git a/sound/soc/zte/zx296702-i2s.c b/sound/soc/zte/zx296702-i2s.c
index 98d96e1b17e0..1930c42e1f55 100644
--- a/sound/soc/zte/zx296702-i2s.c
+++ b/sound/soc/zte/zx296702-i2s.c
@@ -393,9 +393,9 @@ static int zx_i2s_probe(struct platform_device *pdev)
393 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 393 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
394 zx_i2s->mapbase = res->start; 394 zx_i2s->mapbase = res->start;
395 zx_i2s->reg_base = devm_ioremap_resource(&pdev->dev, res); 395 zx_i2s->reg_base = devm_ioremap_resource(&pdev->dev, res);
396 if (!zx_i2s->reg_base) { 396 if (IS_ERR(zx_i2s->reg_base)) {
397 dev_err(&pdev->dev, "ioremap failed!\n"); 397 dev_err(&pdev->dev, "ioremap failed!\n");
398 return -EIO; 398 return PTR_ERR(zx_i2s->reg_base);
399 } 399 }
400 400
401 writel_relaxed(0, zx_i2s->reg_base + ZX_I2S_FIFO_CTRL); 401 writel_relaxed(0, zx_i2s->reg_base + ZX_I2S_FIFO_CTRL);
diff --git a/sound/soc/zte/zx296702-spdif.c b/sound/soc/zte/zx296702-spdif.c
index 11a0e46a1156..26265ce4caca 100644
--- a/sound/soc/zte/zx296702-spdif.c
+++ b/sound/soc/zte/zx296702-spdif.c
@@ -322,9 +322,9 @@ static int zx_spdif_probe(struct platform_device *pdev)
322 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 322 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
323 zx_spdif->mapbase = res->start; 323 zx_spdif->mapbase = res->start;
324 zx_spdif->reg_base = devm_ioremap_resource(&pdev->dev, res); 324 zx_spdif->reg_base = devm_ioremap_resource(&pdev->dev, res);
325 if (!zx_spdif->reg_base) { 325 if (IS_ERR(zx_spdif->reg_base)) {
326 dev_err(&pdev->dev, "ioremap failed!\n"); 326 dev_err(&pdev->dev, "ioremap failed!\n");
327 return -EIO; 327 return PTR_ERR(zx_spdif->reg_base);
328 } 328 }
329 329
330 zx_spdif_dev_init(zx_spdif->reg_base); 330 zx_spdif_dev_init(zx_spdif->reg_base);
diff --git a/sound/sparc/amd7930.c b/sound/sparc/amd7930.c
index 1b1a89e80d13..784ceb85b2d9 100644
--- a/sound/sparc/amd7930.c
+++ b/sound/sparc/amd7930.c
@@ -956,6 +956,7 @@ static int snd_amd7930_create(struct snd_card *card,
956 if (!amd->regs) { 956 if (!amd->regs) {
957 snd_printk(KERN_ERR 957 snd_printk(KERN_ERR
958 "amd7930-%d: Unable to map chip registers.\n", dev); 958 "amd7930-%d: Unable to map chip registers.\n", dev);
959 kfree(amd);
959 return -EIO; 960 return -EIO;
960 } 961 }
961 962
diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
index e5000da9e9d7..6a803eff87f7 100644
--- a/sound/usb/mixer_maps.c
+++ b/sound/usb/mixer_maps.c
@@ -341,6 +341,20 @@ static const struct usbmix_name_map scms_usb3318_map[] = {
341 { 0 } 341 { 0 }
342}; 342};
343 343
344/* Bose companion 5, the dB conversion factor is 16 instead of 256 */
345static struct usbmix_dB_map bose_companion5_dB = {-5006, -6};
346static struct usbmix_name_map bose_companion5_map[] = {
347 { 3, NULL, .dB = &bose_companion5_dB },
348 { 0 } /* terminator */
349};
350
351/* Dragonfly DAC 1.2, the dB conversion factor is 1 instead of 256 */
352static struct usbmix_dB_map dragonfly_1_2_dB = {0, 5000};
353static struct usbmix_name_map dragonfly_1_2_map[] = {
354 { 7, NULL, .dB = &dragonfly_1_2_dB },
355 { 0 } /* terminator */
356};
357
344/* 358/*
345 * Control map entries 359 * Control map entries
346 */ 360 */
@@ -451,6 +465,16 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
451 .id = USB_ID(0x25c4, 0x0003), 465 .id = USB_ID(0x25c4, 0x0003),
452 .map = scms_usb3318_map, 466 .map = scms_usb3318_map,
453 }, 467 },
468 {
469 /* Bose Companion 5 */
470 .id = USB_ID(0x05a7, 0x1020),
471 .map = bose_companion5_map,
472 },
473 {
474 /* Dragonfly DAC 1.2 */
475 .id = USB_ID(0x21b4, 0x0081),
476 .map = dragonfly_1_2_map,
477 },
454 { 0 } /* terminator */ 478 { 0 } /* terminator */
455}; 479};
456 480
diff --git a/tools/perf/config/Makefile b/tools/perf/config/Makefile
index 094ddaee104c..d31fac19c30b 100644
--- a/tools/perf/config/Makefile
+++ b/tools/perf/config/Makefile
@@ -638,7 +638,7 @@ ifndef DESTDIR
638prefix ?= $(HOME) 638prefix ?= $(HOME)
639endif 639endif
640bindir_relative = bin 640bindir_relative = bin
641bindir = $(prefix)/$(bindir_relative) 641bindir = $(abspath $(prefix)/$(bindir_relative))
642mandir = share/man 642mandir = share/man
643infodir = share/info 643infodir = share/info
644perfexecdir = libexec/perf-core 644perfexecdir = libexec/perf-core
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
index 53e8bb7bc852..2a5d8d7698ae 100644
--- a/tools/perf/util/stat-shadow.c
+++ b/tools/perf/util/stat-shadow.c
@@ -85,7 +85,7 @@ void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 *count,
85 else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES)) 85 else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
86 update_stats(&runtime_cycles_stats[ctx][cpu], count[0]); 86 update_stats(&runtime_cycles_stats[ctx][cpu], count[0]);
87 else if (perf_stat_evsel__is(counter, CYCLES_IN_TX)) 87 else if (perf_stat_evsel__is(counter, CYCLES_IN_TX))
88 update_stats(&runtime_transaction_stats[ctx][cpu], count[0]); 88 update_stats(&runtime_cycles_in_tx_stats[ctx][cpu], count[0]);
89 else if (perf_stat_evsel__is(counter, TRANSACTION_START)) 89 else if (perf_stat_evsel__is(counter, TRANSACTION_START))
90 update_stats(&runtime_transaction_stats[ctx][cpu], count[0]); 90 update_stats(&runtime_transaction_stats[ctx][cpu], count[0]);
91 else if (perf_stat_evsel__is(counter, ELISION_START)) 91 else if (perf_stat_evsel__is(counter, ELISION_START))
@@ -398,20 +398,18 @@ void perf_stat__print_shadow_stats(FILE *out, struct perf_evsel *evsel,
398 " # %5.2f%% aborted cycles ", 398 " # %5.2f%% aborted cycles ",
399 100.0 * ((total2-avg) / total)); 399 100.0 * ((total2-avg) / total));
400 } else if (perf_stat_evsel__is(evsel, TRANSACTION_START) && 400 } else if (perf_stat_evsel__is(evsel, TRANSACTION_START) &&
401 avg > 0 &&
402 runtime_cycles_in_tx_stats[ctx][cpu].n != 0) { 401 runtime_cycles_in_tx_stats[ctx][cpu].n != 0) {
403 total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]); 402 total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);
404 403
405 if (total) 404 if (avg)
406 ratio = total / avg; 405 ratio = total / avg;
407 406
408 fprintf(out, " # %8.0f cycles / transaction ", ratio); 407 fprintf(out, " # %8.0f cycles / transaction ", ratio);
409 } else if (perf_stat_evsel__is(evsel, ELISION_START) && 408 } else if (perf_stat_evsel__is(evsel, ELISION_START) &&
410 avg > 0 &&
411 runtime_cycles_in_tx_stats[ctx][cpu].n != 0) { 409 runtime_cycles_in_tx_stats[ctx][cpu].n != 0) {
412 total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]); 410 total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);
413 411
414 if (total) 412 if (avg)
415 ratio = total / avg; 413 ratio = total / avg;
416 414
417 fprintf(out, " # %8.0f cycles / elision ", ratio); 415 fprintf(out, " # %8.0f cycles / elision ", ratio);
diff --git a/tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c b/tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c
index 7f0c756993af..3d7dc6afc3f8 100644
--- a/tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c
+++ b/tools/testing/selftests/futex/functional/futex_requeue_pi_signal_restart.c
@@ -191,7 +191,7 @@ int main(int argc, char *argv[])
191 if (res > 0) { 191 if (res > 0) {
192 atomic_set(&requeued, 1); 192 atomic_set(&requeued, 1);
193 break; 193 break;
194 } else if (res > 0) { 194 } else if (res < 0) {
195 error("FUTEX_CMP_REQUEUE_PI failed\n", errno); 195 error("FUTEX_CMP_REQUEUE_PI failed\n", errno);
196 ret = RET_ERROR; 196 ret = RET_ERROR;
197 break; 197 break;